code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import cv2
import keras.backend as K
import numpy as np
import tensorflow as tf
from keras.layers import Conv2D
from keras.models import Model
from app.main.Actions import Actions
from app.models.model_factory import get_model
class Visualizer(Actions):
y = None
y_hat = None
FONT = cv2.FONT_HERSHEY_SIMPLEX
model: Model = []
test_generator = None
def __init__(self, config_file, weight):
super().__init__(config_file)
if weight is not None:
self.weight = weight
else:
self.MDConfig.use_trained_model_weights = True
self.weight = self.MDConfig.trained_model_weights
print(f"** Visualize weight file: {self.weight}")
def prepare_model(self):
print("** load model **")
self.MDConfig.use_trained_model_weights = True
print(f"** Trained Model = {self.MDConfig.trained_model_weights} **")
self.model = get_model(self.DSConfig.class_names, weights_path=self.MDConfig.trained_model_weights,
image_dimension=self.IMConfig.img_dim, color_mode=self.IMConfig.color_mode,
class_mode=self.DSConfig.class_mode)
def kernel_visualize(self):
self.prepare_model()
if self.MDConfig.show_model_summary:
self.model.summary()
layer: Conv2D = self.model.get_layer("conv1/conv")
weights: tf.Variable = layer.weights[0]
weights = np.array(K.eval(weights))
weights -= np.min(weights)
weights /= np.max(weights)
weights *= 255
weights_mosaic = np.zeros((56, 56, 3))
for i in range(8):
for j in range(8):
weights_mosaic[i * 7:(i + 1) * 7, j * 7:(j + 1) * 7, :] = weights[:, :, :, i * 8 + j].squeeze()
weights_mosaic = cv2.resize(weights_mosaic, (1024, 1024))
cv2.imwrite("kernels.bmp", weights_mosaic)
| [
"cv2.resize",
"cv2.imwrite",
"numpy.zeros",
"numpy.min",
"numpy.max",
"keras.backend.eval",
"app.models.model_factory.get_model"
] | [((931, 1139), 'app.models.model_factory.get_model', 'get_model', (['self.DSConfig.class_names'], {'weights_path': 'self.MDConfig.trained_model_weights', 'image_dimension': 'self.IMConfig.img_dim', 'color_mode': 'self.IMConfig.color_mode', 'class_mode': 'self.DSConfig.class_mode'}), '(self.DSConfig.class_names, weights_path=self.MDConfig.\n trained_model_weights, image_dimension=self.IMConfig.img_dim,\n color_mode=self.IMConfig.color_mode, class_mode=self.DSConfig.class_mode)\n', (940, 1139), False, 'from app.models.model_factory import get_model\n'), ((1504, 1519), 'numpy.min', 'np.min', (['weights'], {}), '(weights)\n', (1510, 1519), True, 'import numpy as np\n'), ((1539, 1554), 'numpy.max', 'np.max', (['weights'], {}), '(weights)\n', (1545, 1554), True, 'import numpy as np\n'), ((1603, 1624), 'numpy.zeros', 'np.zeros', (['(56, 56, 3)'], {}), '((56, 56, 3))\n', (1611, 1624), True, 'import numpy as np\n'), ((1820, 1860), 'cv2.resize', 'cv2.resize', (['weights_mosaic', '(1024, 1024)'], {}), '(weights_mosaic, (1024, 1024))\n', (1830, 1860), False, 'import cv2\n'), ((1869, 1911), 'cv2.imwrite', 'cv2.imwrite', (['"""kernels.bmp"""', 'weights_mosaic'], {}), "('kernels.bmp', weights_mosaic)\n", (1880, 1911), False, 'import cv2\n'), ((1468, 1483), 'keras.backend.eval', 'K.eval', (['weights'], {}), '(weights)\n', (1474, 1483), True, 'import keras.backend as K\n')] |
# from pyvirtualdisplay import Display
# display = Display(visible=1, size=(480, 320))
# display.start()
import numpy as np
import torch
from toy.value_iteration import *
from toy.network import AttentionNet
from toy.env.fourrooms import Fourrooms
from toy.env.fourrooms_withcoin import FourroomsCoin
from torch import optim
import os
import cv2
import pickle
from toy.util import *
# what to do: create an env(like fourroom, compute the value of its states,
# and train a neural network with mask to see if it can learn desired mask)
lr = 2e-4
epochs = 3000
batch_size = 208
log_interval = 1
feature_map_size = 15
device = torch.device("cuda")
# env = Fourrooms()
env = FourroomsCoin()
dataset, transition = gen_dataset_with_value_iteration(env, device)
value_network = AttentionNet(input_size=feature_map_size, device=device).to(device)
# value_network = AttentionNet(input_size=144*2).to(device)
optimizer = optim.Adam(value_network.parameters(), lr=lr, weight_decay=1e-5)
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=True)
# train network
for epoch in range(1, epochs + 1):
value_network.train()
train_loss = 0
total_correct = 0
for batch_idx, (obs, value_gt) in enumerate(data_loader):
optimizer.zero_grad()
mask, value_predict, _ = value_network(obs)
encoder_loss, decoder_loss = value_network.loss_func(mask, value_predict, value_gt)
obs_tar, obs_pos, obs_neg = sample_contrast(transition, batch_size, dataset)
contrast_loss = value_network.contrast_loss_func(obs_tar, obs_pos, obs_neg) + value_network.contrast_loss_func(obs_pos, obs_tar, obs_neg)
loss = 1e-2*encoder_loss + decoder_loss + contrast_loss
loss.sum().backward()
# total_correct += correct.sum().item()
train_loss += loss.sum().item()
optimizer.step()
if epoch % log_interval == 0:
print('Encoder Loss:{} Decoder Loss:{}'.format(encoder_loss,decoder_loss))
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(obs), len(data_loader.dataset),
100. * batch_idx / len(data_loader),
loss.sum().item() / len(obs), end='\r'))
# if epoch % 1 == 0:
# torch.save(model.state_dict(), "./model/FC_{}_epoch{}_predict{}.model".format(b, epoch, args.predict))
# print mask
target_path = "./attention/"
# if not os.path.exists(target_path):
os.makedirs(os.path.join(target_path, "./mask/"), exist_ok=True)
os.makedirs(os.path.join(target_path, "./image/"), exist_ok=True)
os.makedirs(os.path.join(target_path, "./feature_map/"), exist_ok=True)
os.makedirs(os.path.join(target_path, "./masked_image/"), exist_ok=True)
predict_values = []
feature_maps = []
def generate_image(add_noise=False):
obs, values = dataset.X, dataset.y
predict_values = []
feature_maps = []
for i, data in enumerate(zip(obs, values)):
obs, value = data
if add_noise:
obs = obs + torch.rand(*(obs.shape)).to(device) * 0.1 - 0.05
mask, value_predict, feature_map = value_network(obs[np.newaxis, ...])
mask = mask.detach().cpu().numpy()[0, 0]
mask = (mask - np.min(mask)) / (np.max(mask) - np.min(mask) + 1e-12)
feature_map = feature_map.detach().cpu().numpy()[0, 0]
feature_maps.append(feature_map)
# if i == 0:
# print(feature_map)
predict_values.append(value_predict.detach().cpu().numpy().squeeze())
feature_map = (feature_map - np.min(feature_map)) / (np.max(feature_map) - np.min(feature_map) + 1e-12)
obs = obs.cpu().numpy().transpose((1, 2, 0))
if add_noise:
suffix = "noise"
else:
suffix = ""
obs = obs * 255
mask = cv2.resize(mask, (obs.shape[0], obs.shape[1]))
mask = np.repeat(mask[..., np.newaxis], 3, axis=2)
feature_map = cv2.resize(feature_map, (obs.shape[0], obs.shape[1]))
feature_map = np.repeat(feature_map[..., np.newaxis], 3, axis=2)
# print(self.obs_shape)
masked_image = obs * mask
cv2.imwrite(os.path.join(target_path, "./mask/", "{}_{}.png".format(suffix, i)), mask * 255)
cv2.imwrite(os.path.join(target_path, "./image/", "{}_{}.png".format(suffix, i)), obs)
cv2.imwrite(os.path.join(target_path, "./masked_image/", "{}_{}.png".format(suffix, i)), masked_image)
cv2.imwrite(os.path.join(target_path, "./feature_map/", "{}_{}.png".format(suffix, i)), feature_map * 255)
values = values.cpu().numpy()
print("mse", np.mean((values - predict_values) ** 2))
generate_image()
generate_image(True)
# print(value_network.value_fc_4.weight.shape)
# print(np.mean(abs(value_network.value_fc_4.weight.cpu().detach().numpy())))
print(value_network.value_fc_4.weight[0,:feature_map_size**2].reshape(feature_map_size,feature_map_size))
# print(value_network.value_fc_4.weight[:,feature_map_size**2:].reshape(feature_map_size,feature_map_size))
print(np.array(predict_values))
feature_maps = np.array(feature_maps)
print(feature_maps.shape)
pickle.dump(feature_maps, open("feature_map.pkl", "wb"))
| [
"toy.network.AttentionNet",
"torch.utils.data.DataLoader",
"torch.rand",
"numpy.min",
"toy.env.fourrooms_withcoin.FourroomsCoin",
"numpy.array",
"numpy.mean",
"numpy.max",
"torch.device",
"os.path.join",
"cv2.resize",
"numpy.repeat"
] | [((627, 647), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (639, 647), False, 'import torch\n'), ((675, 690), 'toy.env.fourrooms_withcoin.FourroomsCoin', 'FourroomsCoin', ([], {}), '()\n', (688, 690), False, 'from toy.env.fourrooms_withcoin import FourroomsCoin\n'), ((998, 1071), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(dataset, batch_size=batch_size, shuffle=True)\n', (1025, 1071), False, 'import torch\n'), ((5108, 5130), 'numpy.array', 'np.array', (['feature_maps'], {}), '(feature_maps)\n', (5116, 5130), True, 'import numpy as np\n'), ((2511, 2547), 'os.path.join', 'os.path.join', (['target_path', '"""./mask/"""'], {}), "(target_path, './mask/')\n", (2523, 2547), False, 'import os\n'), ((2576, 2613), 'os.path.join', 'os.path.join', (['target_path', '"""./image/"""'], {}), "(target_path, './image/')\n", (2588, 2613), False, 'import os\n'), ((2642, 2685), 'os.path.join', 'os.path.join', (['target_path', '"""./feature_map/"""'], {}), "(target_path, './feature_map/')\n", (2654, 2685), False, 'import os\n'), ((2714, 2758), 'os.path.join', 'os.path.join', (['target_path', '"""./masked_image/"""'], {}), "(target_path, './masked_image/')\n", (2726, 2758), False, 'import os\n'), ((5067, 5091), 'numpy.array', 'np.array', (['predict_values'], {}), '(predict_values)\n', (5075, 5091), True, 'import numpy as np\n'), ((777, 833), 'toy.network.AttentionNet', 'AttentionNet', ([], {'input_size': 'feature_map_size', 'device': 'device'}), '(input_size=feature_map_size, device=device)\n', (789, 833), False, 'from toy.network import AttentionNet\n'), ((3845, 3891), 'cv2.resize', 'cv2.resize', (['mask', '(obs.shape[0], obs.shape[1])'], {}), '(mask, (obs.shape[0], obs.shape[1]))\n', (3855, 3891), False, 'import cv2\n'), ((3907, 3950), 'numpy.repeat', 'np.repeat', (['mask[..., np.newaxis]', '(3)'], {'axis': '(2)'}), '(mask[..., np.newaxis], 3, axis=2)\n', (3916, 3950), 
True, 'import numpy as np\n'), ((3974, 4027), 'cv2.resize', 'cv2.resize', (['feature_map', '(obs.shape[0], obs.shape[1])'], {}), '(feature_map, (obs.shape[0], obs.shape[1]))\n', (3984, 4027), False, 'import cv2\n'), ((4050, 4100), 'numpy.repeat', 'np.repeat', (['feature_map[..., np.newaxis]', '(3)'], {'axis': '(2)'}), '(feature_map[..., np.newaxis], 3, axis=2)\n', (4059, 4100), True, 'import numpy as np\n'), ((4641, 4680), 'numpy.mean', 'np.mean', (['((values - predict_values) ** 2)'], {}), '((values - predict_values) ** 2)\n', (4648, 4680), True, 'import numpy as np\n'), ((3259, 3271), 'numpy.min', 'np.min', (['mask'], {}), '(mask)\n', (3265, 3271), True, 'import numpy as np\n'), ((3587, 3606), 'numpy.min', 'np.min', (['feature_map'], {}), '(feature_map)\n', (3593, 3606), True, 'import numpy as np\n'), ((3276, 3288), 'numpy.max', 'np.max', (['mask'], {}), '(mask)\n', (3282, 3288), True, 'import numpy as np\n'), ((3291, 3303), 'numpy.min', 'np.min', (['mask'], {}), '(mask)\n', (3297, 3303), True, 'import numpy as np\n'), ((3611, 3630), 'numpy.max', 'np.max', (['feature_map'], {}), '(feature_map)\n', (3617, 3630), True, 'import numpy as np\n'), ((3633, 3652), 'numpy.min', 'np.min', (['feature_map'], {}), '(feature_map)\n', (3639, 3652), True, 'import numpy as np\n'), ((3058, 3080), 'torch.rand', 'torch.rand', (['*obs.shape'], {}), '(*obs.shape)\n', (3068, 3080), False, 'import torch\n')] |
import unittest
import numpy as np
import torch
from torch import nn
from torchvision.ops import box_convert
from rastervision.pytorch_learner.object_detection_utils import (
BoxList, collate_fn, TorchVisionODAdapter)
class MockModel(nn.Module):
def __init__(self, num_classes: int) -> None:
super().__init__()
self.num_classes = num_classes
def forward(self, x, y=None):
if self.training:
assert y is not None
return {'loss1': 0, 'loss2': 0}
else:
N = len(x)
nboxes = np.random.randint(0, 10)
outs = [{
'boxes': torch.rand((nboxes, 4)),
'labels': torch.randint(0, self.num_classes, (nboxes, )),
'scores': torch.rand((nboxes, )),
} for _ in range(N)]
return outs
class TestTorchVisionODAdapter(unittest.TestCase):
def test_train_output(self):
true_num_classes = 3
model = TorchVisionODAdapter(
MockModel(num_classes=true_num_classes + 2),
ignored_output_inds=[0, true_num_classes + 1])
model.train()
N = 10
x = torch.empty(N, 3, 100, 100)
y = [
BoxList(
boxes=torch.rand((10, 4)),
class_ids=torch.randint(0, 5, (N, ))) for _ in range(N)
]
self.assertRaises(Exception, lambda: model(x))
out = model(x, y)
self.assertIsInstance(out, dict)
self.assertIn('total_loss', out)
def test_eval_output_with_bogus_class(self):
true_num_classes = 3
model = TorchVisionODAdapter(
MockModel(num_classes=true_num_classes + 2),
ignored_output_inds=[0, true_num_classes + 1])
model.eval()
N = 10
x = torch.empty(N, 3, 100, 100)
outs = model(x)
for out in outs:
self.assertIsInstance(out, BoxList)
self.assertIn('class_ids', out)
self.assertIn('scores', out)
self.assertTrue(
all((0 <= c < true_num_classes)
for c in out.get_field('class_ids')))
def test_eval_output_without_bogus_class(self):
true_num_classes = 3
model = TorchVisionODAdapter(
MockModel(num_classes=true_num_classes + 1),
ignored_output_inds=[0])
model.eval()
N = 10
x = torch.empty(N, 3, 100, 100)
outs = model(x)
for out in outs:
self.assertIsInstance(out, BoxList)
self.assertIn('class_ids', out)
self.assertIn('scores', out)
self.assertTrue(
all((0 <= c < true_num_classes)
for c in out.get_field('class_ids')))
class TestBoxList(unittest.TestCase):
def test_init(self):
boxes = torch.rand((10, 4))
# no conversion when format = xyxy
boxlist = BoxList(boxes)
self.assertTrue(torch.equal(boxlist.boxes, boxes))
boxlist = BoxList(boxes, format='xyxy')
self.assertTrue(torch.equal(boxlist.boxes, boxes))
# test correct conversion from yxyx to xyxy
boxlist = BoxList(boxes, format='yxyx')
self.assertTrue(torch.equal(boxlist.boxes, boxes[:, [1, 0, 3, 2]]))
# test correct conversion from other formats to xyxy
for in_fmt in ['xywh', 'cxcywh']:
boxlist = BoxList(boxes, format=in_fmt)
self.assertTrue(
torch.equal(boxlist.boxes, box_convert(boxes, in_fmt, 'xyxy')))
def test_get_field(self):
boxes = torch.rand((10, 4))
class_ids = torch.randint(0, 5, (10, ))
boxlist = BoxList(boxes, class_ids=class_ids)
self.assertTrue(torch.equal(boxlist.get_field('class_ids'), class_ids))
def test_map_extras(self):
boxes = torch.rand((10, 4))
class_ids = torch.randint(0, 3, (10, ))
scores = torch.rand((10, ))
class_names = np.array(['a', 'b', 'c'])[class_ids.numpy()]
boxlist = BoxList(
boxes, class_ids=class_ids, scores=scores, class_names=class_names)
boxlist = BoxList(
boxes,
**boxlist._map_extras(
func=lambda k, v: v[:-1],
cond=lambda k, v: torch.is_tensor(v)))
self.assertTrue(
torch.equal(boxlist.get_field('class_ids'), class_ids[:-1]))
self.assertTrue(torch.equal(boxlist.get_field('scores'), scores[:-1]))
self.assertTrue(all(class_names == boxlist.get_field('class_names')))
def test_to(self):
boxes = torch.rand((10, 4))
class_ids = torch.randint(0, 3, (10, ))
scores = torch.rand((10, ))
class_names = np.array(['a', 'b', 'c'])[class_ids.numpy()]
boxlist = BoxList(
boxes, class_ids=class_ids, scores=scores, class_names=class_names)
boxlist = boxlist.to(dtype=torch.float32)
self.assertTrue(
torch.equal(boxlist.get_field('class_ids'), class_ids.float()))
self.assertTrue(
torch.equal(boxlist.get_field('scores'), scores.float()))
self.assertTrue(all(class_names == boxlist.get_field('class_names')))
def test_collate_fn(self):
imgs = [torch.empty(3, 100, 100) for _ in range(4)]
boxlists = []
for _ in range(4):
boxes = torch.rand((10, 4))
class_ids = torch.randint(0, 3, (10, ))
boxlist = BoxList(boxes, class_ids=class_ids)
boxlists.append(boxlist)
x, y = collate_fn(zip(imgs, boxes))
self.assertEqual(x.shape, (4, 3, 100, 100))
self.assertTrue(all(b1 == b2 for b1, b2 in zip(boxlists, y)))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"torch.randint",
"torch.empty",
"torch.equal",
"numpy.random.randint",
"numpy.array",
"torch.rand",
"torch.is_tensor",
"torchvision.ops.box_convert",
"rastervision.pytorch_learner.object_detection_utils.BoxList"
] | [((5710, 5725), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5723, 5725), False, 'import unittest\n'), ((1163, 1190), 'torch.empty', 'torch.empty', (['N', '(3)', '(100)', '(100)'], {}), '(N, 3, 100, 100)\n', (1174, 1190), False, 'import torch\n'), ((1796, 1823), 'torch.empty', 'torch.empty', (['N', '(3)', '(100)', '(100)'], {}), '(N, 3, 100, 100)\n', (1807, 1823), False, 'import torch\n'), ((2404, 2431), 'torch.empty', 'torch.empty', (['N', '(3)', '(100)', '(100)'], {}), '(N, 3, 100, 100)\n', (2415, 2431), False, 'import torch\n'), ((2830, 2849), 'torch.rand', 'torch.rand', (['(10, 4)'], {}), '((10, 4))\n', (2840, 2849), False, 'import torch\n'), ((2911, 2925), 'rastervision.pytorch_learner.object_detection_utils.BoxList', 'BoxList', (['boxes'], {}), '(boxes)\n', (2918, 2925), False, 'from rastervision.pytorch_learner.object_detection_utils import BoxList, collate_fn, TorchVisionODAdapter\n'), ((3003, 3032), 'rastervision.pytorch_learner.object_detection_utils.BoxList', 'BoxList', (['boxes'], {'format': '"""xyxy"""'}), "(boxes, format='xyxy')\n", (3010, 3032), False, 'from rastervision.pytorch_learner.object_detection_utils import BoxList, collate_fn, TorchVisionODAdapter\n'), ((3162, 3191), 'rastervision.pytorch_learner.object_detection_utils.BoxList', 'BoxList', (['boxes'], {'format': '"""yxyx"""'}), "(boxes, format='yxyx')\n", (3169, 3191), False, 'from rastervision.pytorch_learner.object_detection_utils import BoxList, collate_fn, TorchVisionODAdapter\n'), ((3579, 3598), 'torch.rand', 'torch.rand', (['(10, 4)'], {}), '((10, 4))\n', (3589, 3598), False, 'import torch\n'), ((3619, 3645), 'torch.randint', 'torch.randint', (['(0)', '(5)', '(10,)'], {}), '(0, 5, (10,))\n', (3632, 3645), False, 'import torch\n'), ((3665, 3700), 'rastervision.pytorch_learner.object_detection_utils.BoxList', 'BoxList', (['boxes'], {'class_ids': 'class_ids'}), '(boxes, class_ids=class_ids)\n', (3672, 3700), False, 'from rastervision.pytorch_learner.object_detection_utils 
import BoxList, collate_fn, TorchVisionODAdapter\n'), ((3829, 3848), 'torch.rand', 'torch.rand', (['(10, 4)'], {}), '((10, 4))\n', (3839, 3848), False, 'import torch\n'), ((3869, 3895), 'torch.randint', 'torch.randint', (['(0)', '(3)', '(10,)'], {}), '(0, 3, (10,))\n', (3882, 3895), False, 'import torch\n'), ((3914, 3931), 'torch.rand', 'torch.rand', (['(10,)'], {}), '((10,))\n', (3924, 3931), False, 'import torch\n'), ((4018, 4093), 'rastervision.pytorch_learner.object_detection_utils.BoxList', 'BoxList', (['boxes'], {'class_ids': 'class_ids', 'scores': 'scores', 'class_names': 'class_names'}), '(boxes, class_ids=class_ids, scores=scores, class_names=class_names)\n', (4025, 4093), False, 'from rastervision.pytorch_learner.object_detection_utils import BoxList, collate_fn, TorchVisionODAdapter\n'), ((4580, 4599), 'torch.rand', 'torch.rand', (['(10, 4)'], {}), '((10, 4))\n', (4590, 4599), False, 'import torch\n'), ((4620, 4646), 'torch.randint', 'torch.randint', (['(0)', '(3)', '(10,)'], {}), '(0, 3, (10,))\n', (4633, 4646), False, 'import torch\n'), ((4665, 4682), 'torch.rand', 'torch.rand', (['(10,)'], {}), '((10,))\n', (4675, 4682), False, 'import torch\n'), ((4769, 4844), 'rastervision.pytorch_learner.object_detection_utils.BoxList', 'BoxList', (['boxes'], {'class_ids': 'class_ids', 'scores': 'scores', 'class_names': 'class_names'}), '(boxes, class_ids=class_ids, scores=scores, class_names=class_names)\n', (4776, 4844), False, 'from rastervision.pytorch_learner.object_detection_utils import BoxList, collate_fn, TorchVisionODAdapter\n'), ((566, 590), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (583, 590), True, 'import numpy as np\n'), ((2950, 2983), 'torch.equal', 'torch.equal', (['boxlist.boxes', 'boxes'], {}), '(boxlist.boxes, boxes)\n', (2961, 2983), False, 'import torch\n'), ((3057, 3090), 'torch.equal', 'torch.equal', (['boxlist.boxes', 'boxes'], {}), '(boxlist.boxes, boxes)\n', (3068, 3090), False, 'import torch\n'), 
((3216, 3266), 'torch.equal', 'torch.equal', (['boxlist.boxes', 'boxes[:, [1, 0, 3, 2]]'], {}), '(boxlist.boxes, boxes[:, [1, 0, 3, 2]])\n', (3227, 3266), False, 'import torch\n'), ((3393, 3422), 'rastervision.pytorch_learner.object_detection_utils.BoxList', 'BoxList', (['boxes'], {'format': 'in_fmt'}), '(boxes, format=in_fmt)\n', (3400, 3422), False, 'from rastervision.pytorch_learner.object_detection_utils import BoxList, collate_fn, TorchVisionODAdapter\n'), ((3955, 3980), 'numpy.array', 'np.array', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (3963, 3980), True, 'import numpy as np\n'), ((4706, 4731), 'numpy.array', 'np.array', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (4714, 4731), True, 'import numpy as np\n'), ((5230, 5254), 'torch.empty', 'torch.empty', (['(3)', '(100)', '(100)'], {}), '(3, 100, 100)\n', (5241, 5254), False, 'import torch\n'), ((5343, 5362), 'torch.rand', 'torch.rand', (['(10, 4)'], {}), '((10, 4))\n', (5353, 5362), False, 'import torch\n'), ((5387, 5413), 'torch.randint', 'torch.randint', (['(0)', '(3)', '(10,)'], {}), '(0, 3, (10,))\n', (5400, 5413), False, 'import torch\n'), ((5437, 5472), 'rastervision.pytorch_learner.object_detection_utils.BoxList', 'BoxList', (['boxes'], {'class_ids': 'class_ids'}), '(boxes, class_ids=class_ids)\n', (5444, 5472), False, 'from rastervision.pytorch_learner.object_detection_utils import BoxList, collate_fn, TorchVisionODAdapter\n'), ((638, 661), 'torch.rand', 'torch.rand', (['(nboxes, 4)'], {}), '((nboxes, 4))\n', (648, 661), False, 'import torch\n'), ((689, 734), 'torch.randint', 'torch.randint', (['(0)', 'self.num_classes', '(nboxes,)'], {}), '(0, self.num_classes, (nboxes,))\n', (702, 734), False, 'import torch\n'), ((763, 784), 'torch.rand', 'torch.rand', (['(nboxes,)'], {}), '((nboxes,))\n', (773, 784), False, 'import torch\n'), ((1248, 1267), 'torch.rand', 'torch.rand', (['(10, 4)'], {}), '((10, 4))\n', (1258, 1267), False, 'import torch\n'), ((1295, 1320), 'torch.randint', 
'torch.randint', (['(0)', '(5)', '(N,)'], {}), '(0, 5, (N,))\n', (1308, 1320), False, 'import torch\n'), ((3495, 3529), 'torchvision.ops.box_convert', 'box_convert', (['boxes', 'in_fmt', '"""xyxy"""'], {}), "(boxes, in_fmt, 'xyxy')\n", (3506, 3529), False, 'from torchvision.ops import box_convert\n'), ((4264, 4282), 'torch.is_tensor', 'torch.is_tensor', (['v'], {}), '(v)\n', (4279, 4282), False, 'import torch\n')] |
import timeit
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
from object_detection.metrics.coco_evaluation import CocoDetectionEvaluator
from object_detection.core.standard_fields import InputDataFields, DetectionResultFields
from yolov3_tf2.models import YoloV3
from yolov3_tf2.utils import draw_outputs
data, info = tfds.load("coco", with_info=True, data_dir="./data")
n_classes = info.features["objects"]["label"].num_classes
# n_samples = info.splits["validation"].num_examples
n_samples = 2048
minibatch_size = 32
img_size = 416
yolo = YoloV3(size=img_size, classes=n_classes, iou_threshold=0.5, score_threshold=0.5)
yolo.load_weights("./checkpoints/yolov3.tf")
with open("./data/coco/2014/1.1.0/objects-label.labels.txt") as f:
class_names = [c.strip() for c in f.readlines()]
assert len(class_names) == n_classes
validation_data = (
data["validation"]
.map(
lambda feat: tf.image.resize(feat["image"], (img_size, img_size)),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
.batch(minibatch_size, drop_remainder=True)
.map(lambda img: img / 255, num_parallel_calls=tf.data.experimental.AUTOTUNE)
.prefetch(tf.data.experimental.AUTOTUNE)
)
# start = timeit.default_timer()
# for i, _ in enumerate(validation_data.take(100)):
# print(i)
# print(timeit.default_timer() - start)
print("data")
print(info)
print(info.features)
print(info.splits)
print(validation_data)
boxes, scores, classes, nums = yolo.predict(
validation_data, verbose=1, steps=n_samples // minibatch_size
)
# for img in validation_data.take(n_samples // minibatch_size):
# img = img.numpy()
#
# start = timeit.default_timer()
# boxes, scores, classes, nums = yolo.predict(img)
# print(timeit.default_timer() - start)
print("output")
print(boxes.shape)
print(scores.shape)
print(classes.shape)
print(nums.shape)
# compute map
evaluator = CocoDetectionEvaluator(
[{"id": i, "name": n} for i, n in enumerate(class_names)]
)
for i, feat in enumerate(data["validation"].take(n_samples)):
img_id = feat["image/id"].numpy()
objects = feat["objects"]
ground_truth = {
InputDataFields.groundtruth_boxes: objects["bbox"].numpy(),
InputDataFields.groundtruth_classes: objects["label"].numpy(),
InputDataFields.groundtruth_is_crowd: objects["is_crowd"].numpy(),
}
# print("ground truth")
# print(ground_truth)
evaluator.add_single_ground_truth_image_info(img_id, ground_truth)
# height, width = feat["image"].shape[:2]
detection_results = {
# note: need to swap bboxes from (x, y, x, y) to (y, x, y, x)
DetectionResultFields.detection_boxes: boxes[i, : nums[i]][..., [1, 0, 3, 2]],
DetectionResultFields.detection_scores: scores[i, : nums[i]],
DetectionResultFields.detection_classes: classes[i, : nums[i]],
}
# print("detection results")
# print(detection_results)
evaluator.add_single_detected_image_info(img_id, detection_results)
results = evaluator.evaluate()
for i, img in enumerate(validation_data.unbatch().take(5)):
img = img.numpy()
print("example", i)
print("detections:")
for j in range(nums[i]):
print(
"\t{}, {}, {}".format(
class_names[int(classes[i][j])],
np.array(scores[i][j]),
np.array(boxes[i][j]),
)
)
img = draw_outputs(
img, (boxes[[i]], scores[[i]], classes[[i]], nums[[i]]), class_names
)
plt.figure()
plt.imshow(img)
plt.savefig("./data/output/image_%d" % i)
| [
"tensorflow_datasets.load",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.figure",
"yolov3_tf2.models.YoloV3",
"yolov3_tf2.utils.draw_outputs",
"numpy.array",
"tensorflow.image.resize",
"matplotlib.pyplot.savefig"
] | [((385, 437), 'tensorflow_datasets.load', 'tfds.load', (['"""coco"""'], {'with_info': '(True)', 'data_dir': '"""./data"""'}), "('coco', with_info=True, data_dir='./data')\n", (394, 437), True, 'import tensorflow_datasets as tfds\n'), ((610, 695), 'yolov3_tf2.models.YoloV3', 'YoloV3', ([], {'size': 'img_size', 'classes': 'n_classes', 'iou_threshold': '(0.5)', 'score_threshold': '(0.5)'}), '(size=img_size, classes=n_classes, iou_threshold=0.5, score_threshold=0.5\n )\n', (616, 695), False, 'from yolov3_tf2.models import YoloV3\n'), ((3468, 3554), 'yolov3_tf2.utils.draw_outputs', 'draw_outputs', (['img', '(boxes[[i]], scores[[i]], classes[[i]], nums[[i]])', 'class_names'], {}), '(img, (boxes[[i]], scores[[i]], classes[[i]], nums[[i]]),\n class_names)\n', (3480, 3554), False, 'from yolov3_tf2.utils import draw_outputs\n'), ((3570, 3582), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3580, 3582), True, 'import matplotlib.pyplot as plt\n'), ((3587, 3602), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (3597, 3602), True, 'import matplotlib.pyplot as plt\n'), ((3607, 3648), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./data/output/image_%d' % i)"], {}), "('./data/output/image_%d' % i)\n", (3618, 3648), True, 'import matplotlib.pyplot as plt\n'), ((3371, 3393), 'numpy.array', 'np.array', (['scores[i][j]'], {}), '(scores[i][j])\n', (3379, 3393), True, 'import numpy as np\n'), ((3411, 3432), 'numpy.array', 'np.array', (['boxes[i][j]'], {}), '(boxes[i][j])\n', (3419, 3432), True, 'import numpy as np\n'), ((971, 1023), 'tensorflow.image.resize', 'tf.image.resize', (["feat['image']", '(img_size, img_size)'], {}), "(feat['image'], (img_size, img_size))\n", (986, 1023), True, 'import tensorflow as tf\n')] |
import pySALESetup as pss
import numpy as np
import random
"""
This is a simple script that creates a particle bed of elliptical grains
with ice shrouds covering them. It creates two meshes and then merges them
to make one mirror-impact setup.
"""
# create two identical meshes
mesh1 = pss.Mesh(X=400,Y=200)
mesh2 = pss.Mesh(X=400,Y=200)
# use the meshes to make two ensembles;
# one ensemble for each domain
group1 = pss.Ensemble(mesh1)
group2 = pss.Ensemble(mesh2)
# we want to create (and place) 200
# different particles in this case
for i in range(150):
# generate some random params for each ellipse
rot = random.random()*np.pi
# force eccentricities to be between 0.5 and 0.8
# (just for this script, so that they look elliptical)
ecc = min(random.random()*0.75+0.5,0.8)
# Create a Grain instance! elps_params = [major radius in cells, eccentricity]
# default is 10 cells equivalent radius
grain = pss.Grain(shape='ellipse',rot=rot,eqr=10.,elps_eccen=ecc)
# place the first 100 grains randomly into free spaces
# for now we are just using one material.
if i < 100:
# insert grain instance into mesh1
grain.insertRandomly(mesh1,1)
# add grain instance to group1
# NB do this before placing grain again! grain.x, grain.y, etc.
# are changed after every placement
group1.add(grain)
# insert grain instance into mesh2
grain.insertRandomly(mesh2,1)
# add grain instance to group2
group2.add(grain)
else:
# place 2nd half of the grains into mesh such that each is in
# contact with at least one other grain (randomWalk's purpose)
grain.insertRandomwalk(mesh1,1)
group1.add(grain)
grain.insertRandomwalk(mesh2,1)
group2.add(grain)
# Calculate the optimal material number distribution for
# each group individually.
# and assign the correct materials to each grain
group1.optimise_materials(np.array([1,2,3,4,5,6,7]),populate=True)
group2.optimise_materials(np.array([1,2,3,4,5,6,7]),populate=True)
# add an elliptical shroud over each grain
for x1,y1,g1,x2,y2,g2 in zip(group1.xc,group1.yc,group1.grains,group2.xc,group2.yc,group2.grains):
# increase grain radius
r1 = g1.radius+4
# generate new grain with new radius
g1.mesh = pss.grainfromEllipse(r1,g1.angle,g1.eccentricity)
# place new grain in mesh1
g1.place(x1,y1,8,mesh1)
#repeat for mesh2
r2 = g2.radius+4
g2.mesh = pss.grainfromEllipse(r2,g2.angle,g2.eccentricity)
g2.place(x2,y2,8,mesh2)
# Fill all remaining space with material 9
mesh2.fillAll(m=9)
mesh1.fillAll(m=9)
# Give each mesh an opposing velocity (mirror impact)
mesh1.blanketVel(-500.)
mesh2.blanketVel(+500.)
# Combine the two meshes vertically
mesh3 = pss.combine_meshes(mesh1,mesh2,axis=1)
# View our handiwork!
mesh3.viewVels()
mesh3.viewMats(save=True)
# Save the new mesh, can specify a filename but defaults to
# meso_m.iSALE or meso_m.iSALE.gz if compress = True.
# (compressed input files are also taken as input by iSALE)
mesh3.save(compress=True)
| [
"pySALESetup.Mesh",
"pySALESetup.combine_meshes",
"pySALESetup.Grain",
"random.random",
"numpy.array",
"pySALESetup.grainfromEllipse",
"pySALESetup.Ensemble"
] | [((287, 309), 'pySALESetup.Mesh', 'pss.Mesh', ([], {'X': '(400)', 'Y': '(200)'}), '(X=400, Y=200)\n', (295, 309), True, 'import pySALESetup as pss\n'), ((317, 339), 'pySALESetup.Mesh', 'pss.Mesh', ([], {'X': '(400)', 'Y': '(200)'}), '(X=400, Y=200)\n', (325, 339), True, 'import pySALESetup as pss\n'), ((419, 438), 'pySALESetup.Ensemble', 'pss.Ensemble', (['mesh1'], {}), '(mesh1)\n', (431, 438), True, 'import pySALESetup as pss\n'), ((448, 467), 'pySALESetup.Ensemble', 'pss.Ensemble', (['mesh2'], {}), '(mesh2)\n', (460, 467), True, 'import pySALESetup as pss\n'), ((2808, 2848), 'pySALESetup.combine_meshes', 'pss.combine_meshes', (['mesh1', 'mesh2'], {'axis': '(1)'}), '(mesh1, mesh2, axis=1)\n', (2826, 2848), True, 'import pySALESetup as pss\n'), ((939, 1000), 'pySALESetup.Grain', 'pss.Grain', ([], {'shape': '"""ellipse"""', 'rot': 'rot', 'eqr': '(10.0)', 'elps_eccen': 'ecc'}), "(shape='ellipse', rot=rot, eqr=10.0, elps_eccen=ecc)\n", (948, 1000), True, 'import pySALESetup as pss\n'), ((1971, 2002), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7]'], {}), '([1, 2, 3, 4, 5, 6, 7])\n', (1979, 2002), True, 'import numpy as np\n'), ((2038, 2069), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7]'], {}), '([1, 2, 3, 4, 5, 6, 7])\n', (2046, 2069), True, 'import numpy as np\n'), ((2326, 2377), 'pySALESetup.grainfromEllipse', 'pss.grainfromEllipse', (['r1', 'g1.angle', 'g1.eccentricity'], {}), '(r1, g1.angle, g1.eccentricity)\n', (2346, 2377), True, 'import pySALESetup as pss\n'), ((2498, 2549), 'pySALESetup.grainfromEllipse', 'pss.grainfromEllipse', (['r2', 'g2.angle', 'g2.eccentricity'], {}), '(r2, g2.angle, g2.eccentricity)\n', (2518, 2549), True, 'import pySALESetup as pss\n'), ((622, 637), 'random.random', 'random.random', ([], {}), '()\n', (635, 637), False, 'import random\n'), ((770, 785), 'random.random', 'random.random', ([], {}), '()\n', (783, 785), False, 'import random\n')] |
from ..CodonSpecification import CodonSpecification
from python_codon_tables import get_codons_table
import numpy as np
from ...Location import Location
from ...biotools import group_nearby_indices
class BaseCodonOptimizationClass(CodonSpecification):
best_possible_score = 0 # Don't forget to change in subclasses if needed
localization_group_spread = 3
def __init__(
self, species=None, location=None, codon_usage_table=None, boost=1.0
):
self.boost = boost
self.location = Location.from_data(location)
self.species = species
self.codon_usage_table = self.get_codons_table(
species, codon_usage_table
)
def get_codons(self, problem):
subsequence = self.location.extract_sequence(problem.sequence)
if len(subsequence) % 3:
raise ValueError(
"Spec. %s is on a window/sequence with size not multiple of 3)"
% (self.label())
)
return [
subsequence[3 * i : 3 * (i + 1)]
for i in range(int(len(subsequence) / 3))
]
@staticmethod
def get_codons_table(species, codon_usage_table):
if codon_usage_table is None:
if species is None:
raise ValueError(
"Provide either an species name or a codon usage table"
)
else:
codon_usage_table = get_codons_table(species)
return codon_usage_table
def initialized_on_problem(self, problem, role):
"""Get location from sequence if no location provided."""
return self._copy_with_full_span_if_no_location(problem)
def codons_indices_to_locations(self, indices):
"""Convert a list of codon positions to a list of Locations"""
indices = np.array(indices)
if self.location.strand == -1:
indices = sorted(self.location.end - 3 * indices)
return [
Location(group[0] - 3, group[-1], strand=-1)
for group in group_nearby_indices(
indices, max_group_spread=self.localization_group_spread
)
]
else:
indices = self.location.start + 3 * indices
return [
Location(group[0], group[-1] + 3)
for group in group_nearby_indices(
indices, max_group_spread=self.localization_group_spread
)
]
def get_codons_synonyms(self):
"""Return a dict {"GTG": [GTG, GTC, ...]} of synonymous codons."""
return {
codon: [c for c in aa_codons]
for aa, aa_codons in self.codon_usage_table.items()
if len(aa) == 1
for codon in aa_codons
}
def get_codons_translations(self):
"""Return a dict {"ATG": "M", "TAG": "*", ...}."""
return {
codon: aa
for aa, aa_codons in self.codon_usage_table.items()
if len(aa) == 1
for codon in aa_codons.keys()
}
def localized_on_window(self, new_location, start_codon, end_codon):
"""Relocate without changing much."""
# The "new_location" already has exactly the right span and strand
# thanks to superclass CodonSpecification
return self.copy_with_changes(location=new_location)
| [
"numpy.array",
"python_codon_tables.get_codons_table"
] | [((1820, 1837), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (1828, 1837), True, 'import numpy as np\n'), ((1434, 1459), 'python_codon_tables.get_codons_table', 'get_codons_table', (['species'], {}), '(species)\n', (1450, 1459), False, 'from python_codon_tables import get_codons_table\n')] |
from __future__ import division
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
class RandomShapeletForest(BaseEstimator):
def __init__(self,
number_shapelets = 20,
min_shapelet_length=30,
max_shapelet_length=50):
"""
:param number_shapelets: number of shapelets
:param min_shapelet_length: minimum shapelet lengths
:param max_shapelet_length: maximum shapelet lengths
"""
# Shapelet related
self.number_shapelets = number_shapelets
self.min_shapelet_length = min_shapelet_length
self.max_shapelet_length = max_shapelet_length
# Training data related
self.train_data = None
self.train_labels = None
self._orig_labels = None
self.output_size = None
self.train_size = None
# validation data
self.valid_data = None
self.valid_labels = None
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
def fit(self, X, y):
self.number_shapelets = self.number_shapelets
self.X = X
self.y = pd.Series(y).values.ravel()
#self.train_labels = utils.get_one_active_representation(y)
self.model = self.train_forest()
return self
def predict(self, X):
self.transformed_data = self.transform(X)
return self.model.predict(self.transformed_data)
def train_forest(self):
self.RANDOM_SHAPELETS = self.get_random_shapelets()
self.transformed_data = self.transform(self.X)
tuned_parameters = [{'n_estimators': [1, 19, 50, 100], 'max_depth': [1, 2, 5],
'min_samples_leaf': [1, 2, 5]}]
model = GridSearchCV(RandomForestClassifier(), tuned_parameters, cv=2, scoring='precision_macro')
model.fit(self.transformed_data, self.y)
return model
def get_random_shapelets(self):
RANDOM_SHAPELETS = {}
for i in range(0, self.number_shapelets):
timeseries = self.sample_column()
shapelet = self.sample_shapelet(timeseries)
RANDOM_SHAPELETS['shapelet_%s'%i] = shapelet
return RANDOM_SHAPELETS
def transform(self, X):
TRANSFORMED = pd.DataFrame(index = X.keys(), columns = self.RANDOM_SHAPELETS.keys())
for s in self.RANDOM_SHAPELETS.keys():
for c in X.columns:
TRANSFORMED.loc[c, s] = self.get_max_correlation(X[c], self.RANDOM_SHAPELETS[s])
TRANSFORMED = TRANSFORMED.dropna(axis='index', how='any', thresh = 2)
TRANSFORMED.fillna(-1., inplace = True)
return TRANSFORMED
def get_max_correlation(self, timeseries, shapelet):
'''
identifies the maximum correlation between a timeseries and a given shapelet
the maximum correlation is defined as the maximum of all correlations
between the shapelet and a subsample of the timeseries with the length of the shapelet
'''
as_strided = np.lib.stride_tricks.as_strided
window = len(shapelet)
v = as_strided(timeseries, (len(timeseries) - (window - 1), window), (timeseries.values.strides * 2))
array_list = pd.Series(v.tolist(), index=timeseries.index[:-window+1])
corr_list = [self.get_corr(i, shapelet) for i in array_list]
max_corr = np.max(corr_list)
return max_corr
def get_corr(self, shapelet, subsample):
'''
return the correlation between ths shapelet and the subsample that is compared to the shapelet
'''
#print np.corrcoef(shapelet, subsample)[0][1]
return np.corrcoef(shapelet, subsample)[0][1]
def sample_column(self):
random_column = self.X.columns[np.random.choice(np.array(range(0, len(self.X.columns))))]
random_series = self.X[random_column]
return random_series
def sample_shapelet(self, timeseries):
random_shapelet_length = np.random.choice(range(self.min_shapelet_length,
self.max_shapelet_length))
ii = np.random.choice(np.array(range(0, len(timeseries) - random_shapelet_length)))
random_shapelet = np.array(timeseries[ii:ii + random_shapelet_length])
return random_shapelet
| [
"sklearn.ensemble.RandomForestClassifier",
"numpy.corrcoef",
"numpy.max",
"numpy.array",
"pandas.Series"
] | [((3637, 3654), 'numpy.max', 'np.max', (['corr_list'], {}), '(corr_list)\n', (3643, 3654), True, 'import numpy as np\n'), ((4544, 4596), 'numpy.array', 'np.array', (['timeseries[ii:ii + random_shapelet_length]'], {}), '(timeseries[ii:ii + random_shapelet_length])\n', (4552, 4596), True, 'import numpy as np\n'), ((1994, 2018), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (2016, 2018), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3930, 3962), 'numpy.corrcoef', 'np.corrcoef', (['shapelet', 'subsample'], {}), '(shapelet, subsample)\n', (3941, 3962), True, 'import numpy as np\n'), ((1365, 1377), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (1374, 1377), True, 'import pandas as pd\n')] |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from imfits import Imfits
# intensity map
def intensitymap(self, ax=None, outname=None, imscale=[], outformat='pdf',
    color=True, cmap='Blues', colorbar=True, cbaroptions=np.array(['right','4%','0%','Jy/beam']), vmin=None, vmax=None,
    contour=True, clevels=None, ccolor='k',
    data=None, axis=0, xticks=[], yticks=[], relativecoords=True, csize=18, scalebar=[],
    cstar=True, prop_star=[], color_norm=None, bcolor='k', figsize=(11.69,8.27),
    tickcolor='k', axiscolor='k', labelcolor='k', coord_center=None, plot_beam=True,
    interpolation=None, noreg=True, inmode=None, exact_coord=False):
    '''
    Draw an intensity map from an image cube. Maps can be overplotted by passing
    the ``ax`` on which another map has already been drawn.

    Parameters (must be given)
    --------------------------
    self: Imfits object read by Imfits.

    Return
    ------
    ax: matplotlib axis.

    How to use
    ----------
     - Draw a map:
        imdata = Imfits('fitsfile')
        imdata.drawmaps.intensitymap(outname='test', imscale=[-10,10,-10,10], color=True, cmap='Greys')

     - Overplot:
        rms = 10. # rms
        ax = imdata.drawmaps.intensitymap('object01.fits', outname='test', imscale=[-10,10,-10,10], color=True)
        ax = imdata.drawmaps.intensitymap('object02.fits', outname='test', ax=ax, color=False,
        contour=True, clevels=np.array([3,6,9,12])*rms) # put ax=ax and the same outname

        # If you want to overplot something more
        ax.plot(0, 0, marker='*', size=40)        # overplot
        plt.savefig('test.pdf', transparent=True) # use the same name

    Parameters (optional)
    ----------------------
     - Often used
    outname (str): Output file name. Do not include file extension.
    outformat (str): Extension of the output file. Default is pdf.
    imscale (ndarray): Image scale [arcsec]. Input as np.array([xmin,xmax,ymin,ymax]).
    color (bool): If True, image will be described in color scale.
    cmap: Choose colortype of the color scale.
    colorbar (bool): If True, a color bar will be put in the map.
    cbaroptions: Detailed setting for colorbar. np.array(['position','width','pad','label']).
    vmin, vmax: Minimum and maximum values of the color scale. Put absolute values.
    contour (bool): If True, contour will be drawn.
    clevels (ndarray): Set contour levels. Put absolute values.
    ccolor: Set contour color.
    coord_center (str): Coordinate of the map center. The shape must be
     '00h00m00.00s 00d00m00.00s' or 'hh:mm:ss.ss dd:mm:ss.ss'. RA and DEC must be
     separated by space.

     - Setting details
    xticks, yticks: If given ndarrays, x- and y-ticks will be set to them.
    relativecoords (bool): If True, the coordinate is shown as a relative coordinate.
     Absolute coordinates are currently not supported.
    csize: Font size.
    cstar (bool): If True, a cross denoting the central stellar position will be drawn.
    prop_star: Detailed setting for the central cross.
     np.array(['length','width','color']) or np.array(['length','width','color','coordinates']).
    color_norm: 'log' for log scaling, or a (forward, inverse) tuple of callables
     for mpl.colors.FuncNorm. Anything else falls back to linear scaling.
    figsize (tuple): Figure size. Default A4 size.
    plot_beam (bool): If True, an ellipse denoting the beam size will be drawn.
    bcolor: Color of the beam ellipse.
    tickcolor, axiscolor, labelcolor: Color set for the map.
    interpolation (str): Interpolation for the color map.
    noreg (bool): If False, coordinates will be regridded when the deprojection is calculated.
    scalebar: ndarray([barx, bary, barlength, textx, texty, text, color, fontsize]).
     barx, bary: scalebar position [arcsec]; barlength: bar length [arcsec];
     textx, texty: label position [arcsec]; text: scale label.
    inmode: 'fits' or 'data'. If 'data' is selected, data must be provided.
    exact_coord (bool): If True, pcolor/contour are drawn on the exact coordinate grid.
    '''
    # modules
    import matplotlib.figure as figure
    import matplotlib as mpl
    from astropy.coordinates import SkyCoord
    import matplotlib.patches as patches
    from mpl_toolkits.axes_grid1 import make_axes_locatable

    # supported output formats
    formatlist = np.array(['eps','pdf','png','jpeg'])

    # properties of plots
    #mpl.use('Agg')
    #plt.rcParams['font.family'] ='Arial'    # font (Times New Roman, Helvetica, Arial)
    plt.rcParams['xtick.direction'] = 'in'   # directions of x ticks ('in'), ('out') or ('inout')
    plt.rcParams['ytick.direction'] = 'in'   # directions of y ticks ('in'), ('out') or ('inout')
    plt.rcParams['font.size'] = csize        # fontsize

    # setting output file name & format
    outname = outname if outname else self.file.replace('.fits', '_intensitymap')
    if (outformat == formatlist).any():
        outname = outname + '.' + outformat
    else:
        print ('ERROR\tintensitymap: Outformat is wrong.')
        return

    # input data: either the provided array or the cube read by Imfits
    if inmode == 'data':
        if data is None:
            print ("inmode ='data' is selected. data must be provided.")
            return
        naxis = len(data.shape)
    else:
        data  = self.data
        naxis = self.naxis

    # beam: skip the beam ellipse when the header carries no beam information
    if self.beam is None:
        plot_beam = False
    else:
        bmaj, bmin, bpa = self.beam

    # shift the map center if requested
    if coord_center:
        self.shift_coord_center(coord_center)

    # coordinate style
    if relativecoords:
        xx = self.xx
        yy = self.yy
        cc = self.cc
        xlabel = 'RA offset (arcsec)'
        ylabel = 'DEC offset (arcsec)'
    else:
        print ('WARNING: Abusolute coordinates are still in development.')
        xlabel = self.label_i[0]
        ylabel = self.label_i[1]
        xx = self.xx
        yy = self.yy
        cc = self.cc

    # check data axes: reduce to a 2D image
    if len(data.shape) == 2:
        pass
    elif len(data.shape) == 3:
        data = data[axis,:,:]
    elif len(data.shape) == 4:
        data = data[0,axis,:,:]
    else:
        print ('Error\tintensitymap: Input fits size is incorrect.\
         Must have 2 to 4 axes. Check the shape of the input image.')
        return

    # deg --> arcsec
    xx = xx*3600.
    yy = yy*3600.

    # figure extent: pixel centers padded by half a pixel on each side
    xmin   = xx[0,0]
    xmax   = xx[-1,-1]
    ymin   = yy[0,0]
    ymax   = yy[-1,-1]
    del_x  = xx[1,1] - xx[0,0]
    del_y  = yy[1,1] - yy[0,0]
    extent = (xmin-0.5*del_x, xmax+0.5*del_x, ymin-0.5*del_y, ymax+0.5*del_y)
    #print (extent)

    # image scale; swap limits if needed so RA increases leftward
    if len(imscale) == 0:
        figxmin, figxmax, figymin, figymax = extent  # arcsec
        if figxmin < figxmax:
            cp      = figxmax
            figxmax = figxmin
            figxmin = cp
        if figymin > figymax:
            cp      = figymax
            figymax = figymin
            figymin = cp
    elif len(imscale) == 4:
        figxmax, figxmin, figymin, figymax = imscale  # arcsec
    else:
        print ('ERROR\tIdistmap: Input imscale is wrong. Must be [xmin, xmax, ymin, ymax]')

    # !!!!! plot !!!!!
    # setting figure (reuse ax when overplotting)
    if ax is not None:
        pass
    else:
        fig = plt.figure(figsize=figsize)
        ax  = fig.add_subplot(111)

    # default color-scale maximum
    if vmax:
        pass
    else:
        vmax = np.nanmax(data)

    # color scale
    # Bug fix: previously norm could be left unassigned (NameError at imshow)
    # when color_norm was an unrecognized string or a tuple whose first element
    # was not callable. Every path now ends with a valid norm; linear is the fallback.
    if type(color_norm) == str:
        if color_norm == 'log':
            norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax)
        else:
            norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    elif type(color_norm) == tuple:
        if hasattr(color_norm[0], '__call__'):
            norm = mpl.colors.FuncNorm(color_norm, vmin=vmin, vmax=vmax)
        else:
            print ('ERROR\tintensitymap: color_norm must be strings or tuple of functions.')
            norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    else:
        norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)

    # color map
    if color:
        if exact_coord:
            imcolor = ax.pcolor(xx, yy, data, cmap=cmap, vmin=vmin, vmax=vmax)
        else:
            imcolor = ax.imshow(data, cmap=cmap, origin='lower', extent=extent, norm=norm, interpolation=interpolation, rasterized=True)

        # color bar
        if colorbar:
            cbar_loc, cbar_wd, cbar_pad, cbar_lbl = cbaroptions
            divider = make_axes_locatable(ax)
            cax     = divider.append_axes(cbar_loc, size=cbar_wd, pad=cbar_pad)
            cbar    = plt.colorbar(imcolor, cax=cax) #, ax = ax, orientation=cbar_loc, aspect=float(cbar_wd), pad=float(cbar_pad))
            cbar.set_label(cbar_lbl)

    # contour map
    if contour:
        if exact_coord:
            imcont = ax.contour(xx, yy, data, colors=ccolor, origin='lower', levels=clevels, linewidths=1)
        else:
            imcont = ax.contour(data, colors=ccolor, origin='lower', levels=clevels, linewidths=1, extent=(xmin,xmax,ymin,ymax))

    # set axes
    ax.set_xlim(figxmin,figxmax)
    ax.set_ylim(figymin,figymax)
    ax.set_xlabel(xlabel, fontsize=csize)
    ax.set_ylabel(ylabel, fontsize=csize)
    if len(xticks) != 0:
        ax.set_xticks(xticks)
        ax.set_xticklabels(xticks)
    if len(yticks) != 0:
        ax.set_yticks(yticks)
        ax.set_yticklabels(yticks)
    ax.set_aspect(1)
    ax.tick_params(which='both', direction='in',bottom=True, top=True, left=True, right=True, labelsize=csize, color=tickcolor, labelcolor=labelcolor, pad=9)

    # plot beam size in the lower-left corner (Axes coordinates)
    if plot_beam:
        bmin_plot, bmaj_plot = ax.transLimits.transform((bmin,bmaj)) - ax.transLimits.transform((0,0))  # data --> Axes coordinate
        beam = patches.Ellipse(xy=(0.1, 0.1), width=bmin_plot, height=bmaj_plot, fc=bcolor, angle=bpa, transform=ax.transAxes)
        ax.add_patch(beam)

    # central star position, drawn as a cross
    if cstar:
        if len(prop_star) == 0:
            # defaults: cross length 10% of the x range
            ll = 0.1*np.abs(figxmax - figxmin)
            lw = 1.
            cl = 'k'
            if relativecoords:
                pos_cstar = np.array([0,0])
            else:
                pos_cstar = cc
        elif len(prop_star) == 3:
            ll, lw, cl = prop_star
            ll = float(ll)
            lw = float(lw)
            if relativecoords:
                pos_cstar = np.array([0,0])
            else:
                pos_cstar = cc
        elif len(prop_star) == 4:
            # explicit stellar coordinate given as 'hh:mm:ss.ss dd:mm:ss.ss'
            ll, lw, cl, pos_cstar = prop_star
            ll = float(ll)
            lw = float(lw)
            ra_st, dec_st = pos_cstar.split(' ')
            radec_st  = SkyCoord(ra_st, dec_st, frame='icrs')
            ra_stdeg  = radec_st.ra.degree   # in degree
            dec_stdeg = radec_st.dec.degree  # in degree
            if relativecoords:
                pos_cstar = np.array([(ra_stdeg - cc[0])*3600., (dec_stdeg - cc[1])*3600.])
            else:
                pos_cstar = np.array([ra_stdeg, dec_stdeg])
        else:
            print ('ERROR\tIdistmap: prop_star must be size of 3 or 4.')
            return

        ax.hlines(pos_cstar[1], pos_cstar[0]-ll*0.5, pos_cstar[0]+ll*0.5, lw=lw, color=cl, zorder=11)
        ax.vlines(pos_cstar[0], pos_cstar[1]-ll*0.5, pos_cstar[1]+ll*0.5, lw=lw, color=cl, zorder=11)

    # scale bar
    if len(scalebar) == 0:
        pass
    elif len(scalebar) == 8:
        barx, bary, barlength, textx, texty, text, colors, barcsize = scalebar
        barx      = float(barx)
        bary      = float(bary)
        barlength = float(barlength)
        textx     = float(textx)
        texty     = float(texty)
        ax.hlines(bary, barx - barlength*0.5, barx + barlength*0.5, lw=2, color=colors, zorder=10)
        ax.text(textx, texty, text, color=colors, fontsize=barcsize,
            horizontalalignment='center', verticalalignment='center')
    else:
        print ('The parameter scalebar must have 8 elements but does not.')

    plt.savefig(outname, transparent = True)

    return ax
# Channel maps
def channelmaps(self, grid=None, data=None, outname=None, outformat='pdf', imscale=[], color=False, cbaron=False, cmap='Blues', vmin=None, vmax=None,
    contour=True, clevels=np.array([0.15, 0.3, 0.45, 0.6, 0.75, 0.9]), ccolor='k',
    nrow=5, ncol=5, velmin=None, velmax=None, nskip=1, cw=0.5,
    xticks=[], yticks=[], relativecoords=True, vsys=None, csize=14, scalebar=np.empty(0),
    cstar=True, prop_star=[], logscale=False, tickcolor='k', axiscolor='k',
    labelcolor='k', cbarlabel=None, txtcolor='k', bcolor='k', figsize=(11.69,8.27),
    cbarticks=None, coord_center=None, noreg=True, arcsec=True, sbar_vertical=False,
    cbaroptions=np.array(['right','5%','0%']), inmode='fits', vlabel_on=True):
    '''
    Make channel maps from a fits file.

    Usage (examples)
     Draw a map:
      channelmap('object.co.fits', outname='test', imscale=[-10,10,-10,10],
       color=True, cmap='Greys', velmin=5.2, velmax=12.5, nrow=5, ncol=8)

     Overplot:
      rms = 10. # rms
      grid = channelmap('object.co.fits', outname='test', imscale=[-10,10,-10,10], color=True)
      grid = channelmap('object.13co.fits', outname='test', grid=grid, color=False,
       contour=True, clevels=np.array([3,6,9,12])*rms) # put grid=grid, the same outname

      # If you want to overplot something more
      grid[nrow*(ncol-1)].plot(0, 0, marker='*', size=40) # overplot
      plt.savefig('test.pdf', transparent=True)           # use the same name

    Args
    fitsdata: Input fitsdata. It must be a cube having 3 or 4 axes.
    outname: Output file name. Do not include file extension.
    outformat: Extension of the output file. Default is pdf.
    imscale: Scale to be shown (arcsec). It must be given as [xmin, xmax, ymin, ymax].
    color (bool): If True, images will be shown in colorscale. Default is False.
     cmap: Color of the colorscale.
     vmin: Minimum value of colorscale. Default is None.
     vmax: Maximum value of colorscale. Default is the maximum value of the cube.
     logscale (bool): If True, the color will be shown in logscale.
    contour (bool): If True, images will be shown with contour. Default is True.
     clevels (ndarray): Contour levels. Input will be treated as absolute values.
     ccolor: Color of contour.
    nrow, ncol: The number of rows and columns of the channel map.
    relativecoords (bool): If True, the channel map will be produced in relative coordinates.
    velmin, velmax: Minimum and maximum velocity to be shown.
    vsys: Systemic velocity [km/s]. If not given, velocities will be described in LSRK.
    csize: Character size. Default is 14.
    cstar: If True, the center position will be marked by a cross.
    prop_star: Detailed setting for the cross: np.array(['length','width','color']).
    coord_center (str): Coordinate of the map center. The shape must be
     '00h00m00.00s 00d00m00.00s' or 'hh:mm:ss.ss dd:mm:ss.ss'. RA and DEC must be separated by space.
    tickcolor, axiscolor, labelcolor, txtcolor: Colors for the maps.
    scalebar (array): If given, a scalebar will be drawn. It must be given as
     [barx, bary, barlength, textx, texty, text, color, fontsize], positions in arcsec.
    nskip: Draw every nskip-th channel.
    '''
    # modules
    import matplotlib.figure as figure
    import matplotlib as mpl
    #from mpl_toolkits.mplot3d import axes3d
    from astropy.coordinates import SkyCoord
    import matplotlib.patches as patches
    from mpl_toolkits.axes_grid1 import ImageGrid

    # supported output formats
    formatlist = np.array(['eps','pdf','png','jpeg'])

    # properties of plots
    #mpl.use('Agg')
    plt.rcParams['font.family'] ='Arial'    # font (Times New Roman, Helvetica, Arial)
    plt.rcParams['xtick.direction'] = 'in'  # directions of x ticks ('in'), ('out') or ('inout')
    plt.rcParams['ytick.direction'] = 'in'  # directions of y ticks ('in'), ('out') or ('inout')
    plt.rcParams['font.size'] = csize       # fontsize

    # Setting output file name & format
    if (outformat == formatlist).any():
        #outfile = outname + '_nmap{0:02d}'.format(nmap) + '.' + outformat
        outfile = outname + '.' + outformat
    else:
        print ('ERROR\tdraw_channelmaps: Outformat is wrong.')
        return

    # input data: either the provided array or the cube read by Imfits
    if inmode == 'data':
        if data is None:
            print ("inmode ='data' is selected. data must be provided.")
            return
        naxis = len(data.shape)
    else:
        data   = self.data
        header = self.header
        naxis  = self.naxis

    # number of axes: a velocity axis is required
    if naxis < 3:
        print ('ERROR\tdraw_channelmaps: NAXIS of fits is < 3 although It must be > 3.')
        return

    # Bug fix: plot_beam was previously only assigned in the None branch, while the
    # beam ellipse was drawn unconditionally later on. Now plot_beam is always
    # defined and the ellipse drawing is guarded by it, so cubes without beam
    # information no longer raise a NameError.
    if self.beam is None:
        plot_beam = False
    else:
        bmaj, bmin, bpa = self.beam
        plot_beam = True

    # coordinate center
    if coord_center:
        self.shift_coord_center(coord_center)

    # Coordinates
    if relativecoords:
        xx = self.xx
        yy = self.yy
        cc = self.cc
        xlabel = 'RA offset (arcsec)'
        ylabel = 'DEC offset (arcsec)'
    else:
        print ('WARNING: Abusolute coordinates are still in development.')
        xlabel = self.label_i[0]
        ylabel = self.label_i[1]
        xx = self.xx
        yy = self.yy
        cc = self.cc

    # check data axes: reduce to (v, y, x)
    if len(data.shape) == 3:
        pass
    elif len(data.shape) == 4:
        data = data[0,:,:,:]
    else:
        print ('Error\tchannelmaps: Input fits size is not corrected.\
         It is allowed only to have 3 or 4 axes. Check the shape of the fits file.')
        return

    # unit: arcsec or deg
    if arcsec:
        xx = xx*3600.
        yy = yy*3600.

    # Get velocity axis
    vaxis = self.vaxis
    delv  = self.delv
    nchan = self.naxis_i[2]

    # make the velocity axis ascending
    if delv < 0:
        delv  = - delv
        vaxis = vaxis[::-1]
        data  = data[::-1,:,:]

    # Figure extent: pixel centers padded by half a pixel on each side
    xmin   = xx[0,0]
    xmax   = xx[-1,-1]
    ymin   = yy[0,0]
    ymax   = yy[-1,-1]
    del_x  = xx[1,1] - xx[0,0]
    del_y  = yy[1,1] - yy[0,0]
    extent = (xmin-0.5*del_x, xmax+0.5*del_x, ymin-0.5*del_y, ymax+0.5*del_y)
    #print (extent)

    # Relative velocity
    if vsys:
        vaxis = vaxis - vsys

    # Set colorscale
    vmax = vmax if vmax else np.nanmax(data)
    vmin = vmin if vmin else np.nanmin(data)

    # Logscale or linear color scaling
    norm = mpl.colors.LogNorm(vmin=vmin,vmax=vmax) if logscale else mpl.colors.Normalize(vmin=vmin,vmax=vmax)

    # !!! Plot !!!
    # Setting colorbar
    cbar_mode = 'single' if color and cbaron else None

    # Reuse the grid when overplotting
    if grid:
        pass
    else:
        fig = plt.figure(figsize=figsize)
        # Setting grid
        cbar_loc, cbar_wd, cbar_pad = cbaroptions
        grid = ImageGrid(fig, rect=111, nrows_ncols=(nrow,ncol),
            axes_pad=0,share_all=True,cbar_mode=cbar_mode,
            cbar_location=cbar_loc,cbar_size=cbar_wd,cbar_pad=cbar_pad,
            label_mode='1')

    # Setting parameters used to plot
    if len(imscale) == 0:
        figxmin, figxmax, figymin, figymax = extent
    elif len(imscale) == 4:
        figxmax, figxmin, figymin, figymax = imscale
    else:
        print ('ERROR\tchannelmaps: Input imscale is wrong. Must be [xmin, xmax, ymin, ymax]')

    # data for plot: clip the velocity range and skip channels
    if velmax:
        d_plt  = data[vaxis <= velmax,:,:]
        v_plt  = vaxis[vaxis <= velmax]
        nv_plt = len(v_plt)
    else:
        d_plt  = data.copy()
        v_plt  = vaxis.copy()
        nv_plt = len(v_plt)

    if velmin:
        d_plt  = d_plt[v_plt >= velmin,:,:]
        v_plt  = v_plt[v_plt >= velmin]
        nv_plt = len(v_plt)

    if nskip:
        d_plt  = d_plt[::nskip,:,:]
        v_plt  = v_plt[::nskip]
        nv_plt = len(v_plt)

    # Counter
    i, j, gridi = [0,0,0]
    gridimax    = nrow*ncol-1
    ax = None

    # Loop over channels
    for ichan in range(nv_plt):
        # maximum grid
        if gridi > gridimax:
            break

        # Select channel
        Sv = d_plt[ichan,:,:]

        # velocity at nchan
        v_i = v_plt[ichan]

        # Axis
        ax = grid[gridi]
        print ('Channel ', '%s'%ichan, ', velocity: ', '%4.2f'%v_i, ' km/s')

        # showing in color scale
        if color:
            imcolor = ax.imshow(Sv, cmap=cmap, origin='lower', extent=extent, norm=norm, rasterized=True)

        if contour:
            imcont = ax.contour(Sv, colors=ccolor, origin='lower', extent=extent, levels=clevels, linewidths=cw)

        # set axes
        ax.set_xlim(figxmin,figxmax)
        ax.set_ylim(figymin,figymax)
        ax.spines["bottom"].set_color(axiscolor)
        ax.spines["top"].set_color(axiscolor)
        ax.spines["left"].set_color(axiscolor)
        ax.spines["right"].set_color(axiscolor)
        if len(xticks) != 0:
            ax.set_xticks(xticks)
        if len(yticks) != 0:
            ax.set_yticks(yticks)

        ax.set_aspect(1)
        ax.tick_params(which='both', direction='in',bottom=True,
            top=True, left=True, right=True, color=tickcolor,
            labelcolor=labelcolor, pad=9, labelsize=csize)

        # Velocity label
        if vlabel_on:
            vlabel = '%3.2f'%v_i
            ax.text(0.1, 0.9, vlabel, color=txtcolor, size=csize,
                horizontalalignment='left', verticalalignment='top',
                transform=ax.transAxes)

        # Central star position
        if cstar:
            if len(prop_star) == 0:
                # defaults: cross length 10% of the x range
                ll = 0.1*np.abs(figxmax - figxmin)
                lw = 1.
                cl = 'k'
                pos_cstar = np.array([0,0]) if relativecoords else cc
            elif len(prop_star) == 3:
                ll, lw, cl = prop_star
                ll = float(ll)
                lw = float(lw)
                pos_cstar = np.array([0,0]) if relativecoords else cc
            else:
                # Bug fix: previously the warning was printed but ll/lw/cl/pos_cstar
                # were left unbound, crashing the following hlines call. Fall back
                # to the defaults so the wrong input is truly ignored.
                print ('WARNING\tchannelmaps: The parameter prop_star\
                 must have 3 elements but does not. The input is ignored.')
                ll = 0.1*np.abs(figxmax - figxmin)
                lw = 1.
                cl = 'k'
                pos_cstar = np.array([0,0]) if relativecoords else cc

            ax.hlines(pos_cstar[1], pos_cstar[0]-ll*0.5, pos_cstar[0]+ll*0.5, lw=lw, color=cl, zorder=11)
            ax.vlines(pos_cstar[0], pos_cstar[1]-ll*0.5, pos_cstar[1]+ll*0.5, lw=lw, color=cl, zorder=11)

        gridi += 1

    # On the bottom-left corner pannel
    # Labels
    grid[(nrow-1)*ncol].set_xlabel(xlabel)
    grid[(nrow-1)*ncol].set_ylabel(ylabel)
    grid[(nrow-1)*ncol].xaxis.label.set_color(labelcolor)
    grid[(nrow-1)*ncol].yaxis.label.set_color(labelcolor)

    # Plot beam (guarded; see bug-fix note above)
    if plot_beam:
        bmin_plot, bmaj_plot = grid[(nrow-1)*ncol].transLimits.transform((0,bmaj)) - grid[(nrow-1)*ncol].transLimits.transform((bmin,0))  # data --> Axes coordinate
        beam = patches.Ellipse(xy=(0.1, 0.1), width=bmin_plot, height=bmaj_plot, fc=bcolor, angle=bpa, transform=grid[(nrow-1)*ncol].transAxes)
        grid[(nrow-1)*ncol].add_patch(beam)

    # Scale bar
    if len(scalebar) == 0:
        pass
    elif len(scalebar) == 8:
        barx, bary, barlength, textx, texty, text, barcolor, barcsize = scalebar
        barx      = float(barx)
        bary      = float(bary)
        barlength = float(barlength)
        textx     = float(textx)
        texty     = float(texty)
        if sbar_vertical:
            grid[(nrow-1)*ncol].vlines(barx, bary - barlength*0.5, bary + barlength*0.5, color=barcolor, lw=2, zorder=10)
        else:
            grid[(nrow-1)*ncol].hlines(bary, barx - barlength*0.5, barx + barlength*0.5, color=barcolor, lw=2, zorder=10)

        grid[(nrow-1)*ncol].text(textx,texty,text,color=barcolor,fontsize=barcsize,horizontalalignment='center',verticalalignment='center')
    else:
        print ('scalebar must consist of 8 elements. Check scalebar.')

    if color and cbaron and ax:
        # With cbar_mode="single", cax attribute of all axes are identical.
        cax  = grid.cbar_axes[0]
        cbar = plt.colorbar(imcolor, ticks=cbarticks, cax=cax)
        #cbar = cax.colorbar(imcolor,ticks=cbarticks)
        cax.toggle_label(True)
        cbar.ax.yaxis.set_tick_params(color=tickcolor)  # tick color
        cbar.ax.spines["bottom"].set_color(axiscolor)   # axes color
        cbar.ax.spines["top"].set_color(axiscolor)
        cbar.ax.spines["left"].set_color(axiscolor)
        cbar.ax.spines["right"].set_color(axiscolor)
        if cbarlabel:
            cbar.ax.set_ylabel(cbarlabel, color=labelcolor)  # label

    # hide the unused panels
    if gridi != gridimax+1 and gridi != 0:
        while gridi != gridimax+1:
            #print gridi
            ax = grid[gridi]
            ax.spines["right"].set_color("none")   # right
            ax.spines["left"].set_color("none")    # left
            ax.spines["top"].set_color("none")     # top
            ax.spines["bottom"].set_color("none")  # bottom
            ax.axis('off')

            gridi = gridi+1

    plt.savefig(outfile, transparent = True)

    return grid
# Draw pv diagram
def pvdiagram(self,outname,data=None,header=None,ax=None,outformat='pdf',color=True,cmap='Blues',
    vmin=None,vmax=None,vsys=0,contour=True,clevels=None,ccolor='k', pa=None,
    vrel=False,logscale=False,x_offset=False,ratio=1.2, prop_vkep=None,fontsize=14,
    lw=1,clip=None,plot_res=True,inmode='fits',xranges=[], yranges=[],
    ln_hor=True, ln_var=True, alpha=None):
    '''
    Draw a position-velocity (PV) diagram.

    Parameters
    ----------
    outname (str): Output file name without extension.
    data (ndarray): PV data used when inmode='data'; otherwise self.data is read.
    ax: Matplotlib axis to draw on. A new figure is created when None.
    outformat (str): One of 'eps', 'pdf', 'png', 'jpeg'.
    color/cmap/vmin/vmax/logscale: Color-scale settings.
    contour/clevels/ccolor: Contour settings (clevels are absolute values).
    vsys (float): Systemic velocity [km/s].
    pa (float): Position angle of the PV cut, used to compute the resolution
     along the offset axis when self.res_off is not set.
    vrel (bool): If True, the velocity axis is drawn relative to vsys.
    x_offset (bool): If True, offset is on the x axis; otherwise velocity is.
    ratio (float): Aspect ratio (x-axis width relative to y-axis width).
    clip (float): Values below clip are masked in the color map (not the contours).
    plot_res (bool): If True, a cross showing the resolutions is drawn.
    xranges, yranges: Axis ranges as [min, max].
    ln_hor, ln_var (bool): Draw dashed lines at systemic velocity / zero offset.

    Return
    ------
    ax: matplotlib axis.
    '''
    # Modules
    import copy
    import matplotlib as mpl

    # format
    formatlist = np.array(['eps','pdf','png','jpeg'])

    # properties of plots
    #mpl.use('Agg')
    plt.rcParams['font.family'] ='Arial'    # font (Times New Roman, Helvetica, Arial)
    plt.rcParams['xtick.direction'] = 'in'  # directions of x ticks ('in'), ('out') or ('inout')
    plt.rcParams['ytick.direction'] = 'in'  # directions of y ticks ('in'), ('out') or ('inout')
    plt.rcParams['font.size'] = fontsize    # fontsize

    def change_aspect_ratio(ax, ratio):
        '''
        This function change aspect ratio of figure.
        Parameters:
            ax: ax (matplotlit.pyplot.subplots())
                Axes object
            ratio: float or int
                relative x axis width compared to y axis width.
        '''
        aspect = (1/ratio) *(ax.get_xlim()[1] - ax.get_xlim()[0]) / (ax.get_ylim()[1] - ax.get_ylim()[0])
        aspect = np.abs(aspect)
        aspect = float(aspect)
        ax.set_aspect(aspect)

    # output file
    if (outformat == formatlist).any():
        outname = outname + '.' + outformat
    else:
        print ('ERROR\tsingleim_to_fig: Outformat is wrong.')
        return

    # Input: either the provided array or the cube read by Imfits
    if inmode == 'data':
        if data is None:
            print ("inmode ='data' is selected. data must be provided.")
            return
        naxis = len(data.shape)
    else:
        data = self.data
        header = self.header
        naxis = self.naxis

    # figures
    if ax:
        pass
    else:
        fig = plt.figure(figsize=(11.69,8.27)) # figsize=(11.69,8.27)
        ax = fig.add_subplot(111)

    # Read offset/velocity axes of the PV cut
    xaxis = self.xaxis
    vaxis = self.vaxis
    delx = self.delx
    delv = self.delv
    nx = len(xaxis)
    nv = len(vaxis)

    # Beam
    # NOTE(review): unlike intensitymap/channelmaps, this unpack is not guarded;
    # presumably it raises TypeError when self.beam is None — confirm with callers.
    bmaj, bmin, bpa = self.beam

    if self.res_off:
        res_off = self.res_off
    else:
        # Resolution along offset axis
        if self.pa:
            pa = self.pa

        if pa:
            # an ellipse of the beam
            # (x/bmin)**2 + (y/bmaj)**2 = 1
            # y = x*tan(theta)
            # --> solve to get resolution in the direction of pv cut with P.A.=pa
            del_pa = pa - bpa
            del_pa = del_pa*np.pi/180. # radian
            term_sin = (np.sin(del_pa)/bmin)**2.
            term_cos = (np.cos(del_pa)/bmaj)**2.
            res_off  = np.sqrt(1./(term_sin + term_cos))
        else:
            res_off = bmaj

    # relative velocity or LSRK
    offlabel = r'$\mathrm{Offset\ (arcsec)}$'
    if vrel:
        vaxis   = vaxis - vsys
        vlabel  = r'$\mathrm{Relative\ velocity\ (km\ s^{-1})}$'
        vcenter = 0
    else:
        vlabel  = r'$\mathrm{LSR\ velocity\ (km\ s^{-1})}$'
        vcenter = vsys

    # set extent of the image: pixel centers padded by half a pixel
    offmin = xaxis[0] - delx*0.5
    offmax = xaxis[-1] + delx*0.5
    velmin = vaxis[0] - delv*0.5
    velmax = vaxis[-1] + delv*0.5

    # set axes: orient the data so the chosen quantity is on x
    if x_offset:
        data   = data[0,:,:]
        extent = (offmin,offmax,velmin,velmax)
        xlabel = offlabel
        ylabel = vlabel
        hline_params = [vsys,offmin,offmax]
        vline_params = [0.,velmin,velmax]
        res_x = res_off
        res_y = delv
    else:
        # rotate so velocity runs along x and offset along y
        data   = np.rot90(data[0,:,:])
        extent = (velmin,velmax,offmin,offmax)
        xlabel = vlabel
        ylabel = offlabel
        hline_params = [0.,velmin,velmax]
        vline_params = [vcenter,offmin,offmax]
        res_x = delv
        res_y = res_off

    # set colorscale
    if vmax:
        pass
    else:
        vmax = np.nanmax(data)

    # logscale
    if logscale:
        norm = mpl.colors.LogNorm(vmin=vmin,vmax=vmax)
    else:
        norm = mpl.colors.Normalize(vmin=vmin,vmax=vmax)

    # clip data at some value (color map only; contours use the unclipped data)
    data_color = copy.copy(data)
    if clip:
        data_color[np.where(data < clip)] = np.nan

    # plot images
    if color:
        imcolor = ax.imshow(data_color, cmap=cmap, origin='lower',
            extent=extent, norm=norm, alpha=alpha)

    if contour:
        imcont = ax.contour(data, colors=ccolor, origin='lower',
            extent=extent, levels=clevels, linewidths=lw, alpha=alpha)

    # axis labels
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)

    # set xlim, ylim
    if len(xranges) == 0:
        ax.set_xlim(extent[0],extent[1])
    elif len(xranges) == 2:
        xmin, xmax = xranges
        ax.set_xlim(xmin, xmax)
    else:
        print ('WARRING: Input xranges is wrong. Must be [xmin, xmax].')
        ax.set_xlim(extent[0],extent[1])

    if len(yranges) == 0:
        ax.set_ylim(extent[2],extent[3])
    elif len(yranges) == 2:
        ymin, ymax = yranges
        ax.set_ylim(ymin, ymax)
    else:
        print ('WARRING: Input yranges is wrong. Must be [ymin, ymax].')
        ax.set_ylim(extent[2],extent[3])

    # lines showing offset 0 and relative velocity 0
    # NOTE(review): plt.hlines/plt.vlines draw into the *current* axes, which may
    # differ from the provided ax when overplotting — consider ax.hlines; confirm.
    if ln_hor:
        xline = plt.hlines(hline_params[0], hline_params[1], hline_params[2], ccolor, linestyles='dashed', linewidths = 1.)
    if ln_var:
        yline = plt.vlines(vline_params[0], vline_params[1], vline_params[2], ccolor, linestyles='dashed', linewidths = 1.)

    ax.tick_params(which='both', direction='in',bottom=True, top=True, left=True, right=True, pad=9)

    # plot resolutions as a cross in the lower-left corner (Axes coordinates)
    if plot_res:
        # x axis
        #print (res_x, res_y)
        res_x_plt, res_y_plt = ax.transLimits.transform((res_x*0.5, res_y*0.5)) - ax.transLimits.transform((0, 0)) # data --> Axes coordinate
        ax.errorbar(0.1, 0.1, xerr=res_x_plt, yerr=res_y_plt, color=ccolor, capsize=3, capthick=1., elinewidth=1., transform=ax.transAxes)

    # aspect ratio
    if ratio:
        change_aspect_ratio(ax, ratio)

    # save figure
    plt.savefig(outname, transparent=True)

    return ax
def generate_grid(nrow, ncol, figsize=(11.69, 8.27),
cbar_mode=None, axes_pad=(0.2, 0.2), share_all=True,
cbaroptions=['right', '3%', '0'], label_mode='1'):
'''
Generate grid to contain multiple figures. Just using ImageGrid of matplotlib but adjust default parameters for convenience.
For more detail of the function, check https://matplotlib.org/stable/api/_as_gen/mpl_toolkits.axes_grid1.axes_grid.ImageGrid.html.
Parameters
----------
nrow(int): Number of rows
ncol(int): Number of columns
axes_pad (tuple or float): Padding between axes. In cases that tuple is given, it will be treated as (vertical, horizontal) padding.
share_all (bool): Whether all axes share their x- and y-axis.
cbarmode: If each, colorbar will be shown in each axis. If single, one common colorbar will be prepared. Default None.
'''
fig = plt.figure(figsize=figsize)
# Generate grid
cbar_loc, cbar_wd, cbar_pad = cbaroptions
grid = ImageGrid(fig, rect=111, nrows_ncols=(nrow,ncol),
axes_pad=axes_pad, share_all=share_all, cbar_mode=cbar_mode,
cbar_location=cbar_loc,cbar_size=cbar_wd, cbar_pad=cbar_pad,
label_mode=label_mode)
return grid | [
"mpl_toolkits.axes_grid1.ImageGrid",
"numpy.abs",
"numpy.empty",
"matplotlib.pyplot.figure",
"matplotlib.colors.LogNorm",
"numpy.rot90",
"numpy.sin",
"matplotlib.pyplot.hlines",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.colorbar",
"numpy.cos",
"matplotlib.patches.Ellipse",
"matplotli... | [((282, 324), 'numpy.array', 'np.array', (["['right', '4%', '0%', 'Jy/beam']"], {}), "(['right', '4%', '0%', 'Jy/beam'])\n", (290, 324), True, 'import numpy as np\n'), ((4519, 4558), 'numpy.array', 'np.array', (["['eps', 'pdf', 'png', 'jpeg']"], {}), "(['eps', 'pdf', 'png', 'jpeg'])\n", (4527, 4558), True, 'import numpy as np\n'), ((10867, 10905), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outname'], {'transparent': '(True)'}), '(outname, transparent=True)\n', (10878, 10905), True, 'import matplotlib.pyplot as plt\n'), ((11110, 11153), 'numpy.array', 'np.array', (['[0.15, 0.3, 0.45, 0.6, 0.75, 0.9]'], {}), '([0.15, 0.3, 0.45, 0.6, 0.75, 0.9])\n', (11118, 11153), True, 'import numpy as np\n'), ((11300, 11311), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (11308, 11311), True, 'import numpy as np\n'), ((11560, 11591), 'numpy.array', 'np.array', (["['right', '5%', '0%']"], {}), "(['right', '5%', '0%'])\n", (11568, 11591), True, 'import numpy as np\n'), ((14701, 14740), 'numpy.array', 'np.array', (["['eps', 'pdf', 'png', 'jpeg']"], {}), "(['eps', 'pdf', 'png', 'jpeg'])\n", (14709, 14740), True, 'import numpy as np\n'), ((20718, 20854), 'matplotlib.patches.Ellipse', 'patches.Ellipse', ([], {'xy': '(0.1, 0.1)', 'width': 'bmin_plot', 'height': 'bmaj_plot', 'fc': 'bcolor', 'angle': 'bpa', 'transform': 'grid[(nrow - 1) * ncol].transAxes'}), '(xy=(0.1, 0.1), width=bmin_plot, height=bmaj_plot, fc=bcolor,\n angle=bpa, transform=grid[(nrow - 1) * ncol].transAxes)\n', (20733, 20854), True, 'import matplotlib.patches as patches\n'), ((22563, 22601), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {'transparent': '(True)'}), '(outfile, transparent=True)\n', (22574, 22601), True, 'import matplotlib.pyplot as plt\n'), ((23127, 23166), 'numpy.array', 'np.array', (["['eps', 'pdf', 'png', 'jpeg']"], {}), "(['eps', 'pdf', 'png', 'jpeg'])\n", (23135, 23166), True, 'import numpy as np\n'), ((26270, 26285), 'copy.copy', 'copy.copy', (['data'], {}), 
'(data)\n', (26279, 26285), False, 'import copy\n'), ((28001, 28039), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outname'], {'transparent': '(True)'}), '(outname, transparent=True)\n', (28012, 28039), True, 'import matplotlib.pyplot as plt\n'), ((28894, 28921), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (28904, 28921), True, 'import matplotlib.pyplot as plt\n'), ((28991, 29195), 'mpl_toolkits.axes_grid1.ImageGrid', 'ImageGrid', (['fig'], {'rect': '(111)', 'nrows_ncols': '(nrow, ncol)', 'axes_pad': 'axes_pad', 'share_all': 'share_all', 'cbar_mode': 'cbar_mode', 'cbar_location': 'cbar_loc', 'cbar_size': 'cbar_wd', 'cbar_pad': 'cbar_pad', 'label_mode': 'label_mode'}), '(fig, rect=111, nrows_ncols=(nrow, ncol), axes_pad=axes_pad,\n share_all=share_all, cbar_mode=cbar_mode, cbar_location=cbar_loc,\n cbar_size=cbar_wd, cbar_pad=cbar_pad, label_mode=label_mode)\n', (29000, 29195), False, 'from mpl_toolkits.axes_grid1 import ImageGrid\n'), ((6962, 6989), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6972, 6989), True, 'import matplotlib.pyplot as plt\n'), ((7072, 7087), 'numpy.nanmax', 'np.nanmax', (['data'], {}), '(data)\n', (7081, 7087), True, 'import numpy as np\n'), ((9008, 9123), 'matplotlib.patches.Ellipse', 'patches.Ellipse', ([], {'xy': '(0.1, 0.1)', 'width': 'bmin_plot', 'height': 'bmaj_plot', 'fc': 'bcolor', 'angle': 'bpa', 'transform': 'ax.transAxes'}), '(xy=(0.1, 0.1), width=bmin_plot, height=bmaj_plot, fc=bcolor,\n angle=bpa, transform=ax.transAxes)\n', (9023, 9123), True, 'import matplotlib.patches as patches\n'), ((17040, 17055), 'numpy.nanmax', 'np.nanmax', (['data'], {}), '(data)\n', (17049, 17055), True, 'import numpy as np\n'), ((17082, 17097), 'numpy.nanmin', 'np.nanmin', (['data'], {}), '(data)\n', (17091, 17097), True, 'import numpy as np\n'), ((17119, 17159), 'matplotlib.colors.LogNorm', 'mpl.colors.LogNorm', ([], {'vmin': 'vmin', 'vmax': 
'vmax'}), '(vmin=vmin, vmax=vmax)\n', (17137, 17159), True, 'import matplotlib as mpl\n'), ((17176, 17218), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (17196, 17218), True, 'import matplotlib as mpl\n'), ((17342, 17369), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (17352, 17369), True, 'import matplotlib.pyplot as plt\n'), ((17441, 17627), 'mpl_toolkits.axes_grid1.ImageGrid', 'ImageGrid', (['fig'], {'rect': '(111)', 'nrows_ncols': '(nrow, ncol)', 'axes_pad': '(0)', 'share_all': '(True)', 'cbar_mode': 'cbar_mode', 'cbar_location': 'cbar_loc', 'cbar_size': 'cbar_wd', 'cbar_pad': 'cbar_pad', 'label_mode': '"""1"""'}), "(fig, rect=111, nrows_ncols=(nrow, ncol), axes_pad=0, share_all=\n True, cbar_mode=cbar_mode, cbar_location=cbar_loc, cbar_size=cbar_wd,\n cbar_pad=cbar_pad, label_mode='1')\n", (17450, 17627), False, 'from mpl_toolkits.axes_grid1 import ImageGrid\n'), ((21765, 21812), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imcolor'], {'ticks': 'cbarticks', 'cax': 'cax'}), '(imcolor, ticks=cbarticks, cax=cax)\n', (21777, 21812), True, 'import matplotlib.pyplot as plt\n'), ((23908, 23922), 'numpy.abs', 'np.abs', (['aspect'], {}), '(aspect)\n', (23914, 23922), True, 'import numpy as np\n'), ((24403, 24436), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11.69, 8.27)'}), '(figsize=(11.69, 8.27))\n', (24413, 24436), True, 'import matplotlib.pyplot as plt\n'), ((25812, 25835), 'numpy.rot90', 'np.rot90', (['data[0, :, :]'], {}), '(data[0, :, :])\n', (25820, 25835), True, 'import numpy as np\n'), ((26076, 26091), 'numpy.nanmax', 'np.nanmax', (['data'], {}), '(data)\n', (26085, 26091), True, 'import numpy as np\n'), ((26129, 26169), 'matplotlib.colors.LogNorm', 'mpl.colors.LogNorm', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (26147, 26169), True, 'import matplotlib as mpl\n'), ((26185, 26227), 
'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (26205, 26227), True, 'import matplotlib as mpl\n'), ((27246, 27356), 'matplotlib.pyplot.hlines', 'plt.hlines', (['hline_params[0]', 'hline_params[1]', 'hline_params[2]', 'ccolor'], {'linestyles': '"""dashed"""', 'linewidths': '(1.0)'}), "(hline_params[0], hline_params[1], hline_params[2], ccolor,\n linestyles='dashed', linewidths=1.0)\n", (27256, 27356), True, 'import matplotlib.pyplot as plt\n'), ((27376, 27486), 'matplotlib.pyplot.vlines', 'plt.vlines', (['vline_params[0]', 'vline_params[1]', 'vline_params[2]', 'ccolor'], {'linestyles': '"""dashed"""', 'linewidths': '(1.0)'}), "(vline_params[0], vline_params[1], vline_params[2], ccolor,\n linestyles='dashed', linewidths=1.0)\n", (27386, 27486), True, 'import matplotlib.pyplot as plt\n'), ((7169, 7209), 'matplotlib.colors.LogNorm', 'mpl.colors.LogNorm', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (7187, 7209), True, 'import matplotlib as mpl\n'), ((7455, 7497), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (7475, 7497), True, 'import matplotlib as mpl\n'), ((7845, 7868), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (7864, 7868), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((7953, 7983), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imcolor'], {'cax': 'cax'}), '(imcolor, cax=cax)\n', (7965, 7983), True, 'import matplotlib.pyplot as plt\n'), ((25089, 25125), 'numpy.sqrt', 'np.sqrt', (['(1.0 / (term_sin + term_cos))'], {}), '(1.0 / (term_sin + term_cos))\n', (25096, 25125), True, 'import numpy as np\n'), ((26309, 26330), 'numpy.where', 'np.where', (['(data < clip)'], {}), '(data < clip)\n', (26317, 26330), True, 'import numpy as np\n'), ((7293, 7346), 'matplotlib.colors.FuncNorm', 'mpl.colors.FuncNorm', (['color_norm'], 
{'vmin': 'vmin', 'vmax': 'vmax'}), '(color_norm, vmin=vmin, vmax=vmax)\n', (7312, 7346), True, 'import matplotlib as mpl\n'), ((9216, 9241), 'numpy.abs', 'np.abs', (['(figxmax - figxmin)'], {}), '(figxmax - figxmin)\n', (9222, 9241), True, 'import numpy as np\n'), ((9303, 9319), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9311, 9319), True, 'import numpy as np\n'), ((9475, 9491), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9483, 9491), True, 'import numpy as np\n'), ((9678, 9715), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ra_st', 'dec_st'], {'frame': '"""icrs"""'}), "(ra_st, dec_st, frame='icrs')\n", (9686, 9715), False, 'from astropy.coordinates import SkyCoord\n'), ((19699, 19724), 'numpy.abs', 'np.abs', (['(figxmax - figxmin)'], {}), '(figxmax - figxmin)\n', (19705, 19724), True, 'import numpy as np\n'), ((19766, 19782), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (19774, 19782), True, 'import numpy as np\n'), ((25010, 25024), 'numpy.sin', 'np.sin', (['del_pa'], {}), '(del_pa)\n', (25016, 25024), True, 'import numpy as np\n'), ((25050, 25064), 'numpy.cos', 'np.cos', (['del_pa'], {}), '(del_pa)\n', (25056, 25064), True, 'import numpy as np\n'), ((9894, 9963), 'numpy.array', 'np.array', (['[(ra_stdeg - cc[0]) * 3600.0, (dec_stdeg - cc[1]) * 3600.0]'], {}), '([(ra_stdeg - cc[0]) * 3600.0, (dec_stdeg - cc[1]) * 3600.0])\n', (9902, 9963), True, 'import numpy as np\n'), ((9983, 10014), 'numpy.array', 'np.array', (['[ra_stdeg, dec_stdeg]'], {}), '([ra_stdeg, dec_stdeg])\n', (9991, 10014), True, 'import numpy as np\n'), ((19918, 19934), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (19926, 19934), True, 'import numpy as np\n')] |
"""
Lun. 12 Sep 2018
Author: <NAME>
"""
#IMPORTING LIBS
import numpy as np
import scipy.stats as si
import matplotlib.pyplot as plt
#Helper functions:
def d_m(x, v):
d_ = np.log(x) / np.sqrt(v) - .5 * np.sqrt(v)
return d_
#Q1-a
def C0(r, T, S0, K, sigma):
X0 = S0 / (K * np.exp(-r*T))
C = si.norm.cdf(- d_m(X0, sigma**2 * T))
return np.exp(-r*T) * C
def Delta0(r, T, S0, K, sigma):
X0 = S0 / (K * np.exp(-r*T))
D = si.norm.pdf(- d_m(X0, sigma**2 * T))
return - np.exp(-r*T) * D / (S0 * np.sqrt(sigma**2 * T))
#Q1-b
def Brownian(n, i, T):
#We fist define the time step Dt
dt = T/n
#We then create an array of i drawings following N(0, Dt)
z = np.random.normal(0, 1, i) * np.sqrt(dt)
#We specify W_0 = 0
z[0] = 0
#We then compute the cumulative sums to obtain W
W = np.cumsum(z)
return list(W)
def S_(r, sigma, S0, T, n, W):
S__ = []
for i in range(n):
S__ += [S0 * np.exp((r - sigma**2 / 2) * i * T/n + sigma * W[i])]
return S__
def MC_C(r, T, S0, K, sigma, M, n):
Ind_ = []
for k in range(M):
W = Brownian(n, n, T)
S_T = S_(r, sigma, S0, T, n, W)[-1]
if S_T <= K:
Ind_ += [1]
else:
Ind_ += [0]
Ind_ = np.array(Ind_)
Esp = np.cumsum(Ind_)[-1] / M
return np.exp(-r * T) * Esp
def Diff_Delta0(r, T, S0, K, sigma, M, n, epsilon):
return (MC_C(r, T, S0 + epsilon, K, sigma, M, n) - MC_C(r, T, S0 - epsilon, K, sigma, M, n))/ (2 * epsilon)
def MC_Delta(r, T, S0, K, sigma, M, n):
Ind_ = []
for k in range(M):
W = Brownian(n, n, T)
S_T = S_(r, sigma, S0, T, n, W)[-1]
if S_T <= K:
Ind_ += [W[-1] / (S0 * sigma * T)]
else:
Ind_ += [0]
Ind_ = np.array(Ind_)
Esp = np.cumsum(Ind_)[-1] / M
return np.exp(-r * T) * Esp
| [
"numpy.log",
"numpy.cumsum",
"numpy.array",
"numpy.exp",
"numpy.random.normal",
"numpy.sqrt"
] | [((847, 859), 'numpy.cumsum', 'np.cumsum', (['z'], {}), '(z)\n', (856, 859), True, 'import numpy as np\n'), ((1287, 1301), 'numpy.array', 'np.array', (['Ind_'], {}), '(Ind_)\n', (1295, 1301), True, 'import numpy as np\n'), ((1809, 1823), 'numpy.array', 'np.array', (['Ind_'], {}), '(Ind_)\n', (1817, 1823), True, 'import numpy as np\n'), ((366, 380), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (372, 380), True, 'import numpy as np\n'), ((707, 732), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'i'], {}), '(0, 1, i)\n', (723, 732), True, 'import numpy as np\n'), ((735, 746), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (742, 746), True, 'import numpy as np\n'), ((1348, 1362), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (1354, 1362), True, 'import numpy as np\n'), ((1870, 1884), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (1876, 1884), True, 'import numpy as np\n'), ((183, 192), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (189, 192), True, 'import numpy as np\n'), ((195, 205), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (202, 205), True, 'import numpy as np\n'), ((213, 223), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (220, 223), True, 'import numpy as np\n'), ((294, 308), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (300, 308), True, 'import numpy as np\n'), ((436, 450), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (442, 450), True, 'import numpy as np\n'), ((535, 558), 'numpy.sqrt', 'np.sqrt', (['(sigma ** 2 * T)'], {}), '(sigma ** 2 * T)\n', (542, 558), True, 'import numpy as np\n'), ((1312, 1327), 'numpy.cumsum', 'np.cumsum', (['Ind_'], {}), '(Ind_)\n', (1321, 1327), True, 'import numpy as np\n'), ((1834, 1849), 'numpy.cumsum', 'np.cumsum', (['Ind_'], {}), '(Ind_)\n', (1843, 1849), True, 'import numpy as np\n'), ((510, 524), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (516, 524), True, 'import numpy as np\n'), ((966, 1021), 'numpy.exp', 'np.exp', (['((r - sigma 
** 2 / 2) * i * T / n + sigma * W[i])'], {}), '((r - sigma ** 2 / 2) * i * T / n + sigma * W[i])\n', (972, 1021), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.preprocessing import scale
from sklearn.preprocessing import StandardScaler
import matplotlib
import matplotlib.pyplot as plt
def find_error(X, y, w):
"""
Returns || Xw - y ||_2^2 (squared error)
"""
return np.linalg.norm(X@w - y, ord=2)**2
def get_lambda_lims(X, y, eps):
"""
Returns the limits of the regularization path:
lambda_max = (1/N) max(X.T @ y)
lambda_min = eps * lambda_max
"""
n = X.shape[0]
lambda_max = (1/n)*np.linalg.norm(X.T @ y, np.inf)
lambda_min = eps*lambda_max
return lambda_min, lambda_max
def scale_X_y(X,y):
"""
Scales columns of matrix X to have zero mean and unit variance
Scales column vector y to have zero mean
"""
#scaledX = scale(X)
#scaledy = scale(y, with_std=False)
scalerX = StandardScaler()
scalery = StandardScaler(with_std=False)
scaledX = scalerX.fit_transform(X)
scaledy = scalery.fit_transform(y)
return scaledX, scaledy
def pde_string(w, rhs_description, ut = 'u_t', print_imag=False):
"""
Prints a pde based on:
w: weights vector
rhs_description: a list of strings corresponding to the entries in w
ut: string descriptor of the time derivative
print_imag: whether to print the imaginary part of the weights
Returns:
pde: string with the pde
"""
pde = ut + ' = '
first = True
for i in range(len(w)):
if w[i] != 0:
if not first:
pde = pde + ' + '
if print_imag==False:
pde = pde + "(%.5f)" % (w[i].real) + rhs_description[i] + "\n "
else:
pde = pde + "(%.5f %0.5fi)" % (w[i].real, w[i].imag) + rhs_description[i] + "\n "
first = False
return pde
def is_pareto_efficient(costs):
"""
Find the pareto-efficient points
:param costs: An (n_points, n_costs) array
:return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
"""
is_efficient = np.ones(costs.shape[0], dtype = bool)
for i, c in enumerate(costs):
if is_efficient[i]:
is_efficient[is_efficient] = np.any(costs[is_efficient]<c, axis=1) # Keep any point with a lower cost
is_efficient[i] = True # And keep self
return is_efficient
def print_pde(w, rhs_description, ut = 'u_t', print_imag=False):
"""Prints the pde string"""
print(pde_string(w, rhs_description, ut, print_imag=False))
def find_pareto_front(obj1, obj2, order_axis=1, plot_fig=True, \
xlabel='Log(loss)', ylabel='Complexity', file_name='pareto_front.pdf'):
"""
Plots the Pareto front between values the lists of obj1 and obj2
INPUT:
obj1, obj2: Objectives over which to find the Pareto front
order_axis: which axis to to use for order the indices
plot_fig: True or False to make a figure
pareto_file: location for saving the figure
xlabel, ylabel: labels for the axes
Returns:
inds of the pareto front sorted according to the order_axis
"""
#find the pareto front
obj1_col = np.expand_dims(np.array(obj1), axis=1)
obj2_col = np.expand_dims(np.array(obj2), axis=1)
costs = np.hstack([obj1_col, obj2_col])
inds = is_pareto_efficient(costs)
pareto_obj1 = obj1_col[inds].flatten()
pareto_obj2 = obj2_col[inds].flatten()
pareto_inds = np.arange(0, costs.shape[0], dtype=int)[inds]
if plot_fig:
plt.figure(figsize=(8,3), dpi=300)
plt.subplot(121)
plt.scatter(obj1, obj2, 10, color='k')
plt.title('All the solutions', fontsize=10)
plt.xlabel(xlabel); plt.ylabel(ylabel)
plt.subplot(122)
plt.scatter(pareto_obj1, pareto_obj2, 10, color='k')
plt.title('Pareto Front')
plt.xlabel(xlabel); plt.ylabel(ylabel)
plt.tight_layout()
plt.savefig(file_name)
#Order the PDEs as per the error and print
if order_axis==1:
inds = np.argsort(pareto_obj1)
else:
inds = np.argsort(pareto_obj2)
sorted_pareto_inds = pareto_inds[inds]
return sorted_pareto_inds
def find_relaxed_intersection(*sets, q=0):
"""
This function finds the q-relaxed intersection set of the sets supplied in
*sets as a list.
"""
n = len(sets)
#form union
union = set.union(*sets)
q_relaxed_set = []
score = []
for i, elem in enumerate(union):
count = 0
for s in sets:
if elem not in s: count += 1
if count <= q:
q_relaxed_set.append(elem)
score.append(1-count/n)
return q_relaxed_set, score
def find_IC(sq_error, complexity, n, use='aic'):
"""
Find AIC or BIC using lists for error and complexity
n: number of data points
"""
IC = []
for (RSS, k) in zip(sq_error, complexity):
if use=='aic':
#IC = n*np.log(RSS/n) + 2*k + 2*(k)*(k+1)/(n-k-1)
ic = n*np.log(RSS/n) + 2*k
else:
ic = n*np.log(RSS/n) + 2*k*np.log(n)
IC.append(ic)
return IC
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"sklearn.preprocessing.StandardScaler",
"numpy.log",
"matplotlib.pyplot.scatter",
"numpy.ones",
"numpy.hstack",
"numpy.argsort",
"numpy.any",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linalg.norm",
"numpy.arange",
"matplotlib.... | [((830, 846), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (844, 846), False, 'from sklearn.preprocessing import StandardScaler\n'), ((861, 891), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_std': '(False)'}), '(with_std=False)\n', (875, 891), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2029, 2064), 'numpy.ones', 'np.ones', (['costs.shape[0]'], {'dtype': 'bool'}), '(costs.shape[0], dtype=bool)\n', (2036, 2064), True, 'import numpy as np\n'), ((3205, 3236), 'numpy.hstack', 'np.hstack', (['[obj1_col, obj2_col]'], {}), '([obj1_col, obj2_col])\n', (3214, 3236), True, 'import numpy as np\n'), ((256, 288), 'numpy.linalg.norm', 'np.linalg.norm', (['(X @ w - y)'], {'ord': '(2)'}), '(X @ w - y, ord=2)\n', (270, 288), True, 'import numpy as np\n'), ((502, 533), 'numpy.linalg.norm', 'np.linalg.norm', (['(X.T @ y)', 'np.inf'], {}), '(X.T @ y, np.inf)\n', (516, 533), True, 'import numpy as np\n'), ((3114, 3128), 'numpy.array', 'np.array', (['obj1'], {}), '(obj1)\n', (3122, 3128), True, 'import numpy as np\n'), ((3168, 3182), 'numpy.array', 'np.array', (['obj2'], {}), '(obj2)\n', (3176, 3182), True, 'import numpy as np\n'), ((3380, 3419), 'numpy.arange', 'np.arange', (['(0)', 'costs.shape[0]'], {'dtype': 'int'}), '(0, costs.shape[0], dtype=int)\n', (3389, 3419), True, 'import numpy as np\n'), ((3452, 3487), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)', 'dpi': '(300)'}), '(figsize=(8, 3), dpi=300)\n', (3462, 3487), True, 'import matplotlib.pyplot as plt\n'), ((3495, 3511), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (3506, 3511), True, 'import matplotlib.pyplot as plt\n'), ((3520, 3558), 'matplotlib.pyplot.scatter', 'plt.scatter', (['obj1', 'obj2', '(10)'], {'color': '"""k"""'}), "(obj1, obj2, 10, color='k')\n", (3531, 3558), True, 'import matplotlib.pyplot as plt\n'), ((3567, 3610), 'matplotlib.pyplot.title', 'plt.title', (['"""All the 
solutions"""'], {'fontsize': '(10)'}), "('All the solutions', fontsize=10)\n", (3576, 3610), True, 'import matplotlib.pyplot as plt\n'), ((3619, 3637), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (3629, 3637), True, 'import matplotlib.pyplot as plt\n'), ((3639, 3657), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (3649, 3657), True, 'import matplotlib.pyplot as plt\n'), ((3667, 3683), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (3678, 3683), True, 'import matplotlib.pyplot as plt\n'), ((3692, 3744), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pareto_obj1', 'pareto_obj2', '(10)'], {'color': '"""k"""'}), "(pareto_obj1, pareto_obj2, 10, color='k')\n", (3703, 3744), True, 'import matplotlib.pyplot as plt\n'), ((3753, 3778), 'matplotlib.pyplot.title', 'plt.title', (['"""Pareto Front"""'], {}), "('Pareto Front')\n", (3762, 3778), True, 'import matplotlib.pyplot as plt\n'), ((3787, 3805), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (3797, 3805), True, 'import matplotlib.pyplot as plt\n'), ((3807, 3825), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (3817, 3825), True, 'import matplotlib.pyplot as plt\n'), ((3835, 3853), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3851, 3853), True, 'import matplotlib.pyplot as plt\n'), ((3862, 3884), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (3873, 3884), True, 'import matplotlib.pyplot as plt\n'), ((3970, 3993), 'numpy.argsort', 'np.argsort', (['pareto_obj1'], {}), '(pareto_obj1)\n', (3980, 3993), True, 'import numpy as np\n'), ((4019, 4042), 'numpy.argsort', 'np.argsort', (['pareto_obj2'], {}), '(pareto_obj2)\n', (4029, 4042), True, 'import numpy as np\n'), ((2170, 2209), 'numpy.any', 'np.any', (['(costs[is_efficient] < c)'], {'axis': '(1)'}), '(costs[is_efficient] < c, axis=1)\n', (2176, 2209), True, 'import numpy as 
np\n'), ((4945, 4960), 'numpy.log', 'np.log', (['(RSS / n)'], {}), '(RSS / n)\n', (4951, 4960), True, 'import numpy as np\n'), ((4998, 5013), 'numpy.log', 'np.log', (['(RSS / n)'], {}), '(RSS / n)\n', (5004, 5013), True, 'import numpy as np\n'), ((5018, 5027), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (5024, 5027), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from PCAfold import preprocess
from PCAfold import reduction
from PCAfold import analysis
class Preprocess(unittest.TestCase):
def test_preprocess__get_centroids__allowed_calls(self):
pass
# ------------------------------------------------------------------------------
def test_preprocess__get_centroidss__not_allowed_calls(self):
X = np.random.rand(100,10)
idx = np.zeros((90,))
with self.assertRaises(ValueError):
centroids = preprocess.get_centroids(X, idx)
X = np.random.rand(100,10)
idx = np.zeros((110,))
with self.assertRaises(ValueError):
centroids = preprocess.get_centroids(X, idx)
# ------------------------------------------------------------------------------
def test_preprocess__get_centroids__allowed_calls(self):
try:
x = np.array([[1,2,10],[1,2,10],[1,2,10]])
idx = np.array([0,0,0])
idx_centroids = np.array([[1, 2, 10]])
centroids = preprocess.get_centroids(x, idx)
comparison = (idx_centroids == centroids)
self.assertTrue(comparison.all())
except Exception:
self.assertTrue(False)
try:
x = np.array([[1,2,10],[1,2,10],[20,30,40]])
idx = np.array([0,0,1])
idx_centroids = np.array([[1, 2, 10], [20,30,40]])
centroids = preprocess.get_centroids(x, idx)
comparison = (idx_centroids == centroids)
self.assertTrue(comparison.all())
except Exception:
self.assertTrue(False)
# ------------------------------------------------------------------------------
| [
"numpy.random.rand",
"PCAfold.preprocess.get_centroids",
"numpy.zeros",
"numpy.array"
] | [((401, 424), 'numpy.random.rand', 'np.random.rand', (['(100)', '(10)'], {}), '(100, 10)\n', (415, 424), True, 'import numpy as np\n'), ((438, 453), 'numpy.zeros', 'np.zeros', (['(90,)'], {}), '((90,))\n', (446, 453), True, 'import numpy as np\n'), ((569, 592), 'numpy.random.rand', 'np.random.rand', (['(100)', '(10)'], {}), '(100, 10)\n', (583, 592), True, 'import numpy as np\n'), ((606, 622), 'numpy.zeros', 'np.zeros', (['(110,)'], {}), '((110,))\n', (614, 622), True, 'import numpy as np\n'), ((523, 555), 'PCAfold.preprocess.get_centroids', 'preprocess.get_centroids', (['X', 'idx'], {}), '(X, idx)\n', (547, 555), False, 'from PCAfold import preprocess\n'), ((692, 724), 'PCAfold.preprocess.get_centroids', 'preprocess.get_centroids', (['X', 'idx'], {}), '(X, idx)\n', (716, 724), False, 'from PCAfold import preprocess\n'), ((899, 945), 'numpy.array', 'np.array', (['[[1, 2, 10], [1, 2, 10], [1, 2, 10]]'], {}), '([[1, 2, 10], [1, 2, 10], [1, 2, 10]])\n', (907, 945), True, 'import numpy as np\n'), ((956, 975), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (964, 975), True, 'import numpy as np\n'), ((1002, 1024), 'numpy.array', 'np.array', (['[[1, 2, 10]]'], {}), '([[1, 2, 10]])\n', (1010, 1024), True, 'import numpy as np\n'), ((1049, 1081), 'PCAfold.preprocess.get_centroids', 'preprocess.get_centroids', (['x', 'idx'], {}), '(x, idx)\n', (1073, 1081), False, 'from PCAfold import preprocess\n'), ((1273, 1321), 'numpy.array', 'np.array', (['[[1, 2, 10], [1, 2, 10], [20, 30, 40]]'], {}), '([[1, 2, 10], [1, 2, 10], [20, 30, 40]])\n', (1281, 1321), True, 'import numpy as np\n'), ((1332, 1351), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1340, 1351), True, 'import numpy as np\n'), ((1378, 1414), 'numpy.array', 'np.array', (['[[1, 2, 10], [20, 30, 40]]'], {}), '([[1, 2, 10], [20, 30, 40]])\n', (1386, 1414), True, 'import numpy as np\n'), ((1437, 1469), 'PCAfold.preprocess.get_centroids', 'preprocess.get_centroids', (['x', 'idx'], 
{}), '(x, idx)\n', (1461, 1469), False, 'from PCAfold import preprocess\n')] |
# coding=utf-8
import math
import numpy as np
TIMES = 1000
choose = 0
if choose:
dTypeEdge = np.dtype([('last_mid', np.str_, 16), ('mid', np.str_, 16)])
nDEdges = np.loadtxt('Weibo/res/edges.csv', dtype=dTypeEdge, delimiter=',')
dTypeNode = np.dtype([('mid', np.str_, 16)])
nDNodes = np.loadtxt('Weibo/res/nodes.csv', dtype=dTypeNode, unpack=True)
else:
dTypeEdge = np.dtype([('source', np.str_, 16), ('target', np.str_, 16), ('weight', np.int8)])
nDEdges = np.loadtxt('Twitter/res/higgs-retweet_network.edgelist', dtype=dTypeEdge, delimiter=' ')
dTypeNode = np.dtype([('id', np.str_, 8)])
nDNodes = np.loadtxt('Twitter/res/nodes.csv', dtype=dTypeNode, unpack=True)
nodesLength = len(nDNodes)
edgesLength = len(nDEdges)
X = np.random.rand(nodesLength, 1) * 4 - 2
Y = np.random.rand(nodesLength, 1) * 4 - 2
Z = np.random.rand(nodesLength, 1) * 4 - 2
ZEROS = np.zeros((nodesLength, 1), np.float16)
WEIGHT = np.ones((len(nDEdges), 1), np.int8)
if choose:
DNodes = np.array(nDNodes['mid'][:, np.newaxis], dtype=np.dtype(
[('mid', np.str_, 16), ('x', np.float16), ('y', np.float16), ('z', np.float16), ('dx', np.float16),
('dy', np.float16), ('dz', np.float16)]))
else:
DNodes = np.array(nDNodes['id'][:, np.newaxis], dtype=np.dtype(
[('id', np.str_, 16), ('x', np.float16), ('y', np.float16), ('z', np.float16), ('dx', np.float16),
('dy', np.float16), ('dz', np.float16)]))
DNodes['x'] = X
DNodes['y'] = Y
DNodes['z'] = Z
DNodes['dx'] = ZEROS
DNodes['dy'] = ZEROS
DNodes['dz'] = ZEROS
if choose:
DEdges = np.array(nDEdges['last_mid'][:, np.newaxis],
dtype=np.dtype([('last_mid', np.str_, 16), ('mid', np.str_, 16), ('weight', np.int8)]))
DEdges['mid'] = nDEdges['mid'][:, np.newaxis]
DEdges['weight'] = WEIGHT
else:
DEdges = np.array(nDEdges['source'][:, np.newaxis],
dtype=np.dtype([('source', np.str_, 16), ('target', np.str_, 16), ('weight', np.int8)]))
DEdges['target'] = nDEdges['target'][:, np.newaxis]
DEdges['weight'] = nDEdges['weight'][:, np.newaxis]
Nodes = DNodes.tolist()
Edges = DEdges.tolist()
# THESE PARAMETERS MUST BE FLOAT (ref: Gephi/Gephi wiki)
SPEED_DIVISOR = 800.0
AREA_MULTIPLICATOR = 10000.0
speed = 1.0
# Fruchterman-Reingold style force-directed 3-D layout.
# Node record layout (inside each one-element list): (id, x, y, z, dx, dy, dz).
area = 10000.0
gravity = 10.0
maxD = math.sqrt(AREA_MULTIPLICATOR * area) / 10
k = math.sqrt(AREA_MULTIPLICATOR * area) / (1 + nodesLength)
# Index nodes by id once: O(1) endpoint lookup per edge instead of a linear
# scan over all nodes for every edge in every iteration (the old code even
# noted "binary search is faster"). Ids never change, so one build suffices.
node_by_id = {node[0][0]: node for node in Nodes}
for i in range(1, TIMES + 1):
    # Repulsive force: every pair of distinct nodes pushes apart.
    for N1 in Nodes:
        for N2 in Nodes:
            if N1 != N2:
                deltaX = N1[0][1] - N2[0][1]
                deltaY = N1[0][2] - N2[0][2]
                deltaZ = N1[0][3] - N2[0][3]
                dist = float(math.sqrt(deltaX ** 2 + deltaY ** 2 + deltaZ ** 2))
                if dist > 0:
                    n = list(N1[0])
                    repulsiveF = k ** 2 / dist
                    n[4] += deltaX / dist * repulsiveF
                    n[5] += deltaY / dist * repulsiveF
                    n[6] += deltaZ / dist * repulsiveF
                    N1[0] = tuple(n)
    # Attractive force: each edge pulls its two endpoints together.
    for edge in Edges:
        ns = node_by_id[edge[0][0]]
        nt = node_by_id[edge[0][1]]
        deltaX = ns[0][1] - nt[0][1]
        deltaY = ns[0][2] - nt[0][2]
        deltaZ = ns[0][3] - nt[0][3]
        dist = float(math.sqrt(deltaX ** 2 + deltaY ** 2 + deltaZ ** 2))
        attractiveF = dist * dist / k
        if dist > 0:
            s = list(ns[0])
            t = list(nt[0])
            s[4] -= deltaX / dist * attractiveF
            s[5] -= deltaY / dist * attractiveF
            s[6] -= deltaZ / dist * attractiveF
            # BUG FIX: the pull on the target node was previously applied to
            # the source a second time (s[4] += ...), cancelling the
            # attraction and leaving the target displacement untouched.
            t[4] += deltaX / dist * attractiveF
            t[5] += deltaY / dist * attractiveF
            t[6] += deltaZ / dist * attractiveF
            ns[0] = tuple(s)
            nt[0] = tuple(t)
    # Gravity: pull every node toward the origin to keep the layout compact.
    for node in Nodes:
        d = float(math.sqrt(node[0][1] ** 2 + node[0][2] ** 2 + node[0][3] ** 2))
        gravityF = 0.01 * k * gravity * d
        x = node[0][1]
        y = node[0][2]
        z = node[0][3]
        if d != 0:
            n = list(node[0])
            n[4] -= gravityF * x / d
            n[5] -= gravityF * y / d
            n[6] -= gravityF * z / d
            node[0] = tuple(n)
    # Scale the accumulated displacement by the layout speed.
    for node in Nodes:
        n = list(node[0])
        n[4] *= speed / SPEED_DIVISOR
        n[5] *= speed / SPEED_DIVISOR
        n[6] *= speed / SPEED_DIVISOR
        node[0] = tuple(n)
    # Apply the displacement, capped at maxD (scaled by speed) per iteration.
    for node in Nodes:
        deltaX = node[0][4]
        deltaY = node[0][5]
        deltaZ = node[0][6]
        dist = float(math.sqrt(deltaX ** 2 + deltaY ** 2 + deltaZ ** 2))
        if dist > 0:
            n = list(node[0])
            lDist = min(maxD * (float(speed / SPEED_DIVISOR)), dist)
            n[1] += deltaX / dist * lDist
            n[2] += deltaY / dist * lDist
            n[3] += deltaZ / dist * lDist
            node[0] = tuple(n)
    # Reset the displacement accumulators for the next iteration.
    for node in Nodes:
        n = list(node[0])
        n[4] = 0.0
        n[5] = 0.0
        n[6] = 0.0
        node[0] = tuple(n)
    # Progress trace: current position of the first node.
    print(Nodes[0][0][1:4])
nDNodes = np.array(Nodes).reshape(nodesLength, 7)
nDEdges = np.array(Edges).reshape(edgesLength, 3)
if choose:
    np.savetxt('Weibo/Layout/nodes.csv', X=nDNodes[:, 0:4], fmt='%s', delimiter=',')
    np.savetxt('Weibo/Layout/edges.csv', X=nDEdges[...], fmt='%s', delimiter=',')
else:
    np.savetxt('Twitter/Layout/nodes.csv', X=nDNodes[:, 0:4], fmt='%s', delimiter=',')
    np.savetxt('Twitter/Layout/edges.csv', X=nDEdges[...], fmt='%s', delimiter=',')
| [
"math.sqrt",
"numpy.savetxt",
"numpy.dtype",
"numpy.zeros",
"numpy.array",
"numpy.loadtxt",
"numpy.random.rand"
] | [((897, 935), 'numpy.zeros', 'np.zeros', (['(nodesLength, 1)', 'np.float16'], {}), '((nodesLength, 1), np.float16)\n', (905, 935), True, 'import numpy as np\n'), ((101, 160), 'numpy.dtype', 'np.dtype', (["[('last_mid', np.str_, 16), ('mid', np.str_, 16)]"], {}), "([('last_mid', np.str_, 16), ('mid', np.str_, 16)])\n", (109, 160), True, 'import numpy as np\n'), ((175, 240), 'numpy.loadtxt', 'np.loadtxt', (['"""Weibo/res/edges.csv"""'], {'dtype': 'dTypeEdge', 'delimiter': '""","""'}), "('Weibo/res/edges.csv', dtype=dTypeEdge, delimiter=',')\n", (185, 240), True, 'import numpy as np\n'), ((258, 290), 'numpy.dtype', 'np.dtype', (["[('mid', np.str_, 16)]"], {}), "([('mid', np.str_, 16)])\n", (266, 290), True, 'import numpy as np\n'), ((305, 368), 'numpy.loadtxt', 'np.loadtxt', (['"""Weibo/res/nodes.csv"""'], {'dtype': 'dTypeNode', 'unpack': '(True)'}), "('Weibo/res/nodes.csv', dtype=dTypeNode, unpack=True)\n", (315, 368), True, 'import numpy as np\n'), ((392, 478), 'numpy.dtype', 'np.dtype', (["[('source', np.str_, 16), ('target', np.str_, 16), ('weight', np.int8)]"], {}), "([('source', np.str_, 16), ('target', np.str_, 16), ('weight', np.\n int8)])\n", (400, 478), True, 'import numpy as np\n'), ((488, 580), 'numpy.loadtxt', 'np.loadtxt', (['"""Twitter/res/higgs-retweet_network.edgelist"""'], {'dtype': 'dTypeEdge', 'delimiter': '""" """'}), "('Twitter/res/higgs-retweet_network.edgelist', dtype=dTypeEdge,\n delimiter=' ')\n", (498, 580), True, 'import numpy as np\n'), ((594, 624), 'numpy.dtype', 'np.dtype', (["[('id', np.str_, 8)]"], {}), "([('id', np.str_, 8)])\n", (602, 624), True, 'import numpy as np\n'), ((639, 704), 'numpy.loadtxt', 'np.loadtxt', (['"""Twitter/res/nodes.csv"""'], {'dtype': 'dTypeNode', 'unpack': '(True)'}), "('Twitter/res/nodes.csv', dtype=dTypeNode, unpack=True)\n", (649, 704), True, 'import numpy as np\n'), ((2318, 2354), 'math.sqrt', 'math.sqrt', (['(AREA_MULTIPLICATOR * area)'], {}), '(AREA_MULTIPLICATOR * area)\n', (2327, 2354), False, 
'import math\n'), ((2364, 2400), 'math.sqrt', 'math.sqrt', (['(AREA_MULTIPLICATOR * area)'], {}), '(AREA_MULTIPLICATOR * area)\n', (2373, 2400), False, 'import math\n'), ((5384, 5469), 'numpy.savetxt', 'np.savetxt', (['"""Weibo/Layout/nodes.csv"""'], {'X': 'nDNodes[:, 0:4]', 'fmt': '"""%s"""', 'delimiter': '""","""'}), "('Weibo/Layout/nodes.csv', X=nDNodes[:, 0:4], fmt='%s', delimiter=','\n )\n", (5394, 5469), True, 'import numpy as np\n'), ((5469, 5546), 'numpy.savetxt', 'np.savetxt', (['"""Weibo/Layout/edges.csv"""'], {'X': 'nDEdges[...]', 'fmt': '"""%s"""', 'delimiter': '""","""'}), "('Weibo/Layout/edges.csv', X=nDEdges[...], fmt='%s', delimiter=',')\n", (5479, 5546), True, 'import numpy as np\n'), ((5557, 5643), 'numpy.savetxt', 'np.savetxt', (['"""Twitter/Layout/nodes.csv"""'], {'X': 'nDNodes[:, 0:4]', 'fmt': '"""%s"""', 'delimiter': '""","""'}), "('Twitter/Layout/nodes.csv', X=nDNodes[:, 0:4], fmt='%s',\n delimiter=',')\n", (5567, 5643), True, 'import numpy as np\n'), ((5644, 5723), 'numpy.savetxt', 'np.savetxt', (['"""Twitter/Layout/edges.csv"""'], {'X': 'nDEdges[...]', 'fmt': '"""%s"""', 'delimiter': '""","""'}), "('Twitter/Layout/edges.csv', X=nDEdges[...], fmt='%s', delimiter=',')\n", (5654, 5723), True, 'import numpy as np\n'), ((764, 794), 'numpy.random.rand', 'np.random.rand', (['nodesLength', '(1)'], {}), '(nodesLength, 1)\n', (778, 794), True, 'import numpy as np\n'), ((807, 837), 'numpy.random.rand', 'np.random.rand', (['nodesLength', '(1)'], {}), '(nodesLength, 1)\n', (821, 837), True, 'import numpy as np\n'), ((850, 880), 'numpy.random.rand', 'np.random.rand', (['nodesLength', '(1)'], {}), '(nodesLength, 1)\n', (864, 880), True, 'import numpy as np\n'), ((5278, 5293), 'numpy.array', 'np.array', (['Nodes'], {}), '(Nodes)\n', (5286, 5293), True, 'import numpy as np\n'), ((5328, 5343), 'numpy.array', 'np.array', (['Edges'], {}), '(Edges)\n', (5336, 5343), True, 'import numpy as np\n'), ((1052, 1205), 'numpy.dtype', 'np.dtype', (["[('mid', np.str_, 
16), ('x', np.float16), ('y', np.float16), ('z', np.\n float16), ('dx', np.float16), ('dy', np.float16), ('dz', np.float16)]"], {}), "([('mid', np.str_, 16), ('x', np.float16), ('y', np.float16), ('z',\n np.float16), ('dx', np.float16), ('dy', np.float16), ('dz', np.float16)])\n", (1060, 1205), True, 'import numpy as np\n'), ((1285, 1437), 'numpy.dtype', 'np.dtype', (["[('id', np.str_, 16), ('x', np.float16), ('y', np.float16), ('z', np.\n float16), ('dx', np.float16), ('dy', np.float16), ('dz', np.float16)]"], {}), "([('id', np.str_, 16), ('x', np.float16), ('y', np.float16), ('z',\n np.float16), ('dx', np.float16), ('dy', np.float16), ('dz', np.float16)])\n", (1293, 1437), True, 'import numpy as np\n'), ((1663, 1748), 'numpy.dtype', 'np.dtype', (["[('last_mid', np.str_, 16), ('mid', np.str_, 16), ('weight', np.int8)]"], {}), "([('last_mid', np.str_, 16), ('mid', np.str_, 16), ('weight', np.int8)]\n )\n", (1671, 1748), True, 'import numpy as np\n'), ((1916, 2002), 'numpy.dtype', 'np.dtype', (["[('source', np.str_, 16), ('target', np.str_, 16), ('weight', np.int8)]"], {}), "([('source', np.str_, 16), ('target', np.str_, 16), ('weight', np.\n int8)])\n", (1924, 2002), True, 'import numpy as np\n'), ((3519, 3569), 'math.sqrt', 'math.sqrt', (['(deltaX ** 2 + deltaY ** 2 + deltaZ ** 2)'], {}), '(deltaX ** 2 + deltaY ** 2 + deltaZ ** 2)\n', (3528, 3569), False, 'import math\n'), ((4093, 4155), 'math.sqrt', 'math.sqrt', (['(node[0][1] ** 2 + node[0][2] ** 2 + node[0][3] ** 2)'], {}), '(node[0][1] ** 2 + node[0][2] ** 2 + node[0][3] ** 2)\n', (4102, 4155), False, 'import math\n'), ((4777, 4827), 'math.sqrt', 'math.sqrt', (['(deltaX ** 2 + deltaY ** 2 + deltaZ ** 2)'], {}), '(deltaX ** 2 + deltaY ** 2 + deltaZ ** 2)\n', (4786, 4827), False, 'import math\n'), ((2709, 2759), 'math.sqrt', 'math.sqrt', (['(deltaX ** 2 + deltaY ** 2 + deltaZ ** 2)'], {}), '(deltaX ** 2 + deltaY ** 2 + deltaZ ** 2)\n', (2718, 2759), False, 'import math\n')] |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 CEA
# <NAME>
# Licensed under the terms of the CECILL License
# (see guiqwt/__init__.py for details)
"""Rotate/crop test: using the scaler C++ engine to rotate/crop images"""
from __future__ import print_function
SHOW = True # Show test in GUI-based test launcher
import os.path as osp
import numpy as np
from guiqwt.builder import make
from guiqwt.plot import ImageDialog
from guiqwt.widgets.rotatecrop import (RotateCropDialog, RotateCropWidget,
MultipleRotateCropWidget)
from guiqwt import io
def imshow(data, title=None, hold=False):
    """Display *data* in a guiqwt image dialog.

    When *hold* is true the dialog is shown non-modally; otherwise it
    blocks until the user closes it.
    """
    dialog = ImageDialog(wintitle=title)
    dialog.get_plot().add_item(make.image(data))
    # Pick the display mode up front, then invoke it.
    present = dialog.show if hold else dialog.exec_
    present()
def create_test_data(fname, func=None):
    """Load image *fname* (relative to this module) as grayscale and wrap
    it in a movable trimage item.

    *func*, when given, is applied to the array before the item is built.
    Returns the (array, item) pair.
    """
    image_path = osp.join(osp.dirname(__file__), fname)
    data = io.imread(image_path, to_grayscale=True)
    if func is not None:
        data = func(data)
    return data, make.trimage(data, dx=0.1, dy=0.1)
def widget_test(fname, qapp):
    """Test the rotate/crop widget"""
    # Only the trimage item is needed here; the raw array is unused.
    _, item = create_test_data(fname)
    rc_widget = RotateCropWidget(None)
    rc_widget.set_item(item)
    rc_widget.show()
    qapp.exec_()
    rc_widget.accept_changes()
def multiple_widget_test(fname, qapp):
    """Test the multiple rotate/crop widget"""
    # Build three items from the same image: untouched, rotated 90°, rotated 180°.
    items = [
        create_test_data(fname)[1],
        create_test_data(fname, func=lambda arr: np.rot90(arr, 1))[1],
        create_test_data(fname, func=lambda arr: np.rot90(arr, 2))[1],
    ]
    widget = MultipleRotateCropWidget(None)
    widget.set_items(*items)
    widget.show()
    qapp.exec_()
    widget.accept_changes()
def dialog_test(fname, interactive=True):
    """Test the rotate/crop dialog"""
    array0, item = create_test_data(fname)
    dlg = RotateCropDialog(None)
    dlg.set_item(item)
    if interactive:
        ok = dlg.exec_()
    else:
        # Non-interactive: show the dialog and accept it immediately.
        dlg.show()
        dlg.accept()
        ok = True
    if not ok:
        return
    array1 = dlg.output_array
    if array0.shape != array1.shape:
        # Shape changed (cropped): show both arrays side by side.
        print(array0.shape, '-->', array1.shape)
        imshow(array0, title="array0", hold=True)
        imshow(array1, title="array1")
        return
    if (array1 == array0).all() and not interactive:
        print("Test passed successfully.")
        return
    # Same shape but different content: show the difference image.
    imshow(array1-array0, title="array1-array0")
if __name__ == '__main__':
    from guidata import qapplication
    # A Qt application instance must exist before any widget is created.
    app = qapplication()  # analysis:ignore
    # Exercise each flavour of the rotate/crop GUI in turn.
    multiple_widget_test("brain.png", app)
    widget_test("brain.png", app)
    dialog_test(fname="brain.png", interactive=False)
    # dialog_test(fname="contrast.png", interactive=False)
    dialog_test(fname="brain.png", interactive=True)
| [
"os.path.dirname",
"guiqwt.builder.make.image",
"guiqwt.widgets.rotatecrop.RotateCropWidget",
"guiqwt.widgets.rotatecrop.RotateCropDialog",
"guiqwt.widgets.rotatecrop.MultipleRotateCropWidget",
"numpy.rot90",
"guiqwt.plot.ImageDialog",
"guiqwt.builder.make.trimage",
"guidata.qapplication"
] | [((639, 666), 'guiqwt.plot.ImageDialog', 'ImageDialog', ([], {'wintitle': 'title'}), '(wintitle=title)\n', (650, 666), False, 'from guiqwt.plot import ImageDialog\n'), ((988, 1024), 'guiqwt.builder.make.trimage', 'make.trimage', (['array0'], {'dx': '(0.1)', 'dy': '(0.1)'}), '(array0, dx=0.1, dy=0.1)\n', (1000, 1024), False, 'from guiqwt.builder import make\n'), ((1177, 1199), 'guiqwt.widgets.rotatecrop.RotateCropWidget', 'RotateCropWidget', (['None'], {}), '(None)\n', (1193, 1199), False, 'from guiqwt.widgets.rotatecrop import RotateCropDialog, RotateCropWidget, MultipleRotateCropWidget\n'), ((1595, 1625), 'guiqwt.widgets.rotatecrop.MultipleRotateCropWidget', 'MultipleRotateCropWidget', (['None'], {}), '(None)\n', (1619, 1625), False, 'from guiqwt.widgets.rotatecrop import RotateCropDialog, RotateCropWidget, MultipleRotateCropWidget\n'), ((1865, 1887), 'guiqwt.widgets.rotatecrop.RotateCropDialog', 'RotateCropDialog', (['None'], {}), '(None)\n', (1881, 1887), False, 'from guiqwt.widgets.rotatecrop import RotateCropDialog, RotateCropWidget, MultipleRotateCropWidget\n'), ((2535, 2549), 'guidata.qapplication', 'qapplication', ([], {}), '()\n', (2547, 2549), False, 'from guidata import qapplication\n'), ((695, 711), 'guiqwt.builder.make.image', 'make.image', (['data'], {}), '(data)\n', (705, 711), False, 'from guiqwt.builder import make\n'), ((848, 869), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (859, 869), True, 'import os.path as osp\n'), ((1485, 1501), 'numpy.rot90', 'np.rot90', (['arr', '(1)'], {}), '(arr, 1)\n', (1493, 1501), True, 'import numpy as np\n'), ((1564, 1580), 'numpy.rot90', 'np.rot90', (['arr', '(2)'], {}), '(arr, 2)\n', (1572, 1580), True, 'import numpy as np\n')] |
from __future__ import division
import os
import sys
import random
import argparse
import time
from shutil import copyfile
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from scipy.signal import medfilt
from scipy import stats
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from torch.utils.data import DataLoader
from torch.autograd import Variable
from sklearn import svm, linear_model, neural_network, preprocessing
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.metrics import mean_squared_error, accuracy_score, f1_score, log_loss, make_scorer, confusion_matrix
from sklearn.model_selection import cross_val_score, GridSearchCV
from scipy.stats import itemfreq
#from mlens.metrics import make_scorer
#from mlens.model_selection import Evaluator
from scipy.stats import uniform, randint
from dataset_skeleton_ASD import EngagementDataset, load_dataset
from utils import suppress_stdout, EarlyStopping, plot_confusion_matrix, WeightedMSELoss, scores, save_cnf, save_vec
from net import MyNet, calc_gradients
def arg_parse():
    """Build the command-line parser for LSTM engagement estimation and
    parse ``sys.argv``.

    Returns the parsed :class:`argparse.Namespace` with segmentation,
    model-size, optimiser and training hyper-parameters.
    """
    parser = argparse.ArgumentParser(description='Engagement estimation with LSTM')
    parser.add_argument("--seg_len", dest="seg_len", type=int, default=5,
                        help="Segment length")
    parser.add_argument("--seg_overlap", dest="seg_overlap", type=int, default=0,
                        help="Segment overlap")
    parser.add_argument("--seq_len", dest="seq_len", type=int, default=30,
                        help="Number of segments per sequence")
    parser.add_argument("--median", dest="median", type=int, default=5,
                        help="Median filter size")
    parser.add_argument("--hidden_size", dest="hidden_size", type=int, default=560,
                        help="Hidden size")
    parser.add_argument("--initial_lr", dest="initial_lr", type=float, default=0.1,
                        help="Initial learning rate")
    parser.add_argument("--decay", dest="decay", type=float, default=1e-6,
                        help="Weight decay")
    parser.add_argument("--momentum", dest="momentum", type=float, default=0.5,
                        help="Training momentum")
    parser.add_argument("--one_hot", dest="one_hot", type=int, default=1,
                        help="Use cross entropy loss instead of MSE")
    parser.add_argument("--patience", dest="patience", type=int, default=10,
                        help="Early stopping patience")
    parser.add_argument("--batch", dest="batch_size", type=int, default=16,
                        help="Batch size")
    return parser.parse_args()
if __name__ == '__main__':
    args = arg_parse()
    # Fixed RNG seed for reproducible splits / initialisation.
    seed = 41
    np.random.seed(seed)
    seg_len = args.seg_len
    seg_overlap = args.seg_overlap
    train_data_path = os.path.realpath('../TrainData')
    test_data_path = os.path.realpath('../TrainDataASD')
    sequence_length = args.seq_len #Number of segments per sequence
    num_epochs = 500
    batch_size = args.batch_size
    median_filter_size = args.median
    cuda = True
    hidden_size = args.hidden_size
    initial_lr = args.initial_lr
    weight_decay = args.decay
    momentum = args.momentum
    patience = args.patience
    num_classes = 3
    # One cross-validation fold per file in the ASD test directory.
    num_files = len(os.listdir(test_data_path))
    dataset_loader = load_dataset(train_data_path, test_data_path, seg_len, seg_overlap, sequence_length, 0.2, num_classes, cuda)
    # First yield of the generator is the class-weight vector; subsequent
    # yields are (train, valid, test) dataset triples (see load_dataset).
    label_weights = next(dataset_loader)
    one_hot_int = args.one_hot
    one_hot = one_hot_int == 1
    # Classification (cross-entropy) vs regression (weighted MSE) loss.
    if one_hot:
        loss_function = nn.CrossEntropyLoss(weight=torch.tensor(label_weights).float())
    else:
        loss_function = WeightedMSELoss(label_weights)
    #loss_function = nn.MSELoss()
    if cuda:
        loss_function = loss_function.cuda()
    # Output directory name encodes every CLI argument plus a timestamp.
    output_dir = '../output_final_ASD/' + '_'.join(['{}' for x in [list(args.__dict__.values())+[time.time()]][0]])
    output_dir = output_dir.format(*([list(args.__dict__.values())+[time.time()]][0]))
    # os.mkdir(output_dir)
    # os.mkdir(os.path.join(output_dir, 'each_fold'))
    # copyfile('net.py', os.path.join(output_dir,'net.py'))
    # copyfile('leave1out.py', os.path.join(output_dir,'leave1out.py'))
    # copyfile('dataset_skeleton.py', os.path.join(output_dir,'dataset_skeleton.py'))
    # Aggregates accumulated across all cross-validation folds.
    total_test_loss = 0
    total_test_results = None
    total_test_targets = None
    test_results_dict = dict()
    cv_index = 0
    all_cnfs = []
    loop_cnt = 0
    # Leave-one-out style cross-validation over the generated fold triples.
    for train_dataset, valid_dataset, test_dataset in dataset_loader:
        train_generator = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        valid_generator = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)
        test_generator = DataLoader(test_dataset, batch_size=1, shuffle=False)
        #print len(train_generator), len(valid_generator), len(test_generator)
        # NOTE(review): this counter is incremented but never read afterwards.
        if loop_cnt == 0:
            loop_cnt += 1
        model = MyNet(num_classes, 2*(58-12), sequence_length, hidden_size, one_hot, cuda)
        if cuda:
            model = model.cuda()
        early_stopping = EarlyStopping(patience=patience)
        early_stopping.save_weights(model)
        optimizer = optim.SGD(model.parameters(), lr=0.02, momentum=momentum, weight_decay=weight_decay)
        # Two-stage training: model.change_net(stage) selects which part of
        # the network is trained; each stage anneals over two learning rates.
        for stage in [0,1]:
            print('Stage: {}'.format(stage))
            model.change_net(stage)
            # NOTE(review): both branches set the same schedule; possibly
            # intended to differ per stage — confirm before simplifying.
            if stage == 0:
                lrs = [initial_lr, initial_lr/10]
            else:
                lrs = [initial_lr, initial_lr/10]
            for lr in lrs:#, initial_lr/100]:
                early_stopping.reset()
                print(lr)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr
                for epoch in range(num_epochs):
                    # --- training pass ---
                    model.train()
                    for i, data in enumerate(train_generator):
                        input, targets = Variable(data['data']), Variable(data['target'])
                        model.zero_grad()
                        # Skip size-1 batches (e.g. batch-norm cannot handle them).
                        if input.size(0) == 1:
                            continue
                        output = model(input)
                        if one_hot:
                            loss = loss_function(output.transpose(1,2).view(-1,num_classes), (targets*(num_classes-1)+0.02).long().view(-1))
                        else:
                            loss = loss_function(output.squeeze(2)*(num_classes-1), targets*(num_classes-1))/((num_classes-1)**2)
                        loss.backward()
                        #calc_gradients(model.parameters())
                        model.grad_clip(0.1)
                        #print 'Train loss: ', loss
                        #print output.size(), targets.size()
                        optimizer.step()
                    # --- validation pass (drives early stopping) ---
                    model.eval()
                    total_valid_loss = 0
                    for i, data in enumerate(valid_generator):
                        input, targets = Variable(data['data']), Variable(data['target'])
                        output = model(input)
                        if one_hot:
                            loss = loss_function(output.transpose(1,2).view(-1,num_classes), (targets*(num_classes-1)+0.02).long().view(-1))
                        else:
                            loss = loss_function(output.squeeze(2)*(num_classes-1), targets*(num_classes-1))/((num_classes-1)**2)
                        total_valid_loss += loss.item()*input.shape[0]/len(valid_dataset)
                    print('Valid loss: ', total_valid_loss)
                    if early_stopping.step(total_valid_loss, model):
                        early_stopping.load_weights(model)
                        break
        # NOTE(review): debug leftover? This prints 'DONE' and exits after
        # the first fold, so the evaluation / aggregation code below never
        # runs. Confirm whether the early exit is intentional.
        print('DONE')
        exit()
        # --- per-fold test evaluation ---
        test_loss = 0
        test_results = None
        test_targets = None
        for i, data in enumerate(test_generator):
            input, targets = Variable(data['data']), Variable(data['target'])
            #plt.figure()
            #print targets.size()
            #plt.plot(input[0,0,:].squeeze().cpu().numpy())
            #plt.show()
            output = model(input)
            if one_hot:
                # Classification: predicted class is the argmax over classes.
                max_val, max_ind = output.max(1)
                if test_results is None:
                    test_results = max_ind.data.cpu().numpy().transpose()
                    test_targets = (num_classes-1)*targets.data.cpu().numpy().transpose()
                else:
                    test_results = np.append(test_results, max_ind.data.cpu().numpy())
                    test_targets = np.append(test_targets, (num_classes-1)*targets.data.cpu().numpy())
                loss = loss_function(output.transpose(1,2).view(-1,num_classes), (targets*(num_classes-1)+0.02).long().view(-1))
            else:
                # Regression: rescale outputs back to the [0, num_classes-1] range.
                if test_results is None:
                    test_results = (num_classes-1)*output.data.cpu().numpy().transpose()
                    test_targets = (num_classes-1)*targets.data.cpu().numpy().transpose()
                else:
                    test_results = np.append(test_results, (num_classes-1)*output.data.cpu().numpy())
                    test_targets = np.append(test_targets, (num_classes-1)*targets.data.cpu().numpy())
                #test_results.append(3*output.data.cpu().numpy()[0,0])
                #test_targets.append(3*targets.data.cpu().numpy()[0,0])
                loss = loss_function(output.squeeze(2)*(num_classes-1), targets*(num_classes-1))
            test_loss += loss.item()*input.shape[0]/len(test_dataset)
            #print 'Test loss: ', test_loss, mean_squared_error(medfilt(np.round(np.array(test_results)), median_filter_size), np.round(np.array(test_targets))), len(test_dataset)
        #test_results_dict[(os.listdir(data_path)[0:3]+os.listdir(data_path)[4:13]+os.listdir(data_path)[14:])[cv_index]] = test_loss
        test_results_dict[os.listdir(test_data_path)[cv_index]] = test_loss
        # Median-filter and round predictions before scoring the fold.
        total_test_loss += mean_squared_error(np.round(np.array(test_targets)), medfilt(np.round(np.array(test_results)), median_filter_size))/num_files
        test_results = medfilt(np.round(np.array(test_results)), median_filter_size)
        test_targets = np.round(np.array(test_targets))
        cnf = confusion_matrix(test_targets, test_results)
        save_cnf(cnf, os.path.join(output_dir, 'each_fold', 'cfn{}.txt'.format(cv_index)))
        save_vec(test_targets, os.path.join(output_dir,'each_fold','targets{}.txt'.format(cv_index)))
        save_vec(test_results, os.path.join(output_dir,'each_fold','results{}.txt'.format(cv_index)))
        if total_test_results is None:
            total_test_results = test_results
            total_test_targets = test_targets
        else:
            total_test_results = np.append(total_test_results, test_results, axis=0)
            total_test_targets = np.append(total_test_targets, test_targets, axis=0)
        # Per-second scores: majority vote over groups of 30/seg_len segments.
        test_results = stats.mode(np.reshape(test_results, (-1, int(30/seg_len))), axis=1)[0]
        test_targets = stats.mode(np.reshape(test_targets, (-1, int(30/seg_len))), axis=1)[0]
        cnf = confusion_matrix(test_targets, test_results)
        save_cnf(cnf, os.path.join(output_dir, 'each_fold', 'cfn{}_1sec.txt'.format(cv_index)))
        save_vec(test_targets, os.path.join(output_dir,'each_fold','targets{}_1sec.txt'.format(cv_index)))
        save_vec(test_results, os.path.join(output_dir,'each_fold','results{}_1sec.txt'.format(cv_index)))
        cv_index += 1
        #total_test_loss += test_loss/num_files
    #print total_test_targets, np.unique(total_test_targets)
    #print total_test_results, np.unique(total_test_results)
    # --- aggregate scoring over all folds ---
    total_test_results = np.clip(total_test_results, 0, (num_classes-1))
    #print total_test_loss, mean_squared_error(total_test_targets, total_test_results), f1_score(total_test_targets.astype(int), total_test_results.astype(int), average=None)
    #print itemfreq(total_test_targets).astype(int)
    # Compute confusion matrix
    cnf_matrix = confusion_matrix(total_test_targets, total_test_results)
    cnf_norm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
    save_cnf(cnf_matrix, os.path.join(output_dir,'cnf.txt'))
    save_cnf(cnf_norm, os.path.join(output_dir,'cnf_norm.txt'))
    print(cnf_matrix)
    #print cnf_norm
    #print np.mean(np.diag(cnf_matrix)/np.sum(cnf_matrix, axis=0))
    #print f1_score(total_test_targets.astype(int), total_test_results.astype(int), average=None), np.mean(f1_score(total_test_targets.astype(int), total_test_results.astype(int), average=None))
    #with open(os.path.join(output_dir,'.txt'),'w') as output_file:
    save_vec(total_test_targets, os.path.join(output_dir,'targets_full.txt'))
    save_vec(total_test_results, os.path.join(output_dir,'results_full.txt'))
    # Per-second aggregate scores (majority vote per 30/seg_len segments).
    total_test_results = stats.mode(np.reshape(total_test_results, (-1, int(30/seg_len))), axis=1)[0]
    total_test_targets = stats.mode(np.reshape(total_test_targets, (-1, int(30/seg_len))), axis=1)[0]
    cnf_matrix = confusion_matrix(total_test_targets, total_test_results)
    print(cnf_matrix)
    save_cnf(cnf_matrix, os.path.join(output_dir,'cnf_1sec.txt'))
    save_vec(total_test_targets, os.path.join(output_dir,'targets_1sec.txt'))
    save_vec(total_test_results, os.path.join(output_dir,'results_1sec.txt'))
    #print f1_score(total_test_targets.astype(int), total_test_results.astype(int), average=None), np.mean(f1_score(total_test_targets.astype(int), total_test_results.astype(int), average=None))
    #print test_results_dict
    print(scores(cnf_matrix.astype(np.float))['b_accuracy'])
    #np.set_printoptions(precision=2)
    # Plot non-normalized confusion matrix
    #plt.figure()
    #plot_confusion_matrix(cnf_matrix, classes=['0','1','2','3'],
    #                      title='Confusion matrix, without normalization')
    # Plot normalized confusion matrix
    #plt.figure()
    #plot_confusion_matrix(cnf_matrix, classes=['0','1','2','3'], normalize=True,
    #                      title='Normalized confusion matrix')
    #plt.figure()
    #t = np.arange(0,total_test_results.shape[0],1)
    #plt.plot(t,total_test_results+0.1,'r',t,total_test_targets,'b')
    #plt.figure()
    #print input.size()
    #plt.plot(np.arange(input.size(2)),input[0,0,:].squeeze().cpu().numpy())
    #plt.show()
| [
"utils.EarlyStopping",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"os.path.join",
"torch.autograd.Variable",
"os.path.realpath",
"numpy.clip",
"time.time",
"numpy.append",
"numpy.array",
"net.MyNet",
"sklearn.metrics.confusion_matrix",
"utils.WeightedMSE... | [((1187, 1257), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Engagement estimation with LSTM"""'}), "(description='Engagement estimation with LSTM')\n", (1210, 1257), False, 'import argparse\n'), ((2623, 2643), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2637, 2643), True, 'import numpy as np\n'), ((2729, 2761), 'os.path.realpath', 'os.path.realpath', (['"""../TrainData"""'], {}), "('../TrainData')\n", (2745, 2761), False, 'import os\n'), ((2783, 2818), 'os.path.realpath', 'os.path.realpath', (['"""../TrainDataASD"""'], {}), "('../TrainDataASD')\n", (2799, 2818), False, 'import os\n'), ((3241, 3353), 'dataset_skeleton_ASD.load_dataset', 'load_dataset', (['train_data_path', 'test_data_path', 'seg_len', 'seg_overlap', 'sequence_length', '(0.2)', 'num_classes', 'cuda'], {}), '(train_data_path, test_data_path, seg_len, seg_overlap,\n sequence_length, 0.2, num_classes, cuda)\n', (3253, 3353), False, 'from dataset_skeleton_ASD import EngagementDataset, load_dataset\n'), ((11809, 11856), 'numpy.clip', 'np.clip', (['total_test_results', '(0)', '(num_classes - 1)'], {}), '(total_test_results, 0, num_classes - 1)\n', (11816, 11856), True, 'import numpy as np\n'), ((12134, 12190), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['total_test_targets', 'total_test_results'], {}), '(total_test_targets, total_test_results)\n', (12150, 12190), False, 'from sklearn.metrics import mean_squared_error, accuracy_score, f1_score, log_loss, make_scorer, confusion_matrix\n'), ((13161, 13217), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['total_test_targets', 'total_test_results'], {}), '(total_test_targets, total_test_results)\n', (13177, 13217), False, 'from sklearn.metrics import mean_squared_error, accuracy_score, f1_score, log_loss, make_scorer, confusion_matrix\n'), ((3191, 3217), 'os.listdir', 'os.listdir', (['test_data_path'], {}), '(test_data_path)\n', (3201, 3217), False, 
'import os\n'), ((3592, 3622), 'utils.WeightedMSELoss', 'WeightedMSELoss', (['label_weights'], {}), '(label_weights)\n', (3607, 3622), False, 'from utils import suppress_stdout, EarlyStopping, plot_confusion_matrix, WeightedMSELoss, scores, save_cnf, save_vec\n'), ((4487, 4549), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=batch_size, shuffle=True)\n', (4497, 4549), False, 'from torch.utils.data import DataLoader\n'), ((4576, 4638), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(valid_dataset, batch_size=batch_size, shuffle=True)\n', (4586, 4638), False, 'from torch.utils.data import DataLoader\n'), ((4664, 4717), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(test_dataset, batch_size=1, shuffle=False)\n', (4674, 4717), False, 'from torch.utils.data import DataLoader\n'), ((10352, 10396), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_targets', 'test_results'], {}), '(test_targets, test_results)\n', (10368, 10396), False, 'from sklearn.metrics import mean_squared_error, accuracy_score, f1_score, log_loss, make_scorer, confusion_matrix\n'), ((11225, 11269), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_targets', 'test_results'], {}), '(test_targets, test_results)\n', (11241, 11269), False, 'from sklearn.metrics import mean_squared_error, accuracy_score, f1_score, log_loss, make_scorer, confusion_matrix\n'), ((12298, 12333), 'os.path.join', 'os.path.join', (['output_dir', '"""cnf.txt"""'], {}), "(output_dir, 'cnf.txt')\n", (12310, 12333), False, 'import os\n'), ((12357, 12397), 'os.path.join', 'os.path.join', (['output_dir', '"""cnf_norm.txt"""'], {}), "(output_dir, 'cnf_norm.txt')\n", (12369, 12397), False, 'import os\n'), ((12809, 12853), 'os.path.join', 'os.path.join', (['output_dir', 
'"""targets_full.txt"""'], {}), "(output_dir, 'targets_full.txt')\n", (12821, 12853), False, 'import os\n'), ((12889, 12933), 'os.path.join', 'os.path.join', (['output_dir', '"""results_full.txt"""'], {}), "(output_dir, 'results_full.txt')\n", (12901, 12933), False, 'import os\n'), ((13265, 13305), 'os.path.join', 'os.path.join', (['output_dir', '"""cnf_1sec.txt"""'], {}), "(output_dir, 'cnf_1sec.txt')\n", (13277, 13305), False, 'import os\n'), ((13340, 13384), 'os.path.join', 'os.path.join', (['output_dir', '"""targets_1sec.txt"""'], {}), "(output_dir, 'targets_1sec.txt')\n", (13352, 13384), False, 'import os\n'), ((13418, 13462), 'os.path.join', 'os.path.join', (['output_dir', '"""results_1sec.txt"""'], {}), "(output_dir, 'results_1sec.txt')\n", (13430, 13462), False, 'import os\n'), ((4878, 4956), 'net.MyNet', 'MyNet', (['num_classes', '(2 * (58 - 12))', 'sequence_length', 'hidden_size', 'one_hot', 'cuda'], {}), '(num_classes, 2 * (58 - 12), sequence_length, hidden_size, one_hot, cuda)\n', (4883, 4956), False, 'from net import MyNet, calc_gradients\n'), ((5040, 5072), 'utils.EarlyStopping', 'EarlyStopping', ([], {'patience': 'patience'}), '(patience=patience)\n', (5053, 5072), False, 'from utils import suppress_stdout, EarlyStopping, plot_confusion_matrix, WeightedMSELoss, scores, save_cnf, save_vec\n'), ((10314, 10336), 'numpy.array', 'np.array', (['test_targets'], {}), '(test_targets)\n', (10322, 10336), True, 'import numpy as np\n'), ((10873, 10924), 'numpy.append', 'np.append', (['total_test_results', 'test_results'], {'axis': '(0)'}), '(total_test_results, test_results, axis=0)\n', (10882, 10924), True, 'import numpy as np\n'), ((10958, 11009), 'numpy.append', 'np.append', (['total_test_targets', 'test_targets'], {'axis': '(0)'}), '(total_test_targets, test_targets, axis=0)\n', (10967, 11009), True, 'import numpy as np\n'), ((8027, 8049), 'torch.autograd.Variable', 'Variable', (["data['data']"], {}), "(data['data'])\n", (8035, 8049), False, 'from 
torch.autograd import Variable\n'), ((8051, 8075), 'torch.autograd.Variable', 'Variable', (["data['target']"], {}), "(data['target'])\n", (8059, 8075), False, 'from torch.autograd import Variable\n'), ((9984, 10010), 'os.listdir', 'os.listdir', (['test_data_path'], {}), '(test_data_path)\n', (9994, 10010), False, 'import os\n'), ((10237, 10259), 'numpy.array', 'np.array', (['test_results'], {}), '(test_results)\n', (10245, 10259), True, 'import numpy as np\n'), ((10089, 10111), 'numpy.array', 'np.array', (['test_targets'], {}), '(test_targets)\n', (10097, 10111), True, 'import numpy as np\n'), ((3521, 3548), 'torch.tensor', 'torch.tensor', (['label_weights'], {}), '(label_weights)\n', (3533, 3548), False, 'import torch\n'), ((10131, 10153), 'numpy.array', 'np.array', (['test_results'], {}), '(test_results)\n', (10139, 10153), True, 'import numpy as np\n'), ((3900, 3911), 'time.time', 'time.time', ([], {}), '()\n', (3909, 3911), False, 'import time\n'), ((5946, 5968), 'torch.autograd.Variable', 'Variable', (["data['data']"], {}), "(data['data'])\n", (5954, 5968), False, 'from torch.autograd import Variable\n'), ((5970, 5994), 'torch.autograd.Variable', 'Variable', (["data['target']"], {}), "(data['target'])\n", (5978, 5994), False, 'from torch.autograd import Variable\n'), ((7053, 7075), 'torch.autograd.Variable', 'Variable', (["data['data']"], {}), "(data['data'])\n", (7061, 7075), False, 'from torch.autograd import Variable\n'), ((7077, 7101), 'torch.autograd.Variable', 'Variable', (["data['target']"], {}), "(data['target'])\n", (7085, 7101), False, 'from torch.autograd import Variable\n'), ((3813, 3824), 'time.time', 'time.time', ([], {}), '()\n', (3822, 3824), False, 'import time\n')] |
from mpl_toolkits.basemap import Basemap, shiftgrid, cm
import numpy as np
import matplotlib.pyplot as plt
import h5py
# create the figure and axes instances.
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
# Lambert conformal conic ('lcc') projection; major/minor sphere radii
# taken from the WGS84 ellipsoid.
m = Basemap(llcrnrlon=-145.5, llcrnrlat=1., urcrnrlon=-2.566, urcrnrlat=46.352,
        rsphere=(6378137.00, 6356752.3142),
        resolution='l', area_thresh=1000., projection='lcc',
        lat_1=50., lon_0=-107., ax=ax)
# Size of a 5 km regularly spaced native-projection grid (kept for the
# commented-out topography overlay below).
nx = int((m.xmax - m.xmin) / 5000.) + 1
ny = int((m.ymax - m.ymin) / 5000.) + 1
# topodat = m.transform_scalar(topoin,lons,lats,nx,ny)
# plot image over map with imshow.
# im = m.imshow(topodat,cm.GMT_haxby)
# Draw coastlines and political boundaries.
m.drawcoastlines()
m.drawcountries()
m.drawstates()
# Parallels and meridians, labelled on the left and bottom edges of the map.
parallels = np.arange(0., 80, 20.)
m.drawparallels(parallels, labels=[1, 0, 0, 1])
meridians = np.arange(10., 360., 30.)
m.drawmeridians(meridians, labels=[1, 0, 0, 1])
# add colorbar
# cb = m.colorbar(im,"right", size="5%", pad='2%')
ax.set_title('ETOPO5 Topography - Lambert Conformal Conic')
from datetime import datetime
# date = datetime.utcnow()
# CS=m.nightshade(date)
import path
# Grab the footpoint data from the first HDF5 file in the downloads folder.
downloads = path.path('C:/Users/balarsen/Downloads')
h5files = downloads.files('*.h5')
print(downloads, h5files)
# BUG FIX: open the file explicitly read-only; h5py's historical default
# mode is read-write ('r+'/'a'), which can modify or lock the data file.
with h5py.File(h5files[0], 'r') as h5:
    # geodetic lat/lon of the northern and southern field-line footpoints
    Pfn = h5['Pfn_geod_LatLon'][...]
    Pfs = h5['Pfs_geod_LatLon'][...]
    IsoTime = h5['IsoTime'][...]
    # BUG FIX: h5py returns byte strings for string datasets under Python 3;
    # decode before parsing, otherwise datetime.strptime() raises TypeError.
    dt = [datetime.strptime(v.decode() if isinstance(v, bytes) else v,
                            '%Y-%m-%dT%H:%M:%SZ') for v in IsoTime]
    print(Pfn)
    # Project (lon, lat) pairs to map coordinates and plot the northern
    # footpoints as red dots.
    x, y = m(Pfn[:, 1], Pfn[:, 0])
    m.scatter(x, y, color='r', marker='o')
    # m.plot(Pfn[:,1], Pfn[:,0], color='r')
    plt.draw()
plt.show()
"path.path",
"h5py.File",
"matplotlib.pyplot.show",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.figure",
"datetime.datetime.strptime",
"numpy.arange",
"mpl_toolkits.basemap.Basemap"
] | [((169, 181), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (179, 181), True, 'import matplotlib.pyplot as plt\n'), ((334, 540), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'llcrnrlon': '(-145.5)', 'llcrnrlat': '(1.0)', 'urcrnrlon': '(-2.566)', 'urcrnrlat': '(46.352)', 'rsphere': '(6378137.0, 6356752.3142)', 'resolution': '"""l"""', 'area_thresh': '(1000.0)', 'projection': '"""lcc"""', 'lat_1': '(50.0)', 'lon_0': '(-107.0)', 'ax': 'ax'}), "(llcrnrlon=-145.5, llcrnrlat=1.0, urcrnrlon=-2.566, urcrnrlat=46.352,\n rsphere=(6378137.0, 6356752.3142), resolution='l', area_thresh=1000.0,\n projection='lcc', lat_1=50.0, lon_0=-107.0, ax=ax)\n", (341, 540), False, 'from mpl_toolkits.basemap import Basemap, shiftgrid, cm\n'), ((1000, 1024), 'numpy.arange', 'np.arange', (['(0.0)', '(80)', '(20.0)'], {}), '(0.0, 80, 20.0)\n', (1009, 1024), True, 'import numpy as np\n'), ((1077, 1105), 'numpy.arange', 'np.arange', (['(10.0)', '(360.0)', '(30.0)'], {}), '(10.0, 360.0, 30.0)\n', (1086, 1105), True, 'import numpy as np\n'), ((1400, 1440), 'path.path', 'path.path', (['"""C:/Users/balarsen/Downloads"""'], {}), "('C:/Users/balarsen/Downloads')\n", (1409, 1440), False, 'import path\n'), ((1908, 1918), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1916, 1918), True, 'import matplotlib.pyplot as plt\n'), ((1919, 1929), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1927, 1929), True, 'import matplotlib.pyplot as plt\n'), ((1507, 1528), 'h5py.File', 'h5py.File', (['h5files[0]'], {}), '(h5files[0])\n', (1516, 1528), False, 'import h5py\n'), ((1727, 1769), 'datetime.datetime.strptime', 'datetime.strptime', (['v', '"""%Y-%m-%dT%H:%M:%SZ"""'], {}), "(v, '%Y-%m-%dT%H:%M:%SZ')\n", (1744, 1769), False, 'from datetime import datetime\n')] |
"""
File to generate comparisons between the models for data recorded as part of
the HCHS publicly available data set.
https://sleepdata.org/datasets/hchs
"""
from __future__ import print_function
#Set the path to the downloaded HCHS data files on YOUR system!!
hchs_data_location='../../HumanData/HCHS/hchs-sol-sueno-'
from builtins import map
from builtins import str
from builtins import range
from circular_stats import *
import pandas as pd
import scipy as sp
import seaborn as sbn
import numpy as np
import scipy as sp
import pylab as plt
from circstats import *
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.weightstats import ttest_ind
from scipy.interpolate import InterpolatedUnivariateSpline
import matplotlib.gridspec as gridspec
from HCRSimPY.light_schedules import *
from HCRSimPy.plots import *
from HCRSimPY.models import *
convert_mil_time=lambda x: pd.DatetimeIndex(x).hour+pd.DatetimeIndex(x).minute/60.0+pd.DatetimeIndex(x).second/3600.0
def findCircularCorr(data1, criteria=None):
    """Circular correlations between a measured circadian marker and the
    DLMO times predicted by the three models (SP, TP, VDP).

    ``data1`` and the model DLMO columns are military times (hours), so both
    are mapped onto the circle via the factor pi/12 before correlating.
    ``criteria`` optionally selects a subset of rows of the global ``hchs``
    frame; by default every row is used.
    """
    if criteria is None:
        criteria = [True] * data1.shape[0]
    to_radians = sp.pi / 12.0  # 24 h clock -> angle
    marker_angle = data1 * to_radians
    sp_corr = nancorrcc(hchs[criteria].SP_DLMO * to_radians, marker_angle)
    tp_corr = nancorrcc(hchs.TP_DLMO[criteria] * to_radians, marker_angle)
    vdp_corr = nancorrcc(hchs.VDP_DLMO[criteria] * to_radians, marker_angle)
    return (sp_corr, tp_corr, vdp_corr)
def findKeyDLMOTimes(tsdf):
    """Find the DLMO time for a given model time-series prediction.

    The trajectory is folded onto a 24 h day, the circular mean phase is
    computed for each (rounded) time of day, and that mean-phase-vs-time
    relation is inverted to find the clock time at which the phase crosses
    1.309 rad (the DLMO phase marker used throughout this analysis).
    """
    wrapped_time = np.round([fmod(x, 24.0) for x in list(tsdf.Time)], 2)
    df = pd.DataFrame({'Time': wrapped_time, 'Phase': tsdf.Phase})
    # Named aggregation: the dict form of SeriesGroupBy.agg used previously
    # was deprecated in pandas 0.20 and removed in pandas 1.0.
    df2 = df.groupby('Time')['Phase'].agg(
        Circular_Mean=circular_mean,
        Phase_Coherence=phase_coherence,
        Samples=np.size,
    )
    # Invert mean phase -> clock time with a 1-d interpolant.
    mean_func = sp.interpolate.interp1d(np.array(df2['Circular_Mean']), np.array(df2.index))
    return(mean_func(1.309))
def record_diff(tsdfS, tsdfV, tsdfT):
    """Return the DLMO times predicted by the three models for one light
    schedule, as a tuple ordered (single-pop, VDP, two-pop) to match the
    argument order."""
    return (
        findKeyDLMOTimes(tsdfS),
        findKeyDLMOTimes(tsdfV),
        findKeyDLMOTimes(tsdfT),
    )
# Model DLMO predictions previously computed for each HCHS subject; strip
# stray whitespace from the column headers.
hchs2=pd.read_csv('hchs_model_diff.csv').rename(columns=lambda x: x.strip())
# SAWA* codes are the HCHS "sueno" ancillary dataset's actigraphy summary
# variables; they are renamed to readable names just below.
column_list=['PID', 'SAWA9', 'SAWA339', 'SHIFTWORKERYN', 'SAWA174', 'SAWA313', 'SAWA315', 'SAWA316', 'SAWA323', 'RMEQ', 'SAWA325', 'SAWA326', 'SAWA327', 'SAWA328', 'SAWA317']
sleep_data=pd.read_csv('../../HumanData/HCHS/datasets/hchs-sol-sueno-ancillary-dataset-0.3.0.csv', usecols=column_list)
sleep_data=sleep_data.rename(columns={'PID':'Filename','SAWA9':'Num_Good_Days', 'SAWA339':'Inter_Day_Variability', 'SAWA174':'Av_Sleep_Onset_Weekend', 'SAWA313':'Av_Bedtime', 'SAWA315':'Av_Sleep_Onset', 'SAWA316':'Sd_Sleep_Onset', 'SAWA323':'Av_MidSleep', 'SAWA325':'White_Light', 'SAWA326':'Blue_Light', 'SAWA327':'Green_Light', 'SAWA328':'Red_Light', 'SAWA317':'Av_Sleep_Offset'})
#Remove any errors from the hchs sims (non-positive DLMO marks a failed run)
hchs2=hchs2.loc[hchs2.SP_DLMO>0.0]
#Cast the times in the sleep data to the correct formats
sleep_data.Av_Sleep_Onset=pd.to_datetime(sleep_data.Av_Sleep_Onset, format='%H:%M:%S')
sleep_data.Sd_Sleep_Onset=pd.to_datetime(sleep_data.Sd_Sleep_Onset, format='%H:%M:%S')
sleep_data.Av_MidSleep=pd.to_datetime(sleep_data.Av_MidSleep, format='%H:%M:%S')
sleep_data.Av_Bedtime=pd.to_datetime(sleep_data.Av_Bedtime, format='%H:%M:%S')
sleep_data.Av_Sleep_Onset_Weekend=pd.to_datetime(sleep_data.Av_Sleep_Onset_Weekend, format='%H:%M:%S')
#Convert the times to military times (decimal hours, e.g. 13.5 == 13:30)
sleep_data.Av_Sleep_Onset=convert_mil_time(sleep_data.Av_Sleep_Onset)
sleep_data.Sd_Sleep_Onset=convert_mil_time(sleep_data.Sd_Sleep_Onset)
sleep_data.Av_MidSleep=convert_mil_time(sleep_data.Av_MidSleep)
sleep_data.Av_Bedtime=convert_mil_time(sleep_data.Av_Bedtime)
sleep_data.Av_Sleep_Offset=convert_mil_time(sleep_data.Av_Sleep_Offset)
sleep_data.Av_Sleep_Onset_Weekend=convert_mil_time(sleep_data.Av_Sleep_Onset_Weekend)
# Join model predictions with the actigraphy summaries on subject ID.
hchs=pd.merge(hchs2, sleep_data, on='Filename', how='left')
#Drop any rows missing simulation data for some reason
hchs=hchs[hchs['Num_Good_Days']>=5]
hchs=hchs.dropna(subset=['SP_DLMO', 'TP_DLMO', 'VDP_DLMO'])
#Add some additional columns: pairwise model differences and
#(sleep onset - model DLMO) differences, all in clock hours.
diff_sptp=[]
diff_spvdp=[]
diff_tpvdp=[]
diff_spSODLMO=[]
diff_vdpSODLMO=[]
diff_tpSODLMO=[]
# subtract_clock_times comes from the circular-statistics star imports;
# presumably it wraps the difference on the 24 h clock — confirm its contract.
for i in range(0, hchs.shape[0]):
    diff_sptp.append(subtract_clock_times(hchs.SP_DLMO.iloc[i], hchs.TP_DLMO.iloc[i]))
    diff_spvdp.append(subtract_clock_times(hchs.SP_DLMO.iloc[i], hchs.VDP_DLMO.iloc[i]))
    diff_tpvdp.append(subtract_clock_times(hchs.TP_DLMO.iloc[i], hchs.VDP_DLMO.iloc[i]))
    diff_spSODLMO.append(subtract_clock_times(hchs.Av_Sleep_Onset.iloc[i], hchs.SP_DLMO.iloc[i]))
    diff_tpSODLMO.append(subtract_clock_times(hchs.Av_Sleep_Onset.iloc[i], hchs.TP_DLMO.iloc[i]))
    diff_vdpSODLMO.append(subtract_clock_times(hchs.Av_Sleep_Onset.iloc[i], hchs.VDP_DLMO.iloc[i]))
hchs['diff_sptp']=pd.Series(diff_sptp, index=hchs.index)
hchs['diff_spvdp']=pd.Series(diff_spvdp, index=hchs.index)
hchs['diff_tpvdp']=pd.Series(diff_tpvdp, index=hchs.index)
hchs['diff_spSODLMO']=pd.Series(diff_spSODLMO, index=hchs.index)
hchs['diff_tpSODLMO']=pd.Series(diff_tpSODLMO, index=hchs.index)
hchs['diff_vdpSODLMO']=pd.Series(diff_vdpSODLMO, index=hchs.index)
#Find Major differences in the model predictions: SP vs VDP disagree by > 1 h
criteria=(abs(hchs.diff_spvdp)>1.0) #& (abs(hchs.diff_sptp)<0.5)
criteriaInt=list(map(int, criteria))
hchs['Model_Diff']=pd.Series(criteriaInt, index=hchs.index)
#Save a copy of the list of model diff
# Subject IDs where SP and VDP DLMO predictions disagree by more than 1 h ...
list_of_discrepancies=list(hchs[criteria].Filename)
criteriaNOT=[not c for c in criteria]
# ... and where they agree (the complement).
list_of_agreements=list(hchs[criteriaNOT].Filename)
print(("Number of Model Diff: ", sum(criteriaInt)))
print(("Percentage of Model Diff: ", sum(criteriaInt)/float(hchs.shape[0])*100))
print(("Total number of light schedules considered", hchs.shape))
x=np.linspace(0.0,24.0,200)  # common 24 h grid the light profiles are resampled onto
data_list=[]

def _append_light_profiles(pid_list, agreement, err_msg):
    """Resample each subject's average daily light profile onto ``x`` and
    append (PID, agreement, time, log10-lux) rows to the global ``data_list``.

    pid_list  : iterable of subject IDs (zero-padded to 8 chars to match the
                HCHS file naming convention)
    agreement : 'Agree' or 'Disagree' label carried into each row
    err_msg   : prefix printed when a subject's file cannot be processed

    This replaces two previously duplicated inline loops; the bare ``except:``
    they used is narrowed to ``Exception`` so SystemExit/KeyboardInterrupt
    are no longer swallowed.
    """
    for f in pid_list:
        try:
            pid = str(f)
            if len(pid) != 8:
                pid = pid.zfill(8)
            filename = hchs_data_location + pid + '.csv'
            ls = hchs_light(filename)
            # average across recorded days -> one mean profile per subject
            av_data = ls.data.groupby(by=['TimeCount']).mean()
            lf = InterpolatedUnivariateSpline(av_data.index, av_data.Lux, k=1)
            y_vals = list(map(lf, x))
            for i in range(len(x)):
                data_list.append((pid, agreement, x[i], LightLog(y_vals[i])))
        except Exception:
            print((err_msg, f))

_append_light_profiles(list_of_discrepancies, 'Disagree', "Error with file: ")
_append_light_profiles(list_of_agreements, 'Agree', "Error with file (agreement): ")
# Long-form frame of resampled light profiles: one row per (subject, time).
lightSchedulesD=pd.DataFrame(data_list, columns=['PID', 'Agreement', 'Time', 'Log_Lux'])
# latexify comes from the HCRSimPY plots star import; presumably it sets
# matplotlib rcParams for publication-quality output — confirm.
latexify(columns=1)
plt.figure()
G = gridspec.GridSpec(1, 2)
ax2= plt.subplot(G[0, 0])
ax1=plt.subplot(G[0, 1])
# Panel (b): mean light exposure over the day, split by model agreement.
sbn.lineplot(x="Time", y="Log_Lux", data=lightSchedulesD, hue='Agreement', style='Agreement', ci=None, lw=2.0, ax=ax1, legend=False, palette=["green", "blue"]);
#handles, labels = ax.get_legend_handles_labels()
#ax.legend(handles=handles[1:], labels=labels[1:])
#ax1.set_yscale('log', basex=10)
ax1.set_xlabel('Time of Day');
ax1.set_ylabel(r'$\log_{10}(Lux)$');
#ax1.set_title('Light Schedules and Model Discrepancies');
ax1.set_xlim(0,24.5)
ax1.text(1.5, 3.5, '(b)')
ax1.set_xticks([0,6,12,18,24])
#Add a Regression plot to show differences in the predictions
# Panel (a): SP vs VDP DLMO scatter; agreeing subjects in green circles,
# disagreeing subjects in blue crosses.
sbn.regplot('SP_DLMO', 'VDP_DLMO', data=hchs[criteriaNOT], color='green', fit_reg=False, marker='o', scatter_kws={"s": 7, "facecolor":'none'}, ax=ax2);
sbn.regplot('SP_DLMO', 'VDP_DLMO', data=hchs[criteria], color='blue', fit_reg=False, marker='x', scatter_kws={"s": 9}, ax=ax2);
ax2.set_xlabel('SP DLMO Time')
ax2.set_ylabel('VDP DLMO Time')
ax2.set_xlim(15,24)
ax2.set_ylim(15,24)
# y = x reference line: points off the diagonal are model disagreements.
ax2.plot(np.linspace(15,24,100), np.linspace(15,24,100), lw=2.0, color='red')
ax2.set_xticks([15,18,21,24])
#ax2.set_title('SP DLMO versus VDP DLMO');
ax2.text(15.5,23.0,'(a)')
plt.tight_layout()
#This creates the figure from the file
plt.savefig('model_diff.eps', dpi=1200)
plt.show()
#--------------------------------------------------------------------------------------------------------------------------------------
# Run the three models on the AVERAGE light schedule of each group and report
# the DLMO differences.  The Agree and Disagree pipelines were previously
# duplicated inline; both now run through the same helpers.

def _periodic_light_func(mean_log_lux):
    """Build a 24 h-periodic light function (lux as a function of hour) from
    a series of mean log10(lux) values indexed by time of day."""
    spline = InterpolatedUnivariateSpline(mean_log_lux.index, np.power(10, mean_log_lux.values), k=1)
    return lambda t: spline(fmod(t, 24.0))  # wrap the time

def _average_dlmo_diffs(light_func, trans_days=50):
    """Integrate the single-pop, VDP and two-pop models under ``light_func``
    (after ``trans_days`` days of entrainment for the initial conditions)
    and return the array of their DLMO times, ordered as in record_diff."""
    init = guessICData(light_func, 0.0, length=trans_days)
    initVDP = guessICDataVDP(light_func, 0.0, length=trans_days)
    initTwo = guessICDataTwoPop(light_func, 0.0, length=trans_days)
    a = SinglePopModel(light_func)
    b = vdp_model(light_func)
    c = TwoPopModel(light_func)
    a.integrateModelData((0.0, 40.0 * 24.0), initial=init)
    b.integrateModelData((0.0, 40.0 * 24.0), initial=initVDP)
    c.integrateModelData((0.0, 40.0 * 24.0), initial=initTwo)
    return np.array(record_diff(a.getTS(), b.getTS(), c.getTS()))

for _label in ('Agree', 'Disagree'):
    _means = (lightSchedulesD.loc[lightSchedulesD['Agreement'] == _label]
              .groupby(by=['Time'])['Log_Lux'].mean())
    print(("Average DLMO diff for %s: " % _label,
           _average_dlmo_diffs(_periodic_light_func(_means))))
| [
"seaborn.lineplot",
"pandas.read_csv",
"pandas.DatetimeIndex",
"seaborn.regplot",
"pylab.figure",
"pylab.tight_layout",
"builtins.range",
"pandas.DataFrame",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.power",
"pandas.merge",
"numpy.linspace",
"pylab.subplot",
"pylab.savefig",... | [((2560, 2677), 'pandas.read_csv', 'pd.read_csv', (['"""../../HumanData/HCHS/datasets/hchs-sol-sueno-ancillary-dataset-0.3.0.csv"""'], {'usecols': 'column_list'}), "(\n '../../HumanData/HCHS/datasets/hchs-sol-sueno-ancillary-dataset-0.3.0.csv',\n usecols=column_list)\n", (2571, 2677), True, 'import pandas as pd\n'), ((3211, 3271), 'pandas.to_datetime', 'pd.to_datetime', (['sleep_data.Av_Sleep_Onset'], {'format': '"""%H:%M:%S"""'}), "(sleep_data.Av_Sleep_Onset, format='%H:%M:%S')\n", (3225, 3271), True, 'import pandas as pd\n'), ((3298, 3358), 'pandas.to_datetime', 'pd.to_datetime', (['sleep_data.Sd_Sleep_Onset'], {'format': '"""%H:%M:%S"""'}), "(sleep_data.Sd_Sleep_Onset, format='%H:%M:%S')\n", (3312, 3358), True, 'import pandas as pd\n'), ((3382, 3439), 'pandas.to_datetime', 'pd.to_datetime', (['sleep_data.Av_MidSleep'], {'format': '"""%H:%M:%S"""'}), "(sleep_data.Av_MidSleep, format='%H:%M:%S')\n", (3396, 3439), True, 'import pandas as pd\n'), ((3462, 3518), 'pandas.to_datetime', 'pd.to_datetime', (['sleep_data.Av_Bedtime'], {'format': '"""%H:%M:%S"""'}), "(sleep_data.Av_Bedtime, format='%H:%M:%S')\n", (3476, 3518), True, 'import pandas as pd\n'), ((3553, 3621), 'pandas.to_datetime', 'pd.to_datetime', (['sleep_data.Av_Sleep_Onset_Weekend'], {'format': '"""%H:%M:%S"""'}), "(sleep_data.Av_Sleep_Onset_Weekend, format='%H:%M:%S')\n", (3567, 3621), True, 'import pandas as pd\n'), ((4090, 4144), 'pandas.merge', 'pd.merge', (['hchs2', 'sleep_data'], {'on': '"""Filename"""', 'how': '"""left"""'}), "(hchs2, sleep_data, on='Filename', how='left')\n", (4098, 4144), True, 'import pandas as pd\n'), ((4431, 4454), 'builtins.range', 'range', (['(0)', 'hchs.shape[0]'], {}), '(0, hchs.shape[0])\n', (4436, 4454), False, 'from builtins import range\n'), ((5036, 5074), 'pandas.Series', 'pd.Series', (['diff_sptp'], {'index': 'hchs.index'}), '(diff_sptp, index=hchs.index)\n', (5045, 5074), True, 'import pandas as pd\n'), ((5094, 5133), 'pandas.Series', 
'pd.Series', (['diff_spvdp'], {'index': 'hchs.index'}), '(diff_spvdp, index=hchs.index)\n', (5103, 5133), True, 'import pandas as pd\n'), ((5153, 5192), 'pandas.Series', 'pd.Series', (['diff_tpvdp'], {'index': 'hchs.index'}), '(diff_tpvdp, index=hchs.index)\n', (5162, 5192), True, 'import pandas as pd\n'), ((5215, 5257), 'pandas.Series', 'pd.Series', (['diff_spSODLMO'], {'index': 'hchs.index'}), '(diff_spSODLMO, index=hchs.index)\n', (5224, 5257), True, 'import pandas as pd\n'), ((5280, 5322), 'pandas.Series', 'pd.Series', (['diff_tpSODLMO'], {'index': 'hchs.index'}), '(diff_tpSODLMO, index=hchs.index)\n', (5289, 5322), True, 'import pandas as pd\n'), ((5346, 5389), 'pandas.Series', 'pd.Series', (['diff_vdpSODLMO'], {'index': 'hchs.index'}), '(diff_vdpSODLMO, index=hchs.index)\n', (5355, 5389), True, 'import pandas as pd\n'), ((5562, 5602), 'pandas.Series', 'pd.Series', (['criteriaInt'], {'index': 'hchs.index'}), '(criteriaInt, index=hchs.index)\n', (5571, 5602), True, 'import pandas as pd\n'), ((5989, 6016), 'numpy.linspace', 'np.linspace', (['(0.0)', '(24.0)', '(200)'], {}), '(0.0, 24.0, 200)\n', (6000, 6016), True, 'import numpy as np\n'), ((7127, 7199), 'pandas.DataFrame', 'pd.DataFrame', (['data_list'], {'columns': "['PID', 'Agreement', 'Time', 'Log_Lux']"}), "(data_list, columns=['PID', 'Agreement', 'Time', 'Log_Lux'])\n", (7139, 7199), True, 'import pandas as pd\n'), ((7221, 7233), 'pylab.figure', 'plt.figure', ([], {}), '()\n', (7231, 7233), True, 'import pylab as plt\n'), ((7238, 7261), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (7255, 7261), True, 'import matplotlib.gridspec as gridspec\n'), ((7267, 7287), 'pylab.subplot', 'plt.subplot', (['G[0, 0]'], {}), '(G[0, 0])\n', (7278, 7287), True, 'import pylab as plt\n'), ((7292, 7312), 'pylab.subplot', 'plt.subplot', (['G[0, 1]'], {}), '(G[0, 1])\n', (7303, 7312), True, 'import pylab as plt\n'), ((7315, 7483), 'seaborn.lineplot', 'sbn.lineplot', ([], {'x': 
'"""Time"""', 'y': '"""Log_Lux"""', 'data': 'lightSchedulesD', 'hue': '"""Agreement"""', 'style': '"""Agreement"""', 'ci': 'None', 'lw': '(2.0)', 'ax': 'ax1', 'legend': '(False)', 'palette': "['green', 'blue']"}), "(x='Time', y='Log_Lux', data=lightSchedulesD, hue='Agreement',\n style='Agreement', ci=None, lw=2.0, ax=ax1, legend=False, palette=[\n 'green', 'blue'])\n", (7327, 7483), True, 'import seaborn as sbn\n'), ((7879, 8038), 'seaborn.regplot', 'sbn.regplot', (['"""SP_DLMO"""', '"""VDP_DLMO"""'], {'data': 'hchs[criteriaNOT]', 'color': '"""green"""', 'fit_reg': '(False)', 'marker': '"""o"""', 'scatter_kws': "{'s': 7, 'facecolor': 'none'}", 'ax': 'ax2'}), "('SP_DLMO', 'VDP_DLMO', data=hchs[criteriaNOT], color='green',\n fit_reg=False, marker='o', scatter_kws={'s': 7, 'facecolor': 'none'},\n ax=ax2)\n", (7890, 8038), True, 'import seaborn as sbn\n'), ((8031, 8161), 'seaborn.regplot', 'sbn.regplot', (['"""SP_DLMO"""', '"""VDP_DLMO"""'], {'data': 'hchs[criteria]', 'color': '"""blue"""', 'fit_reg': '(False)', 'marker': '"""x"""', 'scatter_kws': "{'s': 9}", 'ax': 'ax2'}), "('SP_DLMO', 'VDP_DLMO', data=hchs[criteria], color='blue',\n fit_reg=False, marker='x', scatter_kws={'s': 9}, ax=ax2)\n", (8042, 8161), True, 'import seaborn as sbn\n'), ((8440, 8458), 'pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8456, 8458), True, 'import pylab as plt\n'), ((8498, 8537), 'pylab.savefig', 'plt.savefig', (['"""model_diff.eps"""'], {'dpi': '(1200)'}), "('model_diff.eps', dpi=1200)\n", (8509, 8537), True, 'import pylab as plt\n'), ((8538, 8548), 'pylab.show', 'plt.show', ([], {}), '()\n', (8546, 8548), True, 'import pylab as plt\n'), ((1721, 1778), 'pandas.DataFrame', 'pd.DataFrame', (["{'Time': wrapped_time, 'Phase': tsdf.Phase}"], {}), "({'Time': wrapped_time, 'Phase': tsdf.Phase})\n", (1733, 1778), True, 'import pandas as pd\n'), ((5523, 5541), 'builtins.map', 'map', (['int', 'criteria'], {}), '(int, criteria)\n', (5526, 5541), False, 'from builtins import map\n'), 
((8271, 8295), 'numpy.linspace', 'np.linspace', (['(15)', '(24)', '(100)'], {}), '(15, 24, 100)\n', (8282, 8295), True, 'import numpy as np\n'), ((8295, 8319), 'numpy.linspace', 'np.linspace', (['(15)', '(24)', '(100)'], {}), '(15, 24, 100)\n', (8306, 8319), True, 'import numpy as np\n'), ((8911, 8946), 'numpy.power', 'np.power', (['(10)', 'agreeOnlyMeans.values'], {}), '(10, agreeOnlyMeans.values)\n', (8919, 8946), True, 'import numpy as np\n'), ((9802, 9840), 'numpy.power', 'np.power', (['(10)', 'disagreeOnlyMeans.values'], {}), '(10, disagreeOnlyMeans.values)\n', (9810, 9840), True, 'import numpy as np\n'), ((1945, 1975), 'numpy.array', 'np.array', (["df2['Circular_Mean']"], {}), "(df2['Circular_Mean'])\n", (1953, 1975), True, 'import numpy as np\n'), ((1977, 1996), 'numpy.array', 'np.array', (['df2.index'], {}), '(df2.index)\n', (1985, 1996), True, 'import numpy as np\n'), ((2303, 2337), 'pandas.read_csv', 'pd.read_csv', (['"""hchs_model_diff.csv"""'], {}), "('hchs_model_diff.csv')\n", (2314, 2337), True, 'import pandas as pd\n'), ((6080, 6086), 'builtins.str', 'str', (['f'], {}), '(f)\n', (6083, 6086), False, 'from builtins import str\n'), ((6309, 6370), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpolatedUnivariateSpline', (['av_data.index', 'av_data.Lux'], {'k': '(1)'}), '(av_data.index, av_data.Lux, k=1)\n', (6337, 6370), False, 'from scipy.interpolate import InterpolatedUnivariateSpline\n'), ((6614, 6620), 'builtins.str', 'str', (['f'], {}), '(f)\n', (6617, 6620), False, 'from builtins import str\n'), ((6844, 6905), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpolatedUnivariateSpline', (['av_data.index', 'av_data.Lux'], {'k': '(1)'}), '(av_data.index, av_data.Lux, k=1)\n', (6872, 6905), False, 'from scipy.interpolate import InterpolatedUnivariateSpline\n'), ((6391, 6401), 'builtins.map', 'map', (['lf', 'x'], {}), '(lf, x)\n', (6394, 6401), False, 'from builtins import map\n'), ((6926, 6936), 'builtins.map', 'map', (['lf', 'x'], {}), 
'(lf, x)\n', (6929, 6936), False, 'from builtins import map\n'), ((923, 942), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['x'], {}), '(x)\n', (939, 942), True, 'import pandas as pd\n'), ((980, 999), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['x'], {}), '(x)\n', (996, 999), True, 'import pandas as pd\n'), ((1270, 1294), 'builtins.range', 'range', (['(0)', 'data1.shape[0]'], {}), '(0, data1.shape[0])\n', (1275, 1294), False, 'from builtins import range\n'), ((948, 967), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['x'], {}), '(x)\n', (964, 967), True, 'import pandas as pd\n'), ((6464, 6470), 'builtins.str', 'str', (['f'], {}), '(f)\n', (6467, 6470), False, 'from builtins import str\n'), ((6999, 7005), 'builtins.str', 'str', (['f'], {}), '(f)\n', (7002, 7005), False, 'from builtins import str\n')] |
import tensorflow as tf
import logging
from pdp.utils.vocab import aa_idx_vocab
import numpy as np
# new span
#
import random
# todo : data split 에 대해 생각. 어떻게 데이터 버전 관리할지
class PretrainDataLoader:
    """Builds masked-LM pretraining datasets from gzip TFRecord files of
    protein sequences (amino-acid indices plus a FASTA id string)."""

    def __init__(self, files, seed: int = 12345):
        # Shuffle the file list deterministically, then reserve the first
        # 90% for training and the remaining 10% for validation.
        self.files = files
        self.seed = seed
        random.seed(seed)
        random.shuffle(self.files)
        self._train_idx = round(len(files) * 0.9)
    # TODO: add numpy / tfrecord variants
    def download(self):
        pass
    # TODO: think about TFX and data versioning -> MLOps concerns
    def load(self, span: int) -> tf.data.TFRecordDataset:
        pass
    # 1. pre-processing, parsing.
    def __call__(
        self,
        mode="train",
        is_training: bool = False,
        max_sequence_length: int = 512,
        num_token_predictions: int = 128,
        mask_ratio: float = 0.15,
        buffer_size: int = 200,
        batch_size: int = 8,
    ) -> tf.data.TFRecordDataset:
        """
        Build the masked-LM pretraining pipeline.

        :param mode: 'train' or 'valid'; selects the 90/10 file split.
        :param is_training: when True the dataset is shuffled and repeated;
            when False it is a single, un-shuffled pass (for evaluation).
        :param max_sequence_length: padded length of each sequence.
        :param num_token_predictions: padded number of LM target positions.
        :param mask_ratio: fraction of the sequence length used as LM targets
            (split 80% <mask> / 10% random replacement / 10% kept, BERT-style).
        :param buffer_size: used both as the shuffle buffer and the prefetch size.
        :param batch_size: examples per padded batch.
        :return: batched tf.data dataset of masked-LM feature dicts.
        """
        features = {
            "fasta": tf.io.FixedLenFeature([], tf.string),
            "seq": tf.io.RaggedFeature(value_key="seq", dtype=tf.int64),
        }
        if mode == "train":
            self.target_files = self.files[: self._train_idx]
            train_files = self.target_files
            logging.info(
                f"you are using training dataset, list of training file : {train_files}"
            )
            dataset = tf.data.TFRecordDataset(
                train_files,
                num_parallel_reads=tf.data.experimental.AUTOTUNE,
                compression_type="GZIP",
            )
        else:
            self.target_files = self.files[self._train_idx :]
            valid_files = self.target_files
            logging.info(
                f"you are using validation dataset, list of training file : {valid_files}"
            )
            dataset = tf.data.TFRecordDataset(
                valid_files,
                num_parallel_reads=tf.data.experimental.AUTOTUNE,
                compression_type="GZIP",
            )
        if is_training:
            dataset = dataset.shuffle(buffer_size, seed=self.seed)
            dataset = dataset.repeat()
        else:
            logging.info("you are using dataset for evaluation. no repeat, no shuffle")
        # Upper bounds for the 80% masked / 10% replaced splits of the LM budget.
        max_mask_num = int(num_token_predictions * 0.8)
        max_another_num = int(num_token_predictions * 0.1)
        def _parse_function(example_proto):
            # Per-example parsing and BERT-style masking, pinned to CPU.
            with tf.device("cpu"):
                eg = tf.io.parse_single_example(example_proto, features)
                fasta = eg["fasta"]
                seq = tf.cast(eg["seq"], tf.int32)
                # length = eg['seq'].shape[0]
                length = len(
                    eg["seq"]
                )  # length means only number of AA, excluding the <cls> , <eos>
                # add cls, eos token
                seq = tf.concat(
                    [[aa_idx_vocab["<cls>"]], seq, [aa_idx_vocab["<eos>"]]], axis=0
                )
                mask = tf.ones(length + 2, dtype=tf.int32)
                # count: 80% of the mask budget gets <mask>, 10% a random AA,
                # and the remaining 10% is left unchanged (but still predicted).
                num_mask = tf.cast(length, tf.float32) * mask_ratio * 0.8
                num_mask = tf.clip_by_value(
                    tf.cast(num_mask, tf.int32), 1, max_mask_num
                )
                num_replace = tf.cast(length, tf.float32) * mask_ratio * 0.1
                num_replace = tf.clip_by_value(
                    tf.cast(num_replace, tf.int32), 1, max_another_num
                )
                num_total = num_mask + 2 * num_replace
                # lm position.
                # range -> we have to exclude <cls>, <eos>
                lm_positions = tf.random.shuffle(tf.range(1, length + 1))[:num_total]
                masked_lm_positions = lm_positions[:num_total]
                # lm weight
                lm_weights = tf.ones(num_total, tf.int32)
                # new input: first num_mask positions -> <mask> token
                masked_seq = tf.identity(seq)
                replacing_seq = tf.fill([num_mask], np.int32(aa_idx_vocab["<mask>"]))
                masked_seq = tf.tensor_scatter_nd_update(
                    masked_seq,
                    tf.expand_dims(masked_lm_positions[:num_mask], axis=-1),
                    replacing_seq,
                )
                # next num_replace positions -> uniformly random AA index in [0, 20)
                replacing_seq = tf.random.uniform([num_replace], 0, 20, dtype=tf.int32)
                masked_seq = tf.tensor_scatter_nd_update(
                    masked_seq,
                    tf.expand_dims(
                        masked_lm_positions[num_mask : num_mask + num_replace], axis=-1
                    ),
                    replacing_seq,
                )
                # gt: the original (unmasked) tokens at every LM position
                masked_lm_ids = tf.gather(seq, masked_lm_positions)
            return {
                "input_fasta": fasta,
                "input_seq": masked_seq,
                "input_seq_mask": mask,
                "input_lm_positions": masked_lm_positions,
                "input_lm_target": masked_lm_ids,
                "input_lm_weights": lm_weights,
                "length": length,
            }
        padded_shapes = {
            "input_fasta": [],
            "input_seq": [max_sequence_length],
            "input_seq_mask": [max_sequence_length],
            "input_lm_positions": [num_token_predictions],
            "input_lm_target": [num_token_predictions],
            "input_lm_weights": [num_token_predictions],
            "length": [],
        }
        zero = tf.constant(0, dtype=tf.int32)
        # Padding values; LM targets pad with 20 — presumably an out-of-range
        # / ignore index for the 20 amino acids — confirm against the vocab.
        padded_value = {
            "input_fasta": "",
            "input_seq": tf.cast(aa_idx_vocab["<pad>"], tf.int32),
            "input_seq_mask": zero,
            "input_lm_positions": zero,
            "input_lm_target": np.int32(20),
            "input_lm_weights": zero,
            "length": zero,
        }
        dataset = dataset.map(
            _parse_function, num_parallel_calls=tf.data.experimental.AUTOTUNE
        )
        dataset = dataset.padded_batch(
            batch_size, padded_shapes=padded_shapes, padding_values=padded_value
        )
        dataset = dataset.prefetch(buffer_size)
        return dataset
# todo : uniref50 순서의 의미 -> 알파벳 순서인가?
| [
"tensorflow.io.RaggedFeature",
"tensorflow.ones",
"tensorflow.range",
"tensorflow.data.TFRecordDataset",
"tensorflow.random.uniform",
"tensorflow.identity",
"random.shuffle",
"tensorflow.gather",
"tensorflow.device",
"tensorflow.io.parse_single_example",
"tensorflow.concat",
"tensorflow.consta... | [((323, 340), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (334, 340), False, 'import random\n'), ((350, 376), 'random.shuffle', 'random.shuffle', (['self.files'], {}), '(self.files)\n', (364, 376), False, 'import random\n'), ((6007, 6037), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (6018, 6037), True, 'import tensorflow as tf\n'), ((1453, 1489), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (1474, 1489), True, 'import tensorflow as tf\n'), ((1511, 1563), 'tensorflow.io.RaggedFeature', 'tf.io.RaggedFeature', ([], {'value_key': '"""seq"""', 'dtype': 'tf.int64'}), "(value_key='seq', dtype=tf.int64)\n", (1530, 1563), True, 'import tensorflow as tf\n'), ((1726, 1817), 'logging.info', 'logging.info', (['f"""you are using training dataset, list of training file : {train_files}"""'], {}), "(\n f'you are using training dataset, list of training file : {train_files}')\n", (1738, 1817), False, 'import logging\n'), ((1868, 1984), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['train_files'], {'num_parallel_reads': 'tf.data.experimental.AUTOTUNE', 'compression_type': '"""GZIP"""'}), "(train_files, num_parallel_reads=tf.data.\n experimental.AUTOTUNE, compression_type='GZIP')\n", (1891, 1984), True, 'import tensorflow as tf\n'), ((2183, 2276), 'logging.info', 'logging.info', (['f"""you are using validation dataset, list of training file : {valid_files}"""'], {}), "(\n f'you are using validation dataset, list of training file : {valid_files}')\n", (2195, 2276), False, 'import logging\n'), ((2327, 2443), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['valid_files'], {'num_parallel_reads': 'tf.data.experimental.AUTOTUNE', 'compression_type': '"""GZIP"""'}), "(valid_files, num_parallel_reads=tf.data.\n experimental.AUTOTUNE, compression_type='GZIP')\n", (2350, 2443), True, 'import tensorflow as 
tf\n'), ((2669, 2744), 'logging.info', 'logging.info', (['"""you are using dataset for evaluation. no repeat, no shuffle"""'], {}), "('you are using dataset for evaluation. no repeat, no shuffle')\n", (2681, 2744), False, 'import logging\n'), ((6124, 6164), 'tensorflow.cast', 'tf.cast', (["aa_idx_vocab['<pad>']", 'tf.int32'], {}), "(aa_idx_vocab['<pad>'], tf.int32)\n", (6131, 6164), True, 'import tensorflow as tf\n'), ((6276, 6288), 'numpy.int32', 'np.int32', (['(20)'], {}), '(20)\n', (6284, 6288), True, 'import numpy as np\n'), ((2929, 2945), 'tensorflow.device', 'tf.device', (['"""cpu"""'], {}), "('cpu')\n", (2938, 2945), True, 'import tensorflow as tf\n'), ((2969, 3020), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example_proto', 'features'], {}), '(example_proto, features)\n', (2995, 3020), True, 'import tensorflow as tf\n'), ((3083, 3111), 'tensorflow.cast', 'tf.cast', (["eg['seq']", 'tf.int32'], {}), "(eg['seq'], tf.int32)\n", (3090, 3111), True, 'import tensorflow as tf\n'), ((3368, 3442), 'tensorflow.concat', 'tf.concat', (["[[aa_idx_vocab['<cls>']], seq, [aa_idx_vocab['<eos>']]]"], {'axis': '(0)'}), "([[aa_idx_vocab['<cls>']], seq, [aa_idx_vocab['<eos>']]], axis=0)\n", (3377, 3442), True, 'import tensorflow as tf\n'), ((3507, 3542), 'tensorflow.ones', 'tf.ones', (['(length + 2)'], {'dtype': 'tf.int32'}), '(length + 2, dtype=tf.int32)\n', (3514, 3542), True, 'import tensorflow as tf\n'), ((4358, 4386), 'tensorflow.ones', 'tf.ones', (['num_total', 'tf.int32'], {}), '(num_total, tf.int32)\n', (4365, 4386), True, 'import tensorflow as tf\n'), ((4448, 4464), 'tensorflow.identity', 'tf.identity', (['seq'], {}), '(seq)\n', (4459, 4464), True, 'import tensorflow as tf\n'), ((4814, 4869), 'tensorflow.random.uniform', 'tf.random.uniform', (['[num_replace]', '(0)', '(20)'], {'dtype': 'tf.int32'}), '([num_replace], 0, 20, dtype=tf.int32)\n', (4831, 4869), True, 'import tensorflow as tf\n'), ((5222, 5257), 'tensorflow.gather', 'tf.gather', 
(['seq', 'masked_lm_positions'], {}), '(seq, masked_lm_positions)\n', (5231, 5257), True, 'import tensorflow as tf\n'), ((3712, 3739), 'tensorflow.cast', 'tf.cast', (['num_mask', 'tf.int32'], {}), '(num_mask, tf.int32)\n', (3719, 3739), True, 'import tensorflow as tf\n'), ((3926, 3956), 'tensorflow.cast', 'tf.cast', (['num_replace', 'tf.int32'], {}), '(num_replace, tf.int32)\n', (3933, 3956), True, 'import tensorflow as tf\n'), ((4520, 4552), 'numpy.int32', 'np.int32', (["aa_idx_vocab['<mask>']"], {}), "(aa_idx_vocab['<mask>'])\n", (4528, 4552), True, 'import numpy as np\n'), ((4667, 4722), 'tensorflow.expand_dims', 'tf.expand_dims', (['masked_lm_positions[:num_mask]'], {'axis': '(-1)'}), '(masked_lm_positions[:num_mask], axis=-1)\n', (4681, 4722), True, 'import tensorflow as tf\n'), ((4983, 5060), 'tensorflow.expand_dims', 'tf.expand_dims', (['masked_lm_positions[num_mask:num_mask + num_replace]'], {'axis': '(-1)'}), '(masked_lm_positions[num_mask:num_mask + num_replace], axis=-1)\n', (4997, 5060), True, 'import tensorflow as tf\n'), ((3598, 3625), 'tensorflow.cast', 'tf.cast', (['length', 'tf.float32'], {}), '(length, tf.float32)\n', (3605, 3625), True, 'import tensorflow as tf\n'), ((3809, 3836), 'tensorflow.cast', 'tf.cast', (['length', 'tf.float32'], {}), '(length, tf.float32)\n', (3816, 3836), True, 'import tensorflow as tf\n'), ((4196, 4219), 'tensorflow.range', 'tf.range', (['(1)', '(length + 1)'], {}), '(1, length + 1)\n', (4204, 4219), True, 'import tensorflow as tf\n')] |
import numpy as np
from nnfs.initializers import zeros, he_normal
class Parameter:
    """A trainable tensor: stores the current value and an accumulated gradient."""

    def __init__(self, initial_value):
        """Wrap *initial_value* (a numpy array) and start with an all-zero gradient."""
        self.shape = initial_value.shape
        self.value = initial_value
        self.grad = np.zeros(self.shape)
class Layer:
    """Base class for network layers.

    Parameter-free layers (activations) inherit these no-op defaults;
    layers with weights override both methods.
    """

    def get_parameters(self):
        """Return the trainable Parameters owned by this layer (none by default)."""
        return list()

    def get_loss(self):
        """Return this layer's regularization-loss contribution (zero by default)."""
        return 0.0
class Linear(Layer):
    """Fully connected (dense) layer: ``output = inputs @ weights + biases``.

    Optional weight/bias regularizers contribute both a loss term
    (``get_loss``) and a gradient term (added in ``backward``).
    """

    def __init__(self, n_inputs, n_neurons,
                 weights_inititalizer=he_normal,
                 bias_initializer=zeros,
                 weights_regularizer=None,
                 bias_regularizer=None):
        """Create weights of shape (n_inputs, n_neurons) and biases of shape (1, n_neurons).

        :param n_inputs: size of each input vector
        :param n_neurons: number of output units
        :param weights_inititalizer: callable(n_inputs, n_neurons) -> ndarray
            (parameter name kept as-is, typo included, for caller compatibility)
        :param bias_initializer: callable(1, n_neurons) -> ndarray
        :param weights_regularizer: optional object with ``__call__`` and ``get_grad``
        :param bias_regularizer: optional object with ``__call__`` and ``get_grad``
        """
        self.weights = Parameter(weights_inititalizer(n_inputs, n_neurons))
        self.biases = Parameter(bias_initializer(1, n_neurons))
        self.weights_regularizer = weights_regularizer
        self.bias_regularizer = bias_regularizer

    def forward(self, inputs):
        """Apply the affine transform; *inputs* are cached for the backward pass."""
        self._cached_inputs = inputs
        return np.dot(inputs, self.weights.value) + self.biases.value

    def backward(self, grad_out):
        """Accumulate parameter gradients and return the gradient w.r.t. the inputs."""
        grad_in = np.dot(grad_out, self.weights.value.T)
        self.weights.grad = np.dot(self._cached_inputs.T, grad_out)
        if self.weights_regularizer:
            self.weights.grad += self.weights_regularizer.get_grad(self.weights.value)
        # keepdims=True keeps the bias gradient shaped (1, n_neurons), matching
        # biases.value and the zero grad set up by Parameter.__init__
        # (previously it collapsed to (n_neurons,) and relied on broadcasting).
        self.biases.grad = np.sum(grad_out, axis=0, keepdims=True)
        if self.bias_regularizer:
            self.biases.grad += self.bias_regularizer.get_grad(self.biases.value)
        return grad_in

    def get_parameters(self):
        """Return the layer's trainable parameters (weights and biases)."""
        return [self.weights, self.biases]

    def get_loss(self):
        """Return the regularization loss from weights and biases (0.0 if none set)."""
        loss = 0.0
        if self.weights_regularizer:
            loss += self.weights_regularizer(self.weights.value)
        if self.bias_regularizer:
            loss += self.bias_regularizer(self.biases.value)
        return loss
class Sigmoid(Layer):
    """Element-wise logistic activation: s(x) = 1 / (1 + exp(-x))."""

    def forward(self, inputs):
        """Apply the sigmoid; the raw inputs are cached for the backward pass."""
        self._cached_inputs = inputs
        exp_neg = np.exp(-inputs)
        return 1.0 / (1.0 + exp_neg)

    def backward(self, grad_out):
        """Chain *grad_out* through the derivative s(x) * (1 - s(x))."""
        s = 1.0 / (1.0 + np.exp(-self._cached_inputs))
        return grad_out * s * (1.0 - s)
class ReLU(Layer):
    """Rectified linear activation: element-wise max(0, x)."""

    def forward(self, inputs):
        """Clamp negative values to zero; the raw inputs are cached for backward."""
        self._cached_inputs = inputs
        return np.maximum(0.0, inputs)

    def backward(self, grad_out):
        """Pass the gradient through where the input was non-negative, zero elsewhere.

        Inputs equal to exactly 0 receive gradient 1, matching the original
        np.where(x < 0, 0, 1) convention.
        """
        return np.where(self._cached_inputs >= 0.0, grad_out, 0.0)
class Softmax(Layer):
    """Row-wise softmax over axis 1 of a 2-D batch of logits."""

    def forward(self, inputs):
        """Return per-row softmax probabilities.

        Subtracts each row's maximum before exponentiating for numerical
        stability; the probabilities are cached for the backward pass.
        """
        shifted = inputs - np.max(inputs, axis=1, keepdims=True)
        exps = np.exp(shifted)
        self._cached_outputs = exps / np.sum(exps, axis=1, keepdims=True)
        return self._cached_outputs

    def backward(self, grad_out):
        """Backpropagate through softmax: s * (g - <g, s>) per row.

        Vectorized replacement for the previous per-row Python loop that
        filled an np.empty buffer with np.dot(grad_out[i], s[i]); the math
        is identical, computed in one numpy pass.
        """
        row_dots = np.sum(grad_out * self._cached_outputs, axis=1, keepdims=True)
        return self._cached_outputs * (grad_out - row_dots)
| [
"numpy.sum",
"numpy.maximum",
"numpy.empty",
"numpy.zeros",
"numpy.max",
"numpy.where",
"numpy.exp",
"numpy.dot"
] | [((220, 249), 'numpy.zeros', 'np.zeros', (['initial_value.shape'], {}), '(initial_value.shape)\n', (228, 249), True, 'import numpy as np\n'), ((1034, 1072), 'numpy.dot', 'np.dot', (['grad_out', 'self.weights.value.T'], {}), '(grad_out, self.weights.value.T)\n', (1040, 1072), True, 'import numpy as np\n'), ((1101, 1140), 'numpy.dot', 'np.dot', (['self._cached_inputs.T', 'grad_out'], {}), '(self._cached_inputs.T, grad_out)\n', (1107, 1140), True, 'import numpy as np\n'), ((1292, 1316), 'numpy.sum', 'np.sum', (['grad_out'], {'axis': '(0)'}), '(grad_out, axis=0)\n', (1298, 1316), True, 'import numpy as np\n'), ((2168, 2191), 'numpy.maximum', 'np.maximum', (['(0.0)', 'inputs'], {}), '(0.0, inputs)\n', (2178, 2191), True, 'import numpy as np\n'), ((2245, 2290), 'numpy.where', 'np.where', (['(self._cached_inputs < 0.0)', '(0.0)', '(1.0)'], {}), '(self._cached_inputs < 0.0, 0.0, 1.0)\n', (2253, 2290), True, 'import numpy as np\n'), ((2450, 2471), 'numpy.exp', 'np.exp', (['(inputs - maxs)'], {}), '(inputs - maxs)\n', (2456, 2471), True, 'import numpy as np\n'), ((2631, 2663), 'numpy.empty', 'np.empty', (['(grad_out.shape[0], 1)'], {}), '((grad_out.shape[0], 1))\n', (2639, 2663), True, 'import numpy as np\n'), ((926, 960), 'numpy.dot', 'np.dot', (['inputs', 'self.weights.value'], {}), '(inputs, self.weights.value)\n', (932, 960), True, 'import numpy as np\n'), ((2722, 2772), 'numpy.dot', 'np.dot', (['grad_out[i, :]', 'self._cached_outputs[i, :]'], {}), '(grad_out[i, :], self._cached_outputs[i, :])\n', (2728, 2772), True, 'import numpy as np\n'), ((1911, 1926), 'numpy.exp', 'np.exp', (['(-inputs)'], {}), '(-inputs)\n', (1917, 1926), True, 'import numpy as np\n'), ((1990, 2018), 'numpy.exp', 'np.exp', (['(-self._cached_inputs)'], {}), '(-self._cached_inputs)\n', (1996, 2018), True, 'import numpy as np\n'), ((2395, 2417), 'numpy.max', 'np.max', (['inputs'], {'axis': '(1)'}), '(inputs, axis=1)\n', (2401, 2417), True, 'import numpy as np\n'), ((2510, 2530), 'numpy.sum', 
'np.sum', (['exps'], {'axis': '(1)'}), '(exps, axis=1)\n', (2516, 2530), True, 'import numpy as np\n')] |
import os
from os import path as osp
import numpy as np
from tqdm import tqdm
import pickle
import cv2
import torch
from experiments.service.benchmark_base import Benchmark
from experiments.service.ldd_factory import LocalDetectorDescriptor
from experiments.service.matchers_factory import MatchersFactory
from experiments.service.utils import compute_homography_error
def warp_keypoints(keypoints, H):
    """Apply a homography to 2-D keypoints.

    Parameters
    ----------
    keypoints: numpy.ndarray (N,2)
        Keypoint coordinates.
    H: numpy.ndarray (3,3)
        Homography matrix.

    Returns
    -------
    numpy.ndarray (N,2)
        The warped keypoints, dehomogenized.
    """
    ones = np.ones((keypoints.shape[0], 1))
    homogeneous = np.concatenate([keypoints, ones], axis=1)
    projected = homogeneous @ H.T
    # Divide by the homogeneous coordinate to get back to 2-D.
    return projected[:, :2] / projected[:, 2:]
def scale_homography(homography, original_scale, new_scale, pre):
    """Rescale a homography for images resized from *original_scale* to *new_scale*.

    With ``pre=True`` the scaling is applied on the output side
    (left-multiplied); with ``pre=False`` the inverse scaling is applied on
    the input side (right-multiplied). Scales are (width, height) pairs.
    """
    ratio = np.divide(new_scale, original_scale)
    if pre:
        scale_mtx = np.diag(np.append(ratio, 1.0))
        return scale_mtx @ homography
    inv_scale_mtx = np.diag(np.append(1.0 / ratio, 1.0))
    return homography @ inv_scale_mtx
class HPSequenceBenchmark(Benchmark):
    """HPatches-sequences benchmark: extracts local features, matches them
    across image pairs and reports PCK, homography-estimation accuracy and
    matching score (see :meth:`evaluate`)."""

    def __init__(self, cfg):
        """Set up paths, matcher, detector/descriptor model and result accumulators.

        :param cfg: configuration object; reads ``cfg.task.task_params`` and
            ``cfg.matcher``.
        """
        super().__init__(cfg)
        # Root folder of the HPatches-sequences images and the directory with
        # precomputed per-image features (pickled keypoints/descriptors).
        self.hpsequences_path = self.cfg.task.task_params.paths.img_path
        self.preds = self.cfg.task.task_params.output.precomputed_feats_dir
        # Scenes with a very large image resolution (need to be removed)
        self.outliers = [
            "i_contruction",
            "i_crownnight",
            "i_dc",
            "i_pencils",
            "i_whitebuilding",
            "v_artisans",
            "v_astronautis",
            "v_talent",
        ]
        # Matcher
        self.matcher = MatchersFactory(cfg.matcher).get_matcher()
        # Local detector-descriptor
        self.ldd_model = LocalDetectorDescriptor(self.cfg)
        # Matched keypoint positions per image pair, keyed by
        # seq_name + "1_" + img_id (filled by _get_kpts_and_matches).
        self.kpts_matches = {}
        # Per-split counters: "i" = illumination scenes, "v" = viewpoint scenes.
        self.stats_feat_matches = {
            "i": {"feat": [], "matches": []},
            "v": {"feat": [], "matches": []},
        }
        self.n_illum = 0
        self.n_viewpnt = 0
        # RANSAC settings used by h_mtx_estimation_benchmark.
        self.h_mtx_ransac_params = {
            "thr_px": 3.0,
            "max_iters": 5000,
            "confidence": 0.9995,
        }
    def _get_kpts_and_matches(self):
        """Load precomputed features, match image 1 against images 2-6 per scene.

        Fills ``self.kpts_matches`` with matched keypoint positions, updates
        the per-split feature/match statistics and the scene counters.
        Returns True on completion.
        """
        print("Let us find matches...")
        for seq_name in sorted(os.listdir(self.preds)):
            if seq_name in set(self.outliers):
                continue
            # Scene names start with "i" (illumination) or "v" (viewpoint).
            if seq_name[0] == "i":
                self.n_illum += 1
            else:
                self.n_viewpnt += 1
            # Reference image features (image 1 of the sequence).
            with open(osp.join(self.preds, seq_name, "1.pkl"), "rb") as f:
                data = pickle.load(f)
            keypoints_a, descriptor_a = data["kpts"], data["descs"]
            for img_id in range(2, 7):
                with open(
                    osp.join(self.preds, seq_name, f"{img_id}.pkl"), "rb"
                ) as f:
                    data = pickle.load(f)
                keypoints_b, descriptor_b = data["kpts"], data["descs"]
                self.stats_feat_matches[seq_name[0]]["feat"].append(
                    keypoints_a.shape[0]
                )
                self.stats_feat_matches[seq_name[0]]["feat"].append(
                    keypoints_b.shape[0]
                )
                # self.device presumably comes from the Benchmark base class
                # — TODO confirm.
                matches = self.matcher.match(
                    torch.from_numpy(descriptor_a).to(self.device),
                    torch.from_numpy(descriptor_b).to(self.device),
                )
                self.stats_feat_matches[seq_name[0]]["matches"].append(
                    matches.shape[0]
                )
                # Columns 0/1 of `matches` index keypoints_a / keypoints_b.
                pos_a = keypoints_a[matches[:, 0], :2]
                pos_b = keypoints_b[matches[:, 1], :2]
                self.kpts_matches[seq_name + "1_" + str(img_id)] = {
                    "pos_a": pos_a,
                    "pos_b": pos_b,
                }
        print("Let us find matches... Done!")
        return True
def h_mtx_estimation_benchmark(self, h_thresh):
h_mtx_res = {"i": [], "v": []}
for seq_name in sorted(os.listdir(self.hpsequences_path)):
if seq_name in set(self.outliers):
continue
for img_id in range(2, 7):
img = cv2.imread(
os.path.join(
self.hpsequences_path, seq_name, str(img_id) + ".ppm"
),
-1,
)
h, w, _ = img.shape
h_gt = np.loadtxt(
os.path.join(
self.hpsequences_path, seq_name, "H_1_" + str(img_id)
)
)
pos_a = self.kpts_matches[seq_name + "1_" + str(img_id)][
"pos_a"
]
pos_b = self.kpts_matches[seq_name + "1_" + str(img_id)][
"pos_b"
]
h_est, _ = cv2.findHomography(
pos_a,
pos_b,
cv2.RANSAC,
self.h_mtx_ransac_params["thr_px"],
maxIters=self.h_mtx_ransac_params["max_iters"],
confidence=self.h_mtx_ransac_params["confidence"],
)
if h_est is None:
print("No homography found! Sequence name: ", seq_name)
sys.exit()
error_h = compute_homography_error(h_est, h_gt, h, w)
correct = (
(error_h < h_thresh) if error_h is not None else False
)
h_mtx_res[seq_name[0]].append(correct)
return h_mtx_res
def _keep_shared_points(self, keypoints, descriptors, H, shape):
"""
Compute a list of keypoints from the map, filter the list of points by keeping
only the points that once mapped by H are still inside the shape of the map
and keep at most 'keep_k_points' keypoints in the image.
Parameters
----------
keypoints: numpy.ndarray (N,3)
Keypoint vector, consisting of (x,y,probability).
descriptors: numpy.ndarray (N,256)
Keypoint descriptors.
H: numpy.ndarray (3,3)
Homography.
shape: tuple
Image shape.
keep_k_points: int
Number of keypoints to select, based on probability.
Returns
-------
selected_points: numpy.ndarray (k,2)
k most probable keypoints.
selected_descriptors: numpy.ndarray (k,256)
Descriptors corresponding to the k most probable keypoints.
"""
def _keep_true_keypoints(points, descriptors, H, shape):
"""Keep only the points whose warped coordinates by H are still inside shape."""
warped_points = warp_keypoints(points[:, :2], H)
mask = (
(warped_points[:, 0] >= 0)
& (warped_points[:, 0] < shape[0])
& (warped_points[:, 1] >= 0)
& (warped_points[:, 1] < shape[1])
)
return points[mask, :], descriptors[mask, :]
selected_keypoints, selected_descriptors = _keep_true_keypoints(
keypoints, descriptors, H, shape
)
"""
selected_keypoints, selected_descriptors = select_k_best(selected_keypoints, selected_descriptors,
keep_k_points)
"""
return selected_keypoints, selected_descriptors
    def h_mtx_estimation_benchmark_upd(self, fnames, resize_480x640=True):
        """Homography accuracy at 1/3/5-pixel corner-error thresholds.

        Re-extracts features at a fixed 640x480 resolution, matches them with
        a cross-checked brute-force matcher, estimates a homography per pair
        with RANSAC, and scores it by mean corner-transfer error against the
        (rescaled) ground truth.

        :param fnames: image file names passed to the detector/descriptor.
        :param resize_480x640: forwarded to the feature extractor.
        :return: ``{"i"/"v": {"t1"/"t3"/"t5": [0/1 per pair]}}``.
        """
        res = {
            "i": {"t1": [], "t3": [], "t5": []},
            "v": {"t1": [], "t3": [], "t5": []},
        }
        # Re-create local detector-descriptor
        self.cfg.task.task_params.detector.max_keypoints_480x640 = 1000
        self.ldd_model = LocalDetectorDescriptor(self.cfg)
        print(
            f"Let us extract keypoints and local descriptors from images with 640x480"
        )
        self.ldd_model.evaluate(
            fnames, bbxs=None, resize_480x640=resize_480x640
        )
        fname_prefix = f"wh_480x640.pkl"
        output_shape_wh = (640, 480)
        seq_names = os.listdir(self.hpsequences_path)
        for _, seq_name in enumerate(tqdm(seq_names, total=len(seq_names))):
            if seq_name in set(self.outliers):
                continue
            with open(
                osp.join(self.preds, seq_name, f"1_{fname_prefix}"), "rb"
            ) as f:
                data = pickle.load(f)
            keypoints_a, descriptor_a = data["kpts"], data["descs"]
            # Original images are loaded only for their sizes, used below to
            # rescale the ground-truth homography to 640x480 coordinates.
            img1 = cv2.imread(
                os.path.join(self.hpsequences_path, seq_name, "1.ppm"), -1
            )
            for img_id in range(2, 7):
                with open(
                    osp.join(self.preds, seq_name, f"{img_id}_{fname_prefix}"),
                    "rb",
                ) as f:
                    data = pickle.load(f)
                keypoints_b, descriptor_b = data["kpts"], data["descs"]
                img2 = cv2.imread(
                    os.path.join(
                        self.hpsequences_path, seq_name, str(img_id) + ".ppm"
                    ),
                    -1,
                )
                h_gt = np.loadtxt(
                    os.path.join(
                        self.hpsequences_path, seq_name, "H_1_" + str(img_id)
                    )
                )
                # Rescale h_gt: input side from img1's size, output side from
                # img2's size, both to the fixed 640x480 working resolution.
                h_gt = scale_homography(
                    h_gt,
                    img1.shape[:2][::-1],
                    new_scale=output_shape_wh,
                    pre=False,
                )
                h_gt = scale_homography(
                    h_gt,
                    img2.shape[:2][::-1],
                    new_scale=output_shape_wh,
                    pre=True,
                )
                # Keeps only the points shared between the two views
                kpts_a, descs_a = self._keep_shared_points(
                    keypoints_a, descriptor_a, h_gt, output_shape_wh
                )
                kpts_b, descs_b = self._keep_shared_points(
                    keypoints_b,
                    descriptor_b,
                    np.linalg.inv(h_gt),
                    output_shape_wh,
                )
                bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
                matches = bf.match(descs_a, descs_b)
                matches_idx = np.array([m.queryIdx for m in matches])
                m_keypoints = kpts_a[matches_idx, :]
                matches_idx = np.array([m.trainIdx for m in matches])
                m_warped_keypoints = kpts_b[matches_idx, :]
                # findHomography needs at least 4 correspondences.
                if m_keypoints.shape[0] < 4 or m_warped_keypoints.shape[0] < 4:
                    res[seq_name[0]]["t1"].append(0)
                    res[seq_name[0]]["t3"].append(0)
                    res[seq_name[0]]["t5"].append(0)
                    continue
                # Estimate the homography between the matches using RANSAC
                H, mask = cv2.findHomography(
                    m_keypoints,
                    m_warped_keypoints,
                    cv2.RANSAC,
                    3,
                    maxIters=5000,
                )
                if H is None:
                    res[seq_name[0]]["t1"].append(0)
                    res[seq_name[0]]["t3"].append(0)
                    res[seq_name[0]]["t5"].append(0)
                    continue
                # Compute correctness
                corners = np.array(
                    [
                        [0, 0, 1],
                        [0, output_shape_wh[1] - 1, 1],
                        [output_shape_wh[0] - 1, 0, 1],
                        [output_shape_wh[0] - 1, output_shape_wh[1] - 1, 1],
                    ]
                )
                real_warped_corners = np.dot(corners, np.transpose(h_gt))
                real_warped_corners = (
                    real_warped_corners[:, :2] / real_warped_corners[:, 2:]
                )
                warped_corners = np.dot(corners, np.transpose(H))
                warped_corners = warped_corners[:, :2] / warped_corners[:, 2:]
                # Mean distance between GT-warped and estimated-warped corners.
                mean_dist = np.mean(
                    np.linalg.norm(
                        real_warped_corners - warped_corners, axis=1
                    )
                )
                c1 = float(mean_dist <= 1)
                c3 = float(mean_dist <= 3)
                c5 = float(mean_dist <= 5)
                res[seq_name[0]]["t1"].append(c1)
                res[seq_name[0]]["t3"].append(c3)
                res[seq_name[0]]["t5"].append(c5)
        return res
    def matching_score_benchmark(self, fnames, resize_480x640=True):
        """Symmetric matching score per image pair at a 3-pixel threshold.

        Re-extracts features at a fixed 640x480 resolution, matches A->B and
        B->A without cross-check, and scores the fraction of matches that
        land within 3 px of their ground-truth-warped position among visible
        points; the two directions are averaged.

        :param fnames: image file names passed to the detector/descriptor.
        :param resize_480x640: forwarded to the feature extractor.
        :return: ``{"i": [score, ...], "v": [score, ...]}`` per pair.
        """
        res = {"i": [], "v": []}
        # Re-create local detector-descriptor
        self.cfg.task.task_params.detector.max_keypoints_480x640 = 1000
        self.ldd_model = LocalDetectorDescriptor(self.cfg)
        output_shape_wh = (640, 480)
        print(
            f"Let us extract keypoints and local descriptors from images with {output_shape_wh}"
        )
        self.ldd_model.evaluate(
            fnames, bbxs=None, resize_480x640=resize_480x640
        )
        fname_prefix = f"wh_480x640.pkl"
        seq_names = os.listdir(self.hpsequences_path)
        for _, seq_name in enumerate(tqdm(seq_names, total=len(seq_names))):
            if seq_name in set(self.outliers):
                continue
            with open(
                osp.join(self.preds, seq_name, f"1_{fname_prefix}"), "rb"
            ) as f:
                data = pickle.load(f)
            keypoints_a, descriptor_a = data["kpts"], data["descs"]
            # Original images are loaded only for their sizes, used to rescale
            # the ground-truth homography to 640x480 coordinates.
            img1 = cv2.imread(
                os.path.join(self.hpsequences_path, seq_name, "1.ppm"), -1
            )
            for img_id in range(2, 7):
                with open(
                    osp.join(self.preds, seq_name, f"{img_id}_{fname_prefix}"),
                    "rb",
                ) as f:
                    data = pickle.load(f)
                keypoints_b, descriptor_b = data["kpts"], data["descs"]
                img2 = cv2.imread(
                    os.path.join(
                        self.hpsequences_path, seq_name, str(img_id) + ".ppm"
                    ),
                    -1,
                )
                h_gt = np.loadtxt(
                    os.path.join(
                        self.hpsequences_path, seq_name, "H_1_" + str(img_id)
                    )
                )
                h_gt = scale_homography(
                    h_gt,
                    img1.shape[:2][::-1],
                    new_scale=output_shape_wh,
                    pre=False,
                )
                h_gt = scale_homography(
                    h_gt,
                    img2.shape[:2][::-1],
                    new_scale=output_shape_wh,
                    pre=True,
                )
                # This part needs to be done with crossCheck=False.
                # All the matched pairs need to be evaluated without any selection.
                bf = cv2.BFMatcher(cv2.NORM_L2)
                matches = bf.match(descriptor_a, descriptor_b)
                matches_idx = np.array([m.queryIdx for m in matches])
                m_keypoints = keypoints_a[matches_idx, :]
                matches_idx = np.array([m.trainIdx for m in matches])
                m_warped_keypoints = keypoints_b[matches_idx, :]
                # Direction 1: warp B's matches back into image 1 and compare.
                true_warped_keypoints = warp_keypoints(
                    m_warped_keypoints, np.linalg.inv(h_gt)
                )
                vis_warped = np.all(
                    (true_warped_keypoints >= 0)
                    & (
                        true_warped_keypoints
                        <= (np.array(output_shape_wh) - 1)
                    ),
                    axis=-1,
                )
                norm1 = np.linalg.norm(
                    true_warped_keypoints - m_keypoints, axis=-1
                )
                correct1 = norm1 < 3
                count1 = np.sum(correct1 * vis_warped)
                score1 = count1 / np.maximum(np.sum(vis_warped), 1.0)
                # Direction 2: match B->A and warp A's matches into image 2.
                matches = bf.match(descriptor_b, descriptor_a)
                matches_idx = np.array([m.queryIdx for m in matches])
                m_warped_keypoints = keypoints_b[matches_idx, :]
                matches_idx = np.array([m.trainIdx for m in matches])
                m_keypoints = keypoints_a[matches_idx, :]
                true_keypoints = warp_keypoints(m_keypoints, h_gt)
                vis = np.all(
                    (true_keypoints >= 0)
                    & (true_keypoints <= (np.array(output_shape_wh) - 1)),
                    axis=-1,
                )
                norm2 = np.linalg.norm(
                    true_keypoints - m_warped_keypoints, axis=-1
                )
                correct2 = norm2 < 3
                count2 = np.sum(correct2 * vis)
                score2 = count2 / np.maximum(np.sum(vis), 1.0)
                ms = (score1 + score2) / 2
                res[seq_name[0]].append(ms)
        return res
    def pck_benchmark(self, pck_thresholds):
        """Percentage of Correct Keypoints over the precomputed matches.

        For each image pair, projects the matched reference keypoints with the
        ground-truth homography and accumulates, per threshold, the fraction
        of matches whose reprojection error is within that threshold.

        :param pck_thresholds: iterable of pixel thresholds.
        :return: ``{"i"/"v": {threshold: accumulated fraction}}`` — sums over
            pairs; callers divide by the number of pairs to get an average.
        """
        pck_res = {
            "i": {thr: 0 for thr in pck_thresholds},
            "v": {thr: 0 for thr in pck_thresholds},
        }
        for seq_name in sorted(os.listdir(self.hpsequences_path)):
            if seq_name in self.outliers:
                continue
            for img_id in range(2, 7):
                pos_a = self.kpts_matches[seq_name + "1_" + str(img_id)][
                    "pos_a"
                ]
                pos_b = self.kpts_matches[seq_name + "1_" + str(img_id)][
                    "pos_b"
                ]
                h_gt = np.loadtxt(
                    osp.join(
                        self.hpsequences_path, seq_name, "H_1_" + str(img_id)
                    )
                )
                # Homogeneous coordinates for pos_a, projected with h_gt.
                pos_a_h = np.concatenate(
                    [pos_a, np.ones([pos_a.shape[0], 1])], axis=1
                )
                pos_b_proj_h = np.dot(h_gt, pos_a_h.T).T
                pos_b_proj = pos_b_proj_h[:, :2] / pos_b_proj_h[:, -1, None]
                dist = np.sqrt(
                    np.sum((pos_b - pos_b_proj[:, :2]) ** 2, axis=1)
                )
                # No matches at all counts as fully incorrect for every threshold.
                if dist.shape[0] == 0:
                    dist = np.array([float("inf")])
                for thr in pck_thresholds:
                    pck_res["i" if seq_name[0] == "i" else "v"][
                        thr
                    ] += np.mean(dist <= thr)
        return pck_res
def get_dataset_stats(self):
return {
"n_illum_scenes": self.n_illum,
"n_viewpnt_scenes": self.n_viewpnt,
"illum_feat": np.array(self.stats_feat_matches["i"]["feat"]),
"illum_matches": np.array(self.stats_feat_matches["i"]["matches"]),
"viewpnt_feat": np.array(self.stats_feat_matches["v"]["feat"]),
"viewpnt_matches": np.array(
self.stats_feat_matches["v"]["matches"]
),
}
    def evaluate(self):
        """Run the full benchmark: extract features, match, score, and report.

        Collects all scene images (skipping outliers), computes features and
        matches, then prints PCK, homography-estimation and matching-score
        metrics and writes a summary to the configured results text file.
        """
        # prepare a dataset (create a list of images: fnames -> seqName_imgName.ppm)
        fnames = []
        folder_name = self.cfg.task.task_params.paths.img_path
        for root, dirs, files in os.walk(folder_name):
            if root.split("/")[-1] in self.outliers:
                continue
            if files:
                fnames_per_scene = [
                    osp.join(root, fname)
                    for fname in files
                    if fname.endswith("ppm")
                ]
                fnames.extend(fnames_per_scene)
        # Let us extract image features
        self.ldd_model.evaluate(fnames)
        # compute matches
        self._get_kpts_and_matches()
        # NOTE(review): these initial entries use *type objects* (int/float)
        # as placeholder keys/values; they are overwritten/extended below and
        # never removed — presumably intended only as a shape hint.
        h_bench_out = {
            int: {"acc_v": float, "acc_i": float, "acc_total": float}
        }
        pck_bench_out = {int: {"v": float, "i": float, "avg": float}}
        # 5 image pairs per scene (images 2..6 matched against image 1).
        n_i = self.n_illum * 5
        n_v = self.n_viewpnt * 5
        feat_i, feat_v = (
            self.get_dataset_stats()["illum_feat"],
            self.get_dataset_stats()["viewpnt_feat"],
        )
        matches_i, matches_v = (
            self.get_dataset_stats()["illum_matches"],
            self.get_dataset_stats()["viewpnt_matches"],
        )
        print(
            "# Features [i]: {:f} - [{:d}, {:d}]".format(
                np.mean(feat_i), np.min(feat_i), np.max(feat_i)
            )
        )
        print(
            "# Features [v]: {:f} - [{:d}, {:d}]".format(
                np.mean(feat_v), np.min(feat_v), np.max(feat_v)
            )
        )
        print(
            "# Matches: Overall {:f}, Illumination {:f}, Viewpoint {:f}".format(
                (np.sum(matches_i) + np.sum(matches_v)) / (n_i + n_v),
                np.sum(matches_i) / n_i,
                np.sum(matches_v) / n_v,
            )
        )
        # PCK metrics
        pck_thresholds = self.cfg.task.task_params.pck_thresholds
        pck_res = self.pck_benchmark(pck_thresholds)
        for pck_thr in self.cfg.task.task_params.pck_thresholds:
            print(
                "MMA@" + str(pck_thr) + " [v]: ", pck_res["v"][pck_thr] / n_v
            )
            print(
                "MMA@" + str(pck_thr) + " [i]: ", pck_res["i"][pck_thr] / n_i
            )
            avg = 0.5 * (
                pck_res["v"][pck_thr] / n_v + pck_res["i"][pck_thr] / n_i
            )
            print("MMA@" + str(pck_thr) + " [avg]: ", avg)
            print(11 * "*")
            pck_bench_out[pck_thr] = {
                "v": pck_res["v"][pck_thr] / n_v,
                "i": pck_res["i"][pck_thr] / n_i,
                "avg": avg,
            }
        print(22 * "-")
        # Homography metrics
        h_mtx_thresholds = self.cfg.task.task_params.h_mtx_thresholds
        for h_thr in h_mtx_thresholds:
            error_h = self.h_mtx_estimation_benchmark(h_thr)
            print("h_threshold: ", h_thr)
            print("Accuracy (viewpoint): ", np.mean(error_h["v"]))
            print("Accuracy (illumination): ", np.mean(error_h["i"]))
            print("Accuracy total: ", np.mean(error_h["i"] + error_h["v"]))
            print(11 * "*")
            h_bench_out[h_thr] = {
                "acc_v": np.mean(error_h["v"]),
                "acc_i": np.mean(error_h["i"]),
                "acc_total": np.mean(error_h["i"] + error_h["v"]),
            }
        # Fixed-resolution homography benchmark at 1/3/5-pixel thresholds.
        h_mtx_res = self.h_mtx_estimation_benchmark_upd(fnames)
        for th in ["t1", "t3", "t5"]:
            print(
                f'th: {np.asarray(h_mtx_res["v"][th]).mean()} / '
                f'{np.asarray(h_mtx_res["i"][th]).mean()} / '
                f'{np.asarray(h_mtx_res["i"][th] + h_mtx_res["v"][th]).mean()}'
            )
        print(22 * "-")
        matching_res = self.matching_score_benchmark(fnames)
        print(f"Matching score: ")
        print(
            f'{np.asarray(matching_res["v"]).mean()} / '
            f'{np.asarray(matching_res["i"]).mean()} / '
            f'{np.asarray(matching_res["i"] + matching_res["v"]).mean()}'
        )
        # write results to the file
        with open(self.cfg.task.task_params.output.res_txt_fname, "w") as f:
            f.write(f"PCK benchmark:\n")
            for pck_thr in pck_thresholds:
                f.write(
                    f"MMA@{pck_thr:d} v/i/avg:"
                    f" {pck_bench_out[pck_thr]['v']:05.3f} / {pck_bench_out[pck_thr]['i']:05.3f} / "
                    f"{pck_bench_out[pck_thr]['avg']:05.3f}\n"
                )
            f.write(f"Homography benchmark:\n")
            for h_thr in h_mtx_thresholds:
                f.write(
                    f"th: {h_thr:d} v/i/avg:"
                    f" {h_bench_out[h_thr]['acc_v']:05.3f} / {h_bench_out[h_thr]['acc_i']:05.3f} / "
                    f"{h_bench_out[h_thr]['acc_total']:05.3f}\n"
                )
| [
"numpy.sum",
"experiments.service.ldd_factory.LocalDetectorDescriptor",
"os.walk",
"numpy.ones",
"pickle.load",
"numpy.linalg.norm",
"numpy.mean",
"os.path.join",
"experiments.service.utils.compute_homography_error",
"cv2.BFMatcher",
"numpy.transpose",
"numpy.append",
"numpy.max",
"experim... | [((1028, 1064), 'numpy.divide', 'np.divide', (['new_scale', 'original_scale'], {}), '(new_scale, original_scale)\n', (1037, 1064), True, 'import numpy as np\n'), ((875, 890), 'numpy.transpose', 'np.transpose', (['H'], {}), '(H)\n', (887, 890), True, 'import numpy as np\n'), ((1142, 1166), 'numpy.matmul', 'np.matmul', (['s', 'homography'], {}), '(s, homography)\n', (1151, 1166), True, 'import numpy as np\n'), ((1251, 1278), 'numpy.matmul', 'np.matmul', (['homography', 'sinv'], {}), '(homography, sinv)\n', (1260, 1278), True, 'import numpy as np\n'), ((2018, 2051), 'experiments.service.ldd_factory.LocalDetectorDescriptor', 'LocalDetectorDescriptor', (['self.cfg'], {}), '(self.cfg)\n', (2041, 2051), False, 'from experiments.service.ldd_factory import LocalDetectorDescriptor\n'), ((8061, 8094), 'experiments.service.ldd_factory.LocalDetectorDescriptor', 'LocalDetectorDescriptor', (['self.cfg'], {}), '(self.cfg)\n', (8084, 8094), False, 'from experiments.service.ldd_factory import LocalDetectorDescriptor\n'), ((8411, 8444), 'os.listdir', 'os.listdir', (['self.hpsequences_path'], {}), '(self.hpsequences_path)\n', (8421, 8444), False, 'import os\n'), ((13109, 13142), 'experiments.service.ldd_factory.LocalDetectorDescriptor', 'LocalDetectorDescriptor', (['self.cfg'], {}), '(self.cfg)\n', (13132, 13142), False, 'from experiments.service.ldd_factory import LocalDetectorDescriptor\n'), ((13469, 13502), 'os.listdir', 'os.listdir', (['self.hpsequences_path'], {}), '(self.hpsequences_path)\n', (13479, 13502), False, 'import os\n'), ((19503, 19523), 'os.walk', 'os.walk', (['folder_name'], {}), '(folder_name)\n', (19510, 19523), False, 'import os\n'), ((787, 811), 'numpy.ones', 'np.ones', (['(num_points, 1)'], {}), '((num_points, 1))\n', (794, 811), True, 'import numpy as np\n'), ((1097, 1119), 'numpy.append', 'np.append', (['scales', '(1.0)'], {}), '(scales, 1.0)\n', (1106, 1119), True, 'import numpy as np\n'), ((1200, 1228), 'numpy.append', 'np.append', (['(1.0 / 
scales)', '(1.0)'], {}), '(1.0 / scales, 1.0)\n', (1209, 1228), True, 'import numpy as np\n'), ((2525, 2547), 'os.listdir', 'os.listdir', (['self.preds'], {}), '(self.preds)\n', (2535, 2547), False, 'import os\n'), ((4255, 4288), 'os.listdir', 'os.listdir', (['self.hpsequences_path'], {}), '(self.hpsequences_path)\n', (4265, 4288), False, 'import os\n'), ((17536, 17569), 'os.listdir', 'os.listdir', (['self.hpsequences_path'], {}), '(self.hpsequences_path)\n', (17546, 17569), False, 'import os\n'), ((18950, 18996), 'numpy.array', 'np.array', (["self.stats_feat_matches['i']['feat']"], {}), "(self.stats_feat_matches['i']['feat'])\n", (18958, 18996), True, 'import numpy as np\n'), ((19027, 19076), 'numpy.array', 'np.array', (["self.stats_feat_matches['i']['matches']"], {}), "(self.stats_feat_matches['i']['matches'])\n", (19035, 19076), True, 'import numpy as np\n'), ((19106, 19152), 'numpy.array', 'np.array', (["self.stats_feat_matches['v']['feat']"], {}), "(self.stats_feat_matches['v']['feat'])\n", (19114, 19152), True, 'import numpy as np\n'), ((19185, 19234), 'numpy.array', 'np.array', (["self.stats_feat_matches['v']['matches']"], {}), "(self.stats_feat_matches['v']['matches'])\n", (19193, 19234), True, 'import numpy as np\n'), ((1913, 1941), 'experiments.service.matchers_factory.MatchersFactory', 'MatchersFactory', (['cfg.matcher'], {}), '(cfg.matcher)\n', (1928, 1941), False, 'from experiments.service.matchers_factory import MatchersFactory\n'), ((2845, 2859), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2856, 2859), False, 'import pickle\n'), ((5106, 5295), 'cv2.findHomography', 'cv2.findHomography', (['pos_a', 'pos_b', 'cv2.RANSAC', "self.h_mtx_ransac_params['thr_px']"], {'maxIters': "self.h_mtx_ransac_params['max_iters']", 'confidence': "self.h_mtx_ransac_params['confidence']"}), "(pos_a, pos_b, cv2.RANSAC, self.h_mtx_ransac_params[\n 'thr_px'], maxIters=self.h_mtx_ransac_params['max_iters'], confidence=\n self.h_mtx_ransac_params['confidence'])\n", 
(5124, 5295), False, 'import cv2\n'), ((5593, 5636), 'experiments.service.utils.compute_homography_error', 'compute_homography_error', (['h_est', 'h_gt', 'h', 'w'], {}), '(h_est, h_gt, h, w)\n', (5617, 5636), False, 'from experiments.service.utils import compute_homography_error\n'), ((8735, 8749), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8746, 8749), False, 'import pickle\n'), ((8866, 8920), 'os.path.join', 'os.path.join', (['self.hpsequences_path', 'seq_name', '"""1.ppm"""'], {}), "(self.hpsequences_path, seq_name, '1.ppm')\n", (8878, 8920), False, 'import os\n'), ((10524, 10567), 'cv2.BFMatcher', 'cv2.BFMatcher', (['cv2.NORM_L2'], {'crossCheck': '(True)'}), '(cv2.NORM_L2, crossCheck=True)\n', (10537, 10567), False, 'import cv2\n'), ((10651, 10690), 'numpy.array', 'np.array', (['[m.queryIdx for m in matches]'], {}), '([m.queryIdx for m in matches])\n', (10659, 10690), True, 'import numpy as np\n'), ((10774, 10813), 'numpy.array', 'np.array', (['[m.trainIdx for m in matches]'], {}), '([m.trainIdx for m in matches])\n', (10782, 10813), True, 'import numpy as np\n'), ((11245, 11331), 'cv2.findHomography', 'cv2.findHomography', (['m_keypoints', 'm_warped_keypoints', 'cv2.RANSAC', '(3)'], {'maxIters': '(5000)'}), '(m_keypoints, m_warped_keypoints, cv2.RANSAC, 3, maxIters\n =5000)\n', (11263, 11331), False, 'import cv2\n'), ((11730, 11873), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, output_shape_wh[1] - 1, 1], [output_shape_wh[0] - 1, 0, 1],\n [output_shape_wh[0] - 1, output_shape_wh[1] - 1, 1]]'], {}), '([[0, 0, 1], [0, output_shape_wh[1] - 1, 1], [output_shape_wh[0] - \n 1, 0, 1], [output_shape_wh[0] - 1, output_shape_wh[1] - 1, 1]])\n', (11738, 11873), True, 'import numpy as np\n'), ((13793, 13807), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (13804, 13807), False, 'import pickle\n'), ((13924, 13978), 'os.path.join', 'os.path.join', (['self.hpsequences_path', 'seq_name', '"""1.ppm"""'], {}), "(self.hpsequences_path, seq_name, '1.ppm')\n", 
(13936, 13978), False, 'import os\n'), ((15293, 15319), 'cv2.BFMatcher', 'cv2.BFMatcher', (['cv2.NORM_L2'], {}), '(cv2.NORM_L2)\n', (15306, 15319), False, 'import cv2\n'), ((15413, 15452), 'numpy.array', 'np.array', (['[m.queryIdx for m in matches]'], {}), '([m.queryIdx for m in matches])\n', (15421, 15452), True, 'import numpy as np\n'), ((15541, 15580), 'numpy.array', 'np.array', (['[m.trainIdx for m in matches]'], {}), '([m.trainIdx for m in matches])\n', (15549, 15580), True, 'import numpy as np\n'), ((16090, 16150), 'numpy.linalg.norm', 'np.linalg.norm', (['(true_warped_keypoints - m_keypoints)'], {'axis': '(-1)'}), '(true_warped_keypoints - m_keypoints, axis=-1)\n', (16104, 16150), True, 'import numpy as np\n'), ((16252, 16281), 'numpy.sum', 'np.sum', (['(correct1 * vis_warped)'], {}), '(correct1 * vis_warped)\n', (16258, 16281), True, 'import numpy as np\n'), ((16446, 16485), 'numpy.array', 'np.array', (['[m.queryIdx for m in matches]'], {}), '([m.queryIdx for m in matches])\n', (16454, 16485), True, 'import numpy as np\n'), ((16581, 16620), 'numpy.array', 'np.array', (['[m.trainIdx for m in matches]'], {}), '([m.trainIdx for m in matches])\n', (16589, 16620), True, 'import numpy as np\n'), ((16965, 17025), 'numpy.linalg.norm', 'np.linalg.norm', (['(true_keypoints - m_warped_keypoints)'], {'axis': '(-1)'}), '(true_keypoints - m_warped_keypoints, axis=-1)\n', (16979, 17025), True, 'import numpy as np\n'), ((17127, 17149), 'numpy.sum', 'np.sum', (['(correct2 * vis)'], {}), '(correct2 * vis)\n', (17133, 17149), True, 'import numpy as np\n'), ((20627, 20642), 'numpy.mean', 'np.mean', (['feat_i'], {}), '(feat_i)\n', (20634, 20642), True, 'import numpy as np\n'), ((20644, 20658), 'numpy.min', 'np.min', (['feat_i'], {}), '(feat_i)\n', (20650, 20658), True, 'import numpy as np\n'), ((20660, 20674), 'numpy.max', 'np.max', (['feat_i'], {}), '(feat_i)\n', (20666, 20674), True, 'import numpy as np\n'), ((20788, 20803), 'numpy.mean', 'np.mean', (['feat_v'], {}), 
'(feat_v)\n', (20795, 20803), True, 'import numpy as np\n'), ((20805, 20819), 'numpy.min', 'np.min', (['feat_v'], {}), '(feat_v)\n', (20811, 20819), True, 'import numpy as np\n'), ((20821, 20835), 'numpy.max', 'np.max', (['feat_v'], {}), '(feat_v)\n', (20827, 20835), True, 'import numpy as np\n'), ((22256, 22277), 'numpy.mean', 'np.mean', (["error_h['v']"], {}), "(error_h['v'])\n", (22263, 22277), True, 'import numpy as np\n'), ((22326, 22347), 'numpy.mean', 'np.mean', (["error_h['i']"], {}), "(error_h['i'])\n", (22333, 22347), True, 'import numpy as np\n'), ((22387, 22423), 'numpy.mean', 'np.mean', (["(error_h['i'] + error_h['v'])"], {}), "(error_h['i'] + error_h['v'])\n", (22394, 22423), True, 'import numpy as np\n'), ((22513, 22534), 'numpy.mean', 'np.mean', (["error_h['v']"], {}), "(error_h['v'])\n", (22520, 22534), True, 'import numpy as np\n'), ((22561, 22582), 'numpy.mean', 'np.mean', (["error_h['i']"], {}), "(error_h['i'])\n", (22568, 22582), True, 'import numpy as np\n'), ((22613, 22649), 'numpy.mean', 'np.mean', (["(error_h['i'] + error_h['v'])"], {}), "(error_h['i'] + error_h['v'])\n", (22620, 22649), True, 'import numpy as np\n'), ((2769, 2808), 'os.path.join', 'osp.join', (['self.preds', 'seq_name', '"""1.pkl"""'], {}), "(self.preds, seq_name, '1.pkl')\n", (2777, 2808), True, 'from os import path as osp\n'), ((3120, 3134), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3131, 3134), False, 'import pickle\n'), ((8634, 8685), 'os.path.join', 'osp.join', (['self.preds', 'seq_name', 'f"""1_{fname_prefix}"""'], {}), "(self.preds, seq_name, f'1_{fname_prefix}')\n", (8642, 8685), True, 'from os import path as osp\n'), ((9163, 9177), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9174, 9177), False, 'import pickle\n'), ((10426, 10445), 'numpy.linalg.inv', 'np.linalg.inv', (['h_gt'], {}), '(h_gt)\n', (10439, 10445), True, 'import numpy as np\n'), ((12080, 12098), 'numpy.transpose', 'np.transpose', (['h_gt'], {}), '(h_gt)\n', (12092, 12098), True, 
'import numpy as np\n'), ((12283, 12298), 'numpy.transpose', 'np.transpose', (['H'], {}), '(H)\n', (12295, 12298), True, 'import numpy as np\n'), ((12437, 12497), 'numpy.linalg.norm', 'np.linalg.norm', (['(real_warped_corners - warped_corners)'], {'axis': '(1)'}), '(real_warped_corners - warped_corners, axis=1)\n', (12451, 12497), True, 'import numpy as np\n'), ((13692, 13743), 'os.path.join', 'osp.join', (['self.preds', 'seq_name', 'f"""1_{fname_prefix}"""'], {}), "(self.preds, seq_name, f'1_{fname_prefix}')\n", (13700, 13743), True, 'from os import path as osp\n'), ((14221, 14235), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (14232, 14235), False, 'import pickle\n'), ((15743, 15762), 'numpy.linalg.inv', 'np.linalg.inv', (['h_gt'], {}), '(h_gt)\n', (15756, 15762), True, 'import numpy as np\n'), ((18261, 18284), 'numpy.dot', 'np.dot', (['h_gt', 'pos_a_h.T'], {}), '(h_gt, pos_a_h.T)\n', (18267, 18284), True, 'import numpy as np\n'), ((18417, 18465), 'numpy.sum', 'np.sum', (['((pos_b - pos_b_proj[:, :2]) ** 2)'], {'axis': '(1)'}), '((pos_b - pos_b_proj[:, :2]) ** 2, axis=1)\n', (18423, 18465), True, 'import numpy as np\n'), ((18737, 18757), 'numpy.mean', 'np.mean', (['(dist <= thr)'], {}), '(dist <= thr)\n', (18744, 18757), True, 'import numpy as np\n'), ((19682, 19703), 'os.path.join', 'osp.join', (['root', 'fname'], {}), '(root, fname)\n', (19690, 19703), True, 'from os import path as osp\n'), ((21043, 21060), 'numpy.sum', 'np.sum', (['matches_i'], {}), '(matches_i)\n', (21049, 21060), True, 'import numpy as np\n'), ((21084, 21101), 'numpy.sum', 'np.sum', (['matches_v'], {}), '(matches_v)\n', (21090, 21101), True, 'import numpy as np\n'), ((3015, 3062), 'os.path.join', 'osp.join', (['self.preds', 'seq_name', 'f"""{img_id}.pkl"""'], {}), "(self.preds, seq_name, f'{img_id}.pkl')\n", (3023, 3062), True, 'from os import path as osp\n'), ((9026, 9084), 'os.path.join', 'osp.join', (['self.preds', 'seq_name', 'f"""{img_id}_{fname_prefix}"""'], {}), "(self.preds, 
seq_name, f'{img_id}_{fname_prefix}')\n", (9034, 9084), True, 'from os import path as osp\n'), ((14084, 14142), 'os.path.join', 'osp.join', (['self.preds', 'seq_name', 'f"""{img_id}_{fname_prefix}"""'], {}), "(self.preds, seq_name, f'{img_id}_{fname_prefix}')\n", (14092, 14142), True, 'from os import path as osp\n'), ((16327, 16345), 'numpy.sum', 'np.sum', (['vis_warped'], {}), '(vis_warped)\n', (16333, 16345), True, 'import numpy as np\n'), ((17195, 17206), 'numpy.sum', 'np.sum', (['vis'], {}), '(vis)\n', (17201, 17206), True, 'import numpy as np\n'), ((18174, 18202), 'numpy.ones', 'np.ones', (['[pos_a.shape[0], 1]'], {}), '([pos_a.shape[0], 1])\n', (18181, 18202), True, 'import numpy as np\n'), ((20973, 20990), 'numpy.sum', 'np.sum', (['matches_i'], {}), '(matches_i)\n', (20979, 20990), True, 'import numpy as np\n'), ((20993, 21010), 'numpy.sum', 'np.sum', (['matches_v'], {}), '(matches_v)\n', (20999, 21010), True, 'import numpy as np\n'), ((3531, 3561), 'torch.from_numpy', 'torch.from_numpy', (['descriptor_a'], {}), '(descriptor_a)\n', (3547, 3561), False, 'import torch\n'), ((3599, 3629), 'torch.from_numpy', 'torch.from_numpy', (['descriptor_b'], {}), '(descriptor_b)\n', (3615, 3629), False, 'import torch\n'), ((23160, 23189), 'numpy.asarray', 'np.asarray', (["matching_res['v']"], {}), "(matching_res['v'])\n", (23170, 23189), True, 'import numpy as np\n'), ((23217, 23246), 'numpy.asarray', 'np.asarray', (["matching_res['i']"], {}), "(matching_res['i'])\n", (23227, 23246), True, 'import numpy as np\n'), ((23274, 23323), 'numpy.asarray', 'np.asarray', (["(matching_res['i'] + matching_res['v'])"], {}), "(matching_res['i'] + matching_res['v'])\n", (23284, 23323), True, 'import numpy as np\n'), ((15965, 15990), 'numpy.array', 'np.array', (['output_shape_wh'], {}), '(output_shape_wh)\n', (15973, 15990), True, 'import numpy as np\n'), ((16861, 16886), 'numpy.array', 'np.array', (['output_shape_wh'], {}), '(output_shape_wh)\n', (16869, 16886), True, 'import numpy as 
np\n'), ((22810, 22840), 'numpy.asarray', 'np.asarray', (["h_mtx_res['v'][th]"], {}), "(h_mtx_res['v'][th])\n", (22820, 22840), True, 'import numpy as np\n'), ((22872, 22902), 'numpy.asarray', 'np.asarray', (["h_mtx_res['i'][th]"], {}), "(h_mtx_res['i'][th])\n", (22882, 22902), True, 'import numpy as np\n'), ((22934, 22985), 'numpy.asarray', 'np.asarray', (["(h_mtx_res['i'][th] + h_mtx_res['v'][th])"], {}), "(h_mtx_res['i'][th] + h_mtx_res['v'][th])\n", (22944, 22985), True, 'import numpy as np\n')] |
# Create your tasks here
from __future__ import absolute_import, unicode_literals
import hashlib
import json
from urllib.parse import unquote
import librosa
import numpy as np
from celery.task import task
from django.core.exceptions import ObjectDoesNotExist
from django.db import OperationalError
from django.utils import timezone
from .models import Audio, BPM, State
SUCCESS_CODE = 0
OBJECT_DOES_NOT_EXIST_ERROR_CODE = -9
UNKNOWN_PARAMETER_ERROR_CODE = -1
WRONG_ARRAY_LENGTH = -2
UNKNOWN_ERROR = -3
NUMBER_OF_SEGMENTS = 9804
NUMBER_OF_SEGMENTS_IN_TIME = 1026
NUMBER_OF_SEGMENTS_IN_FREQ = 430
TIME_STEP = 9
FREQ_STEP = 5
BPM_SPLIT_DURATION = 15.0
LOST_TASK_TIMEOUT = 60
HASHING_ALGORITHM = 'md5'
@task(name='core.tasks.process_bpm', autoretry_for=(OperationalError,))
def process_bpm(task_id):
    """
    Processes BPM task, setting value, status, processing_start and processing_end of core.models.BPM object
    :param task_id: id of the core.models.BPM object to process
    :return: OBJECT_DOES_NOT_EXIST_ERROR_CODE if wrong task_id specified, SUCCESS_CODE otherwise
    """
    try:
        bpm = BPM.objects.get(id=task_id)
    except ObjectDoesNotExist:
        return OBJECT_DOES_NOT_EXIST_ERROR_CODE
    # Idempotency guard: a re-delivered task must not recompute a finished BPM.
    if bpm.status == BPM.PROCESSED:
        return SUCCESS_CODE
    # Mark as in-progress (and record the start) before the slow audio work.
    bpm.status = BPM.PROCESSING
    bpm.processing_start = timezone.now()
    bpm.save()
    # Decode only this task's slice of the audio file, then estimate tempo
    # from the onset-strength envelope.
    y, sr = librosa.load(unquote(bpm.audio.file.url), offset=bpm.start_time, duration=bpm.duration)
    onset_env = librosa.onset.onset_strength(y, sr=sr)
    tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr)
    bpm.value = np.round(tempo, 1)
    bpm.status = BPM.PROCESSED
    bpm.processing_end = timezone.now()
    bpm.save()
    return SUCCESS_CODE
def split_duration(total_duration, target_duration, split_last=False):
    """
    Split ``total_duration`` into consecutive parts of ``target_duration``.

    :param total_duration: total duration of file
    :param target_duration: target duration of parts
    :param split_last: if True the remainder becomes its own (shorter) final
        part; otherwise the remainder is absorbed into the final part, which
        may then be longer than ``target_duration``
    :return: list of dicts with fields
        - start - start of part
        - end - end of part
        - duration - duration of part (always equal to ``end - start``)
        The parts exactly cover ``[0, total_duration]``.
    """
    # The original computed the final part's duration as
    # total_duration - target_duration * (total_count - 2), which disagreed
    # with end - start, and the split_last branch emitted parts past
    # total_duration. Here duration is always derived from the boundaries.
    res = []
    start = 0
    if split_last:
        # Emit full-length parts; the remainder becomes its own shorter part.
        while start < total_duration:
            end = min(start + target_duration, total_duration)
            res.append({'start': start, 'end': end, 'duration': end - start})
            start = end
    else:
        # Stop early enough that the final part absorbs the remainder
        # (its duration falls in [target_duration, 2 * target_duration)).
        while start < total_duration - target_duration * 2:
            res.append({'start': start, 'end': start + target_duration,
                        'duration': target_duration})
            start = start + target_duration
        res.append({'start': start, 'end': total_duration,
                    'duration': total_duration - start})
    return res
@task(name='core.tasks.schedule_bpm_tasks', autoretry_for=(OperationalError,))
def schedule_audio_tasks(audio_id):
    """
    Schedules all tasks related to specific audio:
    - refreshing principal components
    - BPMs
    - refresh audio status
    :param audio_id: core.models.Audio.id
    :return: OBJECT_DOES_NOT_EXIST_ERROR_CODE if wrong audio_id specified, SUCCESS_CODE otherwise
    """
    try:
        a = Audio.objects.get(id=audio_id)
    except ObjectDoesNotExist:
        return OBJECT_DOES_NOT_EXIST_ERROR_CODE
    # Recompute the PCA state over every audio currently in the database.
    refresh_principal_components.delay(list(Audio.objects.all().values_list('id', flat=True)))
    # One BPM task per BPM_SPLIT_DURATION-second slice of the file.
    intervals = split_duration(a.duration, BPM_SPLIT_DURATION)
    for interval in intervals:
        bpm = BPM.objects.create(audio=a, start_time=interval['start'], duration=interval['duration'])
        bpm.save()
        process_bpm.delay(bpm.id)
    # Kick off the status poller that flips the audio to PROCESSED when done.
    refresh_audio_status.delay(a.id)
    a.status = Audio.IN_QUEUE
    a.save()
    return SUCCESS_CODE
@task(name='core.tasks.merge', autoretry_for=(OperationalError,))
def merge(audio_id):
    """
    Merges consecutive BPM objects with the same value into one with extended duration
    :param audio_id: audio id to merge parameter for
    :return: OBJECT_DOES_NOT_EXIST_ERROR_CODE if wrong audio_id specified,
             WRONG_ARRAY_LENGTH if there are fewer than two BPM parts, SUCCESS_CODE otherwise
    """
    try:
        a = Audio.objects.get(id=audio_id)
    except ObjectDoesNotExist:
        return OBJECT_DOES_NOT_EXIST_ERROR_CODE
    bpms = BPM.objects.filter(audio=a).order_by('start_time')
    if bpms.count() < 2:
        return WRONG_ARRAY_LENGTH
    # Sweep the parts in time order, extending the current run while the BPM
    # value stays the same and starting a new run when it changes.
    t_bpm = bpms[0]
    res_bpm = []
    for i in range(1, len(bpms)):
        old_bpm = bpms[i]
        if old_bpm.value == t_bpm.value:
            t_bpm.duration += old_bpm.duration
        else:
            res_bpm.append(t_bpm)
            t_bpm = old_bpm
    res_bpm.append(t_bpm)
    # Replace the fine-grained rows with the merged ones.
    # NOTE(review): the merged objects keep their original primary keys when
    # re-inserted by bulk_create — confirm this is intended.
    bpms.delete()
    BPM.objects.bulk_create(res_bpm)
    return SUCCESS_CODE
@task(name='core.tasks.refresh_audio_status', autoretry_for=(OperationalError,))
def refresh_audio_status(audio_id):
    """
    Refreshes audio status in case of long processing and schedules itself unless audio processing is finished
    :param audio_id: audio id to refresh status for
    :return: OBJECT_DOES_NOT_EXIST_ERROR_CODE if wrong audio_id specified, SUCCESS_CODE otherwise
    """
    try:
        audio = Audio.objects.get(id=audio_id)
    except ObjectDoesNotExist:
        return OBJECT_DOES_NOT_EXIST_ERROR_CODE
    # Progress counters: total BPM parts vs. parts already processed.
    audio.tasks_scheduled = BPM.objects.filter(audio=audio).count()
    audio.tasks_processed = BPM.objects.filter(audio=audio, status=BPM.PROCESSED).count()
    if audio.tasks_scheduled == audio.tasks_processed:
        # Everything finished: merge equal parts and compute the weighted BPM.
        audio.status = Audio.PROCESSED
        merge.delay(audio_id)
        count_avg_bpm.delay(audio_id)
    elif audio.tasks_processed > 0:
        # Still in flight: re-enqueue ourselves to poll again.
        # NOTE(review): when tasks_processed == 0 this task does NOT re-enqueue
        # itself, so the status never refreshes until at least one part is
        # done — confirm another trigger covers that window.
        audio.status = Audio.PROCESSING
        refresh_audio_status.delay(audio_id)
    audio.save()
    return SUCCESS_CODE
@task(name='core.tasks.count_avg_bpm')
def count_avg_bpm(audio_id):
    """
    Computes the duration-weighted average BPM of an audio once all of its
    parts have been processed, and stores it on the Audio object.
    :param audio_id: audio id to count avg_bpm for
    :return: OBJECT_DOES_NOT_EXIST_ERROR_CODE if wrong audio_id specified, SUCCESS_CODE otherwise
    """
    try:
        audio = Audio.objects.get(id=audio_id)
    except ObjectDoesNotExist:
        return OBJECT_DOES_NOT_EXIST_ERROR_CODE
    # Each part contributes its BPM value weighted by how long it lasts.
    parts = BPM.objects.filter(audio=audio).order_by('start_time')
    part_values = parts.values_list('value', flat=True)
    part_durations = list(parts.values_list('duration', flat=True))
    weighted_sum = sum(duration * value for duration, value in zip(part_durations, part_values))
    audio.avg_bpm = weighted_sum / sum(part_durations)
    audio.save()
    return SUCCESS_CODE
@task(name='core.tasks.refresh_principal_components')
def refresh_principal_components(audio_ids):
    """
    Computes principal components matrix for given list of audio ids and stores it in core.models.State object
    :param audio_ids: list of audio ids to compute principal components for
    :return: SUCCESS_CODE
    """
    variance_share = 0.95  # keep enough components to explain 95% of variance
    files = list(Audio.objects.filter(id__in=audio_ids).values_list('file_url', flat=True))
    # Feature matrix: one column per audio, one row per averaged spectrogram tile.
    a = np.zeros(shape=(NUMBER_OF_SEGMENTS, len(files)), dtype=float)
    nf = 0
    for file in files:
        y, sr = librosa.load(file, offset=15, duration=10)
        d = librosa.stft(y, n_fft=2048)
        x = np.abs(d)
        i = 0
        j = 0
        k = 0
        # Average the magnitude spectrogram over (TIME_STEP - 1) x FREQ_STEP
        # tiles. The TIME_STEP - 1 keeps the last window inside the array;
        # the divisor is only a constant scale, cancelled by standardization.
        while i < NUMBER_OF_SEGMENTS_IN_TIME:
            while j < NUMBER_OF_SEGMENTS_IN_FREQ:
                s = 0
                for ii in range(i, i + TIME_STEP - 1):
                    for jj in range(j, j + FREQ_STEP):
                        s += x[ii][jj]
                a[k][nf] = s / (TIME_STEP * FREQ_STEP)
                k = k + 1
                j = j + FREQ_STEP
            j = 0
            i = i + TIME_STEP
        nf = nf + 1
    # Standardize each feature row before computing the covariance.
    means = np.mean(a, 1)
    stds = np.std(a, 1)
    for i in range(len(means)):
        a[i, :] = a[i, :] - means[i]
        a[i, :] = a[i, :] / stds[i]
    r = np.cov(a)
    d, v = np.linalg.eigh(r)
    component_count = 0
    d = d[::-1]  # eigh returns eigenvalues ascending; inspect them descending
    cumsum = np.cumsum(d)
    dsum = np.sum(d)
    for k in range(len(cumsum)):
        if cumsum[k] / dsum >= variance_share:
            component_count = k + 1
            break
    principal_vectors = np.zeros((NUMBER_OF_SEGMENTS, component_count))
    for k in range(component_count):
        principal_vectors[:, k] = v[:, NUMBER_OF_SEGMENTS - 1 - k]
    pc = np.dot(a.T, principal_vectors)
    # hashlib requires bytes: hash a deterministic text form of the id list
    # (the original passed the list itself, which raises TypeError).
    h = hashlib.new(HASHING_ALGORITHM)
    h.update(str(list(audio_ids)).encode())
    # numpy arrays are not JSON serializable; persist plain nested lists.
    State.objects.create(hash_id=h.hexdigest(),
                         pc=json.dumps(pc.tolist()),
                         means=json.dumps(means.tolist()),
                         stds=json.dumps(stds.tolist())).save()
    for i in range(pc.shape[0]):
        Audio.objects.filter(id=audio_ids[i]).update(principal_components=pc[i, :])
        get_closest_melodies.delay(audio_ids[i])
    return SUCCESS_CODE
@task(name='core.tasks.calc_melody_components')
def calc_melody_components(audio_id, offset, audio_ids=None):
    """
    Computes components for new audio based on existing state described by audio_ids
    :param audio_id: audio id to calculate principal vector for
    :param offset: offset (seconds) for calculating components in audio
    :param audio_ids: ids to use for principal components matrix; latest state when None
    :return: SUCCESS_CODE, or OBJECT_DOES_NOT_EXIST_ERROR_CODE if no matching state exists
    """
    if audio_ids is None:
        state = State.objects.latest('calculated_date')
    else:
        try:
            # hashlib requires bytes; the encoding must match the one used
            # when the state was created in refresh_principal_components.
            h = hashlib.new(HASHING_ALGORITHM)
            h.update(str(list(audio_ids)).encode())
            state = State.objects.get(hash_id=h.hexdigest())
        except ObjectDoesNotExist:
            return OBJECT_DOES_NOT_EXIST_ERROR_CODE
    # State fields are persisted as JSON strings; decode them back to arrays.
    means = np.array(json.loads(state.means))
    stds = np.array(json.loads(state.stds))
    # NOTE(review): state.pc holds the projected training data, not the basis
    # vectors; the projection below assumes a (segments x components) matrix —
    # confirm what State.pc is meant to contain.
    principal_vectors = np.array(json.loads(state.pc))
    audio = Audio.objects.get(id=audio_id)
    y, sr = librosa.load(unquote(audio.file.url), offset=offset, duration=10)
    d = librosa.stft(y, n_fft=2048)
    x = np.abs(d)
    a = np.zeros(shape=(NUMBER_OF_SEGMENTS,), dtype=float)
    i = 0
    j = 0
    k = 0
    # Tile averaging must use the same (TIME_STEP - 1) x FREQ_STEP window as
    # refresh_principal_components; the original used FREQ_STEP - 1 here,
    # producing features inconsistent with the stored PCA state.
    while i < NUMBER_OF_SEGMENTS_IN_TIME:
        while j < NUMBER_OF_SEGMENTS_IN_FREQ:
            s = 0
            for ii in range(i, i + TIME_STEP - 1):
                for jj in range(j, j + FREQ_STEP):
                    s += x[ii][jj]
            a[k] = s / (TIME_STEP * FREQ_STEP)
            k = k + 1
            j = j + FREQ_STEP
        j = 0
        i = i + TIME_STEP
    # Standardize with the training means/stds, then project.
    for i in range(len(means)):
        a[i] = a[i] - means[i]
        a[i] = a[i] / stds[i]
    new_components = np.dot(a, principal_vectors)
    Audio.objects.filter(id=audio_id).update(principal_components=new_components)
    return SUCCESS_CODE
@task(name='core.tasks.get_closest_melodies')
def get_closest_melodies(audio_id, audio_ids=None):
    """
    Computes distances from the given audio to every audio in the state (in
    principal-component space) and stores the distance-sorted list on the audio.
    :param audio_id: target audio to compare others to
    :param audio_ids: audio ids describing the state to compare against; latest state when None
    :return: SUCCESS_CODE, or OBJECT_DOES_NOT_EXIST_ERROR_CODE if no matching state exists
    """
    if audio_ids is None:
        state = State.objects.latest('calculated_date')
    else:
        try:
            # hashlib requires bytes; the encoding must match the one used
            # when the state was created in refresh_principal_components.
            h = hashlib.new(HASHING_ALGORITHM)
            h.update(str(list(audio_ids)).encode())
            state = State.objects.get(hash_id=h.hexdigest())
        except ObjectDoesNotExist:
            return OBJECT_DOES_NOT_EXIST_ERROR_CODE
    # state.pc is persisted as a JSON string; decode it to an array.
    pc = np.array(json.loads(state.pc))
    # Flatten so a stored row vector and a 1-D vector are handled alike
    # (the original read shape[1], which fails for a 1-D component vector).
    melody_components = np.asarray(Audio.objects.get(id=audio_id).principal_components).reshape(-1)
    component_number = melody_components.shape[0]
    num_of_melodies_in_db = pc.shape[0]
    dtype = np.dtype([('distance', float), ('number', int)])
    num_and_distance = np.array([], dtype=dtype)
    for i in range(num_of_melodies_in_db):
        # Squared Euclidean distance in component space.
        distance = 0
        for j in range(component_number):
            diff = float(pc[i][j]) - melody_components[j]
            distance = distance + diff * diff
        # Keep the array sorted by distance while inserting.
        num_and_distance = np.insert(num_and_distance,
                                     num_and_distance.searchsorted(np.asarray((distance, i), dtype=dtype)),
                                     (distance, i))
    # Structured numpy arrays are not JSON serializable; store plain lists.
    Audio.objects.filter(id=audio_id).update(closest_list=json.dumps(num_and_distance.tolist()))
    return SUCCESS_CODE
| [
"urllib.parse.unquote",
"numpy.sum",
"numpy.abs",
"json.dumps",
"numpy.mean",
"numpy.round",
"celery.task.task",
"django.utils.timezone.now",
"numpy.std",
"numpy.cumsum",
"numpy.cov",
"librosa.stft",
"numpy.asarray",
"librosa.load",
"numpy.dot",
"librosa.onset.onset_strength",
"numpy... | [((705, 775), 'celery.task.task', 'task', ([], {'name': '"""core.tasks.process_bpm"""', 'autoretry_for': '(OperationalError,)'}), "(name='core.tasks.process_bpm', autoretry_for=(OperationalError,))\n", (709, 775), False, 'from celery.task import task\n'), ((3072, 3149), 'celery.task.task', 'task', ([], {'name': '"""core.tasks.schedule_bpm_tasks"""', 'autoretry_for': '(OperationalError,)'}), "(name='core.tasks.schedule_bpm_tasks', autoretry_for=(OperationalError,))\n", (3076, 3149), False, 'from celery.task import task\n'), ((4064, 4128), 'celery.task.task', 'task', ([], {'name': '"""core.tasks.merge"""', 'autoretry_for': '(OperationalError,)'}), "(name='core.tasks.merge', autoretry_for=(OperationalError,))\n", (4068, 4128), False, 'from celery.task import task\n'), ((5001, 5080), 'celery.task.task', 'task', ([], {'name': '"""core.tasks.refresh_audio_status"""', 'autoretry_for': '(OperationalError,)'}), "(name='core.tasks.refresh_audio_status', autoretry_for=(OperationalError,))\n", (5005, 5080), False, 'from celery.task import task\n'), ((6016, 6053), 'celery.task.task', 'task', ([], {'name': '"""core.tasks.count_avg_bpm"""'}), "(name='core.tasks.count_avg_bpm')\n", (6020, 6053), False, 'from celery.task import task\n'), ((6764, 6816), 'celery.task.task', 'task', ([], {'name': '"""core.tasks.refresh_principal_components"""'}), "(name='core.tasks.refresh_principal_components')\n", (6768, 6816), False, 'from celery.task import task\n'), ((9016, 9062), 'celery.task.task', 'task', ([], {'name': '"""core.tasks.calc_melody_components"""'}), "(name='core.tasks.calc_melody_components')\n", (9020, 9062), False, 'from celery.task import task\n'), ((10737, 10781), 'celery.task.task', 'task', ([], {'name': '"""core.tasks.get_closest_melodies"""'}), "(name='core.tasks.get_closest_melodies')\n", (10741, 10781), False, 'from celery.task import task\n'), ((1307, 1321), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1319, 1321), False, 'from 
django.utils import timezone\n'), ((1453, 1491), 'librosa.onset.onset_strength', 'librosa.onset.onset_strength', (['y'], {'sr': 'sr'}), '(y, sr=sr)\n', (1481, 1491), False, 'import librosa\n'), ((1504, 1555), 'librosa.beat.tempo', 'librosa.beat.tempo', ([], {'onset_envelope': 'onset_env', 'sr': 'sr'}), '(onset_envelope=onset_env, sr=sr)\n', (1522, 1555), False, 'import librosa\n'), ((1572, 1590), 'numpy.round', 'np.round', (['tempo', '(1)'], {}), '(tempo, 1)\n', (1580, 1590), True, 'import numpy as np\n'), ((1647, 1661), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1659, 1661), False, 'from django.utils import timezone\n'), ((7935, 7948), 'numpy.mean', 'np.mean', (['a', '(1)'], {}), '(a, 1)\n', (7942, 7948), True, 'import numpy as np\n'), ((7960, 7972), 'numpy.std', 'np.std', (['a', '(1)'], {}), '(a, 1)\n', (7966, 7972), True, 'import numpy as np\n'), ((8086, 8095), 'numpy.cov', 'np.cov', (['a'], {}), '(a)\n', (8092, 8095), True, 'import numpy as np\n'), ((8107, 8124), 'numpy.linalg.eigh', 'np.linalg.eigh', (['r'], {}), '(r)\n', (8121, 8124), True, 'import numpy as np\n'), ((8179, 8191), 'numpy.cumsum', 'np.cumsum', (['d'], {}), '(d)\n', (8188, 8191), True, 'import numpy as np\n'), ((8203, 8212), 'numpy.sum', 'np.sum', (['d'], {}), '(d)\n', (8209, 8212), True, 'import numpy as np\n'), ((8371, 8418), 'numpy.zeros', 'np.zeros', (['(NUMBER_OF_SEGMENTS, component_count)'], {}), '((NUMBER_OF_SEGMENTS, component_count))\n', (8379, 8418), True, 'import numpy as np\n'), ((8532, 8562), 'numpy.dot', 'np.dot', (['a.T', 'principal_vectors'], {}), '(a.T, principal_vectors)\n', (8538, 8562), True, 'import numpy as np\n'), ((8571, 8601), 'hashlib.new', 'hashlib.new', (['HASHING_ALGORITHM'], {}), '(HASHING_ALGORITHM)\n', (8582, 8601), False, 'import hashlib\n'), ((9970, 9997), 'librosa.stft', 'librosa.stft', (['y'], {'n_fft': '(2048)'}), '(y, n_fft=2048)\n', (9982, 9997), False, 'import librosa\n'), ((10006, 10015), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', 
(10012, 10015), True, 'import numpy as np\n'), ((10024, 10074), 'numpy.zeros', 'np.zeros', ([], {'shape': '(NUMBER_OF_SEGMENTS,)', 'dtype': 'float'}), '(shape=(NUMBER_OF_SEGMENTS,), dtype=float)\n', (10032, 10074), True, 'import numpy as np\n'), ((10599, 10627), 'numpy.dot', 'np.dot', (['a', 'principal_vectors'], {}), '(a, principal_vectors)\n', (10605, 10627), True, 'import numpy as np\n'), ((11578, 11626), 'numpy.dtype', 'np.dtype', (["[('distance', float), ('number', int)]"], {}), "([('distance', float), ('number', int)])\n", (11586, 11626), True, 'import numpy as np\n'), ((11650, 11675), 'numpy.array', 'np.array', (['[]'], {'dtype': 'dtype'}), '([], dtype=dtype)\n', (11658, 11675), True, 'import numpy as np\n'), ((1362, 1389), 'urllib.parse.unquote', 'unquote', (['bpm.audio.file.url'], {}), '(bpm.audio.file.url)\n', (1369, 1389), False, 'from urllib.parse import unquote\n'), ((7330, 7372), 'librosa.load', 'librosa.load', (['file'], {'offset': '(15)', 'duration': '(10)'}), '(file, offset=15, duration=10)\n', (7342, 7372), False, 'import librosa\n'), ((7385, 7412), 'librosa.stft', 'librosa.stft', (['y'], {'n_fft': '(2048)'}), '(y, n_fft=2048)\n', (7397, 7412), False, 'import librosa\n'), ((7425, 7434), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (7431, 7434), True, 'import numpy as np\n'), ((9909, 9932), 'urllib.parse.unquote', 'unquote', (['audio.file.url'], {}), '(audio.file.url)\n', (9916, 9932), False, 'from urllib.parse import unquote\n'), ((9551, 9581), 'hashlib.new', 'hashlib.new', (['HASHING_ALGORITHM'], {}), '(HASHING_ALGORITHM)\n', (9562, 9581), False, 'import hashlib\n'), ((11171, 11201), 'hashlib.new', 'hashlib.new', (['HASHING_ALGORITHM'], {}), '(HASHING_ALGORITHM)\n', (11182, 11201), False, 'import hashlib\n'), ((12196, 12224), 'json.dumps', 'json.dumps', (['num_and_distance'], {}), '(num_and_distance)\n', (12206, 12224), False, 'import json\n'), ((12044, 12082), 'numpy.asarray', 'np.asarray', (['(distance, i)'], {'dtype': 'dtype'}), '((distance, 
i), dtype=dtype)\n', (12054, 12082), True, 'import numpy as np\n'), ((8702, 8716), 'json.dumps', 'json.dumps', (['pc'], {}), '(pc)\n', (8712, 8716), False, 'import json\n'), ((8749, 8766), 'json.dumps', 'json.dumps', (['means'], {}), '(means)\n', (8759, 8766), False, 'import json\n'), ((8798, 8814), 'json.dumps', 'json.dumps', (['stds'], {}), '(stds)\n', (8808, 8814), False, 'import json\n')] |
import geopandas as gpd
import pandas as pd
import numpy as np
from .preprocess import *
def busgps_arriveinfo(data,line,stop,col = ['VehicleId','GPSDateTime','lon','lat','stopname'],
                      stopbuffer = 200,mintime = 300,project_epsg = 2416,timegap = 1800,method = 'project',projectoutput = False):
    '''
    Input bus GPS data, bus route and station GeoDataFrame, this method can identify the bus arrival and departure information
    Parameters
    -------
    data : DataFrame
        Bus GPS data. It should be the data from one bus route, and need to contain vehicle ID, GPS time, latitude and longitude (wgs84)
    line : GeoDataFrame
        GeoDataFrame for the bus line
    stop : GeoDataFrame
        GeoDataFrame for bus stops
    col : List
        Column names, in the order of [vehicle ID, time, longitude, latitude, station name]
    stopbuffer : number
        Meter. When the vehicle approaches the station within this certain distance, it is considered to be arrive at the station.
    mintime : number
        Seconds. Within a short period of time that the bus arrive at bus station again, it will not be consider as another arrival
    project_epsg : number
        The matching algorithm will convert the data into a projection coordinate system to calculate the distance, here the epsg code of the projection coordinate system is given
    timegap : number
        Seconds. For how long the vehicle does not appear, it will be considered as a new vehicle
    method : str
        The method of matching the bus, either ‘project’ or ‘dislimit’; ‘project’ is to directly match the nearest point on the route, which is fast. ‘dislimit’ needs to consider the front point position with the distance limitation, the matching speed is slow.
    projectoutput : bool
        Whether to output the projected data
    Returns
    -------
    arrive_info : DataFrame
        Bus arrival and departure information
    '''
    VehicleId,GPSDateTime,lon,lat,stopcol = col
    #Clean data
    print('Cleaning data',end = '')
    # Work in a projected CRS so buffers/distances are in meters.
    line.set_crs(crs='epsg:4326',allow_override=True,inplace=True)
    line = line.to_crs(epsg = project_epsg)
    line_buffer = line.copy()
    # 200 m corridor around the route used to drop off-route GPS noise.
    line_buffer['geometry'] = line_buffer.buffer(200)
    line_buffer = line_buffer.to_crs(epsg = 4326)
    print('.',end = '')
    data = clean_same(data,col=[VehicleId,GPSDateTime,lon,lat])
    print('.',end = '')
    data = clean_outofshape(data,line_buffer,col = [lon,lat],accuracy = 500)
    print('.')
    # Re-index vehicles: a gap longer than `timegap` starts a new vehicle id.
    data = id_reindex(data,VehicleId,timegap = timegap,timecol = GPSDateTime,suffix='')
    print('Position matching',end = '')
    #project data points onto bus LineString
    lineshp = line['geometry'].iloc[0]
    print('.',end = '')
    data['geometry'] = gpd.points_from_xy(data[lon],data[lat])
    data = gpd.GeoDataFrame(data)
    data.set_crs(crs='epsg:4326',allow_override=True,inplace=True)
    print('.',end = '')
    data = data.to_crs(epsg = project_epsg)
    print('.',end = '')
    if method == 'project':
        # Fast path: each point maps to its nearest position along the line.
        data['project'] = data['geometry'].apply(lambda r:lineshp.project(r))
    elif method == 'dislimit':
        tmps = []
        #Distance limit method: the projected position of each point may not
        #move further along the line than the straight-line distance to the
        #previous point allows.
        for vid in data[VehicleId].drop_duplicates():
            print('.',end = '')
            tmp = data[data[VehicleId]==vid].copy()
            gap = 30
            i = 0
            tmp = tmp.sort_values(by = [VehicleId,GPSDateTime]).reset_index(drop=True)
            tmp['project'] = 0
            from shapely.geometry import LineString
            for i in range(len(tmp)-1):
                if i == 0:
                    proj = lineshp.project(tmp.iloc[i]['geometry'])
                    tmp.loc[i,'project'] = proj
                else:
                    proj = tmp['project'].iloc[i]
                dis = tmp.iloc[i+1]['geometry'].distance(tmp.iloc[i]['geometry'])
                if dis == 0:
                    proj1 = proj
                else:
                    proj2 = lineshp.project(tmp.iloc[i+1]['geometry'])
                    if abs(proj2-proj)>dis:
                        # Cap the advance along the line at the observed
                        # point-to-point distance.
                        proj1 = np.sign(proj2-proj)*dis+proj
                    else:
                        proj1 = proj2
                tmp.loc[i+1,'project'] = proj1
            tmps.append(tmp)
        data = pd.concat(tmps)
    print('.',end = '')
    #Project bus stop to bus line
    stop = stop.to_crs(epsg = project_epsg)
    stop['project'] = stop['geometry'].apply(lambda r:lineshp.project(r))
    print('.',end = '')
    # Seconds since the first observation; the x axis of the time-space diagram.
    starttime = data[GPSDateTime].min()
    data['time_st'] = (data[GPSDateTime]-starttime).dt.total_seconds()
    BUS_project = data
    print('.')
    from shapely.geometry import LineString
    import shapely
    ls = []
    print('Matching arrival and leaving info...',end = '')
    for car in BUS_project[VehicleId].drop_duplicates():
        print('.',end = '')
        #Extract bus trajectory
        tmp = BUS_project[BUS_project[VehicleId] == car]
        if len(tmp)>1:
            for stopname in stop[stopcol].drop_duplicates():
                #Get the stop position
                position = stop[stop[stopcol] == stopname]['project'].iloc[0]
                #Identify arrival and departure by intersection of stop buffer and line segment:
                #in the time-space diagram, each crossing of the stop's buffer band is one visit.
                buffer_polygon = LineString([[0,position],
                                            [data['time_st'].max(),position]]).buffer(stopbuffer)
                bus_linestring = LineString(tmp[['time_st','project']].values)
                line_intersection = bus_linestring.intersection(buffer_polygon)
                #Extract leave time
                if line_intersection.is_empty:
                    #If empty, no bus arrive
                    continue
                else:
                    if type(line_intersection) == shapely.geometry.linestring.LineString:
                        arrive = [line_intersection]
                    else:
                        arrive = list(line_intersection)
                arrive = pd.DataFrame(arrive)
                # First/last coordinate of each crossing = arrival/leave time (seconds).
                arrive['arrivetime']= arrive[0].apply(lambda r:r.coords[0][0])
                arrive['leavetime']= arrive[0].apply(lambda r:r.coords[-1][0])
                #Filtering arrival information through time threshold:
                #merge a re-arrival within `mintime` seconds into the previous visit.
                a = arrive[['arrivetime']].copy()
                a.columns = ['time']
                a['flag'] = 1
                b = arrive[['leavetime']].copy()
                b.columns = ['time']
                b['flag'] = 0
                c = pd.concat([a,b]).sort_values(by = 'time')
                c['time1'] = c['time'].shift(-1)
                c['flag_1'] = ((c['time1']-c['time'])<mintime)&(c['flag']==0)
                c['flag_2'] = c['flag_1'].shift().fillna(False)
                c['flag_3'] = c['flag_1']|c['flag_2']
                c = c[-c['flag_3']]
                arrive_new = c[c['flag'] == 1][['time']].copy()
                arrive_new.columns = ['arrivetime']
                arrive_new['leavetime'] = list(c[c['flag'] == 0]['time'])
                arrive_new[stopcol] = stopname
                arrive_new[VehicleId] = car
                #Save data
                ls.append(arrive_new)
    #Concat data
    arrive_info = pd.concat(ls)
    # Convert relative seconds back to absolute timestamps.
    arrive_info['arrivetime'] = starttime+arrive_info['arrivetime'].apply(lambda r:pd.Timedelta(int(r),unit = 's'))
    arrive_info['leavetime'] = starttime+arrive_info['leavetime'].apply(lambda r:pd.Timedelta(int(r),unit = 's'))
    if projectoutput:
        return arrive_info,data
    else:
        return arrive_info
def busgps_onewaytime(arrive_info, start, end, col=['VehicleId', 'stopname', 'arrivetime', 'leavetime']):
    '''
    Input the arrival information table and the two terminal stops to calculate
    the one-way travel time in both directions.

    Parameters
    -------
    arrive_info : DataFrame
        Bus arrival/departure information (output of busgps_arriveinfo)
    start : Str
        Starting station name
    end : Str
        Ending station name
    col : List
        Column names [vehicle ID, station name, arrivetime, leavetime]

    Returns
    -------
    onewaytime : DataFrame
        One trip per row with columns: time (departure), time1 (arrival),
        duration (seconds), shour (departure hour), direction ('start-end'
        or 'end-start').
    '''
    [VehicleId, stopname, arrivetime, leavetime] = col
    # Work on a copy: the original assigned pd.to_datetime results straight
    # into the caller's DataFrame, mutating it as a side effect.
    arrive_info = arrive_info.copy()
    arrive_info[arrivetime] = pd.to_datetime(arrive_info[arrivetime])
    arrive_info[leavetime] = pd.to_datetime(arrive_info[leavetime])

    def _one_direction(origin, dest):
        # One-way trips from `origin` to `dest`: pair each departure at the
        # origin with the next arrival of the same vehicle at the destination.
        arrivals = arrive_info[arrive_info[stopname] == dest][[arrivetime, stopname, VehicleId]]
        departures = arrive_info[arrive_info[stopname] == origin][[leavetime, stopname, VehicleId]]
        arrivals.columns = ['time', stopname, VehicleId]
        departures.columns = ['time', stopname, VehicleId]
        c = pd.concat([arrivals, departures]).sort_values(by=[VehicleId, 'time'])
        # Shift every column by -1 so each row also carries the next event.
        for name in ['time', stopname, VehicleId]:
            c[name + '1'] = c[name].shift(-1)
        # Keep only origin-departure rows immediately followed by a
        # destination-arrival of the same vehicle.
        c = c[(c[VehicleId] == c[VehicleId + '1']) &
              (c[stopname] == origin) &
              (c[stopname + '1'] == dest)].copy()
        c['duration'] = (c['time1'] - c['time']).dt.total_seconds()
        c['shour'] = c['time'].dt.hour
        c['direction'] = origin + '-' + dest
        return c

    # The original duplicated the whole pipeline for the reverse direction.
    onewaytime = pd.concat([_one_direction(start, end), _one_direction(end, start)])
    return onewaytime
| [
"pandas.DataFrame",
"shapely.geometry.LineString",
"geopandas.GeoDataFrame",
"pandas.to_datetime",
"geopandas.points_from_xy",
"numpy.sign",
"pandas.concat"
] | [((2769, 2809), 'geopandas.points_from_xy', 'gpd.points_from_xy', (['data[lon]', 'data[lat]'], {}), '(data[lon], data[lat])\n', (2787, 2809), True, 'import geopandas as gpd\n'), ((2820, 2842), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['data'], {}), '(data)\n', (2836, 2842), True, 'import geopandas as gpd\n'), ((7211, 7224), 'pandas.concat', 'pd.concat', (['ls'], {}), '(ls)\n', (7220, 7224), True, 'import pandas as pd\n'), ((8382, 8421), 'pandas.to_datetime', 'pd.to_datetime', (['arrive_info[arrivetime]'], {}), '(arrive_info[arrivetime])\n', (8396, 8421), True, 'import pandas as pd\n'), ((8451, 8489), 'pandas.to_datetime', 'pd.to_datetime', (['arrive_info[leavetime]'], {}), '(arrive_info[leavetime])\n', (8465, 8489), True, 'import pandas as pd\n'), ((8810, 8827), 'pandas.concat', 'pd.concat', (['[a, b]'], {}), '([a, b])\n', (8819, 8827), True, 'import pandas as pd\n'), ((9609, 9626), 'pandas.concat', 'pd.concat', (['[a, b]'], {}), '([a, b])\n', (9618, 9626), True, 'import pandas as pd\n'), ((9990, 10009), 'pandas.concat', 'pd.concat', (['[c1, c2]'], {}), '([c1, c2])\n', (9999, 10009), True, 'import pandas as pd\n'), ((4288, 4303), 'pandas.concat', 'pd.concat', (['tmps'], {}), '(tmps)\n', (4297, 4303), True, 'import pandas as pd\n'), ((5449, 5495), 'shapely.geometry.LineString', 'LineString', (["tmp[['time_st', 'project']].values"], {}), "(tmp[['time_st', 'project']].values)\n", (5459, 5495), False, 'from shapely.geometry import LineString\n'), ((6005, 6025), 'pandas.DataFrame', 'pd.DataFrame', (['arrive'], {}), '(arrive)\n', (6017, 6025), True, 'import pandas as pd\n'), ((6507, 6524), 'pandas.concat', 'pd.concat', (['[a, b]'], {}), '([a, b])\n', (6516, 6524), True, 'import pandas as pd\n'), ((4104, 4125), 'numpy.sign', 'np.sign', (['(proj2 - proj)'], {}), '(proj2 - proj)\n', (4111, 4125), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from numpy import linalg
from sklearn import linear_model as lm
from sklearn import svm as sv
from cvxopt import matrix
from cvxopt import solvers
import time
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pickle
class SimClasses:
    """Generator of two-class Gaussian data for the classifier experiments."""

    def GetData(self, N, D, Distance):
        """Draw N labelled points in D dimensions.

        Even-indexed rows are sampled from N(0, I) with label +1 ("heads");
        odd-indexed rows are sampled from a unit-covariance Gaussian whose
        mean is shifted by `Distance` along the first axis, with label -1
        ("tails"). Returns (X, Y) where X is an (N, D) numpy matrix and Y
        is a length-N array of +/-1 labels.
        """
        mean_heads = np.zeros(D)
        mean_tails = np.zeros(D)
        # Tails are separated from heads only along the first coordinate.
        mean_tails[0] = Distance
        covariance = np.eye(D)
        labels = np.ones(N)
        rows = []
        for idx in range(N):
            if idx % 2 == 0:
                rows.append(np.random.multivariate_normal(mean_heads, covariance))
            else:
                rows.append(np.random.multivariate_normal(mean_tails, covariance))
                labels[idx] = -1
        return np.matrix(np.vstack(rows)), labels
class Classifier_A:
    """Logistic-regression classifier (scikit-learn) on simulated Gaussian data."""

    def Classify(self, N, D, Distance):
        """
        Train on N fresh simulated points and evaluate on 100 fresh points.

        :param N: number of training points
        :param D: dimensionality of the data
        :param Distance: separation of the two class means
        :return: (number of correct predictions out of 100,
                  training time in seconds, fitted coefficient matrix)
        """
        # Generate the training data:
        cp = SimClasses()
        Xtr, Ytr = cp.GetData(N, D, Distance)
        # Fit the data. time.clock() was removed in Python 3.8;
        # time.perf_counter() is the documented replacement for interval timing.
        start = time.perf_counter()
        lr = lm.LogisticRegression(C=1.0)
        lr.fit(Xtr, Ytr)
        end = time.perf_counter() - start
        parameters = lr.coef_
        # Predict on a fresh, independent evaluation set
        # (a local name; the original rebound the parameter N).
        n_test = 100
        Xte, Yte = cp.GetData(n_test, D, Distance)
        Z = lr.predict(Xte)
        # Count correct predictions.
        accuracy = int((Yte.reshape(1, n_test) == Z).sum())
        return accuracy, end, parameters
class Classifier_B:
    """Perceptron classifier (scikit-learn) on simulated Gaussian data."""

    def Classify(self, N, D, Distance):
        """
        Train a perceptron on N fresh points and evaluate on 100 fresh points.

        :param N: number of training points
        :param D: dimensionality of the data
        :param Distance: separation of the two class means
        :return: (number of correct predictions out of 100,
                  training time in seconds, fitted coefficient matrix)
        """
        # Generate the train data
        cp = SimClasses()
        Xtr, Ytr = cp.GetData(N, D, Distance)
        # Train the data. time.clock() was removed in Python 3.8;
        # time.perf_counter() is the documented replacement.
        pr = lm.Perceptron()
        start = time.perf_counter()
        pr.fit(Xtr, Ytr)
        end = time.perf_counter() - start
        # Evaluate on a fresh, independent set of 100 points.
        n_test = 100
        Xte, Yte = cp.GetData(n_test, D, Distance)
        Z = pr.predict(Xte)
        parameters = pr.coef_
        # Count correct predictions.
        accuracy = int((Yte.reshape(1, n_test) == Z).sum())
        return accuracy, end, parameters
class Classifier_C:
    """Linear SVM classifier (scikit-learn SVC) on simulated Gaussian data."""

    def Classify(self, N, D, Distance):
        """
        Train a linear SVM on N fresh points and evaluate on 100 fresh points.

        :param N: number of training points
        :param D: dimensionality of the data
        :param Distance: separation of the two class means
        :return: (number of correct predictions out of 100,
                  training time in seconds, fitted coefficient matrix)
        """
        # Generate the training data
        cp = SimClasses()
        Xtr, Ytr = cp.GetData(N, D, Distance)
        # Fit the data. time.clock() was removed in Python 3.8;
        # time.perf_counter() is the documented replacement.
        start = time.perf_counter()
        svm = sv.SVC(kernel='linear')
        svm.fit(Xtr, Ytr)
        end = time.perf_counter() - start
        parameters = svm.coef_
        # Evaluate on a fresh, independent set of 100 points.
        n_test = 100
        Xte, Yte = cp.GetData(n_test, D, Distance)
        Z = svm.predict(Xte)
        # Count correct predictions.
        accuracy = int((Yte.reshape(1, n_test) == Z).sum())
        return accuracy, end, parameters
class Classifier_D:
    """Soft-margin linear SVM trained by solving its dual QP with CVXOPT."""

    def Classify(self, N, D, Distance):
        """
        Train a linear SVM via the dual QP and evaluate on 100 fresh points.

        :param N: number of training points
        :param D: dimensionality of the data
        :param Distance: separation of the two class means
        :return: (number of correct predictions out of 100,
                  training time in seconds, learned weight vector)
        """
        # Getting training data:
        cp = SimClasses()
        X, Y = cp.GetData(N, D, Distance)
        # Fitting the data. time.clock() was removed in Python 3.8;
        # time.perf_counter() is the documented replacement.
        start = time.perf_counter()
        # penalty on slack variables
        soft_c = 1.0
        # Dual objective (1/2) a^T P a - 1^T a with P_ij = y_i y_j <x_i, x_j>.
        X_tmp = X.getA()
        Y_tmp = Y[:, None]
        K = Y_tmp * X_tmp
        K = K.dot(K.T)
        P = matrix(K)
        q = matrix(-np.ones((N, 1)))
        # Inequality constraints 0 <= alpha_i <= soft_c, stacked as G a <= h.
        tmp1 = np.diag(np.ones(N) * -1)
        tmp2 = np.identity(N)
        G = matrix(np.vstack((tmp1, tmp2)))
        t1_temp = np.zeros(N)
        t2_temp = np.ones(N) * soft_c
        h = matrix(np.hstack((t1_temp, t2_temp)))
        # (the original's `for i in range(N, N)` upper-bound loop was an
        # empty range — dead code removed; h already holds soft_c above)
        # Equality constraint sum(alpha_i * y_i) == 0.
        A = matrix(Y.reshape(1, -1))
        b = matrix(np.zeros(1))
        # Solve!!!
        solvers.options['show_progress'] = False
        sol = solvers.qp(P, q, G, h, A, b)
        # Got alphas
        alphas = np.array(sol['x'])
        # Primal weights w = sum_i alpha_i y_i x_i.
        w = []
        x_tmp = X.getA()
        for d in range(0, D):
            acc = 0  # local accumulator (the original shadowed builtin `sum`)
            for num in range(0, N):
                acc = acc + alphas[num] * Y[num] * x_tmp[num][d]
            w.append(acc)
        # Bias from margin support vectors (0 < alpha < C).
        cond = np.logical_and(alphas > 1e-4, alphas < soft_c)
        cond = cond.reshape(-1)
        b = Y.reshape(N, 1) - X.dot(w)
        b = b[cond]
        # The original wrote `len(b) is 0`, identity-comparing an int; use ==.
        if len(b) == 0:
            bias = 0
        else:
            bias = np.array(b).mean()
        end = time.perf_counter() - start
        parameters = np.array(w)
        # Predicting on a fresh evaluation set of 100 points.
        n_test = 100
        Xte, Yte = cp.GetData(n_test, D, Distance)
        Xte = Xte.reshape(n_test, D)
        result = np.sign(Xte.dot(w) + bias)
        # Count correct predictions.
        accuracy = int((Yte.reshape(n_test, 1) == result).sum())
        return accuracy, end, parameters
# Actual run:
def run_classifier(N, D, Distance, classifier):
    """Run a single classifier experiment on freshly simulated data.

    :param N: number of training points
    :param D: dimensionality of the data
    :param Distance: separation of the two class means
    :param classifier: 'A', 'B' or 'C'; anything else selects Classifier_D
    :return: (accuracy, training time formatted to 4 decimals,
              parameters, parameters) — parameters is returned twice so
        callers can record it under both measurement keys.
    """
    implementations = {'A': Classifier_A, 'B': Classifier_B, 'C': Classifier_C}
    runner = implementations.get(classifier, Classifier_D)()
    accuracy, train_time, parameters = runner.Classify(N, D, Distance)
    return accuracy, format(train_time, '.4f'), parameters, parameters
def TestClassifiers():
    """Run classifiers A-D over sweeps of dimension D, sample count N and
    class Distance, plot accuracy/training-time curves to PDF files and
    pickle the raw results.

    Side effects: writes Plot_<measurement>_<item>.pdf files plus
    Results.pkl and Parameters.pkl into the working directory.
    """
    # Default values
    # measurement 'i' = accuracy, 'ii' = training time (see tag_1 below).
    measurement = ['i', 'ii']
    method = ['A', 'B', 'C', 'D']
    # item 'a' sweeps dimension D, 'b' sweeps sample count N, 'c' sweeps Distance.
    item = ['a', 'b', 'c']
    val_a = [4, 5, 6, 7, 8]
    val_b = [25, 50, 75, 100, 200]
    val_c = [0.01, 0.1, 1, 10, 100]
    # Defaults for N, D, Distance
    N = 100
    D = 3
    Distance = 5
    Result_pkl = {}
    Parameters_pkl = {}
    for i in method:
        for j in item:
            if j == 'a':
                for k in val_a:
                    # run_classifier returns (accuracy, time, params, params);
                    # the duplicated parameters fill both measurement slots.
                    Result_pkl['i', i, j, k], Result_pkl['ii',i,j,k], Parameters_pkl['i',i,j,k], Parameters_pkl['ii',i,j,k] = run_classifier(N, k, Distance, i)
            if j == 'b':
                for k in val_b:
                    Result_pkl['i', i, j, k], Result_pkl['ii',i,j,k], Parameters_pkl['i',i,j,k], Parameters_pkl['ii',i,j,k] = run_classifier(k, D, Distance, i)
            if j == 'c':
                for k in val_c:
                    Result_pkl['i', i, j, k], Result_pkl['ii', i, j, k], Parameters_pkl['i',i,j,k], Parameters_pkl['ii',i,j,k] = run_classifier(N, D, k, i)
    # Data plots
    for i in measurement:
        if i == 'i': tag_1 = 'Accuracy'
        else: tag_1 = 'Training time'
        for j in item:
            # One PDF per (measurement, sweep) pair; one curve per classifier.
            fname = 'Plot_'+i+'_'+j+'.pdf'
            pp = PdfPages(fname)
            if j == 'a':
                tag = 'Fixed: N ='+str(N)+', Distance ='+str(Distance)+', Variable: D'
                for k in method:
                    x = val_a
                    y = []
                    for l in val_a:
                        y.append(Result_pkl[i,k,j,l])
                    plt.plot(val_a, y, label = k)
                    plt.xticks(val_a)
            if j == 'c':
                tag = 'Fixed: N ='+ str(N) + ', D =' + str(D) + ', Variable: Distance'
                for k in method:
                    x = val_c
                    y = []
                    for l in val_c:
                        y.append(Result_pkl[i,k,j,l])
                    plt.plot(val_c, y, label=k)
            if j == 'b':
                tag = 'D =' + str(D) + ', Distance =' + str(Distance) + ', Variable: N'
                for k in method:
                    x = val_b
                    y = []
                    for l in val_b:
                        y.append(Result_pkl[i,k,j,l])
                    plt.plot(val_b, y, label=k)
            plt.ylabel(tag_1)
            plt.xlabel(tag)
            plt.legend()
            pp.savefig()
            plt.clf()
            pp.close()
    # Pickle file
    fname = "Results.pkl"
    with open(fname, 'wb') as handle:
        pickle.dump(Result_pkl, handle, protocol=pickle.HIGHEST_PROTOCOL)
    fname = "Parameters.pkl"
    with open(fname, 'wb') as handle:
        pickle.dump(Parameters_pkl, handle, protocol=pickle.HIGHEST_PROTOCOL)
    return
# Script entry point: runs the full experiment (trains all classifiers,
# writes Plot_*.pdf figures and the Results/Parameters pickle files).
TestClassifiers()
| [
"matplotlib.backends.backend_pdf.PdfPages",
"pickle.dump",
"matplotlib.pyplot.clf",
"numpy.ones",
"sklearn.svm.SVC",
"numpy.identity",
"time.clock",
"cvxopt.solvers.qp",
"matplotlib.pyplot.xticks",
"cvxopt.matrix",
"matplotlib.pyplot.legend",
"numpy.hstack",
"sklearn.linear_model.Perceptron"... | [((478, 489), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (486, 489), True, 'import numpy as np\n'), ((510, 521), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (518, 521), True, 'import numpy as np\n'), ((536, 552), 'numpy.zeros', 'np.zeros', (['(D, D)'], {}), '((D, D))\n', (544, 552), True, 'import numpy as np\n'), ((564, 574), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (571, 574), True, 'import numpy as np\n'), ((1263, 1283), 'numpy.matrix', 'np.matrix', (['xarr_prev'], {}), '(xarr_prev)\n', (1272, 1283), True, 'import numpy as np\n'), ((1541, 1553), 'time.clock', 'time.clock', ([], {}), '()\n', (1551, 1553), False, 'import time\n'), ((1568, 1596), 'sklearn.linear_model.LogisticRegression', 'lm.LogisticRegression', ([], {'C': '(1.0)'}), '(C=1.0)\n', (1589, 1596), True, 'from sklearn import linear_model as lm\n'), ((1900, 1915), 'numpy.ones', 'np.ones', (['(1, N)'], {}), '((1, N))\n', (1907, 1915), True, 'import numpy as np\n'), ((2232, 2247), 'sklearn.linear_model.Perceptron', 'lm.Perceptron', ([], {}), '()\n', (2245, 2247), True, 'from sklearn import linear_model as lm\n'), ((2264, 2276), 'time.clock', 'time.clock', ([], {}), '()\n', (2274, 2276), False, 'import time\n'), ((2570, 2585), 'numpy.ones', 'np.ones', (['(1, N)'], {}), '((1, N))\n', (2577, 2585), True, 'import numpy as np\n'), ((2893, 2905), 'time.clock', 'time.clock', ([], {}), '()\n', (2903, 2905), False, 'import time\n'), ((2920, 2943), 'sklearn.svm.SVC', 'sv.SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (2926, 2943), True, 'from sklearn import svm as sv\n'), ((3241, 3256), 'numpy.ones', 'np.ones', (['(1, N)'], {}), '((1, N))\n', (3248, 3256), True, 'import numpy as np\n'), ((3581, 3593), 'time.clock', 'time.clock', ([], {}), '()\n', (3591, 3593), False, 'import time\n'), ((3997, 4006), 'cvxopt.matrix', 'matrix', (['K'], {}), '(K)\n', (4003, 4006), False, 'from cvxopt import matrix\n'), ((4099, 4113), 'numpy.identity', 
'np.identity', (['N'], {}), '(N)\n', (4110, 4113), True, 'import numpy as np\n'), ((4176, 4187), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (4184, 4187), True, 'import numpy as np\n'), ((4520, 4548), 'cvxopt.solvers.qp', 'solvers.qp', (['P', 'q', 'G', 'h', 'A', 'b'], {}), '(P, q, G, h, A, b)\n', (4530, 4548), False, 'from cvxopt import solvers\n'), ((4587, 4605), 'numpy.array', 'np.array', (["sol['x']"], {}), "(sol['x'])\n", (4595, 4605), True, 'import numpy as np\n'), ((4891, 4939), 'numpy.logical_and', 'np.logical_and', (['(alphas > 0.0001)', '(alphas < soft_c)'], {}), '(alphas > 0.0001, alphas < soft_c)\n', (4905, 4939), True, 'import numpy as np\n'), ((5182, 5193), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (5190, 5193), True, 'import numpy as np\n'), ((5500, 5515), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (5507, 5515), True, 'import numpy as np\n'), ((8759, 8824), 'pickle.dump', 'pickle.dump', (['Result_pkl', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(Result_pkl, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (8770, 8824), False, 'import pickle\n'), ((8901, 8970), 'pickle.dump', 'pickle.dump', (['Parameters_pkl', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(Parameters_pkl, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (8912, 8970), False, 'import pickle\n'), ((1637, 1649), 'time.clock', 'time.clock', ([], {}), '()\n', (1647, 1649), False, 'import time\n'), ((2316, 2328), 'time.clock', 'time.clock', ([], {}), '()\n', (2326, 2328), False, 'import time\n'), ((2983, 2995), 'time.clock', 'time.clock', ([], {}), '()\n', (2993, 2995), False, 'import time\n'), ((4133, 4156), 'numpy.vstack', 'np.vstack', (['(tmp1, tmp2)'], {}), '((tmp1, tmp2))\n', (4142, 4156), True, 'import numpy as np\n'), ((4206, 4216), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (4213, 4216), True, 'import numpy as np\n'), ((4243, 4272), 'numpy.hstack', 'np.hstack', (['(t1_temp, t2_temp)'], {}), '((t1_temp, t2_temp))\n', (4252, 4272), True, 
'import numpy as np\n'), ((4425, 4436), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (4433, 4436), True, 'import numpy as np\n'), ((5140, 5152), 'time.clock', 'time.clock', ([], {}), '()\n', (5150, 5152), False, 'import time\n'), ((7437, 7452), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['fname'], {}), '(fname)\n', (7445, 7452), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((8527, 8544), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['tag_1'], {}), '(tag_1)\n', (8537, 8544), True, 'import matplotlib.pyplot as plt\n'), ((8557, 8572), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['tag'], {}), '(tag)\n', (8567, 8572), True, 'import matplotlib.pyplot as plt\n'), ((8585, 8597), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8595, 8597), True, 'import matplotlib.pyplot as plt\n'), ((8635, 8644), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8642, 8644), True, 'import matplotlib.pyplot as plt\n'), ((930, 975), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_head', 'cov'], {}), '(mean_head, cov)\n', (959, 975), True, 'import numpy as np\n'), ((1042, 1087), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_tail', 'cov'], {}), '(mean_tail, cov)\n', (1071, 1087), True, 'import numpy as np\n'), ((1165, 1193), 'numpy.vstack', 'np.vstack', (['(xarr_prev, xarr)'], {}), '((xarr_prev, xarr))\n', (1174, 1193), True, 'import numpy as np\n'), ((4027, 4042), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (4034, 4042), True, 'import numpy as np\n'), ((4067, 4077), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (4074, 4077), True, 'import numpy as np\n'), ((5106, 5117), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (5114, 5117), True, 'import numpy as np\n'), ((7765, 7792), 'matplotlib.pyplot.plot', 'plt.plot', (['val_a', 'y'], {'label': 'k'}), '(val_a, y, label=k)\n', (7773, 7792), True, 'import matplotlib.pyplot as plt\n'), ((7815, 7832), 
'matplotlib.pyplot.xticks', 'plt.xticks', (['val_a'], {}), '(val_a)\n', (7825, 7832), True, 'import matplotlib.pyplot as plt\n'), ((8145, 8172), 'matplotlib.pyplot.plot', 'plt.plot', (['val_c', 'y'], {'label': 'k'}), '(val_c, y, label=k)\n', (8153, 8172), True, 'import matplotlib.pyplot as plt\n'), ((8486, 8513), 'matplotlib.pyplot.plot', 'plt.plot', (['val_b', 'y'], {'label': 'k'}), '(val_b, y, label=k)\n', (8494, 8513), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import cv2
def get_triangles(image, face_landmark_points, num_points=68):
    """Compute the Delaunay triangulation of the detected face landmarks.

    Parameters
    ----------
    image : ndarray
        Image whose height/width bound the triangulation rectangle.
    face_landmark_points : landmark container
        Must expose ``part(i).x`` / ``part(i).y`` (dlib-style — assumed,
        confirm against caller).
    num_points : int, optional
        Number of landmark points to insert; 68 matches the original
        hard-coded value (standard dlib face model).

    Returns
    -------
    ndarray
        Triangle list as rows of x1, y1, x2, y2, x3, y3.
    """
    # Subdiv2D needs a bounding rect: (top_left_x, top_left_y, width, height).
    rect = (0, 0, image.shape[1], image.shape[0])
    subdiv = cv2.Subdiv2D(rect)
    for i in range(num_points):
        # Each landmark must be inserted as an (x, y) tuple.
        x = face_landmark_points.part(i).x
        y = face_landmark_points.part(i).y
        subdiv.insert((x, y))
    return subdiv.getTriangleList()
def get_triangles_edges_included(image, face_landmark_points, num_points=68):
    """Delaunay triangulation over face landmarks plus 8 image-border anchors.

    Parameters
    ----------
    image : ndarray
        Image whose height/width bound the triangulation rectangle.
    face_landmark_points : landmark container
        Must expose ``part(i).x`` / ``part(i).y`` (dlib-style — assumed,
        confirm against caller).
    num_points : int, optional
        Number of landmark points to insert (68 keeps original behavior).

    Returns
    -------
    (triangles, id_list) : tuple
        ``triangles`` is the Subdiv2D triangle list; ``id_list`` records every
        inserted point in insertion order, so an index < ``num_points`` is a
        face landmark and anything larger is a border point.
    """
    rect = (0, 0, image.shape[1], image.shape[0])
    subdiv = cv2.Subdiv2D(rect)
    ID_list = []

    def _insert(point):
        # Track insertion order so callers can map ids back to points.
        ID_list.append(point)
        subdiv.insert(point)

    for i in range(num_points):
        x = face_landmark_points.part(i).x
        y = face_landmark_points.part(i).y
        _insert((x, y))

    # Border anchors in the same insertion order as the original
    # copy-pasted code: corners and edge midpoints.
    w, h = image.shape[1], image.shape[0]
    border_points = [
        (0, 0),               # top left
        (0, h - 1),           # bottom left
        (0, h // 2),          # medium left
        (w - 1, 0),           # top right
        (w - 1, h - 1),       # bottom right
        (w - 1, h // 2),      # medium right
        (w // 2, 0),          # medium top
        (w // 2, h - 1),      # medium bottom
    ]
    for point in border_points:
        _insert(point)

    img_triangles = subdiv.getTriangleList()
    return (img_triangles, ID_list)
def find_index(id_list, point):
    """Return the position of ``point`` in ``id_list``, or -1 if absent.

    Parameters
    ----------
    id_list : list of (x, y) tuples
    point : (x, y) tuple

    Returns
    -------
    int
        Zero-based index of the first match, or -1 when not found.
    """
    # list.index performs the same left-to-right == scan as the manual loop,
    # but in C; translate the miss case to the original -1 sentinel.
    try:
        return id_list.index(point)
    except ValueError:
        return -1
def get_matched_triangles_for_second_image(points_for_second_image, image1_triangles, id_list):
    """Map each Delaunay triangle of image 1 onto the corresponding points of image 2.

    For every triangle vertex, its landmark/edge index is recovered via
    ``id_list`` (built for image 1). Indices 0-67 are face landmarks and are
    looked up in ``points_for_second_image`` (exposes ``part(i).x/.y``,
    dlib-style — assumed, confirm against caller); indices >= 68 are fixed
    border points and are reused verbatim.

    Returns
    -------
    ndarray
        Array of shape (n_triangles, 6): rows of x1, y1, x2, y2, x3, y3.
    """
    def _index_of(point):
        # Same contract as the module-level find_index: position or -1.
        try:
            return id_list.index(point)
        except ValueError:
            return -1

    img2_triangles_list = []
    for triangle in image1_triangles:
        vertices = [
            (triangle[0], triangle[1]),
            (triangle[2], triangle[3]),
            (triangle[4], triangle[5]),
        ]
        for vertex in vertices:
            id_number = _index_of(vertex)
            if id_number >= 68:  # 0-67 are face points, others are edge points
                # Border point: identical in both images.
                img2_triangles_list.append(id_list[id_number])
            else:
                # Face landmark: take image 2's landmark with the same index.
                p_x = points_for_second_image.part(id_number).x
                p_y = points_for_second_image.part(id_number).y
                img2_triangles_list.append((p_x, p_y))
    # Was hard-coded to (142, 6), which crashed for any other triangle
    # count; -1 lets numpy infer the row count.
    return np.reshape(img2_triangles_list, (-1, 6))
def draw_triangles_on_image(image, triangles, color=(0, 255, 0), thickness=1):
    """Draw every triangle onto ``image`` in place.

    ``triangles`` rows are x1, y1, x2, y2, x3, y3 (``Subdiv2D.getTriangleList``
    yields floats), so coordinates are cast to int as ``cv2.line`` requires
    integer point tuples.

    Parameters
    ----------
    image : ndarray
        Destination image, modified in place.
    triangles : iterable
        Rows of 6 coordinates per triangle.
    color : tuple, optional
        BGR line color (default green, as before).
    thickness : int, optional
        Line thickness in pixels (default 1, as before).
    """
    for triangle in triangles:
        p1 = (int(triangle[0]), int(triangle[1]))
        p2 = (int(triangle[2]), int(triangle[3]))
        p3 = (int(triangle[4]), int(triangle[5]))
        cv2.line(image, p1, p2, color, thickness)
        cv2.line(image, p1, p3, color, thickness)
        cv2.line(image, p2, p3, color, thickness)
"cv2.Subdiv2D",
"numpy.reshape",
"cv2.line"
] | [((299, 317), 'cv2.Subdiv2D', 'cv2.Subdiv2D', (['rect'], {}), '(rect)\n', (311, 317), False, 'import cv2\n'), ((1109, 1127), 'cv2.Subdiv2D', 'cv2.Subdiv2D', (['rect'], {}), '(rect)\n', (1121, 1127), False, 'import cv2\n'), ((4477, 4518), 'numpy.reshape', 'np.reshape', (['img2_triangles_list', '(142, 6)'], {}), '(img2_triangles_list, (142, 6))\n', (4487, 4518), True, 'import numpy as np\n'), ((4908, 4953), 'cv2.line', 'cv2.line', (['image', 'pt[0]', 'pt[1]', '(0, 255, 0)', '(1)'], {}), '(image, pt[0], pt[1], (0, 255, 0), 1)\n', (4916, 4953), False, 'import cv2\n'), ((4960, 5005), 'cv2.line', 'cv2.line', (['image', 'pt[0]', 'pt[2]', '(0, 255, 0)', '(1)'], {}), '(image, pt[0], pt[2], (0, 255, 0), 1)\n', (4968, 5005), False, 'import cv2\n'), ((5012, 5057), 'cv2.line', 'cv2.line', (['image', 'pt[1]', 'pt[2]', '(0, 255, 0)', '(1)'], {}), '(image, pt[1], pt[2], (0, 255, 0), 1)\n', (5020, 5057), False, 'import cv2\n')] |
import numpy as np
from typing import Tuple, List
def _get_meanface(
        meanface_string: str,
        num_nb: int = 10
) -> Tuple[List[int], List[int], List[int], int, int]:
    """Parse a mean-face string and build nearest-neighbor index tables.

    :param meanface_string: whitespace-separated coords
        "x0 y0 x1 y1 ... xn-1 yn-1" (normalized or un-normalized).
    :param num_nb: number of Nearest-neighbor landmarks for NRM, default 10.
    :return: (meanface_indices, reverse_index1, reverse_index2, max_len,
        meanface_lms)
    """
    # str.split() with no argument tolerates any amount of leading/trailing
    # or repeated whitespace (the old .strip().split(" ") crashed on it).
    meanface = np.array([float(v) for v in meanface_string.split()]).reshape(-1, 2)
    meanface_lms = meanface.shape[0]

    # Each landmark predicts its num_nb nearest neighbors (squared distance;
    # index 0 of the argsort is the point itself and is skipped).
    meanface_indices = []
    for i in range(meanface_lms):
        dists = np.sum(np.power(meanface[i, :] - meanface, 2), axis=1)
        order = np.argsort(dists)
        meanface_indices.append(order[1:1 + num_nb])

    # Invert the mapping: for landmark i, collect the (landmark, slot) pairs
    # that predict it. Each landmark is predicted by a varying number of others.
    meanface_indices_reversed = {i: [[], []] for i in range(meanface_lms)}
    for i in range(meanface_lms):
        for j in range(num_nb):
            target = meanface_indices[i][j]
            meanface_indices_reversed[target][0].append(i)
            meanface_indices_reversed[target][1].append(j)

    max_len = max(len(meanface_indices_reversed[i][0]) for i in range(meanface_lms))

    # Pad every list to max_len by cyclic repetition (tile x11, then cut) so
    # downstream code can treat them as fixed-width rows — same trick as before.
    for i in range(meanface_lms):
        rows = meanface_indices_reversed[i]
        rows[0] = (rows[0] + rows[0] * 10)[:max_len]
        rows[1] = (rows[1] + rows[1] * 10)[:max_len]

    # Flatten to 1-dim: [landmark0's max_len entries, landmark1's, ...].
    reverse_index1: List[int] = []
    reverse_index2: List[int] = []
    for i in range(meanface_lms):
        reverse_index1 += meanface_indices_reversed[i][0]
        reverse_index2 += meanface_indices_reversed[i][1]
    return meanface_indices, reverse_index1, reverse_index2, max_len, meanface_lms
def _normalize(
        img: np.ndarray
) -> np.ndarray:
    """
    :param img: source image, RGB with HWC layout and range [0, 255]
    :return: ImageNet-normalized image as a CHW float32 array for PIPNet
    """
    means = (0.485, 0.456, 0.406)
    stds = (0.229, 0.224, 0.225)
    img = img.astype(np.float32)
    img /= 255.
    # Per-channel in-place normalization, same ops as the unrolled version.
    for channel, (m, s) in enumerate(zip(means, stds)):
        img[:, :, channel] -= m
        img[:, :, channel] /= s
    # HWC -> CHW
    img = img.transpose((2, 0, 1))
    return img.astype(np.float32)
| [
"numpy.argsort",
"numpy.power",
"numpy.array"
] | [((909, 926), 'numpy.argsort', 'np.argsort', (['dists'], {}), '(dists)\n', (919, 926), True, 'import numpy as np\n'), ((622, 640), 'numpy.array', 'np.array', (['meanface'], {}), '(meanface)\n', (630, 640), True, 'import numpy as np\n'), ((855, 881), 'numpy.power', 'np.power', (['(pt - meanface)', '(2)'], {}), '(pt - meanface, 2)\n', (863, 881), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 1 15:52:48 2018
@author: esteban
"""
import numpy as np
import solver as sol
import matplotlib.pyplot as plt
import matplotlib as mpl
label_size = 16
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['font.size'] = label_size
def predefined1(x, q, Tc):
    # -1/(q*Tc) * exp(|x|^q) * sgn(x)|x|^(1-q)
    coeff = -1 / (q * Tc)
    return coeff * np.exp(np.abs(x) ** q) * sol.odd_pow(x, 1 - q)
def predefined2(x, q, Tc):
    # -pi/(2*q*Tc) * (sgn(x)|x|^(1+q) + sgn(x)|x|^(1-q))
    coeff = -np.pi / (2 * q * Tc)
    return coeff * (sol.odd_pow(x, 1 + q) + sol.odd_pow(x, 1 - q))
def predefined3(x, q, a, Tc):
    # -1/(a*q*Tc) * (|x|^q + a)^2 * sgn(x)|x|^(1-q)
    coeff = -1 / (a * q * Tc)
    return coeff * (np.abs(x) ** q + a) ** 2 * sol.odd_pow(x, 1 - q)
def predefined4(x, q, a, Tc):
    # -Gamma(a)/(q*Tc) * exp(|x|^q) * sgn(x)|x|^(1-a*q)
    from scipy.special import gamma
    coeff = -gamma(a) / (q * Tc)
    return coeff * np.exp(np.abs(x) ** q) * sol.odd_pow(x, 1 - a * q)
def system(t, x):
    """Right-hand side of the perturbed ODE: dx/dt = f(x) - zeta*sgn(x) + Delta(t)."""
    # Sinusoidal disturbance with period 5 (module-level np/sol are in scope,
    # so the redundant function-local imports were dropped).
    Delta = np.sin(2 * np.pi * t / 5)
    q, Tc, zeta = 0.3, 1, 1
    a = 1
    return predefined4(x, q, a, Tc) - zeta * sol.odd_pow(x, 0) + Delta
# Simulation setup: time span [t0, tf], step size h, counter i for settling times.
t0, tf, h, i = 0, 1.2, 1e-5, 0
# Initial conditions spanning four decades: |x0| in [1e-1, 1e3].
xx0 = np.logspace(-1, 3, 5)
T_x0 = np.zeros(xx0.size)
plt.figure(figsize=(8,6), num=1)
plt.figure(figsize=(8,6), num=2)
for x0 in xx0:
    # Integrate the system from each initial condition with solver.ode1.
    t, x = sol.ode1(system, x0, t0, tf, h)
    if x0>=0:
        # Settling time: index of the first sample with |x| below the
        # threshold, converted to time by the step size h.
        T_x0[i] = np.argmax(np.abs(x)<1.5e-4)*h
        i += 1
    plt.figure(num=1)
    plt.plot(t, x[0], color=0*np.ones(3))
# Trajectories
plt.figure(num=1)
plt.ylim(-3, 5)
plt.xlim(0, 1.2)
plt.xlabel('$t$', fontsize = 18)
plt.ylabel('$x(t,x_0)$', fontsize = 18)
plt.axvline(x = 1, ymin = -1, ymax = 2, linestyle='dashed', color = 0.3*np.ones(3))
plt.grid()
plt.savefig('figures/basic.eps', bbox_inches='tight', format='eps', dpi=1500)
# Settling-time function figure
plt.figure(num=2)
plt.semilogx(xx0, T_x0, 'k', lw=2)
plt.grid()
plt.xlabel('|$x_0$|', fontsize = 18)
plt.ylabel('$T(x_0)$', fontsize = 18)
plt.axhline(y = 1, xmin = -1, xmax = 2, linestyle='dashed', color = 0.3*np.ones(3))
plt.ylim(0, 1.2)
plt.savefig('figures/settling_basic.eps', bbox_inches='tight', format='eps', dpi=1500)
"matplotlib.pyplot.xlim",
"numpy.abs",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.semilogx",
"numpy.logspace",
"solver.odd_pow",
"scipy.special.gamma",
"numpy.zeros",
"solver.ode1",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.sin",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlab... | [((967, 988), 'numpy.logspace', 'np.logspace', (['(-1)', '(3)', '(5)'], {}), '(-1, 3, 5)\n', (978, 988), True, 'import numpy as np\n'), ((996, 1014), 'numpy.zeros', 'np.zeros', (['xx0.size'], {}), '(xx0.size)\n', (1004, 1014), True, 'import numpy as np\n'), ((1015, 1048), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)', 'num': '(1)'}), '(figsize=(8, 6), num=1)\n', (1025, 1048), True, 'import matplotlib.pyplot as plt\n'), ((1048, 1081), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)', 'num': '(2)'}), '(figsize=(8, 6), num=2)\n', (1058, 1081), True, 'import matplotlib.pyplot as plt\n'), ((1310, 1327), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(1)'}), '(num=1)\n', (1320, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1328, 1343), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3)', '(5)'], {}), '(-3, 5)\n', (1336, 1343), True, 'import matplotlib.pyplot as plt\n'), ((1344, 1360), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1.2)'], {}), '(0, 1.2)\n', (1352, 1360), True, 'import matplotlib.pyplot as plt\n'), ((1361, 1391), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t$"""'], {'fontsize': '(18)'}), "('$t$', fontsize=18)\n", (1371, 1391), True, 'import matplotlib.pyplot as plt\n'), ((1394, 1431), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$x(t,x_0)$"""'], {'fontsize': '(18)'}), "('$x(t,x_0)$', fontsize=18)\n", (1404, 1431), True, 'import matplotlib.pyplot as plt\n'), ((1518, 1528), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1526, 1528), True, 'import matplotlib.pyplot as plt\n'), ((1529, 1606), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/basic.eps"""'], {'bbox_inches': '"""tight"""', 'format': '"""eps"""', 'dpi': '(1500)'}), "('figures/basic.eps', bbox_inches='tight', format='eps', dpi=1500)\n", (1540, 1606), True, 'import matplotlib.pyplot as plt\n'), ((1640, 1657), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(2)'}), '(num=2)\n', 
(1650, 1657), True, 'import matplotlib.pyplot as plt\n'), ((1658, 1692), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['xx0', 'T_x0', '"""k"""'], {'lw': '(2)'}), "(xx0, T_x0, 'k', lw=2)\n", (1670, 1692), True, 'import matplotlib.pyplot as plt\n'), ((1693, 1703), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1701, 1703), True, 'import matplotlib.pyplot as plt\n'), ((1704, 1738), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""|$x_0$|"""'], {'fontsize': '(18)'}), "('|$x_0$|', fontsize=18)\n", (1714, 1738), True, 'import matplotlib.pyplot as plt\n'), ((1741, 1776), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$T(x_0)$"""'], {'fontsize': '(18)'}), "('$T(x_0)$', fontsize=18)\n", (1751, 1776), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1879), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.2)'], {}), '(0, 1.2)\n', (1871, 1879), True, 'import matplotlib.pyplot as plt\n'), ((1880, 1970), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/settling_basic.eps"""'], {'bbox_inches': '"""tight"""', 'format': '"""eps"""', 'dpi': '(1500)'}), "('figures/settling_basic.eps', bbox_inches='tight', format='eps',\n dpi=1500)\n", (1891, 1970), True, 'import matplotlib.pyplot as plt\n'), ((807, 832), 'numpy.sin', 'np.sin', (['(2 * np.pi * t / 5)'], {}), '(2 * np.pi * t / 5)\n', (813, 832), True, 'import numpy as np\n'), ((1108, 1139), 'solver.ode1', 'sol.ode1', (['system', 'x0', 't0', 'tf', 'h'], {}), '(system, x0, t0, tf, h)\n', (1116, 1139), True, 'import solver as sol\n'), ((1226, 1243), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(1)'}), '(num=1)\n', (1236, 1243), True, 'import matplotlib.pyplot as plt\n'), ((377, 398), 'solver.odd_pow', 'sol.odd_pow', (['x', '(1 - q)'], {}), '(x, 1 - q)\n', (388, 398), True, 'import solver as sol\n'), ((570, 591), 'solver.odd_pow', 'sol.odd_pow', (['x', '(1 - q)'], {}), '(x, 1 - q)\n', (581, 591), True, 'import solver as sol\n'), ((706, 731), 'solver.odd_pow', 'sol.odd_pow', (['x', '(1 - a * q)'], {}), 
'(x, 1 - a * q)\n', (717, 731), True, 'import solver as sol\n'), ((453, 474), 'solver.odd_pow', 'sol.odd_pow', (['x', '(1 + q)'], {}), '(x, 1 + q)\n', (464, 474), True, 'import solver as sol\n'), ((475, 496), 'solver.odd_pow', 'sol.odd_pow', (['x', '(1 - q)'], {}), '(x, 1 - q)\n', (486, 496), True, 'import solver as sol\n'), ((1506, 1516), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1513, 1516), True, 'import numpy as np\n'), ((1851, 1861), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1858, 1861), True, 'import numpy as np\n'), ((903, 920), 'solver.odd_pow', 'sol.odd_pow', (['x', '(0)'], {}), '(x, 0)\n', (914, 920), True, 'import solver as sol\n'), ((1274, 1284), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1281, 1284), True, 'import numpy as np\n'), ((363, 372), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (369, 372), True, 'import numpy as np\n'), ((669, 677), 'scipy.special.gamma', 'gamma', (['a'], {}), '(a)\n', (674, 677), False, 'from scipy.special import gamma\n'), ((692, 701), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (698, 701), True, 'import numpy as np\n'), ((1182, 1191), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1188, 1191), True, 'import numpy as np\n'), ((551, 560), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (557, 560), True, 'import numpy as np\n')] |
"""Custom pandas accessors.
Methods can be accessed as follows:
* `ReturnsSRAccessor` -> `pd.Series.vbt.returns.*`
* `ReturnsDFAccessor` -> `pd.DataFrame.vbt.returns.*`
```python-repl
>>> import numpy as np
>>> import pandas as pd
>>> import vectorbt as vbt
>>> # vectorbt.returns.accessors.ReturnsAccessor.total
>>> price = pd.Series([1.1, 1.2, 1.3, 1.2, 1.1])
>>> returns = price.pct_change()
>>> returns.vbt.returns.total()
0.0
```
The accessors extend `vectorbt.generic.accessors`.
```python-repl
>>> # inherited from GenericAccessor
>>> returns.vbt.returns.max()
0.09090909090909083
```
!!! note
The underlying Series/DataFrame must already be a return series.
To convert price to returns, use `ReturnsAccessor.from_price`.
Here are some commonly used arguments:
* `start_value`: The initial value that cumulative returns start from.
* `window`: Window length.
* `minp`: Minimum number of observations in window required to have a value.
* `ddof`: Delta Degrees of Freedom, used when computing the standard deviation.
* `risk_free`: Constant risk-free return throughout the period.
* `levy_alpha`: Scaling relation (Levy stability exponent).
* `required_return`: Minimum acceptance return of the investor.
* `cutoff`: Decimal representing the percentage cutoff for the bottom percentile of returns.
* `benchmark_rets`: Benchmark return to compare returns against.
"""
import numpy as np
import pandas as pd
from scipy.stats import skew, kurtosis
from vectorbt import _typing as tp
from vectorbt.root_accessors import register_dataframe_accessor, register_series_accessor
from vectorbt.utils import checks
from vectorbt.utils.config import merge_dicts
from vectorbt.utils.figure import make_figure, get_domain
from vectorbt.utils.decorators import cached_property, cached_method
from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to
from vectorbt.generic.drawdowns import Drawdowns
from vectorbt.generic.accessors import (
GenericAccessor,
GenericSRAccessor,
GenericDFAccessor
)
from vectorbt.utils.datetime import freq_to_timedelta, DatetimeIndexes
from vectorbt.returns import nb, metrics
# Type variable bound to `ReturnsAccessor`, so classmethods such as
# `from_price` can declare that they return the invoking subclass.
ReturnsAccessorT = tp.TypeVar("ReturnsAccessorT", bound="ReturnsAccessor")
class ReturnsAccessor(GenericAccessor):
"""Accessor on top of return series. For both, Series and DataFrames.
Accessible through `pd.Series.vbt.returns` and `pd.DataFrame.vbt.returns`.
Args:
obj (pd.Series or pd.DataFrame): Pandas object.
year_freq (any): Year frequency for annualization purposes.
**kwargs: Keyword arguments that overwrite `ReturnsAccessor.settings`
or otherwise are passed down to `vectorbt.generic.accessors.GenericAccessor`."""
def __init__(self, obj: tp.SeriesFrame, year_freq: tp.Optional[tp.FrequencyLike] = None, **kwargs) -> None:
if not checks.is_pandas(obj): # parent accessor
obj = obj._obj
# Set defaults
self._year_freq = year_freq
self._defaults = {}
for k in list(self.defaults.keys()):
if k in kwargs:
self._defaults[k] = kwargs.pop(k)
GenericAccessor.__init__(self, obj, **kwargs)
@classmethod
def from_price(cls: tp.Type[ReturnsAccessorT], price: tp.SeriesFrame, **kwargs) -> ReturnsAccessorT:
"""Returns a new `ReturnsAccessor` instance with returns from `price`."""
return cls(price.vbt.pct_change(), **kwargs)
@property
def year_freq(self) -> tp.Optional[pd.Timedelta]:
"""Year frequency for annualization purposes."""
if self._year_freq is None:
from vectorbt._settings import settings
returns_cfg = settings['returns']
return freq_to_timedelta(returns_cfg['year_freq'])
return freq_to_timedelta(self._year_freq)
@property
def ann_factor(self) -> float:
"""Get annualization factor."""
if self.wrapper.freq is None:
raise ValueError("Index frequency could not be parsed. "
"Pass it as `freq` or define it globally under `settings.array_wrapper`.")
if self.year_freq is None:
raise ValueError("Year frequency is not known. "
"Pass `year_freq` or define it globally under `settings.returns`.")
return self.year_freq / self.wrapper.freq
@property
def defaults(self) -> tp.Kwargs:
"""Defaults for `ReturnsAccessor`.
Gets overridden/extended by `kwargs` from `ReturnsAccessor.__init__`."""
from vectorbt._settings import settings
returns_cfg = settings['returns']
return merge_dicts(
dict(
start_value=returns_cfg['start_value'],
window=returns_cfg['window'],
minp=returns_cfg['minp'],
ddof=returns_cfg['ddof'],
risk_free=returns_cfg['risk_free'],
levy_alpha=returns_cfg['levy_alpha'],
required_return=returns_cfg['required_return'],
cutoff=returns_cfg['cutoff']
),
self._defaults
)
def daily(self, **kwargs) -> tp.SeriesFrame:
"""Daily returns."""
checks.assert_type(self.wrapper.index, DatetimeIndexes)
if self.wrapper.freq == pd.Timedelta('1D'):
return self.obj
return self.resample_apply('1D', nb.total_return_apply_nb, **kwargs)
def annual(self, **kwargs) -> tp.SeriesFrame:
"""Annual returns."""
checks.assert_type(self.obj.index, DatetimeIndexes)
if self.wrapper.freq == self.year_freq:
return self.obj
return self.resample_apply(self.year_freq, nb.total_return_apply_nb, **kwargs)
def cumulative(self,
start_value: tp.Optional[float] = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""Cumulative returns."""
if start_value is None:
start_value = self.defaults['start_value']
cumulative = nb.cum_returns_nb(self.to_2d_array(), start_value)
wrap_kwargs = merge_dicts({}, wrap_kwargs)
return self.wrapper.wrap(cumulative, **wrap_kwargs)
def total(self, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Total return."""
result = nb.cum_returns_final_nb(self.to_2d_array(), 0.)
wrap_kwargs = merge_dicts(dict(name_or_index='total_return'), wrap_kwargs)
return self.wrapper.wrap_reduced(result, **wrap_kwargs)
def rolling_total(self,
window: tp.Optional[int] = None,
minp: tp.Optional[int] = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""Rolling version of `ReturnsAccessor.total`."""
if window is None:
window = self.defaults['window']
if minp is None:
minp = self.defaults['minp']
result = nb.rolling_cum_returns_final_nb(self.to_2d_array(), window, minp, 0.)
wrap_kwargs = merge_dicts({}, wrap_kwargs)
return self.wrapper.wrap(result, **wrap_kwargs)
def annualized(self, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Mean annual growth rate of returns.
This is equivalent to the compound annual growth rate."""
result = nb.annualized_return_nb(self.to_2d_array(), self.ann_factor)
wrap_kwargs = merge_dicts(dict(name_or_index='annualized_return'), wrap_kwargs)
return self.wrapper.wrap_reduced(result, **wrap_kwargs)
def rolling_annualized(self,
window: tp.Optional[int] = None,
minp: tp.Optional[int] = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
"""Rolling version of `ReturnsAccessor.annualized`."""
if window is None:
window = self.defaults['window']
if minp is None:
minp = self.defaults['minp']
result = nb.rolling_annualized_return_nb(self.to_2d_array(), window, minp, self.ann_factor)
wrap_kwargs = merge_dicts({}, wrap_kwargs)
return self.wrapper.wrap(result, **wrap_kwargs)
def annualized_volatility(self,
levy_alpha: tp.Optional[float] = None,
ddof: tp.Optional[int] = None,
wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
"""Annualized volatility of a strategy."""
if levy_alpha is None:
levy_alpha = self.defaults['levy_alpha']
if ddof is None:
ddof = self.defaults['ddof']
result = nb.annualized_volatility_nb(self.to_2d_array(), self.ann_factor, levy_alpha, ddof)
wrap_kwargs = merge_dicts(dict(name_or_index='annualized_volatility'), wrap_kwargs)
return self.wrapper.wrap_reduced(result, **wrap_kwargs)
def rolling_annualized_volatility(self,
                                  window: tp.Optional[int] = None,
                                  minp: tp.Optional[int] = None,
                                  levy_alpha: tp.Optional[float] = None,
                                  ddof: tp.Optional[int] = None,
                                  wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.annualized_volatility` over a rolling window.

    Missing arguments fall back to `ReturnsAccessor.defaults`."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    levy_alpha = self.defaults['levy_alpha'] if levy_alpha is None else levy_alpha
    ddof = self.defaults['ddof'] if ddof is None else ddof
    out = nb.rolling_annualized_volatility_nb(
        self.to_2d_array(), window, minp, self.ann_factor, levy_alpha, ddof)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def calmar_ratio(self, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Calmar ratio (drawdown ratio) of a strategy."""
    out = nb.calmar_ratio_nb(self.to_2d_array(), self.ann_factor)
    kwargs = merge_dicts(dict(name_or_index='calmar_ratio'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_calmar_ratio(self,
                         window: tp.Optional[int] = None,
                         minp: tp.Optional[int] = None,
                         wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.calmar_ratio` over a rolling window.

    `window` and `minp` fall back to `ReturnsAccessor.defaults` when None."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    out = nb.rolling_calmar_ratio_nb(self.to_2d_array(), window, minp, self.ann_factor)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def omega_ratio(self,
                risk_free: tp.Optional[float] = None,
                required_return: tp.Optional[float] = None,
                wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Omega ratio of a strategy.

    `risk_free` and `required_return` fall back to `ReturnsAccessor.defaults`."""
    risk_free = self.defaults['risk_free'] if risk_free is None else risk_free
    required_return = self.defaults['required_return'] if required_return is None else required_return
    out = nb.omega_ratio_nb(self.to_2d_array(), self.ann_factor, risk_free, required_return)
    kwargs = merge_dicts(dict(name_or_index='omega_ratio'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_omega_ratio(self,
                        window: tp.Optional[int] = None,
                        minp: tp.Optional[int] = None,
                        risk_free: tp.Optional[float] = None,
                        required_return: tp.Optional[float] = None,
                        wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.omega_ratio` over a rolling window.

    Missing arguments fall back to `ReturnsAccessor.defaults`."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    risk_free = self.defaults['risk_free'] if risk_free is None else risk_free
    required_return = self.defaults['required_return'] if required_return is None else required_return
    out = nb.rolling_omega_ratio_nb(
        self.to_2d_array(), window, minp, self.ann_factor, risk_free, required_return)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def sharpe_ratio(self,
                 risk_free: tp.Optional[float] = None,
                 ddof: tp.Optional[int] = None,
                 wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Sharpe ratio of a strategy.

    `risk_free` and `ddof` fall back to `ReturnsAccessor.defaults` when None."""
    risk_free = self.defaults['risk_free'] if risk_free is None else risk_free
    ddof = self.defaults['ddof'] if ddof is None else ddof
    out = nb.sharpe_ratio_nb(self.to_2d_array(), self.ann_factor, risk_free, ddof)
    kwargs = merge_dicts(dict(name_or_index='sharpe_ratio'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_sharpe_ratio(self,
                         window: tp.Optional[int] = None,
                         minp: tp.Optional[int] = None,
                         risk_free: tp.Optional[float] = None,
                         ddof: tp.Optional[int] = None,
                         wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.sharpe_ratio` over a rolling window.

    Missing arguments fall back to `ReturnsAccessor.defaults`."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    risk_free = self.defaults['risk_free'] if risk_free is None else risk_free
    ddof = self.defaults['ddof'] if ddof is None else ddof
    out = nb.rolling_sharpe_ratio_nb(self.to_2d_array(), window, minp, self.ann_factor, risk_free, ddof)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def deflated_sharpe_ratio(self,
                          risk_free: tp.Optional[float] = None,
                          ddof: tp.Optional[int] = None,
                          var_sharpe: tp.Optional[float] = None,
                          nb_trials: tp.Optional[int] = None,
                          bias: bool = True,
                          wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Deflated Sharpe Ratio (DSR).

    Expresses the chance that the advertised strategy has a positive Sharpe ratio.

    If `var_sharpe` is None, is calculated based on all columns.
    If `nb_trials` is None, is set to the number of columns.
    `bias` is forwarded to `scipy.stats.skew`/`kurtosis`."""
    if risk_free is None:
        risk_free = self.defaults['risk_free']
    if ddof is None:
        ddof = self.defaults['ddof']
    # Per-column Sharpe ratios as a raw 1-dim array
    sharpe_ratio = to_1d(self.sharpe_ratio(risk_free=risk_free), raw=True)
    if var_sharpe is None:
        # Variance of the Sharpe estimates across all columns (treated as trials)
        var_sharpe = np.var(sharpe_ratio, ddof=ddof)
    if nb_trials is None:
        nb_trials = self.wrapper.shape_2d[1]
    returns = to_2d(self.obj, raw=True)
    nanmask = np.isnan(returns)
    if nanmask.any():
        # Copy before zero-filling NaNs so the accessor's underlying data stays intact
        returns = returns.copy()
        returns[nanmask] = 0.
    result = metrics.deflated_sharpe_ratio(
        # Sharpe and its variance are scaled back by the annualization factor
        # before being handed to the metric — presumably de-annualizing; confirm
        # against the `metrics.deflated_sharpe_ratio` contract.
        est_sharpe=sharpe_ratio / np.sqrt(self.ann_factor),
        var_sharpe=var_sharpe / self.ann_factor,
        nb_trials=nb_trials,
        backtest_horizon=self.wrapper.shape_2d[0],
        skew=skew(returns, axis=0, bias=bias),
        kurtosis=kurtosis(returns, axis=0, bias=bias)
    )
    wrap_kwargs = merge_dicts(dict(name_or_index='deflated_sharpe_ratio'), wrap_kwargs)
    return self.wrapper.wrap_reduced(result, **wrap_kwargs)
def downside_risk(self,
                  required_return: tp.Optional[float] = None,
                  wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Downside deviation below the `required_return` threshold.

    `required_return` falls back to `ReturnsAccessor.defaults` when None."""
    required_return = self.defaults['required_return'] if required_return is None else required_return
    out = nb.downside_risk_nb(self.to_2d_array(), self.ann_factor, required_return)
    kwargs = merge_dicts(dict(name_or_index='downside_risk'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_downside_risk(self,
                          window: tp.Optional[int] = None,
                          minp: tp.Optional[int] = None,
                          required_return: tp.Optional[float] = None,
                          wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.downside_risk` over a rolling window.

    Missing arguments fall back to `ReturnsAccessor.defaults`."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    required_return = self.defaults['required_return'] if required_return is None else required_return
    out = nb.rolling_downside_risk_nb(self.to_2d_array(), window, minp, self.ann_factor, required_return)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def sortino_ratio(self,
                  required_return: tp.Optional[float] = None,
                  wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Sortino ratio of a strategy.

    `required_return` falls back to `ReturnsAccessor.defaults` when None."""
    required_return = self.defaults['required_return'] if required_return is None else required_return
    out = nb.sortino_ratio_nb(self.to_2d_array(), self.ann_factor, required_return)
    kwargs = merge_dicts(dict(name_or_index='sortino_ratio'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_sortino_ratio(self,
                          window: tp.Optional[int] = None,
                          minp: tp.Optional[int] = None,
                          required_return: tp.Optional[float] = None,
                          wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.sortino_ratio` over a rolling window.

    Missing arguments fall back to `ReturnsAccessor.defaults`."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    required_return = self.defaults['required_return'] if required_return is None else required_return
    out = nb.rolling_sortino_ratio_nb(self.to_2d_array(), window, minp, self.ann_factor, required_return)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def information_ratio(self,
                      benchmark_rets: tp.ArrayLike,
                      ddof: tp.Optional[int] = None,
                      wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Information ratio of a strategy against `benchmark_rets`.

    `ddof` falls back to `ReturnsAccessor.defaults` when None."""
    ddof = self.defaults['ddof'] if ddof is None else ddof
    # Align benchmark with the accessor's data before passing raw arrays to numba
    bm_arr = broadcast_to(to_2d(benchmark_rets, raw=True), to_2d(self.obj, raw=True))
    out = nb.information_ratio_nb(self.to_2d_array(), bm_arr, ddof)
    kwargs = merge_dicts(dict(name_or_index='information_ratio'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_information_ratio(self,
                              benchmark_rets: tp.ArrayLike,
                              window: tp.Optional[int] = None,
                              minp: tp.Optional[int] = None,
                              ddof: tp.Optional[int] = None,
                              wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.information_ratio` over a rolling window.

    Missing arguments fall back to `ReturnsAccessor.defaults`."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    ddof = self.defaults['ddof'] if ddof is None else ddof
    # Align benchmark with the accessor's data before passing raw arrays to numba
    bm_arr = broadcast_to(to_2d(benchmark_rets, raw=True), to_2d(self.obj, raw=True))
    out = nb.rolling_information_ratio_nb(self.to_2d_array(), window, minp, bm_arr, ddof)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def beta(self, benchmark_rets: tp.ArrayLike, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Beta of the returns relative to `benchmark_rets`."""
    bm_arr = broadcast_to(to_2d(benchmark_rets, raw=True), to_2d(self.obj, raw=True))
    out = nb.beta_nb(self.to_2d_array(), bm_arr)
    kwargs = merge_dicts(dict(name_or_index='beta'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_beta(self,
                 benchmark_rets: tp.ArrayLike,
                 window: tp.Optional[int] = None,
                 minp: tp.Optional[int] = None,
                 wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.beta` over a rolling window.

    `window` and `minp` fall back to `ReturnsAccessor.defaults` when None."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    bm_arr = broadcast_to(to_2d(benchmark_rets, raw=True), to_2d(self.obj, raw=True))
    out = nb.rolling_beta_nb(self.to_2d_array(), window, minp, bm_arr)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def alpha(self,
          benchmark_rets: tp.ArrayLike,
          risk_free: tp.Optional[float] = None,
          wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Annualized alpha relative to `benchmark_rets`.

    `risk_free` falls back to `ReturnsAccessor.defaults` when None."""
    risk_free = self.defaults['risk_free'] if risk_free is None else risk_free
    bm_arr = broadcast_to(to_2d(benchmark_rets, raw=True), to_2d(self.obj, raw=True))
    out = nb.alpha_nb(self.to_2d_array(), bm_arr, self.ann_factor, risk_free)
    kwargs = merge_dicts(dict(name_or_index='alpha'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_alpha(self,
                  benchmark_rets: tp.ArrayLike,
                  window: tp.Optional[int] = None,
                  minp: tp.Optional[int] = None,
                  risk_free: tp.Optional[float] = None,
                  wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.alpha` over a rolling window.

    Missing arguments fall back to `ReturnsAccessor.defaults`."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    risk_free = self.defaults['risk_free'] if risk_free is None else risk_free
    bm_arr = broadcast_to(to_2d(benchmark_rets, raw=True), to_2d(self.obj, raw=True))
    out = nb.rolling_alpha_nb(self.to_2d_array(), window, minp, bm_arr, self.ann_factor, risk_free)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def tail_ratio(self, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Ratio of the right tail (95th percentile) to the left tail (5th percentile)."""
    out = nb.tail_ratio_nb(self.to_2d_array())
    kwargs = merge_dicts(dict(name_or_index='tail_ratio'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_tail_ratio(self,
                       window: tp.Optional[int] = None,
                       minp: tp.Optional[int] = None,
                       wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.tail_ratio` over a rolling window.

    `window` and `minp` fall back to `ReturnsAccessor.defaults` when None."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    out = nb.rolling_tail_ratio_nb(self.to_2d_array(), window, minp)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def common_sense_ratio(self, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Common Sense Ratio: tail ratio multiplied by one plus the annualized return."""
    tail = to_1d(self.tail_ratio(), raw=True)
    ann = to_1d(self.annualized(), raw=True)
    out = tail * (1 + ann)
    kwargs = merge_dicts(dict(name_or_index='common_sense_ratio'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_common_sense_ratio(self,
                               window: tp.Optional[int] = None,
                               minp: tp.Optional[int] = None,
                               wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.common_sense_ratio` over a rolling window.

    `window` and `minp` fall back to `ReturnsAccessor.defaults` when None."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    # Combine the two rolling components element-wise
    rolling_tail = to_2d(self.rolling_tail_ratio(window, minp=minp), raw=True)
    rolling_ann = to_2d(self.rolling_annualized(window, minp=minp), raw=True)
    out = rolling_tail * (1 + rolling_ann)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def value_at_risk(self,
                  cutoff: tp.Optional[float] = None,
                  wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Value at risk (VaR) of a returns stream.

    `cutoff` falls back to `ReturnsAccessor.defaults` when None."""
    cutoff = self.defaults['cutoff'] if cutoff is None else cutoff
    out = nb.value_at_risk_nb(self.to_2d_array(), cutoff)
    kwargs = merge_dicts(dict(name_or_index='value_at_risk'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_value_at_risk(self,
                          window: tp.Optional[int] = None,
                          minp: tp.Optional[int] = None,
                          cutoff: tp.Optional[float] = None,
                          wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.value_at_risk` over a rolling window.

    Missing arguments fall back to `ReturnsAccessor.defaults`."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    cutoff = self.defaults['cutoff'] if cutoff is None else cutoff
    out = nb.rolling_value_at_risk_nb(self.to_2d_array(), window, minp, cutoff)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def cond_value_at_risk(self,
                       cutoff: tp.Optional[float] = None,
                       wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Conditional value at risk (CVaR) of a returns stream.

    `cutoff` falls back to `ReturnsAccessor.defaults` when None."""
    cutoff = self.defaults['cutoff'] if cutoff is None else cutoff
    out = nb.cond_value_at_risk_nb(self.to_2d_array(), cutoff)
    kwargs = merge_dicts(dict(name_or_index='cond_value_at_risk'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_cond_value_at_risk(self,
                               window: tp.Optional[int] = None,
                               minp: tp.Optional[int] = None,
                               cutoff: tp.Optional[float] = None,
                               wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.cond_value_at_risk` over a rolling window.

    Missing arguments fall back to `ReturnsAccessor.defaults`."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    cutoff = self.defaults['cutoff'] if cutoff is None else cutoff
    out = nb.rolling_cond_value_at_risk_nb(self.to_2d_array(), window, minp, cutoff)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def capture(self, benchmark_rets: tp.ArrayLike, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Capture ratio relative to `benchmark_rets`."""
    bm_arr = broadcast_to(to_2d(benchmark_rets, raw=True), to_2d(self.obj, raw=True))
    out = nb.capture_nb(self.to_2d_array(), bm_arr, self.ann_factor)
    kwargs = merge_dicts(dict(name_or_index='capture'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_capture(self,
                    benchmark_rets: tp.ArrayLike,
                    window: tp.Optional[int] = None,
                    minp: tp.Optional[int] = None,
                    wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.capture` over a rolling window.

    `window` and `minp` fall back to `ReturnsAccessor.defaults` when None."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    bm_arr = broadcast_to(to_2d(benchmark_rets, raw=True), to_2d(self.obj, raw=True))
    out = nb.rolling_capture_nb(self.to_2d_array(), window, minp, bm_arr, self.ann_factor)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def up_capture(self, benchmark_rets: tp.ArrayLike, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Capture ratio over periods where the benchmark return is positive."""
    bm_arr = broadcast_to(to_2d(benchmark_rets, raw=True), to_2d(self.obj, raw=True))
    out = nb.up_capture_nb(self.to_2d_array(), bm_arr, self.ann_factor)
    kwargs = merge_dicts(dict(name_or_index='up_capture'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_up_capture(self,
                       benchmark_rets: tp.ArrayLike,
                       window: tp.Optional[int] = None,
                       minp: tp.Optional[int] = None,
                       wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.up_capture` over a rolling window.

    `window` and `minp` fall back to `ReturnsAccessor.defaults` when None."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    bm_arr = broadcast_to(to_2d(benchmark_rets, raw=True), to_2d(self.obj, raw=True))
    out = nb.rolling_up_capture_nb(self.to_2d_array(), window, minp, bm_arr, self.ann_factor)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def down_capture(self, benchmark_rets: tp.ArrayLike, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Capture ratio over periods where the benchmark return is negative."""
    bm_arr = broadcast_to(to_2d(benchmark_rets, raw=True), to_2d(self.obj, raw=True))
    out = nb.down_capture_nb(self.to_2d_array(), bm_arr, self.ann_factor)
    kwargs = merge_dicts(dict(name_or_index='down_capture'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_down_capture(self,
                         benchmark_rets: tp.ArrayLike,
                         window: tp.Optional[int] = None,
                         minp: tp.Optional[int] = None,
                         wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.down_capture` over a rolling window.

    `window` and `minp` fall back to `ReturnsAccessor.defaults` when None."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    bm_arr = broadcast_to(to_2d(benchmark_rets, raw=True), to_2d(self.obj, raw=True))
    out = nb.rolling_down_capture_nb(self.to_2d_array(), window, minp, bm_arr, self.ann_factor)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def drawdown(self, wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Relative decline of cumulative returns from their running peak."""
    out = nb.drawdown_nb(self.to_2d_array())
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
def max_drawdown(self, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Total maximum drawdown (MDD)."""
    out = nb.max_drawdown_nb(self.to_2d_array())
    kwargs = merge_dicts(dict(name_or_index='max_drawdown'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, **kwargs)
def rolling_max_drawdown(self,
                         window: tp.Optional[int] = None,
                         minp: tp.Optional[int] = None,
                         wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Compute `ReturnsAccessor.max_drawdown` over a rolling window.

    `window` and `minp` fall back to `ReturnsAccessor.defaults` when None."""
    window = self.defaults['window'] if window is None else window
    minp = self.defaults['minp'] if minp is None else minp
    out = nb.rolling_max_drawdown_nb(self.to_2d_array(), window, minp)
    return self.wrapper.wrap(out, **merge_dicts({}, wrap_kwargs))
@cached_property
def drawdowns(self) -> Drawdowns:
    """`ReturnsAccessor.get_drawdowns` with default arguments.

    Cached on first access; call `get_drawdowns` directly for custom grouping."""
    return self.get_drawdowns()
@cached_method
def get_drawdowns(self, group_by: tp.GroupByLike = None, **kwargs) -> Drawdowns:
    """Generate drawdown records from cumulative returns.

    See `vectorbt.generic.drawdowns.Drawdowns`."""
    if group_by is None:
        group_by = self.wrapper.grouper.group_by
    # Build the equity curve first, then delegate record generation to its accessor
    cum_rets = self.cumulative(start_value=1.)
    return cum_rets.vbt(freq=self.wrapper.freq, group_by=group_by).get_drawdowns(**kwargs)
def stats(self,
          benchmark_rets: tp.ArrayLike,
          wrap_kwargs: tp.KwargsLike = None,
          **kwargs) -> tp.SeriesFrame:
    """Compute various statistics on these returns.

    Args:
        benchmark_rets (array_like): Benchmark returns; broadcast against the
            accessor's object before comparison.
        wrap_kwargs (dict): Keyword arguments for the final wrapping step
            (only applied when the accessor holds a Series).
        **kwargs: Per-call overrides for `ReturnsAccessor.defaults`
            (e.g. `risk_free`, `required_return`, `levy_alpha`).

    ## Example

    ```python-repl
    >>> import pandas as pd
    >>> from datetime import datetime
    >>> import vectorbt as vbt

    >>> symbols = ["BTC-USD", "SPY"]
    >>> price = vbt.YFData.download(symbols, missing_index='drop').get('Close')
    >>> returns = price.pct_change()
    >>> returns["BTC-USD"].vbt.returns(freq='D').stats(returns["SPY"])
    Start                    2014-09-17 00:00:00
    End                      2021-03-12 00:00:00
    Duration                  1629 days 00:00:00
    Total Return [%]                     12296.6
    Benchmark Return [%]                 122.857
    Annual Return [%]                    194.465
    Annual Volatility [%]                88.4466
    Sharpe Ratio                         1.66841
    Calmar Ratio                         2.34193
    Max Drawdown [%]                    -83.0363
    Omega Ratio                          1.31107
    Sortino Ratio                        2.54018
    Skew                               0.0101324
    Kurtosis                              6.6453
    Tail Ratio                           1.19828
    Common Sense Ratio                    3.5285
    Value at Risk                     -0.0664826
    Alpha                                2.90175
    Beta                                0.548808
    Name: BTC-USD, dtype: object
    ```
    """
    # Run stats
    benchmark_rets = broadcast_to(benchmark_rets, self.obj)
    # Defaults first, then user overrides win
    kwargs = merge_dicts(self.defaults, kwargs)
    # One row per column of the accessor's object
    stats_df = pd.DataFrame({
        'Start': self.wrapper.index[0],
        'End': self.wrapper.index[-1],
        'Duration': self.wrapper.shape[0] * self.wrapper.freq,
        'Total Return [%]': self.total() * 100,
        'Benchmark Return [%]': benchmark_rets.vbt.returns.total() * 100,
        'Annual Return [%]': self.annualized() * 100,
        'Annual Volatility [%]': self.annualized_volatility(levy_alpha=kwargs['levy_alpha']) * 100,
        'Sharpe Ratio': self.sharpe_ratio(risk_free=kwargs['risk_free']),
        'Calmar Ratio': self.calmar_ratio(),
        'Max Drawdown [%]': self.max_drawdown() * 100,
        'Omega Ratio': self.omega_ratio(required_return=kwargs['required_return']),
        'Sortino Ratio': self.sortino_ratio(required_return=kwargs['required_return']),
        'Skew': self.obj.skew(axis=0),
        'Kurtosis': self.obj.kurtosis(axis=0),
        'Tail Ratio': self.tail_ratio(),
        'Common Sense Ratio': self.common_sense_ratio(),
        'Value at Risk': self.value_at_risk(),
        'Alpha': self.alpha(benchmark_rets, risk_free=kwargs['risk_free']),
        'Beta': self.beta(benchmark_rets)
    }, index=self.wrapper.columns)

    # Select columns or reduce
    if self.is_series():
        # Single column: return the stats as a Series named by the stat labels
        wrap_kwargs = merge_dicts(dict(name_or_index=stats_df.columns), wrap_kwargs)
        return self.wrapper.wrap_reduced(stats_df.iloc[0], **wrap_kwargs)
    return stats_df
@register_series_accessor('returns')
class ReturnsSRAccessor(ReturnsAccessor, GenericSRAccessor):
    """Accessor on top of return series. For Series only.

    Accessible through `pd.Series.vbt.returns`."""

    def __init__(self, obj: tp.Series, year_freq: tp.Optional[tp.FrequencyLike] = None, **kwargs) -> None:
        if not checks.is_pandas(obj):  # parent accessor
            # Unwrap the pandas object held by the parent accessor
            obj = obj._obj
        GenericSRAccessor.__init__(self, obj, **kwargs)
        ReturnsAccessor.__init__(self, obj, year_freq=year_freq, **kwargs)

    def plot_cumulative(self,
                        benchmark_rets: tp.Optional[tp.ArrayLike] = None,
                        start_value: float = 1,
                        fill_to_benchmark: bool = False,
                        main_kwargs: tp.KwargsLike = None,
                        benchmark_kwargs: tp.KwargsLike = None,
                        hline_shape_kwargs: tp.KwargsLike = None,
                        add_trace_kwargs: tp.KwargsLike = None,
                        xref: str = 'x',
                        yref: str = 'y',
                        fig: tp.Optional[tp.BaseFigure] = None,
                        **layout_kwargs) -> tp.BaseFigure:  # pragma: no cover
        """Plot cumulative returns.

        Args:
            benchmark_rets (array_like): Benchmark return to compare returns against.
                Will broadcast per element.
            start_value (float): The starting returns.
            fill_to_benchmark (bool): Whether to fill between main and benchmark, or between main and `start_value`.
            main_kwargs (dict): Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot` for main.
            benchmark_kwargs (dict): Keyword arguments passed to `vectorbt.generic.accessors.GenericSRAccessor.plot` for benchmark.
            hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for `start_value` line.
            add_trace_kwargs (dict): Keyword arguments passed to `add_trace`.
            xref (str): X coordinate axis.
            yref (str): Y coordinate axis.
            fig (Figure or FigureWidget): Figure to add traces to.
            **layout_kwargs: Keyword arguments for layout.

        ## Example

        ```python-repl
        >>> import pandas as pd
        >>> import numpy as np

        >>> np.random.seed(0)
        >>> rets = pd.Series(np.random.uniform(-0.05, 0.05, size=100))
        >>> benchmark_rets = pd.Series(np.random.uniform(-0.05, 0.05, size=100))
        >>> rets.vbt.returns.plot_cumulative(benchmark_rets=benchmark_rets)
        ```

        
        """
        # Imported lazily to avoid a circular import at module load time
        from vectorbt._settings import settings
        plotting_cfg = settings['plotting']

        if fig is None:
            fig = make_figure()
        fig.update_layout(**layout_kwargs)
        x_domain = get_domain(xref, fig)
        # Filling to the benchmark only makes sense when a benchmark is given
        fill_to_benchmark = fill_to_benchmark and benchmark_rets is not None

        if benchmark_rets is not None:
            # Plot benchmark
            benchmark_rets = broadcast_to(benchmark_rets, self.obj)
            if benchmark_kwargs is None:
                benchmark_kwargs = {}
            # User kwargs override the default gray benchmark styling
            benchmark_kwargs = merge_dicts(dict(
                trace_kwargs=dict(
                    line=dict(
                        color=plotting_cfg['color_schema']['gray']
                    ),
                    name='Benchmark'
                )
            ), benchmark_kwargs)
            benchmark_cumrets = benchmark_rets.vbt.returns.cumulative(start_value=start_value)
            benchmark_cumrets.vbt.plot(**benchmark_kwargs, add_trace_kwargs=add_trace_kwargs, fig=fig)
        else:
            benchmark_cumrets = None

        # Plot main
        if main_kwargs is None:
            main_kwargs = {}
        # User kwargs override the default purple main-line styling
        main_kwargs = merge_dicts(dict(
            trace_kwargs=dict(
                line=dict(
                    color=plotting_cfg['color_schema']['purple']
                )
            ),
            other_trace_kwargs='hidden'
        ), main_kwargs)
        cumrets = self.cumulative(start_value=start_value)
        if fill_to_benchmark:
            cumrets.vbt.plot_against(benchmark_cumrets, **main_kwargs, add_trace_kwargs=add_trace_kwargs, fig=fig)
        else:
            cumrets.vbt.plot_against(start_value, **main_kwargs, add_trace_kwargs=add_trace_kwargs, fig=fig)

        # Plot hline
        if hline_shape_kwargs is None:
            hline_shape_kwargs = {}
        # Horizontal dashed line marking the starting value across the full x-domain
        fig.add_shape(**merge_dicts(dict(
            type='line',
            xref="paper",
            yref=yref,
            x0=x_domain[0],
            y0=start_value,
            x1=x_domain[1],
            y1=start_value,
            line=dict(
                color="gray",
                dash="dash",
            )
        ), hline_shape_kwargs))
        return fig
@register_dataframe_accessor('returns')
class ReturnsDFAccessor(ReturnsAccessor, GenericDFAccessor):
    """Accessor on top of return series. For DataFrames only.

    Accessible through `pd.DataFrame.vbt.returns`."""

    def __init__(self, obj: tp.Frame, year_freq: tp.Optional[tp.FrequencyLike] = None, **kwargs) -> None:
        # Unwrap the underlying pandas object when constructed from a parent accessor
        obj = obj if checks.is_pandas(obj) else obj._obj
        GenericDFAccessor.__init__(self, obj, **kwargs)
        ReturnsAccessor.__init__(self, obj, year_freq=year_freq, **kwargs)
| [
"vectorbt.utils.checks.is_pandas",
"vectorbt.base.reshape_fns.broadcast_to",
"numpy.isnan",
"vectorbt.generic.accessors.GenericSRAccessor.__init__",
"vectorbt.generic.accessors.GenericAccessor.__init__",
"vectorbt.utils.checks.assert_type",
"pandas.Timedelta",
"vectorbt.utils.config.merge_dicts",
"v... | [((2087, 2142), 'vectorbt._typing.TypeVar', 'tp.TypeVar', (['"""ReturnsAccessorT"""'], {'bound': '"""ReturnsAccessor"""'}), "('ReturnsAccessorT', bound='ReturnsAccessor')\n", (2097, 2142), True, 'from vectorbt import _typing as tp\n'), ((36936, 36971), 'vectorbt.root_accessors.register_series_accessor', 'register_series_accessor', (['"""returns"""'], {}), "('returns')\n", (36960, 36971), False, 'from vectorbt.root_accessors import register_dataframe_accessor, register_series_accessor\n'), ((41839, 41877), 'vectorbt.root_accessors.register_dataframe_accessor', 'register_dataframe_accessor', (['"""returns"""'], {}), "('returns')\n", (41866, 41877), False, 'from vectorbt.root_accessors import register_dataframe_accessor, register_series_accessor\n'), ((3062, 3107), 'vectorbt.generic.accessors.GenericAccessor.__init__', 'GenericAccessor.__init__', (['self', 'obj'], {}), '(self, obj, **kwargs)\n', (3086, 3107), False, 'from vectorbt.generic.accessors import GenericAccessor, GenericSRAccessor, GenericDFAccessor\n'), ((3705, 3739), 'vectorbt.utils.datetime.freq_to_timedelta', 'freq_to_timedelta', (['self._year_freq'], {}), '(self._year_freq)\n', (3722, 3739), False, 'from vectorbt.utils.datetime import freq_to_timedelta, DatetimeIndexes\n'), ((5138, 5193), 'vectorbt.utils.checks.assert_type', 'checks.assert_type', (['self.wrapper.index', 'DatetimeIndexes'], {}), '(self.wrapper.index, DatetimeIndexes)\n', (5156, 5193), False, 'from vectorbt.utils import checks\n'), ((5441, 5492), 'vectorbt.utils.checks.assert_type', 'checks.assert_type', (['self.obj.index', 'DatetimeIndexes'], {}), '(self.obj.index, DatetimeIndexes)\n', (5459, 5492), False, 'from vectorbt.utils import checks\n'), ((6030, 6058), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (6041, 6058), False, 'from vectorbt.utils.config import merge_dicts\n'), ((6952, 6980), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), 
'({}, wrap_kwargs)\n', (6963, 6980), False, 'from vectorbt.utils.config import merge_dicts\n'), ((8017, 8045), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (8028, 8045), False, 'from vectorbt.utils.config import merge_dicts\n'), ((9752, 9780), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (9763, 9780), False, 'from vectorbt.utils.config import merge_dicts\n'), ((10762, 10790), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (10773, 10790), False, 'from vectorbt.utils.config import merge_dicts\n'), ((12451, 12479), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (12462, 12479), False, 'from vectorbt.utils.config import merge_dicts\n'), ((14028, 14056), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (14039, 14056), False, 'from vectorbt.utils.config import merge_dicts\n'), ((15224, 15249), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (15229, 15249), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((15268, 15285), 'numpy.isnan', 'np.isnan', (['returns'], {}), '(returns)\n', (15276, 15285), True, 'import numpy as np\n'), ((17229, 17257), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (17240, 17257), False, 'from vectorbt.utils.config import merge_dicts\n'), ((18623, 18651), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (18634, 18651), False, 'from vectorbt.utils.config import merge_dicts\n'), ((20066, 20094), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (20077, 20094), False, 'from vectorbt.utils.config import merge_dicts\n'), 
((21440, 21468), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (21451, 21468), False, 'from vectorbt.utils.config import merge_dicts\n'), ((22991, 23019), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (23002, 23019), False, 'from vectorbt.utils.config import merge_dicts\n'), ((23951, 23979), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (23962, 23979), False, 'from vectorbt.utils.config import merge_dicts\n'), ((25143, 25171), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (25154, 25171), False, 'from vectorbt.utils.config import merge_dicts\n'), ((26425, 26453), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (26436, 26453), False, 'from vectorbt.utils.config import merge_dicts\n'), ((27780, 27808), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (27791, 27808), False, 'from vectorbt.utils.config import merge_dicts\n'), ((29024, 29052), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (29035, 29052), False, 'from vectorbt.utils.config import merge_dicts\n'), ((30348, 30376), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (30359, 30376), False, 'from vectorbt.utils.config import merge_dicts\n'), ((31692, 31720), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (31703, 31720), False, 'from vectorbt.utils.config import merge_dicts\n'), ((31973, 32001), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (31984, 32001), False, 'from vectorbt.utils.config import merge_dicts\n'), ((32931, 32959), 
'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['{}', 'wrap_kwargs'], {}), '({}, wrap_kwargs)\n', (32942, 32959), False, 'from vectorbt.utils.config import merge_dicts\n'), ((35324, 35362), 'vectorbt.base.reshape_fns.broadcast_to', 'broadcast_to', (['benchmark_rets', 'self.obj'], {}), '(benchmark_rets, self.obj)\n', (35336, 35362), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((35380, 35414), 'vectorbt.utils.config.merge_dicts', 'merge_dicts', (['self.defaults', 'kwargs'], {}), '(self.defaults, kwargs)\n', (35391, 35414), False, 'from vectorbt.utils.config import merge_dicts\n'), ((37344, 37391), 'vectorbt.generic.accessors.GenericSRAccessor.__init__', 'GenericSRAccessor.__init__', (['self', 'obj'], {}), '(self, obj, **kwargs)\n', (37370, 37391), False, 'from vectorbt.generic.accessors import GenericAccessor, GenericSRAccessor, GenericDFAccessor\n'), ((39837, 39858), 'vectorbt.utils.figure.get_domain', 'get_domain', (['xref', 'fig'], {}), '(xref, fig)\n', (39847, 39858), False, 'from vectorbt.utils.figure import make_figure, get_domain\n'), ((42256, 42303), 'vectorbt.generic.accessors.GenericDFAccessor.__init__', 'GenericDFAccessor.__init__', (['self', 'obj'], {}), '(self, obj, **kwargs)\n', (42282, 42303), False, 'from vectorbt.generic.accessors import GenericAccessor, GenericSRAccessor, GenericDFAccessor\n'), ((2773, 2794), 'vectorbt.utils.checks.is_pandas', 'checks.is_pandas', (['obj'], {}), '(obj)\n', (2789, 2794), False, 'from vectorbt.utils import checks\n'), ((3646, 3689), 'vectorbt.utils.datetime.freq_to_timedelta', 'freq_to_timedelta', (["returns_cfg['year_freq']"], {}), "(returns_cfg['year_freq'])\n", (3663, 3689), False, 'from vectorbt.utils.datetime import freq_to_timedelta, DatetimeIndexes\n'), ((5227, 5245), 'pandas.Timedelta', 'pd.Timedelta', (['"""1D"""'], {}), "('1D')\n", (5239, 5245), True, 'import pandas as pd\n'), ((15095, 15126), 'numpy.var', 'np.var', (['sharpe_ratio'], {'ddof': 'ddof'}), '(sharpe_ratio, 
ddof=ddof)\n', (15101, 15126), True, 'import numpy as np\n'), ((19085, 19116), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['benchmark_rets'], {'raw': '(True)'}), '(benchmark_rets, raw=True)\n', (19090, 19116), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((19118, 19143), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (19123, 19143), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((20133, 20164), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['benchmark_rets'], {'raw': '(True)'}), '(benchmark_rets, raw=True)\n', (20138, 20164), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((20166, 20191), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (20171, 20191), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((20516, 20547), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['benchmark_rets'], {'raw': '(True)'}), '(benchmark_rets, raw=True)\n', (20521, 20547), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((20549, 20574), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (20554, 20574), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((21272, 21303), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['benchmark_rets'], {'raw': '(True)'}), '(benchmark_rets, raw=True)\n', (21277, 21303), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((21305, 21330), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (21310, 21330), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((21861, 21892), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['benchmark_rets'], {'raw': '(True)'}), '(benchmark_rets, raw=True)\n', (21866, 21892), False, 
'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((21894, 21919), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (21899, 21919), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((22794, 22825), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['benchmark_rets'], {'raw': '(True)'}), '(benchmark_rets, raw=True)\n', (22799, 22825), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((22827, 22852), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (22832, 22852), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((28039, 28070), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['benchmark_rets'], {'raw': '(True)'}), '(benchmark_rets, raw=True)\n', (28044, 28070), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((28072, 28097), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (28077, 28097), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((28836, 28867), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['benchmark_rets'], {'raw': '(True)'}), '(benchmark_rets, raw=True)\n', (28841, 28867), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((28869, 28894), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (28874, 28894), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((29336, 29367), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['benchmark_rets'], {'raw': '(True)'}), '(benchmark_rets, raw=True)\n', (29341, 29367), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((29369, 29394), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (29374, 29394), False, 'from 
vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((30157, 30188), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['benchmark_rets'], {'raw': '(True)'}), '(benchmark_rets, raw=True)\n', (30162, 30188), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((30190, 30215), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (30195, 30215), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((30662, 30693), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['benchmark_rets'], {'raw': '(True)'}), '(benchmark_rets, raw=True)\n', (30667, 30693), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((30695, 30720), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (30700, 30720), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((31499, 31530), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['benchmark_rets'], {'raw': '(True)'}), '(benchmark_rets, raw=True)\n', (31504, 31530), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((31532, 31557), 'vectorbt.base.reshape_fns.to_2d', 'to_2d', (['self.obj'], {'raw': '(True)'}), '(self.obj, raw=True)\n', (31537, 31557), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((37266, 37287), 'vectorbt.utils.checks.is_pandas', 'checks.is_pandas', (['obj'], {}), '(obj)\n', (37282, 37287), False, 'from vectorbt.utils import checks\n'), ((39761, 39774), 'vectorbt.utils.figure.make_figure', 'make_figure', ([], {}), '()\n', (39772, 39774), False, 'from vectorbt.utils.figure import make_figure, get_domain\n'), ((40034, 40072), 'vectorbt.base.reshape_fns.broadcast_to', 'broadcast_to', (['benchmark_rets', 'self.obj'], {}), '(benchmark_rets, self.obj)\n', (40046, 40072), False, 'from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to\n'), ((42178, 42199), 
'vectorbt.utils.checks.is_pandas', 'checks.is_pandas', (['obj'], {}), '(obj)\n', (42194, 42199), False, 'from vectorbt.utils import checks\n'), ((15653, 15685), 'scipy.stats.skew', 'skew', (['returns'], {'axis': '(0)', 'bias': 'bias'}), '(returns, axis=0, bias=bias)\n', (15657, 15685), False, 'from scipy.stats import skew, kurtosis\n'), ((15708, 15744), 'scipy.stats.kurtosis', 'kurtosis', (['returns'], {'axis': '(0)', 'bias': 'bias'}), '(returns, axis=0, bias=bias)\n', (15716, 15744), False, 'from scipy.stats import skew, kurtosis\n'), ((15469, 15493), 'numpy.sqrt', 'np.sqrt', (['self.ann_factor'], {}), '(self.ann_factor)\n', (15476, 15493), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 3 09:45:29 2019
@author: bala
"""
import random
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras import optimizers
import copy
from sklearn.metrics import mean_squared_error as skMSE
class SimpleNNagent_Continous():
    """DQN-style agent for an environment with a continuous action space.

    The continuous action range [-1, 1] is discretized into ``nActions``
    evenly spaced values and a Keras MLP predicts one Q-value per discrete
    action.  The repeated ``np.reshape(..., (-1, 2))`` calls assume a
    2-dimensional observation vector -- TODO confirm against the caller's
    environment (the 0.5 goal position and 199-step cap in ``getReward``
    suggest Gym MountainCar; verify).
    """
    def __init__(self, env):
        # Rolling training buffers: network inputs and Q-value target rows.
        self.trainX = []
        self.trainY = []
        # Experience replay buffer of [currState, nextState, reward, done, action].
        self.replayMemory = []
        # Exploration parameters.  NOTE(review): minEpsilon and epsilonDecay
        # are defined but never applied inside this class -- epsilon decay is
        # presumably performed by the external training loop; confirm.
        self.epsilon = 1.0
        self.minEpsilon = 0.01
        self.epsilonDecay = 0.997
        self.discount = 0.95
        self.learningRate = 0.002
        self.batchSize = 128
        # Observation-space bounds, kept for the normalization code that is
        # currently disabled in nState().
        self.sLow = env.observation_space.low
        self.sHigh = env.observation_space.high
        self.nActions = 10
        self.model = self.buildModel(env.observation_space.shape[0],self.nActions)
        # Discrete action grid covering [-1, 1] inclusive.
        self.actionContinous = np.linspace(-1, 1, num=10, endpoint=True)
    def toDiscreteAction(self,action):
        # Index of the grid value closest to the given continuous action.
        return np.argmin(np.absolute(self.actionContinous - action))
    def toContAction(self,action):
        # Map a discrete action index back to its continuous grid value.
        return self.actionContinous[action]
    def nState(self, state):
        # State normalization is currently a no-op; the min-max scaling below
        # is kept disabled.
#        return np.divide(state-self.sLow,
#                          (self.sHigh-self.sLow))
        return state
    def buildModel(self,iSize, oSize):
        """Build and compile the Q-network: iSize inputs -> oSize Q-values.

        The commented-out variants below are earlier architectures kept for
        reference.
        """
# =============================================================================
#         model = Sequential()
#         model.add(Dense(128, input_dim=iSize, activation='relu'))
#         model.add(Dense(52, activation='relu'))
#         model.add(Dense(oSize, activation='linear'))
#         model.compile(loss='mse', optimizer='sgd')  # Adam()
# =============================================================================
# =============================================================================
#         model = Sequential()
#         model.add(Dense(34, input_dim=iSize, activation='relu'))
#         model.add(Dense(31, activation='relu'))
#         model.add(Dense(21, activation='relu'))
#         model.add(Dense(19, activation='relu'))
#         model.add(Dense(10, activation='relu'))
#         model.add(Dense(4, activation='relu'))
#         model.add(Dense(oSize, activation='linear'))
#         model.compile(loss='mse', optimizer='sgd')  # Adam()
# =============================================================================
# =============================================================================
#         model = Sequential()
#         model.add(Dense(50, input_dim=iSize, activation='relu'))
#         model.add(Dense(oSize, activation='linear'))
#         sgd = optimizers.SGD(lr=0.005, decay=1e-6, momentum=0.9, nesterov=True)
#         model.compile(loss='mse', optimizer= sgd)  # Adam()
# =============================================================================
        model = Sequential()
        model.add(Dense(64, input_dim=iSize, activation='relu'))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(oSize, activation='linear'))
#        model.compile(loss="mean_squared_error", optimizer=Adam(lr=self.learningRate))
        model.compile(loss="mean_squared_error", optimizer=Adam(lr=self.learningRate))
        return model
    def trainModel(self):
        # Fit the network on whatever is currently accumulated in the
        # trainX/trainY buffers (single pass, silent).
        self.model.fit(np.asarray(self.trainX),
                       np.asarray(self.trainY),
#                       epochs=2,
                       verbose = 0)
    def miniBatchTrainModel(self):
        """Rebuild per-sample targets from the whole replay memory, fit one
        epoch, and return the accumulated per-sample loss."""
        self.trainX = []
        self.trainY = []
        loss = 0
        # Each sarsa entry is [currState, nextState, reward, done, action],
        # matching buildTrainData's parameter order.
        for sarsa in self.replayMemory:
            loss+=self.buildTrainData(sarsa[0], sarsa[1], sarsa[2], sarsa[3], sarsa[4])
        self.model.fit(np.asarray(self.trainX),
                       np.asarray(self.trainY),
                       epochs=1)
        return loss
    def EpsilonGreedyPolicy(self,state):
        """Pick a discrete action: random with probability epsilon, otherwise
        greedy w.r.t. the current Q-network (ties broken uniformly)."""
        if random.random() <= self.epsilon:
            # choose random
            action = random.randint(0,self.nActions-1)
        else:
            #ChooseMax
            #Handle multiple max
            self.qValues = self.model.predict(np.reshape(self.nState(state),(-1,2)))[0]
            action = np.random.choice(
                    np.where(self.qValues == np.max(self.qValues))[0]
                    )
        return action
    def newGame(self):
        # Reset the per-episode training buffers (replay memory is kept).
        self.trainX = []
        self.trainY = []
#        self.replayMemory = []
    def getTrainAction(self,state):
        # Training-time action selection: epsilon-greedy exploration.
        action = self.EpsilonGreedyPolicy(state)
        return action
    def getAction(self,state):
        # Evaluation-time action selection: purely greedy, random tie-break.
        self.qValues = self.model.predict(np.reshape(self.nState(state),(-1,2)))[0]
        action = np.random.choice(
                np.where(self.qValues == np.max(self.qValues))[0]
                )
        return action
    def buildReplayMemory(self, currState, nextState, reward, done, action):
        # Append one transition; the size cap below is currently disabled, so
        # the buffer grows without bound.
#        if len(self.replayMemory)> self.batchSize:
#            self.replayMemory.pop()
        self.replayMemory.append([currState, nextState, reward, done, action])
    def buildMiniBatchTrainData(self):
        """Sample a minibatch from replay memory, compute Q-learning targets,
        store them in trainX/trainY, and return the MSE between the targets
        and the network's current predictions (a TD-error proxy)."""
        c = []
        n = []
        r = []
        d = []
        a = []
        if len(self.replayMemory)>self.batchSize:
            minibatch = random.sample(self.replayMemory, self.batchSize)
        else:
            minibatch = self.replayMemory
        for ndx,[currState, nextState, reward, done, action] in enumerate(minibatch):
#        for ndx,val in enumerate(choices):
#            [currState, nextState, reward, done, action] = self.replayMemory[val]
            c.append(currState)
            n.append(nextState)
            r.append(reward)
            d.append(done)
            # Record (row index, action) so only the taken action's Q-value
            # gets a target below.
            a.append([ndx, action])
        c = np.asanyarray(c)
        n = np.asanyarray(n)
        r = np.asanyarray(r)
        d = np.asanyarray(d)
        a = np.asanyarray(a)
        # Transpose to index arrays: a[0] = row indices, a[1] = action columns.
        a = a.T
        qVal_n = self.model.predict(np.reshape(self.nState(n),(-1,2)))
        qMax_n = np.max(qVal_n, axis = 1)
        qVal_c = self.model.predict(np.reshape(self.nState(c),(-1,2)))
        # Targets start as the current predictions so non-taken actions keep
        # a zero error; deepcopy prevents aliasing qVal_c.
        Y = copy.deepcopy(qVal_c)
        y = np.zeros(r.shape)
        # Terminal transitions: target is the raw reward.
        ndx = np.where(d == True)
        y[ndx] = r[ndx]
        # Non-terminal transitions: bootstrap from the next state's max Q.
        ndx = np.where(d == False)
        y[ndx] = r[ndx] + self.discount * qMax_n[ndx]
        Y[a[0],a[1]] = y
        self.trainX = c
        self.trainY = Y
        return skMSE(Y,qVal_c)
    def buildTrainData(self, currState, nextState, reward, done, action):
        """Append one Q-learning target row for a single transition to the
        trainX/trainY buffers and return its MSE against the current
        prediction."""
        # Predict Q-values for both states in one batched call.
        states = np.asarray([currState, nextState])
        q = self.model.predict(np.reshape(self.nState(states),(-1,2)))
        self.qValues = q[0]
        qVal = q[1]
        qMax = np.max(qVal)
        Y = copy.deepcopy(self.qValues)
        if done:
            y = reward
        else:
            y = reward + self.discount * qMax
        # NOTE(review): verify the target is written correctly -- the first
        # epoch's loss should start high -- and that Y is a true copy rather
        # than a reference into self.qValues (deepcopy above should ensure it).
        Y[action] = y
        self.trainX.append(self.nState(currState))
        self.trainY.append(Y)
        return skMSE(Y,self.qValues)
    def getReward(self, currState, nextState, action, reward, maxDist, step, done):
        """Shape the environment reward (variant "Reward 15" below).

        Bonuses for building velocity consistently in either direction, a
        large bonus for surviving to step 199 or reaching position >= 0.5,
        and a small per-step penalty otherwise.  Variants 1-14 are earlier
        experiments kept commented out for reference.
        """
# =============================================================================
#         # Reward 1
#         if nextState[0] >= 0.5 or nextState[0] > episodeMaxDist:
#             reward += 5
#         else:
#             reward = nextState[0] + 0.5
# =============================================================================
# =============================================================================
#         # Reward 2
#         if nextState[0] >= 0.5:
#             reward += 5
#         else:
#             reward = nextState[0] + 0.5
# =============================================================================
# =============================================================================
#         # Reward 3
#         # No change
# =============================================================================
# =============================================================================
#         # Reward 4
#         sign = np.array([-1.0,0.0,1.0])
#         if nextState[1]*sign[action] >= 0:
#             reward = nextState[0] + 0.5
#         else:
#             reward = nextState[0] - 0.5
# =============================================================================
# =============================================================================
#         # Reward 5
#         sign = np.array([-1.0,0.0,1.0])
#         if currState[1]*sign[action] >= 0:
#             reward = nextState[0] + 0.5
#         else:
#             reward = nextState[0] - 0.5
# =============================================================================
# =============================================================================
#         # Reward 6
#         sign = np.array([-1.0,0.0,1.0])
#         if currState[1]*sign[action] >= 0:
#             reward = 1
#         else:
#             reward = -1
# =============================================================================
# =============================================================================
#         # Reward 7
#         sign = np.array([-1.0,0.0,1.0])
#         if currState[1]*sign[action] >= 0:
#             reward = 1
#         else:
#             reward = -1
#         reward = (0.999**step) * reward
# =============================================================================
# =============================================================================
#         # Reward 8
#         sign = np.array([-1.0,0.0,1.0])
#         if currState[1]*sign[action] >= 0:
#             reward = 1 * (0.99**step)
#         else:
#             reward = -1 * (1.01**step)
# =============================================================================
# =============================================================================
#         # Reward 9
#         sign = np.array([-1.0,0.0,1.0])
#         if currState[1]*sign[action] >= 0:
#             reward = nextState[0] + 1 * (0.99**step)
#         else:
#             reward = nextState[0] - -1 * (1.01**step)
# =============================================================================
# =============================================================================
#         # Reward 10
#         sign = np.array([-1.0,0.0,1.0])
#         if currState[1]*sign[action] >= 0:
#             reward = 1
#         else:
#             reward = -1
#         reward = (0.8**step) * reward
#         if nextState[0] >=0.5:
#             reward+= 100
# =============================================================================
# =============================================================================
#         # Reward 11
#         if nextState[1] > currState[1] and nextState[1]>0 and currState[1]>0:
#             reward += 15
#         elif nextState[1] < currState[1] and nextState[1]<=0 and currState[1]<=0:
#             reward +=15
#         if done:
#             reward = reward + 1000
#         else:
#             reward=reward-10
# =============================================================================
# =============================================================================
#         # Reward 12
#         reward = nextState[0]
#         if nextState[0] >= 0.5:
#             reward += 5000
#         elif nextState[0] > maxDist:
#             reward += 5
# =============================================================================
# =============================================================================
#         # Reward 13
#         sign = np.array([-1.0,0.0,1.0])
#         if currState[1]*sign[action] >= 0:
#             reward = 1
#         else:
#             reward = -1
#         if currState[0]>=0.5:
#             reward += 1000
#         reward = (0.999**step) * reward
# =============================================================================
# =============================================================================
#         # Reward 14
#         reward = currState[0]+0.5
#         if nextState[0]>-0.5:
#             reward+=1
# =============================================================================
        # Reward 15
        # Bonus when the agent accelerates: speed keeps growing in the same
        # direction (positive in both states, or both non-positive).
        if nextState[1] > currState[1] and nextState[1]>0 and currState[1]>0:
            reward += 15
        elif nextState[1] < currState[1] and nextState[1]<=0 and currState[1]<=0:
            reward +=15
        if step >=199:
            reward = reward + 1000
        else:
            reward=reward-10
        if nextState[0]>= 0.5:
            reward += 1000
#        reward = (0.8**step)*reward
        return reward
| [
"numpy.absolute",
"copy.deepcopy",
"random.randint",
"random.sample",
"numpy.asarray",
"numpy.asanyarray",
"numpy.zeros",
"keras.optimizers.Adam",
"random.random",
"numpy.max",
"numpy.where",
"keras.layers.Dense",
"numpy.linspace",
"keras.models.Sequential",
"sklearn.metrics.mean_squared... | [((910, 951), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)'], {'num': '(10)', 'endpoint': '(True)'}), '(-1, 1, num=10, endpoint=True)\n', (921, 951), True, 'import numpy as np\n'), ((2906, 2918), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2916, 2918), False, 'from keras.models import Sequential\n'), ((5821, 5837), 'numpy.asanyarray', 'np.asanyarray', (['c'], {}), '(c)\n', (5834, 5837), True, 'import numpy as np\n'), ((5850, 5866), 'numpy.asanyarray', 'np.asanyarray', (['n'], {}), '(n)\n', (5863, 5866), True, 'import numpy as np\n'), ((5879, 5895), 'numpy.asanyarray', 'np.asanyarray', (['r'], {}), '(r)\n', (5892, 5895), True, 'import numpy as np\n'), ((5908, 5924), 'numpy.asanyarray', 'np.asanyarray', (['d'], {}), '(d)\n', (5921, 5924), True, 'import numpy as np\n'), ((5937, 5953), 'numpy.asanyarray', 'np.asanyarray', (['a'], {}), '(a)\n', (5950, 5953), True, 'import numpy as np\n'), ((6058, 6080), 'numpy.max', 'np.max', (['qVal_n'], {'axis': '(1)'}), '(qVal_n, axis=1)\n', (6064, 6080), True, 'import numpy as np\n'), ((6167, 6188), 'copy.deepcopy', 'copy.deepcopy', (['qVal_c'], {}), '(qVal_c)\n', (6180, 6188), False, 'import copy\n'), ((6201, 6218), 'numpy.zeros', 'np.zeros', (['r.shape'], {}), '(r.shape)\n', (6209, 6218), True, 'import numpy as np\n'), ((6233, 6252), 'numpy.where', 'np.where', (['(d == True)'], {}), '(d == True)\n', (6241, 6252), True, 'import numpy as np\n'), ((6291, 6311), 'numpy.where', 'np.where', (['(d == False)'], {}), '(d == False)\n', (6299, 6311), True, 'import numpy as np\n'), ((6454, 6470), 'sklearn.metrics.mean_squared_error', 'skMSE', (['Y', 'qVal_c'], {}), '(Y, qVal_c)\n', (6459, 6470), True, 'from sklearn.metrics import mean_squared_error as skMSE\n'), ((6570, 6604), 'numpy.asarray', 'np.asarray', (['[currState, nextState]'], {}), '([currState, nextState])\n', (6580, 6604), True, 'import numpy as np\n'), ((6739, 6751), 'numpy.max', 'np.max', (['qVal'], {}), '(qVal)\n', (6745, 
6751), True, 'import numpy as np\n'), ((6764, 6791), 'copy.deepcopy', 'copy.deepcopy', (['self.qValues'], {}), '(self.qValues)\n', (6777, 6791), False, 'import copy\n'), ((7159, 7181), 'sklearn.metrics.mean_squared_error', 'skMSE', (['Y', 'self.qValues'], {}), '(Y, self.qValues)\n', (7164, 7181), True, 'from sklearn.metrics import mean_squared_error as skMSE\n'), ((1025, 1067), 'numpy.absolute', 'np.absolute', (['(self.actionContinous - action)'], {}), '(self.actionContinous - action)\n', (1036, 1067), True, 'import numpy as np\n'), ((2937, 2982), 'keras.layers.Dense', 'Dense', (['(64)'], {'input_dim': 'iSize', 'activation': '"""relu"""'}), "(64, input_dim=iSize, activation='relu')\n", (2942, 2982), False, 'from keras.layers import Dense\n'), ((3002, 3030), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (3007, 3030), False, 'from keras.layers import Dense\n'), ((3050, 3083), 'keras.layers.Dense', 'Dense', (['oSize'], {'activation': '"""linear"""'}), "(oSize, activation='linear')\n", (3055, 3083), False, 'from keras.layers import Dense\n'), ((3339, 3362), 'numpy.asarray', 'np.asarray', (['self.trainX'], {}), '(self.trainX)\n', (3349, 3362), True, 'import numpy as np\n'), ((3387, 3410), 'numpy.asarray', 'np.asarray', (['self.trainY'], {}), '(self.trainY)\n', (3397, 3410), True, 'import numpy as np\n'), ((3751, 3774), 'numpy.asarray', 'np.asarray', (['self.trainX'], {}), '(self.trainX)\n', (3761, 3774), True, 'import numpy as np\n'), ((3799, 3822), 'numpy.asarray', 'np.asarray', (['self.trainY'], {}), '(self.trainY)\n', (3809, 3822), True, 'import numpy as np\n'), ((3938, 3953), 'random.random', 'random.random', ([], {}), '()\n', (3951, 3953), False, 'import random\n'), ((4020, 4056), 'random.randint', 'random.randint', (['(0)', '(self.nActions - 1)'], {}), '(0, self.nActions - 1)\n', (4034, 4056), False, 'import random\n'), ((5335, 5383), 'random.sample', 'random.sample', (['self.replayMemory', 'self.batchSize'], 
{}), '(self.replayMemory, self.batchSize)\n', (5348, 5383), False, 'import random\n'), ((3232, 3258), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.learningRate'}), '(lr=self.learningRate)\n', (3236, 3258), False, 'from keras.optimizers import Adam\n'), ((4815, 4835), 'numpy.max', 'np.max', (['self.qValues'], {}), '(self.qValues)\n', (4821, 4835), True, 'import numpy as np\n'), ((4304, 4324), 'numpy.max', 'np.max', (['self.qValues'], {}), '(self.qValues)\n', (4310, 4324), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# unsupervised training of typology evaluator (categorical)
#
import sys
import codecs
import json
import numpy as np
import random
from argparse import ArgumentParser
from json_utils import load_json_file, load_json_stream
from evaluator import CategoricalFeatureList, CategoricalFeatureListEvaluator, NestedCategoricalFeatureListEvaluator
def cum_error(binvect_list, evaluator):
    """Total autoencoder reconstruction error over a list of binary vectors.

    Each vector is passed through the evaluator's encode -> decode ->
    binarize round trip; the error for one vector is the sum of absolute
    element-wise differences from the original, and the vectors' errors
    are summed.
    """
    def _round_trip_error(vec):
        # Reconstruct the vector through the autoencoder and binarize it.
        rebuilt = evaluator.binarize(evaluator.decode(evaluator.encode(vec)))
        return np.absolute(vec - rebuilt).sum()
    return sum((_round_trip_error(vec) for vec in binvect_list), 0.0)
def train(tnode_list, evaluator, _iter=100, minibatch=10, _iter_offset=1, Cscore=0.1, interval=5, psamples=5, nsamples=5):
    """Run `_iter` epochs of joint scorer/autoencoder SGD training.

    Each epoch shuffles `tnode_list`, accumulates scorer and autoencoder
    gradients per node, applies a weight update every `minibatch` nodes
    (plus one final update for the remainder), logs cumulative weight
    change and errors to stderr, then reshuffles the nodes' `randnode`
    pairings via `shuffle_randnode`.

    `Cscore` balances the scorer objective against the autoencoder;
    `interval`/`psamples`/`nsamples` are sampler settings forwarded to
    `evaluator.train_scorer`.  `_iter_offset` only shifts the epoch number
    printed in the log.
    """
    # Fixed: `xrange` is Python-2-only; `range` behaves identically here on
    # both Python 2 and 3.
    for _i in range(_iter):
        random.shuffle(tnode_list)
        cdiff = 0.0
        count = 0
        error = 0.0
        delta = None
        for tnode in tnode_list:
            # Gradients accumulate into `delta` until the minibatch is full.
            delta = evaluator.train_scorer(tnode, burn_in=0, interval=interval, psamples=psamples, nsamples=nsamples, delta=delta, Cscore=Cscore)
            delta, error_each = evaluator.calc_delta_autoencoder(tnode.binvect, delta=delta)
            error += error_each
            count += 1
            if count % minibatch == 0:
                cdiff += evaluator.update_weight(delta)
                delta = None
        # Flush the final partial minibatch, if any.
        if count % minibatch != 0:
            cdiff += evaluator.update_weight(delta)
            delta = None
        sys.stderr.write("AE\titer %d: cdiff: %f\tt_error: %f\tc_error: %f\n" % \
                             (_i + _iter_offset, cdiff, error / len(tnode_list),
                              cum_error([tnode.binvect for tnode in tnode_list], evaluator) / float(len(tnode_list))))
        shuffle_randnode(tnode_list)
def shuffle_randnode(node_list):
    """Fisher-Yates shuffle of the nodes' `randnode` attributes, in place.

    The node objects stay in their list positions; only their `randnode`
    values are permuted uniformly at random.  Returns `node_list` for
    convenience.
    """
    i = len(node_list) - 1
    while i > 0:
        # Fixed: np.random.random_integers was deprecated in NumPy 1.11 and
        # later removed; randint with an exclusive upper bound of i + 1 draws
        # from the same inclusive range [0, i].
        r = np.random.randint(0, i + 1)
        node_list[i].randnode, node_list[r].randnode = node_list[r].randnode, node_list[i].randnode
        i -= 1
    return node_list
def save(evaluator, modelfile, tnode_list):
    """Serialize *evaluator* to *modelfile* as UTF-8 text.

    `tnode_list` is accepted for interface compatibility but is not used.

    Fixed: the file is now opened in binary mode.  `codecs.getwriter`
    returns a StreamWriter that encodes str to bytes before writing, so
    wrapping a text-mode handle raises TypeError under Python 3; a binary
    handle works on both Python 2 and 3.
    """
    with codecs.getwriter("utf-8")(open(modelfile, 'wb')) as f:
        f.write(evaluator.dumps())
def main():
    """CLI entry point: parse options, load the feature inventory and
    language data, train the evaluator in 1000-epoch chunks, and save the
    resulting model."""
    # Wrap stderr so log messages are encoded as UTF-8.  NOTE(review): this
    # is a Python-2 idiom; on Python 3 the StreamWriter would write bytes to
    # a text stream -- confirm the intended interpreter version.
    sys.stderr = codecs.getwriter("utf-8")(sys.stderr)
    parser = ArgumentParser()
    parser.add_argument("--nested", dest="nested", action="store_true", default=False)
    parser.add_argument("-s", "--seed", dest="seed", metavar="INT", type=int, default=None,
                        help="random seed")
    # NOTE(review): the help strings for --dims2 and --iter look copy-pasted
    # from -d/--dims ("number of dimensions").
    parser.add_argument("-d", "--dims", dest="dims", metavar="INT", type=int, default=200,
                        help="number of dimensions")
    parser.add_argument("--dims2", dest="dims2", metavar="INT", type=int, default=10,
                        help="number of dimensions")
    parser.add_argument("-i", "--iter", dest="_iter", metavar="INT", type=int, default=20000,
                        help="number of dimensions")
    parser.add_argument("--eta", dest="eta", metavar="FLOAT", type=float, default=0.01,
                        help="SGD parameter")
    parser.add_argument("--penalty", dest="penalty", metavar="l1 or l2", default=None,
                        help="regularization l1 or l2 (default None)")
    parser.add_argument("--lambda", dest="_lambda", metavar="FLOAT", type=float, default=0.0,
                        help="L2 regularization term")
    parser.add_argument("--Cscore", dest="Cscore", metavar="FLOAT", type=float, default=0.1,
                        help="balance between autoencoder and scorer")
    parser.add_argument("--minibatch", dest="minibatch", metavar="INT", type=int, default=10,
                        help="minibatch size (default: 10)")
    parser.add_argument("--interval", dest="interval", metavar="INT", type=int, default=10)
    parser.add_argument("--psamples", dest="psamples", metavar="INT", type=int, default=10)
    parser.add_argument("--nsamples", dest="nsamples", metavar="INT", type=int, default=10)
    parser.add_argument("langs", metavar="LANG", default=None)
    parser.add_argument("fid2struct", metavar="FLIST", default=None)
    parser.add_argument("model", metavar="MODEL", default=None)
    args = parser.parse_args()
    if args.seed is not None:
        # Seed both RNGs for reproducibility.
        np.random.seed(args.seed)
        random.seed(args.seed)
    fid2struct = load_json_file(args.fid2struct)
    modelfile = args.model
    # Choose between the flat and nested evaluator variants.
    if args.nested:
        evaluator = NestedCategoricalFeatureListEvaluator(fid2struct, dims=args.dims, dims2=args.dims2, eta=args.eta, _lambda=args._lambda, penalty=args.penalty)
    else:
        evaluator = CategoricalFeatureListEvaluator(fid2struct, dims=args.dims, eta=args.eta, _lambda=args._lambda, penalty=args.penalty)
    # mv_count = 0
    train_total = 0
    langlist = []
    # Build one training node per language from the JSON stream; missing
    # values are assumed to be pre-filled ("catvect_filled").
    for lang in load_json_stream(open(args.langs)):
        tnode = CategoricalFeatureList(lang["catvect_filled"], evaluator, has_missing_values=False)
        tnode.lang = lang
        train_total += 1
        langlist.append(tnode)
    sys.stderr.write("# of catvect elemts: %d\n" % evaluator.catsize)
    # sys.stderr.write("missing value rate: %f\n" % (mv_count / (float(train_total * evaluator.catsize))))
    sys.stderr.write("# of binvect elems: %d\n" % evaluator.binsize)
    sys.stderr.write("# of training instances: %d\n" % train_total)
    sys.stderr.write("Cscore: %f\n" % args.Cscore)
    sys.stderr.write("# of hidden dims: %d\n" % evaluator.dims)
    if args.nested:
        sys.stderr.write("# of hidden dims2: %d\n" % evaluator.dims2)
    sys.stderr.write("interval, psamples, nsamples: (%d, %d, %d)\n" % (args.interval, args.psamples, args.nsamples))
    sys.stderr.write("SGD/Adagrad eta: %f\n" % evaluator.eta)
    sys.stderr.write("penalty: %s\n" % evaluator.penalty)
    sys.stderr.write("lambda: %f\n" % evaluator._lambda)
    _iter_remaining = args._iter
    _iter_count=0
    # Train in chunks of at most 1000 epochs, carrying the epoch offset so
    # log lines keep a global epoch number.
    while _iter_remaining > 0:
        _iter_each = min(1000, _iter_remaining)
        # NOTE(review): interval/psamples/nsamples are hard-coded to 10 here,
        # so the --interval/--psamples/--nsamples options (and the values
        # logged above) are effectively ignored by the training call --
        # confirm whether this is intentional.
        train(langlist, evaluator, _iter=_iter_each, _iter_offset=_iter_count, minibatch=args.minibatch, Cscore=args.Cscore,
              interval=10, psamples=10, nsamples=10)
        _iter_remaining -= 1000
        _iter_count += 1000
    save(evaluator, modelfile, langlist)
# Script entry point: run the trainer only when executed directly, not when
# this module is imported.
if __name__ == "__main__":
    main()
| [
"numpy.absolute",
"numpy.random.seed",
"argparse.ArgumentParser",
"random.shuffle",
"evaluator.CategoricalFeatureListEvaluator",
"codecs.getwriter",
"evaluator.NestedCategoricalFeatureListEvaluator",
"random.seed",
"evaluator.CategoricalFeatureList",
"sys.stderr.write",
"json_utils.load_json_fil... | [((2279, 2295), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2293, 2295), False, 'from argparse import ArgumentParser\n'), ((4326, 4357), 'json_utils.load_json_file', 'load_json_file', (['args.fid2struct'], {}), '(args.fid2struct)\n', (4340, 4357), False, 'from json_utils import load_json_file, load_json_stream\n'), ((5013, 5078), 'sys.stderr.write', 'sys.stderr.write', (["('# of catvect elemts: %d\\n' % evaluator.catsize)"], {}), "('# of catvect elemts: %d\\n' % evaluator.catsize)\n", (5029, 5078), False, 'import sys\n'), ((5191, 5255), 'sys.stderr.write', 'sys.stderr.write', (["('# of binvect elems: %d\\n' % evaluator.binsize)"], {}), "('# of binvect elems: %d\\n' % evaluator.binsize)\n", (5207, 5255), False, 'import sys\n'), ((5260, 5323), 'sys.stderr.write', 'sys.stderr.write', (["('# of training instances: %d\\n' % train_total)"], {}), "('# of training instances: %d\\n' % train_total)\n", (5276, 5323), False, 'import sys\n'), ((5328, 5374), 'sys.stderr.write', 'sys.stderr.write', (["('Cscore: %f\\n' % args.Cscore)"], {}), "('Cscore: %f\\n' % args.Cscore)\n", (5344, 5374), False, 'import sys\n'), ((5379, 5438), 'sys.stderr.write', 'sys.stderr.write', (["('# of hidden dims: %d\\n' % evaluator.dims)"], {}), "('# of hidden dims: %d\\n' % evaluator.dims)\n", (5395, 5438), False, 'import sys\n'), ((5533, 5650), 'sys.stderr.write', 'sys.stderr.write', (["('interval, psamples, nsamples: (%d, %d, %d)\\n' % (args.interval, args.\n psamples, args.nsamples))"], {}), "('interval, psamples, nsamples: (%d, %d, %d)\\n' % (args.\n interval, args.psamples, args.nsamples))\n", (5549, 5650), False, 'import sys\n'), ((5650, 5707), 'sys.stderr.write', 'sys.stderr.write', (["('SGD/Adagrad eta: %f\\n' % evaluator.eta)"], {}), "('SGD/Adagrad eta: %f\\n' % evaluator.eta)\n", (5666, 5707), False, 'import sys\n'), ((5712, 5765), 'sys.stderr.write', 'sys.stderr.write', (["('penalty: %s\\n' % evaluator.penalty)"], {}), "('penalty: %s\\n' % 
evaluator.penalty)\n", (5728, 5765), False, 'import sys\n'), ((5770, 5822), 'sys.stderr.write', 'sys.stderr.write', (["('lambda: %f\\n' % evaluator._lambda)"], {}), "('lambda: %f\\n' % evaluator._lambda)\n", (5786, 5822), False, 'import sys\n'), ((793, 819), 'random.shuffle', 'random.shuffle', (['tnode_list'], {}), '(tnode_list)\n', (807, 819), False, 'import random\n'), ((1881, 1917), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)'], {'high': 'i'}), '(0, high=i)\n', (1906, 1917), True, 'import numpy as np\n'), ((2227, 2252), 'codecs.getwriter', 'codecs.getwriter', (['"""utf-8"""'], {}), "('utf-8')\n", (2243, 2252), False, 'import codecs\n'), ((4251, 4276), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4265, 4276), True, 'import numpy as np\n'), ((4285, 4307), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (4296, 4307), False, 'import random\n'), ((4426, 4572), 'evaluator.NestedCategoricalFeatureListEvaluator', 'NestedCategoricalFeatureListEvaluator', (['fid2struct'], {'dims': 'args.dims', 'dims2': 'args.dims2', 'eta': 'args.eta', '_lambda': 'args._lambda', 'penalty': 'args.penalty'}), '(fid2struct, dims=args.dims, dims2=\n args.dims2, eta=args.eta, _lambda=args._lambda, penalty=args.penalty)\n', (4463, 4572), False, 'from evaluator import CategoricalFeatureList, CategoricalFeatureListEvaluator, NestedCategoricalFeatureListEvaluator\n'), ((4598, 4719), 'evaluator.CategoricalFeatureListEvaluator', 'CategoricalFeatureListEvaluator', (['fid2struct'], {'dims': 'args.dims', 'eta': 'args.eta', '_lambda': 'args._lambda', 'penalty': 'args.penalty'}), '(fid2struct, dims=args.dims, eta=args.eta,\n _lambda=args._lambda, penalty=args.penalty)\n', (4629, 4719), False, 'from evaluator import CategoricalFeatureList, CategoricalFeatureListEvaluator, NestedCategoricalFeatureListEvaluator\n'), ((4842, 4929), 'evaluator.CategoricalFeatureList', 'CategoricalFeatureList', (["lang['catvect_filled']", 'evaluator'], 
{'has_missing_values': '(False)'}), "(lang['catvect_filled'], evaluator,\n has_missing_values=False)\n", (4864, 4929), False, 'from evaluator import CategoricalFeatureList, CategoricalFeatureListEvaluator, NestedCategoricalFeatureListEvaluator\n'), ((5467, 5528), 'sys.stderr.write', 'sys.stderr.write', (["('# of hidden dims2: %d\\n' % evaluator.dims2)"], {}), "('# of hidden dims2: %d\\n' % evaluator.dims2)\n", (5483, 5528), False, 'import sys\n'), ((2108, 2133), 'codecs.getwriter', 'codecs.getwriter', (['"""utf-8"""'], {}), "('utf-8')\n", (2124, 2133), False, 'import codecs\n'), ((580, 611), 'numpy.absolute', 'np.absolute', (['(binvect - binvect3)'], {}), '(binvect - binvect3)\n', (591, 611), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# import vectorize_data as vd
import settings
import pickle as pickle
from preProcessData import FeatureExtraction
import numpy as np
# X_train, y_train, X_test, y_test = vd.tf_Idf('./dataS/train/pre_train.txt', './dataS/test/pre_test.txt')
# X_train, y_train, X_test, y_test = vd.Bow('./data/train/pre_train.txt', './data/test/pre_test.txt')
# Load the pre-extracted feature payloads pickled by the preprocessing step.
features_test_loader = pickle.load(open(settings.FEATURES_TEST,'rb'))
features_train_loader = pickle.load(open(settings.FEATURES_TRAIN,'rb'))
# Split each payload into (feature matrix, label vector).
# NOTE(review): assumes FeatureExtraction.read_feature() returns
# (features, labels) in that order — matches the unpacking here.
features_train, labels_train = FeatureExtraction(data=features_train_loader).read_feature()
features_test, labels_test = FeatureExtraction(data=features_test_loader).read_feature()
# Aliases used by SVM() below.
X_train=features_train
y_train=labels_train
X_test=features_test
y_test=labels_test
def SVM():
    """Grid-search a LinearSVC over (penalty, max_iter, C) on the module-level
    train split, save the mean CV scores for plotting, and evaluate on test.

    Returns:
        (best_cv_score, best_params_dict, test_set_score)
    Side effects:
        Writes the reshaped score table to 'drawChart.npy' and prints scores.
    """
    from sklearn.svm import LinearSVC
    from sklearn.model_selection import GridSearchCV
    svc = LinearSVC()
    max_iter = [1,10,20,50,100]
    penalty = ['l2']
    C= [0.1,1,10,20]
    param_grid = {'penalty':penalty,'max_iter': max_iter,'C': C}
    clf = GridSearchCV(svc, param_grid, refit=True)
    clf.fit(X_train, y_train)
    best_score = clf.best_score_
    best_param = clf.best_params_
    score = clf.score(X_test, y_test)
    # BUGFIX: `grid_scores_` was removed in scikit-learn 0.20; cv_results_'s
    # 'mean_test_score' holds the same mean validation scores.
    scores = list(clf.cv_results_['mean_test_score'])
    print(scores)
    # ParameterGrid sorts keys alphabetically (C, max_iter, penalty) with the
    # last key varying fastest, so C indexes the rows of the reshaped table.
    scores = np.array(scores).reshape(len(C), len(max_iter)*len(penalty))
    np.save('drawChart',scores)
    return best_score, best_param, score
# Run the grid search and report (best CV score, best params, held-out score).
print ('SVM: ', SVM())
# [0.8810391303059925, 0.90257412838058, 0.90257412838058, 0.90257412838058, 0.90257412838058, 0.8783435528303564, 0.9171776415178174, 0.917088776326313, 0.91702953286531, 0.91702953286531, 0.8891851061939039, 0.9183032672768743, 0.9182144020853699, 0.9179774282413579, 0.9180366717023608, 0.8776326312983205, 0.9180662934328624, 0.9175627240143369, 0.9183328890073759, 0.9179478065108564]
# ('SVM: ', (0.9183328890073759, {'penalty': 'l2', 'C': 20, 'max_iter': 50}, 0.9204137136958291))
"sklearn.model_selection.GridSearchCV",
"numpy.save",
"numpy.array",
"sklearn.svm.LinearSVC",
"preProcessData.FeatureExtraction"
] | [((890, 901), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (899, 901), False, 'from sklearn.svm import LinearSVC\n'), ((1052, 1093), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['svc', 'param_grid'], {'refit': '(True)'}), '(svc, param_grid, refit=True)\n', (1064, 1093), False, 'from sklearn.model_selection import GridSearchCV\n'), ((1374, 1402), 'numpy.save', 'np.save', (['"""drawChart"""', 'scores'], {}), "('drawChart', scores)\n", (1381, 1402), True, 'import numpy as np\n'), ((541, 586), 'preProcessData.FeatureExtraction', 'FeatureExtraction', ([], {'data': 'features_train_loader'}), '(data=features_train_loader)\n', (558, 586), False, 'from preProcessData import FeatureExtraction\n'), ((631, 675), 'preProcessData.FeatureExtraction', 'FeatureExtraction', ([], {'data': 'features_test_loader'}), '(data=features_test_loader)\n', (648, 675), False, 'from preProcessData import FeatureExtraction\n'), ((1309, 1325), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (1317, 1325), True, 'import numpy as np\n')] |
import os, time, sys, zipfile
import tensorflow as tf
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from io import open, BytesIO
import numpy as np
from PIL import Image
import lib
def pil_bilinear_interpolation(x, size=(299, 299)):
    """Resize a batch of [-1, 1] torch images with PIL bilinear interpolation.

    x: torch tensor of shape (N, C, H, W) with values in [-1, 1];
       single-channel inputs are expanded to RGB.
    Returns a tensor of shape (N, 3, size[0], size[1]) back in [-1, 1].
    """
    out = np.zeros((x.shape[0], size[0], size[1], 3), dtype='uint8')
    # Map [-1, 1] -> [0, 255] uint8 and move channels last for PIL.
    arr = ((x + 1) * 127.5).detach().cpu().numpy().astype("uint8")
    arr = arr.transpose(0, 2, 3, 1)
    for idx, img in enumerate(arr):
        if img.shape[-1] == 1:
            resized = Image.fromarray(img[:, :, 0]).resize(
                size, Image.BILINEAR).convert("RGB")
        else:
            resized = Image.fromarray(img).resize(size, Image.BILINEAR)
        out[idx] = np.asarray(resized)
    # Back to (N, C, H, W) and to the caller's dtype/device, rescaled to [-1, 1].
    return torch.from_numpy(out.transpose(0, 3, 1, 2)).type_as(x) / 127.5 - 1
def read_image_and_resize(filename, size):
    """Eagerly read the image file named by a tf string tensor and resize it.

    filename: 0-d tf string tensor holding a file path.
    size: (width, height) passed to PIL's resize.
    Returns the resized image as a numpy array.
    """
    path = filename.numpy().decode("utf-8")
    # BUGFIX: the old code passed an `open(...)` handle that was never closed.
    # `resize` forces Pillow's lazy load, so converting before the `with`
    # block exits is safe.
    with open(path, "rb") as fh:
        img = Image.open(fh).resize(size)
        return np.asarray(img)
class GeneratorIterator(object):
    """Draws `tot_num` samples from a generative model in batches.

    Each yielded batch is resized to 299x299 via pil_bilinear_interpolation
    (the default size of that helper).  The model is fed latent batches of
    shape (N, 128); outputs are presumably images in [-1, 1] — the save path
    rescales them with (t + 1) * 127.5.
    """
    def __init__(self, model, tot_num=50000, batch_size=64, cuda=True):
        # model: callable mapping a (N, 128) latent tensor to outputs
        # tot_num: total number of samples produced across all batches
        self.model = model
        self.tot_num = tot_num
        self.cuda = cuda
        self.batch_size = batch_size
        self.num_iter = self.tot_num // self.batch_size

    def iterator(self, save_path=None):
        """Generator over resized batches; optionally dumps raw batches.

        save_path: if given, the directory is created and each raw batch is
        written out as numbered .jpg files before being yielded.
        """
        if save_path is not None:
            os.system("mkdir %s" % save_path)
        z = torch.Tensor(self.batch_size, 128)
        if self.cuda: z = z.cuda()
        # Add one extra (smaller) iteration when tot_num is not a multiple
        # of batch_size, so exactly tot_num samples are produced.
        if self.num_iter * self.batch_size < self.tot_num:
            self.num_iter += 1
        for i in range(self.num_iter):
            if i == self.num_iter - 1:
                # Shrink the latent batch for the final partial batch.
                bs = self.tot_num - self.batch_size * i
                if bs < self.batch_size:
                    z = torch.Tensor(bs, 128)
                    if self.cuda: z = z.cuda()
            z = z.normal_() * 2  # latent drawn as 2 * N(0, 1)
            t = self.model(z)
            if save_path is not None:
                lib.utils.save_4dtensor_image(
                    save_path + "/%05d.jpg",
                    i * self.batch_size,
                    (t + 1) * 127.5)  # map [-1, 1] -> [0, 255]
            yield pil_bilinear_interpolation(t)
class PytorchDataloader(object):
    """Switches between a train and a test dataloader via the `train` flag.

    The flag may be toggled after construction; iteration and length always
    reflect the currently selected loader.
    """
    def __init__(self, train_dl, test_dl, train):
        self.train = train
        self.train_dl = train_dl
        self.test_dl = test_dl

    def reset(self):
        """No-op; kept for interface parity with TFDataloader."""
        pass

    def _active(self):
        # Pick the loader selected by the current value of self.train.
        return self.train_dl if self.train else self.test_dl

    def __iter__(self):
        return iter(self._active())

    def __len__(self):
        return len(self._active())
class TFDataloader():
    """Index-style wrapper over a tf.data pipeline driven by a TF1 session."""
    def __init__(self, dataset, batch_size):
        """
        Workaround wrapper: tf.data has no cheap length, so the number of
        iterations is derived from len(dataset) up front.

        dataset: object exposing __len__ and a `.dataset` tf.data.Dataset.
        batch_size: batch size applied after a 1000-element shuffle.
        """
        self.dataset = dataset
        self.batch_size = batch_size
        # NOTE(review): one batch is deliberately dropped here (the `- 1`),
        # presumably to never fetch a trailing partial batch — confirm.
        self.num_iter = len(self.dataset) // self.batch_size - 1
        self.dataset = dataset.dataset.shuffle(buffer_size=1000).batch(batch_size)
        self.iterator = self.dataset.make_initializable_iterator()
        self.next_element = self.iterator.get_next()
        # Temporarily hide all GPUs so this helper session is created on CPU,
        # then restore the caller's device visibility.
        t_ = os.environ['CUDA_VISIBLE_DEVICES']
        os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
        self.sess = tf.Session()
        os.environ['CUDA_VISIBLE_DEVICES'] = t_

    def reset(self):
        # Restart the underlying iterator; call before each epoch.
        self.sess.run(self.iterator.initializer)

    def __getitem__(self, idx):
        # `idx` is ignored: the pipeline is consumed sequentially.
        try:
            item = self.sess.run(self.next_element)
            if type(item) is tuple:
                # (image, label) tuples are converted to torch tensors.
                return [torch.Tensor(t) for t in item]
            else:
                return item
        except tf.errors.OutOfRangeError:
            print("=> TFDataloader out of range")
            return (-1, -1)  # sentinel: pipeline exhausted, caller should reset()

    def __len__(self):
        return self.num_iter
class TFFileDataset():
    """tf.data dataset over image files stored in a directory tree or zip.

    Images are normalised to [-1, 1], optionally augmented when `train` is
    True, and transposed to (C, H, W).  Labels come from an optional .npy
    array aligned with the sorted file list (its last axis gives class_num).
    """
    def __init__(self, data_path, img_size=64, npy_dir=None, train=True, seed=1):
        """
        data_path: directory of images, or a ``.zip`` archive of images.
        img_size: target square size used by the parse function.
        npy_dir: optional path to the .npy label array; class_num is -1 if absent.
        train: enables random brightness/contrast/flip augmentation.
        seed: seeds the RandomState below (currently unused — see NOTE).
        """
        self.img_size = (img_size, img_size)
        self.train = train
        if ".zip" in data_path:
            self.use_zip = True
            self.data_file = zipfile.ZipFile(data_path)
            self.files = self.data_file.namelist()
            self.files.sort()
            # Drop the first entry (the archive's root folder, not an image).
            self.files = self.files[1:]
        else:
            self.use_zip = False
            self.files = sum([[file for file in files] for path, dirs, files in os.walk(data_path) if files], [])
            self.files.sort()
        self.idxs = np.arange(len(self.files))
        self.rng = np.random.RandomState(seed)
        # NOTE(review): this line only references the bound method and never
        # calls it, so the file list is NOT shuffled here — confirm intent.
        self.rng.shuffle
        # list of image file names
        filelist_t = tf.constant(self.files)
        self.file_num = len(self.files)
        # labels
        if npy_dir is not None:
            label = np.load(npy_dir)
            label_t = tf.constant(label)
            self.class_num = label.shape[-1]
            dataset = tf.data.Dataset.from_tensor_slices((filelist_t, label_t))
        else:
            # No labels available: pair each file with a dummy zero label.
            self.class_num = -1
            dataset = tf.data.Dataset.from_tensor_slices((filelist_t, tf.constant(np.zeros((self.file_num,)))))
        self.dataset = dataset.map(self._parse_function)

    def __len__(self):
        return len(self.files)

    def read_image_from_zip(self, filename):
        """
        An eager helper reading one image out of the zip archive.

        filename: tf string tensor holding the archive member name.
        Returns the decoded image as a numpy array.
        """
        f = filename.numpy().decode("utf-8")
        img = Image.open(BytesIO(self.data_file.read(f)))
        return np.asarray(img)

    def _parse_function(self, filename, label):
        """Graph-mode parse: load, normalise and (optionally) augment one image."""
        # NOTE(review): neither `self.read_image_resize_from_zip` nor the free
        # function `read_image_resize` is defined in this file (only
        # `read_image_from_zip` / `read_image_and_resize` exist), so both
        # branches look broken as written — confirm the intended helpers.
        if self.use_zip:
            x = tf.py_function(self.read_image_resize_from_zip, [filename, self.img_size], tf.float32)
        else:
            x = tf.py_function(read_image_resize, [filename, self.img_size], tf.float32)
        x = tf.expand_dims(x, 0)
        #x = tf.image.resize_bilinear(x, (self.img_size[0], self.img_size[1]))
        x = x[0]
        x = tf.cast(x, tf.float32) / 255.0  # [0, 255] -> [0, 1]
        if self.train:
            # Light photometric / geometric augmentation for training.
            x = tf.image.random_brightness(x, 0.05)
            x = tf.image.random_contrast(x, 0.9, 1.1)
            x = tf.image.random_flip_left_right(x)
        x = tf.clip_by_value(x * 2 - 1, -1.0, 1.0)  # [0, 1] -> [-1, 1]
        x = tf.transpose(x, (2, 0, 1)) # (H, W, C) => (C, H, W)
        if self.class_num > 0:
            return x, label
        else:
            return x
class TFCelebADataset(TFFileDataset):
    """CelebA variant of TFFileDataset: crops the 128x128 face region at
    offset (50, 25) before resizing to `img_size`.
    """
    def __init__(self, data_path, img_size=64, npy_dir=None, train=True, seed=1):
        # BUGFIX: the parent signature is (data_path, img_size, npy_dir,
        # train, seed); the old call passed `seed` positionally into the
        # `train` slot, silently dropping any caller-supplied seed.
        super(TFCelebADataset, self).__init__(data_path, img_size, npy_dir, train, seed)
        self.train = train

    def access(self, idx):
        """
        Deprecated eager accessor: load, face-crop and transform one image.
        NOTE(review): relies on self.transform, which is never assigned in
        this class hierarchy — confirm before use.
        """
        if self.use_zip:
            img = np.asarray(Image.open(BytesIO(self.data_file.read(self.files[idx]))))
        else:
            img_path = os.path.join(self.data_path, self.files[idx])
            img = np.asarray(Image.open(open(img_path, "rb")))
        img = img[50:50+128, 25:25+128]  # CelebA face crop
        return self.transform(img)

    # Reads the image file that `filename` refers to into the graph.
    def _parse_function(self, filename, label):
        """Graph-mode parse: load, face-crop, resize, normalise, augment."""
        if self.use_zip:
            x = tf.py_function(self.read_image_from_zip, [filename], tf.float32)
        else:
            #x = tf.py_function(read_image_resize, [filename, self.img_size], tf.float32)
            x = tf.read_file(filename)
            x = tf.image.decode_image(x)
        x = tf.image.crop_to_bounding_box(x, 50, 25, 128, 128)
        x = tf.expand_dims(x, 0)
        #TF bilinear resize is not correct
        x = tf.image.resize_bilinear(x, (self.img_size[0], self.img_size[1]))
        x = x[0]
        x = tf.cast(x, tf.float32) / 255.0  # [0, 255] -> [0, 1]
        if self.train:
            x = tf.image.random_brightness(x, 0.05)
            x = tf.image.random_contrast(x, 0.9, 1.1)
            x = tf.image.random_flip_left_right(x)
        x = tf.clip_by_value(x * 2 - 1, -1.0, 1.0)  # [0, 1] -> [-1, 1]
        x = tf.transpose(x, (2, 0, 1)) # (H, W, C) => (C, H, W)
        if self.class_num > 0:
            return x, label
        else:
            return x

    def read_label(self):
        """
        One-off converter from the .txt attribute file (self.attr_file) to a
        .npy array saved next to it; -1 attributes are mapped to 0.
        NOTE(review): self.attr_file is never assigned in this file — set it
        before calling.
        """
        self.label = []
        with open(self.attr_file) as f:
            self.label_len = int(f.readline())
            self.label_name = f.readline().strip().split(" ")
            self.class_num = len(self.label_name)
            for l in f.readlines():
                l = l.strip().replace(" ", " ").split(" ")
                l = [int(i) for i in l[1:]]
                self.label.append(np.array(l))
        self.label = np.array(self.label)
        self.label[self.label==-1] = 0
        np.save(self.attr_file.replace(".txt", ""), self.label)
class SimpleDataset(torch.utils.data.Dataset):
    """Directory-backed image dataset yielding resized RGB PIL images.

    Only .jpg/.png file names under `data_path` are indexed; labels are not
    available, so __getitem__ returns the (optionally transformed) image only.
    """
    def __init__(self, data_path, size, transform=None):
        self.size = size
        self.data_path = data_path
        self.transform = transform
        collected = []
        # Walk the tree and keep only image file names (basenames).
        for _root, _dirs, fnames in os.walk(data_path):
            if fnames:
                collected.extend(f for f in fnames if ".jpg" in f or ".png" in f)
        collected.sort()
        self.files = collected

    def __getitem__(self, idx):
        fname = self.files[idx]
        with open(os.path.join(self.data_path, fname), "rb") as fh:
            img = Image.open(fh).convert("RGB").resize(self.size, Image.BILINEAR)
        if self.transform:
            img = self.transform(img)
        return img

    def __len__(self):
        return len(self.files)
class FileDataset(torch.utils.data.Dataset):
    """Image dataset over a zip archive or directory, iterated in a fixed
    shuffled order.

    Labels are currently not available: __getitem__ always returns label 0.
    """
    def __init__(self, data_path, transform=None):
        """
        data_path: path to a ``.zip`` archive or a directory of images.
        transform: optional callable applied to each PIL image.
        """
        self.data_path = data_path
        self.transform = transform
        if ".zip" in data_path:
            self.use_zip = True
            self.data_file = zipfile.ZipFile(data_path)
            self.files = self.data_file.namelist()
            self.files.sort()
            # Drop the first entry (the archive's root folder, not an image).
            self.files = self.files[1:]
        else:
            self.use_zip = False
            self.files = sum([[file for file in files] for path, dirs, files in os.walk(data_path) if files], [])
            self.files.sort()
        self.idxs = np.arange(len(self.files))
        self.rng = np.random.RandomState(1)  # fixed seed: reproducible order
        self.reset()

    def reset(self):
        """Reshuffle the access order in place."""
        self.rng.shuffle(self.idxs)

    def __getitem__(self, idx):
        idx = self.idxs[idx]
        fpath = self.files[idx]
        if self.use_zip:
            img = Image.open(BytesIO(self.data_file.read(fpath)))
        else:
            # BUGFIX: the path used to be built as `self.data_path + fpath`,
            # which breaks whenever data_path lacks a trailing separator.
            with open(os.path.join(self.data_path, fpath), "rb") as f:
                img = Image.open(f).convert("RGB")
            img = img.resize((299, 299))
        if self.transform:
            img = self.transform(img)
        label = 0
        return (img, label)

    def __len__(self):
        return len(self.files)
"numpy.load",
"tensorflow.clip_by_value",
"os.walk",
"os.path.join",
"tensorflow.image.crop_to_bounding_box",
"tensorflow.py_function",
"lib.utils.save_4dtensor_image",
"tensorflow.image.random_contrast",
"numpy.random.RandomState",
"tensorflow.cast",
"torch.Tensor",
"io.open",
"numpy.asarra... | [((334, 392), 'numpy.zeros', 'np.zeros', (['(x.shape[0], size[0], size[1], 3)'], {'dtype': '"""uint8"""'}), "((x.shape[0], size[0], size[1], 3), dtype='uint8')\n", (342, 392), True, 'import numpy as np\n'), ((1505, 1539), 'torch.Tensor', 'torch.Tensor', (['self.batch_size', '(128)'], {}), '(self.batch_size, 128)\n', (1517, 1539), False, 'import torch\n'), ((3347, 3359), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3357, 3359), True, 'import tensorflow as tf\n'), ((4573, 4600), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (4594, 4600), True, 'import numpy as np\n'), ((4666, 4689), 'tensorflow.constant', 'tf.constant', (['self.files'], {}), '(self.files)\n', (4677, 4689), True, 'import tensorflow as tf\n'), ((5502, 5517), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (5512, 5517), True, 'import numpy as np\n'), ((5819, 5839), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (5833, 5839), True, 'import tensorflow as tf\n'), ((6226, 6252), 'tensorflow.transpose', 'tf.transpose', (['x', '(2, 0, 1)'], {}), '(x, (2, 0, 1))\n', (6238, 6252), True, 'import tensorflow as tf\n'), ((7398, 7448), 'tensorflow.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['x', '(50)', '(25)', '(128)', '(128)'], {}), '(x, 50, 25, 128, 128)\n', (7427, 7448), True, 'import tensorflow as tf\n'), ((7461, 7481), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (7475, 7481), True, 'import tensorflow as tf\n'), ((7537, 7602), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['x', '(self.img_size[0], self.img_size[1])'], {}), '(x, (self.img_size[0], self.img_size[1]))\n', (7561, 7602), True, 'import tensorflow as tf\n'), ((7855, 7893), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(x * 2 - 1)', '(-1.0)', '(1.0)'], {}), '(x * 2 - 1, -1.0, 1.0)\n', (7871, 7893), True, 'import tensorflow as tf\n'), ((7906, 7932), 'tensorflow.transpose', 
'tf.transpose', (['x', '(2, 0, 1)'], {}), '(x, (2, 0, 1))\n', (7918, 7932), True, 'import tensorflow as tf\n'), ((8577, 8597), 'numpy.array', 'np.array', (['self.label'], {}), '(self.label)\n', (8585, 8597), True, 'import numpy as np\n'), ((10202, 10226), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (10223, 10226), True, 'import numpy as np\n'), ((1459, 1492), 'os.system', 'os.system', (["('mkdir %s' % save_path)"], {}), "('mkdir %s' % save_path)\n", (1468, 1492), False, 'import os, time, sys, zipfile\n'), ((4167, 4193), 'zipfile.ZipFile', 'zipfile.ZipFile', (['data_path'], {}), '(data_path)\n', (4182, 4193), False, 'import os, time, sys, zipfile\n'), ((4799, 4815), 'numpy.load', 'np.load', (['npy_dir'], {}), '(npy_dir)\n', (4806, 4815), True, 'import numpy as np\n'), ((4838, 4856), 'tensorflow.constant', 'tf.constant', (['label'], {}), '(label)\n', (4849, 4856), True, 'import tensorflow as tf\n'), ((4924, 4981), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(filelist_t, label_t)'], {}), '((filelist_t, label_t))\n', (4958, 4981), True, 'import tensorflow as tf\n'), ((5608, 5698), 'tensorflow.py_function', 'tf.py_function', (['self.read_image_resize_from_zip', '[filename, self.img_size]', 'tf.float32'], {}), '(self.read_image_resize_from_zip, [filename, self.img_size],\n tf.float32)\n', (5622, 5698), True, 'import tensorflow as tf\n'), ((5725, 5797), 'tensorflow.py_function', 'tf.py_function', (['read_image_resize', '[filename, self.img_size]', 'tf.float32'], {}), '(read_image_resize, [filename, self.img_size], tf.float32)\n', (5739, 5797), True, 'import tensorflow as tf\n'), ((5948, 5970), 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), '(x, tf.float32)\n', (5955, 5970), True, 'import tensorflow as tf\n'), ((6018, 6053), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['x', '(0.05)'], {}), '(x, 0.05)\n', (6044, 6053), True, 'import tensorflow as tf\n'), ((6070, 
6107), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['x', '(0.9)', '(1.1)'], {}), '(x, 0.9, 1.1)\n', (6094, 6107), True, 'import tensorflow as tf\n'), ((6124, 6158), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['x'], {}), '(x)\n', (6155, 6158), True, 'import tensorflow as tf\n'), ((6175, 6213), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(x * 2 - 1)', '(-1.0)', '(1.0)'], {}), '(x * 2 - 1, -1.0, 1.0)\n', (6191, 6213), True, 'import tensorflow as tf\n'), ((6824, 6869), 'os.path.join', 'os.path.join', (['self.data_path', 'self.files[idx]'], {}), '(self.data_path, self.files[idx])\n', (6836, 6869), False, 'import os, time, sys, zipfile\n'), ((7128, 7192), 'tensorflow.py_function', 'tf.py_function', (['self.read_image_from_zip', '[filename]', 'tf.float32'], {}), '(self.read_image_from_zip, [filename], tf.float32)\n', (7142, 7192), True, 'import tensorflow as tf\n'), ((7313, 7335), 'tensorflow.read_file', 'tf.read_file', (['filename'], {}), '(filename)\n', (7325, 7335), True, 'import tensorflow as tf\n'), ((7352, 7376), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['x'], {}), '(x)\n', (7373, 7376), True, 'import tensorflow as tf\n'), ((7632, 7654), 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), '(x, tf.float32)\n', (7639, 7654), True, 'import tensorflow as tf\n'), ((7702, 7737), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['x', '(0.05)'], {}), '(x, 0.05)\n', (7728, 7737), True, 'import tensorflow as tf\n'), ((7754, 7791), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['x', '(0.9)', '(1.1)'], {}), '(x, 0.9, 1.1)\n', (7778, 7791), True, 'import tensorflow as tf\n'), ((7808, 7842), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['x'], {}), '(x)\n', (7839, 7842), True, 'import tensorflow as tf\n'), ((8183, 8203), 'io.open', 'open', (['self.attr_file'], {}), '(self.attr_file)\n', (8187, 8203), False, 'from io import 
open, BytesIO\n'), ((9797, 9823), 'zipfile.ZipFile', 'zipfile.ZipFile', (['data_path'], {}), '(data_path)\n', (9812, 9823), False, 'import os, time, sys, zipfile\n'), ((2049, 2145), 'lib.utils.save_4dtensor_image', 'lib.utils.save_4dtensor_image', (["(save_path + '/%05d.jpg')", '(i * self.batch_size)', '((t + 1) * 127.5)'], {}), "(save_path + '/%05d.jpg', i * self.batch_size,\n (t + 1) * 127.5)\n", (2078, 2145), False, 'import lib\n'), ((9210, 9245), 'os.path.join', 'os.path.join', (['self.data_path', 'fpath'], {}), '(self.data_path, fpath)\n', (9222, 9245), False, 'import os, time, sys, zipfile\n'), ((10527, 10561), 'io.open', 'open', (['(self.data_path + fpath)', '"""rb"""'], {}), "(self.data_path + fpath, 'rb')\n", (10531, 10561), False, 'from io import open, BytesIO\n'), ((1057, 1070), 'io.open', 'open', (['f', '"""rb"""'], {}), "(f, 'rb')\n", (1061, 1070), False, 'from io import open, BytesIO\n'), ((1864, 1885), 'torch.Tensor', 'torch.Tensor', (['bs', '(128)'], {}), '(bs, 128)\n', (1876, 1885), False, 'import torch\n'), ((3641, 3656), 'torch.Tensor', 'torch.Tensor', (['t'], {}), '(t)\n', (3653, 3656), False, 'import torch\n'), ((6910, 6930), 'io.open', 'open', (['img_path', '"""rb"""'], {}), "(img_path, 'rb')\n", (6914, 6930), False, 'from io import open, BytesIO\n'), ((8543, 8554), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (8551, 8554), True, 'import numpy as np\n'), ((9067, 9085), 'os.walk', 'os.walk', (['data_path'], {}), '(data_path)\n', (9074, 9085), False, 'import os, time, sys, zipfile\n'), ((742, 767), 'PIL.Image.fromarray', 'Image.fromarray', (['x_arr[i]'], {}), '(x_arr[i])\n', (757, 767), False, 'from PIL import Image\n'), ((4442, 4460), 'os.walk', 'os.walk', (['data_path'], {}), '(data_path)\n', (4449, 4460), False, 'import os, time, sys, zipfile\n'), ((5110, 5136), 'numpy.zeros', 'np.zeros', (['(self.file_num,)'], {}), '((self.file_num,))\n', (5118, 5136), True, 'import numpy as np\n'), ((10072, 10090), 'os.walk', 'os.walk', (['data_path'], 
{}), '(data_path)\n', (10079, 10090), False, 'import os, time, sys, zipfile\n'), ((10590, 10603), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (10600, 10603), False, 'from PIL import Image\n'), ((9277, 9290), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (9287, 9290), False, 'from PIL import Image\n'), ((601, 635), 'PIL.Image.fromarray', 'Image.fromarray', (['x_arr[i, :, :, 0]'], {}), '(x_arr[i, :, :, 0])\n', (616, 635), False, 'from PIL import Image\n')] |
from numpy import linspace, sum, absolute
from math import log
from population import Population, PopulationProperties
from program_trees import IfWrapper, MultiplicationWrapper, AdditionWrapper, IsGreaterWrapper
from environment import Environment, EnvironmentProperties
from program_evolution import Evolution, EvolutionProperties
def target_function(x):
    """Target polynomial x^2 + 2x + 1 = (x + 1)^2, returned as a float."""
    return float(x ** 2 + 2 * x + 1)


def create_training_set(P):
    """Sample P on 50 evenly spaced points in [-50, 50] as (x, P(x)) pairs."""
    return [(x, P(x)) for x in linspace(-50, 50)]


training_set = create_training_set(target_function)
def compute_function_score(training_set, evaluable_tree):
    """Total absolute prediction error of `evaluable_tree` over `training_set`.

    training_set: iterable of (x, y) pairs; the tree is evaluated on [x].
    Lower is better (0 means a perfect fit).
    """
    errors = []
    for sample_x, sample_y in training_set:
        prediction = evaluable_tree.evaluate([sample_x])
        errors.append(absolute(prediction - sample_y))
    return sum(errors)
# Evolve arithmetic trees built from multiplication and addition nodes
# toward the target polynomial encoded in `training_set`.
population_size = 100
function_wrappers = [MultiplicationWrapper, AdditionWrapper]
polynomial_population_properties = PopulationProperties()
polynomial_population = Population(
    function_wrappers,
    population_size,
    polynomial_population_properties
)
# Fitness = total absolute error over the training set; lower is better.
environment_properties = EnvironmentProperties(
    fitness_function = lambda tree: compute_function_score(training_set, tree),
    population = polynomial_population
)
environment = Environment(environment_properties)
environment.evolve()
"population.PopulationProperties",
"numpy.sum",
"population.Population",
"environment.Environment",
"numpy.linspace"
] | [((837, 859), 'population.PopulationProperties', 'PopulationProperties', ([], {}), '()\n', (857, 859), False, 'from population import Population, PopulationProperties\n'), ((885, 970), 'population.Population', 'Population', (['function_wrappers', 'population_size', 'polynomial_population_properties'], {}), '(function_wrappers, population_size, polynomial_population_properties\n )\n', (895, 970), False, 'from population import Population, PopulationProperties\n'), ((1165, 1200), 'environment.Environment', 'Environment', (['environment_properties'], {}), '(environment_properties)\n', (1176, 1200), False, 'from environment import Environment, EnvironmentProperties\n'), ((705, 716), 'numpy.sum', 'sum', (['errors'], {}), '(errors)\n', (708, 716), False, 'from numpy import linspace, sum, absolute\n'), ((436, 453), 'numpy.linspace', 'linspace', (['(-50)', '(50)'], {}), '(-50, 50)\n', (444, 453), False, 'from numpy import linspace, sum, absolute\n')] |
import numpy as np
from pandas.core.tools.numeric import to_numeric
np.random.seed(42)
import argparse
import napari
from napari_particles.particles import Particles
from napari_particles.filters import ShaderFilter
import pandas as pd
def norm_clip(x, pmin=0.1, pmax=99.9):
    """Clip `x` to a symmetric per-column percentile envelope, scaled to [-1, 1].

    The envelope per column is the larger absolute value of the pmin/pmax
    percentiles; values are clipped to [-envelope, envelope] and then divided
    by the overall maximum envelope.
    """
    envelope = np.max(np.abs(np.percentile(x, (pmin, pmax), axis=0)), 0)
    limits = np.stack([-envelope, envelope])
    clipped = np.clip(x, *limits)
    return clipped / np.max(limits)
if __name__ == "__main__":
    # Command-line options for the particle viewer.
    parser = argparse.ArgumentParser()
    #parser.add_argument('-i','--input', type=str, default='data/stars/galaxy_200kparticles.dat')
    parser.add_argument('-i','--input', type=str, default='data/stars/gaiasky_basedata.csv')
    # --size: rendered particle size; --sub: keep only every sub-th row.
    parser.add_argument('--size', type=float, default=.005)
    parser.add_argument('--sub', type=int, default=1)
    parser.add_argument('-s','--shader', type=str, default='particle')
    parser.add_argument('-a','--antialias', type=float, default=0.05)
    # --points: use napari's built-in points layer instead of Particles.
    parser.add_argument('--points', action='store_true')
    args = parser.parse_args()
    np.random.seed(32)
    # Try common delimiters until the file parses; keep the first four
    # columns and name them x/y/z/r.
    for sep in (',', " ", "\t"):
        try:
            df = pd.read_csv(args.input, delimiter=sep, comment='#')
            df = df[df.columns[:4]]
            df.columns=["x","y","z","r"]
            break
        except Exception as e:
            print(e)
            continue
    df = df.dropna()
    # df = df[df.x.abs()<1e10]
    # df = df[df.y.abs()<1e10]
    # df = df[df.z.abs()<1e10]
    df = df.iloc[::args.sub]
    coords = df[["x","y","z"]].to_numpy()
    print(f'rendering {len(coords)} objects')
    # Centre on the median and squash outliers into [-1, 1].
    coords = coords - np.median(coords,axis=0)
    coords = norm_clip(coords)
    # Normalise the non-negative r column by its 99.99th-percentile magnitude.
    rad = np.maximum(0,df['r'].to_numpy()).astype(np.float32)
    rad /= np.max(np.abs(np.percentile(rad, (.01,99.99), axis=0)),0, keepdims=True)
    size = args.size#*rad
    values = rad
    v = napari.Viewer()
    if args.points:
        v.add_points(coords, size=size)
        v.layers[-1].blending='additive'
    else:
        layer = Particles(coords,
            size=size,
            values=values,
            colormap='Spectral',
            filter = ShaderFilter(args.shader, distance_intensity_increase=args.antialias) if args.shader !="" else None,
            antialias=args.antialias,
        )
        layer.contrast_limits=(0,1)
        layer.add_to_viewer(v)
    # 3-D perspective camera looking along the default axis.
    v.dims.ndisplay=3
    v.camera.perspective=80.0
    v.camera.angles=(90,0, 0)
    napari.run()
| [
"numpy.stack",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.median",
"pandas.read_csv",
"napari_particles.filters.ShaderFilter",
"numpy.clip",
"numpy.percentile",
"numpy.max",
"napari.Viewer",
"napari.run"
] | [((68, 86), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (82, 86), True, 'import numpy as np\n'), ((361, 388), 'numpy.stack', 'np.stack', (['[-bounds, bounds]'], {}), '([-bounds, bounds])\n', (369, 388), True, 'import numpy as np\n'), ((397, 416), 'numpy.clip', 'np.clip', (['x', '*bounds'], {}), '(x, *bounds)\n', (404, 416), True, 'import numpy as np\n'), ((498, 523), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (521, 523), False, 'import argparse\n'), ((1067, 1085), 'numpy.random.seed', 'np.random.seed', (['(32)'], {}), '(32)\n', (1081, 1085), True, 'import numpy as np\n'), ((1914, 1929), 'napari.Viewer', 'napari.Viewer', ([], {}), '()\n', (1927, 1929), False, 'import napari\n'), ((2491, 2503), 'napari.run', 'napari.run', ([], {}), '()\n', (2501, 2503), False, 'import napari\n'), ((427, 441), 'numpy.max', 'np.max', (['bounds'], {}), '(bounds)\n', (433, 441), True, 'import numpy as np\n'), ((1638, 1663), 'numpy.median', 'np.median', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (1647, 1663), True, 'import numpy as np\n'), ((304, 342), 'numpy.percentile', 'np.percentile', (['x', '(pmin, pmax)'], {'axis': '(0)'}), '(x, (pmin, pmax), axis=0)\n', (317, 342), True, 'import numpy as np\n'), ((1155, 1206), 'pandas.read_csv', 'pd.read_csv', (['args.input'], {'delimiter': 'sep', 'comment': '"""#"""'}), "(args.input, delimiter=sep, comment='#')\n", (1166, 1206), True, 'import pandas as pd\n'), ((1799, 1840), 'numpy.percentile', 'np.percentile', (['rad', '(0.01, 99.99)'], {'axis': '(0)'}), '(rad, (0.01, 99.99), axis=0)\n', (1812, 1840), True, 'import numpy as np\n'), ((2182, 2251), 'napari_particles.filters.ShaderFilter', 'ShaderFilter', (['args.shader'], {'distance_intensity_increase': 'args.antialias'}), '(args.shader, distance_intensity_increase=args.antialias)\n', (2194, 2251), False, 'from napari_particles.filters import ShaderFilter\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, <NAME>
import numpy as np
import pandas as pd
from unittest import TestCase, main
from .utils import MockKerasModel
from ..core.constants import IMAGE_ID_COL, RLE_MASK_COL
from ..core.optimization import RandomSearch
class RandomSearchTest(TestCase):
    """
    Tests `optimization.RandomSearch` class.
    """
    @staticmethod
    def _mock_model_factory(*args, **kwargs):
        """
        Function generating mock model instances.
        :return: `utils.MockKerasModel` instance
        """
        return MockKerasModel((1, 1), 1)
    def setUp(self):
        """
        Sets up the tests.
        """
        # Small 2x2 param grid and 5 CV folds keep the fixture cheap.
        test_param_grid = {'lr': [1, 2], 'n_dense': [1, 2]}
        self._random_search = RandomSearch(
            model_factory=self._mock_model_factory,
            image_directory=None,
            image_shape=(5, 5, 3),
            cv=5,
            param_grid=test_param_grid)
        # 128 images alternating between an empty mask and a '1 10' RLE mask.
        self._test_image_rle_masks = pd.DataFrame({
            IMAGE_ID_COL: [str(i) for i in range(128)],
            RLE_MASK_COL: [np.nan, '1 10'] * 64})
    def test_init_generator(self):
        """
        Tests that `optimization.RandomSearch._init_generator`
        correctly creates a generator from the train/validation
        subset.
        """
        test_case_ix = np.arange(0, 128, 2)  # every even-indexed image
        gen = self._random_search._init_generator(
            self._test_image_rle_masks, test_case_ix)
        expected_image_ids = self._test_image_rle_masks.loc[
            test_case_ix,
            IMAGE_ID_COL].tolist()
        self.assertEqual(len(gen), 64 // gen._batch_size)
        self.assertListEqual(gen._image_ids, expected_image_ids)
    def test_eval_params(self):
        """
        Tests that `optimization.RandomSearch._eval_params` correctly
        passes through all the cross-validation folds.
        """
        result = self._random_search._eval_params(
            self._test_image_rle_masks, {'test': None})
        # The mock model reports loss 0, once per fold (cv=5).
        expected = {'mean_loss': 0, 'params': {'test': None}, 'losses': [0] * 5}
        self.assertDictEqual(result, expected)
if __name__ == '__main__':
    main()
| [
"unittest.main",
"numpy.arange"
] | [((2168, 2174), 'unittest.main', 'main', ([], {}), '()\n', (2172, 2174), False, 'from unittest import TestCase, main\n'), ((1343, 1363), 'numpy.arange', 'np.arange', (['(0)', '(128)', '(2)'], {}), '(0, 128, 2)\n', (1352, 1363), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2018-2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import nbconvert
import numpy as np
import yaml
# Export the notebook to an importable .py module, then import the class
# under test from the freshly written file.
with open("project3.ipynb") as f:
    exporter = nbconvert.PythonExporter()
    python_file, _ = exporter.from_file(f)
with open("project3.py", "w") as f:
    f.write(python_file)
from project3 import Project3
def get_gold_pressures():
    """Load the reference ('gold') pressure array used by the regression tests."""
    gold_file = 'pressure_gold.npy'
    return np.load(gold_file)
def get_gold_saturations():
    """Load the reference ('gold') saturation array used by the regression tests."""
    gold_file = 'saturation_gold.npy'
    return np.load(gold_file)
class TestSolution(unittest.TestCase):
    """Regression tests comparing Project3 output against stored gold arrays."""
    def setUp(self):
        # Shared solver configuration loaded once per test.
        with open('inputs.yml') as f:
            self.inputs = yaml.load(f, yaml.FullLoader)
    def test_project3_test_1(self):
        # Pressure field must match the gold solution within atol=30.0.
        test = Project3(self.inputs)
        test.solve()
        np.testing.assert_allclose(test.p,
                                   get_gold_pressures(),
                                   atol=30.0)
        return
    def test_project3_test_2(self):
        # Saturation field must match the gold solution within atol=0.02.
        test = Project3(self.inputs)
        test.solve()
        np.testing.assert_allclose(test.saturation,
                                   get_gold_saturations(),
                                   atol=0.02)
        return
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"nbconvert.PythonExporter",
"yaml.load",
"numpy.load",
"project3.Project3"
] | [((711, 737), 'nbconvert.PythonExporter', 'nbconvert.PythonExporter', ([], {}), '()\n', (735, 737), False, 'import nbconvert\n'), ((915, 943), 'numpy.load', 'np.load', (['"""pressure_gold.npy"""'], {}), "('pressure_gold.npy')\n", (922, 943), True, 'import numpy as np\n'), ((985, 1015), 'numpy.load', 'np.load', (['"""saturation_gold.npy"""'], {}), "('saturation_gold.npy')\n", (992, 1015), True, 'import numpy as np\n'), ((1802, 1817), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1815, 1817), False, 'import unittest\n'), ((1242, 1263), 'project3.Project3', 'Project3', (['self.inputs'], {}), '(self.inputs)\n', (1250, 1263), False, 'from project3 import Project3\n'), ((1535, 1556), 'project3.Project3', 'Project3', (['self.inputs'], {}), '(self.inputs)\n', (1543, 1556), False, 'from project3 import Project3\n'), ((1143, 1172), 'yaml.load', 'yaml.load', (['f', 'yaml.FullLoader'], {}), '(f, yaml.FullLoader)\n', (1152, 1172), False, 'import yaml\n')] |
"""Tests cleaning module
"""
import numpy as np
import pandas as pd
from dsutils.cleaning import remove_duplicate_cols
from dsutils.cleaning import remove_noninformative_cols
from dsutils.cleaning import categorical_to_int
def test_remove_duplicate_cols():
    """Tests cleaning.remove_duplicate_cols

    Duplicate columns (identical values under a different name) must be
    dropped, keeping the first occurrence; everything else is kept.
    """
    df = pd.DataFrame()
    df['A'] = np.array([0, 1, 2, 3, 4, 5])
    df['B'] = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
    df['C'] = np.array(['a', 'a', 'a', 'b', 'b', 'b'])  # dup of B
    df['D'] = np.array([1, 1, 2, 2, 3, 3])
    df['E'] = np.array([0.01, 1.01, 2.01, 3.01, 4.01, 5.01])
    df['F'] = np.array([0, 1, 2, 3, 4, 5])  # dup of A
    df['G'] = np.array([0.01, 1.01, 2.01, 3.01, 4.01, 5.01])  # dup of E
    df['H'] = np.array([11, 12, 13, 14, 15, 16])
    # BUGFIX: this test previously called remove_noninformative_cols, so the
    # function it is named after (and imports) was never exercised.
    remove_duplicate_cols(df)
    assert 'A' in df
    assert 'B' in df
    assert 'C' not in df
    assert 'D' in df
    assert 'E' in df
    assert 'F' not in df
    assert 'G' not in df
    assert 'H' in df
def test_remove_noninformative_cols():
    """Tests cleaning.remove_noninformative_cols"""
    # Columns that are entirely NaN carry no information and should be dropped.
    df = pd.DataFrame()
    df['A'] = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
    df['B'] = np.nan
    df['C'] = np.array([0, 1, 2, 3, 4, 5])
    df['D'] = np.nan
    remove_noninformative_cols(df)
    for kept in ('A', 'C'):
        assert kept in df
    for dropped in ('B', 'D'):
        assert dropped not in df
    # Columns holding one single repeated value should also be dropped.
    df = pd.DataFrame()
    df['A'] = np.array(['a'] * 6)
    df['B'] = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
    df['C'] = np.array([1] * 6)
    df['D'] = np.array([0, 1, 2, 3, 4, 5])
    remove_noninformative_cols(df)
    for kept in ('B', 'D'):
        assert kept in df
    for dropped in ('A', 'C'):
        assert dropped not in df
    # A column that duplicates an earlier one should be dropped as well.
    df = pd.DataFrame()
    df['A'] = np.array([0, 1, 2, 3, 4, 5])
    df['B'] = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
    df['C'] = np.array([1, 1, 2, 2, 3, 3])
    df['D'] = np.array([0, 1, 2, 3, 4, 5])  # duplicates A
    remove_noninformative_cols(df)
    for kept in ('A', 'B', 'C'):
        assert kept in df
    assert 'D' not in df
def test_categorical_to_int():
    """Tests cleaning.categorical_to_int"""
    letters = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
    # Only the columns named in ``cols`` should be encoded.
    df = pd.DataFrame()
    df['A'] = letters
    df['B'] = np.array(['a', 'a', 'b', 'b', 'c', 'c'])
    df['C'] = np.array([1, 1, 0, 1, 1, 1])
    categorical_to_int(df, cols=['A'])
    assert all(col in df for col in 'ABC')
    assert df.shape == (6, 3)
    assert str(df['A'].dtype) == 'uint8'
    assert str(df['B'].dtype) == 'object'
    # A bare string for ``cols`` behaves like a one-element list.
    df['A'] = letters
    categorical_to_int(df, cols='A')
    assert all(col in df for col in 'ABC')
    assert df.shape == (6, 3)
    assert str(df['A'].dtype) == 'uint8'
    assert str(df['B'].dtype) == 'object'
    # Without ``cols``, every categorical column is encoded.
    df['A'] = letters
    categorical_to_int(df)
    assert all(col in df for col in 'ABC')
    assert df.shape == (6, 3)
    assert str(df['A'].dtype) == 'uint8'
    assert str(df['B'].dtype) == 'uint8'
    # A NaN in the column forces a float encoding.
    df['A'] = letters
    df.loc[3, 'A'] = np.nan
    categorical_to_int(df)
    assert all(col in df for col in 'ABC')
    assert df.shape == (6, 3)
    assert str(df['A'].dtype) == 'float32'
    # More unique values than uint8 can hold -> uint16.
    df = pd.DataFrame({'A': [str(n) for n in range(300)]})
    categorical_to_int(df, cols=['A'])
    assert 'A' in df
    assert df.shape == (300, 1)
    assert str(df['A'].dtype) == 'uint16'
    # More unique values than uint16 can hold -> uint32
    # (and, in theory, uint64 beyond uint32's range).
    df = pd.DataFrame({'A': [str(n) for n in range(70000)]})
    categorical_to_int(df, cols=['A'])
    assert 'A' in df
    assert df.shape == (70000, 1)
    assert str(df['A'].dtype) == 'uint32'
| [
"pandas.DataFrame",
"numpy.array",
"dsutils.cleaning.categorical_to_int",
"dsutils.cleaning.remove_noninformative_cols"
] | [((356, 370), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (368, 370), True, 'import pandas as pd\n'), ((385, 413), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (393, 413), True, 'import numpy as np\n'), ((428, 468), 'numpy.array', 'np.array', (["['a', 'a', 'a', 'b', 'b', 'b']"], {}), "(['a', 'a', 'a', 'b', 'b', 'b'])\n", (436, 468), True, 'import numpy as np\n'), ((483, 523), 'numpy.array', 'np.array', (["['a', 'a', 'a', 'b', 'b', 'b']"], {}), "(['a', 'a', 'a', 'b', 'b', 'b'])\n", (491, 523), True, 'import numpy as np\n'), ((543, 571), 'numpy.array', 'np.array', (['[1, 1, 2, 2, 3, 3]'], {}), '([1, 1, 2, 2, 3, 3])\n', (551, 571), True, 'import numpy as np\n'), ((586, 632), 'numpy.array', 'np.array', (['[0.01, 1.01, 2.01, 3.01, 4.01, 5.01]'], {}), '([0.01, 1.01, 2.01, 3.01, 4.01, 5.01])\n', (594, 632), True, 'import numpy as np\n'), ((647, 675), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (655, 675), True, 'import numpy as np\n'), ((695, 741), 'numpy.array', 'np.array', (['[0.01, 1.01, 2.01, 3.01, 4.01, 5.01]'], {}), '([0.01, 1.01, 2.01, 3.01, 4.01, 5.01])\n', (703, 741), True, 'import numpy as np\n'), ((761, 795), 'numpy.array', 'np.array', (['[11, 12, 13, 14, 15, 16]'], {}), '([11, 12, 13, 14, 15, 16])\n', (769, 795), True, 'import numpy as np\n'), ((801, 831), 'dsutils.cleaning.remove_noninformative_cols', 'remove_noninformative_cols', (['df'], {}), '(df)\n', (827, 831), False, 'from dsutils.cleaning import remove_noninformative_cols\n'), ((1159, 1173), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1171, 1173), True, 'import pandas as pd\n'), ((1188, 1228), 'numpy.array', 'np.array', (["['a', 'a', 'a', 'b', 'b', 'b']"], {}), "(['a', 'a', 'a', 'b', 'b', 'b'])\n", (1196, 1228), True, 'import numpy as np\n'), ((1264, 1292), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (1272, 1292), True, 'import numpy as np\n'), ((1318, 1348), 
'dsutils.cleaning.remove_noninformative_cols', 'remove_noninformative_cols', (['df'], {}), '(df)\n', (1344, 1348), False, 'from dsutils.cleaning import remove_noninformative_cols\n'), ((1499, 1513), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1511, 1513), True, 'import pandas as pd\n'), ((1528, 1568), 'numpy.array', 'np.array', (["['a', 'a', 'a', 'a', 'a', 'a']"], {}), "(['a', 'a', 'a', 'a', 'a', 'a'])\n", (1536, 1568), True, 'import numpy as np\n'), ((1583, 1623), 'numpy.array', 'np.array', (["['a', 'a', 'a', 'b', 'b', 'b']"], {}), "(['a', 'a', 'a', 'b', 'b', 'b'])\n", (1591, 1623), True, 'import numpy as np\n'), ((1638, 1666), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1])\n', (1646, 1666), True, 'import numpy as np\n'), ((1681, 1709), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (1689, 1709), True, 'import numpy as np\n'), ((1714, 1744), 'dsutils.cleaning.remove_noninformative_cols', 'remove_noninformative_cols', (['df'], {}), '(df)\n', (1740, 1744), False, 'from dsutils.cleaning import remove_noninformative_cols\n'), ((1882, 1896), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1894, 1896), True, 'import pandas as pd\n'), ((1911, 1939), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (1919, 1939), True, 'import numpy as np\n'), ((1954, 1994), 'numpy.array', 'np.array', (["['a', 'a', 'a', 'b', 'b', 'b']"], {}), "(['a', 'a', 'a', 'b', 'b', 'b'])\n", (1962, 1994), True, 'import numpy as np\n'), ((2009, 2037), 'numpy.array', 'np.array', (['[1, 1, 2, 2, 3, 3]'], {}), '([1, 1, 2, 2, 3, 3])\n', (2017, 2037), True, 'import numpy as np\n'), ((2052, 2080), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (2060, 2080), True, 'import numpy as np\n'), ((2085, 2115), 'dsutils.cleaning.remove_noninformative_cols', 'remove_noninformative_cols', (['df'], {}), '(df)\n', (2111, 2115), False, 'from dsutils.cleaning import 
remove_noninformative_cols\n'), ((2334, 2348), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2346, 2348), True, 'import pandas as pd\n'), ((2363, 2403), 'numpy.array', 'np.array', (["['a', 'a', 'a', 'b', 'b', 'b']"], {}), "(['a', 'a', 'a', 'b', 'b', 'b'])\n", (2371, 2403), True, 'import numpy as np\n'), ((2418, 2458), 'numpy.array', 'np.array', (["['a', 'a', 'b', 'b', 'c', 'c']"], {}), "(['a', 'a', 'b', 'b', 'c', 'c'])\n", (2426, 2458), True, 'import numpy as np\n'), ((2473, 2501), 'numpy.array', 'np.array', (['[1, 1, 0, 1, 1, 1]'], {}), '([1, 1, 0, 1, 1, 1])\n', (2481, 2501), True, 'import numpy as np\n'), ((2506, 2540), 'dsutils.cleaning.categorical_to_int', 'categorical_to_int', (['df'], {'cols': "['A']"}), "(df, cols=['A'])\n", (2524, 2540), False, 'from dsutils.cleaning import categorical_to_int\n'), ((2808, 2848), 'numpy.array', 'np.array', (["['a', 'a', 'a', 'b', 'b', 'b']"], {}), "(['a', 'a', 'a', 'b', 'b', 'b'])\n", (2816, 2848), True, 'import numpy as np\n'), ((2853, 2885), 'dsutils.cleaning.categorical_to_int', 'categorical_to_int', (['df'], {'cols': '"""A"""'}), "(df, cols='A')\n", (2871, 2885), False, 'from dsutils.cleaning import categorical_to_int\n'), ((3164, 3204), 'numpy.array', 'np.array', (["['a', 'a', 'a', 'b', 'b', 'b']"], {}), "(['a', 'a', 'a', 'b', 'b', 'b'])\n", (3172, 3204), True, 'import numpy as np\n'), ((3209, 3231), 'dsutils.cleaning.categorical_to_int', 'categorical_to_int', (['df'], {}), '(df)\n', (3227, 3231), False, 'from dsutils.cleaning import categorical_to_int\n'), ((3494, 3534), 'numpy.array', 'np.array', (["['a', 'a', 'a', 'b', 'b', 'b']"], {}), "(['a', 'a', 'a', 'b', 'b', 'b'])\n", (3502, 3534), True, 'import numpy as np\n'), ((3567, 3589), 'dsutils.cleaning.categorical_to_int', 'categorical_to_int', (['df'], {}), '(df)\n', (3585, 3589), False, 'from dsutils.cleaning import categorical_to_int\n'), ((3829, 3843), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3841, 3843), True, 'import pandas as pd\n'), 
((3891, 3925), 'dsutils.cleaning.categorical_to_int', 'categorical_to_int', (['df'], {'cols': "['A']"}), "(df, cols=['A'])\n", (3909, 3925), False, 'from dsutils.cleaning import categorical_to_int\n'), ((4125, 4139), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4137, 4139), True, 'import pandas as pd\n'), ((4189, 4223), 'dsutils.cleaning.categorical_to_int', 'categorical_to_int', (['df'], {'cols': "['A']"}), "(df, cols=['A'])\n", (4207, 4223), False, 'from dsutils.cleaning import categorical_to_int\n')] |
import unittest
import numpy as np
import pandas
from GeoVectorizer import GeoVectorizer, GEO_VECTOR_LEN
from shapely import wkt as wktreader
# Shared fixture: CSV with three WKT columns (brt, osm and their
# intersection) used throughout this test module.
TOPOLOGY_CSV = 'test_files/polygon_multipolygon.csv'
SOURCE_DATA = pandas.read_csv(TOPOLOGY_CSV)
brt_wkt = SOURCE_DATA['brt_wkt']
osm_wkt = SOURCE_DATA['osm_wkt']
target_wkt = SOURCE_DATA['intersection_wkt']

# Hand-built geometry matrix: each row is [x, y, f1, f2, f3] where the three
# trailing columns look like one-hot point/stop markers used by the vectorizer
# encoding — TODO(review): confirm exact flag semantics against GeoVectorizer.
# Used by the (currently disabled) interpolation test below.
input_geom = np.array([
    [0., 0., 1., 0., 0.],
    [0., 1., 1., 0., 0.],
    [1., 1., 1., 0., 0.],
    [1., 0., 1., 0., 0.],
    [0., 0., 0., 1., 0.],
    [0., 0., 1., 0., 0.],
    [0., -1., 1., 0., 0.],
    [-1., -1., 1., 0., 0.],
    [-1., 0., 1., 0., 0.],
    [0., 0., 0., 0., 1.],
    [0., 0., 0., 0., 0.]
])

# Expected result of interpolating input_geom to twice as many points
# (midpoints inserted between consecutive vertices; flags preserved).
output_geom = np.array([
    [0.0, 0.00, 1., 0., 0.],
    [0.0, 0.25, 1., 0., 0.],
    [0.0, 0.50, 1., 0., 0.],
    [0.0, 0.75, 1., 0., 0.],
    [0.0, 1.00, 1., 0., 0.],
    [0.25, 1.0, 1., 0., 0.],
    [0.50, 1.0, 1., 0., 0.],
    [1.0, 1.00, 1., 0., 0.],
    [1.0, 0.50, 1., 0., 0.],
    [1.0, 0.00, 1., 0., 0.],
    [0.5, 0.00, 1., 0., 0.],
    [0.0, 0.00, 0., 1., 0.],
    [0.0, 0.00, 1., 0., 0.],
    [0.0, -0.5, 1., 0., 0.],
    [0.0, -1.0, 1., 0., 0.],
    [-0.5, -1., 1., 0., 0.],
    [-1., -1.0, 1., 0., 0.],
    [-1., -0.5, 1., 0., 0.],
    [-1., 0.00, 1., 0., 0.],
    [-0.5, 0.0, 1., 0., 0.],
    [0.00, 0.0, 0., 0., 1.],
    [0.00, 0.0, 0., 0., 0.]
])

# A geometry collection that is *not* empty — vectorize_wkt should reject it.
non_empty_geom_collection = 'GEOMETRYCOLLECTION(LINESTRING(1 1, 3 5),POLYGON((-1 -1, -1 -5, -5 -5, -5 -1, -1 -1)))'
class TestVectorizer(unittest.TestCase):
    """Exercises GeoVectorizer's WKT-to-matrix conversion."""

    def test_max_points(self):
        # Longest geometry across both WKT columns.
        self.assertEqual(GeoVectorizer.max_points(brt_wkt, osm_wkt), 159)

    # def test_interpolate(self):
    #     interpolated = GeoVectorizer.interpolate(input_geom, len(input_geom) * 2)
    #     for index, _ in enumerate(interpolated):
    #         result = list(interpolated[index])
    #         expected = list(output_geom[index])
    #         self.assertListEqual(result, expected, msg='Lists differ at index %i' % index)

    def test_vectorize_one_wkt(self):
        wkts = SOURCE_DATA['intersection_wkt']
        vectors = [GeoVectorizer.vectorize_wkt(geom, 20, simplify=True) for geom in wkts]
        self.assertEqual(len(wkts), len(brt_wkt))
        self.assertEqual(vectors[0].shape, (19, GEO_VECTOR_LEN))
        self.assertEqual(vectors[1].shape, (1, GEO_VECTOR_LEN))

    def test_fixed_size(self):
        wkts = SOURCE_DATA['intersection_wkt']
        # fixed_size pads every geometry out to exactly 20 points.
        stacked = np.array([
            GeoVectorizer.vectorize_wkt(geom, 20, simplify=True, fixed_size=True)
            for geom in wkts
        ])
        self.assertEqual(stacked.shape, (wkts.size, 20, GEO_VECTOR_LEN))

    def test_non_empty_geom_coll(self):
        # Non-empty geometry collections are not supported.
        with self.assertRaises(ValueError):
            GeoVectorizer.vectorize_wkt(non_empty_geom_collection, 100)

    def test_point(self):
        self.assertEqual(GeoVectorizer.vectorize_wkt('POINT(12 14)', 5).shape,
                         (1, GEO_VECTOR_LEN))

    def test_unsupported_geom(self):
        unknown = 'TEST_FOR_UNKNOWN_GEOM_TYPE ((10 10, 20 20, 10 40),(40 40, 30 30, 40 20, 30 10))'
        with self.assertRaises(Exception):
            GeoVectorizer.vectorize_wkt(unknown, 16)

    def test_vectorize_big_multipolygon(self):
        with open('test_files/big_multipolygon_wkt.txt', 'r') as file:
            wkt = file.read()
        budget = GeoVectorizer.max_points([wkt])
        self.assertEqual(GeoVectorizer.vectorize_wkt(wkt, budget).shape,
                         (144, GEO_VECTOR_LEN))

    def test_simplify_multipolygon_gt_max_points(self):
        with open('test_files/multipart_multipolygon_wkt.txt', 'r') as file:
            wkt = file.read()
        # With simplify=True the geometry is reduced to fit the budget.
        self.assertEqual(GeoVectorizer.vectorize_wkt(wkt, 20, simplify=True).shape,
                         (20, GEO_VECTOR_LEN))

    def test_multipolygon_exceed_max_points(self):
        with open('test_files/multipart_multipolygon_wkt.txt', 'r') as file:
            wkt = file.read()
        # Without simplify, exceeding the point budget must raise.
        with self.assertRaises(Exception):
            GeoVectorizer.vectorize_wkt(wkt, 20)

    def test_polygon_exceed_max_points(self):
        with open('test_files/multipart_multipolygon_wkt.txt', 'r') as file:
            wkt = file.read()
        first_geom = wktreader.loads(wkt).geoms[0]
        with self.assertRaises(Exception):
            GeoVectorizer.vectorize_wkt(first_geom.wkt, 20)
| [
"pandas.read_csv",
"GeoVectorizer.GeoVectorizer.max_points",
"numpy.array",
"GeoVectorizer.GeoVectorizer.vectorize_wkt",
"shapely.wkt.loads"
] | [((211, 240), 'pandas.read_csv', 'pandas.read_csv', (['TOPOLOGY_CSV'], {}), '(TOPOLOGY_CSV)\n', (226, 240), False, 'import pandas\n'), ((366, 695), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0,\n 0.0], [1.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, \n 1.0, 0.0, 0.0], [0.0, -1.0, 1.0, 0.0, 0.0], [-1.0, -1.0, 1.0, 0.0, 0.0],\n [-1.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, \n 0.0, 0.0]]'], {}), '([[0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, \n 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0], [\n 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, -1.0, 1.0, 0.0, 0.0], [-1.0, -1.0, 1.0,\n 0.0, 0.0], [-1.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0], [0.0,\n 0.0, 0.0, 0.0, 0.0]])\n', (374, 695), True, 'import numpy as np\n'), ((684, 1337), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.25, 1.0, 0.0, 0.0], [0.0, 0.5, 1.0, 0.0,\n 0.0], [0.0, 0.75, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0], [0.25, 1.0,\n 1.0, 0.0, 0.0], [0.5, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0, 0.0], [\n 1.0, 0.5, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0, 0.0], [0.5, 0.0, 1.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0], [0.0, \n -0.5, 1.0, 0.0, 0.0], [0.0, -1.0, 1.0, 0.0, 0.0], [-0.5, -1.0, 1.0, 0.0,\n 0.0], [-1.0, -1.0, 1.0, 0.0, 0.0], [-1.0, -0.5, 1.0, 0.0, 0.0], [-1.0, \n 0.0, 1.0, 0.0, 0.0], [-0.5, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, \n 1.0], [0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.25, 1.0, 0.0, 0.0], [0.0, 0.5,\n 1.0, 0.0, 0.0], [0.0, 0.75, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0],\n [0.25, 1.0, 1.0, 0.0, 0.0], [0.5, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, \n 0.0, 0.0], [1.0, 0.5, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0, 0.0], [0.5, \n 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 1.0, 0.0, \n 0.0], [0.0, -0.5, 1.0, 0.0, 0.0], [0.0, -1.0, 1.0, 0.0, 0.0], [-0.5, -\n 1.0, 1.0, 0.0, 
0.0], [-1.0, -1.0, 1.0, 0.0, 0.0], [-1.0, -0.5, 1.0, 0.0,\n 0.0], [-1.0, 0.0, 1.0, 0.0, 0.0], [-0.5, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0,\n 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0]])\n', (692, 1337), True, 'import numpy as np\n'), ((1547, 1589), 'GeoVectorizer.GeoVectorizer.max_points', 'GeoVectorizer.max_points', (['brt_wkt', 'osm_wkt'], {}), '(brt_wkt, osm_wkt)\n', (1571, 1589), False, 'from GeoVectorizer import GeoVectorizer, GEO_VECTOR_LEN\n'), ((2998, 3044), 'GeoVectorizer.GeoVectorizer.vectorize_wkt', 'GeoVectorizer.vectorize_wkt', (['"""POINT(12 14)"""', '(5)'], {}), "('POINT(12 14)', 5)\n", (3025, 3044), False, 'from GeoVectorizer import GeoVectorizer, GEO_VECTOR_LEN\n'), ((2601, 2677), 'GeoVectorizer.GeoVectorizer.vectorize_wkt', 'GeoVectorizer.vectorize_wkt', (['wkt', 'max_points'], {'simplify': '(True)', 'fixed_size': '(True)'}), '(wkt, max_points, simplify=True, fixed_size=True)\n', (2628, 2677), False, 'from GeoVectorizer import GeoVectorizer, GEO_VECTOR_LEN\n'), ((2888, 2947), 'GeoVectorizer.GeoVectorizer.vectorize_wkt', 'GeoVectorizer.vectorize_wkt', (['non_empty_geom_collection', '(100)'], {}), '(non_empty_geom_collection, 100)\n', (2915, 2947), False, 'from GeoVectorizer import GeoVectorizer, GEO_VECTOR_LEN\n'), ((3220, 3344), 'GeoVectorizer.GeoVectorizer.vectorize_wkt', 'GeoVectorizer.vectorize_wkt', (['"""TEST_FOR_UNKNOWN_GEOM_TYPE ((10 10, 20 20, 10 40),(40 40, 30 30, 40 20, 30 10))"""', '(16)'], {}), "(\n 'TEST_FOR_UNKNOWN_GEOM_TYPE ((10 10, 20 20, 10 40),(40 40, 30 30, 40 20, 30 10))'\n , 16)\n", (3247, 3344), False, 'from GeoVectorizer import GeoVectorizer, GEO_VECTOR_LEN\n'), ((3526, 3557), 'GeoVectorizer.GeoVectorizer.max_points', 'GeoVectorizer.max_points', (['[wkt]'], {}), '([wkt])\n', (3550, 3557), False, 'from GeoVectorizer import GeoVectorizer, GEO_VECTOR_LEN\n'), ((3583, 3627), 'GeoVectorizer.GeoVectorizer.vectorize_wkt', 'GeoVectorizer.vectorize_wkt', (['wkt', 'max_points'], {}), '(wkt, max_points)\n', (3610, 3627), False, 'from 
GeoVectorizer import GeoVectorizer, GEO_VECTOR_LEN\n'), ((3915, 3974), 'GeoVectorizer.GeoVectorizer.vectorize_wkt', 'GeoVectorizer.vectorize_wkt', (['wkt', 'max_points'], {'simplify': '(True)'}), '(wkt, max_points, simplify=True)\n', (3942, 3974), False, 'from GeoVectorizer import GeoVectorizer, GEO_VECTOR_LEN\n'), ((4513, 4533), 'shapely.wkt.loads', 'wktreader.loads', (['wkt'], {}), '(wkt)\n', (4528, 4533), True, 'from shapely import wkt as wktreader\n'), ((2207, 2279), 'GeoVectorizer.GeoVectorizer.vectorize_wkt', 'GeoVectorizer.vectorize_wkt', (['input_set[index]', 'max_points'], {'simplify': '(True)'}), '(input_set[index], max_points, simplify=True)\n', (2234, 2279), False, 'from GeoVectorizer import GeoVectorizer, GEO_VECTOR_LEN\n'), ((2725, 2745), 'numpy.array', 'np.array', (['vectorized'], {}), '(vectorized)\n', (2733, 2745), True, 'import numpy as np\n'), ((4294, 4338), 'GeoVectorizer.GeoVectorizer.vectorize_wkt', 'GeoVectorizer.vectorize_wkt', (['wkt', 'max_points'], {}), '(wkt, max_points)\n', (4321, 4338), False, 'from GeoVectorizer import GeoVectorizer, GEO_VECTOR_LEN\n'), ((4659, 4708), 'GeoVectorizer.GeoVectorizer.vectorize_wkt', 'GeoVectorizer.vectorize_wkt', (['geom.wkt', 'max_points'], {}), '(geom.wkt, max_points)\n', (4686, 4708), False, 'from GeoVectorizer import GeoVectorizer, GEO_VECTOR_LEN\n')] |
#!/usr/bin/env python3
"""Load a raw Bayer frame plus its JPEG counterpart, demosaic and display both."""
import colour
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
from colour.plotting import *
from colour_demosaicing import (
    EXAMPLES_RESOURCES_DIRECTORY,
    demosaicing_CFA_Bayer_bilinear,
    demosaicing_CFA_Bayer_Malvar2004,
    demosaicing_CFA_Bayer_Menon2007,
    mosaicing_CFA_Bayer)

cctf_encoding = colour.cctf_encoding

# Load the raw 8-bit sensor dump, keep one 320x320 frame, normalise to [0, 1].
image = np.fromfile('image.raw', 'uint8')[:320 * 320] / 0xff
print(image[0])
print(np.average(image))
print(image.shape)
print(image.shape[0] / 320)
lines = image.shape[0] // 320  # number of 320-pixel rows in the frame
image = np.reshape(image, (lines, 320))
print(image.shape)

im = Image.open("image.jpeg")
# BUG FIX: PIL's Image.size is (width, height) and getdata() yields pixels
# row by row, so a row-major reshape must be (height, width), i.e.
# (im.size[1], im.size[0]). The old (size[0], size[1]) order only worked
# because the test image happens to be square.
image_jpeg = np.array(im.getdata()).reshape(im.size[1], im.size[0]) / 0xff

# Apply the encoding colour component transfer function before display.
encoded = cctf_encoding(image)
encoded_jpeg = cctf_encoding(image_jpeg)
plot_image(encoded)
plot_image(encoded_jpeg)
# Demosaic both frames with the Menon (2007) algorithm, BGGR Bayer pattern.
plot_image(cctf_encoding(demosaicing_CFA_Bayer_Menon2007(image, 'BGGR')))
plot_image(cctf_encoding(demosaicing_CFA_Bayer_Menon2007(image_jpeg, 'BGGR')))
| [
"numpy.average",
"numpy.fromfile",
"PIL.Image.open",
"colour_demosaicing.demosaicing_CFA_Bayer_Menon2007",
"numpy.reshape"
] | [((572, 603), 'numpy.reshape', 'np.reshape', (['image', '(lines, 320)'], {}), '(image, (lines, 320))\n', (582, 603), True, 'import numpy as np\n'), ((629, 653), 'PIL.Image.open', 'Image.open', (['"""image.jpeg"""'], {}), "('image.jpeg')\n", (639, 653), False, 'from PIL import Image\n'), ((468, 485), 'numpy.average', 'np.average', (['image'], {}), '(image)\n', (478, 485), True, 'import numpy as np\n'), ((397, 430), 'numpy.fromfile', 'np.fromfile', (['"""image.raw"""', '"""uint8"""'], {}), "('image.raw', 'uint8')\n", (408, 430), True, 'import numpy as np\n'), ((870, 916), 'colour_demosaicing.demosaicing_CFA_Bayer_Menon2007', 'demosaicing_CFA_Bayer_Menon2007', (['image', '"""BGGR"""'], {}), "(image, 'BGGR')\n", (901, 916), False, 'from colour_demosaicing import EXAMPLES_RESOURCES_DIRECTORY, demosaicing_CFA_Bayer_bilinear, demosaicing_CFA_Bayer_Malvar2004, demosaicing_CFA_Bayer_Menon2007, mosaicing_CFA_Bayer\n'), ((944, 995), 'colour_demosaicing.demosaicing_CFA_Bayer_Menon2007', 'demosaicing_CFA_Bayer_Menon2007', (['image_jpeg', '"""BGGR"""'], {}), "(image_jpeg, 'BGGR')\n", (975, 995), False, 'from colour_demosaicing import EXAMPLES_RESOURCES_DIRECTORY, demosaicing_CFA_Bayer_bilinear, demosaicing_CFA_Bayer_Malvar2004, demosaicing_CFA_Bayer_Menon2007, mosaicing_CFA_Bayer\n')] |
import _init_paths
import argparse
import os
import random
import time
import numpy as np
from object_pose_utils.datasets.pose_dataset import OutputTypes as otypes
from object_pose_utils.datasets.ycb_dataset import YcbDataset as YCBDataset
from object_pose_utils.datasets.image_processing import ColorJitter, ImageNormalizer
from object_pose_utils.datasets.ycb_occlusion_augmentation import YCBOcclusionAugmentor
from object_pose_utils.datasets.point_processing import PointShifter
from object_pose_utils.utils import to_np
from tqdm import tqdm, trange
from time import sleep
import contextlib
import sys
from featurization import PoseCNNFeaturizer, toPoseCNNImage, getObjectGTQuaternion
import torch
import scipy.io as scio
import os
import sys
# Make the local ``tools`` and ``lib`` directories importable.  Paths are
# resolved relative to the current working directory, so the script is
# expected to be launched from the repository root.
module_path = os.path.abspath(os.path.join('tools'))
if module_path not in sys.path:
    sys.path.append(module_path)
module_path = os.path.abspath(os.path.join('lib'))
if module_path not in sys.path:
    sys.path.append(module_path)
class DummyTqdmFile(object):
    """File-like proxy that routes writes through tqdm.write."""
    file = None

    def __init__(self, file):
        self.file = file

    def write(self, x):
        # Skip pure-whitespace payloads (e.g. the trailing "\n" that
        # print() emits as a second call) to avoid blank tqdm lines.
        if x.rstrip():
            tqdm.write(x, file=self.file)

    def flush(self):
        # Delegate to the wrapped file's flush when it has one; no-op otherwise.
        flush = getattr(self.file, "flush", lambda: None)
        return flush()
@contextlib.contextmanager
def std_out_err_redirect_tqdm():
    """Temporarily wrap stdout/stderr in DummyTqdmFile; yield the real stdout."""
    saved = sys.stdout, sys.stderr
    try:
        sys.stdout = DummyTqdmFile(saved[0])
        sys.stderr = DummyTqdmFile(saved[1])
        yield saved[0]
    # Relay exceptions raised inside the ``with`` body.
    except Exception as exc:
        raise exc
    # Always restore the real streams, whatever happened.
    finally:
        sys.stdout, sys.stderr = saved
# Command-line interface: dataset location/mode, augmentation count, loader
# workers, output folder, optional object subset and resume index.  Parsed
# once at import time into the module-level ``opt`` namespace.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_root', type=str, default = 'datasets/ycb/YCB_Video_Dataset',
                    help='Dataset root dir (''YCB_Video_Dataset'')')
parser.add_argument('--dataset_mode', type=str, default = 'train_syn_valid',
                    help='Dataset mode')
parser.add_argument('--num_augmentations', type=int, default = 0,
                    help='Number of augmented images per render')
parser.add_argument('--workers', type=int, default = 10, help='Number of data loading workers')
#parser.add_argument('--weights', type=str, help='PoseNetGlobal weights file')
parser.add_argument('--output_folder', type=str, help='Feature save location')
parser.add_argument('--object_indices', type=int, nargs='+', default = None, help='Object indices to featureize')
parser.add_argument('--start_index', type=int, default = 0, help='Starting augmentation index')
opt = parser.parse_args()
def main():
    """Featurize YCB frames with PoseCNN and save per-object feature files.

    For every (optionally augmented) image in the dataset, runs the PoseCNN
    featurizer and writes one ``.npz`` file per detected object containing
    the ground-truth quaternion plus the network's feature vectors.
    """
    # Seed all RNGs from one draw so a run can be reproduced from the seed.
    opt.manualSeed = random.randint(1, 10000)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    if not os.path.exists(opt.output_folder):
        os.makedirs(opt.output_folder)

    num_points = 1000  # number of points sampled from the input pointcloud
    num_objects = 21   # number of YCB object classes
    if opt.object_indices is None:
        opt.object_indices = list(range(1, num_objects + 1))

    estimator = PoseCNNFeaturizer()
    output_format = [otypes.IMAGE,
                     otypes.DEPTH_IMAGE]

    with std_out_err_redirect_tqdm() as orig_stdout:
        preprocessors = []
        postprocessors = []
        if opt.num_augmentations > 0:
            # Random occlusions / colour jitter / point shifts make each
            # augmentation pass a different view of the same frame.
            preprocessors.extend([YCBOcclusionAugmentor(opt.dataset_root),
                                  ColorJitter()])
            postprocessors.append(PointShifter())

        dataset = YCBDataset(opt.dataset_root, mode=opt.dataset_mode,
                             object_list=opt.object_indices,
                             output_data=output_format,
                             resample_on_error=False,
                             preprocessors=preprocessors,
                             postprocessors=postprocessors,
                             image_size=[640, 480], num_points=num_points)

        # BUG FIX: in Python 3 ``zip(...)`` returns an iterator, so the old
        # ``zip(*dataset.image_list)[0]`` raised TypeError.  Extract the first
        # element of each entry explicitly, then keep one index per unique
        # image so every frame is featurized exactly once.
        _, u_idxs = np.unique([entry[0] for entry in dataset.image_list],
                              return_index=True)
        dataset.image_list = np.array(dataset.image_list)[u_idxs].tolist()
        dataset.list_obj = np.array(dataset.list_obj)[u_idxs].tolist()
        classes = dataset.classes

        dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False,
                                                 num_workers=opt.workers)

        if opt.num_augmentations > 0:
            pbar_aug = trange(opt.start_index, opt.num_augmentations,
                               file=orig_stdout, dynamic_ncols=True)
        else:
            pbar_aug = [None]  # single un-augmented pass

        for aug_idx in pbar_aug:
            pbar_save = tqdm(enumerate(dataloader), total=len(dataloader),
                             file=orig_stdout, dynamic_ncols=True)
            for i, data in pbar_save:
                if len(data) == 0 or len(data[0]) == 0:
                    continue  # loader returned an empty batch on error
                img, depth = data
                img = toPoseCNNImage(img[0])
                depth = to_np(depth[0])
                data_path = dataset.image_list[i]
                path = '{}/data/{}-meta.mat'.format(dataset.dataset_root,
                                                    dataset.getPath(i))
                meta_data = scio.loadmat(path)
                try:
                    seg = estimator(img, depth, meta_data)
                except Exception as e:
                    # Best effort: report the failed frame and move on.
                    print(e)
                    continue
                # One output file per detected object in the frame.
                for pose_idx, cls in enumerate(seg['rois'][:, 1]):
                    cls = int(cls)
                    quat = getObjectGTQuaternion(meta_data, cls)
                    feat = seg['feats'][pose_idx]
                    fc6 = seg['fc6'][pose_idx]
                    if opt.num_augmentations > 0:
                        output_filename = '{0}/data/{1}_{2}_{3}_feat.npz'.format(
                            opt.output_folder, data_path[0], classes[cls], aug_idx)
                    else:
                        output_filename = '{0}/data/{1}_{2}_feat.npz'.format(
                            opt.output_folder, data_path[0], classes[cls])
                    # exist_ok avoids the check-then-create race of the old code.
                    os.makedirs(os.path.dirname(output_filename), exist_ok=True)
                    np.savez(output_filename, quat=quat, feat=feat, fc6=fc6)
# Script entry point: featurize the dataset when run directly.
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"object_pose_utils.datasets.point_processing.PointShifter",
"scipy.io.loadmat",
"featurization.PoseCNNFeaturizer",
"object_pose_utils.utils.to_np",
"os.path.join",
"sys.path.append",
"random.randint",
"torch.utils.data.DataLoader",
"os.path.dirname",
"os.path.exists",
... | [((1797, 1822), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1820, 1822), False, 'import argparse\n'), ((783, 804), 'os.path.join', 'os.path.join', (['"""tools"""'], {}), "('tools')\n", (795, 804), False, 'import os\n'), ((846, 874), 'sys.path.append', 'sys.path.append', (['module_path'], {}), '(module_path)\n', (861, 874), False, 'import sys\n'), ((2725, 2749), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (2739, 2749), False, 'import random\n'), ((2754, 2781), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2765, 2781), False, 'import random\n'), ((2786, 2819), 'torch.manual_seed', 'torch.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2803, 2819), False, 'import torch\n'), ((3100, 3119), 'featurization.PoseCNNFeaturizer', 'PoseCNNFeaturizer', ([], {}), '()\n', (3117, 3119), False, 'from featurization import PoseCNNFeaturizer, toPoseCNNImage, getObjectGTQuaternion\n'), ((913, 932), 'os.path.join', 'os.path.join', (['"""lib"""'], {}), "('lib')\n", (925, 932), False, 'import os\n'), ((990, 1018), 'sys.path.append', 'sys.path.append', (['module_path'], {}), '(module_path)\n', (1005, 1018), False, 'import sys\n'), ((2831, 2864), 'os.path.exists', 'os.path.exists', (['opt.output_folder'], {}), '(opt.output_folder)\n', (2845, 2864), False, 'import os\n'), ((2874, 2904), 'os.makedirs', 'os.makedirs', (['opt.output_folder'], {}), '(opt.output_folder)\n', (2885, 2904), False, 'import os\n'), ((3557, 3806), 'object_pose_utils.datasets.ycb_dataset.YcbDataset', 'YCBDataset', (['opt.dataset_root'], {'mode': 'opt.dataset_mode', 'object_list': 'opt.object_indices', 'output_data': 'output_format', 'resample_on_error': '(False)', 'preprocessors': 'preprocessors', 'postprocessors': 'postprocessors', 'image_size': '[640, 480]', 'num_points': '(1000)'}), '(opt.dataset_root, mode=opt.dataset_mode, object_list=opt.\n object_indices, output_data=output_format, 
resample_on_error=False,\n preprocessors=preprocessors, postprocessors=postprocessors, image_size=\n [640, 480], num_points=1000)\n', (3567, 3806), True, 'from object_pose_utils.datasets.ycb_dataset import YcbDataset as YCBDataset\n'), ((4265, 4359), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': 'opt.workers'}), '(dataset, batch_size=1, shuffle=False,\n num_workers=opt.workers)\n', (4292, 4359), False, 'import torch\n'), ((1288, 1317), 'tqdm.tqdm.write', 'tqdm.write', (['x'], {'file': 'self.file'}), '(x, file=self.file)\n', (1298, 1317), False, 'from tqdm import tqdm, trange\n'), ((4488, 4576), 'tqdm.trange', 'trange', (['opt.start_index', 'opt.num_augmentations'], {'file': 'orig_stdout', 'dynamic_ncols': '(True)'}), '(opt.start_index, opt.num_augmentations, file=orig_stdout,\n dynamic_ncols=True)\n', (4494, 4576), False, 'from tqdm import tqdm, trange\n'), ((3514, 3528), 'object_pose_utils.datasets.point_processing.PointShifter', 'PointShifter', ([], {}), '()\n', (3526, 3528), False, 'from object_pose_utils.datasets.point_processing import PointShifter\n'), ((4987, 5009), 'featurization.toPoseCNNImage', 'toPoseCNNImage', (['img[0]'], {}), '(img[0])\n', (5001, 5009), False, 'from featurization import PoseCNNFeaturizer, toPoseCNNImage, getObjectGTQuaternion\n'), ((5034, 5049), 'object_pose_utils.utils.to_np', 'to_np', (['depth[0]'], {}), '(depth[0])\n', (5039, 5049), False, 'from object_pose_utils.utils import to_np\n'), ((5239, 5257), 'scipy.io.loadmat', 'scio.loadmat', (['path'], {}), '(path)\n', (5251, 5257), True, 'import scipy.io as scio\n'), ((3387, 3426), 'object_pose_utils.datasets.ycb_occlusion_augmentation.YCBOcclusionAugmentor', 'YCBOcclusionAugmentor', (['opt.dataset_root'], {}), '(opt.dataset_root)\n', (3408, 3426), False, 'from object_pose_utils.datasets.ycb_occlusion_augmentation import YCBOcclusionAugmentor\n'), ((3463, 3476), 
'object_pose_utils.datasets.image_processing.ColorJitter', 'ColorJitter', ([], {}), '()\n', (3474, 3476), False, 'from object_pose_utils.datasets.image_processing import ColorJitter, ImageNormalizer\n'), ((4092, 4120), 'numpy.array', 'np.array', (['dataset.image_list'], {}), '(dataset.image_list)\n', (4100, 4120), True, 'import numpy as np\n'), ((4165, 4191), 'numpy.array', 'np.array', (['dataset.list_obj'], {}), '(dataset.list_obj)\n', (4173, 4191), True, 'import numpy as np\n'), ((5564, 5601), 'featurization.getObjectGTQuaternion', 'getObjectGTQuaternion', (['meta_data', 'cls'], {}), '(meta_data, cls)\n', (5585, 5601), False, 'from featurization import PoseCNNFeaturizer, toPoseCNNImage, getObjectGTQuaternion\n'), ((6335, 6391), 'numpy.savez', 'np.savez', (['output_filename'], {'quat': 'quat', 'feat': 'feat', 'fc6': 'fc6'}), '(output_filename, quat=quat, feat=feat, fc6=fc6)\n', (6343, 6391), True, 'import numpy as np\n'), ((6210, 6242), 'os.path.dirname', 'os.path.dirname', (['output_filename'], {}), '(output_filename)\n', (6225, 6242), False, 'import os\n'), ((6281, 6313), 'os.path.dirname', 'os.path.dirname', (['output_filename'], {}), '(output_filename)\n', (6296, 6313), False, 'import os\n')] |
import numpy as np
from typing import Optional
from fypy.volatility.implied.ImpliedVolCalculator import ImpliedVolCalculator
class MarketSlice(object):
    def __init__(self,
                 T: float,
                 F: float,
                 disc: float,
                 strikes: np.ndarray,
                 is_calls: np.ndarray,
                 bid_prices: Optional[np.ndarray] = None,
                 mid_prices: Optional[np.ndarray] = None,
                 ask_prices: Optional[np.ndarray] = None):
        """
        Market option quotes for a single tenor (slice) of a price surface.
        Either mid prices or both bid and ask prices must be supplied; when only
        bid/ask are present, mids are derived as their average.

        :param T: float, time to maturity of this slice
        :param F: float, forward price F(T)
        :param disc: float, discount factor D(T)
        :param strikes: np.ndarray, strikes assumed already sorted increasing
        :param is_calls: np.ndarray, per-strike flag, True for a call option
        :param bid_prices: np.ndarray, optional, bid price per strike
        :param mid_prices: np.ndarray, optional, mid price per strike
        :param ask_prices: np.ndarray, optional, ask price per strike
        """
        self.T = T
        self.F = F  # forward price for this tenor
        self.disc = disc
        self.strikes = strikes
        self.is_calls = is_calls
        self.bid_prices = bid_prices
        self.mid_prices = mid_prices
        self.ask_prices = ask_prices
        self._set_prices()
        # Implied vols start unset; populate via set_vols() or fill_implied_vols().
        self.bid_vols: Optional[np.ndarray] = None
        self.mid_vols: Optional[np.ndarray] = None
        self.ask_vols: Optional[np.ndarray] = None

    def set_vols(self,
                 bid_vols: Optional[np.ndarray] = None,
                 mid_vols: Optional[np.ndarray] = None,
                 ask_vols: Optional[np.ndarray] = None):
        """
        Directly assign implied volatilities. To compute them from the stored
        prices instead, use fill_implied_vols with an ImpliedVolCalculator.

        :param bid_vols: np.ndarray, bid implied vols (optional)
        :param mid_vols: np.ndarray, mid implied vols (optional)
        :param ask_vols: np.ndarray, ask implied vols (optional)
        :return: None
        """
        self.bid_vols = bid_vols
        self.mid_vols = mid_vols
        self.ask_vols = ask_vols

    def fill_implied_vols(self, calculator: ImpliedVolCalculator):
        """
        Compute and store implied vols for each of bid/mid/ask, skipping any
        side for which no prices are stored.

        :param calculator: ImpliedVolCalculator, inverts prices into vols
        :return: None
        """
        for attr, prices in (('bid_vols', self.bid_prices),
                             ('mid_vols', self.mid_prices),
                             ('ask_vols', self.ask_prices)):
            if prices is None:
                continue
            setattr(self, attr,
                    calculator.imply_vols(strikes=self.strikes, prices=prices,
                                          is_calls=self.is_calls, ttm=self.T))

    def _set_prices(self):
        # Derive mid prices from bid/ask when they were not given explicitly.
        if self.mid_prices is not None:
            return
        if self.bid_prices is None or self.ask_prices is None:
            raise ValueError("If you dont supply mid prices, must supply bid and ask prices")
        self.mid_prices = (self.bid_prices + self.ask_prices) / 2
if __name__ == '__main__':
    # Demonstration: price a strip of Black-76 calls, wrap them in a
    # MarketSlice, and invert back to implied vols.
    from fypy.pricing.analytical.black_scholes import black76_price_strikes
    from fypy.termstructures.EquityForward import EquityForward, DiscountCurve_ConstRate
    from fypy.volatility.implied.ImpliedVolCalculator import ImpliedVolCalculator_Black76
    T = 1.
    demo_strikes = np.arange(50, 150, 1)
    demo_is_calls = np.ones(len(demo_strikes), dtype=bool)
    rate_curve = DiscountCurve_ConstRate(rate=0.02)
    forward = EquityForward(S0=100, discount=rate_curve)
    demo_prices = black76_price_strikes(F=forward(T), K=demo_strikes, is_calls=demo_is_calls,
                                        vol=0.2, disc=rate_curve(T), T=T)
    demo_slice = MarketSlice(T=T, F=forward(T), disc=rate_curve(T), strikes=demo_strikes,
                            is_calls=demo_is_calls, mid_prices=demo_prices)
    calc = ImpliedVolCalculator_Black76(fwd_curve=forward, disc_curve=rate_curve)
    demo_slice.fill_implied_vols(calculator=calc)
    recovered_vols = demo_slice.mid_vols
| [
"fypy.termstructures.EquityForward.DiscountCurve_ConstRate",
"fypy.volatility.implied.ImpliedVolCalculator.ImpliedVolCalculator_Black76",
"numpy.arange",
"fypy.termstructures.EquityForward.EquityForward"
] | [((4016, 4037), 'numpy.arange', 'np.arange', (['(50)', '(150)', '(1)'], {}), '(50, 150, 1)\n', (4025, 4037), True, 'import numpy as np\n'), ((4118, 4152), 'fypy.termstructures.EquityForward.DiscountCurve_ConstRate', 'DiscountCurve_ConstRate', ([], {'rate': '(0.02)'}), '(rate=0.02)\n', (4141, 4152), False, 'from fypy.termstructures.EquityForward import EquityForward, DiscountCurve_ConstRate\n'), ((4163, 4205), 'fypy.termstructures.EquityForward.EquityForward', 'EquityForward', ([], {'S0': '(100)', 'discount': 'disc_curve'}), '(S0=100, discount=disc_curve)\n', (4176, 4205), False, 'from fypy.termstructures.EquityForward import EquityForward, DiscountCurve_ConstRate\n'), ((4480, 4546), 'fypy.volatility.implied.ImpliedVolCalculator.ImpliedVolCalculator_Black76', 'ImpliedVolCalculator_Black76', ([], {'fwd_curve': 'fwd', 'disc_curve': 'disc_curve'}), '(fwd_curve=fwd, disc_curve=disc_curve)\n', (4508, 4546), False, 'from fypy.volatility.implied.ImpliedVolCalculator import ImpliedVolCalculator_Black76\n')] |
"""
@brief test log(time=2s)
"""
import unittest
import numpy
from pyquickhelper.pycode import ExtTestCase
from mlinsights.sklapi.sklearn_base import SkBase
from mlinsights.sklapi.sklearn_base_learner import SkBaseLearner
from mlinsights.sklapi.sklearn_base_regressor import SkBaseRegressor
from mlinsights.sklapi.sklearn_base_classifier import SkBaseClassifier
from mlinsights.sklapi.sklearn_base_transform import SkBaseTransform
class TestSklearnBase(ExtTestCase):
    """Unit tests for the SkBase family of scikit-learn wrapper base classes:
    parameter handling, repr, equality testing, and not-implemented stubs."""

    def test_sklearn_base_parameters(self):
        # get_params round-trips constructor kwargs; repr reflects them;
        # an object equals itself; set_params accepts new keys.
        sk = SkBase(pa1="r", pa2=2)
        p = sk.get_params()
        self.assertEqual(p, dict(pa1="r", pa2=2))
        r = repr(sk)
        self.assertEqual(r, "SkBase(pa1='r', pa2=2)")
        self.assertTrue(sk.test_equality(sk))
        sk.set_params(r=3)

    def test_sklearn_equality(self):
        # A SkBaseLearner is never equal to a plain SkBase with the same params.
        sk1 = SkBaseLearner(pa1="r", pa2=2)
        p = sk1.get_params()
        self.assertEqual(p, dict(pa1="r", pa2=2))
        r = repr(sk1)
        self.assertEqual(r, "SkBaseLearner(pa1='r', pa2=2)")
        sk2 = SkBase(pa1="r", pa2=2)
        self.assertFalse(sk1.test_equality(sk2))

    def test_sklearn_equality_reg(self):
        # Regressor base: params/repr behave like SkBase; score() is abstract.
        sk1 = SkBaseRegressor(pa1="r", pa2=2)
        p = sk1.get_params()
        self.assertEqual(p, dict(pa1="r", pa2=2))
        r = repr(sk1)
        self.assertEqual(r, "SkBaseRegressor(pa1='r', pa2=2)")
        sk2 = SkBase(pa1="r", pa2=2)
        self.assertFalse(sk1.test_equality(sk2))
        x = numpy.array([[0, 1]], dtype=numpy.float64)
        y = numpy.array([0], dtype=numpy.float64)
        self.assertRaise(lambda: sk1.score(x, y), NotImplementedError)

    def test_sklearn_equality_cls(self):
        # Classifier base: score() and predict_proba() are both abstract.
        sk1 = SkBaseClassifier(pa1="r", pa2=2)
        p = sk1.get_params()
        self.assertEqual(p, dict(pa1="r", pa2=2))
        r = repr(sk1)
        self.assertEqual(r, "SkBaseClassifier(pa1='r', pa2=2)")
        sk2 = SkBase(pa1="r", pa2=2)
        self.assertFalse(sk1.test_equality(sk2))
        x = numpy.array([[0, 1]], dtype=numpy.float64)
        y = numpy.array([0], dtype=numpy.float64)
        self.assertRaise(lambda: sk1.score(x, y), NotImplementedError)
        self.assertRaise(lambda: sk1.predict_proba(x), NotImplementedError)

    def test_sklearn_equality_tr(self):
        # Transform base: transform() and fit() are abstract.
        sk1 = SkBaseTransform(pa1="r", pa2=2)
        p = sk1.get_params()
        self.assertEqual(p, dict(pa1="r", pa2=2))
        r = repr(sk1)
        self.assertEqual(r, "SkBaseTransform(pa1='r', pa2=2)")
        sk2 = SkBase(pa1="r", pa2=2)
        self.assertFalse(sk1.test_equality(sk2))
        x = numpy.array([[0, 1]], dtype=numpy.float64)
        self.assertRaise(lambda: sk1.transform(x), NotImplementedError)
        self.assertRaise(lambda: sk1.fit(x), NotImplementedError)

    def test_sklearn_compare(self):
        # compare_params on raw dicts: extra keys raise KeyError (either side);
        # nested dicts compare by value; mismatched nested keys raise ValueError.
        p1 = dict(pa1="r", pa2=2)
        p2 = dict(pa1="r", pa2=2, pa3=4)
        self.assertRaise(lambda: SkBase.compare_params(p1, p2), KeyError)
        self.assertRaise(lambda: SkBase.compare_params(p2, p1), KeyError)
        p1 = dict(pa1="r", pa2=2, d1=dict(e='e', i=0))
        p2 = dict(pa1="r", pa2=2, d1=dict(e='e', i=0))
        self.assertTrue(SkBase.compare_params(p1, p2))
        p2['d1']['i'] = 3
        self.assertFalse(SkBase.compare_params(p1, p2))
        p2['d1']['i2'] = 3
        self.assertRaise(lambda: SkBase.compare_params(
            p1, p2), ValueError, "Values for key")

    def test_sklearn_compare_object(self):
        # test_equality on objects mirrors compare_params, including when the
        # parameter values are themselves SkBase instances or lists of them.
        p1 = SkBase(pa1="r", pa2=2)
        p2 = SkBase(pa1="r", pa2=2, pa3=4)
        self.assertRaise(lambda: p1.test_equality(p2), KeyError)
        self.assertRaise(lambda: p2.test_equality(p1), KeyError)
        p1 = SkBase(pa1="r", pa2=2, d1=dict(e='e', i=0))
        p2 = SkBase(pa1="r", pa2=2, d1=dict(e='e', i=0))
        self.assertTrue(p1.test_equality(p2))
        p2 = SkBase(pa1="r", pa2=2, d1=dict(e='e', i=3))
        self.assertFalse(p1.test_equality(p2))
        p2 = SkBase(pa1="r", pa2=2, d1=dict(e='e', i=3, i2=4))
        self.assertRaise(lambda: p1.test_equality(p2), ValueError)
        # Nested SkBase values: equal / different value / extra key.
        p1 = SkBase(pa1="r", pa2=2, d1=SkBase(e='e', i=0))
        p2 = SkBase(pa1="r", pa2=2, d1=SkBase(e='e', i=0))
        self.assertTrue(p1.test_equality(p2))
        p1 = SkBase(pa1="r", pa2=2, d1=SkBase(e='e', i=0))
        p2 = SkBase(pa1="r", pa2=2, d1=SkBase(e='ef', i=0))
        self.assertRaise(lambda: p1.test_equality(p2), ValueError)
        p1 = SkBase(pa1="r", pa2=2, d1=SkBase(e='e', i=0))
        p2 = SkBase(pa1="r", pa2=2, d1=SkBase(e='e', i=0, i2=4))
        self.assertRaise(lambda: p1.test_equality(p2), KeyError)
        # Lists of SkBase values: extra key / equal / different lengths.
        p1 = SkBase(pa1="r", pa2=2, d1=[SkBase(e='e', i=0)])
        p2 = SkBase(pa1="r", pa2=2, d1=[SkBase(e='e', i=0, i2=4)])
        self.assertRaise(lambda: p1.test_equality(p2), KeyError)
        p1 = SkBase(pa1="r", pa2=2, d1=[SkBase(e='e', i=0)])
        p2 = SkBase(pa1="r", pa2=2, d1=[SkBase(e='e', i=0)])
        self.assertTrue(p1.test_equality(p2))
        p1 = SkBase(pa1="r", pa2=2, d1=[
            SkBase(e='e', i=0), SkBase(e='e', i=0)])
        p2 = SkBase(pa1="r", pa2=2, d1=[SkBase(e='e', i=0)])
        self.assertRaise(lambda: p1.test_equality(p2), ValueError)
if __name__ == "__main__":  # allow running this test file directly
    unittest.main()
| [
"unittest.main",
"mlinsights.sklapi.sklearn_base.SkBase.compare_params",
"mlinsights.sklapi.sklearn_base_classifier.SkBaseClassifier",
"mlinsights.sklapi.sklearn_base_regressor.SkBaseRegressor",
"mlinsights.sklapi.sklearn_base_learner.SkBaseLearner",
"numpy.array",
"mlinsights.sklapi.sklearn_base.SkBase... | [((5205, 5220), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5218, 5220), False, 'import unittest\n'), ((532, 554), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'pa1': '"""r"""', 'pa2': '(2)'}), "(pa1='r', pa2=2)\n", (538, 554), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((833, 862), 'mlinsights.sklapi.sklearn_base_learner.SkBaseLearner', 'SkBaseLearner', ([], {'pa1': '"""r"""', 'pa2': '(2)'}), "(pa1='r', pa2=2)\n", (846, 862), False, 'from mlinsights.sklapi.sklearn_base_learner import SkBaseLearner\n'), ((1039, 1061), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'pa1': '"""r"""', 'pa2': '(2)'}), "(pa1='r', pa2=2)\n", (1045, 1061), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((1167, 1198), 'mlinsights.sklapi.sklearn_base_regressor.SkBaseRegressor', 'SkBaseRegressor', ([], {'pa1': '"""r"""', 'pa2': '(2)'}), "(pa1='r', pa2=2)\n", (1182, 1198), False, 'from mlinsights.sklapi.sklearn_base_regressor import SkBaseRegressor\n'), ((1377, 1399), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'pa1': '"""r"""', 'pa2': '(2)'}), "(pa1='r', pa2=2)\n", (1383, 1399), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((1461, 1503), 'numpy.array', 'numpy.array', (['[[0, 1]]'], {'dtype': 'numpy.float64'}), '([[0, 1]], dtype=numpy.float64)\n', (1472, 1503), False, 'import numpy\n'), ((1516, 1553), 'numpy.array', 'numpy.array', (['[0]'], {'dtype': 'numpy.float64'}), '([0], dtype=numpy.float64)\n', (1527, 1553), False, 'import numpy\n'), ((1681, 1713), 'mlinsights.sklapi.sklearn_base_classifier.SkBaseClassifier', 'SkBaseClassifier', ([], {'pa1': '"""r"""', 'pa2': '(2)'}), "(pa1='r', pa2=2)\n", (1697, 1713), False, 'from mlinsights.sklapi.sklearn_base_classifier import SkBaseClassifier\n'), ((1893, 1915), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'pa1': '"""r"""', 'pa2': '(2)'}), "(pa1='r', pa2=2)\n", (1899, 1915), False, 'from 
mlinsights.sklapi.sklearn_base import SkBase\n'), ((1977, 2019), 'numpy.array', 'numpy.array', (['[[0, 1]]'], {'dtype': 'numpy.float64'}), '([[0, 1]], dtype=numpy.float64)\n', (1988, 2019), False, 'import numpy\n'), ((2032, 2069), 'numpy.array', 'numpy.array', (['[0]'], {'dtype': 'numpy.float64'}), '([0], dtype=numpy.float64)\n', (2043, 2069), False, 'import numpy\n'), ((2272, 2303), 'mlinsights.sklapi.sklearn_base_transform.SkBaseTransform', 'SkBaseTransform', ([], {'pa1': '"""r"""', 'pa2': '(2)'}), "(pa1='r', pa2=2)\n", (2287, 2303), False, 'from mlinsights.sklapi.sklearn_base_transform import SkBaseTransform\n'), ((2482, 2504), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'pa1': '"""r"""', 'pa2': '(2)'}), "(pa1='r', pa2=2)\n", (2488, 2504), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((2566, 2608), 'numpy.array', 'numpy.array', (['[[0, 1]]'], {'dtype': 'numpy.float64'}), '([[0, 1]], dtype=numpy.float64)\n', (2577, 2608), False, 'import numpy\n'), ((3445, 3467), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'pa1': '"""r"""', 'pa2': '(2)'}), "(pa1='r', pa2=2)\n", (3451, 3467), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((3481, 3510), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'pa1': '"""r"""', 'pa2': '(2)', 'pa3': '(4)'}), "(pa1='r', pa2=2, pa3=4)\n", (3487, 3510), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((3141, 3170), 'mlinsights.sklapi.sklearn_base.SkBase.compare_params', 'SkBase.compare_params', (['p1', 'p2'], {}), '(p1, p2)\n', (3162, 3170), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((3223, 3252), 'mlinsights.sklapi.sklearn_base.SkBase.compare_params', 'SkBase.compare_params', (['p1', 'p2'], {}), '(p1, p2)\n', (3244, 3252), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((2892, 2921), 'mlinsights.sklapi.sklearn_base.SkBase.compare_params', 'SkBase.compare_params', (['p1', 'p2'], {}), '(p1, p2)\n', (2913, 2921), False, 'from 
mlinsights.sklapi.sklearn_base import SkBase\n'), ((2966, 2995), 'mlinsights.sklapi.sklearn_base.SkBase.compare_params', 'SkBase.compare_params', (['p2', 'p1'], {}), '(p2, p1)\n', (2987, 2995), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((3314, 3343), 'mlinsights.sklapi.sklearn_base.SkBase.compare_params', 'SkBase.compare_params', (['p1', 'p2'], {}), '(p1, p2)\n', (3335, 3343), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((4076, 4094), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""e"""', 'i': '(0)'}), "(e='e', i=0)\n", (4082, 4094), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((4135, 4153), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""e"""', 'i': '(0)'}), "(e='e', i=0)\n", (4141, 4153), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((4241, 4259), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""e"""', 'i': '(0)'}), "(e='e', i=0)\n", (4247, 4259), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((4300, 4319), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""ef"""', 'i': '(0)'}), "(e='ef', i=0)\n", (4306, 4319), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((4428, 4446), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""e"""', 'i': '(0)'}), "(e='e', i=0)\n", (4434, 4446), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((4487, 4511), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""e"""', 'i': '(0)', 'i2': '(4)'}), "(e='e', i=0, i2=4)\n", (4493, 4511), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((4619, 4637), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""e"""', 'i': '(0)'}), "(e='e', i=0)\n", (4625, 4637), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((4680, 4704), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""e"""', 'i': '(0)', 'i2': '(4)'}), "(e='e', i=0, 
i2=4)\n", (4686, 4704), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((4813, 4831), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""e"""', 'i': '(0)'}), "(e='e', i=0)\n", (4819, 4831), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((4874, 4892), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""e"""', 'i': '(0)'}), "(e='e', i=0)\n", (4880, 4892), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((5003, 5021), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""e"""', 'i': '(0)'}), "(e='e', i=0)\n", (5009, 5021), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((5023, 5041), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""e"""', 'i': '(0)'}), "(e='e', i=0)\n", (5029, 5041), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n'), ((5084, 5102), 'mlinsights.sklapi.sklearn_base.SkBase', 'SkBase', ([], {'e': '"""e"""', 'i': '(0)'}), "(e='e', i=0)\n", (5090, 5102), False, 'from mlinsights.sklapi.sklearn_base import SkBase\n')] |
# Python code by <NAME> March 2021 (with input from <NAME> and <NAME>)
from astropy.coordinates import EarthLocation
from astropy.coordinates import get_body_barycentric_posvel, get_body_barycentric
from astropy.time import Time
import astropy.constants as ac
import os
import numpy as np
import pandas as pd
import barycorrpy
from barycorrpy import PINT_erfautils as PINT
from barycorrpy.utils import CalculatePositionVector
from barycorrpy.PhysicalConstants import *
from scipy.spatial.transform import Rotation as R
# For full precision, do not use obsname='KPNO', but use actual WIYN coords
# --- Script-level setup: observatory location and HARPS-N data ---
# NOTE(review): obsname is set to 'KPNO' and then immediately overwritten with
# None two lines later, so the EarthLocation.of_site branch below is dead code
# and the geodetic coordinates are always used.
obsname = 'KPNO'
ephemeris = 'de430'
obsname = None
# Geodetic coordinates used when obsname is None; presumably the La Palma
# (HARPS-N) site rather than KPNO -- TODO confirm with the data provenance.
longi = -17.8893
lat = 28.7541
alt = 2370
if obsname:
    # Named-site lookup: derive lat/lon/alt from astropy's site registry.
    loc = EarthLocation.of_site(obsname)
    lat = loc.lat.value
    longi = loc.lon.value
    alt = loc.height.value
else:
    loc = EarthLocation.from_geodetic(longi, lat, height=alt)
# HARPS-N solar measurements; columns used below: JD, FWHMobs, FWHMsid.
HARPSN_df = pd.read_csv("../data/Sun_harpsn_qualflag.csv")
# NOTE(review): converts stored (JD - 2400000) back to full JD; the standard
# MJD offset is 2400000.5 -- confirm which convention the CSV actually uses.
jd = np.array(HARPSN_df['JD']) + 2400000
F_obs = np.array(HARPSN_df['FWHMobs'])
F_sid = np.array(HARPSN_df['FWHMsid'])
# Observed quantity modelled below: difference of squared FWHMs.
HARPS_Delta = F_obs**2 - F_sid**2
# Only every 50th epoch is converted to an astropy Time (matches the [::50]
# subsampling used when comparing against HARPS_Delta later in the script).
JDUTC_master = Time(jd[::50] , format='jd', scale='utc')
def CalculateFWHMDifference_SolarRotation_Equatorial(loc, JDUTC):
    """
    Calculate the difference between the Observed Solar FWHM and Sidereal Solar FWHM
    Based on Collier Cameron et al. style analysis (2019).
    INPUTS:
        loc: Astropy Earth Location object. https://docs.astropy.org/en/stable/api/astropy.coordinates.EarthLocation.html
        JDUTC: Astropy Time Object.
    OUTPUT:
        Delta: F_obs**2 - F_sid**2 [(km/s)^2]
    NOTE(review): relies on module-level globals `ephemeris`, `c` and `u`;
    `c` and `u` are expected to come from the star import of
    barycorrpy.PhysicalConstants -- confirm.
    """
    # Convert times to obtain TDB and TT
    JDTDB = JDUTC.tdb
    JDTT = JDUTC.tt
    ################################
    ###### EARTH EPHEMERIS #########
    ################################
    ##### NUTATION, PRECESSION, ETC. #####
    # Observatory position/velocity wrt the Geocenter (GCRS), via PINT/ERFA.
    r_pint, v_pint = PINT.gcrs_posvel_from_itrf(loc, JDUTC, JDTT)
    r_eci = r_pint[0]  # [m]
    v_eci = v_pint[0]  # [m/s]
    ##### EPHEMERIDES #####
    # Barycentric position/velocity of the Earth from the JPL ephemeris.
    earth_geo = get_body_barycentric_posvel('earth', JDTDB, ephemeris=ephemeris)  # [km]
    r_geo = np.reshape(earth_geo[0].xyz.value*1000., 3)  # [m]
    v_geo = np.reshape(earth_geo[1].xyz.value*1000./86400., 3)  # [m/s]
    PosVector_EarthSSB = r_eci + r_geo  # [m], observatory wrt solar-system barycenter
    # Relativistic Addition of Velocities
    VelVector_EarthSSB = (v_eci+v_geo) / (1.+ np.sum(v_eci*v_geo)/c**2)  # [m/s]
    ################################
    ###### SOLAR EPHEMERIS #########
    ################################
    solar_ephem = get_body_barycentric_posvel('sun', JDTDB, ephemeris=ephemeris)
    PosVector_SolSSB = np.reshape(solar_ephem[0].xyz.value*1000., 3)  # [m]
    VelVector_SolSSB = np.reshape(solar_ephem[1].xyz.value*1000./86400., 3)  # [m/s]
    ################################
    #### EQUATORIAL COORD VECTORS ##
    ################################
    # Earth->Sun separation vector, its magnitude and unit vector.
    PosVector_EarthSol, PosMag_EarthSol, PosHat_EarthSol = CalculatePositionVector(r1=PosVector_EarthSSB, r2=PosVector_SolSSB)
    VelVector_EarthSol = (VelVector_EarthSSB - VelVector_SolSSB) / (1. + np.sum(VelVector_SolSSB*VelVector_EarthSSB)/c**2)
    # Orbital angular velocity of the Earth about the Sun:
    # omega = (r x v)/|r|^2  (the commented scalar form below is the 2-D analogue)
    # Omega = (PosVector_EarthSol[0]*VelVector_EarthSol[1] - PosVector_EarthSol[1]*VelVector_EarthSol[0]) / (PosMag_EarthSol**2)
    OmegaVector = np.cross(PosVector_EarthSol, VelVector_EarthSol) / (PosMag_EarthSol**2)
    ################################
    ################################
    # Rotation Axis of the Sun with respect to Equatorial System
    # https://www2.mps.mpg.de/homes/fraenz/systems/systems3art.pdf
    # Eqn 13
    alpha = 286.13  # RA of the solar rotation pole [deg] (rotation about z)
    dec = 63.87  # Dec of the solar rotation pole [deg] (rotation about x)
    Theta = alpha * np.pi/180
    Phi = dec * np.pi/180
    # NOTE(review): the three constants below are never used in this function.
    EclipticEpsilon = 23.44
    SolarInclination = 7.25
    SolarLongitude = 75.76
    # Theta = SolarLongitude * 180/np.pi
    # Phi = SolarInclination * 180/np.pi
    # Need to perform Extrinsic Euler Rotations
    # Transform to solar rotation axis
    # NOTE(review): the rotation matrix computed on the next line is immediately
    # discarded -- REquatorial is reassigned to the unit vector of the solar
    # rotation axis (from alpha/dec). Confirm whether the matrix form was meant
    # to be applied instead of the direct axis vector.
    REquatorial = R.from_euler("zyx", [alpha, 0, dec], degrees=True).as_matrix()
    REquatorial = np.array([np.cos(Theta)*np.cos(Phi), np.sin(Theta)*np.cos(Phi), np.sin(Phi)])
    """
    ################################
    ####ROTATED COORD VECTORS ####
    ################################
    RotatedPositionVector = np.matmul(Rzyx, PosVector_EarthSol)
    RotatedPositionVector = PosVector_EarthSol
    RotatedPositionHat = RotatedPositionVector / np.linalg.norm(RotatedPositionVector)
    OmegaEarthVectorRotated = np.matmul(Rzyx, OmegaVector)
    ################################
    ################################
    """
    # Solar sidereal rotation rate, ~2.972e-6 rad/s, initially along +z.
    OmegaSolVector = np.array([0, 0, 2.972 *1e-6])
    OmegaSolHat = np.array([0,0,1])
    # Point the solar rotation vector along the equatorial pole direction.
    # OmegaSolVector = np.matmul(REquatorial, OmegaSolVector)
    OmegaSolVector = REquatorial * np.linalg.norm(OmegaSolVector)
    OmegaSolHat = OmegaSolVector / np.linalg.norm(OmegaSolVector)
    # sin(i): i is the angle between the solar rotation axis and the Earth-Sun line.
    sini = np.sqrt(1 - np.matmul(OmegaSolHat, PosHat_EarthSol)**2)
    Gamma = 1.04  # geometric broadening factor
    DeltaOmega = OmegaSolVector - OmegaVector
    # F_obs^2 - F_sid^2 in (km/s)^2: (Gamma*R_sun[km])^2 * (|dw|^2 sin^2 i - |w_sun|^2)
    Delta = ((Gamma* ac.R_sun.to(u.km).value)**2) * (np.matmul(DeltaOmega, DeltaOmega)*sini*sini - np.matmul(OmegaSolVector, OmegaSolVector))
    return Delta
# Evaluate the model at every 50th observation epoch (matching HARPS_Delta[::50]).
# BUGFIX: the function defined above is CalculateFWHMDifference_SolarRotation_Equatorial;
# the previously-called name CalculateFWHMDifference_SolarRotation does not exist
# anywhere in this script and raised a NameError at runtime.
Delta = np.array([CalculateFWHMDifference_SolarRotation_Equatorial(loc, JDUTC) for JDUTC in JDUTC_master])
#########
# Plot residuals between the HARPS-N measured FWHM difference and the model.
# NOTE(review): `plt` is used here but matplotlib.pyplot is never imported in
# this script -- add `import matplotlib.pyplot as plt` at the top of the file.
plt.plot(JDUTC_master.jd, HARPS_Delta[::50]-Delta)
plt.ylabel("HARPS - BCPy (km/s)^2")
plt.show(block=False)
| [
"astropy.coordinates.EarthLocation.from_geodetic",
"barycorrpy.utils.CalculatePositionVector",
"astropy.constants.R_sun.to",
"numpy.sum",
"pandas.read_csv",
"astropy.time.Time",
"astropy.coordinates.get_body_barycentric_posvel",
"numpy.cross",
"barycorrpy.PINT_erfautils.gcrs_posvel_from_itrf",
"nu... | [((931, 977), 'pandas.read_csv', 'pd.read_csv', (['"""../data/Sun_harpsn_qualflag.csv"""'], {}), "('../data/Sun_harpsn_qualflag.csv')\n", (942, 977), True, 'import pandas as pd\n'), ((1031, 1061), 'numpy.array', 'np.array', (["HARPSN_df['FWHMobs']"], {}), "(HARPSN_df['FWHMobs'])\n", (1039, 1061), True, 'import numpy as np\n'), ((1071, 1101), 'numpy.array', 'np.array', (["HARPSN_df['FWHMsid']"], {}), "(HARPSN_df['FWHMsid'])\n", (1079, 1101), True, 'import numpy as np\n'), ((1155, 1195), 'astropy.time.Time', 'Time', (['jd[::50]'], {'format': '"""jd"""', 'scale': '"""utc"""'}), "(jd[::50], format='jd', scale='utc')\n", (1159, 1195), False, 'from astropy.time import Time\n'), ((745, 775), 'astropy.coordinates.EarthLocation.of_site', 'EarthLocation.of_site', (['obsname'], {}), '(obsname)\n', (766, 775), False, 'from astropy.coordinates import EarthLocation\n'), ((862, 913), 'astropy.coordinates.EarthLocation.from_geodetic', 'EarthLocation.from_geodetic', (['longi', 'lat'], {'height': 'alt'}), '(longi, lat, height=alt)\n', (889, 913), False, 'from astropy.coordinates import EarthLocation\n'), ((986, 1011), 'numpy.array', 'np.array', (["HARPSN_df['JD']"], {}), "(HARPSN_df['JD'])\n", (994, 1011), True, 'import numpy as np\n'), ((1896, 1940), 'barycorrpy.PINT_erfautils.gcrs_posvel_from_itrf', 'PINT.gcrs_posvel_from_itrf', (['loc', 'JDUTC', 'JDTT'], {}), '(loc, JDUTC, JDTT)\n', (1922, 1940), True, 'from barycorrpy import PINT_erfautils as PINT\n'), ((2043, 2107), 'astropy.coordinates.get_body_barycentric_posvel', 'get_body_barycentric_posvel', (['"""earth"""', 'JDTDB'], {'ephemeris': 'ephemeris'}), "('earth', JDTDB, ephemeris=ephemeris)\n", (2070, 2107), False, 'from astropy.coordinates import get_body_barycentric_posvel, get_body_barycentric\n'), ((2125, 2171), 'numpy.reshape', 'np.reshape', (['(earth_geo[0].xyz.value * 1000.0)', '(3)'], {}), '(earth_geo[0].xyz.value * 1000.0, 3)\n', (2135, 2171), True, 'import numpy as np\n'), ((2185, 2241), 'numpy.reshape', 
'np.reshape', (['(earth_geo[1].xyz.value * 1000.0 / 86400.0)', '(3)'], {}), '(earth_geo[1].xyz.value * 1000.0 / 86400.0, 3)\n', (2195, 2241), True, 'import numpy as np\n'), ((2537, 2599), 'astropy.coordinates.get_body_barycentric_posvel', 'get_body_barycentric_posvel', (['"""sun"""', 'JDTDB'], {'ephemeris': 'ephemeris'}), "('sun', JDTDB, ephemeris=ephemeris)\n", (2564, 2599), False, 'from astropy.coordinates import get_body_barycentric_posvel, get_body_barycentric\n'), ((2623, 2671), 'numpy.reshape', 'np.reshape', (['(solar_ephem[0].xyz.value * 1000.0)', '(3)'], {}), '(solar_ephem[0].xyz.value * 1000.0, 3)\n', (2633, 2671), True, 'import numpy as np\n'), ((2695, 2753), 'numpy.reshape', 'np.reshape', (['(solar_ephem[1].xyz.value * 1000.0 / 86400.0)', '(3)'], {}), '(solar_ephem[1].xyz.value * 1000.0 / 86400.0, 3)\n', (2705, 2753), True, 'import numpy as np\n'), ((2926, 2993), 'barycorrpy.utils.CalculatePositionVector', 'CalculatePositionVector', ([], {'r1': 'PosVector_EarthSSB', 'r2': 'PosVector_SolSSB'}), '(r1=PosVector_EarthSSB, r2=PosVector_SolSSB)\n', (2949, 2993), False, 'from barycorrpy.utils import CalculatePositionVector\n'), ((4611, 4642), 'numpy.array', 'np.array', (['[0, 0, 2.972 * 1e-06]'], {}), '([0, 0, 2.972 * 1e-06])\n', (4619, 4642), True, 'import numpy as np\n'), ((4658, 4677), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (4666, 4677), True, 'import numpy as np\n'), ((3296, 3344), 'numpy.cross', 'np.cross', (['PosVector_EarthSol', 'VelVector_EarthSol'], {}), '(PosVector_EarthSol, VelVector_EarthSol)\n', (3304, 3344), True, 'import numpy as np\n'), ((4825, 4855), 'numpy.linalg.norm', 'np.linalg.norm', (['OmegaSolVector'], {}), '(OmegaSolVector)\n', (4839, 4855), True, 'import numpy as np\n'), ((4891, 4921), 'numpy.linalg.norm', 'np.linalg.norm', (['OmegaSolVector'], {}), '(OmegaSolVector)\n', (4905, 4921), True, 'import numpy as np\n'), ((3987, 4037), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""zyx"""', 
'[alpha, 0, dec]'], {'degrees': '(True)'}), "('zyx', [alpha, 0, dec], degrees=True)\n", (3999, 4037), True, 'from scipy.spatial.transform import Rotation as R\n'), ((4132, 4143), 'numpy.sin', 'np.sin', (['Phi'], {}), '(Phi)\n', (4138, 4143), True, 'import numpy as np\n'), ((5149, 5190), 'numpy.matmul', 'np.matmul', (['OmegaSolVector', 'OmegaSolVector'], {}), '(OmegaSolVector, OmegaSolVector)\n', (5158, 5190), True, 'import numpy as np\n'), ((2376, 2397), 'numpy.sum', 'np.sum', (['(v_eci * v_geo)'], {}), '(v_eci * v_geo)\n', (2382, 2397), True, 'import numpy as np\n'), ((3065, 3110), 'numpy.sum', 'np.sum', (['(VelVector_SolSSB * VelVector_EarthSSB)'], {}), '(VelVector_SolSSB * VelVector_EarthSSB)\n', (3071, 3110), True, 'import numpy as np\n'), ((4078, 4091), 'numpy.cos', 'np.cos', (['Theta'], {}), '(Theta)\n', (4084, 4091), True, 'import numpy as np\n'), ((4092, 4103), 'numpy.cos', 'np.cos', (['Phi'], {}), '(Phi)\n', (4098, 4103), True, 'import numpy as np\n'), ((4105, 4118), 'numpy.sin', 'np.sin', (['Theta'], {}), '(Theta)\n', (4111, 4118), True, 'import numpy as np\n'), ((4119, 4130), 'numpy.cos', 'np.cos', (['Phi'], {}), '(Phi)\n', (4125, 4130), True, 'import numpy as np\n'), ((4945, 4984), 'numpy.matmul', 'np.matmul', (['OmegaSolHat', 'PosHat_EarthSol'], {}), '(OmegaSolHat, PosHat_EarthSol)\n', (4954, 4984), True, 'import numpy as np\n'), ((5071, 5088), 'astropy.constants.R_sun.to', 'ac.R_sun.to', (['u.km'], {}), '(u.km)\n', (5082, 5088), True, 'import astropy.constants as ac\n'), ((5103, 5136), 'numpy.matmul', 'np.matmul', (['DeltaOmega', 'DeltaOmega'], {}), '(DeltaOmega, DeltaOmega)\n', (5112, 5136), True, 'import numpy as np\n')] |
import torch
import numpy as np
import matplotlib.pyplot as plt
from enbed.utils.scorer import RESCAL_score, DistMult_score
class RESCAL:
    def __init__(self, num_entities, num_relations, dim, seed = 1231245):
        '''
        Implementation of the RESCAL graph embedding model (Nickel et al., 2011).
        dim: embedding dimension
        num_entities: number of entities in the graph
        num_relations: number of relation types in the graph
        seed: torch RNG seed used when initialising the embedding weights
        '''
        self.dim = dim
        self.num_entities = num_entities
        self.num_relations = num_relations
        # Embeddings: one dim-vector per entity and one (dim x dim) matrix,
        # stored flattened, per relation type.
        torch.manual_seed(seed)
        self.entities = torch.nn.Embedding(num_entities, dim)
        self.relations = torch.nn.Embedding(num_relations, dim*dim)

    def init(self):
        '''Shrink the randomly initialised embedding weights by 10x, in place.'''
        self.entities.weight.data *= 0.1
        self.relations.weight.data *= 0.1

    def score(self, sub, rel, obj):
        '''
        Score a list of triples [[s0, r0, o0], [s1, r1, o1], ...].
        sub, rel and obj are index lists [s0, s1, ...], [r0, r1, ...], [o0, o1, ...]
        '''
        s_emb = self.entities(torch.tensor(sub).long())
        o_emb = self.entities(torch.tensor(obj).long())
        r_emb = self.relations(torch.tensor(rel).long())
        # Reshape each flat relation embedding back into its (dim x dim) matrix.
        return RESCAL_score(s_emb, o_emb, r_emb.view(-1, self.dim, self.dim))

    def prob(self, sub, rel, obj):
        '''
        Triple probability: sigmoid applied to the raw score.
        '''
        return torch.sigmoid(self.score(sub, rel, obj))

    def save(self, savepath, appdix = ''):
        '''
        Save both embedding tables as .npy files under `savepath` and write
        quick vertical-line visualisations of their components as .png files.
        '''
        rel_embs = self.relations.weight.data.detach().numpy()
        ent_embs = self.entities.weight.data.detach().numpy()
        np.save('{}/relation_embeddings_{}.npy'.format(savepath, appdix), rel_embs)
        np.save('{}/entity_embeddings_{}.npy'.format(savepath, appdix), ent_embs)
        plt.close()
        # BUGFIX: the original hard-coded range(50), which raised IndexError for
        # models with fewer than 50 entities; plot at most the first 50.
        for j in range(min(50, len(ent_embs))):
            plt.vlines(ent_embs[j], j+0.1, (j+1)-0.1)
        plt.savefig('{}/entity_embeddings_{}.png'.format(savepath, appdix))
        plt.close()
        for j in range(len(rel_embs)):
            plt.vlines(rel_embs[j], j+0.1, (j+1)-0.1)
        plt.savefig('{}/relation_embeddings_{}.png'.format(savepath, appdix))
class Energy(RESCAL):
    def __init__(self, num_entities, num_relations, dim, seed = 1231245):
        '''
        Energy-based model for calculating embeddings. The cost is obtained using stochastic sampling.
        Inherits the embeddings (and torch seeding) from RESCAL; additionally
        seeds numpy, which drives the proposal sampling in cost().
        '''
        super().__init__(num_entities, num_relations, dim, seed)
        np.random.seed(seed)
    def cost(self, sub, rel, obj, num_samples, burnin=0):
        '''
        Cost function using sampling to maximize data likelihood.

        sub, rel, obj: equal-length index arrays forming the batch of observed triples
        num_samples: number of proposal rounds that count towards the cost
        burnin: additional initial rounds whose scores are discarded
        Returns a scalar tensor: -sum(score(data)) + (1/num_samples) * accumulated chain score.
        '''
        # Number of triples in the batch. (NOTE(review): the original comment
        # called this "the dimension of embedding vectors"; it is len(sub).)
        nbatch = len(sub)
        pscore = self.score(sub, rel, obj)
        total_score = 0
        old_score = pscore
        for k in range(num_samples+burnin):
            # For each triple, pick uniformly which slot gets resampled this
            # round: 0 = subject, 1 = relation, 2 = object.
            sro = np.random.randint(3, size=nbatch)
            # Exactly one of the three masks is True per triple.
            smask = (sro == 0)
            rmask = (sro == 1)
            omask = (sro == 2)
            # '~' on a numpy bool array is elementwise logical NOT, so each line
            # keeps the old index where the mask is False and draws a fresh
            # uniform index where it is True. The draws are floats in
            # [0, num_entities) / [0, num_relations); they are truncated to valid
            # integer ids by .long() inside score() and the dtype=int casts below.
            new_sub = ~smask*sub + smask*(np.random.random(nbatch)*self.num_entities)
            new_obj = ~omask*obj + omask*(np.random.random(nbatch)*self.num_entities)
            new_rel = ~rmask*rel + rmask*(np.random.random(nbatch)*self.num_relations)
            # Score the proposed triples.
            proposal_score = self.score(new_sub, new_rel, new_obj)
            # Metropolis-style acceptance: proposal i is accepted with
            # probability min(1, exp(proposal_score_i - old_score_i));
            # filters is 1.0 where accepted, 0.0 where rejected.
            filters = 1.*(torch.rand(nbatch) <= torch.exp(proposal_score-old_score))
            old_score = proposal_score*filters + old_score*(1-filters)
            # Commit the accepted proposals into the chain state as int indices.
            filters = filters.detach().numpy()
            sub = np.array(new_sub*filters + sub*(1-filters), dtype=int)
            obj = np.array(new_obj*filters + obj*(1-filters), dtype=int)
            rel = np.array(new_rel*filters + rel*(1-filters), dtype=int)
            # Accumulate the chain's score once burn-in is over.
            if k >= burnin:
                total_score += old_score.sum()
        # Negative data score plus the sample-averaged chain score; lower cost
        # means the observed triples score higher relative to the sampled chain.
        cost = -pscore.sum() + 1./num_samples*total_score
        return cost
class EnergyDiag(Energy):
    def __init__(self, num_entities, num_relations, dim, seed = 1231245):
        '''
        Energy-based model for calculating embeddings, with diagonally-constrained relation matrices.
        Similar to DistMult (Yang et al., 2014).
        '''
        super().__init__(num_entities, num_relations, dim, seed)
        # One dim-vector per relation: the diagonal of a (dim x dim) relation matrix.
        self.relations = torch.nn.Embedding(num_relations, dim)
    def score(self, sub, rel, obj):
        '''Score (sub, rel, obj) triples with the DistMult bilinear-diagonal form.'''
        def _idx(values):
            # embedding lookup requires integer (long) index tensors
            return torch.tensor(values).long()
        subject_emb = self.entities(_idx(sub))
        object_emb = self.entities(_idx(obj))
        relation_emb = self.relations(_idx(rel))
        return DistMult_score(subject_emb, object_emb, relation_emb)
| [
"numpy.random.seed",
"torch.manual_seed",
"torch.nn.Embedding",
"matplotlib.pyplot.close",
"matplotlib.pyplot.vlines",
"torch.exp",
"enbed.utils.scorer.DistMult_score",
"numpy.random.randint",
"numpy.array",
"numpy.random.random",
"torch.rand",
"torch.tensor"
] | [((605, 628), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (622, 628), False, 'import torch\n'), ((653, 690), 'torch.nn.Embedding', 'torch.nn.Embedding', (['num_entities', 'dim'], {}), '(num_entities, dim)\n', (671, 690), False, 'import torch\n'), ((716, 760), 'torch.nn.Embedding', 'torch.nn.Embedding', (['num_relations', '(dim * dim)'], {}), '(num_relations, dim * dim)\n', (734, 760), False, 'import torch\n'), ((1875, 1886), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1884, 1886), True, 'import matplotlib.pyplot as plt\n'), ((2054, 2065), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2063, 2065), True, 'import matplotlib.pyplot as plt\n'), ((2535, 2555), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2549, 2555), True, 'import numpy as np\n'), ((5486, 5524), 'torch.nn.Embedding', 'torch.nn.Embedding', (['num_relations', 'dim'], {}), '(num_relations, dim)\n', (5504, 5524), False, 'import torch\n'), ((5747, 5782), 'enbed.utils.scorer.DistMult_score', 'DistMult_score', (['s_emb', 'o_emb', 'r_emb'], {}), '(s_emb, o_emb, r_emb)\n', (5761, 5782), False, 'from enbed.utils.scorer import RESCAL_score, DistMult_score\n'), ((1927, 1972), 'matplotlib.pyplot.vlines', 'plt.vlines', (['ent_embs[j]', '(j + 0.1)', '(j + 1 - 0.1)'], {}), '(ent_embs[j], j + 0.1, j + 1 - 0.1)\n', (1937, 1972), True, 'import matplotlib.pyplot as plt\n'), ((2117, 2162), 'matplotlib.pyplot.vlines', 'plt.vlines', (['rel_embs[j]', '(j + 0.1)', '(j + 1 - 0.1)'], {}), '(rel_embs[j], j + 0.1, j + 1 - 0.1)\n', (2127, 2162), True, 'import matplotlib.pyplot as plt\n'), ((2943, 2976), 'numpy.random.randint', 'np.random.randint', (['(3)'], {'size': 'nbatch'}), '(3, size=nbatch)\n', (2960, 2976), True, 'import numpy as np\n'), ((4418, 4478), 'numpy.array', 'np.array', (['(new_sub * filters + sub * (1 - filters))'], {'dtype': 'int'}), '(new_sub * filters + sub * (1 - filters), dtype=int)\n', (4426, 4478), True, 'import numpy as np\n'), 
((4491, 4551), 'numpy.array', 'np.array', (['(new_obj * filters + obj * (1 - filters))'], {'dtype': 'int'}), '(new_obj * filters + obj * (1 - filters), dtype=int)\n', (4499, 4551), True, 'import numpy as np\n'), ((4564, 4624), 'numpy.array', 'np.array', (['(new_rel * filters + rel * (1 - filters))'], {'dtype': 'int'}), '(new_rel * filters + rel * (1 - filters), dtype=int)\n', (4572, 4624), True, 'import numpy as np\n'), ((1102, 1119), 'torch.tensor', 'torch.tensor', (['sub'], {}), '(sub)\n', (1114, 1119), False, 'import torch\n'), ((1158, 1175), 'torch.tensor', 'torch.tensor', (['obj'], {}), '(obj)\n', (1170, 1175), False, 'import torch\n'), ((1215, 1232), 'torch.tensor', 'torch.tensor', (['rel'], {}), '(rel)\n', (1227, 1232), False, 'import torch\n'), ((4075, 4093), 'torch.rand', 'torch.rand', (['nbatch'], {}), '(nbatch)\n', (4085, 4093), False, 'import torch\n'), ((4097, 4134), 'torch.exp', 'torch.exp', (['(proposal_score - old_score)'], {}), '(proposal_score - old_score)\n', (4106, 4134), False, 'import torch\n'), ((5592, 5609), 'torch.tensor', 'torch.tensor', (['sub'], {}), '(sub)\n', (5604, 5609), False, 'import torch\n'), ((5648, 5665), 'torch.tensor', 'torch.tensor', (['obj'], {}), '(obj)\n', (5660, 5665), False, 'import torch\n'), ((5705, 5722), 'torch.tensor', 'torch.tensor', (['rel'], {}), '(rel)\n', (5717, 5722), False, 'import torch\n'), ((3403, 3427), 'numpy.random.random', 'np.random.random', (['nbatch'], {}), '(nbatch)\n', (3419, 3427), True, 'import numpy as np\n'), ((3489, 3513), 'numpy.random.random', 'np.random.random', (['nbatch'], {}), '(nbatch)\n', (3505, 3513), True, 'import numpy as np\n'), ((3575, 3599), 'numpy.random.random', 'np.random.random', (['nbatch'], {}), '(nbatch)\n', (3591, 3599), True, 'import numpy as np\n')] |
"""
Geosoft databases for line-oriented spatial data.
:Classes:
:`Geosoft_gdb`: Geosoft line database
:`Line`: line handling
:`Channel`: channel handling
:Constants:
:LINE_TYPE_NORMAL: `geosoft.gxapi.DB_LINE_TYPE_NORMAL`
:LINE_TYPE_BASE: `geosoft.gxapi.DB_LINE_TYPE_BASE`
:LINE_TYPE_TIE: `geosoft.gxapi.DB_LINE_TYPE_TIE`
:LINE_TYPE_TEST: `geosoft.gxapi.DB_LINE_TYPE_TEST`
:LINE_TYPE_TREND: `geosoft.gxapi.DB_LINE_TYPE_TREND`
:LINE_TYPE_SPECIAL: `geosoft.gxapi.DB_LINE_TYPE_SPECIAL`
:LINE_TYPE_RANDOM: `geosoft.gxapi.DB_LINE_TYPE_RANDOM`
:LINE_CATEGORY_FLIGHT: `geosoft.gxapi.DB_CATEGORY_LINE_FLIGHT`
:LINE_CATEGORY_GROUP: `geosoft.gxapi.DB_CATEGORY_LINE_GROUP`
:LINE_CATEGORY_NORMAL: `geosoft.gxapi.DB_CATEGORY_LINE_NORMAL`
:FORMAT_NORMAL: `geosoft.gxapi.DB_CHAN_FORMAT_NORMAL`
:FORMAT_EXP: `geosoft.gxapi.DB_CHAN_FORMAT_EXP`
:FORMAT_TIME: `geosoft.gxapi.DB_CHAN_FORMAT_TIME`
:FORMAT_DATE: `geosoft.gxapi.DB_CHAN_FORMAT_DATE`
:FORMAT_GEOGR: `geosoft.gxapi.DB_CHAN_FORMAT_GEOGR`
:FORMAT_SIGDIG: `geosoft.gxapi.DB_CHAN_FORMAT_SIGDIG`
:FORMAT_HEX: `geosoft.gxapi.DB_CHAN_FORMAT_HEX`
:CHAN_ALL: None
:CHAN_NORMAL: 0
:CHAN_ARRAY: 1
:CHAN_DISPLAYED: 2
:SYMB_LINE_NORMAL: `geosoft.gxapi.DB_CATEGORY_LINE_NORMAL`
:SYMB_LINE_FLIGHT: `geosoft.gxapi.DB_CATEGORY_LINE_FLIGHT`
:SYMB_LINE_GROUP: `geosoft.gxapi.DB_CATEGORY_LINE_GROUP`
:SELECT_INCLUDE: `geosoft.gxapi.DB_LINE_SELECT_INCLUDE`
:SELECT_EXCLUDE: `geosoft.gxapi.DB_LINE_SELECT_EXCLUDE`
:COMP_NONE: `geosoft.gxapi.DB_COMP_NONE`
:COMP_SPEED: `geosoft.gxapi.DB_COMP_SPEED`
:COMP_SIZE: `geosoft.gxapi.DB_COMP_SIZE`
:READ_REMOVE_DUMMYROWS: 1
:READ_REMOVE_DUMMYCOLUMNS: 2
:SYMBOL_LOCK_NONE: `geosoft.gxapi.DB_LOCK_NONE`
:SYMBOL_LOCK_READ: `geosoft.gxapi.DB_LOCK_READONLY`
:SYMBOL_LOCK_WRITE: `geosoft.gxapi.DB_LOCK_READWRITE`
:DRAW_AS_POINTS: 0
:DRAW_AS_LINES: 1
.. seealso:: `geosoft.gxapi.GXGB`, `geosoft.gxapi.GXEDB`,
`geosoft.gxapi.GXDBREAD`, `geosoft.gxapi.GXDBWRITE`
.. note::
Regression tests provide usage examples:
`Tests <https://github.com/GeosoftInc/gxpy/blob/master/geosoft/gxpy/tests/test_gdb.py>`_
"""
import os
import sys
import math
import numpy as np
import pandas as pd
import geosoft
import geosoft.gxapi as gxapi
from . import vv as gxvv
from . import va as gxva
from . import utility as gxu
from . import gx as gx
from . import coordinate_system as gxcs
from . import metadata as gxmeta
from . import map as gxmap
from . import view as gxview
from . import group as gxgroup
from . import geometry as gxgeo
__version__ = geosoft.__version__
def _t(s):
    """Translate message string *s* through the Geosoft translation service."""
    translated = geosoft.gxpy.system.translate(s)
    return translated
# Line types (used by `create_line_name` and line creation)
LINE_TYPE_NORMAL = gxapi.DB_LINE_TYPE_NORMAL
LINE_TYPE_BASE = gxapi.DB_LINE_TYPE_BASE
LINE_TYPE_TIE = gxapi.DB_LINE_TYPE_TIE
LINE_TYPE_TEST = gxapi.DB_LINE_TYPE_TEST
LINE_TYPE_TREND = gxapi.DB_LINE_TYPE_TREND
LINE_TYPE_SPECIAL = gxapi.DB_LINE_TYPE_SPECIAL
LINE_TYPE_RANDOM = gxapi.DB_LINE_TYPE_RANDOM
# Line categories
LINE_CATEGORY_FLIGHT = gxapi.DB_CATEGORY_LINE_FLIGHT
LINE_CATEGORY_GROUP = gxapi.DB_CATEGORY_LINE_GROUP
LINE_CATEGORY_NORMAL = gxapi.DB_CATEGORY_LINE_NORMAL
# Channel display formats
FORMAT_NORMAL = gxapi.DB_CHAN_FORMAT_NORMAL
FORMAT_EXP = gxapi.DB_CHAN_FORMAT_EXP
FORMAT_TIME = gxapi.DB_CHAN_FORMAT_TIME
FORMAT_DATE = gxapi.DB_CHAN_FORMAT_DATE
FORMAT_GEOGR = gxapi.DB_CHAN_FORMAT_GEOGR
FORMAT_SIGDIG = gxapi.DB_CHAN_FORMAT_SIGDIG
FORMAT_HEX = gxapi.DB_CHAN_FORMAT_HEX
# Channel list filters (see `Geosoft_gdb.list_channels`)
CHAN_ALL = None
CHAN_NORMAL = 0
CHAN_ARRAY = 1
CHAN_DISPLAYED = 2
# Line symbol categories
SYMB_LINE_NORMAL = gxapi.DB_CATEGORY_LINE_NORMAL
SYMB_LINE_FLIGHT = gxapi.DB_CATEGORY_LINE_FLIGHT
SYMB_LINE_GROUP = gxapi.DB_CATEGORY_LINE_GROUP
# Line selection modes
SELECT_INCLUDE = gxapi.DB_LINE_SELECT_INCLUDE
SELECT_EXCLUDE = gxapi.DB_LINE_SELECT_EXCLUDE
# Database compression settings (see `Geosoft_gdb.new`)
COMP_NONE = gxapi.DB_COMP_NONE
COMP_SPEED = gxapi.DB_COMP_SPEED
COMP_SIZE = gxapi.DB_COMP_SIZE
# Dummy-removal flags for read operations
READ_REMOVE_DUMMYROWS = 1
READ_REMOVE_DUMMYCOLUMNS = 2
# Symbol lock modes
SYMBOL_LOCK_NONE = gxapi.DB_LOCK_NONE
SYMBOL_LOCK_READ = gxapi.DB_LOCK_READONLY
SYMBOL_LOCK_WRITE = gxapi.DB_LOCK_READWRITE
# Drawing styles
DRAW_AS_POINTS = 0
DRAW_AS_LINES = 1
class GdbException(geosoft.GXRuntimeError):
    """
    Exception raised for errors from `geosoft.gxpy.gdb`.
    .. versionadded:: 9.1
    """
def _gdb_name(name):
name = name.strip()
name_ext = os.path.splitext(name)
if name_ext[1].lower() == '.gdb':
return name
else:
return os.path.normpath(name + ".gdb")
def _va_width(data):
if len(data.shape) == 1:
width = 1
elif len(data.shape) == 2:
width = data.shape[1]
else:
raise GdbException(_t("Only one or two-dimensional data allowed."))
return width
def is_valid_line_name(name):
    """
    Return True if this is a valid line name.
    See also `create_line_name`
    .. versionadded:: 9.3
    """
    name = str(name)
    try:
        int(name)
    except ValueError:
        # not purely numeric - defer to the Geosoft name validator
        return bool(gxapi.GXDB.is_line_name(name))
    return False
def create_line_name(number=0, line_type=LINE_TYPE_NORMAL, version=0):
    """
    Returns a valid database line name constructed from the component parts.
    :param number:      line number, or a string, default is 0
    :param line_type:   one of LINE_TYPE constants, default is LINE_TYPE_NORMAL
    :param version:     version number, default is 0
    :return:            string line name
    Line name strings are constructed using the line naming convention as in the following:
        ====== =======================================
        L10.4  LINE_TYPE_NORMAL, number 10, version 4
        B10.4  LINE_TYPE_BASE, number 10, version 4
        D10.4  LINE_TYPE_RANDOM, number 10, version 4
        P10.4  LINE_TYPE_SPECIAL, number 10, version 4
        T10.4  LINE_TYPE_TIE, number 10, version 4
        S10.4  LINE_TYPE_TEST, number 10, version 4
        R10.4  LINE_TYPE_TREND, number 10, version 4
        ====== =======================================
    .. versionadded:: 9.3
    """
    name_ref = gxapi.str_ref()
    gxapi.GXDB.set_line_name2(str(number), line_type, version, name_ref)
    return name_ref.value
def delete_files(file_name):
    """
    Delete all files associated with this database name.
    :param file_name: name of the database
    .. versionadded:: 9.3
    """
    if file_name is not None:
        # normalized name always carries a '.gdb' extension (see _gdb_name)
        path = _gdb_name(file_name)
        root, ext = os.path.splitext(os.path.basename(path))
        if ext.lower() != '.gdb':
            # defensive check; _gdb_name() appears to make this unreachable - TODO confirm
            raise GdbException(_t('File is not a Geosoft database file (no gdb extension): {}'.format(file_name)))
        # NOTE(review): deletes file_name verbatim rather than the normalized `path`;
        # a caller passing a name without '.gdb' will not remove the .gdb file itself - confirm intent.
        gxu.delete_file(file_name)
        gxu.delete_file(file_name + '.xml')
class Geosoft_gdb(gxgeo.Geometry):
"""
Class to work with Geosoft databases. This class wraps many of the functions found in
`geosoft.gxapi.GXDB`.
:Constructors:
========= =========================================================================
`open` open an existing file, or if not specified open/lock the current database
`new` create a new database
========= =========================================================================
**Some typical programming patterns**
Python Oasis extension opens and reads through all data in the current database:
.. code::
import os,sys
import numpy as np
import gxpy.gx as gxp
import gxpy.gdb as gxdb
# open the current database in the open project
gdb = gxdb.Geosoft_gdb.open()
for line in gdb.list_lines():
npd,ch,fid = gdb.read_line(line)
# npd is a 2D numpy array to all data in this line.
# ch is a list of the channels, one channel for each column in npd.
# Array channels are expanded with channel names "name[0]", "name[1]" ...
# fid is a tuple (start,increment) fiducial, which will be the minimum start and smallest increment.
# ... do something with the data in npd ...
External Python program to open and read through all data in a database:
.. code::
import os,sys
import numpy as np
import gxpy.gx as gx
import gxpy.gdb as gxdb
# initalize the gx environment - required for external programs.
gxp = gx.GXpy()
# open a database
gdb = gxdb.Geosoft_gdb.open('test.gdb')
for line in gdb.list_lines():
npd,ch,fid = gdb.read_line(line)
# npd is a 2D numpy array to all data in this line.
# ch is a list of the channels, one channel for each column in npd.
# Array channels are expanded with channel names "name[0]", "name[1]" ...
# fid is a tuple (start,increment) fiducial, which will be the minimum start and smallest increment.
# ... do something with the data in npd ...
The following creates a new channel that is the distance from the origin to the X,Y,Z location of every point.
.. code::
...
gdb = gxdb.Geosoft_gdb.open('test.gdb')
for line in gdb.list_lines():
npd,ch,fid = gdb.read_line(line, channels=['X','Y','Z'])
npd = np.square(npd)
distance_from_origin = np.sqrt(npd[0] + npd[1] + npd[2])
gdb.write_channel(line, 'distance', distance_from_origin, fid)
.. versionadded:: 9.1
.. versionchanged:: 9.3 float numpy arrays use np.nan for dummies so dummy filtering no longer necessary.
.. versionchanged:: 9.3.1 inherits from `geosoft.gxpy.geometry.Geometry`
"""
    def __enter__(self):
        """Context-manager entry; returns this `Geosoft_gdb` instance."""
        return self
    def __exit__(self, _type, _value, _traceback):
        """Context-manager exit; releases database resources (exceptions propagate)."""
        self.__del__()
    def __del__(self):
        """Finalizer; closes the database if it is still open."""
        # guard: __init__ may have failed before _close was bound
        if hasattr(self, '_close'):
            self._close()
    def _close(self, pop=True, discard=False):
        """Release the database: unlock any editor view, flush changed XML metadata
        (unless discarding), sync and drop the GXDB handle, optionally delete the
        files, and pop the tracked resource."""
        if hasattr(self, '_open'):
            if self._open:
                if self._db:
                    if self._edb is not None:
                        if self._edb.is_locked():
                            self._edb.un_lock()
                        # database is open in the project viewer - never delete its files
                        discard = False
                        self._edb = None
                    if not discard and self._xmlmetadata_changed:
                        # persist edited XML metadata in the sidecar .xml file
                        with open(self._file_name + '.xml', 'w+') as f:
                            f.write(gxu.xml_from_dict(self._xmlmetadata))
                    self._db.sync()
                    self._db = None
                    if discard:
                        gxu.delete_files_by_root(self._file_name)
                if pop:
                    gx.pop_resource(self._open)
                self._open = None
def __repr__(self):
return "{}({})".format(self.__class__, self.__dict__)
def __str__(self):
return '{}({} lines, {} channels)'.format(os.path.basename(self.name), self.used_lines, self.used_channels)
    def __init__(self, name=None, db=None):
        """Base construction; use class methods `open` or `new` to create instances."""
        self._lst = gxapi.GXLST.create(2000)
        self._file_name = None
        self._db = db           # wrapped gxapi.GXDB handle, may be supplied by `open`/`new`
        self._edb = None        # project editor handle when opened from the current project
        self._xmlmetadata = None
        self._xmlmetadata_changed = False
        self._xmlmetadata_root = ''
        # cached spatial extent, keyed by the xyz channel triple used to compute it
        self._extent = {'xyz': None, 'extent': None}
        if name is None:
            if self._db:
                # derive the name from the wrapped GXDB handle
                s = gxapi.str_ref()
                self._db.get_name(gxapi.DB_NAME_FILE, s)
                self._file_name = os.path.normpath(s.value)
                name = os.path.basename(self._file_name)
            else:
                name = '_gdb_'
        else:
            name = os.path.basename(name)
        super().__init__(name=name)
        # register so open databases can be tracked/reported as resources
        self._open = gx.track_resource(self.__class__.__name__, self._file_name)
    def close(self, discard=False):
        """
        Close the database and free resources
        :param discard: True to discard the database file(s) after closing.
        .. versionadded:: 9.4
        """
        # delegate to the internal close, which also pops the tracked resource
        self._close(discard=discard)
    @classmethod
    def open(cls, name=None):
        """
        Open an existing database.
        :param name:    name of the database, default is the current project database
        :returns:       `Geosoft_gdb` instance
        .. versionadded:: 9.1
        """
        gdb = cls(name)
        if name is None:
            # no name: lock the database currently open in the Oasis montaj project
            gdb._edb = gxapi.GXEDB.current()
            gdb._db = gxapi.GXEDB.lock(gdb._edb)
        else:
            gdb._edb = None
            gdb._db = gxapi.GXDB.open(_gdb_name(name), 'SUPER', '')
        # resolve the actual file name from the opened handle
        sr = gxapi.str_ref()
        gdb._db.get_name(gxapi.DB_NAME_FILE, sr)
        gdb._file_name = os.path.normpath(sr.value)
        return gdb
    @classmethod
    def new(cls, name=None, max_lines=500, max_channels=200, max_blobs=0, page_size=1024,
            comp=None, overwrite=False):
        """
        Create a new database.
        :param name:            database name, if None a temporary database is created
        :param max_lines:       maximum number of lines, default 500
        :param max_channels:    maximum number of channels, default 200
        :param max_blobs:       maximum number of blobs, default lines*channels+20
        :param comp:            compression:
                                | COMP_NONE
                                | COMP_SPEED (default)
                                | COMP_SIZE
        :param overwrite:       `True` to overwrite existing database. Default is `False`, GdbException if file exists.
        :param page_size:       page size (default is 1024), which limits the amount of compressed data that can be
                                stored in a single channel on a line.  The maximum compressed data size for a channel
                                will be this number * 65534 (default 1024 * 65534 = 64 MB of compressed data).
                                This will be forced to a power of 2 between 64 and 4096, which would allow for a
                                maximum of 256 MB compressed data per channel per line.
        :returns:               `Geosoft_gdb` instance
        .. versionadded:: 9.1
        .. versionchanged:: 9.3
            added parameter `overwrite=False`
        .. versionchanged:: 9.4 `name=None` creates a temporary database
        """
        # enforce sane minimums on the symbol-table sizes
        max_lines = max(10, max_lines)
        max_channels = max(25, max_channels)
        min_blobs = max_channels + max_lines + 20
        max_blobs = max(min_blobs, max_blobs)
        if not comp:
            comp = COMP_SPEED
        # validate page_size: round up to a power of 2, capped at 4096
        ps = 64
        while ps < page_size:
            ps *= 2
        if ps > 4096:
            raise GdbException(_t('Page size cannot be larger than 4096 (256 MB per line-channel).'))
        page_size = ps
        if name is None:
            name = gx.gx().temp_file('gdb')
        name = _gdb_name(name)
        if not overwrite and os.path.isfile(name):
            raise GdbException(_t('Cannot overwrite existing database \'{}\''.format(name)))
        # remove any stale files sharing this root before creating
        gxu.delete_files_by_root(name)
        gxapi.GXDB.create_comp(name,
                               max_lines, max_channels, max_blobs, 10, 100,
                               'SUPER', '',
                               page_size, comp)
        return cls.open(name)
    def commit(self):
        """
        Commit (save) database changes.
        .. versionadded:: 9.1
        """
        self._db.commit()
    def discard(self):
        """
        Discard uncommitted database changes.
        .. versionadded:: 9.1
        """
        self._db.discard()
# ============================================================================
# internal helper functions
def exist_symb_(self, symb, symb_type):
"""
Check if a symbol exists of the required type.
:param symb: symbol name, number or instance
:param symb_type: one of DB_SYMB_TYPE
:returns: `True` if the symbol exists and is the expected symbol type, `False` otherwise
.. versionadded:: 9.1
"""
if isinstance(symb, str):
return self._db.exist_symb(symb, symb_type)
elif isinstance(symb, int):
return self._db.valid_symb(symb, symb_type)
elif isinstance(symb, Line) and (symb_type == gxapi.DB_SYMB_LINE):
return True
elif isinstance(symb, Channel) and (symb_type == gxapi.DB_SYMB_CHAN):
return True
return False
# ============================================================================
# Information
    @property
    def gxdb(self):
        """The `geosoft.gxapi.GXDB` instance handle"""
        return self._db
    @property
    def xyz_channels(self):
        """
        The currently identified (x, y, z) channels.  Methods that work on spatial locations will use these
        channels for locating the data at each fiducial of the data. Can be set using a tuple of two or
        three strings. For example:
        .. code::
            gdb.xyz_channels = ('Easting', 'Northing')
            gdb.xyz_channels = ('Easting', 'Northing', 'Elevation')
        .. versionadded:: 9.2
        """
        sr = gxapi.str_ref()
        self.gxdb.get_xyz_chan(0, sr)
        x = sr.value
        self.gxdb.get_xyz_chan(1, sr)
        y = sr.value
        self.gxdb.get_xyz_chan(2, sr)
        z = sr.value
        # a configured name that is not an actual channel is reported as None
        if not self.is_channel(x):
            x = None
        if not self.is_channel(y):
            y = None
        if not self.is_channel(z):
            z = None
        return x, y, z
    @xyz_channels.setter
    def xyz_channels(self, xyz):
        # accept (x, y) or (x, y, z); all named channels must exist (raises otherwise)
        if len(xyz) >= 3:
            x, y, z = xyz
            self.is_channel(z, True)
        else:
            x, y = xyz
            z = None
        self.is_channel(x, True)
        self.is_channel(y, True)
        self.gxdb.set_xyz_chan(0, x)
        self.gxdb.set_xyz_chan(1, y)
        if z:
            self.gxdb.set_xyz_chan(2, z)
        # cached extent was computed from the previous xyz channels
        self.clear_extent()
    def _init_xmlmetadata(self):
        """Lazily load the XML metadata from the database's sidecar file on first access."""
        if not self._xmlmetadata:
            self._xmlmetadata = gxu.geosoft_metadata(self._file_name)
        # the single top-level key is the metadata root
        self._xmlmetadata_root = tuple(self._xmlmetadata.items())[0][0]
    @property
    def metadata(self):
        """
        Return the database XML metadata as a dictionary.  Can be set, in which case
        the dictionary items passed will be added to, or replace existing XML metadata.
        .. versionadded:: 9.2
        """
        self._init_xmlmetadata()
        return self._xmlmetadata[self._xmlmetadata_root]
    @metadata.setter
    def metadata(self, meta):
        self._init_xmlmetadata()
        # merge rather than replace; flag so _close() writes the sidecar .xml
        self._xmlmetadata[self._xmlmetadata_root] = gxu.merge_dict(self._xmlmetadata[self._xmlmetadata_root], meta)
        self._xmlmetadata_changed = True
    def get_gx_metadata(self):
        """
        Return the database Geosoft metadata as a Geosoft `geosoft.gxpy.metadata.Metadata` instance.
        The internal database metadata is used to store various database properties that are not intended
        to be part of the exposed dataset metadata exposed by the :attr:metadata property.
        If you wish to add your own metadata to the internal properties you can use the
        `geosoft.gxpy.metadata` module to add metadata and save it to the database using
        `geosoft.gxapi.GXDB.set_meta`.
        .. versionadded:: 9.3
        """
        gxm = gxapi.GXMETA.create()
        self.gxdb.get_meta(gxm)
        return gxmeta.Metadata(gxm)
    def update_gxmeta(self, new_meta):
        """
        Update the database Geosoft metadata as a Geosoft `geosoft.gxpy.metadata.Metadata` instance.
        :param new_meta: the new metadata as a `geosoft.gxpy.Metadata` instance or a nested dict.
        .. versionadded:: 9.3.1
        """
        current_meta = self.get_gx_metadata()
        if isinstance(new_meta, gxmeta.Metadata):
            new_meta = new_meta.meta_dict()
        # merge the new items into the existing metadata and write back
        current_meta.update_dict(new_meta)
        self.gxdb.set_meta(current_meta.gxmeta)
    @property
    def file_name(self) -> str:
        """Database file name (absolute path)."""
        return os.path.abspath(self._file_name)
    @property
    def coordinate_system(self):
        """
        Coordinate system of the current `xyz_channels`.
        Can be set from any `geosoft.gxpy.coordinate_system.Coordinate_system` constructor.
        .. versionchanged:: 9.3
            added setter
        """
        try:
            # the coordinate system is stored on the x channel's IPJ
            x, y, z = self.xyz_channels
            ipj = gxapi.GXIPJ.create()
            self.gxdb.get_ipj(self.channel_name_symb(x)[1], ipj)
            return gxcs.Coordinate_system(ipj)
        except GdbException:
            # no valid x channel - fall back to a default coordinate system
            return gxcs.Coordinate_system()
    @coordinate_system.setter
    def coordinate_system(self, cs):
        if not isinstance(cs, gxcs.Coordinate_system):
            cs = gxcs.Coordinate_system(cs)
        x, y, z = self.xyz_channels
        self.gxdb.set_ipj(self.channel_name_symb(x)[1], self.channel_name_symb(y)[1], cs.gxipj)
        # if a z channel exists with no unit, default its unit to the x channel's unit
        x, _, z = self.xyz_channels
        if z:
            z = Channel(self, z)
            if not z.unit_of_measure:
                z.unit_of_measure = Channel(self, x).unit_of_measure
    # ---- simple read-only statistics, each delegated to a gxapi.DB_INFO_* query ----
    @property
    def max_blobs(self) -> int:
        """maximum blobs allowed"""
        return self._db.get_info(gxapi.DB_INFO_BLOBS_MAX)
    @property
    def max_lines(self) -> int:
        """maximum number of lines allowed"""
        return self._db.get_info(gxapi.DB_INFO_LINES_MAX)
    @property
    def max_channels(self) -> int:
        """maximum number of channels allowed"""
        return self._db.get_info(gxapi.DB_INFO_CHANS_MAX)
    @property
    def used_blobs(self) -> int:
        """number of blobs used"""
        return self._db.get_info(gxapi.DB_INFO_BLOBS_USED)
    @property
    def used_lines(self) -> int:
        """number of lines used"""
        return self._db.get_info(gxapi.DB_INFO_LINES_USED)
    @property
    def used_channels(self) -> int:
        """number of channels used"""
        return self._db.get_info(gxapi.DB_INFO_CHANS_USED)
    @property
    def max_compressed_channel_bytes(self) -> int:
        """maximum compressed data per channel per line in bytes"""
        # page size * 65534 pages (see `new`)
        ps = self._db.get_info(gxapi.DB_INFO_PAGE_SIZE)
        return ps * 65534
    @property
    def number_of_blocks(self) -> int:
        """number of blocks"""
        return self._db.get_info(gxapi.DB_INFO_DATA_SIZE)
    @property
    def lost_blocks(self) -> int:
        """lost blocks that might be freed"""
        return self._db.get_info(gxapi.DB_INFO_LOST_SIZE)
    @property
    def free_blocks(self) -> int:
        """number of free blocks"""
        return self._db.get_info(gxapi.DB_INFO_FREE_SIZE)
    @property
    def compression(self) -> int:
        """database compression setting"""
        return self._db.get_info(gxapi.DB_INFO_COMP_LEVEL)
    @property
    def pages_for_blobs(self) -> int:
        """pages consumed by blobs"""
        return self._db.get_info(gxapi.DB_INFO_BLOB_SIZE)
    @property
    def db_size_kb(self) -> int:
        """database size in kb"""
        return self._db.get_info(gxapi.DB_INFO_FILE_SIZE)
    @property
    def index_size_kb(self) -> int:
        """index size in kb"""
        return self._db.get_info(gxapi.DB_INFO_INDEX_SIZE)
    @property
    def max_block_size_bytes(self) -> int:
        """maximum block size in bytes"""
        return self._db.get_info(gxapi.DB_INFO_MAX_BLOCK_SIZE)
    @property
    def data_has_changed(self):
        """`True` if data has changed"""
        return self._db.get_info(gxapi.DB_INFO_CHANGESLOST)
    def is_line(self, line, raise_err=False):
        """
        Returns `True` if the named line exists in the database.
        :param line:        line name
        :param raise_err:   True to raise an error if it does not exist
        .. versionadded:: 9.1
        """
        # NULLSYMB from find_symb means the line name is not in the database
        exist = self._db.find_symb(str(line), gxapi.DB_SYMB_LINE) != gxapi.NULLSYMB
        if raise_err and not exist:
            raise GdbException(_t('"{}" is not a line in the database'.format(line)))
        return exist
    def is_channel(self, chan, raise_err=False):
        """
        Returns `True` if the channel name exists in the database.
        :param chan:        channel name
        :param raise_err:   True to raise an error if it does not exist
        .. versionadded:: 9.1
        """
        # NULLSYMB from find_chan means the channel name is not in the database
        exist = self._db.find_chan(chan) != gxapi.NULLSYMB
        if raise_err and not exist:
            raise GdbException(_t('"{}" is not a channel in the database'.format(chan)))
        return exist
@property
def extent(self):
"""
Return the spatial extent of all selected data in the database as a `geosoft.gxpy.geometry.Point2`.
:returns: `geosoft.gxpy.geometry.Point2` of minimum, maximum, or None if no spatial information.
.. versionadded:: 9.2
"""
def expand(_min, _max, _data):
if np.isnan(_data).all():
return _min, _max
mdata = np.nanmin(_data)
if _min is None:
_min = mdata
_max = np.nanmax(_data)
return _min, _max
if mdata < _min:
_min = mdata
return _min, _max
mdata = np.nanmax(_data)
if mdata > _max:
_max = mdata
return _min, _max
lines = self.lines()
if len(lines):
xyz = self.xyz_channels
if str(xyz) == self._extent['xyz']:
return self._extent['extent']
xmin = xmax = ymin = ymax = zmin = zmax = None
if None in xyz:
if None in xyz[0:2]:
return None
xyz = xyz[0:2]
for l in lines:
data = self.read_line(l, channels=xyz)[0]
xmin, xmax = expand(xmin, xmax, data[:, 0])
ymin, ymax = expand(ymin, ymax, data[:, 1])
if data.shape[1] > 2:
zmin, zmax = expand(zmin, zmax, data[:, 2])
ext = gxgeo.Point2((xmin, ymin, zmin, xmax, ymax, zmax), coordinate_system=self.coordinate_system)
self._extent['xyz'] = str(xyz)
self._extent['extent'] = ext
return ext
return None
    def _get(self, s, fn):
        """Call attribute-getter *fn* on symbol *s* under a read lock, always unlocking."""
        self.lock_read_(s)
        try:
            v = fn(s)
        finally:
            self.unlock_(s)
        return v
    def lock_set_(self, s, fn, v):
        """Call attribute-setter *fn* with value *v* on symbol *s* under a write lock, always unlocking."""
        self.lock_write_(s)
        try:
            fn(s, v)
        finally:
            self.unlock_(s)
    def line_name_symb(self, line, create=False):
        """
        Return line name, symbol
        :param line:    line name, or symbol number
        :param create:  `True` to create a line if one does not exist
        :returns:       line name, symbol
        :raises:        GdbException if line not found or cannot be created
        .. versionadded:: 9.1
        """
        if isinstance(line, Line):
            return line.name, line.symbol
        elif isinstance(line, str):
            if self.exist_symb_(line, gxapi.DB_SYMB_LINE):
                symb = self._db.find_symb(line, gxapi.DB_SYMB_LINE)
                return line, symb
            if create:
                return line, self.new_line(line)
            else:
                raise GdbException(_t('Line \'{}\' not found'.format(line)))
        else:
            # a symbol number - look up its name
            sr = gxapi.str_ref()
            self._db.get_symb_name(line, sr)
            return sr.value, line
    def channel_name_symb(self, chan):
        """
        Return channel name, symbol
        :param chan:    channel name, or symbol number or Channel instance
        :returns:       line name, symbol, returns ('',-1) if invalid
        :raises:        GdbException if channel does not exist
        .. versionadded:: 9.1
        """
        if isinstance(chan, Channel):
            return chan.name, chan.symbol
        if isinstance(chan, str):
            symb = self._db.find_symb(chan, gxapi.DB_SYMB_CHAN)
            if symb == -1:
                raise GdbException(_t('Channel \'{}\' not found'.format(chan)))
            return chan, symb
        # a symbol number - validate and look up its name
        if not self.exist_symb_(chan, gxapi.DB_SYMB_CHAN):
            raise GdbException(_t('Channel symbol \'{}\' not found'.format(chan)))
        sr = gxapi.str_ref()
        self._db.get_symb_name(chan, sr)
        return sr.value, chan
    def channel_width(self, channel):
        """
        Channel array width, 1 for normal channels, >1 for VA channels.
        :param channel: channel symbol or name
        :returns:       array dimension, 1 for non-array channels
        .. versionadded:: 9.1
        """
        # get_col_va is read under a lock via _get
        return self._get(self.channel_name_symb(channel)[1], self._db.get_col_va)
    def list_channels(self, chan=None):
        """
        Return a dict of channels in the database.
        :param chan: channel filter, default CHAN_ALL:
            =============== ============================
            CHAN_ALL        all channels, normal and VA
            CHAN_NORMAL     normal channels only
            CHAN_ARRAY      VA channels only
            =============== ============================
        :returns: dictionary {channel_names: channel_symbols}
        .. versionadded:: 9.1
        """
        def clean_chan_dict():
            """ returns the channel dict without any temporary VA sliced channels """
            self._db.chan_lst(self._lst)
            _dct = gxu.dict_from_lst(self._lst)
            cdct = {}
            for ck in _dct:
                # names containing '[' are temporary VA slice channels - skip them
                if '[' in ck:
                    continue
                cdct[ck] = _dct.get(ck)
            return cdct
        if chan == CHAN_ALL:
            dct = clean_chan_dict()
        else:
            self._db.array_lst(self._lst)
            va = gxu.dict_from_lst(self._lst)
            if chan == CHAN_ARRAY:
                dct = va
            else:
                # filter VA channels out of the list
                allc = clean_chan_dict()
                va = list(va)
                dct = {}
                for k in allc:
                    if not(k in va):
                        dct[k] = allc.get(k)
        # convert symbol strings to ints
        for k in dct:
            dct[k] = int(dct.get(k))
        return dct
def lines(self, select=True):
"""
.. deprecated:: 9.2 use list_lines()
"""
return self.list_lines(select)
    def list_lines(self, select=True):
        """
        List of lines in the database, returned as a {name: symbol} dictionary
        :param select:  `True` to return selected lines, `False` to return all lines
        :returns:       dictionary (line name: symbol)
        .. versionadded:: 9.1
        """
        if select:
            self._db.selected_line_lst(self._lst)
        else:
            self._db.line_lst(self._lst)
        dct = gxu.dict_from_lst(self._lst)
        # convert symbol strings to ints
        for k in dct:
            dct[k] = int(dct.get(k))
        return dct
    def line_details(self, line):
        """
        Return dictionary of line details
        :param line:    channel name or symbol
        :returns:       dictionary:
            =========== ==============================================================
            Key         Meaning
            =========== ==============================================================
            name        line name
            symbol      line symbol
            type        line type, one of gxapi.DB_LINE_TYPE
            category    one of SYMB_LINE
            date        date of the line
            number      numeric line number
            flight      flight number
            version     line version number
            groupclass  class name for grouped lines, None if not a grouped line
            =========== ==============================================================
        .. versionadded:: 9.1
        """
        def get_detail(fn):
            # read a string attribute; '' when the API reports an error
            try:
                sr = gxapi.str_ref()
                fn(ls, sr)
                return sr.value
            except geosoft.gxapi.GXAPIError:
                return ''
        ln, ls = self.line_name_symb(line)
        detail = {}
        self.lock_read_(ls)
        try:
            detail['name'] = ln
            detail['symbol'] = ls
            detail['category'] = self._db.line_category(ls)
            detail['date'] = self._db.line_date(ls)
            detail['flight'] = self._db.line_flight(ls)
            detail['number'] = self._db.line_number(ls)
            detail['version'] = self._db.line_version(ls)
            detail['type'] = self._db.line_type(ls)
            # only grouped lines carry a group class
            if self._db.line_category(ls) == gxapi.DB_CATEGORY_LINE_GROUP:
                detail['groupclass'] = get_detail(self._db.get_group_class)
            else:
                detail['groupclass'] = None
        finally:
            self.unlock_(ls)
        return detail
    def channel_details(self, channel):
        """
        Return dictionary of channel details
        :param channel: channel name or symbol
        :returns:       dictionary:
            ======= ==============================================================
            Key     Meaning
            ======= ==============================================================
            name    channel name
            symbol  channel symbol
            class   class name
            format  format, one of gxapi.DB_CHAN_FORMAT constants
            width   display width in characters
            decimal decimal places to display
            unit    measurement unit
            label   channel label, which can be different from the channel name
            protect protection: 0 can be modified; 1 protected from modification
            array   number data columns, 1 for normal channels, n for VA channels
            type    data type, one of gxapi.DB_CATEGORY_CHAN constants
            ======= ==============================================================
        .. versionadded:: 9.1
        """
        def get_detail(fn):
            # read a string attribute of the locked channel symbol
            sr = gxapi.str_ref()
            fn(cs, sr)
            return sr.value
        cn, cs = self.channel_name_symb(channel)
        detail = {}
        self.lock_read_(cs)
        try:
            detail['name'] = cn
            detail['symbol'] = cs
            detail['class'] = get_detail(self._db.get_chan_class)
            detail['format'] = self._db.get_chan_format(cs)
            detail['width'] = self._db.get_chan_width(cs)
            detail['decimal'] = self._db.get_chan_decimal(cs)
            detail['unit'] = get_detail(self._db.get_chan_unit)
            detail['label'] = get_detail(self._db.get_chan_label)
            detail['protect'] = self._db.get_chan_protect(cs)
            detail['array'] = self.channel_width(cs)
            detail['type'] = self._db.get_chan_type(cs)
        finally:
            self.unlock_(cs)
        return detail
    def set_channel_details(self, channel, detail):
        """
        Set/change channel details from dictionary
        :param channel: channel name or symbol
        :param detail:  dictionary, see chan_details
        .. versionadded:: 9.1
        """
        def set_detail(what, fn):
            # apply a detail item only if present in the dictionary
            det = detail.get(what)
            if det is not None:
                fn(cs, det)
        cs = self.channel_name_symb(channel)[1]
        self.lock_write_(cs)
        try:
            set_detail('class', self._db.set_chan_class)
            set_detail('format', self._db.set_chan_format)
            set_detail('width', self._db.set_chan_width)
            set_detail('decimal', self._db.set_chan_decimal)
            set_detail('unit', self._db.set_chan_unit)
            set_detail('label', self._db.set_chan_label)
            # protection is set last so earlier changes are not blocked by it
            protect = detail.get('protect')
            if protect is not None:
                self._db.set_chan_protect(cs, protect)
        finally:
            self.unlock_(cs)
def channel_dtype(self, channel):
"""
Returns channel numpy dtype
:param channel: channel name or symbol
:returns: numpy dtype
.. versionadded:: 9.1
"""
return gxu.dtype_gx(self._db.get_chan_type(self.channel_name_symb(channel)[1]))
    def channel_fid(self, line, channel):
        """
        Return the fiducial of a line, channel
        :param line:    line name, symbol or Line
        :param channel: channel name, symbol or channel
        :returns:       (start, increment) fiducial tuple
        """
        ls = self.line_name_symb(line)[1]
        cs = self.channel_name_symb(channel)[1]
        # read-lock the channel symbol while querying; released in finally
        self.lock_read_(cs)
        try:
            fid_start = self._db.get_fid_start(ls, cs)
            fid_incr = self._db.get_fid_incr(ls, cs)
        finally:
            self.unlock_(cs)
        return fid_start, fid_incr
# ========================================================================================
# management
    def new_channel(self, name, dtype=np.float64, array=1, dup=None, details=None):
        """
        Return a channel symbol, create if it does not exist.
        :param name:    channel name
        :param dtype:   numpy dtype (ie. np.int64)
        :param array:   array columns (default is 1)
        :param dup:     duplicate properties of this channel (name, symbol, channel)
        :param details: dictionary containing channel details, see channel_details()
        :returns:       channel symbol
        Examples:
        .. code::
            symb = gdb.newChan('X')
            symb = gdb.newChan('X', dtype=np.float64, details={'decimal':4})
        .. versionadded:: 9.1
        .. versionchanged:: 9.3
            added support for duplication an existing channel via dup=
        """
        symb = self._db.find_symb(name, gxapi.DB_SYMB_CHAN)
        if array < 1:
            array = 1
        # only create when the channel does not already exist
        if symb == gxapi.NULLSYMB:
            if dup:
                # copy properties from an existing channel
                symb = self._db.dup_symb_no_lock(self.channel_name_symb(dup)[1], name)
            else:
                symb = self._db.create_symb_ex(name,
                                               gxapi.DB_SYMB_CHAN,
                                               gxapi.DB_OWN_SHARED,
                                               gxu.gx_dtype(dtype),
                                               array)
        if details:
            self.set_channel_details(symb, details)
        elif not dup:
            # default display properties for a freshly created channel
            self.set_channel_details(symb, {'width': 12, 'decimal': 2})
        return symb
    def new_line(self, line, linetype=None, group=None, dup=None):
        """
        Create a new line symbol.  If line exists an error is raised.
        :param line:        line name
        :param linetype:    line type for creating a new line, ignored if group defines
                            ================= =========================================
                            SYMB_LINE_NORMAL  normal lines, name is a string
                            SYMB_LINE_FLIGHT  flight lines, first letter is line type
                            ================= =========================================
        :param group:       group name for a grouped class
        :param dup:         duplicate from an existing line (name, symbol of Line)
        :returns:           line symbol
        :raises GdbException: if the line name is invalid or the line already exists
        .. seealso:: function `create_line_name` to create a valid line name.
        .. versionadded:: 9.1
        """
        # group/dup lines are exempt from the strict line-name check
        if group is None and dup is None and not is_valid_line_name(line):
            raise GdbException(_t('Invalid line name \'{}\'. Use create_line_name() to create a valid name.'.
                                  format(line)))
        symb = self._db.find_symb(line, gxapi.DB_SYMB_LINE)
        if symb != gxapi.NULLSYMB:
            raise GdbException(('Cannot create existing line \'{}\''.format(line)))
        if dup:
            # copy line properties from an existing line
            dup_symb = self.line_name_symb(dup)[1]
            symb = self._db.dup_line_symb(dup_symb, line)
        else:
            if group:
                linetype = SYMB_LINE_GROUP
            elif not linetype:
                linetype = SYMB_LINE_NORMAL
            symb = self._db.create_symb_ex(line,
                                           gxapi.DB_SYMB_LINE,
                                           gxapi.DB_OWN_SHARED,
                                           linetype,
                                           0)
            if group:
                Line(self, symb).group = group
        # a new line invalidates the cached spatial extent
        self.clear_extent()
        return symb
    def clear_extent(self):
        """
        Clear the extent cache.  Called whenever line/channel changes may
        invalidate the cached spatial extent.
        .. versionadded:: 9.3.1
        """
        self._extent['xyz'] = None
    def delete_channel(self, channels):
        """
        Delete channel(s) by name or symbol.
        :param channels: channel name or symbol, or a list of channel names or symbols
        :raises GdbException: if any of the channels are protected (none of the
                              protected channels are deleted; unprotected ones are)
        .. versionadded:: 9.1
        """
        # normalize a single name/symbol into a list
        if isinstance(channels, str) or isinstance(channels, int):
            channels = [channels]
        protected_channels = []
        for s in channels:
            try:
                c = Channel(self, s)
                if c.protect:
                    # collect protected channels and report them all at the end
                    protected_channels.append(c.name)
                else:
                    c.delete()
            except GdbException:
                # channel does not exist (or cannot be resolved) - skip it
                continue
        if len(protected_channels):
            raise GdbException(_t('Cannot delete protected channels: {}'.format(protected_channels)))
    def delete_line(self, lines):
        """
        Delete line(s) by name or symbol.
        :param lines: line name/symbol, or a list of names/symbols.
                      Names that do not exist are silently skipped.
        .. versionadded:: 9.1
        """
        # normalize a single name/symbol into a list
        if isinstance(lines, str) or isinstance(lines, int):
            lines = [lines]
        for s in lines:
            if type(s) is str and not self.exist_symb_(s, gxapi.DB_SYMB_LINE):
                continue
            ls = self.line_name_symb(s)[1] if type(s) is str else s
            # release any existing lock, then take a write lock to delete
            self.unlock_(ls)
            self.lock_write_(ls)
            self._db.delete_symb(ls)
    def delete_line_data(self, lines):
        """
        Delete all data in line(s) by name or symbol but keep the line.
        :param lines: line name/symbol, or a list of names/symbols
        .. versionadded:: 9.6
        """
        # normalize a single name/symbol into a list
        if isinstance(lines, str) or isinstance(lines, int):
            lines = [lines]
        for s in lines:
            ls = self.line_name_symb(s)[1] if type(s) is str else s
            self._delete_line_data(ls)
    def _delete_line_data(self, ls):
        """Clear the data of every channel on line symbol `ls` by writing an
        empty VV (normal channel) or empty VA (array channel) of the channel's
        own dtype, leaving the line and channel definitions intact."""
        channels = self.sorted_chan_list()
        for ch in channels:
            cn, cs = self.channel_name_symb(ch)
            dtype = self.channel_dtype(cs)
            w = self.channel_width(cs)
            if w == 1:
                vv = gxvv.GXvv(dtype=dtype)
                self.write_channel_vv(ls, cs, vv)
            else:
                va = gxva.GXva(width=w, dtype=dtype)
                self.write_channel_va(ls, cs, va)
    def select_lines(self, selection='', select=True):
        """
        Change selected state of a line, or group of lines
        :param selection:   string representing selection, comma-delimit multiple selections, or provide a list
                            of selections.
        :param select:      `True` to select, `False` to deselect
        "L99:800" will select all lines of type "L" in range 99 through 800.
        | Use a "T" prefix for Tie lines.
        | Use an "F" prefix to specify lines of a specific flight.
        | For example, "F10" would select all lines of flight 10.
        | Use an empty string ("") to select/deselect ALL lines.
        Invalid line names are ignored.
        .. versionadded:: 9.1
        """
        # a comma-delimited string becomes a list of individual selections
        if isinstance(selection, str):
            selection = selection.split(',')
        for s in selection:
            if select:
                self._db.select(s, gxapi.DB_LINE_SELECT_INCLUDE)
            else:
                self._db.select(s, gxapi.DB_LINE_SELECT_EXCLUDE)
        # selection change invalidates the cached spatial extent
        self.clear_extent()
# =====================================================================================
# reading and writing
def _to_string_chan_list(self, channels):
if isinstance(channels, str):
if ',' in channels:
channels = [c.strip() for c in channels.split(',')]
else:
channels = [channels]
elif isinstance(channels, int):
channels = [channels]
return [self.channel_name_symb(c)[0] if isinstance(channels, int) else c for c in channels]
    def sorted_chan_list(self, channels=None):
        """
        Get a list of sorted channels from Gdb, placing x, y and z channels (if defined) at front of list.
        :param channels: list of channels, strings or symbol number.  If None, read all channels
        :returns: list containing channel names, case-insensitively sorted,
                  with the database's x, y, z channels (if present) first
        .. versionadded:: 9.6
        """
        if channels is not None:
            ch = self._to_string_chan_list(channels)
        else:
            ch = list(self.list_channels())
        # case-insensitive alphabetic sort
        ch.sort(key=str.lower)
        ch_lower = [c.lower() for c in ch]
        channels = []
        nxlower = nylower = nzlower = ''
        # put x,y,z at the front
        xch = self._db.get_xyz_chan_symb(gxapi.DB_CHAN_X)
        if xch != -1:
            nx, _ = self.channel_name_symb(xch)
            nxlower = nx.lower()
            if nxlower in ch_lower:
                channels.append(nx)
        ych = self._db.get_xyz_chan_symb(gxapi.DB_CHAN_Y)
        if ych != -1:
            ny, _ = self.channel_name_symb(ych)
            nylower = ny.lower()
            if nylower in ch_lower:
                channels.append(ny)
        zch = self._db.get_xyz_chan_symb(gxapi.DB_CHAN_Z)
        if zch != -1:
            nz, _ = self.channel_name_symb(zch)
            nzlower = nz.lower()
            if nzlower in ch_lower:
                channels.append(nz)
        # append remaining channels, skipping the x/y/z already placed
        for c in ch:
            clower = c.lower()
            if (clower == nxlower) or (clower == nylower) or (clower == nzlower):
                continue
            channels.append(c)
        return channels
    def _expand_chan_list(self, channels):
        """Expand VA channels and return lists of names, symbols and types.
        Array channels of width w expand into w entries named 'name[0]'..'name[w-1]'.
        :returns: (list of names, list of symbols, list of gxapi channel types)
        """
        ch_names = []
        ch_symbs = []
        c_type = []
        for c in channels:
            cn, cs = self.channel_name_symb(c)
            w = self.channel_width(cs)
            if w == 1:
                ch_names.append(cn)
                ch_symbs.append(cs)
                c_type.append(self._db.get_chan_type(cs))
            else:
                # one entry per array element, indexed-name syntax 'name[i]'
                for i in range(w):
                    ccn, ccs = self.channel_name_symb("{}[{}]".format(cn, i))
                    ch_names.append(ccn)
                    ch_symbs.append(ccs)
                    c_type.append(self._db.get_chan_type(cs))
        return ch_names, ch_symbs, c_type
    def lock_read_(self, s):
        """Internal: read-lock symbol `s`, waiting indefinitely.
        :raises GdbException: if the lock cannot be acquired."""
        try:
            self._db.lock_symb(s, SYMBOL_LOCK_READ, gxapi.DB_WAIT_INFINITY)
        except GdbException:
            raise GdbException(_t('Cannot read lock symbol {}'.format(s)))
    def lock_write_(self, s):
        """Internal: write-lock symbol `s`, waiting indefinitely.
        :raises GdbException: if the lock cannot be acquired."""
        try:
            self._db.lock_symb(s, SYMBOL_LOCK_WRITE, gxapi.DB_WAIT_INFINITY)
        except GdbException:
            raise GdbException(_t('Cannot write lock symbol {}'.format(s)))
    def unlock_(self, s):
        """Internal: unlock symbol `s` if it is currently locked.
        A no-op on an unlocked symbol."""
        if self._db.get_symb_lock(s) != SYMBOL_LOCK_NONE:
            self._db.un_lock_symb(s)
    def unlock_all(self):
        """
        Unlock all locked symbols.
        .. versionadded:: 9.3
        """
        self._db.un_lock_all_symb()
    def read_channel_vv(self, line, channel, dtype=None):
        """
        Read data from a single channel, return in a vv.
        :param line:    line name or symbol
        :param channel: channel name or symbol (must be a normal, non-VA channel)
        :param dtype:   type wanted, default same as the channel data
        :returns:       vv, with `unit_of_measure` copied from the channel
        :raises GdbException: if the channel is a VA (array) channel
        .. versionadded:: 9.2
        """
        ln, ls = self.line_name_symb(line, create=True)
        cn, cs = self.channel_name_symb(channel)
        if self.channel_width(cs) != 1:
            raise GdbException(_t("Cannot read a VA channel into a VV."))
        if dtype is None:
            dtype = self.channel_dtype(cs)
        vv = gxvv.GXvv(dtype=dtype)
        # read-lock the channel while fetching data; released in finally
        self.lock_read_(cs)
        try:
            self._db.get_chan_vv(ls, cs, vv.gxvv)
        finally:
            self.unlock_(cs)
        vv.unit_of_measure = Channel(self, cs).unit_of_measure
        return vv
    def read_channel_va(self, line, channel, dtype=None):
        """
        Read VA data from a single channel, return in a va.
        :param line:    line name or symbol
        :param channel: channel name or symbol
        :param dtype:   type wanted, default same as the channel data
        :returns:       va, with `unit_of_measure` copied from the channel
        .. versionadded:: 9.2
        """
        ln, ls = self.line_name_symb(line, create=True)
        cn, cs = self.channel_name_symb(channel)
        if dtype is None:
            dtype = self.channel_dtype(cs)
        w = self.channel_width(cs)
        va = gxva.GXva(width=w, dtype=dtype)
        # read-lock the channel while fetching data; released in finally
        self.lock_read_(cs)
        try:
            self._db.get_chan_va(ls, cs, va.gxva)
        finally:
            self.unlock_(cs)
        va.unit_of_measure = Channel(self, cs).unit_of_measure
        return va
    def read_channel(self, line, channel, dtype=None):
        """
        Read data from a single channel.
        :param line:    line name or symbol
        :param channel: channel name or symbol
        :param dtype:   type wanted, default same as the channel data
        :returns:       numpy data (2D for a VA channel), fid (start, increment)
        For float dtypes, dummy values will be np.nan.  For integer types dummy values will be the
        Geosoft dummy values.
        .. versionadded:: 9.1
        """
        # dispatch on channel width: 1 -> VV read, >1 -> VA read
        if self.channel_width(channel) == 1:
            vv = self.read_channel_vv(line, channel, dtype)
            return vv.get_data(vv.dtype)[0], vv.fid
        else:
            va = self.read_channel_va(line, channel, dtype)
            return va.get_data(va.dtype)[0], va.fid
    def read_line_vv(self, line, channels=None, dtype=None, fid=None, common_fid=False, chan_dtypes=False):
        """
        Read a line of data into VVs stored in a dictionary by channel.
        :param line:        line to read, string or symbol number
        :param channels:    list of channels, strings or symbol number.  If None, read all channels
        :param dtype:       numpy data type for the array, default np.float64 for multi-channel data (unless
                            chan_dtypes is `True`), data type for single channel data.  Use "<Unnn" for string type.
        :param fid:         required fiducial (start, increment); if None and common_fid is True the
                            smallest fiducial found in the data is used
        :param common_fid:  `True` to resample all channels to a common fiducial
        :param chan_dtypes: `True` to determine dtype for each vv from channel type, default `False`
        :returns:           list of tuples [(channel_name, vv), ...]
        If a requested channel is a VA, it is with channel names 'name[0]', 'name[1]', etc.
        Examples:
        .. code::
            # npd - returned numpy array shape (n, number of channels)
            # ch  - list of returned channels names, array channels expanded to array[0], array[1], ...
            # fid - tuple (fidStart,fidIncrement), channels resampled as necessary
            data = gdb.read_line_vv('L100')  # read all channels in line "L100"
            data = gdb.read_line_vv(681)  # read all channels in line symbol 681
            data = gdb.read_line_vv('L100','X')  # read channel 'X' from line 'L100'
            data = gdb.read_line_vv('L100',2135)  # read channel symbol 2135 from 'L100"
            data = gdb.read_line_vv('L100',channels=['X','Y','Z'])  # read a list of channels to (n,3) array
            data = gdb.read_line_vv('L100','X',np.int32)  # read channel 'X' into integer array
        .. versionadded:: 9.2
        """
        ln, ls = self.line_name_symb(line)
        if channels is None:
            channels = self.sorted_chan_list()
        else:
            channels = self._to_string_chan_list(channels)
        # make up channel list, expanding VA channels
        ch_names, ch_symb, c_type = self._expand_chan_list(channels)
        if chan_dtypes:
            # dtype=None lets each read use the channel's native type
            dtype = None
        elif dtype is None:
            dtype = np.float64
        # read the data into vv
        chvv = []
        for c in ch_names:
            cs = self._db.find_symb(c, gxapi.DB_SYMB_CHAN)
            vv = self.read_channel_vv(ls, cs, dtype=dtype)
            chvv.append((c, vv))
        # resample?
        if common_fid:
            # determine fiducial range from data: smallest start, smallest
            # increment and the furthest data end across all channels
            start = gxapi.GS_R8MX
            incr = gxapi.GS_R8MX
            fend = gxapi.GS_R8MN
            for vv in chvv:
                if vv[1].length > 0:
                    fd = vv[1].fid
                    if fd[0] != gxapi.rDUMMY:
                        if fd[0] < start:
                            start = fd[0]
                        if fd[1] < incr:
                            incr = fd[1]
                        dend = start + incr * (vv[1].length - 1)
                        if dend > fend:
                            fend = dend
            if fid is None:
                # start still GS_R8MX means no channel had valid data
                if start == gxapi.GS_R8MX:
                    fid = (0.0, 1.0)
                else:
                    fid = (start, incr)
            if start == gxapi.GS_R8MX:
                nvd = 0
            else:
                # epsilon guards against float round-off adding a spurious row
                nvd = math.ceil(max((fend - fid[0] - sys.float_info.epsilon), 0) / fid[1]) + 1
            for vv in chvv:
                vv[1].refid(fid, nvd)
        return chvv
    def scan_line_fid(self, line, channels=None):
        """
        Scan channels in a line and return the smallest common fid, line length, data width, list of channels
        :param line:     line to read, string or symbol number
        :param channels: list of channels, strings or symbol number.  If empty, read all channels
        :returns:        (fid_start, fid_increment, fid_last, data_width, channel_list)
                         fid_start/increment are the smallest found; fid_last is the furthest data end;
                         data_width is the total column count with VA channels expanded
        .. versionadded:: 9.4
        """
        if channels is None:
            channels = self.sorted_chan_list()
        else:
            channels = self._to_string_chan_list(channels)
        if len(channels) == 0:
            return 0, 1., 0, 0, []
        ln, ls = self.line_name_symb(line)
        # seed the scan with the first channel's fiducial and length
        cs = self.channel_name_symb(channels[0])[1]
        fid_start, fid_increment = self.channel_fid(ls, cs)
        self.lock_read_(cs)
        nrows = self.gxdb.get_channel_length(ls, cs)
        self.unlock_(cs)
        if nrows == 0:
            fid_last = fid_start
        else:
            fid_last = fid_start + fid_increment * (nrows - 1)
        n_width = self.channel_width(cs)
        # widen the fid range and accumulate width over the remaining channels
        for c in channels[1:]:
            cs = self.channel_name_symb(c)[1]
            n_width += self.channel_width(cs)
            c_start, c_increment = self.channel_fid(ls, cs)
            if c_start != gxapi.rDUMMY:
                self.lock_read_(cs)
                c_last = c_start + c_increment * (self.gxdb.get_channel_length(ls, cs) - 1)
                self.unlock_(cs)
                if fid_start == gxapi.rDUMMY or c_start < fid_start:
                    fid_start = c_start
                if fid_increment == gxapi.rDUMMY or c_increment < fid_increment:
                    fid_increment = c_increment
                if c_last > fid_last:
                    fid_last = c_last
        # no channel had a valid fiducial - the line has no data
        if fid_start == gxapi.rDUMMY or fid_increment == gxapi.rDUMMY:
            return 0., 1., 0., 0, channels
        return fid_start, fid_increment, fid_last, n_width, channels
def readLine(self, *args, **kwargs):
"""
.. deprecated:: 9.2 use read_line()
"""
return self.read_line(*args, **kwargs)
@classmethod
def _num_rows_from_fid(cls, src_fid_start, src_fid_last, fid):
return int((src_fid_last - fid[0])/fid[1] + 1.5)
    def read_line(self, line, channels=None, dtype=None, fid=None, dummy=None):
        """
        Read a line of data into a numpy array.
        :param line:     line to read, string or symbol number
        :param channels: list of channels, strings or symbol number.  If empty, read all channels
        :param dtype:    numpy data type for the array, default np.float64 for multi-channel data,
                         data type for single channel data. Use "<Unnn" for string type.
        :param fid:      required fiducial as tuple (start,incr), default smallest in data
        :param dummy:    dummy_handling for multi-channel read, default leaves dummies in place.:
                         ======================== ===================================================
                         READ_REMOVE_DUMMYROWS    remove rows with dummies, fiducials lose meaning
                         READ_REMOVE_DUMMYCOLUMNS remove columns with dummies
                         ======================== ===================================================
        :returns:        2D numpy array shape(records,channels), list of channel names, (fidStart,fidIncr)
        :raises:         GdbException if an unrecognized dummy= value is passed
        VA channels are expanded by element with channel names name[0], name[1], etc.
        This method is intended for relatively simple databases in relatively simple applications.
        If your database has a lot of channels, or wide array channels it will be more efficient
        to read and work with just the channels you need.  See `read_channel`, `read_channel_vv`
        and `read_channel_va`.
        Examples:
        .. code::
            # npd - returned numpy array shape (n, number of channels)
            # ch  - list of returned channels names, array channels expanded to array[0], array[1], ...
            # fid - tuple (fidStart,fidIncrement), channels resampled as necessary
            npd,ch,fid = gdb.read_line('L100')  # read all channels in line "L100"
            npd,ch,fid = gdb.read_line(681)  # read all channels in line symbol 681
            npd,ch,fid = gdb.read_line('L100','X')  # read channel 'X' from line 'L100'
            npd,ch,fid = gdb.read_line('L100',2135)  # read channel symbol 2135 from 'L100"
            npd,ch,fid = gdb.read_line('L100',channels=['X','Y','Z'])  # read a list of channels to (n,3) array
            npd,ch,fid = gdb.read_line('L100','X',np.int32)  # read channel 'X' into integer array
        .. versionadded:: 9.1
        """
        ls = self.line_name_symb(line)[1]
        fid_start, fid_incr, fid_last, ncols, channels = self.scan_line_fid(line, channels)
        if fid is None:
            fid = (fid_start, fid_incr)
        nrows = self._num_rows_from_fid(fid_start, fid_last, fid)
        # no data at all - return an empty array shaped for the channel count
        if nrows == 0 or ncols == 0:
            if len(channels) == 0:
                data = np.array([], dtype=dtype)
            else:
                data = np.array([], dtype=dtype).reshape((-1, len(channels)))
            return data, channels, fid
        # read to a numpy array
        npd = np.empty((nrows, ncols), dtype=dtype)
        # float arrays represent dummies as nan; other types use Geosoft dummies
        if npd.dtype == np.float32 or npd.dtype == np.float64:
            dummy_value = np.nan
        else:
            dummy_value = gxu.gx_dummy(npd.dtype)
        all_empty = True
        ch_names = []
        icol = 0
        for ch in channels:
            cn, cs = self.channel_name_symb(ch)
            w = self.channel_width(cs)
            if w == 1:
                # normal channel fills one column
                vv = self.read_channel_vv(ls, cs, dtype=npd.dtype)
                if vv.length > 0:
                    all_empty = False
                vv.refid(fid, nrows)
                npd[:, icol] = vv.np
                icol += 1
                ch_names.append(cn)
            else:
                # VA channel fills w columns, named 'name[0]'..'name[w-1]'
                va = self.read_channel_va(ls, cs, dtype=npd.dtype)
                if va.length > 0:
                    all_empty = False
                va.refid(fid, nrows)
                npd[:, icol:icol+w] = va.np
                icol += w
                for i in range(w):
                    ch_names.append('{}[{}]'.format(cn, str(i)))
        nch = len(ch_names)
        if all_empty:
            npd = np.empty((0, ncols), dtype=dtype)
        elif dummy:
            # dummy handling
            if dummy == READ_REMOVE_DUMMYCOLUMNS:
                n_ok = 0
                # shift data and channel names to remove columns containing a dummy
                for i in range(nch):
                    if np.isnan(dummy_value):
                        if np.isnan(npd[:, i]).any():
                            continue
                    elif dummy_value in npd[:, i]:
                        continue
                    if n_ok != i:
                        npd[:, n_ok] = npd[:, i]
                        ch_names[n_ok] = ch_names[i]
                    n_ok += 1
                if n_ok != nch:
                    npd = npd[:, 0:n_ok]
                    ch_names = ch_names[0:n_ok]
            elif dummy == READ_REMOVE_DUMMYROWS:
                # keep only rows with no dummy in any column
                if np.isnan(dummy_value):
                    mask = np.apply_along_axis(lambda a: not (np.isnan(a).any()), 1, npd)
                else:
                    mask = np.apply_along_axis(lambda a: not (dummy_value in a), 1, npd)
                npd = npd[mask, :]
                # rows were dropped, so the original fiducial no longer applies
                fid = (0.0, 1.0)
            else:
                raise GdbException(_t('Unrecognized dummy={}').format(dummy))
        return npd, ch_names, fid
    def read_line_dataframe(self, line, channels=None, fid=None):
        """
        Read a line of data into a Pandas DataFrame
        :param line:     line to read, string or symbol number
        :param channels: list of channels, strings or symbol number.  If empty, read all channels
        :param fid:      required fiducial as tuple (start,incr), default smallest in data
        :returns:        Pandas DataFrame, list of channel names, (fidStart,fidIncr)
        VA channels are expanded by element with channel names name[0], name[1], etc.
        This method can be used to conveniently get a table structure of all data corresponding to the
        native types of the channels. It is however not necessarily the most efficient way to get at the data.
        If your database has a lot of channels, or wide array channels it will be more efficient
        to read and work with just the channels you need.  See `read_channel`, `read_channel_vv`
        and `read_channel_va`. This method also does not currently support dummy removal in the same
        way as `read_line`.
        Examples:
        .. code::
            # df  - Pandas DataFrame
            # ch  - list of returned channels names
            # fid - tuple (fidStart,fidIncrement), channels resampled as necessary
            df,ch,fid = gdb.read_line('L100')  # read all channels in line "L100"
            df,ch,fid = gdb.read_line(681)  # read all channels in line symbol 681
            df,ch,fid = gdb.read_line('L100','X')  # read channel 'X' from line 'L100'
            df,ch,fid = gdb.read_line('L100',2135)  # read channel symbol 2135 from 'L100"
            df,ch,fid = gdb.read_line('L100',channels=['X','Y','Z'])  # read a list of channels to (n,3) array
        .. versionadded:: 9.5
        """
        df = pd.DataFrame()
        ls = self.line_name_symb(line)[1]
        fid_start, fid_incr, fid_last, ncols, channels = self.scan_line_fid(line, channels)
        ch_names = []
        if fid is None:
            fid = (fid_start, fid_incr)
        nrows = self._num_rows_from_fid(fid_start, fid_last, fid)
        # no data: build an empty frame that still carries all column names
        if nrows == 0 or ncols == 0:
            for ch in channels:
                cn, cs = self.channel_name_symb(ch)
                w = self.channel_width(cs)
                if w == 1:
                    df[cn] = ()
                    ch_names.append(cn)
                else:
                    for i in range(w):
                        va_cn = '{}[{}]'.format(cn, str(i))
                        df[va_cn] = ()
                        ch_names.append(va_cn)
            return df, ch_names, fid
        icol = 0
        all_empty = True
        for ch in channels:
            cn, cs = self.channel_name_symb(ch)
            w = self.channel_width(cs)
            if w == 1:
                # normal channel becomes one DataFrame column
                vv = self.read_channel_vv(ls, cs)
                if vv.length > 0:
                    all_empty = False
                vv.refid(fid, nrows)
                df[cn] = vv.np
                icol += 1
                ch_names.append(cn)
            else:
                # VA channel becomes w columns, named 'name[0]'..'name[w-1]'
                va = self.read_channel_va(ls, cs)
                if va.length > 0:
                    all_empty = False
                va.refid(fid, nrows)
                icol += w
                for i in range(w):
                    va_cn = '{}[{}]'.format(cn, str(i))
                    df[va_cn] = va.np[:, i]
                    ch_names.append(va_cn)
        if all_empty:
            # Delete one and only row
            df = df.drop([0])
        return df, ch_names, fid
    def write_channel_vv(self, line, channel, vv):
        """
        Write data to a single channel.
        :param line:    line name or symbol (created if it does not exist)
        :param channel: channel name or symbol; a named channel is created
                        if it does not exist
        :param vv:      vv data to write; its unit_of_measure (if any) is
                        applied to the channel
        .. versionadded:: 9.2
        """
        ln, ls = self.line_name_symb(line, create=True)
        try:
            cn, cs = self.channel_name_symb(channel)
        except GdbException:
            # channel does not exist - create it by name with the vv's dtype
            if type(channel) is str:
                cs = self.new_channel(channel, vv.dtype)
                cn = channel
            else:
                raise
        # writing to a coordinate channel invalidates the cached extent
        if cn in self.xyz_channels:
            self.clear_extent()
        self.lock_write_(cs)
        try:
            self._db.put_chan_vv(ls, cs, vv.gxvv)
        finally:
            self.unlock_(cs)
        if vv.unit_of_measure:
            Channel(self, cs).unit_of_measure = vv.unit_of_measure
    def write_channel_va(self, line, channel, va):
        """
        Write VA data to a single channel.
        :param line:    line name or symbol (created if it does not exist)
        :param channel: channel name or symbol; a named channel is created
                        if it does not exist
        :param va:      va data to write; its unit_of_measure (if any) is
                        applied to the channel
        .. versionadded:: 9.2
        """
        ln, ls = self.line_name_symb(line, create=True)
        try:
            cn, cs = self.channel_name_symb(channel)
        except GdbException:
            # channel does not exist - create it by name with the va's shape
            if type(channel) is str:
                cs = self.new_channel(channel, va.dtype, array=va.width)
            else:
                raise
        self.lock_write_(cs)
        try:
            self._db.put_chan_va(ls, cs, va.gxva)
        finally:
            self.unlock_(cs)
        if va.unit_of_measure:
            Channel(self, cs).unit_of_measure = va.unit_of_measure
def writeDataChan(self, *args, **kwargs):
"""
.. deprecated:: 9.2 use `write_channel`
"""
self.write_channel(*args, **kwargs)
    def write_channel(self, line, channel, data, fid=(0.0, 1.0), unit_of_measure=None):
        """
        Write data to a single channel.
        :param line:            line name or symbol (created if it does not exist)
        :param channel:         channel name or symbol; a named channel is created
                                if it does not exist
        :param data:            numpy array (2D for VA channel), or a list
        :param fid:             tuple (fid start, increment), default (0.0,1.0)
        :param unit_of_measure: data unit of measurement
        :raises GdbException: if the data width does not match the channel width
        .. versionchanged:: 9.3
            support for setting channel from a list
            added unit_of_measure
        .. versionadded:: 9.1
        """
        ln, ls = self.line_name_symb(line, create=True)
        if not isinstance(data, np.ndarray):
            data = np.array(data)
        if isinstance(channel, str):
            cn = channel
            # creates the channel if needed, sized to the data width
            cs = self.new_channel(channel, data.dtype, array=_va_width(data))
        else:
            cn, cs = self.channel_name_symb(channel)
        # writing to a coordinate channel invalidates the cached extent
        if cn in self.xyz_channels:
            self.clear_extent()
        if _va_width(data) == 0:
            # no data to write
            return
        w = self.channel_width(cs)
        if w != _va_width(data):
            raise GdbException(
                _t("Array data width {} does not fit into channel '{}' with width {}").
                format(_va_width(data), cn, w))
        # 1D channel
        if w == 1:
            # get a VV of the data
            vv = gxvv.GXvv(data, fid=fid)
            self.lock_write_(cs)
            try:
                self._db.put_chan_vv(ls, cs, vv.gxvv)
            finally:
                self.unlock_(cs)
        else:
            # get a VA of the data
            va = gxva.GXva(data, fid=fid)
            self.lock_write_(cs)
            try:
                self._db.put_chan_va(ls, cs, va.gxva)
            finally:
                self.unlock_(cs)
        if unit_of_measure:
            Channel(self, cs).unit_of_measure = unit_of_measure
    def write_line_vv(self, line, chan_data):
        """
        Write data to multiple channels in a line.  If no channel list is provided it assumes that the
        data is for all channels from the line, the compliment of read_line().
        :param line:        line to write to, name or symbol
        :param chan_data:   list of tuples [(channel_name, vv), ]
                            Channels are created if they do not exist.  VA channels must exist.
        .. note::
            chan_data may contain VA data, which is defined by slice (ie. name[0], name[4]...).
            If VA data is included the VA channels must already exist.
        .. versionadded:: 9.2
        """
        # each tuple is (channel name, vv); write them one at a time
        for chvv in chan_data:
            ch = chvv[0]
            vv = chvv[1]
            self.write_channel_vv(line, ch, vv)
    def write_line(self, line, data, channels=None, fid=(0.0, 1.0)):
        """
        Write data to a multiple channels in a line.  If no channel list is provided it assumes that the
        data is for all channels from the line, the compliment of read_line().
        :param line:        line to write to, name or symbol
        :param data:        numpy array shape (records,channels).  If single dimension, one channel
        :param channels:    channel name or symbol list, or a single name/symbol.  If a single name is specified
                            for multi-column data, a VA channel is assumed.  If None, a sorted list of all channels
                            is assumed.
        :param fid:         option fid tuple (start, increment), default (0.0,1.0)
        :raises GdbException: if the data column count does not match the
                              combined width of the target channels
        .. versionadded:: 9.1
        """
        # a single channel name routes multi-column data to a VA channel
        if type(channels) is str:
            self.write_channel(line, channels, data, fid=fid)
        else:
            if channels is None:
                channels = self.sorted_chan_list()
            else:
                channels = self._to_string_chan_list(channels)
            if not isinstance(data, np.ndarray):
                data = np.array(data)
            if data.ndim == 1:
                data = data.reshape((-1, 1))
            # ensure data matches channels
            np_data = 0
            for chan in channels:
                try:
                    ch, cs = self.channel_name_symb(chan)
                    w = self.channel_width(cs)
                except GdbException:
                    # channel does not exist yet - it will be created 1 column wide
                    w = 1
                np_data += w
            # channel - data mismatch
            if data.shape[1] != np_data:
                raise GdbException(_t('Data dimension ({}) does not match data required by channels ({}).').
                                   format(data.shape, channels))
            # all good, write the data column-slice by column-slice
            np_index = 0
            for chan in channels:
                try:
                    ch, cs = self.channel_name_symb(chan)
                    w = self.channel_width(cs)
                except GdbException:
                    w = 1
                    cs = chan
                self.write_channel(line, cs, data[:, np_index: np_index + w], fid=fid)
                np_index += w
    def list_values(self, chan, umax=1000, selected=True, dupl=50, progress=None, stop=None):
        """
        Build a list of unique values in a channel.  Uniqueness depends on the current display format for
        the field.
        :param chan:        channel to scan
        :param umax:        maximum values allowed, once this maximum is reached scanning stops, default 1000
        :param selected:    `True` to scan only selected lines
        :param dupl:        Stop growing list after this many lines fail to grow the list, 0 scans all lines
        :param progress:    progress reporting function, called as progress(message, percent)
        :param stop:        stop check function; scanning stops early when it returns truthy
        :returns:           list of values, represented as a string
        .. versionadded:: 9.1
        """
        lines = list(self.list_lines(select=selected))
        cn, cs = self.channel_name_symb(chan)
        details = self.channel_details(cs)
        # read values as strings at the channel's display width
        dtype = np.dtype('<U{}'.format(details.get('width')))
        lines.sort(key=str.lower)
        vset = []
        n = 0
        nset = -1
        ndup = 0
        for l in lines:
            try:
                d, c, f = self.read_line(l, cs, dtype=dtype)
            except GdbException:
                # line cannot be read - skip it
                continue
            if d.shape[0] == 0:
                continue
            d = np.unique(d)
            vset = np.append(vset, d)
            vset = np.unique(vset)
            if vset.shape[0] > umax:
                break
            # early-out once dupl consecutive lines add no new values
            if dupl > 0:
                if vset.shape[0] == nset:
                    ndup += 1
                    if ndup > dupl:
                        break
                else:
                    ndup = 0
                nset = vset.shape[0]
            n += 1
            if progress:
                progress('Scanning unique values in "{}", {}'.format(cn, str(l)), (n * 100.0) / len(lines))
            if stop:
                if stop():
                    return vset.tolist()
        # trim to the maximum requested count
        if vset.shape[0] > umax:
            vset = vset[:umax]
        return vset.tolist()
    def figure_map(self, file_name=None, overwrite=False, title=None, draw=DRAW_AS_POINTS,
                   features=None, **kwargs):
        """
        Create a figure map file from selected lines in the database.
        :param file_name:       the name of the map, if None a temporary default map is created.
        :param overwrite:       `True` to overwrite map file should it exist
        :param title:           Title added to the image
        :param draw:            `DRAW_AS_POINTS` to draw a dot at each point (default).  Long lines are decimated.
                                `DRAW_AS_LINES` to draw lines with a line label at each end.
        :param features:        list of features to place on the map, default is ('SCALE', 'NEATLINE')
                                =========== =========================================
                                'ALL'       include all features. This is the default.
                                'SCALE'     show a scale bar
                                'NEATLINE'  draw a neat-line around the image
                                'ANNOT_XY'  annotate map coordinates
                                'ANNOT_LL'  annotate map Latitude, Longitude
                                =========== =========================================
        :param kwargs:          passed to `geosoft.gxpy.map.Map.new`
        :raises GdbException: if the database has no valid x/y extent
        .. versionadded:: 9.3
        """
        # uppercase features, use a dict so we pop things we use and report error
        if features is None:
            features = ['ALL']
        if isinstance(features, str):
            features = (features,)
        feature_list = {}
        if features is not None:
            for f in features:
                feature_list[f.upper()] = None
        features = list(feature_list.keys())
        # setup margins
        if not ('margins' in kwargs):
            bottom_margin = 1.0
            if title:
                # one extra cm per title line
                bottom_margin += len(title.split('\n')) * 1.0
            if 'ALL' in feature_list or 'SCALE' in feature_list:
                # room for the scale bar
                bottom_margin += 1.2
            kwargs['margins'] = (1, 1, bottom_margin, 1)
        kwargs['coordinate_system'] = self.coordinate_system
        # work out some non-zero extents
        ex = self.extent_xyz
        if ex[0] is None or ex[1] is None or ex[3] is None or ex[4] is None:
            raise GdbException(_t('Invalid data extent: {}').format(ex))
        mnx, mny, mxx, mxy = (ex[0], ex[1], ex[3], ex[4])
        dx = mxx - mnx
        dy = mxy - mny
        if dx == 0 and dy == 0:
            # single point - pad to a 100-unit square
            ex = (mnx - 50., mny - 50., mxx + 50., mxy + 50.)
        else:
            # pad the narrow dimension so the figure is not degenerate
            if dx < dy * 0.1:
                d = dy * 0.05
                mnx -= d
                mxx += d
            elif dy < dx * 0.1:
                d = dx * 0.05
                mny -= d
                mxy += d
            ex = (mnx, mny, mxx, mxy)
        if 'inside_margin' not in kwargs:
            kwargs['inside_margin'] = 1
        gmap = gxmap.Map.figure(ex,
                                file_name=file_name,
                                overwrite=overwrite,
                                features=features,
                                title=title,
                                **kwargs)
        # draw each selected line's x/y data into the map's data view
        x, y, _ = self.xyz_channels
        with gxview.View.open(gmap, "data") as v:
            with gxgroup.Draw(v, 'lines') as g:
                for line in self.list_lines():
                    xvv = self.read_channel_vv(line, x)
                    yvv = self.read_channel_vv(line, y)
                    if draw == DRAW_AS_LINES:
                        g.polyline(gxgeo.PPoint((xvv, yvv)),
                                   pen=gxgroup.Pen(line_thick=0.03 * v.units_per_map_cm))
                    else:
                        g.polypoint(gxgeo.PPoint((xvv, yvv)),
                                    pen=gxgroup.Pen(line_thick=0.03 * v.units_per_map_cm))
        return gmap
class Channel:
    """
    Class to work with database channels. Use constructor `Channel.new` to create a new channel.
    Use instance properties to work with channel properties.
    :param gdb:     database instance
    :param name:    channel name string, must exist - see `new()` to create a new channel
    .. versionadded:: 9.3
    """
    def _get(self, fn):
        """Call gxapi getter `fn(symbol)` under a read lock and return its result."""
        self.gdb.lock_read_(self._symb)
        try:
            return fn(self._symb)
        finally:
            self.gdb.unlock_(self._symb)
    def _get_str(self, fn):
        """Call gxapi getter `fn(symbol, str_ref)` under a read lock and return the string."""
        self.gdb.lock_read_(self._symb)
        try:
            sr = gxapi.str_ref()
            fn(self._symb, sr)
            return sr.value
        finally:
            self.gdb.unlock_(self._symb)
    def lock_set_(self, fn, v):
        """Call gxapi setter `fn(symbol, v)` under a write lock."""
        self.gdb.lock_write_(self._symb)
        try:
            fn(self._symb, v)
        finally:
            self.gdb.unlock_(self._symb)
def __repr__(self):
return "{}({})".format(self.__class__, self.__dict__)
def __str__(self):
return self.name
def __init__(self, gdb, name):
self.gdb = gdb
name, self._symb = gdb.channel_name_symb(name)
@classmethod
def new(cls, gdb, name, dtype=np.float64, array=1, dup=None, details=None, replace=False, unit_of_measure=None):
"""
Create a new channel.
:param gdb: Geosoft_gdb instance
:param name: channel name
:param dtype: numpy data type, defaule np.float64
:param array: array size, default 1
:param dup: duplicate properties of this channal (name, symbol or Channel)
:param details: dictionary of other channel properties - see `Geosoft_gdb.set_channel_details`
:param replace: `True` to replace existing channel. Existing channel information and data is lost.
default is `False`.
:param unit_of_measure: unit of measurement of the data
:return: Channel instance
"""
if gdb.exist_symb_(name, gxapi.DB_SYMB_CHAN):
if replace:
gdb.delete_channel(name)
else:
raise GdbException(_t("Cannot replace existing channel '{}'".format(name)))
symb = gdb.new_channel(name, dtype, array=array, dup=dup)
if details:
gdb.set_channel_details(symb, details)
chan = cls(gdb, name)
if unit_of_measure:
chan.unit_of_measure = unit_of_measure
return chan
@property
def name(self):
"""
Channel name.
.. versionadded:: 9.3
"""
return self._get_str(self.gdb.gxdb.get_chan_name)
@name.setter
def name(self, name):
name = str(name)
if name != self.name:
if not self.gdb.gxdb.is_chan_name(name):
raise GdbException(_t('Invalid channel name \'{}\''.format(name)))
if self.gdb.exist_symb_(name, gxapi.DB_SYMB_CHAN):
raise GdbException(_t('Cannot rename to an existing channel name \'{}\''.format(name)))
self.lock_set_(self.gdb.gxdb.set_chan_name, name)
@property
def symbol(self):
"""
Channel symbol
.. versionadded:: 9.3
"""
return self._symb
@property
def array(self):
"""
Array channel width, 1 for non-array channels
.. versionadded:: 9.3
"""
return self.gdb.channel_width(self._symb)
@property
def is_array(self):
"""
`True` if this is an array channel
.. versionadded:: 9.3
"""
return bool(self.array > 1)
@property
def decimal(self):
"""
Number of displayed decimal places, can be set.
.. versionadded:: 9.3
"""
return self.gdb.gxdb.get_chan_decimal(self._symb)
@decimal.setter
def decimal(self, value):
self.lock_set_(self.gdb.gxdb.set_chan_decimal, value)
@property
def format(self):
"""
Channel display format:
============= ========================================
FORMAT_NORMAL normal decimal or integer format
FORMAT_EXP exponential
FORMAT_TIME geosoft time (HH:MM:SS.ssss)
FORMAT_DATE date (YYYY/MM/DD)
FORMAT_GEOGR geographic (deg.mm.ss.ssss)
FORMAT_SIGDIG decimals is number of significant digits
FORMAT_HEX hexadecimal
============= ========================================
.. versionadded:: 9.3
"""
return self.gdb.gxdb.get_chan_format(self._symb)
@format.setter
def format(self, value):
self.lock_set_(self.gdb.gxdb.set_chan_format, value)
@property
def label(self):
"""
Channel label used in display graphics, normally the same as the channel name.
Can be set.
.. versionadded:: 9.3
"""
sr = gxapi.str_ref()
self.gdb.gxdb.get_chan_label(self._symb, sr)
return sr.value
@label.setter
def label(self, value):
self.lock_set_(self.gdb.gxdb.set_chan_label, value)
@property
def type(self):
"""
Geosoft data type.
.. versionadded:: 9.3
"""
return self.gdb.gxdb.get_chan_type(self._symb)
@property
def unit_of_measure(self):
"""
Unit of measure, can be set.
.. versionadded:: 9.3
"""
sr = gxapi.str_ref()
self.gdb.gxdb.get_chan_unit(self._symb, sr)
return sr.value
@unit_of_measure.setter
def unit_of_measure(self, value):
self.lock_set_(self.gdb.gxdb.set_chan_unit, value)
@property
def width(self):
"""
Display window width in characters.
Can be set.
.. versionadded:: 9.3
"""
return self.gdb.gxdb.get_chan_width(self._symb)
@width.setter
def width(self, value):
self.lock_set_(self.gdb.gxdb.set_chan_width, value)
@property
def class_(self):
"""
Class name to which this channel is associated.
Can be set.
.. versionadded:: 9.3
"""
sr = gxapi.str_ref()
self.gdb.gxdb.get_chan_class(self._symb, sr)
return sr.value
@class_.setter
def class_(self, value):
self.lock_set_(self.gdb.gxdb.set_chan_class, value)
@property
def protect(self):
"""
`True` if this channel is protected from modification.
Can be set.
.. versionadded:: 9.3
"""
return bool(self.gdb.gxdb.get_chan_protect(self._symb))
@protect.setter
def protect(self, value):
if value:
value = 1
else:
value = 0
self.lock_set_(self.gdb.gxdb.set_chan_protect, value)
@property
def locked(self):
"""
True if symbol is locked. Use property :any:`lock` to determine if read or write lock, or to
set the lock.
Setting to `False` unlocks the symbol.
.. versionadded:: 9.3
"""
return self.lock != SYMBOL_LOCK_NONE
@locked.setter
def locked(self, value):
if not value:
self.gdb.unlock_(self._symb)
else:
raise GdbException(_t('Use property \'lock\' to set SYMBOL_READ or SYMBOL_WRITE lock.'))
@property
def lock(self):
"""
Lock setting:
| -1 unlocked (SYMBOL_LOCK_NONE)
| 0 read-locked (SYMBOL_LOCK_READ)
| 1 write-locked (SYMBOL_LOCK_WRITE)
Can be set.
.. versionadded 9.3
"""
return self.gdb.gxdb.get_symb_lock(self.symbol)
@lock.setter
def lock(self, value):
if self.lock != value:
self.gdb.unlock_(self.symbol)
self.gdb.gxdb.lock_symb(self.symbol, value, gxapi.DB_WAIT_INFINITY)
def delete(self):
"""
Delete the channel and all associated data. After calling this method this
channel instance is no longer valid.
.. versionadded:: 9.3
"""
if self.protect:
raise GdbException(_t("Cannot delete protected channel '{}'".format(self.name)))
self.lock = SYMBOL_LOCK_WRITE
self.gdb.gxdb.delete_symb(self._symb)
self._symb = gxapi.NULLSYMB
class Line:
    """
    Class to work with database lines. Use constructor `Line.new` to create a new line.
    Use instance properties to work with line properties.
    :param gdb: `Geosoft_gdb` instance
    :param name: line name string, must exist - see `new()` to create a new line
    .. versionadded:: 9.3
    """
    def _get(self, fn):
        # Read-lock the line symbol, call fn(symbol) and return its result,
        # releasing the lock even if fn raises.
        self.gdb.lock_read_(self._symb)
        try:
            return fn(self._symb)
        finally:
            self.gdb.unlock_(self._symb)
    def _get_str(self, fn):
        # Like _get(), but for gxapi getters that fill a str_ref out-parameter;
        # returns the retrieved string value.
        self.gdb.lock_read_(self._symb)
        try:
            sr = gxapi.str_ref()
            fn(self._symb, sr)
            return sr.value
        finally:
            self.gdb.unlock_(self._symb)
    def lock_set_(self, fn, v):
        """write_lock, set and release a gdb attribute that requires locking to write."""
        self.gdb.lock_write_(self._symb)
        try:
            fn(self._symb, v)
        finally:
            self.gdb.unlock_(self._symb)
    def __repr__(self):
        return "{}({})".format(self.__class__, self.__dict__)
    def __str__(self):
        return self.name
    def __init__(self, gdb, name):
        # Keep a reference to the owning database and resolve the line
        # name to its canonical form and database symbol handle.
        self.gdb = gdb
        name, self._symb = gdb.line_name_symb(name)
    @classmethod
    def new(cls, gdb, name, linetype=None, group=None, dup=None, replace=False):
        """
        Create a new line.
        :param gdb: `Geosoft_gdb` instance
        :param name: line name
        :param linetype: line type for creating a new line, ignored if group defines
        ================= =========================================
        SYMB_LINE_NORMAL normal lines, name is a string
        SYMB_LINE_FLIGHT flight lines, first letter is line type
        ================= =========================================
        :param group: group name for a grouped class
        :param dup: duplicate properties of this line (name, symbol or Line).
        :param replace: `True` to replace line if it exists. Default is `False` .
        :returns: Line instance
        .. versionadded:: 9.3
        """
        # Grouped or duplicated lines may use free-form names; otherwise the
        # name must follow the standard line-naming convention.
        if group is None and dup is None and not is_valid_line_name(name):
            raise GdbException(_t('Invalid line name: {}'.format(name)))
        if gdb.exist_symb_(name, gxapi.DB_SYMB_LINE):
            if replace:
                gdb.delete_line(name)
            else:
                raise GdbException(_t("Cannot replace existing line '{}'".format(name)))
        gdb.new_line(name, linetype, group=group, dup=dup)
        return cls(gdb, name)
    @property
    def name(self):
        """
        Line name, consistent with names constructed by `create_line_name`.
        To change a line name change the type, number or version.
        .. versionadded:: 9.3
        """
        return self._get_str(self.gdb.gxdb.get_symb_name)
    @property
    def symbol(self):
        """
        Line symbol
        .. versionadded:: 9.3
        """
        return self._symb
    @property
    def type(self):
        """
        Line type, which can be set:
        | LINE_TYPE_NORMAL
        | LINE_TYPE_BASE
        | LINE_TYPE_TIE
        | LINE_TYPE_TEST
        | LINE_TYPE_TREND
        | LINE_TYPE_SPECIAL
        | LINE_TYPE_RANDOM
        .. versionadded:: 9.3
        """
        return self._get(self.gdb.gxdb.line_type)
    @type.setter
    def type(self, value):
        self.lock_set_(self.gdb.gxdb.set_line_type, value)
    @property
    def category(self):
        """
        Line category (read-only):
        | LINE_CATEGORY_FLIGHT
        | LINE_CATEGORY_GROUP
        | LINE_CATEGORY_NORMAL
        .. versionadded:: 9.3
        """
        return self._get(self.gdb.gxdb.line_category)
    @property
    def date(self):
        """
        Line date. Can be set.
        .. versionadded:: 9.3
        """
        return self._get(self.gdb.gxdb.line_date)
    @date.setter
    def date(self, value):
        self.lock_set_(self.gdb.gxdb.set_line_date, value)
    @property
    def flight(self):
        """
        Line flight number (flight/cruise/survey event). Can be set.
        .. versionadded:: 9.3
        """
        return self._get(self.gdb.gxdb.line_flight)
    @flight.setter
    def flight(self, value):
        self.lock_set_(self.gdb.gxdb.set_line_flight, value)
    @property
    def number(self):
        """
        Line number. Can be set
        .. versionadded:: 9.3
        """
        return self._get(self.gdb.gxdb.line_number)
    @number.setter
    def number(self, value):
        # Line numbers are integral; coerce before storing.
        self.lock_set_(self.gdb.gxdb.set_line_num, int(value))
    @property
    def version(self):
        """
        Line version number. Can be set.
        .. versionadded:: 9.3
        """
        return self._get(self.gdb.gxdb.line_version)
    @version.setter
    def version(self, value):
        self.lock_set_(self.gdb.gxdb.set_line_ver, value)
    @property
    def grouped(self):
        """
        True if this is a grouped line.
        .. versionadded:: 9.3
        """
        return self.category == LINE_CATEGORY_GROUP
    @property
    def group(self):
        """
        The lines group class name, '' for a group lines (LINE_CATEGORY_GROUP).
        Only works for lines that are part of a group, `None` otherwise.
        Can be set.
        .. versionadded:: 9.3
        """
        if self.category == LINE_CATEGORY_GROUP:
            return self._get_str(self.gdb.gxdb.get_group_class)
        else:
            return None
    @group.setter
    def group(self, value):
        if self.category == LINE_CATEGORY_GROUP:
            self.lock_set_(self.gdb.gxdb.set_group_class, value)
        else:
            raise GdbException(_t('Line \'{}\' is not a grouped line.'.format(self.name)))
    @property
    def selected(self):
        """True if this line is selected, can be set."""
        return self.gdb.gxdb.get_line_selection(self._symb) == gxapi.DB_LINE_SELECT_INCLUDE
    @selected.setter
    def selected(self, value):
        if bool(value):
            self.gdb.gxdb.set_line_selection(self._symb, gxapi.DB_LINE_SELECT_INCLUDE)
        else:
            self.gdb.gxdb.set_line_selection(self._symb, gxapi.DB_LINE_SELECT_EXCLUDE)
    @property
    def locked(self):
        """
        True if symbol is locked. Use property :any:`lock` to determine if read or write lock, or to
        set the lock.
        Setting to `False` unlocks the symbol.
        .. versionadded:: 9.3
        """
        return self.lock != SYMBOL_LOCK_NONE
    @locked.setter
    def locked(self, value):
        if not value:
            self.gdb.unlock_(self._symb)
        else:
            raise GdbException(_t('Use property \'lock\' to set SYMBOL_READ or SYMBOL_WRITE lock.'))
    @property
    def lock(self):
        """
        Lock setting:
        | -1 unlocked (SYMBOL_LOCK_NONE)
        | 0 read-locked (SYMBOL_LOCK_READ)
        | 1 write-locked (SYMBOL_LOCK_WRITE)
        Can be set.
        .. versionadded 9.3
        """
        return self.gdb.gxdb.get_symb_lock(self.symbol)
    @lock.setter
    def lock(self, value):
        if self.lock != value:
            # Release any current lock before acquiring the requested one.
            self.gdb.unlock_(self.symbol)
            self.gdb.gxdb.lock_symb(self.symbol, value, gxapi.DB_WAIT_INFINITY)
    def delete(self):
        """
        Delete the line and all data associated with the line. After calling this method this
        line instance is no longer valid.
        .. versionadded:: 9.3
        """
        self.gdb.delete_line(self.symbol)
        # Invalidate this instance's symbol handle.
        self._symb = gxapi.NULLSYMB
    def delete_data(self):
        """
        Delete all data in a line but keep the line
        .. versionadded:: 9.6
        """
        self.gdb.delete_line_data(self.symbol)
    # =================================
    # methods that work with line data
    def bearing(self):
        """
        Return bearing of a line based on location of the first and last point in the line.
        Returns None if the line is empty or first and last points are the same.
        .. versionadded:: 9.3
        """
        x, y, z = self.gdb.xyz_channels
        # Resolve channel names to symbols for the gxapi call.
        x = self.gdb.channel_name_symb(x)[1]
        y = self.gdb.channel_name_symb(y)[1]
        self.gdb.lock_read_(x)
        self.gdb.lock_read_(y)
        try:
            bearing = gxapi.GXDU.direction(self.gdb.gxdb, self._symb, x, y)
        finally:
            # Unlock in reverse acquisition order.
            self.gdb.unlock_(y)
            self.gdb.unlock_(x)
        # The computed bearing is stored on the line even when it is the
        # dummy value (no valid bearing).
        self.lock_set_(self.gdb.gxdb.set_line_bearing, bearing)
        if bearing == gxapi.rDUMMY:
            return None
        return bearing
| [
"geosoft.gxapi.str_ref",
"numpy.empty",
"numpy.isnan",
"os.path.isfile",
"geosoft.gxapi.GXIPJ.create",
"numpy.unique",
"pandas.DataFrame",
"os.path.abspath",
"geosoft.gxapi.GXEDB.lock",
"numpy.append",
"numpy.apply_along_axis",
"geosoft.gxpy.system.translate",
"os.path.normpath",
"geosoft.... | [((2741, 2773), 'geosoft.gxpy.system.translate', 'geosoft.gxpy.system.translate', (['s'], {}), '(s)\n', (2770, 2773), False, 'import geosoft\n'), ((4357, 4379), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (4373, 4379), False, 'import os\n'), ((6038, 6053), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (6051, 6053), True, 'import geosoft.gxapi as gxapi\n'), ((4463, 4494), 'os.path.normpath', 'os.path.normpath', (["(name + '.gdb')"], {}), "(name + '.gdb')\n", (4479, 4494), False, 'import os\n'), ((10934, 10958), 'geosoft.gxapi.GXLST.create', 'gxapi.GXLST.create', (['(2000)'], {}), '(2000)\n', (10952, 10958), True, 'import geosoft.gxapi as gxapi\n'), ((12479, 12494), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (12492, 12494), True, 'import geosoft.gxapi as gxapi\n'), ((12569, 12595), 'os.path.normpath', 'os.path.normpath', (['sr.value'], {}), '(sr.value)\n', (12585, 12595), False, 'import os\n'), ((15067, 15174), 'geosoft.gxapi.GXDB.create_comp', 'gxapi.GXDB.create_comp', (['name', 'max_lines', 'max_channels', 'max_blobs', '(10)', '(100)', '"""SUPER"""', '""""""', 'page_size', 'comp'], {}), "(name, max_lines, max_channels, max_blobs, 10, 100,\n 'SUPER', '', page_size, comp)\n", (15089, 15174), True, 'import geosoft.gxapi as gxapi\n'), ((17219, 17234), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (17232, 17234), True, 'import geosoft.gxapi as gxapi\n'), ((19468, 19489), 'geosoft.gxapi.GXMETA.create', 'gxapi.GXMETA.create', ([], {}), '()\n', (19487, 19489), True, 'import geosoft.gxapi as gxapi\n'), ((20175, 20207), 'os.path.abspath', 'os.path.abspath', (['self._file_name'], {}), '(self._file_name)\n', (20190, 20207), False, 'import os\n'), ((28340, 28355), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (28353, 28355), True, 'import geosoft.gxapi as gxapi\n'), ((59649, 59686), 'numpy.empty', 'np.empty', (['(nrows, ncols)'], {'dtype': 'dtype'}), '((nrows, ncols), 
dtype=dtype)\n', (59657, 59686), True, 'import numpy as np\n'), ((64027, 64041), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (64039, 64041), True, 'import pandas as pd\n'), ((83898, 83913), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (83911, 83913), True, 'import geosoft.gxapi as gxapi\n'), ((84421, 84436), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (84434, 84436), True, 'import geosoft.gxapi as gxapi\n'), ((85139, 85154), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (85152, 85154), True, 'import geosoft.gxapi as gxapi\n'), ((6423, 6445), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (6439, 6445), False, 'import os\n'), ((10803, 10830), 'os.path.basename', 'os.path.basename', (['self.name'], {}), '(self.name)\n', (10819, 10830), False, 'import os\n'), ((11544, 11566), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (11560, 11566), False, 'import os\n'), ((12284, 12305), 'geosoft.gxapi.GXEDB.current', 'gxapi.GXEDB.current', ([], {}), '()\n', (12303, 12305), True, 'import geosoft.gxapi as gxapi\n'), ((12328, 12354), 'geosoft.gxapi.GXEDB.lock', 'gxapi.GXEDB.lock', (['gdb._edb'], {}), '(gdb._edb)\n', (12344, 12354), True, 'import geosoft.gxapi as gxapi\n'), ((14905, 14925), 'os.path.isfile', 'os.path.isfile', (['name'], {}), '(name)\n', (14919, 14925), False, 'import os\n'), ((20558, 20578), 'geosoft.gxapi.GXIPJ.create', 'gxapi.GXIPJ.create', ([], {}), '()\n', (20576, 20578), True, 'import geosoft.gxapi as gxapi\n'), ((24996, 25012), 'numpy.nanmin', 'np.nanmin', (['_data'], {}), '(_data)\n', (25005, 25012), True, 'import numpy as np\n'), ((25257, 25273), 'numpy.nanmax', 'np.nanmax', (['_data'], {}), '(_data)\n', (25266, 25273), True, 'import numpy as np\n'), ((34122, 34137), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (34135, 34137), True, 'import geosoft.gxapi as gxapi\n'), ((60759, 60792), 'numpy.empty', 'np.empty', (['(0, ncols)'], {'dtype': 'dtype'}), 
'((0, ncols), dtype=dtype)\n', (60767, 60792), True, 'import numpy as np\n'), ((68403, 68417), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (68411, 68417), True, 'import numpy as np\n'), ((74183, 74195), 'numpy.unique', 'np.unique', (['d'], {}), '(d)\n', (74192, 74195), True, 'import numpy as np\n'), ((74215, 74233), 'numpy.append', 'np.append', (['vset', 'd'], {}), '(vset, d)\n', (74224, 74233), True, 'import numpy as np\n'), ((74253, 74268), 'numpy.unique', 'np.unique', (['vset'], {}), '(vset)\n', (74262, 74268), True, 'import numpy as np\n'), ((79530, 79545), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (79543, 79545), True, 'import geosoft.gxapi as gxapi\n'), ((87868, 87883), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (87881, 87883), True, 'import geosoft.gxapi as gxapi\n'), ((95635, 95688), 'geosoft.gxapi.GXDU.direction', 'gxapi.GXDU.direction', (['self.gdb.gxdb', 'self._symb', 'x', 'y'], {}), '(self.gdb.gxdb, self._symb, x, y)\n', (95655, 95688), True, 'import geosoft.gxapi as gxapi\n'), ((4995, 5024), 'geosoft.gxapi.GXDB.is_line_name', 'gxapi.GXDB.is_line_name', (['name'], {}), '(name)\n', (5018, 5024), True, 'import geosoft.gxapi as gxapi\n'), ((11272, 11287), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (11285, 11287), True, 'import geosoft.gxapi as gxapi\n'), ((11379, 11404), 'os.path.normpath', 'os.path.normpath', (['s.value'], {}), '(s.value)\n', (11395, 11404), False, 'import os\n'), ((11428, 11461), 'os.path.basename', 'os.path.basename', (['self._file_name'], {}), '(self._file_name)\n', (11444, 11461), False, 'import os\n'), ((25094, 25110), 'numpy.nanmax', 'np.nanmax', (['_data'], {}), '(_data)\n', (25103, 25110), True, 'import numpy as np\n'), ((27433, 27448), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (27446, 27448), True, 'import geosoft.gxapi as gxapi\n'), ((32028, 32043), 'geosoft.gxapi.str_ref', 'gxapi.str_ref', ([], {}), '()\n', (32041, 32043), True, 'import 
geosoft.gxapi as gxapi\n'), ((59441, 59466), 'numpy.array', 'np.array', (['[]'], {'dtype': 'dtype'}), '([], dtype=dtype)\n', (59449, 59466), True, 'import numpy as np\n'), ((71737, 71751), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (71745, 71751), True, 'import numpy as np\n'), ((24919, 24934), 'numpy.isnan', 'np.isnan', (['_data'], {}), '(_data)\n', (24927, 24934), True, 'import numpy as np\n'), ((59508, 59533), 'numpy.array', 'np.array', (['[]'], {'dtype': 'dtype'}), '([], dtype=dtype)\n', (59516, 59533), True, 'import numpy as np\n'), ((61062, 61083), 'numpy.isnan', 'np.isnan', (['dummy_value'], {}), '(dummy_value)\n', (61070, 61083), True, 'import numpy as np\n'), ((61617, 61638), 'numpy.isnan', 'np.isnan', (['dummy_value'], {}), '(dummy_value)\n', (61625, 61638), True, 'import numpy as np\n'), ((61779, 61838), 'numpy.apply_along_axis', 'np.apply_along_axis', (['(lambda a: not dummy_value in a)', '(1)', 'npd'], {}), '(lambda a: not dummy_value in a, 1, npd)\n', (61798, 61838), True, 'import numpy as np\n'), ((61112, 61131), 'numpy.isnan', 'np.isnan', (['npd[:, i]'], {}), '(npd[:, i])\n', (61120, 61131), True, 'import numpy as np\n'), ((61702, 61713), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (61710, 61713), True, 'import numpy as np\n')] |
import os, json
import logging
import numpy as np
import collections
import serifxml3
from serif.model.base_model import BaseModel
from serif.theory.sentence import Sentence
from nlplingo.decoding.decoder import Decoder, DocumentPrediction, SentencePrediction, EventPrediction, \
TriggerPrediction, ArgumentPrediction
from nlplingo.text.text_theory import Document as lingoDoc
from nlplingo.annotation.serif import to_lingo_sentence
from nlplingo.annotation.ingestion import populate_doc_sentences_with_embeddings_and_annotations
from nlplingo.embeddings.word_embeddings import DocumentContextualEmbeddings
from nlplingo.text.text_theory import EventEventRelation
from nlplingo.tasks.eventrelation.postprocess import add_serif_eerm_to_all_eer_predictions
import time
logger = logging.getLogger(__name__)
class DummySentenceTheory(object):
    """Stand-in sentence theory exposing an empty token sequence."""

    def __init__(self):
        # No tokens: used for sentences that are skipped during decoding.
        self.token_sequence = []


class DummySentence(object):
    """Minimal sentence placeholder exposing the attributes nlplingo reads."""

    def __init__(self, sent_no, start_edt, end_edt):
        theory = DummySentenceTheory()
        # Mirror the serif Sentence API: a list of theories plus a primary one,
        # both pointing at the same (empty) theory.
        self.sentence_theories = [theory]
        self.sentence_theory = theory
        self.sent_no = sent_no
        self.start_edt = start_edt
        self.end_edt = end_edt
def find_lowest_common_ancestor(syn_node_1, syn_node_2):
    """Return the lowest common ancestor of two parse-tree nodes, or None.

    Walks both parent chains in lockstep, recording every node visited; the
    first node reached that was already seen from the other chain is the LCA.
    Accepts any objects exposing a ``parent`` attribute that is None at the
    root (e.g. serifxml3.SynNode) — the previous isinstance asserts were
    dropped to allow duck-typed nodes and because ``assert`` is stripped
    under ``-O``.

    :param syn_node_1: first tree node
    :param syn_node_2: second tree node
    :return: the lowest common ancestor node, or None if the nodes share no
        ancestor (disjoint trees).
    """
    # https://www.hrwhisper.me/algorithm-lowest-common-ancestor-of-a-binary-tree
    visited = set()
    # BUG FIX: continue while EITHER chain still has nodes. The previous
    # 'and' condition stopped as soon as the shallower node reached the root,
    # returning None even when a common ancestor existed further up the
    # deeper node's chain.
    while syn_node_1 is not None or syn_node_2 is not None:
        if syn_node_1 is not None:
            if syn_node_1 in visited:
                return syn_node_1
            visited.add(syn_node_1)
            syn_node_1 = syn_node_1.parent
        if syn_node_2 is not None:
            if syn_node_2 in visited:
                return syn_node_2
            visited.add(syn_node_2)
            syn_node_2 = syn_node_2.parent
    return None
def build_nlplingo_entity_mention_id_to_serif_mention_valuemention_name_mapping_dict(serif_doc):
    """Map serif theory ids to their Mention/ValueMention/Name objects.

    For why this is implemented this way, refer to nlplingo.annotation.serif:
    nlplingo uses the serif node id as nlplingo.text.text_span.EntityMention.id,
    so collecting ids here lets predictions be resolved back to the original
    serif objects.
    """
    assert isinstance(serif_doc, serifxml3.Document)
    id_to_object = dict()
    for sentence in serif_doc.sentences:
        assert isinstance(sentence, serifxml3.Sentence)
        # Mentions, value mentions and names all live in one id namespace.
        for container in (sentence.mention_set, sentence.value_mention_set, sentence.name_theory):
            for theory_object in container:
                id_to_object[theory_object.id] = theory_object
    return id_to_object
class NLPLingoDecoder(BaseModel):
    """Serif pipeline model that runs nlplingo decoding (event triggers,
    event arguments and event-event relations) over serifxml documents and
    writes the predictions back into the serif theories."""

    def __init__(self, params_path, npz_filelist, argparse, **kwargs):
        """
        :param params_path: path to a JSON nlplingo parameter file
        :param npz_filelist: path to a file listing one BERT .npz embedding
            path per line (docid is the file basename without ".npz"); a
            non-existent path disables the lookup table
        :param argparse: parsed command-line arguments; only
            ``output_directory`` is read here (for event-embedding output)
        :param kwargs: forwarded to BaseModel; ``max_number_of_tokens_per_sentence``
            (default -1 = no limit) caps sentence length during decoding
        """
        super(NLPLingoDecoder, self).__init__(**kwargs)
        self.argparse = argparse
        with open(params_path) as fp:
            self.params = json.load(fp)
        self.doc_id_to_bert_npz_path = dict()
        # Output event embeddings if any configured extractor asks for them.
        self.should_output_event_emb = False
        for extractor in self.params.get("extractors",[]):
            output_vectors = extractor.get("output_vectors",False)
            if output_vectors is True:
                self.should_output_event_emb = True
                break
        self.max_number_of_tokens_per_sentence = int(kwargs.get("max_number_of_tokens_per_sentence", -1))
        if os.path.isfile(npz_filelist):
            with open(npz_filelist) as fp:
                for i in fp:
                    i = i.strip()
                    docid = os.path.basename(i)
                    docid = docid.replace(".npz", "")
                    self.doc_id_to_bert_npz_path[docid] = i
        self.decoder = Decoder(self.params)
        self.decoder.load_model()
    def get_npz(self, docid):
        """Return the loaded BERT npz for *docid*, or empty arrays when the
        document has no registered embedding file."""
        if docid in self.doc_id_to_bert_npz_path:
            return np.load(self.doc_id_to_bert_npz_path[docid], allow_pickle=True)
        else:
            return {"embeddings":np.asarray([]),"token_map":np.asarray([])}
    def reload_model(self):
        """Reload the underlying nlplingo decoder model."""
        self.decoder.reload_model()
    def decode_event_and_event_argument(self, serif_doc):
        """Decode event triggers and arguments for one serif document and add
        the resulting EventMentions (with argument roles and scores) to each
        sentence's event mention set."""
        docid = serif_doc.docid
        lingo_doc = lingoDoc(docid)
        # (start_edt, end_edt) of each non-empty sentence -> serif Sentence,
        # used to map sentence-level predictions back to serif theories.
        sent_edt_off_to_sent = dict()
        for st_index, sentence in enumerate(serif_doc.sentences):
            if self.max_number_of_tokens_per_sentence > -1:
                st = sentence.sentence_theories[0]
                # Empty or over-long sentences are replaced by a DummySentence
                # (no tokens) so sentence indices stay aligned.
                if len(st.token_sequence) == 0 or len(st.token_sequence) > self.max_number_of_tokens_per_sentence:
                    to_lingo_sentence(serif_doc,st_index,DummySentence(st_index,sentence.start_edt,sentence.end_edt),lingo_doc=lingo_doc,
                                      add_serif_entity_mentions=self.params.get('add_serif_entity_mentions', True),
                                      add_serif_event_mentions=self.params.get('add_serif_event_mentions', False),
                                      add_serif_entity_relation_mentions=self.params.get(
                                          'add_serif_entity_entity_relation_mentions', False),
                                      add_serif_prop_adj=self.params.get('add_serif_prop_adj', False))
                    continue
            to_lingo_sentence(serif_doc, st_index, sentence, lingo_doc=lingo_doc,
                              add_serif_entity_mentions=self.params.get('add_serif_entity_mentions', True),
                              add_serif_event_mentions=self.params.get('add_serif_event_mentions', False),
                              add_serif_entity_relation_mentions=self.params.get(
                                  'add_serif_entity_entity_relation_mentions', False),
                              add_serif_prop_adj=self.params.get('add_serif_prop_adj', False))
            if len(sentence.token_sequence) > 0:
                sent_edt_off_to_sent[
                    sentence.token_sequence[0].start_char, sentence.token_sequence[-1].end_char] = sentence
        # Prefer embeddings attached to the document; fall back to the
        # filelist-registered npz when one exists.
        if hasattr(serif_doc,"aux") and hasattr(serif_doc.aux,"bert_npz"):
            DocumentContextualEmbeddings.load_embeddings_into_doc(
                lingo_doc, serif_doc.aux.bert_npz)
        elif len(self.doc_id_to_bert_npz_path) > 0:
            DocumentContextualEmbeddings.load_embeddings_into_doc(
                lingo_doc, self.get_npz(docid))
        populate_doc_sentences_with_embeddings_and_annotations([lingo_doc], self.params, self.decoder.embeddings)
        list_trigger_extractor_result_collection, doc_id_to_event_and_event_arg_feature = self.decoder.decode_trigger_and_argument(
            [lingo_doc])
        serif_id_to_serif_mention_valuemention_name_mapping = build_nlplingo_entity_mention_id_to_serif_mention_valuemention_name_mapping_dict(
            serif_doc)
        if self.should_output_event_emb:
            self.decoder.serialize_doc_event_and_event_arg_feature_npz(doc_id_to_event_and_event_arg_feature,
                                                                       self.argparse.output_directory)
        # Convert nlplingo prediction objects back into serif EventMentions.
        for trigger_extractor_result_collection in list_trigger_extractor_result_collection:
            trigger_extractor_result_collection.organize_into_prediction_objects()
            prediction_object = trigger_extractor_result_collection.document_predictions
            for doc_p_docid, doc_p in prediction_object.items():
                assert docid == doc_p_docid
                for sent_p in doc_p.sentences.values():
                    assert isinstance(sent_p, SentencePrediction)
                    # Prediction offsets are end-exclusive; serif end offsets
                    # are inclusive, hence the -1.
                    sent_start_edt = sent_p.start
                    sent_end_edt = sent_p.end - 1
                    sentence = sent_edt_off_to_sent[sent_start_edt, sent_end_edt]
                    assert isinstance(sentence, serifxml3.Sentence)
                    event_mention_set = sentence.event_mention_set
                    if event_mention_set is None:
                        event_mention_set = \
                            sentence.add_new_event_mention_set()
                        ''':type: EventMentionSet'''
                    token_start_edt_to_token = {token.start_edt: token for token in sentence.token_sequence}
                    token_end_edt_to_token = {token.end_edt: token for token in sentence.token_sequence}
                    for event_p in sent_p.events.values():
                        assert isinstance(event_p, EventPrediction)
                        # Collect (role, serif object, score) argument tuples
                        # once; reused for every predicted event type below.
                        list_serif_argument_tuple = list()
                        for argument_p in event_p.arguments.values():
                            assert isinstance(argument_p, ArgumentPrediction)
                            arg_start_char = argument_p.start
                            arg_end_char = argument_p.end - 1
                            arg_serif_id = argument_p.em_id
                            arg_serif_obj = serif_id_to_serif_mention_valuemention_name_mapping[arg_serif_id]
                            for arg_role, arg_score in argument_p.labels.items():
                                list_serif_argument_tuple.append(tuple((arg_role, arg_serif_obj, arg_score)))
                        trigger = event_p.trigger
                        assert isinstance(trigger, TriggerPrediction)
                        trigger_start_char = trigger.start
                        trigger_end_char = trigger.end - 1
                        start_token = token_start_edt_to_token[trigger_start_char]
                        end_token = token_end_edt_to_token[trigger_end_char]
                        # Anchor the event on the smallest parse node spanning
                        # the trigger tokens.
                        event_anchor_synnode = find_lowest_common_ancestor(start_token.syn_node, end_token.syn_node)
                        assert isinstance(event_anchor_synnode, serifxml3.SynNode)
                        for event_type, event_type_score in trigger.labels.items():
                            event_mention = event_mention_set.add_new_event_mention(
                                event_type, event_anchor_synnode, event_type_score)
                            # add arguments
                            for arg_role, arg_serif_obj, arg_score in list_serif_argument_tuple:
                                added_arg = None
                                if isinstance(arg_serif_obj, serifxml3.Mention):
                                    added_arg = event_mention.add_new_mention_argument(arg_role, arg_serif_obj,
                                                                                       arg_score)
                                elif isinstance(arg_serif_obj, serifxml3.ValueMention):
                                    added_arg = event_mention.add_new_value_mention_argument(arg_role, arg_serif_obj,
                                                                                             arg_score)
                                else:
                                    raise ValueError(
                                        "Bad argument type {} in EventMention".format(type(arg_serif_obj).__name__))
    def decode_event_event_relation_doc_list(self, serif_doc_list):
        """Decode event-event relations over a batch of serif documents and
        write EventEventRelationMentions back into each document. Existing
        LearnIt relation mentions are collected first, the mention set is
        rebuilt, and LearnIt relations are re-added after decoding."""
        # START LOADING SERIFS
        lingo_docs = []
        sent_edt_off_to_sent_dict = {}
        lingo_anchor_int_pair_to_serif_ems_dict = {}
        eerm_set_dict = {}
        all_eer_predictions = dict()
        start = time.time()
        for serif_doc_idx, serif_doc in enumerate(serif_doc_list):
            docid = serif_doc.docid
            lingo_doc = lingoDoc(docid)
            sent_edt_off_to_sent_dict[docid] = dict()
            sent_edt_off_to_sent = sent_edt_off_to_sent_dict[docid]
            lingo_anchor_int_pair_to_serif_ems_dict[docid] = dict()
            lingo_anchor_int_pair_to_serif_ems = lingo_anchor_int_pair_to_serif_ems_dict[docid]
            # Code block for event and event argument
            for st_index, sentence in enumerate(serif_doc.sentences):
                assert isinstance(sentence, serifxml3.Sentence)
                if self.max_number_of_tokens_per_sentence > -1:
                    st = sentence.sentence_theories[0]
                    if len(st.token_sequence) == 0 or len(st.token_sequence) > self.max_number_of_tokens_per_sentence:
                        to_lingo_sentence(serif_doc,st_index,DummySentence(st_index,sentence.start_edt,sentence.end_edt),lingo_doc=lingo_doc,
                                          add_serif_entity_mentions=self.params.get('add_serif_entity_mentions', True),
                                          add_serif_event_mentions=self.params.get('add_serif_event_mentions', False),
                                          add_serif_entity_relation_mentions=self.params.get(
                                              'add_serif_entity_entity_relation_mentions', False),
                                          add_serif_prop_adj=self.params.get('add_serif_prop_adj', False))
                        continue
                to_lingo_sentence(serif_doc, st_index, sentence, lingo_doc=lingo_doc,
                                  add_serif_entity_mentions=self.params.get('add_serif_entity_mentions', True),
                                  add_serif_event_mentions=True,
                                  add_serif_entity_relation_mentions=self.params.get(
                                      'add_serif_entity_entity_relation_mentions', False),
                                  add_serif_prop_adj=self.params.get('add_serif_prop_adj', False))
                if len(sentence.token_sequence) > 0:
                    sent_edt_off_to_sent[
                        sentence.token_sequence[0].start_char, sentence.token_sequence[-1].end_char] = sentence
                ### Populate EER candidates Now only do in sentence EER
                for event_mention_src in sentence.event_mention_set or []:
                    lingo_em_arg1 = lingo_doc.get_event_with_id(event_mention_src.id)
                    for anchor in lingo_em_arg1.anchors:
                        # Index serif event mentions by anchor character span so
                        # predictions can be mapped back after decoding.
                        lingo_anchor_int_pair_to_serif_ems.setdefault(
                            (anchor.start_char_offset(), anchor.end_char_offset()), set()).add(event_mention_src)
                    # Build every ordered same-sentence event pair as an
                    # untyped candidate relation.
                    for event_mention_dst in sentence.event_mention_set or []:
                        if event_mention_src != event_mention_dst:
                            lingo_em_arg2 = lingo_doc.get_event_with_id(event_mention_dst.id)
                            relation_type = None
                            eer = EventEventRelation(relation_type, lingo_em_arg1, lingo_em_arg2, serif_sentence=sentence, serif_event_0=event_mention_src, serif_event_1=event_mention_dst)
                            lingo_doc.add_event_event_relation(eer)
                ### End populate EER candidates
            eerm_set = serif_doc.event_event_relation_mention_set
            if eerm_set is None:
                eerm_set = \
                    serif_doc.add_new_event_event_relation_mention_set()
                ''':type: EventEventRelationMentionSet'''
            # add LearnIt event-event relation mentions into a data structure
            for serif_eerm in eerm_set or []:
                if serif_eerm.model == 'LearnIt':
                    add_serif_eerm_to_all_eer_predictions(all_eer_predictions, serif_eerm, lingo_doc)
            eerm_set = serif_doc.add_new_event_event_relation_mention_set()  # kill the eerm_set
            eerm_set_dict[docid] = eerm_set
            lingo_docs.append(lingo_doc)
        # END LOADING SERIFS
        end = time.time()
        logging.info('SerifXML loading took %s seconds', end - start)
        logging.info('Start of entire EER decoding step')
        start = time.time()
        event_event_relation_result_collection = self.decoder.decode_event_event_relation(lingo_docs, all_eer_predictions, sent_edt_off_to_sent_dict)
        end = time.time()
        logging.info('Entire EER decoding took %s seconds', end - start)
        logging.info('Start of EER prediction object organization')
        start = time.time()
        event_event_relation_result_collection.organize_into_prediction_objects()
        end = time.time()
        logging.info('EER prediction object organization took %s seconds', end - start)
        prediction_object = event_event_relation_result_collection.document_predictions
        logging.info('Start of writing EERs into SerifXML')
        start = time.time()
        for doc_p_docid, doc_p in prediction_object.items():
            sent_edt_off_to_sent = sent_edt_off_to_sent_dict[doc_p_docid]
            lingo_anchor_int_pair_to_serif_ems = lingo_anchor_int_pair_to_serif_ems_dict[doc_p_docid]
            eerm_set = eerm_set_dict[doc_p_docid]
            for sent_p in doc_p.sentences.values():
                assert isinstance(sent_p, SentencePrediction)
                # Prediction offsets are end-exclusive; serif end offsets are
                # inclusive, hence the -1.
                sent_start_edt = sent_p.start
                sent_end_edt = sent_p.end - 1
                sentence = sent_edt_off_to_sent[sent_start_edt, sent_end_edt]
                assert isinstance(sentence, serifxml3.Sentence)
                for event_event_relation_p in sent_p.event_event_relations.values():
                    left_trigger_p = event_event_relation_p.left_event.trigger
                    right_trigger_p = event_event_relation_p.right_event.trigger
                    # A trigger span can map to several serif event mentions;
                    # emit one relation mention per (left, right) pair.
                    for relation_type, score in event_event_relation_p.labels.items():
                        for left_serif_em in lingo_anchor_int_pair_to_serif_ems[
                            (left_trigger_p.start, left_trigger_p.end)]:
                            for right_serif_em in lingo_anchor_int_pair_to_serif_ems[
                                (right_trigger_p.start, right_trigger_p.end)]:
                                eerm = eerm_set.add_new_event_event_relation_mention(
                                    relation_type, score, "nlplingo")
                                eerm.add_new_event_mention_argument("arg1", left_serif_em)
                                eerm.add_new_event_mention_argument("arg2", right_serif_em)
                                logger.debug("{}\t{}\t{}\t{}".format(left_serif_em.anchor_node.text, relation_type,
                                                                     right_serif_em.anchor_node.text, sentence.text))
        # Re-add the LearnIt relations that were collected before the mention
        # set was rebuilt.
        for docid in event_event_relation_result_collection.learnit_relations:
            learnit_relations = event_event_relation_result_collection.learnit_relations[docid]
            for serif_eerm in learnit_relations:
                eerm_set_dict[docid].add_event_event_relation_mention(serif_eerm)
        end = time.time()
        logging.info('Writing EERs into SerifXML took %s seconds', end - start)
    def process(self, serif_doc):
        """Run all configured nlplingo decoding steps on a single document."""
        assert self.decoder.model_loaded == True
        if len(self.decoder.event_trigger_extractors) > 0 or len(self.decoder.event_argument_extractors) > 0:
            self.decode_event_and_event_argument(serif_doc)
        if len(self.decoder.event_event_relation_extractors) > 0:
            self.decode_event_event_relation_doc_list([serif_doc])
    def process_barrier(self, serif_doc_list):
        """Run EER decoding over a batch of documents (barrier stage)."""
        assert self.decoder.model_loaded == True
        if len(self.decoder.event_event_relation_extractors) > 0:
            self.decode_event_event_relation_doc_list(serif_doc_list)
| [
"numpy.load",
"json.load",
"nlplingo.decoding.decoder.Decoder",
"os.path.basename",
"nlplingo.annotation.ingestion.populate_doc_sentences_with_embeddings_and_annotations",
"numpy.asarray",
"time.time",
"logging.info",
"os.path.isfile",
"nlplingo.text.text_theory.Document",
"nlplingo.embeddings.w... | [((781, 808), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (798, 808), False, 'import logging\n'), ((3358, 3386), 'os.path.isfile', 'os.path.isfile', (['npz_filelist'], {}), '(npz_filelist)\n', (3372, 3386), False, 'import os, json\n'), ((3679, 3699), 'nlplingo.decoding.decoder.Decoder', 'Decoder', (['self.params'], {}), '(self.params)\n', (3686, 3699), False, 'from nlplingo.decoding.decoder import Decoder, DocumentPrediction, SentencePrediction, EventPrediction, TriggerPrediction, ArgumentPrediction\n'), ((4164, 4179), 'nlplingo.text.text_theory.Document', 'lingoDoc', (['docid'], {}), '(docid)\n', (4172, 4179), True, 'from nlplingo.text.text_theory import Document as lingoDoc\n'), ((6283, 6393), 'nlplingo.annotation.ingestion.populate_doc_sentences_with_embeddings_and_annotations', 'populate_doc_sentences_with_embeddings_and_annotations', (['[lingo_doc]', 'self.params', 'self.decoder.embeddings'], {}), '([lingo_doc], self.\n params, self.decoder.embeddings)\n', (6337, 6393), False, 'from nlplingo.annotation.ingestion import populate_doc_sentences_with_embeddings_and_annotations\n'), ((11176, 11187), 'time.time', 'time.time', ([], {}), '()\n', (11185, 11187), False, 'import time\n'), ((15293, 15304), 'time.time', 'time.time', ([], {}), '()\n', (15302, 15304), False, 'import time\n'), ((15313, 15374), 'logging.info', 'logging.info', (['"""SerifXML loading took %s seconds"""', '(end - start)'], {}), "('SerifXML loading took %s seconds', end - start)\n", (15325, 15374), False, 'import logging\n'), ((15384, 15433), 'logging.info', 'logging.info', (['"""Start of entire EER decoding step"""'], {}), "('Start of entire EER decoding step')\n", (15396, 15433), False, 'import logging\n'), ((15450, 15461), 'time.time', 'time.time', ([], {}), '()\n', (15459, 15461), False, 'import time\n'), ((15626, 15637), 'time.time', 'time.time', ([], {}), '()\n', (15635, 15637), False, 'import time\n'), ((15646, 15710), 
'logging.info', 'logging.info', (['"""Entire EER decoding took %s seconds"""', '(end - start)'], {}), "('Entire EER decoding took %s seconds', end - start)\n", (15658, 15710), False, 'import logging\n'), ((15720, 15779), 'logging.info', 'logging.info', (['"""Start of EER prediction object organization"""'], {}), "('Start of EER prediction object organization')\n", (15732, 15779), False, 'import logging\n'), ((15796, 15807), 'time.time', 'time.time', ([], {}), '()\n', (15805, 15807), False, 'import time\n'), ((15904, 15915), 'time.time', 'time.time', ([], {}), '()\n', (15913, 15915), False, 'import time\n'), ((15924, 16003), 'logging.info', 'logging.info', (['"""EER prediction object organization took %s seconds"""', '(end - start)'], {}), "('EER prediction object organization took %s seconds', end - start)\n", (15936, 16003), False, 'import logging\n'), ((16101, 16152), 'logging.info', 'logging.info', (['"""Start of writing EERs into SerifXML"""'], {}), "('Start of writing EERs into SerifXML')\n", (16113, 16152), False, 'import logging\n'), ((16169, 16180), 'time.time', 'time.time', ([], {}), '()\n', (16178, 16180), False, 'import time\n'), ((18362, 18373), 'time.time', 'time.time', ([], {}), '()\n', (18371, 18373), False, 'import time\n'), ((18382, 18453), 'logging.info', 'logging.info', (['"""Writing EERs into SerifXML took %s seconds"""', '(end - start)'], {}), "('Writing EERs into SerifXML took %s seconds', end - start)\n", (18394, 18453), False, 'import logging\n'), ((2890, 2903), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (2899, 2903), False, 'import os, json\n'), ((3834, 3897), 'numpy.load', 'np.load', (['self.doc_id_to_bert_npz_path[docid]'], {'allow_pickle': '(True)'}), '(self.doc_id_to_bert_npz_path[docid], allow_pickle=True)\n', (3841, 3897), True, 'import numpy as np\n'), ((6002, 6095), 'nlplingo.embeddings.word_embeddings.DocumentContextualEmbeddings.load_embeddings_into_doc', 'DocumentContextualEmbeddings.load_embeddings_into_doc', 
(['lingo_doc', 'serif_doc.aux.bert_npz'], {}), '(lingo_doc, serif_doc.\n aux.bert_npz)\n', (6055, 6095), False, 'from nlplingo.embeddings.word_embeddings import DocumentContextualEmbeddings\n'), ((11315, 11330), 'nlplingo.text.text_theory.Document', 'lingoDoc', (['docid'], {}), '(docid)\n', (11323, 11330), True, 'from nlplingo.text.text_theory import Document as lingoDoc\n'), ((3945, 3959), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (3955, 3959), True, 'import numpy as np\n'), ((3972, 3986), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (3982, 3986), True, 'import numpy as np\n'), ((3522, 3541), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (3538, 3541), False, 'import os, json\n'), ((14987, 15072), 'nlplingo.tasks.eventrelation.postprocess.add_serif_eerm_to_all_eer_predictions', 'add_serif_eerm_to_all_eer_predictions', (['all_eer_predictions', 'serif_eerm', 'lingo_doc'], {}), '(all_eer_predictions, serif_eerm,\n lingo_doc)\n', (15024, 15072), False, 'from nlplingo.tasks.eventrelation.postprocess import add_serif_eerm_to_all_eer_predictions\n'), ((14261, 14424), 'nlplingo.text.text_theory.EventEventRelation', 'EventEventRelation', (['relation_type', 'lingo_em_arg1', 'lingo_em_arg2'], {'serif_sentence': 'sentence', 'serif_event_0': 'event_mention_src', 'serif_event_1': 'event_mention_dst'}), '(relation_type, lingo_em_arg1, lingo_em_arg2,\n serif_sentence=sentence, serif_event_0=event_mention_src, serif_event_1\n =event_mention_dst)\n', (14279, 14424), False, 'from nlplingo.text.text_theory import EventEventRelation\n')] |
def count_missingness(X):
    """
    Count the number of missing values per column.

    Works on plain NumPy arrays; when dask is installed, dask arrays
    are supported as well (the result is computed eagerly).

    Parameters
    ----------
    X : array_like
        Matrix.

    Returns
    -------
    count : ndarray
        Number of missing values per column.
    """
    from numpy import isnan

    # dask is optional: the original imported it unconditionally, which
    # broke the NumPy-only path in environments without dask installed.
    try:
        import dask.array as da
    except ImportError:
        da = None

    if da is not None and isinstance(X, da.Array):
        return da.isnan(X).sum(axis=0).compute()
    return isnan(X).sum(axis=0)
| [
"dask.array.isnan",
"numpy.isnan"
] | [((399, 407), 'numpy.isnan', 'isnan', (['X'], {}), '(X)\n', (404, 407), False, 'from numpy import isnan\n'), ((353, 364), 'dask.array.isnan', 'da.isnan', (['X'], {}), '(X)\n', (361, 364), True, 'import dask.array as da\n')] |
import numpy as np
import matplotlib.pyplot as plt
def step(x):
    """Heaviside step activation: 1.0 where x > 0, else 0.0."""
    y = x > 0
    # np.float was removed in NumPy 1.24; use the concrete float64 dtype.
    return y.astype(np.float64)
def sigmoid(x):
    """Logistic sigmoid activation: 1 / (1 + e^(-x))."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def ReLU(x):
    """Rectified linear unit: elementwise max(0, x)."""
    return np.maximum(x, 0)
def identity(x):
    """Identity activation used at the output layer: returns x unchanged."""
    return x
# Demo inputs for the activation functions above.
a = np.array([-0.1, 0, 0.5, 0.2])
# print(step(a))

# np.float was removed in NumPy 1.24; the builtin float maps to float64.
x = np.arange(-5, 5, dtype=float)
# y = step(x)
y = sigmoid(x)
# plt.plot(x, y)
# plt.ylim(-0.1, 1.1)
# plt.show()

# --- 3-layer neural network ---
def hypothesis(X, W, B):
    """Affine layer: X @ W + B."""
    return B + np.dot(X, W)
def forward():
    """Run the fixed 3-layer example network and return its output vector."""
    X = np.array([1.0, 0.5])
    W1 = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
    B1 = np.array([0.1, 0.2, 0.3])
    W2 = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
    B2 = np.array([0.1, 0.2])
    W3 = np.array([[0.1, 0.3], [0.2, 0.4]])
    B3 = np.array([0.1, 0.2])
    # Two hidden layers with sigmoid, identity at the output.
    a1 = sigmoid(hypothesis(X, W1, B1))
    a2 = sigmoid(hypothesis(a1, W2, B2))
    return identity(hypothesis(a2, W3, B3))
print(forward())
| [
"numpy.maximum",
"numpy.array",
"numpy.exp",
"numpy.arange",
"numpy.dot"
] | [((226, 255), 'numpy.array', 'np.array', (['[-0.1, 0, 0.5, 0.2]'], {}), '([-0.1, 0, 0.5, 0.2])\n', (234, 255), True, 'import numpy as np\n'), ((275, 307), 'numpy.arange', 'np.arange', (['(-5)', '(5)'], {'dtype': 'np.float'}), '(-5, 5, dtype=np.float)\n', (284, 307), True, 'import numpy as np\n'), ((175, 191), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (185, 191), True, 'import numpy as np\n'), ((472, 492), 'numpy.array', 'np.array', (['[1.0, 0.5]'], {}), '([1.0, 0.5])\n', (480, 492), True, 'import numpy as np\n'), ((500, 544), 'numpy.array', 'np.array', (['[[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]]'], {}), '([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])\n', (508, 544), True, 'import numpy as np\n'), ((550, 575), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3]'], {}), '([0.1, 0.2, 0.3])\n', (558, 575), True, 'import numpy as np\n'), ((583, 629), 'numpy.array', 'np.array', (['[[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]]'], {}), '([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])\n', (591, 629), True, 'import numpy as np\n'), ((637, 657), 'numpy.array', 'np.array', (['[0.1, 0.2]'], {}), '([0.1, 0.2])\n', (645, 657), True, 'import numpy as np\n'), ((665, 699), 'numpy.array', 'np.array', (['[[0.1, 0.3], [0.2, 0.4]]'], {}), '([[0.1, 0.3], [0.2, 0.4]])\n', (673, 699), True, 'import numpy as np\n'), ((704, 724), 'numpy.array', 'np.array', (['[0.1, 0.2]'], {}), '([0.1, 0.2])\n', (712, 724), True, 'import numpy as np\n'), ((435, 447), 'numpy.dot', 'np.dot', (['X', 'W'], {}), '(X, W)\n', (441, 447), True, 'import numpy as np\n'), ((138, 148), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (144, 148), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Time : 2022/1/12 10:10 上午
# @Author : 李炳翰
# @File : OLH.py
# @Software: PyCharm
import numpy as np
import xxhash
import random
import sys
class OLH_USER(object):
    """Client side of Optimized Local Hashing (OLH) for local differential
    privacy: hash the private value into [0, g) with a random seed and
    report a (seed, perturbed hash) pair."""

    def __init__(self, epsilon, domain, data):
        super(OLH_USER, self).__init__()
        # privacy budget
        self.epsilon = epsilon
        # value domain of the raw data
        self.domain = domain
        # the user's raw value
        self.data = data
        # perturbed report, filled in by run()
        self.per_data = []
        # e^epsilon, used repeatedly below
        e_epsilon = np.exp(self.epsilon)
        # g = round(e^eps) + 1 is the optimal hash range for OLH
        self.g = int(round(e_epsilon)) + 1
        # perturbation probabilities of the protocol
        self.p = e_epsilon / (e_epsilon + self.g - 1)
        self.q = 1.0 / (e_epsilon + self.g - 1)

    def run(self):
        encode_x = self.encode(self.data)
        self.per_data = self.perturb(encode_x)

    def encode(self, v: int):
        """Hash the raw value into [0, g) with a fresh random seed."""
        seed = random.randint(0, sys.maxsize)
        hash_val = xxhash.xxh32(str(v), seed=seed).intdigest() % self.g
        return seed, hash_val

    def perturb(self, encode_x):
        """With probability p keep the hashed value, otherwise report a
        uniformly random *different* value from [0, g).

        Fixes over the original: iterating ``len(...)`` raised TypeError;
        ``random.randint(0, self.g)`` could emit the out-of-range value g;
        the resampled value could be returned before being assigned.
        """
        seed, hash_val = encode_x
        if np.random.uniform(0, 1) < self.p:
            return seed, hash_val
        per_hash = random.randint(0, self.g - 1)
        # resample until the report differs from the true hash
        while per_hash == hash_val:
            per_hash = random.randint(0, self.g - 1)
        return seed, per_hash

    def get_per_data(self):
        return self.per_data
class OLH_SERVER(object):
    """Aggregator side of Optimized Local Hashing: unbiased frequency
    estimation from the users' (seed, hash) reports."""

    def __init__(self, epsilon: float, per_datalist: list, domain: list):
        super(OLH_SERVER, self).__init__()
        # privacy budget
        self.epsilon = epsilon
        # perturbed (seed, hash) reports collected from the users
        self.per_datalist = per_datalist
        # number of users
        self.n = len(per_datalist)
        # estimated frequencies, filled in by estimate()
        self.es_data = []
        # value domain
        self.domain = domain
        e_epsilon = np.exp(self.epsilon)
        # g = round(e^eps) + 1 is the optimal hash range for OLH
        self.g = int(round(e_epsilon)) + 1
        # protocol probabilities used to de-bias the raw support counts
        self.p = e_epsilon / (e_epsilon + self.g - 1)
        self.q = 1 / self.g

    def estimate(self):
        """Compute an unbiased frequency estimate for every domain value."""
        for value in self.domain:
            support = 0
            for report in self.per_datalist:
                # report[0] is the hash seed, report[1] the reported bucket
                if xxhash.xxh32(str(value), seed=report[0]).intdigest() % self.g == report[1]:
                    support = support + 1
            unbiased = (support - self.n * self.q) / (self.n * (self.p - self.q))
            self.es_data.append(unbiased)

    def get_es_data(self):
        return self.es_data
| [
"numpy.random.uniform",
"random.randint",
"numpy.exp"
] | [((510, 530), 'numpy.exp', 'np.exp', (['self.epsilon'], {}), '(self.epsilon)\n', (516, 530), True, 'import numpy as np\n'), ((905, 935), 'random.randint', 'random.randint', (['(0)', 'sys.maxsize'], {}), '(0, sys.maxsize)\n', (919, 935), False, 'import random\n'), ((1906, 1926), 'numpy.exp', 'np.exp', (['self.epsilon'], {}), '(self.epsilon)\n', (1912, 1926), True, 'import numpy as np\n'), ((1119, 1142), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1136, 1142), True, 'import numpy as np\n'), ((1233, 1258), 'random.randint', 'random.randint', (['(0)', 'self.g'], {}), '(0, self.g)\n', (1247, 1258), False, 'import random\n'), ((1388, 1413), 'random.randint', 'random.randint', (['(0)', 'self.g'], {}), '(0, self.g)\n', (1402, 1413), False, 'import random\n')] |
import os
import multiprocessing
import psutil
import argparse
from time import time, sleep
import numpy as np
import pandas as pd
import rcsv
def _read_numpy(path):
    """CSV reader backed by numpy.loadtxt (float32 cells)."""
    return np.loadtxt(path, delimiter=',', dtype=np.float32)


def _read_rcsv(path):
    """CSV reader backed by rcsv."""
    return rcsv.read(path)


def _read_panda(path):
    """CSV reader backed by pandas."""
    return pd.read_csv(path)


# Name -> reader callable; each takes a file path and parses it.
readers_map = {
    'numpy': _read_numpy,
    'rcsv': _read_rcsv,
    'panda': _read_panda,
}
parser = argparse.ArgumentParser(description='Simple Benchmarking tool',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
helps = {
'tool':"""
Change the benchmarking tool :
memory : Profile all memory usage
timer : Only measure time
""",
'numpy':"""
Profile numpy
""",
'panda':"""
Profile panda
""",
'rcsv':"""
Profile rcsv
""",
'files':"""
Files to be benchmarked
""",
}
parser.add_argument('--numpy', help=helps['numpy'], action='store_true')
parser.add_argument('--rcsv', help=helps['rcsv'], action='store_true')
parser.add_argument('--panda', help=helps['panda'], action='store_true')
parser.add_argument('--tool', help=helps['tool'], type=str, choices=['memory', 'timer'], default='timer')
parser.add_argument('files', help=helps['files'], type=str, nargs='+')
args = parser.parse_args()
readers = []
if args.numpy: readers.append(('numpy', readers_map['numpy']))
if args.rcsv: readers.append(('rcsv', readers_map['rcsv']))
if args.panda: readers.append(('panda', readers_map['panda']))
if not readers: readers = list(readers_map.items())
def profile_memory_usage(process):
    """Start *process* and print its memory stats as CSV rows until it exits.

    Columns: elapsed, rss, vms, shared, text, data, dirty, uss, pss, swap.
    The sampling period adapts: it backs off (samples less often) while RSS
    is stable and speeds up again while RSS is changing.
    """
    start = time()
    process.start()
    pshdl = psutil.Process(process.pid)
    min_sleep = 0.01
    max_sleep = 0.25
    drss_precision_aim = 1.0/100.0
    backoff_margin = 25.0/100.0
    backoff_coeff = 1.5
    backoff_margin_inf = drss_precision_aim * (1.0 - backoff_margin)
    backoff_margin_sup = drss_precision_aim * (1.0 + backoff_margin)
    cur_sleep = min_sleep
    last_rss = 0
    while (process.is_alive()):
        infos = pshdl.memory_full_info()
        print(time() - start, end=',')
        print(infos.rss, infos.vms, infos.shared, sep=',', end=',')
        print(infos.text, infos.data, infos.dirty, sep=',', end=',')
        print(infos.uss, infos.pss, infos.swap, sep=',')
        rss = infos.rss
        if abs(rss - last_rss) < backoff_margin_inf * last_rss:
            cur_sleep *= backoff_coeff
        elif abs(rss - last_rss) > backoff_margin_sup * last_rss:
            cur_sleep /= backoff_coeff
        cur_sleep = min(cur_sleep, max_sleep)
        cur_sleep = max(cur_sleep, min_sleep)
        # BUG FIX: remember the current RSS so the next iteration compares
        # against it; previously last_rss stayed 0, so the adaptive backoff
        # always took the "changed" branch and never slowed down.
        last_rss = rss
        sleep(cur_sleep)
    # Just for safety, probably not needed
    process.join()
def time_process(process, label=None):
    """Start *process*, wait for it to finish, and print '<label> : <seconds>'.

    label -- text printed before the elapsed time; defaults to the module
    global ``name`` bound by the benchmark loop below (kept for backward
    compatibility with the original implicit-global behaviour).
    """
    if label is None:
        label = name  # fall back to the global set by the driver loop
    start = time()
    process.start()
    process.join()
    end = time()
    print('%s : %.4f' % (label, end - start))
# Benchmark every requested file with every selected reader.  NOTE: the loop
# variable ``name`` is a module global -- time_process() reads it to label
# its output, so it must not be renamed.
for path in args.files:
    print('#', path)
    for name, reader in readers:
        print('## ', name)
        # Run the reader in a child process so its memory usage can be
        # observed from outside and is fully released between runs.
        process = multiprocessing.Process(target=reader, args=(path,))
        if args.tool == 'memory':
            profile_memory_usage(process)
        elif args.tool == 'timer':
            time_process(process)
| [
"psutil.Process",
"argparse.ArgumentParser",
"rcsv.read",
"pandas.read_csv",
"time.time",
"time.sleep",
"numpy.loadtxt",
"multiprocessing.Process"
] | [((339, 462), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Simple Benchmarking tool"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Simple Benchmarking tool',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (362, 462), False, 'import argparse\n'), ((1504, 1510), 'time.time', 'time', ([], {}), '()\n', (1508, 1510), False, 'from time import time, sleep\n'), ((1544, 1571), 'psutil.Process', 'psutil.Process', (['process.pid'], {}), '(process.pid)\n', (1558, 1571), False, 'import psutil\n'), ((2655, 2661), 'time.time', 'time', ([], {}), '()\n', (2659, 2661), False, 'from time import time, sleep\n'), ((2711, 2717), 'time.time', 'time', ([], {}), '()\n', (2715, 2717), False, 'from time import time, sleep\n'), ((188, 237), 'numpy.loadtxt', 'np.loadtxt', (['path'], {'delimiter': '""","""', 'dtype': 'np.float32'}), "(path, delimiter=',', dtype=np.float32)\n", (198, 237), True, 'import numpy as np\n'), ((265, 280), 'rcsv.read', 'rcsv.read', (['path'], {}), '(path)\n', (274, 280), False, 'import rcsv\n'), ((308, 325), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (319, 325), True, 'import pandas as pd\n'), ((2535, 2551), 'time.sleep', 'sleep', (['cur_sleep'], {}), '(cur_sleep)\n', (2540, 2551), False, 'from time import time, sleep\n'), ((2892, 2944), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'reader', 'args': '(path,)'}), '(target=reader, args=(path,))\n', (2915, 2944), False, 'import multiprocessing\n'), ((1980, 1986), 'time.time', 'time', ([], {}), '()\n', (1984, 1986), False, 'from time import time, sleep\n')] |
import datetime
import json
import logging
import os
from pprint import pprint
import sys
import time
from indicatorcalc_redux import IndicatorCalc
import numpy as np
from pymarketcap import Pymarketcap
import requests
# Module-wide logging: install a default handler and log at DEBUG level.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Path to the shared TeslaBot configuration file (relative to this module).
config_path = '../../TeslaBot/config/config.ini'
class CryptoBacktester:
    """Backtesting helpers built on CoinMarketCap (via pymarketcap) and the
    Cryptowat.ch public API."""

    # Seconds to wait after each Cryptowat.ch request.  The original code
    # slept on an undefined global ``request_delay``, raising NameError.
    REQUEST_DELAY = 1.0

    def __init__(self, allowed_exchanges):
        # Exchanges (lowercase names) whose markets we are willing to use.
        self.allowed_exchanges = allowed_exchanges
        self.cmc = Pymarketcap()
        self.indicator_calc = IndicatorCalc()

    def _collect_ranked(self, products, collected, failed, label):
        """Append (product, exchange-market) pairs traded on an allowed
        exchange to *collected*; products whose lookup fails go to *failed*."""
        for mkt in products:
            logger.debug('[' + label + '] mkt: ' + str(mkt))
            try:
                markets = self.cmc.markets(mkt['website_slug'])
                for exch in markets['markets']:
                    if exch['source'].lower() in self.allowed_exchanges:
                        collected.append((mkt, exch))
            except Exception as e:
                logger.exception(e)
                failed.append(mkt)

    def filter_markets(self):
        """Return CoinMarketCap gainers/losers restricted to allowed exchanges.

        Returns (ranks_filtered, failed_products): both dicts keyed by
        'gainers'/'losers' and time bin ('1h', '24h', '7d').
        """
        ranks_filtered = {'gainers': {'1h': [], '24h': [], '7d': []},
                          'losers': {'1h': [], '24h': [], '7d': []}}
        failed_products = {'gainers': {'1h': [], '24h': [], '7d': []},
                           'losers': {'1h': [], '24h': [], '7d': []}}
        time_bins = ['1h', '24h', '7d']
        ranks = self.cmc.ranks()
        for bin in time_bins:
            logger.debug('bin: ' + str(bin))
            # Same collection logic for both ranking directions.
            self._collect_ranked(ranks['gainers'][bin],
                                 ranks_filtered['gainers'][bin],
                                 failed_products['gainers'][bin], 'gainers')
            self._collect_ranked(ranks['losers'][bin],
                                 ranks_filtered['losers'][bin],
                                 failed_products['losers'][bin], 'losers')
        return ranks_filtered, failed_products

    def get_best_pairs(self, ranked_products):
        """Look up a ticker quote for every ranked (product, market) pair.

        ranked_products -- output of filter_markets().
        Returns {'success': bool, 'result': {rank_type: {time_bin: {symbol:
        {quote_currency: quote}}}}}.
        """
        best_pairs = {'success': True, 'result': {}}
        try:
            for rank_type in ranked_products:
                logger.debug('rank_type: ' + rank_type)
                best_pairs['result'][rank_type] = {}
                for time_bin in ranked_products[rank_type]:
                    logger.debug('time_bin: ' + time_bin)
                    bin_bucket = best_pairs['result'][rank_type][time_bin] = {}
                    for product in ranked_products[rank_type][time_bin]:
                        logger.debug('product: ' + str(product))
                        symbol = product[0]['symbol']
                        quote_ccy = product[1]['pair'].split('/')[1]
                        symbol_bucket = bin_bucket.setdefault(symbol, {})
                        if quote_ccy not in symbol_bucket:
                            ticker = self.cmc.ticker(currency=product[0]['website_slug'],
                                                     convert=quote_ccy)
                            symbol_bucket[quote_ccy] = ticker['data']['quotes'][quote_ccy]
                            # stay under the CoinMarketCap rate limit
                            time.sleep(2)
        except Exception as e:
            logger.exception('Exception raised in get_best_pairs().')
            logger.exception(e)
            best_pairs['success'] = False
        return best_pairs

    def get_candles(self, exchange, market, interval=0):
        """Fetch OHLC candles for *market* on *exchange* from Cryptowat.ch.

        interval -- candle period in seconds; 0 requests every period the
        API offers.  Returns {'success': bool, 'result': {}} plus one entry
        per period, each holding numpy arrays keyed by
        close_time/open/high/low/close/volume.
        """
        candles = {'success': True, 'result': {}}
        logger.debug('exchange: ' + exchange)
        logger.debug('market: ' + market)
        # interval is an int; the original str concatenation raised TypeError.
        logger.debug('interval: ' + str(interval))
        valid_intervals = [60, 180, 300, 900, 1800, 3600, 7200,
                           14400, 21600, 43200, 86400, 259200, 604800]
        endpoint = '/markets/' + exchange.lower() + '/' + market.lower() + '/ohlc'
        url = 'https://api.cryptowat.ch' + endpoint
        url_params = {}
        if interval != 0:
            # Validate only explicit intervals; the original also checked the
            # default interval=0 against valid_intervals and always exited.
            if interval not in valid_intervals:
                logger.error('Invalid interval passed to get_candles(). Exiting.')
                sys.exit(1)
            url_params['periods'] = str(int(round(float(interval), 0)))
        try:
            r = requests.get(url, params=url_params)
            # Throttle requests between API calls.
            time.sleep(self.REQUEST_DELAY)
            results = r.json()
            if 'result' not in results or 'allowance' not in results:
                logger.debug('[get_candles()] Failed to acquire valid candles.')
                candles['success'] = False
            if 'error' in results:
                logger.error('Error while calling Cryptowat.ch API.')
                logger.error(results['error'])
                if results['error'] == 'Out of allowance':
                    allowance_remaining = 0
            else:
                allowance_remaining = results['allowance']['remaining']
            allowance_cost = results['allowance']['cost']
            #allowance_avg_cost = average_api_cost(allowance_cost)
            if candles['success'] == True:
                for time_bin in results['result']:
                    data = results['result'][time_bin]
                    np_historical = np.array(data, dtype='f8')
                    candles[time_bin] = {
                        'close_time': np_historical[:, 0],
                        'open': np_historical[:, 1],
                        'high': np_historical[:, 2],
                        'low': np_historical[:, 3],
                        'close': np_historical[:, 4],
                        'volume': np_historical[:, 5],
                    }
        except requests.exceptions.ConnectionError as e:
            # Must precede RequestException (its base class); the original
            # order made this handler unreachable.
            logger.error('ConnectionError while retrieving candles.')
            logger.error(e)
            candles['success'] = False
        except requests.exceptions.RequestException as e:
            logger.exception('RequestException while retrieving candles.')
            logger.exception(e)
            candles['success'] = False
        except json.JSONDecodeError as e:
            logger.error('JSONDecodeError while retrieving candles.')
            logger.error(e)
            candles['success'] = False
        except Exception as e:
            logger.exception('Uncaught exception while retrieving candles.')
            logger.exception(e)
            candles['success'] = False
        return candles
if __name__ == '__main__':
    try:
        # Exchanges we are willing to pull markets from.
        allowed_exchanges = ['binance', 'bittrex', 'gdax', 'poloniex']
        crypto_backtester = CryptoBacktester(allowed_exchanges)
        # The triple-quoted blocks below are deliberately disabled code paths
        # (live market filtering and ranks dump), kept for reference.
        """
        ranks_filtered, failed_products = crypto_backtester.filter_markets()
        if not os.path.exists('json/'):
            logger.info('Creating json directory.')
            os.mkdir('json/')
        """
        # Timestamp used to name the output json files.
        dt_current = datetime.datetime.now().strftime('%m%d%Y-%H%M%S')
        """
        logger.info('Dumping results to json file.')
        ranks_json_file = 'json/' + dt_current + '_ranks.json'
        with open(ranks_json_file, 'w', encoding='utf-8') as file:
            json.dump(ranks_filtered, file, indent=4, sort_keys=True, ensure_ascii=False)
        logger.info('Gathering candles for ranked products from selected exchanges.')
        for rank_type in ranks_filtered:
            for time_bin in ranks_filtered[rank_type]:
                pass
        """
        # Load pre-saved ranked products instead of querying the live API.
        test_json_file = 'test.json'
        with open(test_json_file, 'r', encoding='utf-8') as file:
            data = json.load(file)
        best_pairs = crypto_backtester.get_best_pairs(ranked_products=data)
        #print('BEST PAIRS:')
        #pprint(best_pairs)
        logger.info('Dumping best pairs data to json file.')
        pairs_json_file = 'json/' + dt_current + '_pairs.json'
        with open(pairs_json_file, 'w', encoding='utf-8') as file:
            json.dump(best_pairs, file, indent=4, sort_keys=True, ensure_ascii=False)
        logger.info('Done.')
    except Exception as e:
        logger.exception(e)
    except KeyboardInterrupt:
        # KeyboardInterrupt is a BaseException, so it is not swallowed by
        # the Exception handler above.
        logger.info('Exit signal received.')
    finally:
        logger.info('Exiting.')
| [
"json.dump",
"pymarketcap.Pymarketcap",
"json.load",
"logging.basicConfig",
"indicatorcalc_redux.IndicatorCalc",
"time.sleep",
"numpy.array",
"requests.get",
"sys.exit",
"datetime.datetime.now",
"logging.getLogger"
] | [((221, 242), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (240, 242), False, 'import logging\n'), ((252, 279), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (269, 279), False, 'import logging\n'), ((501, 514), 'pymarketcap.Pymarketcap', 'Pymarketcap', ([], {}), '()\n', (512, 514), False, 'from pymarketcap import Pymarketcap\n'), ((546, 561), 'indicatorcalc_redux.IndicatorCalc', 'IndicatorCalc', ([], {}), '()\n', (559, 561), False, 'from indicatorcalc_redux import IndicatorCalc\n'), ((4818, 4854), 'requests.get', 'requests.get', (['url'], {'params': 'url_params'}), '(url, params=url_params)\n', (4830, 4854), False, 'import requests\n'), ((4868, 4893), 'time.sleep', 'time.sleep', (['request_delay'], {}), '(request_delay)\n', (4878, 4893), False, 'import time\n'), ((8432, 8447), 'json.load', 'json.load', (['file'], {}), '(file)\n', (8441, 8447), False, 'import json\n'), ((8790, 8863), 'json.dump', 'json.dump', (['best_pairs', 'file'], {'indent': '(4)', 'sort_keys': '(True)', 'ensure_ascii': '(False)'}), '(best_pairs, file, indent=4, sort_keys=True, ensure_ascii=False)\n', (8799, 8863), False, 'import json\n'), ((4776, 4787), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4784, 4787), False, 'import sys\n'), ((7753, 7776), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7774, 7776), False, 'import datetime\n'), ((5827, 5853), 'numpy.array', 'np.array', (['data'], {'dtype': '"""f8"""'}), "(data, dtype='f8')\n", (5835, 5853), True, 'import numpy as np\n'), ((3652, 3665), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3662, 3665), False, 'import time\n')] |
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
def open_image(fname, convert_to_rgb=False):
    """Read an image from disk (BGR channel order, as OpenCV loads it).

    convert_to_rgb -- convert a color image from BGR to RGB.
    Grayscale images are returned unchanged.
    Raises FileNotFoundError if the file cannot be read.
    """
    im = cv2.imread(fname)
    if im is None:
        # cv2.imread silently returns None for missing/unreadable files,
        # which previously surfaced later as a confusing AttributeError.
        raise FileNotFoundError('Cannot read image: %s' % fname)
    if len(im.shape) == 2:
        return im
    if not convert_to_rgb:
        return im
    return cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
def open_and_undistort_image(fname, cm, dc, convert_to_rgb=False):
    """Open an image and undistort it with camera matrix *cm* and
    distortion coefficients *dc*."""
    raw = open_image(fname, convert_to_rgb)
    return cv2.undistort(raw, cm, dc)
def get_im_wh(im):
    """Return the image size as a (width, height) tuple."""
    rows, cols = im.shape[:2]
    return cols, rows
def e2h(x):
    """Euclidean -> homogeneous: append a trailing 1 to a 2-vector."""
    return np.array([x[0], x[1], 1.0])
def h2e(x):
    """Homogeneous -> Euclidean: divide the first two coordinates by the last."""
    v = np.array(x)
    return v[:2] / v[2]
def grayscale(im, flag=cv2.COLOR_BGR2GRAY):
    """Convert *im* to grayscale (default conversion: from BGR)."""
    converted = cv2.cvtColor(im, flag)
    return converted
def canny(img, low_threshold, high_threshold):
    """Canny edge detection with the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Gaussian blur with a square kernel_size x kernel_size kernel."""
    ksize = (kernel_size, kernel_size)
    return cv2.GaussianBlur(img, ksize, 0)
def sobel_x(im):
    """First-order Sobel derivative along x (64-bit float output)."""
    return cv2.Sobel(im, cv2.CV_64F, 1, 0)
def sobel_y(im):
    """First-order Sobel derivative along y (64-bit float output)."""
    return cv2.Sobel(im, cv2.CV_64F, 0, 1)
def sobel_abs(sobel):
    """Absolute Sobel response rescaled to the [0, 255] range."""
    magnitude = np.abs(sobel)
    return scale_image_255(magnitude)
def sobel_magnitude(sobelx, sobely):
    """Gradient magnitude: sqrt(gx^2 + gy^2), elementwise."""
    return np.sqrt(sobelx ** 2 + sobely ** 2)
def sobel_direction(sobelx, sobely):
    """Absolute gradient direction: arctan(|gy| / |gx|), in [0, pi/2]."""
    ay = np.abs(sobely)
    ax = np.abs(sobelx)
    return np.arctan2(ay, ax)
def get_rectangle_corners_from_cbc(cbc, nx, ny):
    '''
    Get 4 points eclosing the chessboard region in an image
    cbc -- a (n x 2) NumPy array with each chessboard corner as a row
    nx, ny -- number of corners in x and y direction
    Returns a (4 x 2) matrix with each point as a row:
        [top left    ]
        [top right   ]
        [bottom right]
        [bottom left ]
    '''
    top_left = cbc[0, :]
    top_right = cbc[nx - 1, :]
    bottom_right = cbc[-1, :]
    bottom_left = cbc[nx * ny - nx, :]
    return np.array([top_left, top_right, bottom_right, bottom_left],
                    dtype=np.float32)
def get_rectangle_corners_in_image(im_sz, offset_x, offset_y):
    '''
    Get 4 points describing a rectangle in the image, offsetted
    by the given amounts from the edges.
    im_sz -- image size (cols, rows)
    offset_x, offset_y -- offsets in pixels from the edges of the image
    Returns a (4 x 2) matrix with each point as a row:
        [top left    ]
        [top right   ]
        [bottom right]
        [bottom left ]
    '''
    w, h = im_sz
    corners = [
        (offset_x, offset_y),
        (w - offset_x, offset_y),
        (w - offset_x, h - offset_y),
        (offset_x, h - offset_y),
    ]
    return np.array(corners, dtype=np.float32)
def scale_image_255(im):
    '''
    Scale an image to pixel range [0, 255] (uint8 output).
    An all-zero input yields an all-zero image instead of dividing by zero.
    '''
    peak = np.max(im)
    if peak == 0:
        # Guard: the original divided by max(im) unconditionally, producing
        # NaNs (and an undefined uint8 cast) for all-zero inputs.
        return np.zeros_like(im, dtype=np.uint8)
    return np.uint8(255 * (im / peak))
def mask_threashold_range(im, thresh_min, thresh_max):
    '''
    Return a binary (uint8) mask where pixel intensities of the
    original image lie within [thresh_min, thresh_max)
    '''
    within = np.logical_and(im >= thresh_min, im < thresh_max)
    return np.uint8(within)
def warp(im, M, canvas_sz):
    '''
    Apply the perspective transformation M to *im*, producing an output
    image of size canvas_sz (bilinear interpolation).
    '''
    warped = cv2.warpPerspective(im, M, canvas_sz, flags=cv2.INTER_LINEAR)
    return warped
def convert_to_HLS(im):
    '''
    Convert an RGB image to the HLS color space
    '''
    hls = cv2.cvtColor(im, cv2.COLOR_RGB2HLS)
    return hls
def weighted_sum_images(images, weights):
    '''
    Weighted sum of two or more images; at least two weights
    must be non-zero.
    '''
    assert len(weights) == len(images)
    nz = np.nonzero(weights)[0]
    if len(nz) < 2:
        raise Exception('At least 2 non-zero weights are required')
    i0, i1 = nz[0], nz[1]
    acc = cv2.addWeighted(images[i0], weights[i0], images[i1], weights[i1], 0)
    # fold the remaining non-zero-weight images into the accumulator
    for idx in nz[2:]:
        acc = cv2.addWeighted(acc, 1., images[idx], weights[idx], 0)
    return acc
def bitwise_or(images):
    '''
    Fold a non-empty list of images with bitwise OR
    '''
    assert len(images) > 0
    acc = images[0]
    for im in images[1:]:
        acc = cv2.bitwise_or(acc, im)
    return acc
def weighted_HLS(H, L, S, weights):
    """Weighted sum of the three HLS channel images."""
    channels = [H, L, S]
    return weighted_sum_images(channels, weights)
def add_contrast(im, gain):
    '''
    Add contrast to an image with the given gain; the result is
    rescaled back to the [0, 255] range.
    '''
    boosted = gain * im
    return scale_image_255(boosted)
def sobel_combo(im):
    '''
    Compute magnitude and direction of the Sobel filter applied to
    the supplied image, both rescaled to [0, 255].
    '''
    gx = sobel_x(im)
    gy = sobel_y(im)
    mag = sobel_magnitude(gx, gy)
    ang = sobel_direction(gx, gy)
    return scale_image_255(mag), scale_image_255(ang)
def scaled_sobel_x(im):
    """Sobel x response rescaled to the [0, 255] range."""
    response = sobel_x(im)
    return scale_image_255(response)
def morphological_close(im, kernel=(3, 3)):
    """Morphological closing of *im*.

    kernel -- either a (w, h) size tuple (converted to an all-ones
    structuring element) or an explicit structuring-element array.
    cv2.morphologyEx expects an ndarray kernel; the original passed the
    raw tuple, which is not a valid structuring element.
    """
    if isinstance(kernel, tuple):
        kernel = np.ones(kernel, dtype=np.uint8)
    return cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)
def get_hls_channels(im):
    """Split an RGB image into its H, L and S channel images."""
    hls = convert_to_HLS(im)
    return hls[:, :, 0], hls[:, :, 1], hls[:, :, 2]
def gray(im):
    '''
    Convert a BGR image to grayscale
    '''
    grayscaled = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    return grayscaled
def gather_thresholded_images(*images):
    '''
    Collect thresholded images into a tuple (pipeline-development helper)
    '''
    return images
def lane_cells(im, nx, ny, threshold=20):
    '''
    Divides the supplied image into nx*ny equally-sized subimages (cells),
    computes the sum of each cell's pixel intensities, and returns the
    indices of the cell with the largest sum per row, provided that sum
    exceeds the threshold.
    The output array is of shape (n x 2), a pair of cell indices per row.
    '''
    cells = divide_image_to_cells(im, nx, ny)
    hits = []
    for row in range(ny):
        rowcells = cells[row * nx:row * nx + nx]
        sums = np.array([np.sum(cell) for cell in rowcells])
        best = np.argmax(sums)
        if sums[best] > threshold:
            hits.append((row, best))
    return np.array(hits)
def lane_cells_real_coords(lanecells, im, nx, ny):
    '''
    Map the cell indices returned by lane_cells to the (x, y)
    coordinates of their cell centers.
    '''
    rows, cols = im.shape[:2]
    cell_sz_x = cols // nx
    cell_sz_y = rows // ny
    # Use a float array: np.zeros_like(lanecells) inherited the integer
    # dtype of the index array, silently truncating the .5 center offsets.
    points = np.zeros(lanecells.shape, dtype=np.float64)
    for i in range(len(lanecells)):
        idx_row, idx_col = lanecells[i, :]
        x = idx_col * cell_sz_x + cell_sz_x / 2
        y = idx_row * cell_sz_y + cell_sz_y / 2
        points[i, :] = (x, y)
    return points
def divide_image_to_cells(im, nx, ny):
    '''
    Divide the supplied image into nx*ny equally-sized subimages (cells).
    The image shape has to be a multiple of nx, ny.
    Returns a list of cells, sliced from the original image row-by-row.
    '''
    rows, cols = im.shape[:2]
    assert rows % ny == 0
    assert cols % nx == 0
    step_x = cols // nx
    step_y = rows // ny
    cells = []
    for j in range(ny):
        y0 = j * step_y
        for i in range(nx):
            x0 = i * step_x
            cells.append(im[y0:y0 + step_y, x0:x0 + step_x])
    return cells
def show_cells(cells, nx, ny):
    """Display the cells in an nx-by-ny grid of matplotlib subplots."""
    for idx, cell in enumerate(cells):
        plt.subplot(ny, nx, idx + 1)
        plt.axis('off')
        plt.imshow(cell)
def split_image_lr(im):
    """Split an image into left and right halves along the column axis."""
    middle = im.shape[1] // 2
    return im[:, :middle], im[:, middle:]
def split_image_lr_and_show(im):
    """Split an image into halves and show them side by side."""
    left, right = split_image_lr(im)
    plt.figure()
    for pos, half in ((1, left), (2, right)):
        plt.subplot(1, 2, pos)
        plt.axis('off')
        plt.imshow(half)
def get_polynomial_2(coefs):
    '''
    Build a callable evaluating the second-degree polynomial whose
    coefficients ``(a, b, c)`` are given highest-order first.
    '''
    a, b, c = coefs
    def evaluate(y):
        # Same expression form as numpy's descending-order polyfit output.
        return a * (y ** 2) + b * y + c
    return evaluate
def get_target_cells_coordinates(im, nx=50, ny=100, lanecell_threshold=70):
    '''
    Locate high-intensity cells (see ``lane_cells``) separately in the
    left and right image halves and return their pixel coordinates.
    Right-half x coordinates are shifted back into full-image space.
    '''
    left_half, right_half = split_image_lr(im)
    per_half = []
    for half in (left_half, right_half):
        cells = lane_cells(half, nx, ny, lanecell_threshold)
        per_half.append(lane_cells_real_coords(cells, half, nx, ny))
    coords_left, coords_right = per_half
    coords_right[:, 0] += left_half.shape[1]
    return coords_left, coords_right
def fit_lane_polynomials(coords_left, coords_right):
    '''
    Fit x = p(y) second-degree polynomials to the left and right lane
    pixel coordinates (each array has x in column 0 and y in column 1).
    '''
    def fit(points):
        return np.polyfit(points[:, 1], points[:, 0], 2)
    return fit(coords_left), fit(coords_right)
def get_lane_polynomials_points(warped_im, p_coefs_left, p_coefs_right):
    '''
    Sample both lane polynomials over the warped image height and
    return the sampled y values plus the matching left/right x values.
    '''
    ys = np.linspace(0, warped_im.shape[0])
    xs_left = get_polynomial_2(p_coefs_left)(ys)
    xs_right = get_polynomial_2(p_coefs_right)(ys)
    return ys, xs_left, xs_right
def lanefill(image, warped, Minv, poly_y, poly_x_left, poly_x_right):
    '''
    Render the detected lane by reprojecting the bird's eye view image data
    to the original image of the road

    :param image: original (unwarped) road frame the lane is blended onto
    :param warped: bird's-eye-view image (used only for its shape)
    :param Minv: inverse perspective matrix mapping warped -> original view
    :param poly_y: sampled y values of both lane polynomials
    :param poly_x_left: left-lane x values at poly_y
    :param poly_x_right: right-lane x values at poly_y
    :return: the original image with the lane area blended in green
    '''
    # Blank canvas in the warped (bird's-eye) space to draw the lane polygon on.
    canvas = np.zeros_like(warped).astype(np.uint8)
    # Left boundary points top-to-bottom ...
    pts_left = np.array([np.transpose(np.vstack([poly_x_left, poly_y]))])
    # ... and the right boundary reversed (flipud), so the concatenated
    # points trace the polygon outline in one consistent winding order
    # for cv2.fillPoly.
    pts_right = np.array([np.flipud(np.transpose(np.vstack([poly_x_right, poly_y])))])
    pts = np.hstack((pts_left, pts_right)).astype(np.int32)
    cv2.fillPoly(canvas, [pts], (0, 255, 0))
    # Warp the filled polygon back into the original camera perspective.
    newwarp = cv2.warpPerspective(canvas, Minv, (image.shape[1], image.shape[0]))
    # Alpha-blend the reprojected lane onto the original frame.
    result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)
    return result
def curvature_poly2(coefs, at_point):
    '''
    Measure curvature of a second-order polynomial at the given point,
    using the radius formula (1 + f'(y)^2)^(3/2) / |f''(y)|.
    '''
    a, b, _ = coefs
    slope = 2 * a * at_point + b
    return (1 + slope ** 2) ** 1.5 / np.abs(2 * a)
def curvature_poly2_in_meters(coefs, at_point, meters_in_pix_x, meters_in_pix_y):
    '''
    Curvature of a polynomial fitted in pixel space, evaluated at
    ``at_point`` (meters), after rescaling the pixel-space coefficients
    into meter space.
    '''
    a, b, _ = coefs
    a_m = meters_in_pix_x / (meters_in_pix_y ** 2) * a
    b_m = (meters_in_pix_x / meters_in_pix_y) * b
    slope = 2 * a_m * at_point + b_m
    return (1 + slope ** 2) ** 1.5 / np.abs(2 * a_m)
def pixel_points_to_meters(points, meters_in_pix_x, meters_in_pix_y):
    '''Scale (x, y) pixel coordinates into meters; result is float32.'''
    factors = np.array([meters_in_pix_x, meters_in_pix_y])
    return (points * factors).astype(np.float32)
def lane_curvature(coefs_left, coefs_right, meters_in_pix_x, meters_in_pix_y, canvas_sz):
    '''
    Estimate lane curvature in meters as the mean of the left and right
    line curvatures, both evaluated at the bottom of the warped image.
    '''
    eval_y = canvas_sz[1] * meters_in_pix_y
    curvatures = [
        curvature_poly2_in_meters(c, eval_y, meters_in_pix_x, meters_in_pix_y)
        for c in (coefs_left, coefs_right)
    ]
    return 0.5 * (curvatures[0] + curvatures[1])
def lane_center(xleft, xright):
    '''Return the x coordinate midway between the two lane lines.'''
    return xleft + 0.5 * (xright - xleft)
def lane_offset_from_center(
        warped_im,
        p_coefs_left,
        p_coefs_right,
        meters_in_pix_x
):
    '''
    Offset (meters) of the camera — assumed to sit at the image center —
    from the lane center, measured at the bottom row of the warped
    image.  Also returns the lane-center x position in pixels.
    '''
    bottom_y = warped_im.shape[0]
    x_left = get_polynomial_2(p_coefs_left)(bottom_y)
    x_right = get_polynomial_2(p_coefs_right)(bottom_y)
    pix_center = lane_center(x_left, x_right)
    pix_offset = warped_im.shape[1] / 2. - pix_center
    return pix_offset * meters_in_pix_x, pix_center
def render_lane(im, im_warped, p_coefs_left, p_coefs_right, M_inv):
    '''Draw the lane described by the two polynomials onto ``im``.'''
    ys, xs_left, xs_right = get_lane_polynomials_points(
        im_warped, p_coefs_left, p_coefs_right
    )
    return lanefill(im, im_warped, M_inv, ys, xs_left, xs_right)
def put_text_on_top(im, text, fontscale=2, pos=(10, 60)):
    # Draw white anti-aliased text onto the image in place (top-left by default).
    cv2.putText(im, text, pos, cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255, 255, 255), 2, cv2.LINE_AA)
def pixel_to_meter_ratios_custom():
    '''
    Rough pixel-to-meter conversion factors (x and y direction) for a
    warped image of size (500, 1500), derived from highway geometry:
    a 3.7 m lane width and a 3.0 m lane-dash length.
    '''
    lane_width_px, dash_len_px = 270., 180.
    lane_width_m, dash_len_m = 3.7, 3.0
    return lane_width_m / lane_width_px, dash_len_m / dash_len_px
def define_lanes_region(n_rows, n_cols, x_from=450, x_to=518, y_lim=317, left_offset=50, right_offset=0):
    '''
    Trapezoidal region-of-interest vertices covering the lane area,
    shaped (1, 4, 2) int32 for use with ``cv2.fillPoly``.
    '''
    corners = [
        (x_from, y_lim),
        (x_to, y_lim),
        (n_cols - right_offset, n_rows),
        (left_offset, n_rows),
    ]
    return np.array([corners], dtype=np.int32)
def apply_region_mask(image, region_vertices):
    '''Zero out everything outside the polygon given by ``region_vertices``.'''
    region = np.zeros_like(image)
    cv2.fillPoly(region, region_vertices, 255)
    return cv2.bitwise_and(image, region)
def find_hough_lines(im_masked, rho, theta, threshold, min_line_length, max_line_gap):
    # Probabilistic Hough transform; one (x1, y1, x2, y2) row per detected segment.
    lines = cv2.HoughLinesP(im_masked, rho, theta, threshold, np.array([]), minLineLength=min_line_length, maxLineGap=max_line_gap)
    # OpenCV returns shape (n, 1, 4); collapse the singleton middle axis.
    return lines.reshape(lines.shape[0], 4)
def compute_line_tangents(lines):
    '''
    Slope (dy/dx) of each (x1, y1, x2, y2) segment.  Vertical segments
    divide by zero, matching the original behavior.
    '''
    dy = lines[:, 3] - lines[:, 1]
    dx = lines[:, 2] - lines[:, 0]
    return dy / dx
def line_vector_constant_y(val):
    '''Homogeneous line vector (a, b, c) for the horizontal line y = val.'''
    return np.array((0, 1, -val))
def line_vector_from_opencv_points(line):
    '''Homogeneous line through two (x, y) endpoints, via a cross product.'''
    x1, y1, x2, y2 = line
    p = [x1, y1, 1]
    q = [x2, y2, 1]
    return np.cross(p, q)
def extend_lane_lines(lines, y_const_0, y_const_1):
    '''
    Stretch every segment so that its endpoints lie on the horizontal
    lines y = y_const_0 and y = y_const_1, intersecting in homogeneous
    coordinates (``h2e`` converts back to Euclidean points).
    '''
    top_line = line_vector_constant_y(y_const_0)
    bottom_line = line_vector_constant_y(y_const_1)
    extended = np.zeros((len(lines), 4), dtype=np.int32)
    for idx in range(len(lines)):
        seg = line_vector_from_opencv_points(lines[idx, :])
        extended[idx, :2] = h2e(np.cross(seg, top_line))
        extended[idx, 2:] = h2e(np.cross(seg, bottom_line))
    return extended
def extend_lane_lines_grouped_by_slopes(lines, slopes, y_const_0, y_const_1, abs_slope_threshold=0.2):
    '''
    Drop near-horizontal segments, split the rest into left (negative
    slope) and right (positive slope) groups, and extend each group to
    the two given y levels.
    '''
    steep_enough = np.abs(slopes) > abs_slope_threshold
    left_mask = np.logical_and(slopes < 0, steep_enough)
    right_mask = np.logical_and(slopes > 0, steep_enough)
    lines_left = extend_lane_lines(lines[left_mask], y_const_0, y_const_1)
    lines_right = extend_lane_lines(lines[right_mask], y_const_0, y_const_1)
    return lines_left, lines_right
def average_lines_endpoints(lines):
    '''Mean of the segment endpoints across all rows, truncated to int32.'''
    mean_endpoints = lines.mean(axis=0)
    return np.array(mean_endpoints, dtype=np.int32)
def move_line(line, offset_x=0., offset_y=0.):
    '''Translate a (x1, y1, x2, y2) segment by the given x/y offsets.'''
    x1, y1, x2, y2 = line
    shifted = [x1 + offset_x, y1 + offset_y, x2 + offset_x, y2 + offset_y]
    return np.array(shifted)
def lines_distances_to_bottom(lines, n_rows):
    '''
    Distance (in rows) from the bottom edge of an image with ``n_rows``
    rows to the upper endpoint of each segment.
    '''
    distances = np.zeros(len(lines))
    for idx, segment in enumerate(lines):
        top_y = segment[1] if segment[1] < segment[3] else segment[3]
        distances[idx] = n_rows - top_y
    return distances
def split_distances_to_bottom(distances, slopes):
    '''Partition distances into (left, right) groups by the slope sign.'''
    left = distances[slopes < 0]
    right = distances[slopes > 0]
    return left, right
def weighted_average_lines_endpoints(lines, distances_to_bottom):
    '''
    Average line whose x endpoints are weighted by inverse distance to
    the image bottom (closer segments count more).  The y endpoints are
    copied from the first line — assumes all lines were pre-extended to
    the same pair of y levels.
    '''
    weights = 1. / distances_to_bottom
    total_weight = weights.sum()
    avg_x1 = (lines[:, 0] * weights).sum() / total_weight
    avg_x2 = (lines[:, 2] * weights).sum() / total_weight
    return np.array([avg_x1, lines[0, 1], avg_x2, lines[0, 3]], dtype=np.int32)
def weighted_img(im, initial_im, alpha=0.8, beta=1., gamma=0.):
    '''
    Alpha-blend two images of the same shape:
    dst = initial_im*alpha + im*beta + gamma
    '''
    return cv2.addWeighted(initial_im, alpha, im, beta, gamma)
def draw_line(canvas_im, line, color=[255, 0, 0], thickness=2):
    # Draw one (x1, y1, x2, y2) segment onto the image in place.
    # NOTE(review): mutable default `color` is shared across calls — harmless
    # here only because it is never mutated.
    x1, y1, x2, y2 = line
    cv2.line(canvas_im, (x1, y1), (x2, y2), color, thickness)
def draw_lines_on_image(canvas_im, lines, color=[255, 0, 0], thickness=2):
    '''Draw every row of ``lines`` as one segment onto the image in place.'''
    for segment in lines:
        draw_line(canvas_im, segment, color, thickness)
def plot_line(line, **kvargs):
    # Plot an (x1, y1, x2, y2) segment as a solid matplotlib line.
    xs = [line[0], line[2]]
    ys = [line[1], line[3]]
    plt.plot(xs, ys, '-', **kvargs)
def plot_homogeneous_line_vector(vec, x_from, x_to, **kvargs):
    '''Plot the line a*x + b*y + c = 0 over [x_from, x_to) at unit x steps.'''
    a, b, c = vec
    xs = np.arange(x_from, x_to)
    # Solve a*x + b*y + c = 0 for y (vectorized over xs).
    ys = (-a * xs - c) / b
    plt.plot(xs, ys, **kvargs)
def visualize_test_images(images, proc_func=lambda im : im):
    # Show images in a 3x2 subplot grid, each optionally transformed by proc_func.
    plt.figure(figsize=(16, 16))
    for i, im in enumerate(images):
        plt.subplot(3, 2, i+1)
        plt.imshow(proc_func(im))
def imshow_bgr(im, axis_setting='off'):
    # Display a BGR (OpenCV channel order) image with matplotlib, which expects RGB.
    plt.axis(axis_setting)
    plt.imshow( cv2.cvtColor(im, cv2.COLOR_BGR2RGB) )
| [
"cv2.GaussianBlur",
"numpy.abs",
"numpy.sum",
"cv2.bitwise_and",
"numpy.polyfit",
"numpy.argmax",
"cv2.fillPoly",
"matplotlib.pyplot.figure",
"numpy.arange",
"cv2.undistort",
"cv2.line",
"cv2.warpPerspective",
"numpy.zeros_like",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"numpy.max",... | [((151, 168), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (161, 168), False, 'import cv2\n'), ((273, 308), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (285, 308), False, 'import cv2\n'), ((433, 458), 'cv2.undistort', 'cv2.undistort', (['im', 'cm', 'dc'], {}), '(im, cm, dc)\n', (446, 458), False, 'import cv2\n'), ((545, 572), 'numpy.array', 'np.array', (['[x[0], x[1], 1.0]'], {}), '([x[0], x[1], 1.0])\n', (553, 572), True, 'import numpy as np\n'), ((594, 605), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (602, 605), True, 'import numpy as np\n'), ((687, 709), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'flag'], {}), '(im, flag)\n', (699, 709), False, 'import cv2\n'), ((770, 815), 'cv2.Canny', 'cv2.Canny', (['img', 'low_threshold', 'high_threshold'], {}), '(img, low_threshold, high_threshold)\n', (779, 815), False, 'import cv2\n'), ((866, 918), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(kernel_size, kernel_size)', '(0)'], {}), '(img, (kernel_size, kernel_size), 0)\n', (882, 918), False, 'import cv2\n'), ((949, 980), 'cv2.Sobel', 'cv2.Sobel', (['im', 'cv2.CV_64F', '(1)', '(0)'], {}), '(im, cv2.CV_64F, 1, 0)\n', (958, 980), False, 'import cv2\n'), ((1011, 1042), 'cv2.Sobel', 'cv2.Sobel', (['im', 'cv2.CV_64F', '(0)', '(1)'], {}), '(im, cv2.CV_64F, 0, 1)\n', (1020, 1042), False, 'import cv2\n'), ((1696, 1789), 'numpy.array', 'np.array', (['[cbc[0, :], cbc[nx - 1, :], cbc[-1, :], cbc[nx * ny - nx, :]]'], {'dtype': 'np.float32'}), '([cbc[0, :], cbc[nx - 1, :], cbc[-1, :], cbc[nx * ny - nx, :]],\n dtype=np.float32)\n', (1704, 1789), True, 'import numpy as np\n'), ((2276, 2445), 'numpy.array', 'np.array', (['[[offset_x, offset_y], [im_sz[0] - offset_x, offset_y], [im_sz[0] -\n offset_x, im_sz[1] - offset_y], [offset_x, im_sz[1] - offset_y]]'], {'dtype': 'np.float32'}), '([[offset_x, offset_y], [im_sz[0] - offset_x, offset_y], [im_sz[0] -\n offset_x, im_sz[1] - offset_y], [offset_x, 
im_sz[1] - offset_y]], dtype\n =np.float32)\n', (2284, 2445), True, 'import numpy as np\n'), ((2879, 2902), 'numpy.uint8', 'np.uint8', (['binary_output'], {}), '(binary_output)\n', (2887, 2902), True, 'import numpy as np\n'), ((3068, 3129), 'cv2.warpPerspective', 'cv2.warpPerspective', (['im', 'M', 'canvas_sz'], {'flags': 'cv2.INTER_LINEAR'}), '(im, M, canvas_sz, flags=cv2.INTER_LINEAR)\n', (3087, 3129), False, 'import cv2\n'), ((3228, 3263), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_RGB2HLS'], {}), '(im, cv2.COLOR_RGB2HLS)\n', (3240, 3263), False, 'import cv2\n'), ((3609, 3696), 'cv2.addWeighted', 'cv2.addWeighted', (['images[first]', 'weights[first]', 'images[second]', 'weights[second]', '(0)'], {}), '(images[first], weights[first], images[second], weights[\n second], 0)\n', (3624, 3696), False, 'import cv2\n'), ((4045, 4081), 'cv2.bitwise_or', 'cv2.bitwise_or', (['images[0]', 'images[1]'], {}), '(images[0], images[1])\n', (4059, 4081), False, 'import cv2\n'), ((4977, 5022), 'cv2.morphologyEx', 'cv2.morphologyEx', (['im', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(im, cv2.MORPH_CLOSE, kernel)\n', (4993, 5022), False, 'import cv2\n'), ((5239, 5275), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (5251, 5275), False, 'import cv2\n'), ((6148, 6161), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (6156, 6161), True, 'import numpy as np\n'), ((6419, 6443), 'numpy.zeros_like', 'np.zeros_like', (['lanecells'], {}), '(lanecells)\n', (6432, 6443), True, 'import numpy as np\n'), ((7709, 7721), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7719, 7721), True, 'import matplotlib.pyplot as plt\n'), ((7727, 7747), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (7738, 7747), True, 'import matplotlib.pyplot as plt\n'), ((7752, 7767), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7760, 7767), True, 'import matplotlib.pyplot as plt\n'), 
((7772, 7788), 'matplotlib.pyplot.imshow', 'plt.imshow', (['left'], {}), '(left)\n', (7782, 7788), True, 'import matplotlib.pyplot as plt\n'), ((7794, 7814), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (7805, 7814), True, 'import matplotlib.pyplot as plt\n'), ((7819, 7834), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7827, 7834), True, 'import matplotlib.pyplot as plt\n'), ((7839, 7856), 'matplotlib.pyplot.imshow', 'plt.imshow', (['right'], {}), '(right)\n', (7849, 7856), True, 'import matplotlib.pyplot as plt\n'), ((8986, 9037), 'numpy.polyfit', 'np.polyfit', (['coords_left[:, 1]', 'coords_left[:, 0]', '(2)'], {}), '(coords_left[:, 1], coords_left[:, 0], 2)\n', (8996, 9037), True, 'import numpy as np\n'), ((9058, 9111), 'numpy.polyfit', 'np.polyfit', (['coords_right[:, 1]', 'coords_right[:, 0]', '(2)'], {}), '(coords_right[:, 1], coords_right[:, 0], 2)\n', (9068, 9111), True, 'import numpy as np\n'), ((9451, 9485), 'numpy.linspace', 'np.linspace', (['(0)', 'warped_im.shape[0]'], {}), '(0, warped_im.shape[0])\n', (9462, 9485), True, 'import numpy as np\n'), ((10088, 10128), 'cv2.fillPoly', 'cv2.fillPoly', (['canvas', '[pts]', '(0, 255, 0)'], {}), '(canvas, [pts], (0, 255, 0))\n', (10100, 10128), False, 'import cv2\n'), ((10144, 10211), 'cv2.warpPerspective', 'cv2.warpPerspective', (['canvas', 'Minv', '(image.shape[1], image.shape[0])'], {}), '(canvas, Minv, (image.shape[1], image.shape[0]))\n', (10163, 10211), False, 'import cv2\n'), ((10226, 10268), 'cv2.addWeighted', 'cv2.addWeighted', (['image', '(1)', 'newwarp', '(0.3)', '(0)'], {}), '(image, 1, newwarp, 0.3, 0)\n', (10241, 10268), False, 'import cv2\n'), ((10979, 11018), 'numpy.zeros_like', 'np.zeros_like', (['points'], {'dtype': 'np.float32'}), '(points, dtype=np.float32)\n', (10992, 11018), True, 'import numpy as np\n'), ((12482, 12583), 'cv2.putText', 'cv2.putText', (['im', 'text', 'pos', 'cv2.FONT_HERSHEY_SIMPLEX', 'fontscale', '(255, 
255, 255)', '(2)', 'cv2.LINE_AA'], {}), '(im, text, pos, cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255, 255, \n 255), 2, cv2.LINE_AA)\n', (12493, 12583), False, 'import cv2\n'), ((13134, 13254), 'numpy.array', 'np.array', (['[[[x_from, y_lim], [x_to, y_lim], [n_cols - right_offset, n_rows], [\n left_offset, n_rows]]]'], {'dtype': 'np.int32'}), '([[[x_from, y_lim], [x_to, y_lim], [n_cols - right_offset, n_rows],\n [left_offset, n_rows]]], dtype=np.int32)\n', (13142, 13254), True, 'import numpy as np\n'), ((13370, 13390), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (13383, 13390), True, 'import numpy as np\n'), ((13395, 13435), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'region_vertices', '(255)'], {}), '(mask, region_vertices, 255)\n', (13407, 13435), False, 'import cv2\n'), ((13448, 13476), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'mask'], {}), '(image, mask)\n', (13463, 13476), False, 'import cv2\n'), ((13961, 13983), 'numpy.array', 'np.array', (['[0, 1, -val]'], {}), '([0, 1, -val])\n', (13969, 13983), True, 'import numpy as np\n'), ((14066, 14100), 'numpy.cross', 'np.cross', (['[x1, y1, 1]', '[x2, y2, 1]'], {}), '([x1, y1, 1], [x2, y2, 1])\n', (14074, 14100), True, 'import numpy as np\n'), ((14186, 14218), 'numpy.zeros', 'np.zeros', (['(n, 4)'], {'dtype': 'np.int32'}), '((n, 4), dtype=np.int32)\n', (14194, 14218), True, 'import numpy as np\n'), ((15190, 15260), 'numpy.array', 'np.array', (['[x1 + offset_x, y1 + offset_y, x2 + offset_x, y2 + offset_y]'], {}), '([x1 + offset_x, y1 + offset_y, x2 + offset_x, y2 + offset_y])\n', (15198, 15260), True, 'import numpy as np\n'), ((15532, 15543), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (15540, 15543), True, 'import numpy as np\n'), ((16119, 16173), 'numpy.array', 'np.array', (['[mu_x1, mu_y1, mu_x2, mu_y2]'], {'dtype': 'np.int32'}), '([mu_x1, mu_y1, mu_x2, mu_y2], dtype=np.int32)\n', (16127, 16173), True, 'import numpy as np\n'), ((16314, 16365), 'cv2.addWeighted', 'cv2.addWeighted', 
(['initial_im', 'alpha', 'im', 'beta', 'gamma'], {}), '(initial_im, alpha, im, beta, gamma)\n', (16329, 16365), False, 'import cv2\n'), ((16463, 16520), 'cv2.line', 'cv2.line', (['canvas_im', '(x1, y1)', '(x2, y2)', 'color', 'thickness'], {}), '(canvas_im, (x1, y1), (x2, y2), color, thickness)\n', (16471, 16520), False, 'import cv2\n'), ((16790, 16821), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys', '"""-"""'], {}), "(xs, ys, '-', **kvargs)\n", (16798, 16821), True, 'import matplotlib.pyplot as plt\n'), ((16971, 16994), 'numpy.arange', 'np.arange', (['x_from', 'x_to'], {}), '(x_from, x_to)\n', (16980, 16994), True, 'import numpy as np\n'), ((17023, 17049), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys'], {}), '(xs, ys, **kvargs)\n', (17031, 17049), True, 'import matplotlib.pyplot as plt\n'), ((17118, 17146), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 16)'}), '(figsize=(16, 16))\n', (17128, 17146), True, 'import matplotlib.pyplot as plt\n'), ((17295, 17317), 'matplotlib.pyplot.axis', 'plt.axis', (['axis_setting'], {}), '(axis_setting)\n', (17303, 17317), True, 'import matplotlib.pyplot as plt\n'), ((1094, 1107), 'numpy.abs', 'np.abs', (['sobel'], {}), '(sobel)\n', (1100, 1107), True, 'import numpy as np\n'), ((1267, 1281), 'numpy.abs', 'np.abs', (['sobely'], {}), '(sobely)\n', (1273, 1281), True, 'import numpy as np\n'), ((1283, 1297), 'numpy.abs', 'np.abs', (['sobelx'], {}), '(sobelx)\n', (1289, 1297), True, 'import numpy as np\n'), ((3434, 3453), 'numpy.nonzero', 'np.nonzero', (['weights'], {}), '(weights)\n', (3444, 3453), True, 'import numpy as np\n'), ((3795, 3846), 'cv2.addWeighted', 'cv2.addWeighted', (['res', '(1.0)', 'images[i]', 'weights[i]', '(0)'], {}), '(res, 1.0, images[i], weights[i], 0)\n', (3810, 3846), False, 'import cv2\n'), ((4167, 4190), 'cv2.bitwise_or', 'cv2.bitwise_or', (['res', 'im'], {}), '(res, im)\n', (4181, 4190), False, 'import cv2\n'), ((6046, 6061), 'numpy.argmax', 'np.argmax', (['sums'], {}), 
'(sums)\n', (6055, 6061), True, 'import numpy as np\n'), ((7442, 7468), 'matplotlib.pyplot.subplot', 'plt.subplot', (['ny', 'nx', '(i + 1)'], {}), '(ny, nx, i + 1)\n', (7453, 7468), True, 'import matplotlib.pyplot as plt\n'), ((7475, 7490), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7483, 7490), True, 'import matplotlib.pyplot as plt\n'), ((7499, 7515), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cell'], {}), '(cell)\n', (7509, 7515), True, 'import matplotlib.pyplot as plt\n'), ((10490, 10503), 'numpy.abs', 'np.abs', (['(2 * a)'], {}), '(2 * a)\n', (10496, 10503), True, 'import numpy as np\n'), ((10880, 10895), 'numpy.abs', 'np.abs', (['(2 * m_a)'], {}), '(2 * m_a)\n', (10886, 10895), True, 'import numpy as np\n'), ((13629, 13641), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13637, 13641), True, 'import numpy as np\n'), ((14722, 14736), 'numpy.abs', 'np.abs', (['slopes'], {}), '(slopes)\n', (14728, 14736), True, 'import numpy as np\n'), ((17191, 17215), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(i + 1)'], {}), '(3, 2, i + 1)\n', (17202, 17215), True, 'import matplotlib.pyplot as plt\n'), ((17334, 17369), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (17346, 17369), False, 'import cv2\n'), ((1167, 1184), 'numpy.square', 'np.square', (['sobelx'], {}), '(sobelx)\n', (1176, 1184), True, 'import numpy as np\n'), ((1187, 1204), 'numpy.square', 'np.square', (['sobely'], {}), '(sobely)\n', (1196, 1204), True, 'import numpy as np\n'), ((9822, 9843), 'numpy.zeros_like', 'np.zeros_like', (['warped'], {}), '(warped)\n', (9835, 9843), True, 'import numpy as np\n'), ((10033, 10065), 'numpy.hstack', 'np.hstack', (['(pts_left, pts_right)'], {}), '((pts_left, pts_right))\n', (10042, 10065), True, 'import numpy as np\n'), ((14430, 14453), 'numpy.cross', 'np.cross', (['line', 'line_y0'], {}), '(line, line_y0)\n', (14438, 14453), True, 'import numpy as np\n'), ((14484, 14507), 
'numpy.cross', 'np.cross', (['line', 'line_y1'], {}), '(line, line_y1)\n', (14492, 14507), True, 'import numpy as np\n'), ((14801, 14840), 'numpy.logical_and', 'np.logical_and', (['(slopes < 0)', 'valid_lines'], {}), '(slopes < 0, valid_lines)\n', (14815, 14840), True, 'import numpy as np\n'), ((14907, 14946), 'numpy.logical_and', 'np.logical_and', (['(slopes > 0)', 'valid_lines'], {}), '(slopes > 0, valid_lines)\n', (14921, 14946), True, 'import numpy as np\n'), ((2605, 2615), 'numpy.max', 'np.max', (['im'], {}), '(im)\n', (2611, 2615), True, 'import numpy as np\n'), ((5994, 6006), 'numpy.sum', 'np.sum', (['cell'], {}), '(cell)\n', (6000, 6006), True, 'import numpy as np\n'), ((9900, 9932), 'numpy.vstack', 'np.vstack', (['[poly_x_left, poly_y]'], {}), '([poly_x_left, poly_y])\n', (9909, 9932), True, 'import numpy as np\n'), ((9985, 10018), 'numpy.vstack', 'np.vstack', (['[poly_x_right, poly_y]'], {}), '([poly_x_right, poly_y])\n', (9994, 10018), True, 'import numpy as np\n')] |
from hrm import readCSV, getBeats, getMeanHR, getDuration, hrd
import pytest
import numpy
@pytest.mark.parametrize("testinput,expected", [
    ('test_data31.csv', {"voltage_extremes": (1.0, 1.0), "duration": 1.0,
                         "beats": numpy.array([]),
                         "num_beats": 1.0, "mean_hr_bpm": 1.0}),
    ('test_data1.csv', {"voltage_extremes": (1.0, 1.0), "duration": 1.0,
                        "beats": numpy.array([]),
                        "num_beats": 1.0, "mean_hr_bpm": 1.0}),
    ('test_data10.csv', {"voltage_extremes": (1.0, 1.0), "duration": 1.0,
                         "beats": numpy.array([]),
                        "num_beats": 1.0, "mean_hr_bpm": 1.0})
])
def test(testinput, expected):
    """Check that each metric returned by hrd() has the expected type.

    Bug fix: the original checked exp["duration"]'s type against the
    expected "num_beats" and "mean_hr_bpm" entries instead of checking
    exp["num_beats"] and exp["mean_hr_bpm"] themselves, so those two
    metrics were never actually validated.
    """
    [t, v] = readCSV(testinput)
    exp = hrd(t, v, duration=getDuration(t))
    assert isinstance(exp["voltage_extremes"],
                      type(expected["voltage_extremes"]))
    assert isinstance(exp["duration"],
                      type(expected["duration"]))
    assert isinstance(exp["beats"],
                      type(expected["beats"]))
    assert isinstance(exp["num_beats"],
                      type(expected["num_beats"]))
    assert isinstance(exp["mean_hr_bpm"],
                      type(expected["mean_hr_bpm"]))
| [
"hrm.getDuration",
"numpy.array",
"hrm.readCSV"
] | [((755, 773), 'hrm.readCSV', 'readCSV', (['testinput'], {}), '(testinput)\n', (762, 773), False, 'from hrm import readCSV, getBeats, getMeanHR, getDuration, hrd\n'), ((803, 817), 'hrm.getDuration', 'getDuration', (['t'], {}), '(t)\n', (814, 817), False, 'from hrm import readCSV, getBeats, getMeanHR, getDuration, hrd\n'), ((249, 264), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (260, 264), False, 'import numpy\n'), ((437, 452), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (448, 452), False, 'import numpy\n'), ((626, 641), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (637, 641), False, 'import numpy\n')] |
import numpy as np
from lbdapy import ProblemData
from lbdapy.lib.lbdapy.cutfamilies import LooseBenders as _LooseBenders
from .CutFamily import CutFamily
class LooseBenders(CutFamily):
    # Thin Python wrapper binding the compiled LooseBenders cut-family
    # implementation to a ProblemData instance.
    def __init__(self,
                 problem: ProblemData,
                 alpha: np.ndarray,
                 time_limit: float = 1e3):
        # Delegate to the native extension; alpha is coerced to an ndarray.
        self.obj = _LooseBenders(problem.obj, np.asarray(alpha), time_limit)
| [
"numpy.asarray"
] | [((375, 392), 'numpy.asarray', 'np.asarray', (['alpha'], {}), '(alpha)\n', (385, 392), True, 'import numpy as np\n')] |
# Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cape_document_qa import patches
import numpy as np
from cape_document_qa.cape_config import PREPRO_DATASET_DIR, PREPRO_EVIDENCE_DIR, SQUAD_SOURCE_DIR
import pickle
from os import makedirs
from os.path import join, exists
from docqa.squad.build_squad_dataset import parse_squad_data
from docqa.triviaqa.read_data import TriviaQaQuestion, TagMeEntityDoc, FreeForm
from tqdm import tqdm
from docqa.data_processing.text_utils import NltkAndPunctTokenizer
import json
from typing import Dict
def get_out_dir(dataset_name):
    # Output directory for this dataset under the preprocessed-dataset root.
    return join(PREPRO_DATASET_DIR, dataset_name)
def get_doc_savename(dirpath, doc_id):
    '''Path for a document's evidence file; '/' is stripped from the id.'''
    sanitized = doc_id.replace('/', '')
    return join(dirpath, sanitized)
def get_questions_savename(dataset_name, fold):
    '''Pickle path for the questions of one fold of a dataset.'''
    out_dir = get_out_dir(dataset_name)
    return join(out_dir, fold + ".pkl")
def dump_paragraph(paragraph):
    '''
    Serialize tokenized paragraph text: words joined by spaces, sentences
    by newlines, paragraphs by blank lines.
    '''
    rendered = []
    for para in [paragraph.text]:
        sentences = [" ".join(sent) for sent in para]
        rendered.append("\n".join(sentences))
    return "\n\n".join(rendered)
def squad_q2triviaqa_q(question):
    """Convert a parsed SQuAD question into a TriviaQA-style question.

    The SQuAD paragraph is treated as a single evidence document whose
    id is the question id; answer spans are de-duplicated and the unique
    answer strings become the answer aliases.
    """
    doc_id = question.question_id
    question_id = question.question_id
    # De-duplicate identical (start, end) answer spans.
    answer_spans = np.unique(question.answer.answer_spans, axis=0)
    answer_texts = list(set(question.answer.answer_text))
    # NOTE(review): normalized_aliases are not lower-cased here (only the
    # single normalized_value is) — confirm this is intended downstream.
    answer = FreeForm(
        value=answer_texts[0],
        normalized_value=answer_texts[0].lower(),
        aliases=answer_texts,
        normalized_aliases=answer_texts,
        human_answers=None
    )
    # Dummy TagMe document with perfect scores; its id points at the saved paragraph.
    doc = TagMeEntityDoc(1., 1., doc_id)
    doc.answer_spans = answer_spans
    return TriviaQaQuestion(question.words, question_id, answer, [doc], [])
def prepro_squad_fold(name, fold, squad_file_paths):
    """Preprocess one fold of a SQuAD-format dataset into TriviaQA layout.

    Tokenizes every file in ``squad_file_paths``, writes each question's
    paragraph to its own evidence text file, pickles the converted
    questions, and returns the accumulated vocabulary plus the mapping
    from document id to evidence file path (stored without the .txt
    suffix).
    """
    tokenizer = NltkAndPunctTokenizer()
    dataset_evidence_dir = join(PREPRO_EVIDENCE_DIR, name)
    if not exists(dataset_evidence_dir):
        makedirs(dataset_evidence_dir)
    voc = set()
    squad_docs = [
        d for squad_file_path in squad_file_paths
        for d in parse_squad_data(squad_file_path, fold, tokenizer)
    ]
    questions = []
    file_map = {}
    for document in tqdm(squad_docs, desc=fold, ncols=80):
        for paragraph in document.paragraphs:
            for question in paragraph.questions:
                # Each question gets its own copy of the paragraph as an
                # evidence document keyed by the question id.
                doc_id = question.question_id
                doc_savename = get_doc_savename(dataset_evidence_dir, doc_id)
                trivia_q = squad_q2triviaqa_q(question)
                with open(doc_savename + '.txt', 'w', encoding='utf8') as f:
                    f.write(dump_paragraph(paragraph))
                words = {w for sent in paragraph.text for w in sent}
                voc.update(words)
                # NOTE(review): the mapped path has no '.txt' suffix —
                # consumers appear to append it; confirm downstream usage.
                file_map[doc_id] = doc_savename
                questions.append(trivia_q)
    questions_savename = get_questions_savename(name, fold)
    with open(questions_savename, "wb") as f:
        pickle.dump(questions, f)
    return voc, file_map
def preprocess_squad_dataset(name: str, fold_dict: Dict):
    """Preprocess a squad dataset for training. Creates entries in the dataset directory/triviaqa directory
    and adds entries in the dataset directory/triviaqa/evidence directories for questions and documents respectively

    :param name: The name of the dataset (e.g. Squad)
    :param fold_dict: keys: name of the fold, values: list of paths to the json of that fold,
        e.g. {'train': ['path/to/train.json'], 'dev': ['path/to/dev.json'], 'test': ['path/to/test.json']]}
    """
    print('Preprocessing Squad Dataset: {}'.format(name))
    if not exists(get_out_dir(name)):
        makedirs(get_out_dir(name))
    voc, file_map = set(), {}
    for fold, squad_file_paths in fold_dict.items():
        fold_voc, fold_file_map = prepro_squad_fold(name, fold, squad_file_paths)
        # Merge the per-fold vocabulary and doc-id -> file-path mappings.
        voc.update(fold_voc)
        for k, v in fold_file_map.items():
            file_map[k] = v
    print("Dumping file mapping")
    with open(join(get_out_dir(name), "file_map.json"), "w", encoding='utf8') as f:
        json.dump(file_map, f)
    print("Dumping vocab mapping")
    # One word per line, sorted, so the vocab file is deterministic.
    with open(join(get_out_dir(name), "vocab.txt"), "w", encoding='utf8') as f:
        for word in sorted(voc):
            f.write(word)
            f.write("\n")
def squad_dict():
    '''Default dataset name and fold->file mapping for standard SQuAD v1.1.'''
    folds = {
        'train': [join(SQUAD_SOURCE_DIR, 'train-v1.1.json')],
        'dev': [join(SQUAD_SOURCE_DIR, 'dev-v1.1.json')],
        'test': [join(SQUAD_SOURCE_DIR, 'dev-v1.1.json')],
    }
    return 'squad', folds
if __name__ == '__main__':
    # CLI entry point: preprocess either the default SQuAD v1.1 files or a
    # custom dataset given an explicit name plus train/dev/test file lists.
    import argparse
    parser = argparse.ArgumentParser(
        description="Perform dataset preprocessing for Squad models.")
    parser.add_argument('-n', '--dataset_name', type=str, default=None, dest='title', help='name of dataset to preprocess')
    parser.add_argument('-tr', '--train_files', nargs='+', default=[], help='Which datasets to compute')
    parser.add_argument('-dv', '--dev_files', nargs='+', default=[], help='Which datasets to compute')
    parser.add_argument('-ts', '--test_files', nargs='+', default=[], help='Which datasets to compute')
    args = parser.parse_args()
    if args.title is None:
        # No name supplied: fall back to the standard SQuAD v1.1 layout.
        title, fold_dict = squad_dict()
    else:
        title = args.title
        fold_dict = {'train': args.train_files, 'dev': args.dev_files, 'test': args.test_files}
    preprocess_squad_dataset(title, fold_dict)
| [
"json.dump",
"tqdm.tqdm",
"pickle.dump",
"argparse.ArgumentParser",
"os.makedirs",
"docqa.squad.build_squad_dataset.parse_squad_data",
"docqa.data_processing.text_utils.NltkAndPunctTokenizer",
"os.path.exists",
"docqa.triviaqa.read_data.TagMeEntityDoc",
"docqa.triviaqa.read_data.TriviaQaQuestion",... | [((1124, 1162), 'os.path.join', 'join', (['PREPRO_DATASET_DIR', 'dataset_name'], {}), '(PREPRO_DATASET_DIR, dataset_name)\n', (1128, 1162), False, 'from os.path import join, exists\n'), ((1620, 1667), 'numpy.unique', 'np.unique', (['question.answer.answer_spans'], {'axis': '(0)'}), '(question.answer.answer_spans, axis=0)\n', (1629, 1667), True, 'import numpy as np\n'), ((1945, 1977), 'docqa.triviaqa.read_data.TagMeEntityDoc', 'TagMeEntityDoc', (['(1.0)', '(1.0)', 'doc_id'], {}), '(1.0, 1.0, doc_id)\n', (1959, 1977), False, 'from docqa.triviaqa.read_data import TriviaQaQuestion, TagMeEntityDoc, FreeForm\n'), ((2023, 2087), 'docqa.triviaqa.read_data.TriviaQaQuestion', 'TriviaQaQuestion', (['question.words', 'question_id', 'answer', '[doc]', '[]'], {}), '(question.words, question_id, answer, [doc], [])\n', (2039, 2087), False, 'from docqa.triviaqa.read_data import TriviaQaQuestion, TagMeEntityDoc, FreeForm\n'), ((2159, 2182), 'docqa.data_processing.text_utils.NltkAndPunctTokenizer', 'NltkAndPunctTokenizer', ([], {}), '()\n', (2180, 2182), False, 'from docqa.data_processing.text_utils import NltkAndPunctTokenizer\n'), ((2210, 2241), 'os.path.join', 'join', (['PREPRO_EVIDENCE_DIR', 'name'], {}), '(PREPRO_EVIDENCE_DIR, name)\n', (2214, 2241), False, 'from os.path import join, exists\n'), ((2541, 2578), 'tqdm.tqdm', 'tqdm', (['squad_docs'], {'desc': 'fold', 'ncols': '(80)'}), '(squad_docs, desc=fold, ncols=80)\n', (2545, 2578), False, 'from tqdm import tqdm\n'), ((4937, 5028), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Perform dataset preprocessing for Squad models."""'}), "(description=\n 'Perform dataset preprocessing for Squad models.')\n", (4960, 5028), False, 'import argparse\n'), ((2254, 2282), 'os.path.exists', 'exists', (['dataset_evidence_dir'], {}), '(dataset_evidence_dir)\n', (2260, 2282), False, 'from os.path import join, exists\n'), ((2292, 2322), 'os.makedirs', 'makedirs', 
(['dataset_evidence_dir'], {}), '(dataset_evidence_dir)\n', (2300, 2322), False, 'from os import makedirs\n'), ((3299, 3324), 'pickle.dump', 'pickle.dump', (['questions', 'f'], {}), '(questions, f)\n', (3310, 3324), False, 'import pickle\n'), ((4424, 4446), 'json.dump', 'json.dump', (['file_map', 'f'], {}), '(file_map, f)\n', (4433, 4446), False, 'import json\n'), ((2426, 2476), 'docqa.squad.build_squad_dataset.parse_squad_data', 'parse_squad_data', (['squad_file_path', 'fold', 'tokenizer'], {}), '(squad_file_path, fold, tokenizer)\n', (2442, 2476), False, 'from docqa.squad.build_squad_dataset import parse_squad_data\n'), ((4708, 4749), 'os.path.join', 'join', (['SQUAD_SOURCE_DIR', '"""train-v1.1.json"""'], {}), "(SQUAD_SOURCE_DIR, 'train-v1.1.json')\n", (4712, 4749), False, 'from os.path import join, exists\n'), ((4768, 4807), 'os.path.join', 'join', (['SQUAD_SOURCE_DIR', '"""dev-v1.1.json"""'], {}), "(SQUAD_SOURCE_DIR, 'dev-v1.1.json')\n", (4772, 4807), False, 'from os.path import join, exists\n'), ((4827, 4866), 'os.path.join', 'join', (['SQUAD_SOURCE_DIR', '"""dev-v1.1.json"""'], {}), "(SQUAD_SOURCE_DIR, 'dev-v1.1.json')\n", (4831, 4866), False, 'from os.path import join, exists\n')] |
import copy
import os
import enum
import random
import traceback
from abc import ABC
from typing import Any, Tuple, Optional, Dict, Sequence, List, Union, cast, Set
import compress_pickle
import gym.spaces
import numpy as np
import stringcase
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import SensorSuite
from allenact.base_abstractions.task import Task, TaskSampler
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_util import round_to_factor
from env.constants import DEFAULT_COMPATIBLE_RECEPTACLES, OBJECT_TYPES_WITH_PROPERTIES, SCENE_TO_SCENE_TYPE, STARTER_HOME_SERVICE_DATA_DIR, STARTER_HOME_SERVICE_SIMPLE_PICK_AND_PLACE_DATA_DIR, STARTER_REARRANGE_DATA_DIR, STEP_SIZE, VISIBILITY_DISTANCE
from env.environment import (
HomeServiceSimpleTaskOrderTaskSpec,
HomeServiceTHOREnvironment,
HomeServiceTaskSpec,
)
from env.expert import (
# GreedySimplePickAndPlaceExpert,
ShortestPathNavigatorTHOR,
SubTaskExpert,
)
from env.utils import (
HomeServiceActionSpace,
include_object_data,
)
# from sEDM.test_edm import sEDM_model
class HomeServiceTaskType(enum.Enum):
    """Enumerates the kinds of home-service tasks this module can sample."""
    SIMPLE_PICK_AND_PLACE = "SimplePickAndPlace"
    REARRANGE = "Rearrange"
class AbstractHomeServiceTask(Task, ABC):
    """Common base for home-service tasks: agent-location discretization helpers."""

    @staticmethod
    def agent_location_to_tuple(
        agent_loc: Dict[str, Union[Dict[str, float], bool, float, int]],
        base_rotation: int = 90,
        base_horizon: int = 30,
    ) -> Tuple[float, float, int, int, int]:
        """Discretize an agent-location dict into a hashable tuple.

        Accepts either the flat form (``x``/``z``/``rotation``/``horizon``/
        ``standing`` keys) or the raw THOR event form (nested ``position`` and
        ``rotation`` dicts plus ``cameraHorizon``/``isStanding``) and returns
        ``(x, z, rotation, standing, horizon)`` with rotation and horizon
        snapped to multiples of the given step factors.
        """
        if "position" in agent_loc:
            # Raw controller metadata: flatten into the canonical key layout.
            agent_loc = {
                "x": agent_loc["position"]["x"],
                "y": agent_loc["position"]["y"],
                "z": agent_loc["position"]["z"],
                "rotation": agent_loc["rotation"]["y"],
                "horizon": agent_loc["cameraHorizon"],
                "standing": agent_loc.get("isStanding"),
            }
        snapped_rotation = round_to_factor(agent_loc["rotation"], base_rotation) % 360
        snapped_horizon = round_to_factor(agent_loc["horizon"], base_horizon) % 360
        return (
            round(agent_loc["x"], 3),
            round(agent_loc["z"], 3),
            snapped_rotation,
            1 * agent_loc["standing"],
            snapped_horizon,
        )

    @property
    def agent_location_tuple(self) -> Tuple[float, float, int, int, int]:
        """The agent's current location, discretized via `agent_location_to_tuple`."""
        return self.agent_location_to_tuple(
            agent_loc=self.env.get_agent_location(),
            base_rotation=self.env.rotate_step_degrees,
            base_horizon=self.env.horizon_step_degrees,
        )
class HomeServiceBaseTask(AbstractHomeServiceTask):
    def __init__(
        self,
        sensors: SensorSuite,
        env: HomeServiceTHOREnvironment,
        max_steps: int,
        discrete_actions: Tuple[str, ...],
        smooth_nav: bool = False,
        smoothing_factor: int = 1,
        force_axis_aligned_start: bool = False,
        require_done_action: bool = False,
        task_spec_in_metrics: bool = False,
    ) -> None:
        """Create a home-service task.

        # Parameters
        sensors : Sensors producing this task's observations.
        env : The wrapped AI2-THOR environment the task runs in.
        max_steps : Maximum number of steps before the task is forced done.
        discrete_actions : Ordered action names; indices into this tuple
            form the task's discrete action space.
        smooth_nav : If True, look actions are subdivided by `smoothing_factor`.
        smoothing_factor : Sub-step count used when `smooth_nav` is True.
        force_axis_aligned_start : If True, snap the agent's start rotation
            on scene resets (forwarded to `env.reset`).
        require_done_action : If True, the episode only ends on an explicit
            "done" action.
        task_spec_in_metrics : If True, include the full task spec and agent
            location trace in the final metrics.
        """
        super().__init__(
            env=env, sensors=sensors, task_info=dict(), max_steps=max_steps,
        )
        self.env: HomeServiceTHOREnvironment = env
        self.discrete_actions = discrete_actions
        self.smooth_nav = smooth_nav
        # Smoothing only applies when smooth navigation is enabled.
        self.smoothing_factor = smoothing_factor if self.smooth_nav else 1
        self.force_axis_aligned_start = force_axis_aligned_start
        self.require_done_action = require_done_action
        self.task_spec_in_metrics = task_spec_in_metrics
        self._took_end_action: bool = False
        # self._took_goto_action: bool = False
        # self._check_goto_done: bool = False
        # self._1st_check: bool = False
        # self._2nd_check: bool = False
        # Rollback bookkeeping; consumed (and reset) by `_judge`.
        self._took_subtask_rollback: bool = False
        self._rollback_count: int = 0
        self._init_position_change_sensor: bool = False
        # Cached target positions/visibility, keyed by object type.
        self._target_positions: Dict[str, np.ndarray] = {}
        self._target_visibles: Dict[str, bool] = {}
        # self._place_position: Optional[np.array] = None
        # self._place_visible: Optional[bool] = None
        self.task_planner = None
        self.greedy_expert = None
        self._subtask_step = 0
        self._planned_task = None
        self._obs = None
        # Per-step episode trace (reported via `metrics`).
        self.actions_taken = []
        self.actions_taken_success = []
        self.rewards = []
        self.subtask_info = [self.current_subtask]
        self.agent_locs = [self.env.get_agent_location()]
    @property
    def action_space(self) -> gym.spaces.Discrete:
        """The discrete action space (one action per name in `action_names`)."""
        return gym.spaces.Discrete(len(self.action_names()))
    @property
    def target_positions(self) -> Dict[str, np.ndarray]:
        """Cached world-space positions of target objects, keyed by object type."""
        return self._target_positions
    @property
    def target_visibles(self) -> Dict[str, bool]:
        """Cached visibility flags of target objects, keyed by object type."""
        return self._target_visibles
    @target_positions.setter
    def target_positions(self, pos_dict: Dict[str, Union[np.ndarray, dict]]):
        # Accept either ready-made arrays or THOR-style {"x", "y", "z"} dicts.
        for k, v in pos_dict.items():
            if isinstance(v, np.ndarray):
                self._target_positions[k] = v
            elif isinstance(v, dict):
                self._target_positions[k] = np.array([v["x"], v["y"], v["z"]], dtype=np.float32)
    @target_visibles.setter
    def target_visibles(self, vis_dict: Dict[str, bool]):
        for k, v in vis_dict.items():
            self._target_visibles[k] = v
    @property
    def require_init_position_sensor(self) -> bool:
        """Flag telling position sensors to re-initialize (set after room switches)."""
        return self._init_position_change_sensor
    @require_init_position_sensor.setter
    def require_init_position_sensor(self, val: bool):
        self._init_position_change_sensor = val
    @property
    def num_subtasks(self) -> int:
        """Total number of subtasks in the plan."""
        return len(self.planned_task)
    @property
    def planned_task(self):
        """The subtask plan, computed lazily via `query_planner` and cached."""
        if self._planned_task is None:
            self._planned_task = self.query_planner()
        return self._planned_task
    @property
    def current_subtask(self):
        """The (action, target, place) triple for the active subtask.

        Returns ("Done", None, None) once every planned subtask is finished.
        """
        return (
            self.planned_task[self._subtask_step]
            if self._subtask_step < self.num_subtasks
            else ("Done", None, None)
        )
    def subtask_step(self) -> int:
        """Index of the active subtask within the plan."""
        return self._subtask_step
    def rollback_subtask(self):
        """Step back to the previous subtask and flag the rollback for `_judge`."""
        if self._subtask_step > 0:
            self._subtask_step -= 1
            self._took_subtask_rollback = True
            self._rollback_count += 1
    def subtask_succeeded(self):
        """Advance to the next subtask; may advance at most one past the last."""
        self._subtask_step += 1
        assert self._subtask_step < self.num_subtasks + 1
    def is_current_subtask_done(self):
        """Check whether the active subtask just completed and, if so, advance.

        Side effect: on success this calls `subtask_succeeded()`, so
        `current_subtask` moves forward. Returns True exactly when the
        subtask completed on this call (always True once the plan is "Done").
        """
        subtask_action, subtask_target, subtask_place = self.current_subtask
        if subtask_action == "Done":
            return True
        elif subtask_action == "Navigate":
            with include_object_data(self.env.controller):
                metadata = self.env.last_event.metadata
                assert subtask_place is None
                cur_subtask_target = next(
                    (o for o in metadata["objects"] if o["objectType"] == subtask_target), None
                )
                # The target object is only guaranteed to exist in the target scene.
                if self.env.scene == self.env.current_task_spec.target_scene:
                    assert cur_subtask_target is not None
                # Navigation succeeds when the target is visible and within range.
                if cur_subtask_target["visible"] and cur_subtask_target["distance"] < VISIBILITY_DISTANCE:
                    # print(f'cur_subtask_target in navigate success')
                    # print(f'visible: {cur_subtask_target["visible"]} | distance: {cur_subtask_target["distance"]}')
                    self.subtask_succeeded()
                    return True
        elif subtask_action == "Pickup":
            with include_object_data(self.env.controller):
                metadata = self.env.last_event.metadata
                assert subtask_place is None
                # Done when the last action was a successful PickupObject and
                # the held object matches the target type.
                if metadata["lastActionSuccess"] and (
                    metadata["lastAction"] == f"{subtask_action}Object"
                ) and (
                    self.env.held_object["objectType"] == subtask_target
                ):
                    self.subtask_succeeded()
                    return True
        elif subtask_action in ["Open", "Close"]:
            with include_object_data(self.env.controller):
                metadata = self.env.last_event.metadata
                assert subtask_place is None
                if metadata["lastActionSuccess"] and (
                    metadata["lastAction"] == f"{subtask_action}Object"
                ):
                    self.subtask_succeeded()
                    return True
        elif subtask_action == "Put":
            with include_object_data(self.env.controller):
                metadata = self.env.last_event.metadata
                assert subtask_place is not None
                cur_subtask_target = next(
                    (o for o in metadata["objects"] if o["objectType"] == subtask_target), None
                )
                cur_subtask_place = next(
                    (o for o in metadata["objects"] if o["objectType"] == subtask_place), None
                )
                if self.env.scene == self.env.current_task_spec.target_scene:
                    assert cur_subtask_target is not None
                    assert cur_subtask_place is not None
                # Done when the put succeeded, the hand is empty, and the target
                # object now sits in the intended receptacle.
                # NOTE(review): `parentReceptacles` may be None in THOR metadata;
                # this assumes it is a list after a successful put — verify.
                if metadata["lastActionSuccess"] and (
                    metadata["lastAction"] == f"{subtask_action}Object"
                ) and (
                    self.env.held_object is None
                ):
                    if cur_subtask_place["objectId"] in cur_subtask_target["parentReceptacles"]:
                        self.subtask_succeeded()
                        return True
        elif subtask_action == "Goto":
            # TODO
            # if current room type detected from sensor is equal to the target room type
            # return True
            # ORACLE
            metadata = self.env.last_event.metadata
            if (
                metadata["lastActionSuccess"]
                and SCENE_TO_SCENE_TYPE[self.env.scene] == subtask_target
                and self.greedy_expert.check_room_type_done
                and not self.greedy_expert.require_check_room_type
            ):
                self.subtask_succeeded()
                return True
        elif subtask_action == "Scan":
            # TODO
            self.subtask_succeeded()
            return True
        else:
            raise NotImplementedError(
                f"Action {subtask_action} for the subtasks is not implemented"
            )
        return False
def close(self) -> None:
try:
self.env.step()
except Exception as _:
pass
    def action_names(self, **kwargs) -> Tuple[str, ...]:
        """The ordered names of this task's discrete actions."""
        return self.discrete_actions
    def render(self, *args, **kwargs) -> Dict[str, np.array]:
        """Return the current RGB and depth frames from the environment."""
        obs = self.env.observation
        return {
            "rgb": obs[0], "depth": obs[1],
        }
    def reached_terminal_state(self) -> bool:
        """True when the episode should end.

        With `require_done_action`, only an explicit "done" action ends the
        episode; otherwise it ends once every subtask is complete.
        """
        return (self.require_done_action and self._took_end_action) or (
            (not self.require_done_action)
            and self.current_subtask[0] == "Done"
        )
    def query_planner(self):
        """Return the subtask plan; subclasses override (base plan is empty)."""
        return []
def _judge(self, obs, action, next_obs, action_success, current_subtask, subtask_done) -> float:
"""Return the reward from a new (s, a, s')."""
action_name = self.action_names()[action]
reward = -0.05
if not action_success:
reward += -0.05
if subtask_done:
reward += 1
if current_subtask[0] == "Done":
if action_name == "done":
reward += 10
else:
# should take "done" when all the task is done
reward += -10
else:
# If "done" action taken when it is not "Done" subtask
if action_name == "done":
reward += -10
if current_subtask[0] != "Goto":
if action_name.startswith("goto"):
# Wrongly moved to other room type
reward += -10
if self._took_subtask_rollback:
reward += -1 * self._rollback_count
self._took_subtask_rollback = False
self._rollback_count = 0
return reward
def _step(self, action: int) -> RLStepResult:
"""
action: is the index of the action from self.action_names()
"""
# obs = [self._obs]
action_name = self.action_names()[action]
# if action_name.startswith("pickup_"):
# with include_object_data(self.env.controller):
# metadata = self.env.last_event.metadata
# if len(metadata["inventoryObjects"]) != 0:
# action_success = False
# else:
# object_type = stringcase.pascalcase(
# action_name.replace("pickup_", "")
# )
# possible_objects = [
# o
# for o in metadata["objects"]
# if o["visible"] and o["objectType"] == object_type
# ]
# possible_objects = sorted(
# possible_objects, key=lambda po: (po["distance"], po["name"])
# )
# object_before = None
# if len(possible_objects) > 0:
# object_before = possible_objects[0]
# object_id = object_before["objectId"]
# if object_before is not None:
# self.env.controller.step(
# "PickupObject",
# objectId=object_id,
# **self.env.physics_step_kwargs,
# )
# action_success = self.env.last_event.metadata["lastActionSuccess"]
# else:
# action_success = False
# if action_success and self.env.held_object is None:
# get_logger().warning(
# f"`PickupObject` was successful in picking up {object_id} but we're not holding"
# f" any object! Current task spec: \n {self.env.current_task_spec}"
# )
# action_success = False
# elif action_name.startswith("open_by_type"):
# object_type = stringcase.pascalcase(
# action_name.replace("open_by_type_", "")
# )
# with include_object_data(self.env.controller):
# metadata = self.env.last_event.metadata
# pickup_target = self.env.current_task_spec.pickup_target
# place_target = self.env.current_task_spec.place_target
# pickup_target_openable_receptacle = None
# if pickup_target["parentReceptacles"] is not None:
# for obj in metadata["objects"]:
# if (
# obj["openable"]
# and obj["objectId"] in pickup_target["parentReceptacles"]
# ):
# pickup_target_openable_receptacle = obj
# break
# object_before = None
# pickup_target_openable_receptacle_name = (
# pickup_target_openable_receptacle["name"]
# if pickup_target_openable_receptacle is not None and "name" in pickup_target_openable_receptacle
# else None
# )
# for obj in metadata["objects"]:
# if (
# obj["visible"]
# and obj["openable"]
# and obj["objectType"] == object_type
# and (
# obj["name"] == place_target["name"]
# or obj["name"] == pickup_target_openable_receptacle_name
# )
# ):
# object_before = obj
# break
# if object_before is not None:
# if object_before["openness"] > 0.0:
# self.env.controller.step(
# "CloseObject",
# objectId=object_before["objectId"],
# **self.env.physics_step_kwargs,
# )
# self.env.controller.step(
# "OpenObject",
# objectId=object_before["objectId"],
# openness=1.0,
# **self.env.physics_step_kwargs,
# )
# action_success = self.env.last_event.metadata["lastActionSuccess"]
# else:
# action_success = False
# elif action_name.startswith("close_by_type"):
# object_type = stringcase.pascalcase(
# action_name.replace("close_by_type_", "")
# )
# with include_object_data(self.env.controller):
# metadata = self.env.last_event.metadata
# pickup_target = self.env.current_task_spec.pickup_target
# place_target = self.env.current_task_spec.place_target
# pickup_target_openable_receptacle = None
# if pickup_target["parentReceptacles"] is not None:
# for obj in metadata["objects"]:
# if (
# obj["openable"]
# and obj["objectId"] in pickup_target["parentReceptacles"]
# ):
# pickup_target_openable_receptacle = obj
# break
# object_before = None
# pickup_target_openable_receptacle_name = (
# pickup_target_openable_receptacle["name"]
# if pickup_target_openable_receptacle is not None and "name" in pickup_target_openable_receptacle
# else None
# )
# for obj in metadata["objects"]:
# if (
# obj["visible"]
# and obj["openable"]
# and obj["objectType"] == object_type
# and (
# obj["name"] == place_target["name"]
# or obj["name"] == pickup_target_openable_receptacle_name
# )
# ):
# object_before = obj
# break
# if object_before is not None:
# if object_before["openness"] > 0.0:
# self.env.controller.step(
# "CloseObject",
# objectId=object_before["objectId"],
# **self.env.physics_step_kwargs,
# )
# action_success = self.env.last_event.metadata["lastActionSuccess"]
# else:
# action_success = False
# elif action_name.startswith("put_by_type"):
# object_type = stringcase.pascalcase(
# action_name.replace("put_by_type_", "")
# )
# with include_object_data(self.env.controller):
# metadata = self.env.last_event.metadata
# pickup_object = self.env.current_task_spec.pickup_object
# place_receptacle = self.env.current_task_spec.place_receptacle
# if len(metadata["inventoryObjects"]) == 0:
# action_success = False
# else:
# object_before = None
# for obj in metadata["objects"]:
# if (
# obj["visible"]
# and obj["receptacle"]
# and obj["objectType"] == place_receptacle
# # and obj["name"] == place_target["name"]
# ):
# object_before = obj
# break
# if object_before is not None:
# self.env.controller.step(
# "PutObject",
# objectId=object_before["objectId"],
# **self.env.physics_step_kwargs,
# )
# action_success = self.env.last_event.metadata["lastActionSuccess"]
# else:
# action_success = False
if action_name == "pickup":
pickup_obj_type = self.env.current_task_spec.pickup_object
with include_object_data(self.env.controller):
md = self.env.last_event.metadata
if len(md["inventoryObjects"]) != 0:
action_success = False
else:
pickup_obj = next(
(
obj for obj in md['objects']
if obj['objectType'] == pickup_obj_type
), None
)
if pickup_obj is None:
action_success = False
else:
self.env.controller.step(
"PickupObject",
objectId=pickup_obj['objectId'],
**self.env.physics_step_kwargs,
)
action_success = self.env.last_event.metadata["lastActionSuccess"]
if action_success and self.env.held_object is None:
get_logger().warning(
f"`PickupObject` was successful in picking up {pickup_obj} but we're not holding"
f" any object! Current task spec: \n {self.env.current_task_spec}"
)
action_success = False
elif action_name == "put":
pickup_obj_type = self.env.current_task_spec.pickup_object
place_recep_type = self.env.current_task_spec.place_receptacle
with include_object_data(self.env.controller):
md = self.env.last_event.metadata
if len(md["inventoryObjects"]) == 0:
action_success = False
else:
place_recep = next(
(
obj for obj in md['objects']
if (
obj['visible']
and obj['receptacle']
and obj['objectType'] == place_recep_type
)
), None
)
if place_recep is None:
action_success = False
else:
self.env.controller.step(
"PutObject",
objectId=place_recep["objectId"],
**self.env.physics_step_kwargs,
)
action_success = self.env.last_event.metadata["lastActionSuccess"]
elif action_name == "open":
pass
elif action_name == "close":
pass
elif action_name.startswith(("move", "rotate")):
# opposites = {
# "ahead": "back",
# "back": "ahead",
# "right": "left",
# "left": "right",
# }
# direction = action_name.split("_")[-1]
# opposite_direction = opposites[direction]
# for i in range(self.smoothing_factor):
action_success = getattr(self.env, action_name)()
# obs.append(self.get_observations())
# if not action_success:
# # obs.pop()
# for j in range(i):
# getattr(self.env, "_".join([action_name.split("_")[0], opposite_direction]))()
# # obs.pop()
# break
elif action_name.startswith("look"):
# opposites = {
# "up": "down",
# "down": "up",
# }
# direction = action_name.split("_")[-1]
# opposite_direction = opposites[direction]
# for i in range(self.smoothing_factor):
action_success = getattr(self.env, action_name)(1.0 / self.smoothing_factor)
# obs.append(self.get_observations())
# if not action_success:
# # obs.pop()
# for j in range(i):
# getattr(self.env, "_".join([action_name.split("_")[0], opposite_direction]))(1.0 / self.smoothing_factor)
# # obs.pop()
# break
elif action_name.startswith(("stand", "crouch")):
action_success = getattr(self.env, action_name)()
elif action_name == "done":
self._took_end_action = True
# action_success = getattr(self.env, action_name)()
action_success = True
elif action_name == "pass":
event = self.env.controller.step("Pass")
action_success = event.metadata["lastActionSuccess"]
elif action_name.startswith("goto"):
scene_type = "_".join(action_name.split("_")[1:])
assert scene_type in ("kitchen", "living_room", "bedroom", "bathroom")
if SCENE_TO_SCENE_TYPE[self.env.scene] == scene_type:
action_success = False
else:
self.env.reset(
task_spec=self.env.current_task_spec,
force_axis_aligned_start=self.force_axis_aligned_start,
scene_type=stringcase.pascalcase(scene_type)
)
action_success = self.env.last_event.metadata['lastActionSuccess']
if action_success:
self.require_init_position_sensor = True
if self.greedy_expert is not None:
self.greedy_expert.require_check_room_type = True
# self._took_goto_action = True
else:
raise RuntimeError(
f"Action '{action_name}' is not in the action space {HomeServiceActionSpace}"
)
self.actions_taken.append(action_name)
self.actions_taken_success.append(action_success)
self.subtask_info.append(self.current_subtask)
if self.task_spec_in_metrics:
self.agent_locs.append(self.env.get_agent_location())
# print(f'step {self.num_steps_taken()} | current subtask {self.current_subtask} | action_taken {action_name} | action taken success {action_success}', end=" | ")
current_subtask = self.current_subtask
subtask_done = self.is_current_subtask_done()
# print(f'subtask_done {subtask_done}')
# self._obs = self.get_observations()
reward = self._judge(
obs=None,
action=action,
next_obs=None,
action_success=action_success,
current_subtask=current_subtask,
subtask_done=subtask_done
)
self.rewards.append(reward)
return RLStepResult(
observation=None,
reward=reward,
done=self.is_done(),
info={"action_name": action_name, "action_success": action_success},
)
def get_observations(self, **kwargs) -> Any:
self._obs = super().get_observations(**kwargs)
return self._obs
def step(self, action: int) -> RLStepResult:
step_result = super().step(action=action)
if self.greedy_expert is not None:
self.greedy_expert.update(
action_taken=action, action_success=step_result.info["action_success"]
)
step_result = RLStepResult(
observation=self.get_observations(),
reward=step_result.reward,
done=step_result.done,
info=step_result.info,
)
return step_result
class HomeServiceSimplePickAndPlaceTask(HomeServiceBaseTask):
    def __init__(
        self,
        **init_kwargs,
    ):
        # Forward all configuration to HomeServiceBaseTask; this subclass only
        # specializes planning (`query_planner`) and metric reporting.
        super().__init__(**init_kwargs)
        # Set externally when an expert policy is attached; None until then.
        self.greedy_expert: Optional[SubTaskExpert] = None
def query_planner(self, **kwargs) -> Sequence[Tuple[str, Dict[str, Any], Optional[Dict[str, Any]]]]:
"""
Query task planning result from task planner
self.task_planner = TaskPlanner(
task=self,
)
"""
target_object = self.env.current_task_spec.pickup_object
target_place = self.env.current_task_spec.place_receptacle
start_scene = self.env.current_task_spec.start_scene
start_scene_type = SCENE_TO_SCENE_TYPE[start_scene]
target_scene = self.env.current_task_spec.target_scene
target_scene_type = SCENE_TO_SCENE_TYPE[target_scene]
task_plan = []
task_plan.append(("Goto", target_scene_type, None))
task_plan.append(("Scan", None, None))
# self.task_planner = sEDM_model()
if self.task_planner is not None:
planner_result = self.task_planner.inference(target_object=target_object, target_place=target_place)
for i in range(len(planner_result)):
if planner_result[i][-1] == "hanger":
planner_result[i] = (planner_result[i][0], planner_result[i][1], "ToiletPaperHanger")
if planner_result[i][1] == "hanger":
planner_result[i] = (planner_result[i][0], "ToiletPaperHanger", planner_result[i][2])
if target_place == "User":
task_plan.extend(planner_result[:2])
task_plan.append(("Goto", start_scene_type, None))
else:
task_plan.extend(planner_result)
else:
task_plan = []
task_plan.append(("Goto", target_scene_type, None))
task_plan.append(("Scan", None, None))
task_plan.append(("Navigate", target_object, None))
task_plan.append(("Pickup", target_object, None))
if target_place != "User":
task_plan.append(("Navigate", target_place, None))
task_plan.append(("Put", target_object, target_place))
else:
task_plan.append(("Goto", start_scene_type, None))
return task_plan
# def query_expert(self, **kwargs) -> Tuple[Any, bool]:
# if self.greedy_expert is None:
# if not hasattr(self.env, "shortest_path_navigator"):
# self.env.shortest_path_navigator = ShortestPathNavigatorTHOR(
# controller = self.env.controller,
# grid_size=STEP_SIZE,
# include_move_left_right=all(
# f"move_{k}" in self.action_names() for k in ["left", "right"]
# ),
# )
# # self.greedy_expert = GreedySimplePickAndPlaceExpert(
# # task=self,
# # shortest_path_navigator=self.env.shortest_path_navigator,
# # )
# self.greedy_expert = SubTaskExpert(
# task=self,
# shortest_path_navigator=self.env.shortest_path_navigator,
# )
# action = self.greedy_expert.expert_action
# if action is None:
# return 0, False
# else:
# return action, True
    def metrics(self) -> Dict[str, Any]:
        """Return end-of-episode metrics (empty dict until the task is done).

        Adds `success` (all subtasks finished) and `subtask_success`
        (fraction of subtasks completed) to the base metrics, and rewrites
        `task_info` with scene/index/stage plus, optionally, the per-step
        episode trace.
        """
        if not self.is_done():
            return {}
        env = self.env
        pickup_object = env.current_task_spec.pickup_object
        start_receptacle = env.current_task_spec.start_receptacle
        place_receptacle = env.current_task_spec.place_receptacle
        # NOTE(review): `target_object` and `possible_place_objects` are
        # computed but unused — their checks below are commented out.
        target_object = next(
            (
                obj
                for obj in env.last_event.metadata["objects"]
                if obj["objectType"] == pickup_object
            ), None
        )
        # assert target_object is not None
        if place_receptacle != "User":
            possible_place_objects = [
                obj for obj in env.last_event.metadata["objects"]
                if obj["objectType"] == place_receptacle
            ]
            # assert len(possible_place_objects) > 0
            # receptacle = None
            # if target_object["parentReceptacles"] is not None:
            #     receptacle = next(
            #         (
            #             o for o in possible_place_objects
            #             if o['objectId'] in target_object["parentReceptacles"]
            #         ), None
            #     )
        metrics = {
            **super().metrics(),
            **{
                "success": float(True if self.current_subtask[0] == "Done" else False),
                "subtask_success": float(self._subtask_step / self.num_subtasks)
            }
        }
        # Rebuild task_info so it leads the metrics dict.
        task_info = metrics["task_info"]
        task_info["scene"] = env.scene
        task_info["index"] = env.current_task_spec.metrics.get("index")
        task_info["stage"] = env.current_task_spec.stage
        del metrics["task_info"]
        if self.task_spec_in_metrics:
            task_info["task_spec"] = {**env.current_task_spec.__dict__}
            task_info["actions_taken"] = self.actions_taken
            task_info["actions_taken_success"] = self.actions_taken_success
            task_info["subtask_info"] = self.subtask_info
        task_info["unique_id"] = env.current_task_spec.unique_id if not env.current_task_spec.runtime_sample else None
        metrics = {
            "task_info": task_info,
            **metrics,
        }
        # print(f'subtask_info: {self.subtask_info}')
        # print(f'unique_id: {task_info["unique_id"]}')
        # print(f'action_taken: {self.actions_taken}')
        # print(f'actions_taken_success: {self.actions_taken_success}')
        # print(f'rewards: {self.rewards}')
        return metrics
class HomeServiceTaskSpecIterable:
def __init__(
self,
# scenes_to_task_spec_dicts: Dict[str, List[Dict]],
task_keys_to_task_spec_dicts: Dict[str, List[Dict]],
seed: int,
epochs: Union[int, float],
shuffle: bool = True,
task_type: HomeServiceTaskType = HomeServiceTaskType.SIMPLE_PICK_AND_PLACE,
# scenes_to_task_dicts: Dict[str, List[Dict]] = None,
):
assert epochs >= 1
self.task_keys_to_task_spec_dicts = {
k: [*v] for k, v in task_keys_to_task_spec_dicts.items()
}
# assert len(self.task_keys_to_task_spec_dicts) != 0 and all(
# len(self.task_keys_to_task_spec_dicts[task_key]) != 0
# for task_key in self.task_keys_to_task_spec_dicts
# )
assert len(self.task_keys_to_task_spec_dicts) != 0
# self.scenes_to_task_spec_dicts = {
# k: [*v] for k, v in scenes_to_task_spec_dicts.items()
# }
# assert len(self.scenes_to_task_spec_dicts) != 0 and all(
# len(self.scenes_to_task_spec_dicts[scene]) != 0
# for scene in self.scenes_to_task_spec_dicts
# )
# self.scenes_to_task_dicts = None
# if scenes_to_task_dicts is not None:
# self.scenes_to_task_dicts = {
# k: [*v] for k, v in scenes_to_task_dicts.items()
# }
self._seed = seed
self.random = random.Random(self.seed)
self.start_epochs = epochs
self.remaining_epochs = epochs
self.shuffle = shuffle
self.task_type = task_type
# self.remaining_scenes: List[str] = []
self.remaining_task_keys: List[str] = []
# self.task_spec_dicts_for_current_scene: List[Dict[str, Any]] = []
self.task_spec_dicts_for_current_task_key: List[Dict[str, Any]] = []
# self.current_scene: Optional[str] = None
self.current_task_key: Optional[str] = None
self.reset()
    @property
    def seed(self) -> int:
        """Seed controlling this iterable's shuffling RNG."""
        return self._seed
    @seed.setter
    def seed(self, seed: int):
        # Re-seed both the stored value and the underlying RNG.
        self._seed = seed
        self.random.seed(seed)
    @property
    def length(self):
        """Task specs left to yield across all remaining epochs (may be inf)."""
        if self.remaining_epochs == float("inf"):
            return float("inf")
        return (
            len(self.task_spec_dicts_for_current_task_key)
            + sum(
                len(self.task_keys_to_task_spec_dicts[task_key])
                for task_key in self.remaining_task_keys
            )
            + self.remaining_epochs
            * (sum(len(v) for v in self.task_keys_to_task_spec_dicts.values()))
        )
    @property
    def total_unique(self):
        """Total number of unique task specs (one epoch's worth)."""
        return sum(len(v) for v in self.task_keys_to_task_spec_dicts.values())
    def reset(self):
        """Restart iteration from the first epoch with the original seed."""
        self.random.seed(self.seed)
        self.remaining_epochs = self.start_epochs
        self.remaining_task_keys.clear()
        self.task_spec_dicts_for_current_task_key.clear()
        self.current_task_key = None
def refresh_remaining_scenes(self):
if self.remaining_epochs <= 0:
raise StopIteration
self.remaining_epochs -= 1
self.remaining_task_keys = list(sorted(self.task_keys_to_task_spec_dicts.keys()))
if self.shuffle:
self.random.shuffle(self.remaining_task_keys)
return self.remaining_task_keys
    def __next__(self) -> HomeServiceTaskSpec:
        """Yield the next task spec, refilling from the next key/epoch as needed.

        Raises StopIteration (via `refresh_remaining_scenes`) when all
        epochs are exhausted.
        """
        # Refill the current key's spec queue; may advance to a new task key
        # or start a new epoch.
        while len(self.task_spec_dicts_for_current_task_key) == 0:
            if len(self.remaining_task_keys) == 0:
                self.refresh_remaining_scenes()
            self.current_task_key = self.remaining_task_keys.pop()
            self.task_spec_dicts_for_current_task_key = [
                *self.task_keys_to_task_spec_dicts[self.current_task_key]
            ]
            if self.shuffle:
                self.random.shuffle(self.task_spec_dicts_for_current_task_key)
        new_task_spec_dict = self.task_spec_dicts_for_current_task_key.pop()
        # Tag the spec with its key (or check consistency if already tagged).
        if "task_key" not in new_task_spec_dict:
            new_task_spec_dict["task_key"] = self.current_task_key
        else:
            assert self.current_task_key == new_task_spec_dict["task_key"]
        # Wrap in the spec class matching the configured task type.
        if self.task_type == HomeServiceTaskType.SIMPLE_PICK_AND_PLACE:
            return HomeServiceSimpleTaskOrderTaskSpec(**new_task_spec_dict)
        else:
            return HomeServiceTaskSpec(**new_task_spec_dict)
class HomeServiceTaskSampler(TaskSampler):
def __init__(
self,
stage: str,
# scenes_to_task_spec_dicts: Dict[str, List[Dict[str, Any]]],
task_keys_to_task_spec_dicts: Dict[str, List[Dict[str, Any]]],
home_service_env_kwargs: Optional[Dict[str, Any]],
sensors: SensorSuite,
max_steps: int,
discrete_actions: Tuple[str, ...],
smooth_nav: bool,
require_done_action: bool,
force_axis_aligned_start: bool,
task_type: HomeServiceTaskType = HomeServiceTaskType.SIMPLE_PICK_AND_PLACE,
# scenes_to_task_dicts: Optional[Dict[str, List[Dict[str,Any]]]] = None,
epochs: Union[int, float, str] = "default",
smoothing_factor: int = 1,
seed: Optional[int] = None,
task_spec_in_metrics: bool = False,
) -> None:
self.sensors = sensors
self.stage = stage
self.main_seed = seed if seed is not None else random.randint(0, 2 * 30 - 1)
self.task_spec_in_metrics = task_spec_in_metrics
# self.scenes_to_task_spec_dicts = copy.deepcopy(scenes_to_task_spec_dicts)
self.task_keys_to_task_spec_dicts = copy.deepcopy(task_keys_to_task_spec_dicts)
# self.scenes_to_task_dicts = None
# if scenes_to_task_dicts is not None:
# self.scenes_to_task_dicts = copy.deepcopy(scenes_to_task_dicts)
if isinstance(epochs, str):
if epochs.lower().strip() != "default":
raise NotImplementedError(f"Unknown value for `epochs` (=={epochs})")
epochs = float("inf") if stage == "train" else 1
self.task_spec_iterator = HomeServiceTaskSpecIterable(
task_keys_to_task_spec_dicts=self.task_keys_to_task_spec_dicts,
seed=self.main_seed,
epochs=epochs,
shuffle=epochs == float("inf"),
task_type=task_type,
# scenes_to_task_dicts=self.scenes_to_task_dicts,
)
self.env = HomeServiceTHOREnvironment(**home_service_env_kwargs)
self.task_keys = list(self.task_keys_to_task_spec_dicts.keys())
self.max_steps = max_steps
self.discrete_actions = discrete_actions
self.smooth_nav = smooth_nav
self.smoothing_factor = smoothing_factor
self.require_done_action = require_done_action
self.force_axis_aligned_start = force_axis_aligned_start
self.task_type = task_type
self._last_sampled_task: Optional[HomeServiceBaseTask] = None
# FOR REARRANGE DATA
# @classmethod
# def from_fixed_dataset(
# cls,
# stage: str,
# task_type: HomeServiceTaskType,
# allowed_scenes: Optional[Sequence[str]] = None,
# scene_to_allowed_inds: Optional[Dict[str, Sequence[int]]] = None,
# randomize_start_rotation: bool = False,
# **init_kwargs,
# ):
# scenes_to_task_spec_dicts = cls._filter_scenes_to_task_spec_dicts(
# scenes_to_task_spec_dicts=cls.load_rearrange_data_from_path(
# stage=stage, base_dir=STARTER_REARRANGE_DATA_DIR
# ),
# allowed_scenes=allowed_scenes,
# scene_to_allowed_inds=scene_to_allowed_inds,
# )
# if randomize_start_rotation:
# random_gen = random.Random(1)
# for scene in sorted(scenes_to_task_spec_dicts.keys()):
# for task_spec_dict in scenes_to_task_spec_dicts[scene]:
# task_spec_dict["agent_rotation"] = 360.0 * random_gen.random()
# return cls(
# stage=stage,
# task_type=task_type,
# scenes_to_task_spec_dicts=scenes_to_task_spec_dicts,
# **init_kwargs
# )
# FOR REARRANGE DATA
# @classmethod
# def _filter_scenes_to_task_spec_dicts(
# cls,
# scenes_to_task_spec_dicts: Dict[str, List[Dict[str, Any]]],
# allowed_scenes: Optional[Sequence[str]],
# scene_to_allowed_inds: Optional[Dict[str, Sequence[int]]],
# ) -> Dict[str, List[Dict[str, Any]]]:
# if allowed_scenes is not None:
# scenes_to_task_spec_dicts = {
# scene: scenes_to_task_spec_dicts[scene] for scene in allowed_scenes
# }
# if scene_to_allowed_inds is not None:
# scenes_to_task_spec_dicts = {
# scene: [
# scenes_to_task_spec_dicts[scene][ind]
# for ind in sorted(scene_to_allowed_inds[scene])
# ]
# for scene in scene_to_allowed_inds
# if scene in scenes_to_task_spec_dicts
# }
# return scenes_to_task_spec_dicts
# FOR REARRANGE DATA
# @classmethod
# def load_rearrange_data_from_path(
# cls, stage: str, base_dir: Optional[str] = None,
# ) -> Dict[str, List[Dict[str, Any]]]:
# stage = stage.lower()
# if stage == "valid":
# stage = "val"
# data_path = os.path.abspath(os.path.join(base_dir, f"{stage}.pkl.gz"))
# if not os.path.exists(data_path):
# raise RuntimeError(f"No data at path {data_path}")
# data = compress_pickle.load(path=data_path)
# for scene in data:
# for ind, task_spec_dict in enumerate(data[scene]):
# task_spec_dict["scene"] = scene
# if "index" not in task_spec_dict:
# task_spec_dict["index"] = ind
# if "stage" not in task_spec_dict:
# task_spec_dict["stage"] = stage
# return data
@classmethod
def from_fixed_simple_pick_and_place_data(
cls,
stage: str,
task_type: HomeServiceTaskType,
allowed_task_keys: Optional[Sequence[str]] = None,
allowed_pickup_objs: Optional[Sequence[str]] = None,
allowed_start_receps: Optional[Sequence[str]] = None,
allowed_target_receps: Optional[Sequence[str]] = None,
allowed_scene_inds: Optional[Sequence[int]] = None,
randomize_start_rotation: bool = False,
**init_kwargs,
):
task_keys_to_task_spec_dicts = cls._filter_task_keys_to_task_spec_dicts(
task_keys_to_task_spec_dicts=cls.load_simple_pick_and_place_data_from_path(
stage=stage, base_dir=STARTER_HOME_SERVICE_DATA_DIR
),
allowed_task_keys=allowed_task_keys,
allowed_pickup_objs=allowed_pickup_objs,
allowed_start_receps=allowed_start_receps,
allowed_target_receps=allowed_target_receps,
allowed_scene_inds=allowed_scene_inds,
)
if randomize_start_rotation:
random_gen = random.Random(1)
for task_key in sorted(task_keys_to_task_spec_dicts):
for task_spec_dict in task_keys_to_task_spec_dicts[task_key]:
for room in task_spec_dict["agent_rotations"]:
task_spec_dict["agent_rotations"][room] = 360.0 * random_gen.random()
return cls(
stage=stage,
task_type=task_type,
task_keys_to_task_spec_dicts=task_keys_to_task_spec_dicts,
**init_kwargs
)
@classmethod
def _filter_task_keys_to_task_spec_dicts(
cls,
task_keys_to_task_spec_dicts: Dict[str, List[Dict[str, Any]]],
allowed_task_keys: Optional[Sequence[str]],
allowed_pickup_objs: Optional[Sequence[str]],
allowed_start_receps: Optional[Sequence[str]],
allowed_target_receps: Optional[Sequence[str]],
allowed_scene_inds: Optional[Sequence[int]],
) -> Dict[str, List[Dict[str, Any]]]:
if allowed_task_keys is not None:
task_keys_to_task_spec_dicts = {
task_key: task_keys_to_task_spec_dicts[task_key]
for task_key in allowed_task_keys
}
filtered_keys = []
if (
allowed_pickup_objs is not None
or allowed_start_receps is not None
or allowed_target_receps is not None
):
for task_key in task_keys_to_task_spec_dicts:
splits = task_key.split("_")
pickup_allowed = False
start_recep_allowed = False
target_recep_allowed = False
if splits[0] == "Pick":
pickup_obj = splits[1]
start_recep = splits[3]
target_recep = splits[6]
else:
pickup_obj = splits[2]
start_recep = splits[4]
target_recep = None
if allowed_pickup_objs is not None:
if pickup_obj in allowed_pickup_objs:
pickup_allowed = True
else:
pickup_allowed = True
if allowed_start_receps is not None:
if start_recep in allowed_start_receps:
start_recep_allowed = True
else:
start_recep_allowed = True
if allowed_target_receps is not None:
if "User" in allowed_target_receps and splits[0] == "Bring":
target_recep_allowed = True
elif target_recep is not None and target_recep in allowed_target_receps:
target_recep_allowed = True
else:
target_recep_allowed = True
if pickup_allowed and start_recep_allowed and target_recep_allowed:
filtered_keys.append(task_key)
else:
filtered_keys = [task_key for task_key in task_keys_to_task_spec_dicts]
task_keys_to_task_spec_dicts = {
task_key: task_keys_to_task_spec_dicts[task_key]
for task_key in filtered_keys
}
if allowed_scene_inds is not None:
for task_key, task_spec_dicts in task_keys_to_task_spec_dicts.items():
task_keys_to_task_spec_dicts[task_key] = [
task_spec_dict
for task_spec_dict in task_spec_dicts
if task_spec_dict["scene_index"] in allowed_scene_inds
]
return task_keys_to_task_spec_dicts
@classmethod
def load_simple_pick_and_place_data_from_path(
cls,
stage: str,
base_dir: Optional[str] = None,
) -> Dict[str, List[Dict[str, Any]]]:
stage = stage.lower()
if stage == "valid":
stage = "val"
data_path = os.path.abspath(os.path.join(base_dir, f"{stage}.pkl.gz"))
if not os.path.exists(data_path):
raise RuntimeError(f"No data at path {data_path}")
data = compress_pickle.load(path=data_path)
for task_key in data:
for ind, task_spec_dict in enumerate(data[task_key]):
task_spec_dict["task_key"] = task_key
if "index" not in task_spec_dict:
task_spec_dict["index"] = ind
if "stage" not in task_spec_dict:
task_spec_dict["stage"] = stage
return data
# @classmethod
# def from_scenes_at_runtime(
# cls,
# stage: str,
# allowed_scenes: Sequence[str],
# repeats_before_scene_change: int,
# **init_kwargs,
# ):
# assert "scene_to_allowed_inds" not in init_kwargs
# assert repeats_before_scene_change >= 1
# return cls(
# stage=stage,
# scenes_to_task_spec_dicts={
# scene: tuple(
# {scene: scene, "runtime_sample": True}
# for _ in range(repeats_before_scene_change)
# )
# for scene in allowed_scenes
# },
# **init_kwargs,
# )
    @property
    def length(self) -> float:
        """Length reported by the underlying task-spec iterator."""
        return self.task_spec_iterator.length
    @property
    def total_unique(self):
        """Total number of unique items reported by the task-spec iterator."""
        return self.task_spec_iterator.total_unique
    @property
    def last_sampled_task(self) -> Optional[HomeServiceBaseTask]:
        """Task most recently returned by `next_task` (None before any sampling)."""
        return self._last_sampled_task
    @property
    def all_observation_spaces_equal(self) -> bool:
        """Always True: every task sampled here shares one observation space."""
        return True
def close(self) -> None:
try:
self.env.stop()
except Exception as _:
pass
    def reset(self) -> None:
        """Rewind the task-spec iterator and clear the last-sampled task."""
        self.task_spec_iterator.reset()
        self._last_sampled_task = None
    def set_seed(self, seed: int) -> None:
        """Seed the task-spec iterator and record the sampler's main seed."""
        self.task_spec_iterator.seed = seed
        self.main_seed = seed
    @property
    def current_task_spec(self) -> HomeServiceTaskSpec:
        """The task specification currently loaded in the environment."""
        return self.env.current_task_spec
    def next_task(
        self,
        forced_task_spec: Optional[HomeServiceTaskSpec] = None,
        forced_start_scene_type: Optional[str] = None,
        **kwargs
    ) -> Optional[HomeServiceBaseTask]:
        """Advance to the next task and return it.

        Uses ``forced_task_spec`` when given, otherwise draws the next spec
        from the iterator.  Returns ``None`` when the iterator is exhausted.
        For "runtime sample" specs, failures during environment reset are
        logged and the sampler recursively retries with the following task;
        otherwise the exception propagates.
        """
        try:
            if forced_task_spec is None:
                task_spec: HomeServiceTaskSpec = next(self.task_spec_iterator)
            else:
                task_spec = forced_task_spec
        except StopIteration:
            # Iterator exhausted: record and return None.
            self._last_sampled_task = None
            return self._last_sampled_task
        runtime_sample = task_spec.runtime_sample
        try:
            self.env.reset(
                task_spec=task_spec,
                force_axis_aligned_start=self.force_axis_aligned_start,
                scene_type=forced_start_scene_type,
            )
            if self.task_type == HomeServiceTaskType.SIMPLE_PICK_AND_PLACE:
                # pick, place = sample_pick_and_place_target(
                #     env=self.env,
                #     randomizer=self.task_spec_iterator.random,
                #     pickup_target=pickup_target,
                #     place_target=place_target
                # )
                # self.env.current_task_spec.pickup_target = pick
                # self.env.current_task_spec.place_target = place
                self._last_sampled_task = HomeServiceSimplePickAndPlaceTask(
                    sensors=self.sensors,
                    env=self.env,
                    max_steps=self.max_steps,
                    discrete_actions=self.discrete_actions,
                    smooth_nav=self.smooth_nav,
                    smoothing_factor=self.smoothing_factor,
                    force_axis_aligned_start=self.force_axis_aligned_start,
                    require_done_action=self.require_done_action,
                    task_spec_in_metrics=self.task_spec_in_metrics,
                )
            else:
                # Fallback for every other task type.
                self._last_sampled_task = HomeServiceBaseTask(
                    sensors=self.sensors,
                    env=self.env,
                    max_steps=self.max_steps,
                    discrete_actions=self.discrete_actions,
                    smooth_nav=self.smooth_nav,
                    smoothing_factor=self.smoothing_factor,
                    force_axis_aligned_start=self.force_axis_aligned_start,
                    require_done_action=self.require_done_action,
                    task_spec_in_metrics=self.task_spec_in_metrics,
                )
        except Exception as e:
            if runtime_sample:
                # Runtime-sampled specs are disposable: log and try the next one.
                get_logger().error(
                    "Encountered exception while sampling a next task."
                    " As this next task was a 'runtime sample' we are"
                    " simply returning the next task."
                )
                get_logger().error(traceback.format_exc())
                return self.next_task()
            else:
                raise e
        return self._last_sampled_task
| [
"copy.deepcopy",
"env.utils.include_object_data",
"allenact.utils.system.get_logger",
"env.environment.HomeServiceSimpleTaskOrderTaskSpec",
"compress_pickle.load",
"random.randint",
"allenact_plugins.ithor_plugin.ithor_util.round_to_factor",
"env.environment.HomeServiceTHOREnvironment",
"random.Rand... | [((36121, 36145), 'random.Random', 'random.Random', (['self.seed'], {}), '(self.seed)\n', (36134, 36145), False, 'import random\n'), ((40286, 40329), 'copy.deepcopy', 'copy.deepcopy', (['task_keys_to_task_spec_dicts'], {}), '(task_keys_to_task_spec_dicts)\n', (40299, 40329), False, 'import copy\n'), ((41111, 41164), 'env.environment.HomeServiceTHOREnvironment', 'HomeServiceTHOREnvironment', ([], {}), '(**home_service_env_kwargs)\n', (41137, 41164), False, 'from env.environment import HomeServiceSimpleTaskOrderTaskSpec, HomeServiceTHOREnvironment, HomeServiceTaskSpec\n'), ((49972, 50008), 'compress_pickle.load', 'compress_pickle.load', ([], {'path': 'data_path'}), '(path=data_path)\n', (49992, 50008), False, 'import compress_pickle\n'), ((38972, 39028), 'env.environment.HomeServiceSimpleTaskOrderTaskSpec', 'HomeServiceSimpleTaskOrderTaskSpec', ([], {}), '(**new_task_spec_dict)\n', (39006, 39028), False, 'from env.environment import HomeServiceSimpleTaskOrderTaskSpec, HomeServiceTHOREnvironment, HomeServiceTaskSpec\n'), ((39062, 39103), 'env.environment.HomeServiceTaskSpec', 'HomeServiceTaskSpec', ([], {}), '(**new_task_spec_dict)\n', (39081, 39103), False, 'from env.environment import HomeServiceSimpleTaskOrderTaskSpec, HomeServiceTHOREnvironment, HomeServiceTaskSpec\n'), ((40069, 40098), 'random.randint', 'random.randint', (['(0)', '(2 * 30 - 1)'], {}), '(0, 2 * 30 - 1)\n', (40083, 40098), False, 'import random\n'), ((45841, 45857), 'random.Random', 'random.Random', (['(1)'], {}), '(1)\n', (45854, 45857), False, 'import random\n'), ((49808, 49849), 'os.path.join', 'os.path.join', (['base_dir', 'f"""{stage}.pkl.gz"""'], {}), "(base_dir, f'{stage}.pkl.gz')\n", (49820, 49849), False, 'import os\n'), ((49866, 49891), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (49880, 49891), False, 'import os\n'), ((2042, 2095), 'allenact_plugins.ithor_plugin.ithor_util.round_to_factor', 'round_to_factor', (["agent_loc['rotation']", 
'base_rotation'], {}), "(agent_loc['rotation'], base_rotation)\n", (2057, 2095), False, 'from allenact_plugins.ithor_plugin.ithor_util import round_to_factor\n'), ((2154, 2205), 'allenact_plugins.ithor_plugin.ithor_util.round_to_factor', 'round_to_factor', (["agent_loc['horizon']", 'base_horizon'], {}), "(agent_loc['horizon'], base_horizon)\n", (2169, 2205), False, 'from allenact_plugins.ithor_plugin.ithor_util import round_to_factor\n'), ((21112, 21152), 'env.utils.include_object_data', 'include_object_data', (['self.env.controller'], {}), '(self.env.controller)\n', (21131, 21152), False, 'from env.utils import HomeServiceActionSpace, include_object_data\n'), ((5084, 5136), 'numpy.array', 'np.array', (["[v['x'], v['y'], v['z']]"], {'dtype': 'np.float32'}), "([v['x'], v['y'], v['z']], dtype=np.float32)\n", (5092, 5136), True, 'import numpy as np\n'), ((6677, 6717), 'env.utils.include_object_data', 'include_object_data', (['self.env.controller'], {}), '(self.env.controller)\n', (6696, 6717), False, 'from env.utils import HomeServiceActionSpace, include_object_data\n'), ((22631, 22671), 'env.utils.include_object_data', 'include_object_data', (['self.env.controller'], {}), '(self.env.controller)\n', (22650, 22671), False, 'from env.utils import HomeServiceActionSpace, include_object_data\n'), ((7583, 7623), 'env.utils.include_object_data', 'include_object_data', (['self.env.controller'], {}), '(self.env.controller)\n', (7602, 7623), False, 'from env.utils import HomeServiceActionSpace, include_object_data\n'), ((54784, 54806), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (54804, 54806), False, 'import traceback\n'), ((8114, 8154), 'env.utils.include_object_data', 'include_object_data', (['self.env.controller'], {}), '(self.env.controller)\n', (8133, 8154), False, 'from env.utils import HomeServiceActionSpace, include_object_data\n'), ((54513, 54525), 'allenact.utils.system.get_logger', 'get_logger', ([], {}), '()\n', (54523, 54525), False, 'from 
allenact.utils.system import get_logger\n'), ((54765, 54777), 'allenact.utils.system.get_logger', 'get_logger', ([], {}), '()\n', (54775, 54777), False, 'from allenact.utils.system import get_logger\n'), ((8536, 8576), 'env.utils.include_object_data', 'include_object_data', (['self.env.controller'], {}), '(self.env.controller)\n', (8555, 8576), False, 'from env.utils import HomeServiceActionSpace, include_object_data\n'), ((22132, 22144), 'allenact.utils.system.get_logger', 'get_logger', ([], {}), '()\n', (22142, 22144), False, 'from allenact.utils.system import get_logger\n'), ((26363, 26396), 'stringcase.pascalcase', 'stringcase.pascalcase', (['scene_type'], {}), '(scene_type)\n', (26384, 26396), False, 'import stringcase\n')] |
# -*- coding: utf-8 -*-
import os
import tempfile
from argparse import ArgumentParser
import numpy as np
from cytomine import Cytomine
from cytomine_utilities import CytomineJob
from sldc import StandardOutputLogger, Logger
from cell_counting.cytomine_utils import get_dataset
from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none
from cell_counting.extratrees_methods import CellCountRandomizedTrees
__author__ = "<NAME> <<EMAIL>>"
__version__ = "0.1"
def train(argv):
    """Train an Extra-Trees object-counting model from Cytomine annotations.

    Parses the command line in ``argv``, downloads the annotated training
    set from the configured Cytomine project, fits a
    ``CellCountRandomizedTrees`` estimator and reports progress / saves the
    model through the Cytomine job API.

    Parameters
    ----------
    argv : list of str
        Command-line arguments, typically ``sys.argv[1:]``.
    """
    parser = ArgumentParser(prog="Extra-Trees Object Counter Model Builder")

    # Cytomine
    parser.add_argument('--cytomine_host', dest='cytomine_host',
                        default='demo.cytomine.be', help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='cytomine_public_key',
                        help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='cytomine_private_key',
                        help="The Cytomine private key")
    parser.add_argument('--cytomine_base_path', dest='cytomine_base_path',
                        default='/api/', help="The Cytomine base path")
    parser.add_argument('--cytomine_working_path', dest='cytomine_working_path',
                        default=None, help="The working directory (eg: /tmp)")
    parser.add_argument('--cytomine_id_software', dest='cytomine_software', type=int,
                        help="The Cytomine software identifier")
    parser.add_argument('--cytomine_id_project', dest='cytomine_project', type=int,
                        help="The Cytomine project identifier")
    parser.add_argument('--cytomine_force_download', dest='cytomine_force_download', type=bool, default=True,
                        help="Force download from Cytomine or not")

    # Objects
    parser.add_argument('--cytomine_object_term', dest='cytomine_object_term', type=int,
                        help="The Cytomine identifier of object term")
    parser.add_argument('--cytomine_object_user', dest='cytomine_object_user', type=int,
                        help="The Cytomine identifier of object owner")
    parser.add_argument('--cytomine_object_reviewed_only', dest='cytomine_object_reviewed_only', type=bool,
                        help="Whether objects have to be reviewed or not")

    # ROI
    parser.add_argument('--cytomine_roi_term', dest='cytomine_roi_term', type=int, default=None,
                        help="The Cytomine identifier of region of interest term")
    parser.add_argument('--cytomine_roi_user', dest='cytomine_roi_user', type=int,
                        help="The Cytomine identifier of ROI owner")
    parser.add_argument('--cytomine_roi_reviewed_only', dest='cytomine_roi_reviewed_only', type=bool,
                        help="Whether ROIs have to be reviewed or not")

    # Pre-processing
    parser.add_argument('--mean_radius', dest='mean_radius', type=int, required=True,
                        help="The mean radius of object to detect")
    parser.add_argument('--pre_transformer', dest='pre_transformer',
                        default=None, choices=['edt', 'euclidean_distance_transform', 'density', None, 'None'],
                        help="Scoremap transformer (None, edt, euclidean_distance_transform, density)")
    parser.add_argument('--pre_alpha', dest='pre_alpha', action='append', type=int,
                        help="Exponential decrease rate of distance (if EDT)")

    # Subwindows
    parser.add_argument('--sw_input_size', dest='sw_input_size', action='append', type=int,
                        help="Size of input subwindow")
    parser.add_argument('--sw_output_size', dest='sw_output_size', action='append', type=int,
                        help="Size of output subwindow (ignored for FCRN)")
    parser.add_argument('--sw_extr_mode', dest='sw_extr_mode', choices=['random', 'sliding', 'scoremap_constrained'],
                        help="Mode of extraction (random, scoremap_constrained)")
    parser.add_argument('--sw_extr_score_thres', dest='sw_extr_score_thres', action='append', type=float,
                        help="Minimum threshold to be foreground in subwindows extraction"
                             "(if 'scoremap_constrained' mode)")
    parser.add_argument('--sw_extr_ratio', dest='sw_extr_ratio', action='append', type=float,
                        help="Ratio of background subwindows extracted in subwindows "
                             "extraction (if 'scoremap_constrained' mode)")
    parser.add_argument('--sw_extr_npi', dest="sw_extr_npi", action='append', type=int,
                        help="Number of extracted subwindows per image (if 'random' mode)")
    parser.add_argument('--sw_colorspace', dest="sw_colorspace", type=str, default='RGB__rgb',
                        help="List of colorspace features")

    # Forest
    parser.add_argument('--forest_method', dest='forest_method', type=str,
                        action='append', choices=['ET-clf', 'ET-regr', 'RF-clf', 'RF-regr'],
                        help="Type of forest method")
    parser.add_argument('--forest_n_estimators', dest='forest_n_estimators', action='append', type=int,
                        help="Number of trees in forest")
    parser.add_argument('--forest_min_samples_split', dest='forest_min_samples_split', action='append', type=int,
                        help="Minimum number of samples for further splitting")
    parser.add_argument('--forest_max_features', dest='forest_max_features', action='append',
                        help="Max features")

    # Dataset augmentation
    parser.add_argument('--augmentation', dest='augmentation', type=bool)
    parser.add_argument('--aug_rotation_range', dest='rotation_range', type=float)
    parser.add_argument('--aug_width_shift_range', dest='width_shift_range', type=float)
    parser.add_argument('--aug_height_shift_range', dest='height_shift_range', type=float)
    parser.add_argument('--aug_zoom_range', dest='zoom_range', type=float)
    parser.add_argument('--aug_fill_mode', dest='fill_mode', type=str)
    parser.add_argument('--aug_horizontal_flip', dest='horizontal_flip', type=bool)
    parser.add_argument('--aug_vertical_flip', dest='vertical_flip', type=bool)
    parser.add_argument('--aug_featurewise_center', dest='featurewise_center', type=bool)
    parser.add_argument('--aug_featurewise_std_normalization', dest='featurewise_std_normalization', type=bool)

    # Execution
    parser.add_argument('--n_jobs', dest='n_jobs', type=int, default=1, help="Number of jobs")
    parser.add_argument('--verbose', '-v', dest='verbose', default=0, help="Level of verbosity")

    params, other = parser.parse_known_args(argv)
    if params.cytomine_working_path is None:
        params.cytomine_working_path = os.path.join(tempfile.gettempdir(), "cytomine")
    make_dirs(params.cytomine_working_path)

    # Fill in defaults for every parameter that was not supplied on the CLI.
    params.pre_transformer = check_default(params.pre_transformer, None, return_list=False)
    params.pre_alpha = check_default(params.pre_alpha, 5)
    params.forest_method = check_default(params.forest_method, 'ET-regr')
    params.forest_n_estimators = check_default(params.forest_n_estimators, 1)
    params.forest_min_samples_split = check_default(params.forest_min_samples_split, 2)
    params.forest_max_features = check_default(params.forest_max_features, 'sqrt')
    params.forest_max_features = check_max_features(params.forest_max_features)
    params.sw_input_size = check_default(params.sw_input_size, 4)
    params.sw_input_size = [(s, s) for s in params.sw_input_size]
    params.sw_output_size = check_default(params.sw_output_size, 1)
    params.sw_output_size = [(s, s) for s in params.sw_output_size]
    params.sw_extr_mode = check_default(params.sw_extr_mode, 'scoremap_constrained', return_list=False)
    params.sw_extr_ratio = check_default(params.sw_extr_ratio, 0.5)
    params.sw_extr_score_thres = check_default(params.sw_extr_score_thres, 0.4)
    params.sw_extr_npi = check_default(params.sw_extr_npi, 100)
    params.sw_colorspace = params.sw_colorspace.split(' ')
    params.augmentation = check_default(params.augmentation, False, return_list=False)
    if params.augmentation:
        params.rotation_range = check_default(params.rotation_range, 30., return_list=False)
        params.width_shift_range = check_default(params.width_shift_range, 0.3, return_list=False)
        params.height_shift_range = check_default(params.height_shift_range, 0.3, return_list=False)
        params.zoom_range = check_default(params.zoom_range, 0.3, return_list=False)
        params.fill_mode = check_default(params.fill_mode, 'constant', return_list=False)
        params.horizontal_flip = check_default(params.horizontal_flip, True, return_list=False)
        params.vertical_flip = check_default(params.vertical_flip, True, return_list=False)
        params.featurewise_center = check_default(params.featurewise_center, False, return_list=False)
        params.featurewise_std_normalization = check_default(params.featurewise_std_normalization, False,
                                                             return_list=False)
    else:
        # Augmentation disabled: force neutral transform parameters.
        params.rotation_range = 0.
        params.width_shift_range = 0.
        params.height_shift_range = 0.
        params.zoom_range = 0.
        params.fill_mode = 'reflect'
        params.horizontal_flip = False
        params.vertical_flip = False
        params.featurewise_center = False
        params.featurewise_std_normalization = False
    params = params_remove_list(params)

    # Initialize logger
    logger = StandardOutputLogger(params.verbose)
    # BUGFIX: dict.iteritems() exists only on Python 2 and raises
    # AttributeError on Python 3; .items() works on both.
    for key, val in sorted(vars(params).items()):
        logger.info("[PARAMETER] {}: {}".format(key, val))

    # Initialize Cytomine client
    cytomine = Cytomine(
        params.cytomine_host,
        params.cytomine_public_key,
        params.cytomine_private_key,
        working_path=params.cytomine_working_path,
        base_path=params.cytomine_base_path,
        verbose=(params.verbose >= Logger.DEBUG)
    )

    # Start job
    with CytomineJob(cytomine,
                     params.cytomine_software,
                     params.cytomine_project,
                     parameters=vars(params_remove_none(params))) as job:
        cytomine.update_job_status(job.job, status_comment="Starting...", progress=0)
        cytomine.update_job_status(job.job, status_comment="Loading training set...", progress=1)
        X, y = get_dataset(cytomine, params.cytomine_working_path, params.cytomine_project, params.cytomine_object_term,
                           params.cytomine_roi_term, params.cytomine_object_user, params.cytomine_object_reviewed_only,
                           params.cytomine_roi_user, params.cytomine_roi_reviewed_only, params.cytomine_force_download)
        logger.d("X size: {} samples".format(len(X)))
        logger.d("y size: {} samples".format(len(y)))

        cytomine.update_job_status(job.job, status_comment="Training forest...", progress=5)
        estimator = CellCountRandomizedTrees(logger=logger, **vars(params))
        estimator.fit(np.asarray(X), np.asarray(y))

        cytomine.update_job_status(job.job, status_comment="Saving (best) model", progress=95)
        model_path = os.path.join(params.cytomine_working_path, "models", str(params.cytomine_software))
        model_file = os.path.join(model_path, "{}.pkl".format(job.job.id))
        make_dirs(model_path)
        estimator.save(model_file)

        cytomine.update_job_status(job.job, status_comment="Finished.", progress=100)
if __name__ == '__main__':
    # Script entry point: forward the CLI arguments (minus the program name).
    import sys
    train(sys.argv[1:])
| [
"cell_counting.utils.params_remove_list",
"argparse.ArgumentParser",
"cell_counting.utils.make_dirs",
"numpy.asarray",
"cytomine.Cytomine",
"tempfile.gettempdir",
"cell_counting.utils.check_default",
"cell_counting.cytomine_utils.get_dataset",
"cell_counting.utils.check_max_features",
"sldc.Standa... | [((551, 614), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'prog': '"""Extra-Trees Object Counter Model Builder"""'}), "(prog='Extra-Trees Object Counter Model Builder')\n", (565, 614), False, 'from argparse import ArgumentParser\n'), ((6881, 6920), 'cell_counting.utils.make_dirs', 'make_dirs', (['params.cytomine_working_path'], {}), '(params.cytomine_working_path)\n', (6890, 6920), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((6951, 7013), 'cell_counting.utils.check_default', 'check_default', (['params.pre_transformer', 'None'], {'return_list': '(False)'}), '(params.pre_transformer, None, return_list=False)\n', (6964, 7013), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((7037, 7071), 'cell_counting.utils.check_default', 'check_default', (['params.pre_alpha', '(5)'], {}), '(params.pre_alpha, 5)\n', (7050, 7071), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((7099, 7145), 'cell_counting.utils.check_default', 'check_default', (['params.forest_method', '"""ET-regr"""'], {}), "(params.forest_method, 'ET-regr')\n", (7112, 7145), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((7179, 7223), 'cell_counting.utils.check_default', 'check_default', (['params.forest_n_estimators', '(1)'], {}), '(params.forest_n_estimators, 1)\n', (7192, 7223), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((7262, 7311), 'cell_counting.utils.check_default', 'check_default', (['params.forest_min_samples_split', '(2)'], {}), '(params.forest_min_samples_split, 2)\n', (7275, 7311), False, 'from cell_counting.utils import make_dirs, check_default, 
params_remove_list, check_max_features, params_remove_none\n'), ((7345, 7394), 'cell_counting.utils.check_default', 'check_default', (['params.forest_max_features', '"""sqrt"""'], {}), "(params.forest_max_features, 'sqrt')\n", (7358, 7394), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((7428, 7474), 'cell_counting.utils.check_max_features', 'check_max_features', (['params.forest_max_features'], {}), '(params.forest_max_features)\n', (7446, 7474), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((7502, 7540), 'cell_counting.utils.check_default', 'check_default', (['params.sw_input_size', '(4)'], {}), '(params.sw_input_size, 4)\n', (7515, 7540), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((7635, 7674), 'cell_counting.utils.check_default', 'check_default', (['params.sw_output_size', '(1)'], {}), '(params.sw_output_size, 1)\n', (7648, 7674), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((7769, 7846), 'cell_counting.utils.check_default', 'check_default', (['params.sw_extr_mode', '"""scoremap_constrained"""'], {'return_list': '(False)'}), "(params.sw_extr_mode, 'scoremap_constrained', return_list=False)\n", (7782, 7846), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((7874, 7914), 'cell_counting.utils.check_default', 'check_default', (['params.sw_extr_ratio', '(0.5)'], {}), '(params.sw_extr_ratio, 0.5)\n', (7887, 7914), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((7948, 7994), 'cell_counting.utils.check_default', 'check_default', (['params.sw_extr_score_thres', '(0.4)'], 
{}), '(params.sw_extr_score_thres, 0.4)\n', (7961, 7994), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((8020, 8058), 'cell_counting.utils.check_default', 'check_default', (['params.sw_extr_npi', '(100)'], {}), '(params.sw_extr_npi, 100)\n', (8033, 8058), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((8145, 8205), 'cell_counting.utils.check_default', 'check_default', (['params.augmentation', '(False)'], {'return_list': '(False)'}), '(params.augmentation, False, return_list=False)\n', (8158, 8205), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((9554, 9580), 'cell_counting.utils.params_remove_list', 'params_remove_list', (['params'], {}), '(params)\n', (9572, 9580), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((9619, 9655), 'sldc.StandardOutputLogger', 'StandardOutputLogger', (['params.verbose'], {}), '(params.verbose)\n', (9639, 9655), False, 'from sldc import StandardOutputLogger, Logger\n'), ((9818, 10039), 'cytomine.Cytomine', 'Cytomine', (['params.cytomine_host', 'params.cytomine_public_key', 'params.cytomine_private_key'], {'working_path': 'params.cytomine_working_path', 'base_path': 'params.cytomine_base_path', 'verbose': '(params.verbose >= Logger.DEBUG)'}), '(params.cytomine_host, params.cytomine_public_key, params.\n cytomine_private_key, working_path=params.cytomine_working_path,\n base_path=params.cytomine_base_path, verbose=params.verbose >= Logger.DEBUG\n )\n', (9826, 10039), False, 'from cytomine import Cytomine\n'), ((8266, 8327), 'cell_counting.utils.check_default', 'check_default', (['params.rotation_range', '(30.0)'], {'return_list': '(False)'}), '(params.rotation_range, 30.0, return_list=False)\n', (8279, 8327), 
False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((8362, 8425), 'cell_counting.utils.check_default', 'check_default', (['params.width_shift_range', '(0.3)'], {'return_list': '(False)'}), '(params.width_shift_range, 0.3, return_list=False)\n', (8375, 8425), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((8462, 8526), 'cell_counting.utils.check_default', 'check_default', (['params.height_shift_range', '(0.3)'], {'return_list': '(False)'}), '(params.height_shift_range, 0.3, return_list=False)\n', (8475, 8526), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((8555, 8611), 'cell_counting.utils.check_default', 'check_default', (['params.zoom_range', '(0.3)'], {'return_list': '(False)'}), '(params.zoom_range, 0.3, return_list=False)\n', (8568, 8611), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((8639, 8701), 'cell_counting.utils.check_default', 'check_default', (['params.fill_mode', '"""constant"""'], {'return_list': '(False)'}), "(params.fill_mode, 'constant', return_list=False)\n", (8652, 8701), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((8735, 8797), 'cell_counting.utils.check_default', 'check_default', (['params.horizontal_flip', '(True)'], {'return_list': '(False)'}), '(params.horizontal_flip, True, return_list=False)\n', (8748, 8797), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((8829, 8889), 'cell_counting.utils.check_default', 'check_default', (['params.vertical_flip', '(True)'], {'return_list': '(False)'}), '(params.vertical_flip, True, return_list=False)\n', 
(8842, 8889), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((8926, 8992), 'cell_counting.utils.check_default', 'check_default', (['params.featurewise_center', '(False)'], {'return_list': '(False)'}), '(params.featurewise_center, False, return_list=False)\n', (8939, 8992), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((9040, 9117), 'cell_counting.utils.check_default', 'check_default', (['params.featurewise_std_normalization', '(False)'], {'return_list': '(False)'}), '(params.featurewise_std_normalization, False, return_list=False)\n', (9053, 9117), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((10497, 10807), 'cell_counting.cytomine_utils.get_dataset', 'get_dataset', (['cytomine', 'params.cytomine_working_path', 'params.cytomine_project', 'params.cytomine_object_term', 'params.cytomine_roi_term', 'params.cytomine_object_user', 'params.cytomine_object_reviewed_only', 'params.cytomine_roi_user', 'params.cytomine_roi_reviewed_only', 'params.cytomine_force_download'], {}), '(cytomine, params.cytomine_working_path, params.cytomine_project,\n params.cytomine_object_term, params.cytomine_roi_term, params.\n cytomine_object_user, params.cytomine_object_reviewed_only, params.\n cytomine_roi_user, params.cytomine_roi_reviewed_only, params.\n cytomine_force_download)\n', (10508, 10807), False, 'from cell_counting.cytomine_utils import get_dataset\n'), ((11457, 11478), 'cell_counting.utils.make_dirs', 'make_dirs', (['model_path'], {}), '(model_path)\n', (11466, 11478), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n'), ((6842, 6863), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (6861, 6863), False, 'import tempfile\n'), ((11143, 
11156), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (11153, 11156), True, 'import numpy as np\n'), ((11158, 11171), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (11168, 11171), True, 'import numpy as np\n'), ((10260, 10286), 'cell_counting.utils.params_remove_none', 'params_remove_none', (['params'], {}), '(params)\n', (10278, 10286), False, 'from cell_counting.utils import make_dirs, check_default, params_remove_list, check_max_features, params_remove_none\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2019 by <NAME>
# Generates surrogate data for non-lynear analysis
#
# Uses algorithm from <NAME>., & <NAME>. (1996).
# Improved surrogate data for nonlinearity tests. Physical Review Letters, 77(4), 635.
import numpy as np
import scipy.special
import cmath
from mfdfa import mfdfa, get_hurst, create_logscale
from fgnoise import fgnoise
def schreiber_schmitz(x, h, htol, maxitr):
    """Generate an amplitude-adjusted noise surrogate of *x* with a target
    Hurst exponent.

    Repeatedly draws noise via ``fgnoise(n, h_gen)`` (presumably fractional
    Gaussian noise -- confirm in the fgnoise module), rank-orders the sorted
    amplitudes of ``x`` onto it, measures the Hurst exponent with MF-DFA, and
    nudges the generator exponent until the estimate lies within ``htol`` of
    ``h`` or ``maxitr`` iterations have been spent.

    Returns ``(surrogate, measured_hurst, iterations_used)``.  Note that at
    least one iteration must run (``maxitr >= 1``); otherwise no surrogate is
    ever created and a NameError is raised, matching the original behavior.
    """
    n = len(x)
    amplitudes = np.sort(x)                  # target amplitude distribution
    scale = create_logscale(10, n / 4, 100)
    h_gen = h                                # exponent fed to the generator
    h_est = 0                                # last measured Hurst exponent
    count = 0
    while (h_est < h - htol or h_est > h + htol) and count < maxitr:
        surrogate = fgnoise(n, h_gen)
        # indices of the surrogate samples in ascending order
        ranks = sorted(range(n), key=lambda i: surrogate[i])
        surrogate[ranks] = amplitudes        # amplitude adjustment step
        h_est = get_hurst(scale, mfdfa(surrogate, scale, 2, 1)[1])
        h_gen += h_gen - h_est               # over-relaxation toward target
        count += 1
    return surrogate, h_est, count
| [
"numpy.sort",
"mfdfa.mfdfa",
"fgnoise.fgnoise",
"mfdfa.create_logscale"
] | [((505, 515), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (512, 515), True, 'import numpy as np\n'), ((528, 559), 'mfdfa.create_logscale', 'create_logscale', (['(10)', '(n / 4)', '(100)'], {}), '(10, n / 4, 100)\n', (543, 559), False, 'from mfdfa import mfdfa, get_hurst, create_logscale\n'), ((625, 639), 'fgnoise.fgnoise', 'fgnoise', (['n', 'h1'], {}), '(n, h1)\n', (632, 639), False, 'from fgnoise import fgnoise\n'), ((739, 760), 'mfdfa.mfdfa', 'mfdfa', (['y', 'scale', '(2)', '(1)'], {}), '(y, scale, 2, 1)\n', (744, 760), False, 'from mfdfa import mfdfa, get_hurst, create_logscale\n')] |
import os
import logging
logger = logging.getLogger(__name__)
import numpy as np
import astropy.io.fits as fits
from ...echelle.imageproc import combine_images
from ..common import load_obslog, load_config
from .common import parse_image
def reduce_rawdata():
    """Reduce the Subaru/HDS spectra.

    Reads the observing log and the configuration file, counts the frames of
    each (setup, binning) pair, and for every pair containing OBJECT frames
    combines the bias frames of both CCD readouts into a single FITS file and
    prepares the flat-field frame grouping.  Output folders are taken from the
    [reduce] section of the config.
    """
    # read obslog and config
    config = load_config('HDS\S*\.cfg$')
    logtable = load_obslog('\S*\.obslog$', fmt='astropy')

    # extract keywords from config file
    section = config['data']
    rawpath = section.get('rawpath')

    section = config['reduce']
    midpath = section.get('midpath')
    odspath = section.get('odspath')
    figpath = section.get('figpath')
    mode = section.get('mode')
    fig_format = section.get('fig_format')
    oned_suffix = section.get('oned_suffix')
    ncores = section.get('ncores')

    # create output folders if they do not exist yet
    if not os.path.exists(figpath): os.mkdir(figpath)
    if not os.path.exists(odspath): os.mkdir(odspath)
    if not os.path.exists(midpath): os.mkdir(midpath)

    # determine number of cores to be used
    if ncores == 'max':
        ncores = os.cpu_count()
    else:
        ncores = min(os.cpu_count(), int(ncores))

    ############ count different setups #############
    # setup_lst maps (setup, binning) -> {objtype: frame count}
    setup_lst = {}
    for logitem in logtable:
        setup = logitem['setup']
        objtype = logitem['objtype']
        binning = logitem['binning']
        if (setup, binning) not in setup_lst:
            setup_lst[(setup, binning)] = {}
        if objtype not in setup_lst[(setup, binning)]:
            setup_lst[(setup, binning)][objtype] = 0
        setup_lst[(setup, binning)][objtype] += 1

    # print the frame counts and keep only the (setup, binning) pairs that
    # actually contain science (OBJECT) frames
    object_setup_lst = []
    for (setup, binning), objtype_lst in sorted(setup_lst.items()):
        print('Setup: {} Binning: {}'.format(setup, binning))
        count_total = 0
        for objtype, count in sorted(objtype_lst.items()):
            print(' - {:10s}: {:3d} Frames'.format(objtype, count))
            count_total += count
            if objtype=='OBJECT':
                object_setup_lst.append((setup, binning))
        print(' - {:10s}: {:3d} Frames'.format('Total', count_total))
    object_setup_lst = list(set(object_setup_lst))

    # loop over different setups and binnings
    for sel_setup, sel_binning in object_setup_lst:
        print('Selected setup={}; selected binning={}'.format(
            sel_setup, sel_binning))

        ############### parse bias #################
        # select well-exposed bias frames of the current setup/binning
        bias_filter = lambda item: item['setup']==sel_setup \
                            and item['binning']==sel_binning \
                            and item['objtype']=='BIAS' \
                            and item['object']=='BIAS' \
                            and item['nsat_1']<100 \
                            and item['q95_1']<10000
        bias_file = config['reduce.bias'].get('bias_file')

        if mode=='debug' and os.path.exists(bias_file):
            # a previously combined bias already exists; skip re-combination
            pass
        else:
            bias_data_lst1 = []
            bias_data_lst2 = []
            bias_card_lst = []

            logitem_lst = list(filter(bias_filter, logtable))

            # get the number of bias images
            n_bias = len(logitem_lst)
            if n_bias == 0:
                # NOTE(review): with zero bias frames the combination below
                # will fail; this branch probably should skip to the next
                # setup instead of falling through -- confirm intent.
                pass

            fmt_str = (' - {:>5s} {:12s} {:12s} {:<7s} {:<7s} {:1s}I2 {:>7}'
                    ' {:<7s} {:5}' # setup, binning
                    ' {:>7} {:>7} {:>5} {:>5}' # nsat_1, nsat_2, q95_1, q95_2
                    )
            head_str = fmt_str.format('FID', 'fileid1', 'fileid2', 'objtype',
                        'object', '', 'exptime', 'setup', 'binning',
                        'nsat_1', 'nsat_2', 'q95_1', 'q95_2')
            print(head_str)

            for ifile, logitem in enumerate(logitem_lst):
                fname1 = '{}.fits'.format(logitem['fileid1'])
                fname2 = '{}.fits'.format(logitem['fileid2'])
                filename1 = os.path.join(rawpath, fname1)
                filename2 = os.path.join(rawpath, fname2)
                data1, head1 = fits.getdata(filename1, header=True)
                data2, head2 = fits.getdata(filename2, header=True)
                data1 = parse_image(data1, head1)
                data2 = parse_image(data2, head2)

                string = fmt_str.format('[{:d}]'.format(logitem['frameid']),
                            logitem['fileid1'], logitem['fileid2'],
                            logitem['objtype'], logitem['object'],
                            logitem['i2'], logitem['exptime'],
                            logitem['setup'], logitem['binning'],
                            logitem['nsat_1'], logitem['nsat_2'],
                            logitem['q95_1'], logitem['q95_2'])
                # NOTE(review): print_wrapper is not imported in this module;
                # it is presumably meant to come from ..common -- verify.
                print(print_wrapper(string, logitem))

                bias_data_lst1.append(data1)
                bias_data_lst2.append(data2)

                # append the file information as FITS header cards
                prefix = 'HIERARCH GAMSE BIAS FILE {:03d}'.format(ifile+1)
                card = (prefix+' FILEID1', logitem['fileid1'])
                bias_card_lst.append(card)
                card = (prefix+' FILEID2', logitem['fileid2'])
                bias_card_lst.append(card)

            prefix = 'HIERARCH GAMSE BIAS '
            bias_card_lst.append((prefix + 'NFILE', n_bias))

            # combine bias images
            bias_data_lst1 = np.array(bias_data_lst1)
            bias_data_lst2 = np.array(bias_data_lst2)

            combine_mode = 'mean'
            # NOTE(review): `section` still refers to config['reduce'] here;
            # these clipping parameters may be intended to come from
            # config['reduce.bias'] instead -- verify the config layout.
            cosmic_clip = section.getfloat('cosmic_clip')
            maxiter = section.getint('maxiter')
            # masking only makes sense with at least 3 frames
            maskmode = (None, 'max')[n_bias>=3]

            bias_combine1 = combine_images(bias_data_lst1,
                    mode       = combine_mode,
                    upper_clip = cosmic_clip,
                    maxiter    = maxiter,
                    maskmode   = maskmode,
                    ncores     = ncores,
                    )
            bias_combine2 = combine_images(bias_data_lst2,
                    mode       = combine_mode,
                    upper_clip = cosmic_clip,
                    maxiter    = maxiter,
                    maskmode   = maskmode,
                    ncores     = ncores,
                    )

            bias_card_lst.append((prefix+'COMBINE_MODE', combine_mode))
            bias_card_lst.append((prefix+'COSMIC_CLIP', cosmic_clip))
            bias_card_lst.append((prefix+'MAXITER', maxiter))
            bias_card_lst.append((prefix+'MASK_MODE', str(maskmode)))

            # create the hdu list to be saved
            hdu_lst = fits.HDUList()
            # create new FITS Header for bias
            head = fits.Header()
            # pack new card list into header and bias_card_lst
            for card in bias_card_lst:
                head.append(card)
            head['HIERARCH GAMSE FILECONTENT 0'] = 'BIAS COMBINED'
            hdu_lst.append(fits.PrimaryHDU(data=bias_combine1, header=head))
            hdu_lst.append(fits.ImageHDU(data=bias_combine2, header=head))

            # write to FITS file
            hdu_lst.writeto(bias_file, overwrite=True)

            message = 'Bias image written to "{}"'.format(bias_file)
            logger.info(message)
            print(message)

        ############### find flat groups #################
        flat_file_str = config['reduce.flat'].get('flat_file')
        # fix: the filename template read above is flat_file_str; the
        # original referenced the unassigned name `flat_file` here, which
        # raised UnboundLocalError whenever this code path ran.
        flat_file = flat_file_str.format(sel_setup, sel_binning)

        if mode=='debug' and os.path.exists(flat_file):
            continue
            # pass
        else:
            filterfunc = lambda item: item['setup']==sel_setup \
                                and item['binning']==sel_binning \
                                and item['objtype']=='FLAT' \
                                and item['object']=='FLAT'
            logitem_lst = list(filter(filterfunc, logtable))

            fmt_str = (' - {:>5s} {:12s} {:12s} {:<7s} {:<7s} {:1s}I2 {:>7}'
                    ' {:<7s} {:5} {:8}' # setup, binning, slitsize
                    ' {:>7} {:>7} {:>5} {:>5}' # nsat_1, nsat_2, q95_1, q95_2
                    )
            head_str = fmt_str.format('FID', 'fileid1', 'fileid2',
                        'objtype', 'object', '', 'exptime',
                        'setup', 'binning', 'slitsize',
                        'nsat_1', 'nsat_2', 'q95_1', 'q95_2')

            # NOTE(review): the function body appears truncated from here on
            # in this source; the flat frames are iterated but never grouped
            # or combined.
            for logitem in logtable:
                objtype = logitem['objtype']
                objname = logitem['object']
"astropy.io.fits.ImageHDU",
"os.mkdir",
"astropy.io.fits.getdata",
"astropy.io.fits.PrimaryHDU",
"os.path.exists",
"os.cpu_count",
"astropy.io.fits.Header",
"numpy.array",
"astropy.io.fits.HDUList",
"os.path.join",
"logging.getLogger"
] | [((34, 61), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (51, 61), False, 'import logging\n'), ((916, 939), 'os.path.exists', 'os.path.exists', (['figpath'], {}), '(figpath)\n', (930, 939), False, 'import os\n'), ((941, 958), 'os.mkdir', 'os.mkdir', (['figpath'], {}), '(figpath)\n', (949, 958), False, 'import os\n'), ((970, 993), 'os.path.exists', 'os.path.exists', (['odspath'], {}), '(odspath)\n', (984, 993), False, 'import os\n'), ((995, 1012), 'os.mkdir', 'os.mkdir', (['odspath'], {}), '(odspath)\n', (1003, 1012), False, 'import os\n'), ((1024, 1047), 'os.path.exists', 'os.path.exists', (['midpath'], {}), '(midpath)\n', (1038, 1047), False, 'import os\n'), ((1049, 1066), 'os.mkdir', 'os.mkdir', (['midpath'], {}), '(midpath)\n', (1057, 1066), False, 'import os\n'), ((1152, 1166), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (1164, 1166), False, 'import os\n'), ((1198, 1212), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (1210, 1212), False, 'import os\n'), ((2911, 2936), 'os.path.exists', 'os.path.exists', (['bias_file'], {}), '(bias_file)\n', (2925, 2936), False, 'import os\n'), ((5414, 5438), 'numpy.array', 'np.array', (['bias_data_lst1'], {}), '(bias_data_lst1)\n', (5422, 5438), True, 'import numpy as np\n'), ((5468, 5492), 'numpy.array', 'np.array', (['bias_data_lst2'], {}), '(bias_data_lst2)\n', (5476, 5492), True, 'import numpy as np\n'), ((6676, 6690), 'astropy.io.fits.HDUList', 'fits.HDUList', ([], {}), '()\n', (6688, 6690), True, 'import astropy.io.fits as fits\n'), ((6756, 6769), 'astropy.io.fits.Header', 'fits.Header', ([], {}), '()\n', (6767, 6769), True, 'import astropy.io.fits as fits\n'), ((7569, 7594), 'os.path.exists', 'os.path.exists', (['flat_file'], {}), '(flat_file)\n', (7583, 7594), False, 'import os\n'), ((3940, 3969), 'os.path.join', 'os.path.join', (['rawpath', 'fname1'], {}), '(rawpath, fname1)\n', (3952, 3969), False, 'import os\n'), ((3998, 4027), 'os.path.join', 'os.path.join', 
(['rawpath', 'fname2'], {}), '(rawpath, fname2)\n', (4010, 4027), False, 'import os\n'), ((4059, 4095), 'astropy.io.fits.getdata', 'fits.getdata', (['filename1'], {'header': '(True)'}), '(filename1, header=True)\n', (4071, 4095), True, 'import astropy.io.fits as fits\n'), ((4127, 4163), 'astropy.io.fits.getdata', 'fits.getdata', (['filename2'], {'header': '(True)'}), '(filename2, header=True)\n', (4139, 4163), True, 'import astropy.io.fits as fits\n'), ((7000, 7048), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'bias_combine1', 'header': 'head'}), '(data=bias_combine1, header=head)\n', (7015, 7048), True, 'import astropy.io.fits as fits\n'), ((7077, 7123), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', ([], {'data': 'bias_combine2', 'header': 'head'}), '(data=bias_combine2, header=head)\n', (7090, 7123), True, 'import astropy.io.fits as fits\n')] |
import numpy as np
# Public API of this module.
__all__ = ['default_version', 'known_versions', 'e_2_convention',
           'default_column_keys', 'tomographic_redshift_bin',
           'multiplicative_shear_bias']
# Catalog release assumed when the caller does not pass one explicitly.
default_version = 'DR4'
# KiDS data releases this module knows how to handle.
known_versions = ['DR3', 'KV450', 'DR4']
# Sign convention of the second ellipticity component; 'standard' presumably
# means no sign flip is required -- TODO confirm against consumer code.
e_2_convention = 'standard'
def default_column_keys(version=default_version):
    """Return the mapping from generic quantity names (ra, dec, z, e_1, ...)
    to the catalog-specific column names of the given KiDS release.

    Raises RuntimeError for an unrecognized version string.
    """
    column_maps = {
        'DR3': {
            'ra': 'RAJ2000',
            'dec': 'DECJ2000',
            'z': 'Z_B',
            'z_low': 'Z_B_MIN',
            'e_1': 'e1',
            'e_2': 'e2',
            'w': 'weight',
            'm': 'm'},
        'KV450': {
            'ra': 'ALPHA_J2000',
            'dec': 'DELTA_J2000',
            'z': 'Z_B',
            'z_low': 'Z_B_MIN',
            'e_1': 'bias_corrected_e1',
            'e_2': 'bias_corrected_e2',
            'w': 'weight'},
        'DR4': {
            'ra': 'ALPHA_J2000',
            'dec': 'DELTA_J2000',
            'z': 'Z_B',
            'z_low': 'Z_B_MIN',
            'e_1': 'e1',
            'e_2': 'e2',
            'w': 'weight'},
    }
    if version not in column_maps:
        raise RuntimeError(
            "Unkown version of KiDS. Supported versions are {}.".format(
                known_versions))
    return column_maps[version]
def tomographic_redshift_bin(z_s, version=default_version):
    """Map photometric redshifts onto the pre-defined KiDS tomographic bins.

    Parameters
    ----------
    z_s : numpy array
        Photometric redshifts.
    version : string
        Which catalog version to use.  Currently unused by this function;
        the bin edges below are applied regardless of version.

    Returns
    -------
    z_bin : numpy array
        Index of the tomographic bin (0-4) for each redshift, or -1 for
        redshifts outside [0.1, 1.2).
    """
    bin_edges = [0.1, 0.3, 0.5, 0.7, 0.9, 1.2]
    indices = np.digitize(z_s, bin_edges) - 1
    outside = (z_s < bin_edges[0]) | (z_s >= bin_edges[-1])
    return np.where(outside, -1, indices)
def multiplicative_shear_bias(z_s, version=default_version):
    """Return the per-tomographic-bin multiplicative shear bias :math:`m` for
    each photometric redshift.

    Parameters
    ----------
    z_s : numpy array
        Photometric redshifts.
    version : string
        Which catalog version to use.

    Returns
    -------
    m : numpy array
        The multiplicative shear bias for each redshift; NaN for redshifts
        that fall outside every tomographic bin.

    Raises
    ------
    RuntimeError
        For DR3 (where m is per-object) or an unrecognized version string.
    """
    if version == 'DR3':
        raise RuntimeError('For DR3, the multiplicative shear bias is '
                           'defined for each object individually.')
    if version not in ('KV450', 'DR4'):
        raise RuntimeError(
            "Unkown version of KiDS. Supported versions are {}.".format(
                known_versions))
    # one m value per tomographic bin
    bias_per_bin = {
        'KV450': np.array([-0.017, -0.008, -0.015, 0.010, 0.006]),
        'DR4': np.array([-0.009, -0.011, -0.015, 0.002, 0.007])}[version]
    z_bin = tomographic_redshift_bin(z_s, version=version)
    return np.where(z_bin != -1, bias_per_bin[z_bin], np.nan)
| [
"numpy.digitize",
"numpy.where",
"numpy.array"
] | [((1940, 1987), 'numpy.where', 'np.where', (['((z_s < 0.1) | (z_s >= 1.2))', '(-1)', 'z_bin'], {}), '((z_s < 0.1) | (z_s >= 1.2), -1, z_bin)\n', (1948, 1987), True, 'import numpy as np\n'), ((1875, 1923), 'numpy.digitize', 'np.digitize', (['z_s', '[0.1, 0.3, 0.5, 0.7, 0.9, 1.2]'], {}), '(z_s, [0.1, 0.3, 0.5, 0.7, 0.9, 1.2])\n', (1886, 1923), True, 'import numpy as np\n'), ((3089, 3128), 'numpy.where', 'np.where', (['(z_bin != -1)', 'm[z_bin]', 'np.nan'], {}), '(z_bin != -1, m[z_bin], np.nan)\n', (3097, 3128), True, 'import numpy as np\n'), ((2881, 2928), 'numpy.array', 'np.array', (['[-0.017, -0.008, -0.015, 0.01, 0.006]'], {}), '([-0.017, -0.008, -0.015, 0.01, 0.006])\n', (2889, 2928), True, 'import numpy as np\n'), ((2960, 3008), 'numpy.array', 'np.array', (['[-0.009, -0.011, -0.015, 0.002, 0.007]'], {}), '([-0.009, -0.011, -0.015, 0.002, 0.007])\n', (2968, 3008), True, 'import numpy as np\n')] |
from io import StringIO
import math
import statistics
import geoglows
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import requests
from scipy import interpolate
from scipy import stats
import hydrostats as hs
import hydrostats.data as hd
def collect_data(start_id, start_ideam_id, downstream_id, downstream_ideam_id):
    """Download the simulated and observed discharge records for an upstream
    and a downstream reach, plus a conventionally bias-corrected downstream
    series, and cache all five as csv files in the working directory."""
    # upstream reach: simulated flow, then the cleaned gauge record
    sim_up = geoglows.streamflow.historic_simulation(start_id)
    obs_up = get_ideam_flow(start_ideam_id)
    obs_up.dropna(inplace=True)
    # downstream reach: simulated flow, then the cleaned gauge record
    sim_down = geoglows.streamflow.historic_simulation(downstream_id)
    obs_down = get_ideam_flow(downstream_ideam_id)
    obs_down.dropna(inplace=True)
    # reference correction to compare the propagation method against
    bc_down = geoglows.bias.correct_historical(sim_down, obs_down)
    # cache everything locally
    exports = {
        'start_flow.csv': sim_up,
        'start_ideam_flow.csv': obs_up,
        'downstream_flow.csv': sim_down,
        'downstream_ideam_flow.csv': obs_down,
        'downstream_bc_flow.csv': bc_down,
    }
    for file_name, frame in exports.items():
        frame.to_csv(file_name)
    return
def get_ideam_flow(id):
    """Fetch the IDEAM gauge discharge record with the given station id from
    HydroShare and return it as a DataFrame with a UTC-localized
    DatetimeIndex."""
    url = ('https://www.hydroshare.org/resource/d222676fbd984a81911761ca1ba936bf/'
           'data/contents/Discharge_Data/{}.csv'.format(id))
    response = requests.get(url)
    frame = pd.read_csv(StringIO(response.text), index_col=0)
    frame.index = pd.to_datetime(frame.index).tz_localize('UTC')
    return frame
def find_downstream_ids(df: pd.DataFrame, target_id: int, same_order: bool = True):
    """Walk the ``NextDownID`` chain starting at *target_id* and collect the
    COMIDs of the downstream reaches.

    When *same_order* is True the walk stops as soon as the current reach's
    ``order_`` differs from the starting reach's order; otherwise it continues
    until the terminal reach (``NextDownID == -1``).  Returns a tuple of ids.
    """
    row = df[df['COMID'] == target_id]
    order_at_start = row['order_'].values[0]
    found = []
    while True:
        next_id = row['NextDownID'].values[0]
        if next_id == -1:
            break
        if same_order and row['order_'].values[0] != order_at_start:
            break
        found.append(next_id)
        row = df[df['COMID'] == next_id]
    return tuple(found)
def compute_flow_duration_curve(hydro: list or np.array, prob_steps: int = 500, exceedence: bool = True):
    """Build a flow duration curve for *hydro*.

    The flow percentiles are sampled at ``prob_steps + 1`` evenly spaced
    probabilities (NaNs in *hydro* are ignored).  When *exceedence* is True
    the probability axis is reversed so that it reads as an exceedance
    probability.  Returns a two-column DataFrame (probability, flow).
    """
    probability_axis = [round((1 / prob_steps) * i * 100, 5) for i in range(prob_steps + 1)]
    flow_axis = np.nanpercentile(hydro, probability_axis)
    if exceedence:
        probability_axis = probability_axis[::-1]
        label = 'Exceedence Probability'
    else:
        label = 'Non-Exceedence Probability'
    return pd.DataFrame(np.transpose([probability_axis, flow_axis]), columns=[label, 'Flow'])
def get_scalar_bias_fdc(first_series, seconds_series):
    """Compute the flow duration curve of each input series and return their
    flow ratio (first / second) at every probability step.

    Rows where the ratio is NaN or +inf are dropped (note: -inf is not
    replaced, matching the existing behavior).  Returns a DataFrame with the
    probability column of the first FDC and a 'Scalars' column.
    """
    fdc_a = compute_flow_duration_curve(first_series)
    fdc_b = compute_flow_duration_curve(seconds_series)
    ratio = np.divide(fdc_a['Flow'].values.flatten(), fdc_b['Flow'].values.flatten())
    result = pd.DataFrame(np.transpose([fdc_a.values[:, 0], ratio]),
                          columns=(fdc_a.columns[0], 'Scalars'))
    result.replace(np.inf, np.nan, inplace=True)
    result.dropna(inplace=True)
    return result
def solve_gumbel_flow(std, xbar, rp):
    """Estimate the flow of return period *rp* from a Gumbel (EV Type I)
    distribution fitted by the method of moments to annual maxima with mean
    *xbar* and standard deviation *std*.

    Uses the frequency-factor form x = xbar + K*std with
    K = 0.7797 * (-ln(-ln(1 - 1/rp))) - 0.45.
    """
    # reduced variate of the Gumbel distribution for this return period
    reduced = -math.log(-math.log(1 - (1 / rp)))
    return reduced * std * .7797 + xbar - (.45 * std)
def propagate_correction(sim_data: pd.DataFrame, obs_data: pd.DataFrame, sim_data_to_correct,
                         drop_outliers: bool = False, outlier_threshold: int or float = 2.5,
                         filter_scalar_fdc: bool = False, filter_scalar_fdc_range: tuple = (0, 80),
                         # fill_scalar_fdc: bool = False, fill_scalar_method: str = 'gumbel',
                         extrapolate_method: str = 'nearest', fill_value: int or float = None,
                         use_gumbel: bool = False, gumbel_max: int = 75) -> pd.DataFrame:
    """Bias-correct ``sim_data_to_correct`` (a downstream reach) by
    propagating the monthly obs/sim scalar flow-duration-curve ratios derived
    at a gauged upstream reach (``sim_data`` vs ``obs_data``).

    For each calendar month: build the sim and obs FDCs, form their ratio
    ("scalar FDC"), interpolate that ratio at the monthly percentile of every
    flow to be corrected, and multiply.  Optionally drop z-score outliers
    before building the FDCs, restrict the scalar FDC to a probability range,
    and/or replace the corrected values above the ``gumbel_max`` monthly
    percentile by a Gumbel extrapolation fitted to the values below it.

    ``extrapolate_method`` selects how scalars outside the scalar FDC's
    probability range are produced ('nearest', 'value' with ``fill_value``,
    'linear', 'average', 'max', 'min', 'globalmin', 'globalaverage').

    Returns a DataFrame indexed by date with columns
    'Propagated Corrected Streamflow', 'Scalars' and 'MonthlyPercentile'.
    Side effect: writes one ``scalar_fdc_{month}.csv`` per month.
    """
    # list of the unique months in the historical simulation. should always be 1->12 but just in case...
    unique_simulation_months = sorted(set(sim_data.index.strftime('%m')))
    dates = []
    values = []
    scales = []
    percents = []
    for month in unique_simulation_months:
        # filter data to only be current iteration's month
        monthly_sim = sim_data[sim_data.index.month == int(month)].dropna()
        monthly_obs = obs_data[obs_data.index.month == int(month)].dropna()
        monthly_cor = sim_data_to_correct[sim_data_to_correct.index.month == int(month)].dropna()
        # compute the fdc for paired sim/obs data and compute scalar fdc, either with or without outliers
        if drop_outliers:
            # drop outlier data
            # https://stackoverflow.com/questions/23199796/detect-and-exclude-outliers-in-pandas-data-frame
            mon_sim_fdc = compute_flow_duration_curve(
                monthly_sim[(np.abs(stats.zscore(monthly_sim)) < outlier_threshold).all(axis=1)])
            mon_obs_fdc = compute_flow_duration_curve(
                monthly_obs[(np.abs(stats.zscore(monthly_obs)) < outlier_threshold).all(axis=1)])
        else:
            mon_sim_fdc = compute_flow_duration_curve(monthly_sim)
            mon_obs_fdc = compute_flow_duration_curve(monthly_obs)
        # obs/sim flow ratio at each probability step
        scalar_fdc = get_scalar_bias_fdc(mon_obs_fdc['Flow'].values.flatten(), mon_sim_fdc['Flow'].values.flatten())
        if filter_scalar_fdc:
            # keep only the probability band requested by the caller
            scalar_fdc = scalar_fdc[scalar_fdc['Exceedence Probability'] >= filter_scalar_fdc_range[0]]
            scalar_fdc = scalar_fdc[scalar_fdc['Exceedence Probability'] <= filter_scalar_fdc_range[1]]
        # todo add a flag for saving this
        scalar_fdc.to_csv(f'scalar_fdc_{month}.csv')
        # Convert the percentiles
        # build the percentile -> scalar interpolator; the branches differ
        # only in how they extrapolate beyond the scalar FDC's range
        if extrapolate_method == 'nearest':
            to_scalar = interpolate.interp1d(scalar_fdc.values[:, 0], scalar_fdc.values[:, 1],
                                             fill_value='extrapolate', kind='nearest')
        elif extrapolate_method == 'value':
            to_scalar = interpolate.interp1d(scalar_fdc.values[:, 0], scalar_fdc.values[:, 1],
                                             fill_value=fill_value, bounds_error=False)
        elif extrapolate_method == 'linear':
            to_scalar = interpolate.interp1d(scalar_fdc.values[:, 0], scalar_fdc.values[:, 1],
                                             fill_value='extrapolate')
        elif extrapolate_method == 'average':
            to_scalar = interpolate.interp1d(scalar_fdc.values[:, 0], scalar_fdc.values[:, 1],
                                             fill_value=np.mean(scalar_fdc.values[:, 1]), bounds_error=False)
        elif extrapolate_method == 'max' or extrapolate_method == 'maximum':
            to_scalar = interpolate.interp1d(scalar_fdc.values[:, 0], scalar_fdc.values[:, 1],
                                             fill_value=np.max(scalar_fdc.values[:, 1]), bounds_error=False)
        elif extrapolate_method == 'min' or extrapolate_method == 'minimum':
            to_scalar = interpolate.interp1d(scalar_fdc.values[:, 0], scalar_fdc.values[:, 1],
                                             fill_value=np.min(scalar_fdc.values[:, 1]), bounds_error=False)
        elif extrapolate_method == 'globalmin':
            # NOTE(review): get_scalar_bias_fdc expects raw flow series but is
            # given FDC DataFrames here (an FDC of an FDC) -- verify intent.
            total_scalar_fdc = get_scalar_bias_fdc(
                compute_flow_duration_curve(obs_data.values.flatten()),
                compute_flow_duration_curve(sim_data.values.flatten()))
            to_scalar = interpolate.interp1d(scalar_fdc.values[:, 0], scalar_fdc.values[:, 1],
                                             fill_value=np.min(total_scalar_fdc.values[:, 1]), bounds_error=False)
        elif extrapolate_method == 'globalaverage':
            # NOTE(review): same FDC-of-an-FDC concern as 'globalmin' above.
            total_scalar_fdc = get_scalar_bias_fdc(
                compute_flow_duration_curve(obs_data.values.flatten()),
                compute_flow_duration_curve(sim_data.values.flatten()))
            to_scalar = interpolate.interp1d(scalar_fdc.values[:, 0], scalar_fdc.values[:, 1],
                                             fill_value=np.mean(total_scalar_fdc.values[:, 1]), bounds_error=False)
        else:
            raise ValueError('Invalid extrapolation method provided')
        # determine the percentile of each uncorrected flow using the monthly fdc
        x = monthly_cor.values.flatten()
        percentiles = [stats.percentileofscore(x, a) for a in x]
        scalars = to_scalar(percentiles)
        value = monthly_cor.values.flatten() * scalars
        if use_gumbel:
            # fit a Gumbel distribution to the corrected values below the
            # gumbel_max percentile and use it to replace the values above it
            tmp = pd.DataFrame(np.transpose([value, percentiles]), columns=('q', 'p'))
            xbar = statistics.mean(tmp[tmp['p'] < gumbel_max]['q'].tolist())
            std = statistics.stdev(tmp[tmp['p'] < gumbel_max]['q'].tolist(), xbar)
            q = []
            for p in tmp[tmp['p'] >= gumbel_max]['p'].tolist():
                if p >= 100:
                    p = 99.999
                q.append(solve_gumbel_flow(std, xbar, 1 / (1 - (p / 100))))
            tmp.loc[tmp['p'] >= gumbel_max, 'q'] = q
            value = tmp['q'].values
        # NOTE(review): dates come from monthly_sim while values come from
        # monthly_cor; if the two series have different indices these columns
        # will be misaligned (or lengths will mismatch) -- verify.
        dates += monthly_sim.index.to_list()
        values += value.tolist()
        scales += scalars.tolist()
        percents += percentiles
    # values = np.multiply(values, 1.075)
    df_data = np.transpose([values, scales, percents])
    columns = ['Propagated Corrected Streamflow', 'Scalars', 'MonthlyPercentile']
    corrected = pd.DataFrame(data=df_data, index=dates, columns=columns)
    corrected.sort_index(inplace=True)
    return corrected
def plot_results(sim, obs, bc, bcp, title):
    """Render an interactive plotly figure comparing the simulated, observed,
    conventionally corrected and propagation-corrected series, plus the
    diagnostic percentile and scalar traces from the propagation method."""
    sim_dates = sim.index.tolist()
    # (trace name, x values, y values) for each line in the figure
    series = (
        ('Propagated Corrected (Experimental)', bcp.index.tolist(),
         bcp['Propagated Corrected Streamflow'].values.flatten()),
        ('Bias Corrected (Jorges Method)', sim_dates, bc.values.flatten()),
        ('Simulated (ERA 5)', sim_dates, sim.values.flatten()),
        ('Observed', obs.index.tolist(), obs.values.flatten()),
        ('Percentile', sim_dates, bcp['MonthlyPercentile'].values.flatten()),
        ('Scalar', sim_dates, bcp['Scalars'].values.flatten()),
    )
    traces = [go.Scatter(name=name, x=x, y=y) for name, x, y in series]
    go.Figure(traces, layout={'title': title}).show()
    return
def statistics_tables(corrected: pd.DataFrame, simulated: pd.DataFrame, observed: pd.DataFrame) -> pd.DataFrame:
    """Compute hydrostats error metrics for the original simulation and for
    the corrected series, each against the observations, and return them
    side by side (metrics as rows, one column per series)."""
    metrics = ['ME', 'RMSE', 'NRMSE (Mean)', 'MAPE', 'NSE', 'KGE (2009)', 'KGE (2012)']
    # pair each output column label with its sim/obs merge
    labeled_pairs = (
        ('Original Full Time Series', hd.merge_data(sim_df=simulated, obs_df=observed)),
        ('Corrected Full Time Series', hd.merge_data(sim_df=corrected, obs_df=observed)),
    )
    tables = []
    for label, merged in labeled_pairs:
        table = hs.make_table(merged_dataframe=merged, metrics=metrics)
        table = table.rename(index={'Full Time Series': label})
        tables.append(table.transpose())
    return pd.merge(tables[0], tables[1], right_index=True, left_index=True)
def make_stats_summary(df1, df2, df3, df4, df5, df6, labels):
    """Gather the 'Corrected Full Time Series' metric columns of six
    statistics tables (plus df1's uncorrected 'Original Full Time Series'
    baseline) into one DataFrame whose columns are 'Sim v Obs' followed by
    *labels* and whose index is the metric names of *df1*."""
    frames = (df1, df2, df3, df4, df5, df6)
    metric_columns = [df1['Original Full Time Series']]
    metric_columns.extend(frame['Corrected Full Time Series'] for frame in frames)
    return pd.DataFrame(np.transpose(metric_columns),
                        columns=['Sim v Obs'] + list(labels),
                        index=df1.index)
# collect_data(9017261, 32037030, 9015333, 32097010)
# collect_data(9012999, 22057070, 9012650, 22057010)
# Read all as csv
# Reload the csv caches written earlier by collect_data() so the analysis can
# run without repeating any downloads.
start_flow = pd.read_csv('start_flow.csv', index_col=0)
start_ideam_flow = pd.read_csv('start_ideam_flow.csv', index_col=0)
downstream_flow = pd.read_csv('downstream_flow.csv', index_col=0)
downstream_ideam_flow = pd.read_csv('downstream_ideam_flow.csv', index_col=0)
downstream_bc_flow = pd.read_csv('downstream_bc_flow.csv', index_col=0)
# Restore the DatetimeIndex dtype that is lost in the csv round trip.
start_flow.index = pd.to_datetime(start_flow.index)
start_ideam_flow.index = pd.to_datetime(start_ideam_flow.index)
downstream_flow.index = pd.to_datetime(downstream_flow.index)
downstream_ideam_flow.index = pd.to_datetime(downstream_ideam_flow.index)
downstream_bc_flow.index = pd.to_datetime(downstream_bc_flow.index)
# downstream_prop_correct = propagate_correction(start_flow, start_ideam_flow, downstream_flow)
# plot_results(downstream_flow, downstream_ideam_flow, downstream_bc_flow, downstream_prop_correct, 'Correct Monthly')
# Extrapolation strategies exercised by the commented-out experiments below.
methods = ('nearest', 'linear', 'min', 'globalmin', 'average', 'globalaverage')
# stats_dfs = []
# for extrap_met in methods:
# downstream_prop_correct = propagate_correction(start_flow, start_ideam_flow, downstream_flow,
# drop_outliers=True, outlier_threshold=1,
# extrapolate_method=extrap_met)
# plot_results(downstream_flow, downstream_ideam_flow, downstream_bc_flow, downstream_prop_correct,
# f'Correct Monthly - Drop input outliers @ 1z, {extrap_met} extrapolation')
# del downstream_prop_correct['Scalars'], downstream_prop_correct['MonthlyPercentile']
# stats_dfs.append(statistics_tables(downstream_prop_correct, downstream_flow, downstream_ideam_flow))
# make_stats_summary(stats_dfs[0], stats_dfs[1], stats_dfs[2], stats_dfs[3], stats_dfs[4], stats_dfs[5], methods).to_csv('stats_drop_outliers.csv')
# stats_dfs = []
# for extrap_met in methods:
# downstream_prop_correct = propagate_correction(start_flow, start_ideam_flow, downstream_flow,
# drop_outliers=True, outlier_threshold=1,
# extrapolate_method=extrap_met)
# plot_results(downstream_flow, downstream_ideam_flow, downstream_bc_flow, downstream_prop_correct,
# f'Correct Monthly - Using the middle of the scalar fdc (10-90%), {extrap_met} extrapolation')
# del downstream_prop_correct['Scalars'], downstream_prop_correct['MonthlyPercentile']
# stats_dfs.append(statistics_tables(downstream_prop_correct, downstream_flow, downstream_ideam_flow))
# make_stats_summary(stats_dfs[0], stats_dfs[1], stats_dfs[2], stats_dfs[3], stats_dfs[4], stats_dfs[5], methods).to_csv('stats_middle_1090_scalars.csv')
# stats_dfs = []
# for extrap_met in methods:
# downstream_prop_correct = propagate_correction(start_flow, start_ideam_flow, downstream_flow,
# drop_outliers=True, outlier_threshold=1,
# extrapolate_method=extrap_met)
# plot_results(downstream_flow, downstream_ideam_flow, downstream_bc_flow, downstream_prop_correct,
# f'Correct Monthly - Using the middle of the scalar fdc (10-80%), {extrap_met} extrapolation')
# del downstream_prop_correct['Scalars'], downstream_prop_correct['MonthlyPercentile']
# stats_dfs.append(statistics_tables(downstream_prop_correct, downstream_flow, downstream_ideam_flow))
# make_stats_summary(stats_dfs[0], stats_dfs[1], stats_dfs[2], stats_dfs[3], stats_dfs[4], stats_dfs[5], methods).to_csv('stats_middle_1080_scalars.csv')
# Run the experimental propagation correction, replacing corrected flows above
# the 75th monthly percentile with Gumbel-extrapolated values.
downstream_prop_correct = propagate_correction(start_flow, start_ideam_flow, downstream_flow,
                                               use_gumbel=True, gumbel_max=75)
# NOTE(review): the plot title mentions 'Fill value of 1' but no fill_value is
# passed above -- the title looks stale from an earlier experiment.
plot_results(downstream_flow, downstream_ideam_flow, downstream_bc_flow, downstream_prop_correct, f'Correct Monthly - Fill value of 1')
# Drop the diagnostic columns before computing the error statistics.
del downstream_prop_correct['Scalars']
del downstream_prop_correct['MonthlyPercentile']
statistics_tables(downstream_prop_correct, downstream_flow, downstream_ideam_flow).to_csv('stats_test.csv')
| [
"numpy.nanpercentile",
"pandas.read_csv",
"geoglows.streamflow.historic_simulation",
"numpy.mean",
"scipy.stats.percentileofscore",
"scipy.interpolate.interp1d",
"pandas.DataFrame",
"pandas.merge",
"numpy.transpose",
"hydrostats.data.merge_data",
"numpy.max",
"requests.get",
"math.log",
"s... | [((12342, 12384), 'pandas.read_csv', 'pd.read_csv', (['"""start_flow.csv"""'], {'index_col': '(0)'}), "('start_flow.csv', index_col=0)\n", (12353, 12384), True, 'import pandas as pd\n'), ((12404, 12452), 'pandas.read_csv', 'pd.read_csv', (['"""start_ideam_flow.csv"""'], {'index_col': '(0)'}), "('start_ideam_flow.csv', index_col=0)\n", (12415, 12452), True, 'import pandas as pd\n'), ((12471, 12518), 'pandas.read_csv', 'pd.read_csv', (['"""downstream_flow.csv"""'], {'index_col': '(0)'}), "('downstream_flow.csv', index_col=0)\n", (12482, 12518), True, 'import pandas as pd\n'), ((12543, 12596), 'pandas.read_csv', 'pd.read_csv', (['"""downstream_ideam_flow.csv"""'], {'index_col': '(0)'}), "('downstream_ideam_flow.csv', index_col=0)\n", (12554, 12596), True, 'import pandas as pd\n'), ((12618, 12668), 'pandas.read_csv', 'pd.read_csv', (['"""downstream_bc_flow.csv"""'], {'index_col': '(0)'}), "('downstream_bc_flow.csv', index_col=0)\n", (12629, 12668), True, 'import pandas as pd\n'), ((12688, 12720), 'pandas.to_datetime', 'pd.to_datetime', (['start_flow.index'], {}), '(start_flow.index)\n', (12702, 12720), True, 'import pandas as pd\n'), ((12746, 12784), 'pandas.to_datetime', 'pd.to_datetime', (['start_ideam_flow.index'], {}), '(start_ideam_flow.index)\n', (12760, 12784), True, 'import pandas as pd\n'), ((12809, 12846), 'pandas.to_datetime', 'pd.to_datetime', (['downstream_flow.index'], {}), '(downstream_flow.index)\n', (12823, 12846), True, 'import pandas as pd\n'), ((12877, 12920), 'pandas.to_datetime', 'pd.to_datetime', (['downstream_ideam_flow.index'], {}), '(downstream_ideam_flow.index)\n', (12891, 12920), True, 'import pandas as pd\n'), ((12948, 12988), 'pandas.to_datetime', 'pd.to_datetime', (['downstream_bc_flow.index'], {}), '(downstream_bc_flow.index)\n', (12962, 12988), True, 'import pandas as pd\n'), ((391, 440), 'geoglows.streamflow.historic_simulation', 'geoglows.streamflow.historic_simulation', (['start_id'], {}), '(start_id)\n', (430, 440), False, 
'import geoglows\n'), ((606, 660), 'geoglows.streamflow.historic_simulation', 'geoglows.streamflow.historic_simulation', (['downstream_id'], {}), '(downstream_id)\n', (645, 660), False, 'import geoglows\n'), ((892, 954), 'geoglows.bias.correct_historical', 'geoglows.bias.correct_historical', (['downstream', 'downstream_ideam'], {}), '(downstream, downstream_ideam)\n', (924, 954), False, 'import geoglows\n'), ((2493, 2529), 'numpy.nanpercentile', 'np.nanpercentile', (['hydro', 'percentiles'], {}), '(hydro, percentiles)\n', (2509, 2529), True, 'import numpy as np\n'), ((9548, 9588), 'numpy.transpose', 'np.transpose', (['[values, scales, percents]'], {}), '([values, scales, percents])\n', (9560, 9588), True, 'import numpy as np\n'), ((9687, 9743), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'df_data', 'index': 'dates', 'columns': 'columns'}), '(data=df_data, index=dates, columns=columns)\n', (9699, 9743), True, 'import pandas as pd\n'), ((11013, 11061), 'hydrostats.data.merge_data', 'hd.merge_data', ([], {'sim_df': 'simulated', 'obs_df': 'observed'}), '(sim_df=simulated, obs_df=observed)\n', (11026, 11061), True, 'import hydrostats.data as hd\n'), ((11083, 11131), 'hydrostats.data.merge_data', 'hd.merge_data', ([], {'sim_df': 'corrected', 'obs_df': 'observed'}), '(sim_df=corrected, obs_df=observed)\n', (11096, 11131), True, 'import hydrostats.data as hd\n'), ((11251, 11314), 'hydrostats.make_table', 'hs.make_table', ([], {'merged_dataframe': 'merged_sim_obs', 'metrics': 'metrics'}), '(merged_dataframe=merged_sim_obs, metrics=metrics)\n', (11264, 11314), True, 'import hydrostats as hs\n'), ((11328, 11391), 'hydrostats.make_table', 'hs.make_table', ([], {'merged_dataframe': 'merged_cor_obs', 'metrics': 'metrics'}), '(merged_dataframe=merged_cor_obs, metrics=metrics)\n', (11341, 11391), True, 'import hydrostats as hs\n'), ((11638, 11697), 'pandas.merge', 'pd.merge', (['table1', 'table2'], {'right_index': '(True)', 'left_index': '(True)'}), '(table1, table2, 
right_index=True, left_index=True)\n', (11646, 11697), True, 'import pandas as pd\n'), ((11773, 12046), 'numpy.transpose', 'np.transpose', (["(df1['Original Full Time Series'], df1['Corrected Full Time Series'], df2[\n 'Corrected Full Time Series'], df3['Corrected Full Time Series'], df4[\n 'Corrected Full Time Series'], df5['Corrected Full Time Series'], df6[\n 'Corrected Full Time Series'])"], {}), "((df1['Original Full Time Series'], df1[\n 'Corrected Full Time Series'], df2['Corrected Full Time Series'], df3[\n 'Corrected Full Time Series'], df4['Corrected Full Time Series'], df5[\n 'Corrected Full Time Series'], df6['Corrected Full Time Series']))\n", (11785, 12046), True, 'import numpy as np\n'), ((12149, 12201), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'columns', 'index': 'df1.index'}), '(data, columns=columns, index=df1.index)\n', (12161, 12201), True, 'import pandas as pd\n'), ((2723, 2757), 'numpy.transpose', 'np.transpose', (['[percentiles, flows]'], {}), '([percentiles, flows])\n', (2735, 2757), True, 'import numpy as np\n'), ((3126, 3172), 'numpy.transpose', 'np.transpose', (['[first_fdc.values[:, 0], ratios]'], {}), '([first_fdc.values[:, 0], ratios])\n', (3138, 3172), True, 'import numpy as np\n'), ((1497, 1521), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (1511, 1521), True, 'import pandas as pd\n'), ((6075, 6191), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['scalar_fdc.values[:, 0]', 'scalar_fdc.values[:, 1]'], {'fill_value': '"""extrapolate"""', 'kind': '"""nearest"""'}), "(scalar_fdc.values[:, 0], scalar_fdc.values[:, 1],\n fill_value='extrapolate', kind='nearest')\n", (6095, 6191), False, 'from scipy import interpolate\n'), ((8628, 8657), 'scipy.stats.percentileofscore', 'stats.percentileofscore', (['x', 'a'], {}), '(x, a)\n', (8651, 8657), False, 'from scipy import stats\n'), ((10780, 10824), 'plotly.graph_objects.Figure', 'go.Figure', (['scatters'], {'layout': "{'title': title}"}), 
"(scatters, layout={'title': title})\n", (10789, 10824), True, 'import plotly.graph_objects as go\n'), ((1444, 1461), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1456, 1461), False, 'import requests\n'), ((6301, 6418), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['scalar_fdc.values[:, 0]', 'scalar_fdc.values[:, 1]'], {'fill_value': 'fill_value', 'bounds_error': '(False)'}), '(scalar_fdc.values[:, 0], scalar_fdc.values[:, 1],\n fill_value=fill_value, bounds_error=False)\n', (6321, 6418), False, 'from scipy import interpolate\n'), ((8821, 8855), 'numpy.transpose', 'np.transpose', (['[value, percentiles]'], {}), '([value, percentiles])\n', (8833, 8855), True, 'import numpy as np\n'), ((6529, 6629), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['scalar_fdc.values[:, 0]', 'scalar_fdc.values[:, 1]'], {'fill_value': '"""extrapolate"""'}), "(scalar_fdc.values[:, 0], scalar_fdc.values[:, 1],\n fill_value='extrapolate')\n", (6549, 6629), False, 'from scipy import interpolate\n'), ((3568, 3588), 'math.log', 'math.log', (['(1 - 1 / rp)'], {}), '(1 - 1 / rp)\n', (3576, 3588), False, 'import math\n'), ((6868, 6900), 'numpy.mean', 'np.mean', (['scalar_fdc.values[:, 1]'], {}), '(scalar_fdc.values[:, 1])\n', (6875, 6900), True, 'import numpy as np\n'), ((5156, 5181), 'scipy.stats.zscore', 'stats.zscore', (['monthly_sim'], {}), '(monthly_sim)\n', (5168, 5181), False, 'from scipy import stats\n'), ((5309, 5334), 'scipy.stats.zscore', 'stats.zscore', (['monthly_obs'], {}), '(monthly_obs)\n', (5321, 5334), False, 'from scipy import stats\n'), ((7150, 7181), 'numpy.max', 'np.max', (['scalar_fdc.values[:, 1]'], {}), '(scalar_fdc.values[:, 1])\n', (7156, 7181), True, 'import numpy as np\n'), ((7431, 7462), 'numpy.min', 'np.min', (['scalar_fdc.values[:, 1]'], {}), '(scalar_fdc.values[:, 1])\n', (7437, 7462), True, 'import numpy as np\n'), ((7879, 7916), 'numpy.min', 'np.min', (['total_scalar_fdc.values[:, 1]'], {}), '(total_scalar_fdc.values[:, 1])\n', 
(7885, 7916), True, 'import numpy as np\n'), ((8337, 8375), 'numpy.mean', 'np.mean', (['total_scalar_fdc.values[:, 1]'], {}), '(total_scalar_fdc.values[:, 1])\n', (8344, 8375), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
General utility and helper functions.
"""
# =============================================================================
# IMPORTS AND DEPENDENCIES
# =============================================================================
import os
import pickle
import random
import colorsys
from datetime import datetime
import numpy as np
import torchvision
import torchvision.transforms as transforms
import torch
from sklearn.utils import shuffle
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from matplotlib.colors import ListedColormap
# =============================================================================
# FUNCTIONS
# =============================================================================
def copy(array_like):
    """Return an independent copy of an array-like object.

    Args:
        array_like (np.ndarray, torch.Tensor): array/tensor to copy.

    Returns:
        A new object of the same type, detached from the original.

    Raises:
        TypeError: if the input is neither a np.ndarray nor a torch.Tensor.
    """
    if isinstance(array_like, np.ndarray):
        return array_like.copy()
    if isinstance(array_like, torch.Tensor):
        # detach() drops the autograd graph before cloning the storage.
        return array_like.detach().clone()
    raise TypeError("Niteshade only supports np.ndarray or torch.Tensor array-like objects.")
def get_time_stamp_as_string():
    """Return the current timestamp formatted for use in file names.

    Returns:
        str: timestamp like ``'01-Jan-2024 (13-45-59)'`` (colon-free so it
        is safe as part of a file name).
    """
    return datetime.now().strftime("%d-%b-%Y (%H-%M-%S)")
def save_plot(plt, dirname='output', plotname='default'):
    """Save a matplotlib.pyplot plot as a .png file.

    Args:
        plt: matplotlib.pyplot module (or any object exposing ``savefig``).
        dirname (str): directory to save the plot to; created (including
            any missing parent directories) if it does not exist.
        plotname (str): plot file name; when 'default', a timestamp-based
            name is generated.
    """
    # makedirs with exist_ok avoids the isdir/mkdir check-then-act race
    # and, unlike mkdir, also creates intermediate directories.
    os.makedirs(dirname, exist_ok=True)
    # Generate plot name, if needed
    if plotname == 'default':
        plotname = f'{get_time_stamp_as_string()}.png'
    plt.savefig(f'{dirname}/{plotname}')
def save_pickle(results, dirname='output', filename='default'):
    """Save results as a .pickle file.

    Args:
        results: any picklable object to persist.
        dirname (str): directory to save the pickle to; created (including
            parents) if it does not exist.
        filename (str): file name without extension; when 'default', a
            timestamp-based name is generated.
    """
    os.makedirs(dirname, exist_ok=True)
    # Generate file name, if needed
    if filename == 'default':
        filename = f'{get_time_stamp_as_string()}'
    # Use the computed filename (it was previously ignored) and a context
    # manager so the file handle is always closed.
    with open(f"{dirname}/{filename}.pickle", "wb") as target:
        pickle.dump(results, target)
def load_model(filename):
    """Deserialize and return a model stored as a pickle binary file.

    Args:
        filename (str): path of the file holding the pickled model.

    Returns:
        model (nn.Module): trained model with learnable parameters.
    """
    # Works in tandem with save_model, which writes the same format.
    with open(filename, 'rb') as source:
        return pickle.load(source)
def save_model(model, filename):
    """Serialize a model to ``<filename>.pickle``.

    Args:
        model (nn.Module): model to save.
        filename (str): name of file to save model in (excluding extension).
    """
    # Interpolate the caller-supplied name — the parameter was previously
    # unused and every model was written to the same hard-coded path.
    # Works in tandem with load_model.
    with open(f'{filename}.pickle', 'wb') as target:
        pickle.dump(model, target)
def one_hot_encoding(y, num_classes):
    """One-hot encode integer class labels.

    Args:
        y (np.ndarray, torch.Tensor): integer class labels.
        num_classes (int): number of classes (width of the encoding).

    Returns:
        np.ndarray: array of shape (len(y), num_classes) with one 1 per row.
    """
    n_samples = np.shape(y)[0]
    encoded = np.zeros([n_samples, num_classes])
    for row, label in enumerate(y):
        encoded[row][int(label)] = 1
    return encoded
def check_num_of_classes(y):
    """Return the number of classes in one-hot encoded label data.

    Data that was decoded for an attack must be re-encoded before being
    output, so the original encoded labels are inspected here to recover
    the class count.

    Args:
        y (np.ndarray, torch.Tensor): one-hot encoded labels.

    Returns:
        num_classes (int): number of classes.
    """
    if len(np.shape(y)) == 1:
        # A single sample arrives as a flat vector; view it as one row.
        y = y.reshape(1, -1)
    return np.shape(y)[1]
def check_batch_size(y):
    """Report whether label data holds a single (flat) sample.

    The value returned is the number of array dimensions rather than the
    literal batch size: 1 means a lone sample that must be reshaped before
    encoding/decoding; anything else means batched data.

    Args:
        y (np.ndarray, torch.Tensor): labels.

    Returns:
        check (int): 1 when a single flat sample, otherwise the rank of y.
    """
    return np.ndim(y)
def decode_one_hot(y):
    """Convert one-hot encoded labels back to integer class labels.

    Args:
        y (np.ndarray, torch.Tensor): one-hot encoded labels; a single
            sample may be passed as a flat vector.

    Returns:
        np.ndarray: column vector of shape (n_samples, 1) of class indices.
    """
    if len(np.shape(y)) == 1:
        # Single flat sample -> treat as one row.
        y = y.reshape(1, -1)
    n_samples, n_classes = np.shape(y)
    decoded = np.zeros([n_samples, 1])
    for col in range(n_classes):
        for row in range(n_samples):
            if y[row, col] != 0:
                decoded[row] = col
    return decoded
def train_test_iris(test_size=0.2, val_size=None, rand_state=42):
    """Load the Iris dataset, one-hot encode the labels and split it.

    Inputs are min-max scaled with statistics fitted on the training data
    only, and the training pairs are shuffled.

    Args:
        test_size (float): fraction of the full dataset held out for the
            test set (Default = 0.2).
        val_size (float): fraction of the *training* set carved out as a
            validation set. Default = None (only train and test returned).
        rand_state (int): random seed used for the splits. Default = 42.

    Returns:
        (X_train, y_train, X_test, y_test) when val_size is None, else
        (X_train, y_train, X_test, y_test, X_val, y_val).
    """
    data = load_iris()
    X, y = data.data, data.target
    # One-hot encode the integer targets.
    enc = OneHotEncoder()
    y = enc.fit_transform(y.reshape(-1, 1)).toarray()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size,
                                                        random_state=rand_state)
    # Fit the scaler on the training data only to avoid test-set leakage.
    scaler = MinMaxScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    X_train, y_train = shuffle(X_train, y_train)
    if val_size is not None:
        # val_size was documented but previously ignored; carve the
        # validation split out of the (already scaled) training data.
        X_train, X_val, y_train, y_val = train_test_split(
            X_train, y_train, test_size=val_size, random_state=rand_state)
        return X_train, y_train, X_test, y_test, X_val, y_val
    return X_train, y_train, X_test, y_test
def train_test_MNIST(dir="datasets/", transform=None, val_size=None):
    """Load torchvision's MNIST dataset as numpy arrays, optionally split
    into train/validation sets.

    NOTE(review): the raw ``.data``/``.targets`` tensors are returned, so
    ``transform`` is attached to the dataset objects but never applied to
    the arrays handed back — confirm whether callers expect normalized
    inputs.

    Args:
        dir (str): root directory where MNIST is stored/downloaded.
        transform (torchvision.transforms): transforms attached to the
            dataset objects (see note above). Defaults to
            ToTensor + Normalize((0.1307,), (0.3081,)).
        val_size (float): fraction of the training set allocated to a
            validation split; None/0 disables the split.

    Returns:
        When val_size is falsy: X, y, X_test, y_test.
        Otherwise: X_train, X_val, X_test, y_train, y_val, y_test
        (note this ordering differs from train_test_cifar).
    """
    if transform is None:
        transform = torchvision.transforms.Compose([
                                torchvision.transforms.ToTensor(),
                                torchvision.transforms.Normalize(
                                (0.1307,), (0.3081,))])
    MNIST_train = torchvision.datasets.MNIST(root=dir, train=True, download=True,
                                           transform=transform)
    #get inputs and labels and convert to numpy arrays
    X = MNIST_train.data.numpy().reshape(-1, 1, 28, 28)
    y = MNIST_train.targets.numpy()
    MNIST_test = torchvision.datasets.MNIST(root=dir, train=False, download=True,
                                          transform=transform)
    X_test = MNIST_test.data.numpy().reshape(-1, 1, 28, 28)
    y_test = MNIST_test.targets.numpy()
    if val_size:
        # No random_state is passed, so this split is non-deterministic.
        X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=val_size)
        return X_train, X_val, X_test, y_train, y_val, y_test
    else:
        return X, y, X_test, y_test
def train_test_cifar(dir="datasets/", transform=None, val_size=None):
    """Load torchvision's CIFAR10 dataset, split into train, test, and
    (optionally) validation sets.

    Args:
        dir (str): root directory where CIFAR10 is stored/downloaded.
        transform (torchvision.transforms): transforms applied when the
            dataset samples are read. Defaults to
            Compose([RandomHorizontalFlip(), ToTensor()]).
        val_size (float): fraction of the training set allocated to a
            validation split; None/0 disables the split.

    Returns:
        When val_size is falsy: X, y, X_test, y_test (torch.Tensor).
        Otherwise: X_train, y_train, X_test, y_test, X_val, y_val.
    """
    if transform is None:
        # Default augmentation: random horizontal flip, then tensor conversion.
        transform = transforms.Compose([transforms.RandomHorizontalFlip(),
                                        transforms.ToTensor()])
    train_set = torchvision.datasets.CIFAR10(root=dir, train=True, download=True, transform=transform)
    test_set = torchvision.datasets.CIFAR10(root=dir, train=False, download=True, transform=transform)
    X = torch.stack([image for image, _ in train_set]).reshape(-1, 3, 32, 32)
    y = torch.stack([torch.tensor(label) for _, label in train_set])
    X_test = torch.stack([image for image, _ in test_set]).reshape(-1, 3, 32, 32)
    y_test = torch.stack([torch.tensor(label) for _, label in test_set])
    if val_size:
        X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=val_size)
        return X_train, y_train, X_test, y_test, X_val, y_val
    return X, y, X_test, y_test
def rand_cmap(nlabels):
    """Create a random colormap of nlabels bright colors.

    Useful together with matplotlib for segmentation tasks where each
    label needs its own color.

    Args:
        nlabels (int): number of labels (size of the colormap).

    Returns:
        random_colormap (ListedColormap): colormap of nlabels random colors.
    """
    rgb_colors = []
    for _ in range(nlabels):
        # High value / moderate saturation keeps every sampled color bright.
        hue = np.random.uniform(low=0.0, high=1)
        saturation = np.random.uniform(low=0.2, high=1)
        value = np.random.uniform(low=0.9, high=1)
        rgb_colors.append(colorsys.hsv_to_rgb(hue, saturation, value))
    return ListedColormap(rgb_colors, N=nlabels)
def get_cmap(nlabels):
    """Return a colormap of nlabels visually distinct colors.

    Samples from a curated 11-color palette when possible, and falls back
    to a random HSV-based colormap when more labels are requested.

    Args:
        nlabels (int): number of labels (size of the colormap).

    Returns:
        ListedColormap: colormap with nlabels colors.
    """
    rgb_distinct = [(0.588,0.294,0), #brown
                    (1,0.647,0), #orange
                    (0.502,0.502,0), #olive
                    (0,0.53,0.55), #green
                    (0,1,1), #cyan
                    (0,0,0.93), #blue
                    (1,0,0), #red
                    (1,0.412,0.706), #pink
                    (1,1,0), #yellow
                    (0,0,0), #black
                    (0.627,0.125,0.941)] #purple
    try:
        colors = random.sample(rgb_distinct, nlabels)
        cmap = ListedColormap(colors, N=nlabels)
    except ValueError:
        # random.sample raises ValueError (not IndexError) when nlabels
        # exceeds the palette size; catching the right exception makes the
        # random fallback actually reachable.
        cmap = rand_cmap(nlabels)
    return cmap
# =============================================================================
# MAIN ENTRY POINT
# =============================================================================
if __name__ == "__main__":
pass
| [
"sklearn.datasets.load_iris",
"os.mkdir",
"pickle.dump",
"random.sample",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.MinMaxScaler",
"torchvision.datasets.CIFAR10",
"numpy.shape",
"pickle.load",
"torchvision.transforms.Normalize",
"matplotlib.colors.ListedColormap",
"dat... | [((1547, 1561), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1559, 1561), False, 'from datetime import datetime\n'), ((7257, 7268), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (7266, 7268), False, 'from sklearn.datasets import load_iris\n'), ((7369, 7384), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (7382, 7384), False, 'from sklearn.preprocessing import MinMaxScaler, OneHotEncoder\n'), ((7514, 7582), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'rand_state'}), '(X, y, test_size=test_size, random_state=rand_state)\n', (7530, 7582), False, 'from sklearn.model_selection import train_test_split\n'), ((7695, 7709), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (7707, 7709), False, 'from sklearn.preprocessing import MinMaxScaler, OneHotEncoder\n'), ((7821, 7846), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (7828, 7846), False, 'from sklearn.utils import shuffle\n'), ((9427, 9516), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': 'dir', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), '(root=dir, train=True, download=True, transform=\n transform)\n', (9453, 9516), False, 'import torchvision\n'), ((9723, 9813), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': 'dir', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), '(root=dir, train=False, download=True, transform=\n transform)\n', (9749, 9813), False, 'import torchvision\n'), ((11560, 11651), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'dir', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), '(root=dir, train=True, download=True, transform\n =transform)\n', (11588, 11651), False, 'import torchvision\n'), ((11658, 11749), 'torchvision.datasets.CIFAR10', 
'torchvision.datasets.CIFAR10', ([], {'root': 'dir', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), '(root=dir, train=False, download=True,\n transform=transform)\n', (11686, 11749), False, 'import torchvision\n'), ((13021, 13061), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['randRGBcolors'], {'N': 'nlabels'}), '(randRGBcolors, N=nlabels)\n', (13035, 13061), False, 'from matplotlib.colors import ListedColormap\n'), ((2111, 2133), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (2124, 2133), False, 'import os\n'), ((2143, 2160), 'os.mkdir', 'os.mkdir', (['dirname'], {}), '(dirname)\n', (2151, 2160), False, 'import os\n'), ((2780, 2802), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (2793, 2802), False, 'import os\n'), ((2812, 2829), 'os.mkdir', 'os.mkdir', (['dirname'], {}), '(dirname)\n', (2820, 2829), False, 'import os\n'), ((3425, 3444), 'pickle.load', 'pickle.load', (['target'], {}), '(target)\n', (3436, 3444), False, 'import pickle\n'), ((3823, 3849), 'pickle.dump', 'pickle.dump', (['model', 'target'], {}), '(model, target)\n', (3834, 3849), False, 'import pickle\n'), ((4944, 4955), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4952, 4955), True, 'import numpy as np\n'), ((5399, 5410), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (5407, 5410), True, 'import numpy as np\n'), ((5743, 5754), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (5751, 5754), True, 'import numpy as np\n'), ((10017, 10059), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'val_size'}), '(X, y, test_size=val_size)\n', (10033, 10059), False, 'from sklearn.model_selection import train_test_split\n'), ((12095, 12137), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'val_size'}), '(X, y, test_size=val_size)\n', (12111, 12137), False, 'from sklearn.model_selection import train_test_split\n'), ((13594, 13630), 'random.sample', 
'random.sample', (['rgb_distinct', 'nlabels'], {}), '(rgb_distinct, nlabels)\n', (13607, 13630), False, 'import random\n'), ((13646, 13679), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['colors'], {'N': 'nlabels'}), '(colors, N=nlabels)\n', (13660, 13679), False, 'from matplotlib.colors import ListedColormap\n'), ((4225, 4236), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4233, 4236), True, 'import numpy as np\n'), ((11755, 11799), 'torch.stack', 'torch.stack', (['[sample[0] for sample in train]'], {}), '([sample[0] for sample in train])\n', (11766, 11799), False, 'import torch\n'), ((11841, 11864), 'torch.tensor', 'torch.tensor', (['sample[1]'], {}), '(sample[1])\n', (11853, 11864), False, 'import torch\n'), ((11901, 11944), 'torch.stack', 'torch.stack', (['[sample[0] for sample in test]'], {}), '([sample[0] for sample in test])\n', (11912, 11944), False, 'import torch\n'), ((11991, 12014), 'torch.tensor', 'torch.tensor', (['sample[1]'], {}), '(sample[1])\n', (12003, 12014), False, 'import torch\n'), ((12639, 12673), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1)'}), '(low=0.0, high=1)\n', (12656, 12673), True, 'import numpy as np\n'), ((12699, 12733), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.2)', 'high': '(1)'}), '(low=0.2, high=1)\n', (12716, 12733), True, 'import numpy as np\n'), ((12759, 12793), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.9)', 'high': '(1)'}), '(low=0.9, high=1)\n', (12776, 12793), True, 'import numpy as np\n'), ((12938, 12996), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['HSVcolor[0]', 'HSVcolor[1]', 'HSVcolor[2]'], {}), '(HSVcolor[0], HSVcolor[1], HSVcolor[2])\n', (12957, 12996), False, 'import colorsys\n'), ((4175, 4186), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4183, 4186), True, 'import numpy as np\n'), ((5780, 5791), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (5788, 5791), True, 'import numpy as np\n'), ((5887, 5898), 'numpy.shape', 
'np.shape', (['y'], {}), '(y)\n', (5895, 5898), True, 'import numpy as np\n'), ((9251, 9284), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (9282, 9284), False, 'import torchvision\n'), ((9317, 9371), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (9349, 9371), False, 'import torchvision\n'), ((11488, 11521), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (11519, 11521), True, 'import torchvision.transforms as transforms\n'), ((11523, 11544), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (11542, 11544), True, 'import torchvision.transforms as transforms\n')] |
import torch.nn as nn
import torch
import numpy as np
import pytest
from test.utils import convert_and_test
class FNormTest(nn.Module):
    """nn.Module wrapping the functional ``torch.norm`` (p=2) call so it
    can be exercised by the conversion tests.
    """

    def __init__(self, dim, keepdim):
        super(FNormTest, self).__init__()
        self.dim = dim          # dimensions the norm is reduced over
        self.keepdim = keepdim  # whether reduced dims are kept as size 1

    def forward(self, x):
        return torch.norm(x, p=2, dim=self.dim, keepdim=self.keepdim)
# TODO: Not working with dim=[2,3] and change_ordering=False ???? error about 0.0001-0.001
@pytest.mark.repeat(10)
@pytest.mark.parametrize('change_ordering', [True, False])
@pytest.mark.parametrize('dim', [[1, 2], [1, 3]])
@pytest.mark.parametrize('epsilon', [5e-5])
@pytest.mark.parametrize('keepdim', [True, False])
def test_norm(change_ordering, dim, epsilon, keepdim):
    """Exercise FNormTest conversion across every dim/keepdim/ordering combo.

    convert_and_test (from test.utils) presumably converts the model and
    compares original vs converted outputs within ``epsilon`` — confirm
    against its implementation.
    """
    model = FNormTest(dim, keepdim)
    model.eval()
    # Random image-shaped input in [0, 1): batch of 1, 3 channels, 224x224.
    input_np = np.random.uniform(0, 1, (1, 3, 224, 224))
    error = convert_and_test(model, input_np, verbose=False, change_ordering=change_ordering, epsilon=epsilon)
| [
"numpy.random.uniform",
"pytest.mark.repeat",
"torch.norm",
"test.utils.convert_and_test",
"pytest.mark.parametrize"
] | [((528, 550), 'pytest.mark.repeat', 'pytest.mark.repeat', (['(10)'], {}), '(10)\n', (546, 550), False, 'import pytest\n'), ((552, 609), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""change_ordering"""', '[True, False]'], {}), "('change_ordering', [True, False])\n", (575, 609), False, 'import pytest\n'), ((611, 659), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', '[[1, 2], [1, 3]]'], {}), "('dim', [[1, 2], [1, 3]])\n", (634, 659), False, 'import pytest\n'), ((661, 704), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""epsilon"""', '[5e-05]'], {}), "('epsilon', [5e-05])\n", (684, 704), False, 'import pytest\n'), ((705, 754), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""keepdim"""', '[True, False]'], {}), "('keepdim', [True, False])\n", (728, 754), False, 'import pytest\n'), ((879, 920), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1, 3, 224, 224)'], {}), '(0, 1, (1, 3, 224, 224))\n', (896, 920), True, 'import numpy as np\n'), ((934, 1037), 'test.utils.convert_and_test', 'convert_and_test', (['model', 'input_np'], {'verbose': '(False)', 'change_ordering': 'change_ordering', 'epsilon': 'epsilon'}), '(model, input_np, verbose=False, change_ordering=\n change_ordering, epsilon=epsilon)\n', (950, 1037), False, 'from test.utils import convert_and_test\n'), ((362, 416), 'torch.norm', 'torch.norm', (['x'], {'p': '(2)', 'dim': 'self.dim', 'keepdim': 'self.keepdim'}), '(x, p=2, dim=self.dim, keepdim=self.keepdim)\n', (372, 416), False, 'import torch\n')] |
##
"""
Augmented Reality Drumset Main Script
"""
## Imports
import cv2
import numpy as np
import time
import pyaudio
import wave
from array import array
from struct import pack
import os
import threading
##
DRUMSOUNDSFOLDER = "drumFiles"
## Playing Drum Sounds
#threads the play function
def drumThreadCreator(file):
    """Play the given .wav file on a freshly spawned background thread.

    Args:
        file (str): path to the .wav sample handed to ``play``.
    """
    worker = threading.Thread(target=play, args=(file,))
    worker.start()
#plays the sound of given .wav file
def play(file):
    """Play a .wav file on the default audio output (blocking).

    Args:
        file (str): path to the .wav file to play.
    """
    CHUNK = 1024  # frames read per iteration
    wf = wave.open(file, 'rb')
    p = pyaudio.PyAudio()
    try:
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        try:
            data = wf.readframes(CHUNK)
            while len(data) > 0:
                stream.write(data)
                data = wf.readframes(CHUNK)
        finally:
            # Always release the stream, even if playback raises.
            stream.stop_stream()
            stream.close()
    finally:
        # Previously the PyAudio instance leaked on error and the wave
        # file was never closed at all.
        p.terminate()
        wf.close()
#plays drum given specified drum sound index
def playDrum(i):
    """Trigger playback of the drum sample mapped to index i.

    Unknown indices are silently ignored, matching the original
    if/elif chain.

    Args:
        i (int): 0=snare, 1=rack tom, 2=tom, 3=kick, 4=closed hi-hat.
    """
    global DRUMSOUNDSFOLDER
    samples = {0: "snare.wav",
               1: "rack tom.wav",
               2: "tom.wav",
               3: "kick.wav",
               4: "closed hat.wav"}
    sample = samples.get(i)
    if sample is not None:
        drumThreadCreator(DRUMSOUNDSFOLDER + "/" + sample)
##
#returns the drum shape and location on screen
def getDrum(i):
    """Return the on-screen description of drum i.

    Args:
        i (int): drum index (0-4).

    Returns:
        tuple: (center (x, y), radius, color BGR, lineWidth).
    """
    color = (0, 255, 0)
    lineWidth = 2
    drums = [
        ((300, 550), 120, color, lineWidth),
        ((580, 500), 100, color, lineWidth),
        ((820, 500), 100, color, lineWidth),
        ((1100, 550), 140, color, lineWidth),
        ((150, 300), 100, color, lineWidth),
    ]
    return drums[i]
#check if drumstick is detected in any drums
def checkDrum(res, k):
    """Return True if the masked frame shows the stick inside drum k.

    Samples a sparse 20px grid inside drum k's zone and reports whether
    any sampled pixel exceeds the per-channel detection threshold.

    Args:
        res: color-masked frame indexed as res[row][col][channel].
        k (int): drum index.
    """
    threshold = (10, 10, 10)
    (cx, cy), radius, _, _ = getDrum(k)
    for row in range(cy - radius // 2, cy + radius * 2 // 3, 20):
        for col in range(cx - radius // 2, cx + radius // 2, 20):
            for channel in range(3):
                if res[row][col][channel] >= threshold[channel]:
                    return True
    return False
#gives color range to be detected
def getColorRange():
    """Return the HSV bounds used to detect the drumstick color (red).

    Red wraps around the hue axis, so two (lower, upper) ranges are
    returned: one near hue 0 and one near hue 180.

    Returns:
        tuple of np.ndarray: (lower1, upper1, lower2, upper2), dtype uint8.
    """
    lower_red_a = np.array([0, 120, 70], np.uint8)
    upper_red_a = np.array([10, 255, 255], np.uint8)
    lower_red_b = np.array([170, 120, 70], np.uint8)
    upper_red_b = np.array([180, 255, 255], np.uint8)
    return lower_red_a, upper_red_a, lower_red_b, upper_red_b
#apply filters and mask on frame for contouring
def filterFrame(frame):
    """Blur, color-mask and grayscale a BGR frame for contour detection.

    Args:
        frame: BGR camera frame.

    Returns:
        tuple: (gray, res) — the grayscale masked frame used for
        contouring, and the color-masked frame used for hit detection.
    """
    kernel = np.ones((5, 5), 'uint8')
    # Smooth and denoise before masking.
    smoothed = cv2.GaussianBlur(frame, (11, 11), 0)
    smoothed = cv2.erode(smoothed, kernel, iterations=5)
    smoothed = cv2.dilate(smoothed, kernel, iterations=5)
    # Mask the target color in HSV space (two ranges for red).
    lower1, upper1, lower2, upper2 = getColorRange()
    hsv = cv2.cvtColor(smoothed, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower1, upper1) + cv2.inRange(hsv, lower2, upper2)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    # Grayscale and return.
    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    return gray, res
#resize and mirror frames for more natural drum experience
def rescaleFrame(frame):
    """Mirror the frame horizontally for a more natural drumming view.

    The resize uses scale factors of 1 (currently a no-op) so the sizing
    can be tuned in one place.
    """
    resized = cv2.resize(frame, (0, 0), fx=1, fy=1)
    return cv2.flip(resized, +1)
#finds contours around the filtered frame
def contourFilteredFrame(frame):
    """Adaptively threshold a grayscale frame and return its contours."""
    binary = cv2.adaptiveThreshold(frame, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 11, 2)
    found, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return found
#draws a circle with a dot for detected objects
def drawContours(contours):
    """Describe detected objects as circles with a center dot.

    Args:
        contours: contours from cv2.findContours.

    Returns:
        list of (x, y, radius, center) tuples for contours whose enclosing
        circle has radius >= 30, capped at roughly the first 10 contours
        (the counter includes skipped small contours, as before).
    """
    minRad = 30
    maxContours = 10
    detected = []
    for count, contour in enumerate(contours, start=1):
        ((x, y), radius) = cv2.minEnclosingCircle(contour)
        # Drop contours that are too small to be the stick tip.
        if radius < minRad:
            continue
        # Centroid from image moments.
        M = cv2.moments(contour)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        detected.append((x, y, radius, center))
        if count > maxContours:
            break
    return detected
#Reduce Timer Count By 1
def timerLoopCount(drums):
    """Decrement every positive per-drum cooldown timer by one.

    Mutates ``drums`` in place and also returns it for convenience.

    Args:
        drums (list[int]): per-drum timer values.
    """
    for index, remaining in enumerate(drums):
        if remaining > 0:
            drums[index] = remaining - 1
    return drums
## Main
#main run function for program
def main():
    """Run the AR drum kit loop.

    Captures webcam frames, detects the colored drumstick tip, draws the
    drum zones, and triggers a drum sample when the tip enters a zone.
    Press 'q' in the OpenCV window to quit.

    Side effects: opens the default camera, shows an OpenCV window, and
    plays audio on background threads.
    """
    #parameters
    drumNum = 5
    drums = [0] * drumNum  # per-drum cooldown timers, counted in loop iterations
    inDrums = [False] * drumNum  # whether the stick was already inside each drum
    #set up video
    cap = cv2.VideoCapture(0)
    #buffer to load video
    time.sleep(2.0)
    #main cv2 video loop
    while(True):
        #read frames
        _, frame = cap.read()
        frame = rescaleFrame(frame)
        filteredFrame, res = filterFrame(frame)
        contours = contourFilteredFrame(filteredFrame)
        contourList = drawContours(contours)
        # Outline each detected stick candidate with a circle and center dot.
        for x,y,radius,center in contourList:
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 0, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
        # Reduce timer count by 1
        drums = timerLoopCount(drums)
        #loops through all drums
        for i in range(drumNum):
            #draws drum
            point, radius, color, lineWidth = getDrum(i)
            cv2.circle(frame,point,radius,color,lineWidth)
            #when drum has finished its timer period
            timer = drums[i]
            if timer == 0:
                #check if drum is hit
                isHit = checkDrum(res, i)
                # Only retrigger on a fresh entry (stick was outside last frame).
                if isHit == True and inDrums[i] == False:
                    playDrum(i)
                    # Fill the drum circle as visual hit feedback.
                    cv2.circle(frame,point,radius,color,-1)
                    #reset timer to 5 loops
                    drums[i] = 5
                    inDrums[i] = True
                else:
                    inDrums[i] = False
        cv2.imshow("Drum AR", frame)
        #if condition is met, break out of loop
        ch = cv2.waitKey(1)
        if ch & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
main() | [
"cv2.GaussianBlur",
"cv2.bitwise_and",
"numpy.ones",
"cv2.adaptiveThreshold",
"cv2.erode",
"cv2.imshow",
"cv2.inRange",
"cv2.dilate",
"cv2.cvtColor",
"cv2.destroyAllWindows",
"cv2.resize",
"threading.Thread",
"cv2.circle",
"cv2.minEnclosingCircle",
"cv2.waitKey",
"time.sleep",
"cv2.f... | [((341, 384), 'threading.Thread', 'threading.Thread', ([], {'target': 'play', 'args': '(file,)'}), '(target=play, args=(file,))\n', (357, 384), False, 'import threading\n'), ((511, 532), 'wave.open', 'wave.open', (['file', '"""rb"""'], {}), "(file, 'rb')\n", (520, 532), False, 'import wave\n'), ((542, 559), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (557, 559), False, 'import pyaudio\n'), ((2544, 2576), 'numpy.array', 'np.array', (['[0, 120, 70]', 'np.uint8'], {}), '([0, 120, 70], np.uint8)\n', (2552, 2576), True, 'import numpy as np\n'), ((2595, 2629), 'numpy.array', 'np.array', (['[10, 255, 255]', 'np.uint8'], {}), '([10, 255, 255], np.uint8)\n', (2603, 2629), True, 'import numpy as np\n'), ((2648, 2682), 'numpy.array', 'np.array', (['[170, 120, 70]', 'np.uint8'], {}), '([170, 120, 70], np.uint8)\n', (2656, 2682), True, 'import numpy as np\n'), ((2701, 2736), 'numpy.array', 'np.array', (['[180, 255, 255]', 'np.uint8'], {}), '([180, 255, 255], np.uint8)\n', (2709, 2736), True, 'import numpy as np\n'), ((2906, 2930), 'numpy.ones', 'np.ones', (['(5, 5)', '"""uint8"""'], {}), "((5, 5), 'uint8')\n", (2913, 2930), True, 'import numpy as np\n'), ((2944, 2980), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['frame', '(11, 11)', '(0)'], {}), '(frame, (11, 11), 0)\n', (2960, 2980), False, 'import cv2\n'), ((2994, 3034), 'cv2.erode', 'cv2.erode', (['blurred', 'kernel'], {'iterations': '(5)'}), '(blurred, kernel, iterations=5)\n', (3003, 3034), False, 'import cv2\n'), ((3051, 3092), 'cv2.dilate', 'cv2.dilate', (['blurred', 'kernel'], {'iterations': '(5)'}), '(blurred, kernel, iterations=5)\n', (3061, 3092), False, 'import cv2\n'), ((3191, 3231), 'cv2.cvtColor', 'cv2.cvtColor', (['blurred', 'cv2.COLOR_BGR2HSV'], {}), '(blurred, cv2.COLOR_BGR2HSV)\n', (3203, 3231), False, 'import cv2\n'), ((3249, 3283), 'cv2.inRange', 'cv2.inRange', (['frameHSV', 'cLow1', 'cUp1'], {}), '(frameHSV, cLow1, cUp1)\n', (3260, 3283), False, 'import cv2\n'), ((3301, 3335), 
'cv2.inRange', 'cv2.inRange', (['frameHSV', 'cLow2', 'cUp2'], {}), '(frameHSV, cLow2, cUp2)\n', (3312, 3335), False, 'import cv2\n'), ((3386, 3431), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'colorMask'}), '(frame, frame, mask=colorMask)\n', (3401, 3431), False, 'import cv2\n'), ((3476, 3513), 'cv2.cvtColor', 'cv2.cvtColor', (['res', 'cv2.COLOR_BGR2GRAY'], {}), '(res, cv2.COLOR_BGR2GRAY)\n', (3488, 3513), False, 'import cv2\n'), ((3637, 3674), 'cv2.resize', 'cv2.resize', (['frame', '(0, 0)'], {'fx': '(1)', 'fy': '(1)'}), '(frame, (0, 0), fx=1, fy=1)\n', (3647, 3674), False, 'import cv2\n'), ((3690, 3709), 'cv2.flip', 'cv2.flip', (['frame', '(+1)'], {}), '(frame, +1)\n', (3698, 3709), False, 'import cv2\n'), ((3816, 3912), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['frame', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(11)', '(2)'], {}), '(frame, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 11, 2)\n', (3837, 3912), False, 'import cv2\n'), ((3926, 3990), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (3942, 3990), False, 'import cv2\n'), ((4937, 4956), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (4953, 4956), False, 'import cv2\n'), ((4987, 5002), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (4997, 5002), False, 'import time\n'), ((6490, 6513), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6511, 6513), False, 'import cv2\n'), ((4234, 4265), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['contour'], {}), '(contour)\n', (4256, 4265), False, 'import cv2\n'), ((4397, 4417), 'cv2.moments', 'cv2.moments', (['contour'], {}), '(contour)\n', (4408, 4417), False, 'import cv2\n'), ((6289, 6317), 'cv2.imshow', 'cv2.imshow', (['"""Drum AR"""', 'frame'], {}), "('Drum AR', frame)\n", (6299, 6317), False, 'import cv2\n'), ((6388, 6402), 
'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6399, 6402), False, 'import cv2\n'), ((5415, 5460), 'cv2.circle', 'cv2.circle', (['frame', 'center', '(5)', '(0, 0, 255)', '(-1)'], {}), '(frame, center, 5, (0, 0, 255), -1)\n', (5425, 5460), False, 'import cv2\n'), ((5710, 5760), 'cv2.circle', 'cv2.circle', (['frame', 'point', 'radius', 'color', 'lineWidth'], {}), '(frame, point, radius, color, lineWidth)\n', (5720, 5760), False, 'import cv2\n'), ((6056, 6099), 'cv2.circle', 'cv2.circle', (['frame', 'point', 'radius', 'color', '(-1)'], {}), '(frame, point, radius, color, -1)\n', (6066, 6099), False, 'import cv2\n')] |
import pandas as pd
import numpy as np
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
import seaborn as sns
from mlflow import log_artifact
sns.set()
# Confusion matrix
def plot_cm(target_all, predictions_all
            , path="data/08_reporting/confusion_matrix.png", show=False):
    """Plot a labelled confusion matrix, save it to *path* and log it to MLflow.

    Args:
        target_all: ground-truth labels (iterable of 0/1).
        predictions_all: predicted labels (iterable of 0/1).
        path: destination file for the rendered figure.
        show: when True, also display the figure interactively.
    """
    data_cm = metrics.confusion_matrix(target_all, predictions_all)
    positif_negatif_dict_map = {1: "positif", 0: "negatif"}
    # Compute the label list once instead of twice (columns and index share it).
    labels = [positif_negatif_dict_map[i] for i in np.unique(target_all)]
    df_cm = pd.DataFrame(data_cm, columns=labels, index=labels)
    df_cm.index.name = 'Actual'
    df_cm.columns.name = 'Predicted'
    plt.figure(figsize=(7, 6))
    sns.heatmap(df_cm, cmap="Blues", annot=True, fmt='g')
    plt.tight_layout()
    plt.savefig(path, bbox_inches="tight")
    log_artifact(path)
    if show:
        plt.show()
    else:
        # Release the figure explicitly; otherwise repeated calls leak
        # matplotlib figures when running headless.
        plt.close()
"matplotlib.pyplot.tight_layout",
"seaborn.heatmap",
"matplotlib.pyplot.show",
"numpy.unique",
"mlflow.log_artifact",
"matplotlib.pyplot.figure",
"sklearn.metrics.confusion_matrix",
"seaborn.set",
"matplotlib.pyplot.savefig"
] | [((159, 168), 'seaborn.set', 'sns.set', ([], {}), '()\n', (166, 168), True, 'import seaborn as sns\n'), ((319, 372), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['target_all', 'predictions_all'], {}), '(target_all, predictions_all)\n', (343, 372), True, 'import sklearn.metrics as metrics\n'), ((708, 734), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 6)'}), '(figsize=(7, 6))\n', (718, 734), True, 'import matplotlib.pyplot as plt\n'), ((740, 793), 'seaborn.heatmap', 'sns.heatmap', (['df_cm'], {'cmap': '"""Blues"""', 'annot': '(True)', 'fmt': '"""g"""'}), "(df_cm, cmap='Blues', annot=True, fmt='g')\n", (751, 793), True, 'import seaborn as sns\n'), ((799, 817), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (815, 817), True, 'import matplotlib.pyplot as plt\n'), ((822, 860), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'bbox_inches': '"""tight"""'}), "(path, bbox_inches='tight')\n", (833, 860), True, 'import matplotlib.pyplot as plt\n'), ((865, 883), 'mlflow.log_artifact', 'log_artifact', (['path'], {}), '(path)\n', (877, 883), False, 'from mlflow import log_artifact\n'), ((906, 916), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (914, 916), True, 'import matplotlib.pyplot as plt\n'), ((515, 536), 'numpy.unique', 'np.unique', (['target_all'], {}), '(target_all)\n', (524, 536), True, 'import numpy as np\n'), ((609, 630), 'numpy.unique', 'np.unique', (['target_all'], {}), '(target_all)\n', (618, 630), True, 'import numpy as np\n')] |
import numpy
from astropy.constants import c, G, M_sun, R_sun, au, L_sun, pc
from astropy import units as u
from math import pi, sqrt, exp, log, log2
from scipy.special import j0, j1 # Bessel function
from scipy.optimize import minimize
r_g_sun = ((2 * G * M_sun) / (c**2)) / u.meter # [m] Schwarzschild radius of the sun
F0_sun = (R_sun / u.meter)**2 / (2 * r_g_sun) # [m] closest focus of SGL, ~547.303 au
v_critical = 122.3 # [GHz] Returns the frequency at which the sun has no focus
def d_critical(frequency):
    """Focal distance [m] of the combined gravity + plasma solar lens.

    Minimises the heliocentric focus over the impact parameter (searched
    in [1, 2] solar radii) for a signal of the given frequency [GHz].
    """
    def _focus(b):
        # Plasma correction relative to the pure-gravity focus F0_sun * b**2.
        plasma_term = (v_critical ** 2 / frequency ** 2) * (1 / b) ** 15
        return F0_sun * b ** 2 * (1 - plasma_term) ** -1

    result = minimize(_focus, bounds=[(1, 2)], x0=1.1)
    return result.fun[0]
def holevo_perfect(M, eta):
    """Holevo capacity of a noise-free channel.

    M: number of photons per mode (the signal)
    eta: receiver efficiency
    """
    x = M * eta  # mean detected photon number per mode
    g = (1 + x) * log2(1 + x) - x * log2(x)
    return g / M * eta
def holevo_thermal(M, N_th, eta):
    """Holevo capacity of a lossy channel with thermal noise.

    M: number of signal photons per mode
    N_th: average number of noise photons per mode
    eta: receiver efficiency

    Example: with F signal photons, N noise photons and ``modes`` modes,
    use M = F / modes and N_th = N / modes.
    """
    def _g(x):
        return ((1 + x) * log2(1 + x) - x * log2(x)) / M * eta

    leaked_noise = (1 - eta) * N_th
    signal_plus_noise = M * eta + leaked_noise
    return _g(signal_plus_noise) - _g(leaked_noise)
def photons_received(D_r, D_t, P, wavelength, R, Q_R=1.22):
    """Return the photon rate collected by the receiving telescope.

    D_r: receiver aperture diameter [m]
    D_t: transmitter aperture diameter [m]
    P: transmitted power [W] — assumed from the flux formula; TODO confirm
    wavelength: wavelength [m]
    R: distance between transmitter and receiver [m]
    Q_R: diffraction-limit quality factor (1.22 for a perfect circular
        aperture)
    """
    h = 6.62607004E-34  # [J*s] Planck's constant
    # Photon frequency in Hz; `c` is the astropy speed-of-light constant,
    # so the units are divided out to leave a plain float.
    f = 1 / (wavelength / c) / (u.meter / u.second)
    # Power spread over the diffraction-limited beam radius
    # Q_R * wavelength / D_t * R at distance R, divided by the photon
    # energy (pi * h * f) and intercepted by the receiver area pi*D_r**2/4.
    F_r = P / ((pi * h * f) * (Q_R * wavelength / D_t * R)**2) * pi * D_r**2 / 4
    return F_r
def Q(wavelength):
    """Technologically achievable focusing quality (Q-factor) for a
    wavelength given in metres (Earth 2007 tech)."""
    nm = wavelength / 10**-9  # work in nanometres
    if nm > 300:
        # Diffraction-limited regime above 300 nm.
        return 1.22
    # Power-law fit to values from Earth 2017 technology (see Figure).
    return 1 / ((5.21575598 * (nm / 1000)**1.20669934) / 1.22) + 0.22
def sgl_psf(rho, wavelength, z, r_g=r_g_sun):
    """Gain of the solar-gravitational-lens point-spread function.

    rho: distance from the optical axis [m]
    wavelength: observing wavelength [m]
    z: heliocentric distance of the focal plane [m]
    r_g: Schwarzschild radius of the lens [m], defaults to the Sun's

    NOTE(review): ``exp(1) ** x`` is just ``exp(x)`` written indirectly;
    kept as-is to preserve the exact floating-point result.
    """
    # Gravitational-amplification prefactor times the squared zeroth-order
    # Bessel function, which gives the radial oscillation of the PSF.
    return 4 * pi**2 / (1-exp(1) ** (-4 * pi**2 * r_g / wavelength)) * r_g / \
        wavelength * j0(2*pi*(rho/wavelength)*sqrt((2*r_g)/z))**2
def integrated_flux(wavelength, z, d0, r_g=r_g_sun):
    """Light amplification averaged over a circular aperture.

    Implements Eq. 143 in Turyshev (2017).

    wavelength: observing wavelength [m]
    z: heliocentric distance [m]
    d0: aperture diameter [m]
    r_g: Schwarzschild radius of the lens [m], defaults to the Sun's
    """
    first_term = 4 * pi**2 / (1 - exp(1) ** (-4 * pi**2 * r_g / wavelength)) * \
        r_g / wavelength
    # Aperture-averaged PSF: zeroth- and first-order Bessel terms evaluated
    # at the aperture edge (argument pi * d0 / wavelength * sqrt(2 r_g / z)).
    zeroth_bessel = j0(pi*(d0/wavelength)*sqrt((2*r_g)/z))**2
    first_bessel = j1(pi*(d0/wavelength)*sqrt((2*r_g)/z))**2
    return first_term * (zeroth_bessel + first_bessel)
def classical_aperture(wavelength, z, d0, r_g=r_g_sun):
    """Diameter D [m] of a classical telescope collecting the same flux as
    an aperture d0 [m] used with the solar gravitational lens."""
    flux = integrated_flux(wavelength, z, d0, r_g=r_g)
    # Bugfix as explained by <NAME> in his mail on 24 July 2017:
    # the thickness of the Einstein ring seen by a telescope equals the
    # telescope's aperture, which adds a factor sqrt(2) to the equivalent
    # diameter (e.g. 75 km instead of 53 km).
    sgl_aperture = 2 * pi * (d0 / 2)**2  # collecting area including that factor 2
    effective_flux = flux * sgl_aperture
    classical_aperture = 2 * sqrt(effective_flux / pi)  # equivalent telescope diameter [m]
    return classical_aperture
def b_of_z(z, F0=F0_sun):
    """Impact parameter b corresponding to heliocentric distance z [m].

    F0 defaults to the Sun's closest focal distance.
    """
    numerator = sqrt(z)
    denominator = sqrt(F0)
    return numerator / denominator
def z_of_b(b, F0=F0_sun):
    """Heliocentric distance z [m] corresponding to impact parameter b.

    F0 defaults to the Sun's closest focal distance.
    """
    r_sun_m = R_sun / u.meter  # solar radius as a plain float [m]
    return F0 * (b * r_sun_m)**2 / r_sun_m**2
def noise_photons(wavelength, z, d0, fractional_bandwidth, print_status=False):
    """Noise photons from the ring-shaped area of solar corona overlaying
    the Einstein ring.

    wavelength: observing wavelength [m]
    z: heliocentric distance of the receiver [m]
    d0: receiver aperture diameter [m]
    fractional_bandwidth: observed fraction of the total bandwidth
    print_status: when True, print every intermediate quantity

    Returns the noise photon rate [photons per nm per second].
    """
    def apparent_size_sun(z):
        """Apparent diameter of the Sun [arcsec] at heliocentric distance z."""
        return 3600 * 180 / pi * 2 * R_sun / u.meter / z  # arcseconds
    def angular_resolution(d, wavelength, Q):
        """Angular resolution [arcsec] of a telescope.
        d: circular aperture diameter [m]
        wavelength: wavelength [m]
        Q: quality factor; diffraction limit is Q = 1.22"""
        return 3600 * 180 / pi * Q * wavelength / d
    b = b_of_z(z=z)
    width_sun = apparent_size_sun(z=z)
    resolution = angular_resolution(d=d0, wavelength=wavelength, Q=1.22)
    # Ring at radius r (Einstein ring) with one resolution element of width.
    r = b * width_sun / 2
    A_ring = pi * ((r + resolution)**2 - r**2)
    A_sun = pi * width_sun**2 / 4
    # Corona brightness falls off with impact parameter as b**-6,
    # normalised to 1e-6 solar luminosities at b = 1.
    ring_brightness = b**-6 * 10**-6
    noise_in_ring = ring_brightness * (A_ring / A_sun) # [solar luminosities]
    # Dilute over the sphere of radius z to get the flux at the receiver.
    distance_scale = 4 * pi * z**2
    noise_in_ring_scaled_to_distance = noise_in_ring / distance_scale
    noise_watt = noise_in_ring_scaled_to_distance * L_sun / u.watt
    # Photon energy per nm at this wavelength (h*c expressed in these units).
    joule_wavelength = 1.98641E-19 / wavelength / 10**6
    fractional_noise = fractional_bandwidth * noise_watt
    noise_photons_per_nm_per_sec = fractional_noise / joule_wavelength
    if print_status:
        print('b=', b)
        print('width of sun (arcsec)', width_sun)
        print('resolution', resolution)
        print('r', r)
        print('A_ring', A_ring)
        print('A_sun', A_sun)
        print('A_ring / A_sun', A_ring / A_sun)
        print('ring_brightness', ring_brightness)
        print('noise_in_ring_scaled_to_distance', noise_in_ring_scaled_to_distance)
        print('noise_watt', noise_watt)
        print('joule_wavelength', joule_wavelength)
        print('fractional_bandwidth', fractional_bandwidth)
        print('fractional_noise', fractional_noise)
        print('noise_photons_per_nm_per_sec', noise_photons_per_nm_per_sec)
    return noise_photons_per_nm_per_sec
def QuadraticLimbDarkening(Impact, limb1, limb2):
    """Quadratic limb-darkening law.

    Kopal 1950, Harvard Col. Obs. Circ., 454, 1.
    Impact: normalised radial coordinate; limb1, limb2: the two
    limb-darkening coefficients.
    """
    edge_term = 1 - Impact
    return 1 - limb1 * edge_term - limb2 * edge_term ** 2
def xy_rotate(x, y, xcen, ycen, phi):
    """Transform (x, y) into a shifted, rotated coordinate frame.

    Based on a routine written by <NAME>, U. of Utah, 2009;
    Creative Commons Attribution-Noncommercial-ShareAlike 3.0 license.

    The new frame has its origin at (xcen, ycen) in the old system and
    its x-axis rotated counter-clockwise by ``phi`` degrees with respect
    to the old x-axis.

    x, y: numpy ndarrays (matching shapes) of old-frame coordinates
    xcen, ycen: old-frame coordinates of the new origin
    phi: rotation angle, degrees c.c.w.

    Returns a 2-tuple (xnew, ynew) of coordinate arrays.
    """
    angle = numpy.deg2rad(phi)
    dx = x - xcen
    dy = y - ycen
    cos_a = numpy.cos(angle)
    sin_a = numpy.sin(angle)
    return (dx * cos_a + dy * sin_a, dy * cos_a - dx * sin_a)
def gauss_2d(x, y, par):
    """Evaluate an elliptical 2D Gaussian at coordinates (x, y).

    Based on a routine written by <NAME>, U. of Utah, 2009;
    Creative Commons Attribution-Noncommercial-ShareAlike 3.0 license.

    x, y: vectors or images of coordinates (matching numpy ndarrays)
    par[0]: amplitude (peak value, not normalised)
    par[1]: intermediate-axis sigma
    par[2]: x-center
    par[3]: y-center
    par[4]: axis ratio
    par[5]: c.c.w. major-axis rotation w.r.t. the x-axis [degrees]
    """
    # Shift and rotate into the Gaussian's principal-axis frame.
    rotated_x, rotated_y = xy_rotate(x, y, par[2], par[3], par[5])
    # Squared elliptical radius in units of the intermediate-axis sigma.
    sigma_sq = numpy.abs(par[1]) ** 2
    ellipse_sq = (rotated_x ** 2 * par[4] + rotated_y ** 2 / par[4]) / sigma_sq
    return par[0] * numpy.exp(-0.5 * ellipse_sq)
def sie_grad(x, y, par):
    """Compute the deflection (potential gradient) of an SIE lens.

    Copyright 2009 by <NAME>; Creative Commons
    Attribution-Noncommercial-ShareAlike 3.0 license applies.

    x, y: vectors or images of coordinates (matching numpy ndarrays)
    par: 1 to 5 parameters:
        par[0]: lens strength, or 'Einstein radius'
        par[1]: (optional) x-center (default = 0.0)
        par[2]: (optional) y-center (default = 0.0)
        par[3]: (optional) axis ratio (default = 1.0)
        par[4]: (optional) major-axis position angle, degrees c.c.w. of
                the x-axis (default = 0.0)

    Returns a tuple (xg, yg) of gradients at the positions (x, y).

    Implements the 'intermediate-axis' convention; analytic forms for the
    SIE potential can be found in Kassiola & Kovner 1993, Kormann et al.
    1994, and Keeton & Kochanek 1998.
    WRITTEN: <NAME>, U of Utah, 2009.
    """
    # Set parameters (missing entries fall back to defaults):
    b = numpy.abs(par[0]) # can't be negative!!!
    xzero = 0. if (len(par) < 2) else par[1]
    yzero = 0. if (len(par) < 3) else par[2]
    q = 1. if (len(par) < 4) else numpy.abs(par[3])
    phiq = 0. if (len(par) < 5) else par[4]
    eps = 0.001 # for sqrt(1/q - q) < eps, a limit expression is used.
    # Handle q > 1 gracefully by flipping the axes:
    if (q > 1.):
        q = 1.0 / q
        phiq = phiq + 90.0
    # Go into shifted coordinates of the potential:
    phirad = numpy.deg2rad(phiq)
    xsie = (x-xzero) * numpy.cos(phirad) + (y-yzero) * numpy.sin(phirad)
    ysie = (y-yzero) * numpy.cos(phirad) - (x-xzero) * numpy.sin(phirad)
    # Compute potential gradient in the transformed system:
    r_ell = numpy.sqrt(q * xsie**2 + ysie**2 / q)
    qfact = numpy.sqrt(1./q - q)
    # (r_ell == 0) terms prevent divide-by-zero problems
    if (qfact >= eps):
        xtg = (b/qfact) * numpy.arctan(qfact * xsie / (r_ell + (r_ell == 0)))
        ytg = (b/qfact) * numpy.arctanh(qfact * ysie / (r_ell + (r_ell == 0)))
    else:
        # Near-spherical limit (q -> 1): arctan/arctanh reduce to ratios.
        xtg = b * xsie / (r_ell + (r_ell == 0))
        ytg = b * ysie / (r_ell + (r_ell == 0))
    # Transform back to un-rotated system:
    xg = xtg * numpy.cos(phirad) - ytg * numpy.sin(phirad)
    yg = ytg * numpy.cos(phirad) + xtg * numpy.sin(phirad)
    # Return value:
    return (xg, yg)
| [
"scipy.optimize.minimize",
"numpy.arctanh",
"numpy.abs",
"math.exp",
"math.sqrt",
"numpy.deg2rad",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"numpy.arctan",
"math.log2",
"numpy.sqrt"
] | [((7774, 7792), 'numpy.deg2rad', 'numpy.deg2rad', (['phi'], {}), '(phi)\n', (7787, 7792), False, 'import numpy\n'), ((10288, 10305), 'numpy.abs', 'numpy.abs', (['par[0]'], {}), '(par[0])\n', (10297, 10305), False, 'import numpy\n'), ((10756, 10775), 'numpy.deg2rad', 'numpy.deg2rad', (['phiq'], {}), '(phiq)\n', (10769, 10775), False, 'import numpy\n'), ((10998, 11039), 'numpy.sqrt', 'numpy.sqrt', (['(q * xsie ** 2 + ysie ** 2 / q)'], {}), '(q * xsie ** 2 + ysie ** 2 / q)\n', (11008, 11039), False, 'import numpy\n'), ((11049, 11072), 'numpy.sqrt', 'numpy.sqrt', (['(1.0 / q - q)'], {}), '(1.0 / q - q)\n', (11059, 11072), False, 'import numpy\n'), ((4034, 4059), 'math.sqrt', 'sqrt', (['(effective_flux / pi)'], {}), '(effective_flux / pi)\n', (4038, 4059), False, 'from math import pi, sqrt, exp, log, log2\n'), ((4254, 4261), 'math.sqrt', 'sqrt', (['z'], {}), '(z)\n', (4258, 4261), False, 'from math import pi, sqrt, exp, log, log2\n'), ((4264, 4272), 'math.sqrt', 'sqrt', (['F0'], {}), '(F0)\n', (4268, 4272), False, 'from math import pi, sqrt, exp, log, log2\n'), ((8921, 8947), 'numpy.exp', 'numpy.exp', (['(-0.5 * r_ell_sq)'], {}), '(-0.5 * r_ell_sq)\n', (8930, 8947), False, 'import numpy\n'), ((10456, 10473), 'numpy.abs', 'numpy.abs', (['par[3]'], {}), '(par[3])\n', (10465, 10473), False, 'import numpy\n'), ((816, 863), 'scipy.optimize.minimize', 'minimize', (['distance_min'], {'bounds': '[(1, 2)]', 'x0': '(1.1)'}), '(distance_min, bounds=[(1, 2)], x0=1.1)\n', (824, 863), False, 'from scipy.optimize import minimize\n'), ((7818, 7835), 'numpy.cos', 'numpy.cos', (['phirad'], {}), '(phirad)\n', (7827, 7835), False, 'import numpy\n'), ((7851, 7868), 'numpy.sin', 'numpy.sin', (['phirad'], {}), '(phirad)\n', (7860, 7868), False, 'import numpy\n'), ((7894, 7911), 'numpy.cos', 'numpy.cos', (['phirad'], {}), '(phirad)\n', (7903, 7911), False, 'import numpy\n'), ((7927, 7944), 'numpy.sin', 'numpy.sin', (['phirad'], {}), '(phirad)\n', (7936, 7944), False, 'import numpy\n'), 
((8879, 8896), 'numpy.abs', 'numpy.abs', (['par[1]'], {}), '(par[1])\n', (8888, 8896), False, 'import numpy\n'), ((10800, 10817), 'numpy.cos', 'numpy.cos', (['phirad'], {}), '(phirad)\n', (10809, 10817), False, 'import numpy\n'), ((10832, 10849), 'numpy.sin', 'numpy.sin', (['phirad'], {}), '(phirad)\n', (10841, 10849), False, 'import numpy\n'), ((10874, 10891), 'numpy.cos', 'numpy.cos', (['phirad'], {}), '(phirad)\n', (10883, 10891), False, 'import numpy\n'), ((10906, 10923), 'numpy.sin', 'numpy.sin', (['phirad'], {}), '(phirad)\n', (10915, 10923), False, 'import numpy\n'), ((11179, 11230), 'numpy.arctan', 'numpy.arctan', (['(qfact * xsie / (r_ell + (r_ell == 0)))'], {}), '(qfact * xsie / (r_ell + (r_ell == 0)))\n', (11191, 11230), False, 'import numpy\n'), ((11258, 11310), 'numpy.arctanh', 'numpy.arctanh', (['(qfact * ysie / (r_ell + (r_ell == 0)))'], {}), '(qfact * ysie / (r_ell + (r_ell == 0)))\n', (11271, 11310), False, 'import numpy\n'), ((11480, 11497), 'numpy.cos', 'numpy.cos', (['phirad'], {}), '(phirad)\n', (11489, 11497), False, 'import numpy\n'), ((11506, 11523), 'numpy.sin', 'numpy.sin', (['phirad'], {}), '(phirad)\n', (11515, 11523), False, 'import numpy\n'), ((11540, 11557), 'numpy.cos', 'numpy.cos', (['phirad'], {}), '(phirad)\n', (11549, 11557), False, 'import numpy\n'), ((11566, 11583), 'numpy.sin', 'numpy.sin', (['phirad'], {}), '(phirad)\n', (11575, 11583), False, 'import numpy\n'), ((3227, 3244), 'math.sqrt', 'sqrt', (['(2 * r_g / z)'], {}), '(2 * r_g / z)\n', (3231, 3244), False, 'from math import pi, sqrt, exp, log, log2\n'), ((3290, 3307), 'math.sqrt', 'sqrt', (['(2 * r_g / z)'], {}), '(2 * r_g / z)\n', (3294, 3307), False, 'from math import pi, sqrt, exp, log, log2\n'), ((1067, 1084), 'math.log2', 'log2', (['(1 + M * eta)'], {}), '(1 + M * eta)\n', (1071, 1084), False, 'from math import pi, sqrt, exp, log, log2\n'), ((1097, 1110), 'math.log2', 'log2', (['(M * eta)'], {}), '(M * eta)\n', (1101, 1110), False, 'from math import pi, sqrt, exp, 
log, log2\n'), ((2960, 2977), 'math.sqrt', 'sqrt', (['(2 * r_g / z)'], {}), '(2 * r_g / z)\n', (2964, 2977), False, 'from math import pi, sqrt, exp, log, log2\n'), ((1560, 1571), 'math.log2', 'log2', (['(1 + x)'], {}), '(1 + x)\n', (1564, 1571), False, 'from math import pi, sqrt, exp, log, log2\n'), ((1578, 1585), 'math.log2', 'log2', (['x'], {}), '(x)\n', (1582, 1585), False, 'from math import pi, sqrt, exp, log, log2\n'), ((3111, 3117), 'math.exp', 'exp', (['(1)'], {}), '(1)\n', (3114, 3117), False, 'from math import pi, sqrt, exp, log, log2\n'), ((2860, 2866), 'math.exp', 'exp', (['(1)'], {}), '(1)\n', (2863, 2866), False, 'from math import pi, sqrt, exp, log, log2\n')] |
from typing import List, NoReturn, Union, Dict
import numpy as np
from more_itertools import chunked
from config import DATA_DIR
class Bingo:
    """Bingo Solver"""

    def __init__(self, numbers: List[int], boards: List[np.ndarray]):
        """
        Parameters
        ----------
        numbers: List[int]
            An array of integers that are drawn consecutively
        boards: List[np.ndarray]
            A set of boards each consisting of a nxn grid of numbers
        """
        self.numbers = numbers
        self.boards = boards
        # Per-board metadata, filled in by prepare_boards()
        self.row_sums = None
        self.col_sums = None
        self.all_nums = None

    def prepare_boards(self) -> NoReturn:
        """
        Create the per-board metadata the search strategy relies on.

        Returns
        -------
        NoReturn
        """
        # row sums for each board
        self.row_sums = [b.sum(axis=1) for b in self.boards]
        # column sums for each board
        self.col_sums = [b.sum(axis=0) for b in self.boards]
        # all distinct numbers for each board (sets give O(1) membership)
        self.all_nums = [set(b.flatten()) for b in self.boards]

    def search_board(self, value: int, board_id: int) -> Union[None, int]:
        """
        Mark ``value`` on the board by subtracting it from the affected
        row and column sums; a sum reaching zero means the board wins.

        Parameters
        ----------
        value: int
            The drawn value.
        board_id: int
            The board ID.

        Returns
        -------
        Union[None, int]
            The board ID if this draw completes a row or column,
            otherwise None.
        """
        if value not in self.all_nums[board_id]:
            return None
        winner = None
        indexes = (self.boards[board_id] == value).nonzero()
        # Mark every occurrence before reporting the win so the remaining
        # sums used for scoring stay consistent even if a value appears
        # more than once on a board.
        for row, col in np.asarray(indexes).T:
            self.row_sums[board_id][row] -= value
            self.col_sums[board_id][col] -= value
            if (self.row_sums[board_id][row] == 0
                    or self.col_sums[board_id][col] == 0):
                winner = board_id
        return winner

    def run_win_first_strategy(self) -> Dict[str, any]:
        """
        Play until the first board has all numbers in a row or column
        marked; that board wins. (Diagonals don't count.)

        Returns
        -------
        Dict[str, any]
            The winning meta
        """
        self.prepare_boards()
        out = dict(
            values_used=[],
            board_id=None,
            board=None,
            remaining=None,
            winning_value=None,
            score=None)
        for value in self.numbers:
            out['values_used'].append(value)
            for board_id in range(len(self.boards)):
                winner = self.search_board(value, board_id)
                if winner is None:
                    continue
                # Score = sum of unmarked numbers * the winning draw.
                out['board_id'] = winner
                out['board'] = self.boards[winner]
                out['remaining'] = self.row_sums[winner].sum()
                out['winning_value'] = value
                out['score'] = out['remaining'] * out['winning_value']
                return out
        return out

    def run_win_last_strategy(self) -> Dict[str, any]:
        """
        Figure out which board wins last and report that board's score.

        Returns
        -------
        Dict[str, any]
            The winning meta
        """
        self.prepare_boards()
        out = dict(
            values_used=[],
            board_id=None,
            board=None,
            remaining=None,
            winning_value=None,
            score=None)
        already_won = []
        for value in self.numbers:
            if len(already_won) == len(self.boards):
                break  # every board has already won
            out['values_used'].append(value)
            for board_id in range(len(self.boards)):
                if board_id in already_won:
                    continue  # finished boards are no longer marked
                winner = self.search_board(value, board_id)
                if winner is not None:
                    already_won.append(winner)
        last_winner = already_won[-1]
        out['board_id'] = last_winner
        out['board'] = self.boards[last_winner]
        out['remaining'] = self.row_sums[last_winner].sum()
        out['winning_value'] = out['values_used'][-1]
        out['score'] = out['remaining'] * out['winning_value']
        return out
return out
def main():
    """
    Load the day-4 puzzle input and run both bingo strategies.

    Returns
    -------
    NoReturn
    """
    path = DATA_DIR.joinpath('day4.txt')
    with open(path, 'r') as f:
        lines = f.readlines()
    # The first line holds the drawn numbers as a comma-separated list.
    inputs = list(map(int, lines[0].strip().split(',')))
    boards = list()
    # Boards are separated by blank lines; chunks of 6 lines appear to be
    # one blank line plus the board rows — TODO confirm against the input.
    for chunk in chunked(lines[1:], n=6):
        # takes into account the empty lines between the boards
        board = np.asarray([s.strip().split() for s in chunk[1:]], dtype=int)
        boards.append(board)
    bingo = Bingo(inputs, boards)
    result1 = bingo.run_win_first_strategy()
    print('First strategy')
    print(result1)
    result2 = bingo.run_win_last_strategy()
    print('Second strategy')
    print(result2)
if __name__ == "__main__":
main()
| [
"numpy.asarray",
"config.DATA_DIR.joinpath",
"more_itertools.chunked"
] | [((4889, 4918), 'config.DATA_DIR.joinpath', 'DATA_DIR.joinpath', (['"""day4.txt"""'], {}), "('day4.txt')\n", (4906, 4918), False, 'from config import DATA_DIR\n'), ((5076, 5099), 'more_itertools.chunked', 'chunked', (['lines[1:]'], {'n': '(6)'}), '(lines[1:], n=6)\n', (5083, 5099), False, 'from more_itertools import chunked\n'), ((1921, 1940), 'numpy.asarray', 'np.asarray', (['indexes'], {}), '(indexes)\n', (1931, 1940), True, 'import numpy as np\n')] |
# Created by Pro-Machina
# This is an implementation of Particle Swarm Optimisation algorithm for the function:
# Maximize: f(x) = 1 - (x^2) + 2x
# Matrices are classified into position and fitness matrices, majorly only position matrices are used
import numpy as np
import random
# Paramenters are taken as
w = 0.7 # Inertia weight (larger -> greater global search, smaller -> greater local search)
c1 = 0.2 # Acceleratin coefficient 1
c2 = 0.6 # Acceleration coefficient 2
iterations = 100 # Number of iterations to go through
# (c1 > c2 : greater local search ability)
# (c2 > c1 : greater global search ability)
def find_fitness (swarm_pos):
    """Return the fitness of each swarm particle from its position.

    Fitness function: f(x) = 1 - x^2 + 2x, evaluated on the first
    variable of each particle.  Update only this function when the
    optimisation target changes.

    Parameters
    ----------
    swarm_pos : np.ndarray
        Either a 1-D array (one coordinate per particle) or a 2-D
        (swarm_size x n_var) position matrix.

    Returns
    -------
    np.ndarray
        A (swarm_size x 1) fitness matrix.
    """
    swarm_size = int(np.shape(swarm_pos)[0])
    # Global best is passed here as a 1-D array, hence the ndim check.
    multi_dim = np.ndim(swarm_pos) > 1
    swarm_fit = np.zeros((swarm_size, 1))
    for r in range(0, swarm_size):
        # Pick the scalar coordinate first: the original evaluated the 1-D
        # expression on the whole row and then overwrote it, which raised
        # a ValueError for n_var > 1.
        x = swarm_pos[r][0] if multi_dim else swarm_pos[r]
        swarm_fit[r] = 1 - x**2 + 2*x
    # Swarm fitness is a (swarm_size X 1) dimensional fitness matrix
    return swarm_fit
def find_global_best (swarm_pos, global_best, max_min = 'max'):
    """Return the best position found across the whole swarm.

    Parameters
    ----------
    swarm_pos : np.ndarray
        (swarm_size x n_var) position matrix.
    global_best : np.ndarray
        Current global-best position.
    max_min : str
        'min' for a minimisation problem, 'max' (default) otherwise.

    Returns
    -------
    np.ndarray
        The updated global-best position, a (1 x n_var) position matrix.
    """
    swarm_fit = find_fitness(swarm_pos)
    swarm_size = int(np.shape(swarm_pos)[0])
    # One comparison predicate instead of two duplicated loops.
    better = (lambda a, b: a < b) if max_min == 'min' else (lambda a, b: a > b)
    # Track the incumbent fitness instead of re-evaluating
    # find_fitness(global_best) on every iteration.
    best_fit = float(find_fitness(global_best)[0])
    for r in range(0, swarm_size):
        if better(float(swarm_fit[r][0]), best_fit):
            global_best = (swarm_pos[r][:]).copy()
            best_fit = float(swarm_fit[r][0])
    return global_best
def find_local_best (swarm_pos, local_best, max_min = 'max'):
    """Update and return each particle's personal-best position.

    Parameters
    ----------
    swarm_pos : np.ndarray
        (swarm_size x n_var) current position matrix.
    local_best : np.ndarray
        (swarm_size x n_var) personal-best matrix; updated in place.
    max_min : str
        'min' for a minimisation problem, 'max' (default) otherwise.

    Returns
    -------
    np.ndarray
        The updated (swarm_size x n_var) personal-best matrix.
    """
    swarm_fit = find_fitness(swarm_pos)
    swarm_size = int(np.shape(swarm_pos)[0])
    better = (lambda a, b: a < b) if max_min == 'min' else (lambda a, b: a > b)
    for r in range(0, swarm_size):
        # The original repeated this whole-row update once per variable;
        # the condition does not depend on the column, so one pass per
        # particle gives the same result.
        if better(float(swarm_fit[r][0]), float(find_fitness(local_best[r][:])[0])):
            local_best[r][:] = (swarm_pos[r][:]).copy()
    return local_best
def update_vel (swarm_vel, swarm_pos, global_best, local_best ):
    """Return the updated velocity matrix for every swarm particle.

    Implements v_new = w*v + c1*r1*(pbest - x) + c2*r2*(gbest - x),
    with one random pair (r1, r2) shared by the whole update step.

    Parameters
    ----------
    swarm_vel, swarm_pos, local_best : np.ndarray
        (swarm_size x n_var) matrices.
    global_best : np.ndarray
        Global-best position; indexed as global_best[0][c] when
        n_var > 1, or global_best[0] when n_var == 1.

    Returns
    -------
    np.ndarray
        The new (swarm_size x n_var) velocity matrix.
    """
    r1 = random.uniform(0, 1)
    r2 = random.uniform(0, 1)
    new_vel = swarm_vel.copy()
    swarm_size = int(np.shape(swarm_pos)[0])
    n_var = int(np.shape(swarm_pos)[1])
    for r in range(0, swarm_size):
        for c in range(0, n_var):
            # Pick the matching global-best component up front; the
            # original always evaluated the n_var == 1 form first, which
            # crashed for n_var > 1 with a (1 x n_var) global_best.
            gbest_c = global_best[0][c] if n_var > 1 else global_best[0]
            new_vel[r][c] = (w * swarm_vel[r][c]
                             + c1 * (r1 * (local_best[r][c] - swarm_pos[r][c]))
                             + c2 * (r2 * (gbest_c - swarm_pos[r][c])))
    return new_vel
def update_position (swarm_pos, swarm_vel):
    """Return the new particle positions after one velocity step.

    Parameters
    ----------
    swarm_pos, swarm_vel : np.ndarray
        (swarm_size x n_var) position and velocity matrices.

    Returns
    -------
    np.ndarray
        A new (swarm_size x n_var) position matrix equal to
        swarm_pos + swarm_vel; the inputs are left untouched.
    """
    # Vectorised elementwise addition replaces the original double loop.
    return swarm_pos + swarm_vel
# Main program: read the problem interactively, then iterate the swarm.
# Prompt for the swarm dimensions and the search range of every variable.
swarm_size = int(input('Enter the swarm size: '))
n_var = int(input('Enter the number of variables: '))
var_range = np.zeros((n_var, 2))
for r in range(0, n_var):
    var_range[r][0] = float(input('Enter min value for variable %d: ' % (r+1)))
    var_range[r][1] = float(input('Enter max value for variable %d: ' % (r+1)))
# Initialize the swarm particles' positions uniformly within each range
swarm_pos = np.zeros((swarm_size, n_var))
for r in range(0, swarm_size):
    for c in range(0, n_var):
        swarm_pos[r][c] = random.uniform(var_range[c][0], var_range[c][1])
# Initialize the swarm particles' velocity uniformly in [-1, 1]
swarm_vel = np.zeros((swarm_size, n_var))
for r in range(0, swarm_size):
    for c in range(0, n_var):
        swarm_vel[r][c] = random.uniform(-1, 1)
# Start the iterations: best-tracking, velocity update, position update.
global_best = np.zeros((1, n_var))
local_best = np.zeros((swarm_size, n_var))
while (iterations > 0):
    global_best = find_global_best(swarm_pos, global_best, max_min = 'max')
    local_best = find_local_best(swarm_pos, local_best, max_min = 'max')
    swarm_vel = update_vel(swarm_vel, swarm_pos, global_best, local_best)
    swarm_pos = update_position(swarm_pos, swarm_vel)
    iterations = iterations - 1
print('')
print('Converging through Particle Swarm Optimization')
print('')
# NOTE(review): '%f' formatting of global_best only works when it holds a
# single element, i.e. n_var == 1 — confirm multi-variable runs are intended.
print('The Final Solution is: %f' % global_best)
print('')
print('The value of thee function at this position is: %f' % find_fitness(global_best))
print('')
| [
"numpy.shape",
"numpy.zeros",
"numpy.ndim",
"random.uniform"
] | [((4779, 4799), 'numpy.zeros', 'np.zeros', (['(n_var, 2)'], {}), '((n_var, 2))\n', (4787, 4799), True, 'import numpy as np\n'), ((5043, 5072), 'numpy.zeros', 'np.zeros', (['(swarm_size, n_var)'], {}), '((swarm_size, n_var))\n', (5051, 5072), True, 'import numpy as np\n'), ((5283, 5312), 'numpy.zeros', 'np.zeros', (['(swarm_size, n_var)'], {}), '((swarm_size, n_var))\n', (5291, 5312), True, 'import numpy as np\n'), ((5460, 5480), 'numpy.zeros', 'np.zeros', (['(1, n_var)'], {}), '((1, n_var))\n', (5468, 5480), True, 'import numpy as np\n'), ((5494, 5523), 'numpy.zeros', 'np.zeros', (['(swarm_size, n_var)'], {}), '((swarm_size, n_var))\n', (5502, 5523), True, 'import numpy as np\n'), ((1102, 1127), 'numpy.zeros', 'np.zeros', (['(swarm_size, 1)'], {}), '((swarm_size, 1))\n', (1110, 1127), True, 'import numpy as np\n'), ((3513, 3533), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3527, 3533), False, 'import random\n'), ((3543, 3563), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3557, 3563), False, 'import random\n'), ((854, 872), 'numpy.ndim', 'np.ndim', (['swarm_pos'], {}), '(swarm_pos)\n', (861, 872), True, 'import numpy as np\n'), ((5178, 5226), 'random.uniform', 'random.uniform', (['var_range[c][0]', 'var_range[c][1]'], {}), '(var_range[c][0], var_range[c][1])\n', (5192, 5226), False, 'import random\n'), ((5400, 5421), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (5414, 5421), False, 'import random\n'), ((822, 841), 'numpy.shape', 'np.shape', (['swarm_pos'], {}), '(swarm_pos)\n', (830, 841), True, 'import numpy as np\n'), ((1306, 1324), 'numpy.ndim', 'np.ndim', (['swarm_pos'], {}), '(swarm_pos)\n', (1313, 1324), True, 'import numpy as np\n'), ((1910, 1929), 'numpy.shape', 'np.shape', (['swarm_pos'], {}), '(swarm_pos)\n', (1918, 1929), True, 'import numpy as np\n'), ((1950, 1969), 'numpy.shape', 'np.shape', (['swarm_pos'], {}), '(swarm_pos)\n', (1958, 1969), True, 'import numpy as 
np\n'), ((2710, 2729), 'numpy.shape', 'np.shape', (['swarm_pos'], {}), '(swarm_pos)\n', (2718, 2729), True, 'import numpy as np\n'), ((2750, 2769), 'numpy.shape', 'np.shape', (['swarm_pos'], {}), '(swarm_pos)\n', (2758, 2769), True, 'import numpy as np\n'), ((3616, 3635), 'numpy.shape', 'np.shape', (['swarm_pos'], {}), '(swarm_pos)\n', (3624, 3635), True, 'import numpy as np\n'), ((3656, 3675), 'numpy.shape', 'np.shape', (['swarm_pos'], {}), '(swarm_pos)\n', (3664, 3675), True, 'import numpy as np\n'), ((4318, 4337), 'numpy.shape', 'np.shape', (['swarm_pos'], {}), '(swarm_pos)\n', (4326, 4337), True, 'import numpy as np\n'), ((4358, 4377), 'numpy.shape', 'np.shape', (['swarm_pos'], {}), '(swarm_pos)\n', (4366, 4377), True, 'import numpy as np\n'), ((1061, 1080), 'numpy.shape', 'np.shape', (['swarm_pos'], {}), '(swarm_pos)\n', (1069, 1080), True, 'import numpy as np\n')] |
from styx_msgs.msg import TrafficLight
import rospy
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from PIL import Image
class TLClassifier(object):
    """Traffic-light classifier backed by a frozen TensorFlow 1.x
    object-detection graph (SSD model trained on simulator images)."""

    def __init__(self):
        # Frozen inference graph, relative to this node's working directory.
        PATH_TO_CKPT = r'../../../models/ssd_sim/frozen_inference_graph.pb'
        NUM_CLASSES = 4  # NOTE(review): unused in this class
        #load graph
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        # Definite input and output Tensors for detection_graph
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')

    # NOTE(review): defined without `self`; calling this on an instance
    # would bind the instance to `image`. Confirm it is only ever called
    # unbound, or mark it @staticmethod.
    def load_image_into_numpy_array(image):
        (im_width, im_height) = image.size
        return np.array(image.getdata()).reshape(
            (im_height, im_width, 3)).astype(np.uint8)

    def get_classification(self, image):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        with self.detection_graph.as_default():
            # NOTE(review): a new tf.Session is created on every call, which
            # is expensive — consider caching one session in __init__.
            with tf.Session(graph=self.detection_graph) as sess:
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image, axis=0)
                # Actual detection.
                (boxes, scores, classes, num) = sess.run(
                    [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
                    feed_dict={self.image_tensor: image_np_expanded})
                # Scores are sorted descending, so item(0) is the top detection.
                highest_score=scores.item(0)*100
                rospy.logwarn("Detected Traffic light: {0}, Score: {1}".format(classes.item(0),highest_score))
                # Map the detected class id to a TrafficLight color; anything
                # below the 65% confidence threshold is reported as UNKNOWN.
                if highest_score > 65.0:
                    if classes.item(0) == 1:
                        return TrafficLight.GREEN
                    elif classes.item(0) == 2:
                        return TrafficLight.RED
                    elif classes.item(0) == 3:
                        return TrafficLight.YELLOW
        return TrafficLight.UNKNOWN
| [
"tensorflow.GraphDef",
"tensorflow.Session",
"numpy.expand_dims",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"tensorflow.import_graph_def"
] | [((477, 487), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (485, 487), True, 'import tensorflow as tf\n'), ((561, 574), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (572, 574), True, 'import tensorflow as tf\n'), ((590, 624), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['PATH_TO_CKPT', '"""rb"""'], {}), "(PATH_TO_CKPT, 'rb')\n", (604, 624), True, 'import tensorflow as tf\n'), ((746, 788), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (765, 788), True, 'import tensorflow as tf\n'), ((2135, 2173), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph'}), '(graph=self.detection_graph)\n', (2145, 2173), True, 'import tensorflow as tf\n'), ((2312, 2341), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2326, 2341), True, 'import numpy as np\n')] |
# Copyright (c) Fairlearn contributors.
# Licensed under the MIT License.
"""
=================================================
MetricFrame: más allá de la clasificación binaria
=================================================
"""
# %%
# Este notebook contiene ejemplos de uso :class:`~ fairlearn.metrics.MetricFrame`
# para tareas que van más allá de la simple clasificación binaria.
import sklearn.metrics as skm
import functools
from fairlearn.metrics import MetricFrame
# %%
# Resultados multiclase y no escalares
# ====================================
#
# Supongamos que tenemos un problema multiclase, con etiquetas :math:`\in {0, 1, 2}`,
# y que deseamos generar matrices de confusión para cada subgrupo
# identificado por la característica sensible :math:`\in {a, b, c, d}`.
# Esto es apoyado fácilmente por
# :class:`~ fairlearn.metrics.MetricFrame`, que no requiere
# el resultado de una métrica para ser un escalar.
#
# Primero, generemos algunos datos de entrada aleatorios:
import numpy as np
rng = np.random.default_rng(seed=96132)
n_rows = 1000
n_classes = 3
n_sensitive_features = 4
y_true = rng.integers(n_classes, size=n_rows)
y_pred = rng.integers(n_classes, size=n_rows)
temp = rng.integers(n_sensitive_features, size=n_rows)
s_f = [chr(ord('a')+x) for x in temp]
# %%
# Para usar :func:`~ sklearn.metrics.confusion_matrix`,
# es necesario enlazar previamente el argumento `labels` (etiquetas), ya que es posible
# que algunos de los subgrupos no contendrán todos
# las posibles etiquetas
conf_mat = functools.partial(skm.confusion_matrix,
labels=np.unique(y_true))
# %%
# Con esto ahora disponible, podemos crear nuestro objeto
# :class:`~ fairlearn.metrics.MetricFrame`:
mf = MetricFrame(metrics={'conf_mat': conf_mat},
y_true=y_true,
y_pred=y_pred,
sensitive_features=s_f)
# %%
# A partir de esto, podemos ver la matriz de confusión general:
mf.overall
# %%
# Y también las matrices de confusión para cada subgrupo:
mf.by_group
# %%
# Obviamente, los otros métodos como
# :meth:`~ fairlearn.metrics.MetricFrame.group_min`
# no funcionarán, ya que operaciones como 'less than' (menor que)
# no están bien definidos para matrices.
# %%
# Las funciones métricas con diferentes tipos de retorno también pueden
# mezclarse con :class:`~ fairlearn.metrics.MetricFrame`.
# Por ejemplo:
recall = functools.partial(skm.recall_score, average='macro')
mf2 = MetricFrame(metrics={'conf_mat': conf_mat,
'recall': recall
},
y_true=y_true,
y_pred=y_pred,
sensitive_features=s_f)
print("Overall values")
print(mf2.overall)
print("Values by group")
print(mf2.by_group)
# %%
# Argumentos no escalares
# =======================
#
# :class:`~ fairlearn.metrics.MetricFrame` no requiere
# que los argumentos sean escalares. Para demostrar esto,
# utilizaremos un ejemplo de reconocimiento de imágenes (proporcionado amablemente por
# <NAME>, <NAME> y <NAME>).
#
# Los algoritmos de reconocimiento de imágenes frecuentemente construyen un cuadro delimitador
# (bounding box) alrededor de las regiones donde han encontrado las características objetivo.
# Por ejemplo, si un algoritmo detecta un rostro en una imagen,
# colocará un cuadro delimitador a su alrededor. Estos cuadros delimitadores
# constituyen `y_pred` para :class:`~ fairlearn.metrics.MetricFrame`.
# Los valores de `y_true` proceden de los cuadros delimitadores marcados con
# etiquetadores humanos.
#
# Los cuadros delimitadores a menudo se comparan utilizando la métrica 'iou'.
# Ésta calcula la intersección y la unión de los dos
# cuadros delimitadores y devuelve la proporción de sus áreas.
# Si los cuadros delimitadores son idénticos, entonces la métrica
# será 1; si están disjuntos, será 0. Una función para hacer esto es:
def bounding_box_iou(box_A_input, box_B_input):
    """Intersection-over-union of two bounding boxes.

    Each box is an array-like ``[x_0, y_0, delta_x, delta_y]`` where both
    deltas must be non-negative.  Returns 1 for identical boxes and 0 for
    disjoint ones.
    """
    first = np.array(box_A_input)
    second = np.array(box_B_input)
    # Validate the extents before doing any arithmetic.
    if first[2] < 0:
        raise ValueError("Bad delta_x for box_A")
    if first[3] < 0:
        raise ValueError("Bad delta y for box_A")
    if second[2] < 0:
        raise ValueError("Bad delta x for box_B")
    if second[3] < 0:
        raise ValueError("Bad delta y for box_B")
    # Switch from (corner, size) to (corner, corner) representation.
    first[2:4] = first[0:2] + first[2:4]
    second[2:4] = second[0:2] + second[2:4]
    # Corners of the intersection rectangle.
    left = max(first[0], second[0])
    top = max(first[1], second[1])
    right = min(first[2], second[2])
    bottom = min(first[3], second[3])
    if right < left or bottom < top:
        # The boxes do not overlap at all.
        return 0
    overlap = (right - left) * (bottom - top)
    area_first = (first[2] - first[0]) * (first[3] - first[1])
    area_second = (second[2] - second[0]) * (second[3] - second[1])
    # Union area = sum of both areas minus the double-counted overlap.
    return overlap / float(area_first + area_second - overlap)
# %%
# Esta es una métrica para dos cuadros delimitadores, pero para :class:`~ fairlearn.metrics.MetricFrame`
# necesitamos comparar dos listas de cuadros delimitadores. Por
# simplicidad, devolveremos el valor medio de 'iou' para las
# dos listas, pero esta no es la única opción:
def mean_iou(true_boxes, predicted_boxes):
    """Average IoU between two equally long sequences of bounding boxes.

    Raises ValueError when the two sequences differ in length.
    """
    if len(true_boxes) != len(predicted_boxes):
        raise ValueError("Array size mismatch")
    scores = []
    for truth, guess in zip(true_boxes, predicted_boxes):
        scores.append(bounding_box_iou(truth, guess))
    return np.mean(scores)
# %%
# Necesitamos generar algunos datos de entrada, así que primero crearemos una función para
# generar un solo cuadro delimitador aleatorio:
def generate_bounding_box(max_coord, max_delta, rng):
    """Draw one random box ``[x_0, y_0, delta_x, delta_y]``.

    The corner lies in ``[0, max_coord)^2`` and the extents in
    ``[0, max_delta)^2``; ``rng`` is a numpy random Generator.
    """
    box = np.empty(4)
    # Draw the corner first, then the extent, so the stream of numbers
    # consumed from ``rng`` keeps the (corner, delta) order.
    box[0:2] = max_coord * rng.random(size=2)
    box[2:4] = max_delta * rng.random(size=2)
    return box
# %%
# Usaremos esto para crear matrices de muestra `y_true` e `y_pred` de
# cuadros delimitadores:
def many_bounding_boxes(n_rows, max_coord, max_delta, rng):
    """Build a list of ``n_rows`` random bounding boxes drawn from ``rng``."""
    boxes = []
    for _ in range(n_rows):
        boxes.append(generate_bounding_box(max_coord, max_delta, rng))
    return boxes
true_bounding_boxes = many_bounding_boxes(n_rows, 5, 10, rng)
pred_bounding_boxes = many_bounding_boxes(n_rows, 5, 10, rng)
# %%
# Finalmente, podemos usarlos en :class:`~ fairlearn.metrics.MetricFrame`:
mf_bb = MetricFrame(metrics={'mean_iou': mean_iou},
y_true=true_bounding_boxes,
y_pred=pred_bounding_boxes,
sensitive_features=s_f)
print("Overall metric")
print(mf_bb.overall)
print("Metrics by group")
print(mf_bb.by_group)
# %%
# Las entradas individuales en las matrices `y_true` e `y_pred`
# puede ser arbitrariamente complejas. Son las funciones métricas
# que les dan sentido. De manera similar,
# :class:`~ fairlearn.metrics.MetricFrame` no impone
# restricciones sobre el tipo de resultado obtenido. Uno puede imaginarse una tarea
# de imagen de reconocimiento donde hay múltiples objetos detectables en cada
# imagen, y el algoritmo de reconocimiento de imágenes produce
# varios cuadros delimitadores (no necesariamente en un mapeo 1-a-1). El resultado de tal escenario podría
# ser una matriz de alguna descripción.
# Otro caso en el que tanto los datos de entrada como las métricas
# serán complejos es el procesamiento del lenguaje natural,
# donde cada fila de la entrada podría ser una oración completa,
# posiblemente con incrustaciones de palabras complejas incluidas.
# %%
# Conclusión
# ==========
#
# Este tutorial ha probado la flexibilidad
# de :class:`~ fairlearn.metrics.MetricFrame` cuando se trata
# de argumentos de entradas, salida y de funciones métricas.
# Las argumentos de entradas de tipo lista (array) pueden tener elementos de tipos arbitrarios,
# y los valores de retorno de las funciones métricas también pueden
# ser de cualquier tipo (aunque métodos como
# :meth:`~ fairlearn.metrics.MetricFrame.group_min` pueden no
# funcionar).
| [
"functools.partial",
"numpy.unique",
"fairlearn.metrics.MetricFrame",
"numpy.random.default_rng",
"numpy.mean",
"numpy.array",
"numpy.concatenate"
] | [((1019, 1052), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': '(96132)'}), '(seed=96132)\n', (1040, 1052), True, 'import numpy as np\n'), ((1742, 1843), 'fairlearn.metrics.MetricFrame', 'MetricFrame', ([], {'metrics': "{'conf_mat': conf_mat}", 'y_true': 'y_true', 'y_pred': 'y_pred', 'sensitive_features': 's_f'}), "(metrics={'conf_mat': conf_mat}, y_true=y_true, y_pred=y_pred,\n sensitive_features=s_f)\n", (1753, 1843), False, 'from fairlearn.metrics import MetricFrame\n'), ((2413, 2465), 'functools.partial', 'functools.partial', (['skm.recall_score'], {'average': '"""macro"""'}), "(skm.recall_score, average='macro')\n", (2430, 2465), False, 'import functools\n'), ((2473, 2592), 'fairlearn.metrics.MetricFrame', 'MetricFrame', ([], {'metrics': "{'conf_mat': conf_mat, 'recall': recall}", 'y_true': 'y_true', 'y_pred': 'y_pred', 'sensitive_features': 's_f'}), "(metrics={'conf_mat': conf_mat, 'recall': recall}, y_true=y_true,\n y_pred=y_pred, sensitive_features=s_f)\n", (2484, 2592), False, 'from fairlearn.metrics import MetricFrame\n'), ((6756, 6883), 'fairlearn.metrics.MetricFrame', 'MetricFrame', ([], {'metrics': "{'mean_iou': mean_iou}", 'y_true': 'true_bounding_boxes', 'y_pred': 'pred_bounding_boxes', 'sensitive_features': 's_f'}), "(metrics={'mean_iou': mean_iou}, y_true=true_bounding_boxes,\n y_pred=pred_bounding_boxes, sensitive_features=s_f)\n", (6767, 6883), False, 'from fairlearn.metrics import MetricFrame\n'), ((4085, 4106), 'numpy.array', 'np.array', (['box_A_input'], {}), '(box_A_input)\n', (4093, 4106), True, 'import numpy as np\n'), ((4119, 4140), 'numpy.array', 'np.array', (['box_B_input'], {}), '(box_B_input)\n', (4127, 4140), True, 'import numpy as np\n'), ((5921, 5937), 'numpy.mean', 'np.mean', (['all_iou'], {}), '(all_iou)\n', (5928, 5937), True, 'import numpy as np\n'), ((6238, 6269), 'numpy.concatenate', 'np.concatenate', (['(corner, delta)'], {}), '((corner, delta))\n', (6252, 6269), True, 'import numpy as np\n'), ((1609, 
1626), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (1618, 1626), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
from typing import Iterable, Dict
import rlkit.torch.pytorch_util as ptu
from rlkit.torch.networks import FlattenMlp
from self_supervised.env_wrapper.rlkit_wrapper import NormalizedBoxEnvWrapper
from self_supervised.policy.skill_policy import SkillTanhGaussianPolicy
from self_supervised.loss.loss_intrin_selfsup import reconstruction_based_rewards
from self_supervised.algo.trainer_mode_latent import \
ModeLatentTrainer, ModeLatentNetworkWithEncoder
from self_supervised.base.trainer.trainer_base import MyTrainerBaseClass
import self_supervised.utils.conversion as self_sup_conversion
import self_supervised.utils.typed_dicts as td
class SelfSupTrainer(MyTrainerBaseClass):
    """Soft Actor-Critic style trainer for a skill-conditioned policy.

    Rewards are intrinsic: they come from the reconstruction error of a
    mode-latent model (see ``reconstruction_based_rewards``) instead of the
    environment.  The trainer keeps two Q-functions with soft-updated
    targets and (optionally) auto-tunes the entropy temperature alpha.
    """
    def __init__(self,
                 env: NormalizedBoxEnvWrapper,
                 policy: SkillTanhGaussianPolicy,
                 qf1: FlattenMlp,
                 qf2: FlattenMlp,
                 target_qf1: FlattenMlp,
                 target_qf2: FlattenMlp,
                 mode_latent_model: ModeLatentNetworkWithEncoder,
                 discount=0.99,
                 reward_scale=1.0,
                 policy_lr=1e-3,
                 qf_lr=1e-3,
                 optimizer_class=torch.optim.Adam,
                 soft_target_tau=1e-2,
                 target_update_period=1,
                 plotter=None,
                 render_eval_paths=False,
                 use_automatic_entropy_tuning=True,
                 target_entropy=None
                 ):
        # NOTE(review): `plotter` and `render_eval_paths` are accepted but
        # never stored or used in this class.
        super().__init__()
        self.env = env
        self.policy = policy
        self.qf1 = qf1
        self.qf2 = qf2
        self.target_qf1 = target_qf1
        self.target_qf2 = target_qf2
        self.mode_latent_model = mode_latent_model
        self.soft_target_tau = soft_target_tau
        self.target_update_period = target_update_period
        self.use_automatic_entropy_tuning = use_automatic_entropy_tuning
        if self.use_automatic_entropy_tuning:
            if target_entropy:
                self.target_entropy = target_entropy
            else:
                # Default SAC heuristic: -|A| (negative action dimension).
                self.target_entropy = -np.prod(self.env.action_space.shape).item()
            # log(alpha) is the learned parameter; alpha = exp(log_alpha).
            self.log_alpha = ptu.zeros(1, requires_grad=True)
            self.alpha_optimizer = optimizer_class(
                [self.log_alpha],
                lr=policy_lr
            )
        self.qf_criterion = nn.MSELoss()
        self.policy_optimizer = optimizer_class(
            self.policy.parameters(),
            lr=policy_lr,
        )
        self.qf1_optimizer = optimizer_class(
            self.qf1.parameters(),
            lr=qf_lr
        )
        self.qf2_optimizer = optimizer_class(
            self.qf2.parameters(),
            lr=qf_lr
        )
        self.discount = discount
        self.reward_scale = reward_scale
        # Counter used to decide when to perform the soft target updates.
        self._n_train_steps_total = 0
    def train(self, data: td.TransitionModeMapping):
        """Train on a batch of trajectories.

        data : TransitionModeMapping consisting of (N, data_dim, S) data,
        i.e. batch x feature x sequence.  The batch is first reordered to
        (N, S, data_dim), intrinsic rewards are computed for the whole
        sequence, and then one SAC update is run per sequence step.
        """
        seq_dim = -1
        data_dim = -2
        batch_dim = 0
        # Reorder from (N, data_dim, S) to (N, S, data_dim).
        data = data.transpose(batch_dim, seq_dim, data_dim)
        data = td.TransitionModeMappingTorch(**self_sup_conversion.from_numpy(data))
        # Dimension indices for the reordered tensors.
        data_dim = -1
        seq_dim = -2
        # Reward
        # TODO: Normalize loss values?
        intrinsic_rewards = reconstruction_based_rewards(
            mode_latent_model=self.mode_latent_model,
            obs_seq=data.obs,
            action_seq=data.action,
            skill_seq=data.mode
        )
        # Train SAC: one update per sequence step, over the whole batch.
        for idx, transition in enumerate(
                data.permute(seq_dim, batch_dim, data_dim)):
            self.train_sac(
                batch=transition,
                intrinsic_rewards=intrinsic_rewards[:, idx]
            )
    def train_sac(self, batch: td.TransitionModeMappingTorch,
                  intrinsic_rewards: torch.Tensor):
        """Perform one SAC gradient step.

        batch : TransitionModeMapping consisting of (N, dim) data
        intrinsic_rewards : (N, 1) tensor used in place of environment rewards
        """
        batch_dim = 0
        obs = batch.obs
        actions = batch.action
        next_obs = batch.next_obs
        terminals = batch.terminal
        skills = batch.mode
        rewards = intrinsic_rewards
        # NOTE(review): batch_dim is assigned twice; the second assignment
        # below is redundant.
        batch_dim = 0
        data_dim = -1
        assert obs.size(data_dim) == next_obs.size(data_dim) == self.env.observation_space.shape[0]
        assert actions.size(data_dim) == self.env.action_space.shape[0]
        assert rewards.size(data_dim) == terminals.size(data_dim) == 1
        """
        Policy and Alpha Loss
        """
        #new_obs_actions, policy_mean, policy_log_std, log_pi, *_ = (1, 1, 1, 1)
        policy_ret_mapping = self.policy(
            obs,
            skill_vec=skills,
            reparameterize=True,
            return_log_prob=True,
        )
        # just to make auto complete work
        policy_ret_mapping = td.ForwardReturnMapping(**policy_ret_mapping)
        log_pi = policy_ret_mapping.log_prob
        # Q-functions take observation concatenated with the skill vector.
        obs_skills = torch.cat((obs, skills), dim=1)
        if self.use_automatic_entropy_tuning:
            # Temperature loss; log_pi is detached so only alpha is updated.
            alpha_loss = -(self.log_alpha *
                           (log_pi + self.target_entropy).detach()).mean()
            self.alpha_optimizer.zero_grad()
            alpha_loss.backward()
            self.alpha_optimizer.step()
            alpha = self.log_alpha.exp()
        else:
            alpha_loss = 0
            alpha = 1
        # Clipped double-Q: use the minimum of the two critics.
        q_new_actions = torch.min(
            self.qf1(obs_skills, policy_ret_mapping.action),
            self.qf2(obs_skills, policy_ret_mapping.action),
        )
        policy_loss = (alpha * log_pi - q_new_actions).mean()
        """
        QF Loss
        """
        q1_pred = self.qf1(obs_skills, actions)
        q2_pred = self.qf2(obs_skills, actions)
        # Make sure policy accounts for squashing functions like tanh correctly!
        new_policy_ret_mapping = self.policy(
            next_obs,
            skill_vec=skills,
            reparameterize=True,
            return_log_prob=True,
        )
        new_policy_ret_mapping = td.ForwardReturnMapping(**new_policy_ret_mapping)
        new_log_pi = new_policy_ret_mapping.log_prob
        next_obs_skills = torch.cat((next_obs, skills), dim=1)
        # Entropy-regularized target value from the target critics.
        target_q_values = torch.min(
            self.target_qf1(next_obs_skills, new_policy_ret_mapping.action),
            self.target_qf2(next_obs_skills, new_policy_ret_mapping.action),
        ) - alpha * new_log_pi
        # Bellman target; detached below so no gradient flows into it.
        q_target = self.reward_scale * rewards + (1. - terminals) *\
                   self.discount * target_q_values
        qf1_loss = self.qf_criterion(q1_pred, q_target.detach())
        qf2_loss = self.qf_criterion(q2_pred, q_target.detach())
        """
        Update networks
        """
        self.qf1_optimizer.zero_grad()
        qf1_loss.backward()
        self.qf1_optimizer.step()
        self.qf2_optimizer.zero_grad()
        qf2_loss.backward()
        self.qf2_optimizer.step()
        self.policy_optimizer.zero_grad()
        policy_loss.backward()
        self.policy_optimizer.step()
        """
        Soft Updates
        """
        # Polyak-average the critics into the target networks every
        # `target_update_period` steps.
        if self._n_train_steps_total % self.target_update_period == 0:
            ptu.soft_update_from_to(
                self.qf1, self.target_qf1, self.soft_target_tau
            )
            ptu.soft_update_from_to(
                self.qf2, self.target_qf2, self.soft_target_tau
            )
        self._n_train_steps_total += 1
    @property
    def networks(self) -> Dict[str, nn.Module]:
        # All trainable/target networks, e.g. for checkpointing or .eval().
        return dict(
            policy=self.policy,
            qf1=self.qf1,
            qf2=self.qf2,
            target_qf1=self.target_qf1,
            target_qf2=self.target_qf2,
            mode_latent=self.mode_latent_model,
        )
| [
"rlkit.torch.pytorch_util.soft_update_from_to",
"rlkit.torch.pytorch_util.zeros",
"torch.nn.MSELoss",
"torch.cat",
"self_supervised.utils.conversion.from_numpy",
"self_supervised.loss.loss_intrin_selfsup.reconstruction_based_rewards",
"self_supervised.utils.typed_dicts.ForwardReturnMapping",
"numpy.pr... | [((2404, 2416), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2414, 2416), True, 'import torch.nn as nn\n'), ((3375, 3512), 'self_supervised.loss.loss_intrin_selfsup.reconstruction_based_rewards', 'reconstruction_based_rewards', ([], {'mode_latent_model': 'self.mode_latent_model', 'obs_seq': 'data.obs', 'action_seq': 'data.action', 'skill_seq': 'data.mode'}), '(mode_latent_model=self.mode_latent_model,\n obs_seq=data.obs, action_seq=data.action, skill_seq=data.mode)\n', (3403, 3512), False, 'from self_supervised.loss.loss_intrin_selfsup import reconstruction_based_rewards\n'), ((4969, 5014), 'self_supervised.utils.typed_dicts.ForwardReturnMapping', 'td.ForwardReturnMapping', ([], {}), '(**policy_ret_mapping)\n', (4992, 5014), True, 'import self_supervised.utils.typed_dicts as td\n'), ((5082, 5113), 'torch.cat', 'torch.cat', (['(obs, skills)'], {'dim': '(1)'}), '((obs, skills), dim=1)\n', (5091, 5113), False, 'import torch\n'), ((6159, 6208), 'self_supervised.utils.typed_dicts.ForwardReturnMapping', 'td.ForwardReturnMapping', ([], {}), '(**new_policy_ret_mapping)\n', (6182, 6208), True, 'import self_supervised.utils.typed_dicts as td\n'), ((6290, 6326), 'torch.cat', 'torch.cat', (['(next_obs, skills)'], {'dim': '(1)'}), '((next_obs, skills), dim=1)\n', (6299, 6326), False, 'import torch\n'), ((2213, 2245), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['(1)'], {'requires_grad': '(True)'}), '(1, requires_grad=True)\n', (2222, 2245), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((7292, 7364), 'rlkit.torch.pytorch_util.soft_update_from_to', 'ptu.soft_update_from_to', (['self.qf1', 'self.target_qf1', 'self.soft_target_tau'], {}), '(self.qf1, self.target_qf1, self.soft_target_tau)\n', (7315, 7364), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((7407, 7479), 'rlkit.torch.pytorch_util.soft_update_from_to', 'ptu.soft_update_from_to', (['self.qf2', 'self.target_qf2', 'self.soft_target_tau'], {}), '(self.qf2, self.target_qf2, 
self.soft_target_tau)\n', (7430, 7479), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((3209, 3245), 'self_supervised.utils.conversion.from_numpy', 'self_sup_conversion.from_numpy', (['data'], {}), '(data)\n', (3239, 3245), True, 'import self_supervised.utils.conversion as self_sup_conversion\n'), ((2139, 2175), 'numpy.prod', 'np.prod', (['self.env.action_space.shape'], {}), '(self.env.action_space.shape)\n', (2146, 2175), True, 'import numpy as np\n')] |
from __future__ import print_function
import os
import cv2
from skimage.transform import resize
from skimage.io import imsave
import numpy as np
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, UpSampling2D
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from data import load_train_data, load_test_data
def check_data():
    """Save the predicted test masks to individual PNG files.

    Loads the test image ids via ``load_test_data`` and the predicted masks
    from ``imgs_mask_test.npy``, then writes one ``<id>_pred.png`` per image
    into the ``preds/`` directory (created if missing).
    """
    imgs_test, imgs_id_test = load_test_data()
    imgs_mask_test = np.load('imgs_mask_test.npy')
    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)
    pred_dir = 'preds/'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    print(imgs_mask_test.shape)
    print(imgs_id_test.shape)
    for image, image_id in zip(imgs_mask_test, imgs_id_test):
        # Scale the first channel to 8-bit grayscale.
        # NOTE(review): assumes mask values lie in [0, 1] — confirm upstream.
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        # Build the path from pred_dir instead of hard-coding 'preds/' again.
        imsave(os.path.join(pred_dir, '{}_pred.png'.format(image_id)), image)
# Script entry point: dump the predicted masks into the preds/ directory.
if __name__ == '__main__':
    check_data()
"os.mkdir",
"numpy.load",
"data.load_test_data",
"os.path.exists"
] | [((463, 479), 'data.load_test_data', 'load_test_data', ([], {}), '()\n', (477, 479), False, 'from data import load_train_data, load_test_data\n'), ((498, 527), 'numpy.load', 'np.load', (['"""imgs_mask_test.npy"""'], {}), "('imgs_mask_test.npy')\n", (505, 527), True, 'import numpy as np\n'), ((636, 660), 'os.path.exists', 'os.path.exists', (['pred_dir'], {}), '(pred_dir)\n', (650, 660), False, 'import os\n'), ((664, 682), 'os.mkdir', 'os.mkdir', (['pred_dir'], {}), '(pred_dir)\n', (672, 682), False, 'import os\n')] |
# coding: utf-8
"""
Abinit Task classes for Fireworks.
"""
import inspect
import subprocess
import logging
import time
import shutil
import json
import threading
import glob
import os
import errno
import numpy as np
import abipy.abio.input_tags as atags
from collections import namedtuple, defaultdict
from monty.json import MontyEncoder, MontyDecoder, MSONable
from pymatgen.util.serialization import json_pretty_dump, pmg_serialize
from pymatgen.analysis.elasticity import ElasticTensor
from abipy.flowtk.utils import Directory, File
from abipy.flowtk import events, tasks
from abipy.flowtk.netcdf import NetcdfReader
from abipy.flowtk.utils import irdvars_for_ext
from abipy.flowtk.wrappers import Mrgddb
from abipy.flowtk.qutils import time2slurm
from abipy.flowtk.tasks import ParalHints
from abipy.abio.factories import InputFactory #, PiezoElasticFromGsFactory
from abipy.abio.inputs import AbinitInput
from abipy.core.mixins import Has_Structure
from abipy.electrons.gsr import GsrFile
from abipy.electrons.gw import SigresFile
from abipy.dfpt.phonons import PhbstFile, PhdosFile
from abipy.dfpt.anaddbnc import AnaddbNcFile
from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow
from fireworks.utilities.fw_utilities import explicit_serialize
from fireworks.utilities.fw_serializers import serialize_fw
from abiflows.fireworks.utils.task_history import TaskHistory
#from abiflows.fireworks.utils.fw_utils import links_dict_update
#from abiflows.fireworks.utils.fw_utils import set_short_single_core_to_spec
from abiflows.fireworks.tasks.abinit_common import TMPDIR_NAME, OUTDIR_NAME, INDIR_NAME, STDERR_FILE_NAME, \
LOG_FILE_NAME, FILES_FILE_NAME, OUTPUT_FILE_NAME, INPUT_FILE_NAME, MPIABORTFILE, DUMMY_FILENAME, \
ELPHON_OUTPUT_FILE_NAME, DDK_FILES_FILE_NAME, HISTORY_JSON
from abiflows.fireworks.utils.fw_utils import FWTaskManager
logger = logging.getLogger(__name__)
# files and folders names
class BasicAbinitTaskMixin(object):
task_type = ""
@serialize_fw
def to_dict(self):
d = {}
for arg in inspect.getfullargspec(self.__init__).args:
if arg != "self":
val = self.__getattribute__(arg)
if hasattr(val, "as_dict"):
val = val.as_dict()
elif isinstance(val, (tuple, list)):
val = [v.as_dict() if hasattr(v, "as_dict") else v for v in val]
d[arg] = val
return d
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
kwargs = {k: dec.process_decoded(v) for k, v in d.items()
if k in inspect.getfullargspec(cls.__init__).args}
return cls(**kwargs)
def get_fw_task_manager(self, fw_spec):
"""
Generates an instance of FWTaskManager. First looks for a 'ftm_file' key in the spec, otherwise generates it
with FWTaskManager.from_user_config. The configuration is updated with the keywords defined in 'fw_policy'
in the spec.
"""
if 'ftm_file' in fw_spec:
ftm = FWTaskManager.from_file(fw_spec['ftm_file'])
else:
ftm = FWTaskManager.from_user_config()
ftm.update_fw_policy(fw_spec.get('fw_policy', {}))
return ftm
    def run_autoparal(self, abiinput, autoparal_dir, ftm, clean_up='move'):
        """
        Runs the autoparal using AbinitInput abiget_autoparal_pconfs method.
        The information are retrieved from the FWTaskManager that should be present and contain the standard
        AbiPy |TaskManager|, that provides information about the queue adapters.
        No check is performed on the autoparal_dir. If there is a possibility of overwriting output data due to
        reuse of the same folder, it should be handled by the caller.

        Args:
            abiinput: the AbinitInput whose parallel configurations are probed.
            autoparal_dir: working directory for the autoparal run.
            ftm: FWTaskManager providing the AbiPy task manager / qadapters.
            clean_up: 'move' backs up the output files and removes the work
                subdirs; 'remove' deletes everything; any other value leaves
                the autoparal files in place.

        Returns:
            (optconf, qadapter_spec, qadapter): the optimal configuration,
            the queue-adapter substitution dict and the qadapter itself.
        """
        manager = ftm.task_manager
        if not manager:
            msg = 'No task manager available: autoparal could not be performed.'
            logger.error(msg)
            raise InitializationError(msg)
        pconfs = abiinput.abiget_autoparal_pconfs(max_ncpus=manager.max_cores, workdir=autoparal_dir,
                                                  manager=manager)
        optconf = manager.select_qadapter(pconfs)
        qadapter_spec = manager.qadapter.get_subs_dict()
        # Persist all probed configurations plus the chosen one for later inspection.
        d = pconfs.as_dict()
        d["optimal_conf"] = optconf
        json_pretty_dump(d, os.path.join(autoparal_dir, "autoparal.json"))
        # Method to clean the output files (best effort: OSError is ignored).
        def safe_rm(name):
            try:
                path = os.path.join(autoparal_dir, name)
                if os.path.isdir(path):
                    shutil.rmtree(path)
                else:
                    os.remove(path)
            except OSError:
                pass
        # Method to rename the output files into an 'autoparal_backup' subdir.
        def safe_mv(name):
            try:
                autoparal_backup_dir = os.path.join(autoparal_dir, 'autoparal_backup')
                if not os.path.exists(autoparal_backup_dir):
                    os.makedirs(autoparal_backup_dir)
                current_path = os.path.join(autoparal_dir, name)
                newpath = os.path.join(autoparal_backup_dir, name)
                if not os.path.exists(newpath):
                    shutil.move(current_path, newpath)
                else:
                    raise ValueError('Autoparal backup file already exists for the file "{}"'.format(name))
            except OSError:
                pass
        # clean up useless files. The output files should removed also to avoid abinit renaming the out file in
        # case the main run will be performed in the same dir
        if clean_up == 'move':
            to_be_moved = [OUTPUT_FILE_NAME, LOG_FILE_NAME, STDERR_FILE_NAME]
            for r in to_be_moved:
                safe_mv(r)
            to_be_removed = [TMPDIR_NAME, OUTDIR_NAME, INDIR_NAME]
            for r in to_be_removed:
                safe_rm(r)
        elif clean_up == 'remove':
            to_be_removed = [OUTPUT_FILE_NAME, LOG_FILE_NAME, STDERR_FILE_NAME, TMPDIR_NAME, OUTDIR_NAME, INDIR_NAME]
            for r in to_be_removed:
                safe_rm(r)
        return optconf, qadapter_spec, manager.qadapter
    def run_fake_autoparal(self, ftm):
        """
        In cases where the autoparal is not supported a fake run autoparal can be used to set the queueadapter.
        Takes the number of processors suggested by the manager given that the paral hints contain all the
        number of processors up to max_cores and they all have the same efficiency.

        Returns:
            (optconf, qadapter_spec, qadapter): as in ``run_autoparal``.
        """
        manager = ftm.task_manager
        if not manager:
            msg = 'No task manager available: autoparal could not be performed.'
            logger.error(msg)
            raise InitializationError(msg)
        # all the options have the same priority, let the qadapter decide which is preferred.
        fake_conf_list = list({'tot_ncpus': i, 'mpi_ncpus': i, 'efficiency': 1} for i in range(1, manager.max_cores+1))
        pconfs = ParalHints({}, fake_conf_list)
        optconf = manager.select_qadapter(pconfs)
        qadapter_spec = manager.qadapter.get_subs_dict()
        # Dump the fake configurations and the selected one to the current dir.
        d = pconfs.as_dict()
        d["optimal_conf"] = optconf
        json_pretty_dump(d, os.path.join(os.getcwd(), "autoparal.json"))
        return optconf, qadapter_spec, manager.qadapter
def get_final_mod_spec(self, fw_spec):
"""
Generates the standard mod_spec dict for the FWAction. Pushes the information of the current task to
the list associated with self.task_type. Requires a "current_task_info" method.
"""
return [{'_push': {'previous_fws->' + self.task_type: self.current_task_info(fw_spec)}}]
# if 'previous_fws' in fw_spec:
# prev_fws = fw_spec['previous_fws'].copy()
# else:
# prev_fws = {}
# prev_fws[self.task_type] = [self.current_task_info(fw_spec)]
# return [{'_set': {'previous_fws': prev_fws}}]
def set_logger(self):
"""
Set a logger for pymatgen.io.abinit and abipy
"""
log_handler = logging.FileHandler('abipy.log')
log_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logging.getLogger('pymatgen.io.abinit').addHandler(log_handler)
logging.getLogger('abipy').addHandler(log_handler)
logging.getLogger('abiflows').addHandler(log_handler)
    def link_ext(self, ext, source_dir, strict=True):
        """
        Links the required files from previous runs in the indata folder.
        It will first try to link the fortran file and then the Netcdf file, if the first is not found.

        Args:
            ext: extension that should be linked
            source_dir: path to the source directory
            strict: if True an exception is raised if the file is missing.

        Returns:
            The path to the generated link. None if strict=False and the file could not be found.
            NOTE(review): the final ``else`` branch also falls through and
            returns None even when ``dest`` already points to ``source`` —
            confirm callers (e.g. ``link_ddk``) tolerate that.
        """
        source = os.path.join(source_dir, self.prefix.odata + "_" + ext)
        logger.info("Need path {} with ext {}".format(source, ext))
        dest = os.path.join(self.workdir, self.prefix.idata + "_" + ext)
        if not os.path.exists(source):
            # Try netcdf file. TODO: this case should be treated in a cleaner way.
            source += ".nc"
            if os.path.exists(source): dest += ".nc"
        if not os.path.exists(source):
            if strict:
                msg = "{} is needed by this task but it does not exist".format(source)
                logger.error(msg)
                raise InitializationError(msg)
            else:
                return None
        # Link path to dest if dest link does not exist.
        # else check that it points to the expected file.
        logger.info("Linking path {} --> {}".format(source, dest))
        if not os.path.exists(dest) or not strict:
            # Either copy or symlink, depending on the fw_policy.
            if self.ftm.fw_policy.copy_deps:
                shutil.copyfile(source, dest)
            else:
                os.symlink(source, dest)
            return dest
        else:
            # check links but only if we haven't performed the restart.
            # in this case, indeed we may have replaced the file pointer with the
            # previous output file of the present task.
            if not self.ftm.fw_policy.copy_deps and os.path.realpath(dest) != source and not self.restart_info:
                msg = "dest {} does not point to path {}".format(dest, source)
                logger.error(msg)
                raise InitializationError(msg)
def link_ddk(self, source_dir):
"""
Links the DDK files rom previous runs in the indata folder.
Accepts more than one DDK file in the source_dir: multiple perturbations are allowed in a single calculation.
Note that the DDK extension is not generated by abinit, but should be created from the appropriate 1WF file.
"""
outdata_dir = Directory(os.path.join(source_dir, OUTDIR_NAME))
ddks = []
for f in outdata_dir.list_filepaths():
if f.endswith('_DDK'):
ddks.append(f)
if not ddks:
msg = "DDK is needed by this task but it does not exist"
logger.error(msg)
raise InitializationError(msg)
exts = [os.path.basename(ddk).split('_')[-2] for ddk in ddks]
ddk_files = []
for ext in exts:
ddk_files.append(self.link_ext(ext, source_dir))
return ddk_files
#TODO to avoid any regression this function will only be used to link 1WF and 1DEN files.
# This method should be more general than link_ext, but currently fails in passing all the tests.
# After fixing the problems the more general methods should replace link_ext.
    def link_1ext(self, ext, source_dir, strict=True):
        """
        Links the 1DEN and 1WF files in the indata folder.
        It will first try to link the fortran file and then the Netcdf file, if the first is not found.

        Args:
            ext: extension of the file that should be linked (e.g. "1WF", "1DEN").
            source_dir: path to the directory of the previous task providing the file.
            strict: if True an exception is raised when the file is missing,
                otherwise None is returned.

        Returns:
            The path to the generated link. None if strict=False and the file could not be found.

        Raises:
            InitializationError: if strict=True and the source file is missing, or if an
                existing link does not point to the expected source.
        """
        # 1DEN is used as a general reference and to trigger the correct ird variable,
        # but the real extension in DEN.
        if "1DEN" in ext:
            ext = "DEN"
        # look up the file in the outdata folder of the source dir; has_abiext resolves
        # the actual file name (fortran or netcdf) for the given extension
        source = Directory(os.path.join(source_dir,os.path.split(self.prefix.odata)[0])).has_abiext(ext)
        if not source:
            if strict:
                msg = "output file with extension {} is needed from {} dir, " \
                      "but it does not exist".format(ext, source_dir)
                logger.error(msg)
                raise InitializationError(msg)
            else:
                return None
        logger.info("Need path {} with ext {}".format(source, ext))
        # determine the correct extension
        #TODO check if this is correct for all the possible extensions, apart from 1WF
        ext_full = source.split('_')[-1]
        dest = os.path.join(self.workdir, self.prefix.idata + "_" + ext_full)
        # Link path to dest if dest link does not exist.
        # else check that it points to the expected file.
        logger.info("Linking path {} --> {}".format(source, dest))
        if not os.path.exists(dest) or not strict:
            if self.ftm.fw_policy.copy_deps:
                shutil.copyfile(source, dest)
            else:
                os.symlink(source, dest)
            return dest
        else:
            # check links but only if we haven't performed the restart.
            # in this case, indeed we may have replaced the file pointer with the
            # previous output file of the present task.
            if not self.ftm.fw_policy.copy_deps and os.path.realpath(dest) != source and not self.restart_info:
                msg = "dest {} does not point to path {}".format(dest, source)
                logger.error(msg)
                raise InitializationError(msg)
#from Task
# Prefixes for Abinit (input, output, temporary) files.
Prefix = namedtuple("Prefix", "idata odata tdata")
pj = os.path.join
prefix = Prefix(pj("indata", "in"), pj("outdata", "out"), pj("tmpdata", "tmp"))
del Prefix, pj
@explicit_serialize
class AbiFireTask(BasicAbinitTaskMixin, FireTaskBase):
# List of `AbinitEvent` subclasses that are tested in the check_status method.
# Subclasses should provide their own list if they need to check the converge status.
CRITICAL_EVENTS = [
]
def __init__(self, abiinput, restart_info=None, handlers=None, is_autoparal=None, deps=None, history=None,
task_type=None):
"""
Basic __init__, subclasses are supposed to define the same input parameters,
add their own and call super for
the basic ones. The input parameter should be stored as attributes of the instance for serialization and
for inspection.
Args:
abiinput: an |AbinitInput| or an InputFactory. Defines the input used in the run
restart_info: an instance of RestartInfo. This should be present in case the current task is a restart.
Contains information useful to proceed with the restart.
handlers: list of ErrorHandlers that should be used in case of error. If None all the error handlers
available from abipy will be used.
is_autoparal: whether the current task is just an autoparal job or not.
deps: the required file dependencies from previous tasks (e.g. DEN, WFK, ...). Can be a single string,
a list or a dict of the form {task_type: list of dependecies}. The dependencies will be retrieved
from the 'previous_tasks' key in spec.
history: a TaskHistory or a list of items that will be stored in a TaskHistory instance.
task_type: a string that, if not None, overrides the task_type defined in the class.
"""
if handlers is None:
handlers = []
if history is None:
history = []
self.abiinput = abiinput
self.restart_info = restart_info
self.handlers = handlers or [cls() for cls in events.get_event_handler_classes()]
self.is_autoparal = is_autoparal
#TODO: rationalize this and check whether this might create problems due to the fact that if task_type is None,
# self.task_type is the class variable (actually self.task_type refers to self.__class__.task_type) while
# if task_type is specified, self.task_type is an instance variable and is potentially different from
# self.__class__.task_type !
if task_type is not None:
self.task_type = task_type
# deps are transformed to be a list or a dict of lists
if isinstance(deps, dict):
deps = dict(deps)
for k, v in deps.items():
if not isinstance(v, (list, tuple)):
deps[k] = [v]
elif deps and not isinstance(deps, (list, tuple)):
deps = [deps]
self.deps = deps
# create a copy
self.history = TaskHistory(history)
#from Task
def set_workdir(self, workdir):
"""
Sets up the working directory: adds attributes for all the files and directories.
"""
self.workdir = os.path.abspath(workdir)
# Files required for the execution.
self.input_file = File(os.path.join(self.workdir, INPUT_FILE_NAME))
self.output_file = File(os.path.join(self.workdir, OUTPUT_FILE_NAME))
self.files_file = File(os.path.join(self.workdir, FILES_FILE_NAME))
self.log_file = File(os.path.join(self.workdir, LOG_FILE_NAME))
self.stderr_file = File(os.path.join(self.workdir, STDERR_FILE_NAME))
# This file is produce by Abinit if nprocs > 1 and MPI_ABORT.
self.mpiabort_file = File(os.path.join(self.workdir, MPIABORTFILE))
# Directories with input|output|temporary data.
self.indir = Directory(os.path.join(self.workdir, INDIR_NAME))
self.outdir = Directory(os.path.join(self.workdir, OUTDIR_NAME))
self.tmpdir = Directory(os.path.join(self.workdir, TMPDIR_NAME))
#from abitask
def rename_outputs(self):
"""
If rerunning in the same folder, we rename the outputs according to the abipy convention:
Abinit has the very *bad* habit of changing the file extension by appending the characters in [A,B ..., Z]
to the output file, and this breaks a lot of code that relies of the use of a unique file extension.
Here we fix this issue by renaming run.abo to run.abo_[number] if the output file "run.abo" already
exists.
This is applied both if the calculation is rerun from scratch or with a restart in the same folder.
"""
files_to_rename = [self.output_file.path, self.log_file.path]
if not any(os.path.isfile(f) for f in files_to_rename):
return
file_paths = [f for file_path in files_to_rename for f in glob.glob(file_path + '*')]
nums = [int(f) for f in [f.split("_")[-1] for f in file_paths] if f.isdigit()]
new_index = (max(nums) if nums else 0) + 1
for f in files_to_rename:
try:
new_path = f + '_' + str(new_index)
os.rename(f, new_path)
logger.info("Renamed %s to %s" % (f, new_path))
except OSError as exc:
logger.warning("couldn't rename {} to {} : {} ".format(f, new_path, str(exc)))
#from AbintTask
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
pj = os.path.join
app(self.input_file.path) # Path to the input file
app(self.output_file.path) # Path to the output file
app(pj(self.workdir, self.prefix.idata)) # Prefix for input data
app(pj(self.workdir, self.prefix.odata)) # Prefix for output data
app(pj(self.workdir, self.prefix.tdata)) # Prefix for temporary data
# Paths to the pseudopotential files.
# Note that here the pseudos **must** be sorted according to znucl.
# Here we reorder the pseudos if the order is wrong.
ord_pseudos = []
znucl = self.abiinput.structure.to_abivars()["znucl"]
for z in znucl:
for p in self.abiinput.pseudos:
if p.Z == z:
ord_pseudos.append(p)
break
else:
raise ValueError("Cannot find pseudo with znucl %s in pseudos:\n%s" % (z, self.pseudos))
for pseudo in ord_pseudos:
app(pseudo.path)
return "\n".join(lines)
    def config_run(self, fw_spec):
        """
        Configure the job for the run:
        - set up logging system
        - sets and creates the directories and the input files needed to run the task.
        - sets dependencies and data from previous run (both the case of a restart in the same folder as the previous
          FW and the case of a creation of a new folder).

        Args:
            fw_spec: the spec of the current firework.
        """
        # rename outputs if rerunning in the same dir
        # self.rename_outputs()
        # Copy the appropriate dependencies in the in dir
        #TODO it should be clarified if this should stay here or in setup_task().
        self.resolve_deps(fw_spec)
        # if it's the restart of a previous task, perform specific task updates.
        # perform these updates before writing the input, but after creating the dirs.
        if self.restart_info:
            #TODO add if it is a local restart or not
            self.history.log_restart(self.restart_info)
            self.restart()
        # Write files file and input file.
        # The files file is written only once; the input file is always rewritten
        # since the restart may have updated self.abiinput.
        if not self.files_file.exists:
            self.files_file.write(self.filesfile_string)
        self.input_file.write(str(self.abiinput))
    def run_abinit(self, fw_spec):
        """
        Executes abinit and waits for the end of the process.
        The mpirun and abinit commands are retrived from the mpirun_cmd and abinit_cmd keys in the fw_policy
        of the FWTaskManager, that can be overridden by the values in the spec.
        Note that in case of missing definition of these parameters, the values fall back to the default
        values of mpirun_cmd and abinit_cmd: 'mpirun' and 'abinit', assuming that these are properly retrieved
        from the $PATH.

        Raises:
            WalltimeError: if the process had to be killed because it could not finish
                within the available walltime.
        """
        def abinit_process():
            # builds the command line as [mpirun [-n N]] abinit [--timelimit T]
            # and runs it in this worker thread, storing process and returncode on self
            command = []
            #consider the case of serial execution
            if self.ftm.fw_policy.mpirun_cmd:
                command.extend(self.ftm.fw_policy.mpirun_cmd.split())
                if 'mpi_ncpus' in fw_spec:
                    command.extend(['-n', str(fw_spec['mpi_ncpus'])])
            command.extend(self.ftm.fw_policy.abinit_cmd.split())
            if self.walltime:
                mytimelimit = self.walltime
                # leave abinit a 2-minute margin with respect to the job walltime
                if mytimelimit > 240:
                    mytimelimit -= 120
                command.extend(['--timelimit', time2slurm(mytimelimit)])
            # the files file is fed to abinit on stdin; log and stderr are redirected
            with open(self.files_file.path, 'r') as stdin, open(self.log_file.path, 'w') as stdout, \
                    open(self.stderr_file.path, 'w') as stderr:
                self.process = subprocess.Popen(command, stdin=stdin, stdout=stdout, stderr=stderr)
                stdoutdata, stderrdata = self.process.communicate()
                self.returncode = self.process.returncode
        thread = threading.Thread(target=abinit_process)
        # the amount of time left plus a buffer of 2 minutes
        timeout = (self.walltime - (time.time() - self.start_time) - 120) if self.walltime else None
        start_abinit_time = time.time()
        thread.start()
        thread.join(timeout)
        self.history.log_abinit_stop(run_time=(time.time() - start_abinit_time))
        # if the thread is still alive the join timed out: kill abinit and give up
        if thread.is_alive():
            self.process.terminate()
            thread.join()
            raise WalltimeError("The task couldn't be terminated within the time limit. Killed.")
    def get_event_report(self, source='log'):
        """
        Analyzes the main output file for possible Errors or Warnings. Will check the presence of an MPIABORTFILE
        if not output file is found.

        Args:
            source: "output" or "log". Determine which file will be parsed.

        Returns:
            :class:`EventReport` instance or None if none of the output files exist.
        """
        ofile = {
            "output": self.output_file,
            "log": self.log_file}[source]
        parser = events.EventsParser()
        if not ofile.exists:
            if not self.mpiabort_file.exists:
                return None
            else:
                # ABINIT abort file without log!
                abort_report = parser.parse(self.mpiabort_file.path)
                return abort_report
        try:
            report = parser.parse(ofile.path)
            # Add events found in the ABI_MPIABORTFILE.
            if self.mpiabort_file.exists:
                logger.critical("Found ABI_MPIABORTFILE!")
                abort_report = parser.parse(self.mpiabort_file.path)
                if len(abort_report) == 0:
                    logger.warning("ABI_MPIABORTFILE but empty")
                else:
                    if len(abort_report) != 1:
                        logger.critical("Found more than one event in ABI_MPIABORTFILE")
                    # Add it to the initial report only if it differs
                    # from the last one found in the main log file.
                    # NOTE(review): both branches below append the event, so the
                    # comparison currently has no effect; the comment above suggests
                    # the else branch was meant to skip the append — confirm intent.
                    last_abort_event = abort_report[-1]
                    if report and last_abort_event != report[-1]:
                        report.append(last_abort_event)
                    else:
                        report.append(last_abort_event)
            return report
        #except parser.Error as exc:
        except Exception as exc:
            # Return a report with an error entry with info on the exception.
            logger.critical("{}: Exception while parsing ABINIT events:\n {}".format(ofile, str(exc)))
            return parser.report_exception(ofile.path, exc)
    def task_analysis(self, fw_spec):
        """
        This function checks final status of the calculation, inspecting the output and error files.
        Sets up the restart in case of convergence not achieved (both from abinit or from the convergence of
        cofiguration parameters) or in case of errors fixable by a ErrorHandler.
        If the job is completed calls conclude_task and prepares the FWAction.

        Args:
            fw_spec: the spec of the current firework.

        Returns:
            A FWAction (detour for restarts, completion otherwise) or None when the
            restart happens locally in the same folder (the outer run loop reruns).

        Raises:
            AbinitRuntimeError: if unfixable errors are encountered.
            UnconvergedError / UnconvergedParametersError: if the number of restarts
                exceeds the number defined in the policy.
        """
        self.report = None
        try:
            self.report = self.get_event_report()
        except Exception as exc:
            msg = "%s exception while parsing event_report:\n%s" % (self, exc)
            logger.critical(msg)
        # If the calculation is ok, parse the outputs
        if self.report is not None:
            # the calculation finished without errors
            if self.report.run_completed:
                # Check if the calculation converged.
                not_ok = self.report.filter_types(self.CRITICAL_EVENTS)
                if not_ok:
                    # unconverged according to abinit: prepare a restart (detour)
                    # unless the allowed number of restarts is exceeded
                    self.history.log_unconverged()
                    local_restart, restart_fw, stored_data = self.prepare_restart(fw_spec)
                    num_restarts = self.restart_info.num_restarts if self.restart_info else 0
                    if num_restarts < self.ftm.fw_policy.max_restarts:
                        if local_restart:
                            return None
                        else:
                            stored_data['final_state'] = 'Unconverged'
                            return FWAction(detours=restart_fw, stored_data=stored_data)
                    else:
                        raise UnconvergedError(self, msg="Unconverged after {} restarts".format(num_restarts),
                                               abiinput=self.abiinput, restart_info=self.restart_info,
                                               history=self.history)
                else:
                    # calculation converged
                    # check if there are custom parameters that should be converged
                    unconverged_params, reset_restart = self.check_parameters_convergence(fw_spec)
                    if unconverged_params:
                        self.history.log_converge_params(unconverged_params, self.abiinput)
                        self.abiinput.set_vars(**unconverged_params)
                        local_restart, restart_fw, stored_data = self.prepare_restart(fw_spec, reset=reset_restart)
                        num_restarts = self.restart_info.num_restarts if self.restart_info else 0
                        if num_restarts < self.ftm.fw_policy.max_restarts:
                            if local_restart:
                                return None
                            else:
                                stored_data['final_state'] = 'Unconverged_parameters'
                                return FWAction(detours=restart_fw, stored_data=stored_data)
                        else:
                            raise UnconvergedParametersError(self, abiinput=self.abiinput,
                                                             restart_info=self.restart_info, history=self.history)
                    else:
                        # everything is ok. conclude the task
                        # hook
                        update_spec, mod_spec, stored_data = self.conclude_task(fw_spec)
                        return FWAction(stored_data=stored_data, update_spec=update_spec, mod_spec=mod_spec)
            # Abinit reported problems
            # Check if the errors could be handled
            if self.report.errors:
                logger.debug('Found errors in report')
                for error in self.report.errors:
                    logger.debug(str(error))
                    try:
                        self.abi_errors.append(error)
                    except AttributeError:
                        # first error encountered: create the accumulator list
                        self.abi_errors = [error]
                # ABINIT errors, try to handle them
                fixed, reset = self.fix_abicritical(fw_spec)
                if fixed:
                    local_restart, restart_fw, stored_data = self.prepare_restart(fw_spec, reset=reset)
                    if local_restart:
                        return None
                    else:
                        return FWAction(detours=restart_fw, stored_data=stored_data)
                else:
                    msg = "Critical events couldn't be fixed by handlers. return code {}".format(self.returncode)
                    logger.error(msg)
                    raise AbinitRuntimeError(self, "Critical events couldn't be fixed by handlers")
        # No errors from abinit. No fix could be applied at this stage.
        # The FW will be fizzled.
        # Try to save the stderr file for Fortran runtime errors.
        #TODO check if some cases could be handled here
        err_msg = None
        if self.stderr_file.exists:
            #print(self.stderr_file)
            #TODO length should always be enough, but maybe it's worth cutting the message if it's too long
            err_msg = self.stderr_file.read()
            # It happened that the text file contained non utf-8 characters.
            # sanitize the text to avoid problems during database insertion
            # remove decode as incompatible with python 3
            # err_msg.decode("utf-8", "ignore")
        logger.error("return code {}".format(self.returncode))
        raise AbinitRuntimeError(self, err_msg)
    def check_parameters_convergence(self, fw_spec):
        """
        Base method related to the iterative convergence of some configuration parameter.
        Specific task should overwrite this method and implement appropriate checks and updates of the
        specific parameters.

        Args:
            fw_spec: The spec

        Returns:
            (tuple): tuple containing:
                - unconverged_params(dict): The uncoverged input variables that should be updated as keys and their
                  corresponding new values as values.
                - reset (boolean): True if a reset is required in self.prepare_restart.
        """
        # base implementation: no custom parameters to converge
        return {}, False
def _get_init_args_and_vals(self):
"""
Inspection method to extract variables and values of the arguments of __init__ that should be stored in self.
"""
init_dict = {}
for arg in inspect.getfullargspec(self.__init__).args:
if arg != "self":
init_dict[arg] = self.__getattribute__(arg)
return init_dict
    def _exclude_from_spec_in_restart(self):
        """
        List of keys that should not be forwarded to the newly created firework in case of restart.
        '_tasks' holds the old task objects and '_exception_details' the serialized
        failure of the previous launch: both must be rebuilt, not forwarded.
        """
        return ['_tasks', '_exception_details']
    def prepare_restart(self, fw_spec, reset=False):
        """
        Determines the required information for the restart. It will be called at the end of a task which requires
        a restart (both for an error or for the convergence of some configuration parameter).
        Sets self.restart_info with an instance of RestartInfo.

        Args:
            fw_spec: the spec
            reset: if True a reset will be set in the restart_info

        Returns:
            (tuple): tuple containing:
                - local_restart (boolean): True if the restart should be in the same folder
                - new_fw (Firework): The new firework that should be used for detour
                - stored_data (dict): Dict to be saved in the "stored_data"
        """
        if self.restart_info:
            num_restarts = self.restart_info.num_restarts + 1
        else:
            num_restarts = 0
        self.restart_info = RestartInfo(previous_dir=self.workdir, reset=reset, num_restarts=num_restarts)
        # forward all the specs of the task
        new_spec = {k: v for k, v in fw_spec.items() if k not in self._exclude_from_spec_in_restart()}
        local_restart = False
        # only restart if it is known that there is a reasonable amount of time left
        if self.ftm.fw_policy.allow_local_restart and self.walltime and self.walltime/2 > (time.time() - self.start_time):
            local_restart = True
        # run here the autorun, otherwise it would need a separated FW
        if self.ftm.fw_policy.autoparal:
            # in case of restarting from the same folder the autoparal subfolder can already exist
            # create a new one with increasing number
            i = 0
            while os.path.exists(os.path.join(self.workdir, "autoparal{}".format("_"+str(i) if i else ""))):
                i += 1
            autoparal_dir = os.path.join(self.workdir, "autoparal{}".format("_"+str(i) if i else ""))
            optconf, qadapter_spec, qtk_qadapter = self.run_autoparal(self.abiinput, autoparal_dir, self.ftm)
            self.history.log_autoparal(optconf)
            self.abiinput.set_vars(optconf.vars)
            # set quadapter specification.
            new_spec['_queueadapter'] = qadapter_spec
            new_spec['mpi_ncpus'] = optconf['mpi_ncpus']
            # if autoparal enabled, the number of processors should match the current number to restart in place
            local_restart = local_restart and qadapter_spec == fw_spec.get('_queueadapter', {})
        # increase the index associated with the specific task in the workflow
        if 'wf_task_index' in fw_spec:
            split = fw_spec['wf_task_index'].split('_')
            new_spec['wf_task_index'] = '{}_{:d}'.format('_'.join(split[:-1]), int(split[-1])+1)
        # new task. Construct it from the actual values of the input parameters
        restart_task = self.__class__(**self._get_init_args_and_vals())
        if self.ftm.fw_policy.rerun_same_dir:
            new_spec['_launch_dir'] = self.workdir
        # create the new FW
        new_fw = Firework([restart_task], spec=new_spec)
        # At this point the event report should be present
        stored_data = {}
        stored_data['report'] = self.report.as_dict()
        stored_data['finalized'] = False
        stored_data['restarted'] = True
        return local_restart, new_fw, stored_data
def fix_abicritical(self, fw_spec):
"""
method to fix crashes/error caused by abinit
Returns:
(tuple): tuple containing:
retcode (int): 1 if task has been fixed else 0.
reset (boolean): True if at least one of the corrections applied requires a reset
"""
if not self.handlers:
logger.info('Empty list of event handlers. Cannot fix abi_critical errors')
return 0
done = len(self.handlers) * [0]
corrections = []
for event in self.report:
for i, handler in enumerate(self.handlers):
if handler.can_handle(event) and not done[i]:
logger.info("handler {} will try to fix {}".format(handler, event))
try:
c = handler.handle_input_event(self.abiinput, self.outdir, event)
if c:
done[i] += 1
corrections.append(c)
except Exception as exc:
logger.critical(str(exc))
if corrections:
reset = any(c.reset for c in corrections)
self.history.log_corrections(corrections)
return 1, reset
logger.info('We encountered AbiCritical events that could not be fixed')
return 0, None
    def setup_task(self, fw_spec):
        """
        Sets up the requirements for the task:
        - sets several attributes
        - generates the input in case self.abiinput is a factory
        - makes directories
        - handles information in '_exception_details'

        Args:
            fw_spec: the spec of the current firework.

        Raises:
            InitializationError: if the input factory needs a previous input that
                cannot be identified from the spec.
        """
        self.start_time = time.time()
        self.set_logger()
        # load the FWTaskManager to get configuration parameters
        self.ftm = self.get_fw_task_manager(fw_spec)
        # set walltime, if possible
        self.walltime = None
        if self.ftm.fw_policy.walltime_command:
            # the configured shell command is expected to print the remaining
            # walltime (in seconds) on stdout and exit with status 0
            try:
                p = subprocess.Popen(self.ftm.fw_policy.walltime_command, shell=True, stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, err = p.communicate()
                status = p.returncode
                if status == 0 and out:
                    self.walltime = int(out.decode("utf-8"))
                else:
                    logger.warning("Impossible to get the walltime: " + err.decode("utf-8"))
            except Exception as e:
                logger.warning("Impossible to get the walltime: ", exc_info=True)
        # read autoparal policy from config if not explicitly set
        if self.is_autoparal is None:
            self.is_autoparal = self.ftm.fw_policy.autoparal
        initialization_info = fw_spec.get('initialization_info', {})
        # if the input is a factory, dynamically create the abinit input. From now on the code will expect an
        # AbinitInput and not a factory. In this case there should be either a single input coming from the previous
        # fws or a deps specifying which input use
        if isinstance(self.abiinput, InputFactory):
            initialization_info['input_factory'] = self.abiinput
            previous_input = None
            if self.abiinput.input_required:
                previous_fws = fw_spec.get('previous_fws', {})
                # check if the input source is specified
                task_type_source = None
                if isinstance(self.deps, dict):
                    try:
                        task_type_source = [tt for tt, deps in self.deps.items() if '@input' in deps][0]
                    except IndexError:
                        pass
                # if not there should be only one previous fw
                if not task_type_source:
                    if len(previous_fws) != 1:
                        msg = 'The input source cannot be identified from depenencies {}. ' \
                              'required by factory {}.'.format(self.deps, self.abiinput.__class__)
                        logger.error(msg)
                        raise InitializationError(msg)
                    task_type_source = list(previous_fws.keys())[0]
                # the task_type_source should contain just one task and contain the 'input' key
                if len(previous_fws[task_type_source]) != 1 or not previous_fws[task_type_source][0].get('input', None):
                    msg = 'The factory {} requires the input from previous run in the spec'.format(self.abiinput.__class__)
                    logger.error(msg)
                    raise InitializationError(msg)
                # a single input exists
                previous_input = previous_fws[task_type_source][0]['input']
                if not isinstance(previous_input, AbinitInput):
                    # the input was serialized in the spec: deserialize it
                    previous_input = AbinitInput.from_dict(previous_input)
                initialization_info['previous_input'] = previous_input
            self.abiinput = self.abiinput.build_input(previous_input)
        initialization_info['initial_input'] = self.abiinput
        # if it's the first run log the initialization of the task
        if len(self.history) == 0:
            self.history.log_initialization(self, initialization_info)
        # update data from previous run if it is not a restart
        if 'previous_fws' in fw_spec and not self.restart_info:
            self.load_previous_fws_data(fw_spec)
        self.set_workdir(workdir=os.getcwd())
        # Create dirs for input, output and tmp data.
        self.indir.makedirs()
        self.outdir.makedirs()
        self.tmpdir.makedirs()
        # initialize returncode to avoid missing references in case of exception in the other thread
        self.returncode = None
        # check if there is a rerun of the FW with info on the exception.
        # if that's the case use the restart information stored there to continue the calculation
        exception_details = fw_spec.get('_exception_details', None)
        # assume that DECODE_MONTY=True so that a handeled exception has already been deserialized
        if exception_details and isinstance(exception_details, AbinitRuntimeError):
            error_code = exception_details.ERROR_CODE
            if (self.ftm.fw_policy.continue_unconverged_on_rerun and error_code == ErrorCode.UNCONVERGED and
                    exception_details.abiinput and exception_details.restart_info and
                    exception_details.history):
                self.abiinput = exception_details.abiinput
                self.restart_info = exception_details.restart_info
                self.history = exception_details.history
def run_task(self, fw_spec):
try:
self.setup_task(fw_spec)
if self.is_autoparal:
return self.autoparal(fw_spec)
else:
# loop to allow local restart
while True:
self.config_run(fw_spec)
# try to recover previous run
if not self.ftm.fw_policy.recover_previous_job or not os.path.isfile(self.output_file.path):
self.run_abinit(fw_spec)
action = self.task_analysis(fw_spec)
if action:
return action
except BaseException as exc:
# log the error in history and reraise
self.history.log_error(exc)
raise
finally:
# Always dump the history for automatic parsing of the folders
with open(HISTORY_JSON, "w") as f:
json.dump(self.history, f, cls=MontyEncoder, indent=4, sort_keys=4)
    def restart(self):
        """
        Restart method. Each subclass should implement its own restart. It is called at the beginning of a task
        that is the restart of a previous one. The base class should be called for common restarting operations.
        """
        # no common restart operation is needed at this level
        pass
def conclude_task(self, fw_spec):
"""
Performs operations that should be handled at the end of the tasks in case of successful completion.
Subclasses can overwrite to add additional operations, but the returns should be the same and
it is suggested to call original method with super.
Args:
fw_spec: the spec
Returns:
(tuple): tuple containing:
update_spec (dict): dictionary that should be passed to update_spec
mod_spec (dict): dictionary that should be passed to mod_spec
stored_data (dict): dictionary that should be passed to stored_data
"""
stored_data = {}
stored_data['report'] = self.report.as_dict()
stored_data['finalized'] = True
self.history.log_finalized(self.abiinput)
stored_data['history'] = self.history.as_dict()
update_spec = {}
mod_spec = self.get_final_mod_spec(fw_spec)
return update_spec, mod_spec, stored_data
def current_task_info(self, fw_spec):
"""
A dict containing information that should be passed to subsequent tasks.
It should contain at least the current workdir and input. Subclasses can add specific additional information.
"""
return dict(dir=self.workdir, input=self.abiinput)
def autoparal(self, fw_spec):
"""
Runs the task in autoparal and creates the new Firework with the optimized configuration.
Args:
fw_spec: the spec
Returns:
The FWAction containing the detour Firework.
"""
# Copy the appropriate dependencies in the in dir. needed in some cases
self.resolve_deps(fw_spec)
optconf, qadapter_spec, qtk_qadapter = self.run_autoparal(self.abiinput, os.path.abspath('.'), self.ftm)
self.history.log_autoparal(optconf)
self.abiinput.set_vars(optconf.vars)
task = self.__class__(**self._get_init_args_and_vals())
task.is_autoparal = False
# forward all the specs of the task
new_spec = {k: v for k, v in fw_spec.items() if k != '_tasks'}
# set quadapter specification. Note that mpi_ncpus may be different from ntasks
new_spec['_queueadapter'] = qadapter_spec
new_spec['mpi_ncpus'] = optconf['mpi_ncpus']
if 'wf_task_index' in fw_spec:
split = fw_spec['wf_task_index'].split('_')
new_spec['wf_task_index'] = '{}_{:d}'.format('_'.join(split[:-1]), 1)
new_fw = Firework([task], new_spec)
return FWAction(detours=new_fw)
    def resolve_deps_per_task_type(self, previous_tasks, deps_list):
        """
        Method to link the required deps for the current FW for a specific task_type.
        Sets the ird variables corresponding to the linked dependecies.

        Args:
            previous_tasks: list of previous tasks from which the dependencies should be linked
            deps_list: list of dependencies that should be linked

        Raises:
            InitializationError: if a '@structure' dependency is requested but the
                previous task did not provide a structure.
        """
        for previous_task in previous_tasks:
            for d in deps_list:
                if d.startswith('@structure'):
                    # special dependency: take the structure from the previous task
                    if 'structure' not in previous_task:
                        msg = "previous_fws does not contain the structure."
                        logger.error(msg)
                        raise InitializationError(msg)
                    self.abiinput.set_structure(previous_task['structure'])
                #FIXME out.nc is not safe. Check if needed and move to other nc files in case.
                # elif d.startswith('@outnc'):
                #     varname = d.split('.')[1]
                #     outnc_path = os.path.join(previous_task['dir'], self.prefix.odata + "_OUT.nc")
                #     outnc_file = OutNcFile(outnc_path)
                #     vars = {varname: outnc_file[varname]}
                #     self.abiinput.set_vars(vars)
                elif not d.startswith('@'):
                    # plain file dependency: set the matching ird* variable and link
                    # the file from the previous task's directory
                    source_dir = previous_task['dir']
                    self.abiinput.set_vars(irdvars_for_ext(d))
                    if d == "DDK":
                        self.link_ddk(source_dir)
                    elif d == "1WF" or d == "1DEN":
                        self.link_1ext(d, source_dir)
                    else:
                        self.link_ext(d, source_dir)
def resolve_deps(self, fw_spec):
"""
Method to link the required deps for the current FW.
Note that different cases are handled here depending whether the current FW is a restart or not and whether
the rerun is performed in the same folder or not.
In case of restart the safest choice is to link the deps of the previous FW, so that if they have been
updated in the meanwhile we are taking the correct one.
TODO: this last case sounds quite unlikely and should be tested
"""
# If no deps, nothing to do here
if not self.deps:
return
if not self.restart_info:
# If this is the first run of the task, the informations are taken from the 'previous_fws',
# that should be present.
previous_fws = fw_spec.get('previous_fws', None)
if previous_fws is None:
msg = "No previous_fws data. Needed for dependecies {}.".format(str(self.deps))
logger.error(msg)
raise InitializationError(msg)
if isinstance(self.deps, (list, tuple)):
# check that there is only one previous_fws
if len(previous_fws) != 1 or len(previous_fws.values()[0]) != 1:
msg = "previous_fws does not contain a single reference. " \
"Specify the dependency for {}.".format(str(self.deps))
logger.error(msg)
raise InitializationError(msg)
self.resolve_deps_per_task_type(previous_fws.values()[0], self.deps)
else:
# deps should be a dict
for task_type, deps_list in self.deps.items():
if task_type not in previous_fws:
msg = "No previous_fws data for task type {}.".format(task_type)
logger.error(msg)
raise InitializationError(msg)
if len(previous_fws[task_type]) < 1:
msg = "Previous_fws does not contain any reference for task type {}, " \
"needed in reference {}. ".format(task_type, str(self.deps))
logger.error(msg)
raise InitializationError(msg)
elif len(previous_fws[task_type]) > 1:
msg = "Previous_fws contains more than a single reference for task type {}, " \
"needed in reference {}. Risk of overwriting.".format(task_type, str(self.deps))
logger.warning(msg)
self.resolve_deps_per_task_type(previous_fws[task_type], deps_list)
else:
# If it is a restart, link the one from the previous task.
# If it's in the same dir, it is assumed that the dependencies have been correctly resolved in the previous
# run. So do nothing
if self.restart_info.previous_dir == self.workdir:
logger.info('rerunning in the same dir, no action on the deps')
return
#just link everything from the indata folder of the previous run. files needed for restart will be overwritten
prev_indata = os.path.join(self.restart_info.previous_dir, INDIR_NAME)
for f in os.listdir(prev_indata):
# if the target is already a link, link to the source to avoid many nested levels of linking
source = os.path.join(prev_indata, f)
if os.path.islink(source):
source = os.readlink(source)
os.symlink(source, os.path.join(self.workdir, INDIR_NAME, f))
    def load_previous_fws_data(self, fw_spec):
        """
        Called if a previous_fws key is in spec and the job is not a restart. Allows to load information from previous
        tasks if needed. Subclasses can overwrite to handle specific cases.

        Args:
            fw_spec: The spec
        """
        # the base task has nothing to load
        pass
def out_to_in(self, out_file):
"""
links or copies, according to the fw_policy, the output file to the input data directory of this task
and rename the file so that ABINIT will read it as an input data file.
Returns:
The absolute path of the new file in the indata directory.
"""
in_file = os.path.basename(out_file).replace("out", "in", 1)
dest = os.path.join(self.indir.path, in_file)
if os.path.exists(dest) and not os.path.islink(dest):
logger.warning("Will overwrite %s with %s" % (dest, out_file))
# if rerunning in the same folder the file should be moved anyway
if self.ftm.fw_policy.copy_deps or self.workdir == self.restart_info.previous_dir:
shutil.copyfile(out_file, dest)
else:
# if dest already exists should be overwritten. see also resolve_deps and config_run
try:
os.symlink(out_file, dest)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(dest)
os.symlink(out_file, dest)
else:
raise e
return dest
def in_to_in(self, in_file):
"""
Copies the input file to the input of a previous task to the data directory of this task
Returns:
The absolute path of the new file in the indata directory.
"""
dest = os.path.join(self.indir.path, os.path.basename(in_file))
if os.path.exists(dest) and not os.path.islink(dest):
logger.warning("Will overwrite %s with %s" % (dest, in_file))
# os.rename(out_file, dest)
shutil.copy(in_file, dest)
return dest
def remove_restart_vars(self, exts):
"""
Removes from the current input the ird values associated with the extension.
Useful in case of reset during a restart.
"""
if not isinstance(exts, (list, tuple)):
exts = [exts]
remove_vars = [v for e in exts for v in irdvars_for_ext(e).keys()]
self.abiinput.remove_vars(remove_vars, strict=False)
logger.info("Removing variables {} from input".format(remove_vars))
##############################
# Specific tasks
##############################
class GsFWTask(AbiFireTask):
    """
    Base Task to handle Ground state calculation.
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: GsFWTask
    """

    @property
    def gsr_path(self):
        """Absolute path of the GSR file. Empty string if file is not present."""
        # Lazy property: compute once and cache the result on the instance to
        # avoid multiple calls to has_abiext.
        try:
            return self._gsr_path
        except AttributeError:
            gsr = self.outdir.has_abiext("GSR")
            if gsr:
                self._gsr_path = gsr
            return gsr

    def open_gsr(self):
        """
        Open the GSR.nc_ file located in the in self.outdir.
        Returns |GsrFile| object, raise a PostProcessError exception if file could not be found or file is not readable.
        """
        path = self.gsr_path
        if not path:
            msg = "No GSR file available for task {} in {}".format(self, self.outdir)
            logger.critical(msg)
            raise PostProcessError(msg)
        try:
            return GsrFile(path)
        except Exception as exc:
            msg = "Exception while reading GSR file at %s:\n%s" % (path, str(exc))
            logger.critical(msg)
            raise PostProcessError(msg)
@explicit_serialize
class ScfFWTask(GsFWTask):
    """
    Task to handle SCF calculations
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: ScfFWTask
    """
    task_type = "scf"

    CRITICAL_EVENTS = [
        events.ScfConvergenceWarning,
    ]

    def restart(self):
        """SCF calculations can be restarted if we have either the WFK file or the DEN file."""
        if self.restart_info.reset:
            # remove non reset keys that may have been added in a previous restart
            self.remove_restart_vars(["WFK", "DEN"])
        else:
            # Prefer WFK over DEN files since we can reuse the wavefunctions.
            restart_file, irdvars = None, None
            for ext in ("WFK", "DEN"):
                candidate = self.restart_info.prev_outdir.has_abiext(ext)
                if candidate:
                    restart_file, irdvars = candidate, irdvars_for_ext(ext)
                    break
            if restart_file is None:
                msg = "Cannot find WFK or DEN file to restart from."
                logger.error(msg)
                raise RestartError(msg)

            # Move out --> in.
            self.out_to_in(restart_file)

            # Add the appropriate variable for restarting.
            self.abiinput.set_vars(irdvars)
@explicit_serialize
class NscfFWTask(GsFWTask):
    """
    Task to handle non SCF calculations
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: NscfFWTask
    """
    task_type = "nscf"

    CRITICAL_EVENTS = [
        events.NscfConvergenceWarning,
    ]

    def restart(self):
        """NSCF calculations can be restarted only if we have the WFK file."""
        if self.restart_info.reset:
            # remove non reset keys that may have been added in a previous restart
            self.remove_restart_vars(["WFK"])
            return

        restart_file = self.restart_info.prev_outdir.has_abiext("WFK")
        if not restart_file:
            msg = "Cannot find the WFK file to restart from."
            logger.error(msg)
            raise RestartError(msg)

        # Move out --> in, then add the appropriate variable for restarting.
        self.out_to_in(restart_file)
        self.abiinput.set_vars(irdvars_for_ext("WFK"))
@explicit_serialize
class NscfWfqFWTask(NscfFWTask):
    """
    Task to handle non SCF calculations for the calculations of the WFQ.
    Differs from :class:`NscfFWTask` for the different restart requirements.
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: NscfWfqFWTask
    """
    task_type = "nscf_wfq"

    def restart(self):
        """
        NSCF calculations can be restarted only if we have the WFK file.
        Wfq calculations require a WFK file for restart. The produced out_WFQ
        needs to be linked as a in_WFK with appropriate irdwfk=1.
        """
        if self.restart_info.reset:
            # remove non reset keys that may have been added in a previous restart
            self.remove_restart_vars(["WFQ", "WFK"])
            return

        restart_file = self.restart_info.prev_outdir.has_abiext("WFQ")
        if not restart_file:
            msg = "Cannot find the WFK file to restart from."
            logger.error(msg)
            raise RestartError(msg)

        # Move out --> in (the WFQ is renamed to a WFK input by out_to_in).
        self.out_to_in(restart_file)

        # Add the appropriate variable for restarting.
        self.abiinput.set_vars(irdvars_for_ext("WFK"))

    def out_to_in(self, out_file):
        """
        links or copies, according to the fw_policy, the output file to the input data directory of this task
        and rename the file so that ABINIT will read it as an input data file.
        In the case of Wfq calculations out_WFQ needs to be linked as a in_WFK
        Returns:
            The absolute path of the new file in the indata directory.
        """
        # rename out -> in and WFQ -> WFK so ABINIT reads the WFQ as an input WFK
        renamed = os.path.basename(out_file).replace("out", "in", 1).replace("WFQ", "WFK", 1)
        dest = os.path.join(self.indir.path, renamed)

        if os.path.exists(dest) and not os.path.islink(dest):
            logger.warning("Will overwrite %s with %s" % (dest, out_file))

        # when rerunning in the same folder the file has to be physically copied anyway
        if self.ftm.fw_policy.copy_deps or self.workdir == self.restart_info.previous_dir:
            shutil.copyfile(out_file, dest)
        else:
            # an already existing dest should be overwritten. see also resolve_deps and config_run
            try:
                os.symlink(out_file, dest)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise e
                os.remove(dest)
                os.symlink(out_file, dest)

        return dest
@explicit_serialize
class RelaxFWTask(GsFWTask):
    """
    Task to handle relax calculations
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: RelaxFWTask
    """
    task_type = "relax"
    CRITICAL_EVENTS = [
        events.RelaxConvergenceWarning,
    ]
    def get_final_structure(self):
        """Read the final structure from the GSR.nc_ file."""
        try:
            with self.open_gsr() as gsr:
                return gsr.structure
        except AttributeError:
            # open_gsr raises PostProcessError when the file is missing; the
            # AttributeError is raised if the GSR object has no structure.
            msg = "Cannot find the GSR file with the final structure to restart from."
            logger.error(msg)
            raise PostProcessError(msg)
    def prepare_restart(self, fw_spec, reset=False):
        """
        Sets the final structure in abiinput, so that the relax can continue in the detour and calls the baseclass
        method.
        """
        self.abiinput.set_structure(self.get_final_structure())
        return super().prepare_restart(fw_spec, reset)
    def restart(self):
        """
        Restart the structural relaxation.
        See original RelaxTask for more details
        """
        if self.restart_info.reset:
            # remove non reset keys that may have been added in a previous restart
            self.remove_restart_vars(["WFK", "DEN"])
        else:
            # for optcell > 0 it may fail to restart if paral_kgb == 0. Do not use DEN or WFK in this case
            #FIXME fix when Matteo makes the restart possible for paral_kgb == 0
            paral_kgb = self.abiinput.get('paral_kgb', 0)
            optcell = self.abiinput.get('optcell', 0)
            if optcell == 0 or paral_kgb == 1:
                restart_file = None
                # Try to restart from the WFK file if possible.
                # FIXME: This part has been disabled because WFK=IO is a mess if paral_kgb == 1
                # This is also the reason why I wrote my own MPI-IO code for the GW part!
                wfk_file = self.restart_info.prev_outdir.has_abiext("WFK")
                # NOTE: the "if False" deliberately disables the WFK branch (see FIXME above).
                if False and wfk_file:
                    irdvars = irdvars_for_ext("WFK")
                    restart_file = self.out_to_in(wfk_file)
                # Fallback to DEN file. Note that here we look for out_DEN instead of out_TIM?_DEN
                # This happens when the previous run completed and task.on_done has been performed.
                # ********************************************************************************
                # Note that it's possible to have an undected error if we have multiple restarts
                # and the last relax died badly. In this case indeed out_DEN is the file produced
                # by the last run that has executed on_done.
                # ********************************************************************************
                if restart_file is None:
                    out_den = self.restart_info.prev_outdir.path_in("out_DEN")
                    if os.path.exists(out_den):
                        irdvars = irdvars_for_ext("DEN")
                        restart_file = self.out_to_in(out_den)
                if restart_file is None:
                    # Try to restart from the last TIM?_DEN file.
                    # This should happen if the previous run didn't complete in clean way.
                    # Find the last TIM?_DEN file.
                    last_timden = self.restart_info.prev_outdir.find_last_timden_file()
                    if last_timden is not None:
                        if last_timden.path.endswith(".nc"):
                            in_file_name = ("in_DEN.nc")
                        else:
                            in_file_name = ("in_DEN")
                        restart_file = self.out_to_in_tim(last_timden.path, in_file_name)
                        irdvars = irdvars_for_ext("DEN")
                if restart_file is None:
                    # Don't raise RestartError as the structure has been updated
                    logger.warning("Cannot find the WFK|DEN|TIM?_DEN file to restart from.")
                else:
                    # Add the appropriate variable for restarting.
                    self.abiinput.set_vars(irdvars)
                    logger.info("Will restart from %s", restart_file)
    def current_task_info(self, fw_spec):
        """
        Add the final structure to the basic current_task_info
        """
        d = super().current_task_info(fw_spec)
        d['structure'] = self.get_final_structure()
        return d
    # def conclude_task(self, fw_spec):
    #     update_spec, mod_spec, stored_data = super().conclude_task(fw_spec)
    #     update_spec['previous_run']['structure'] = self.get_final_structure()
    #     return update_spec, mod_spec, stored_data
    @property
    def hist_nc_path(self):
        """Absolute path of the HIST.nc_ file. Empty string if file is not present."""
        # Lazy property to avoid multiple calls to has_abiext.
        try:
            return self._hist_nc_path
        except AttributeError:
            path = self.outdir.has_abiext("HIST")
            if path: self._hist_nc_path = path
            return path
    def out_to_in_tim(self, out_file, in_file):
        """
        links or copies, according to the fw_policy, the output file to the input data directory of this task
        and rename the file so that ABINIT will read it as an input data file. for the TIM file the input needs to
        be specified as depends on the specific iteration.
        Returns:
            The absolute path of the new file in the indata directory.
        """
        dest = os.path.join(self.indir.path, in_file)
        if os.path.exists(dest) and not os.path.islink(dest):
            logger.warning("Will overwrite %s with %s" % (dest, out_file))
        # if rerunning in the same folder the file should be moved anyway
        if self.ftm.fw_policy.copy_deps or self.workdir == self.restart_info.previous_dir:
            shutil.copyfile(out_file, dest)
        else:
            # if dest already exists should be overwritten. see also resolve_deps and config_run
            try:
                os.symlink(out_file, dest)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    os.remove(dest)
                    os.symlink(out_file, dest)
                else:
                    raise e
        return dest
@explicit_serialize
class HybridFWTask(GsFWTask):
    """
    Task to handle hybrid functional calculations based on GW.
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: HybridFWTask
    """
    task_type = "hybrid"

    CRITICAL_EVENTS = [
    ]

    @property
    def sigres_path(self):
        """Absolute path of the SIGRES.nc file. Empty string if file is not present."""
        # Lazy property: compute once and cache the result on the instance to
        # avoid multiple calls to has_abiext.
        try:
            return self._sigres_path
        except AttributeError:
            sigres = self.outdir.has_abiext("SIGRES")
            if sigres:
                self._sigres_path = sigres
            return sigres

    def open_sigres(self):
        """
        Open the SIGRES.nc_ file located in the in self.outdir.
        Returns |SigresFile| object, None if file could not be found or file is not readable.
        """
        path = self.sigres_path
        if not path:
            msg = "%s didn't produce a SIGRES file in %s" % (self, self.outdir)
            logger.critical(msg)
            raise PostProcessError(msg)
        # Open the SIGRES file and add its data to results.out
        try:
            return SigresFile(path)
        except Exception as exc:
            msg = "Exception while reading SIGRES file at %s:\n%s" % (path, str(exc))
            logger.critical(msg)
            raise PostProcessError(msg)
@explicit_serialize
class DfptTask(AbiFireTask):
    """
    Base Task to handle DFPT calculations
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: DfptTask
    """
    CRITICAL_EVENTS = [
        events.ScfConvergenceWarning,
    ]
    task_type = "dfpt"

    def restart(self):
        """
        Phonon calculations can be restarted only if we have the 1WF file or the 1DEN file.
        from which we can read the first-order wavefunctions or the first order density.
        Prefer 1WF over 1DEN since we can reuse the wavefunctions.
        Try to handle an input with many perturbation calculated at the same time. link/copy all the 1WF or 1DEN files
        """
        # Abinit adds the idir-ipert index at the end of the file and this breaks the extension
        # e.g. out_1WF4, out_DEN4. find_1wf_files and find_1den_files returns the list of files found
        #TODO check for reset
        restart_files, irdvars = None, None

        # Highest priority to the 1WF file because restart is more efficient.
        wf_files = self.restart_info.prev_outdir.find_1wf_files()
        if wf_files is not None:
            irdvars = irdvars_for_ext("1WF")
            restart_files = [wf.path for wf in wf_files]
            # if len(wf_files) != 1:
            #     restart_files = None
            #     logger.critical("Found more than one 1WF file. Restart is ambiguous!")

        # Fall back to the first-order density files.
        if restart_files is None:
            den_files = self.restart_info.prev_outdir.find_1den_files()
            if den_files is not None:
                irdvars = {"ird1den": 1}
                restart_files = [den.path for den in den_files]
                # if len(den_files) != 1:
                #     restart_files = None
                #     logger.critical("Found more than one 1DEN file. Restart is ambiguous!")

        if not restart_files:
            # Raise because otherwise restart is equivalent to a run from scratch --> infinite loop!
            msg = "Cannot find the 1WF|1DEN file to restart from."
            logger.error(msg)
            raise RestartError(msg)

        # Move every restart file out --> in.
        for path in restart_files:
            self.out_to_in(path)

        # Add the appropriate variable for restarting.
        self.abiinput.set_vars(irdvars)
@explicit_serialize
class DdkTask(DfptTask):
    """
    Task to handle DDK calculations
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: DdkTask
    """
    task_type = "ddk"

    def conclude_task(self, fw_spec):
        """
        Extends the base class method in order to create a _DDK file from the 1WF.
        """
        # make a link with the _DDK suffix for each 1WF file, to ease the
        # resolution of the DDK dependency in the following fireworks
        wf_files = self.outdir.find_1wf_files()
        if not wf_files:
            raise PostProcessError("Couldn't link 1WF files.")
        for wf in wf_files:
            os.symlink(wf.path, wf.path + '_DDK')
        return super().conclude_task(fw_spec)
@explicit_serialize
class DdeTask(DfptTask):
    """
    Task to handle DDE calculations
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: DdeTask
    """
    # Identifier used to reference this kind of task in previous_fws/deps.
    task_type = "dde"
@explicit_serialize
class PhononTask(DfptTask):
    """
    Task to handle phonon calculations
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: PhononTask
    """
    # Identifier used to reference this kind of task in previous_fws/deps.
    task_type = "phonon"
@explicit_serialize
class BecTask(DfptTask):
    """
    Task to handle BEC calculations
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: BecTask
    """
    # Identifier used to reference this kind of task in previous_fws/deps.
    task_type = "bec"
@explicit_serialize
class StrainPertTask(DfptTask):
    """
    Task to handle strain calculations
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: StrainPertTask
    """
    # Identifier used to reference this kind of task in previous_fws/deps.
    task_type = "strain_pert"
@explicit_serialize
class DteTask(DfptTask):
    """
    Task to handle the third derivatives with respect to the electric field.
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: DteTask
    """
    CRITICAL_EVENTS = []

    task_type = "dte"

    def run_autoparal(self, abiinput, autoparal_dir, ftm, clean_up='move'):
        """
        Non-linear calculations do not support autoparal. Instead of a real
        autoparal run, return the fake one with the suggested number of processors.
        """
        return self.run_fake_autoparal(ftm)
##############################
# Convergence tasks
##############################
@explicit_serialize
class RelaxDilatmxFWTask(RelaxFWTask):
    """
    Task to handle relax calculations with iterative convergence of the dilatmx
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: DteTask
    """

    def __init__(self, abiinput, restart_info=None, handlers=None, is_autoparal=None, deps=None, history=None,
                 target_dilatmx=1.01):
        """
        Args:
            target_dilatmx: the value of dilatmx that should be reached through the iterations.
        """
        self.target_dilatmx = target_dilatmx
        super().__init__(abiinput=abiinput, restart_info=restart_info,
                         handlers=[] if handlers is None else handlers,
                         is_autoparal=is_autoparal, deps=deps,
                         history=[] if history is None else history)

    def check_parameters_convergence(self, fw_spec):
        """
        Checks if the target value for the dilatmx has been reached. If not reduces the values for the dilatmx and
        signals that a restart is needed.
        Args:
            fw_spec: the spec of the Firework
        """
        current = self.abiinput.get('dilatmx', 1.)
        # reduce dilatmx by at most 3% per iteration, without overshooting the target
        updated = current - min(current - self.target_dilatmx, current * 0.03)
        #FIXME reset can be False with paral_kgb==1
        if updated != current:
            return {'dilatmx': updated}, True
        return {}, True
##############################
# Wrapper tasks
##############################
@explicit_serialize
class MergeDdbAbinitTask(BasicAbinitTaskMixin, FireTaskBase):
    """
    Task to handle the merge of multiple DDB files with mrgddb
    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: MergeDdbAbinitTask
    """
    task_type = "mrgddb"
    def __init__(self, ddb_source_task_types=None, delete_source_ddbs=True, num_ddbs=None, task_type=None):
        """
        ddb_source_task_type: list of task types that will be used as source for the DDB to be merged.
            The default is [PhononTask.task_type, DdeTask.task_type, BecTask.task_type]
        delete_ddbs: delete the ddb files used after the merge
        num_ddbs: number of ddbs to be merged. If set will be used to check that the correct number of ddbs have been
            passed to the task. Tha task will fizzle if the numbers do not match
        """
        if ddb_source_task_types is None:
            ddb_source_task_types = [ScfFWTask.task_type, PhononTask.task_type, DdeTask.task_type, BecTask.task_type,
                                     DteTask.task_type, StrainPertTask.task_type]
        elif not isinstance(ddb_source_task_types, (list, tuple)):
            ddb_source_task_types = [ddb_source_task_types]
        self.ddb_source_task_types = ddb_source_task_types
        self.delete_source_ddbs = delete_source_ddbs
        self.num_ddbs = num_ddbs
        # task_type=None keeps the class attribute; otherwise shadow it with an instance attribute
        if task_type is not None:
            self.task_type = task_type
    def get_ddb_list(self, previous_fws, task_type):
        """
        Given a task_type that can produce DDB files and whole list of previous fireworks available here,
        gets the list of DDB files that should be merged.
        Args:
            previous_fws: list of previous fireworks
            task_type: string describing the task type
        Returns:
            The list of DDB files that should be linked
        """
        ddb_files = []
        #Check that the same directory is not passed more than once (in principle it should not be needed.
        # kept from previous version to avoid regressions)
        mydirs = []
        for t in previous_fws.get(task_type, []):
            if t['dir'] in mydirs:
                continue
            # filepaths = Directory(os.path.join(t['dir'], OUTDIR_NAME)).list_filepaths()
            # a DDB.nc is usually produced along with the text DDB file. has_abiext handles the extraction of
            # the text one, ignoring the netCDF. If has_abiext is changed the problem should be handled here.
            # ddb = self.get_ddb_from_filepaths(filepaths=filepaths)
            ddb = Directory(os.path.join(t['dir'], OUTDIR_NAME)).has_abiext('DDB')
            if not ddb:
                msg = "One of the task of type {} (folder: {}) " \
                      "did not produce a DDB file!".format(task_type, t['dir'])
                raise InitializationError(msg)
            mydirs.append(t['dir'])
            ddb_files.append(ddb)
        return ddb_files
    # def get_ddb_from_filepaths(self, filepaths):
    #     #TODO: temporary fix due to new DDB.nc in addition to DDB ... then has_abiext finds multiple multiple files ...
    #     ext = '_DDB'
    #
    #     files = []
    #     for f in filepaths:
    #         if f.endswith(ext):
    #             files.append(f)
    #
    #     if not files:
    #         return None
    #
    #     if len(files) > 1:
    #         # ABINIT users must learn that multiple datasets are bad!
    #         err_msg = "Found multiple files with the same extensions:\n %s\nPlease avoid the use of mutiple datasets!" % files
    #         raise ValueError(err_msg)
    #
    #     return files[0]
    def get_event_report(self, ofile_name="mrgddb.stdout"):
        """
        Analyzes the main output file for possible Errors or Warnings.
        Args:
            ofile_name: Name of the outpu file.
        Returns:
            :class:`EventReport` instance or None if the output file does not exist.
        """
        ofile = File(os.path.join(self.workdir, ofile_name))
        parser = events.EventsParser()
        if not ofile.exists:
            return None
        else:
            try:
                report = parser.parse(ofile.path)
                return report
            except Exception as exc:
                # Return a report with an error entry with info on the exception.
                logger.critical("{}: Exception while parsing MRGDDB events:\n {}".format(ofile, str(exc)))
                return parser.report_exception(ofile.path, exc)
    def set_workdir(self, workdir):
        """
        Sets up the working directory: adds attributes for all the files and directories.
        """
        self.workdir = workdir
        self.outdir = Directory(os.path.join(self.workdir, OUTDIR_NAME))
    def run_task(self, fw_spec):
        """
        Runs mrgddb on the DDB files produced by the source tasks listed in
        self.ddb_source_task_types and stores the merged out_DDB in the outdata dir.
        Returns a FWAction with the current task info; any failure is logged in
        the task history before being re-raised.
        """
        self.set_workdir(workdir=os.getcwd())
        self.outdir.makedirs()
        self.history = TaskHistory()
        try:
            ftm = self.get_fw_task_manager(fw_spec)
            if not ftm.has_task_manager():
                raise InitializationError("No task manager available: mrgddb could not be performed.")
            mrgddb = Mrgddb(manager=ftm.task_manager, executable=ftm.fw_policy.mrgddb_cmd, verbose=0)
            previous_fws = fw_spec['previous_fws']
            ddb_files = []
            for source_task_type in self.ddb_source_task_types:
                ddb_files.extend(self.get_ddb_list(previous_fws, source_task_type))
            initialization_info = fw_spec.get('initialization_info', {})
            initialization_info['ddb_files_list'] = ddb_files
            self.history.log_initialization(self, initialization_info)
            if not ddb_files:
                raise InitializationError("No DDB files to merge.")
            if self.num_ddbs is not None and self.num_ddbs != len(ddb_files):
                raise InitializationError("Wrong number of DDB files: {} DDB files have been requested, "
                                          "but {} have been linked".format(self.num_ddbs, len(ddb_files)))
            # keep the output in the outdata dir for consistency
            out_ddb = os.path.join(self.workdir, OUTDIR_NAME, "out_DDB")
            desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
            out_ddb = mrgddb.merge(self.workdir, ddb_files, out_ddb=out_ddb, description=desc,
                                   delete_source_ddbs=self.delete_source_ddbs)
            # Temporary fix ... mrgddb doesnt seem to work when I merge the GS DDB file with the Strain DDB file
            # because the info on the pseudopotentials is not in the GS DDB file ...
            # www.welcome2quickanddirty.com (DavidWaroquiers)
            if 'PAW_datasets_description_correction' in fw_spec:
                if len(ddb_files) != 2:
                    raise ValueError('Fix is temporary and only for a number of DDBs equal to 2')
                # Collect the PAW dataset description lines from the DDB that has them.
                fname_with_psp = None
                psp_lines = []
                for fname in ddb_files:
                    in_psp_info = False
                    with open(fname, 'r') as fh:
                        dd = fh.readlines()
                        for iline, line in enumerate(dd):
                            if 'No information on the potentials yet' in line:
                                break
                            if 'Description of the PAW dataset(s)' in line:
                                in_psp_info = True
                                fname_with_psp = fname
                            if in_psp_info:
                                if '**** Database of total energy derivatives ****' in line:
                                    break
                                psp_lines.append(line)
                    if fname_with_psp:
                        break
                if not fname_with_psp:
                    raise ValueError('Should have at least one DDB with the psp info ...')
                # Rewrite the merged DDB, splicing the PAW description in place of the incomplete one.
                out_ddb_backup = '{}.backup'.format(out_ddb)
                shutil.move(out_ddb, out_ddb_backup)
                # NOTE(review): 'fw' here is a file handle (not a Firework); it is not closed if an
                # exception occurs before fw.close() — consider a with-block.
                fw = open(out_ddb, 'w')
                with open(out_ddb_backup, 'r') as fh:
                    dd = fh.readlines()
                    just_copy = True
                    for line in dd:
                        if 'Description of the PAW dataset(s)' in line:
                            just_copy = False
                            for pspline in psp_lines:
                                fw.write(pspline)
                        if just_copy:
                            fw.write(line)
                            continue
                        if '**** Database of total energy derivatives ****' in line:
                            just_copy = True
                            fw.write(line)
                            continue
                fw.close()
            self.report = self.get_event_report()
            if not os.path.isfile(out_ddb) or (self.report and self.report.errors):
                raise AbinitRuntimeError(self, msg="Error during mrgddb.")
            stored_data = dict(finalized=True)
            mod_spec = self.get_final_mod_spec(fw_spec)
            self.history.log_finalized()
            return FWAction(stored_data=stored_data, mod_spec=mod_spec)
        except BaseException as exc:
            # log the error in history and reraise
            self.history.log_error(exc)
            raise
        finally:
            # NOTE(review): sort_keys=4 works only because 4 is truthy; sort_keys=True is the intended spelling.
            with open(HISTORY_JSON, "w") as f:
                json.dump(self.history, f, cls=MontyEncoder, indent=4, sort_keys=4)
    def current_task_info(self, fw_spec):
        """
        A dict containing information that should be passed to subsequent tasks.
        In this case it contains the current workdir.
        """
        return dict(dir=self.workdir)
    @property
    def merged_ddb_path(self):
        """Absolute path of the merged DDB file. Empty string if file is not present."""
        # Lazy property to avoid multiple calls to has_abiext.
        try:
            return self._merged_ddb_path
        except AttributeError:
            path = self.outdir.has_abiext("DDB")
            if path: self._merged_ddb_path = path
            return path
@explicit_serialize
class AnaDdbAbinitTask(BasicAbinitTaskMixin, FireTaskBase):
"""
Task that handles the run of anaddb based on a custom input
.. rubric:: Inheritance Diagram
.. inheritance-diagram:: AnaDdbAbinitTask
"""
task_type = "anaddb"
def __init__(self, anaddb_input, restart_info=None, handlers=None, is_autoparal=None, deps=None, history=None,
task_type=None):
"""
Args:
anaddb_input: |AnaddbInput| object. Defines the input used in the run
restart_info: an instance of RestartInfo. This should be present in case the current task is a restart.
Contains information useful to proceed with the restart.
handlers: list of ErrorHandlers that should be used in case of error. If None all the error handlers
available from abipy will be used.
is_autoparal: whether the current task is just an autoparal job or not.
deps: the required file dependencies from previous tasks (e.g. DEN, WFK, ...). Can be a single string,
a list or a dict of the form {task_type: list of dependecies}. The dependencies will be retrieved
from the 'previous_tasks' key in spec.
history: a TaskHistory or a list of items that will be stored in a TaskHistory instance.
task_type: a string that, if not None, overrides the task_type defined in the class.
"""
if handlers is None:
handlers = []
if history is None:
history = []
self.anaddb_input = anaddb_input
self.restart_info = restart_info
self.handlers = handlers or [cls() for cls in events.get_event_handler_classes()]
self.is_autoparal = is_autoparal
#TODO: rationalize this and check whether this might create problems due to the fact that if task_type is None,
# self.task_type is the class variable (actually self.task_type refers to self.__class__.task_type) while
# if task_type is specified, self.task_type is an instance variable and is potentially different from
# self.__class__.task_type !
if task_type is not None:
self.task_type = task_type
# deps are transformed to be a list or a dict of lists
if isinstance(deps, dict):
deps = dict(deps)
for k, v in deps.items():
if not isinstance(v, (list, tuple)):
deps[k] = [v]
elif deps and not isinstance(deps, (list, tuple)):
deps = [deps]
self.deps = deps
self.history = TaskHistory(history)
@property
def ec_path(self):
"""Absolute path of the GSR.nc_ file. Empty string if file is not present."""
path = self.rundir.has_abiext("EC")
if path: self._ec_path = path
return path
def get_elastic_tensor(self, tensor_type='relaxed_ion'):
"""
Open the EC file located in the in self.workdir.
Returns :class:`ElasticConstant` object, None if file could not be found or file is not readable.
"""
ec_path = self.ec_path
if not ec_path:
msg = "{} reached the conclusion but didn't produce a EC file in {}".format(self, self.workdir)
logger.critical(msg)
raise PostProcessError(msg)
ec = ElasticComplianceTensor.from_ec_nc_file(ec_path, tensor_type=tensor_type)
return ec
def resolve_deps_per_task_type(self, previous_tasks, deps_list):
"""
Method to link the required deps for the current FW for a specific task_type.
Args:
previous_tasks: list of previous tasks from which the dependencies should be linked
deps_list: list of dependencies that should be linked
"""
ddb_dirs = []
for previous_task in previous_tasks:
for d in deps_list:
source_dir = previous_task['dir']
if d == "DDB":
# Check that the same directory is not passed more than once (in principle it should not be needed.
# kept from previous version to avoid regressions)
if source_dir in ddb_dirs:
continue
ddb_dirs.append(source_dir)
self.ddb_filepath = self.link_ext(d, source_dir)
elif d == "GKK":
self.gkk_filepath = self.link_ext(d, source_dir)
elif d == "DDK":
self.ddk_filepaths.extend(self.link_ddk(source_dir))
else:
logger.warning("Extensions {} is not used in anaddb and will be ignored".format(d))
continue
def resolve_deps(self, fw_spec):
"""
Method to link the required deps for the current FW.
"""
#FIXME extract common method with AbinitTask
previous_fws = fw_spec.get('previous_fws', None)
if previous_fws is None:
msg = "No previous_fws data. Needed for dependecies {}.".format(str(self.deps))
logger.error(msg)
raise InitializationError(msg)
if isinstance(self.deps, (list, tuple)):
# check that there is only one previous_fws
if len(previous_fws) != 1 or len(previous_fws.values()[0]) != 1:
msg = "previous_fws does not contain a single reference. " \
"Specify the dependency for {}.".format(str(self.deps))
logger.error(msg)
raise InitializationError(msg)
self.resolve_deps_per_task_type(previous_fws.values()[0], self.deps)
else:
# deps should be a dict
for task_type, deps_list in self.deps.items():
if task_type not in previous_fws:
msg = "No previous_fws data for task type {}.".format(task_type)
logger.error(msg)
raise InitializationError(msg)
if len(previous_fws[task_type]) < 1:
msg = "Previous_fws does not contain any reference for task type {}, " \
"needed in reference {}. ".format(task_type, str(self.deps))
logger.error(msg)
raise InitializationError(msg)
elif len(previous_fws[task_type]) > 1:
msg = "Previous_fws contains more than a single reference for task type {}, " \
"needed in reference {}. Risk of overwriting.".format(task_type, str(self.deps))
logger.warning(msg)
self.resolve_deps_per_task_type(previous_fws[task_type], deps_list)
    def run_anaddb(self, fw_spec):
        """
        executes anaddb and waits for the end of the process.
        TODO: make it general in the same way as "run_abinit"
        the mpirun command is retrived from the mpirun_cmd keys in the fw_polity
        of the FWTaskManager, that can be overridden by the values in the spec.
        Note that in case of missing definition of this parameter, the values fall back to the default
        value of mpirun_cmd: 'mpirun', assuming that it is properly retrived
        from the $PATH. By default, anaddb is retrieved from the PATH.
        """
        def anaddb_process():
            # Build the command line: [mpirun [-n N]] anaddb, reading the files
            # file from stdin and redirecting stdout/stderr to the task files.
            command = []
            #consider the case of serial execution
            if self.ftm.fw_policy.mpirun_cmd:
                command.extend(self.ftm.fw_policy.mpirun_cmd.split())
                if 'mpi_ncpus' in fw_spec:
                    command.extend(['-n', str(fw_spec['mpi_ncpus'])])
            command.append(self.ftm.fw_policy.anaddb_cmd)
            with open(self.files_file.path, 'r') as stdin, open(self.log_file.path, 'w') as stdout, \
                open(self.stderr_file.path, 'w') as stderr:
                self.process = subprocess.Popen(command, stdin=stdin, stdout=stdout, stderr=stderr)
                (stdoutdata, stderrdata) = self.process.communicate()
            self.returncode = self.process.returncode
        # initialize returncode to avoid missing references in case of exception in the other thread
        self.returncode = None
        # Run anaddb in a separate thread so the main thread can enforce the walltime.
        thread = threading.Thread(target=anaddb_process)
        # the amount of time left plus a buffer of 2 minutes
        timeout = (self.walltime - (time.time() - self.start_time) - 120) if self.walltime else None
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # timeout expired: kill the anaddb process and fail with a WalltimeError
            self.process.terminate()
            thread.join()
            raise WalltimeError("The task couldn't be terminated within the time limit. Killed.")
def setup_task(self, fw_spec):
    """
    Sets up the requirements for the task:
        - sets several attributes
        - makes directories
        - writes input files
    """
    self.start_time = time.time()

    self.set_logger()

    # load the FWTaskManager to get configuration parameters
    self.ftm = self.get_fw_task_manager(fw_spec)

    # set walltime, if possible
    self.walltime = None
    if self.ftm.fw_policy.walltime_command:
        try:
            # The configured command is expected to print the remaining walltime
            # (in seconds) on stdout.
            p = subprocess.Popen(self.ftm.fw_policy.walltime_command, shell=True, stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()
            status = p.returncode
            if status == 0:
                self.walltime = int(out)
            else:
                logger.warning("Impossible to get the walltime: " + err)
        except Exception as e:
            # best effort: a failure here only disables the walltime handling
            logger.warning("Impossible to get the walltime: ", exc_info=True)

    # setting working directory and files
    self.set_workdir(os.getcwd())

    self.indir.makedirs()
    self.outdir.makedirs()
    self.tmpdir.makedirs()

    self.ddb_filepath = None
    self.gkk_filepath = None
    self.ddk_filepaths = []

    self.resolve_deps(fw_spec)

    # the DDB file is needed. If not set as a dependency, look for it in all the possible sources
    #FIXME check if we can remove this case and just rely on deps
    if not self.ddb_filepath:
        previous_fws = fw_spec['previous_fws']
        for task_class in DfptTask.__subclasses__() + [DfptTask]:
            task_type = task_class.task_type
            ddb_list = []
            for previous_task in previous_fws.get(task_type, []):
                ddb_list.append(self.link_ext("DDB", previous_task['dir'], strict=False))
        # NOTE(review): ddb_list is re-initialized for each task_class, so only the
        # DDBs of the last task type reach this check — confirm this is intended.
        if len(ddb_list) != 1:
            raise InitializationError("Cannot find a single DDB to run...")
        self.ddb_filepath = ddb_list[0]

    if self.ddk_filepaths:
        self.ddk_files_file.write("\n".join(self.ddk_filepaths))

    # Write files file and input file.
    if not self.files_file.exists:
        self.files_file.write(self.filesfile_string)
    self.input_file.write(str(self.anaddb_input))
def run_task(self, fw_spec):
    """
    Entry point called by FireWorks: sets up the calculation, runs anaddb and
    performs the analysis. The TaskHistory is always dumped to HISTORY_JSON,
    even when the task fails.

    Returns:
        The FWAction produced by task_analysis.
    """
    try:
        self.setup_task(fw_spec)
        self.run_anaddb(fw_spec)
        action = self.task_analysis(fw_spec)
    except BaseException as exc:
        # log the error in history and reraise
        self.history.log_error(exc)
        raise
    finally:
        # Always dump the history for automatic parsing of the folders.
        with open(HISTORY_JSON, "w") as f:
            # bugfix: sort_keys expects a bool; the original passed the literal 4
            # (truthy, so it worked by accident).
            json.dump(self.history, f, cls=MontyEncoder, indent=4, sort_keys=True)
    # bugfix: the original computed `action` but never returned it, so the
    # FWAction from task_analysis was silently discarded.
    return action
def task_analysis(self, fw_spec):
    """Check the anaddb return code and hand an empty FWAction back to FireWorks."""
    rc = self.returncode
    if rc != 0:
        raise RuntimeError("Return code different from 0: {}".format(rc))
    return FWAction()
def set_workdir(self, workdir):
    """Set the working directory and create the File/Directory handles used by the task."""
    self.workdir = os.path.abspath(workdir)
    wd = self.workdir

    # Files required for the execution.
    self.input_file = File(os.path.join(wd, INPUT_FILE_NAME))
    self.output_file = File(os.path.join(wd, OUTPUT_FILE_NAME))
    self.files_file = File(os.path.join(wd, FILES_FILE_NAME))
    self.log_file = File(os.path.join(wd, LOG_FILE_NAME))
    self.stderr_file = File(os.path.join(wd, STDERR_FILE_NAME))
    self.elphon_out_file = File(os.path.join(wd, ELPHON_OUTPUT_FILE_NAME))
    self.ddk_files_file = File(os.path.join(wd, DDK_FILES_FILE_NAME))
    # Produced by Abinit when nprocs > 1 and MPI_ABORT is called.
    self.mpiabort_file = File(os.path.join(wd, MPIABORTFILE))

    # Directories with input|output|temporary data.
    self.rundir = Directory(wd)
    self.indir = Directory(os.path.join(wd, INDIR_NAME))
    self.outdir = Directory(os.path.join(wd, OUTDIR_NAME))
    self.tmpdir = Directory(os.path.join(wd, TMPDIR_NAME))
@property
def filesfile_string(self):
    """String with the list of files and prefixes needed to execute ABINIT (anaddb)."""
    entries = [
        self.input_file.path,                     # 1) Path of the input file
        self.output_file.path,                    # 2) Path of the output file
        self.ddb_filepath,                        # 3) Input derivative database e.g. t13.ddb.in
        DUMMY_FILENAME,                           # 4) Ignored
        self.gkk_filepath or DUMMY_FILENAME,      # 5) Input elphon matrix elements (GKK file)
        self.elphon_out_file.path,                # 6) Base name for elphon output files e.g. t13
        # 7) File containing ddk filenames for elphon/transport.
        # NOTE(review): ddk_files_file is a File object, not a string — confirm
        # that joining it here is intended (it may need .path).
        self.ddk_files_file if self.ddk_filepaths else DUMMY_FILENAME,
    ]
    return "\n".join(entries)
@property
def phbst_path(self):
    """Absolute path of the run.abo_PHBST.nc file. Empty string if file is not present."""
    # Lazily cache the lookup once the file has appeared, so later calls skip
    # the directory scan.
    cached = getattr(self, "_phbst_path", None)
    if cached is not None:
        return cached
    path = self.rundir.has_abiext("PHBST.nc")
    if path:
        self._phbst_path = path
    return path
@property
def phdos_path(self):
    """Absolute path of the run.abo_PHDOS.nc file. Empty string if file is not present."""
    # Lazily cache the lookup once the file has appeared, so later calls skip
    # the directory scan.
    cached = getattr(self, "_phdos_path", None)
    if cached is not None:
        return cached
    path = self.rundir.has_abiext("PHDOS.nc")
    if path:
        self._phdos_path = path
    return path
@property
def anaddb_nc_path(self):
    """Absolute path of the anaddb.nc file. Empty string if file is not present."""
    # Lazily cache the lookup once the file has appeared, so later calls skip
    # the directory scan.
    cached = getattr(self, "_anaddbnc_path", None)
    if cached is not None:
        return cached
    path = self.rundir.has_abiext("anaddb.nc")
    if path:
        self._anaddbnc_path = path
    return path
def open_phbst(self):
    """
    Open PHBST file produced by Anaddb and returns |PhbstFile| object.
    Raise a PostProcessError exception if file could not be found or file is not readable.
    """
    path = self.phbst_path
    if not path:
        msg = "No PHBST file available for task {} in {}".format(self, self.outdir)
        logger.critical(msg)
        raise PostProcessError(msg)
    try:
        return PhbstFile(path)
    except Exception as exc:
        msg = "Exception while reading PHBST file at %s:\n%s" % (path, str(exc))
        logger.critical(msg)
        raise PostProcessError(msg)
def open_phdos(self):
    """
    Open PHDOS file produced by Anaddb and returns |PhdosFile| object.
    Raise a PostProcessError exception if file could not be found or file is not readable.
    """
    path = self.phdos_path
    if not path:
        msg = "No PHDOS file available for task {} in {}".format(self, self.outdir)
        logger.critical(msg)
        raise PostProcessError(msg)
    try:
        return PhdosFile(path)
    except Exception as exc:
        msg = "Exception while reading PHDOS file at %s:\n%s" % (path, str(exc))
        logger.critical(msg)
        raise PostProcessError(msg)
def open_anaddbnc(self):
    """
    Open anaddb.nc file produced by Anaddb and returns |AnaddbNcFile| object.
    Raise a PostProcessError exception if file could not be found or file is not readable.
    """
    path = self.anaddb_nc_path
    if not path:
        msg = "No anaddb.nc file available for task {} in {}".format(self, self.outdir)
        logger.critical(msg)
        raise PostProcessError(msg)
    try:
        return AnaddbNcFile(path)
    except Exception as exc:
        msg = "Exception while reading anaddb.nc file at %s:\n%s" % (path, str(exc))
        logger.critical(msg)
        raise PostProcessError(msg)
@explicit_serialize
class AutoparalTask(AbiFireTask):
    """
    Task to run the autoparal for many tasks of the same type already defined as children

    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: AutoparalTask
    """

    task_type = "autoparal"

    def __init__(self, abiinput, restart_info=None, handlers=None, deps=None, history=None, is_autoparal=True,
                 task_type=None, forward_spec=False, skip_spec_keys=None):
        """
        Note that the method still takes is_autoparal as input even if this is switched to True irrespectively of the
        provided value, as the point of the task is to run autoparal. This is done to preserve the API in cases where
        automatic generation of tasks is involved.

        skip_spec_keys allows to specify a list of keys to skip when forwarding the spec: 'wf_task_index' will
        be always skipped. All the reserved keys starting with _ will always be skipped as well.

        If abiinput is None, autoparal will not run and the preferred configuration will be chosen based on
        the options set in the manager.
        #FIXME find a better solution if this model is preserved
        """
        if handlers is None:
            handlers = []
        if history is None:
            history = []
        super().__init__(abiinput, restart_info=restart_info, handlers=handlers, is_autoparal=True,
                         deps=deps, history=history, task_type=task_type)
        self.forward_spec = forward_spec
        # bugfix: copy before appending — the original mutated the caller's list
        # as a side effect.
        skip_spec_keys = list(skip_spec_keys) if skip_spec_keys else []
        skip_spec_keys.append('wf_task_index')
        self.skip_spec_keys = skip_spec_keys

    def autoparal(self, fw_spec):
        """
        Runs the autoparal, if an input is available. Does not return a detour, updates the children fws instead.
        """
        # Copy the appropriate dependencies in the in dir. needed in some cases
        self.resolve_deps(fw_spec)

        if self.abiinput is None:
            optconf, qadapter_spec, qtk_qadapter = self.run_fake_autoparal(self.ftm)
        else:
            optconf, qadapter_spec, qtk_qadapter = self.run_autoparal(self.abiinput, os.path.abspath('.'), self.ftm)
        self.history.log_autoparal(optconf)
        # Push the optimal abinit variables into the abiinput of the children tasks.
        mod_spec = [{'_push_all': {'spec->_tasks->0->abiinput->abi_args': list(optconf.vars.items())}}]
        if self.forward_spec:
            # forward all the specs of the task
            new_spec = {k: v for k, v in fw_spec.items() if k.startswith('_') and k not in self.skip_spec_keys}
        else:
            new_spec = {}
        # set quadapter specification. Note that mpi_ncpus may be different from ntasks
        new_spec['_queueadapter'] = qadapter_spec
        new_spec['mpi_ncpus'] = optconf['mpi_ncpus']
        return FWAction(update_spec=new_spec, mod_spec=mod_spec)
##############################
# Generation tasks
##############################
@explicit_serialize
class GeneratePhononFlowFWAbinitTask(BasicAbinitTaskMixin, FireTaskBase):
    """
    Task that generates all the phonon perturbations based on the input of the previous ground state step

    .. rubric:: Inheritance Diagram
    .. inheritance-diagram:: GeneratePhononFlowFWAbinitTask
    """

    def __init__(self, phonon_factory, previous_task_type=ScfFWTask.task_type, handlers=None, with_autoparal=None, ddb_file=None):
        """
        Args:
            phonon_factory: factory that builds the phonon inputs from the GS input.
            previous_task_type: task type of the previous (ground state) step.
            handlers: list of handlers attached to the generated tasks.
            with_autoparal: if True run autoparal for the generated tasks; if None the
                value is taken from the fw_policy of the FWTaskManager at run time.
            ddb_file: optional path to an existing DDB file.
        """
        if handlers is None:
            handlers = []
        self.phonon_factory = phonon_factory
        self.previous_task_type = previous_task_type
        self.handlers = handlers
        self.with_autoparal = with_autoparal
        self.ddb_file = ddb_file

    def get_fws(self, multi_inp, task_class, deps, new_spec, ftm, nscf_fws=None):
        """
        Prepares the fireworks for a specific type of calculation

        Args:
            multi_inp: |MultiDataset| with the inputs that should be run
            task_class: class of the tasks that should be generated
            deps: dict with the dependencies already set for this type of task
            new_spec: spec for the new Fireworks that will be created
            ftm: a FWTaskManager
            nscf_fws: list of NSCF fws for the calculation of WFQ files, in case they are present.
                Will be linked if needed.

        Returns:
            (tuple): tuple containing:
                - fws (list): The list of new Fireworks.
                - fw_deps (dict): The dependencies related to these fireworks. Should be used when generating
                  the workflow.
        """
        if deps is None:
            deps = {}
        formula = multi_inp[0].structure.composition.reduced_formula
        fws = []
        fw_deps = defaultdict(list)
        autoparal_spec = {}
        for i, inp in enumerate(multi_inp):
            new_spec = dict(new_spec)
            start_task_index = 1
            if self.with_autoparal:
                if not autoparal_spec:
                    # Run autoparal only once; the resulting spec is reused for all
                    # the perturbations of this task type.
                    autoparal_dir = os.path.join(os.path.abspath('.'), "autoparal_{}_{}".format(task_class.__name__, str(i)))
                    optconf, qadapter_spec, qadapter = self.run_autoparal(inp, autoparal_dir, ftm)
                    autoparal_spec['_queueadapter'] = qadapter_spec
                    autoparal_spec['mpi_ncpus'] = optconf['mpi_ncpus']
                new_spec.update(autoparal_spec)
                inp.set_vars(optconf.vars)

            current_deps = dict(deps)
            parent_fw = None
            if nscf_fws:
                # Link the perturbation to the NSCF firework computing the WFQ at the same q-point.
                qpt = inp['qpt']
                for nscf_fw in nscf_fws:
                    if np.allclose(nscf_fw.tasks[0].abiinput['qpt'], qpt):
                        parent_fw = nscf_fw
                        current_deps[nscf_fw.tasks[0].task_type] = "WFQ"
                        break

            task = task_class(inp, handlers=self.handlers, deps=current_deps, is_autoparal=False)
            # this index is for the different task, each performing a different perturbation
            indexed_task_type = task_class.task_type + '_' + str(i)
            # this index is to index the restarts of the single task
            new_spec['wf_task_index'] = indexed_task_type + '_' + str(start_task_index)
            fw = Firework(task, spec=new_spec, name=(formula + '_' + indexed_task_type)[:15])
            fws.append(fw)
            if parent_fw is not None:
                fw_deps[parent_fw].append(fw)

        return fws, fw_deps

    def run_task(self, fw_spec):
        """Builds the phonon inputs and returns an FWAction detouring to the generated workflow."""
        previous_input = fw_spec.get('previous_fws', {}).get(self.previous_task_type, [{}])[0].get('input', None)
        if not previous_input:
            raise InitializationError('No input file available from task of type {}'.format(self.previous_task_type))

        # compatibility with old DECODE_MONTY=False
        if not isinstance(previous_input, AbinitInput):
            previous_input = AbinitInput.from_dict(previous_input)

        ftm = self.get_fw_task_manager(fw_spec)

        if self.with_autoparal is None:
            self.with_autoparal = ftm.fw_policy.autoparal

        if self.with_autoparal:
            if not ftm.has_task_manager():
                msg = 'No task manager available: autoparal could not be performed.'
                logger.error(msg)
                raise InitializationError(msg)

            # inject task manager
            tasks.set_user_config_taskmanager(ftm.task_manager)

        ph_inputs = self.phonon_factory.build_input(previous_input)

        initialization_info = fw_spec.get('initialization_info', {})
        initialization_info['input_factory'] = self.phonon_factory.as_dict()
        new_spec = dict(previous_fws=fw_spec['previous_fws'], initialization_info=initialization_info,
                        _preserve_fworker=True)
        if '_fworker' in fw_spec:
            new_spec['_fworker'] = fw_spec['_fworker']

        ph_q_pert_inputs = ph_inputs.filter_by_tags(atags.PH_Q_PERT)
        ddk_inputs = ph_inputs.filter_by_tags(atags.DDK)
        dde_inputs = ph_inputs.filter_by_tags(atags.DDE)
        bec_inputs = ph_inputs.filter_by_tags(atags.BEC)

        nscf_inputs = ph_inputs.filter_by_tags(atags.NSCF)

        nscf_fws = []
        if nscf_inputs is not None:
            # FIXME(review): this dict literal repeats the same key, so "WFK" is
            # silently overwritten by "DEN". If both extensions are needed the value
            # should probably be a list — confirm against the deps semantics.
            nscf_fws, nscf_fw_deps = self.get_fws(nscf_inputs, NscfWfqFWTask,
                                                  {self.previous_task_type: "WFK", self.previous_task_type: "DEN"}, new_spec, ftm)

        ph_fws = []
        # bugfix: ph_fw_deps was unbound when there were no PH_Q_PERT inputs,
        # raising a NameError at fws_deps.update(ph_fw_deps) below.
        ph_fw_deps = {}
        if ph_q_pert_inputs:
            ph_q_pert_inputs.set_vars(prtwf=-1)
            ph_fws, ph_fw_deps = self.get_fws(ph_q_pert_inputs, PhononTask, {self.previous_task_type: "WFK"}, new_spec,
                                              ftm, nscf_fws)

        ddk_fws = []
        if ddk_inputs:
            ddk_fws, ddk_fw_deps = self.get_fws(ddk_inputs, DdkTask, {self.previous_task_type: "WFK"}, new_spec, ftm)

        dde_fws = []
        if dde_inputs:
            dde_inputs.set_vars(prtwf=-1)
            dde_fws, dde_fw_deps = self.get_fws(dde_inputs, DdeTask,
                                                {self.previous_task_type: "WFK", DdkTask.task_type: "DDK"}, new_spec, ftm)

        bec_fws = []
        if bec_inputs:
            bec_inputs.set_vars(prtwf=-1)
            bec_fws, bec_fw_deps = self.get_fws(bec_inputs, BecTask,
                                                {self.previous_task_type: "WFK", DdkTask.task_type: "DDK"}, new_spec, ftm)

        mrgddb_spec = dict(new_spec)
        mrgddb_spec['wf_task_index'] = 'mrgddb'
        #FIXME import here to avoid circular imports.
        from abiflows.fireworks.utils.fw_utils import get_short_single_core_spec
        qadapter_spec = get_short_single_core_spec(ftm)
        mrgddb_spec['mpi_ncpus'] = 1
        mrgddb_spec['_queueadapter'] = qadapter_spec
        # Set a higher priority to favour the end of the WF
        #TODO improve the handling of the priorities
        mrgddb_spec['_priority'] = 10
        # add one for the scf that is linked to the mrgddbtask and will be merged as well
        num_ddbs_to_be_merged = len(ph_fws) + len(dde_fws) + len(bec_fws) + 1
        mrgddb_fw = Firework(MergeDdbAbinitTask(num_ddbs=num_ddbs_to_be_merged, delete_source_ddbs=False), spec=mrgddb_spec,
                             name=ph_inputs[0].structure.composition.reduced_formula+'_mergeddb')

        fws_deps = {}

        if ddk_fws:
            for ddk_fw in ddk_fws:
                # bugfix: both DDE and BEC tasks depend on DDK. The original
                # assigned dde_fws and then overwrote the entry with bec_fws,
                # losing the ddk->dde links when both were present.
                children = dde_fws + bec_fws
                if children:
                    fws_deps[ddk_fw] = children

        ddb_fws = dde_fws + ph_fws + bec_fws
        #TODO pass all the tasks to the MergeDdbTask for logging or easier retrieve of the DDK?
        for ddb_fw in ddb_fws:
            fws_deps[ddb_fw] = mrgddb_fw

        total_list_fws = ddb_fws+ddk_fws+[mrgddb_fw] + nscf_fws

        fws_deps.update(ph_fw_deps)

        ph_wf = Workflow(total_list_fws, fws_deps)

        stored_data = dict(finalized=True)

        return FWAction(stored_data=stored_data, detours=ph_wf)
#TODO old implementation of GeneratePiezoElasticFlowFWAbinitTask based on SRC. Needs to be rewritten.
# @explicit_serialize
# class GeneratePiezoElasticFlowFWAbinitTask(BasicAbinitTaskMixin, FireTaskBase):
# def __init__(self, piezo_elastic_factory=None, previous_scf_task_type=ScfFWTask.task_type,
# previous_ddk_task_type=DdkTask.task_type,
# handlers=None, validators=None, mrgddb_task_type='mrgddb-strains', rf_tol=None):
# if piezo_elastic_factory is None:
# self.piezo_elastic_factory = PiezoElasticFromGsFactory(rf_tol=rf_tol, rf_split=True)
# else:
# self.piezo_elastic_factory = piezo_elastic_factory
# self.previous_scf_task_type = previous_scf_task_type
# self.previous_ddk_task_type = previous_ddk_task_type
# self.handlers = handlers
# self.validators = validators
# self.mrgddb_task_type = mrgddb_task_type
# self.rf_tol = rf_tol
#
# def run_task(self, fw_spec):
# # Get the previous SCF input
# previous_scf_input = fw_spec.get('previous_fws', {}).get(self.previous_scf_task_type,
# [{}])[0].get('input', None)
# if not previous_scf_input:
# raise InitializationError('No input file available '
# 'from task of type {}'.format(self.previous_scf_task_type))
# #previous_scf_input = AbinitInput.from_dict(previous_scf_input)
#
# # # Get the previous DDK input
# # previous_ddk_input = fw_spec.get('previous_fws', {}).get(self.previous_ddk_task_type,
# # [{}])[0].get('input', None)
# # if not previous_ddk_input:
# # raise InitializationError('No input file available '
# # 'from task of type {}'.format(self.previous_ddk_task_type))
# # previous_ddk_input = AbinitInput.from_dict(previous_ddk_input)
#
# ftm = self.get_fw_task_manager(fw_spec)
# tasks._USER_CONFIG_TASKMANAGER = ftm.task_manager
# # if self.with_autoparal:
# # if not ftm.has_task_manager():
# # msg = 'No task manager available: autoparal could not be performed.'
# # logger.error(msg)
# # raise InitializationError(msg)
# #
# # # inject task manager
# # tasks._USER_CONFIG_TASKMANAGER = ftm.task_manager
#
# # Get the strain RF inputs
# piezo_elastic_inputs = self.piezo_elastic_factory.build_input(previous_scf_input)
# rf_strain_inputs = piezo_elastic_inputs.filter_by_tags(STRAIN)
#
# initialization_info = fw_spec.get('initialization_info', {})
# initialization_info['input_factory'] = self.piezo_elastic_factory.as_dict()
# new_spec = dict(previous_fws=fw_spec['previous_fws'], initialization_info=initialization_info)
#
# # Get the initial queue_adapter_updates
# queue_adapter_update = initialization_info.get('queue_adapter_update', None)
#
# # Create the SRC fireworks for each perturbation
# all_SRC_rf_fws = []
# total_list_fws = []
# fws_deps = {}
# rf_strain_handlers = self.handlers['_all'] if self.handlers is not None else []
# rf_strain_validators = self.validators['_all'] if self.validators is not None else []
# for istrain_pert, rf_strain_input in enumerate(rf_strain_inputs):
# SRC_rf_fws = createSRCFireworksOld(task_class=StrainPertTask, task_input=rf_strain_input, SRC_spec=new_spec,
# initialization_info=initialization_info,
# wf_task_index_prefix='rfstrains-pert-{:d}'.format(istrain_pert+1),
# handlers=rf_strain_handlers, validators=rf_strain_validators,
# deps={self.previous_scf_task_type: 'WFK',
# self.previous_ddk_task_type: 'DDK'},
# queue_adapter_update=queue_adapter_update)
# all_SRC_rf_fws.append(SRC_rf_fws)
# total_list_fws.extend(SRC_rf_fws['fws'])
# links_dict_update(links_dict=fws_deps, links_update=SRC_rf_fws['links_dict'])
#
# # Adding the MrgDdb Firework
# mrgddb_spec = dict(new_spec)
# mrgddb_spec['wf_task_index_prefix'] = 'mrgddb-rfstrains'
# mrgddb_spec['wf_task_index'] = mrgddb_spec['wf_task_index_prefix']
# mrgddb_spec = set_short_single_core_to_spec(mrgddb_spec)
# mrgddb_spec['_priority'] = 10
# num_ddbs_to_be_merged = len(all_SRC_rf_fws)
# mrgddb_fw = Firework(MergeDdbAbinitTask(num_ddbs=num_ddbs_to_be_merged, delete_source_ddbs=True,
# task_type= self.mrgddb_task_type),
# spec=mrgddb_spec,
# name=mrgddb_spec['wf_task_index'])
# total_list_fws.append(mrgddb_fw)
# #Adding the dependencies
# for src_fws in all_SRC_rf_fws:
# links_dict_update(links_dict=fws_deps, links_update={src_fws['check_fw']: mrgddb_fw})
#
# rf_strains_wf = Workflow(total_list_fws, fws_deps)
#
# return FWAction(detours=rf_strains_wf)
##############################
# Exceptions
##############################
class ErrorCode(object):
    """
    Error code to classify the errors
    """
    # These string values are stored in the serialized error documents
    # (see AbiFWError.to_dict), so they must remain stable.
    ERROR = 'Error'                                    # generic runtime error
    UNRECOVERABLE = 'Unrecoverable'                    # error that cannot be fixed by restarting
    UNCLASSIFIED = 'Unclassified'                      # error that could not be classified
    UNCONVERGED = 'Unconverged'                        # calculation did not converge (see UnconvergedError)
    UNCONVERGED_PARAMETERS = 'Unconverged_parameters'  # parameter convergence loop not finished
    INITIALIZATION = 'Initialization'                  # failure while setting up the task (see InitializationError)
    RESTART = 'Restart'                                # failure while setting up a restart (see RestartError)
    POSTPROCESS = 'Postprocess'                        # failure during post-processing (see PostProcessError)
    WALLTIME = 'Walltime'                              # task exceeded the walltime (see WalltimeError)
class AbiFWError(Exception):
    """Base class for all the abiflows exceptions."""

    def __init__(self, msg):
        super().__init__(msg)
        # Keep the message around so that to_dict can serialize it.
        self.msg = msg

    def to_dict(self):
        """Return a serializable summary (subclasses provide ERROR_CODE)."""
        return {"error_code": self.ERROR_CODE, "msg": self.msg}
class AbinitRuntimeError(AbiFWError):
    """
    Exception raised for errors during an Abinit calculation.

    Carries the errors and warnings extracted from the output files. When
    constructed from a task that has a report, all the information is taken from
    the report; otherwise the explicit arguments are used.
    """

    ERROR_CODE = ErrorCode.ERROR

    def __init__(self, task=None, msg=None, num_errors=None, num_warnings=None, errors=None, warnings=None):
        """
        Args:
            task: the abiflows Task
            msg: the error message
            num_errors: number of errors in the abinit execution. Only used if task doesn't have a report.
            num_warnings: number of warnings in the abinit execution. Only used if task doesn't have a report.
            errors: list of errors in the abinit execution. Only used if task doesn't have a report.
            warnings: list of warnings in the abinit execution. Only used if task doesn't have a report.
        """
        # This can handle both the cases of DECODE_MONTY=True and False (since it has a from_dict method).
        super().__init__(msg)
        self.task = task
        report = getattr(self.task, "report", None)
        if report is not None:
            self.num_errors = report.num_errors
            self.num_warnings = report.num_warnings
            self.errors = report.errors
            self.warnings = report.warnings
        else:
            self.num_errors = num_errors
            self.num_warnings = num_warnings
            self.errors = errors
            self.warnings = warnings
        self.msg = msg

    @pmg_serialize
    def to_dict(self):
        """Serialize the error information; errors/warnings via their as_dict forms."""
        d = {'num_errors': self.num_errors, 'num_warnings': self.num_warnings}
        if self.errors:
            d['errors'] = [error.as_dict() for error in self.errors]
        if self.warnings:
            d['warnings'] = [warning.as_dict() for warning in self.warnings]
        if self.msg:
            d['error_message'] = self.msg
        d['error_code'] = self.ERROR_CODE
        return d

    def as_dict(self):
        return self.to_dict()

    @classmethod
    def from_dict(cls, d):
        """Rebuild the exception from its dict representation."""
        dec = MontyDecoder()
        warnings = [dec.process_decoded(w) for w in d.get('warnings', [])]
        errors = [dec.process_decoded(e) for e in d.get('errors', [])]
        return cls(warnings=warnings, errors=errors, num_errors=d['num_errors'],
                   num_warnings=d['num_warnings'], msg=d.get('error_message'))
class UnconvergedError(AbinitRuntimeError):
    """
    Exception raised when a calculation didn't converge within the selected number of restarts.
    """

    ERROR_CODE = ErrorCode.UNCONVERGED

    def __init__(self, task=None, msg=None, num_errors=None, num_warnings=None, errors=None, warnings=None,
                 abiinput=None, restart_info=None, history=None):
        """
        If the task has a report all the information will be extracted from it, otherwise the arguments will be used.
        It contains information that can be used to further restart the job.

        Args:
            task: the abiflows Task
            msg: the error message
            num_errors: number of errors in the abinit execution. Only used if task doesn't have a report.
            num_warnings: number of warnings in the abinit execution. Only used if task doesn't have a report.
            errors: list of errors in the abinit execution. Only used if task doesn't have a report.
            warnings: list of warnings in the abinit execution. Only used if task doesn't have a report.
            abiinput: the last AbinitInput used.
            restart_info: the RestartInfo required to restart the job.
            history: a TaskHistory.
        """
        super().__init__(task, msg, num_errors, num_warnings, errors, warnings)
        # Everything needed to attempt a further restart of the job.
        self.abiinput = abiinput
        self.restart_info = restart_info
        self.history = history

    @pmg_serialize
    def to_dict(self):
        """Extend the base serialization with the restart information."""
        d = super().to_dict()
        for attr in ('abiinput', 'restart_info', 'history'):
            obj = getattr(self, attr)
            d[attr] = obj.as_dict() if obj else None
        return d

    @classmethod
    def from_dict(cls, d):
        """Rebuild the exception, decoding the nested restart objects when present."""
        dec = MontyDecoder()

        def decoded(key):
            value = d.get(key)
            return dec.process_decoded(value) if value is not None else None

        warnings = [dec.process_decoded(w) for w in d.get('warnings', [])]
        errors = [dec.process_decoded(e) for e in d.get('errors', [])]
        return cls(warnings=warnings, errors=errors, num_errors=d['num_errors'],
                   num_warnings=d['num_warnings'], msg=d['error_message'],
                   abiinput=decoded('abiinput'), restart_info=decoded('restart_info'),
                   history=decoded('history'))
class UnconvergedParametersError(UnconvergedError):
    """
    Exception raised when the iteration to converge some parameter didn't converge within the selected number
    of restarts.
    """
    # Distinguishes a parameter-convergence failure from a plain SCF/relaxation one.
    ERROR_CODE = ErrorCode.UNCONVERGED_PARAMETERS
class WalltimeError(AbiFWError):
    """
    Exception raised when the calculation didn't complete within the specified walltime.
    """
    ERROR_CODE = ErrorCode.WALLTIME
class InitializationError(AbiFWError):
    """
    Exception raised if errors are present during the initialization of the task, before abinit is started.
    """
    ERROR_CODE = ErrorCode.INITIALIZATION
class RestartError(InitializationError):
    """
    Exception raised if errors show up during the set up of the restart.
    """
    ERROR_CODE = ErrorCode.RESTART
class PostProcessError(AbiFWError):
    """
    Exception raised if problems are encountered during the post processing of the abinit calculation.
    """
    ERROR_CODE = ErrorCode.POSTPROCESS
##############################
# Other objects
##############################
class RestartInfo(MSONable):
    """
    Object that contains the information about the restart of a task.
    """

    def __init__(self, previous_dir, reset=False, num_restarts=0):
        self.previous_dir = previous_dir
        self.reset = reset
        self.num_restarts = num_restarts

    @pmg_serialize
    def as_dict(self):
        """Serialize the restart information."""
        return {"previous_dir": self.previous_dir, "reset": self.reset,
                "num_restarts": self.num_restarts}

    @classmethod
    def from_dict(cls, d):
        return cls(previous_dir=d["previous_dir"], reset=d["reset"],
                   num_restarts=d["num_restarts"])

    @property
    def prev_outdir(self):
        """
        A Directory object pointing to the outdir of the previous step.
        """
        return Directory(os.path.join(self.previous_dir, OUTDIR_NAME))

    @property
    def prev_indir(self):
        """
        A Directory object pointing to the indir of the previous step.
        """
        return Directory(os.path.join(self.previous_dir, INDIR_NAME))
class ElasticComplianceTensor(Has_Structure):
    """This object is used to store the elastic and compliance tensors."""

    def __init__(self, elastic_tensor, compliance_tensor, structure, additional_info=None):
        """
        Args:
            elastic_tensor: (6, 6) array with the elastic tensor in Cartesian coordinates
            compliance_tensor: (6, 6) array with the compliance tensor in Cartesian coordinates
            structure: |Structure| object.
            additional_info: optional dict with extra metadata (e.g. the tensor_type).
        """
        self._structure = structure
        self.elastic_tensor = elastic_tensor
        self.compliance_tensor = compliance_tensor
        self.additional_info = additional_info

    @property
    def structure(self):
        """|Structure| object."""
        return self._structure

    def __repr__(self):
        # NOTE(review): to_string is not defined in this class; presumably it is
        # provided by Has_Structure — confirm, otherwise repr() raises AttributeError.
        return self.to_string()

    @classmethod
    def from_ec_nc_file(cls, ec_nc_file, tensor_type='relaxed_ion'):
        """
        Build the object by reading the constants from an anaddb netcdf output file.

        Args:
            ec_nc_file: path to the netcdf file.
            tensor_type: which tensors to read; one of 'relaxed_ion',
                'clamped_ion' or 'relaxed_ion_stress_corrected'.

        Raises:
            ValueError: if tensor_type is not one of the allowed values.
        """
        with NetcdfReader(ec_nc_file) as nc_reader:
            if tensor_type == 'relaxed_ion':
                ec = np.array(nc_reader.read_variable('elastic_constants_relaxed_ion'))
                compl = np.array(nc_reader.read_variable('compliance_constants_relaxed_ion'))
            elif tensor_type == 'clamped_ion':
                ec = np.array(nc_reader.read_variable('elastic_constants_clamped_ion'))
                compl = np.array(nc_reader.read_variable('compliance_constants_clamped_ion'))
            elif tensor_type == 'relaxed_ion_stress_corrected':
                ec = np.array(nc_reader.read_variable('elastic_constants_relaxed_ion_stress_corrected'))
                compl = np.array(nc_reader.read_variable('compliance_constants_relaxed_ion_stress_corrected'))
            else:
                raise ValueError('tensor_type "{0}" not allowed'.format(tensor_type))

        #TODO: add the structure object!
        return cls(elastic_tensor=ec, compliance_tensor=compl, structure=None,
                   additional_info={'tensor_type': tensor_type})

    def as_dict(self):
        """Dict representation of the tensors, structure and additional info."""
        return {'elastic_tensor': self.elastic_tensor, 'compliance_tensor': self.compliance_tensor,
                'structure': self.structure.as_dict() if self.structure is not None else None,
                'additional_info': self.additional_info}

    def extended_dict(self):
        """
        Same content as as_dict, extended with the polycrystalline averages:
        Voigt, Reuss and Voigt-Reuss-Hill (VRH) bulk (K) and shear (G) moduli,
        the universal elastic anisotropy index and the isotropic Poisson ratio.
        """
        dd = self.as_dict()
        # Voigt average of the bulk modulus: K_V = (C11+C22+C33 + 2(C12+C23+C31)) / 9
        K_Voigt = (self.elastic_tensor[0, 0] + self.elastic_tensor[1, 1] + self.elastic_tensor[2, 2] +
                   2.0*self.elastic_tensor[0, 1] + 2.0*self.elastic_tensor[1, 2] + 2.0*self.elastic_tensor[2, 0]) / 9.0
        # Reuss average of the bulk modulus: K_R = 1 / (S11+S22+S33 + 2(S12+S23+S31))
        K_Reuss = 1.0 / (self.compliance_tensor[0, 0] + self.compliance_tensor[1, 1] + self.compliance_tensor[2, 2] +
                         2.0*self.compliance_tensor[0, 1] + 2.0*self.compliance_tensor[1, 2] +
                         2.0*self.compliance_tensor[2, 0])
        # Voigt average of the shear modulus.
        G_Voigt = (self.elastic_tensor[0, 0] + self.elastic_tensor[1, 1] + self.elastic_tensor[2, 2] -
                   self.elastic_tensor[0, 1] - self.elastic_tensor[1, 2] - self.elastic_tensor[2, 0] +
                   3.0*self.elastic_tensor[3, 3] + 3.0*self.elastic_tensor[4, 4] + 3.0*self.elastic_tensor[5, 5]) / 15.0
        # Reuss average of the shear modulus.
        G_Reuss = 15.0 / (4.0*self.compliance_tensor[0, 0] + 4.0*self.compliance_tensor[1, 1] +
                          4.0*self.compliance_tensor[2, 2] - 4.0*self.compliance_tensor[0, 1] -
                          4.0*self.compliance_tensor[1, 2] - 4.0*self.compliance_tensor[2, 0] +
                          3.0*self.compliance_tensor[3, 3] + 3.0*self.compliance_tensor[4, 4] +
                          3.0*self.compliance_tensor[5, 5])
        # Voigt-Reuss-Hill averages (arithmetic mean of the two bounds).
        K_VRH = (K_Voigt + K_Reuss) / 2.0
        G_VRH = (G_Voigt + G_Reuss) / 2.0
        # Universal elastic anisotropy index: A_U = 5*G_V/G_R + K_V/K_R - 6
        universal_elastic_anisotropy = 5.0*G_Voigt/G_Reuss + K_Voigt/K_Reuss - 6.0
        # Isotropic Poisson ratio: nu = (3K - 2G) / (6K + 2G) with VRH averages.
        isotropic_poisson_ratio = (3.0*K_VRH - 2.0*G_VRH) / (6.0*K_VRH + 2.0*G_VRH)
        dd['K_Voigt'] = K_Voigt
        dd['G_Voigt'] = G_Voigt
        dd['K_Reuss'] = K_Reuss
        dd['G_Reuss'] = G_Reuss
        dd['K_VRH'] = K_VRH
        dd['G_VRH'] = G_VRH
        # NOTE(review): the key keeps the historical typo ("anistropy"); stored
        # documents depend on it, so it must not be renamed silently.
        dd['universal_elastic_anistropy'] = universal_elastic_anisotropy
        dd['isotropic_poisson_ratio'] = isotropic_poisson_ratio
        return dd

    @classmethod
    def from_dict(cls, dd):
        """Rebuild the object from the dict produced by as_dict."""
        return cls(elastic_tensor=dd['elastic_tensor'], compliance_tensor=dd['compliance_tensor'],
                   structure=dd['structure'] if dd['structure'] is not None else None,
                   additional_info=dd['additional_info'])

    def get_pmg_elastic_tensor(self):
        """
        Converts to a pymatgen :class:`ElasticTensor` object.
        """
        return ElasticTensor.from_voigt(self.elastic_tensor)
| [
"abipy.electrons.gsr.GsrFile",
"os.remove",
"abipy.dfpt.anaddbnc.AnaddbNcFile",
"abipy.flowtk.netcdf.NetcdfReader",
"numpy.allclose",
"abipy.flowtk.utils.Directory",
"collections.defaultdict",
"logging.Formatter",
"os.path.isfile",
"os.path.islink",
"glob.glob",
"shutil.rmtree",
"abipy.abio.... | [((1887, 1914), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1904, 1914), False, 'import logging\n'), ((13976, 14017), 'collections.namedtuple', 'namedtuple', (['"""Prefix"""', '"""idata odata tdata"""'], {}), "('Prefix', 'idata odata tdata')\n", (13986, 14017), False, 'from collections import namedtuple, defaultdict\n'), ((2527, 2541), 'monty.json.MontyDecoder', 'MontyDecoder', ([], {}), '()\n', (2539, 2541), False, 'from monty.json import MontyEncoder, MontyDecoder, MSONable\n'), ((7048, 7078), 'abipy.flowtk.tasks.ParalHints', 'ParalHints', (['{}', 'fake_conf_list'], {}), '({}, fake_conf_list)\n', (7058, 7078), False, 'from abipy.flowtk.tasks import ParalHints\n'), ((8141, 8173), 'logging.FileHandler', 'logging.FileHandler', (['"""abipy.log"""'], {}), "('abipy.log')\n", (8160, 8173), False, 'import logging\n'), ((9024, 9079), 'os.path.join', 'os.path.join', (['source_dir', "(self.prefix.odata + '_' + ext)"], {}), "(source_dir, self.prefix.odata + '_' + ext)\n", (9036, 9079), False, 'import os\n'), ((9163, 9220), 'os.path.join', 'os.path.join', (['self.workdir', "(self.prefix.idata + '_' + ext)"], {}), "(self.workdir, self.prefix.idata + '_' + ext)\n", (9175, 9220), False, 'import os\n'), ((12920, 12982), 'os.path.join', 'os.path.join', (['self.workdir', "(self.prefix.idata + '_' + ext_full)"], {}), "(self.workdir, self.prefix.idata + '_' + ext_full)\n", (12932, 12982), False, 'import os\n'), ((17082, 17102), 'abiflows.fireworks.utils.task_history.TaskHistory', 'TaskHistory', (['history'], {}), '(history)\n', (17093, 17102), False, 'from abiflows.fireworks.utils.task_history import TaskHistory\n'), ((17293, 17317), 'os.path.abspath', 'os.path.abspath', (['workdir'], {}), '(workdir)\n', (17308, 17317), False, 'import os\n'), ((23559, 23598), 'threading.Thread', 'threading.Thread', ([], {'target': 'abinit_process'}), '(target=abinit_process)\n', (23575, 23598), False, 'import threading\n'), ((23789, 23800), 
'time.time', 'time.time', ([], {}), '()\n', (23798, 23800), False, 'import time\n'), ((24660, 24681), 'abipy.flowtk.events.EventsParser', 'events.EventsParser', ([], {}), '()\n', (24679, 24681), False, 'from abipy.flowtk import events, tasks\n'), ((36285, 36324), 'fireworks.core.firework.Firework', 'Firework', (['[restart_task]'], {'spec': 'new_spec'}), '([restart_task], spec=new_spec)\n', (36293, 36324), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n'), ((38308, 38319), 'time.time', 'time.time', ([], {}), '()\n', (38317, 38319), False, 'import time\n'), ((47153, 47179), 'fireworks.core.firework.Firework', 'Firework', (['[task]', 'new_spec'], {}), '([task], new_spec)\n', (47161, 47179), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n'), ((47196, 47220), 'fireworks.core.firework.FWAction', 'FWAction', ([], {'detours': 'new_fw'}), '(detours=new_fw)\n', (47204, 47220), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n'), ((53436, 53474), 'os.path.join', 'os.path.join', (['self.indir.path', 'in_file'], {}), '(self.indir.path, in_file)\n', (53448, 53474), False, 'import os\n'), ((54723, 54749), 'shutil.copy', 'shutil.copy', (['in_file', 'dest'], {}), '(in_file, dest)\n', (54734, 54749), False, 'import shutil\n'), ((60691, 60729), 'os.path.join', 'os.path.join', (['self.indir.path', 'in_file'], {}), '(self.indir.path, in_file)\n', (60703, 60729), False, 'import os\n'), ((67114, 67152), 'os.path.join', 'os.path.join', (['self.indir.path', 'in_file'], {}), '(self.indir.path, in_file)\n', (67126, 67152), False, 'import os\n'), ((79214, 79235), 'abipy.flowtk.events.EventsParser', 'events.EventsParser', ([], {}), '()\n', (79233, 79235), False, 'from abipy.flowtk import events, tasks\n'), ((80081, 80094), 'abiflows.fireworks.utils.task_history.TaskHistory', 'TaskHistory', ([], {}), '()\n', (80092, 80094), False, 'from abiflows.fireworks.utils.task_history 
import TaskHistory\n'), ((88053, 88073), 'abiflows.fireworks.utils.task_history.TaskHistory', 'TaskHistory', (['history'], {}), '(history)\n', (88064, 88073), False, 'from abiflows.fireworks.utils.task_history import TaskHistory\n'), ((93682, 93721), 'threading.Thread', 'threading.Thread', ([], {'target': 'anaddb_process'}), '(target=anaddb_process)\n', (93698, 93721), False, 'import threading\n'), ((94363, 94374), 'time.time', 'time.time', ([], {}), '()\n', (94372, 94374), False, 'import time\n'), ((97278, 97288), 'fireworks.core.firework.FWAction', 'FWAction', ([], {}), '()\n', (97286, 97288), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n'), ((97391, 97415), 'os.path.abspath', 'os.path.abspath', (['workdir'], {}), '(workdir)\n', (97406, 97415), False, 'import os\n'), ((98240, 98263), 'abipy.flowtk.utils.Directory', 'Directory', (['self.workdir'], {}), '(self.workdir)\n', (98249, 98263), False, 'from abipy.flowtk.utils import Directory, File\n'), ((105510, 105559), 'fireworks.core.firework.FWAction', 'FWAction', ([], {'update_spec': 'new_spec', 'mod_spec': 'mod_spec'}), '(update_spec=new_spec, mod_spec=mod_spec)\n', (105518, 105559), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n'), ((107416, 107433), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (107427, 107433), False, 'from collections import namedtuple, defaultdict\n'), ((110049, 110100), 'abipy.flowtk.tasks.set_user_config_taskmanager', 'tasks.set_user_config_taskmanager', (['ftm.task_manager'], {}), '(ftm.task_manager)\n', (110082, 110100), False, 'from abipy.flowtk import events, tasks\n'), ((112371, 112402), 'abiflows.fireworks.utils.fw_utils.get_short_single_core_spec', 'get_short_single_core_spec', (['ftm'], {}), '(ftm)\n', (112397, 112402), False, 'from abiflows.fireworks.utils.fw_utils import get_short_single_core_spec\n'), ((113597, 113631), 'fireworks.core.firework.Workflow', 'Workflow', 
(['total_list_fws', 'fws_deps'], {}), '(total_list_fws, fws_deps)\n', (113605, 113631), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n'), ((113692, 113740), 'fireworks.core.firework.FWAction', 'FWAction', ([], {'stored_data': 'stored_data', 'detours': 'ph_wf'}), '(stored_data=stored_data, detours=ph_wf)\n', (113700, 113740), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n'), ((122411, 122425), 'monty.json.MontyDecoder', 'MontyDecoder', ([], {}), '()\n', (122423, 122425), False, 'from monty.json import MontyEncoder, MontyDecoder, MSONable\n'), ((124642, 124656), 'monty.json.MontyDecoder', 'MontyDecoder', ([], {}), '()\n', (124654, 124656), False, 'from monty.json import MontyEncoder, MontyDecoder, MSONable\n'), ((132232, 132277), 'pymatgen.analysis.elasticity.ElasticTensor.from_voigt', 'ElasticTensor.from_voigt', (['self.elastic_tensor'], {}), '(self.elastic_tensor)\n', (132256, 132277), False, 'from pymatgen.analysis.elasticity import ElasticTensor\n'), ((2076, 2113), 'inspect.getfullargspec', 'inspect.getfullargspec', (['self.__init__'], {}), '(self.__init__)\n', (2098, 2113), False, 'import inspect\n'), ((3081, 3125), 'abiflows.fireworks.utils.fw_utils.FWTaskManager.from_file', 'FWTaskManager.from_file', (["fw_spec['ftm_file']"], {}), "(fw_spec['ftm_file'])\n", (3104, 3125), False, 'from abiflows.fireworks.utils.fw_utils import FWTaskManager\n'), ((3158, 3190), 'abiflows.fireworks.utils.fw_utils.FWTaskManager.from_user_config', 'FWTaskManager.from_user_config', ([], {}), '()\n', (3188, 3190), False, 'from abiflows.fireworks.utils.fw_utils import FWTaskManager\n'), ((4402, 4447), 'os.path.join', 'os.path.join', (['autoparal_dir', '"""autoparal.json"""'], {}), "(autoparal_dir, 'autoparal.json')\n", (4414, 4447), False, 'import os\n'), ((8207, 8246), 'logging.Formatter', 'logging.Formatter', (['logging.BASIC_FORMAT'], {}), '(logging.BASIC_FORMAT)\n', (8224, 8246), False, 'import 
logging\n'), ((9236, 9258), 'os.path.exists', 'os.path.exists', (['source'], {}), '(source)\n', (9250, 9258), False, 'import os\n'), ((9386, 9408), 'os.path.exists', 'os.path.exists', (['source'], {}), '(source)\n', (9400, 9408), False, 'import os\n'), ((9439, 9461), 'os.path.exists', 'os.path.exists', (['source'], {}), '(source)\n', (9453, 9461), False, 'import os\n'), ((11001, 11038), 'os.path.join', 'os.path.join', (['source_dir', 'OUTDIR_NAME'], {}), '(source_dir, OUTDIR_NAME)\n', (11013, 11038), False, 'import os\n'), ((17394, 17437), 'os.path.join', 'os.path.join', (['self.workdir', 'INPUT_FILE_NAME'], {}), '(self.workdir, INPUT_FILE_NAME)\n', (17406, 17437), False, 'import os\n'), ((17471, 17515), 'os.path.join', 'os.path.join', (['self.workdir', 'OUTPUT_FILE_NAME'], {}), '(self.workdir, OUTPUT_FILE_NAME)\n', (17483, 17515), False, 'import os\n'), ((17548, 17591), 'os.path.join', 'os.path.join', (['self.workdir', 'FILES_FILE_NAME'], {}), '(self.workdir, FILES_FILE_NAME)\n', (17560, 17591), False, 'import os\n'), ((17622, 17663), 'os.path.join', 'os.path.join', (['self.workdir', 'LOG_FILE_NAME'], {}), '(self.workdir, LOG_FILE_NAME)\n', (17634, 17663), False, 'import os\n'), ((17697, 17741), 'os.path.join', 'os.path.join', (['self.workdir', 'STDERR_FILE_NAME'], {}), '(self.workdir, STDERR_FILE_NAME)\n', (17709, 17741), False, 'import os\n'), ((17848, 17888), 'os.path.join', 'os.path.join', (['self.workdir', 'MPIABORTFILE'], {}), '(self.workdir, MPIABORTFILE)\n', (17860, 17888), False, 'import os\n'), ((17978, 18016), 'os.path.join', 'os.path.join', (['self.workdir', 'INDIR_NAME'], {}), '(self.workdir, INDIR_NAME)\n', (17990, 18016), False, 'import os\n'), ((18050, 18089), 'os.path.join', 'os.path.join', (['self.workdir', 'OUTDIR_NAME'], {}), '(self.workdir, OUTDIR_NAME)\n', (18062, 18089), False, 'import os\n'), ((18123, 18162), 'os.path.join', 'os.path.join', (['self.workdir', 'TMPDIR_NAME'], {}), '(self.workdir, TMPDIR_NAME)\n', (18135, 18162), False, 
'import os\n'), ((32810, 32847), 'inspect.getfullargspec', 'inspect.getfullargspec', (['self.__init__'], {}), '(self.__init__)\n', (32832, 32847), False, 'import inspect\n'), ((46432, 46452), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (46447, 46452), False, 'import os\n'), ((52253, 52309), 'os.path.join', 'os.path.join', (['self.restart_info.previous_dir', 'INDIR_NAME'], {}), '(self.restart_info.previous_dir, INDIR_NAME)\n', (52265, 52309), False, 'import os\n'), ((52331, 52354), 'os.listdir', 'os.listdir', (['prev_indata'], {}), '(prev_indata)\n', (52341, 52354), False, 'import os\n'), ((53487, 53507), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (53501, 53507), False, 'import os\n'), ((53791, 53822), 'shutil.copyfile', 'shutil.copyfile', (['out_file', 'dest'], {}), '(out_file, dest)\n', (53806, 53822), False, 'import shutil\n'), ((54514, 54539), 'os.path.basename', 'os.path.basename', (['in_file'], {}), '(in_file)\n', (54530, 54539), False, 'import os\n'), ((54553, 54573), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (54567, 54573), False, 'import os\n'), ((56400, 56417), 'abipy.electrons.gsr.GsrFile', 'GsrFile', (['gsr_path'], {}), '(gsr_path)\n', (56407, 56417), False, 'from abipy.electrons.gsr import GsrFile\n'), ((58784, 58804), 'abipy.flowtk.utils.irdvars_for_ext', 'irdvars_for_ext', (['ext'], {}), '(ext)\n', (58799, 58804), False, 'from abipy.flowtk.utils import irdvars_for_ext\n'), ((60054, 60076), 'abipy.flowtk.utils.irdvars_for_ext', 'irdvars_for_ext', (['"""WFK"""'], {}), "('WFK')\n", (60069, 60076), False, 'from abipy.flowtk.utils import irdvars_for_ext\n'), ((60742, 60762), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (60756, 60762), False, 'import os\n'), ((61046, 61077), 'shutil.copyfile', 'shutil.copyfile', (['out_file', 'dest'], {}), '(out_file, dest)\n', (61061, 61077), False, 'import shutil\n'), ((67165, 67185), 'os.path.exists', 'os.path.exists', (['dest'], {}), 
'(dest)\n', (67179, 67185), False, 'import os\n'), ((67469, 67500), 'shutil.copyfile', 'shutil.copyfile', (['out_file', 'dest'], {}), '(out_file, dest)\n', (67484, 67500), False, 'import shutil\n'), ((69093, 69116), 'abipy.electrons.gw.SigresFile', 'SigresFile', (['sigres_path'], {}), '(sigres_path)\n', (69103, 69116), False, 'from abipy.electrons.gw import SigresFile\n'), ((70534, 70556), 'abipy.flowtk.utils.irdvars_for_ext', 'irdvars_for_ext', (['"""1WF"""'], {}), "('1WF')\n", (70549, 70556), False, 'from abipy.flowtk.utils import irdvars_for_ext\n'), ((72193, 72228), 'os.symlink', 'os.symlink', (['f.path', "(f.path + '_DDK')"], {}), "(f.path, f.path + '_DDK')\n", (72203, 72228), False, 'import os\n'), ((79157, 79195), 'os.path.join', 'os.path.join', (['self.workdir', 'ofile_name'], {}), '(self.workdir, ofile_name)\n', (79169, 79195), False, 'import os\n'), ((79906, 79945), 'os.path.join', 'os.path.join', (['self.workdir', 'OUTDIR_NAME'], {}), '(self.workdir, OUTDIR_NAME)\n', (79918, 79945), False, 'import os\n'), ((80327, 80412), 'abipy.flowtk.wrappers.Mrgddb', 'Mrgddb', ([], {'manager': 'ftm.task_manager', 'executable': 'ftm.fw_policy.mrgddb_cmd', 'verbose': '(0)'}), '(manager=ftm.task_manager, executable=ftm.fw_policy.mrgddb_cmd, verbose=0\n )\n', (80333, 80412), False, 'from abipy.flowtk.wrappers import Mrgddb\n'), ((81321, 81371), 'os.path.join', 'os.path.join', (['self.workdir', 'OUTDIR_NAME', '"""out_DDB"""'], {}), "(self.workdir, OUTDIR_NAME, 'out_DDB')\n", (81333, 81371), False, 'import os\n'), ((84416, 84468), 'fireworks.core.firework.FWAction', 'FWAction', ([], {'stored_data': 'stored_data', 'mod_spec': 'mod_spec'}), '(stored_data=stored_data, mod_spec=mod_spec)\n', (84424, 84468), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n'), ((95292, 95303), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (95301, 95303), False, 'import os\n'), ((97492, 97535), 'os.path.join', 'os.path.join', (['self.workdir', 
'INPUT_FILE_NAME'], {}), '(self.workdir, INPUT_FILE_NAME)\n', (97504, 97535), False, 'import os\n'), ((97569, 97613), 'os.path.join', 'os.path.join', (['self.workdir', 'OUTPUT_FILE_NAME'], {}), '(self.workdir, OUTPUT_FILE_NAME)\n', (97581, 97613), False, 'import os\n'), ((97646, 97689), 'os.path.join', 'os.path.join', (['self.workdir', 'FILES_FILE_NAME'], {}), '(self.workdir, FILES_FILE_NAME)\n', (97658, 97689), False, 'import os\n'), ((97720, 97761), 'os.path.join', 'os.path.join', (['self.workdir', 'LOG_FILE_NAME'], {}), '(self.workdir, LOG_FILE_NAME)\n', (97732, 97761), False, 'import os\n'), ((97795, 97839), 'os.path.join', 'os.path.join', (['self.workdir', 'STDERR_FILE_NAME'], {}), '(self.workdir, STDERR_FILE_NAME)\n', (97807, 97839), False, 'import os\n'), ((97877, 97928), 'os.path.join', 'os.path.join', (['self.workdir', 'ELPHON_OUTPUT_FILE_NAME'], {}), '(self.workdir, ELPHON_OUTPUT_FILE_NAME)\n', (97889, 97928), False, 'import os\n'), ((97965, 98012), 'os.path.join', 'os.path.join', (['self.workdir', 'DDK_FILES_FILE_NAME'], {}), '(self.workdir, DDK_FILES_FILE_NAME)\n', (97977, 98012), False, 'import os\n'), ((98119, 98159), 'os.path.join', 'os.path.join', (['self.workdir', 'MPIABORTFILE'], {}), '(self.workdir, MPIABORTFILE)\n', (98131, 98159), False, 'import os\n'), ((98295, 98333), 'os.path.join', 'os.path.join', (['self.workdir', 'INDIR_NAME'], {}), '(self.workdir, INDIR_NAME)\n', (98307, 98333), False, 'import os\n'), ((98367, 98406), 'os.path.join', 'os.path.join', (['self.workdir', 'OUTDIR_NAME'], {}), '(self.workdir, OUTDIR_NAME)\n', (98379, 98406), False, 'import os\n'), ((98440, 98479), 'os.path.join', 'os.path.join', (['self.workdir', 'TMPDIR_NAME'], {}), '(self.workdir, TMPDIR_NAME)\n', (98452, 98479), False, 'import os\n'), ((101101, 101127), 'abipy.dfpt.phonons.PhbstFile', 'PhbstFile', (['self.phbst_path'], {}), '(self.phbst_path)\n', (101110, 101127), False, 'from abipy.dfpt.phonons import PhbstFile, PhdosFile\n'), ((101777, 101803), 
'abipy.dfpt.phonons.PhdosFile', 'PhdosFile', (['self.phdos_path'], {}), '(self.phdos_path)\n', (101786, 101803), False, 'from abipy.dfpt.phonons import PhbstFile, PhdosFile\n'), ((102471, 102504), 'abipy.dfpt.anaddbnc.AnaddbNcFile', 'AnaddbNcFile', (['self.anaddb_nc_path'], {}), '(self.anaddb_nc_path)\n', (102483, 102504), False, 'from abipy.dfpt.anaddbnc import AnaddbNcFile\n'), ((108930, 109006), 'fireworks.core.firework.Firework', 'Firework', (['task'], {'spec': 'new_spec', 'name': "(formula + '_' + indexed_task_type)[:15]"}), "(task, spec=new_spec, name=(formula + '_' + indexed_task_type)[:15])\n", (108938, 109006), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n'), ((109582, 109619), 'abipy.abio.inputs.AbinitInput.from_dict', 'AbinitInput.from_dict', (['previous_input'], {}), '(previous_input)\n', (109603, 109619), False, 'from abipy.abio.inputs import AbinitInput\n'), ((127370, 127414), 'os.path.join', 'os.path.join', (['self.previous_dir', 'OUTDIR_NAME'], {}), '(self.previous_dir, OUTDIR_NAME)\n', (127382, 127414), False, 'import os\n'), ((127578, 127621), 'os.path.join', 'os.path.join', (['self.previous_dir', 'INDIR_NAME'], {}), '(self.previous_dir, INDIR_NAME)\n', (127590, 127621), False, 'import os\n'), ((128548, 128572), 'abipy.flowtk.netcdf.NetcdfReader', 'NetcdfReader', (['ec_nc_file'], {}), '(ec_nc_file)\n', (128560, 128572), False, 'from abipy.flowtk.netcdf import NetcdfReader\n'), ((4560, 4593), 'os.path.join', 'os.path.join', (['autoparal_dir', 'name'], {}), '(autoparal_dir, name)\n', (4572, 4593), False, 'import os\n'), ((4613, 4632), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (4626, 4632), False, 'import os\n'), ((4909, 4956), 'os.path.join', 'os.path.join', (['autoparal_dir', '"""autoparal_backup"""'], {}), "(autoparal_dir, 'autoparal_backup')\n", (4921, 4956), False, 'import os\n'), ((5103, 5136), 'os.path.join', 'os.path.join', (['autoparal_dir', 'name'], {}), '(autoparal_dir, 
name)\n', (5115, 5136), False, 'import os\n'), ((5163, 5203), 'os.path.join', 'os.path.join', (['autoparal_backup_dir', 'name'], {}), '(autoparal_backup_dir, name)\n', (5175, 5203), False, 'import os\n'), ((7294, 7305), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7303, 7305), False, 'import os\n'), ((8256, 8295), 'logging.getLogger', 'logging.getLogger', (['"""pymatgen.io.abinit"""'], {}), "('pymatgen.io.abinit')\n", (8273, 8295), False, 'import logging\n'), ((8328, 8354), 'logging.getLogger', 'logging.getLogger', (['"""abipy"""'], {}), "('abipy')\n", (8345, 8354), False, 'import logging\n'), ((8387, 8416), 'logging.getLogger', 'logging.getLogger', (['"""abiflows"""'], {}), "('abiflows')\n", (8404, 8416), False, 'import logging\n'), ((9898, 9918), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (9912, 9918), False, 'import os\n'), ((9995, 10024), 'shutil.copyfile', 'shutil.copyfile', (['source', 'dest'], {}), '(source, dest)\n', (10010, 10024), False, 'import shutil\n'), ((10059, 10083), 'os.symlink', 'os.symlink', (['source', 'dest'], {}), '(source, dest)\n', (10069, 10083), False, 'import os\n'), ((13181, 13201), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (13195, 13201), False, 'import os\n'), ((13278, 13307), 'shutil.copyfile', 'shutil.copyfile', (['source', 'dest'], {}), '(source, dest)\n', (13293, 13307), False, 'import shutil\n'), ((13342, 13366), 'os.symlink', 'os.symlink', (['source', 'dest'], {}), '(source, dest)\n', (13352, 13366), False, 'import os\n'), ((19013, 19039), 'glob.glob', 'glob.glob', (["(file_path + '*')"], {}), "(file_path + '*')\n", (19022, 19039), False, 'import glob\n'), ((19299, 19321), 'os.rename', 'os.rename', (['f', 'new_path'], {}), '(f, new_path)\n', (19308, 19321), False, 'import os\n'), ((23353, 23421), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdin': 'stdin', 'stdout': 'stdout', 'stderr': 'stderr'}), '(command, stdin=stdin, stdout=stdout, stderr=stderr)\n', (23369, 23421), 
False, 'import subprocess\n'), ((38617, 38758), 'subprocess.Popen', 'subprocess.Popen', (['self.ftm.fw_policy.walltime_command'], {'shell': '(True)', 'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(self.ftm.fw_policy.walltime_command, shell=True, stdin=\n subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (38633, 38758), False, 'import subprocess\n'), ((42104, 42115), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (42113, 42115), False, 'import os\n'), ((44243, 44310), 'json.dump', 'json.dump', (['self.history', 'f'], {'cls': 'MontyEncoder', 'indent': '(4)', 'sort_keys': '(4)'}), '(self.history, f, cls=MontyEncoder, indent=4, sort_keys=4)\n', (44252, 44310), False, 'import json\n'), ((52490, 52518), 'os.path.join', 'os.path.join', (['prev_indata', 'f'], {}), '(prev_indata, f)\n', (52502, 52518), False, 'import os\n'), ((52538, 52560), 'os.path.islink', 'os.path.islink', (['source'], {}), '(source)\n', (52552, 52560), False, 'import os\n'), ((53370, 53396), 'os.path.basename', 'os.path.basename', (['out_file'], {}), '(out_file)\n', (53386, 53396), False, 'import os\n'), ((53516, 53536), 'os.path.islink', 'os.path.islink', (['dest'], {}), '(dest)\n', (53530, 53536), False, 'import os\n'), ((53967, 53993), 'os.symlink', 'os.symlink', (['out_file', 'dest'], {}), '(out_file, dest)\n', (53977, 53993), False, 'import os\n'), ((54582, 54602), 'os.path.islink', 'os.path.islink', (['dest'], {}), '(dest)\n', (54596, 54602), False, 'import os\n'), ((57402, 57422), 'abipy.flowtk.utils.irdvars_for_ext', 'irdvars_for_ext', (['ext'], {}), '(ext)\n', (57417, 57422), False, 'from abipy.flowtk.utils import irdvars_for_ext\n'), ((60556, 60582), 'os.path.basename', 'os.path.basename', (['out_file'], {}), '(out_file)\n', (60572, 60582), False, 'import os\n'), ((60625, 60650), 'os.path.basename', 'os.path.basename', (['in_file'], {}), '(in_file)\n', (60641, 60650), False, 'import os\n'), ((60771, 60791), 'os.path.islink', 
'os.path.islink', (['dest'], {}), '(dest)\n', (60785, 60791), False, 'import os\n'), ((61222, 61248), 'os.symlink', 'os.symlink', (['out_file', 'dest'], {}), '(out_file, dest)\n', (61232, 61248), False, 'import os\n'), ((67194, 67214), 'os.path.islink', 'os.path.islink', (['dest'], {}), '(dest)\n', (67208, 67214), False, 'import os\n'), ((67645, 67671), 'os.symlink', 'os.symlink', (['out_file', 'dest'], {}), '(out_file, dest)\n', (67655, 67671), False, 'import os\n'), ((80014, 80025), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (80023, 80025), False, 'import os\n'), ((83217, 83253), 'shutil.move', 'shutil.move', (['out_ddb', 'out_ddb_backup'], {}), '(out_ddb, out_ddb_backup)\n', (83228, 83253), False, 'import shutil\n'), ((84696, 84763), 'json.dump', 'json.dump', (['self.history', 'f'], {'cls': 'MontyEncoder', 'indent': '(4)', 'sort_keys': '(4)'}), '(self.history, f, cls=MontyEncoder, indent=4, sort_keys=4)\n', (84705, 84763), False, 'import json\n'), ((93341, 93409), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdin': 'stdin', 'stdout': 'stdout', 'stderr': 'stderr'}), '(command, stdin=stdin, stdout=stdout, stderr=stderr)\n', (93357, 93409), False, 'import subprocess\n'), ((94672, 94813), 'subprocess.Popen', 'subprocess.Popen', (['self.ftm.fw_policy.walltime_command'], {'shell': '(True)', 'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(self.ftm.fw_policy.walltime_command, shell=True, stdin=\n subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (94688, 94813), False, 'import subprocess\n'), ((97032, 97099), 'json.dump', 'json.dump', (['self.history', 'f'], {'cls': 'MontyEncoder', 'indent': '(4)', 'sort_keys': '(4)'}), '(self.history, f, cls=MontyEncoder, indent=4, sort_keys=4)\n', (97041, 97099), False, 'import json\n'), ((104891, 104911), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (104906, 104911), False, 'import os\n'), ((4654, 4673), 'shutil.rmtree', 'shutil.rmtree', 
(['path'], {}), '(path)\n', (4667, 4673), False, 'import shutil\n'), ((4716, 4731), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (4725, 4731), False, 'import os\n'), ((4980, 5016), 'os.path.exists', 'os.path.exists', (['autoparal_backup_dir'], {}), '(autoparal_backup_dir)\n', (4994, 5016), False, 'import os\n'), ((5038, 5071), 'os.makedirs', 'os.makedirs', (['autoparal_backup_dir'], {}), '(autoparal_backup_dir)\n', (5049, 5071), False, 'import os\n'), ((5227, 5250), 'os.path.exists', 'os.path.exists', (['newpath'], {}), '(newpath)\n', (5241, 5250), False, 'import os\n'), ((5272, 5306), 'shutil.move', 'shutil.move', (['current_path', 'newpath'], {}), '(current_path, newpath)\n', (5283, 5306), False, 'import shutil\n'), ((10384, 10406), 'os.path.realpath', 'os.path.realpath', (['dest'], {}), '(dest)\n', (10400, 10406), False, 'import os\n'), ((13667, 13689), 'os.path.realpath', 'os.path.realpath', (['dest'], {}), '(dest)\n', (13683, 13689), False, 'import os\n'), ((16123, 16157), 'abipy.flowtk.events.get_event_handler_classes', 'events.get_event_handler_classes', ([], {}), '()\n', (16155, 16157), False, 'from abipy.flowtk import events, tasks\n'), ((18882, 18899), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (18896, 18899), False, 'import os\n'), ((23900, 23911), 'time.time', 'time.time', ([], {}), '()\n', (23909, 23911), False, 'import time\n'), ((34562, 34573), 'time.time', 'time.time', ([], {}), '()\n', (34571, 34573), False, 'import time\n'), ((41477, 41514), 'abipy.abio.inputs.AbinitInput.from_dict', 'AbinitInput.from_dict', (['previous_input'], {}), '(previous_input)\n', (41498, 41514), False, 'from abipy.abio.inputs import AbinitInput\n'), ((52591, 52610), 'os.readlink', 'os.readlink', (['source'], {}), '(source)\n', (52602, 52610), False, 'import os\n'), ((52646, 52687), 'os.path.join', 'os.path.join', (['self.workdir', 'INDIR_NAME', 'f'], {}), '(self.workdir, INDIR_NAME, f)\n', (52658, 52687), False, 'import os\n'), ((63572, 63594), 
'abipy.flowtk.utils.irdvars_for_ext', 'irdvars_for_ext', (['"""WFK"""'], {}), "('WFK')\n", (63587, 63594), False, 'from abipy.flowtk.utils import irdvars_for_ext\n'), ((64452, 64475), 'os.path.exists', 'os.path.exists', (['out_den'], {}), '(out_den)\n', (64466, 64475), False, 'import os\n'), ((81449, 81463), 'time.asctime', 'time.asctime', ([], {}), '()\n', (81461, 81463), False, 'import time\n'), ((84110, 84133), 'os.path.isfile', 'os.path.isfile', (['out_ddb'], {}), '(out_ddb)\n', (84124, 84133), False, 'import os\n'), ((87118, 87152), 'abipy.flowtk.events.get_event_handler_classes', 'events.get_event_handler_classes', ([], {}), '()\n', (87150, 87152), False, 'from abipy.flowtk import events, tasks\n'), ((108297, 108347), 'numpy.allclose', 'np.allclose', (["nscf_fw.tasks[0].abiinput['qpt']", 'qpt'], {}), "(nscf_fw.tasks[0].abiinput['qpt'], qpt)\n", (108308, 108347), True, 'import numpy as np\n'), ((2634, 2670), 'inspect.getfullargspec', 'inspect.getfullargspec', (['cls.__init__'], {}), '(cls.__init__)\n', (2656, 2670), False, 'import inspect\n'), ((11350, 11371), 'os.path.basename', 'os.path.basename', (['ddk'], {}), '(ddk)\n', (11366, 11371), False, 'import os\n'), ((23129, 23152), 'abipy.flowtk.qutils.time2slurm', 'time2slurm', (['mytimelimit'], {}), '(mytimelimit)\n', (23139, 23152), False, 'from abipy.flowtk.qutils import time2slurm\n'), ((23696, 23707), 'time.time', 'time.time', ([], {}), '()\n', (23705, 23707), False, 'import time\n'), ((29819, 29896), 'fireworks.core.firework.FWAction', 'FWAction', ([], {'stored_data': 'stored_data', 'update_spec': 'update_spec', 'mod_spec': 'mod_spec'}), '(stored_data=stored_data, update_spec=update_spec, mod_spec=mod_spec)\n', (29827, 29896), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n'), ((30719, 30772), 'fireworks.core.firework.FWAction', 'FWAction', ([], {'detours': 'restart_fw', 'stored_data': 'stored_data'}), '(detours=restart_fw, stored_data=stored_data)\n', (30727, 
30772), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n'), ((54091, 54106), 'os.remove', 'os.remove', (['dest'], {}), '(dest)\n', (54100, 54106), False, 'import os\n'), ((54127, 54153), 'os.symlink', 'os.symlink', (['out_file', 'dest'], {}), '(out_file, dest)\n', (54137, 54153), False, 'import os\n'), ((55095, 55113), 'abipy.flowtk.utils.irdvars_for_ext', 'irdvars_for_ext', (['e'], {}), '(e)\n', (55110, 55113), False, 'from abipy.flowtk.utils import irdvars_for_ext\n'), ((61346, 61361), 'os.remove', 'os.remove', (['dest'], {}), '(dest)\n', (61355, 61361), False, 'import os\n'), ((61382, 61408), 'os.symlink', 'os.symlink', (['out_file', 'dest'], {}), '(out_file, dest)\n', (61392, 61408), False, 'import os\n'), ((64511, 64533), 'abipy.flowtk.utils.irdvars_for_ext', 'irdvars_for_ext', (['"""DEN"""'], {}), "('DEN')\n", (64526, 64533), False, 'from abipy.flowtk.utils import irdvars_for_ext\n'), ((65309, 65331), 'abipy.flowtk.utils.irdvars_for_ext', 'irdvars_for_ext', (['"""DEN"""'], {}), "('DEN')\n", (65324, 65331), False, 'from abipy.flowtk.utils import irdvars_for_ext\n'), ((67769, 67784), 'os.remove', 'os.remove', (['dest'], {}), '(dest)\n', (67778, 67784), False, 'import os\n'), ((67805, 67831), 'os.symlink', 'os.symlink', (['out_file', 'dest'], {}), '(out_file, dest)\n', (67815, 67831), False, 'import os\n'), ((77759, 77794), 'os.path.join', 'os.path.join', (["t['dir']", 'OUTDIR_NAME'], {}), "(t['dir'], OUTDIR_NAME)\n", (77771, 77794), False, 'import os\n'), ((93819, 93830), 'time.time', 'time.time', ([], {}), '()\n', (93828, 93830), False, 'import time\n'), ((107701, 107721), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (107716, 107721), False, 'import os\n'), ((12290, 12322), 'os.path.split', 'os.path.split', (['self.prefix.odata'], {}), '(self.prefix.odata)\n', (12303, 12322), False, 'import os\n'), ((27935, 27988), 'fireworks.core.firework.FWAction', 'FWAction', ([], {'detours': 'restart_fw', 
'stored_data': 'stored_data'}), '(detours=restart_fw, stored_data=stored_data)\n', (27943, 27988), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n'), ((43728, 43765), 'os.path.isfile', 'os.path.isfile', (['self.output_file.path'], {}), '(self.output_file.path)\n', (43742, 43765), False, 'import os\n'), ((48681, 48699), 'abipy.flowtk.utils.irdvars_for_ext', 'irdvars_for_ext', (['d'], {}), '(d)\n', (48696, 48699), False, 'from abipy.flowtk.utils import irdvars_for_ext\n'), ((29290, 29343), 'fireworks.core.firework.FWAction', 'FWAction', ([], {'detours': 'restart_fw', 'stored_data': 'stored_data'}), '(detours=restart_fw, stored_data=stored_data)\n', (29298, 29343), False, 'from fireworks.core.firework import Firework, FireTaskBase, FWAction, Workflow\n')] |
import numpy as np
from scipy.optimize import fmin
def coexistence(lnpi, N):
    """Shift ``lnpi`` to the coexistence activity near the critical point.

    The coexistence point is located by maximizing the compressibility of
    the reweighted particle-number distribution.

    Args:
        lnpi: Log of the probability distribution (modified in place).
        N: Particle number distribution.

    Returns:
        The log of the probability distribution at the coexistence point
        (the same array object as ``lnpi``, shifted in place).
    """
    def negative_compressibility(shift):
        """Negative compressibility of the distribution reweighted by shift*N."""
        reweighted = lnpi + shift * N
        prob = np.exp(reweighted) / np.sum(np.exp(reweighted))
        chi = np.dot(N * N, prob) - np.dot(N, prob) ** 2
        return -chi

    best_shift = fmin(negative_compressibility, x0=0)
    lnpi += best_shift[0] * N
    return lnpi
def finite_scaling(frac,path,T,T0,H,d):
    """Calculate cumulant ratios of the order parameter m = eta_c - <eta_c>.

    Reads the simulation histograms from ``path`` (``lnpi_op.dat``,
    ``elim.dat``, ``ehist.dat``), reweights them from the simulation
    temperature ``T0`` to each temperature in ``T``, shifts each reweighted
    distribution to its coexistence activity, and accumulates the moments
    of the packing-fraction fluctuation.

    Args:
        frac : The activity at the simulation condition (only ``frac[0]``
            is used).
        path: The path where simulation data is stored.
        T: The list of temperatures to reweight to.
        T0: The simulation temperature.
        H: Cubic box length (3D) or square box length (2D).
        d : Dimension (2 or 3).
    Returns:
        M: The second order cumulant ratio, <m^2>/<|m|>^2
        U: The fourth order cumulant ratio, <m^4>/<m^2>^2
    """
    # Particle-number grid and its (normalized) log-probability.
    N,nlnpi = np.loadtxt(path + '/lnpi_op.dat', usecols=(0,1), unpack=True)
    nlnpi = np.log(np.exp(nlnpi)/np.sum(np.exp(nlnpi)))
    # elim rows: (number of energy bins, energy start, energy end) per N value.
    elim = np.loadtxt(path + '/elim.dat')[:,1:4]
    # ehist columns: energy histogram for each N value.
    ehist = np.loadtxt(path + '/ehist.dat')[:,1:]
    """Histogram Reweighting and M calculation"""
    # Set constants and parameters
    sigma = 4.0
    kb = 1.38e-23
    m = np.zeros(len(T))
    m_2=np.zeros(len(T))
    m_4=np.zeros(len(T))
    # NOTE(review): 3-D uses pi/6*sigma**3 (sigma as diameter) while 2-D uses
    # pi*sigma**2 (sigma as radius?) -- confirm the intended convention.
    if d == 3:
        rho = np.pi/6*sigma**3*N/H**3
    elif d == 2:
        rho = np.pi*sigma**2*N/H**2
    for i in range(len(T)):
        nlnpi_new = np.zeros(len(N))
        #Reweight and calculate the new pi(N)[j] at each N[j]
        for j in range(len(N)):
            # num is read from the data file as a float; the linspace count
            # and the slice below rely on older numpy accepting float values.
            num,e_st,e_en = elim[j,:]
            emicro = np.linspace(e_st,e_en,num)
            eh = ehist[:num,j]
            elnpi = np.log(eh/np.sum(eh))
            # Temperature reweighting of the energy distribution at fixed N.
            elnpi_new = elnpi + emicro*(1.0/kb/T0-1.0/kb/T[i])
            # NOTE(review): eprob_new is computed but never used.
            eprob_new = np.exp(elnpi_new)/np.sum(np.exp(elnpi_new))
            # Combine with the N-distribution and the activity correction term.
            lnpi_new = (elnpi_new + nlnpi[j]
                + (1.0/kb/T[i]-1.0/kb/T0)*frac[0]/(1.0/kb/T0)*N[j])
            nlnpi_new[j] = np.log(np.sum(np.exp(lnpi_new)))
        # Reweight the new lnpi(N) to the saturated (coexistence) activity.
        nlnpi_new = coexistence(nlnpi_new, N)
        prob = np.exp(nlnpi_new)/np.sum(np.exp(nlnpi_new))
        # Moments of the packing-fraction fluctuation about its mean.
        rho_av = np.dot(rho,prob)
        m[i] = np.dot(np.abs(rho-rho_av),prob)
        m_2[i] = np.dot((rho-rho_av)**2,prob)
        m_4[i] = np.dot((rho-rho_av)**4,prob)
    M = m_2/m**2
    U = m_4/m_2**2
    return M, U
| [
"numpy.abs",
"numpy.sum",
"numpy.exp",
"numpy.loadtxt",
"numpy.linspace",
"numpy.dot"
] | [((1378, 1440), 'numpy.loadtxt', 'np.loadtxt', (["(path + '/lnpi_op.dat')"], {'usecols': '(0, 1)', 'unpack': '(True)'}), "(path + '/lnpi_op.dat', usecols=(0, 1), unpack=True)\n", (1388, 1440), True, 'import numpy as np\n'), ((1510, 1540), 'numpy.loadtxt', 'np.loadtxt', (["(path + '/elim.dat')"], {}), "(path + '/elim.dat')\n", (1520, 1540), True, 'import numpy as np\n'), ((1562, 1593), 'numpy.loadtxt', 'np.loadtxt', (["(path + '/ehist.dat')"], {}), "(path + '/ehist.dat')\n", (1572, 1593), True, 'import numpy as np\n'), ((2739, 2756), 'numpy.dot', 'np.dot', (['rho', 'prob'], {}), '(rho, prob)\n', (2745, 2756), True, 'import numpy as np\n'), ((2822, 2855), 'numpy.dot', 'np.dot', (['((rho - rho_av) ** 2)', 'prob'], {}), '((rho - rho_av) ** 2, prob)\n', (2828, 2855), True, 'import numpy as np\n'), ((2869, 2902), 'numpy.dot', 'np.dot', (['((rho - rho_av) ** 4)', 'prob'], {}), '((rho - rho_av) ** 4, prob)\n', (2875, 2902), True, 'import numpy as np\n'), ((582, 598), 'numpy.exp', 'np.exp', (['lnpi_new'], {}), '(lnpi_new)\n', (588, 598), True, 'import numpy as np\n'), ((643, 662), 'numpy.dot', 'np.dot', (['(N * N)', 'prob'], {}), '(N * N, prob)\n', (649, 662), True, 'import numpy as np\n'), ((1461, 1474), 'numpy.exp', 'np.exp', (['nlnpi'], {}), '(nlnpi)\n', (1467, 1474), True, 'import numpy as np\n'), ((2140, 2168), 'numpy.linspace', 'np.linspace', (['e_st', 'e_en', 'num'], {}), '(e_st, e_en, num)\n', (2151, 2168), True, 'import numpy as np\n'), ((2677, 2694), 'numpy.exp', 'np.exp', (['nlnpi_new'], {}), '(nlnpi_new)\n', (2683, 2694), True, 'import numpy as np\n'), ((2779, 2799), 'numpy.abs', 'np.abs', (['(rho - rho_av)'], {}), '(rho - rho_av)\n', (2785, 2799), True, 'import numpy as np\n'), ((606, 622), 'numpy.exp', 'np.exp', (['lnpi_new'], {}), '(lnpi_new)\n', (612, 622), True, 'import numpy as np\n'), ((660, 675), 'numpy.dot', 'np.dot', (['N', 'prob'], {}), '(N, prob)\n', (666, 675), True, 'import numpy as np\n'), ((1482, 1495), 'numpy.exp', 'np.exp', (['nlnpi'], {}), 
'(nlnpi)\n', (1488, 1495), True, 'import numpy as np\n'), ((2331, 2348), 'numpy.exp', 'np.exp', (['elnpi_new'], {}), '(elnpi_new)\n', (2337, 2348), True, 'import numpy as np\n'), ((2702, 2719), 'numpy.exp', 'np.exp', (['nlnpi_new'], {}), '(nlnpi_new)\n', (2708, 2719), True, 'import numpy as np\n'), ((2230, 2240), 'numpy.sum', 'np.sum', (['eh'], {}), '(eh)\n', (2236, 2240), True, 'import numpy as np\n'), ((2356, 2373), 'numpy.exp', 'np.exp', (['elnpi_new'], {}), '(elnpi_new)\n', (2362, 2373), True, 'import numpy as np\n'), ((2540, 2556), 'numpy.exp', 'np.exp', (['lnpi_new'], {}), '(lnpi_new)\n', (2546, 2556), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import numpy as np
import unittest
from matplotlib import pyplot
# Select matplotlib's non-interactive "template" backend before the tested
# modules are imported, so the plotting test can run without a display.
pyplot.switch_backend('template')
from ... import Graph
from .. import mountain_car as mcar
class TestMountainCar(unittest.TestCase):
    def test_traj_sampling(self):
        """Sampling three trajectories yields three 2-column arrays."""
        trajectories, traces = mcar.mountain_car_trajectories(3)
        self.assertEqual(len(traces), 3)
        self.assertEqual(len(trajectories), 3)
        for trajectory in trajectories:
            self.assertEqual(trajectory.shape[1], 2)

    def test_basis_plotting(self):
        """Plotting the basis on a random graph/points runs without error."""
        points = np.random.random((5, 2))
        graph = Graph.from_adj_matrix(np.random.random((5, 5)))
        mcar.plot_mcar_basis(graph, points)
# Allow running this test module directly (e.g. `python test_mountain_car.py`).
if __name__ == '__main__':
  unittest.main()
| [
"matplotlib.pyplot.switch_backend",
"unittest.main",
"numpy.random.random"
] | [((104, 137), 'matplotlib.pyplot.switch_backend', 'pyplot.switch_backend', (['"""template"""'], {}), "('template')\n", (125, 137), False, 'from matplotlib import pyplot\n'), ((713, 728), 'unittest.main', 'unittest.main', ([], {}), '()\n', (726, 728), False, 'import unittest\n'), ((569, 593), 'numpy.random.random', 'np.random.random', (['(5, 2)'], {}), '((5, 2))\n', (585, 593), True, 'import numpy as np\n'), ((624, 648), 'numpy.random.random', 'np.random.random', (['(5, 5)'], {}), '((5, 5))\n', (640, 648), True, 'import numpy as np\n')] |
import numpy as np
from skimage.transform import downscale_local_mean, rescale
from fibercnn.modeling.spline import calculate_length, interpolation, to_mask
def _calculate_point_distances(As, Bs):
return np.sqrt(np.sum((As - Bs) ** 2, axis=1))
def _calculate_segment_lengths(keypoints):
    """Lengths of the polyline segments between consecutive keypoints."""
    starts, ends = keypoints[:-1, :], keypoints[1:, :]
    return _calculate_point_distances(starts, ends)
def _calculate_intersection_over_union(keypoints, fiber_width, mask, downsampling_factor=1):
    """IoU between `mask` and the mask rendered from the spline keypoints.

    Returns 0 when the spline mask cannot be built (TypeError from `to_mask`)
    or when either mask has no foreground pixels (IndexError while cropping).

    :param keypoints: (n, 2) keypoint array describing the spline.
    :param fiber_width: width used when rasterizing the spline.
    :param mask: 2-D ground-truth mask.
    :param downsampling_factor: optional integer factor to shrink both masks
        (and scale the keypoints/width accordingly) before comparing.
    """
    if downsampling_factor != 1:
        # Out-of-place division: the original `keypoints /= ...` mutated the
        # caller's array in place when it was a numpy array.
        keypoints = keypoints / downsampling_factor
        fiber_width = fiber_width / downsampling_factor
        mask = downscale_local_mean(mask, (downsampling_factor, downsampling_factor))
    image_size = mask.shape
    try:
        spline_mask = to_mask(image_size, keypoints, fiber_width)
    except TypeError:
        return 0
    spline_mask = spline_mask.astype("bool")
    mask = mask.astype("bool")
    try:
        # Restrict both masks to the union of their bounding boxes; raises
        # IndexError when a mask is entirely empty.
        mask, spline_mask = _crop_masks(mask, spline_mask)
    except IndexError:
        return 0
    intersection = np.sum(np.logical_and(mask, spline_mask))
    union = np.sum(np.logical_or(mask, spline_mask))
    return intersection / union
def _crop_masks(mask1, mask2):
    """Crop both masks to the union of their foreground bounding boxes."""
    rmin1, rmax1, cmin1, cmax1 = _get_mask_bounding_box(mask1)
    rmin2, rmax2, cmin2, cmax2 = _get_mask_bounding_box(mask2)
    row_slice = slice(min(rmin1, rmin2), max(rmax1, rmax2))
    col_slice = slice(min(cmin1, cmin2), max(cmax1, cmax2))
    return mask1[row_slice, col_slice], mask2[row_slice, col_slice]
def _get_mask_bounding_box(mask):
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
return rmin, rmax, cmin, cmax
def prune_keypoints(
    keypoints,
    fiber_width,
    mask,
    target_fiber_length,
    length_deviation_min=0.25,
    length_deviation_max=float("inf"),
):
    """Greedily delete keypoints until the spline length matches the target.

    Each round tries removing one endpoint of the longest segments first and
    keeps a deletion only if it does not lower the mask IoU and does not
    increase the relative length deviation. The original keypoint count is
    restored by re-interpolation before returning.

    :param keypoints: (n, 2) keypoint array describing the spline.
    :param fiber_width: width used when rasterizing the spline for the IoU.
    :param mask: ground-truth mask the spline is compared against.
    :param target_fiber_length: desired spline length.
    :param length_deviation_min: stop once the deviation drops below this.
    :param length_deviation_max: give up immediately if the initial deviation
        exceeds this.
    """
    num_keypoints = len(keypoints)
    iou = _calculate_intersection_over_union(keypoints, fiber_width, mask)
    length_deviation = calculate_spline_length_deviation(keypoints, target_fiber_length)
    is_precise_enough = length_deviation < length_deviation_min
    is_too_messed_up = length_deviation > length_deviation_max
    is_out_of_options = False
    while not (is_out_of_options or is_precise_enough or is_too_messed_up):
        # Longest segments first: removing one of their endpoints shortens
        # the spline the most per deletion.
        segment_lengths = _calculate_segment_lengths(keypoints)
        segment_testing_order = np.flip(np.argsort(segment_lengths), axis=0)
        is_out_of_options = True  # assumption
        for segment_id in segment_testing_order:  # test the assumption
            potential_new_ious, potential_new_keypoint_sets = _try_keypoint_pruning_for_segment(
                segment_id, keypoints, fiber_width, mask
            )
            if np.any(potential_new_ious >= iou):
                better_cadidate_id = np.argmax(potential_new_ious)
                potential_new_keypoints = potential_new_keypoint_sets[better_cadidate_id]
                potential_new_iou = potential_new_ious[better_cadidate_id]
                potential_new_length_deviation = calculate_spline_length_deviation(
                    potential_new_keypoints, target_fiber_length
                )
                # Accept the deletion only if the length estimate does not get worse.
                if potential_new_length_deviation <= length_deviation:
                    keypoints = potential_new_keypoints
                    iou = potential_new_iou
                    length_deviation = potential_new_length_deviation
                    is_precise_enough = length_deviation < length_deviation_min
                    is_out_of_options = False
                    break
    # Restore the number of keypoints.
    if len(keypoints) != num_keypoints:
        keypoints = interpolation(keypoints, num_interpolation_steps=num_keypoints)
    return keypoints
def _try_keypoint_pruning_for_segment(segment_id, keypoints, fiber_width, mask):
    """Evaluate removing either endpoint of one segment.

    Returns the IoUs of both candidate keypoint sets (as an array) and the
    candidate sets themselves.
    """
    candidate_sets = [
        np.delete(keypoints, endpoint_id, axis=0)
        for endpoint_id in (segment_id, segment_id + 1)
    ]
    ious = np.array(
        [_calculate_intersection_over_union(c, fiber_width, mask) for c in candidate_sets]
    )
    return ious, candidate_sets
def calculate_spline_length_deviation(keypoints, target_spline_length):
    """Relative deviation of the spline length from the target length."""
    length_ratio = calculate_length(keypoints) / target_spline_length
    return abs(1 - length_ratio)
| [
"fibercnn.modeling.spline.to_mask",
"numpy.sum",
"numpy.logical_and",
"numpy.argmax",
"fibercnn.modeling.spline.calculate_length",
"numpy.any",
"skimage.transform.downscale_local_mean",
"numpy.argsort",
"numpy.where",
"numpy.array",
"numpy.logical_or",
"fibercnn.modeling.spline.interpolation",... | [((1623, 1643), 'numpy.any', 'np.any', (['mask'], {'axis': '(1)'}), '(mask, axis=1)\n', (1629, 1643), True, 'import numpy as np\n'), ((1655, 1675), 'numpy.any', 'np.any', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (1661, 1675), True, 'import numpy as np\n'), ((4487, 4515), 'numpy.array', 'np.array', (['potential_new_ious'], {}), '(potential_new_ious)\n', (4495, 4515), True, 'import numpy as np\n'), ((4676, 4703), 'fibercnn.modeling.spline.calculate_length', 'calculate_length', (['keypoints'], {}), '(keypoints)\n', (4692, 4703), False, 'from fibercnn.modeling.spline import calculate_length, interpolation, to_mask\n'), ((219, 249), 'numpy.sum', 'np.sum', (['((As - Bs) ** 2)'], {'axis': '(1)'}), '((As - Bs) ** 2, axis=1)\n', (225, 249), True, 'import numpy as np\n'), ((620, 690), 'skimage.transform.downscale_local_mean', 'downscale_local_mean', (['mask', '(downsampling_factor, downsampling_factor)'], {}), '(mask, (downsampling_factor, downsampling_factor))\n', (640, 690), False, 'from skimage.transform import downscale_local_mean, rescale\n'), ((752, 795), 'fibercnn.modeling.spline.to_mask', 'to_mask', (['image_size', 'keypoints', 'fiber_width'], {}), '(image_size, keypoints, fiber_width)\n', (759, 795), False, 'from fibercnn.modeling.spline import calculate_length, interpolation, to_mask\n'), ((3774, 3837), 'fibercnn.modeling.spline.interpolation', 'interpolation', (['keypoints'], {'num_interpolation_steps': 'num_keypoints'}), '(keypoints, num_interpolation_steps=num_keypoints)\n', (3787, 3837), False, 'from fibercnn.modeling.spline import calculate_length, interpolation, to_mask\n'), ((4158, 4209), 'numpy.delete', 'np.delete', (['keypoints', 'candidate_keypoint_id'], {'axis': '(0)'}), '(keypoints, candidate_keypoint_id, axis=0)\n', (4167, 4209), True, 'import numpy as np\n'), ((1059, 1092), 'numpy.logical_and', 'np.logical_and', (['mask', 'spline_mask'], {}), '(mask, spline_mask)\n', (1073, 1092), True, 'import 
numpy as np\n'), ((1112, 1144), 'numpy.logical_or', 'np.logical_or', (['mask', 'spline_mask'], {}), '(mask, spline_mask)\n', (1125, 1144), True, 'import numpy as np\n'), ((1693, 1707), 'numpy.where', 'np.where', (['rows'], {}), '(rows)\n', (1701, 1707), True, 'import numpy as np\n'), ((1737, 1751), 'numpy.where', 'np.where', (['cols'], {}), '(cols)\n', (1745, 1751), True, 'import numpy as np\n'), ((2502, 2529), 'numpy.argsort', 'np.argsort', (['segment_lengths'], {}), '(segment_lengths)\n', (2512, 2529), True, 'import numpy as np\n'), ((2844, 2877), 'numpy.any', 'np.any', (['(potential_new_ious >= iou)'], {}), '(potential_new_ious >= iou)\n', (2850, 2877), True, 'import numpy as np\n'), ((2916, 2945), 'numpy.argmax', 'np.argmax', (['potential_new_ious'], {}), '(potential_new_ious)\n', (2925, 2945), True, 'import numpy as np\n')] |
import numpy as np
import datetime
from dateutil.relativedelta import relativedelta
def add_scenarios(df):
    """Add emission scenario columns: measured values with projections filling gaps.

    Trend scenarios fall back to the fitted trend columns; target scenarios
    fall back to the target path and finally to 0 beyond its horizon.
    Modifies and returns `df`.
    """
    for required in ("co2_kt_total", "trend_const_kt", "trend_lin_kt"):
        assert required in df.columns
    measured = df["co2_kt_total"]
    df["scenario_trendlin_kt"] = measured.fillna(df["trend_lin_kt"])
    df["scenario_trendconst_kt"] = measured.fillna(df["trend_const_kt"])
    for target in ("target30", "target50", "target30_new", "target50_new"):
        df[f"scenario_{target}_kt"] = measured.fillna(df[f"{target}_kt"]).fillna(0)
    return df
def when_scenario_0(df, scenario_name):
    """First index (year) at which the scenario column hits 0, or inf if never."""
    series = df[scenario_name]
    assert not series.isnull().any()
    first_zero_year = series.index[series == 0].min()
    return np.inf if np.isnan(first_zero_year) else first_zero_year
def cumulated_emissions(df, scenario_name, from_y=None, to_y=None):
    """Sum of scenario emissions over [from_y, to_y].

    Returns inf when the scenario never reaches 0 (i.e. emissions continue
    indefinitely).
    """
    series = df[scenario_name]
    assert not series.isnull().any()
    if from_y is not None:
        series = series[series.index >= from_y]
    if to_y is not None:
        series = series[series.index <= to_y]
    if when_scenario_0(df, scenario_name) == np.inf:
        return np.inf
    return series.sum()
def cumulated_emissions_this_second(df, scenario_name, from_y):
    """Cumulative emissions from `from_y` up to the current wall-clock moment.

    Past years count fully; the current year's value is scaled by the
    fraction of the year elapsed so far.
    """
    s = df[scenario_name]
    assert not s.isnull().any()
    if from_y is not None:
        s = s[s.index >= from_y]
    now = datetime.datetime.now()
    current_year = now.year
    first_sec_of_current_year_str = f"{current_year}-01-01 00:00:00"
    first_sec_of_current_year = datetime.datetime.strptime(
        first_sec_of_current_year_str, "%Y-%m-%d %H:%M:%S"
    )
    seconds_this_year_so_far = (now - first_sec_of_current_year).total_seconds()
    # 365.2 approximates the average year length including leap years.
    this_year_complete_fraction = seconds_this_year_so_far / (60 * 60 * 24 * 365.2)
    cumulated_emissions_past_years = s[s.index < current_year].sum()
    # NOTE(review): raises KeyError if the series has no row for the current
    # year -- presumably the scenarios always extend past today; confirm.
    emissions_this_year = s[current_year]
    emissions_this_year_so_far = emissions_this_year * this_year_complete_fraction
    cumulated_emissions_so_far = (
        cumulated_emissions_past_years + emissions_this_year_so_far
    )
    return cumulated_emissions_so_far
def when_budget_is_spend(df, scenario_name, budget_kt, from_y):
    """First index (year) from `from_y` on whose cumulative emissions exceed the budget.

    Returns NaN when the budget is never exceeded.
    """
    series = df[scenario_name]
    assert not series.isnull().any()
    future = series[series.index >= from_y]
    over_budget = future.cumsum() > budget_kt
    return future.index[over_budget].min()
| [
"datetime.datetime.strptime",
"datetime.datetime.now",
"numpy.isnan"
] | [((970, 1002), 'numpy.isnan', 'np.isnan', (['first_y_when_data_is_0'], {}), '(first_y_when_data_is_0)\n', (978, 1002), True, 'import numpy as np\n'), ((1631, 1654), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1652, 1654), False, 'import datetime\n'), ((1785, 1863), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['first_sec_of_current_year_str', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(first_sec_of_current_year_str, '%Y-%m-%d %H:%M:%S')\n", (1811, 1863), False, 'import datetime\n')] |
import pandas as pd
import os
import numpy as np
import csv
import re
from kmeans import kmeans, avg_iou
# Input: Kaggle "Global Wheat Detection" training annotations (one row per box).
csv_path = r'G:\Deep_Learning\kaggle\global-wheat-detection\dataset\train.csv'
# Number of anchor boxes to cluster (YOLO-style: 9 anchors).
CLUSTERS = 9
df = pd.read_csv(csv_path)
def process_bbox(df):
    """Parse the `bbox` string column into numeric x/y/w/h columns.

    :param df: DataFrame with at least `image_id` and `bbox` columns, where
        `bbox` holds strings like "[x, y, w, h]".
    :return: a new DataFrame with columns image_id, x, y, w, h
        (the input frame is not modified).
    """
    import ast

    # `ast.literal_eval` safely parses the "[x, y, w, h]" strings; the
    # original used `eval`, which executes arbitrary expressions from the CSV.
    boxes = df['bbox'].apply(ast.literal_eval).reset_index(drop=True)
    out = df[['image_id']].reset_index(drop=True).copy()
    out['x'] = boxes.apply(lambda b: b[0])
    out['y'] = boxes.apply(lambda b: b[1])
    out['w'] = boxes.apply(lambda b: b[2])
    out['h'] = boxes.apply(lambda b: b[3])
    return out
df_ = process_bbox(df)
df_idx = df_.set_index("image_id")
bbox = df_idx.reset_index(drop=True)
bbox_arr = bbox.to_numpy()
w_h = bbox_arr[:,(2,3)]
# anchors = []
# for b in w_h[:5]:
# anchors.append(b)
data = np.array(w_h)
out = kmeans(data, k=CLUSTERS)
print("Accuracy: {:.2f}%".format(avg_iou(data, out) * 100))
print("Boxes:\n {}".format(out))
ratios = np.around(out[:, 0] / out[:, 1], decimals=2).tolist()
print("Ratios:\n {}".format(sorted(ratios))) | [
"kmeans.kmeans",
"pandas.read_csv",
"kmeans.avg_iou",
"numpy.around",
"numpy.array",
"numpy.unique"
] | [((214, 235), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (225, 235), True, 'import pandas as pd\n'), ((1251, 1264), 'numpy.array', 'np.array', (['w_h'], {}), '(w_h)\n', (1259, 1264), True, 'import numpy as np\n'), ((1272, 1296), 'kmeans.kmeans', 'kmeans', (['data'], {'k': 'CLUSTERS'}), '(data, k=CLUSTERS)\n', (1278, 1296), False, 'from kmeans import kmeans, avg_iou\n'), ((301, 326), 'numpy.unique', 'np.unique', (["df['image_id']"], {}), "(df['image_id'])\n", (310, 326), True, 'import numpy as np\n'), ((1404, 1448), 'numpy.around', 'np.around', (['(out[:, 0] / out[:, 1])'], {'decimals': '(2)'}), '(out[:, 0] / out[:, 1], decimals=2)\n', (1413, 1448), True, 'import numpy as np\n'), ((1331, 1349), 'kmeans.avg_iou', 'avg_iou', (['data', 'out'], {}), '(data, out)\n', (1338, 1349), False, 'from kmeans import kmeans, avg_iou\n')] |
import numpy as np
import torch
from network.vggish import Postprocessor, VGGish
def tensor_mapper(pre_trained: np.array) -> torch.Tensor:
    """
    Convert a Tensorflow weight array to a float tensor in PyTorch layout.

    4-D conv kernels are permuted with (3, 2, 0, 1); 2-D dense weight
    matrices are simply transposed, matching the PyTorch dimension order.
    """
    is_conv_kernel = pre_trained.ndim == 4
    reordered = pre_trained.transpose(3, 2, 0, 1) if is_conv_kernel else pre_trained.T
    return torch.from_numpy(reordered).float()
def set_layer(name, pre_trained, model: VGGish) -> None:
    """
    Set one layer of `model` from a pre-trained (Tensorflow) weight array.

    :param name: Tensorflow variable name (e.g. 'conv1/weights'); the part
        before the last '/' selects the layer, and 'bias' anywhere in the
        name selects the bias rather than the weight.
    :param pre_trained: numpy array with the Tensorflow parameters.
    :param model: VGGish network whose layer is overwritten in place
        (gradients are disabled on the new parameter).
    """
    # Get the name corresponding to the networks layers.
    module_name = name.rsplit('/', 1)[0]
    if 'conv' in module_name:
        module: torch.nn.Module = model.features._modules[module_name]
    elif 'fc' in module_name:
        module = model.embedding._modules[module_name]
    else:
        raise Exception(f"{name} Unexpected name, please try again!")
    # Transpose into PyTorch layout before assignment.
    tensor = tensor_mapper(pre_trained)
    print(f"{name}\t Pytorch shape: {module.bias.shape}\t Tensorflow shape: {pre_trained.T.shape} (transposed)")
    if 'bias' in name:
        module.bias = torch.nn.Parameter(tensor, requires_grad=False).float()
    else:
        module.weight = torch.nn.Parameter(tensor, requires_grad=False).float()
def numpy_to_post_process(pca_matrix, pca_means, model: Postprocessor) -> None:
    """
    Load the Tensorflow PCA postprocessing parameters into `model.layer`.

    Subtracting the means is folded into the bias: the layer computes
    pca_matrix @ x + bias with bias = -(pca_matrix @ pca_means), which by
    linearity equals the Tensorflow code's
    np.dot(pca_matrix, (x.T - pca_means)).T.
    """
    weight = torch.from_numpy(pca_matrix).float()
    bias = -torch.from_numpy(np.matmul(pca_matrix, pca_means).T).float()
    model.layer.weight = torch.nn.Parameter(weight, requires_grad=False)
    model.layer.bias = torch.nn.Parameter(bias, requires_grad=False)
| [
"torch.nn.Parameter",
"numpy.matmul",
"torch.from_numpy"
] | [((422, 453), 'torch.from_numpy', 'torch.from_numpy', (['pre_trained.T'], {}), '(pre_trained.T)\n', (438, 453), False, 'import torch\n'), ((1209, 1256), 'torch.nn.Parameter', 'torch.nn.Parameter', (['tensor'], {'requires_grad': '(False)'}), '(tensor, requires_grad=False)\n', (1227, 1256), False, 'import torch\n'), ((1299, 1346), 'torch.nn.Parameter', 'torch.nn.Parameter', (['tensor'], {'requires_grad': '(False)'}), '(tensor, requires_grad=False)\n', (1317, 1346), False, 'import torch\n'), ((2004, 2032), 'torch.from_numpy', 'torch.from_numpy', (['pca_matrix'], {}), '(pca_matrix)\n', (2020, 2032), False, 'import torch\n'), ((2123, 2155), 'numpy.matmul', 'np.matmul', (['pca_matrix', 'pca_means'], {}), '(pca_matrix, pca_means)\n', (2132, 2155), True, 'import numpy as np\n')] |
from collections import namedtuple
from io import BytesIO
import math
import pkgutil
from typing import Tuple
from PIL import Image, ImageOps, ImageEnhance
from cv2.data import haarcascades
import cv2
import os
import numpy
__all__ = ('Colour', 'ColourTuple', 'DefaultColours', 'deepfry')
Colour = Tuple[int, int, int]
ColourTuple = Tuple[Colour, Colour]
class DefaultColours:
    """Default colours provided for deepfrying"""
    # Each pair is passed to ImageOps.colorize: first colour replaces black
    # (shadows), second replaces white (highlights).
    red = ((254, 0, 2), (255, 255, 15))
    blue = ((36, 113, 229), (255,) * 3)
# Haar cascades for face/eye detection; paths are relative to the process CWD.
face_cascade = cv2.CascadeClassifier(os.getcwd() + '/slacky/plugins/custom/deepfry/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(os.getcwd() + '/slacky/plugins/custom/deepfry/haarcascade_eye.xml')
# Lens-flare sprite bundled with the package.
flare_img = Image.open(BytesIO(pkgutil.get_data(__package__, 'flare.png')))
# Position and size of a flare to paste onto the image.
FlarePosition = namedtuple('FlarePosition', ['x', 'y', 'size'])
def deepfryy(img=None, colours= DefaultColours.red, flares= True):
    """
    Deepfry a given image.
    Parameters
    ----------
    img : `Image`
        Image to manipulate.
    colours : `ColourTuple`, optional
        A tuple of the colours to apply on the image.
    flares : `bool`, optional
        Whether or not to try and detect faces for applying lens flares.
    Returns
    -------
    `Image`
        Deepfried image.
    """
    img = img.copy().convert('RGB')
    flare_positions = []
    if flares:
        # Run the Haar cascades on a grayscale copy to find eyes for flares.
        opencv_img = cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2GRAY)
        faces = face_cascade.detectMultiScale(
            opencv_img,
            scaleFactor=1.3,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        for (x, y, w, h) in faces:
            face_roi = opencv_img[y:y+h, x:x+w] # Get region of interest (detected face)
            eyes = eye_cascade.detectMultiScale(face_roi)
            for (ex, ey, ew, eh) in eyes:
                # NOTE(review): ex/ey are relative to the face ROI, but the
                # flare is later pasted at these coordinates on the full
                # image without adding the face offset (x, y) -- looks like
                # the flares can be misplaced; confirm.
                eye_corner = (ex + ew / 2, ey + eh / 2)
                flare_size = eh if eh > ew else ew
                flare_size *= 4
                corners = [math.floor(x) for x in eye_corner]
                eye_corner = FlarePosition(*corners, flare_size)
                flare_positions.append(eye_corner)
    # Crush image to hell and back: repeated lossy down/up-scaling.
    img = img.convert('RGB')
    width, height = img.width, img.height
    img = img.resize((int(width ** .75), int(height ** .75)), resample=Image.LANCZOS)
    img = img.resize((int(width ** .88), int(height ** .88)), resample=Image.BILINEAR)
    img = img.resize((int(width ** .9), int(height ** .9)), resample=Image.BICUBIC)
    img = img.resize((width, height), resample=Image.BICUBIC)
    img = ImageOps.posterize(img, 4)
    # Generate colour overlay from the (contrast/brightness-boosted) red channel.
    r = img.split()[0]
    r = ImageEnhance.Contrast(r).enhance(2.0)
    r = ImageEnhance.Brightness(r).enhance(1.5)
    r = ImageOps.colorize(r, colours[0], colours[1])
    # Overlay red and yellow onto main image and sharpen the hell out of it
    img = Image.blend(img, r, 0.75)
    img = ImageEnhance.Sharpness(img).enhance(100.0)
    # Apply flares on any detected eyes
    for flare in flare_positions:
        flare_transformed = flare_img.copy().resize((flare.size,) * 2, resample=Image.BILINEAR)
        img.paste(flare_transformed, (flare.x, flare.y), flare_transformed)
    return img
"pkgutil.get_data",
"PIL.ImageEnhance.Brightness",
"os.getcwd",
"PIL.ImageEnhance.Contrast",
"math.floor",
"PIL.ImageEnhance.Sharpness",
"PIL.ImageOps.colorize",
"collections.namedtuple",
"numpy.array",
"PIL.Image.blend",
"PIL.ImageOps.posterize"
] | [((831, 878), 'collections.namedtuple', 'namedtuple', (['"""FlarePosition"""', "['x', 'y', 'size']"], {}), "('FlarePosition', ['x', 'y', 'size'])\n", (841, 878), False, 'from collections import namedtuple\n'), ((2668, 2694), 'PIL.ImageOps.posterize', 'ImageOps.posterize', (['img', '(4)'], {}), '(img, 4)\n', (2686, 2694), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((2852, 2896), 'PIL.ImageOps.colorize', 'ImageOps.colorize', (['r', 'colours[0]', 'colours[1]'], {}), '(r, colours[0], colours[1])\n', (2869, 2896), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((2984, 3009), 'PIL.Image.blend', 'Image.blend', (['img', 'r', '(0.75)'], {}), '(img, r, 0.75)\n', (2995, 3009), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((550, 561), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (559, 561), False, 'import os\n'), ((670, 681), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (679, 681), False, 'import os\n'), ((769, 811), 'pkgutil.get_data', 'pkgutil.get_data', (['__package__', '"""flare.png"""'], {}), "(__package__, 'flare.png')\n", (785, 811), False, 'import pkgutil\n'), ((1437, 1453), 'numpy.array', 'numpy.array', (['img'], {}), '(img)\n', (1448, 1453), False, 'import numpy\n'), ((2757, 2781), 'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['r'], {}), '(r)\n', (2778, 2781), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((2803, 2829), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['r'], {}), '(r)\n', (2826, 2829), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((3020, 3047), 'PIL.ImageEnhance.Sharpness', 'ImageEnhance.Sharpness', (['img'], {}), '(img)\n', (3042, 3047), False, 'from PIL import Image, ImageOps, ImageEnhance\n'), ((2080, 2093), 'math.floor', 'math.floor', (['x'], {}), '(x)\n', (2090, 2093), False, 'import math\n')] |
import logging
import numpy as np
import torch
from torch import nn
from algorithm.nets import PolicyNet
from algorithm.policies import Policy
logger = logging.getLogger(__name__)
class ClfPolicy(Policy):
    """Classification policy whose fitness is the negative cross-entropy loss."""

    def rollout(self, placeholder, data, config):
        """Evaluate the policy net on one batch; return -loss as fitness.

        :param placeholder: reusable tensor buffer the batch is copied into.
        :param data: (inputs, labels) batch.
        :param config: run config (uses ``config.cuda``).
        """
        assert self.policy_net is not None, 'Set model first!'
        assert isinstance(self.policy_net, PolicyNet), '{}'.format(type(self.policy_net))
        torch.set_grad_enabled(False)
        device = torch.device('cuda:0' if torch.cuda.is_available() and config.cuda else 'cpu')
        inputs, labels = data
        placeholder.data.resize_(inputs.shape).copy_(inputs)
        # Tensor.to() is NOT in-place: the result must be reassigned,
        # otherwise the data stays on the CPU while the model moved to GPU.
        placeholder = placeholder.to(device)
        labels = labels.to(device)
        self.policy_net.to(device)
        # virtual batch norm: forward the reference batch in train mode so
        # its statistics are used before the evaluation pass.
        if self.vbn:
            self.policy_net.train()
            self.policy_net(torch.empty_like(self.ref_batch).copy_(self.ref_batch))
            self.policy_net.eval()
        outputs = self.policy_net(placeholder)
        criterion = nn.CrossEntropyLoss()
        loss = criterion(outputs, labels)
        # Higher fitness == lower loss, e.g. tensor(2.877) -> -2.877.
        result = -float(loss.detach().item())
        del inputs, labels, outputs, loss, criterion
        return result

    def accuracy_on(self, dataloader, config, directory) -> float:
        """Mean top-1 accuracy of the policy net over (part of) a dataloader."""
        assert self.policy_net is not None, 'Set model first!'
        assert isinstance(self.policy_net, PolicyNet), '{}'.format(type(self.policy_net))
        torch.set_grad_enabled(False)
        self.policy_net.eval()
        accuracies = []
        end = config.num_val_batches if config.num_val_batches else len(dataloader)
        for i, data in enumerate(dataloader):
            if i >= end:
                break
            device = torch.device('cuda:0' if torch.cuda.is_available() and config.cuda else 'cpu')
            inputs, labels = data
            # Reassign: Tensor.to() returns a new tensor (see rollout).
            inputs = inputs.to(device)
            labels = labels.to(device)
            self.policy_net.to(device)
            outputs = self.policy_net(inputs)
            # get the index of the max log-probability
            prediction = outputs.cpu().detach().argmax(dim=1, keepdim=True)
            correct = prediction.eq(labels.view_as(prediction)).sum().item()
            accuracies.append(float(correct) / labels.size()[0])
            del inputs, labels, outputs, prediction, correct
        # todo accuracy calculation not 100% correct, last batch might be smaller so should count less
        accuracy = np.mean(accuracies).item()
        del accuracies
        return accuracy
| [
"torch.nn.CrossEntropyLoss",
"numpy.mean",
"torch.cuda.is_available",
"torch.empty_like",
"torch.set_grad_enabled",
"logging.getLogger"
] | [((156, 183), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (173, 183), False, 'import logging\n'), ((424, 453), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (446, 453), False, 'import torch\n'), ((1007, 1028), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1026, 1028), False, 'from torch import nn\n'), ((1463, 1492), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (1485, 1492), False, 'import torch\n'), ((2466, 2485), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (2473, 2485), True, 'import numpy as np\n'), ((497, 522), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (520, 522), False, 'import torch\n'), ((850, 882), 'torch.empty_like', 'torch.empty_like', (['self.ref_batch'], {}), '(self.ref_batch)\n', (866, 882), False, 'import torch\n'), ((1773, 1798), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1796, 1798), False, 'import torch\n')] |
from datetime import datetime, timedelta
from scipy import stats
import pandas as pd
import math
import numpy as np
def create_sharpe_ratio(returns, periods=252, rf=0):
    '''
    Annualised Sharpe ratio of a series of period returns.

    :param returns: A pandas Series/array representing period percentage returns.
    :param periods: Periods per year - Daily (252), Hourly (252 * 6.5), Minutely (252 * 6.5 * 60), etc.
    :param rf: Annual risk-free rate, spread evenly across the periods.
    '''
    excess = returns - rf / periods
    return np.sqrt(periods) * np.mean(excess) / np.std(excess)
def create_drawdowns(pnl):
    '''
    Calculate the largest peak-to-trough drawdown of the PnL curve and the
    longest drawdown duration.

    Assumes `pnl` is a pandas Series indexed 0..n-1, since elements are
    addressed by integer label.

    :param pnl: A pandas Series representing period percentage returns.
    :return: (drawdown series, max drawdown, max drawdown duration)
    '''
    idx = pnl.index
    drawdown = pd.Series(index=idx)
    duration = pd.Series(index=idx)
    # Running High Water Mark, seeded at zero.
    high_water_mark = [0]
    for t in range(1, len(idx)):
        high_water_mark.append(max(high_water_mark[t - 1], pnl[t]))
        drawdown[t] = high_water_mark[t] - pnl[t]
        duration[t] = 0 if drawdown[t] == 0 else duration[t - 1] + 1
    return drawdown, drawdown.max(), duration.max()
# TODO: Under development - American/European? Dividends?
def black_scholes(stock_price, strike_price, time, rf, div, volatility, option_type):
    '''
    Price a European option with the Black-Scholes formula.

    :param stock_price: Current stock price
    :param strike_price: Strike price of the option to be priced
    :param time: Time to expiry (same time unit as the rates/volatility)
    :param rf: Risk-free rate
    :param div: Dividend yield of the stock
    :param volatility: Volatility of the stock
    :param option_type: 'call' prices a call; any other value prices a put
    '''
    vol_sqrt_t = volatility * math.sqrt(time)
    d1 = (math.log(float(stock_price) / strike_price)
          + (rf - div + volatility * volatility / 2) * time) / vol_sqrt_t
    d2 = d1 - vol_sqrt_t
    discounted_spot = stock_price * math.exp(-div * time)
    discounted_strike = strike_price * math.exp(-rf * time)
    if option_type == 'call':
        return discounted_spot * stats.norm.cdf(d1) - discounted_strike * stats.norm.cdf(d2)
    return discounted_strike * stats.norm.cdf(-d2) - discounted_spot * stats.norm.cdf(-d1)
"math.exp",
"math.sqrt",
"numpy.std",
"scipy.stats.norm.cdf",
"numpy.mean",
"pandas.Series",
"numpy.sqrt"
] | [((987, 1007), 'pandas.Series', 'pd.Series', ([], {'index': 'idx'}), '(index=idx)\n', (996, 1007), True, 'import pandas as pd\n'), ((1023, 1043), 'pandas.Series', 'pd.Series', ([], {'index': 'idx'}), '(index=idx)\n', (1032, 1043), True, 'import pandas as pd\n'), ((523, 553), 'numpy.std', 'np.std', (['(returns - rf / periods)'], {}), '(returns - rf / periods)\n', (529, 553), True, 'import numpy as np\n'), ((470, 486), 'numpy.sqrt', 'np.sqrt', (['periods'], {}), '(periods)\n', (477, 486), True, 'import numpy as np\n'), ((490, 521), 'numpy.mean', 'np.mean', (['(returns - rf / periods)'], {}), '(returns - rf / periods)\n', (497, 521), True, 'import numpy as np\n'), ((1944, 1959), 'math.sqrt', 'math.sqrt', (['time'], {}), '(time)\n', (1953, 1959), False, 'import math\n'), ((1988, 2003), 'math.sqrt', 'math.sqrt', (['time'], {}), '(time)\n', (1997, 2003), False, 'import math\n'), ((2088, 2106), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['d1'], {}), '(d1)\n', (2102, 2106), False, 'from scipy import stats\n'), ((2147, 2165), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['d2'], {}), '(d2)\n', (2161, 2165), False, 'from scipy import stats\n'), ((2229, 2248), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['(-d2)'], {}), '(-d2)\n', (2243, 2248), False, 'from scipy import stats\n'), ((2289, 2308), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['(-d1)'], {}), '(-d1)\n', (2303, 2308), False, 'from scipy import stats\n'), ((2064, 2085), 'math.exp', 'math.exp', (['(-div * time)'], {}), '(-div * time)\n', (2072, 2085), False, 'import math\n'), ((2124, 2144), 'math.exp', 'math.exp', (['(-rf * time)'], {}), '(-rf * time)\n', (2132, 2144), False, 'import math\n'), ((2206, 2226), 'math.exp', 'math.exp', (['(-rf * time)'], {}), '(-rf * time)\n', (2214, 2226), False, 'import math\n'), ((2265, 2286), 'math.exp', 'math.exp', (['(-div * time)'], {}), '(-div * time)\n', (2273, 2286), False, 'import math\n')] |
# -*- coding: utf-8 -*-
import scipy.stats as ss
import numpy.random as npr
from functools import partial
from . import core
def npr_op(distribution, size, input):
    """Sample `input['n']` batches from a numpy.random distribution.

    The RNG state is restored from `input['random_state']` before sampling,
    and the advanced state is returned alongside the data.
    """
    prng = npr.RandomState(0)
    prng.set_state(input['random_state'])
    sampler = getattr(prng, distribution)
    batch_shape = (input['n'],) + tuple(size)
    samples = sampler(*input['data'], size=batch_shape)
    return core.to_output(input, data=samples, random_state=prng.get_state())
class NumpyRV(core.RandomStateMixin, core.Operation):
    """
    Random variable backed by a numpy.random distribution.

    Examples
    --------
    NumpyRV('tau', 'normal', 5, size=(2,3))
    """
    def __init__(self, name, distribution, *params, size=(1,)):
        """
        :param name: node name
        :param distribution: name of a numpy.random.RandomState method, e.g. 'normal'
        :param params: parameters forwarded to the distribution
        :param size: sample shape; a bare value is wrapped into a 1-tuple
        """
        if not isinstance(size, tuple):
            size = (size,)
        # Bind the distribution and shape now; the batch size comes at call time.
        op = partial(npr_op, distribution, size)
        super(NumpyRV, self).__init__(name, op, *params)
class Prior(NumpyRV):
    # Alias: a prior is simply a numpy-backed random variable.
    pass
class Model(core.ObservedMixin, NumpyRV):
    def __init__(self, *args, observed=None, size=None, **kwargs):
        """Observed numpy-backed node.

        :param observed: observed data; must not be None.
        :param size: sample shape; defaults to the observed data's shape.
        """
        if observed is None:
            raise ValueError('Observed cannot be None')
        if size is None:
            size = observed.shape
        super(Model, self).__init__(*args, observed=observed, size=size, **kwargs)
| [
"functools.partial",
"numpy.random.RandomState"
] | [((178, 196), 'numpy.random.RandomState', 'npr.RandomState', (['(0)'], {}), '(0)\n', (193, 196), True, 'import numpy.random as npr\n'), ((737, 772), 'functools.partial', 'partial', (['npr_op', 'distribution', 'size'], {}), '(npr_op, distribution, size)\n', (744, 772), False, 'from functools import partial\n')] |
import os
import time
import numpy as np
"""
From http://wiki.scipy.org/Cookbook/SegmentAxis
"""
def segment_axis(a, length, overlap=0, axis=None, end='cut', endvalue=0):
    """Generate a new array that chops the given array along the given axis into overlapping frames.
    example:
    >>> segment_axis(np.arange(10), 4, 2)
    array([[0, 1, 2, 3],
       [2, 3, 4, 5],
       [4, 5, 6, 7],
       [6, 7, 8, 9]])
    arguments:
    a       The array to segment
    length  The length of each frame
    overlap The number of array elements by which the frames should overlap
    axis    The axis to operate on; if None, act on the flattened array
    end     What to do with the last frame, if the array is not evenly
            divisible into pieces. Options are:
            'cut'   Simply discard the extra values
            'wrap'  Copy values from the beginning of the array
            'pad'   Pad with a constant value
    endvalue    The value to use for end='pad'
    The array is not copied unless necessary (either because it is
    unevenly strided and being flattened or because end is set to
    'pad' or 'wrap').
    """

    if axis is None:
        a = np.ravel(a)  # may copy
        axis = 0

    l = a.shape[axis]

    if overlap >= length:
        raise ValueError("frames cannot overlap by more than 100%")
    if overlap < 0 or length <= 0:
        raise ValueError("overlap must be nonnegative and length must be positive")

    if l < length or (l - length) % (length - overlap):
        # The data does not fit an integer number of frames: round the
        # length up/down to the nearest frame boundary and cut/pad/wrap.
        if l > length:
            roundup = length + (1 + (l - length) // (length - overlap)) * (
                length - overlap)
            rounddown = length + ((l - length) // (length - overlap)) * (
                length - overlap)
        else:
            roundup = length
            rounddown = 0
        assert rounddown < l < roundup
        assert roundup == rounddown + (length - overlap) or (
            roundup == length and rounddown == 0)
        a = a.swapaxes(-1, axis)

        if end == 'cut':
            a = a[..., :rounddown]
        elif end in ['pad', 'wrap']:  # copying will be necessary
            s = list(a.shape)
            s[-1] = roundup
            b = np.empty(s, dtype=a.dtype)
            b[..., :l] = a
            if end == 'pad':
                b[..., l:] = endvalue
            elif end == 'wrap':
                b[..., l:] = a[..., :roundup - l]
            a = b

        a = a.swapaxes(-1, axis)

    l = a.shape[axis]
    if l == 0:
        raise ValueError(
            "Not enough data points to segment array in 'cut' mode; "
            "try 'pad' or 'wrap'")
    assert l >= length
    assert (l - length) % (length - overlap) == 0
    # Express the framing as a strided view: n frames stepping by
    # (length - overlap) elements along `axis`.
    n = 1 + (l - length) // (length - overlap)
    s = a.strides[axis]
    newshape = a.shape[:axis] + (n, length) + a.shape[axis + 1:]
    newstrides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[
        axis + 1:]

    if not a.flags.contiguous:
        a = a.copy()
        # The copy is contiguous, so the element stride along `axis` changed;
        # recompute it before building the window strides (the original code
        # reused the stale stride here, producing wrong frames).
        s = a.strides[axis]
        newstrides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[
            axis + 1:]
        return np.ndarray.__new__(np.ndarray, strides=newstrides,
                                  shape=newshape, buffer=a, dtype=a.dtype)

    try:
        return np.ndarray.__new__(np.ndarray, strides=newstrides,
                                  shape=newshape, buffer=a, dtype=a.dtype)
    # Was `except TypeError or ValueError`, which evaluates to TypeError only.
    except (TypeError, ValueError):
        import warnings  # `warnings` was used here without ever being imported
        warnings.warn("Problem with ndarray creation forces copy.")
        a = a.copy()
        # Shape doesn't change but strides do after the copy.
        s = a.strides[axis]
        newstrides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[
            axis + 1:]
        return np.ndarray.__new__(np.ndarray, strides=newstrides,
                                  shape=newshape, buffer=a, dtype=a.dtype)
def mkdir_p(path):
    """Create *path* recursively without raising if it already exists.

    An empty *path* is silently ignored (``os.makedirs('')`` raises
    FileNotFoundError); any other FileNotFoundError is re-raised instead
    of being hidden, which the original code did by mistake.

    :param path: path to create
    :return: None
    """
    try:
        os.makedirs(path)
    except FileExistsError:
        # Target already exists -- that is the desired end state.
        pass
    except FileNotFoundError:
        # Only the empty-path case is benign; anything else is a real error.
        if path != '':
            raise
class Timer(object):
    """Context manager that measures wall-clock execution time.

    Example usage::

        with Timer() as t:
            sleep(10)
        print(t.secs)
    """
    def __init__(self, verbose=False):
        # verbose: print the elapsed milliseconds when the block exits
        self.verbose = verbose
        self.secs = 0
        self.msecs = 0
        self.start = 0
        self.end = 0

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc_info):
        self.end = time.time()
        elapsed = self.end - self.start
        self.secs = elapsed
        self.msecs = elapsed * 1000  # millisecs
        if self.verbose:
            print('elapsed time: %f ms' % self.msecs)
| [
"os.makedirs",
"numpy.ravel",
"numpy.empty",
"numpy.ndarray.__new__",
"time.time"
] | [((1195, 1206), 'numpy.ravel', 'np.ravel', (['a'], {}), '(a)\n', (1203, 1206), True, 'import numpy as np\n'), ((3244, 3339), 'numpy.ndarray.__new__', 'np.ndarray.__new__', (['np.ndarray'], {'strides': 'newstrides', 'shape': 'newshape', 'buffer': 'a', 'dtype': 'a.dtype'}), '(np.ndarray, strides=newstrides, shape=newshape, buffer=a,\n dtype=a.dtype)\n', (3262, 3339), True, 'import numpy as np\n'), ((3395, 3490), 'numpy.ndarray.__new__', 'np.ndarray.__new__', (['np.ndarray'], {'strides': 'newstrides', 'shape': 'newshape', 'buffer': 'a', 'dtype': 'a.dtype'}), '(np.ndarray, strides=newstrides, shape=newshape, buffer=a,\n dtype=a.dtype)\n', (3413, 3490), True, 'import numpy as np\n'), ((4176, 4193), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (4187, 4193), False, 'import os\n'), ((4664, 4675), 'time.time', 'time.time', ([], {}), '()\n', (4673, 4675), False, 'import time\n'), ((4747, 4758), 'time.time', 'time.time', ([], {}), '()\n', (4756, 4758), False, 'import time\n'), ((3871, 3966), 'numpy.ndarray.__new__', 'np.ndarray.__new__', (['np.ndarray'], {'strides': 'newstrides', 'shape': 'newshape', 'buffer': 'a', 'dtype': 'a.dtype'}), '(np.ndarray, strides=newstrides, shape=newshape, buffer=a,\n dtype=a.dtype)\n', (3889, 3966), True, 'import numpy as np\n'), ((2235, 2261), 'numpy.empty', 'np.empty', (['s'], {'dtype': 'a.dtype'}), '(s, dtype=a.dtype)\n', (2243, 2261), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import networkx as nx
import numpy as np
import os
import re
import subprocess
from scipy.spatial.distance import pdist
from networkx.drawing.nx_pydot import write_dot
from scipy.sparse.csgraph import shortest_path
from sklearn.neighbors import BallTree
def shortest_path_distances(G):
    """Return the unweighted all-pairs shortest-path distances of ``G``
    as a flat array over the strict upper triangle (pairs i < j)."""
    node_order = np.arange(G.number_of_nodes())
    adjacency = nx.adjacency_matrix(G, nodelist=node_order)
    dist_matrix = shortest_path(csgraph=adjacency, directed=False,
                                unweighted=True)
    upper = np.triu_indices(G.number_of_nodes(), k=1)
    return dist_matrix[upper]
def stress(G, emb, data_dict=None, **kwargs):
    """
    Compute the normalized stress metric. This involves computing shortest
    path distances and pairwise embedding distances between ALL nodes.

    Graph distances are cached in ``data_dict["graph_dist"]`` (and written
    to disk on first computation) because they are expensive to obtain.
    The stress is minimized over a global scale factor ``alpha`` before
    being normalized by the number of node pairs.
    """
    print("Computing pairwise Euclidean distances...", end="")
    emb_dist = pdist(emb, metric="euclidean")
    print("Done.")
    # Shortest-path distances: reuse the in-memory cache, else load the
    # precomputed file, else compute from scratch and persist to disk.
    if "graph_dist" in data_dict:
        graph_dist = data_dict["graph_dist"]
    elif "graph_dist_file" in data_dict and os.path.isfile(data_dict["graph_dist_file"]):
        print("Reading shortest path distances from file...", end="")
        # Third column of the file holds the shortest-path distance.
        graph_dist = np.loadtxt(
            data_dict["graph_dist_file"], delimiter=",", dtype=int)[:, 2]
        data_dict["graph_dist"] = graph_dist
        print("Done.")
    else:
        print("Computing shortest paths...", end="")
        graph_dist = shortest_path_distances(G)
        print("Done.")
        indices = np.triu_indices(G.number_of_nodes(), k=1)
        filename = os.path.join("data", data_dict["name"] + "_graph_distances.txt")
        np.savetxt(filename,
                   np.hstack((indices[0][:, np.newaxis], indices[1]
                              [:, np.newaxis], graph_dist[:, np.newaxis])),
                   fmt="%d",
                   delimiter=",",
                   header="node1,node2,spd")
        # NOTE(review): the literal "(unknown)" below looks like it was
        # meant to interpolate ``filename`` -- confirm and fix the message.
        print(f"Saved graph shortest path distances to (unknown). " +
              f"Read them using 'graph_dist_file=(unknown)' in the {data_dict['name']} config for faster stress computation.")
        data_dict["graph_dist"] = graph_dist
    # Stress for a given global scaling ``alpha`` of the distance ratios.
    def stress_func(alpha):
        return np.sum((alpha * np.divide(emb_dist, graph_dist) - 1) ** 2)
    # Closed-form minimizer of stress_func over alpha.
    alpha = np.sum(np.divide(emb_dist, graph_dist)) / \
        np.sum(np.divide(emb_dist, graph_dist)**2)
    stress = stress_func(alpha) / len(graph_dist)
    return stress
def neighborhood_preservation(G, emb, k=2, **kwargs):
    """
    Compute the neighborhood preservation score from
    'DRGraph: An Efficient Graph Layout Algorithm for Large-scale
    Graphs by Dimensionality Reduction': the mean Jaccard similarity
    between each node's k-hop graph neighborhood and its equally sized
    nearest-neighbor set in the embedding space.

    Parameters
    ----------
    G : nx.graph
        The original graph to use for the evaluation.
    emb : ndarray
        Low-dimensional embedding of all nodes in G.
    k : int, default 2
        Maximum hop distance in G used to collect each node's neighbors.
        The node itself is excluded from its own neighborhood.

    Return
    -------
    neighborhood preservation score in [0,1]
    """
    node_count = G.number_of_nodes()
    tree = BallTree(emb, leaf_size=40)
    total_similarity = 0.0
    for node in range(node_count):
        hop_dists = nx.single_source_shortest_path_length(G, node, k)
        hop_dists.pop(node)
        graph_neighbors = hop_dists.keys()
        query_point = emb[node].reshape(1, -1)
        # Query one extra neighbor and drop the first hit (the node itself).
        nearest = tree.query(query_point, k=len(graph_neighbors) + 1,
                             return_distance=False)[0][1:]
        total_similarity += jaccard(graph_neighbors, nearest)
    return total_similarity / node_count
def jaccard(a, b):
    """Jaccard similarity of two collections: |a ∩ b| / (|a| + |b| - |a ∩ b|).

    Note the union size is derived from the raw lengths of ``a`` and ``b``,
    matching the original implementation.
    """
    overlap = len(set(a) & set(b))
    combined = len(a) + len(b) - overlap
    return float(overlap) / combined
def glam_scores(G, emb, glam_path=None, metrics="all", **kwargs):
    """
    Computes the scores using the glam implementation:
    Parameters
    ----------
    G : nx.graph
        The original graph to use for the evaluation.
    emb : ndarray
        Low-dimensional embedding of all nodes in G.
    glam_path: str
        Path to glam executable.
    metrics : str or list
        "all" (default metric set) or a list of glam metric names.
    Return
    -------
    dictionary with keys for each metric and values are the computed scores
    """
    # store embedding coordinates in networkx object
    pos_x = dict(zip(np.arange(0, len(G)), emb[:, 0]))
    pos_y = dict(zip(np.arange(0, len(G)), emb[:, 1]))
    # add embedding coordinates to copy of the graph
    tmp_G = G.copy()
    nx.set_node_attributes(tmp_G, pos_x, "x")
    nx.set_node_attributes(tmp_G, pos_y, "y")
    # glam consumes a Graphviz .dot file; write a temporary one in cwd.
    tmp_G_file = "tmp_G.dot"
    write_dot(tmp_G, tmp_G_file)
    if metrics == "all":
        metrics = ["crosslessness", "edge_length_cv",
                   "min_angle", "shape_gabriel"]
    cmd = [glam_path, tmp_G_file, "-m"] + metrics
    # Run the external glam binary and capture its stdout report.
    p1 = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    p1.wait()
    stdout, _ = p1.communicate()
    stdout = stdout.decode()
    # read output: each metric is reported on its own "name=value" line
    glam_eval = dict()
    stdout_lines = stdout.split("\n")
    for line in stdout_lines:
        if "=" in line:
            metric, val = re.split("=| ", line)[0:2]
            glam_eval[metric] = val
    # clean up the temporary .dot file
    if os.path.exists(tmp_G_file):
        os.remove(tmp_G_file)
    return glam_eval
| [
"numpy.divide",
"subprocess.Popen",
"os.remove",
"re.split",
"networkx.set_node_attributes",
"os.path.exists",
"networkx.single_source_shortest_path_length",
"numpy.hstack",
"os.path.isfile",
"sklearn.neighbors.BallTree",
"scipy.spatial.distance.pdist",
"networkx.drawing.nx_pydot.write_dot",
... | [((823, 853), 'scipy.spatial.distance.pdist', 'pdist', (['emb'], {'metric': '"""euclidean"""'}), "(emb, metric='euclidean')\n", (828, 853), False, 'from scipy.spatial.distance import pdist\n'), ((3228, 3255), 'sklearn.neighbors.BallTree', 'BallTree', (['emb'], {'leaf_size': '(40)'}), '(emb, leaf_size=40)\n', (3236, 3255), False, 'from sklearn.neighbors import BallTree\n'), ((4473, 4514), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['tmp_G', 'pos_x', '"""x"""'], {}), "(tmp_G, pos_x, 'x')\n", (4495, 4514), True, 'import networkx as nx\n'), ((4519, 4560), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['tmp_G', 'pos_y', '"""y"""'], {}), "(tmp_G, pos_y, 'y')\n", (4541, 4560), True, 'import networkx as nx\n'), ((4594, 4622), 'networkx.drawing.nx_pydot.write_dot', 'write_dot', (['tmp_G', 'tmp_G_file'], {}), '(tmp_G, tmp_G_file)\n', (4603, 4622), False, 'from networkx.drawing.nx_pydot import write_dot\n'), ((4812, 4857), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE)\n', (4828, 4857), False, 'import subprocess\n'), ((5165, 5191), 'os.path.exists', 'os.path.exists', (['tmp_G_file'], {}), '(tmp_G_file)\n', (5179, 5191), False, 'import os\n'), ((3306, 3352), 'networkx.single_source_shortest_path_length', 'nx.single_source_shortest_path_length', (['G', 'i', 'k'], {}), '(G, i, k)\n', (3343, 3352), True, 'import networkx as nx\n'), ((5201, 5222), 'os.remove', 'os.remove', (['tmp_G_file'], {}), '(tmp_G_file)\n', (5210, 5222), False, 'import os\n'), ((997, 1041), 'os.path.isfile', 'os.path.isfile', (["data_dict['graph_dist_file']"], {}), "(data_dict['graph_dist_file'])\n", (1011, 1041), False, 'import os\n'), ((1501, 1565), 'os.path.join', 'os.path.join', (['"""data"""', "(data_dict['name'] + '_graph_distances.txt')"], {}), "('data', data_dict['name'] + '_graph_distances.txt')\n", (1513, 1565), False, 'import os\n'), ((2213, 2244), 'numpy.divide', 'np.divide', (['emb_dist', 
'graph_dist'], {}), '(emb_dist, graph_dist)\n', (2222, 2244), True, 'import numpy as np\n'), ((1134, 1200), 'numpy.loadtxt', 'np.loadtxt', (["data_dict['graph_dist_file']"], {'delimiter': '""","""', 'dtype': 'int'}), "(data_dict['graph_dist_file'], delimiter=',', dtype=int)\n", (1144, 1200), True, 'import numpy as np\n'), ((1614, 1711), 'numpy.hstack', 'np.hstack', (['(indices[0][:, np.newaxis], indices[1][:, np.newaxis], graph_dist[:, np.\n newaxis])'], {}), '((indices[0][:, np.newaxis], indices[1][:, np.newaxis], graph_dist\n [:, np.newaxis]))\n', (1623, 1711), True, 'import numpy as np\n'), ((2265, 2296), 'numpy.divide', 'np.divide', (['emb_dist', 'graph_dist'], {}), '(emb_dist, graph_dist)\n', (2274, 2296), True, 'import numpy as np\n'), ((5094, 5115), 're.split', 're.split', (['"""=| """', 'line'], {}), "('=| ', line)\n", (5102, 5115), False, 'import re\n'), ((2150, 2181), 'numpy.divide', 'np.divide', (['emb_dist', 'graph_dist'], {}), '(emb_dist, graph_dist)\n', (2159, 2181), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def lithotrack(df: pd.DataFrame,
               codecols: list,
               percols: list,
               dtick: bool = False,
               lims: list = None,
               codedict: dict = None,
               fontsize=8,
               ax=None,
               correlation: pd.DataFrame = None,
               grid_numbers: list = [11, 51],
               steps: list = None,
               corr_kw=None):
    """Plot a cumulative (stacked) lithology track from coded columns.

    Parameters
    ----------
    df : pd.DataFrame
        Well data indexed by depth; ``codecols`` hold lithology codes and
        ``percols`` the matching percentage columns.
    codecols : list
        Column names containing lithology codes.
    percols : list
        Column names with the percentage value for each code column.
    dtick : bool, optional
        Show depth tick labels, by default False.
    lims : list, optional
        ``[top, bottom]`` limits on the reset (positional) index; when
        None the full range is used, by default None.
        NOTE(review): the limits are compared against the RangeIndex
        produced by ``reset_index`` below, not against depth values --
        confirm this is intended.
    codedict : dict, optional
        Mapping from lithology code to legend label, by default None.
    fontsize : int, optional
        Tick label font size, by default 8.
    ax : matplotlib axis, optional
        Axis to draw on; the current axis is used when None.
    correlation : pd.DataFrame, optional
        Unused; kept for interface compatibility.
    grid_numbers : list, optional
        Number of major/minor depth grid lines, by default [11, 51].
    steps : list, optional
        Major/minor grid step sizes; overrides ``grid_numbers`` when given.
    corr_kw : dict, optional
        Correlation line style overrides, by default None (fresh dict).
    """
    lit = ax or plt.gca()
    # Avoid the shared mutable-default pitfall: build a fresh dict per call.
    if corr_kw is None:
        corr_kw = {}
    def_corr_kw = {
        'color': 'red',
        'linestyle': '--',
        'linewidth': 2
    }
    for k, v in def_corr_kw.items():
        corr_kw.setdefault(k, v)
    df.index.names = ['depth']
    df = df.reset_index()
    # Resolve default limits BEFORE they are used for slicing (previously
    # a None value crashed on the .loc filter below).
    if lims is None:
        lims = [df.index.min(), df.index.max()]
    df = df.loc[(df.index >= lims[0]) & (df.index <= lims[1]), :]
    # Pivot each code column so every lithology code gets its own
    # percentage column, then concatenate the pivots side by side.
    mm = pd.DataFrame()
    for k, v in enumerate(codecols):
        m = df.pivot_table(index=['depth'], columns=[v], values=percols[k])
        mm = pd.concat([mm, m], axis=1)
    # Merge duplicated column names into a single column (row-wise max).
    mm = mm.fillna(0)
    lm = pd.DataFrame()
    for i in mm.columns.unique():
        if mm[i].ndim > 1:
            lm[i] = mm[i].max(axis=1)
        elif mm[i].ndim == 1:
            lm[i] = mm[i]
    # Drop the "no lithology" code 0 column when present.
    try:
        lm = lm.drop(columns=[0])
    except KeyError:
        pass
    # Cumulative sums across codes give the stacked fill boundaries.
    lmc = np.cumsum(lm, axis=1)
    for i, col in enumerate(lmc.columns):
        lit.fill_betweenx(lmc.index, lmc.iloc[:, i], label=codedict[col], zorder=-i)
    lit.set_ylim([lims[1], lims[0]])
    # Depth grid: line counts by default, explicit step sizes if given.
    if steps is None:
        mayor_grid = np.linspace(lims[0], lims[1], grid_numbers[0])
        minor_grid = np.linspace(lims[0], lims[1], grid_numbers[1])
    else:
        mayor_grid = np.arange(lims[0], lims[1], steps[0])
        minor_grid = np.arange(lims[0], lims[1], steps[1])
    lit.legend()
    lit.set_xlim([0, 100])
    lit.set_yticks(mayor_grid)
    lit.set_yticks(minor_grid, minor=True)
    if dtick:
        lit.set_yticklabels(mayor_grid)
    else:
        lit.set_yticklabels([])
    lit.set_xlabel("Lithology")
    lit.xaxis.tick_top()
    lit.xaxis.set_label_position("top")
    lit.tick_params("both", labelsize=fontsize)
def lithointtrack(depth, lint=None, dtick=False, lims=None, codedict=None, fontsize=8, ax=None):
    """Plot an interpreted-lithology track as stacked indicator fills.

    Parameters
    ----------
    depth : array-like
        Depth values; only used to derive default limits.
    lint : pd.Series, optional
        Interpreted lithology codes indexed by depth.
    dtick : bool, optional
        Show depth tick labels, by default False.
    lims : list, optional
        ``[top, bottom]`` depth limits; defaults to the full range of
        ``depth`` when None.
    codedict : dict, optional
        Mapping from lithology code to legend label.
    fontsize : int, optional
        Tick label font size, by default 8.
    ax : matplotlib axis, optional
        Axis to draw on; the current axis is used when None.
    """
    lth = ax or plt.gca()
    # Resolve default limits BEFORE slicing (previously a None value
    # crashed on the subscript in the filter below).
    if lims is None:
        lims = [depth.min(), depth.max()]
    master = lint[(lint.index >= lims[0]) & (lint.index <= lims[1])]
    # One 0/1 indicator column per lithology code.
    m = pd.get_dummies(master)
    # Drop the "no lithology" code 0 column when present.
    try:
        m = m.drop(columns=[0])
    except KeyError:
        pass
    for i, col in enumerate(m.columns):
        lth.fill_betweenx(m.index, m.iloc[:, i], label=codedict[col], zorder=-i)
    lth.set_ylim([lims[1], lims[0]])
    lth.legend()
    lth.set_xlim([0, 1])
    lth.set_yticks(np.linspace(lims[0], lims[1], 11))
    lth.set_yticks(np.linspace(lims[0], lims[1], 51), minor=True)
    if dtick:
        lth.set_yticklabels(np.linspace(lims[0], lims[1], 11))
    else:
        lth.set_yticklabels([])
    lth.set_xlabel("Lithology Interpreted")
    lth.xaxis.tick_top()
    lth.xaxis.set_label_position("top")
    lth.tick_params("both", labelsize=fontsize)
"pandas.DataFrame",
"pandas.get_dummies",
"numpy.cumsum",
"numpy.arange",
"numpy.linspace",
"matplotlib.pyplot.gca",
"pandas.concat"
] | [((1691, 1705), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1703, 1705), True, 'import pandas as pd\n'), ((1939, 1953), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1951, 1953), True, 'import pandas as pd\n'), ((2188, 2209), 'numpy.cumsum', 'np.cumsum', (['lm'], {'axis': '(1)'}), '(lm, axis=1)\n', (2197, 2209), True, 'import numpy as np\n'), ((3352, 3374), 'pandas.get_dummies', 'pd.get_dummies', (['master'], {}), '(master)\n', (3366, 3374), True, 'import pandas as pd\n'), ((1306, 1315), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1313, 1315), True, 'import matplotlib.pyplot as plt\n'), ((1831, 1857), 'pandas.concat', 'pd.concat', (['[mm, m]'], {'axis': '(1)'}), '([mm, m], axis=1)\n', (1840, 1857), True, 'import pandas as pd\n'), ((2551, 2597), 'numpy.linspace', 'np.linspace', (['lims[0]', 'lims[1]', 'grid_numbers[0]'], {}), '(lims[0], lims[1], grid_numbers[0])\n', (2562, 2597), True, 'import numpy as np\n'), ((2617, 2663), 'numpy.linspace', 'np.linspace', (['lims[0]', 'lims[1]', 'grid_numbers[1]'], {}), '(lims[0], lims[1], grid_numbers[1])\n', (2628, 2663), True, 'import numpy as np\n'), ((2693, 2730), 'numpy.arange', 'np.arange', (['lims[0]', 'lims[1]', 'steps[0]'], {}), '(lims[0], lims[1], steps[0])\n', (2702, 2730), True, 'import numpy as np\n'), ((2750, 2787), 'numpy.arange', 'np.arange', (['lims[0]', 'lims[1]', 'steps[1]'], {}), '(lims[0], lims[1], steps[1])\n', (2759, 2787), True, 'import numpy as np\n'), ((3275, 3284), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3282, 3284), True, 'import matplotlib.pyplot as plt\n'), ((3778, 3811), 'numpy.linspace', 'np.linspace', (['lims[0]', 'lims[1]', '(11)'], {}), '(lims[0], lims[1], 11)\n', (3789, 3811), True, 'import numpy as np\n'), ((3830, 3863), 'numpy.linspace', 'np.linspace', (['lims[0]', 'lims[1]', '(51)'], {}), '(lims[0], lims[1], 51)\n', (3841, 3863), True, 'import numpy as np\n'), ((3924, 3957), 'numpy.linspace', 'np.linspace', (['lims[0]', 'lims[1]', '(11)'], 
{}), '(lims[0], lims[1], 11)\n', (3935, 3957), True, 'import numpy as np\n')] |
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import patches, lines
from matplotlib.patches import Polygon
from skimage import io, color
import numpy as np
import sqlite3
import datetime
import json
import cv2
from mrcnn import visualize, utils
# Overlay detected solar features (from the sqlite results DB) on the
# corresponding full-disk image and display it with matplotlib.
image_path = "./images/"
#image = io.imread(image_path, as_gray=True)
#image = (image*255).astype(np.uint8)
#flat = image.flatten()
#index = np.where(flat == 0)
#plt.imshow(image, cmap='gray')
#plt.show()
#idxlist = json.dumps(index[0].tolist())
#print(idxlist)
conn = sqlite3.connect('./results/filament.db')
cur = conn.execute('select * from FILE where FILE_NAME="2017-10-23_13-26-48.png"')
ax=None
colors=None
for row in cur.fetchall():
    # row: (file name, ..., disk center x, disk center y, disk radius)
    image = io.imread(image_path+row[0])
    #image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #draw solar disk
    cv2.circle(image, (row[3], row[4]), row[5], (255,255,255), 1)
    features = conn.execute("select * from SOLAR_FEATURES where FILE_NAME=?",[row[0]])
    rowcount = features.fetchall()
    # NOTE(review): np.int was removed in NumPy 1.24 -- use int here.
    boxes = np.zeros([len(rowcount),4],dtype=np.int)
    class_ids = np.zeros([len(rowcount)],dtype=np.uint8)
    class_names = ['BG', 'filament', 'prominence', 'sunspot', 'plage']
    masks = np.zeros([image.shape[0], image.shape[1], len(rowcount)],dtype=np.uint8)
    i=0
    for feature in rowcount:
        # feature[1]: JSON bounding box [y1, x1, y2, x2]; feature[2]: class id
        bbox = json.loads(feature[1])
        boxes[i,:] = np.asarray(bbox)
        class_ids[i] = feature[2]
        if(feature[2]=='1'):
            # Class 1 = filament: fetch its area and spine pixel indices.
            filaments = conn.execute('select * from FILAMENTS where FILE_NAME=? AND BOUNDING_BOX=?',[row[0],str(bbox)])
            for filament in filaments.fetchall():
                area_index = json.loads(filament[2])
                spine_index = json.loads(filament[3])
                h = int(bbox[2]) - int(bbox[0])
                w = int(bbox[3]) - int(bbox[1])
                #build filament area
                area = np.ones([h*w], dtype=np.uint8)*255
                for a in area_index:
                    area[a] = 0
                area = area.reshape((h,w))
                area_mask = np.zeros_like(area)
                area_mask = np.where(area==0,1,0)
                masks[boxes[i,0]:boxes[i,2],boxes[i,1]:boxes[i,3],i] = area_mask
                #build filament spine
                spine = np.ones([h*w], dtype=np.uint8)*255
                for s in spine_index:
                    spine[s] = 0
                spine = spine.reshape((h,w))
                #draw spine
                for d in range(3):
                    image[bbox[0]:bbox[2],bbox[1]:bbox[3],d] = \
                        np.where(spine==0,0,image[bbox[0]:bbox[2],bbox[1]:bbox[3],d])
        i=i+1
    #visualize
    N = boxes.shape[0]
    if not ax:
        _, ax = plt.subplots(1, figsize=(16,16))
        auto_show = True
    colors = colors or visualize.random_colors(N)
    masked_image = image.astype(np.uint32).copy()
    for i in range(N):
        color = colors[i]
        # Bounding box
        if not np.any(boxes[i]):
            continue
        y1, x1, y2, x2 = boxes[i]
        p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=1,
                        alpha=0.4, linestyle="dashed", edgecolor=color, facecolor='none')
        ax.add_patch(p)
        #masks
        mask = masks[:, :, i]
        # Blend the mask color into the image at 40% opacity.
        for c in range(3):
            image[:, :, c] = np.where(mask == 1, image[:, :, c] * (1 - 0.4) + 0.4 * color[c] * 255, image[:, :, c])
    ax.imshow(image)
plt.show()
#conn.close()
| [
"cv2.circle",
"matplotlib.pyplot.show",
"json.loads",
"numpy.zeros_like",
"matplotlib.patches.Rectangle",
"numpy.asarray",
"numpy.ones",
"numpy.any",
"numpy.where",
"sqlite3.connect",
"mrcnn.visualize.random_colors",
"matplotlib.pyplot.subplots",
"skimage.io.imread"
] | [((539, 579), 'sqlite3.connect', 'sqlite3.connect', (['"""./results/filament.db"""'], {}), "('./results/filament.db')\n", (554, 579), False, 'import sqlite3\n'), ((720, 750), 'skimage.io.imread', 'io.imread', (['(image_path + row[0])'], {}), '(image_path + row[0])\n', (729, 750), False, 'from skimage import io, color\n'), ((820, 883), 'cv2.circle', 'cv2.circle', (['image', '(row[3], row[4])', 'row[5]', '(255, 255, 255)', '(1)'], {}), '(image, (row[3], row[4]), row[5], (255, 255, 255), 1)\n', (830, 883), False, 'import cv2\n'), ((3110, 3120), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3118, 3120), True, 'import matplotlib.pyplot as plt\n'), ((1298, 1320), 'json.loads', 'json.loads', (['feature[1]'], {}), '(feature[1])\n', (1308, 1320), False, 'import json\n'), ((1336, 1352), 'numpy.asarray', 'np.asarray', (['bbox'], {}), '(bbox)\n', (1346, 1352), True, 'import numpy as np\n'), ((2381, 2414), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(16, 16)'}), '(1, figsize=(16, 16))\n', (2393, 2414), True, 'import matplotlib.pyplot as plt\n'), ((2453, 2479), 'mrcnn.visualize.random_colors', 'visualize.random_colors', (['N'], {}), '(N)\n', (2476, 2479), False, 'from mrcnn import visualize, utils\n'), ((2709, 2837), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x1, y1)', '(x2 - x1)', '(y2 - y1)'], {'linewidth': '(1)', 'alpha': '(0.4)', 'linestyle': '"""dashed"""', 'edgecolor': 'color', 'facecolor': '"""none"""'}), "((x1, y1), x2 - x1, y2 - y1, linewidth=1, alpha=0.4,\n linestyle='dashed', edgecolor=color, facecolor='none')\n", (2726, 2837), False, 'from matplotlib import patches, lines\n'), ((2624, 2640), 'numpy.any', 'np.any', (['boxes[i]'], {}), '(boxes[i])\n', (2630, 2640), True, 'import numpy as np\n'), ((3003, 3093), 'numpy.where', 'np.where', (['(mask == 1)', '(image[:, :, c] * (1 - 0.4) + 0.4 * color[c] * 255)', 'image[:, :, c]'], {}), '(mask == 1, image[:, :, c] * (1 - 0.4) + 0.4 * color[c] * 255,\n image[:, :, c])\n', 
(3011, 3093), True, 'import numpy as np\n'), ((1573, 1596), 'json.loads', 'json.loads', (['filament[2]'], {}), '(filament[2])\n', (1583, 1596), False, 'import json\n'), ((1615, 1638), 'json.loads', 'json.loads', (['filament[3]'], {}), '(filament[3])\n', (1625, 1638), False, 'import json\n'), ((1875, 1894), 'numpy.zeros_like', 'np.zeros_like', (['area'], {}), '(area)\n', (1888, 1894), True, 'import numpy as np\n'), ((1911, 1936), 'numpy.where', 'np.where', (['(area == 0)', '(1)', '(0)'], {}), '(area == 0, 1, 0)\n', (1919, 1936), True, 'import numpy as np\n'), ((1750, 1782), 'numpy.ones', 'np.ones', (['[h * w]'], {'dtype': 'np.uint8'}), '([h * w], dtype=np.uint8)\n', (1757, 1782), True, 'import numpy as np\n'), ((2045, 2077), 'numpy.ones', 'np.ones', (['[h * w]'], {'dtype': 'np.uint8'}), '([h * w], dtype=np.uint8)\n', (2052, 2077), True, 'import numpy as np\n'), ((2255, 2322), 'numpy.where', 'np.where', (['(spine == 0)', '(0)', 'image[bbox[0]:bbox[2], bbox[1]:bbox[3], d]'], {}), '(spine == 0, 0, image[bbox[0]:bbox[2], bbox[1]:bbox[3], d])\n', (2263, 2322), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 23 11:14:55 2020
@author: ST16
"""
import matplotlib.pyplot as plt
import numpy as np
normal_samples = np.random.normal(size = 100000) # draw 100,000 samples from the standard normal distribution (mean 0, std 1)
uniform_samples = np.random.uniform(size = 100000) # draw 100,000 samples uniformly distributed between 0 and 1
# Histogram of the normal samples.
plt.hist(normal_samples)
plt.show()
# Histogram of the uniform samples.
plt.hist(uniform_samples)
plt.show()
"numpy.random.uniform",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show",
"numpy.random.normal"
] | [((153, 182), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100000)'}), '(size=100000)\n', (169, 182), True, 'import numpy as np\n'), ((248, 278), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100000)'}), '(size=100000)\n', (265, 278), True, 'import numpy as np\n'), ((315, 339), 'matplotlib.pyplot.hist', 'plt.hist', (['normal_samples'], {}), '(normal_samples)\n', (323, 339), True, 'import matplotlib.pyplot as plt\n'), ((340, 350), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (348, 350), True, 'import matplotlib.pyplot as plt\n'), ((351, 376), 'matplotlib.pyplot.hist', 'plt.hist', (['uniform_samples'], {}), '(uniform_samples)\n', (359, 376), True, 'import matplotlib.pyplot as plt\n'), ((377, 387), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (385, 387), True, 'import matplotlib.pyplot as plt\n')] |
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import os
import numpy as np
def unpickle(file):
    """Load one pickled CIFAR-10 batch file.

    :param file: path to a CIFAR-10 batch pickled with bytes keys
    :return: tuple ``(images, labels)`` where ``images`` is a float64
        array of shape (N, 3, 32, 32) and ``labels`` is a list of ints
    """
    import pickle
    with open(file, 'rb') as fo:
        # Renamed from 'dict', which shadowed the builtin.
        batch = pickle.load(fo, encoding='bytes')
    images = batch[b'data'].reshape((-1, 3, 32, 32)).astype(np.float64)
    return images, batch[b'labels']
class Cifar10Train(Dataset):
    """CIFAR-10 training set: the five pickled batch files concatenated."""

    _BATCHES = ('data_batch_1', 'data_batch_2', 'data_batch_3',
                'data_batch_4', 'data_batch_5')

    def __init__(self, dataset_path):
        super(Cifar10Train, self).__init__()
        self.data_dict = dict()
        self.dataset_path = dataset_path
        images = []
        labels = []
        for batch_name in self._BATCHES:
            batch_images, batch_labels = unpickle(
                os.path.join(self.dataset_path, batch_name))
            images.append(batch_images)
            labels = labels + batch_labels
        self.data = np.concatenate(images, axis=0)
        self.labels = labels

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx], self.labels[idx]
class Cifar10Valid(Dataset):
    """CIFAR-10 validation set backed by the single pickled 'test_batch'."""

    def __init__(self, dataset_path):
        super(Cifar10Valid, self).__init__()
        self.data_dict = dict()
        self.dataset_path = dataset_path
        batch_file = os.path.join(self.dataset_path, 'test_batch')
        self.data, self.label = unpickle(batch_file)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx], self.label[idx]
if __name__ == '__main__':
    # Smoke test: load the full training set from a hard-coded local path
    # and iterate it once, printing each batch's shape and labels.
    dataset = Cifar10Train('/home/josh/Data/cifar-10-python/cifar-10-batches-py/')
    dataloader = DataLoader(dataset, batch_size=2, shuffle=False, num_workers=1)
    print('successfully loaded {} images and labels'.format(len(dataloader.dataset)))
    for step, item in enumerate(dataloader):
        data, label = item
        print(data.shape, label)
"pickle.load",
"os.path.join",
"numpy.concatenate",
"torch.utils.data.DataLoader"
] | [((1849, 1912), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(2)', 'shuffle': '(False)', 'num_workers': '(1)'}), '(dataset, batch_size=2, shuffle=False, num_workers=1)\n', (1859, 1912), False, 'from torch.utils.data import DataLoader\n'), ((207, 240), 'pickle.load', 'pickle.load', (['fo'], {'encoding': '"""bytes"""'}), "(fo, encoding='bytes')\n", (218, 240), False, 'import pickle\n'), ((998, 1057), 'numpy.concatenate', 'np.concatenate', (['[data1, data2, data3, data4, data5]'], {'axis': '(0)'}), '([data1, data2, data3, data4, data5], axis=0)\n', (1012, 1057), True, 'import numpy as np\n'), ((600, 647), 'os.path.join', 'os.path.join', (['self.dataset_path', '"""data_batch_1"""'], {}), "(self.dataset_path, 'data_batch_1')\n", (612, 647), False, 'import os\n'), ((682, 729), 'os.path.join', 'os.path.join', (['self.dataset_path', '"""data_batch_2"""'], {}), "(self.dataset_path, 'data_batch_2')\n", (694, 729), False, 'import os\n'), ((764, 811), 'os.path.join', 'os.path.join', (['self.dataset_path', '"""data_batch_3"""'], {}), "(self.dataset_path, 'data_batch_3')\n", (776, 811), False, 'import os\n'), ((846, 893), 'os.path.join', 'os.path.join', (['self.dataset_path', '"""data_batch_4"""'], {}), "(self.dataset_path, 'data_batch_4')\n", (858, 893), False, 'import os\n'), ((928, 975), 'os.path.join', 'os.path.join', (['self.dataset_path', '"""data_batch_5"""'], {}), "(self.dataset_path, 'data_batch_5')\n", (940, 975), False, 'import os\n'), ((1539, 1584), 'os.path.join', 'os.path.join', (['self.dataset_path', '"""test_batch"""'], {}), "(self.dataset_path, 'test_batch')\n", (1551, 1584), False, 'import os\n')] |
import numpy as np
from predicu.data import CUM_COLUMNS
from predicu.preprocessing import preprocess_bedcounts
from predicu.tests.utils import load_test_data
def test_bedcounts_data_preprocessing():
    """Preprocessed bedcounts must be non-empty, and every cumulative
    column must be non-decreasing per ICU when ordered by date."""
    raw = load_test_data()
    processed = preprocess_bedcounts(raw["bedcounts"])
    assert len(processed) > 0
    for _, icu_frame in processed.groupby("icu_name"):
        ordered = icu_frame.sort_values(by="date")
        for column in CUM_COLUMNS:
            deltas = ordered[column].diff(1).fillna(0).values
            assert np.all(deltas >= 0)
| [
"predicu.tests.utils.load_test_data",
"numpy.all",
"predicu.preprocessing.preprocess_bedcounts"
] | [((218, 234), 'predicu.tests.utils.load_test_data', 'load_test_data', ([], {}), '()\n', (232, 234), False, 'from predicu.tests.utils import load_test_data\n'), ((254, 298), 'predicu.preprocessing.preprocess_bedcounts', 'preprocess_bedcounts', (["test_data['bedcounts']"], {}), "(test_data['bedcounts'])\n", (274, 298), False, 'from predicu.preprocessing import preprocess_bedcounts\n'), ((533, 551), 'numpy.all', 'np.all', (['(diffs >= 0)'], {}), '(diffs >= 0)\n', (539, 551), True, 'import numpy as np\n')] |
#
# Author: <NAME>
# Copyright 2016
#
import os
import isceobj
import logging
import numpy as np
from imageMath import IML
def runCropOffsetGeo(self):
    '''
    Crops and resamples lat/lon/los/z images created by topsApp to the
    same grid as the offset field image.

    Single-band topo products (lat/lon/z) are written as double-precision
    BIP files; the two-band LOS product is written as a float BIL file.
    Each output is placed next to its input in the merged directory with
    a '.crop' suffix, plus a rendered ISCE header.
    '''
    print('\n====================================')
    print('Cropping topo products to offset grid...')
    print('====================================')
    # Full-resolution products carry a '.full' suffix unless no looks
    # were taken.
    suffix = '.full'
    if (self.numberRangeLooks == 1) and (self.numberAzimuthLooks == 1):
        suffix=''
    flist1b = ['lat.rdr'+suffix, 'lon.rdr'+suffix, 'z.rdr'+suffix]
    flist2b = [self._insar.mergedLosName+suffix]
    # Last column/row (exclusive) covered by the offset grid.
    wend = (self.offset_width*self.skipwidth) + self.offset_left
    lend = (self.offset_length*self.skiphgt) + self.offset_top
    # Single-band products: take every skiphgt-th row and skipwidth-th
    # column inside the offset window.
    for filename in flist1b:
        print('\nCropping %s to %s ...\n' % (filename,filename+'.crop'))
        f = os.path.join(self._insar.mergedDirname, filename)
        outArr = []
        mmap = IML.mmapFromISCE(f,logging)
        '''
        for i in range(self.offset_top, mmap.length, self.skiphgt):
            outArr.append(mmap.bands[0][i][self.offset_left::self.skipwidth])
        '''
        for i in range(self.offset_top, lend, self.skiphgt):
            outArr.append(mmap.bands[0][i][self.offset_left:wend:self.skipwidth])
        outFile = os.path.join(self._insar.mergedDirname, filename+'.crop')
        outImg = isceobj.createImage()
        outImg.bands = 1
        outImg.scheme = 'BIP'
        outImg.dataType = 'DOUBLE'
        outImg.setWidth(len(outArr[0]))
        outImg.setLength(len(outArr))
        outImg.setFilename(outFile)
        with open(outFile,'wb') as fid:
            for i in range(len(outArr)):
                np.array(outArr[i]).astype(np.double).tofile(fid) ### WAY easier to write to file like this
        outImg.renderHdr()
        print('Cropped %s' % (filename))
    # Two-band (LOS) product: crop both channels and interleave per line
    # (band 1 line, then band 2 line -- BIL layout).
    for filename in flist2b:
        print('\nCropping %s to %s ...\n' % (filename,filename+'.crop'))
        f = os.path.join(self._insar.mergedDirname, filename)
        outArrCh1 = []
        outArrCh2 = []
        mmap = IML.mmapFromISCE(f,logging)
        '''
        for i in range(self.offset_top, mmap.length, self.skiphgt):
            outArrCh1.append(mmap.bands[0][i][self.offset_left::self.skipwidth])
            outArrCh2.append(mmap.bands[1][i][self.offset_left::self.skipwidth])
        '''
        for i in range(self.offset_top, lend, self.skiphgt):
            outArrCh1.append(mmap.bands[0][i][self.offset_left:wend:self.skipwidth])
            outArrCh2.append(mmap.bands[1][i][self.offset_left:wend:self.skipwidth])
        outFile = os.path.join(self._insar.mergedDirname, filename+'.crop')
        outImg = isceobj.createImage()
        outImg.bands = 2
        outImg.scheme = 'BIL'
        outImg.dataType = 'FLOAT'
        outImg.setWidth(len(outArrCh1[0]))
        outImg.setLength(len(outArrCh1))
        outImg.setFilename(outFile)
        with open(outFile,'wb') as fid:
            for i in range(len(outArrCh1)):
                np.array(outArrCh1[i]).astype(np.float32).tofile(fid)
                np.array(outArrCh2[i]).astype(np.float32).tofile(fid)
        outImg.renderHdr()
        print('Cropped %s' % (filename))
if __name__ == "__main__":
    '''
    Default run method for runCropOffsetGeo.
    '''
    # NOTE(review): main() is not defined in this module; presumably it is
    # provided by the enclosing ISCE application framework -- confirm
    # before running this file directly.
    main()
| [
"numpy.array",
"isceobj.createImage",
"os.path.join",
"imageMath.IML.mmapFromISCE"
] | [((913, 962), 'os.path.join', 'os.path.join', (['self._insar.mergedDirname', 'filename'], {}), '(self._insar.mergedDirname, filename)\n', (925, 962), False, 'import os\n'), ((998, 1026), 'imageMath.IML.mmapFromISCE', 'IML.mmapFromISCE', (['f', 'logging'], {}), '(f, logging)\n', (1014, 1026), False, 'from imageMath import IML\n'), ((1358, 1417), 'os.path.join', 'os.path.join', (['self._insar.mergedDirname', "(filename + '.crop')"], {}), "(self._insar.mergedDirname, filename + '.crop')\n", (1370, 1417), False, 'import os\n'), ((1433, 1454), 'isceobj.createImage', 'isceobj.createImage', ([], {}), '()\n', (1452, 1454), False, 'import isceobj\n'), ((2033, 2082), 'os.path.join', 'os.path.join', (['self._insar.mergedDirname', 'filename'], {}), '(self._insar.mergedDirname, filename)\n', (2045, 2082), False, 'import os\n'), ((2144, 2172), 'imageMath.IML.mmapFromISCE', 'IML.mmapFromISCE', (['f', 'logging'], {}), '(f, logging)\n', (2160, 2172), False, 'from imageMath import IML\n'), ((2676, 2735), 'os.path.join', 'os.path.join', (['self._insar.mergedDirname', "(filename + '.crop')"], {}), "(self._insar.mergedDirname, filename + '.crop')\n", (2688, 2735), False, 'import os\n'), ((2751, 2772), 'isceobj.createImage', 'isceobj.createImage', ([], {}), '()\n', (2770, 2772), False, 'import isceobj\n'), ((1756, 1775), 'numpy.array', 'np.array', (['outArr[i]'], {}), '(outArr[i])\n', (1764, 1775), True, 'import numpy as np\n'), ((3082, 3104), 'numpy.array', 'np.array', (['outArrCh1[i]'], {}), '(outArrCh1[i])\n', (3090, 3104), True, 'import numpy as np\n'), ((3152, 3174), 'numpy.array', 'np.array', (['outArrCh2[i]'], {}), '(outArrCh2[i])\n', (3160, 3174), True, 'import numpy as np\n')] |
import argparse
import os
import numpy as np
import torch
from torch import nn
from hparams import HParam
from lpcnet_bunched import MDense, LPCNetModelBunch
# Running maxima collected while dumping layers; emitted at the end of
# dump_lpcnet as MAX_RNN_NEURONS / MAX_CONV_INPUTS / MAX_MDENSE_TMP so the
# C code can size its scratch buffers.
max_rnn_neurons = 1
max_conv_inputs = 1
max_mdense_tmp = 1
def pk_convert_input_kernel(kernel):
    """Convert a PyTorch GRU input kernel (gate blocks stacked row-wise in
    r, z, h order) into the Keras layout: transposed gate blocks laid out
    column-wise in z, r, h order."""
    gate_r, gate_z, gate_h = np.vsplit(kernel, 3)
    return np.hstack((gate_z.T, gate_r.T, gate_h.T))
def pk_convert_recurrent_kernel(kernel):
    """Convert a PyTorch GRU recurrent kernel (row-stacked r, z, h gate
    blocks) to the Keras layout (column-stacked z, r, h, transposed)."""
    n = kernel.shape[0] // 3
    # Take the gate blocks in z, r, h order (rows 1, 0, 2) and transpose.
    blocks = [kernel[i * n:(i + 1) * n].T for i in (1, 0, 2)]
    return np.hstack(blocks)
def pk_convert_bias(bias):
    """Reorder a concatenated (input, recurrent) GRU bias vector from the
    PyTorch gate order (r, z, h) to the Keras order (z, r, h), keeping the
    input/recurrent halves separate."""
    halves = bias.reshape(2, 3, -1)
    reordered = halves[:, (1, 0, 2), :]
    return reordered.reshape(-1)
def convert_recurrent_kernel(kernel):
    """Swap the r and z gate blocks of a row-stacked kernel (r, z, h ->
    z, r, h) and lay the three blocks out side by side (no transpose)."""
    n = kernel.shape[0] // 3
    blk_r, blk_z, blk_h = (kernel[i * n:(i + 1) * n] for i in range(3))
    return np.hstack((blk_z, blk_r, blk_h))
def re_convert_recurrent_kernel(kernel):
    """Inverse of convert_recurrent_kernel: split the column-stacked
    (z, r, h) gate blocks and restack them row-wise in (r, z, h) order."""
    blk_z, blk_r, blk_h = np.hsplit(kernel, 3)
    return np.vstack((blk_r, blk_z, blk_h))
def dump_layer_ignore(self, name, f, hf):
    """Fallback dump hook: log that the layer is skipped and return False
    (no state slot is allocated for it)."""
    print("ignoring layer " + name + " of type " + type(self).__name__)
    return False

# Default for every nn.Module: layers without a specialised dumper are skipped.
nn.Module.dump_layer = dump_layer_ignore
def printSparseVector(f, A, name):
    """Emit a 16-column block-sparse encoding of the GRU recurrent matrix
    *A* (layout: three N x N gate blocks side by side) as C arrays.

    Writes three arrays via printVector:
      - name + '_diag': the three gate-block diagonals, concatenated;
      - name:           the retained non-zero 1x16 row blocks;
      - name + '_idx':  per 16-wide column block, a count followed by the
                        row indices of the retained blocks.

    NOTE: mutates *A* in place (the diagonals are subtracted out before
    the sparsity scan).
    """
    print("A.size: ", A.shape)
    N = A.shape[0]
    W = np.zeros((0,))
    # Pull out the diagonal of each of the three gate blocks; it is stored
    # densely in its own array ...
    diag = np.concatenate([np.diag(A[:, :N]), np.diag(A[:, N:2 * N]), np.diag(A[:, 2 * N:])])
    # ... and removed from A so the block scan below does not re-emit it.
    A[:, :N] = A[:, :N] - np.diag(np.diag(A[:, :N]))
    A[:, N:2 * N] = A[:, N:2 * N] - np.diag(np.diag(A[:, N:2 * N]))
    A[:, 2 * N:] = A[:, 2 * N:] - np.diag(np.diag(A[:, 2 * N:]))
    printVector(f, diag, name + '_diag')
    idx = np.zeros((0,), dtype='int')
    for i in range(3 * N // 16):  # one pass per 16-wide column block
        pos = idx.shape[0]
        idx = np.append(idx, -1)  # placeholder, patched with the count below
        nb_nonzero = 0
        for j in range(N):
            # Keep a 1x16 block only if it carries non-negligible weight.
            if np.sum(np.abs(A[j, i * 16:(i + 1) * 16])) > 1e-10:
                nb_nonzero = nb_nonzero + 1
                idx = np.append(idx, j)
                W = np.concatenate([W, A[j, i * 16:(i + 1) * 16]])
        idx[pos] = nb_nonzero
    printVector(f, W, name)
    print("len(idx): ", idx.shape)
    # idx = np.tile(np.concatenate([np.array([N]), np.arange(N)]), 3*N//16)
    printVector(f, idx, name + '_idx', dtype='int')
    return
def printVector(f, vector, name, dtype='float'):
    """Emit *vector* (flattened) as a C array definition named *name* into
    the open file *f*; *dtype* selects the C element type.  Values are
    written eight per line, comma separated."""
    v = np.reshape(vector, (-1))
    f.write('static const {} {}[{}] = {{\n   '.format(dtype, name, len(v)))
    last = len(v) - 1
    for i, val in enumerate(v):
        f.write('{}'.format(val))
        if i == last:
            break  # no separator after the final value
        f.write(',')
        # Line break every 8 values, a single space otherwise.
        f.write("\n   " if i % 8 == 7 else " ")
    f.write('\n};\n\n')
    return
def dump_embedding_layer_impl(name, weights, f, hf):
    """Write an EmbeddingLayer struct (weight table + dimensions) to the C
    source file *f*, with the size macro and extern declaration in *hf*."""
    rows, cols = weights.shape[0], weights.shape[1]
    printVector(f, weights, name + '_weights')
    struct_def = 'const EmbeddingLayer {} = {{\n   {}_weights,\n   {}, {}\n}};\n\n'.format(name, name, rows, cols)
    f.write(struct_def)
    hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), cols))
    hf.write('extern const EmbeddingLayer {};\n\n'.format(name))
def dump_dense_layer_impl(name, weights, bias, activation, f, hf):
    """Write a DenseLayer struct (weights, bias, sizes, activation) to the
    C source file *f*, with the size macro and extern declaration in *hf*."""
    n_in, n_out = weights.shape[0], weights.shape[1]
    printVector(f, weights, name + '_weights')
    printVector(f, bias, name + '_bias')
    f.write('const DenseLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}, {}, ACTIVATION_{}\n}};\n\n'
            .format(name, name, name, n_in, n_out, activation))
    hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), n_out))
    hf.write('extern const DenseLayer {};\n\n'.format(name))
def dump_sparse_gru(self, f, hf):
    """Dump GRU-A as a SparseGRULayer: diagonal + block-sparse recurrent
    weights (via printSparseVector) plus the merged, gate-reordered bias.

    Updates the global max_rnn_neurons so the header can size scratch
    buffers for the largest RNN in the model.  *self* is the torch GRU.
    """
    global max_rnn_neurons
    name = 'sparse_gru_a'
    print("printing layer " + name + " of type sparse " + self.__class__.__name__)
    # Convert the PyTorch (r, z, h) gate layout to the Keras-style
    # (z, r, h) layout expected by the C inference code.
    weight_ih_l0 = pk_convert_input_kernel(self.weight_ih_l0.detach().numpy())
    weight_hh_l0 = pk_convert_recurrent_kernel(self.weight_hh_l0.detach().numpy())
    bias_ih_l0 = self.bias_ih_l0.detach().numpy().reshape(-1)
    bias_hh_l0 = self.bias_hh_l0.detach().numpy().reshape(-1)
    bias = np.concatenate((bias_ih_l0, bias_hh_l0))
    bias = pk_convert_bias(bias)
    printSparseVector(f, weight_hh_l0, name + '_recurrent_weights')
    printVector(f, bias, name + '_bias')
    if hasattr(self, 'activation'):
        activation = self.activation.__name__.upper()
    else:
        activation = 'TANH'  # torch GRU exposes no 'activation' attribute
    if hasattr(self, 'reset_after') and not self.reset_after:
        reset_after = 0
    else:
        reset_after = 1
    neurons = weight_ih_l0.shape[1] // 3  # hidden size (3 gates stacked)
    max_rnn_neurons = max(max_rnn_neurons, neurons)
    f.write(
        'const SparseGRULayer {} = {{\n   {}_bias,\n   {}_recurrent_weights_diag,\n   {}_recurrent_weights,\n   {}_recurrent_weights_idx,\n   {}, ACTIVATION_{}, {}\n}};\n\n'
        .format(name, name, name, name, name, weight_ih_l0.shape[1] // 3, activation, reset_after))
    hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weight_ih_l0.shape[1] // 3))
    hf.write('#define {}_STATE_SIZE {}\n'.format(name.upper(), weight_ih_l0.shape[1] // 3))
    hf.write('extern const SparseGRULayer {};\n\n'.format(name))
    return True
def dump_gru_layer(self, name, f, hf):
    """Dump a dense GRU layer as a GRULayer struct: gate-reordered input
    and recurrent kernels plus the merged bias, with OUT/STATE size macros
    in the header.  Updates the global max_rnn_neurons bound.

    Returns True so the layer is given a state slot in NNetState.
    """
    global max_rnn_neurons
    print("printing layer " + name + " of type " + self.__class__.__name__)
    W_0 = self.weight_ih_l0.detach().numpy()
    W0 = pk_convert_input_kernel(W_0)  # convert PyTorch layout to the Keras layout
    W_1 = self.weight_hh_l0.detach().numpy()
    W1 = pk_convert_recurrent_kernel(W_1)  # convert PyTorch layout to the Keras layout
    bias_ih_l0 = self.bias_ih_l0.detach().numpy().reshape(-1)
    bias_hh_l0 = self.bias_hh_l0.detach().numpy().reshape(-1)
    bias = np.concatenate((bias_ih_l0, bias_hh_l0))
    b = pk_convert_bias(bias)
    printVector(f, W0, name + '_weights')
    printVector(f, W1, name + '_recurrent_weights')
    printVector(f, b, name + '_bias')
    if hasattr(self, 'activation'):
        activation = self.activation.__name__.upper()
    else:
        activation = 'TANH'  # torch GRU exposes no 'activation' attribute
    if hasattr(self, 'reset_after') and not self.reset_after:
        reset_after = 0
    else:
        reset_after = 1
    neurons = W0.shape[1] // 3  # hidden size (3 gates stacked)
    max_rnn_neurons = max(max_rnn_neurons, neurons)
    f.write(
        'const GRULayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}_recurrent_weights,\n   {}, {}, ACTIVATION_{}, {}\n}};\n\n'
        .format(name, name, name, name, W0.shape[0], W0.shape[1] // 3, activation, reset_after))
    hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), W0.shape[1] // 3))
    hf.write('#define {}_STATE_SIZE {}\n'.format(name.upper(), W0.shape[1] // 3))
    hf.write('extern const GRULayer {};\n\n'.format(name))
    return True

# Register as the dumper for all torch GRU layers.
nn.GRU.dump_layer = dump_gru_layer
def dump_dense_layer(self, name, f, hf, activation="TANH"):
    """Dump an nn.Linear layer: transpose the weight to (in, out) order
    and delegate the C emission to dump_dense_layer_impl.  Returns False
    (dense layers keep no recurrent state)."""
    print("printing layer " + name + " of type " + self.__class__.__name__)
    w = self.weight.detach().numpy().T  # torch stores (out, in); the dump wants (in, out)
    b = self.bias.detach().numpy()
    dump_dense_layer_impl(name, w, b, activation, f, hf)
    return False

# Register as the dumper for all linear layers.
nn.Linear.dump_layer = dump_dense_layer
def dump_mdense_layer(self, name, f, hf):
    """Dump a dual-FC (MDense) layer: the two weight/bias/factor sets are
    stacked along a trailing channel axis and written as one MDenseLayer
    struct.  Updates the global max_mdense_tmp scratch-size bound.

    Returns False (no state slot is needed).
    """
    global max_mdense_tmp
    print("printing layer " + name + " of type " + self.__class__.__name__)
    if name != "dual_fc_1":
        # NOTE(review): only the first 16 input columns are kept for layers
        # other than dual_fc_1 — presumably the slice the C code feeds this
        # layer; confirm against the inference code.
        weight1 = self.weight1.detach().numpy()[:, :16]
        weight2 = self.weight2.detach().numpy()[:, :16]
        print("weight1.size: ", self.weight1.detach().numpy().shape)
    else:
        weight1 = self.weight1.detach().numpy()
        weight2 = self.weight2.detach().numpy()
        print("weight1.size: ", weight1.shape)
    # Stack the two channels: (out, in) -> (out, in, 2).
    weight1 = np.reshape(weight1, (weight1.shape[0], weight1.shape[1], 1))
    weight2 = np.reshape(weight2, (weight2.shape[0], weight2.shape[1], 1))
    weight = np.concatenate((weight1, weight2), 2)
    bias1 = self.bias1.detach().numpy()
    bias2 = self.bias2.detach().numpy()
    bias1 = bias1.reshape(bias1.shape[0], 1)
    bias2 = bias2.reshape(bias2.shape[0], 1)
    bias = np.concatenate((bias1, bias2), 1)
    factor1 = self.factor1.detach().numpy()
    factor2 = self.factor2.detach().numpy()
    factor1 = factor1.reshape(factor1.shape[0], 1)
    factor2 = factor2.reshape(factor2.shape[0], 1)
    factor = np.concatenate((factor1, factor2), 1)
    # Transpose to (in, 2, out) / (2, out) before writing the flat arrays.
    printVector(f, np.transpose(weight, (1, 2, 0)), name + '_weights')
    printVector(f, np.transpose(bias, (1, 0)), name + '_bias')
    printVector(f, np.transpose(factor, (1, 0)), name + '_factor')
    activation = 'SOFTMAX'
    max_mdense_tmp = max(max_mdense_tmp, weight.shape[0] * weight.shape[2])
    f.write(
        'const MDenseLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}_factor,\n   {}, {}, {}, ACTIVATION_{}\n}};\n\n'
        .format(name, name, name, name, weight.shape[1], weight.shape[0], weight.shape[2], activation))
    hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weight.shape[0]))
    hf.write('extern const MDenseLayer {};\n\n'.format(name))
    return False

# Register as the dumper for MDense (dual fully-connected) layers.
MDense.dump_layer = dump_mdense_layer
def dump_conv1d_layer(self, name, f, hf):
    """Dump an nn.Conv1d layer as a Conv1DLayer struct.

    The torch weight (out_ch, in_ch, kernel) is transposed to
    (kernel, in_ch, out_ch) before emission.  Records the layer's input
    footprint in the global max_conv_inputs and emits STATE_SIZE/DELAY
    macros derived from the kernel length.  Returns True (the layer gets
    a state slot in NNetState).
    """
    global max_conv_inputs
    print("printing layer " + name + " of type " + self.__class__.__name__)
    weight = self.weight.detach().numpy()
    weight = weight.transpose(2, 1, 0)  # -> (kernel, in_ch, out_ch)
    bias = self.bias.detach().numpy()
    printVector(f, weight, name + '_weights')
    printVector(f, bias, name + '_bias')
    activation = 'TANH'
    max_conv_inputs = max(max_conv_inputs, weight.shape[1] * weight.shape[0])
    f.write('const Conv1DLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}, {}, {}, ACTIVATION_{}\n}};\n\n'
            .format(name, name, name, weight.shape[1], weight.shape[0], weight.shape[2], activation))
    hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weight.shape[2]))
    hf.write('#define {}_STATE_SIZE ({}*{})\n'.format(name.upper(), weight.shape[1], (weight.shape[0] - 1)))
    hf.write('#define {}_DELAY {}\n'.format(name.upper(), (weight.shape[0] - 1) // 2))
    hf.write('extern const Conv1DLayer {};\n\n'.format(name))
    return True

# Register as the dumper for all 1-D convolution layers.
nn.Conv1d.dump_layer = dump_conv1d_layer
def dump_embedding_layer(self, name, f, hf):
    """Dump an nn.Embedding layer: extract its weight table and delegate
    the C emission to dump_embedding_layer_impl.  Returns False (no state
    slot is needed)."""
    print("printing layer " + name + " of type " + self.__class__.__name__)
    table = self.weight.detach().numpy()
    dump_embedding_layer_impl(name, table, f, hf)
    return False

# Register as the dumper for all embedding layers.
nn.Embedding.dump_layer = dump_embedding_layer
def compute_exc_md(self, f, hf, embed_exc, rnn_units2):
    """Pre-multiply the excitation embedding into the second dual-FC's
    input weights and dump the product as one embedding table
    ('md_embed_sig'), replacing the run-time matmul with a lookup.

    Args:
        self: the MDense layer whose weight1/weight2 are split at
            rnn_units2 into GRU-B columns and embedding columns.
        embed_exc: the signal-embedding weight matrix (presumably
            (embed_size, emb_dim) — shapes printed below for inspection).
        rnn_units2: column offset of the embedding part of the weights.
    """
    print("self.weight1.shape: ", self.weight1.detach().numpy().shape)
    exc_weight_1 = self.weight1.detach().numpy()[:, rnn_units2:]
    exc_weight_2 = self.weight2.detach().numpy()[:, rnn_units2:]  # [256, 128]
    print("exbed_exc.shape: ", embed_exc.shape)
    exc_weight_1 = np.dot(embed_exc, exc_weight_1.T)  # [embed_size, 128] * [128, 256] ==> [embed_size, 256]
    exc_weight_2 = np.dot(embed_exc, exc_weight_2.T)  # [embed_size, 128] * [128, 256] ==> [embed_size, 256]
    print("exc_weight_1.size: ", exc_weight_1.shape)
    md_embed_sig = np.concatenate((exc_weight_1, exc_weight_2), 1)
    print("md_embed_sig.size: ", md_embed_sig.shape)
    dump_embedding_layer_impl('md_embed_sig', md_embed_sig, f, hf)
def dump_lpcnet(chekpoint, hparams):
    """Load a trained LPCNetModelBunch checkpoint and dump every layer as
    C tables into src/nnet_data.{c,h} (or ../src when that exists).

    Args:
        chekpoint: path to a torch checkpoint holding a 'state_dict'.
        hparams: hyper-parameter object (embedding_size, rnn_units2,
            n_samples_per_step, ...).

    Raises:
        ValueError: if the checkpoint file does not exist.
    """
    model = LPCNetModelBunch(hparams)
    if os.path.isfile(chekpoint):
        checkpoint_dict = torch.load(chekpoint)
    else:
        raise ValueError("no such checkpoint file")
    model.load_state_dict(checkpoint_dict['state_dict'])
    # model_init below was only used for testing
    # model_init(model)
    # Pick the output directory: prefer ../src, fall back to ./src.
    if os.path.exists("../src"):
        cfile = '../src/nnet_data.c'
        hfile = '../src/nnet_data.h'
    else:
        cfile = 'src/nnet_data.c'
        hfile = 'src/nnet_data.h'
        assert os.path.exists("src")
    f = open(cfile, 'w')
    hf = open(hfile, 'w')
    f.write('/*This file is automatically generated from a Pytorch model*/\n\n')
    f.write('#ifdef HAVE_CONFIG_H\n#include "config.h"\n#endif\n\n#include "nnet.h"\n#include "{}"\n\n'.format(hfile))
    hf.write('/*This file is automatically generated from a Pytorch model*/\n\n')
    hf.write('#ifndef RNN_DATA_H\n#define RNN_DATA_H\n\n#include "nnet.h"\n\n')
    embed_size = hparams.embedding_size
    E1 = model.embed_sig.weight.detach().numpy()
    W = model.gru_a.weight_ih_l0.detach().numpy()
    W = pk_convert_input_kernel(W)  # convert PyTorch layout to the Keras layout
    # for i in range(0, 3*hparams.n_samples_per_step, 3):
    # Fold the signal embedding into each embed_size-row slice of GRU-A's
    # input kernel, so each embedded input becomes a single lookup table.
    W1 = W[:embed_size, :]
    dump_embedding_layer_impl('gru_a_embed_sig_1', np.dot(E1, W1), f, hf)
    W2 = W[embed_size:2* embed_size, :]
    dump_embedding_layer_impl('gru_a_embed_pred_1', np.dot(E1, W2), f, hf)
    W3 = W[2*embed_size:3*embed_size, :]
    dump_embedding_layer_impl('gru_a_embed_exc_1', np.dot(E1, W3), f, hf)
    W4 = W[3*embed_size:4*embed_size, :]
    dump_embedding_layer_impl('gru_a_embed_sig_0', np.dot(E1, W4), f, hf)
    W5 = W[4*embed_size:5*embed_size, :]
    dump_embedding_layer_impl('gru_a_embed_pred_0', np.dot(E1, W5), f, hf)
    W6 = W[5*embed_size:6*embed_size, :]
    dump_embedding_layer_impl('gru_a_embed_exc_0', np.dot(E1, W6), f, hf)
    W7 = W[6*embed_size:, :]
    bias_ih_l0 = model.gru_a.bias_ih_l0.detach().numpy().reshape(-1)
    bias_hh_l0 = model.gru_a.bias_hh_l0.detach().numpy().reshape(-1)
    bias = np.concatenate((bias_ih_l0, bias_hh_l0))
    b = pk_convert_bias(bias)
    # The remaining (feature) rows stay a plain dense layer.
    dump_dense_layer_impl('gru_a_dense_feature', W7, b, 'LINEAR', f, hf)
    # layer list (filled with the layers that need a state slot)
    layer_list = []
    model_list = [model.embed_pitch, model.feature_conv1, model.feature_conv2, model.feature_dense1, model.feature_dense2,
                  model.gru_a, model.gru_b, model.md_1, model.embed_sig, model.md_2]
    model_name = ['embed_pitch', 'feature_conv1', 'feature_conv2', 'feature_dense1', 'feature_dense2',
                  'gru_a', 'gru_b', "dual_fc_1", 'embed_sig', "dual_fc_2"]
    assert 1 <= hparams.n_samples_per_step <= 4
    # model_list = model_list[:len(model_list)-4+hparams.n_samples_per_step]
    # print(model_list)
    assert len(model_list) == len(model_name)
    for idx in range(len(model_list)):
        name = model_name[idx]
        layer = model_list[idx]
        print("----------------" + name + "--------------------")
        # dump_layer returns True for layers that carry state across steps.
        if layer.dump_layer(name, f, hf):
            layer_list.append(name)
    dump_sparse_gru(model.gru_a, f, hf)
    compute_exc_md(model.md_2, f, hf, E1, hparams.rnn_units2)
    hf.write('#define MAX_RNN_NEURONS {}\n\n'.format(max_rnn_neurons))
    hf.write('#define MAX_CONV_INPUTS {}\n\n'.format(max_conv_inputs))
    hf.write('#define MAX_MDENSE_TMP {}\n\n'.format(max_mdense_tmp))
    hf.write('typedef struct {\n')
    for i, name in enumerate(layer_list):
        hf.write('  float {}_state[{}_STATE_SIZE];\n'.format(name, name.upper()))
    hf.write('} NNetState;\n')
    hf.write('\n\n#endif\n')
    f.close()
    hf.close()
if __name__ == "__main__":
    # Command-line entry point: convert a trained checkpoint into C tables.
    cli = argparse.ArgumentParser()
    cli.add_argument('--checkpoint', '-c', type=str, default=None, help='checkpoint')
    cli.add_argument('--hparams', type=str, default="config.yaml")
    parsed = cli.parse_args()
    config = HParam(parsed.hparams)
    dump_lpcnet(parsed.checkpoint, config)
| [
"numpy.diag",
"numpy.vsplit",
"numpy.abs",
"numpy.concatenate",
"argparse.ArgumentParser",
"torch.load",
"numpy.zeros",
"os.path.exists",
"numpy.hsplit",
"numpy.hstack",
"numpy.transpose",
"numpy.append",
"os.path.isfile",
"numpy.reshape",
"hparams.HParam",
"numpy.dot",
"lpcnet_bunch... | [((294, 314), 'numpy.vsplit', 'np.vsplit', (['kernel', '(3)'], {}), '(kernel, 3)\n', (303, 314), True, 'import numpy as np\n'), ((377, 395), 'numpy.hstack', 'np.hstack', (['kernels'], {}), '(kernels)\n', (386, 395), True, 'import numpy as np\n'), ((473, 493), 'numpy.vsplit', 'np.vsplit', (['kernel', '(3)'], {}), '(kernel, 3)\n', (482, 493), True, 'import numpy as np\n'), ((556, 574), 'numpy.hstack', 'np.hstack', (['kernels'], {}), '(kernels)\n', (565, 574), True, 'import numpy as np\n'), ((756, 776), 'numpy.vsplit', 'np.vsplit', (['kernel', '(3)'], {}), '(kernel, 3)\n', (765, 776), True, 'import numpy as np\n'), ((833, 851), 'numpy.hstack', 'np.hstack', (['kernels'], {}), '(kernels)\n', (842, 851), True, 'import numpy as np\n'), ((929, 949), 'numpy.hsplit', 'np.hsplit', (['kernel', '(3)'], {}), '(kernel, 3)\n', (938, 949), True, 'import numpy as np\n'), ((1006, 1024), 'numpy.vstack', 'np.vstack', (['kernels'], {}), '(kernels)\n', (1015, 1024), True, 'import numpy as np\n'), ((1297, 1311), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (1305, 1311), True, 'import numpy as np\n'), ((1643, 1670), 'numpy.zeros', 'np.zeros', (['(0,)'], {'dtype': '"""int"""'}), "((0,), dtype='int')\n", (1651, 1670), True, 'import numpy as np\n'), ((2322, 2344), 'numpy.reshape', 'np.reshape', (['vector', '(-1)'], {}), '(vector, -1)\n', (2332, 2344), True, 'import numpy as np\n'), ((4148, 4188), 'numpy.concatenate', 'np.concatenate', (['(bias_ih_l0, bias_hh_l0)'], {}), '((bias_ih_l0, bias_hh_l0))\n', (4162, 4188), True, 'import numpy as np\n'), ((5723, 5763), 'numpy.concatenate', 'np.concatenate', (['(bias_ih_l0, bias_hh_l0)'], {}), '((bias_ih_l0, bias_hh_l0))\n', (5737, 5763), True, 'import numpy as np\n'), ((7683, 7743), 'numpy.reshape', 'np.reshape', (['weight1', '(weight1.shape[0], weight1.shape[1], 1)'], {}), '(weight1, (weight1.shape[0], weight1.shape[1], 1))\n', (7693, 7743), True, 'import numpy as np\n'), ((7758, 7818), 'numpy.reshape', 'np.reshape', 
(['weight2', '(weight2.shape[0], weight2.shape[1], 1)'], {}), '(weight2, (weight2.shape[0], weight2.shape[1], 1))\n', (7768, 7818), True, 'import numpy as np\n'), ((7833, 7870), 'numpy.concatenate', 'np.concatenate', (['(weight1, weight2)', '(2)'], {}), '((weight1, weight2), 2)\n', (7847, 7870), True, 'import numpy as np\n'), ((8052, 8085), 'numpy.concatenate', 'np.concatenate', (['(bias1, bias2)', '(1)'], {}), '((bias1, bias2), 1)\n', (8066, 8085), True, 'import numpy as np\n'), ((8290, 8327), 'numpy.concatenate', 'np.concatenate', (['(factor1, factor2)', '(1)'], {}), '((factor1, factor2), 1)\n', (8304, 8327), True, 'import numpy as np\n'), ((10753, 10786), 'numpy.dot', 'np.dot', (['embed_exc', 'exc_weight_1.T'], {}), '(embed_exc, exc_weight_1.T)\n', (10759, 10786), True, 'import numpy as np\n'), ((10862, 10895), 'numpy.dot', 'np.dot', (['embed_exc', 'exc_weight_2.T'], {}), '(embed_exc, exc_weight_2.T)\n', (10868, 10895), True, 'import numpy as np\n'), ((11025, 11072), 'numpy.concatenate', 'np.concatenate', (['(exc_weight_1, exc_weight_2)', '(1)'], {}), '((exc_weight_1, exc_weight_2), 1)\n', (11039, 11072), True, 'import numpy as np\n'), ((11244, 11269), 'lpcnet_bunched.LPCNetModelBunch', 'LPCNetModelBunch', (['hparams'], {}), '(hparams)\n', (11260, 11269), False, 'from lpcnet_bunched import MDense, LPCNetModelBunch\n'), ((11278, 11303), 'os.path.isfile', 'os.path.isfile', (['chekpoint'], {}), '(chekpoint)\n', (11292, 11303), False, 'import os\n'), ((11530, 11554), 'os.path.exists', 'os.path.exists', (['"""../src"""'], {}), "('../src')\n", (11544, 11554), False, 'import os\n'), ((13276, 13316), 'numpy.concatenate', 'np.concatenate', (['(bias_ih_l0, bias_hh_l0)'], {}), '((bias_ih_l0, bias_hh_l0))\n', (13290, 13316), True, 'import numpy as np\n'), ((14892, 14917), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14915, 14917), False, 'import argparse\n'), ((15122, 15142), 'hparams.HParam', 'HParam', (['args.hparams'], {}), '(args.hparams)\n', 
(15128, 15142), False, 'from hparams import HParam\n'), ((1745, 1763), 'numpy.append', 'np.append', (['idx', '(-1)'], {}), '(idx, -1)\n', (1754, 1763), True, 'import numpy as np\n'), ((8348, 8379), 'numpy.transpose', 'np.transpose', (['weight', '(1, 2, 0)'], {}), '(weight, (1, 2, 0))\n', (8360, 8379), True, 'import numpy as np\n'), ((8419, 8445), 'numpy.transpose', 'np.transpose', (['bias', '(1, 0)'], {}), '(bias, (1, 0))\n', (8431, 8445), True, 'import numpy as np\n'), ((8482, 8510), 'numpy.transpose', 'np.transpose', (['factor', '(1, 0)'], {}), '(factor, (1, 0))\n', (8494, 8510), True, 'import numpy as np\n'), ((11331, 11352), 'torch.load', 'torch.load', (['chekpoint'], {}), '(chekpoint)\n', (11341, 11352), False, 'import torch\n'), ((11723, 11744), 'os.path.exists', 'os.path.exists', (['"""src"""'], {}), "('src')\n", (11737, 11744), False, 'import os\n'), ((12497, 12511), 'numpy.dot', 'np.dot', (['E1', 'W1'], {}), '(E1, W1)\n', (12503, 12511), True, 'import numpy as np\n'), ((12612, 12626), 'numpy.dot', 'np.dot', (['E1', 'W2'], {}), '(E1, W2)\n', (12618, 12626), True, 'import numpy as np\n'), ((12727, 12741), 'numpy.dot', 'np.dot', (['E1', 'W3'], {}), '(E1, W3)\n', (12733, 12741), True, 'import numpy as np\n'), ((12842, 12856), 'numpy.dot', 'np.dot', (['E1', 'W4'], {}), '(E1, W4)\n', (12848, 12856), True, 'import numpy as np\n'), ((12958, 12972), 'numpy.dot', 'np.dot', (['E1', 'W5'], {}), '(E1, W5)\n', (12964, 12972), True, 'import numpy as np\n'), ((13073, 13087), 'numpy.dot', 'np.dot', (['E1', 'W6'], {}), '(E1, W6)\n', (13079, 13087), True, 'import numpy as np\n'), ((1339, 1356), 'numpy.diag', 'np.diag', (['A[:, :N]'], {}), '(A[:, :N])\n', (1346, 1356), True, 'import numpy as np\n'), ((1358, 1380), 'numpy.diag', 'np.diag', (['A[:, N:2 * N]'], {}), '(A[:, N:2 * N])\n', (1365, 1380), True, 'import numpy as np\n'), ((1382, 1403), 'numpy.diag', 'np.diag', (['A[:, 2 * N:]'], {}), '(A[:, 2 * N:])\n', (1389, 1403), True, 'import numpy as np\n'), ((1440, 1457), 
'numpy.diag', 'np.diag', (['A[:, :N]'], {}), '(A[:, :N])\n', (1447, 1457), True, 'import numpy as np\n'), ((1503, 1525), 'numpy.diag', 'np.diag', (['A[:, N:2 * N]'], {}), '(A[:, N:2 * N])\n', (1510, 1525), True, 'import numpy as np\n'), ((1569, 1590), 'numpy.diag', 'np.diag', (['A[:, 2 * N:]'], {}), '(A[:, 2 * N:])\n', (1576, 1590), True, 'import numpy as np\n'), ((1946, 1963), 'numpy.append', 'np.append', (['idx', 'j'], {}), '(idx, j)\n', (1955, 1963), True, 'import numpy as np\n'), ((1984, 2030), 'numpy.concatenate', 'np.concatenate', (['[W, A[j, i * 16:(i + 1) * 16]]'], {}), '([W, A[j, i * 16:(i + 1) * 16]])\n', (1998, 2030), True, 'import numpy as np\n'), ((1836, 1869), 'numpy.abs', 'np.abs', (['A[j, i * 16:(i + 1) * 16]'], {}), '(A[j, i * 16:(i + 1) * 16])\n', (1842, 1869), True, 'import numpy as np\n')] |
from numba import cuda
import cu_utils.transform as cutr
import numpy as np
SIZE = 128  # length of every test input/output array
np.random.seed(0)  # deterministic random inputs across runs
def test_cu_mean_transform():
    """The CUDA mean transform must broadcast np.mean(input) to every slot."""
    data = np.random.rand(SIZE)
    out = np.zeros(SIZE)
    kernel = cuda.jit(cutr.cu_mean_transform)
    kernel(data, out)
    np.testing.assert_almost_equal(out, np.mean(data))
    np.testing.assert_equal(out.shape[0], SIZE)
def test_cu_min_transform():
    """The CUDA min transform must broadcast np.min(input) to every slot."""
    data = np.random.rand(SIZE)
    out = np.zeros(SIZE)
    kernel = cuda.jit(cutr.cu_min_transform)
    kernel(data, out)
    np.testing.assert_almost_equal(out, np.min(data))
    np.testing.assert_equal(out.shape[0], SIZE)
def test_cu_max_transform():
    """The CUDA max transform must broadcast np.max(input) to every slot."""
    data = np.random.rand(SIZE)
    out = np.zeros(SIZE)
    kernel = cuda.jit(cutr.cu_max_transform)
    kernel(data, out)
    np.testing.assert_almost_equal(out, np.max(data))
    np.testing.assert_equal(out.shape[0], SIZE)
def test_cu_shift_transform():
    """Shift transforms must move the data by the requested offset and fill
    the vacated positions with the null value (checked for a positive and
    a negative shift)."""
    data = np.random.rand(SIZE)
    null_val = -1
    for shift in (3, -4):
        out = np.zeros(SIZE)
        cuda.jit(cutr.get_cu_shift_transform(shift_by=shift, null_val=null_val))(data, out)
        if shift > 0:
            np.testing.assert_equal(out[shift:], data[:-shift])
            np.testing.assert_equal(out[:shift], null_val)
        else:
            np.testing.assert_equal(out[:shift], data[-shift:])
            np.testing.assert_equal(out[shift:], null_val)
        np.testing.assert_equal(out.shape[0], SIZE)
def test_cu_rolling_mean_transform():
    """Rolling mean: out[i] = mean(in[i-window+1 .. i]) once a full window
    is available; earlier slots must hold null_val.

    Fix: the valid region is compared with assert_allclose instead of
    exact equality — the kernel and the np.roll-based reference may sum
    the window terms in different orders, so bit-exact float equality is
    not guaranteed (the file's other float checks already use approximate
    comparison).
    """
    arr = np.random.rand(SIZE)
    res = np.zeros(SIZE)
    window = 3
    null_val = -1
    cuda.jit(cutr.get_cu_rolling_mean_transform(window=window, null_val=null_val))(arr, res)
    # CPU reference: sum of the last `window` lags; np.roll's wrap-around
    # only affects indices < window-1, which are excluded below.
    expected = arr.copy()
    for i in range(1, window):
        expected += np.roll(arr, i)
    expected /= window
    np.testing.assert_allclose(res[window - 1:], expected[window - 1:], rtol=1e-6, atol=1e-12)
    np.testing.assert_equal(res[:window - 1], null_val)
    np.testing.assert_equal(res.shape[0], SIZE)
def test_cu_rolling_max_transform():
    """Rolling max over a 5-sample window; the first window-1 slots must
    hold the null value."""
    data = np.random.rand(SIZE)
    out = np.zeros(SIZE)
    window, null_val = 5, -1
    cuda.jit(cutr.get_cu_rolling_max_transform(window=window, null_val=null_val))(data, out)
    # Reference: element-wise max over the last `window` lags (np.roll's
    # wrap-around only touches indices < window-1, which are excluded).
    reference = data.copy()
    for lag in range(1, window):
        reference = np.maximum(np.roll(data, lag), reference)
    np.testing.assert_equal(out[window - 1:], reference[window - 1:])
    np.testing.assert_equal(out[:window - 1], null_val)
    np.testing.assert_equal(out.shape[0], SIZE)
def test_cu_rolling_min_transform():
    """Rolling min over a 5-sample window: out[i] = min(in[i-4 .. i]);
    the first window-1 slots must hold the null value."""
    arr = np.random.rand(SIZE)
    res = np.zeros(SIZE)
    window = 5
    null_val = -1
    cuda.jit(cutr.get_cu_rolling_min_transform(window=window, null_val=null_val))(arr, res)
    # Reference: element-wise min over the last `window` lags; np.roll's
    # wrap-around only touches indices < window-1, which are excluded.
    expected = arr.copy()
    for i in range(1, window):
        expected = np.minimum(np.roll(arr, i), expected)
    np.testing.assert_equal(res[window-1:], expected[window-1:])
    np.testing.assert_equal(res[:window-1], null_val)
    np.testing.assert_equal(res.shape[0], SIZE)
| [
"cu_utils.transform.get_cu_shift_transform",
"numpy.random.seed",
"numpy.roll",
"cu_utils.transform.get_cu_rolling_mean_transform",
"numpy.zeros",
"numpy.min",
"numpy.mean",
"numpy.max",
"cu_utils.transform.get_cu_rolling_min_transform",
"numba.cuda.jit",
"numpy.testing.assert_equal",
"numpy.r... | [((88, 105), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (102, 105), True, 'import numpy as np\n'), ((148, 168), 'numpy.random.rand', 'np.random.rand', (['SIZE'], {}), '(SIZE)\n', (162, 168), True, 'import numpy as np\n'), ((179, 193), 'numpy.zeros', 'np.zeros', (['SIZE'], {}), '(SIZE)\n', (187, 193), True, 'import numpy as np\n'), ((301, 344), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res.shape[0]', 'SIZE'], {}), '(res.shape[0], SIZE)\n', (324, 344), True, 'import numpy as np\n'), ((386, 406), 'numpy.random.rand', 'np.random.rand', (['SIZE'], {}), '(SIZE)\n', (400, 406), True, 'import numpy as np\n'), ((417, 431), 'numpy.zeros', 'np.zeros', (['SIZE'], {}), '(SIZE)\n', (425, 431), True, 'import numpy as np\n'), ((537, 580), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res.shape[0]', 'SIZE'], {}), '(res.shape[0], SIZE)\n', (560, 580), True, 'import numpy as np\n'), ((622, 642), 'numpy.random.rand', 'np.random.rand', (['SIZE'], {}), '(SIZE)\n', (636, 642), True, 'import numpy as np\n'), ((653, 667), 'numpy.zeros', 'np.zeros', (['SIZE'], {}), '(SIZE)\n', (661, 667), True, 'import numpy as np\n'), ((773, 816), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res.shape[0]', 'SIZE'], {}), '(res.shape[0], SIZE)\n', (796, 816), True, 'import numpy as np\n'), ((860, 880), 'numpy.random.rand', 'np.random.rand', (['SIZE'], {}), '(SIZE)\n', (874, 880), True, 'import numpy as np\n'), ((891, 905), 'numpy.zeros', 'np.zeros', (['SIZE'], {}), '(SIZE)\n', (899, 905), True, 'import numpy as np\n'), ((1031, 1081), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res[shift:]', 'arr[:-shift]'], {}), '(res[shift:], arr[:-shift])\n', (1054, 1081), True, 'import numpy as np\n'), ((1086, 1132), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res[:shift]', 'null_val'], {}), '(res[:shift], null_val)\n', (1109, 1132), True, 'import numpy as np\n'), ((1137, 1180), 'numpy.testing.assert_equal', 
'np.testing.assert_equal', (['res.shape[0]', 'SIZE'], {}), '(res.shape[0], SIZE)\n', (1160, 1180), True, 'import numpy as np\n'), ((1192, 1206), 'numpy.zeros', 'np.zeros', (['SIZE'], {}), '(SIZE)\n', (1200, 1206), True, 'import numpy as np\n'), ((1315, 1365), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res[:shift]', 'arr[-shift:]'], {}), '(res[:shift], arr[-shift:])\n', (1338, 1365), True, 'import numpy as np\n'), ((1370, 1416), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res[shift:]', 'null_val'], {}), '(res[shift:], null_val)\n', (1393, 1416), True, 'import numpy as np\n'), ((1421, 1464), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res.shape[0]', 'SIZE'], {}), '(res.shape[0], SIZE)\n', (1444, 1464), True, 'import numpy as np\n'), ((1515, 1535), 'numpy.random.rand', 'np.random.rand', (['SIZE'], {}), '(SIZE)\n', (1529, 1535), True, 'import numpy as np\n'), ((1546, 1560), 'numpy.zeros', 'np.zeros', (['SIZE'], {}), '(SIZE)\n', (1554, 1560), True, 'import numpy as np\n'), ((1810, 1874), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res[window - 1:]', 'expected[window - 1:]'], {}), '(res[window - 1:], expected[window - 1:])\n', (1833, 1874), True, 'import numpy as np\n'), ((1875, 1926), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res[:window - 1]', 'null_val'], {}), '(res[:window - 1], null_val)\n', (1898, 1926), True, 'import numpy as np\n'), ((1929, 1972), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res.shape[0]', 'SIZE'], {}), '(res.shape[0], SIZE)\n', (1952, 1972), True, 'import numpy as np\n'), ((2022, 2042), 'numpy.random.rand', 'np.random.rand', (['SIZE'], {}), '(SIZE)\n', (2036, 2042), True, 'import numpy as np\n'), ((2053, 2067), 'numpy.zeros', 'np.zeros', (['SIZE'], {}), '(SIZE)\n', (2061, 2067), True, 'import numpy as np\n'), ((2314, 2378), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res[window - 1:]', 'expected[window - 1:]'], {}), '(res[window - 
1:], expected[window - 1:])\n', (2337, 2378), True, 'import numpy as np\n'), ((2379, 2430), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res[:window - 1]', 'null_val'], {}), '(res[:window - 1], null_val)\n', (2402, 2430), True, 'import numpy as np\n'), ((2433, 2476), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res.shape[0]', 'SIZE'], {}), '(res.shape[0], SIZE)\n', (2456, 2476), True, 'import numpy as np\n'), ((2526, 2546), 'numpy.random.rand', 'np.random.rand', (['SIZE'], {}), '(SIZE)\n', (2540, 2546), True, 'import numpy as np\n'), ((2557, 2571), 'numpy.zeros', 'np.zeros', (['SIZE'], {}), '(SIZE)\n', (2565, 2571), True, 'import numpy as np\n'), ((2818, 2882), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res[window - 1:]', 'expected[window - 1:]'], {}), '(res[window - 1:], expected[window - 1:])\n', (2841, 2882), True, 'import numpy as np\n'), ((2883, 2934), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res[:window - 1]', 'null_val'], {}), '(res[:window - 1], null_val)\n', (2906, 2934), True, 'import numpy as np\n'), ((2937, 2980), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res.shape[0]', 'SIZE'], {}), '(res.shape[0], SIZE)\n', (2960, 2980), True, 'import numpy as np\n'), ((199, 231), 'numba.cuda.jit', 'cuda.jit', (['cutr.cu_mean_transform'], {}), '(cutr.cu_mean_transform)\n', (207, 231), False, 'from numba import cuda\n'), ((283, 295), 'numpy.mean', 'np.mean', (['arr'], {}), '(arr)\n', (290, 295), True, 'import numpy as np\n'), ((437, 468), 'numba.cuda.jit', 'cuda.jit', (['cutr.cu_min_transform'], {}), '(cutr.cu_min_transform)\n', (445, 468), False, 'from numba import cuda\n'), ((520, 531), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (526, 531), True, 'import numpy as np\n'), ((673, 704), 'numba.cuda.jit', 'cuda.jit', (['cutr.cu_max_transform'], {}), '(cutr.cu_max_transform)\n', (681, 704), False, 'from numba import cuda\n'), ((756, 767), 'numpy.max', 'np.max', (['arr'], {}), 
'(arr)\n', (762, 767), True, 'import numpy as np\n'), ((1766, 1781), 'numpy.roll', 'np.roll', (['arr', 'i'], {}), '(arr, i)\n', (1773, 1781), True, 'import numpy as np\n'), ((952, 1014), 'cu_utils.transform.get_cu_shift_transform', 'cutr.get_cu_shift_transform', ([], {'shift_by': 'shift', 'null_val': 'null_val'}), '(shift_by=shift, null_val=null_val)\n', (979, 1014), True, 'import cu_utils.transform as cutr\n'), ((1236, 1298), 'cu_utils.transform.get_cu_shift_transform', 'cutr.get_cu_shift_transform', ([], {'shift_by': 'shift', 'null_val': 'null_val'}), '(shift_by=shift, null_val=null_val)\n', (1263, 1298), True, 'import cu_utils.transform as cutr\n'), ((1608, 1676), 'cu_utils.transform.get_cu_rolling_mean_transform', 'cutr.get_cu_rolling_mean_transform', ([], {'window': 'window', 'null_val': 'null_val'}), '(window=window, null_val=null_val)\n', (1642, 1676), True, 'import cu_utils.transform as cutr\n'), ((2115, 2182), 'cu_utils.transform.get_cu_rolling_max_transform', 'cutr.get_cu_rolling_max_transform', ([], {'window': 'window', 'null_val': 'null_val'}), '(window=window, null_val=null_val)\n', (2148, 2182), True, 'import cu_utils.transform as cutr\n'), ((2282, 2297), 'numpy.roll', 'np.roll', (['arr', 'i'], {}), '(arr, i)\n', (2289, 2297), True, 'import numpy as np\n'), ((2619, 2686), 'cu_utils.transform.get_cu_rolling_min_transform', 'cutr.get_cu_rolling_min_transform', ([], {'window': 'window', 'null_val': 'null_val'}), '(window=window, null_val=null_val)\n', (2652, 2686), True, 'import cu_utils.transform as cutr\n'), ((2786, 2801), 'numpy.roll', 'np.roll', (['arr', 'i'], {}), '(arr, i)\n', (2793, 2801), True, 'import numpy as np\n')] |
"""
Script to call different plots and illustrative methods - specifically tailored for the paper
Author: <NAME>
Version: 0.1
Date 21.12.2021
"""
import numpy as np
import pandas as pd
from utils import load_density_function, scatter_plot_2d_N2, scatter_plot_3d, scatter_plot_2d
def paper_illustrations():
    """Create the illustrative figures used in the paper.

    First plots the macroscopic moments and their gradients for the Sod
    shock-tube data, then repeats both plots for the sampled generator data.
    All figures are written into the 'illustrations' folder by the plotting
    helpers.

    Returns
    -------
    int
        Always 0 (success).
    """
    # I ----- Sod test case
    sod_x = np.load('data/sod1D/X.npy')
    sod_y = np.load('data/sod1D/Y.npy')  # loaded alongside X; not plotted below
    sod_z = np.load('data/sod1D/Z.npy')  # loaded alongside X; not plotted below
    sod_iter = np.load('data/sod1D/I.npy')

    # 1) moments of the Sod test case, normalized by the density row
    moments_n = sod_x[:3, :] / sod_x[0, :]
    scatter_plot_2d_N2(x_in=moments_n[1:, :].reshape((moments_n.shape[1], 2)), z_in=sod_iter[0], show_fig=False,
                       log=False,
                       folder_name='illustrations', color_map=1, name='moment_dynamics', label_x='velocity',
                       label_y='temperature', title='macroscopic variables over time')

    # 2) moment gradients of the Sod test case, as 3D cloud and 2D projection
    sod_grad = sod_x[3:6, :]
    scatter_plot_3d(xyz_in=sod_grad.reshape((moments_n.shape[1], 3)), color_in=sod_iter[0], show_fig=False, log=False,
                    folder_name='illustrations', color_map=1, name='grad_moment_dynamics',
                    title='gradient macroscopic variables over time', lim_x=(-1, 10),
                    lim_y=(0, 10), lim_z=(0, 10))
    scatter_plot_2d(x_in=sod_grad[1:, :].reshape((moments_n.shape[1], 2)), z_in=sod_iter[0], show_fig=False, log=False,
                    folder_name='illustrations', color_map=1, name='grad_moment_dynamics2D', label_x=r"$\nabla_x U$",
                    label_y=r"$\nabla_x T$", title='gradient macroscopic variables over time')

    # II ----- generated data
    gen_x = np.load('data/generator1D/X.npy')
    gen_y = np.load('data/generator1D/Y.npy')  # loaded alongside X; not plotted below

    # 1) moments of the generated data, normalized by the density row
    gen_moments_n = gen_x[:3, :] / gen_x[0, :]
    scatter_plot_2d_N2(x_in=gen_moments_n[1:, :].reshape((gen_moments_n.shape[1], 2)),
                       z_in=np.zeros(shape=(gen_moments_n.shape[1])),
                       show_fig=False, log=False,
                       folder_name='illustrations', color_map=1, name='moment_dynamics_genData', label_x='velocity',
                       label_y='temperature', title='macroscopic variables (generated)')

    # 2) moment gradients of the generated data
    gen_grad = gen_x[3:6, :]
    scatter_plot_3d(xyz_in=gen_grad.reshape((gen_moments_n.shape[1], 3)),
                    color_in=np.zeros(shape=(gen_moments_n.shape[1])), show_fig=False, log=False,
                    folder_name='illustrations', color_map=1, name='grad_moment_dynamics_genData',
                    title='gradient macroscopic variables (generated)', lim_x=(-1, 10),
                    lim_y=(0, 10), lim_z=(0, 10))
    scatter_plot_2d(x_in=gen_grad[1:, :].reshape((gen_moments_n.shape[1], 2)),
                    z_in=np.zeros(shape=(gen_moments_n.shape[1])), show_fig=False, log=False,
                    folder_name='illustrations', color_map=1, name='grad_moment_dynamics2D_genData',
                    label_x=r"$\nabla_x U$", label_y=r"$\nabla_x T$",
                    title='gradient macroscopic variables (generated)')
    return 0
if __name__ == '__main__':
    # Entry point: regenerate every illustration figure for the paper.
    paper_illustrations()
| [
"numpy.load",
"numpy.zeros"
] | [((359, 386), 'numpy.load', 'np.load', (['"""data/sod1D/X.npy"""'], {}), "('data/sod1D/X.npy')\n", (366, 386), True, 'import numpy as np\n'), ((399, 426), 'numpy.load', 'np.load', (['"""data/sod1D/Y.npy"""'], {}), "('data/sod1D/Y.npy')\n", (406, 426), True, 'import numpy as np\n'), ((439, 466), 'numpy.load', 'np.load', (['"""data/sod1D/Z.npy"""'], {}), "('data/sod1D/Z.npy')\n", (446, 466), True, 'import numpy as np\n'), ((482, 509), 'numpy.load', 'np.load', (['"""data/sod1D/I.npy"""'], {}), "('data/sod1D/I.npy')\n", (489, 509), True, 'import numpy as np\n'), ((1769, 1802), 'numpy.load', 'np.load', (['"""data/generator1D/X.npy"""'], {}), "('data/generator1D/X.npy')\n", (1776, 1802), True, 'import numpy as np\n'), ((1815, 1848), 'numpy.load', 'np.load', (['"""data/generator1D/Y.npy"""'], {}), "('data/generator1D/Y.npy')\n", (1822, 1848), True, 'import numpy as np\n'), ((2055, 2088), 'numpy.zeros', 'np.zeros', ([], {'shape': 'u_normal.shape[1]'}), '(shape=u_normal.shape[1])\n', (2063, 2088), True, 'import numpy as np\n'), ((2502, 2535), 'numpy.zeros', 'np.zeros', ([], {'shape': 'u_normal.shape[1]'}), '(shape=u_normal.shape[1])\n', (2510, 2535), True, 'import numpy as np\n'), ((2900, 2933), 'numpy.zeros', 'np.zeros', ([], {'shape': 'u_normal.shape[1]'}), '(shape=u_normal.shape[1])\n', (2908, 2933), True, 'import numpy as np\n')] |
import numpy as np
#np.set_printoptions(precision=2)
import pandas as pd
from typing import Any, Dict, List, Tuple, NoReturn
import argparse
import os
import pickle
import json
from sklearn.mixture import BayesianGaussianMixture
def parse_arguments() -> Any:
    """Parse command line arguments.

    Returns
    -------
    argparse.Namespace
        Parsed arguments with the attributes data_dir, model_dir,
        result_dir, mode, obs_len and filter.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        required=True,
        type=str,
        help="Directory where the features (npy files) are saved",
    )
    parser.add_argument(
        "--model_dir",
        required=True,
        type=str,
        help="Directory where the model is saved",
    )
    parser.add_argument(
        "--result_dir",
        required=True,
        type=str,
        # Fixed copy-paste: this used to repeat the model_dir help text.
        help="Directory where the results are saved",
    )
    parser.add_argument("--mode",
                        required=True,
                        type=str,
                        help="train/val/test",
                        choices=['train', 'test', 'val'])
    parser.add_argument("--obs_len",
                        default=2,
                        type=int,
                        help="Observed length of the trajectory in seconds",
                        choices=[1, 2, 3, 4, 5])
    parser.add_argument("--filter",
                        default='ekf',
                        type=str,
                        # Fixed: closing parenthesis was missing.
                        help="Filter to process the data noise (ekf/none/ekf-savgol/savgol)",
                        choices=['ekf', 'none', 'ekf-savgol', 'savgol'])
    return parser.parse_args()
def train(data:np.ndarray,
          obs_len:int,
          filter_name:str,
          model_dir:str,
          result_dir:str,
          save_model:bool=True)->NoReturn:
    """Fit a Bayesian Gaussian mixture on the features and store the results.

    Parameters
    ----------
    data : np.ndarray
        Feature matrix of shape (m, 4):
        [mean_velocity, mean_acceleration, mean_deceleration, std_lateral_jerk].
    obs_len : int
        Observed trajectory length in seconds (used in output file names).
    filter_name : str
        Name of the noise filter applied to the data (used in file names).
    model_dir : str
        Directory where the fitted model is pickled.
    result_dir : str
        Directory where the csv/json result files are written.
    save_model : bool
        When True, pickle the fitted model into ``model_dir``.
    """
    print('[Bayesian Gaussian Mixture Clustering][train] creating model...')
    bgm = BayesianGaussianMixture(n_components=3,
                                  covariance_type="full",
                                  max_iter=1000,
                                  tol=1e-5,
                                  n_init=10,
                                  random_state=7,
                                  weight_concentration_prior_type='dirichlet_process',
                                  init_params="kmeans")
    print('[Bayesian Gaussian Mixture Clustering][train] training...')
    # Fit and keep the per-row cluster assignment as a column vector so it
    # can later be appended to the feature matrix.
    cluster_ids = np.expand_dims(bgm.fit_predict(X=data), axis=1)
    print(f'[Bayesian Gaussian Mixture Clustering][train] converged?:{bgm.converged_}')
    print('[Bayesian Gaussian Mixture Clustering][train] params (center and covariance):')
    for comp, mean, cov, weight in zip(range(1, 4), bgm.means_, bgm.covariances_, bgm.weights_):
        print(f'\tc_{comp}-> mean: {mean}')
        print(f'\t\tcov: {cov}')
        print(f'\t\tweight: {weight}')
    print('[Bayesian Gaussian Mixture Clustering][train] results:')
    # Report how many rows landed in each cluster.
    for cluster, count in zip(*np.unique(cluster_ids, return_counts=True)):
        print(f'\tc_{cluster}: {count}')

    if save_model:
        model_file = f'bgm_{obs_len}s_{filter_name}.pkl'
        print(f'[Bayesian Gaussian Mixture Clustering][train] saving model ({model_file})...')
        with open(os.path.join(model_dir, model_file), 'wb') as f:
            pickle.dump(bgm, f)

    result_file = f'results_bgm_train_{obs_len}s_{filter_name}.csv'
    print(f'[Bayesian Gaussian Mixture Clustering][train] saving results ({result_file})...')
    columns = ['mean_velocity',
               'mean_acceleration',
               'mean_deceleration',
               'std_lateral_jerk',
               'driving_style']
    labelled = np.concatenate((data, cluster_ids), axis=1)
    pd.DataFrame(data=labelled, columns=columns).to_csv(os.path.join(result_dir, result_file))

    # Store the mixture parameters next to the labelled features.
    params_file = result_file.replace('results', 'params').replace('csv', 'json')
    print(f'[Bayesian Gaussian Mixture Clustering][train] saving results ({params_file})...')
    params = {}
    params['means'] = bgm.means_.tolist()
    params['covariances'] = bgm.covariances_.tolist()
    params['weights'] = bgm.weights_.tolist()
    with open(os.path.join(result_dir, params_file), 'w') as f:
        json.dump(params, f)
def process(data:np.ndarray,
            obs_len:int,
            filter_name:str,
            model_dir:str,
            result_dir:str,
            mode:str)->NoReturn:
    """Classify features with a previously trained Bayesian Gaussian mixture.

    Loads the pickled model matching ``obs_len``/``filter_name`` from
    ``model_dir``, predicts a driving-style cluster for every row of
    ``data`` and writes the labelled features to a csv file in
    ``result_dir``.
    """
    model_file = f'bgm_{obs_len}s_{filter_name}.pkl'
    assert os.path.exists(os.path.join(model_dir, model_file)),\
        f'[Bayesian Gaussian Mixture Clustering][{mode}][ERROR] model not found! ({model_file})'
    print(f'[Bayesian Gaussian Mixture Clustering][{mode}] loading the model...')
    bgm = None
    with open(os.path.join(model_dir, model_file), 'rb') as f:
        bgm = pickle.load(f)
    assert bgm is not None,\
        f'[Bayesian Gaussian Mixture Clustering][{mode}][ERROR] error while loading model! ({model_file})'

    # Predict and keep the assignments as a column vector for concatenation.
    cluster_ids = np.expand_dims(bgm.predict(X=data), axis=1)
    print(f'[Bayesian Gaussian Mixture Clustering][{mode}] converged?:{bgm.converged_}')
    print(f'[Bayesian Gaussian Mixture Clustering][{mode}] params (center and covariance):')
    for comp, mean, cov in zip(range(1, 4), bgm.means_, bgm.covariances_):
        print(f'\tc_{comp}-> mean: {mean}')
        print(f'\t\tcov: {cov}')
    print(f'[Bayesian Gaussian Mixture Clustering][{mode}] results:')
    # Report how many rows landed in each cluster.
    for cluster, count in zip(*np.unique(cluster_ids, return_counts=True)):
        print(f'\tc_{cluster}: {count}')

    result_file = f'results_bgm_{mode}_{obs_len}s_{filter_name}.csv'
    print(f'[Bayesian Gaussian Mixture Clustering][{mode}] saving results ({result_file})...')
    columns = ['mean_velocity',
               'mean_acceleration',
               'mean_deceleration',
               'std_lateral_jerk',
               'driving_style']
    labelled = np.concatenate((data, cluster_ids), axis=1)
    pd.DataFrame(data=labelled, columns=columns).to_csv(os.path.join(result_dir, result_file))
if __name__ == '__main__':
    '''
    apply Bayesian Gaussian Mixture clustering to classify the data into
    driving styles (calm, moderate, aggressive)
    '''
    print ('[Bayesian Gaussian Mixture Clustering] running....')
    args = parse_arguments()
    if args.mode == 'test':
        # The test split only provides 2 seconds of observations.
        args.obs_len = 2
    assert os.path.exists(args.data_dir),\
        f'[Bayesian Gaussian Mixture Clustering][main][ERROR] data_dir not found!({args.data_dir})'
    data_file = 'features_{}_{}s_{}.npy'.format(args.mode,
                                                args.obs_len,
                                                args.filter)
    data_file = os.path.join(args.data_dir, data_file)
    assert os.path.exists(data_file),\
        f'[Bayesian Gaussian Mixture Clustering][main][ERROR] data_file not found!({data_file})'
    print ('[Bayesian Gaussian Mixture Clustering][main] loading dataset....')
    # (m, 4) feature matrix: [mean_v, mean_acc, mean_deac, std_jy]
    # BUG FIX: data_file is already joined with data_dir above; the previous
    # np.load(os.path.join(args.data_dir, data_file)) joined it a second
    # time and only worked when data_dir was an absolute path.
    data = np.load(data_file)
    if args.mode == 'train':
        train(data=data,
              save_model=True,
              obs_len=args.obs_len,
              filter_name=args.filter,
              model_dir=args.model_dir,
              result_dir=args.result_dir)
    elif args.mode == 'test':
        process(data=data,
                obs_len=args.obs_len,
                filter_name=args.filter,
                model_dir=args.model_dir,
                result_dir=args.result_dir,
                mode='test')
    else:#val
        process(data=data,
                obs_len=args.obs_len,
                filter_name=args.filter,
                model_dir=args.model_dir,
                result_dir=args.result_dir,
                mode='val')
| [
"pandas.DataFrame",
"json.dump",
"pickle.dump",
"argparse.ArgumentParser",
"numpy.unique",
"os.path.exists",
"numpy.expand_dims",
"pickle.load",
"sklearn.mixture.BayesianGaussianMixture",
"os.path.join",
"numpy.concatenate"
] | [((312, 337), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (335, 337), False, 'import argparse\n'), ((1457, 1657), 'sklearn.mixture.BayesianGaussianMixture', 'BayesianGaussianMixture', ([], {'n_components': '(3)', 'covariance_type': '"""full"""', 'max_iter': '(1000)', 'tol': '(1e-05)', 'n_init': '(10)', 'random_state': '(7)', 'weight_concentration_prior_type': '"""dirichlet_process"""', 'init_params': '"""kmeans"""'}), "(n_components=3, covariance_type='full', max_iter=\n 1000, tol=1e-05, n_init=10, random_state=7,\n weight_concentration_prior_type='dirichlet_process', init_params='kmeans')\n", (1480, 1657), False, 'from sklearn.mixture import BayesianGaussianMixture\n'), ((1838, 1864), 'numpy.expand_dims', 'np.expand_dims', (['_y'], {'axis': '(1)'}), '(_y, axis=1)\n', (1852, 1864), True, 'import numpy as np\n'), ((2282, 2315), 'numpy.unique', 'np.unique', (['_y'], {'return_counts': '(True)'}), '(_y, return_counts=True)\n', (2291, 2315), True, 'import numpy as np\n'), ((2908, 2942), 'numpy.concatenate', 'np.concatenate', (['(data, _y)'], {'axis': '(1)'}), '((data, _y), axis=1)\n', (2922, 2942), True, 'import numpy as np\n'), ((2949, 2990), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'result', 'columns': 'labels'}), '(data=result, columns=labels)\n', (2961, 2990), True, 'import pandas as pd\n'), ((4105, 4131), 'numpy.expand_dims', 'np.expand_dims', (['_y'], {'axis': '(1)'}), '(_y, axis=1)\n', (4119, 4131), True, 'import numpy as np\n'), ((4509, 4542), 'numpy.unique', 'np.unique', (['_y'], {'return_counts': '(True)'}), '(_y, return_counts=True)\n', (4518, 4542), True, 'import numpy as np\n'), ((4897, 4931), 'numpy.concatenate', 'np.concatenate', (['(data, _y)'], {'axis': '(1)'}), '((data, _y), axis=1)\n', (4911, 4931), True, 'import numpy as np\n'), ((4938, 4979), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'result', 'columns': 'labels'}), '(data=result, columns=labels)\n', (4950, 4979), True, 'import pandas as pd\n'), 
((5344, 5373), 'os.path.exists', 'os.path.exists', (['args.data_dir'], {}), '(args.data_dir)\n', (5358, 5373), False, 'import os\n'), ((5593, 5631), 'os.path.join', 'os.path.join', (['args.data_dir', 'data_file'], {}), '(args.data_dir, data_file)\n', (5605, 5631), False, 'import os\n'), ((5641, 5666), 'os.path.exists', 'os.path.exists', (['data_file'], {}), '(data_file)\n', (5655, 5666), False, 'import os\n'), ((3002, 3039), 'os.path.join', 'os.path.join', (['result_dir', 'result_file'], {}), '(result_dir, result_file)\n', (3014, 3039), False, 'import os\n'), ((3405, 3421), 'json.dump', 'json.dump', (['_d', 'f'], {}), '(_d, f)\n', (3414, 3421), False, 'import json\n'), ((3637, 3672), 'os.path.join', 'os.path.join', (['model_dir', 'model_file'], {}), '(model_dir, model_file)\n', (3649, 3672), False, 'import os\n'), ((3928, 3942), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3939, 3942), False, 'import pickle\n'), ((4991, 5028), 'os.path.join', 'os.path.join', (['result_dir', 'result_file'], {}), '(result_dir, result_file)\n', (5003, 5028), False, 'import os\n'), ((5904, 5942), 'os.path.join', 'os.path.join', (['args.data_dir', 'data_file'], {}), '(args.data_dir, data_file)\n', (5916, 5942), False, 'import os\n'), ((2587, 2606), 'pickle.dump', 'pickle.dump', (['bgm', 'f'], {}), '(bgm, f)\n', (2598, 2606), False, 'import pickle\n'), ((3353, 3390), 'os.path.join', 'os.path.join', (['result_dir', 'result_file'], {}), '(result_dir, result_file)\n', (3365, 3390), False, 'import os\n'), ((3870, 3905), 'os.path.join', 'os.path.join', (['model_dir', 'model_file'], {}), '(model_dir, model_file)\n', (3882, 3905), False, 'import os\n'), ((2535, 2570), 'os.path.join', 'os.path.join', (['model_dir', 'model_file'], {}), '(model_dir, model_file)\n', (2547, 2570), False, 'import os\n')] |
"""
CollectionIndex.py
Author: <NAME> (<EMAIL>)
Manages the collection index.
"""
import h5py
from math import floor, sqrt
import os
import numpy as np
import random
import scipy.sparse as sparse
import scipy.spatial
from sklearn.cluster import KMeans, MiniBatchKMeans
from time import time
from aimodel.commons import t
class CollectionIndex:
    """
    II-20's collection index is based on product quantization (PQ), this class
    handles both the creation (handled by the class-level methods) and index
    utilization in the system proper (handled by the instance-level methods).
    """

    DEFAULT_N_SUBMAT = 32
    MAX_K = 1024  # The maximum no. of clusters in a subquantizer
    MAX_N = 25000
    INDEX_FILENAME = "index.npy"
    INVERTED_INDEX_FILENAME = "inv_index.npy"
    SUBQUANT_CNTR_FILENAME = "sq_centroids.npy"
    DIST_MAT_FILENAME = "dist_mat.npy"
    KNN_FILENAME = "knn.npy"
    K_IN_KNN = 10
    # Cap on the number of distance-matrix entries computed per stride in
    # compute_knn() (keeps the intermediate matrix bounded in memory).
    STRIDE_MAX_N_VARS = int(20e9/8)

    def __init__(self, index_dir):
        """
        Constructor.

        Parameters
        ----------
        index_dir : path
            The path to the directory where the index structures are stored.
        """
        index_path =\
            os.path.join(index_dir, CollectionIndex.INDEX_FILENAME)
        inverted_index_path =\
            os.path.join(index_dir, CollectionIndex.INVERTED_INDEX_FILENAME)
        subquant_centroids_path =\
            os.path.join(index_dir, CollectionIndex.SUBQUANT_CNTR_FILENAME)
        distance_matrix_path =\
            os.path.join(index_dir, CollectionIndex.DIST_MAT_FILENAME)
        # FIX: this assignment used to appear twice (copy-paste duplicate).
        knn_path = os.path.join(index_dir, CollectionIndex.KNN_FILENAME)

        self.index = np.load(index_path)
        self.inverted_index = np.load(inverted_index_path, allow_pickle=True)
        self.subquant_centroids = np.load(subquant_centroids_path)
        self.distance_matrix = np.load(distance_matrix_path)
        # The k-NN matrix is optional (it is built separately by
        # compute_knn()); default to None so the attribute always exists
        # instead of raising AttributeError when the file is missing.
        self.knn = None
        try:
            self.knn = np.load(knn_path)
        except OSError:
            pass
        self.n = len(self.index)
        self.n_submat = len(self.distance_matrix)

    def distances(self, img1, img2):
        """
        Computes the distances between two sets of images.

        Parameters
        ----------
        img1, img2 : list
            A list of image IDs that we want to compute the distance between.

        Returns
        -------
        np.array
            A 2-D matrix of size len(img1) x len(img2) that contains the
            distances between images.

        Raises
        ------
        ValueError
            If img1 is empty.
        """
        # Delegate to the class-level implementation so the accumulation
        # loop lives in exactly one place (it used to be duplicated here).
        return CollectionIndex._dist_mat(img1, img2,
                                          self.distance_matrix, self.index)

    @classmethod
    def _dist_mat(cls, img1, img2, distance_matrix, index):
        """
        The class-level equivalent of distances() with the distance matrix and
        index being thrown in as explicit parameters (used during index
        construction, before an instance exists).

        Parameters
        ----------
        img1, img2 : list
            A list of image IDs that we want to compute the distance between.
        distance_matrix : np.array
            The distance matrix used to compute the distances.
        index : np.array
            The index used for the distance computations.

        Returns
        -------
        np.array
            A 2-D matrix of size len(img1) x len(img2) that contains the
            distances between images.

        Raises
        ------
        ValueError
            If img1 is empty.
        """
        n_queries = len(img1)
        if n_queries == 0:
            raise ValueError("No query images provided.")
        n_cols = len(img2)
        distances = np.zeros((n_queries, n_cols), dtype=np.float64)
        # PQ-approximate distance: sum, over all subquantizers, of the
        # distances between the centroids the items were quantized to.
        for s in range(distance_matrix.shape[0]):
            distances +=\
                distance_matrix[s, index[img1, s]][:, index[img2, s]]
        return distances

    @classmethod
    def create_index(cls, dataset_config):
        """
        Creates the PQ index for the specified collection.

        Parameters:
        dataset_config : dict
            A valid dataset config (see the README for formatting specs).

        Raises
        ------
        CollectionIndexError
            On inconsistent feature matrices or I/O failures.
        """
        # Dataset config shortcuts
        root_dir = dataset_config["root_dir"]
        index_features_path =\
            os.path.join(root_dir, dataset_config["index_features_path"])
        index_dir = os.path.join(root_dir, dataset_config["index_dir"])
        n_submat = dataset_config["index_n_submat"]
        # Verify the output directory
        if not os.path.exists(index_dir):
            try:
                os.makedirs(index_dir)
            except OSError:
                err = "Cannot create the index directory (%s)." % index_dir
                raise CollectionIndexError(err)
        # First pass over the features: get n (items) and n_feat (features),
        # validating that every stored matrix has the same width.
        n = 0
        n_feat = None
        with h5py.File(index_features_path, "r") as f:
            for fs in range(len(f.keys())):
                features = f["data%s" % fs]
                if n_feat:
                    if features.shape[1] != n_feat:
                        err = (("The number of features is inconsistent "
                                "between the feature matrices in '%s' (%s "
                                "features in '%s', previous matrices had %s).")
                               % (index_features_path, features.shape[1],
                                  "data%s" % fs, n_feat))
                        raise CollectionIndexError(err)
                else:
                    n_feat = features.shape[1]
                n += features.shape[0]
        print("%sx%s" % (n, n_feat))
        # Product quantization parameters
        n_feat_sq = n_feat // n_submat
        k = min(floor(sqrt(n)), CollectionIndex.MAX_K)  # The no. of clusters
        # The indexed data, n rows, a vector of subquantizer centroids for each
        indexed_data = np.zeros((n, n_submat), dtype=np.uint16)
        # The subquantizer-centric view of data: which items belong to each
        inverted_index = []
        # The centroid coordinates for each subquantizer cluster
        subquant_centroids = []
        # The 3-D distance matrix between centroids for all subquantizers
        dist_mat = np.zeros((n_submat, k, k), dtype=np.float64)
        print("%s +++ CONSTRUCTING INDEX +++" % t())
        # CREATE THE INDEX: one subquantizer per feature sub-matrix.
        for s in range(n_submat):
            signature = "(Submatrix %s/%s)" % (s + 1, n_submat)
            f_start = s*n_feat_sq
            # The last subquantizer absorbs the remainder columns when
            # n_feat is not divisible by n_submat.
            if s == n_submat - 1:
                f_end = n_feat
            else:
                f_end = (s + 1)*n_feat_sq
            # Train the K-means (incrementally, to bound memory usage)
            print("%s %s Subquantizing..." % (t(), signature), end=" ")
            stopwatch = time()
            kmeans_subquantizer = MiniBatchKMeans(n_clusters=k)
            with h5py.File(index_features_path, "r") as f:
                for fs in range(len(f.keys())):
                    feat_submat = f["data%s" % fs][:, f_start: f_end]
                    kmeans_subquantizer.partial_fit(feat_submat)
            # Record the cluster centroids for the new subquantizer
            subquant_centroids.append(kmeans_subquantizer.cluster_centers_)
            # Compute the distance matrix between centroids
            dist_mat[s, :, :] =\
                scipy.spatial.distance_matrix(subquant_centroids[s],
                                              subquant_centroids[s])
            # Index the data
            i_start = 0
            i_end = None
            with h5py.File(index_features_path, "r") as f:
                for fs in range(len(f.keys())):
                    features = f["data%s" % fs][:, f_start: f_end]
                    i_end = i_start + features.shape[0]
                    indexed_data[i_start: i_end, s] =\
                        kmeans_subquantizer.predict(features)
                    i_start = i_end
            # Fill the inverted index
            inverted_index.append([])
            for cluster in range(k):
                items_in_cluster =\
                    [int(x) for x
                     in np.where(indexed_data[:, s] == cluster)[0]]
                inverted_index[s].append(items_in_cluster)
            print("done in %s seconds." % (round(time() - stopwatch, 2)))
        # Convert inverted index to dtype=object explicitly (implicit
        # conversion deprecated by NumPy)
        inverted_index = np.array(inverted_index, dtype=object)
        # Record the results to the output directory
        indexed_data_path =\
            os.path.join(index_dir, CollectionIndex.INDEX_FILENAME)
        inverted_index_path =\
            os.path.join(index_dir, CollectionIndex.INVERTED_INDEX_FILENAME)
        subquant_centroids_path =\
            os.path.join(index_dir,
                         CollectionIndex.SUBQUANT_CNTR_FILENAME)
        dist_mat_path =\
            os.path.join(index_dir, CollectionIndex.DIST_MAT_FILENAME)
        try:
            np.save(indexed_data_path, indexed_data)
            np.save(inverted_index_path, inverted_index)
            np.save(subquant_centroids_path, subquant_centroids)
            np.save(dist_mat_path, dist_mat)
        except OSError:
            err = "Could not write the collection index files to disk."
            raise CollectionIndexError(err)
        print("%s +++ INDEX CONSTRUCTED +++" % t())

    @classmethod
    def compute_knn(cls, dataset_config):
        """
        Creates a k-nearest neighbour matrix for the dataset specified by the
        config.

        Parameters:
        dataset_config : dict
            A valid dataset config (see the README for formatting specs).
        """
        # Dataset config shortcut
        index_dir = os.path.join(dataset_config["root_dir"],
                                 dataset_config["index_dir"])
        indexed_data_path =\
            os.path.join(index_dir, CollectionIndex.INDEX_FILENAME)
        dist_mat_path =\
            os.path.join(index_dir, CollectionIndex.DIST_MAT_FILENAME)
        indexed_data = np.load(indexed_data_path)
        dist_mat = np.load(dist_mat_path)

        n = indexed_data.shape[0]
        knn = np.zeros((n, cls.K_IN_KNN), dtype=int)
        # Process the items in strides so the pairwise-distance matrix
        # never exceeds STRIDE_MAX_N_VARS entries.
        stride = int(CollectionIndex.STRIDE_MAX_N_VARS / n)
        print("%s +++ CONSTRUCTING K-NN MATRIX +++" % t())
        for i in range(0, n, stride):
            i_end = i + stride
            if i_end > n:
                i_end = n
            query = list(range(i, i_end))
            dst =\
                cls._dist_mat(query, range(n), dist_mat, indexed_data)
            # Exclude each item from its own neighbour list.
            for img in query:
                dst[img-i, img] = np.inf
            knn[i: i_end, :] = np.argsort(dst, axis=1)[:, :cls.K_IN_KNN]
            print("%s kNN neighbours for %s items established."
                  % (t(), i_end))
        knn_path = os.path.join(index_dir, cls.KNN_FILENAME)
        np.save(knn_path, knn)
        print("%s +++ K-NN MATRIX CONSTRUCTION COMPLETE +++" % t())
class CollectionIndexError(Exception):
    """Fatal error raised during collection index construction."""
| [
"sklearn.cluster.MiniBatchKMeans",
"numpy.load",
"numpy.save",
"h5py.File",
"os.makedirs",
"math.sqrt",
"aimodel.commons.t",
"numpy.zeros",
"os.path.exists",
"time.time",
"numpy.argsort",
"numpy.where",
"numpy.array",
"os.path.join"
] | [((1219, 1274), 'os.path.join', 'os.path.join', (['index_dir', 'CollectionIndex.INDEX_FILENAME'], {}), '(index_dir, CollectionIndex.INDEX_FILENAME)\n', (1231, 1274), False, 'import os\n'), ((1318, 1382), 'os.path.join', 'os.path.join', (['index_dir', 'CollectionIndex.INVERTED_INDEX_FILENAME'], {}), '(index_dir, CollectionIndex.INVERTED_INDEX_FILENAME)\n', (1330, 1382), False, 'import os\n'), ((1430, 1493), 'os.path.join', 'os.path.join', (['index_dir', 'CollectionIndex.SUBQUANT_CNTR_FILENAME'], {}), '(index_dir, CollectionIndex.SUBQUANT_CNTR_FILENAME)\n', (1442, 1493), False, 'import os\n'), ((1538, 1596), 'os.path.join', 'os.path.join', (['index_dir', 'CollectionIndex.DIST_MAT_FILENAME'], {}), '(index_dir, CollectionIndex.DIST_MAT_FILENAME)\n', (1550, 1596), False, 'import os\n'), ((1616, 1669), 'os.path.join', 'os.path.join', (['index_dir', 'CollectionIndex.KNN_FILENAME'], {}), '(index_dir, CollectionIndex.KNN_FILENAME)\n', (1628, 1669), False, 'import os\n'), ((1689, 1742), 'os.path.join', 'os.path.join', (['index_dir', 'CollectionIndex.KNN_FILENAME'], {}), '(index_dir, CollectionIndex.KNN_FILENAME)\n', (1701, 1742), False, 'import os\n'), ((1765, 1784), 'numpy.load', 'np.load', (['index_path'], {}), '(index_path)\n', (1772, 1784), True, 'import numpy as np\n'), ((1815, 1862), 'numpy.load', 'np.load', (['inverted_index_path'], {'allow_pickle': '(True)'}), '(inverted_index_path, allow_pickle=True)\n', (1822, 1862), True, 'import numpy as np\n'), ((1897, 1929), 'numpy.load', 'np.load', (['subquant_centroids_path'], {}), '(subquant_centroids_path)\n', (1904, 1929), True, 'import numpy as np\n'), ((1961, 1990), 'numpy.load', 'np.load', (['distance_matrix_path'], {}), '(distance_matrix_path)\n', (1968, 1990), True, 'import numpy as np\n'), ((2761, 2808), 'numpy.zeros', 'np.zeros', (['(n_queries, n_cols)'], {'dtype': 'np.float64'}), '((n_queries, n_cols), dtype=np.float64)\n', (2769, 2808), True, 'import numpy as np\n'), ((3962, 4009), 'numpy.zeros', 'np.zeros', 
(['(n_queries, n_cols)'], {'dtype': 'np.float64'}), '((n_queries, n_cols), dtype=np.float64)\n', (3970, 4009), True, 'import numpy as np\n'), ((4577, 4638), 'os.path.join', 'os.path.join', (['root_dir', "dataset_config['index_features_path']"], {}), "(root_dir, dataset_config['index_features_path'])\n", (4589, 4638), False, 'import os\n'), ((4659, 4710), 'os.path.join', 'os.path.join', (['root_dir', "dataset_config['index_dir']"], {}), "(root_dir, dataset_config['index_dir'])\n", (4671, 4710), False, 'import os\n'), ((6199, 6239), 'numpy.zeros', 'np.zeros', (['(n, n_submat)'], {'dtype': 'np.uint16'}), '((n, n_submat), dtype=np.uint16)\n', (6207, 6239), True, 'import numpy as np\n'), ((6534, 6578), 'numpy.zeros', 'np.zeros', (['(n_submat, k, k)'], {'dtype': 'np.float64'}), '((n_submat, k, k), dtype=np.float64)\n', (6542, 6578), True, 'import numpy as np\n'), ((8731, 8769), 'numpy.array', 'np.array', (['inverted_index'], {'dtype': 'object'}), '(inverted_index, dtype=object)\n', (8739, 8769), True, 'import numpy as np\n'), ((8865, 8920), 'os.path.join', 'os.path.join', (['index_dir', 'CollectionIndex.INDEX_FILENAME'], {}), '(index_dir, CollectionIndex.INDEX_FILENAME)\n', (8877, 8920), False, 'import os\n'), ((8964, 9028), 'os.path.join', 'os.path.join', (['index_dir', 'CollectionIndex.INVERTED_INDEX_FILENAME'], {}), '(index_dir, CollectionIndex.INVERTED_INDEX_FILENAME)\n', (8976, 9028), False, 'import os\n'), ((9076, 9139), 'os.path.join', 'os.path.join', (['index_dir', 'CollectionIndex.SUBQUANT_CNTR_FILENAME'], {}), '(index_dir, CollectionIndex.SUBQUANT_CNTR_FILENAME)\n', (9088, 9139), False, 'import os\n'), ((9202, 9260), 'os.path.join', 'os.path.join', (['index_dir', 'CollectionIndex.DIST_MAT_FILENAME'], {}), '(index_dir, CollectionIndex.DIST_MAT_FILENAME)\n', (9214, 9260), False, 'import os\n'), ((10046, 10115), 'os.path.join', 'os.path.join', (["dataset_config['root_dir']", "dataset_config['index_dir']"], {}), "(dataset_config['root_dir'], 
dataset_config['index_dir'])\n", (10058, 10115), False, 'import os\n'), ((10191, 10246), 'os.path.join', 'os.path.join', (['index_dir', 'CollectionIndex.INDEX_FILENAME'], {}), '(index_dir, CollectionIndex.INDEX_FILENAME)\n', (10203, 10246), False, 'import os\n'), ((10284, 10342), 'os.path.join', 'os.path.join', (['index_dir', 'CollectionIndex.DIST_MAT_FILENAME'], {}), '(index_dir, CollectionIndex.DIST_MAT_FILENAME)\n', (10296, 10342), False, 'import os\n'), ((10367, 10393), 'numpy.load', 'np.load', (['indexed_data_path'], {}), '(indexed_data_path)\n', (10374, 10393), True, 'import numpy as np\n'), ((10413, 10435), 'numpy.load', 'np.load', (['dist_mat_path'], {}), '(dist_mat_path)\n', (10420, 10435), True, 'import numpy as np\n'), ((10485, 10523), 'numpy.zeros', 'np.zeros', (['(n, cls.K_IN_KNN)'], {'dtype': 'int'}), '((n, cls.K_IN_KNN), dtype=int)\n', (10493, 10523), True, 'import numpy as np\n'), ((11166, 11207), 'os.path.join', 'os.path.join', (['index_dir', 'cls.KNN_FILENAME'], {}), '(index_dir, cls.KNN_FILENAME)\n', (11178, 11207), False, 'import os\n'), ((11216, 11238), 'numpy.save', 'np.save', (['knn_path', 'knn'], {}), '(knn_path, knn)\n', (11223, 11238), True, 'import numpy as np\n'), ((2028, 2045), 'numpy.load', 'np.load', (['knn_path'], {}), '(knn_path)\n', (2035, 2045), True, 'import numpy as np\n'), ((4817, 4842), 'os.path.exists', 'os.path.exists', (['index_dir'], {}), '(index_dir)\n', (4831, 4842), False, 'import os\n'), ((5160, 5195), 'h5py.File', 'h5py.File', (['index_features_path', '"""r"""'], {}), "(index_features_path, 'r')\n", (5169, 5195), False, 'import h5py\n'), ((7048, 7054), 'time.time', 'time', ([], {}), '()\n', (7052, 7054), False, 'from time import time\n'), ((7090, 7119), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (7105, 7119), False, 'from sklearn.cluster import KMeans, MiniBatchKMeans\n'), ((9287, 9327), 'numpy.save', 'np.save', (['indexed_data_path', 'indexed_data'], {}), 
'(indexed_data_path, indexed_data)\n', (9294, 9327), True, 'import numpy as np\n'), ((9340, 9384), 'numpy.save', 'np.save', (['inverted_index_path', 'inverted_index'], {}), '(inverted_index_path, inverted_index)\n', (9347, 9384), True, 'import numpy as np\n'), ((9397, 9449), 'numpy.save', 'np.save', (['subquant_centroids_path', 'subquant_centroids'], {}), '(subquant_centroids_path, subquant_centroids)\n', (9404, 9449), True, 'import numpy as np\n'), ((9462, 9494), 'numpy.save', 'np.save', (['dist_mat_path', 'dist_mat'], {}), '(dist_mat_path, dist_mat)\n', (9469, 9494), True, 'import numpy as np\n'), ((4877, 4899), 'os.makedirs', 'os.makedirs', (['index_dir'], {}), '(index_dir)\n', (4888, 4899), False, 'import os\n'), ((6039, 6046), 'math.sqrt', 'sqrt', (['n'], {}), '(n)\n', (6043, 6046), False, 'from math import floor, sqrt\n'), ((6628, 6631), 'aimodel.commons.t', 't', ([], {}), '()\n', (6629, 6631), False, 'from aimodel.commons import t\n'), ((7138, 7173), 'h5py.File', 'h5py.File', (['index_features_path', '"""r"""'], {}), "(index_features_path, 'r')\n", (7147, 7173), False, 'import h5py\n'), ((7837, 7872), 'h5py.File', 'h5py.File', (['index_features_path', '"""r"""'], {}), "(index_features_path, 'r')\n", (7846, 7872), False, 'import h5py\n'), ((9683, 9686), 'aimodel.commons.t', 't', ([], {}), '()\n', (9684, 9686), False, 'from aimodel.commons import t\n'), ((10640, 10643), 'aimodel.commons.t', 't', ([], {}), '()\n', (10641, 10643), False, 'from aimodel.commons import t\n'), ((11005, 11028), 'numpy.argsort', 'np.argsort', (['dst'], {'axis': '(1)'}), '(dst, axis=1)\n', (11015, 11028), True, 'import numpy as np\n'), ((11303, 11306), 'aimodel.commons.t', 't', ([], {}), '()\n', (11304, 11306), False, 'from aimodel.commons import t\n'), ((6998, 7001), 'aimodel.commons.t', 't', ([], {}), '()\n', (6999, 7001), False, 'from aimodel.commons import t\n'), ((11133, 11136), 'aimodel.commons.t', 't', ([], {}), '()\n', (11134, 11136), False, 'from aimodel.commons import t\n'), 
((8415, 8454), 'numpy.where', 'np.where', (['(indexed_data[:, s] == cluster)'], {}), '(indexed_data[:, s] == cluster)\n', (8423, 8454), True, 'import numpy as np\n'), ((8568, 8574), 'time.time', 'time', ([], {}), '()\n', (8572, 8574), False, 'from time import time\n')] |
"""
converts text to a matrix where every row is an observation and every
feature is a unique word. The value of each element in the matrix is
either a binary indicator marking the presence of that word or an intergert
of the number of times that workd appears.
"""
# Load library
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
# Create text
text_data = np.array(['I love Brazil. Brazil!',
'Sweden is best',
'Germany beats both'])
# Create the bag of words feature matrix
count = CountVectorizer()
bag_of_words = count.fit_transform(text_data)
# Show feature matrix
bag_of_words.toarray()
# Get feature names
feature_names = count.get_feature_names()
# Create data frame
pd.DataFrame(bag_of_words.toarray(), columns=feature_names)
| [
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.array"
] | [((408, 484), 'numpy.array', 'np.array', (["['I love Brazil. Brazil!', 'Sweden is best', 'Germany beats both']"], {}), "(['I love Brazil. Brazil!', 'Sweden is best', 'Germany beats both'])\n", (416, 484), True, 'import numpy as np\n'), ((579, 596), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (594, 596), False, 'from sklearn.feature_extraction.text import CountVectorizer\n')] |
import matplotlib.pyplot as plt
import numpy as np
from keras.models import load_model
import cv2
model = load_model("../model/semantic_model(92.4).h5")
cap = cv2.VideoCapture(0)
while cap.isOpened():
_, frame = cap.read()
frame = cv2.resize(frame, (256, 256))
pred = model.predict(frame.reshape(1, frame.shape[0], frame.shape[1], frame.shape[2]))
# print(pred.shape)
newImg = np.zeros((256, 256))
# print(pred)
for i in range(13):
for j in range(256):
for k in range(256):
if pred[0, j, k, i] > 0.5:
newImg[j, k] = i
cv2.imshow('Segmented Result', newImg)
plt.imshow(newImg, cmap="Paired")
if cv2.waitKey(25) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| [
"keras.models.load_model",
"cv2.waitKey",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.destroyAllWindows",
"cv2.resize"
] | [((107, 153), 'keras.models.load_model', 'load_model', (['"""../model/semantic_model(92.4).h5"""'], {}), "('../model/semantic_model(92.4).h5')\n", (117, 153), False, 'from keras.models import load_model\n'), ((160, 179), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (176, 179), False, 'import cv2\n'), ((761, 784), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (782, 784), False, 'import cv2\n'), ((242, 271), 'cv2.resize', 'cv2.resize', (['frame', '(256, 256)'], {}), '(frame, (256, 256))\n', (252, 271), False, 'import cv2\n'), ((401, 421), 'numpy.zeros', 'np.zeros', (['(256, 256)'], {}), '((256, 256))\n', (409, 421), True, 'import numpy as np\n'), ((611, 649), 'cv2.imshow', 'cv2.imshow', (['"""Segmented Result"""', 'newImg'], {}), "('Segmented Result', newImg)\n", (621, 649), False, 'import cv2\n'), ((654, 687), 'matplotlib.pyplot.imshow', 'plt.imshow', (['newImg'], {'cmap': '"""Paired"""'}), "(newImg, cmap='Paired')\n", (664, 687), True, 'import matplotlib.pyplot as plt\n'), ((696, 711), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (707, 711), False, 'import cv2\n')] |
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
classes = ('__background__', 'person', 'bicycle', 'car',
'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra',
'giraffe', 'backpack', 'umbrella', 'handbag',
'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle',
'wine glass', 'cup', 'fork', 'knife',
'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot',
'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink',
'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush')
NUM_COLORS = len(STANDARD_COLORS)
FONT = ImageFont.truetype("data/arial.ttf", 15)
def _draw_single_box(image, xmin, ymin, xmax, ymax, display_str, font, color='black', thickness=4):
draw = ImageDraw.Draw(image)
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=color)
text_bottom = bottom
# Reverse list and print from bottom to top.
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
return image
def draw_bounding_boxes(image, gt_boxes, im_info):
num_boxes = gt_boxes.shape[0]
gt_boxes_new = gt_boxes.copy()
gt_boxes_new[:,:4] = np.round(gt_boxes_new[:,:4].copy() / im_info[2])
disp_image = Image.fromarray(np.uint8(image[0]))
for i in range(num_boxes):
this_class = int(gt_boxes_new[i, 4])
disp_image = _draw_single_box(disp_image,
gt_boxes_new[i, 0],
gt_boxes_new[i, 1],
gt_boxes_new[i, 2],
gt_boxes_new[i, 3],
'N%02d-C%02d' % (i, this_class),
FONT,
color=STANDARD_COLORS[this_class % NUM_COLORS])
image[0, :] = np.array(disp_image)
return image | [
"numpy.uint8",
"numpy.ceil",
"six.moves.range",
"PIL.ImageFont.truetype",
"numpy.array",
"PIL.ImageDraw.Draw"
] | [((3587, 3627), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""data/arial.ttf"""', '(15)'], {}), "('data/arial.ttf', 15)\n", (3605, 3627), True, 'import PIL.ImageFont as ImageFont\n'), ((3738, 3759), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (3752, 3759), True, 'import PIL.ImageDraw as ImageDraw\n'), ((4080, 4107), 'numpy.ceil', 'np.ceil', (['(0.05 * text_height)'], {}), '(0.05 * text_height)\n', (4087, 4107), True, 'import numpy as np\n'), ((4687, 4703), 'six.moves.range', 'range', (['num_boxes'], {}), '(num_boxes)\n', (4692, 4703), False, 'from six.moves import range\n'), ((5201, 5221), 'numpy.array', 'np.array', (['disp_image'], {}), '(disp_image)\n', (5209, 5221), True, 'import numpy as np\n'), ((4655, 4673), 'numpy.uint8', 'np.uint8', (['image[0]'], {}), '(image[0])\n', (4663, 4673), True, 'import numpy as np\n')] |
import os
import random
from typing import Union
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
try:
import plotly
except ModuleNotFoundError:
plotly = None
from .plotting_tools import save_or_show
from ai4water.utils.utils import init_subplots
# TODO add Murphy's plot as shown in MLAir
# https://robjhyndman.com/hyndsight/murphy-diagrams/
# competitive skill score plot/ bootstrap skill score plot as in MLAir
# rank histogram and reliability diagram for probabilitic forecasting model.
# show availability plot of data
regplot_combs = [
['cadetblue', 'slateblue', 'darkslateblue'],
['cadetblue', 'mediumblue', 'mediumblue'],
['cornflowerblue', 'dodgerblue', 'darkblue'],
['cornflowerblue', 'dodgerblue', 'steelblue'],
['cornflowerblue', 'mediumblue', 'dodgerblue'],
['cornflowerblue', 'steelblue', 'mediumblue'],
['darkslateblue', 'aliceblue', 'mediumblue'],
['darkslateblue', 'blue', 'royalblue'],
['darkslateblue', 'blueviolet', 'royalblue'],
['darkslateblue', 'darkblue', 'midnightblue'],
['darkslateblue', 'mediumblue', 'darkslateblue'],
['darkslateblue', 'midnightblue', 'mediumblue'],
['seagreen', 'darkslateblue', 'cadetblue'],
['cadetblue', 'darkblue', 'midnightblue'],
['cadetblue', 'deepskyblue', 'cadetblue']
]
class Plot(object):
def __init__(self, path=None, backend='plotly', save=True, dpi=300):
self.path = path
self.backend = backend
self.save = save
self.dpi = dpi
@property
def backend(self):
return self._backend
@backend.setter
def backend(self, x):
_backend = x
assert x in ['plotly', 'matplotlib'], f"unknown backend {x}. Allowed values are `plotly` and `matplotlib`"
if x == 'plotly':
if plotly is None:
_backend = 'matplotlib'
self._backend = _backend
@property
def path(self):
return self._path
@path.setter
def path(self, x):
if x is None:
x = os.getcwd()
self._path = x
def save_or_show(self, save: bool = None, fname=None, where='', dpi=None, bbox_inches='tight',
close=True, show=False):
if save is None:
save = self.save
if dpi is None:
dpi = self.dpi
return save_or_show(self.path, save, fname, where, dpi, bbox_inches, close, show=show)
class PlotResults(Plot):
def __init__(self,
data=None,
config: dict = None,
path=None,
dpi=300,
in_cols=None,
out_cols=None,
backend: str = 'plotly'
):
self.config = config
self.data = data
self.dpi = dpi
self.in_cols = in_cols
self.out_cols = out_cols
super().__init__(path, backend=backend)
@property
def config(self):
return self._config
@config.setter
def config(self, x):
self._config = x
@property
def data(self):
return self._data
@data.setter
def data(self, x):
self._data = x
def horizon_plots(self, errors: dict, fname='', save=True):
plt.close('')
_, axis = plt.subplots(len(errors), sharex='all')
legends = {'r2': "$R^2$", 'rmse': "RMSE", 'nse': "NSE"}
idx = 0
for metric_name, val in errors.items():
ax = axis[idx]
ax.plot(val, '--o', label=legends.get(metric_name, metric_name))
ax.legend(fontsize=14)
if idx >= len(errors)-1:
ax.set_xlabel("Horizons", fontsize=14)
ax.set_ylabel(legends.get(metric_name, metric_name), fontsize=14)
idx += 1
self.save_or_show(save=save, fname=fname)
return
def plot_results(self, true, predicted: pd.DataFrame, save=True, name=None, where=None,
annotation_key=None, annotation_val=None, show=False):
"""
# kwargs can be any/all of followings
# fillstyle:
# marker:
# linestyle:
# markersize:
# color:
"""
regplot(true, predicted, title=name,
annotation_key=annotation_key,
annotation_val=annotation_val)
self.save_or_show(save=save, fname=f"{name}_reg", close=False,
where=where, show=show)
mpl.rcParams.update(mpl.rcParamsDefault)
_, axis = init_subplots(width=12, height=8)
# it is quite possible that when data is datetime indexed, then it is not
# equalidistant and large amount of graph
# will have not data in that case lines plot will create a lot of useless
# interpolating lines where no data is present.
datetime_axis = False
if isinstance(true.index, pd.DatetimeIndex) and pd.infer_freq(true.index) is not None:
style = '.'
true = true
predicted = predicted
datetime_axis = True
else:
if np.isnan(true.values).sum() > 0:
style = '.' # For Nan values we should be using this style otherwise nothing is plotted.
else:
style = '-'
true = true.values
predicted = predicted.values
ms = 4 if style == '.' else 2
if len(true)>1000: # because the data is very large, so better to use small marker size
ms = 2
axis.plot(predicted, style, color='r', label='Prediction')
axis.plot(true, style, color='b', marker='o', fillstyle='none', markersize=ms, label='True')
axis.legend(loc="best", fontsize=22, markerscale=4)
if datetime_axis:
loc = mdates.AutoDateLocator(minticks=4, maxticks=6)
axis.xaxis.set_major_locator(loc)
fmt = mdates.AutoDateFormatter(loc)
axis.xaxis.set_major_formatter(fmt)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("Time", fontsize=18)
self.save_or_show(save=save, fname=name, close=False, where=where)
return
def plot_loss(self, history: dict, name="loss_curve", show=False):
"""Considering history is a dictionary of different arrays, possible
training and validation loss arrays, this method plots those arrays."""
plt.clf()
plt.close('all')
fig = plt.figure()
plt.style.use('ggplot')
i = 1
legends = {
'mean_absolute_error': 'Mean Absolute Error',
'mape': 'Mean Absolute Percentage Error',
'mean_squared_logarithmic_error': 'Mean Squared Logrithmic Error',
'pbias': "Percent Bias",
"nse": "Nash-Sutcliff Efficiency",
"kge": "Kling-Gupta Efficiency",
"tf_r2": "$R^{2}$",
"r2": "$R^{2}$"
}
sub_plots = {1: {'axis': (1, 1), 'width': 9, 'height': 6},
2: {'axis': (1, 2), 'width': 9, 'height': 6},
3: {'axis': (1, 3), 'width': 9, 'height': 6},
4: {'axis': (2, 2), 'width': 9, 'height': 6},
5: {'axis': (5, 1), 'width': 8, 'height': 12},
6: {'axis': (3, 2), 'width': 8, 'height': 12},
7: {'axis': (3, 2), 'width': 20, 'height': 20},
8: {'axis': (4, 2), 'width': 20, 'height': 20},
9: {'axis': (5, 2), 'width': 20, 'height': 20},
10: {'axis': (5, 2), 'width': 20, 'height': 20},
12: {'axis': (4, 3), 'width': 20, 'height': 20},
}
epochs = range(1, len(history['loss']) + 1)
axis_cache = {}
for key, val in history.items():
m_name = key.split('_')[1:] if 'val' in key and '_' in key else key
if isinstance(m_name, list):
m_name = '_'.join(m_name)
if m_name in list(axis_cache.keys()):
axis = axis_cache[m_name]
axis.plot(epochs, val, color=[0.96707953, 0.46268314, 0.45772886], label='Validation ')
axis.legend()
else:
axis = fig.add_subplot(*sub_plots[len(history)]['axis'], i)
axis.plot(epochs, val, color=[0.13778617, 0.06228198, 0.33547859], label='Training ')
axis.legend()
axis.set_xlabel("Epochs")
axis.set_ylabel(legends.get(key, key))
axis_cache[key] = axis
i += 1
axis.set(frame_on=True)
fig.set_figheight(sub_plots[len(history)]['height'])
fig.set_figwidth(sub_plots[len(history)]['width'])
self.save_or_show(fname=name, save=True if name is not None else False, show=show)
mpl.rcParams.update(mpl.rcParamsDefault)
return
def regplot(
x:Union[np.ndarray, pd.DataFrame, pd.Series, list],
y:Union[np.ndarray, pd.DataFrame, pd.Series, list],
title:str = None,
show:bool = False,
annotation_key:str=None,
annotation_val=None,
line_color = None,
marker_color = None,
fill_color = None,
marker_size:int = 20,
ci:Union[int, None] = 95,
figsize:tuple = None,
xlabel:str = 'Observed',
ylabel:str = 'Predicted'
):
"""
Regpression plot with regression line and confidence interval
Arguments:
x : array like, the 'x' value.
y : array like
ci : confidence interval. Set to None if not required.
show : whether to show the plot or not
annotation_key : The name of the value to annotate with.
annotation_val : The value to annotate with.
marker_size :
line_color :
marker_color:
fill_color : only relevent if ci is not None.
figsize : tuple
title : name to be used for title
xlabel :
ylabel :
Example
--------
```python
>>>from ai4water.datasets import arg_beach
>>>from ai4water.utils.visualizations import regplot
>>>data = arg_beach()
>>>regplot(data['pcp3_mm'], data['pcp6_mm'], show=True)
```
"""
x = to_1d_array(x)
y = to_1d_array(y)
mc, lc, fc = random.choice(regplot_combs)
_metric_names = {'r2': '$R^2$'}
plt.close('all')
_, axis = plt.subplots(figsize=figsize or (6, 5))
axis.scatter(x, y, c=marker_color or mc,
s=marker_size) # set style options
if annotation_key is not None:
assert annotation_val is not None
plt.annotate(f'{annotation_key}: {round(annotation_val, 3)}',
xy=(0.3, 0.95),
xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top', fontsize=16)
_regplot(x,
y,
ax=axis,
ci=ci,
line_color=line_color or lc,
fill_color=fill_color or fc)
plt.xlabel(xlabel, fontsize=14)
plt.ylabel(ylabel, fontsize=14)
plt.title(title, fontsize=26)
if show:
plt.show()
return axis
def _ci(a, which=95, axis=None):
"""Return a percentile range from an array of values."""
p = 50 - which / 2, 50 + which / 2
return np.nanpercentile(a, p, axis)
def reg_func(_x, _y):
return np.linalg.pinv(_x).dot(_y)
def bootdist(f, args, n_boot=1000, **func_kwargs):
n = len(args[0])
integers = np.random.randint
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n, n, dtype=np.intp) # intp is indexing dtype
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(f(*sample, **func_kwargs))
return np.array(boot_dist)
def _regplot(x, y, ax, ci=None, line_color=None, fill_color=None):
grid = np.linspace(np.min(x), np.max(x), 100)
X = np.c_[np.ones(len(x)), x]
grid = np.c_[np.ones(len(grid)), grid]
yhat = grid.dot(reg_func(X, y))
ax.plot(grid[:, 1], yhat, color=line_color)
if ci:
boots = bootdist(reg_func, args=[X, y], n_boot=1000).T
yhat_boots = grid.dot(boots).T
err_bands = _ci(yhat_boots, ci, axis=0)
ax.fill_between(grid[:, 1], *err_bands,
facecolor=fill_color,
alpha=.15)
return ax
def to_1d_array(array_like)->np.ndarray:
if array_like.__class__.__name__ in ['list', 'tuple', 'Series']:
return np.array(array_like)
elif array_like.__class__.__name__ == 'ndarray':
if array_like.ndim == 1:
return array_like
else:
assert array_like.size == len(array_like), f'cannot convert multidim ' \
f'array of shape {array_like.shape} to 1d'
return array_like.reshape(-1, )
elif array_like.__class__.__name__ == 'DataFrame' and array_like.ndim == 2:
return array_like.values.reshape(-1,)
else:
raise ValueError(f'cannot convert object array {array_like.__class__.__name__} to 1d ')
| [
"matplotlib.pyplot.title",
"numpy.nanpercentile",
"matplotlib.pyplot.clf",
"pandas.infer_freq",
"numpy.isnan",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.style.use",
"numpy.linalg.pinv",
"matplotlib.pyplot.close",
"matplotlib.rcParams.update",
"matplotlib.pyplot.yticks",
"matplotlib.dates.... | [((10408, 10436), 'random.choice', 'random.choice', (['regplot_combs'], {}), '(regplot_combs)\n', (10421, 10436), False, 'import random\n'), ((10477, 10493), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10486, 10493), True, 'import matplotlib.pyplot as plt\n'), ((10509, 10548), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(figsize or (6, 5))'}), '(figsize=figsize or (6, 5))\n', (10521, 10548), True, 'import matplotlib.pyplot as plt\n'), ((11141, 11172), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'fontsize': '(14)'}), '(xlabel, fontsize=14)\n', (11151, 11172), True, 'import matplotlib.pyplot as plt\n'), ((11177, 11208), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {'fontsize': '(14)'}), '(ylabel, fontsize=14)\n', (11187, 11208), True, 'import matplotlib.pyplot as plt\n'), ((11213, 11242), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(26)'}), '(title, fontsize=26)\n', (11222, 11242), True, 'import matplotlib.pyplot as plt\n'), ((11439, 11467), 'numpy.nanpercentile', 'np.nanpercentile', (['a', 'p', 'axis'], {}), '(a, p, axis)\n', (11455, 11467), True, 'import numpy as np\n'), ((11892, 11911), 'numpy.array', 'np.array', (['boot_dist'], {}), '(boot_dist)\n', (11900, 11911), True, 'import numpy as np\n'), ((3306, 3319), 'matplotlib.pyplot.close', 'plt.close', (['""""""'], {}), "('')\n", (3315, 3319), True, 'import matplotlib.pyplot as plt\n'), ((4531, 4571), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['mpl.rcParamsDefault'], {}), '(mpl.rcParamsDefault)\n', (4550, 4571), True, 'import matplotlib as mpl\n'), ((4591, 4624), 'ai4water.utils.utils.init_subplots', 'init_subplots', ([], {'width': '(12)', 'height': '(8)'}), '(width=12, height=8)\n', (4604, 4624), False, 'from ai4water.utils.utils import init_subplots\n'), ((6077, 6100), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (6087, 6100), True, 'import 
matplotlib.pyplot as plt\n'), ((6109, 6132), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (6119, 6132), True, 'import matplotlib.pyplot as plt\n'), ((6141, 6172), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontsize': '(18)'}), "('Time', fontsize=18)\n", (6151, 6172), True, 'import matplotlib.pyplot as plt\n'), ((6502, 6511), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6509, 6511), True, 'import matplotlib.pyplot as plt\n'), ((6520, 6536), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6529, 6536), True, 'import matplotlib.pyplot as plt\n'), ((6551, 6563), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6561, 6563), True, 'import matplotlib.pyplot as plt\n'), ((6572, 6595), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (6585, 6595), True, 'import matplotlib.pyplot as plt\n'), ((8947, 8987), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['mpl.rcParamsDefault'], {}), '(mpl.rcParamsDefault)\n', (8966, 8987), True, 'import matplotlib as mpl\n'), ((11265, 11275), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11273, 11275), True, 'import matplotlib.pyplot as plt\n'), ((12005, 12014), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (12011, 12014), True, 'import numpy as np\n'), ((12016, 12025), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (12022, 12025), True, 'import numpy as np\n'), ((12629, 12649), 'numpy.array', 'np.array', (['array_like'], {}), '(array_like)\n', (12637, 12649), True, 'import numpy as np\n'), ((2104, 2115), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2113, 2115), False, 'import os\n'), ((5867, 5913), 'matplotlib.dates.AutoDateLocator', 'mdates.AutoDateLocator', ([], {'minticks': '(4)', 'maxticks': '(6)'}), '(minticks=4, maxticks=6)\n', (5889, 5913), True, 'import matplotlib.dates as mdates\n'), ((5986, 6015), 'matplotlib.dates.AutoDateFormatter', 'mdates.AutoDateFormatter', 
(['loc'], {}), '(loc)\n', (6010, 6015), True, 'import matplotlib.dates as mdates\n'), ((11503, 11521), 'numpy.linalg.pinv', 'np.linalg.pinv', (['_x'], {}), '(_x)\n', (11517, 11521), True, 'import numpy as np\n'), ((4982, 5007), 'pandas.infer_freq', 'pd.infer_freq', (['true.index'], {}), '(true.index)\n', (4995, 5007), True, 'import pandas as pd\n'), ((5165, 5186), 'numpy.isnan', 'np.isnan', (['true.values'], {}), '(true.values)\n', (5173, 5186), True, 'import numpy as np\n')] |
import os
import tifffile
from skimage.metrics import mean_squared_error, normalized_root_mse
import numpy as np
import matplotlib.pyplot as plt
fake_root_path = 'mse_data/fake_64'
real_root_path = 'mse_data/real_64'
compared_root_path = 'compared/copperfoam_0.tiff'
fake_image_names = os.listdir(fake_root_path)
real_image_names = os.listdir(real_root_path)
compared_img = tifffile.imread(compared_root_path)
print('fake and compared')
fake_mse_list = []
for fake_image_name in fake_image_names:
fake_image_path = os.path.join(fake_root_path, fake_image_name)
fake_img = tifffile.imread(fake_image_path)
# mse = mean_squared_error((fake_img / 255).astype(np.int), (compared_img / 255).astype(np.int))
mse = mean_squared_error(fake_img.astype(np.uint8), compared_img.astype(np.uint8))
rmse = mse ** 0.5
# mse = normalized_root_mse(fake_img.astype(np.uint8), compared_img.astype(np.uint8))
# mse = mean_squared_error((fake_img / 255).astype(np.int), (compared_img / 255).astype(np.int))
rmse = round(rmse, 3)
fake_mse_list.append(rmse)
# print(mse)
print('real and compared')
real_mse_list = []
for real_image_name in real_image_names:
real_image_path = os.path.join(real_root_path, real_image_name)
real_img = tifffile.imread(real_image_path)
mse = mean_squared_error(real_img.astype(np.uint8), compared_img.astype(np.uint8))
rmse = mse ** 0.5
# mse = normalized_root_mse(real_img.astype(np.uint8), compared_img.astype(np.uint8))
rmse = round(rmse, 3)
real_mse_list.append(rmse)
# print(mse)
# RMSE的偏差
fake_img_mean = np.mean(fake_mse_list)
fake_img_sigma = np.std(fake_mse_list)
real_img_mean = np.mean(real_mse_list)
real_img_sigma = np.std(real_mse_list)
print('fake_img_mean:', fake_img_mean, 'fake_img_sigma:', fake_img_sigma)
print('real_img_mean:', real_img_mean, 'real_img_sigma:', real_img_sigma)
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.figure(figsize=(4, 4)) # 设置画布的尺寸
plt.title('均方根误差', fontsize=20) # 标题,并设定字号大小
labels = '生成图像', '真实图像' # 图例
plt.boxplot([fake_mse_list, real_mse_list], labels=labels) # grid=False:代表不显示背景中的网格线
# data.boxplot()#画箱型图的另一种方法,参数较少,而且只接受dataframe,不常用
plt.show() # 显示图像
# for fake_image_name, real_image_name in zip(fake_image_names, real_image_names):
# fake_image_path = os.path.join(fake_root_path, fake_image_name)
# real_image_path = os.path.join(real_root_path, real_image_name)
#
# fake_img = tifffile.imread(fake_image_path)
# real_img = tifffile.imread(real_image_path)
# mse = mean_squared_error((fake_img / 255).astype(np.int), (real_img / 255).astype(np.int))
# # mse = mse ** 0.5
# mse = round(mse, 3)
# print(mse)
# print('--------')
# for fake_image_name, real_image_name in zip(fake_image_names, real_image_names):
# fake_image_path = os.path.join(fake_root_path, fake_image_name)
# real_image_path = os.path.join(real_root_path, real_image_name)
#
# fake_img = tifffile.imread(fake_image_path)
# real_img = tifffile.imread(real_image_path)
# nrmse = normalized_root_mse(fake_img/255, real_img/255)
# nrmse = round(nrmse, 3)
# print(nrmse)
# fake_list = []
# for image_name in image_names:
# image_path = os.path.join(fake_root_path, image_name)
# img = tifffile.imread(image_path)
# fake_list.append(img)
#
# image_names = os.listdir(real_root_path)
# real_list = []
# for image_name in image_names:
# image_path = os.path.join(real_root_path, image_name)
# img = tifffile.imread(image_path)
# real_list.append(img)
#
#
# mse = compare_mse(fake_list, real_list)
# print(mse)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.std",
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.figure",
"numpy.mean",
"tifffile.imread",
"os.path.join",
"os.listdir"
] | [((287, 313), 'os.listdir', 'os.listdir', (['fake_root_path'], {}), '(fake_root_path)\n', (297, 313), False, 'import os\n'), ((333, 359), 'os.listdir', 'os.listdir', (['real_root_path'], {}), '(real_root_path)\n', (343, 359), False, 'import os\n'), ((375, 410), 'tifffile.imread', 'tifffile.imread', (['compared_root_path'], {}), '(compared_root_path)\n', (390, 410), False, 'import tifffile\n'), ((1594, 1616), 'numpy.mean', 'np.mean', (['fake_mse_list'], {}), '(fake_mse_list)\n', (1601, 1616), True, 'import numpy as np\n'), ((1634, 1655), 'numpy.std', 'np.std', (['fake_mse_list'], {}), '(fake_mse_list)\n', (1640, 1655), True, 'import numpy as np\n'), ((1672, 1694), 'numpy.mean', 'np.mean', (['real_mse_list'], {}), '(real_mse_list)\n', (1679, 1694), True, 'import numpy as np\n'), ((1712, 1733), 'numpy.std', 'np.std', (['real_mse_list'], {}), '(real_mse_list)\n', (1718, 1733), True, 'import numpy as np\n'), ((1928, 1954), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (1938, 1954), True, 'import matplotlib.pyplot as plt\n'), ((1966, 1997), 'matplotlib.pyplot.title', 'plt.title', (['"""均方根误差"""'], {'fontsize': '(20)'}), "('均方根误差', fontsize=20)\n", (1975, 1997), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2100), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['[fake_mse_list, real_mse_list]'], {'labels': 'labels'}), '([fake_mse_list, real_mse_list], labels=labels)\n', (2053, 2100), True, 'import matplotlib.pyplot as plt\n'), ((2180, 2190), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2188, 2190), True, 'import matplotlib.pyplot as plt\n'), ((520, 565), 'os.path.join', 'os.path.join', (['fake_root_path', 'fake_image_name'], {}), '(fake_root_path, fake_image_name)\n', (532, 565), False, 'import os\n'), ((581, 613), 'tifffile.imread', 'tifffile.imread', (['fake_image_path'], {}), '(fake_image_path)\n', (596, 613), False, 'import tifffile\n'), ((1200, 1245), 'os.path.join', 'os.path.join', 
(['real_root_path', 'real_image_name'], {}), '(real_root_path, real_image_name)\n', (1212, 1245), False, 'import os\n'), ((1261, 1293), 'tifffile.imread', 'tifffile.imread', (['real_image_path'], {}), '(real_image_path)\n', (1276, 1293), False, 'import tifffile\n')] |
'optimizer.py'
'Main Datei'
'Das Programm wird über dieses Steuerungsskript gesteuert'
'Einige Dateien werden hier direkt aufgerufen, andere indirekt'
from d_optimal_design import Doptimaldesign
import parallelcomp
import numpy as np
from leastsquares import buildmodel,regkoeff
var = [579,40,18,193,195,13]
alllogs = False
info = ['example','PMSM']# Dateiname/Typname(sehr wichtig für das Logfile da diese Bezeichnung die zugehörigkeit bestimmt)
nodedist = [3.00,3.00]#FE Genauigkeit Strator/Genauigkeit Rotor
lenvar = len(var)
if __name__ == "__main__":
D,optimale_stuetzstellen,ent_sp = Doptimaldesign(var).run()
op = optimale_stuetzstellen.tolist()
logs,datetime = parallelcomp.run(info,nodedist,op)
Y,X = buildmodel(D,lenvar,info,datetime,alllogs,ent_sp)
B = regkoeff(Y,X)
ent_sp = []
import numpy as np
from scipy import optimize
from boundaries import ub,lb
from operator import itemgetter
import matplotlib.pyplot as plt
import numpy.random as rnd
x0 = var
e = []
yc = []
for i in range(len(B)):
Ycal = np.dot(X, B[i])
E = Y[i]-Ycal
SSe = np.dot(np.transpose(E),E)
MSe = SSe/(len(X)-len(B[0]))
Ym = sum(Y[i])/len(X)
SSt = np.dot(np.transpose(Y[i]),Y[i])-((sum(Y[i])**2)/len(X))
SSr = np.dot(np.dot(np.transpose(B[i]),np.transpose(X)),Y[i]) - ((sum(Y[i])**2)/len(X))
R2 = SSr/SSt
v = len(X)-1
c = len(X)-len(B[0])
Radj = 1-((SSe/c)/(SSt/v))
print('Adjustiertes Bestimmtheitsmaß der Regression von Ergebnis{0}'.format(i),Radj[0][0])
e.append(E)
yc.append(Ycal)
for i in range(len(e)):
for q in range(len(e[i])):
e[i][q] = 100/(yc[i][q]/ e[i][q])
fig = plt.figure(figsize=(12, 6))
vax = fig.add_subplot(221)
hax = fig.add_subplot(222)
rax = fig.add_subplot(223)
kax = fig.add_subplot(223)
lax = fig.add_subplot(224)
vax.plot(yc[0],e[0], 'o')
#vax.plot([0, 2500], [0, 0], 'k-', lw=1)
vax.grid(True)
vax.set_xlabel('Approximiertes Moment M')
vax.set_ylabel('Residuen e in Prozent')
hax.plot(yc[1],e[1], 'o')
#hax.plot([0, 500], [0, 0], 'k-', lw=1)
hax.grid(True)
hax.set_xlabel('Approximierte Spannung U')
hax.set_ylabel('Residuen e in Prozent')
rax.plot(yc[2],e[2], 'o')
#rax.plot([0, 500], [0, 0], 'k-', lw=1)
rax.grid(True)
rax.set_xlabel('Approximierte Eisenverluste Pve')
rax.set_ylabel('Residuen e in Prozent')
kax.plot(yc[3],e[3], 'o')
#kax.plot([0, 500], [0, 0], 'k-', lw=1)
kax.grid(True)
kax.set_xlabel('Approximierte Verluste Pve Pvw ')
kax.set_ylabel('Residuen e in Prozent')
lax.plot(yc[5],e[5], 'o')
#lax.plot([0, 1], [0, 0], 'k-', lw=1)
lax.grid(True)
lax.set_xlabel('Approximierter Leistungsfaktor cosphi')
lax.set_ylabel('Residuen e in Prozent')
#plt.show()
def matrixbuildall(x,entferntespalten):
'quadratischer Teil'
xquad = []
for i in range(len(x)):
for j in range(i,len(x)):
xquad.append(x[i]*x[j])
xquad = np.asarray(xquad)
if len(entferntespalten) != 0:
for i in range(len(entferntespalten)):
xquad = np.delete(xquad, np.s_[entferntespalten[i]:entferntespalten[i]+1], axis=1)
else:
pass
X = np.concatenate((np.array([1]), x,xquad))
return X
def optimierungsfunktion(x,B,entferntespalten):
x = matrixbuildall(x,entferntespalten)
y = []
for i in range(len(B)):
y.append(np.dot(x, B[i]))
Pab = y[0]*2*np.pi*50
n = Pab/(abs(y[2])+abs(y[3]))
Vm = x[5]*x[6]*2*np.pi*0.85*(x[4]-x[6])
z1 = ((1-(Pab/36500))**2)*10
z2 = ((1-y[5])**2)*10
z3 = ((1-(y[1]/400))**2)*10000
z4 = ((1-n)**2)*1
z5 = Vm*0.001
z = z1+z2+z3+z4+z5
return z
def constraint00(x):
return x[0]-((x[3]*2)+4+x[1]+200)
def constraint01(x):
return x[0]+((x[3]*2)+4+(x[1]*2)+40)
def constraint02(x):
return (((x[0]-390)/2)-10)-x[1]
def constraint03(x):
return (x[3]-140)-x[5]
def bereichabfrage(X):
    """Check every entry of X against its bounds from the boundaries module.

    Returns ``(True, last_index)`` when all entries lie inside
    ``[lb(X, i), ub(X, i)]``; otherwise ``(False, i - 1, 'ub' | 'lb')`` for
    the first violating entry, where the tag names the violated bound.
    NOTE(review): the reported index is ``i - 1`` rather than ``i`` —
    presumably an offset back into an unreduced vector; confirm against
    the caller before relying on it.
    """
    for i, value in enumerate(X):
        if lb(X, i) <= value <= ub(X, i):
            continue
        # Out of range: above lb means the upper bound was exceeded,
        # otherwise the value fell below the lower bound.
        violated = 'ub' if value >= lb(X, i) else 'lb'
        return False, i - 1, violated
    return True, i
# --- SLSQP optimisation, multi-start from every design point -------------
# Keep the six geometry factors (columns 1..6); per the final print these
# are DA, H, TW, RA, BT, HM.
X = X[:,[1,2,3,4,5,6]]
E = []
# Inequality constraints (feasible when fun(x) >= 0) encoding geometric
# compatibility of the machine cross-section.
cons = ({'type':'ineq','fun':constraint00},
        {'type':'ineq','fun':constraint01},
        {'type':'ineq','fun':constraint02},
        {'type':'ineq','fun':constraint03})
# Box bounds per factor: DA free, H >= 30, TW in [10, 15.525],
# RA in [153, 250], BT in [50, 300], HM >= 5.
bnd = ((-np.inf,np.inf),(30,np.inf),(10,15.525),(153,250),(50,300),(5,np.inf))
# Run SLSQP once from each design point as start vector and collect
# [rounded objective, optimiser] pairs in E.
for i in range(len(X)):
    x0 = X[i]
    op = optimize.minimize(optimierungsfunktion, x0, args=(B,ent_sp), method='SLSQP', bounds=bnd,constraints=cons,options={'disp':True})
    print(bereichabfrage(op.x))
    optimalx = op.x
    # NOTE(review): the inner loop reuses the outer loop variable ``i``;
    # harmless because the outer ``for`` reassigns it, but fragile if code
    # is ever added between the loops.
    for i in range(len(optimalx)):
        optimalx[i]=round(optimalx[i],2)
    E.append([round(op.fun,2),list(op.x)])
# Rank multi-start results by objective value, best first.
E = sorted(E, key=itemgetter(0), reverse=False)
Eopt = E[1]
xfin = E[0][1]
# NOTE(review): this hard-coded vector overrides the computed optimum
# ``E[0][1]`` from the line above — presumably a manually fixed final
# design; remove it to use the actual optimisation result.
xfin = [526.27, 46.44, 15.52, 153.0, 50.0, 5.0]
Xfin = matrixbuildall(xfin,ent_sp)
yfin = np.dot(Xfin, B)
#print(bereichabfrage(xfin))
print('{0}\n\n{1}\n DA: Außen Durchmesser (m.yoke_diam) \n• H: Nut tiefe (m.slot_height)\n• TW: Zahnbreite (m.tooth_width)\n• RA: Magnet außen Radius (m.mag_rad)\n• BT: Bautiefe (m.arm_lenght)\n• HM: Magnet Höhe (m.mag_height)'.format(yfin,xfin)) | [
"scipy.optimize.minimize",
"leastsquares.buildmodel",
"numpy.delete",
"numpy.asarray",
"numpy.transpose",
"boundaries.ub",
"boundaries.lb",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.dot",
"d_optimal_design.Doptimaldesign",
"operator.itemgetter",
"leastsquares.regkoeff",
"parallelco... | [((795, 831), 'parallelcomp.run', 'parallelcomp.run', (['info', 'nodedist', 'op'], {}), '(info, nodedist, op)\n', (811, 831), False, 'import parallelcomp\n'), ((869, 923), 'leastsquares.buildmodel', 'buildmodel', (['D', 'lenvar', 'info', 'datetime', 'alllogs', 'ent_sp'], {}), '(D, lenvar, info, datetime, alllogs, ent_sp)\n', (879, 923), False, 'from leastsquares import buildmodel, regkoeff\n'), ((931, 945), 'leastsquares.regkoeff', 'regkoeff', (['Y', 'X'], {}), '(Y, X)\n', (939, 945), False, 'from leastsquares import buildmodel, regkoeff\n'), ((2040, 2067), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2050, 2067), True, 'import matplotlib.pyplot as plt\n'), ((6074, 6089), 'numpy.dot', 'np.dot', (['Xfin', 'B'], {}), '(Xfin, B)\n', (6080, 6089), True, 'import numpy as np\n'), ((1278, 1293), 'numpy.dot', 'np.dot', (['X', 'B[i]'], {}), '(X, B[i])\n', (1284, 1293), True, 'import numpy as np\n'), ((3430, 3447), 'numpy.asarray', 'np.asarray', (['xquad'], {}), '(xquad)\n', (3440, 3447), True, 'import numpy as np\n'), ((5511, 5647), 'scipy.optimize.minimize', 'optimize.minimize', (['optimierungsfunktion', 'x0'], {'args': '(B, ent_sp)', 'method': '"""SLSQP"""', 'bounds': 'bnd', 'constraints': 'cons', 'options': "{'disp': True}"}), "(optimierungsfunktion, x0, args=(B, ent_sp), method=\n 'SLSQP', bounds=bnd, constraints=cons, options={'disp': True})\n", (5528, 5647), False, 'from scipy import optimize\n'), ((686, 705), 'd_optimal_design.Doptimaldesign', 'Doptimaldesign', (['var'], {}), '(var)\n', (700, 705), False, 'from d_optimal_design import Doptimaldesign\n'), ((1353, 1368), 'numpy.transpose', 'np.transpose', (['E'], {}), '(E)\n', (1365, 1368), True, 'import numpy as np\n'), ((5880, 5893), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (5890, 5893), False, 'from operator import itemgetter\n'), ((1483, 1501), 'numpy.transpose', 'np.transpose', (['Y[i]'], {}), '(Y[i])\n', (1495, 1501), True, 
'import numpy as np\n'), ((3565, 3641), 'numpy.delete', 'np.delete', (['xquad', 'np.s_[entferntespalten[i]:entferntespalten[i] + 1]'], {'axis': '(1)'}), '(xquad, np.s_[entferntespalten[i]:entferntespalten[i] + 1], axis=1)\n', (3574, 3641), True, 'import numpy as np\n'), ((3721, 3734), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (3729, 3734), True, 'import numpy as np\n'), ((3975, 3990), 'numpy.dot', 'np.dot', (['x', 'B[i]'], {}), '(x, B[i])\n', (3981, 3990), True, 'import numpy as np\n'), ((4821, 4829), 'boundaries.lb', 'lb', (['X', 'i'], {}), '(X, i)\n', (4823, 4829), False, 'from boundaries import ub, lb\n'), ((1567, 1585), 'numpy.transpose', 'np.transpose', (['B[i]'], {}), '(B[i])\n', (1579, 1585), True, 'import numpy as np\n'), ((1586, 1601), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (1598, 1601), True, 'import numpy as np\n'), ((4748, 4756), 'boundaries.ub', 'ub', (['X', 'i'], {}), '(X, i)\n', (4750, 4756), False, 'from boundaries import ub, lb\n'), ((4769, 4777), 'boundaries.lb', 'lb', (['X', 'i'], {}), '(X, i)\n', (4771, 4777), False, 'from boundaries import ub, lb\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.