code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from osrsmath.combat.successful_hits import *
from matplotlib import cm
import matplotlib.pyplot as plt
import osrsmath.config as config
import numpy as np
import sys
import os
def plot(m_bounds, h_bounds):
m_min, m_max = m_bounds
h_min, h_max = h_bounds
max_hits = np.array(range(m_min, m_max+1))
healths = np.array(range(h_min, h_max+1))
Ms, Hs = np.meshgrid(max_hits, healths)
fig, ax = config.get_figure(50, 18, scale=5 if showing else 10)
plt.xlabel("$h_0$", fontsize=25, labelpad=20)
plt.ylabel("$M$", fontsize=25, labelpad=20)
ax.set_zlabel("Turns to kill", fontsize=25, rotation=90, labelpad=20)
ax.tick_params(axis='z', labelsize=18)
ax.tick_params(axis='y', labelsize=18)
ax.tick_params(axis='x', labelsize=18)
A = np.vectorize(lambda m, h: MarkovChain().turns_to_kill(m, h))(Hs, Ms)
surf = ax.plot_surface(Hs, Ms, A, cmap=cm.hot)
A = np.vectorize(lambda m, h: Crude().turns_to_kill(m, h))(Hs, Ms)
surf = ax.plot_surface(Hs, Ms, A, cmap=cm.cool)
return plt
if __name__ == '__main__':
showing = len(sys.argv) >= 2 and sys.argv[1] == 'show'
if showing:
plot(m_bounds=(1, 110), h_bounds=(1, 250)).show()
plot(m_bounds=(20, 110), h_bounds=(1, 110)).show()
else:
plot(m_bounds=(1, 110), h_bounds=(1, 250))
file_name = "turns_to_kill"
plt.savefig(f"{file_name}.pdf")
os.system(f"pdfcrop {file_name}.pdf")
os.rename(f"{file_name}-crop.pdf", f"{file_name}.pdf")
plot(m_bounds=(20, 110), h_bounds=(1, 110))
file_name = "turns_to_kill_zoom"
plt.savefig(f"{file_name}.pdf")
os.system(f"pdfcrop {file_name}.pdf")
os.rename(f"{file_name}-crop.pdf", f"{file_name}.pdf")
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"os.rename",
"matplotlib.pyplot.xlabel",
"osrsmath.config.get_figure",
"numpy.meshgrid",
"os.system"
] | [((368, 398), 'numpy.meshgrid', 'np.meshgrid', (['max_hits', 'healths'], {}), '(max_hits, healths)\n', (379, 398), True, 'import numpy as np\n'), ((411, 464), 'osrsmath.config.get_figure', 'config.get_figure', (['(50)', '(18)'], {'scale': '(5 if showing else 10)'}), '(50, 18, scale=5 if showing else 10)\n', (428, 464), True, 'import osrsmath.config as config\n'), ((467, 512), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$h_0$"""'], {'fontsize': '(25)', 'labelpad': '(20)'}), "('$h_0$', fontsize=25, labelpad=20)\n", (477, 512), True, 'import matplotlib.pyplot as plt\n'), ((515, 558), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$M$"""'], {'fontsize': '(25)', 'labelpad': '(20)'}), "('$M$', fontsize=25, labelpad=20)\n", (525, 558), True, 'import matplotlib.pyplot as plt\n'), ((1310, 1341), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{file_name}.pdf"""'], {}), "(f'{file_name}.pdf')\n", (1321, 1341), True, 'import matplotlib.pyplot as plt\n'), ((1345, 1382), 'os.system', 'os.system', (['f"""pdfcrop {file_name}.pdf"""'], {}), "(f'pdfcrop {file_name}.pdf')\n", (1354, 1382), False, 'import os\n'), ((1386, 1440), 'os.rename', 'os.rename', (['f"""{file_name}-crop.pdf"""', 'f"""{file_name}.pdf"""'], {}), "(f'{file_name}-crop.pdf', f'{file_name}.pdf')\n", (1395, 1440), False, 'import os\n'), ((1529, 1560), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{file_name}.pdf"""'], {}), "(f'{file_name}.pdf')\n", (1540, 1560), True, 'import matplotlib.pyplot as plt\n'), ((1564, 1601), 'os.system', 'os.system', (['f"""pdfcrop {file_name}.pdf"""'], {}), "(f'pdfcrop {file_name}.pdf')\n", (1573, 1601), False, 'import os\n'), ((1605, 1659), 'os.rename', 'os.rename', (['f"""{file_name}-crop.pdf"""', 'f"""{file_name}.pdf"""'], {}), "(f'{file_name}-crop.pdf', f'{file_name}.pdf')\n", (1614, 1659), False, 'import os\n')] |
import os
import cv2
import numpy as np
from server.services.errors import Errors, PortalError
from server.services.hashing import get_hash
from server.models.abstract.BaseModel import BaseModel
class DarknetModel(BaseModel):
def _load_label_map_(self):
labels = (
open(os.path.join(self._directory_, self._labelsname_))
.read()
.strip()
.split("\n")
)
self._label_map_ = {
str(label_index): {"id": label_index, "name": label_name}
for label_index, label_name in enumerate(labels)
}
def register(self):
self._labelsname_ = self._weightsname_ = self._configname_ = ""
labels = weights = configs = 0
for file in os.listdir(self._directory_):
if file.endswith(".names"):
self._labelsname_ = os.path.join(self._directory_, file)
labels += 1
if file.endswith(".weights"):
self._weightsname_ = os.path.join(self._directory_, file)
weights += 1
if file.endswith(".cfg"):
self._configname_ = os.path.join(self._directory_, file)
configs += 1
if self._labelsname_ == "":
raise PortalError(
Errors.INVALIDFILEPATH,
"class label file .names is not found in given directory.",
)
if labels > 1:
raise PortalError(
Errors.OVERLOADED, "multiple class label files found."
)
if self._weightsname_ == "":
raise PortalError(
Errors.INVALIDFILEPATH,
"weights file .weights is not found in given directory",
)
if weights > 1:
raise PortalError(
Errors.OVERLOADED, "multiple weights label files found."
)
if self._configname_ == "":
raise PortalError(
Errors.INVALIDFILEPATH,
"config file .cfg is not found in given directory.",
)
if configs > 1:
raise PortalError(
Errors.OVERLOADED, "multiple config files found."
)
with open(self._configname_, "r") as conf:
heightcheck = False
widthcheck = False
for line in conf:
if heightcheck and widthcheck:
break
if "height" in line:
self._height_ = int(
line.replace("=", "").replace("height", "").strip()
)
heightcheck = True
if "width" in line:
self._width_ = int(
line.replace("=", "").replace("width", "").strip()
)
widthcheck = True
self._load_label_map_()
self._key_ = get_hash(self._directory_)
return self._key_, self
def load(self):
loaded_model = cv2.dnn.readNetFromDarknet(
self._configname_, self._weightsname_
)
return loaded_model
def predict(self, model, image_array):
try:
(H, W) = image_array.shape[:2]
ln = model.getLayerNames()
ln = [ln[i[0] - 1] for i in model.getUnconnectedOutLayers()]
blob = cv2.dnn.blobFromImage(
image_array,
1 / 255.0,
(self._height_, self._width_),
swapRB=True,
crop=False,
)
model.setInput(blob)
layerOutputs = model.forward(ln)
boxes = []
confidences = []
classIDs = []
for output in layerOutputs:
for detection in output:
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
box = detection[0:4]
(centerX, centerY, width, height) = box
xmin = centerX - (width / 2)
ymin = centerY - (height / 2)
xmax = xmin + width
ymax = ymin + height
boxes.append([ymin, xmin, ymax, xmax])
confidences.append(float(confidence))
classIDs.append(classID)
detections = {}
detections["detection_masks"] = None
detections["detection_boxes"] = np.squeeze(np.array(boxes))
detections["detection_scores"] = np.squeeze(np.array(confidences))
detections["detection_classes"] = np.squeeze(np.array(classIDs))
return detections
except Exception as e:
raise PortalError(Errors.FAILEDPREDICTION, str(e))
| [
"cv2.dnn.blobFromImage",
"os.listdir",
"server.services.hashing.get_hash",
"os.path.join",
"numpy.argmax",
"numpy.array",
"server.services.errors.PortalError",
"cv2.dnn.readNetFromDarknet"
] | [((752, 780), 'os.listdir', 'os.listdir', (['self._directory_'], {}), '(self._directory_)\n', (762, 780), False, 'import os\n'), ((2903, 2929), 'server.services.hashing.get_hash', 'get_hash', (['self._directory_'], {}), '(self._directory_)\n', (2911, 2929), False, 'from server.services.hashing import get_hash\n'), ((3006, 3071), 'cv2.dnn.readNetFromDarknet', 'cv2.dnn.readNetFromDarknet', (['self._configname_', 'self._weightsname_'], {}), '(self._configname_, self._weightsname_)\n', (3032, 3071), False, 'import cv2\n'), ((1263, 1362), 'server.services.errors.PortalError', 'PortalError', (['Errors.INVALIDFILEPATH', '"""class label file .names is not found in given directory."""'], {}), "(Errors.INVALIDFILEPATH,\n 'class label file .names is not found in given directory.')\n", (1274, 1362), False, 'from server.services.errors import Errors, PortalError\n'), ((1447, 1514), 'server.services.errors.PortalError', 'PortalError', (['Errors.OVERLOADED', '"""multiple class label files found."""'], {}), "(Errors.OVERLOADED, 'multiple class label files found.')\n", (1458, 1514), False, 'from server.services.errors import Errors, PortalError\n'), ((1600, 1696), 'server.services.errors.PortalError', 'PortalError', (['Errors.INVALIDFILEPATH', '"""weights file .weights is not found in given directory"""'], {}), "(Errors.INVALIDFILEPATH,\n 'weights file .weights is not found in given directory')\n", (1611, 1696), False, 'from server.services.errors import Errors, PortalError\n'), ((1782, 1851), 'server.services.errors.PortalError', 'PortalError', (['Errors.OVERLOADED', '"""multiple weights label files found."""'], {}), "(Errors.OVERLOADED, 'multiple weights label files found.')\n", (1793, 1851), False, 'from server.services.errors import Errors, PortalError\n'), ((1936, 2028), 'server.services.errors.PortalError', 'PortalError', (['Errors.INVALIDFILEPATH', '"""config file .cfg is not found in given directory."""'], {}), "(Errors.INVALIDFILEPATH,\n 'config file .cfg is not found 
in given directory.')\n", (1947, 2028), False, 'from server.services.errors import Errors, PortalError\n'), ((2114, 2176), 'server.services.errors.PortalError', 'PortalError', (['Errors.OVERLOADED', '"""multiple config files found."""'], {}), "(Errors.OVERLOADED, 'multiple config files found.')\n", (2125, 2176), False, 'from server.services.errors import Errors, PortalError\n'), ((3353, 3458), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image_array', '(1 / 255.0)', '(self._height_, self._width_)'], {'swapRB': '(True)', 'crop': '(False)'}), '(image_array, 1 / 255.0, (self._height_, self._width_),\n swapRB=True, crop=False)\n', (3374, 3458), False, 'import cv2\n'), ((858, 894), 'os.path.join', 'os.path.join', (['self._directory_', 'file'], {}), '(self._directory_, file)\n', (870, 894), False, 'import os\n'), ((1002, 1038), 'os.path.join', 'os.path.join', (['self._directory_', 'file'], {}), '(self._directory_, file)\n', (1014, 1038), False, 'import os\n'), ((1142, 1178), 'os.path.join', 'os.path.join', (['self._directory_', 'file'], {}), '(self._directory_, file)\n', (1154, 1178), False, 'import os\n'), ((4503, 4518), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (4511, 4518), True, 'import numpy as np\n'), ((4576, 4597), 'numpy.array', 'np.array', (['confidences'], {}), '(confidences)\n', (4584, 4597), True, 'import numpy as np\n'), ((4656, 4674), 'numpy.array', 'np.array', (['classIDs'], {}), '(classIDs)\n', (4664, 4674), True, 'import numpy as np\n'), ((3861, 3878), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (3870, 3878), True, 'import numpy as np\n'), ((299, 348), 'os.path.join', 'os.path.join', (['self._directory_', 'self._labelsname_'], {}), '(self._directory_, self._labelsname_)\n', (311, 348), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script will do the inference process for feature-extract based
classification methods with for pre-trained model on specific video
frames. It will automatically cover the feature extract part for each
video frames and inference part. Target video frames should be prepared
under data dir for "train" or "test" part and recorded in data_file.csv.
Currently this script only verified on MobileNetV2 extract model
and MLP inference model.
You can change you sequence length and limit to a set number of classes
below, but need to match your pre-trained inference model!!
"""
from tensorflow.keras.models import load_model
import numpy as np
import os.path
import os, argparse
from data import DataSet
from extractor import Extractor
from tensorflow.keras import backend as K
from utils.common import get_config
K.clear_session()
def extract(data, seq_length, video_name):
# get the model.
model = Extractor()
# init the sequence
sequence = []
# First, find the sample row.
sample = None
for row in data.data:
if row[2] == video_name:
sample = row
break
if sample is None:
raise ValueError("Couldn't find sample: %s" % video_name)
# Get the frames for this video.
frames = data.get_frames_for_sample(sample)
# Now downsample to just the ones we need.
frames = data.rescale_list(frames, seq_length)
# Now loop through and extract features to build the sequence.
for image in frames:
features = model.extract(image)
sequence.append(features)
sequence = np.asarray(sequence)
return sequence
def predict(data, sequence, saved_model):
model = load_model(saved_model)
# Predict!
prediction = model.predict(np.expand_dims(sequence, axis=0))
data.print_class_from_prediction(np.squeeze(prediction, axis=0))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_file', help='Model file name with path. Should be under data/checkpoints/ dir', type=str, default=os.path.join(os.path.dirname(__file__), 'data/checkpoints/mlp-features.523-0.346-0.92.hdf5'))
parser.add_argument('--video_name', help='Inferenced video file in data/data_file.csv. Do not include the extension ', type=str, default='restRoom_001')
args = parser.parse_args()
cf = get_config()
# Sequence length must match the lengh used during training.
seq_length = cf.getint('sequence', 'seq_length')
# Limit must match that used during training.
class_limit = cf.get('sequence', 'class_limit')
class_limit = int(class_limit) if class_limit != 'None' else None
# Get the dataset.
data = DataSet(seq_length=seq_length, class_limit=class_limit)
sequence = extract(data, seq_length, args.video_name)
predict(data, sequence, args.model_file)
if __name__ == "__main__":
main()
| [
"argparse.ArgumentParser",
"utils.common.get_config",
"numpy.asarray",
"data.DataSet",
"numpy.squeeze",
"extractor.Extractor",
"os.path.dirname",
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"tensorflow.keras.backend.clear_session"
] | [((867, 884), 'tensorflow.keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (882, 884), True, 'from tensorflow.keras import backend as K\n'), ((963, 974), 'extractor.Extractor', 'Extractor', ([], {}), '()\n', (972, 974), False, 'from extractor import Extractor\n'), ((1628, 1648), 'numpy.asarray', 'np.asarray', (['sequence'], {}), '(sequence)\n', (1638, 1648), True, 'import numpy as np\n'), ((1725, 1748), 'tensorflow.keras.models.load_model', 'load_model', (['saved_model'], {}), '(saved_model)\n', (1735, 1748), False, 'from tensorflow.keras.models import load_model\n'), ((1926, 1951), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1949, 1951), False, 'import os, argparse\n'), ((2375, 2387), 'utils.common.get_config', 'get_config', ([], {}), '()\n', (2385, 2387), False, 'from utils.common import get_config\n'), ((2713, 2768), 'data.DataSet', 'DataSet', ([], {'seq_length': 'seq_length', 'class_limit': 'class_limit'}), '(seq_length=seq_length, class_limit=class_limit)\n', (2720, 2768), False, 'from data import DataSet\n'), ((1796, 1828), 'numpy.expand_dims', 'np.expand_dims', (['sequence'], {'axis': '(0)'}), '(sequence, axis=0)\n', (1810, 1828), True, 'import numpy as np\n'), ((1867, 1897), 'numpy.squeeze', 'np.squeeze', (['prediction'], {'axis': '(0)'}), '(prediction, axis=0)\n', (1877, 1897), True, 'import numpy as np\n'), ((2096, 2121), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2111, 2121), False, 'import os, argparse\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def norm_col_init(weights, std=1.0):
x = torch.randn(weights.size())
x *= std / torch.sqrt((x**2).sum(1, keepdim=True))
return x
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
class A3Clstm(torch.nn.Module):
def __init__(self, num_inputs, action_space):
super(A3Clstm, self).__init__()
# convolutional neural networks
self.conv1 = nn.Conv2d(num_inputs, 32, 5, stride=1, padding=2)
self.maxp1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32, 32, 5, stride=1, padding=1)
self.maxp2 = nn.MaxPool2d(2, 2)
self.conv3 = nn.Conv2d(32, 64, 4, stride=1, padding=1)
self.maxp3 = nn.MaxPool2d(2, 2)
self.conv4 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.maxp4 = nn.MaxPool2d(2, 2)
# LSTM Cells
self.lstm = nn.LSTMCell(1024, 512)
num_outputs = action_space.n
# The critic layer
self.critic_linear = nn.Linear(512, 1)
# The actor layer
self.actor_linear = nn.Linear(512, num_outputs)
self.apply(weights_init)
self.actor_linear.weight.data = norm_col_init(
self.actor_linear.weight.data, 0.01)
self.actor_linear.bias.data.fill_(0)
self.critic_linear.weight.data = norm_col_init(
self.critic_linear.weight.data, 1.0)
self.critic_linear.bias.data.fill_(0)
self.lstm.bias_ih.data.fill_(0)
self.lstm.bias_hh.data.fill_(0)
self.train()
# forward propagation
def forward(self, inputs):
inputs, (hx, cx) = inputs
x = F.relu(self.maxp1(self.conv1(inputs)))
x = F.relu(self.maxp2(self.conv2(x)))
x = F.relu(self.maxp3(self.conv3(x)))
x = F.relu(self.maxp4(self.conv4(x)))
x = x.view(x.size(0), -1)
hx, cx = self.lstm(x, (hx, cx))
x = hx
return self.critic_linear(x), self.actor_linear(x), (hx, cx)
| [
"numpy.prod",
"numpy.sqrt",
"torch.nn.LSTMCell",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear"
] | [((392, 418), 'numpy.prod', 'np.prod', (['weight_shape[1:4]'], {}), '(weight_shape[1:4])\n', (399, 418), True, 'import numpy as np\n'), ((500, 533), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (fan_in + fan_out))'], {}), '(6.0 / (fan_in + fan_out))\n', (507, 533), True, 'import numpy as np\n'), ((1085, 1134), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_inputs', '(32)', '(5)'], {'stride': '(1)', 'padding': '(2)'}), '(num_inputs, 32, 5, stride=1, padding=2)\n', (1094, 1134), True, 'import torch.nn as nn\n'), ((1156, 1174), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1168, 1174), True, 'import torch.nn as nn\n'), ((1196, 1237), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(32)', '(5)'], {'stride': '(1)', 'padding': '(1)'}), '(32, 32, 5, stride=1, padding=1)\n', (1205, 1237), True, 'import torch.nn as nn\n'), ((1259, 1277), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1271, 1277), True, 'import torch.nn as nn\n'), ((1299, 1340), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(4)'], {'stride': '(1)', 'padding': '(1)'}), '(32, 64, 4, stride=1, padding=1)\n', (1308, 1340), True, 'import torch.nn as nn\n'), ((1362, 1380), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1374, 1380), True, 'import torch.nn as nn\n'), ((1402, 1443), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 64, 3, stride=1, padding=1)\n', (1411, 1443), True, 'import torch.nn as nn\n'), ((1465, 1483), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1477, 1483), True, 'import torch.nn as nn\n'), ((1526, 1548), 'torch.nn.LSTMCell', 'nn.LSTMCell', (['(1024)', '(512)'], {}), '(1024, 512)\n', (1537, 1548), True, 'import torch.nn as nn\n'), ((1643, 1660), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1)'], {}), '(512, 1)\n', (1652, 1660), True, 'import torch.nn as nn\n'), ((1715, 1742), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'num_outputs'], {}), '(512, 
num_outputs)\n', (1724, 1742), True, 'import torch.nn as nn\n'), ((437, 463), 'numpy.prod', 'np.prod', (['weight_shape[2:4]'], {}), '(weight_shape[2:4])\n', (444, 463), True, 'import numpy as np\n'), ((788, 821), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (fan_in + fan_out))'], {}), '(6.0 / (fan_in + fan_out))\n', (795, 821), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import tempfile
import os
import numpy as np
from oneflow.test_utils.test_util import GenArgDict
from optimizer_test_util import clip_grad_norm_np
import oneflow as flow
from oneflow.nn.parameter import Parameter
def compare_with_numpy_sgd(
test_case, momentum, weight_decay, scale, learning_rate, train_iters,
):
num_rows = 500
embedding_size = 128
model_shape = (num_rows, embedding_size)
line_size = embedding_size * 2 if momentum > 0 else embedding_size
num_valid_seq = np.random.randint(1, num_rows, (train_iters))
skip_if_seq = [np.random.randint(2) for i in range(train_iters)]
random_grad_seq = []
for _ in range(train_iters):
random_grad_seq.append(np.random.uniform(size=model_shape).astype(np.float32))
init_value = np.random.uniform(size=(num_rows, line_size)).astype(np.float32)
down_scale_by = 10
def sgd_by_oneflow():
unique_embeddings_tensor = flow.tensor(init_value, requires_grad=False).to(
"cuda"
)
lr_tensor = flow.tensor(
np.array(learning_rate).reshape(1,).astype(np.float32)
).to("cuda")
down_scale_by_tensor = flow.tensor(
np.array(down_scale_by).astype(np.float32)
).to("cuda")
def train_one_iter(num_valid, unique_embeddings, embedding_grad, skip_if):
return flow._C.one_embedding_sgd_update(
num_valid,
unique_embeddings,
embedding_grad,
lr_tensor,
down_scale_by_tensor,
skip_if,
scale,
weight_decay,
momentum,
)
for i in range(train_iters):
num_valid_tensor = flow.tensor(
np.array(num_valid_seq[i]).reshape(1,).astype(np.int32)
).to("cuda")
grad_tensor = flow.tensor(random_grad_seq[i]).to("cuda")
skip_if_tensor = flow.tensor(
np.array(skip_if_seq[i]).reshape(1,).astype(np.int64)
).to("cuda")
updated_tensor = train_one_iter(
num_valid_tensor, unique_embeddings_tensor, grad_tensor, skip_if_tensor
)
unique_embeddings_tensor[0 : num_valid_seq[i]] = updated_tensor[
0 : num_valid_seq[i]
]
return unique_embeddings_tensor
def sgd_by_numpy():
x = init_value[:, 0:embedding_size]
vt = init_value[:, embedding_size:]
def train_one_iter(num_valid, grad, model, state):
grad[0:num_valid] = grad[0:num_valid] * (scale / down_scale_by)
next_state = (
momentum * state[0:num_valid] if momentum > 0 else 0
) - learning_rate * grad[0:num_valid]
if momentum > 0:
state[0:num_valid] = next_state
model[0:num_valid] = (
model[0:num_valid]
+ next_state
- learning_rate * weight_decay * model[0:num_valid]
)
return (model, state)
for i in range(train_iters):
if skip_if_seq[i] > 0:
pass
else:
(x, vt) = train_one_iter(
int(num_valid_seq[i]), random_grad_seq[i], x, vt
)
return x, vt
oneflow_res = sgd_by_oneflow().numpy()
of_model = oneflow_res[:, 0:embedding_size]
of_momentum = oneflow_res[:, embedding_size:]
np_model, np_momentum = sgd_by_numpy()
test_case.assertTrue(
np.allclose(of_model.flatten(), np_model.flatten(), rtol=0.001, atol=0.001)
)
if momentum > 0:
test_case.assertTrue(
np.allclose(
of_momentum.flatten(), np_momentum.flatten(), rtol=0.001, atol=0.001
)
)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestOptimizers(flow.unittest.TestCase):
def test_one_embedding_sgd(test_case):
arg_dict = OrderedDict()
arg_dict["momentum"] = [0, 0.9]
arg_dict["weight_decay"] = [0, 0.1]
arg_dict["scale"] = [1, 0.1]
arg_dict["learning_rate"] = [1, 0.9]
arg_dict["train_iters"] = [10]
for arg in GenArgDict(arg_dict):
compare_with_numpy_sgd(test_case, **arg)
if __name__ == "__main__":
unittest.main()
| [
"collections.OrderedDict",
"os.getenv",
"oneflow._C.one_embedding_sgd_update",
"oneflow.test_utils.test_util.GenArgDict",
"numpy.array",
"numpy.random.randint",
"numpy.random.uniform",
"unittest.main",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.tensor"
] | [((4524, 4556), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4554, 4556), True, 'import oneflow as flow\n'), ((1147, 1190), 'numpy.random.randint', 'np.random.randint', (['(1)', 'num_rows', 'train_iters'], {}), '(1, num_rows, train_iters)\n', (1164, 1190), True, 'import numpy as np\n'), ((4464, 4498), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (4473, 4498), False, 'import os\n'), ((5011, 5026), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5024, 5026), False, 'import unittest\n'), ((1212, 1232), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (1229, 1232), True, 'import numpy as np\n'), ((4665, 4678), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4676, 4678), False, 'from collections import OrderedDict\n'), ((4903, 4923), 'oneflow.test_utils.test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (4913, 4923), False, 'from oneflow.test_utils.test_util import GenArgDict\n'), ((1426, 1471), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(num_rows, line_size)'}), '(size=(num_rows, line_size))\n', (1443, 1471), True, 'import numpy as np\n'), ((1999, 2158), 'oneflow._C.one_embedding_sgd_update', 'flow._C.one_embedding_sgd_update', (['num_valid', 'unique_embeddings', 'embedding_grad', 'lr_tensor', 'down_scale_by_tensor', 'skip_if', 'scale', 'weight_decay', 'momentum'], {}), '(num_valid, unique_embeddings,\n embedding_grad, lr_tensor, down_scale_by_tensor, skip_if, scale,\n weight_decay, momentum)\n', (2031, 2158), True, 'import oneflow as flow\n'), ((1577, 1621), 'oneflow.tensor', 'flow.tensor', (['init_value'], {'requires_grad': '(False)'}), '(init_value, requires_grad=False)\n', (1588, 1621), True, 'import oneflow as flow\n'), ((1352, 1387), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'model_shape'}), '(size=model_shape)\n', (1369, 1387), True, 'import numpy as np\n'), ((2515, 
2546), 'oneflow.tensor', 'flow.tensor', (['random_grad_seq[i]'], {}), '(random_grad_seq[i])\n', (2526, 2546), True, 'import oneflow as flow\n'), ((1832, 1855), 'numpy.array', 'np.array', (['down_scale_by'], {}), '(down_scale_by)\n', (1840, 1855), True, 'import numpy as np\n'), ((1700, 1723), 'numpy.array', 'np.array', (['learning_rate'], {}), '(learning_rate)\n', (1708, 1723), True, 'import numpy as np\n'), ((2408, 2434), 'numpy.array', 'np.array', (['num_valid_seq[i]'], {}), '(num_valid_seq[i])\n', (2416, 2434), True, 'import numpy as np\n'), ((2616, 2640), 'numpy.array', 'np.array', (['skip_if_seq[i]'], {}), '(skip_if_seq[i])\n', (2624, 2640), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AtariPlotter used for rendering Atari 2600 frames.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dopamine.utils import plotter
import gin
import numpy as np
import pygame
@gin.configurable
class AtariPlotter(plotter.Plotter):
"""A Plotter for rendering Atari 2600 frames."""
_defaults = {
'x': 0,
'y': 0,
'width': 160,
'height': 210,
}
def __init__(self, parameter_dict=None):
"""Constructor for AtariPlotter.
Args:
parameter_dict: None or dict of parameter specifications for
visualization. If an expected parameter is present, its value will
be used, otherwise it will use defaults.
"""
super(AtariPlotter, self).__init__(parameter_dict)
assert 'environment' in self.parameters
self.game_surface = pygame.Surface((self.parameters['width'],
self.parameters['height']))
def draw(self):
"""Render the Atari 2600 frame.
Returns:
object to be rendered by AgentVisualizer.
"""
environment = self.parameters['environment']
numpy_surface = np.frombuffer(self.game_surface.get_buffer(),
dtype=np.int32)
obs = environment.render(mode='rgb_array').astype(np.int32)
obs = np.transpose(obs)
obs = np.swapaxes(obs, 1, 2)
obs = obs[2] | (obs[1] << 8) | (obs[0] << 16)
np.copyto(numpy_surface, obs.ravel())
return pygame.transform.scale(self.game_surface,
(self.parameters['width'],
self.parameters['height']))
| [
"numpy.swapaxes",
"numpy.transpose",
"pygame.transform.scale",
"pygame.Surface"
] | [((1516, 1585), 'pygame.Surface', 'pygame.Surface', (["(self.parameters['width'], self.parameters['height'])"], {}), "((self.parameters['width'], self.parameters['height']))\n", (1530, 1585), False, 'import pygame\n'), ((2036, 2053), 'numpy.transpose', 'np.transpose', (['obs'], {}), '(obs)\n', (2048, 2053), True, 'import numpy as np\n'), ((2068, 2090), 'numpy.swapaxes', 'np.swapaxes', (['obs', '(1)', '(2)'], {}), '(obs, 1, 2)\n', (2079, 2090), True, 'import numpy as np\n'), ((2206, 2307), 'pygame.transform.scale', 'pygame.transform.scale', (['self.game_surface', "(self.parameters['width'], self.parameters['height'])"], {}), "(self.game_surface, (self.parameters['width'], self.\n parameters['height']))\n", (2228, 2307), False, 'import pygame\n')] |
#!/usr/bin/env python
"""Analyze metrics stored in panda tables"""
import argparse
import os
import sys
import re
from collections import OrderedDict
from itertools import permutations
import numpy as np
import pandas as pd
from scipy.stats import ttest_rel, wilcoxon
NAME_REGEXP = re.compile(r'.+_(.+)_\d\d\d\d.+')
SIGNIFICANCE_LVL = 0.05
REC_DICE_GT = 0.7964832518779061
parser = argparse.ArgumentParser(description='Evaluate metrics')
parser.add_argument('-v', action='store_true', help='Verbosity')
parser.add_argument('-o', '--order', help='Output order')
parser.add_argument('-p', default='auto', help='Floating-point precision')
parser.add_argument('-l', action='store_true', help='Output latex markup')
parser.add_argument('-f', '--filter',
help='Filter outputs by substring')
parser.add_argument('--sis-gt-perf', default=REC_DICE_GT,
help='Performance on GT for SIS')
parser.add_argument('--pprint', action='store_true',
help='Print out percentiles')
parser.add_argument('--percentiles', default=[0, 25, 50, 75, 100],
help='Percentiles to print')
parser.add_argument('--stest', action='store_true',
help='Perform statistical testing')
parser.add_argument('--sprint', action='store_true',
help='Print results of statistical testing')
parser.add_argument('--slvl', default=SIGNIFICANCE_LVL,
help='Significance level')
parser.add_argument('--stest-mode', default='wilcoxon',
choices=('ttest', 'wilcoxon'),
help='Mode of statistical testing')
parser.add_argument('--no-name', action='store_true',
help='Do not print leading run name')
parser.add_argument('--no-std', action='store_true',
help='Do not print std')
parser.add_argument('--metric-name', default='dice_avg',
help='Metric name to aggregate')
parser.add_argument('inputs', nargs='+', help='Input csvs to process')
def get_best_fn(metric_name):
max_metrics = ['dice', 'psnr', 'ssim', 'segscore']
for metric in max_metrics:
if metric in metric_name.lower():
return max
return min
def get_precision(metric_name):
default = 2
precisions = {
'dice': 3,
'segscore': 3,
'ssim': 3
}
for metric, prec in precisions.items():
if metric in metric_name:
return prec
return default
def statistical_testing(args, metrics_by_input, groups_by_name):
test_fn = ttest_rel if args.stest_mode == 'ttest' else wilcoxon
# Get group averages
samples_by_name = {}
for name, group in groups_by_name.items():
gmeans = np.mean([metrics_by_input[inp] for inp in group], axis=0)
samples_by_name[name] = gmeans
perms = permutations(samples_by_name.items(), 2)
if args.sprint:
print('Performing {}'.format(args.stest_mode))
tested_names = set()
pvalues_by_name = {}
for (n1, s1), (n2, s2) in perms:
if n1 not in tested_names:
if args.sprint:
print('Testing {} against:'.format(n1))
tested_names.add(n1)
assert len(s1) == len(s2)
test = test_fn(s1, s2)
pvalues_by_name.setdefault(n1, []).append(test.pvalue)
if args.sprint:
print('\t{}: {:.4f}'.format(n2, test.pvalue))
significantly_different_names = []
for name, pvalues in pvalues_by_name.items():
if all((p < args.slvl) for p in pvalues):
significantly_different_names.append(name)
if args.sprint:
print(('{} ({:.3f}) has p < {} '
'for all other inputs').format(name,
samples_by_name[name].mean(),
args.slvl))
return significantly_different_names
def collect_mean_std(args, metric_name, metrics_by_input, groups_by_name):
gavgs_by_name = OrderedDict()
for name, group in groups_by_name.items():
gmean = np.mean([metrics_by_input[inp].mean() for inp in group])
gstd = np.mean([metrics_by_input[inp].std() for inp in group])
gavgs_by_name[name] = (gmean, gstd)
if args.v:
means = [metrics_by_input[inp].mean() for inp in group]
print(name, ','.join(('{:.3f}'.format(m) for m in means)),
'({:.3f} +- {:.3f})'.format(gmean, np.std(means)))
if 'segscore' in metric_name.lower():
for name, gavg in gavgs_by_name.items():
gavgs_by_name[name] = (gavg[0] / args.sis_gt_perf, 0)
return gavgs_by_name
def print_mean_std(args, metric_name, gavgs_by_name,
significantly_different_names, name_order):
best_fn = get_best_fn(metric_name)
best_val = best_fn(gavgs_by_name, key=lambda k: gavgs_by_name[k])
if args.p == 'auto':
prec = get_precision(metric_name)
else:
prec = args.p
max_width = max((len(inp) for inp in gavgs_by_name))
str_fmt = '{:' + str(max_width+2) + '}'
fp_fmt = '{:.' + str(prec) + 'f}'
if len(name_order) == 2:
name_order.append('diff')
mdiff = gavgs_by_name[name_order[1]][0] - gavgs_by_name[name_order[0]][0]
sdiff = gavgs_by_name[name_order[1]][1] - gavgs_by_name[name_order[0]][1]
gavgs_by_name['diff'] = (mdiff, sdiff)
for name in name_order:
(mean, std) = gavgs_by_name[name]
s = ''
mean_fmt = fp_fmt
std_fmt = fp_fmt
delim = ' '
mean_std_delim = ' +- '
if args.l:
delim = '$'
mean_std_delim = ' \pm '
if args.stest and name in significantly_different_names:
mean_fmt += '^{{*}}'
if name == best_val:
mean_fmt = '\mathbf{{' + mean_fmt + '}}'
else:
if args.stest and name in significantly_different_names:
mean_fmt += '*'
if not args.no_name:
s += str_fmt.format(name)
s += delim + mean_fmt.format(mean)
if not args.no_std:
s += mean_std_delim + std_fmt.format(std)
s += delim
print(s)
def print_percentiles(args, metric_name, metrics_by_input, groups_by_name, name_order):
if args.p == 'auto':
prec = 3 if 'dice' in metric_name else 2
else:
prec = args.p
# Get group averages
samples_by_name = {}
for name, group in groups_by_name.items():
gmeans = np.mean([metrics_by_input[inp] for inp in group], axis=0)
samples_by_name[name] = gmeans
max_width = max((len(name) for name in groups_by_name))
str_fmt = '{:' + str(max_width+2) + '}'
fp_fmt = '{:.' + str(prec) + 'f}'
percs_by_name = {name: np.percentile(samples_by_name[name],
args.percentiles)
for name in name_order}
if len(name_order) == 2:
name_order.append('diff')
pdiff = percs_by_name[name_order[1]] - percs_by_name[name_order[0]]
percs_by_name['diff'] = pdiff
for name in name_order:
percs = percs_by_name[name]
s = ''
if not args.no_name:
s += str_fmt.format(name)
if args.l:
s += '$'
s += '/'.join((fp_fmt.format(p) for p in percs))
s += '$'
else:
s += '/'.join((fp_fmt.format(p) for p in percs))
print(s)
def evaluate_for_metric(args, dfs, metric_name):
metrics_by_input = {}
for name, df in dfs.items():
df = df.dropna(subset=[metric_name])
metrics_by_input[name] = df[metric_name]
if args.v:
print('Available columns in {}'.format(inp))
print(list(df.columns))
groups_by_name = OrderedDict()
for inp in metrics_by_input:
m = NAME_REGEXP.match(inp)
assert m is not None, inp
groups_by_name.setdefault(m.group(1), []).append(inp)
if args.filter is not None:
filtered_groups_by_name = OrderedDict()
for name in groups_by_name:
if not any((name_to_filter in name for name_to_filter in args.filter)):
filtered_groups_by_name[name] = groups_by_name[name]
groups_by_name = filtered_groups_by_name
if args.order is not None:
name_order = []
for key in args.order:
for name in groups_by_name:
if key in name and name not in name_order:
name_order.append(name)
break
else:
name_order = list(groups_by_name.keys())
if args.pprint:
print_percentiles(args, metric_name, metrics_by_input, groups_by_name,
name_order)
elif not args.sprint:
gavgs_by_name = collect_mean_std(args, metric_name,
metrics_by_input, groups_by_name)
significantly_different_names = statistical_testing(args,
metrics_by_input,
groups_by_name)
print_mean_std(args, metric_name, gavgs_by_name,
significantly_different_names, name_order)
else:
statistical_testing(args, metrics_by_input, groups_by_name)
def main(argv):
args = parser.parse_args(argv)
if args.order is not None:
args.order = args.order.split(',')
if args.filter is not None:
args.filter = args.filter.split(',')
args.inputs = [inp for inp in args.inputs if inp.endswith('.csv')]
dfs = {}
for inp in args.inputs:
df = pd.read_csv(inp)
name = os.path.basename(inp)
dfs[name] = df
metric_names = args.metric_name.split(',')
for metric_name in metric_names:
print(metric_name)
evaluate_for_metric(args, dfs, metric_name)
print()
if __name__ == '__main__':
main(sys.argv[1:])
| [
"numpy.mean",
"collections.OrderedDict",
"argparse.ArgumentParser",
"re.compile",
"pandas.read_csv",
"os.path.basename",
"numpy.std",
"numpy.percentile"
] | [((284, 320), 're.compile', 're.compile', (['""".+_(.+)_\\\\d\\\\d\\\\d\\\\d.+"""'], {}), "('.+_(.+)_\\\\d\\\\d\\\\d\\\\d.+')\n", (294, 320), False, 'import re\n'), ((386, 441), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate metrics"""'}), "(description='Evaluate metrics')\n", (409, 441), False, 'import argparse\n'), ((3852, 3865), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3863, 3865), False, 'from collections import OrderedDict\n'), ((7313, 7326), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7324, 7326), False, 'from collections import OrderedDict\n'), ((2673, 2730), 'numpy.mean', 'np.mean', (['[metrics_by_input[inp] for inp in group]'], {'axis': '(0)'}), '([metrics_by_input[inp] for inp in group], axis=0)\n', (2680, 2730), True, 'import numpy as np\n'), ((6147, 6204), 'numpy.mean', 'np.mean', (['[metrics_by_input[inp] for inp in group]'], {'axis': '(0)'}), '([metrics_by_input[inp] for inp in group], axis=0)\n', (6154, 6204), True, 'import numpy as np\n'), ((6403, 6457), 'numpy.percentile', 'np.percentile', (['samples_by_name[name]', 'args.percentiles'], {}), '(samples_by_name[name], args.percentiles)\n', (6416, 6457), True, 'import numpy as np\n'), ((7538, 7551), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7549, 7551), False, 'from collections import OrderedDict\n'), ((9029, 9045), 'pandas.read_csv', 'pd.read_csv', (['inp'], {}), '(inp)\n', (9040, 9045), True, 'import pandas as pd\n'), ((9057, 9078), 'os.path.basename', 'os.path.basename', (['inp'], {}), '(inp)\n', (9073, 9078), False, 'import os\n'), ((4276, 4289), 'numpy.std', 'np.std', (['means'], {}), '(means)\n', (4282, 4289), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import argparse
import math
import sys
import time
import copy
import matplotlib.pylab as plt
#matplotlib.use('Agg') # for AWS
import numpy as np
from numpy .random import multivariate_normal, permutation
import torch
import torch.utils.data as data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# import models
from models import fc_model, model_loss, cross_loss
from dataset import classify_anomaly_dataset
### model options ###
# Command-line interface for the FC anomaly-detection training script.
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', '-e', default=30, type=int,
                    help='number of epochs to learn')
parser.add_argument('--batchsize', '-b', type=int, default=128,
                    help='learning minibatch size')
parser.add_argument('--train_file_name', '-train_name', type=str, default='./data/mit/x_train.npy',
                    help='the file name of the training data set')
# NOTE(review): the default below contains a double slash ("data//mit") —
# harmless on most filesystems but likely a typo; confirm before changing.
parser.add_argument('--test_file_name', '-test_name', type=str, default='./data//mit/x_test.npy',
                    help='the file name of the test data set')
parser.add_argument('--window_size', '-ws', type=int, default=720,
                    help='window size')
parser.add_argument('--lr', '-l', type=float, default=1e-2,
                    help='learn rate')
parser.add_argument('--output_file_name', default='log')
parser.set_defaults(test=False)
args = parser.parse_args()
# set parser to var
# Unpack parsed arguments into module-level configuration variables used below.
outputn = args.output_file_name
train_name = args.train_file_name
test_name = args.test_file_name
D = args.window_size #the size of the window width
batch_size = args.batchsize # minibatch size
num_epoch=args.epoch
###### data preparation #####
# load
# Load raw signal windows and their integer labels from .npy files.
x_train_data=np.load(train_name)
x_test_data = np.load(test_name)
y_train_data=np.load("data/mit/y_train.npy").astype(np.int64)
y_test_data=np.load("data/mit/y_test.npy").astype(np.int64)
print("x_train data", x_train_data.shape)
print("x_test data", x_test_data.shape)
# Cast to float32 for PyTorch.  (Original comment said "pick up only normal
# samples", but no filtering happens here — verify against the data pipeline.)
Split_train_data_x = x_train_data.astype(np.float32)
Split_test_data_x = x_test_data.astype(np.float32)
# define dataset
# Only the first 8000 training windows are used; the full test set is used
# for validation.
train_dataset = classify_anomaly_dataset(Split_train_data_x[:8000], y_train_data[:8000])
val_dataset = classify_anomaly_dataset(Split_test_data_x, y_test_data)
# Create the data loaders.
train_dataloader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = torch.utils.data.DataLoader(
    val_dataset, batch_size=batch_size, shuffle=False)
# Bundle both loaders into one dict keyed by phase name ("train"/"val").
dataloaders_dict = {"train": train_dataloader, "val": val_dataloader}
# Sanity check: pull one minibatch from the validation loader.
batch_iterator = iter(dataloaders_dict["val"])  # convert loader to an iterator
images, targets = next(batch_iterator)  # take the first minibatch
print(images.size())
print("batch len is ", len(targets))
print(targets[0].shape)  # NOTE(review): original comment referenced [n, 5] detection targets; here targets look like class labels — confirm
# set model
# Fully-connected network: 720-dim window in, 2 output units.
model = fc_model([720, 500, 400, 360, 180, 90, 45, 2])
print(model)
# define loss
criterion = cross_loss()
# define optimizer. use SGD here.
optimizer = optim.SGD(model.parameters(), lr=args.lr,
                      momentum=0.9,
                      weight_decay=0.0001)
######### start training ##########
import os

# Enable GPU if available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.benchmark = True
model.to(device)
model.train()

iteration = 1
log_loss = []  # per-epoch *training* loss, plotted after training
# Create the checkpoint directory once instead of re-checking every epoch.
if not os.path.isdir("weights"):
    os.mkdir("weights")

for epoch in range(num_epoch):
    t_epoch_start = time.time()  # epoch timer
    t_iter_start = time.time()   # 10-iteration timer
    epoch_train_loss = 0.0
    epoch_val_loss = 0.0
    for phase in ["train", "val"]:
        # Toggle train/eval mode per phase (the original evaluated in
        # train mode, which matters if the model ever gains dropout/BN).
        if phase == "train":
            model.train()
        else:
            model.eval()
        epoch_corrects = 0.0
        epoch_loss = 0.0
        print("mode is........", phase)
        print('-------------')
        print('Epoch {}/{}'.format(epoch+1, num_epoch))
        print('-------------')
        for imges, targets in dataloaders_dict[phase]:
            imges = imges.to(device)
            targets = targets.to(device)
            optimizer.zero_grad()
            # Track gradients only while training.
            with torch.set_grad_enabled(phase == "train"):
                outputs = model(imges)
                loss = criterion(outputs, targets)
                _, preds = torch.max(outputs, 1)
                # BUG FIX: the loss used to be accumulated only in the
                # train phase, so the validation loss was always 0.
                epoch_loss += loss.item()
                if phase == "train":
                    loss.backward()
                    optimizer.step()
                    if iteration % 10 == 0:  # report every 10 iterations
                        t_iter_finish = time.time()
                        duration = t_iter_finish - t_iter_start
                        print('イテレーション {} || Loss: {:.4f} || 10iter: {:.4f} sec.'.format(
                            iteration, loss.item()/batch_size, duration))
                        t_iter_start = time.time()
                    iteration += 1
                epoch_corrects += torch.sum(preds == targets.data)
        # Record the phase total so it survives the next phase iteration.
        if phase == "train":
            epoch_train_loss = epoch_loss
        else:
            epoch_val_loss = epoch_loss
        t_epoch_finish = time.time()
        epoch_acc = epoch_corrects.double() / len(dataloaders_dict[phase].dataset)
        print('-------------')
        print('epoch {} || Epoch_Loss:{:.4f} ||Epoch_VAL_Loss:{:.4f}'.format(
            epoch+1, epoch_train_loss, epoch_val_loss))
        print('timer: {:.4f} sec.'.format(t_epoch_finish - t_epoch_start))
        print(phase)
        print("Acc:", epoch_acc)
        t_epoch_start = time.time()
    # BUG FIX: epoch_train_loss was never updated before, so log_loss only
    # ever contained zeros and the plotted loss curve was flat.
    log_loss.append(epoch_train_loss)
    torch.save(model.state_dict(), 'weights/fc' +
               str(epoch+1) + '.pth')
# evaluate
# Run the trained model over the first 8000 test windows and turn the
# squared output/input difference into an anomaly score.
model.eval()
predict = model(torch.from_numpy(Split_test_data_x[:8000]))
# Flatten inputs to one long 1-D signal of 8000 windows x 720 samples.
measured = Split_test_data_x[:8000].reshape(8000*720)
# NOTE(review): fc_model's last layer has 2 units, so `predict` presumably
# has 8000*2 elements — this reshape to 8000*720 would then fail.  Confirm
# fc_model's actual output shape before trusting this section.
predicted = predict.detach().numpy().reshape(8000*720)
# Element-wise squared error between input and model output.
Loss_model=np.power(measured-predicted, 2)
# Per-window error: sum the 720 squared errors of each window.
Loss_perdata=np.sum(Loss_model.reshape(8000, 720), 1)
mean_window = 1000
# NOTE(review): this slice is a *view* into Loss_model, so the loop below
# also writes into Loss_model.  Reads stay ahead of writes (window
# [x, x+mean_window) vs. write at x), so the averages are unaffected —
# but Loss_model itself is mutated after this point.
Loss_model_processed = Loss_model[0:Loss_model.size-mean_window]
# smoothen anomaly score
# Moving average over `mean_window` consecutive samples.
for x in range(Loss_model.size-mean_window):
    Loss_model_processed[x] = np.mean(Loss_model[x:x+mean_window])
# normalize the score
# Scale to unit standard deviation (the mean is not removed).
Loss_model_processed = Loss_model_processed/(np.std(Loss_model_processed))
##### plot results #####
# Write all figures under figs/; each figure is shown and then saved.
if not os.path.isdir("figs"):
    os.mkdir("figs")

# Training-loss curve.
fig0 = plt.figure()
plt.xlabel("epoch")
plt.ylabel("train loss")
plt.plot(log_loss, label='trainloss')
plt.legend()
plt.show()
fig0.savefig('figs/FC_trainloss.png')

# Smoothed, normalized anomaly score over the whole test stretch.
fig1 = plt.figure()
plt.xlabel("sample")
plt.ylabel("anomaly score")
plt.plot(Loss_model_processed, label='FC model score')
plt.legend()
plt.show()
fig1.savefig('figs/FC_anomaly_score.png')

# Split the per-window score by ground-truth label (0 = normal).
anno_score = []
normal_score = []
for i, flag in enumerate(y_test_data[:8000]):  # renamed: 'bool' shadowed the builtin
    if flag == 0:
        normal_score.append(Loss_perdata[i])
    else:
        anno_score.append(Loss_perdata[i])

figanno = plt.figure()
plt.xlabel("sample")
plt.ylabel("value")
plt.plot(anno_score, label='Anomal score for annomal data')
plt.legend()
plt.show()
figanno.savefig("figs/FC_annomalscore.png")

fignormal = plt.figure()
plt.xlabel("sample")
plt.ylabel("value")
plt.plot(normal_score, label='Anomal score for Normal data')
plt.legend()
plt.show()
# BUG FIX: this used to call figanno.savefig(...), so the normal-score
# file actually contained a copy of the anomalous-score figure.
fignormal.savefig("figs/FC_normalscore.png")

# Waveform overlays: model output vs. ground truth.
fig2 = plt.figure()
plt.xlabel("sample")
plt.ylabel("value")
plt.plot(predicted[157500:163000], label='Pytorch FC model prediction')
plt.plot(measured[157500:163000], label='real data')
plt.legend()
plt.show()
fig2.savefig("figs/FC_waveforms.png")

fig3 = plt.figure()
plt.xlabel("sample")
plt.ylabel("value")
plt.plot(measured[0:3000], label='real data')
plt.plot(predicted[0:3000], label='Pytorch FC model prediction')
plt.legend()
plt.show()
fig3.savefig("figs/normal_waveform_predict.png")
| [
"torch.max",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.sum",
"matplotlib.pylab.show",
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pylab.figure",
"matplotlib.pylab.legend",
"os.path.isdir",
"os.mkdir",
"models.fc_model",
"matplotlib.pylab.plot",
"matplotlib.pylab.xlabel... | [((504, 529), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (527, 529), False, 'import argparse\n'), ((1722, 1741), 'numpy.load', 'np.load', (['train_name'], {}), '(train_name)\n', (1729, 1741), True, 'import numpy as np\n'), ((1756, 1774), 'numpy.load', 'np.load', (['test_name'], {}), '(test_name)\n', (1763, 1774), True, 'import numpy as np\n'), ((2141, 2213), 'dataset.classify_anomaly_dataset', 'classify_anomaly_dataset', (['Split_train_data_x[:8000]', 'y_train_data[:8000]'], {}), '(Split_train_data_x[:8000], y_train_data[:8000])\n', (2165, 2213), False, 'from dataset import classify_anomaly_dataset\n'), ((2228, 2284), 'dataset.classify_anomaly_dataset', 'classify_anomaly_dataset', (['Split_test_data_x', 'y_test_data'], {}), '(Split_test_data_x, y_test_data)\n', (2252, 2284), False, 'from dataset import classify_anomaly_dataset\n'), ((2318, 2397), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=batch_size, shuffle=True)\n', (2345, 2397), False, 'import torch\n'), ((2421, 2499), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(val_dataset, batch_size=batch_size, shuffle=False)\n', (2448, 2499), False, 'import torch\n'), ((2848, 2894), 'models.fc_model', 'fc_model', (['[720, 500, 400, 360, 180, 90, 45, 2]'], {}), '([720, 500, 400, 360, 180, 90, 45, 2])\n', (2856, 2894), False, 'from models import fc_model, model_loss, cross_loss\n'), ((2935, 2947), 'models.cross_loss', 'cross_loss', ([], {}), '()\n', (2945, 2947), False, 'from models import fc_model, model_loss, cross_loss\n'), ((5753, 5786), 'numpy.power', 'np.power', (['(measured - predicted)', '(2)'], {}), '(measured - predicted, 2)\n', (5761, 5786), True, 'import numpy as np\n'), ((6243, 6255), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (6253, 
6255), True, 'import matplotlib.pylab as plt\n'), ((6256, 6275), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6266, 6275), True, 'import matplotlib.pylab as plt\n'), ((6276, 6300), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""train loss"""'], {}), "('train loss')\n", (6286, 6300), True, 'import matplotlib.pylab as plt\n'), ((6301, 6338), 'matplotlib.pylab.plot', 'plt.plot', (['log_loss'], {'label': '"""trainloss"""'}), "(log_loss, label='trainloss')\n", (6309, 6338), True, 'import matplotlib.pylab as plt\n'), ((6339, 6351), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (6349, 6351), True, 'import matplotlib.pylab as plt\n'), ((6352, 6362), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (6360, 6362), True, 'import matplotlib.pylab as plt\n'), ((6409, 6421), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (6419, 6421), True, 'import matplotlib.pylab as plt\n'), ((6422, 6442), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""sample"""'], {}), "('sample')\n", (6432, 6442), True, 'import matplotlib.pylab as plt\n'), ((6443, 6470), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""anomaly score"""'], {}), "('anomaly score')\n", (6453, 6470), True, 'import matplotlib.pylab as plt\n'), ((6471, 6525), 'matplotlib.pylab.plot', 'plt.plot', (['Loss_model_processed'], {'label': '"""FC model score"""'}), "(Loss_model_processed, label='FC model score')\n", (6479, 6525), True, 'import matplotlib.pylab as plt\n'), ((6526, 6538), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (6536, 6538), True, 'import matplotlib.pylab as plt\n'), ((6539, 6549), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (6547, 6549), True, 'import matplotlib.pylab as plt\n'), ((6801, 6813), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (6811, 6813), True, 'import matplotlib.pylab as plt\n'), ((6814, 6834), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""sample"""'], {}), "('sample')\n", (6824, 6834), True, 'import 
matplotlib.pylab as plt\n'), ((6835, 6854), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (6845, 6854), True, 'import matplotlib.pylab as plt\n'), ((6855, 6914), 'matplotlib.pylab.plot', 'plt.plot', (['anno_score'], {'label': '"""Anomal score for annomal data"""'}), "(anno_score, label='Anomal score for annomal data')\n", (6863, 6914), True, 'import matplotlib.pylab as plt\n'), ((6915, 6927), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (6925, 6927), True, 'import matplotlib.pylab as plt\n'), ((6928, 6938), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (6936, 6938), True, 'import matplotlib.pylab as plt\n'), ((6997, 7009), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (7007, 7009), True, 'import matplotlib.pylab as plt\n'), ((7010, 7030), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""sample"""'], {}), "('sample')\n", (7020, 7030), True, 'import matplotlib.pylab as plt\n'), ((7031, 7050), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (7041, 7050), True, 'import matplotlib.pylab as plt\n'), ((7051, 7111), 'matplotlib.pylab.plot', 'plt.plot', (['normal_score'], {'label': '"""Anomal score for Normal data"""'}), "(normal_score, label='Anomal score for Normal data')\n", (7059, 7111), True, 'import matplotlib.pylab as plt\n'), ((7112, 7124), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (7122, 7124), True, 'import matplotlib.pylab as plt\n'), ((7125, 7135), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (7133, 7135), True, 'import matplotlib.pylab as plt\n'), ((7188, 7200), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (7198, 7200), True, 'import matplotlib.pylab as plt\n'), ((7201, 7221), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""sample"""'], {}), "('sample')\n", (7211, 7221), True, 'import matplotlib.pylab as plt\n'), ((7222, 7241), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (7232, 7241), True, 
'import matplotlib.pylab as plt\n'), ((7242, 7313), 'matplotlib.pylab.plot', 'plt.plot', (['predicted[157500:163000]'], {'label': '"""Pytorch FC model prediction"""'}), "(predicted[157500:163000], label='Pytorch FC model prediction')\n", (7250, 7313), True, 'import matplotlib.pylab as plt\n'), ((7314, 7366), 'matplotlib.pylab.plot', 'plt.plot', (['measured[157500:163000]'], {'label': '"""real data"""'}), "(measured[157500:163000], label='real data')\n", (7322, 7366), True, 'import matplotlib.pylab as plt\n'), ((7367, 7379), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (7377, 7379), True, 'import matplotlib.pylab as plt\n'), ((7380, 7390), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (7388, 7390), True, 'import matplotlib.pylab as plt\n'), ((7438, 7450), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (7448, 7450), True, 'import matplotlib.pylab as plt\n'), ((7451, 7471), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""sample"""'], {}), "('sample')\n", (7461, 7471), True, 'import matplotlib.pylab as plt\n'), ((7472, 7491), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (7482, 7491), True, 'import matplotlib.pylab as plt\n'), ((7492, 7537), 'matplotlib.pylab.plot', 'plt.plot', (['measured[0:3000]'], {'label': '"""real data"""'}), "(measured[0:3000], label='real data')\n", (7500, 7537), True, 'import matplotlib.pylab as plt\n'), ((7538, 7602), 'matplotlib.pylab.plot', 'plt.plot', (['predicted[0:3000]'], {'label': '"""Pytorch FC model prediction"""'}), "(predicted[0:3000], label='Pytorch FC model prediction')\n", (7546, 7602), True, 'import matplotlib.pylab as plt\n'), ((7603, 7615), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (7613, 7615), True, 'import matplotlib.pylab as plt\n'), ((7616, 7626), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (7624, 7626), True, 'import matplotlib.pylab as plt\n'), ((3429, 3440), 'time.time', 'time.time', ([], {}), '()\n', (3438, 3440), False, 
'import time\n'), ((3460, 3471), 'time.time', 'time.time', ([], {}), '()\n', (3469, 3471), False, 'import time\n'), ((5417, 5441), 'os.path.isdir', 'os.path.isdir', (['"""weights"""'], {}), "('weights')\n", (5430, 5441), False, 'import os\n'), ((5443, 5462), 'os.mkdir', 'os.mkdir', (['"""weights"""'], {}), "('weights')\n", (5451, 5462), False, 'import os\n'), ((5588, 5630), 'torch.from_numpy', 'torch.from_numpy', (['Split_test_data_x[:8000]'], {}), '(Split_test_data_x[:8000])\n', (5604, 5630), False, 'import torch\n'), ((6025, 6063), 'numpy.mean', 'np.mean', (['Loss_model[x:x + mean_window]'], {}), '(Loss_model[x:x + mean_window])\n', (6032, 6063), True, 'import numpy as np\n'), ((6129, 6157), 'numpy.std', 'np.std', (['Loss_model_processed'], {}), '(Loss_model_processed)\n', (6135, 6157), True, 'import numpy as np\n'), ((6196, 6217), 'os.path.isdir', 'os.path.isdir', (['"""figs"""'], {}), "('figs')\n", (6209, 6217), False, 'import os\n'), ((6219, 6235), 'os.mkdir', 'os.mkdir', (['"""figs"""'], {}), "('figs')\n", (6227, 6235), False, 'import os\n'), ((1788, 1819), 'numpy.load', 'np.load', (['"""data/mit/y_train.npy"""'], {}), "('data/mit/y_train.npy')\n", (1795, 1819), True, 'import numpy as np\n'), ((1849, 1879), 'numpy.load', 'np.load', (['"""data/mit/y_test.npy"""'], {}), "('data/mit/y_test.npy')\n", (1856, 1879), True, 'import numpy as np\n'), ((3209, 3234), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3232, 3234), False, 'import torch\n'), ((4941, 4952), 'time.time', 'time.time', ([], {}), '()\n', (4950, 4952), False, 'import time\n'), ((5345, 5356), 'time.time', 'time.time', ([], {}), '()\n', (5354, 5356), False, 'import time\n'), ((4011, 4051), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (4033, 4051), False, 'import torch\n'), ((4187, 4208), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (4196, 4208), False, 'import torch\n'), ((4837, 4869), 
'torch.sum', 'torch.sum', (['(preds == targets.data)'], {}), '(preds == targets.data)\n', (4846, 4869), False, 'import torch\n'), ((4450, 4461), 'time.time', 'time.time', ([], {}), '()\n', (4459, 4461), False, 'import time\n'), ((4713, 4724), 'time.time', 'time.time', ([], {}), '()\n', (4722, 4724), False, 'import time\n')] |
import numpy as np
from mapping.utils.residual import orth_residual
from math import fsum
class LowMemLanczosSpecialSparse:
    """Low-memory Lanczos tridiagonalization of a special sparse matrix.

    The implicit (dim x dim) symmetric matrix H is an "arrowhead" matrix:
    H[0, 1:] = H[1:, 0] = gamma, diag(H[1:, 1:]) = xi and H[0, 0] = 0
    (see ``_dgemv``).  Only two Lanczos vectors are held at a time, so the
    memory cost is O(dim) instead of O(dim * cutoff) unless the basis
    matrix is explicitly requested.

    Parameters
    ----------
    gamma, xi : ndarray
        Couplings / diagonal entries; must have the same length (dim - 1).
    v0 : ndarray, optional
        Start vector (defaults to the first unit vector).
    view : int, optional
        Number of leading rows/columns of H actually used (defaults to dim).
    max_cutoff : int, optional
        Maximum number of Lanczos iterations (defaults to dim).
    stable : bool, optional
        If True, compute vector norms via compensated (fsum) summation.
    """

    def __init__(self, gamma, xi, v0=None, view=None, max_cutoff=None, stable=False):
        if len(gamma) != len(xi):
            # BUG FIX: this used to only print a warning and continue,
            # which later failed with an opaque shape error in _dgemv.
            raise ValueError('gamma and xi must have the same length')
        self.dim = len(gamma) + 1
        self.stable = stable
        if view is None:
            view = self.dim
        else:
            assert view <= self.dim
        if max_cutoff is None:
            max_cutoff = self.dim
        else:
            assert max_cutoff <= self.dim
        if v0 is None:
            v0 = np.zeros(self.dim)
            v0[0] = 1
        self.max_cutoff = max_cutoff
        # Underlying buffers with full dimensions.
        self._gamma_buf = gamma[:]
        self._xi_buf = xi[:]
        self._v0_buf = v0[:]
        self._V_buf = np.empty((self.dim, 2), dtype=np.float64, order='F')
        self._w_buf = np.empty(self.dim, dtype=np.float64)
        self._alpha_buf = np.empty(self.max_cutoff, dtype=np.float64)
        self._beta_buf = np.empty(self.max_cutoff, dtype=np.float64)
        self.alpha = self._alpha_buf
        self.beta = self._beta_buf
        # Reuse update_view() instead of duplicating the slicing logic.
        self.update_view(view)

    def update_view(self, view):
        """Restrict all working views to the leading `view` entries."""
        assert view <= self.dim
        self.n = view
        self.gamma = self._gamma_buf[:view-1]
        self.xi = self._xi_buf[:view-1]
        self.v0 = self._v0_buf[:view]
        self.V = self._V_buf[:view, :2]
        self.w = self._w_buf[:view]

    def _dgemv(self, v):
        """w <- H @ v for the implicit arrowhead matrix H."""
        self.w[0] = self.gamma.dot(v[1:])
        self.w[1:] = self.gamma * v[0] + self.xi*v[1:]

    def _norm_w(self):
        """2-norm of the work vector; compensated summation if stable."""
        if self.stable:
            return np.sqrt(fsum(np.square(self.w)))
        return np.linalg.norm(self.w)

    def _core_loop(self, cutoff):
        """Run `cutoff` Lanczos steps keeping only two basis vectors."""
        # Initial step:
        self.V[:, 0] = self.v0
        self._dgemv(self.V[:, 0])
        self.alpha[0] = self.w.dot(self.V[:, 0])
        self.w -= self.alpha[0] * self.V[:, 0]
        # Core loop.  (The stable/unstable variants used to be duplicated
        # verbatim; their only difference is the norm, now in _norm_w.)
        for i in range(1, cutoff):
            self.beta[i] = self._norm_w()
            if self.beta[i] == 0:
                # Invariant subspace reached; same error type as before.
                raise AssertionError
            np.multiply(1/self.beta[i], self.w, out=self.V[:, 1])
            self._dgemv(self.V[:, 1])
            self.alpha[i] = self.w.dot(self.V[:, 1])
            self.w -= self.alpha[i] * self.V[:, 1] + self.beta[i] * self.V[:, 0]
            self.V[:, 0] = self.V[:, 1]
        return self.alpha[:cutoff].copy(), self.beta[1:cutoff].copy()

    def _core_loop_with_trafo(self, cutoff):
        """As _core_loop, but also accumulate the full basis matrix V."""
        V = np.empty((self.V.shape[0], cutoff), dtype=np.float64, order='F')
        # Initial step:
        V[:, 0] = self.v0
        self._dgemv(V[:, 0])
        self.alpha[0] = self.w.dot(V[:, 0])
        self.w[:] -= self.alpha[0] * V[:, 0]
        # Core loop:
        for i in range(1, cutoff):
            self.beta[i] = self._norm_w()
            if self.beta[i] == 0:
                raise AssertionError
            np.multiply(1/self.beta[i], self.w, out=V[:, i])
            self._dgemv(V[:, i])
            self.alpha[i] = self.w.dot(V[:, i])
            self.w[:] -= self.alpha[i] * V[:, i] + self.beta[i] * V[:, i - 1]
        return self.alpha[:cutoff].copy(), self.beta[1:cutoff].copy(), V

    def get_tridiagonal(self, cutoff=None, residual=False, get_trafo=False):
        """Return (diag, offdiag, info) of the Lanczos tridiagonal matrix.

        info['trafo'] holds the basis matrix when get_trafo is True;
        info['res'] holds the orthogonality residual when residual is True;
        both default to None.
        """
        if cutoff is None:
            cutoff = self.max_cutoff
        else:
            assert 0 < cutoff <= self.max_cutoff
        info = dict()
        info['trafo'] = None
        info['res'] = None
        if residual or get_trafo:
            diag, offdiag, V = self._core_loop_with_trafo(cutoff)
            if get_trafo:
                info['trafo'] = V
            if residual:
                info['res'] = orth_residual(V)
        else:
            diag, offdiag = self._core_loop(cutoff)
        return diag, offdiag, info
| [
"numpy.multiply",
"mapping.utils.residual.orth_residual",
"numpy.square",
"numpy.zeros",
"numpy.empty",
"numpy.linalg.norm"
] | [((859, 911), 'numpy.empty', 'np.empty', (['(self.dim, 2)'], {'dtype': 'np.float64', 'order': '"""F"""'}), "((self.dim, 2), dtype=np.float64, order='F')\n", (867, 911), True, 'import numpy as np\n'), ((934, 970), 'numpy.empty', 'np.empty', (['self.dim'], {'dtype': 'np.float64'}), '(self.dim, dtype=np.float64)\n', (942, 970), True, 'import numpy as np\n'), ((997, 1040), 'numpy.empty', 'np.empty', (['self.max_cutoff'], {'dtype': 'np.float64'}), '(self.max_cutoff, dtype=np.float64)\n', (1005, 1040), True, 'import numpy as np\n'), ((1066, 1109), 'numpy.empty', 'np.empty', (['self.max_cutoff'], {'dtype': 'np.float64'}), '(self.max_cutoff, dtype=np.float64)\n', (1074, 1109), True, 'import numpy as np\n'), ((3228, 3292), 'numpy.empty', 'np.empty', (['(self.V.shape[0], cutoff)'], {'dtype': 'np.float64', 'order': '"""F"""'}), "((self.V.shape[0], cutoff), dtype=np.float64, order='F')\n", (3236, 3292), True, 'import numpy as np\n'), ((616, 634), 'numpy.zeros', 'np.zeros', (['self.dim'], {}), '(self.dim)\n', (624, 634), True, 'import numpy as np\n'), ((2154, 2176), 'numpy.linalg.norm', 'np.linalg.norm', (['self.w'], {}), '(self.w)\n', (2168, 2176), True, 'import numpy as np\n'), ((3580, 3602), 'numpy.linalg.norm', 'np.linalg.norm', (['self.w'], {}), '(self.w)\n', (3594, 3602), True, 'import numpy as np\n'), ((4973, 4989), 'mapping.utils.residual.orth_residual', 'orth_residual', (['V'], {}), '(V)\n', (4986, 4989), False, 'from mapping.utils.residual import orth_residual\n'), ((2298, 2353), 'numpy.multiply', 'np.multiply', (['(1 / self.beta[i])', 'self.w'], {'out': 'self.V[:, 1]'}), '(1 / self.beta[i], self.w, out=self.V[:, 1])\n', (2309, 2353), True, 'import numpy as np\n'), ((2818, 2873), 'numpy.multiply', 'np.multiply', (['(1 / self.beta[i])', 'self.w'], {'out': 'self.V[:, 1]'}), '(1 / self.beta[i], self.w, out=self.V[:, 1])\n', (2829, 2873), True, 'import numpy as np\n'), ((3724, 3774), 'numpy.multiply', 'np.multiply', (['(1 / self.beta[i])', 'self.w'], {'out': 'V[:, 
i]'}), '(1 / self.beta[i], self.w, out=V[:, i])\n', (3735, 3774), True, 'import numpy as np\n'), ((4182, 4232), 'numpy.multiply', 'np.multiply', (['(1 / self.beta[i])', 'self.w'], {'out': 'V[:, i]'}), '(1 / self.beta[i], self.w, out=V[:, i])\n', (4193, 4232), True, 'import numpy as np\n'), ((2677, 2694), 'numpy.square', 'np.square', (['self.w'], {}), '(self.w)\n', (2686, 2694), True, 'import numpy as np\n'), ((4041, 4058), 'numpy.square', 'np.square', (['self.w'], {}), '(self.w)\n', (4050, 4058), True, 'import numpy as np\n')] |
"""track_to_track_association
The module tests two tracks for track association. It uses hypothesis testing to decide whether the two tracks are of
the same target. See report for more mathematical derivation.
"""
import numpy as np
from scipy.stats.distributions import chi2
def test_association_independent_tracks(track1, track2, alpha=0.05):
"""
Checks whether the tracks are from the same target, under the independence assumption
:param track1: track to check for association
:param track2: track to check for association
:param alpha: desired confidence interval
:return: true if the tracks are from the same target, false else
"""
delta_estimates = track1.state_vector - track2.state_vector
error_delta_estimates = delta_estimates # as the difference of the true states is 0 if it is the same target
error_delta_estimates_covar = track1.covar + track2.covar # under the error independence assumption
d = (error_delta_estimates.transpose() @ np.linalg.inv(error_delta_estimates_covar) @ error_delta_estimates)[0]
# 4 degrees of freedom as we have 4 dimensions in the state vector
d_alpha = chi2.ppf((1 - alpha), df=4)
# Accept H0 if d <= d_alpha
return d <= d_alpha
def test_association_dependent_tracks(track1_mean, track1_cov, track2_mean, track2_cov, cross_cov_ij, cross_cov_ji,
alpha=0.05):
"""
checks whether the tracks are from the same target, when the dependence is accounted for.
:param track1: track to check for association
:param track2: track to check for association
:param cross_cov_ij: cross-covariance of the estimation errors. See article
:param cross_cov_ji:
:param alpha: desired test power
:return: true if the tracks are from the same target, false else
"""
delta_estimates = track1_mean - track2_mean
error_delta_estimates = delta_estimates # as the difference of the true states is 0 if it is the same target
error_delta_estimates_covar = track1_cov + track2_cov - cross_cov_ij - cross_cov_ji
d = (error_delta_estimates.transpose() @ np.linalg.inv(error_delta_estimates_covar) @ error_delta_estimates)[0]
# 4 degrees of freedom as we have 4 dimensions in the state vector
d_alpha = chi2.ppf((1 - alpha), df=4)
# Accept H0 if d <= d_alpha
return d <= d_alpha
| [
"numpy.linalg.inv",
"scipy.stats.distributions.chi2.ppf"
] | [((1155, 1180), 'scipy.stats.distributions.chi2.ppf', 'chi2.ppf', (['(1 - alpha)'], {'df': '(4)'}), '(1 - alpha, df=4)\n', (1163, 1180), False, 'from scipy.stats.distributions import chi2\n'), ((2283, 2308), 'scipy.stats.distributions.chi2.ppf', 'chi2.ppf', (['(1 - alpha)'], {'df': '(4)'}), '(1 - alpha, df=4)\n', (2291, 2308), False, 'from scipy.stats.distributions import chi2\n'), ((998, 1040), 'numpy.linalg.inv', 'np.linalg.inv', (['error_delta_estimates_covar'], {}), '(error_delta_estimates_covar)\n', (1011, 1040), True, 'import numpy as np\n'), ((2126, 2168), 'numpy.linalg.inv', 'np.linalg.inv', (['error_delta_estimates_covar'], {}), '(error_delta_estimates_covar)\n', (2139, 2168), True, 'import numpy as np\n')] |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2019 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from collections import defaultdict
import numpy as np
import time
from pymor.core.exceptions import GmshError
from pymor.core.logger import getLogger
from pymor.grids.interfaces import BoundaryInfoInterface
from pymor.grids.unstructured import UnstructuredTriangleGrid
def load_gmsh(gmsh_file):
    """Parse a Gmsh file and create a corresponding :class:`GmshGrid` and :class:`GmshBoundaryInfo`.

    Parameters
    ----------
    gmsh_file
        File handle of the Gmsh MSH-file.

    Returns
    -------
    grid
        The generated :class:`GmshGrid`.
    boundary_info
        The generated :class:`GmshBoundaryInfo`.
    """
    logger = getLogger('pymor.grids.gmsh.load_gmsh')

    # Each stage is timed individually so the summary below can report
    # where the loading time is spent.
    logger.info('Parsing gmsh file ...')
    start = time.time()
    sections = _parse_gmsh_file(gmsh_file)
    t_parse = time.time() - start

    logger.info('Create GmshGrid ...')
    start = time.time()
    grid = GmshGrid(sections)
    t_grid = time.time() - start

    logger.info('Create GmshBoundaryInfo ...')
    start = time.time()
    bi = GmshBoundaryInfo(grid, sections)
    t_bi = time.time() - start

    logger.info(f'Parsing took {t_parse} s; Grid creation took {t_grid} s; BoundaryInfo creation took {t_bi} s')
    return grid, bi
class GmshGrid(UnstructuredTriangleGrid):
    """An :class:`~pymor.grids.unstructured.UnstructuredTriangleGrid` built from an existing Gmsh MSH-file.

    Parameters
    ----------
    sections
        Parsed sections of the MSH-file as returned by :func:`load_gmsh`.
    """

    def __init__(self, sections):
        self.logger.info('Checking if grid is a 2d triangular grid ...')
        # The file must provide nodes, elements and physical names, ...
        assert {'Nodes', 'Elements', 'PhysicalNames'} <= set(sections.keys())
        # ... may only contain line and triangle elements (at least one triangle), ...
        assert set(sections['Elements'].keys()) <= {'line', 'triangle'}
        assert 'triangle' in sections['Elements']
        # ... and all nodes must lie in the z = 0 plane.
        assert all(n[1][2] == 0 for n in sections['Nodes'])

        # Map (possibly non-contiguous) Gmsh node numbers to consecutive indices.
        index_of_node = {node[0]: i
                         for node, i in zip(sections['Nodes'],
                                            np.arange(len(sections['Nodes']), dtype=np.int32))}
        vertices = np.array([node[1][0:2] for node in sections['Nodes']])
        faces = np.array([[index_of_node[n] for n in nodes]
                          for _, _, nodes in sections['Elements']['triangle']])

        super().__init__(vertices, faces)

    def __str__(self):
        return f'GmshGrid with {self.size(0)} triangles, {self.size(1)} edges, {self.size(2)} vertices'
class GmshBoundaryInfo(BoundaryInfoInterface):
    """|BoundaryInfo| for a :class:`GmshGrid`.

    Parameters
    ----------
    grid
        The corresponding :class:`GmshGrid`.
    sections
        Parsed sections of the MSH-file as returned by :func:`load_gmsh`.
    """

    def __init__(self, grid, sections):
        assert isinstance(grid, GmshGrid)
        self.grid = grid

        # Save boundary types (physical names of dimension 1, i.e. boundary lines).
        self.boundary_types = [pn[2] for pn in sections['PhysicalNames'] if pn[1] == 1]

        # Compute ids, since Gmsh starts numbering with 1 instead of 0.
        name_ids = dict(zip([pn[0] for pn in sections['PhysicalNames']], np.arange(len(sections['PhysicalNames']),
                                                                                   dtype=np.int32)))
        node_ids = dict(zip([n[0] for n in sections['Nodes']], np.arange(len(sections['Nodes']), dtype=np.int32)))

        # BUGFIX: initialize masks unconditionally so that meshes without any
        # 'line' elements do not raise a NameError at `self._masks = masks` below.
        masks = {}
        if 'line' in sections['Elements']:
            superentities = grid.superentities(2, 1)

            # find the edge for given vertices.
            def find_edge(vertices):
                edge_set = set(superentities[vertices[0]]).intersection(superentities[vertices[1]]) - {-1}
                if len(edge_set) != 1:
                    raise ValueError
                return next(iter(edge_set))

            line_ids = {l[0]: find_edge([node_ids[l[2][0]], node_ids[l[2][1]]]) for l in sections['Elements']['line']}

            # compute boundary masks for all boundary types.
            for bt in self.boundary_types:
                # masks[bt][0]: edge mask (codim 1), masks[bt][1]: vertex mask (codim 2).
                masks[bt] = [np.array([False]*grid.size(1)), np.array([False]*grid.size(2))]
                masks[bt][0][[line_ids[l[0]] for l in sections['Elements']['line']]] = \
                    [(bt == sections['PhysicalNames'][name_ids[l[1][0]]][2]) for l in sections['Elements']['line']]
                # Mark every vertex that belongs to an edge of this boundary type.
                ind = np.array([node_ids[n] for l in sections['Elements']['line'] for n in l[2]])
                val = masks[bt][0][[line_ids[l[0]] for l in sections['Elements']['line'] for n in l[2]]]
                masks[bt][1][ind[val]] = True
        self._masks = masks

    def mask(self, boundary_type, codim):
        """Return the boolean boundary mask of `boundary_type` for entities of codimension `codim`."""
        assert 1 <= codim <= self.grid.dim
        assert boundary_type in self.boundary_types
        return self._masks[boundary_type][codim - 1]
def _parse_gmsh_file(f):
    """Read a Gmsh MSH 2.2 ASCII file and return its parsed sections.

    The file handle `f` is consumed line by line.  The `$MeshFormat` header is
    validated first; the remaining `$Section`/`$EndSection` pairs are then
    collected and the supported ones ('Nodes', 'Elements', 'PhysicalNames')
    are parsed into Python data structures.  Any malformed or unsupported
    content raises :class:`GmshError`.
    """
    allowed_sections = ['Nodes', 'Elements', 'PhysicalNames', 'Periodic', 'NodeData',
                        'ElementData', 'ElementNodeData']
    supported_sections = ['Nodes', 'Elements', 'PhysicalNames']
    # --- validate the $MeshFormat header (version 2.2, ASCII) ---
    try:
        l = next(f).strip()
        if l != '$MeshFormat':
            raise GmshError(f'expected $MeshFormat, got {l}')
        l = next(f).strip()
        header = l.split(' ')
        if len(header) != 3:
            raise GmshError(f'header {l} has {len(header)} fields, expected 3')
        if header[0] != '2.2':
            raise GmshError(f'wrong file format version: got {header[0]}, expected 2.2')
        try:
            file_type = int(header[1])
        except ValueError:
            raise GmshError(f'malformed header: expected integer, got {header[1]}')
        if file_type != 0:
            raise GmshError('wrong file type: only ASCII gmsh files are supported')
        try:
            data_size = int(header[2])  # NOQA
        except ValueError:
            raise GmshError(f'malformed header: expected integer, got {header[2]}')
        l = next(f).strip()
        if l != '$EndMeshFormat':
            raise GmshError(f'expected $EndMeshFormat, got {l}')
    except StopIteration:
        raise GmshError('unexcpected end of file')
    # --- collect the raw lines of each $Section ... $EndSection block ---
    in_section = False
    sections = defaultdict(list)
    for l in f:
        l = l.strip()
        if l == '':
            continue
        if not in_section:
            if not l.startswith('$'):
                raise GmshError(f'expected section name, got {l}')
            section = l[1:]
            if section not in allowed_sections:
                raise GmshError(f'unknown section type: {section}')
            if section not in supported_sections:
                raise GmshError(f'unsupported section type: {section}')
            if section in sections:
                raise GmshError(f'only one {section} section allowed')
            in_section = True
        elif l.startswith('$'):
            if l != '$End' + section:
                raise GmshError(f'expected $End{section}, got {l}')
            in_section = False
        else:
            sections[section].append(l)
    if in_section:
        raise GmshError(f'file ended while in section {section}')
    # now we parse each section ...
    def parse_nodes(nodes):
        # Returns a list of (node_number, (x, y, z)) tuples.
        try:
            num_nodes = int(nodes[0])
        except ValueError:
            raise GmshError(f'first line of nodes sections is not a number: {nodes[0]}')
        if len(nodes) != num_nodes + 1:
            raise GmshError('number-of-nodes field does not match number of lines in nodes section')
        nodes = [n.split(' ') for n in nodes[1:]]
        if not all(len(n) == 4 for n in nodes):
            raise GmshError('malformed nodes section')
        try:
            nodes = [(int(a), (float(b), float(c), float(d))) for a, b, c, d in nodes]
        except ValueError:
            raise GmshError('malformed nodes section')
        return nodes
    def parse_elements(elements):
        # Returns a dict mapping element type name ('line'/'triangle') to a list of
        # (element_number, tags_tuple, node_number_list) tuples.
        try:
            num_elements = int(elements[0])
        except ValueError:
            raise GmshError(f'first line of elements sections is not a number: {elements[0]}')
        if len(elements) != num_elements + 1:
            raise GmshError('number-of-elements field does not match number of lines in elements section')
        elements = [e.split(' ') for e in elements[1:]]
        try:
            elements = [tuple(int(f) for f in e) for e in elements]
        except ValueError:
            raise GmshError('malformed elements section')
        # Gmsh element type codes: 1 = 2-node line, 2 = 3-node triangle.
        element_types = {1: 'line', 2: 'triangle'}
        element_nodes = {'line': 2, 'triangle': 3}
        def parse_line(fields):
            # fields: (number, type, num_tags, tag_0, ..., node_0, ...)
            if fields[1] not in element_types:
                raise GmshError(f'element type {fields[0]} not supported')
            element_type = element_types[fields[1]]
            num_nodes = element_nodes[element_type]
            num_tags = fields[2]
            if len(fields) != num_nodes + num_tags + 3:
                raise GmshError('malformed elements section')
            return element_type, (fields[0], tuple(fields[3:3 + num_tags]), fields[3 + num_tags:])
        elements_by_type = defaultdict(list)
        for e in elements:
            t, l = parse_line(e)
            elements_by_type[t].append(l)
        return elements_by_type
    def parse_names(physical_names):
        # Returns a list of (number, dimension, name) tuples; note the swap of the
        # first two fields relative to the file (file order is dimension, number).
        try:
            num_elements = int(physical_names[0])
        except ValueError:
            raise GmshError(f'first line of physical names sections is not a number: {physical_names[0]}')
        if len(physical_names) != num_elements + 1:
            raise GmshError('number-of-names field does not match number of lines in physical names section')
        physical_names = [pn.split(' ') for pn in physical_names[1:]]
        if not all(len(pn) == 3 for pn in physical_names):
            raise GmshError('malformed physical names section')
        try:
            physical_names = [(int(b), int(a), str(c).replace('"', '')) for a, b, c in physical_names]
        except ValueError:
            raise GmshError('malformed physical names section')
        return physical_names
    # Replace the raw line lists with their parsed representations.
    parser_map = {'Nodes': parse_nodes, 'Elements': parse_elements, 'PhysicalNames': parse_names}
    for k, v in sections.items():
        sections[k] = parser_map[k](v)
    return sections
| [
"pymor.core.exceptions.GmshError",
"numpy.array",
"collections.defaultdict",
"pymor.core.logger.getLogger",
"time.time"
] | [((873, 912), 'pymor.core.logger.getLogger', 'getLogger', (['"""pymor.grids.gmsh.load_gmsh"""'], {}), "('pymor.grids.gmsh.load_gmsh')\n", (882, 912), False, 'from pymor.core.logger import getLogger\n'), ((965, 976), 'time.time', 'time.time', ([], {}), '()\n', (974, 976), False, 'import time\n'), ((1030, 1041), 'time.time', 'time.time', ([], {}), '()\n', (1039, 1041), False, 'import time\n'), ((1116, 1127), 'time.time', 'time.time', ([], {}), '()\n', (1125, 1127), False, 'import time\n'), ((1168, 1179), 'time.time', 'time.time', ([], {}), '()\n', (1177, 1179), False, 'import time\n'), ((1261, 1272), 'time.time', 'time.time', ([], {}), '()\n', (1270, 1272), False, 'import time\n'), ((1325, 1336), 'time.time', 'time.time', ([], {}), '()\n', (1334, 1336), False, 'import time\n'), ((6310, 6327), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6321, 6327), False, 'from collections import defaultdict\n'), ((2274, 2322), 'numpy.array', 'np.array', (["[n[1][0:2] for n in sections['Nodes']]"], {}), "([n[1][0:2] for n in sections['Nodes']])\n", (2282, 2322), True, 'import numpy as np\n'), ((2340, 2469), 'numpy.array', 'np.array', (["[[node_ids[nodes[0]], node_ids[nodes[1]], node_ids[nodes[2]]] for _, _,\n nodes in sections['Elements']['triangle']]"], {}), "([[node_ids[nodes[0]], node_ids[nodes[1]], node_ids[nodes[2]]] for \n _, _, nodes in sections['Elements']['triangle']])\n", (2348, 2469), True, 'import numpy as np\n'), ((7200, 7251), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""file ended while in section {section}"""'], {}), "(f'file ended while in section {section}')\n", (7209, 7251), False, 'from pymor.core.exceptions import GmshError\n'), ((9215, 9232), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (9226, 9232), False, 'from collections import defaultdict\n'), ((4518, 4593), 'numpy.array', 'np.array', (["[node_ids[n] for l in sections['Elements']['line'] for n in l[2]]"], {}), "([node_ids[n] for l in 
sections['Elements']['line'] for n in l[2]])\n", (4526, 4593), True, 'import numpy as np\n'), ((5282, 5325), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""expected $MeshFormat, got {l}"""'], {}), "(f'expected $MeshFormat, got {l}')\n", (5291, 5325), False, 'from pymor.core.exceptions import GmshError\n'), ((5544, 5614), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""wrong file format version: got {header[0]}, expected 2.2"""'], {}), "(f'wrong file format version: got {header[0]}, expected 2.2')\n", (5553, 5614), False, 'from pymor.core.exceptions import GmshError\n'), ((5825, 5890), 'pymor.core.exceptions.GmshError', 'GmshError', (['"""wrong file type: only ASCII gmsh files are supported"""'], {}), "('wrong file type: only ASCII gmsh files are supported')\n", (5834, 5890), False, 'from pymor.core.exceptions import GmshError\n'), ((6146, 6192), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""expected $EndMeshFormat, got {l}"""'], {}), "(f'expected $EndMeshFormat, got {l}')\n", (6155, 6192), False, 'from pymor.core.exceptions import GmshError\n'), ((6234, 6270), 'pymor.core.exceptions.GmshError', 'GmshError', (['"""unexcpected end of file"""'], {}), "('unexcpected end of file')\n", (6243, 6270), False, 'from pymor.core.exceptions import GmshError\n'), ((7543, 7630), 'pymor.core.exceptions.GmshError', 'GmshError', (['"""number-of-nodes field does not match number of lines in nodes section"""'], {}), "(\n 'number-of-nodes field does not match number of lines in nodes section')\n", (7552, 7630), False, 'from pymor.core.exceptions import GmshError\n'), ((7743, 7779), 'pymor.core.exceptions.GmshError', 'GmshError', (['"""malformed nodes section"""'], {}), "('malformed nodes section')\n", (7752, 7779), False, 'from pymor.core.exceptions import GmshError\n'), ((8263, 8361), 'pymor.core.exceptions.GmshError', 'GmshError', (['"""number-of-elements field does not match number of lines in elements section"""'], {}), "(\n 'number-of-elements field does 
not match number of lines in elements section'\n )\n", (8272, 8361), False, 'from pymor.core.exceptions import GmshError\n'), ((9673, 9774), 'pymor.core.exceptions.GmshError', 'GmshError', (['"""number-of-names field does not match number of lines in physical names section"""'], {}), "(\n 'number-of-names field does not match number of lines in physical names section'\n )\n", (9682, 9774), False, 'from pymor.core.exceptions import GmshError\n'), ((9913, 9958), 'pymor.core.exceptions.GmshError', 'GmshError', (['"""malformed physical names section"""'], {}), "('malformed physical names section')\n", (9922, 9958), False, 'from pymor.core.exceptions import GmshError\n'), ((5713, 5778), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""malformed header: expected integer, got {header[1]}"""'], {}), "(f'malformed header: expected integer, got {header[1]}')\n", (5722, 5778), False, 'from pymor.core.exceptions import GmshError\n'), ((5999, 6064), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""malformed header: expected integer, got {header[2]}"""'], {}), "(f'malformed header: expected integer, got {header[2]}')\n", (6008, 6064), False, 'from pymor.core.exceptions import GmshError\n'), ((6495, 6539), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""expected section name, got {l}"""'], {}), "(f'expected section name, got {l}')\n", (6504, 6539), False, 'from pymor.core.exceptions import GmshError\n'), ((6638, 6683), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""unknown section type: {section}"""'], {}), "(f'unknown section type: {section}')\n", (6647, 6683), False, 'from pymor.core.exceptions import GmshError\n'), ((6756, 6805), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""unsupported section type: {section}"""'], {}), "(f'unsupported section type: {section}')\n", (6765, 6805), False, 'from pymor.core.exceptions import GmshError\n'), ((6864, 6912), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""only one {section} section 
allowed"""'], {}), "(f'only one {section} section allowed')\n", (6873, 6912), False, 'from pymor.core.exceptions import GmshError\n'), ((7414, 7484), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""first line of nodes sections is not a number: {nodes[0]}"""'], {}), "(f'first line of nodes sections is not a number: {nodes[0]}')\n", (7423, 7484), False, 'from pymor.core.exceptions import GmshError\n'), ((7926, 7962), 'pymor.core.exceptions.GmshError', 'GmshError', (['"""malformed nodes section"""'], {}), "('malformed nodes section')\n", (7935, 7962), False, 'from pymor.core.exceptions import GmshError\n'), ((8122, 8198), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""first line of elements sections is not a number: {elements[0]}"""'], {}), "(f'first line of elements sections is not a number: {elements[0]}')\n", (8131, 8198), False, 'from pymor.core.exceptions import GmshError\n'), ((8535, 8574), 'pymor.core.exceptions.GmshError', 'GmshError', (['"""malformed elements section"""'], {}), "('malformed elements section')\n", (8544, 8574), False, 'from pymor.core.exceptions import GmshError\n'), ((8780, 8832), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""element type {fields[0]} not supported"""'], {}), "(f'element type {fields[0]} not supported')\n", (8789, 8832), False, 'from pymor.core.exceptions import GmshError\n'), ((9048, 9087), 'pymor.core.exceptions.GmshError', 'GmshError', (['"""malformed elements section"""'], {}), "('malformed elements section')\n", (9057, 9087), False, 'from pymor.core.exceptions import GmshError\n'), ((9514, 9612), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""first line of physical names sections is not a number: {physical_names[0]}"""'], {}), "(\n f'first line of physical names sections is not a number: {physical_names[0]}'\n )\n", (9523, 9612), False, 'from pymor.core.exceptions import GmshError\n'), ((10121, 10166), 'pymor.core.exceptions.GmshError', 'GmshError', (['"""malformed physical names 
section"""'], {}), "('malformed physical names section')\n", (10130, 10166), False, 'from pymor.core.exceptions import GmshError\n'), ((7035, 7080), 'pymor.core.exceptions.GmshError', 'GmshError', (['f"""expected $End{section}, got {l}"""'], {}), "(f'expected $End{section}, got {l}')\n", (7044, 7080), False, 'from pymor.core.exceptions import GmshError\n')] |
import ray
import time
import numpy as np
@ray.remote
class ReplayBuffer(object):
    """Reference : DISTRIBUTED PRIORITIZED EXPERIENCE REPLAY
    Algo. 1 and Algo. 2 in Page-3 of (https://arxiv.org/pdf/1803.00933.pdf

    Stores finished game histories with per-transition priorities and serves
    prioritized samples (PER) for training.  Runs as a Ray actor, so all state
    lives inside this single actor process.
    """
    def __init__(self, config=None):
        # NOTE(review): despite the `config=None` default, config attributes are
        # read unconditionally below -- a real config object is required.
        self.config = config
        self.batch_size = config.batch_size
        self.keep_ratio = 1  # fraction of transition_top retained when trimming
        self.model_index = 0
        self.model_update_interval = 10
        self.buffer = []  # list of game history blocks
        self.priorities = []  # becomes an np.ndarray after the first save_game
        self.game_look_up = []  # flat transition idx -> (absolute game id, step in game)
        self._eps_collected = 0
        self.base_idx = 0  # absolute id of buffer[0]; grows as old games are removed
        self._alpha = config.priority_prob_alpha
        self.transition_top = int(config.transition_num * 10 ** 6)
        self.clear_time = 0  # timestamp of last trim; guards stale priority updates
    def save_pools(self, pools, gap_step):
        """Save a list of (game, priorities) pairs; every game is treated as finished."""
        # save a list of game histories
        for (game, priorities) in pools:
            # Only append end game
            # if end_tag:
            self.save_game(game, True, gap_step, priorities)
    def save_game(self, game, end_tag, gap_steps, priorities=None):
        """Save a game history block
        Parameters
        ----------
        game: Any
            a game history block
        end_tag: bool
            True -> the game is finished. (always True)
        gap_steps: int
            if the game is not finished, we only save the transitions that can be computed
        priorities: list
            the priorities corresponding to the transitions in the game history
        """
        # Drop new data once the configured transition budget is reached.
        if self.get_total_len() >= self.config.total_transitions:
            return
        if end_tag:
            self._eps_collected += 1
            valid_len = len(game)
        else:
            valid_len = len(game) - gap_steps
        if priorities is None:
            # New transitions default to the current maximum priority so they
            # are sampled at least once; trailing (invalid) steps get 0.
            max_prio = self.priorities.max() if self.buffer else 1
            self.priorities = np.concatenate((self.priorities, [max_prio for _ in range(valid_len)] + [0. for _ in range(valid_len, len(game))]))
        else:
            assert len(game) == len(priorities), " priorities should be of same length as the game steps"
            priorities = priorities.copy().reshape(-1)
            # priorities[valid_len:len(game)] = 0.
            self.priorities = np.concatenate((self.priorities, priorities))
        self.buffer.append(game)
        # Extend the flat index with one entry per transition of this game.
        self.game_look_up += [(self.base_idx + len(self.buffer) - 1, step_pos) for step_pos in range(len(game))]
    def get_game(self, idx):
        """Return the game history containing the flat transition index `idx`."""
        # return a game
        game_id, game_pos = self.game_look_up[idx]
        game_id -= self.base_idx
        game = self.buffer[game_id]
        return game
    def prepare_batch_context(self, batch_size, beta):
        """Prepare a batch context that contains:
        game_lst: a list of game histories
        game_pos_lst: transition index in game (relative index)
        indices_lst: transition index in replay buffer
        weights_lst: the weight concering the priority
        make_time: the time the batch is made (for correctly updating replay buffer when data is deleted)
        Parameters
        ----------
        batch_size: int
            batch size
        beta: float
            the parameter in PER for calculating the priority
        """
        assert beta > 0
        total = self.get_total_len()
        # PER sampling distribution: p_i^alpha / sum_j p_j^alpha.
        probs = self.priorities ** self._alpha
        probs /= probs.sum()
        # sample data
        indices_lst = np.random.choice(total, batch_size, p=probs, replace=False)
        # Importance-sampling weights, normalized so the largest weight is 1.
        weights_lst = (total * probs[indices_lst]) ** (-beta)
        weights_lst /= weights_lst.max()
        game_lst = []
        game_pos_lst = []
        for idx in indices_lst:
            game_id, game_pos = self.game_look_up[idx]
            game_id -= self.base_idx
            game = self.buffer[game_id]
            game_lst.append(game)
            game_pos_lst.append(game_pos)
        # Timestamps let update_priorities() reject writes for data that was
        # trimmed after this batch was sampled.
        make_time = [time.time() for _ in range(len(indices_lst))]
        context = (game_lst, game_pos_lst, indices_lst, weights_lst, make_time)
        return context
    def update_priorities(self, batch_indices, batch_priorities, make_time):
        """Write back new priorities, skipping entries sampled before the last trim."""
        # update the priorities for data still in replay buffer
        for i in range(len(batch_indices)):
            if make_time[i] > self.clear_time:
                idx, prio = batch_indices[i], batch_priorities[i]
                self.priorities[idx] = prio
    def remove_to_fit(self):
        """Drop the oldest games when the buffer exceeds its transition budget."""
        # remove some old data if the replay buffer is full.
        current_size = self.size()
        total_transition = self.get_total_len()
        if total_transition > self.transition_top:
            index = 0
            for i in range(current_size):
                total_transition -= len(self.buffer[i])
                if total_transition <= self.transition_top * self.keep_ratio:
                    index = i
                    break
            # Keep at least one batch worth of transitions after trimming.
            if total_transition >= self.config.batch_size:
                self._remove(index + 1)
    def _remove(self, num_excess_games):
        """Delete the first `num_excess_games` games and their bookkeeping entries."""
        # delete game histories
        excess_games_steps = sum([len(game) for game in self.buffer[:num_excess_games]])
        del self.buffer[:num_excess_games]
        self.priorities = self.priorities[excess_games_steps:]
        del self.game_look_up[:excess_games_steps]
        # base_idx keeps absolute game ids in game_look_up valid after deletion.
        self.base_idx += num_excess_games
        self.clear_time = time.time()
    def clear_buffer(self):
        # Drop all stored games (priorities/look-up are left untouched here).
        del self.buffer[:]
    def size(self):
        # number of games
        return len(self.buffer)
    def episodes_collected(self):
        # number of collected histories
        return self._eps_collected
    def get_batch_size(self):
        # Configured training batch size.
        return self.batch_size
    def get_priorities(self):
        # Raw per-transition priority array.
        return self.priorities
    def get_total_len(self):
        # number of transitions
        return len(self.priorities)
| [
"numpy.random.choice",
"time.time",
"numpy.concatenate"
] | [((3453, 3512), 'numpy.random.choice', 'np.random.choice', (['total', 'batch_size'], {'p': 'probs', 'replace': '(False)'}), '(total, batch_size, p=probs, replace=False)\n', (3469, 3512), True, 'import numpy as np\n'), ((5391, 5402), 'time.time', 'time.time', ([], {}), '()\n', (5400, 5402), False, 'import time\n'), ((2215, 2260), 'numpy.concatenate', 'np.concatenate', (['(self.priorities, priorities)'], {}), '((self.priorities, priorities))\n', (2229, 2260), True, 'import numpy as np\n'), ((3930, 3941), 'time.time', 'time.time', ([], {}), '()\n', (3939, 3941), False, 'import time\n')] |
################################################################################
# NAM Groningen 2017 Model: DeepNL/Utrecht University/Seismology
#
# <NAME> - <EMAIL>
################################################################################
import numpy as np
import time
# Buffered output lines are flushed to disk every `mod_perc` z-slices (I/O batching).
mod_perc = 10
class GMUtils:
    """Convert a regular 3D property grid (vp, vs, rho, Qu) into SPECFEM3D
    external-mesh input files: node coordinates, hexahedral cells, material
    assignments and the six boundary-surface files."""
    dummy = ''
    def __init__(self):
        # NOTE(review): assigns a local variable, not self.dummy -- effectively a no-op.
        dummy = 'still dumb'
    #############################################
    #
    # Create and write nodes file
    #
    #############################################
    def writeNodes2File(self,fpath,data):
        """Write 'nodes_coords_file' under directory *fpath*.

        *data* is a dict with:
          'props': property array (only its shape is printed here)
          'xd'/'yd'/'zd': per-axis (origin, spacing, cell count) triples
        Nodes are cell corners, hence count+1 coordinates per axis.
        """
        props = data['props']
        xdata = data['xd']
        ydata = data['yd']
        zdata = data['zd']
        print('writeNodes: props.shape:',props.shape)
        nx = int(xdata[2])
        ny = int(ydata[2])
        nz = int(zdata[2])
        dx = int(xdata[1])
        dy = int(ydata[1])
        dz = int(zdata[1])
        nxp1 = int(nx + 1)
        nyp1 = int(ny + 1)
        nzp1 = int(nz + 1)
        xmin = xdata[0]
        ymin = ydata[0]
        zmin = zdata[0]
        #
        # 8 nodes define each cell
        #
        # Corner coordinates start half a cell below the axis origin; the z
        # axis is generated downward and then reversed into ascending order.
        np_x = xmin - 0.5*dx + dx*np.arange(nxp1).reshape((1,nxp1))
        np_y = ymin - 0.5*dy + dy*np.arange(nyp1).reshape((nyp1,1))
        np_z = zmin + 0.5*dz - dz*np.arange(nzp1)
        np_z = np_z[::-1]
        print('np_z:\n',np_z)
        # Broadcast x/y into a (nyp1, nxp1, 2) grid of node coordinates.
        nodes = np.zeros((nyp1,nxp1,2))
        nodes[:,:,0] = nodes[:,:,0] + np_x
        nodes[:,:,1] = nodes[:,:,1] + np_y
        del np_x
        del np_y
        nodes = nodes.reshape(-1,2)
        #############################
        # Write nodes
        f = open('%s/nodes_coords_file' % fpath, 'w')
        f.write('%d\n' %(nxp1*nyp1*nzp1))
        z_stride = 0  # NOTE(review): unused
        str_nodes = []
        # One node line per (z-level, xy-position); ids are 1-based.
        for iiz in range(nzp1):
            for ixy in range(nxp1*nyp1):
                str_nodes.append('%9d %10.1f %10.1f %10.1f\n' % (iiz*nxp1*nyp1 + ixy + 1, nodes[ixy,0], nodes[ixy,1], np_z[iiz]))
            if (iiz+1)%mod_perc == 0 or iiz == nzp1-1:
                f.writelines(str_nodes)
                del str_nodes
                str_nodes = []
        f.close()
        del np_z
    #end def writeNodes2File
    #############################################
    #
    # Create and write Mesh files
    #
    #############################################
    def writeMesh2Files(self,fpath, data):
        """Write 'mesh_file', 'materials_file', 'nummaterial_velocity_file' and
        the six boundary-surface files under directory *fpath*.

        Cells are 8-node hexahedra numbered 1-based; every cell gets its own
        material entry built from vp/vs/rho/Qu (z axis reversed to match the
        node ordering).  Output is buffered and flushed every `mod_perc`
        z-slices.
        """
        #######################
        # Create cells
        mesh = []
        mats = []
        nummats = []
        xminfaces = []
        xmaxfaces = []
        yminfaces = []
        ymaxfaces = []
        zminfaces = []
        zmaxfaces = []
        props = data['props']
        xdata = data['xd']
        ydata = data['yd']
        zdata = data['zd']
        nx = int(xdata[2])
        ny = int(ydata[2])
        nz = int(zdata[2])
        dx = int(xdata[1])
        dy = int(ydata[1])
        dz = int(zdata[1])
        nxp1 = int(nx + 1)
        nyp1 = int(ny + 1)
        nzp1 = int(nz + 1)
        xmin = xdata[0]
        ymin = ydata[0]
        zmin = zdata[0]
        # Reverse the z axis of each property to match the ascending node order.
        vp = props[0,:,:,::-1]
        vs = props[1,:,:,::-1]
        rho = props[2,:,:,::-1]
        Qu = props[3,:,:,::-1]
        # Truncate/initialize the three output files before the append loop below.
        f = open('%s/mesh_file' % fpath, 'w')
        f.write('%ld\n' % (nx*ny*nz))
        f.close()
        mf = open('%s/materials_file' % fpath, 'w')
        mf.close()
        nmf = open('%s/nummaterial_velocity_file' % fpath, 'w')
        nmf.close()
        i_e = 0
        cv = np.zeros((8),dtype=np.int32)
        sgn = -1  # NOTE(review): unused
        for iz in range(nz):
            for iy in range(ny):
                for ix in range(nx):
                    #
                    # Work out corner points
                    #
                    # Node ids of the 8 hexahedron corners (0-based here; +1 below for 1-based output).
                    # Bottom face
                    #a
                    cv[0] = ix + iy*nxp1 + iz*nyp1*nxp1
                    #b
                    cv[1] = cv[0] + 1
                    #d
                    cv[3] = cv[0] + nxp1
                    #c
                    cv[2] = cv[3] + 1
                    # Top face
                    #e
                    cv[4] = cv[0] + nyp1*nxp1
                    #f
                    cv[5] = cv[4] + 1
                    #h
                    cv[7] = cv[4] + nxp1
                    #g
                    cv[6] = cv[7] + 1
                    cv += 1
                    i_e += 1
                    mi = i_e
                    '''
                    #
                    # Determine physical centre coordinate
                    #
                    mx = 0.5*(np_x[ix] + np_x[ix+1])
                    my = 0.5*(np_y[iy] + np_y[iy+1])
                    mz = 0.5*(np_z[iz] + np_z[iz+1])
                    m_tup = (i_e, cv[0], cv[1], cv[2], cv[3], cv[4], cv[5], cv[6], cv[7], mx, my, mz)
                    mesh.append('%d %d %d %d %d %d %d %d %d %10.1f %10.1f %10.1f\n' %m_tup)
                    '''
                    m_tup = (i_e, cv[0], cv[1], cv[2], cv[3], cv[4], cv[5], cv[6], cv[7])
                    mesh.append('%d %d %d %d %d %d %d %d %d\n' %m_tup)
                    # One material per cell: materials_file maps cell -> material id,
                    # nummaterial_velocity_file defines that material's properties.
                    mats.append('%d %d\n' %(i_e,i_e))
                    nummats.append('2 %d %d %d %d 9999 %d 0\n' % (i_e,rho[ix,iy,iz],vp[ix,iy,iz],vs[ix,iy,iz],Qu[ix,iy,iz]))
                    # Collect boundary faces of cells touching the domain edges.
                    if iz == 0:
                        # Add bottom face
                        zminfaces.append((mi, cv[3], cv[2], cv[1], cv[0]))
                    if iz == (nz - 1):
                        # Add top face
                        zmaxfaces.append((mi, cv[4], cv[5], cv[6], cv[7]))
                    if ix == 0:
                        # Add xmin face
                        xminfaces.append((mi, cv[4], cv[7], cv[3], cv[0]))
                    if ix == (nx - 1):
                        # Add xmax face
                        xmaxfaces.append((mi, cv[6], cv[5], cv[1], cv[2]))
                    if iy == 0:
                        # Add ymin face
                        yminfaces.append((mi, cv[0], cv[1], cv[5], cv[4]))
                    if iy == (ny - 1):
                        ymaxfaces.append((mi, cv[3], cv[7], cv[6], cv[2]))
                #end ix loop
            #end iy loop
            ################################
            # 1. Write mesh
            # 2. Generate materials and associate to mesh cells
            # g. Generate nummaterials
            if (iz)%mod_perc == 0 or iz == nz-1:
                f = open('%s/mesh_file' % fpath, 'a')
                f.writelines(mesh)
                del mesh
                mesh = []
                f.close()
                mf = open('%s/materials_file' % fpath, 'a')
                mf.writelines(mats)
                del mats
                mats = []
                mf.close()
                nmf = open('%s/nummaterial_velocity_file' % fpath, 'a')
                nmf.writelines(nummats)
                del nummats
                nummats = []
                nmf.close()
            #end if
        #end iz loop
        #
        # Write boundary faces
        #
        names = ['absorbing_surface_file_xmin',
                 'absorbing_surface_file_xmax',
                 'absorbing_surface_file_ymin',
                 'absorbing_surface_file_ymax',
                 'absorbing_surface_file_bottom',
                 'free_or_absorbing_surface_file_zmax']
        allfaces = [xminfaces, xmaxfaces, yminfaces, ymaxfaces, zminfaces, zmaxfaces]
        for name, faces in zip(names, allfaces):
            f = open('%s/%s' % (fpath, name), 'w')
            f.write('%d\n' % len(faces))
            for face in faces:
                #f.write(' '.join(map(lambda x: str(x + 1), face)))
                f.write(' '.join(map(lambda x: str(x), face)))
                f.write('\n')
            f.close()
    #end def writeMesh2Files
    ###################################################################
    #
    # Create and write Specfem3D Node, Mesh, an other related files
    #
    ###################################################################
    def writeSpecfem3DMesh(self, fpath, data):
        """Write the node and mesh files for *data* under *fpath*; return the
        elapsed wall-clock time in hours."""
        t_start = time.time()
        self.writeNodes2File(fpath,data)
        self.writeMesh2Files(fpath,data)
        t_total = (time.time() - t_start)/3600
        print('runtime (h):', t_total)
        return t_total
    #end def writeSpecfem3DMesh
| [
"numpy.zeros",
"time.time",
"numpy.arange"
] | [((1374, 1399), 'numpy.zeros', 'np.zeros', (['(nyp1, nxp1, 2)'], {}), '((nyp1, nxp1, 2))\n', (1382, 1399), True, 'import numpy as np\n'), ((3495, 3522), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'np.int32'}), '(8, dtype=np.int32)\n', (3503, 3522), True, 'import numpy as np\n'), ((8165, 8176), 'time.time', 'time.time', ([], {}), '()\n', (8174, 8176), False, 'import time\n'), ((1283, 1298), 'numpy.arange', 'np.arange', (['nzp1'], {}), '(nzp1)\n', (1292, 1298), True, 'import numpy as np\n'), ((8281, 8292), 'time.time', 'time.time', ([], {}), '()\n', (8290, 8292), False, 'import time\n'), ((1147, 1162), 'numpy.arange', 'np.arange', (['nxp1'], {}), '(nxp1)\n', (1156, 1162), True, 'import numpy as np\n'), ((1215, 1230), 'numpy.arange', 'np.arange', (['nyp1'], {}), '(nyp1)\n', (1224, 1230), True, 'import numpy as np\n')] |
import shutil
from pathlib import Path
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib.ticker as mtick
from scipy.optimize import minimize_scalar
import filter
# Bar width for matplotlib bar charts; presumably used by plotting code elsewhere in the file.
kBarWidth = 0.2
def fitLine(row, formantName, start, end, outputDir):
    """Fit a quartic to one formant track and locate its inflection point.

    Fits samples ``<formantName>_<start>`` .. ``<formantName>_<end>`` of *row*
    with a degree-4 polynomial, finds the maximum of the 2nd derivative
    (the "inflection"), and saves a diagnostic plot to *outputDir*.

    Args:
        row: pandas Series holding the formant samples and 'Filename'/'Annotation'.
        formantName: column prefix, e.g. 'barkF1'.
        start, end: first and last sample index of the fit window (inclusive).
        outputDir: pathlib.Path directory for the PNG.

    Returns:
        One-element Series named 'Inflection_<formantName>'.
    """
    key = '@'.join([row['Filename'], row['Annotation'], formantName])
    # BUG FIX: x was hard-coded to np.arange(2, 11), which only matched the
    # y slice (and only made sense) for start=2, end=10. Derive it from the
    # actual window so x and y always have the same length.
    x = np.arange(start, end + 1)
    y = row[formantName + '_' +
            str(start): formantName + '_' + str(end)].to_numpy(dtype='float')
    coeff = np.polyfit(x, y, 4)
    line1 = np.poly1d(coeff)
    line1d = np.polyder(line1, 1)
    line1dd = np.polyder(line1, 2)
    # Maximize the 2nd derivative by minimizing its negation over the window
    # (was hard-coded to (2, 10) for the same reason as x above).
    line1dd_max = minimize_scalar(-line1dd, bounds=(start, end), method='bounded')
    inflection = line1dd_max.x
    plt.plot(x, y, 'o')
    plt.plot(x, line1(x), label='fitted line')
    plt.plot(x, line1d(x), label='1st deriv')
    plt.plot(x, line1dd(x), label='2nd deriv')
    plt.axvline(x=inflection, linestyle='dashed', label='inflection')
    plt.legend(loc='best')
    plt.title(key)
    plt.savefig(outputDir / (key + '.png'))
    plt.clf()
    plt.cla()
    return pd.Series(inflection, index=['Inflection_' + formantName])
def removeChars(s):
    """Return *s* with characters unsafe for file names (' ', '\\', '/', '^') removed."""
    return s.translate(str.maketrans('', '', ' \\/^'))
class Analyzer(object):
    """Abstract base class for every analysis in this module."""

    def RunAnalysis(self, df, group_name, output_base_dir):
        """Run the analysis on *df*, writing artifacts under *output_base_dir*."""
        raise NotImplementedError

    def GetName(self):
        """Return the analysis name used in output file naming."""
        raise NotImplementedError
class FormantQuantiles(Analyzer):
    """Group-level mean F1-F2 differences at the 25/50/75% time points.

    Expects per-row formant samples barkF1_1..barkF1_11 / barkF2_1..barkF2_11;
    the 25% and 75% points are approximated by averaging the two neighbouring
    samples, the 50% point is sample 6.
    """

    def GetName(self):
        return "FormantQuantiles"

    def GetInputType(self):
        # Consumes formant-track input files.
        return "Formant"

    def RunAnalysis(self, df, group_name, output_dir):
        """Write <group>.csv (means of the diff columns) and <group>.debug.csv."""
        df['barkF1_25p'] = df[['barkF1_3', 'barkF1_4']].mean(axis=1)
        df['barkF1_75p'] = df[['barkF1_8', 'barkF1_9']].mean(axis=1)
        # Assign the Series directly instead of a one-column DataFrame
        # (df[['barkF1_6']]), which relies on fragile setitem alignment.
        df['barkF1_50p'] = df['barkF1_6']
        df['barkF2_25p'] = df[['barkF2_3', 'barkF2_4']].mean(axis=1)
        df['barkF2_75p'] = df[['barkF2_8', 'barkF2_9']].mean(axis=1)
        df['barkF2_50p'] = df['barkF2_6']
        # NOTE(review): the 'diff_F1F1_*' names look like a typo for F1F2
        # (cf. FormantQuantilesByDemographic), but they are kept because
        # downstream consumers read these CSV headers.
        df['diff_F1F1_25p'] = df['barkF1_25p'] - df['barkF2_25p']
        df['diff_F1F1_50p'] = df['barkF1_50p'] - df['barkF2_50p']
        df['diff_F1F1_75p'] = df['barkF1_75p'] - df['barkF2_75p']
        output_debug = pd.concat(
            [df[['Filename']],
             df[['Annotation']],
             df.loc[:, df.columns.str.startswith("barkF1")],
             df.loc[:, df.columns.str.startswith("barkF2")],
             df.loc[:, df.columns.str.startswith("diff")],
             ], axis=1)
        output = pd.DataFrame(
            df.loc[:, df.columns.str.startswith("diff")].mean()).T
        output_path = output_dir / (group_name + '.csv')
        output_debug_path = output_dir / (group_name + '.debug.csv')
        output_debug.to_csv(output_debug_path, index=False)
        output.to_csv(output_path, index=False)
class FormantQuantilesByDemographic(Analyzer):
    """Per-demographic grouped bar charts of F1-F2 quantile differences.

    For each outer filter (e.g. a word), rows are bucketed by the first
    matching inner filter (e.g. a demographic slice); one bar chart with
    the 25/50/75% F1-F2 differences per bucket is saved per outer filter.
    """
    def GetName(self):
        return "FormantQuantilesByDemographic"
    def GetInputType(self):
        # Consumes formant-track input files.
        return "Formant"
    def RunAnalysis(self, df, outer_filters, inner_filters, group_name, output_dir):
        for outer_f in outer_filters:
            key = outer_f.GetValue()
            # inner-filter value -> list of rows matching both filters
            matched_rows = dict()
            for _, row in df.iterrows():
                if not outer_f.IsMatched(row):
                    continue
                for inner_f in inner_filters:
                    if inner_f.IsMatched(row):
                        matched_rows.setdefault(
                            inner_f.GetValue(), []).append(row)
            if len(matched_rows) == 0:
                continue
            # Three bar positions, one per quantile (25/50/75%).
            x = np.arange(3)
            for k, v in matched_rows.items():
                matched_df = pd.DataFrame(v)
                full_group_name = group_name + '@' + outer_f.GetValue() + '@@' + k
                df_mean = self.ComputeMean(
                    matched_df, full_group_name, output_dir)
                y = [df_mean['diff_F1F2_25p'][0],
                     df_mean['diff_F1F2_50p'][0],
                     df_mean['diff_F1F2_75p'][0]]
                plt.bar(x, y, width=kBarWidth, label=k)
                # Shift the next series right so the bars sit side by side.
                x = [xval + kBarWidth for xval in x]
            plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
            plt.xticks([r + kBarWidth for r in range(3)],
                       ('25%', '50%', '75%'))
            plt.title(key)
            plt.savefig(output_dir / (group_name + '@' +
                                      key + '.png'), bbox_inches="tight")
            # Reset matplotlib's implicit figure state before the next chart.
            plt.clf()
            plt.cla()
    def ComputeMean(self, df, full_group_name, output_dir):
        """Add quantile/diff columns to *df* (in place), write summary and
        debug CSVs, and return the one-row summary of diff means."""
        df['barkF1_25p'] = df[['barkF1_3', 'barkF1_4']].mean(axis=1)
        df['barkF1_75p'] = df[['barkF1_8', 'barkF1_9']].mean(axis=1)
        df['barkF1_50p'] = df[['barkF1_6']]
        df['barkF2_25p'] = df[['barkF2_3', 'barkF2_4']].mean(axis=1)
        df['barkF2_75p'] = df[['barkF2_8', 'barkF2_9']].mean(axis=1)
        df['barkF2_50p'] = df[['barkF2_6']]
        df['diff_F1F2_25p'] = df['barkF1_25p'] - df['barkF2_25p']
        df['diff_F1F2_50p'] = df['barkF1_50p'] - df['barkF2_50p']
        df['diff_F1F2_75p'] = df['barkF1_75p'] - df['barkF2_75p']
        output = pd.DataFrame(
            df.loc[:, df.columns.str.startswith("diff")].mean()).T
        output_path = output_dir / (full_group_name + '.csv')
        output_debug_path = output_dir / (full_group_name + '.debug.csv')
        output.to_csv(output_path, index=False)
        df.to_csv(output_debug_path, index=False)
        return output
class FormantRegression(Analyzer):
    """Quartic fit of the group-mean F1/F2 tracks.

    Reports each formant's inflection point (the maximum of the fitted
    curve's 2nd derivative on [0, 8]) and saves fitted-curve and
    inflection plots.
    """
    def GetName(self):
        return "FormantRegression"
    def GetInputType(self):
        return "Formant"
    def RunAnalysis(self, df, group_name, output_dir):
        # Average each sample over all rows, then fit samples 2..10.
        s_f1 = df.loc[:, df.columns.str.startswith("barkF1")].mean()
        s_f2 = df.loc[:, df.columns.str.startswith("barkF2")].mean()
        x = np.arange(0, 9)
        y1 = s_f1['barkF1_2': 'barkF1_10'].to_numpy(dtype='float')
        y2 = s_f2['barkF2_2': 'barkF2_10'].to_numpy(dtype='float')
        coeff1 = np.polyfit(x, y1, 4)
        coeff2 = np.polyfit(x, y2, 4)
        line1 = np.poly1d(coeff1)
        line2 = np.poly1d(coeff2)
        # line1d = np.polyder(line1, 1)
        # line2d = np.polyder(line2, 1)
        line1dd = np.polyder(line1, 2)
        line2dd = np.polyder(line2, 2)
        # Maximize each 2nd derivative by minimizing its negation on [0, 8].
        line1dd_max = minimize_scalar(-line1dd,
                                       bounds=(0, 8), method='bounded')
        line2dd_max = minimize_scalar(-line2dd,
                                       bounds=(0, 8), method='bounded')
        inflection1 = line1dd_max.x
        inflection2 = line2dd_max.x
        df_inflex = pd.DataFrame(
            data={'f1_inflection': [inflection1], 'f2_inflection': [inflection2]})
        df_inflex.to_csv(output_dir / (group_name + '.csv'), index=False)
        # Plot f1/f2
        plt.plot(x, y1, 'o')
        plt.plot(x, y2, 'x')
        plt.plot(x, line1(x), label='F1 fitted')
        plt.plot(x, line2(x), label='F2 fitted')
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.title(group_name)
        plt.savefig(output_dir / (group_name + '.fitted.png'),
                    bbox_inches="tight")
        plt.clf()
        plt.cla()
        # plt.plot(x, line1d(x), label='F1 1st deriv')
        # plt.plot(x, line2d(x), label='F2 1st deriv')
        # Plot deriv and inflection
        plt.plot(x, line1dd(x), label='F1 2nd deriv')
        plt.plot(x, line2dd(x), label='F2 2nd deriv')
        plt.axvline(x=inflection1, linestyle=':', label='F1 inflection')
        plt.axvline(x=inflection2, linestyle='-.', label='F2 inflection')
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.title(group_name)
        plt.savefig(output_dir / (group_name + '.inflection.png'),
                    bbox_inches="tight")
        plt.clf()
        plt.cla()
        output_debug_path = output_dir / (group_name + '.debug.csv')
        df.to_csv(output_debug_path, index=False)
class HnrRegression(Analyzer):
    """Quartic fit of the group-mean HNR track; reports its inflection point."""
    def GetName(self):
        return "HnrRegression"
    def GetInputType(self):
        return "HNR"
    def RunAnalysis(self, df, group_name, output_dir):
        # Smooth the track by averaging neighbouring samples into mid_1..mid_9.
        for i in range(1, 10):
            df['mid_'+str(i)] = df[['HNR_'+str(i),
                                    'HNR_'+str(i+1)]].mean(axis=1)
        sy = df.loc[:, df.columns.str.startswith('mid_')].mean()
        y = sy['mid_1': 'mid_9'].to_numpy(dtype='float')
        x = np.arange(0, 9)
        coeff = np.polyfit(x, y, 4)
        line1 = np.poly1d(coeff)
        line1dd = np.polyder(line1, 2)
        # Maximize the 2nd derivative by minimizing its negation on [0, 8].
        line1dd_max = minimize_scalar(-line1dd,
                                       bounds=(0, 8), method='bounded')
        inflection = line1dd_max.x
        df_inflex = pd.DataFrame(data={'inflection': [inflection]})
        df_inflex.to_csv(output_dir / (group_name + '.csv'), index=False)
        plt.plot(x, y, 'o')
        plt.plot(x, line1(x), label='fitted')
        plt.plot(x, line1dd(x), label='2nd deriv')
        plt.axvline(x=inflection, linestyle=':', label='inflection')
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.title(group_name)
        plt.savefig(output_dir / (group_name + '.png'), bbox_inches="tight")
        plt.clf()
        plt.cla()
        output_debug_path = output_dir / (group_name + '.debug.csv')
        df.to_csv(output_debug_path, index=False)
class HnrQuantilesMean(Analyzer):
    """Group-level mean HNR at the 25/50/75% time points."""

    def GetName(self):
        return "HnrQuantilesMean"

    def GetInputType(self):
        return "HNR"

    def RunAnalysis(self, df, group_name, output_dir):
        """Write <group>.csv with the means of HNR_p25/p75/p50 plus a debug CSV."""
        # Column insertion order (p25, p75, p50) is deliberate: it fixes the
        # order of the columns in the summary CSV.
        for label, cols in (('HNR_p25', ['HNR_2', 'HNR_3']),
                            ('HNR_p75', ['HNR_7', 'HNR_8'])):
            df[label] = df[cols].mean(axis=1)
        df['HNR_p50'] = df[['HNR_5']]
        summary = pd.DataFrame(
            df.loc[:, df.columns.str.startswith("HNR_p")].mean()).T
        summary.to_csv(output_dir / (group_name + '.csv'), index=False)
        df.to_csv(output_dir / (group_name + '.debug.csv'), index=False)
class HnrTTest(Analyzer):
    """Per-group means of the quantile HNR values (input for later t-tests)."""

    def GetName(self):
        return "HnrTTest"

    def GetInputType(self):
        return "HNR"

    def RunAnalysis(self, df, group_name, output_dir):
        """Write <group>.csv with the means of HNR_25p/75p/50p and a debug CSV.

        BUG FIX: the original selected columns starting with "diff", which are
        never created in this class, so the summary CSV was always empty.
        The computed quantile columns are summarized instead.
        """
        df['HNR_25p'] = df[['HNR_2', 'HNR_3']].mean(axis=1)
        df['HNR_75p'] = df[['HNR_7', 'HNR_8']].mean(axis=1)
        df['HNR_50p'] = df[['HNR_5']]
        output = pd.DataFrame(
            df[['HNR_25p', 'HNR_75p', 'HNR_50p']].mean()).T
        output_path = output_dir / (group_name + '.csv')
        output.to_csv(output_path, index=False)
        output_debug_path = output_dir / (group_name + '.debug.csv')
        df.to_csv(output_debug_path, index=False)
def ComputeF1F2Diff(df):
    """Add 25%/75% formant estimates and their 75%-25% spreads to *df* in place.

    Creates barkF1_25p/75p and barkF2_25p/75p (mean of the two samples nearest
    each quantile) plus diff_F1_7525 and diff_F2_7525, and returns *df*.
    """
    for prefix in ('barkF1', 'barkF2'):
        df[prefix + '_25p'] = df[[prefix + '_3', prefix + '_4']].mean(axis=1)
        df[prefix + '_75p'] = df[[prefix + '_8', prefix + '_9']].mean(axis=1)
    df['diff_F1_7525'] = df['barkF1_75p'] - df['barkF1_25p']
    df['diff_F2_7525'] = df['barkF2_75p'] - df['barkF2_25p']
    return df
class FormantQuantilesF1F2Base(Analyzer):
    """Grouped bar chart of the 75%-25% spread of F1 and F2.

    *filter_map* maps a legend key to the list of filters a row must all
    match to be counted under that key.
    """
    def __init__(self, filter_map):
        self.filter_map = filter_map
    def RunAnalysis(self, df, group_name, output_dir):
        # key -> rows matching every filter configured for that key
        matched_rows_map = {}
        for key, _ in self.filter_map.items():
            matched_rows_map[key] = []
        for _, row in df.iterrows():
            for key, filters in self.filter_map.items():
                is_all_matched = [f.IsMatched(row) for f in filters]
                if np.all(is_all_matched):
                    matched_rows_map[key].append(row)
        matched_df = {}
        for key, rows in matched_rows_map.items():
            matched_df[key] = pd.DataFrame(rows)
        # Two bar positions: delta_F1 and delta_F2.
        x = np.arange(2)
        for key, mdf in matched_df.items():
            mdf = ComputeF1F2Diff(mdf)
            df_mean = pd.DataFrame(
                mdf.loc[:, mdf.columns.str.startswith("diff")].mean()).T
            mdf.to_csv(output_dir / (group_name + '@@@' +
                                     key + '.debug.csv'), index=False)
            df_mean.to_csv(output_dir / (group_name + '@@@' +
                                         key+'Mean.debug.csv'), index=False)
            y = [df_mean['diff_F1_7525'][0], df_mean['diff_F2_7525'][0]]
            plt.bar(x, y, width=kBarWidth, label=key)
            # Shift the next series right so the bars sit side by side.
            x = [xval + kBarWidth for xval in x]
        plt.xticks([r + kBarWidth for r in range(2)], ('delta_F1', 'delta_F2'))
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.title(group_name)
        plt.savefig(output_dir / (group_name + '.png'), bbox_inches="tight")
        plt.clf()
        plt.cla()
class FormantQuantilesF1F2SaSb(FormantQuantilesF1F2Base):
    """F1/F2 spread comparison: Shanghainese position 'a' vs position 'b'."""
    def __init__(self):
        super().__init__({
            'Sa': [filter.IsShanghainese(), filter.IsPosition('a')],
            'Sb': [filter.IsShanghainese(), filter.IsPosition('b')],
        })
class FormantQuantilesF1F2SbMb(FormantQuantilesF1F2Base):
    """F1/F2 spread comparison: Shanghainese 'b' vs Mandarin 'b'."""
    def __init__(self):
        super().__init__({
            'Sb': [filter.IsShanghainese(), filter.IsPosition('b')],
            'Mb': [filter.IsMandarin(), filter.IsPosition('b')],
        })
class FormantQuantilesSbMbBase(Analyzer):
    """Bar chart of one formant's 75%-25% spread: Shanghainese 'b' vs Mandarin 'b'.

    *formant* is 'F1' or 'F2'; it selects which diff_<formant>_7525 column
    is plotted.
    """
    def __init__(self, formant):
        self.formant = formant
    def RunAnalysis(self, df, group_name, output_dir):
        # Split rows into Shanghainese-b (Sb) and Mandarin-b (Mb).
        rows_sb = []
        rows_mb = []
        for _, row in df.iterrows():
            if filter.IsShanghainese().IsMatched(row) and filter.IsPosition('b').IsMatched(row):
                rows_sb.append(row)
                continue
            if filter.IsMandarin().IsMatched(row) and filter.IsPosition('b').IsMatched(row):
                rows_mb.append(row)
                continue
        df_sb = pd.DataFrame(rows_sb)
        df_sb = ComputeF1F2Diff(df_sb)
        df_sb_avg = pd.DataFrame(
            df_sb.loc[:, df_sb.columns.str.startswith("diff")].mean()).T
        df_sb.to_csv(output_dir / (group_name +
                                   '@@@Sb.debug.csv'), index=False)
        df_sb_avg.to_csv(output_dir / (group_name +
                                       '@@@SbMean.debug.csv'), index=False)
        df_mb = pd.DataFrame(rows_mb)
        df_mb = ComputeF1F2Diff(df_mb)
        df_mb_avg = pd.DataFrame(
            df_mb.loc[:, df_mb.columns.str.startswith("diff")].mean()).T
        df_mb.to_csv(output_dir / (group_name +
                                   '@@@Mb.debug.csv'), index=False)
        df_mb_avg.to_csv(output_dir / (group_name +
                                       '@@@MbMean.debug.csv'), index=False)
        # One bar per language group for the configured formant.
        x = ['Sb', 'Mb']
        y = [df_sb_avg['diff_' + self.formant + '_7525'][0],
             df_mb_avg['diff_'+self.formant+'_7525'][0]]
        plt.bar(x, y, width=kBarWidth)
        plt.title(group_name)
        plt.savefig(output_dir / (group_name + '.png'), bbox_inches="tight")
        plt.clf()
        plt.cla()
class FormantQuantilesF1SbMb(FormantQuantilesSbMbBase):
    """Sb-vs-Mb spread chart for F1."""
    def __init__(self):
        super().__init__('F1')
class FormantQuantilesF2SbMb(FormantQuantilesSbMbBase):
    """Sb-vs-Mb spread chart for F2."""
    def __init__(self):
        super().__init__('F2')
class FormantRegressionBase(Analyzer):
    """Quartic fit of the mean F1/F2 tracks over the rows matching *filters*.

    Writes each formant's inflection (max of the 2nd derivative on [0, 8])
    to CSV and saves fitted-curve and inflection plots. The commented-out
    code explores the 3rd derivative and is kept for reference.
    """
    def __init__(self, filters):
        self.filters = filters
    def RunAnalysis(self, df, group_name, output_dir):
        # Keep only the rows matching every configured filter.
        matched_rows = []
        for _, row in df.iterrows():
            is_all_matched = [f.IsMatched(row) for f in self.filters]
            if np.all(is_all_matched):
                matched_rows.append(row)
        df = pd.DataFrame(matched_rows)
        filter_name = '_'.join([f.GetValue() for f in self.filters])
        full_group_name = group_name + '@@' + filter_name
        # Average each sample over the matched rows, then fit samples 2..10.
        s_f1 = df.loc[:, df.columns.str.startswith("barkF1")].mean()
        s_f2 = df.loc[:, df.columns.str.startswith("barkF2")].mean()
        x = np.arange(0, 9)
        y1 = s_f1['barkF1_2': 'barkF1_10'].to_numpy(dtype='float')
        y2 = s_f2['barkF2_2': 'barkF2_10'].to_numpy(dtype='float')
        coeff1 = np.polyfit(x, y1, 4)
        coeff2 = np.polyfit(x, y2, 4)
        line1 = np.poly1d(coeff1)
        line2 = np.poly1d(coeff2)
        line1dd = np.polyder(line1, 2)
        line2dd = np.polyder(line2, 2)
        # line1ddd = np.polyder(line1, 3)
        # line2ddd = np.polyder(line2, 3)
        # Maximize each 2nd derivative by minimizing its negation on [0, 8].
        line1dd_max = minimize_scalar(-line1dd,
                                       bounds=(0, 8), method='bounded')
        line2dd_max = minimize_scalar(-line2dd,
                                       bounds=(0, 8), method='bounded')
        inflection1 = line1dd_max.x
        inflection2 = line2dd_max.x
        # line1ddd_max_left = minimize_scalar(-line1ddd,
        #                                     bounds=(0, inflection1), method='bounded')
        # line1ddd_max_right = minimize_scalar(-line1ddd,
        #                                      bounds=(inflection1, 8), method='bounded')
        # line2ddd_max_left = minimize_scalar(-line2ddd,
        #                                     bounds=(0, inflection2), method='bounded')
        # line2ddd_max_right = minimize_scalar(-line2ddd,
        #                                      bounds=(inflection2, 8), method='bounded')
        # inflection1d_left = line1ddd_max_left.x
        # inflection1d_right = line1ddd_max_right.x
        # inflection2d_left = line2ddd_max_left.x
        # inflection2d_right = line2ddd_max_right.x
        df_inflex = pd.DataFrame(
            data={'f1_inflection': [inflection1], 'f2_inflection': [inflection2]})
        df_inflex.to_csv(output_dir / (full_group_name + '.csv'), index=False)
        # Plot f1/f2
        plt.plot(x, y1, 'o')
        plt.plot(x, y2, 'x')
        plt.plot(x, line1(x), label='F1 fitted')
        plt.plot(x, line2(x), label='F2 fitted')
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.title(full_group_name)
        plt.savefig(output_dir / (full_group_name + '.fitted.png'),
                    bbox_inches="tight")
        plt.clf()
        plt.cla()
        # Plot deriv and inflection
        plt.plot(x, line1dd(x), label='F1 2nd deriv')
        plt.plot(x, line2dd(x), label='F2 2nd deriv')
        plt.axvline(x=inflection1, linestyle=':', label='F1 inflection')
        plt.axvline(x=inflection2, linestyle='-.', label='F2 inflection')
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.title(full_group_name)
        plt.savefig(output_dir / (full_group_name + '.inflection.png'),
                    bbox_inches="tight")
        plt.clf()
        plt.cla()
        # Plot 3rd deriv and inflection
        # plt.plot(x, line1ddd(x), label='F1 3rd deriv')
        # plt.plot(x, line2ddd(x), label='F2 3rd deriv')
        # plt.axvline(x=inflection1d_left, linestyle=':', label='F1 inf L')
        # plt.axvline(x=inflection1d_right, linestyle=':', label='F1 inf R')
        # plt.axvline(x=inflection2d_left, linestyle='-.', label='F2 inf L')
        # plt.axvline(x=inflection2d_right, linestyle='-.', label='F2 inf R')
        # plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        # plt.title(full_group_name)
        # plt.savefig(output_dir / (full_group_name + '.inflection3rd.png'),
        #             bbox_inches="tight")
        # plt.clf()
        # plt.cla()
        # output_debug_path = output_dir / (full_group_name + '.debug.csv')
        # df.to_csv(output_debug_path, index=False)
class FormantRegressionSa(FormantRegressionBase):
    """Regression over Shanghainese rows at position 'a'."""
    def __init__(self):
        super().__init__([filter.IsShanghainese(), filter.IsPosition('a')])
class FormantRegressionSb(FormantRegressionBase):
    """Regression over Shanghainese rows at position 'b'."""
    def __init__(self):
        super().__init__([filter.IsShanghainese(), filter.IsPosition('b')])
class FormantRegressionMb(FormantRegressionBase):
    """Regression over Mandarin rows at position 'b'."""
    def __init__(self):
        super().__init__([filter.IsMandarin(), filter.IsPosition('b')])
class FormantInflectionBase(Analyzer):
    """Stacked bar charts of where each formant's inflection falls in time.

    For every key in *filter_map* the matching rows' mean F1/F2 tracks are
    fitted with a quartic; the inflection position (on the 0..8 sample axis)
    is expressed as front/back fractions of the track and plotted as a
    100%-stacked bar per key, one figure per formant.
    """
    def __init__(self, filter_map):
        self.filter_map = filter_map
    def RunAnalysis(self, df, group_name, output_dir):
        # key -> rows matching every filter configured for that key
        matched_rows_map = {}
        for key, _ in self.filter_map.items():
            matched_rows_map[key] = []
        for _, row in df.iterrows():
            for key, filters in self.filter_map.items():
                is_all_matched = [f.IsMatched(row) for f in filters]
                if np.all(is_all_matched):
                    matched_rows_map[key].append(row)
        matched_df = {}
        for key, rows in matched_rows_map.items():
            matched_df[key] = pd.DataFrame(rows)
        x_all = []
        f1_front = []
        f1_back = []
        f2_front = []
        f2_back = []
        for key, mdf in matched_df.items():
            s_f1 = mdf.loc[:, mdf.columns.str.startswith("barkF1")].mean()
            s_f2 = mdf.loc[:, mdf.columns.str.startswith("barkF2")].mean()
            x = np.arange(0, 9)
            y1 = s_f1['barkF1_2': 'barkF1_10'].to_numpy(dtype='float')
            y2 = s_f2['barkF2_2': 'barkF2_10'].to_numpy(dtype='float')
            coeff1 = np.polyfit(x, y1, 4)
            coeff2 = np.polyfit(x, y2, 4)
            line1 = np.poly1d(coeff1)
            line2 = np.poly1d(coeff2)
            line1dd = np.polyder(line1, 2)
            line2dd = np.polyder(line2, 2)
            # Maximize each 2nd derivative by minimizing its negation on [0, 8].
            line1dd_max = minimize_scalar(-line1dd,
                                           bounds=(0, 8), method='bounded')
            line2dd_max = minimize_scalar(-line2dd,
                                           bounds=(0, 8), method='bounded')
            inflection1 = line1dd_max.x
            inflection2 = line2dd_max.x
            x_all.append(key)
            # Normalize the inflection position to a fraction of the 0..8 axis.
            f1_front.append(inflection1 / 8.0)
            f1_back.append(1 - inflection1 / 8.0)
            f2_front.append(inflection2 / 8.0)
            f2_back.append(1 - inflection2 / 8.0)
        full_group_name = group_name + '@@' + '_'.join(matched_df.keys())
        plt.bar(x_all, f1_front, width=kBarWidth, label='Front')
        plt.bar(x_all, f1_back, bottom=np.array(
            f1_front), width=kBarWidth, label='Back')
        plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(1))
        plt.title(full_group_name+'@@@F1')
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.savefig(output_dir / (full_group_name + '.f1.png'), bbox_inches="tight")
        plt.clf()
        plt.cla()
        plt.bar(x_all, f2_front, width=kBarWidth, label='Front')
        plt.bar(x_all, f2_back, bottom=np.array(
            f2_front), width=kBarWidth, label='Back')
        plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(1))
        plt.title(full_group_name+'@@@F2')
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.savefig(output_dir / (full_group_name + '.f2.png'), bbox_inches="tight")
        plt.clf()
        plt.cla()
class FormantInflectionMb(FormantInflectionBase):
    """Inflection chart for Mandarin position 'b' only."""
    def __init__(self):
        super().__init__({'Mb': [filter.IsMandarin(), filter.IsPosition('b')]})
class FormantInflectionSbMb(FormantInflectionBase):
    """Inflection chart: Shanghainese 'b' vs Mandarin 'b'."""
    def __init__(self):
        super().__init__({
            'Sb': [filter.IsShanghainese(), filter.IsPosition('b')],
            'Mb': [filter.IsMandarin(), filter.IsPosition('b')]
        })
class FormantInflectionSaSb(FormantInflectionBase):
    """Inflection chart: Shanghainese 'a' vs Shanghainese 'b'."""
    def __init__(self):
        super().__init__({
            'Sa': [filter.IsShanghainese(), filter.IsPosition('a')],
            'Sb': [filter.IsShanghainese(), filter.IsPosition('b')],
        })
class GetAge:
    """Slicer mapping a row's speaker id (3rd filename field) to an age group.

    Ids 1-20 -> Senior, 21-40 -> Adult, 41-60 -> Youth, 61-80 -> Child;
    anything else raises NotImplementedError.
    """

    def GetSlice(self, row):
        parts = row['Filename'].split('_')
        assert len(parts) in (5, 6)
        speaker_id = int(parts[2])
        # Checked in ascending order, so the first satisfied upper bound wins.
        for upper, label in ((20, 'Senior'), (40, 'Adult'),
                             (60, 'Youth'), (80, 'Child')):
            if 1 <= speaker_id <= upper:
                return label
        raise NotImplementedError

    def GetOrder(self):
        """Fixed display order for the age groups."""
        return ['Senior', 'Adult', 'Youth', 'Child']
class GetGender:
    """Slicer mapping a row's speaker id (3rd filename field) to a gender.

    Even ids are Female, odd ids are Male.
    """

    def GetSlice(self, row):
        parts = row['Filename'].split('_')
        assert len(parts) in (5, 6)
        return 'Female' if int(parts[2]) % 2 == 0 else 'Male'

    def GetOrder(self):
        """Fixed display order for the genders."""
        return ['Female', 'Male']
class FormantQuantilesSlicedBase(Analyzer):
    """Bar chart of one formant's 75%-25% spread, sliced by a demographic.

    Rows matching every filter in *word_filters* are bucketed by
    slicer.GetSlice(row); one bar per slice (0 when a slice is empty).
    """
    def __init__(self, formant, word, word_filters, slicer):
        self.formant = formant
        self.word = word
        self.word_filters = word_filters
        # *slicer* is a class (e.g. GetAge); instantiate it here.
        self.slicer = slicer()
    def RunAnalysis(self, df, group_name, output_dir):
        # slice label -> matching rows
        matched_rows_map = {}
        for _, row in df.iterrows():
            is_all_matched = [f.IsMatched(row) for f in self.word_filters]
            if np.all(is_all_matched):
                matched_rows_map.setdefault(self.slicer.GetSlice(row), []).append(row)
        x = []
        y = []
        full_group_name = group_name + '@@' + self.formant+'_'+self.word
        for key in self.slicer.GetOrder():
            if key not in matched_rows_map:
                # Keep the slice on the axis with a zero-height bar.
                x.append(key)
                y.append(0)
                continue
            matched_rows = matched_rows_map[key]
            mdf = pd.DataFrame(matched_rows)
            mdf = ComputeF1F2Diff(mdf)
            df_mean = pd.DataFrame(
                mdf.loc[:, mdf.columns.str.startswith("diff")].mean()).T
            mdf.to_csv(output_dir / (full_group_name + '@@@' +
                                     key + '.debug.csv'), index=False)
            df_mean.to_csv(output_dir / (full_group_name + '@@@' +
                                         key+'Mean.debug.csv'), index=False)
            x.append(key)
            y.append(df_mean['diff_'+self.formant+'_7525'][0])
        plt.bar(x, y)
        plt.title(full_group_name)
        plt.savefig(output_dir / (full_group_name + '.png'),
                    bbox_inches="tight")
        plt.clf()
        plt.cla()
class FormantQuantilesF1SaAge(FormantQuantilesSlicedBase):
    """F1 spread for Shanghainese 'a', sliced by age."""
    def __init__(self):
        super().__init__('F1', 'Sa', [filter.IsShanghainese(), filter.IsPosition('a')],
                         GetAge)
class FormantQuantilesF1SbAge(FormantQuantilesSlicedBase):
    """F1 spread for Shanghainese 'b', sliced by age."""
    def __init__(self):
        super().__init__('F1', 'Sb', [filter.IsShanghainese(), filter.IsPosition('b')],
                         GetAge)
class FormantQuantilesF2SaAge(FormantQuantilesSlicedBase):
    """F2 spread for Shanghainese 'a', sliced by age."""
    def __init__(self):
        super().__init__('F2', 'Sa', [filter.IsShanghainese(), filter.IsPosition('a')],
                         GetAge)
class FormantQuantilesF2SbAge(FormantQuantilesSlicedBase):
    """F2 spread for Shanghainese 'b', sliced by age."""
    def __init__(self):
        super().__init__('F2', 'Sb', [filter.IsShanghainese(), filter.IsPosition('b')],
                         GetAge)
class FormantQuantilesF1MbAge(FormantQuantilesSlicedBase):
    """F1 spread for Mandarin 'b', sliced by age."""
    def __init__(self):
        super().__init__('F1', 'Mb', [filter.IsMandarin(), filter.IsPosition('b')],
                         GetAge)
class FormantQuantilesF2MbAge(FormantQuantilesSlicedBase):
    """F2 spread for Mandarin 'b', sliced by age."""
    def __init__(self):
        super().__init__('F2', 'Mb', [filter.IsMandarin(), filter.IsPosition('b')],
                         GetAge)
class FormantQuantilesF1SaGender(FormantQuantilesSlicedBase):
    """F1 spread for Shanghainese 'a', sliced by gender."""
    def __init__(self):
        super().__init__('F1', 'Sa', [filter.IsShanghainese(), filter.IsPosition('a')],
                         GetGender)
class FormantQuantilesF1SbGender(FormantQuantilesSlicedBase):
    """F1 spread for Shanghainese 'b', sliced by gender."""
    def __init__(self):
        super().__init__('F1', 'Sb', [filter.IsShanghainese(), filter.IsPosition('b')],
                         GetGender)
class FormantQuantilesF2SaGender(FormantQuantilesSlicedBase):
    """F2 spread for Shanghainese 'a', sliced by gender."""
    def __init__(self):
        super().__init__('F2', 'Sa', [filter.IsShanghainese(), filter.IsPosition('a')],
                         GetGender)
class FormantQuantilesF2SbGender(FormantQuantilesSlicedBase):
    """F2 spread for Shanghainese 'b', sliced by gender."""
    def __init__(self):
        super().__init__('F2', 'Sb', [filter.IsShanghainese(), filter.IsPosition('b')],
                         GetGender)
class FormantQuantilesF1MbGender(FormantQuantilesSlicedBase):
    """F1 spread for Mandarin 'b', sliced by gender."""
    def __init__(self):
        super().__init__('F1', 'Mb', [filter.IsMandarin(), filter.IsPosition('b')],
                         GetGender)
class FormantQuantilesF2MbGender(FormantQuantilesSlicedBase):
    """F2 spread for Mandarin 'b', sliced by gender."""
    def __init__(self):
        super().__init__('F2', 'Mb', [filter.IsMandarin(), filter.IsPosition('b')],
                         GetGender)
class FormantRegressionSlicedBase(Analyzer):
    """3D plot of per-slice mean F1/F2 tracks with marked inflection points.

    Rows matching *word_filters* are bucketed by slicer.GetSlice(row); each
    slice gets its own depth position (zdir='x') in a single 3D figure, with
    short vertical black segments marking each curve's inflection.
    """
    def __init__(self, word, word_filters, slicer):
        self.word = word
        self.word_filters = word_filters
        # *slicer* is a class (e.g. GetAge); instantiate it here.
        self.slicer = slicer()
    def RunAnalysis(self, df, group_name, output_dir):
        # slice label -> matching rows
        matched_rows_map = {}
        for _, row in df.iterrows():
            is_all_matched = [f.IsMatched(row) for f in self.word_filters]
            if np.all(is_all_matched):
                matched_rows_map.setdefault(self.slicer.GetSlice(row), []).append(row)
        full_group_name = group_name + '@@' + self.word
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        z_label = self.slicer.GetOrder()
        # One distinct color per slice, evenly spaced through the colormap.
        cmap = plt.get_cmap('viridis')
        colors = cmap(np.linspace(0, 1, len(z_label)))
        for key in self.slicer.GetOrder():
            x = np.arange(0, 9)
            color = colors[z_label.index(key)]
            z = z_label.index(key)
            if key not in matched_rows_map:
                # Empty slice: draw a flat zero line to keep the depth slot.
                dummy_y = np.zeros(9)
                ax.plot(x, dummy_y, zs=z, zdir='x', c=color)
                continue
            matched_rows = matched_rows_map[key]
            mdf = pd.DataFrame(matched_rows)
            s_f1 = mdf.loc[:, mdf.columns.str.startswith("barkF1")].mean()
            s_f2 = mdf.loc[:, mdf.columns.str.startswith("barkF2")].mean()
            y1 = s_f1['barkF1_2': 'barkF1_10'].to_numpy(dtype='float')
            y2 = s_f2['barkF2_2': 'barkF2_10'].to_numpy(dtype='float')
            coeff1 = np.polyfit(x, y1, 4)
            coeff2 = np.polyfit(x, y2, 4)
            line1 = np.poly1d(coeff1)
            line2 = np.poly1d(coeff2)
            line1dd = np.polyder(line1, 2)
            line2dd = np.polyder(line2, 2)
            # Maximize each 2nd derivative by minimizing its negation on [0, 8].
            line1dd_max = minimize_scalar(-line1dd,
                                           bounds=(0, 8), method='bounded')
            line2dd_max = minimize_scalar(-line2dd,
                                           bounds=(0, 8), method='bounded')
            inflection1 = line1dd_max.x
            inflection2 = line2dd_max.x
            inflection1y = line1(inflection1)
            inflection2y = line2(inflection2)
            ax.plot(x, y1, zs=z, zdir='x', c=color, label='F1', linewidth=3.0)
            ax.plot(x, y2, zs=z, zdir='x', c=color, label='F2')
            # Short vertical segments (+/-1) marking each inflection point.
            ax.plot([inflection1, inflection1], [inflection1y-1, inflection1y+1], zs=z, zdir='x', c='black')
            ax.plot([inflection2, inflection2], [inflection2y-1, inflection2y+1], zs=z, zdir='x', c='black')
        ax.set(xticks=range(len(z_label)), xticklabels=z_label)
        plt.title(full_group_name)
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.savefig(output_dir / (full_group_name + '.png'),
                    bbox_inches="tight")
        plt.clf()
        plt.cla()
class FormantRegressionSaAge(FormantRegressionSlicedBase):
    """Sliced regression for Shanghainese 'a' by age."""
    def __init__(self):
        super().__init__('Sa', [filter.IsShanghainese(), filter.IsPosition('a')], GetAge)
class FormantRegressionSbAge(FormantRegressionSlicedBase):
    """Sliced regression for Shanghainese 'b' by age."""
    def __init__(self):
        super().__init__('Sb', [filter.IsShanghainese(), filter.IsPosition('b')], GetAge)
class FormantRegressionMbAge(FormantRegressionSlicedBase):
    """Sliced regression for Mandarin 'b' by age."""
    def __init__(self):
        super().__init__('Mb', [filter.IsMandarin(), filter.IsPosition('b')], GetAge)
class FormantRegressionSaGender(FormantRegressionSlicedBase):
    """Sliced regression for Shanghainese 'a' by gender."""
    def __init__(self):
        super().__init__('Sa', [filter.IsShanghainese(), filter.IsPosition('a')], GetGender)
class FormantRegressionSbGender(FormantRegressionSlicedBase):
    """Sliced regression for Shanghainese 'b' by gender."""
    def __init__(self):
        super().__init__('Sb', [filter.IsShanghainese(), filter.IsPosition('b')], GetGender)
class FormantRegressionMbGender(FormantRegressionSlicedBase):
    """Sliced regression for Mandarin 'b' by gender."""
    def __init__(self):
        super().__init__('Mb', [filter.IsMandarin(), filter.IsPosition('b')], GetGender)
class FormantInflectionSlicedBase(Analyzer):
    """100%-stacked bar chart of one formant's inflection position per slice.

    Rows matching *word_filters* are bucketed by slicer.GetSlice(row); for
    each slice the mean track of the configured formant is fitted with a
    quartic and the inflection position (on the 0..8 axis) is shown as
    front/back fractions.
    """
    def __init__(self, formant, word, word_filters, slicer):
        self.formant = formant
        self.word = word
        self.word_filters = word_filters
        # *slicer* is a class (e.g. GetAge); instantiate it here.
        self.slicer = slicer()
    def RunAnalysis(self, df, group_name, output_dir):
        # slice label -> matching rows
        matched_rows_map = {}
        for _, row in df.iterrows():
            is_all_matched = [f.IsMatched(row) for f in self.word_filters]
            if np.all(is_all_matched):
                matched_rows_map.setdefault(self.slicer.GetSlice(row), []).append(row)
        x_all = []
        y_front = []
        y_back = []
        full_group_name = group_name + '@@' + self.formant+'_'+self.word
        for key in self.slicer.GetOrder():
            x_all.append(key)
            if key not in matched_rows_map:
                # Empty slice: keep it on the axis with zero-height bars.
                y_front.append(0)
                y_back.append(0)
                continue
            matched_rows = matched_rows_map[key]
            mdf = pd.DataFrame(matched_rows)
            formant_prefix = 'bark' + self.formant
            f = mdf.loc[:, mdf.columns.str.startswith(formant_prefix)].mean()
            x = np.arange(0, 9)
            y = f[formant_prefix + '_2': formant_prefix + '_10'].to_numpy(dtype='float')
            coeff = np.polyfit(x, y, 4)
            line = np.poly1d(coeff)
            linedd = np.polyder(line, 2)
            # Maximize the 2nd derivative by minimizing its negation on [0, 8].
            linedd_max = minimize_scalar(-linedd,
                                         bounds=(0, 8), method='bounded')
            inflection = linedd_max.x
            # Normalize the inflection position to a fraction of the 0..8 axis.
            y_front.append(inflection / 8.0)
            y_back.append(1 - inflection / 8.0)
        full_group_name = group_name + '@@' + self.formant + '_' + self.word
        plt.bar(x_all, y_front, width=kBarWidth, label='Front')
        plt.bar(x_all, y_back, bottom=np.array(y_front), width=kBarWidth, label='Back')
        plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(1))
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.title(full_group_name)
        plt.savefig(output_dir / (full_group_name + '.png'),
                    bbox_inches="tight")
        plt.clf()
        plt.cla()
class FormantInflectionF1SaAge(FormantInflectionSlicedBase):
    """F1 inflection position for Shanghainese 'a', sliced by age."""
    def __init__(self):
        super().__init__('F1', 'Sa', [filter.IsShanghainese(), filter.IsPosition('a')],
                         GetAge)
class FormantInflectionF1SbAge(FormantInflectionSlicedBase):
    """F1 inflection position for Shanghainese 'b', sliced by age."""
    def __init__(self):
        super().__init__('F1', 'Sb', [filter.IsShanghainese(), filter.IsPosition('b')],
                         GetAge)
class FormantInflectionF2SaAge(FormantInflectionSlicedBase):
    """F2 inflection position for Shanghainese 'a', sliced by age."""
    def __init__(self):
        super().__init__('F2', 'Sa', [filter.IsShanghainese(), filter.IsPosition('a')],
                         GetAge)
class FormantInflectionF2SbAge(FormantInflectionSlicedBase):
    """F2 inflection position for Shanghainese 'b', sliced by age."""
    def __init__(self):
        super().__init__('F2', 'Sb', [filter.IsShanghainese(), filter.IsPosition('b')],
                         GetAge)
class FormantInflectionF1MbAge(FormantInflectionSlicedBase):
    """F1 inflection position for Mandarin 'b', sliced by age."""
    def __init__(self):
        super().__init__('F1', 'Mb', [filter.IsMandarin(), filter.IsPosition('b')],
                         GetAge)
class FormantInflectionF2MbAge(FormantInflectionSlicedBase):
    """F2 inflection position for Mandarin 'b', sliced by age."""
    def __init__(self):
        super().__init__('F2', 'Mb', [filter.IsMandarin(), filter.IsPosition('b')],
                         GetAge)
class FormantInflectionF1SaGender(FormantInflectionSlicedBase):
    """F1 inflection position for Shanghainese 'a', sliced by gender."""
    def __init__(self):
        super().__init__('F1', 'Sa', [filter.IsShanghainese(), filter.IsPosition('a')],
                         GetGender)
class FormantInflectionF1SbGender(FormantInflectionSlicedBase):
    """F1 inflection position for Shanghainese 'b', sliced by gender."""
    def __init__(self):
        super().__init__('F1', 'Sb', [filter.IsShanghainese(), filter.IsPosition('b')],
                         GetGender)
class FormantInflectionF2SaGender(FormantInflectionSlicedBase):
    """F2 inflection position for Shanghainese 'a', sliced by gender."""
    def __init__(self):
        super().__init__('F2', 'Sa', [filter.IsShanghainese(), filter.IsPosition('a')],
                         GetGender)
class FormantInflectionF2SbGender(FormantInflectionSlicedBase):
    """F2 inflection position for Shanghainese 'b', sliced by gender."""
    def __init__(self):
        super().__init__('F2', 'Sb', [filter.IsShanghainese(), filter.IsPosition('b')],
                         GetGender)
class FormantInflectionF1MbGender(FormantInflectionSlicedBase):
    """F1 inflection position for Mandarin 'b', sliced by gender."""
    def __init__(self):
        super().__init__('F1', 'Mb', [filter.IsMandarin(), filter.IsPosition('b')],
                         GetGender)
class FormantInflectionF2MbGender(FormantInflectionSlicedBase):
    """F2 inflection analysis for Mandarin tokens at position 'b', sliced by gender."""

    def __init__(self):
        row_filters = [filter.IsMandarin(), filter.IsPosition('b')]
        super().__init__('F2', 'Mb', row_filters, GetGender)
"numpy.polyfit",
"matplotlib.ticker.PercentFormatter",
"numpy.array",
"numpy.poly1d",
"numpy.arange",
"matplotlib.pyplot.plot",
"filter.IsShanghainese",
"scipy.optimize.minimize_scalar",
"pandas.DataFrame",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"numpy... | [((360, 376), 'numpy.arange', 'np.arange', (['(2)', '(11)'], {}), '(2, 11)\n', (369, 376), True, 'import numpy as np\n'), ((499, 518), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(4)'], {}), '(x, y, 4)\n', (509, 518), True, 'import numpy as np\n'), ((531, 547), 'numpy.poly1d', 'np.poly1d', (['coeff'], {}), '(coeff)\n', (540, 547), True, 'import numpy as np\n'), ((561, 581), 'numpy.polyder', 'np.polyder', (['line1', '(1)'], {}), '(line1, 1)\n', (571, 581), True, 'import numpy as np\n'), ((596, 616), 'numpy.polyder', 'np.polyder', (['line1', '(2)'], {}), '(line1, 2)\n', (606, 616), True, 'import numpy as np\n'), ((635, 694), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['(-line1dd)'], {'bounds': '(2, 10)', 'method': '"""bounded"""'}), "(-line1dd, bounds=(2, 10), method='bounded')\n", (650, 694), False, 'from scipy.optimize import minimize_scalar\n'), ((731, 750), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {}), "(x, y, 'o')\n", (739, 750), True, 'from matplotlib import pyplot as plt\n'), ((895, 960), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'inflection', 'linestyle': '"""dashed"""', 'label': '"""inflection"""'}), "(x=inflection, linestyle='dashed', label='inflection')\n", (906, 960), True, 'from matplotlib import pyplot as plt\n'), ((965, 987), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (975, 987), True, 'from matplotlib import pyplot as plt\n'), ((992, 1006), 'matplotlib.pyplot.title', 'plt.title', (['key'], {}), '(key)\n', (1001, 1006), True, 'from matplotlib import pyplot as plt\n'), ((1028, 1067), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outputDir / (key + '.png'))"], {}), "(outputDir / (key + '.png'))\n", (1039, 1067), True, 'from matplotlib import pyplot as plt\n'), ((1072, 1081), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1079, 1081), True, 'from matplotlib import pyplot as plt\n'), ((1086, 1095), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), 
'()\n', (1093, 1095), True, 'from matplotlib import pyplot as plt\n'), ((1175, 1233), 'pandas.Series', 'pd.Series', (['inflection'], {'index': "['Inflection_' + formantName]"}), "(inflection, index=['Inflection_' + formantName])\n", (1184, 1233), True, 'import pandas as pd\n'), ((6037, 6052), 'numpy.arange', 'np.arange', (['(0)', '(9)'], {}), '(0, 9)\n', (6046, 6052), True, 'import numpy as np\n'), ((6204, 6224), 'numpy.polyfit', 'np.polyfit', (['x', 'y1', '(4)'], {}), '(x, y1, 4)\n', (6214, 6224), True, 'import numpy as np\n'), ((6242, 6262), 'numpy.polyfit', 'np.polyfit', (['x', 'y2', '(4)'], {}), '(x, y2, 4)\n', (6252, 6262), True, 'import numpy as np\n'), ((6279, 6296), 'numpy.poly1d', 'np.poly1d', (['coeff1'], {}), '(coeff1)\n', (6288, 6296), True, 'import numpy as np\n'), ((6313, 6330), 'numpy.poly1d', 'np.poly1d', (['coeff2'], {}), '(coeff2)\n', (6322, 6330), True, 'import numpy as np\n'), ((6429, 6449), 'numpy.polyder', 'np.polyder', (['line1', '(2)'], {}), '(line1, 2)\n', (6439, 6449), True, 'import numpy as np\n'), ((6468, 6488), 'numpy.polyder', 'np.polyder', (['line2', '(2)'], {}), '(line2, 2)\n', (6478, 6488), True, 'import numpy as np\n'), ((6511, 6569), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['(-line1dd)'], {'bounds': '(0, 8)', 'method': '"""bounded"""'}), "(-line1dd, bounds=(0, 8), method='bounded')\n", (6526, 6569), False, 'from scipy.optimize import minimize_scalar\n'), ((6630, 6688), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['(-line2dd)'], {'bounds': '(0, 8)', 'method': '"""bounded"""'}), "(-line2dd, bounds=(0, 8), method='bounded')\n", (6645, 6688), False, 'from scipy.optimize import minimize_scalar\n'), ((6819, 6907), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'f1_inflection': [inflection1], 'f2_inflection': [inflection2]}"}), "(data={'f1_inflection': [inflection1], 'f2_inflection': [\n inflection2]})\n", (6831, 6907), True, 'import pandas as pd\n'), ((7020, 7040), 'matplotlib.pyplot.plot', 'plt.plot', 
(['x', 'y1', '"""o"""'], {}), "(x, y1, 'o')\n", (7028, 7040), True, 'from matplotlib import pyplot as plt\n'), ((7049, 7069), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2', '"""x"""'], {}), "(x, y2, 'x')\n", (7057, 7069), True, 'from matplotlib import pyplot as plt\n'), ((7176, 7230), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left')\n", (7186, 7230), True, 'from matplotlib import pyplot as plt\n'), ((7239, 7260), 'matplotlib.pyplot.title', 'plt.title', (['group_name'], {}), '(group_name)\n', (7248, 7260), True, 'from matplotlib import pyplot as plt\n'), ((7269, 7344), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / (group_name + '.fitted.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (group_name + '.fitted.png'), bbox_inches='tight')\n", (7280, 7344), True, 'from matplotlib import pyplot as plt\n'), ((7373, 7382), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7380, 7382), True, 'from matplotlib import pyplot as plt\n'), ((7391, 7400), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (7398, 7400), True, 'from matplotlib import pyplot as plt\n'), ((7663, 7727), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'inflection1', 'linestyle': '""":"""', 'label': '"""F1 inflection"""'}), "(x=inflection1, linestyle=':', label='F1 inflection')\n", (7674, 7727), True, 'from matplotlib import pyplot as plt\n'), ((7736, 7801), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'inflection2', 'linestyle': '"""-."""', 'label': '"""F2 inflection"""'}), "(x=inflection2, linestyle='-.', label='F2 inflection')\n", (7747, 7801), True, 'from matplotlib import pyplot as plt\n'), ((7810, 7864), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left')\n", (7820, 7864), True, 'from matplotlib import pyplot as plt\n'), ((7873, 7894), 
'matplotlib.pyplot.title', 'plt.title', (['group_name'], {}), '(group_name)\n', (7882, 7894), True, 'from matplotlib import pyplot as plt\n'), ((7903, 7982), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / (group_name + '.inflection.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (group_name + '.inflection.png'), bbox_inches='tight')\n", (7914, 7982), True, 'from matplotlib import pyplot as plt\n'), ((8011, 8020), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8018, 8020), True, 'from matplotlib import pyplot as plt\n'), ((8029, 8038), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (8036, 8038), True, 'from matplotlib import pyplot as plt\n'), ((8634, 8649), 'numpy.arange', 'np.arange', (['(0)', '(9)'], {}), '(0, 9)\n', (8643, 8649), True, 'import numpy as np\n'), ((8666, 8685), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(4)'], {}), '(x, y, 4)\n', (8676, 8685), True, 'import numpy as np\n'), ((8702, 8718), 'numpy.poly1d', 'np.poly1d', (['coeff'], {}), '(coeff)\n', (8711, 8718), True, 'import numpy as np\n'), ((8737, 8757), 'numpy.polyder', 'np.polyder', (['line1', '(2)'], {}), '(line1, 2)\n', (8747, 8757), True, 'import numpy as np\n'), ((8780, 8838), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['(-line1dd)'], {'bounds': '(0, 8)', 'method': '"""bounded"""'}), "(-line1dd, bounds=(0, 8), method='bounded')\n", (8795, 8838), False, 'from scipy.optimize import minimize_scalar\n'), ((8932, 8979), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'inflection': [inflection]}"}), "(data={'inflection': [inflection]})\n", (8944, 8979), True, 'import pandas as pd\n'), ((9063, 9082), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {}), "(x, y, 'o')\n", (9071, 9082), True, 'from matplotlib import pyplot as plt\n'), ((9188, 9248), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'inflection', 'linestyle': '""":"""', 'label': '"""inflection"""'}), "(x=inflection, linestyle=':', label='inflection')\n", (9199, 
9248), True, 'from matplotlib import pyplot as plt\n'), ((9257, 9311), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left')\n", (9267, 9311), True, 'from matplotlib import pyplot as plt\n'), ((9320, 9341), 'matplotlib.pyplot.title', 'plt.title', (['group_name'], {}), '(group_name)\n', (9329, 9341), True, 'from matplotlib import pyplot as plt\n'), ((9350, 9418), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / (group_name + '.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (group_name + '.png'), bbox_inches='tight')\n", (9361, 9418), True, 'from matplotlib import pyplot as plt\n'), ((9427, 9436), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9434, 9436), True, 'from matplotlib import pyplot as plt\n'), ((9445, 9454), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (9452, 9454), True, 'from matplotlib import pyplot as plt\n'), ((12032, 12044), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (12041, 12044), True, 'import numpy as np\n'), ((12770, 12824), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left')\n", (12780, 12824), True, 'from matplotlib import pyplot as plt\n'), ((12833, 12854), 'matplotlib.pyplot.title', 'plt.title', (['group_name'], {}), '(group_name)\n', (12842, 12854), True, 'from matplotlib import pyplot as plt\n'), ((12863, 12931), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / (group_name + '.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (group_name + '.png'), bbox_inches='tight')\n", (12874, 12931), True, 'from matplotlib import pyplot as plt\n'), ((12940, 12949), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (12947, 12949), True, 'from matplotlib import pyplot as plt\n'), ((12958, 12967), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (12965, 12967), True, 'from 
matplotlib import pyplot as plt\n'), ((14056, 14077), 'pandas.DataFrame', 'pd.DataFrame', (['rows_sb'], {}), '(rows_sb)\n', (14068, 14077), True, 'import pandas as pd\n'), ((14485, 14506), 'pandas.DataFrame', 'pd.DataFrame', (['rows_mb'], {}), '(rows_mb)\n', (14497, 14506), True, 'import pandas as pd\n'), ((15049, 15079), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y'], {'width': 'kBarWidth'}), '(x, y, width=kBarWidth)\n', (15056, 15079), True, 'from matplotlib import pyplot as plt\n'), ((15089, 15110), 'matplotlib.pyplot.title', 'plt.title', (['group_name'], {}), '(group_name)\n', (15098, 15110), True, 'from matplotlib import pyplot as plt\n'), ((15119, 15187), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / (group_name + '.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (group_name + '.png'), bbox_inches='tight')\n", (15130, 15187), True, 'from matplotlib import pyplot as plt\n'), ((15196, 15205), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (15203, 15205), True, 'from matplotlib import pyplot as plt\n'), ((15214, 15223), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (15221, 15223), True, 'from matplotlib import pyplot as plt\n'), ((15837, 15863), 'pandas.DataFrame', 'pd.DataFrame', (['matched_rows'], {}), '(matched_rows)\n', (15849, 15863), True, 'import pandas as pd\n'), ((16142, 16157), 'numpy.arange', 'np.arange', (['(0)', '(9)'], {}), '(0, 9)\n', (16151, 16157), True, 'import numpy as np\n'), ((16309, 16329), 'numpy.polyfit', 'np.polyfit', (['x', 'y1', '(4)'], {}), '(x, y1, 4)\n', (16319, 16329), True, 'import numpy as np\n'), ((16347, 16367), 'numpy.polyfit', 'np.polyfit', (['x', 'y2', '(4)'], {}), '(x, y2, 4)\n', (16357, 16367), True, 'import numpy as np\n'), ((16384, 16401), 'numpy.poly1d', 'np.poly1d', (['coeff1'], {}), '(coeff1)\n', (16393, 16401), True, 'import numpy as np\n'), ((16418, 16435), 'numpy.poly1d', 'np.poly1d', (['coeff2'], {}), '(coeff2)\n', (16427, 16435), True, 'import numpy as np\n'), ((16454, 
16474), 'numpy.polyder', 'np.polyder', (['line1', '(2)'], {}), '(line1, 2)\n', (16464, 16474), True, 'import numpy as np\n'), ((16493, 16513), 'numpy.polyder', 'np.polyder', (['line2', '(2)'], {}), '(line2, 2)\n', (16503, 16513), True, 'import numpy as np\n'), ((16620, 16678), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['(-line1dd)'], {'bounds': '(0, 8)', 'method': '"""bounded"""'}), "(-line1dd, bounds=(0, 8), method='bounded')\n", (16635, 16678), False, 'from scipy.optimize import minimize_scalar\n'), ((16739, 16797), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['(-line2dd)'], {'bounds': '(0, 8)', 'method': '"""bounded"""'}), "(-line2dd, bounds=(0, 8), method='bounded')\n", (16754, 16797), False, 'from scipy.optimize import minimize_scalar\n'), ((17694, 17782), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'f1_inflection': [inflection1], 'f2_inflection': [inflection2]}"}), "(data={'f1_inflection': [inflection1], 'f2_inflection': [\n inflection2]})\n", (17706, 17782), True, 'import pandas as pd\n'), ((17900, 17920), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1', '"""o"""'], {}), "(x, y1, 'o')\n", (17908, 17920), True, 'from matplotlib import pyplot as plt\n'), ((17929, 17949), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2', '"""x"""'], {}), "(x, y2, 'x')\n", (17937, 17949), True, 'from matplotlib import pyplot as plt\n'), ((18056, 18110), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left')\n", (18066, 18110), True, 'from matplotlib import pyplot as plt\n'), ((18119, 18145), 'matplotlib.pyplot.title', 'plt.title', (['full_group_name'], {}), '(full_group_name)\n', (18128, 18145), True, 'from matplotlib import pyplot as plt\n'), ((18154, 18239), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / (full_group_name + '.fitted.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (full_group_name + '.fitted.png'), 
bbox_inches='tight'\n )\n", (18165, 18239), True, 'from matplotlib import pyplot as plt\n'), ((18263, 18272), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (18270, 18272), True, 'from matplotlib import pyplot as plt\n'), ((18281, 18290), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (18288, 18290), True, 'from matplotlib import pyplot as plt\n'), ((18443, 18507), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'inflection1', 'linestyle': '""":"""', 'label': '"""F1 inflection"""'}), "(x=inflection1, linestyle=':', label='F1 inflection')\n", (18454, 18507), True, 'from matplotlib import pyplot as plt\n'), ((18516, 18581), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'inflection2', 'linestyle': '"""-."""', 'label': '"""F2 inflection"""'}), "(x=inflection2, linestyle='-.', label='F2 inflection')\n", (18527, 18581), True, 'from matplotlib import pyplot as plt\n'), ((18590, 18644), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left')\n", (18600, 18644), True, 'from matplotlib import pyplot as plt\n'), ((18653, 18679), 'matplotlib.pyplot.title', 'plt.title', (['full_group_name'], {}), '(full_group_name)\n', (18662, 18679), True, 'from matplotlib import pyplot as plt\n'), ((18688, 18777), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / (full_group_name + '.inflection.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (full_group_name + '.inflection.png'), bbox_inches\n ='tight')\n", (18699, 18777), True, 'from matplotlib import pyplot as plt\n'), ((18801, 18810), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (18808, 18810), True, 'from matplotlib import pyplot as plt\n'), ((18819, 18828), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (18826, 18828), True, 'from matplotlib import pyplot as plt\n'), ((22166, 22222), 'matplotlib.pyplot.bar', 'plt.bar', (['x_all', 'f1_front'], {'width': 'kBarWidth', 'label': 
'"""Front"""'}), "(x_all, f1_front, width=kBarWidth, label='Front')\n", (22173, 22222), True, 'from matplotlib import pyplot as plt\n'), ((22405, 22441), 'matplotlib.pyplot.title', 'plt.title', (["(full_group_name + '@@@F1')"], {}), "(full_group_name + '@@@F1')\n", (22414, 22441), True, 'from matplotlib import pyplot as plt\n'), ((22448, 22502), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left')\n", (22458, 22502), True, 'from matplotlib import pyplot as plt\n'), ((22511, 22587), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / (full_group_name + '.f1.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (full_group_name + '.f1.png'), bbox_inches='tight')\n", (22522, 22587), True, 'from matplotlib import pyplot as plt\n'), ((22596, 22605), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (22603, 22605), True, 'from matplotlib import pyplot as plt\n'), ((22614, 22623), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (22621, 22623), True, 'from matplotlib import pyplot as plt\n'), ((22633, 22689), 'matplotlib.pyplot.bar', 'plt.bar', (['x_all', 'f2_front'], {'width': 'kBarWidth', 'label': '"""Front"""'}), "(x_all, f2_front, width=kBarWidth, label='Front')\n", (22640, 22689), True, 'from matplotlib import pyplot as plt\n'), ((22872, 22908), 'matplotlib.pyplot.title', 'plt.title', (["(full_group_name + '@@@F2')"], {}), "(full_group_name + '@@@F2')\n", (22881, 22908), True, 'from matplotlib import pyplot as plt\n'), ((22915, 22969), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left')\n", (22925, 22969), True, 'from matplotlib import pyplot as plt\n'), ((22978, 23054), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / (full_group_name + '.f2.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (full_group_name + '.f2.png'), 
bbox_inches='tight')\n", (22989, 23054), True, 'from matplotlib import pyplot as plt\n'), ((23063, 23072), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (23070, 23072), True, 'from matplotlib import pyplot as plt\n'), ((23081, 23090), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (23088, 23090), True, 'from matplotlib import pyplot as plt\n'), ((25970, 25983), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y'], {}), '(x, y)\n', (25977, 25983), True, 'from matplotlib import pyplot as plt\n'), ((25992, 26018), 'matplotlib.pyplot.title', 'plt.title', (['full_group_name'], {}), '(full_group_name)\n', (26001, 26018), True, 'from matplotlib import pyplot as plt\n'), ((26027, 26100), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / (full_group_name + '.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (full_group_name + '.png'), bbox_inches='tight')\n", (26038, 26100), True, 'from matplotlib import pyplot as plt\n'), ((26129, 26138), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (26136, 26138), True, 'from matplotlib import pyplot as plt\n'), ((26147, 26156), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (26154, 26156), True, 'from matplotlib import pyplot as plt\n'), ((29234, 29246), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (29244, 29246), True, 'from matplotlib import pyplot as plt\n'), ((29354, 29377), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (29366, 29377), True, 'from matplotlib import pyplot as plt\n'), ((31244, 31270), 'matplotlib.pyplot.title', 'plt.title', (['full_group_name'], {}), '(full_group_name)\n', (31253, 31270), True, 'from matplotlib import pyplot as plt\n'), ((31279, 31333), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left')\n", (31289, 31333), True, 'from matplotlib import pyplot as plt\n'), ((31342, 31415), 'matplotlib.pyplot.savefig', 
'plt.savefig', (["(output_dir / (full_group_name + '.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (full_group_name + '.png'), bbox_inches='tight')\n", (31353, 31415), True, 'from matplotlib import pyplot as plt\n'), ((31444, 31453), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (31451, 31453), True, 'from matplotlib import pyplot as plt\n'), ((31462, 31471), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (31469, 31471), True, 'from matplotlib import pyplot as plt\n'), ((34227, 34282), 'matplotlib.pyplot.bar', 'plt.bar', (['x_all', 'y_front'], {'width': 'kBarWidth', 'label': '"""Front"""'}), "(x_all, y_front, width=kBarWidth, label='Front')\n", (34234, 34282), True, 'from matplotlib import pyplot as plt\n'), ((34450, 34504), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left')\n", (34460, 34504), True, 'from matplotlib import pyplot as plt\n'), ((34513, 34539), 'matplotlib.pyplot.title', 'plt.title', (['full_group_name'], {}), '(full_group_name)\n', (34522, 34539), True, 'from matplotlib import pyplot as plt\n'), ((34548, 34621), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / (full_group_name + '.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (full_group_name + '.png'), bbox_inches='tight')\n", (34559, 34621), True, 'from matplotlib import pyplot as plt\n'), ((34650, 34659), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (34657, 34659), True, 'from matplotlib import pyplot as plt\n'), ((34668, 34677), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (34675, 34677), True, 'from matplotlib import pyplot as plt\n'), ((3778, 3790), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (3787, 3790), True, 'import numpy as np\n'), ((4341, 4395), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.04, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.04, 1), loc='upper left')\n", (4351, 
4395), True, 'from matplotlib import pyplot as plt\n'), ((4512, 4526), 'matplotlib.pyplot.title', 'plt.title', (['key'], {}), '(key)\n', (4521, 4526), True, 'from matplotlib import pyplot as plt\n'), ((4539, 4624), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / (group_name + '@' + key + '.png'))"], {'bbox_inches': '"""tight"""'}), "(output_dir / (group_name + '@' + key + '.png'), bbox_inches='tight'\n )\n", (4550, 4624), True, 'from matplotlib import pyplot as plt\n'), ((4670, 4679), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4677, 4679), True, 'from matplotlib import pyplot as plt\n'), ((4692, 4701), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4699, 4701), True, 'from matplotlib import pyplot as plt\n'), ((12000, 12018), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (12012, 12018), True, 'import pandas as pd\n'), ((12590, 12631), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y'], {'width': 'kBarWidth', 'label': 'key'}), '(x, y, width=kBarWidth, label=key)\n', (12597, 12631), True, 'from matplotlib import pyplot as plt\n'), ((15759, 15781), 'numpy.all', 'np.all', (['is_all_matched'], {}), '(is_all_matched)\n', (15765, 15781), True, 'import numpy as np\n'), ((20786, 20804), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (20798, 20804), True, 'import pandas as pd\n'), ((21121, 21136), 'numpy.arange', 'np.arange', (['(0)', '(9)'], {}), '(0, 9)\n', (21130, 21136), True, 'import numpy as np\n'), ((21300, 21320), 'numpy.polyfit', 'np.polyfit', (['x', 'y1', '(4)'], {}), '(x, y1, 4)\n', (21310, 21320), True, 'import numpy as np\n'), ((21342, 21362), 'numpy.polyfit', 'np.polyfit', (['x', 'y2', '(4)'], {}), '(x, y2, 4)\n', (21352, 21362), True, 'import numpy as np\n'), ((21383, 21400), 'numpy.poly1d', 'np.poly1d', (['coeff1'], {}), '(coeff1)\n', (21392, 21400), True, 'import numpy as np\n'), ((21421, 21438), 'numpy.poly1d', 'np.poly1d', (['coeff2'], {}), '(coeff2)\n', (21430, 21438), True, 'import numpy as 
np\n'), ((21461, 21481), 'numpy.polyder', 'np.polyder', (['line1', '(2)'], {}), '(line1, 2)\n', (21471, 21481), True, 'import numpy as np\n'), ((21504, 21524), 'numpy.polyder', 'np.polyder', (['line2', '(2)'], {}), '(line2, 2)\n', (21514, 21524), True, 'import numpy as np\n'), ((21551, 21609), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['(-line1dd)'], {'bounds': '(0, 8)', 'method': '"""bounded"""'}), "(-line1dd, bounds=(0, 8), method='bounded')\n", (21566, 21609), False, 'from scipy.optimize import minimize_scalar\n'), ((21678, 21736), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['(-line2dd)'], {'bounds': '(0, 8)', 'method': '"""bounded"""'}), "(-line2dd, bounds=(0, 8), method='bounded')\n", (21693, 21736), False, 'from scipy.optimize import minimize_scalar\n'), ((22370, 22395), 'matplotlib.ticker.PercentFormatter', 'mtick.PercentFormatter', (['(1)'], {}), '(1)\n', (22392, 22395), True, 'import matplotlib.ticker as mtick\n'), ((22837, 22862), 'matplotlib.ticker.PercentFormatter', 'mtick.PercentFormatter', (['(1)'], {}), '(1)\n', (22859, 22862), True, 'import matplotlib.ticker as mtick\n'), ((24973, 24995), 'numpy.all', 'np.all', (['is_all_matched'], {}), '(is_all_matched)\n', (24979, 24995), True, 'import numpy as np\n'), ((25419, 25445), 'pandas.DataFrame', 'pd.DataFrame', (['matched_rows'], {}), '(matched_rows)\n', (25431, 25445), True, 'import pandas as pd\n'), ((29052, 29074), 'numpy.all', 'np.all', (['is_all_matched'], {}), '(is_all_matched)\n', (29058, 29074), True, 'import numpy as np\n'), ((29492, 29507), 'numpy.arange', 'np.arange', (['(0)', '(9)'], {}), '(0, 9)\n', (29501, 29507), True, 'import numpy as np\n'), ((29819, 29845), 'pandas.DataFrame', 'pd.DataFrame', (['matched_rows'], {}), '(matched_rows)\n', (29831, 29845), True, 'import pandas as pd\n'), ((30159, 30179), 'numpy.polyfit', 'np.polyfit', (['x', 'y1', '(4)'], {}), '(x, y1, 4)\n', (30169, 30179), True, 'import numpy as np\n'), ((30201, 30221), 'numpy.polyfit', 'np.polyfit', 
(['x', 'y2', '(4)'], {}), '(x, y2, 4)\n', (30211, 30221), True, 'import numpy as np\n'), ((30242, 30259), 'numpy.poly1d', 'np.poly1d', (['coeff1'], {}), '(coeff1)\n', (30251, 30259), True, 'import numpy as np\n'), ((30280, 30297), 'numpy.poly1d', 'np.poly1d', (['coeff2'], {}), '(coeff2)\n', (30289, 30297), True, 'import numpy as np\n'), ((30320, 30340), 'numpy.polyder', 'np.polyder', (['line1', '(2)'], {}), '(line1, 2)\n', (30330, 30340), True, 'import numpy as np\n'), ((30363, 30383), 'numpy.polyder', 'np.polyder', (['line2', '(2)'], {}), '(line2, 2)\n', (30373, 30383), True, 'import numpy as np\n'), ((30410, 30468), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['(-line1dd)'], {'bounds': '(0, 8)', 'method': '"""bounded"""'}), "(-line1dd, bounds=(0, 8), method='bounded')\n", (30425, 30468), False, 'from scipy.optimize import minimize_scalar\n'), ((30537, 30595), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['(-line2dd)'], {'bounds': '(0, 8)', 'method': '"""bounded"""'}), "(-line2dd, bounds=(0, 8), method='bounded')\n", (30552, 30595), False, 'from scipy.optimize import minimize_scalar\n'), ((32976, 32998), 'numpy.all', 'np.all', (['is_all_matched'], {}), '(is_all_matched)\n', (32982, 32998), True, 'import numpy as np\n'), ((33491, 33517), 'pandas.DataFrame', 'pd.DataFrame', (['matched_rows'], {}), '(matched_rows)\n', (33503, 33517), True, 'import pandas as pd\n'), ((33663, 33678), 'numpy.arange', 'np.arange', (['(0)', '(9)'], {}), '(0, 9)\n', (33672, 33678), True, 'import numpy as np\n'), ((33788, 33807), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(4)'], {}), '(x, y, 4)\n', (33798, 33807), True, 'import numpy as np\n'), ((33827, 33843), 'numpy.poly1d', 'np.poly1d', (['coeff'], {}), '(coeff)\n', (33836, 33843), True, 'import numpy as np\n'), ((33865, 33884), 'numpy.polyder', 'np.polyder', (['line', '(2)'], {}), '(line, 2)\n', (33875, 33884), True, 'import numpy as np\n'), ((33910, 33967), 'scipy.optimize.minimize_scalar', 'minimize_scalar', 
(['(-linedd)'], {'bounds': '(0, 8)', 'method': '"""bounded"""'}), "(-linedd, bounds=(0, 8), method='bounded')\n", (33925, 33967), False, 'from scipy.optimize import minimize_scalar\n'), ((34415, 34440), 'matplotlib.ticker.PercentFormatter', 'mtick.PercentFormatter', (['(1)'], {}), '(1)\n', (34437, 34440), True, 'import matplotlib.ticker as mtick\n'), ((3866, 3881), 'pandas.DataFrame', 'pd.DataFrame', (['v'], {}), '(v)\n', (3878, 3881), True, 'import pandas as pd\n'), ((4236, 4275), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y'], {'width': 'kBarWidth', 'label': 'k'}), '(x, y, width=kBarWidth, label=k)\n', (4243, 4275), True, 'from matplotlib import pyplot as plt\n'), ((11817, 11839), 'numpy.all', 'np.all', (['is_all_matched'], {}), '(is_all_matched)\n', (11823, 11839), True, 'import numpy as np\n'), ((19784, 19807), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (19805, 19807), False, 'import filter\n'), ((19809, 19831), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (19826, 19831), False, 'import filter\n'), ((19936, 19959), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (19957, 19959), False, 'import filter\n'), ((19961, 19983), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (19978, 19983), False, 'import filter\n'), ((20088, 20107), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (20105, 20107), False, 'import filter\n'), ((20109, 20131), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (20126, 20131), False, 'import filter\n'), ((20603, 20625), 'numpy.all', 'np.all', (['is_all_matched'], {}), '(is_all_matched)\n', (20609, 20625), True, 'import numpy as np\n'), ((22262, 22280), 'numpy.array', 'np.array', (['f1_front'], {}), '(f1_front)\n', (22270, 22280), True, 'import numpy as np\n'), ((22729, 22747), 'numpy.array', 'np.array', (['f2_front'], {}), '(f2_front)\n', (22737, 22747), True, 'import numpy as np\n'), ((26279, 26302), 
'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (26300, 26302), False, 'import filter\n'), ((26304, 26326), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (26321, 26326), False, 'import filter\n'), ((26485, 26508), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (26506, 26508), False, 'import filter\n'), ((26510, 26532), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (26527, 26532), False, 'import filter\n'), ((26691, 26714), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (26712, 26714), False, 'import filter\n'), ((26716, 26738), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (26733, 26738), False, 'import filter\n'), ((26897, 26920), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (26918, 26920), False, 'import filter\n'), ((26922, 26944), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (26939, 26944), False, 'import filter\n'), ((27103, 27122), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (27120, 27122), False, 'import filter\n'), ((27124, 27146), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (27141, 27146), False, 'import filter\n'), ((27305, 27324), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (27322, 27324), False, 'import filter\n'), ((27326, 27348), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (27343, 27348), False, 'import filter\n'), ((27509, 27532), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (27530, 27532), False, 'import filter\n'), ((27534, 27556), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (27551, 27556), False, 'import filter\n'), ((27720, 27743), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (27741, 27743), False, 'import filter\n'), ((27745, 27767), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), 
"('b')\n", (27762, 27767), False, 'import filter\n'), ((27931, 27954), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (27952, 27954), False, 'import filter\n'), ((27956, 27978), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (27973, 27978), False, 'import filter\n'), ((28142, 28165), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (28163, 28165), False, 'import filter\n'), ((28167, 28189), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (28184, 28189), False, 'import filter\n'), ((28354, 28373), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (28371, 28373), False, 'import filter\n'), ((28375, 28397), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (28392, 28397), False, 'import filter\n'), ((28562, 28581), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (28579, 28581), False, 'import filter\n'), ((28583, 28605), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (28600, 28605), False, 'import filter\n'), ((29658, 29669), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (29666, 29669), True, 'import numpy as np\n'), ((31588, 31611), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (31609, 31611), False, 'import filter\n'), ((31613, 31635), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (31630, 31635), False, 'import filter\n'), ((31762, 31785), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (31783, 31785), False, 'import filter\n'), ((31787, 31809), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (31804, 31809), False, 'import filter\n'), ((31936, 31955), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (31953, 31955), False, 'import filter\n'), ((31957, 31979), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (31974, 31979), False, 'import filter\n'), ((32109, 32132), 
'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (32130, 32132), False, 'import filter\n'), ((32134, 32156), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (32151, 32156), False, 'import filter\n'), ((32289, 32312), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (32310, 32312), False, 'import filter\n'), ((32314, 32336), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (32331, 32336), False, 'import filter\n'), ((32469, 32488), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (32486, 32488), False, 'import filter\n'), ((32490, 32512), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (32507, 32512), False, 'import filter\n'), ((34321, 34338), 'numpy.array', 'np.array', (['y_front'], {}), '(y_front)\n', (34329, 34338), True, 'import numpy as np\n'), ((34802, 34825), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (34823, 34825), False, 'import filter\n'), ((34827, 34849), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (34844, 34849), False, 'import filter\n'), ((35009, 35032), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (35030, 35032), False, 'import filter\n'), ((35034, 35056), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (35051, 35056), False, 'import filter\n'), ((35216, 35239), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (35237, 35239), False, 'import filter\n'), ((35241, 35263), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (35258, 35263), False, 'import filter\n'), ((35423, 35446), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (35444, 35446), False, 'import filter\n'), ((35448, 35470), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (35465, 35470), False, 'import filter\n'), ((35630, 35649), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', 
(35647, 35649), False, 'import filter\n'), ((35651, 35673), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (35668, 35673), False, 'import filter\n'), ((35833, 35852), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (35850, 35852), False, 'import filter\n'), ((35854, 35876), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (35871, 35876), False, 'import filter\n'), ((36039, 36062), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (36060, 36062), False, 'import filter\n'), ((36064, 36086), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (36081, 36086), False, 'import filter\n'), ((36252, 36275), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (36273, 36275), False, 'import filter\n'), ((36277, 36299), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (36294, 36299), False, 'import filter\n'), ((36465, 36488), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (36486, 36488), False, 'import filter\n'), ((36490, 36512), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (36507, 36512), False, 'import filter\n'), ((36678, 36701), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (36699, 36701), False, 'import filter\n'), ((36703, 36725), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (36720, 36725), False, 'import filter\n'), ((36891, 36910), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (36908, 36910), False, 'import filter\n'), ((36912, 36934), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (36929, 36934), False, 'import filter\n'), ((37100, 37119), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (37117, 37119), False, 'import filter\n'), ((37121, 37143), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (37138, 37143), False, 'import filter\n'), ((13098, 13121), 
'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (13119, 13121), False, 'import filter\n'), ((13123, 13145), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (13140, 13145), False, 'import filter\n'), ((13167, 13190), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (13188, 13190), False, 'import filter\n'), ((13192, 13214), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (13209, 13214), False, 'import filter\n'), ((13358, 13381), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (13379, 13381), False, 'import filter\n'), ((13383, 13405), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (13400, 13405), False, 'import filter\n'), ((13427, 13446), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (13444, 13446), False, 'import filter\n'), ((13448, 13470), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (13465, 13470), False, 'import filter\n'), ((22334, 22343), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22341, 22343), True, 'from matplotlib import pyplot as plt\n'), ((22801, 22810), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22808, 22810), True, 'from matplotlib import pyplot as plt\n'), ((23200, 23219), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (23217, 23219), False, 'import filter\n'), ((23221, 23243), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (23238, 23243), False, 'import filter\n'), ((23371, 23394), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (23392, 23394), False, 'import filter\n'), ((23396, 23418), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (23413, 23418), False, 'import filter\n'), ((23440, 23459), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (23457, 23459), False, 'import filter\n'), ((23461, 23483), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), 
"('b')\n", (23478, 23483), False, 'import filter\n'), ((23619, 23642), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (23640, 23642), False, 'import filter\n'), ((23644, 23666), 'filter.IsPosition', 'filter.IsPosition', (['"""a"""'], {}), "('a')\n", (23661, 23666), False, 'import filter\n'), ((23688, 23711), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (23709, 23711), False, 'import filter\n'), ((23713, 23735), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (23730, 23735), False, 'import filter\n'), ((34379, 34388), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (34386, 34388), True, 'from matplotlib import pyplot as plt\n'), ((13742, 13765), 'filter.IsShanghainese', 'filter.IsShanghainese', ([], {}), '()\n', (13763, 13765), False, 'import filter\n'), ((13785, 13807), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (13802, 13807), False, 'import filter\n'), ((13900, 13919), 'filter.IsMandarin', 'filter.IsMandarin', ([], {}), '()\n', (13917, 13919), False, 'import filter\n'), ((13939, 13961), 'filter.IsPosition', 'filter.IsPosition', (['"""b"""'], {}), "('b')\n", (13956, 13961), False, 'import filter\n')] |
#
# author: <NAME> (<EMAIL>)
# last updated: September 24, 2020
#
"""test_utils_common"""
import typing
import pytest
import numpy as np
from bayeso.utils import utils_common as package_target
TEST_EPSILON = 1e-5
def test_get_grids_typing():
    """Check that get_grids carries the expected type annotations."""
    hints = package_target.get_grids.__annotations__
    expected = {
        'ranges': np.ndarray,
        'num_grids': int,
        'return': np.ndarray,
    }
    for name, annotation in expected.items():
        assert hints[name] == annotation
def test_get_grids():
    """Test input validation and grid construction of get_grids.

    Two cases are exercised: a range matrix with distinct bounds per
    dimension, and one whose last two dimensions are degenerate
    (min == max), which must collapse to constant columns.
    """
    arr_range_1 = np.array([
        [0.0, 10.0],
        [-2.0, 2.0],
        [-5.0, 5.0],
    ])
    arr_range_2 = np.array([
        [0.0, 10.0],
        [2.0, 2.0],
        [5.0, 5.0],
    ])
    truth_arr_grid_1 = np.array([
        [0., -2., -5.],
        [0., -2., 0.],
        [0., -2., 5.],
        [5., -2., -5.],
        [5., -2., 0.],
        [5., -2., 5.],
        [10., -2., -5.],
        [10., -2., 0.],
        [10., -2., 5.],
        [0., 0., -5.],
        [0., 0., 0.],
        [0., 0., 5.],
        [5., 0., -5.],
        [5., 0., 0.],
        [5., 0., 5.],
        [10., 0., -5.],
        [10., 0., 0.],
        [10., 0., 5.],
        [0., 2., -5.],
        [0., 2., 0.],
        [0., 2., 5.],
        [5., 2., -5.],
        [5., 2., 0.],
        [5., 2., 5.],
        [10., 2., -5.],
        [10., 2., 0.],
        [10., 2., 5.],
    ])
    truth_arr_grid_2 = np.array([
        [0., 2., 5.],
        [0., 2., 5.],
        [0., 2., 5.],
        [5., 2., 5.],
        [5., 2., 5.],
        [5., 2., 5.],
        [10., 2., 5.],
        [10., 2., 5.],
        [10., 2., 5.],
        [0., 2., 5.],
        [0., 2., 5.],
        [0., 2., 5.],
        [5., 2., 5.],
        [5., 2., 5.],
        [5., 2., 5.],
        [10., 2., 5.],
        [10., 2., 5.],
        [10., 2., 5.],
        [0., 2., 5.],
        [0., 2., 5.],
        [0., 2., 5.],
        [5., 2., 5.],
        [5., 2., 5.],
        [5., 2., 5.],
        [10., 2., 5.],
        [10., 2., 5.],
        [10., 2., 5.],
    ])

    # Invalid arguments must be rejected with AssertionError.
    # (The `as error` bindings of the original were unused and are dropped.)
    with pytest.raises(AssertionError):
        package_target.get_grids('abc', 3)
    with pytest.raises(AssertionError):
        package_target.get_grids(arr_range_1, 'abc')
    with pytest.raises(AssertionError):
        package_target.get_grids(np.arange(0, 10), 3)
    with pytest.raises(AssertionError):
        package_target.get_grids(np.ones((3, 3)), 3)
    with pytest.raises(AssertionError):
        # Lower bound above upper bound is not a valid range.
        package_target.get_grids(np.array([[0.0, -2.0], [10.0, 20.0]]), 3)

    arr_grid_1 = package_target.get_grids(arr_range_1, 3)
    arr_grid_2 = package_target.get_grids(arr_range_2, 3)

    assert (arr_grid_1 == truth_arr_grid_1).all()
    assert (arr_grid_2 == truth_arr_grid_2).all()
def test_get_minimum_typing():
    """Check that get_minimum carries the expected type annotations."""
    hints = package_target.get_minimum.__annotations__
    expected = {
        'Y_all': np.ndarray,
        'num_init': int,
        'return': typing.Tuple[np.ndarray, np.ndarray, np.ndarray],
    }
    for name, annotation in expected.items():
        assert hints[name] == annotation
def test_get_minimum():
    """Test input validation, output shapes and values of get_minimum.

    get_minimum is expected to return a (num_exp, 1 + num_data) matrix of
    running minima plus its column-wise mean and standard deviation.
    """
    # Invalid argument types must be rejected with AssertionError.
    # (The `as error` bindings of the original were unused and are dropped.)
    with pytest.raises(AssertionError):
        package_target.get_minimum(1.2, 2.1)
    with pytest.raises(AssertionError):
        package_target.get_minimum(1.2, 3)

    num_init = 3
    num_exp = 3
    num_data = 10
    all_data = np.zeros((num_exp, num_init + num_data))

    with pytest.raises(AssertionError):
        package_target.get_minimum(all_data, 2.1)

    cur_minimum, cur_mean, cur_std = package_target.get_minimum(all_data, num_init)

    assert len(cur_minimum.shape) == 2
    assert cur_minimum.shape == (num_exp, 1 + num_data)
    assert len(cur_mean.shape) == 1
    assert cur_mean.shape == (1 + num_data, )
    assert len(cur_std.shape) == 1
    assert cur_std.shape == (1 + num_data, )

    # An array with fewer columns than num_init must be rejected.
    num_init = 5
    num_exp = 10
    num_data = -2
    all_data = np.zeros((num_exp, num_init + num_data))

    with pytest.raises(AssertionError):
        package_target.get_minimum(all_data, num_init)

    # Concrete values: running minima per experiment after the initial block.
    num_init = 3
    all_data = np.array([
        [3.1, 2.1, 4.1, 2.0, 1.0, 4.1, 0.4],
        [2.3, 4.9, 2.9, 8.2, 3.2, 4.2, 4.9],
        [0.8, 2.4, 5.4, 4.5, 0.3, 1.5, 2.3],
    ])
    truth_all_data = np.array([
        [2.1, 2.0, 1.0, 1.0, 0.4],
        [2.3, 2.3, 2.3, 2.3, 2.3],
        [0.8, 0.8, 0.3, 0.3, 0.3],
    ])

    cur_minimum, cur_mean, cur_std = package_target.get_minimum(all_data, num_init)
    assert (cur_minimum == truth_all_data).all()
    assert (cur_mean == np.mean(truth_all_data, axis=0)).all()
    assert (cur_std == np.std(truth_all_data, axis=0)).all()
def test_get_time_typing():
    """Check that get_time carries the expected type annotations."""
    hints = package_target.get_time.__annotations__
    expected = {
        'time_all': np.ndarray,
        'num_init': int,
        'include_init': bool,
        'return': np.ndarray,
    }
    for name, annotation in expected.items():
        assert hints[name] == annotation
def test_get_time():
    """Test input validation and cumulative elapsed-time values of get_time.

    Checks both include_init=True (initial block folded into t=0) and
    include_init=False (initial measurements kept as separate steps).
    """
    arr_time = np.array([
        [1.0, 0.5, 0.2, 0.7, 2.0],
        [2.0, 0.7, 1.2, 0.3, 0.7],
        [0.2, 0.1, 1.0, 0.2, 1.5],
    ])
    int_init = 2
    is_initial = True

    # Invalid arguments must be rejected with AssertionError.
    # (The `as error` bindings of the original were unused and are dropped.)
    with pytest.raises(AssertionError):
        package_target.get_time(arr_time, int_init, 1)
    with pytest.raises(AssertionError):
        package_target.get_time(arr_time, 'abc', is_initial)
    with pytest.raises(AssertionError):
        package_target.get_time('abc', int_init, is_initial)
    with pytest.raises(AssertionError):
        package_target.get_time(np.arange(0, 10), int_init, is_initial)
    with pytest.raises(AssertionError):
        package_target.get_time(arr_time, 10, is_initial)

    cur_time = package_target.get_time(arr_time, int_init, is_initial)
    truth_cur_time = np.array([0.0, 0.8, 1.2, 2.6])
    assert (np.abs(cur_time - truth_cur_time) < TEST_EPSILON).all()

    cur_time = package_target.get_time(arr_time, int_init, False)
    truth_cur_time = np.array([0.0, 1.06666667, 1.5, 2.3, 2.7, 4.1])
    assert (np.abs(cur_time - truth_cur_time) < TEST_EPSILON).all()
| [
"numpy.mean",
"numpy.abs",
"numpy.ones",
"bayeso.utils.utils_common.get_time",
"numpy.array",
"numpy.zeros",
"pytest.raises",
"numpy.std",
"bayeso.utils.utils_common.get_minimum",
"bayeso.utils.utils_common.get_grids",
"numpy.arange"
] | [((461, 510), 'numpy.array', 'np.array', (['[[0.0, 10.0], [-2.0, 2.0], [-5.0, 5.0]]'], {}), '([[0.0, 10.0], [-2.0, 2.0], [-5.0, 5.0]])\n', (469, 510), True, 'import numpy as np\n'), ((560, 607), 'numpy.array', 'np.array', (['[[0.0, 10.0], [2.0, 2.0], [5.0, 5.0]]'], {}), '([[0.0, 10.0], [2.0, 2.0], [5.0, 5.0]])\n', (568, 607), True, 'import numpy as np\n'), ((663, 1187), 'numpy.array', 'np.array', (['[[0.0, -2.0, -5.0], [0.0, -2.0, 0.0], [0.0, -2.0, 5.0], [5.0, -2.0, -5.0],\n [5.0, -2.0, 0.0], [5.0, -2.0, 5.0], [10.0, -2.0, -5.0], [10.0, -2.0, \n 0.0], [10.0, -2.0, 5.0], [0.0, 0.0, -5.0], [0.0, 0.0, 0.0], [0.0, 0.0, \n 5.0], [5.0, 0.0, -5.0], [5.0, 0.0, 0.0], [5.0, 0.0, 5.0], [10.0, 0.0, -\n 5.0], [10.0, 0.0, 0.0], [10.0, 0.0, 5.0], [0.0, 2.0, -5.0], [0.0, 2.0, \n 0.0], [0.0, 2.0, 5.0], [5.0, 2.0, -5.0], [5.0, 2.0, 0.0], [5.0, 2.0, \n 5.0], [10.0, 2.0, -5.0], [10.0, 2.0, 0.0], [10.0, 2.0, 5.0]]'], {}), '([[0.0, -2.0, -5.0], [0.0, -2.0, 0.0], [0.0, -2.0, 5.0], [5.0, -2.0,\n -5.0], [5.0, -2.0, 0.0], [5.0, -2.0, 5.0], [10.0, -2.0, -5.0], [10.0, -\n 2.0, 0.0], [10.0, -2.0, 5.0], [0.0, 0.0, -5.0], [0.0, 0.0, 0.0], [0.0, \n 0.0, 5.0], [5.0, 0.0, -5.0], [5.0, 0.0, 0.0], [5.0, 0.0, 5.0], [10.0, \n 0.0, -5.0], [10.0, 0.0, 0.0], [10.0, 0.0, 5.0], [0.0, 2.0, -5.0], [0.0,\n 2.0, 0.0], [0.0, 2.0, 5.0], [5.0, 2.0, -5.0], [5.0, 2.0, 0.0], [5.0, \n 2.0, 5.0], [10.0, 2.0, -5.0], [10.0, 2.0, 0.0], [10.0, 2.0, 5.0]])\n', (671, 1187), True, 'import numpy as np\n'), ((1326, 1833), 'numpy.array', 'np.array', (['[[0.0, 2.0, 5.0], [0.0, 2.0, 5.0], [0.0, 2.0, 5.0], [5.0, 2.0, 5.0], [5.0, \n 2.0, 5.0], [5.0, 2.0, 5.0], [10.0, 2.0, 5.0], [10.0, 2.0, 5.0], [10.0, \n 2.0, 5.0], [0.0, 2.0, 5.0], [0.0, 2.0, 5.0], [0.0, 2.0, 5.0], [5.0, 2.0,\n 5.0], [5.0, 2.0, 5.0], [5.0, 2.0, 5.0], [10.0, 2.0, 5.0], [10.0, 2.0, \n 5.0], [10.0, 2.0, 5.0], [0.0, 2.0, 5.0], [0.0, 2.0, 5.0], [0.0, 2.0, \n 5.0], [5.0, 2.0, 5.0], [5.0, 2.0, 5.0], [5.0, 2.0, 5.0], [10.0, 2.0, \n 5.0], [10.0, 2.0, 5.0], [10.0, 2.0, 
5.0]]'], {}), '([[0.0, 2.0, 5.0], [0.0, 2.0, 5.0], [0.0, 2.0, 5.0], [5.0, 2.0, 5.0\n ], [5.0, 2.0, 5.0], [5.0, 2.0, 5.0], [10.0, 2.0, 5.0], [10.0, 2.0, 5.0],\n [10.0, 2.0, 5.0], [0.0, 2.0, 5.0], [0.0, 2.0, 5.0], [0.0, 2.0, 5.0], [\n 5.0, 2.0, 5.0], [5.0, 2.0, 5.0], [5.0, 2.0, 5.0], [10.0, 2.0, 5.0], [\n 10.0, 2.0, 5.0], [10.0, 2.0, 5.0], [0.0, 2.0, 5.0], [0.0, 2.0, 5.0], [\n 0.0, 2.0, 5.0], [5.0, 2.0, 5.0], [5.0, 2.0, 5.0], [5.0, 2.0, 5.0], [\n 10.0, 2.0, 5.0], [10.0, 2.0, 5.0], [10.0, 2.0, 5.0]])\n', (1334, 1833), True, 'import numpy as np\n'), ((2489, 2529), 'bayeso.utils.utils_common.get_grids', 'package_target.get_grids', (['arr_range_1', '(3)'], {}), '(arr_range_1, 3)\n', (2513, 2529), True, 'from bayeso.utils import utils_common as package_target\n'), ((2547, 2587), 'bayeso.utils.utils_common.get_grids', 'package_target.get_grids', (['arr_range_2', '(3)'], {}), '(arr_range_2, 3)\n', (2571, 2587), True, 'from bayeso.utils import utils_common as package_target\n'), ((3210, 3250), 'numpy.zeros', 'np.zeros', (['(num_exp, num_init + num_data)'], {}), '((num_exp, num_init + num_data))\n', (3218, 3250), True, 'import numpy as np\n'), ((3387, 3433), 'bayeso.utils.utils_common.get_minimum', 'package_target.get_minimum', (['all_data', 'num_init'], {}), '(all_data, num_init)\n', (3413, 3433), True, 'from bayeso.utils import utils_common as package_target\n'), ((3759, 3799), 'numpy.zeros', 'np.zeros', (['(num_exp, num_init + num_data)'], {}), '((num_exp, num_init + num_data))\n', (3767, 3799), True, 'import numpy as np\n'), ((3937, 4063), 'numpy.array', 'np.array', (['[[3.1, 2.1, 4.1, 2.0, 1.0, 4.1, 0.4], [2.3, 4.9, 2.9, 8.2, 3.2, 4.2, 4.9],\n [0.8, 2.4, 5.4, 4.5, 0.3, 1.5, 2.3]]'], {}), '([[3.1, 2.1, 4.1, 2.0, 1.0, 4.1, 0.4], [2.3, 4.9, 2.9, 8.2, 3.2, \n 4.2, 4.9], [0.8, 2.4, 5.4, 4.5, 0.3, 1.5, 2.3]])\n', (3945, 4063), True, 'import numpy as np\n'), ((4111, 4207), 'numpy.array', 'np.array', (['[[2.1, 2.0, 1.0, 1.0, 0.4], [2.3, 2.3, 2.3, 2.3, 2.3], [0.8, 0.8, 0.3, 
0.3,\n 0.3]]'], {}), '([[2.1, 2.0, 1.0, 1.0, 0.4], [2.3, 2.3, 2.3, 2.3, 2.3], [0.8, 0.8, \n 0.3, 0.3, 0.3]])\n', (4119, 4207), True, 'import numpy as np\n'), ((4271, 4317), 'bayeso.utils.utils_common.get_minimum', 'package_target.get_minimum', (['all_data', 'num_init'], {}), '(all_data, num_init)\n', (4297, 4317), True, 'from bayeso.utils import utils_common as package_target\n'), ((4771, 4867), 'numpy.array', 'np.array', (['[[1.0, 0.5, 0.2, 0.7, 2.0], [2.0, 0.7, 1.2, 0.3, 0.7], [0.2, 0.1, 1.0, 0.2,\n 1.5]]'], {}), '([[1.0, 0.5, 0.2, 0.7, 2.0], [2.0, 0.7, 1.2, 0.3, 0.7], [0.2, 0.1, \n 1.0, 0.2, 1.5]])\n', (4779, 4867), True, 'import numpy as np\n'), ((5501, 5556), 'bayeso.utils.utils_common.get_time', 'package_target.get_time', (['arr_time', 'int_init', 'is_initial'], {}), '(arr_time, int_init, is_initial)\n', (5524, 5556), True, 'from bayeso.utils import utils_common as package_target\n'), ((5578, 5608), 'numpy.array', 'np.array', (['[0.0, 0.8, 1.2, 2.6]'], {}), '([0.0, 0.8, 1.2, 2.6])\n', (5586, 5608), True, 'import numpy as np\n'), ((5693, 5743), 'bayeso.utils.utils_common.get_time', 'package_target.get_time', (['arr_time', 'int_init', '(False)'], {}), '(arr_time, int_init, False)\n', (5716, 5743), True, 'from bayeso.utils import utils_common as package_target\n'), ((5765, 5812), 'numpy.array', 'np.array', (['[0.0, 1.06666667, 1.5, 2.3, 2.7, 4.1]'], {}), '([0.0, 1.06666667, 1.5, 2.3, 2.7, 4.1])\n', (5773, 5812), True, 'import numpy as np\n'), ((1957, 1986), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1970, 1986), False, 'import pytest\n'), ((2005, 2039), 'bayeso.utils.utils_common.get_grids', 'package_target.get_grids', (['"""abc"""', '(3)'], {}), "('abc', 3)\n", (2029, 2039), True, 'from bayeso.utils import utils_common as package_target\n'), ((2049, 2078), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2062, 2078), False, 'import pytest\n'), ((2097, 2141), 
'bayeso.utils.utils_common.get_grids', 'package_target.get_grids', (['arr_range_1', '"""abc"""'], {}), "(arr_range_1, 'abc')\n", (2121, 2141), True, 'from bayeso.utils import utils_common as package_target\n'), ((2151, 2180), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2164, 2180), False, 'import pytest\n'), ((2254, 2283), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2267, 2283), False, 'import pytest\n'), ((2356, 2385), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2369, 2385), False, 'import pytest\n'), ((2966, 2995), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2979, 2995), False, 'import pytest\n'), ((3014, 3050), 'bayeso.utils.utils_common.get_minimum', 'package_target.get_minimum', (['(1.2)', '(2.1)'], {}), '(1.2, 2.1)\n', (3040, 3050), True, 'from bayeso.utils import utils_common as package_target\n'), ((3060, 3089), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3073, 3089), False, 'import pytest\n'), ((3108, 3142), 'bayeso.utils.utils_common.get_minimum', 'package_target.get_minimum', (['(1.2)', '(3)'], {}), '(1.2, 3)\n', (3134, 3142), True, 'from bayeso.utils import utils_common as package_target\n'), ((3260, 3289), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3273, 3289), False, 'import pytest\n'), ((3308, 3349), 'bayeso.utils.utils_common.get_minimum', 'package_target.get_minimum', (['all_data', '(2.1)'], {}), '(all_data, 2.1)\n', (3334, 3349), True, 'from bayeso.utils import utils_common as package_target\n'), ((3809, 3838), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3822, 3838), False, 'import pytest\n'), ((3857, 3903), 'bayeso.utils.utils_common.get_minimum', 'package_target.get_minimum', (['all_data', 'num_init'], {}), '(all_data, num_init)\n', (3883, 3903), True, 'from bayeso.utils 
import utils_common as package_target\n'), ((4942, 4971), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (4955, 4971), False, 'import pytest\n'), ((4990, 5036), 'bayeso.utils.utils_common.get_time', 'package_target.get_time', (['arr_time', 'int_init', '(1)'], {}), '(arr_time, int_init, 1)\n', (5013, 5036), True, 'from bayeso.utils import utils_common as package_target\n'), ((5046, 5075), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5059, 5075), False, 'import pytest\n'), ((5094, 5146), 'bayeso.utils.utils_common.get_time', 'package_target.get_time', (['arr_time', '"""abc"""', 'is_initial'], {}), "(arr_time, 'abc', is_initial)\n", (5117, 5146), True, 'from bayeso.utils import utils_common as package_target\n'), ((5156, 5185), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5169, 5185), False, 'import pytest\n'), ((5204, 5256), 'bayeso.utils.utils_common.get_time', 'package_target.get_time', (['"""abc"""', 'int_init', 'is_initial'], {}), "('abc', int_init, is_initial)\n", (5227, 5256), True, 'from bayeso.utils import utils_common as package_target\n'), ((5266, 5295), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5279, 5295), False, 'import pytest\n'), ((5387, 5416), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5400, 5416), False, 'import pytest\n'), ((5435, 5484), 'bayeso.utils.utils_common.get_time', 'package_target.get_time', (['arr_time', '(10)', 'is_initial'], {}), '(arr_time, 10, is_initial)\n', (5458, 5484), True, 'from bayeso.utils import utils_common as package_target\n'), ((2224, 2240), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (2233, 2240), True, 'import numpy as np\n'), ((2327, 2342), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (2334, 2342), True, 'import numpy as np\n'), ((2429, 2466), 'numpy.array', 'np.array', (['[[0.0, -2.0], [10.0, 
20.0]]'], {}), '([[0.0, -2.0], [10.0, 20.0]])\n', (2437, 2466), True, 'import numpy as np\n'), ((5338, 5354), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (5347, 5354), True, 'import numpy as np\n'), ((4391, 4422), 'numpy.mean', 'np.mean', (['truth_all_data'], {'axis': '(0)'}), '(truth_all_data, axis=0)\n', (4398, 4422), True, 'import numpy as np\n'), ((4453, 4483), 'numpy.std', 'np.std', (['truth_all_data'], {'axis': '(0)'}), '(truth_all_data, axis=0)\n', (4459, 4483), True, 'import numpy as np\n'), ((5621, 5654), 'numpy.abs', 'np.abs', (['(cur_time - truth_cur_time)'], {}), '(cur_time - truth_cur_time)\n', (5627, 5654), True, 'import numpy as np\n'), ((5825, 5858), 'numpy.abs', 'np.abs', (['(cur_time - truth_cur_time)'], {}), '(cur_time - truth_cur_time)\n', (5831, 5858), True, 'import numpy as np\n')] |
#!/usr/bin/python3.7
#Dependencies
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy.optimize import minimize_scalar
import scipy.special as sp
import matplotlib.pyplot as plt
from scipy.stats import linregress
from scipy.stats import t
from scipy import integrate
import derivative
#-----------------------------------------------------------------------------------------------------------
class DataExtraction(object):
"""
Class that manipulates raw data to create lists and Data Frames
that will be used to compute the Activation Energy.
"""
def __init__(self):
"""
Constructor.
Parameters: None
Notes: It only defines variables.
"""
self.DFlis = [] #list of DataFrames containing data
self.Beta = [] #list of heating rates
self.BetaCC = [] #list of correlation coefficient for T vs t
self.files = [] #list of files containing raw data
self.da_dt = [] #list of experimental conversion rates
self.T = [] #list of experimental temperature
self.t = [] #list off experimental time
self.TempIsoDF = pd.DataFrame() #Isoconversional temperature DataFrame
self.timeIsoDF = pd.DataFrame() #Isoconversional time DataFrame
self.diffIsoDF = pd.DataFrame() #Isoconversional conversion rate DataFrame
self.TempAdvIsoDF = pd.DataFrame() #Advanced isoconversional temperature DataFrame
self.timeAdvIsoDF = pd.DataFrame() #Advanced isoconversional time DataFrame
self.alpha = [] #list of experimental conversion
self.d_a = 0.00001 #default value of alpha step for aVy method
#-----------------------------------------------------------------------------------------------------------
def set_data(self, filelist):
"""
Method to establish the file list for the extrator.
Parameters: filelist : list object containing the paths
of the files to be used.
Notes: The paths must be sorted in ascendent heating
rate order.
"""
print("Files to be used: \n{} ".format(filelist))
self.files = filelist
#-----------------------------------------------------------------------------------------------------------
def data_extraction(self, encoding='utf8'):
    """
    Method to extract the data contained in the files into a list of DataFrames.
    Adds three columns: one corresponding to the absolute temperature, another
    corresponding to the conversion ('alpha') and a third for d(alpha)/dt.
    Also computes the heating rate ('Beta') with its correlation coefficient.

    Parameters:   encoding : The available encodings for the pandas.read_table()
                  method. Includes but is not limited to 'utf8', 'utf16',
                  'latin1'. For more information on the python standard
                  encodings:
                  (https://docs.python.org/3/library/codecs.html#standard-encodings)
    """
    def load_experiment(item, sep):
        """Read one raw data file and add the T [K], alpha and d(alpha)/dt columns.

        Raises IndexError when `sep` does not split the file into enough
        columns (used below to fall back from tab to comma separation).
        """
        DF = pd.read_table(item, sep=sep, encoding=encoding)
        DF['Temperature [K]'] = DF[DF.columns[1]] + 273.15
        # Conversion: normalized mass loss between first and last sample.
        DF[r'$\alpha$'] = ((DF[DF.columns[2]][0] - DF[DF.columns[2]])
                           / (DF[DF.columns[2]][0] - DF[DF.columns[2]][DF.shape[0] - 1]))
        # Smoothed numerical derivative of alpha with respect to time.
        dadt = derivative.dxdt(DF[r'$\alpha$'], DF[DF.columns[0]],
                               kind='spline', s=0.01, order=5)
        DF[r'$d\alpha/dt$'] = DF[DF.columns[0]]
        DF[r'$d\alpha/dt$'] = dadt
        return DF

    def monotonic_arrays(DF):
        """Return lists (alpha, T, t, da/dt) keeping only strictly increasing alpha."""
        a = [DF[r'$\alpha$'].values[0]]
        Temp = [DF['Temperature [K]'].values[0]]
        time = [DF[DF.columns[0]].values[0]]
        # NOTE(review): the original seeds the rate list with index 1, not 0,
        # presumably to skip an unreliable endpoint of the spline derivative
        # — kept as-is, confirm with the authors.
        diff = [DF[r'$d\alpha/dt$'].values[1]]
        for j in range(len(DF[r'$\alpha$'].values)):
            if DF[r'$\alpha$'].values[j] > a[-1]:
                a.append(DF[r'$\alpha$'].values[j])
                Temp.append(DF['Temperature [K]'].values[j])
                time.append(DF[DF.columns[0]].values[j])
                diff.append(DF[r'$d\alpha/dt$'].values[j])
        return a, Temp, time, diff

    # Read each file, preferring tab separation and falling back to comma.
    # (The original duplicated the whole parsing pipeline in the except
    # branch; the helper removes that duplication with identical behavior.)
    for item in self.files:
        try:
            DF = load_experiment(item, '\t')
        except IndexError:
            DF = load_experiment(item, ',')
        # Heating rate as the slope of a linear regression of T vs t.
        LR = linregress(DF[DF.columns[0]], DF[DF.columns[3]])
        self.BetaCC.append(LR.rvalue)
        self.Beta.append(LR.slope)
        self.DFlis.append(DF)

    # Build per-experiment arrays of strictly increasing conversion values
    # together with the matching temperature, time and conversion rate.
    for DF in self.DFlis:
        a, Temp, time, diff = monotonic_arrays(DF)
        self.alpha.append(np.array(a))
        self.T.append(np.array(Temp))
        self.t.append(np.array(time))
        self.da_dt.append(np.array(diff))
#-----------------------------------------------------------------------------------------------------------
def get_beta(self):
"""
Parameters: None
Returns: list object containing the experimental heating rate sorted
in ascendent order obtained from a linear regression of T vs t.
"""
return self.Beta
#-----------------------------------------------------------------------------------------------------------
def get_betaCC(self):
"""
Parameters: None
Returns: list object containing the experimental T vs t correlation coefficient
obtained from a linear regression, sorted in correspondance with the
heating rate list (attribute Beta).
"""
return self.BetaCC
#-----------------------------------------------------------------------------------------------------------
def get_DFlis(self):
"""
Parameters: None
Returns: list object containing the DataFrames with the experimental data, sorted
in correspondance with the heating rate list (attribute Beta).
"""
return self.DFlis
#-----------------------------------------------------------------------------------------------------------
def isoconversional(self):
"""
Isoconversional DataFrames building method for the Friedman, KAS, OFW and Vyazovkin methods.
The isoconversional values for T, t and da/dt are obtained by interpolation.
Parameters: None
Returns: None
Notes: This method asigns values to the attributes: TempIsoDF, timeIsoDF and diffIsoDF
"""
alpha = self.alpha
da_dt = self.da_dt
T = self.T
t = self.t
DFlis = self.DFlis
TempIsoDF = pd.DataFrame()
timeIsoDF = pd.DataFrame()
diffIsoDF = pd.DataFrame()
Beta = self.Beta
# Take the experimental data set with the less data points (alps), so the interpolation is made with the
# data sets with more experimental information.
# Create the interpolation functions and evaluate them over the conversion values of the latter set (alps).
# Create the isoconversional DataFrames with the conversion values (alps) as index and the
# interpolation values as columns corresponding to their experimental heating rates.
alps = np.array(alpha[-1])
TempIsoDF['HR '+str(np.round(Beta[-1], decimals = 1)) + ' K/min'] = np.round(T[-1], decimals = 4)
timeIsoDF['HR '+str(np.round(Beta[-1], decimals = 1)) + ' K/min'] = np.round(t[-1], decimals = 4)
diffIsoDF['HR '+str(np.round(Beta[-1], decimals = 1)) + ' K/min'] = np.round(da_dt[-1], decimals = 4)
for i in range(len(Beta)-1):
inter_func = interp1d(alpha[i],
t[i],
kind='cubic',
bounds_error=False,
fill_value="extrapolate")
timeIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func(alps), decimals = 4)
inter_func2 = interp1d(alpha[i],
T[i],
kind='cubic',
bounds_error=False,
fill_value="extrapolate")
TempIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func2(alps), decimals = 4)
inter_func3 = interp1d(alpha[i],
da_dt[i],
kind='cubic',
bounds_error=False,
fill_value="extrapolate")
diffIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func3(alps), decimals = 4)
colnames = TempIsoDF.columns.tolist()
colnames = colnames[1:] + colnames[:1]
TempIsoDF.index = alpha[-1]
TempIsoDF = TempIsoDF[colnames]
timeIsoDF.index = alpha[-1]
timeIsoDF = timeIsoDF[colnames]
diffIsoDF.index = alpha[-1]
diffIsoDF = diffIsoDF[colnames]
self.TempIsoDF = TempIsoDF
self.timeIsoDF = timeIsoDF
self.diffIsoDF = diffIsoDF
#-----------------------------------------------------------------------------------------------------------
def get_TempIsoDF(self):
"""
Parameters: None
Returns: DataFrame of isoconversional temperatures. The index is the set of conversion
values from the experiment with the less data points (which correspond to the
smallest heating rate). The columns are isoconversional temperatures, sorted in
heating rate ascendent order from left to right.
"""
return self.TempIsoDF
#-----------------------------------------------------------------------------------------------------------
def get_timeIsoDF(self):
"""
Parameters: None
Returns: DataFrame of isoconversional times. The index is the set of conversion values
from the experiment with the less data points (which correspond to the smallest
heating rate). The columns are isoconversional times, sorted in heating rate
ascendent order from left to right.
"""
return self.timeIsoDF
#-----------------------------------------------------------------------------------------------------------
def get_diffIsoDF(self):
"""
Parameters: None
Returns: DataFrame of isoconversional conversion rates. The index is the set of conversion
values from the experiment with the less data points (which correspond to the smallest
heating rate). The columns are isoconversional conversion rates, sorted in heating
rate ascendent order from left to right.
"""
return self.timeIsoDF
#-----------------------------------------------------------------------------------------------------------
def adv_isoconversional(self, method='points', N = 1000, d_a = 0.001):
"""
Isoconversional DataFrames building method for the advanced Vyazovkin method. The isoconversional
values for T and t are obtained by interpolation.
Parameters: method : String. Value can be either 'points' or 'interval'. ṕoints'is the
default value.
N : Int. Number of conversion points if the 'points' method is given.
1000 is the default value.
d_a : Float. Size of the interval between conversion values if the method
'interval' is given. 0.001 is the default value.
Returns: None
Notes: This method asigns values to the attributes: TempAdvIsoDF, timeAdvIsoDF and d_a
"""
TempAdvIsoDF = pd.DataFrame()
timeAdvIsoDF = pd.DataFrame()
Beta = self.Beta
alpha = self.alpha
T = self.T
t = self.t
# Evaluate which methd was given and create an array of conversion values (alps)
# Create interpolation functions and evaluate on the conversion values (alps)
# Create the isoconversional DataFrames with the conversion values (alps) as index and the
# interpolation values as columns corresponding to their experimental heating rates.
if method == 'points':
alps, d_a = np.linspace(alpha[-1][0],alpha[-1][-1],N,retstep=True)
elif method == 'interval':
alps = np.arange(alpha[-1][0],alpha[-1][-1],d_a)
else:
raise ValueError('Method not recognized')
for i in range(0,len(Beta)):
inter_func = interp1d(alpha[i],
T[i],
kind='cubic', bounds_error=False,
fill_value="extrapolate")
TempAdvIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func(alps), decimals = 4)
inter_func2 = interp1d(alpha[i],
t[i],
kind='cubic', bounds_error=False,
fill_value="extrapolate")
timeAdvIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func2(alps), decimals = 4)
timeAdvIsoDF.index = alps
TempAdvIsoDF.index = alps
self.d_a = d_a
self.TempAdvIsoDF = TempAdvIsoDF
self.timeAdvIsoDF = timeAdvIsoDF
#-----------------------------------------------------------------------------------------------------------
def get_TempAdvIsoDF(self):
"""
Parameters: None
Returns: DataFrame of isoconversional temperatures for the advanced Vyazovkin method.
The index is a set of equidistant (attribute d_a) conversion values, with
initial and final points taken from the experiment with the less data points
(which correspond to the smallest heating rate). The columns are isoconversional
temperatures, sorted in heating rate ascendent order from left to right.
"""
return self.TempAdvIsoDF
#-----------------------------------------------------------------------------------------------------------
def get_timeAdvIsoDF(self):
"""
Parameters: None
Returns: DataFrame of isoconversional times for the advanced Vyazovkin method.
The index is a set of equidistant (attribute d_a) conversion values, with
initial and final points taken from the experiment with the less data points
(which correspond to the smallest heating rate). The columns are isoconversional
times, sorted in heating rate ascendent order from left to right.
"""
return self.timeAdvIsoDF
#-----------------------------------------------------------------------------------------------------------
def get_alpha(self):
"""
Parameters: None
Returns: list object containing arrays of the conversion values in ascendent order.
The elements are sorted in correspondance with the heating rate list (attribute Beta).
"""
return self.alpha
#-----------------------------------------------------------------------------------------------------------
def get_dadt(self):
"""
Parameters: None
Returns: list object containing arrays of the conversion rates data corresponding
to the conversion values of each element in the attribute alpha. The elements
are sorted in correspondance with the heating rate list (attribute Beta).
"""
return self.da_dt
#-----------------------------------------------------------------------------------------------------------
def get_t(self):
"""
Parameters: None
Returns: list object containing arrays of the time data corresponding to the conversion
values of each element in the attribute alpha. The elements are sorted in
correspondance with the heating rate list (attribute Beta).
"""
return self.t
#-----------------------------------------------------------------------------------------------------------
def get_T(self):
"""
Parameters: None
Returns: list object containing arrays of the temperature data corresponding to the
conversion values of each element in the attribute alpha. The elements are
sorted in correspondance with the heating rate list (attribute Beta).
"""
return self.T
#-----------------------------------------------------------------------------------------------------------
def save_Ea(self, E_Fr= None, E_OFW=None, E_KAS=None, E_Vy=None, E_aVy=None, file_t="xlsx" ):
"""
Method to save activation energy values calculated with the ActivationEnergy class
Parameters: E_Fr : array of activation energies obtained by de Friedman method.
E_OFW : array of activation energies obtained by de OFW method.
E_KAS : array of activation energies obtained by de KAS method.
E_Vy : array of activation energies obtained by de Vyazovkin method.
E_aVy : array of activation energies obtained by de advanced Vyazovkin
method.
file_t : String. Type of file, can be 'csv' of 'xlsx'.
'xlsx' is the default value.
returns: If 'xlsx' is selected, a spreadsheet containg one sheet per experiment
containing the values of T, t, and da_dt, plus a sheet containing the
activation energies.
If 'csv' is selected, one 'csv' file per experiment, containing the
values of T, t, and da_dt, plus a sheet containing the activation energies.
"""
TempIsoDF = self.TempIsoDF
dflist = self.DFlis
Beta = self.Beta
da_dt = self.da_dt
T = self.T
t = self.t
DFreslis = []
for k in range(len(Beta)):
DF = pd.DataFrame([], columns=['time [min]',
'Temperature [K]',
'da_dt'])
DF['time [min]'] = t[k]
DF['Temperature [K]'] = T[k]
DF['da_dt']=da_dt[k]
DFreslis.append(DF)
alps = TempIsoDF.index.values
columns = ['alpha']
if np.any(E_OFW)!=None:
columns.append('OFW')
if np.any(E_KAS)!=None:
columns.append('KAS')
if np.any(E_Vy)!=None:
columns.append('Vyazovkin')
if np.any(E_aVy)!=None:
columns.append('adv.Vyazovkin')
DF_nrgy = pd.DataFrame([], columns = columns)
DF_nrgy['alpha'] = alps
if 'OFW' in columns:
DF_nrgy['OFW']=E_OFW
if 'KAS' in columns:
DF_nrgy['KAS'] = E_KAS
if 'Vyazovkin' in columns:
DF_nrgy['Vyazovkin'] = E_Vy
if 'adv.Vyazovkin' in columns:
DF_nrgy['adv.Vyazovkin'][0] = np.nan
DF_nrgy['adv.Vyazovkin'][1:] = E_aVy
if(dialect=='xlsx'):
nombre = 'Activation_energies_results.xlsx'
with pd.ExcelWriter(nombre) as writer:
for i in range(len(DFreslis)):
DFreslis[i].to_excel(writer,
sheet_name='B ='+ str(np.round(Beta[i],decimals=1)) + 'K_min',
index=False)
DF_nrgy.to_excel(writer, sheet_name='Act. Energies',index=False)
print("Workseet {} ".format(nombre))
elif(dialect=='csv'):
print("Saving csvs\n")
for i in range(len(Beta)):
nombre = 'HR={0:0.3}_K_per_min.csv'.format(Beta[i])
df = pd.DataFrame({'t':t[i],
'T':T[i],
'da_dt':da_dt[i]})
print("Saving {}".format(nombre))
df.to_csv(nombre, sep=',',index=False)
print("Saving activation energies")
DF_nrgy.to_csv('Activation_energies_results.csv',
encoding='utf8',
sep=',',
index=False)
else:
raise ValueError("File type not recognized")
#-----------------------------------------------------------------------------------------------------------
def get_avsT_plot(self):
"""
Visualization method for alpha vs T
Parameters: None
Returns: A matplotlib figure plotting conversion vs temperature for
each heating rate in attribute Beta.
"""
for i in range(len(self.DFlis)):
plt.plot(self.T[i],
self.alpha[i],
label=str(np.round(self.Beta[i],decimals=1))+' K/min')
plt.xlabel(self.DFlis[i].columns[3])
plt.ylabel(self.DFlis[i].columns[4])
plt.legend()
return plt.show()
#-----------------------------------------------------------------------------------------------------------
def get_dadtvsT_plot(self):
"""
Visualization method for da_dt vs T
Parameters: None
Returns: A matplotlib figure plotting conversion rate vs temperature
for each heating rate in attribute Beta.
"""
for i in range(len(self.DFlis)):
plt.plot(self.T[i],
self.da_dt[i],
label=str(np.round(self.Beta[i],decimals=1))+' K/min')
plt.xlabel(self.DFlis[i].columns[3])
plt.ylabel(self.DFlis[i].columns[5])
plt.legend()
return plt.show()
#-----------------------------------------------------------------------------------------------------------
def get_avst_plot(self):
"""
Visualization method for alpha vs t
Parameters: None
Returns: A matplotlib figure plotting conversion vs time for each
heating rate in attribute Beta.
"""
for i in range(len(self.DFlis)):
plt.plot(self.t[i],
self.alpha[i],
label=str(np.round(self.Beta[i],decimals=1))+' K/min')
plt.xlabel(self.DFlis[i].columns[0])
plt.ylabel(self.DFlis[i].columns[4])
plt.legend()
return plt.show()
#-----------------------------------------------------------------------------------------------------------
def get_dadtvst_plot(self):
"""
Visualization method for da_dt vs t
Parameters: None
Returns: A matplotlib figure plotting conversion rate vs time for
each heating rate in attribute Beta.
"""
for i in range(len(self.DFlis)):
plt.plot(self.t[i],
self.da_dt[i],
label=str(np.round(self.Beta[i],decimals=1))+' K/min')
plt.xlabel(self.DFlis[i].columns[0])
plt.ylabel(self.DFlis[i].columns[5])
plt.legend()
return plt.show()
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
class ActivationEnergy(object):
"""
Class that uses the attributes of Dataextraction to compute activation
energy values based on five methods: Friedman, FOW, KAS, Vyazovkin and
Advanced Vyazovkin.
"""
def __init__(self, Beta, TempIsoDF=None, diffIsoDF=None, TempAdvIsoDF=None, timeAdvIsoDF=None):
"""
Constructor. Defines variables and the constant R=8.314 J/(mol K)
Parameters: Beta : list object containing the values of heating
rate for each experiment.
TempIsoDF : pandas DataFrame containing the isoconversional
temperatures.
diffIsoDF : pandas DataFrame containing the isoconversional
derivatives of conversion in respest to time (da_dt).
TempAdvIsoDF : pandas DataFrame containing the isoconversional
temperatures, corresponding to evenly spaced values
of conversion.
timeAdvIsoDF : pandas DataFrame containing the isoconversional
times, corresponding to evenly spaced values of
conversion.
"""
self.E_Fr = []
self.E_OFW = []
self.E_KAS = []
self.E_Vy = []
self.E_aVy = []
self.Beta = Beta
self.logB = np.log(Beta)
self.TempIsoDF = TempIsoDF
self.diffIsoDF = diffIsoDF
self.TempAdvIsoDF = TempAdvIsoDF
self.timeAdvIsoDF = timeAdvIsoDF
"""
Universal gas constant
0.0083144626 kJ/(mol*K)
"""
self.R = 0.0083144626
#-----------------------------------------------------------------------------------------------------------
def Fr(self):
"""
Method to compute the Activation Energy based on the Friedman treatment.
Parameters: None
Returns: Tuple of arrays:
E_Fr : numpy array containing the activation energy values
obtained by the Friedman method.
Fr_95e : numpy array containing the 95% trust interval values
obtained by the linear regression in the Friedman method.
Fr_b : numpy array containing the intersection values obtained
by the linear regression in the Friedman method.
"""
E_Fr = []
E_Fr_err = []
Fr_b = []
diffIsoDF = self.diffIsoDF
TempIsoDF = self.TempIsoDF
for i in range(0,diffIsoDF.shape[0]):
y = np.log(diffIsoDF.iloc[i].values)
x = 1/(TempIsoDF.iloc[i].values)
tinv = lambda p, df: abs(t.ppf(p/2, df))
ts = tinv(0.05, len(x)-2)
LR = linregress(x,y)
E_a_i = -(self.R)*(LR.slope)
error = -(self.R)*(LR.stderr)
Fr_b.append(LR.intercept)
E_Fr_err.append(ts*error)
E_Fr.append(E_a_i)
self.E_Fr = np.array(E_Fr)
self.Fr_95e = np.array(E_Fr_err)
self.Fr_b = np.array(Fr_b)
return (self.E_Fr, self.Fr_95e, self.Fr_b)
#-----------------------------------------------------------------------------------------------------------
def OFW(self):
"""
Method to compute the Activation Energy based on the Osawa-Flynn-Wall
(OFW) treatment.
Parameters: None
Returns : Tuple of arrays:
E_OFW : numpy array containing the activation energy values
obtained by the Ozawa_Flynn-Wall method
OFW_95e : numpy array containing the 95% trust interval values
obtained by the linear regression in the
Ozawa-Flynn-Wall method
"""
logB = self.logB
E_OFW = []
E_OFW_err = []
TempIsoDF = self.TempIsoDF
for i in range(TempIsoDF.shape[0]):
y = (logB)
x = 1/(TempIsoDF.iloc[i].values)
tinv = lambda p, df: abs(t.ppf(p/2, df))
ts = tinv(0.05, len(x)-2)
LR = linregress(x,y)
E_a_i = -(self.R/1.052)*(LR.slope)
error = -(self.R/1.052)*(LR.stderr)
E_OFW_err.append(ts*error)
E_OFW.append(E_a_i)
self.E_OFW = np.array(E_OFW)
self.OFW_95e = np.array(E_OFW_err)
return self.E_OFW, self.OFW_95e
#-----------------------------------------------------------------------------------------------------------
def KAS(self):
"""
Method to compute the Activation Energy based on the Kissinger-Akahira-Sunose
(KAS) treatment.
Parameters: None
Returns : Tuple of arrays:
E_KAS : numpy array containing the activation energy values
obtained by the Kissinger-Akahra-Sunose method.
KAS_95e : numpy array containing the 95% trust interval values
obtained by the linear regression in the
Kissinger-Akahra-Sunose method
"""
logB = self.logB
E_KAS = []
E_KAS_err = []
TempIsoDF = self.TempIsoDF
for i in range(TempIsoDF.shape[0]):
y = (logB)- np.log((TempIsoDF.iloc[i].values)**1.92)
x = 1/(TempIsoDF.iloc[i].values)
tinv = lambda p, df: abs(t.ppf(p/2, df))
ts = tinv(0.05, len(x)-2)
LR = linregress(x,y)
E_a_i = -(self.R)*(LR.slope)
error = -(self.R)*(LR.stderr)
E_KAS_err.append(ts*error)
E_KAS.append(E_a_i)
self.E_KAS = np.array(E_KAS)
self.KAS_95e = np.array(E_KAS_err)
return self.E_KAS, self.KAS_95e
#-----------------------------------------------------------------------------------------------------------
def omega(self,E,row,Beta,method = 'senum-yang'):
"""
Method to calculate the function to minimize for the Vyazovkin method:
\Omega(Ea) = \sum_{i}^{n}\sum_{j}^{n-1}{[B_{j}{I(E,T_{i})]}/[B_{i}{I(E,T_{j})}]}
Parameters: E : The activation energy value used to calculate
the value of omega.
row : index value for the row of conversion in the
pandas DataFrame containing the isoconversional
temperatures.
Beta : list object containing the heatibg rate values
for each experiment.
method : Method to compute the integral temperature.
The available methods are: 'senum-yang' for
the Senum-Yang approximation, 'trapeoid' for
the the trapezoid rule of numerical integration,
and 'quad' for using a technique from the Fortran
library QUADPACK implemented in the scipy.integrate
subpackage.
Returns: O : Float. Value of the omega function for the given E.
"""
#Define integration limits
IsoDF = self.TempIsoDF
T0 = IsoDF.iloc[0].values
T = IsoDF.iloc[row].values
#Senum-Yang approximation
def senum_yang(E):
x = E/(self.R*T)
num = x**3 + 18*(x**2) + 88*x + 96
den = x**4 + 20*(x**3) + 120*(x**2) +240*x +120
s_y = ((np.exp(-x))/x)*(num/den)
return (E/self.R)*s_y
#Trapezoid rule implemented from scipy
def trapezoid(E):
x0 = T0
y0 = np.exp(-E/(self.R*x0))
xf = T
yf = np.exp(-E/(self.R*xf))
tpz = []
for i in range(len(T)):
tpz.append(integrate.trapezoid([y0[i],yf[i]],
[x0[i],xf[i]]))
return np.array(tpz)
#QUAD function implemented from scipy
def quad(E):
def integral(x,E):
return np.exp(-E/(self.R*x))
quad = []
for i in range(len(T)):
quad.append(integrate.quad(integral,
T0[i],
T[i],
args=(E))[0])
return np.array(quad)
omega_i = []
if method == 'senum-yang':
p = senum_yang(E)
p_B = p/Beta
for j in range(len(Beta)):
y = p_B[j]*((np.sum(1/(p_B)))-(1/p_B[j]))
omega_i.append(y)
return np.sum((omega_i))
elif method == 'trapezoid':
p = trapezoid(E)
p_B = p/Beta
for j in range(len(Beta)):
y = p_B[j]*((np.sum(1/(p_B)))-(1/p_B[j]))
omega_i.append(y)
return np.sum((omega_i))
elif method == 'quad':
p = quad(E)
p_B = p/Beta
for j in range(len(Beta)):
y = p_B[j]*((np.sum(1/(p_B)))-(1/p_B[j]))
omega_i.append(y)
return np.sum((omega_i))
else:
raise ValueError('method not recognized')
#-----------------------------------------------------------------------------------------------------------
def visualize_omega(self,row,bounds=(1,300),N=1000,method = 'senum-yang'):
"""
Method to visualize omega function:
Parameters: row : Int object. Implicit index for the row of conversion in
the pandas DataFrame containing the isoconversional
temperatures.
bounds : Tuple object containing the lower and upper limit values
for E, to evaluate omega.
N : Int. Number of points in the E array for the plot.
method : Method to evaluate the temperature integral. The available
methods are: 'senum-yang' for the Senum-Yang approximation,
'trapezoid' for the the trapezoid rule of numerical integration,
and 'quad' for using a technique from the Fortran library
QUADPACK implemented in the scipy.integrate subpackage.
Returns: A matplotlib figure plotting omega vs E.
"""
IsoDF = self.TempIsoDF
method = method
E = np.linspace(bounds[0], bounds[1], N)
O = np.array([float(self.omega(E[i],row,self.Beta,method)) for i in range(len(E))])
plt.style.use('seaborn')
plt.plot(E,O,color='teal',label=r'$\alpha$ = '+str(np.round(IsoDF.index[row],decimals=3)))
plt.ylabel(r'$\Omega\left(E_{\alpha}\right)$')
plt.xlabel(r'$E_{\alpha}$')
plt.legend()
return plt.show()
#-----------------------------------------------------------------------------------------------------------
def Vy(self, bounds, method='senum-yang'):
"""
Method to compute the Activation Energy based on the Vyazovkin treatment.
Parameters: bounds : Tuple object containing the lower and upper limit values
for E, to evaluate omega.
method : Method to evaluate the temperature integral. The available
methods are: 'senum-yang' for the Senum-Yang approximation,
'trapezoid' for the the trapezoid rule of numerical integration,
and 'quad' for using a technique from the Fortran library
QUADPACK implemented in the scipy.integrate subpackage.
Returns: E_Vy : numpy array containing the activation energy values
obtained by the Vyazovkin method.
"""
E_Vy = []
Beta = self.Beta
IsoDF = self.TempIsoDF
for k in range(len(IsoDF.index)):
E_Vy.append(minimize_scalar(self.omega, args=(k,Beta,method),bounds=bounds, method = 'bounded').x)
self.E_Vy = np.array(E_Vy)
return self.E_Vy
#-----------------------------------------------------------------------------------------------------------
    def variance_Vy(self, E, row_i, method = 'senum-yang'):
        """
        Method to calculate the variance of the activation energy E obtained with the Vyazovkin
        treatment. The variance is computed as:
        S^{2}(E) = {1}/{n(n-1)}\sum_{i}^{n}\sum_{j}^{n-1}{[{J(E,T_{i})]}/[{J(E,T_{j})}]-1}^{2}
        Parameters:   E      : The activation energy value used to calculate
                               the value of omega.
                      row_i  : index value for the row of conversion in the
                               pandas DataFrame containing the isoconversional
                               temperatures.
                      method : Method to compute the integral temperature.
                               The available methods are: 'senum-yang' for
                               the Senum-Yang approximation, 'trapezoid' for
                               the trapezoid rule of numerical integration,
                               and 'quad' for using a technique from the Fortran
                               library QUADPACK implemented in the scipy.integrate
                               subpackage.
        Returns:      Float object. Value of the variance associated to a given E.
        --------------------------------------------------------------------------------------------
        Reference:    Vyazovkin, S., & Wight, C. A. (2000). Estimating realistic confidence intervals
                      for the activation energy determined from thermoanalytical measurements.
                      Analytical chemistry, 72(14), 3171-3175.
        """
        # Number of ordered heating-rate pairs: n*(n-1), the normalization in S^2(E).
        N = len(self.Beta)*(len(self.Beta)-1)
        # Integration limits: temperatures at the first conversion (T0) and at
        # the requested conversion row (T), one entry per heating rate.
        T0 = self.TempIsoDF.iloc[0].values
        T = self.TempIsoDF.iloc[row_i].values
        # NOTE(review): this method hard-codes R = 0.008314 kJ/(mol K) while the
        # rest of the class uses self.R = 0.0083144626 — confirm whether the
        # small discrepancy is intentional.
        def senum_yang(E):
            # Third-degree Senum-Yang rational approximation of the temperature integral.
            x = E/(0.008314*T)
            num = x**3 + 18*(x**2) + 88*x + 96
            den = x**4 + 20*(x**3) + 120*(x**2) +240*x +120
            s_y = ((np.exp(-x))/x)*(num/den)
            return (E/0.008314)*s_y
        def trapezoid(E):
            # Two-point trapezoid rule between T0 and T for each experiment.
            x0 = T0
            y0 = np.exp(-E/(0.008314*x0))
            xf = T
            yf = np.exp(-E/(0.008314*xf))
            tpz = [integrate.trapezoid([y0[i],yf[i]],
                                [x0[i],xf[i]])
                  for i in range(len(T))]
            return np.array(tpz)
        def quad(E):
            # Adaptive quadrature (QUADPACK) between T0 and T for each experiment.
            def integral(x,E):
                return np.exp(-E/(0.008314*x))
            quad = [integrate.quad(integral,
                          T0[i],
                          T[i],
                          args=(E))[0]
                    for i in range(len(T))]
            return np.array(quad)
        # s[i][j] = J_i/J_j; the diagonal contributes zero to sum((s-1)**2).
        if method == 'senum-yang':
            J = senum_yang(E)
            s = np.array([J[i]/J for i in range(len(J))])
            return np.sum((s-1)**2)/N
        elif method == 'trapezoid':
            J = trapezoid(E)
            s = np.array([J[i]/J for i in range(len(J))])
            return np.sum((s-1)**2)/N
        elif method == 'quad':
            J = quad(E)
            s = np.array([J[i]/J for i in range(len(J))])
            return np.sum((s-1)**2)/N
        else:
            raise ValueError('method not recognized')
#-----------------------------------------------------------------------------------------------------------
def psi_Vy(self, E, row_i, bounds, method = 'senum-yang'):
"""
Method to calculate the F distribution to minimize for the Vyazovkin method.
The distribution is computed as:
\Psi(E) = S^{2}(E)/S^{2}_{min}
Parameters: E : The activation energy value used to calculate
the value of omega.
row_i : index value for the row of conversion in the
pandas DataFrame containing the isoconversional
temperatures.
bounds : Tuple object containing the lower and upper limit values
for E, to evaluate the variance.
method : Method to compute the integral temperature.
The available methods are: 'senum-yang' for
the Senum-Yang approximation, 'trapezoid' for
the the trapezoid rule of numerical integration,
and 'quad' for using a technique from the Fortran
library QUADPACK implemented in the scipy.integrate
subpackage.
Returns: Psi : Float. Value of the distribution function that sets the lower
and upper confidence limits for E.
--------------------------------------------------------------------------------------------
Reference: <NAME>., & <NAME>. (2000). Estimating realistic confidence intervals
for the activation energy determined from thermoanalytical measurements.
Analytical chemistry, 72(14), 3171-3175.
"""
F = [161.4, 19, 9.277, 6.388, 5.050, 4.284, 3.787, 3.438, 3.179]
f = F[len(self.Beta)-2]
method = method
E_min = minimize_scalar(self.variance_Vy,
bounds=bounds,
args=(row_i,
method),
method = 'bounded').x
s_min = self.variance_Vy(E_min, row_i, method)
s = self.variance_Vy(E, row_i, method)
return (s/s_min) - (f+1)
#-----------------------------------------------------------------------------------------------------------
def er_Vy(self, E, row_i, bounds, method = 'senum-yang'):
"""
Method to compute the error associated to a given activation energy value obtained
by the vyazovkin method.
Parameters: E : The activation energy value used to calculate
the value of omega.
row_i : index value for the row of conversion in the
pandas DataFrame containing the isoconversional
temperatures.
bounds : Tuple object containing the lower and upper limit values
for E, to evaluate omega.
method : Method to compute the integral temperature.
The available methods are: 'senum-yang' for
the Senum-Yang approximation, 'trapezoid' for
the the trapezoid rule of numerical integration,
and 'quad' for using a technique from the Fortran
library QUADPACK implemented in the scipy.integrate
subpackage.
Returns: error : Float. Value of the error associated to a given E.
"""
method = method
E_p = np.linspace(5,80,50)
P = np.array([self.psi_Vy(E_p[i],row_i, bounds, method) for i in range(len(E_p))])
inter_func = interp1d(E_p,
P,
kind='cubic',
bounds_error=False,
fill_value="extrapolate")
zeros = np.array([fsolve(inter_func, E-150)[0],
fsolve(inter_func, E+150)[0]])
error = np.mean(np.array([abs(E-zeros[0]), abs(E-zeros[1])]))
return error
#-----------------------------------------------------------------------------------------------------------
def error_Vy(self, bounds, method = 'senum-yang'):
"""
Method to calculate the distribution to minimize for the Vyazovkin method.
Parameters: bounds : Tuple object containing the lower and upper limit values
for E, to evaluate omega.
method : Method to compute the integral temperature. The available
methods are: 'senum-yang' for the Senum-Yang approximation,
'trapezoid' for the the trapezoid rule of numerical integration,
and 'quad' for using a technique from the Fortran library
QUADPACK implemented in the scipy.integrate subpackage.
Returns: error_Vy : Array of error values associated to the array of activation
energies obtained by the Vyazovkin method.
"""
bounds = bounds
method = method
error_Vy = np.array([self.er_Vy(self.E_Vy[i], i, bounds, method=method) for i in range(len(self.E_Vy))])
self.error_Vy = error_Vy
return self.error_Vy
#-----------------------------------------------------------------------------------------------------------
def J_Temp(self, E, inf, sup):
"""
Temperature integral for the Advanced Vyazovkin Treatment.
Prameters: E : Float object. Value for the activation energy to evaluate the integral
inf : Inferior integral evaluation limit.
sup : Superior integral evaluation limit.
Returns: J : Float. Value of the integral obtained by an analytic expression. Based
on a linear heating rate.
"""
a = E/(self.R)
b = inf
c = sup
J = a*(sp.expi(-a/c)-sp.expi(-a/b)) + c*np.exp(-a/c) - b*np.exp(-a/b)
return J
#-----------------------------------------------------------------------------------------------------------
def J_time(self, E, row_i, col_i, T0, method = 'trapezoid'):
"""
Time integral for the Advanced Vyazovkin Treatment. Considering a linear heating rate.
Prameters: E : Float object. Value for the activation energy to evaluate the
integral
row_i : Index value for the row of conversion in the pandas DataFrame
containing the isoconversional times for evenly spaced conversion
values.
col_i : Index value for the column of heating rate in the pandas DataFrame
containing the isoconversional times for evenly spaced conversion
values.
T0 : Float. Initial temperature. Must be that corresponding to the
experimental heating rate B.
method : Numerical integration method. Can be 'trapezoid', 'simpson' or 'quad'.
The method correspond to those implemented in the scipy.integrate
subpackage.
Returns: J_t : Float. Value of the integral obtained by a numerical integration method.
"""
timeAdvIsoDF = self.timeAdvIsoDF
B = self.Beta[col_i]
t0 = timeAdvIsoDF[timeAdvIsoDF.columns[col_i]][timeAdvIsoDF.index.values[row_i]]
t = timeAdvIsoDF[timeAdvIsoDF.columns[col_i]][timeAdvIsoDF.index.values[row_i+1]]
y0 = np.exp(-E/(self.R*(T0+B*t0)))
y = np.exp(-E/(self.R*(T0+B*t)))
if method == 'trapezoid':
J_t = integrate.trapezoid(y=[y0,y],x=[t0,t])
return J_t
elif method == 'simpson':
J_t = integrate.simpson(y=[y0,y],x=[t0,t])
return J_t
elif method == 'quad':
def time_int(t,T0,B,E):
return np.exp(-E/(self.R*(T0+B*t)))
J_t = integrate.quad(time_int,t0,t,args=(T0,B,E))[0]
return J_t
else:
raise ValueError('method not recognized')
#-----------------------------------------------------------------------------------------------------------
def adv_omega(self,E, row, var = 'time', method='trapezoid'):
"""
Function to minimize according to the advanced Vyazovkin treatment:
\Omega(Ea) = \sum_{i}^{n}\sum_{j}^{n-1}{[{J(E,T(t_{i}))]}/[B_{i}{J(E,T(t_{j}))}]}
Parameters: E : Float object. Value for the activation energy to evaluate
the integral
row : Index value for the row of conversion in the pandas DataFrame
containing the isoconversional times for evenly spaced conversion
values.
var : The variable to perform the integral with, it can be either 'time'
or 'Temperature'
method : Numerical integration method. Can be 'trapezoid', 'simpson' or
'quad'. The method correspond to those implemented in the
scipy.integrate subpackage.
Returns: O : Float. Value of the advanced omega function for a given E.
"""
TempAdvIsoDF = self.TempAdvIsoDF
timeAdvIsoDF = self.timeAdvIsoDF
Beta = self.Beta
j = row
if var == 'Temperature':
I_x = np.array([self.J_Temp(E,
TempAdvIsoDF[TempAdvIsoDF.columns[i]][TempAdvIsoDF.index[j]],
TempAdvIsoDF[TempAdvIsoDF.columns[i]][TempAdvIsoDF.index[j+1]])
for i in range(len(TempAdvIsoDF.columns))])
I_B = I_x/Beta
omega_i = np.array([I_B[k]*((np.sum(1/(I_B)))-(1/I_B[k])) for k in range(len(Beta))])
O = np.array(np.sum((omega_i)))
return O
elif var == 'time':
I_B = np.array([self.J_time(E,
row,
i,
TempAdvIsoDF.iloc[0][i],
method)
for i in range(len(timeAdvIsoDF.columns))])
omega_i = np.array([I_B[k]*((np.sum(1/(I_B)))-(1/I_B[k])) for k in range(len(Beta))])
O = np.array(np.sum((omega_i)))
return O
#-----------------------------------------------------------------------------------------------------------
def visualize_advomega(self,row,var='time',bounds=(1,300),N=1000, method='trapezoid'):
"""
Method to visualize adv_omega function.
Parameters: row : Index value for the row of conversion in the pandas DataFrame
containing the isoconversional times or temperatures.
var : The variable to perform the integral with, it can be either
'time' or 'Temperature'. Default 'time'.
bounds : Tuple object containing the lower limit and the upper limit values
of E, for evaluating adv_omega. Default (1,300).
N : Int. Number of points in the E array for the plot. Default 1000.
method : Numerical integration method. Can be 'trapezoid', 'simpson'
or 'quad'. The method correspond to those implemented in
the scipy.integrate subpackage. Default 'trapezoid'.
Returns: A matplotlib plot of adv_omega vs E
"""
TempAdvIsoDF = self.TempAdvIsoDF
timeAdvIsoDF = self.timeAdvIsoDF
Beta = self.Beta
E = np.linspace(bounds[0], bounds[1], N)
O = np.array([float(self.adv_omega(E[i],row,var,method)) for i in range(len(E))])
plt.style.use('seaborn')
plt.plot(E,O,color='teal',label=r'$\alpha$ = '+str(np.round(timeAdvIsoDF.index[row],decimals=3)))
plt.ylabel(r'$\Omega\left(E_{\alpha}\right)$')
plt.xlabel(r'$E_{\alpha}$')
plt.legend()
return plt.show()
#-----------------------------------------------------------------------------------------------------------
def aVy(self,bounds, var='time', method='trapezoid'):
"""
Method to compute the Activation Energy based on the Advanced Vyazovkin treatment.
Parameters: bounds : Tuple object containing the lower limit and the upper
limit values of E, for evaluating omega.
T : List object containing the experimental temperatures.
Must be those corresponding to the experimental heating
rate.
var : The variable to perform the integral with, it can be either
'time' or 'Temperature'
method : Numerical integration method. Can be 'trapezoid', 'simpson'
or 'quad'. The method correspond to those implemented in
the scipy.integrate subpackage. Default 'trapezoid'.
Returns: E_Vy : numpy array containing the activation energy values
obtained by the Vyazovkin method.
"""
TempAdvIsoDF = self.TempAdvIsoDF
timeAdvIsoDF = self.timeAdvIsoDF
Beta = self.Beta
E_aVy = [minimize_scalar(self.adv_omega,bounds=bounds,args=(k,var,method), method = 'bounded').x
for k in range(len(timeAdvIsoDF.index)-1)]
self.E_aVy = np.array(E_aVy)
return self.E_aVy
#-----------------------------------------------------------------------------------------------------------
def variance_aVy(self, E, row_i, var = 'time', method = 'trapezoid'):
"""
Method to calculate the variance of the activation energy E obtained with the Vyazovkin
treatment. The variance is computed as:
S^{2}(E) = {1}/{n(n-1)}\sum_{i}^{n}\sum_{j}^{n-1}{[{J(E,T(t_{i}))]}/[{J(E,T(t_{j}))}]-1}^{2}
Parameters: E : The activation energy value used to calculate
the value of omega.
row_i : index value for the row of conversion in the
pandas DataFrame containing the isoconversional
temperatures.
var : The variable to perform the integral with, it can be either
'time' or 'Temperature'
method : Numerical integration method. Can be 'trapezoid', 'simpson'
or 'quad'. The method correspond to those implemented in
the scipy.integrate subpackage. Default 'trapezoid'.
Returns: Float object. Value of the variance associated to a given E.
--------------------------------------------------------------------------------------------
Reference: <NAME>., & <NAME>. (2000). Estimating realistic confidence intervals
for the activation energy determined from thermoanalytical measurements.
Analytical chemistry, 72(14), 3171-3175.
"""
N = len(self.Beta)*(len(self.Beta)-1)
if var == 'time':
inf = self.timeAdvIsoDF.index.values[row_i]
sup = self.timeAdvIsoDF.index.values[row_i+1]
T0 = self.TempIsoDF.iloc[0]
J = np.array([self.J_time(E, row_i, i, T0[i], method) for i in range(len(self.Beta))])
s = np.array([J[i]/J for i in range(len(J))])
return np.sum((s-1)**2)/N
elif var == 'Temperature':
inf = self.TempAdvIsoDF.index.values[row_i]
sup = self.TempAdvIsoDF.index.values[row_i+1]
J = [self.J_Temp(E,
self.TempAdvIsoDF[self.TempAdvIsoDF.columns[i]][inf],
self.TempAdvIsoDF[self.TempAdvIsoDF.columns[i]][sup])
for i in range(len(self.Beta))]
s = np.array([J[i]/J for i in range(len(J))])
return np.sum((s-1)**2)/N
else:
raise ValueError('variable not valid')
#-----------------------------------------------------------------------------------------------------------
def psi_aVy(self, E, row_i, bounds, var = 'time', method = 'trapezoid'):
"""
Method to calculate the F distribution to minimize for the Vyazovkin method.
The distribution is computed as:
\Psi(E) = S^{2}(E)/S^{2}_{min}
Parameters: E : The activation energy value used to calculate
the value of omega.
row_i : index value for the row of conversion in the
pandas DataFrame containing the isoconversional
temperatures.
bounds : Tuple object containing the lower and upper limit values
for E, to evaluate the variance.
var : The variable to perform the integral with, it can be either
'time' or 'Temperature'
method : Numerical integration method. Can be 'trapezoid', 'simpson'
or 'quad'. The method correspond to those implemented in
the scipy.integrate subpackage. Default 'trapezoid'.
Returns: Psi : Float. Value of the distribution function that sets the lower
and upper confidence limits for E.
--------------------------------------------------------------------------------------------
Reference: <NAME>., & <NAME>. (2000). Estimating realistic confidence intervals
for the activation energy determined from thermoanalytical measurements.
Analytical chemistry, 72(14), 3171-3175.
"""
F = [161.4, 19, 9.277, 6.388, 5.050, 4.284, 3.787, 3.438, 3.179]
f = F[len(self.Beta)-2]
var = var
method = method
E_min = minimize_scalar(self.variance_aVy,
bounds=bounds,
args=(row_i,
var,
method),
method = 'bounded').x
s_min = self.variance_aVy(E_min, row_i, var, method)
s = self.variance_aVy(E, row_i, var, method)
return (s/s_min) - (f+1)
#-----------------------------------------------------------------------------------------------------------
def er_aVy(self, E, row_i, bounds, var = 'time', method = 'trapezoid'):
"""
Method to compute the error associated to a given activation energy value obtained
by the vyazovkin method.
Parameters: E : The activation energy value used to calculate
the value of omega.
row_i : index value for the row of conversion in the
pandas DataFrame containing the isoconversional
temperatures.
bounds : Tuple object containing the lower and upper limit values
for E, to evaluate adv_omega.
var : The variable to perform the integral with, it can be either
'time' or 'Temperature'
method : Numerical integration method. Can be 'trapezoid', 'simpson'
or 'quad'. The method correspond to those implemented in
the scipy.integrate subpackage. Default 'trapezoid'.
Returns: error : Float. Value of the error associated to a given E.
"""
var = var
method = method
E_p = np.linspace(5,80,50)
P = np.array([self.psi_aVy(E_p[i],row_i, bounds, var, method) for i in range(len(E_p))])
inter_func = interp1d(E_p,
P,
kind='cubic',
bounds_error=False,
fill_value="extrapolate")
zeros = np.array([fsolve(inter_func, E-150)[0],
fsolve(inter_func, E+150)[0]])
error = np.mean(np.array([abs(E-zeros[0]), abs(E-zeros[1])]))
return error
#-----------------------------------------------------------------------------------------------------------
def error_aVy(self, bounds, var = 'time', method = 'trapezoid'):
"""
Method to calculate the distribution to minimize for the Vyazovkin method.
Parameters: bounds : Tuple object containing the lower and upper limit values
for E, to evaluate adv_omega.
var : The variable to perform the integral with, it can be either
'time' or 'Temperature'
method : Numerical integration method. Can be 'trapezoid', 'simpson'
or 'quad'. The method correspond to those implemented in
the scipy.integrate subpackage. Default 'trapezoid'.
Returns: error_aVy : Array of error values associated to the array of activation
energies obtained by the Vyazovkin method.
"""
bounds = bounds
var = var
method = method
error_aVy = np.array([self.er_aVy(self.E_aVy[i], i, bounds, var=var, method=method) for i in range(len(self.E_aVy))])
self.error_aVy = error_aVy
return self.error_aVy
#-----------------------------------------------------------------------------------------------------------
def prediction(self, E = None, B = 1, T0 = 298.15, Tf=1298.15):
"""
Method to calculate a kinetic prediction, based on an isoconversional
activation energy
Parameters: E : numpy array of the activation energy values to use for
the prediction.
B : Float. Value of the heating rate for the prediction.
T0 : Float. Initial temperature, in Kelvin, for the prediction.
Tf : Float. Final temperature, in Kelvin, for the prediction.
Returns: a : numpy array containing the predicted conversion values.
T : numpy array cppntaining the temperature values corresponding
to the predicted conversion.
t : numpy array cppntaining the time values corresponding to the
predicted conversion.
"""
b = np.exp(self.Fr_b)
a_pred = [0]
T = np.linspace(T0,Tf,len(b))
t = (T-T0)/B
dt = t[1]-t[0]
for i in range(len(b)-1):
a = a_pred[i] + b[i]*np.exp(-(E[i]/(self.R*(T0+B*t[i]))))*dt
a_pred.append(a)
a_pred = np.array(a_pred)
self.a_pred = a_pred
return (self.a_pred, T, t)
| [
"scipy.stats.linregress",
"matplotlib.pyplot.ylabel",
"numpy.log",
"scipy.interpolate.interp1d",
"scipy.integrate.trapezoid",
"numpy.array",
"pandas.ExcelWriter",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"numpy.exp",
"scipy.integrate.simpson",
"numpy.linspac... | [((1360, 1374), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1372, 1374), True, 'import pandas as pd\n'), ((1445, 1459), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1457, 1459), True, 'import pandas as pd\n'), ((1523, 1537), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1535, 1537), True, 'import pandas as pd\n'), ((1612, 1626), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1624, 1626), True, 'import pandas as pd\n'), ((1706, 1720), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1718, 1720), True, 'import pandas as pd\n'), ((8289, 8303), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8301, 8303), True, 'import pandas as pd\n'), ((8327, 8341), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8339, 8341), True, 'import pandas as pd\n'), ((8366, 8380), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8378, 8380), True, 'import pandas as pd\n'), ((8908, 8927), 'numpy.array', 'np.array', (['alpha[-1]'], {}), '(alpha[-1])\n', (8916, 8927), True, 'import numpy as np\n'), ((9005, 9032), 'numpy.round', 'np.round', (['T[-1]'], {'decimals': '(4)'}), '(T[-1], decimals=4)\n', (9013, 9032), True, 'import numpy as np\n'), ((9111, 9138), 'numpy.round', 'np.round', (['t[-1]'], {'decimals': '(4)'}), '(t[-1], decimals=4)\n', (9119, 9138), True, 'import numpy as np\n'), ((9225, 9256), 'numpy.round', 'np.round', (['da_dt[-1]'], {'decimals': '(4)'}), '(da_dt[-1], decimals=4)\n', (9233, 9256), True, 'import numpy as np\n'), ((13783, 13797), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (13795, 13797), True, 'import pandas as pd\n'), ((13823, 13837), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (13835, 13837), True, 'import pandas as pd\n'), ((21229, 21262), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': 'columns'}), '([], columns=columns)\n', (21241, 21262), True, 'import pandas as pd\n'), ((23597, 23607), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23605, 
23607), True, 'import matplotlib.pyplot as plt\n'), ((24318, 24328), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24326, 24328), True, 'import matplotlib.pyplot as plt\n'), ((25025, 25035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25033, 25035), True, 'import matplotlib.pyplot as plt\n'), ((25739, 25749), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25747, 25749), True, 'import matplotlib.pyplot as plt\n'), ((27640, 27652), 'numpy.log', 'np.log', (['Beta'], {}), '(Beta)\n', (27646, 27652), True, 'import numpy as np\n'), ((29394, 29408), 'numpy.array', 'np.array', (['E_Fr'], {}), '(E_Fr)\n', (29402, 29408), True, 'import numpy as np\n'), ((29431, 29449), 'numpy.array', 'np.array', (['E_Fr_err'], {}), '(E_Fr_err)\n', (29439, 29449), True, 'import numpy as np\n'), ((29472, 29486), 'numpy.array', 'np.array', (['Fr_b'], {}), '(Fr_b)\n', (29480, 29486), True, 'import numpy as np\n'), ((30811, 30826), 'numpy.array', 'np.array', (['E_OFW'], {}), '(E_OFW)\n', (30819, 30826), True, 'import numpy as np\n'), ((30850, 30869), 'numpy.array', 'np.array', (['E_OFW_err'], {}), '(E_OFW_err)\n', (30858, 30869), True, 'import numpy as np\n'), ((32240, 32255), 'numpy.array', 'np.array', (['E_KAS'], {}), '(E_KAS)\n', (32248, 32255), True, 'import numpy as np\n'), ((32279, 32298), 'numpy.array', 'np.array', (['E_KAS_err'], {}), '(E_KAS_err)\n', (32287, 32298), True, 'import numpy as np\n'), ((37337, 37373), 'numpy.linspace', 'np.linspace', (['bounds[0]', 'bounds[1]', 'N'], {}), '(bounds[0], bounds[1], N)\n', (37348, 37373), True, 'import numpy as np\n'), ((37474, 37498), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (37487, 37498), True, 'import matplotlib.pyplot as plt\n'), ((37614, 37663), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Omega\\\\left(E_{\\\\alpha}\\\\right)$"""'], {}), "('$\\\\Omega\\\\left(E_{\\\\alpha}\\\\right)$')\n", (37624, 37663), True, 'import matplotlib.pyplot as plt\n'), 
((37669, 37696), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$E_{\\\\alpha}$"""'], {}), "('$E_{\\\\alpha}$')\n", (37679, 37696), True, 'import matplotlib.pyplot as plt\n'), ((37705, 37717), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (37715, 37717), True, 'import matplotlib.pyplot as plt\n'), ((37734, 37744), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37742, 37744), True, 'import matplotlib.pyplot as plt\n'), ((39035, 39049), 'numpy.array', 'np.array', (['E_Vy'], {}), '(E_Vy)\n', (39043, 39049), True, 'import numpy as np\n'), ((46621, 46643), 'numpy.linspace', 'np.linspace', (['(5)', '(80)', '(50)'], {}), '(5, 80, 50)\n', (46632, 46643), True, 'import numpy as np\n'), ((46763, 46839), 'scipy.interpolate.interp1d', 'interp1d', (['E_p', 'P'], {'kind': '"""cubic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(E_p, P, kind='cubic', bounds_error=False, fill_value='extrapolate')\n", (46771, 46839), False, 'from scipy.interpolate import interp1d\n'), ((50990, 51027), 'numpy.exp', 'np.exp', (['(-E / (self.R * (T0 + B * t0)))'], {}), '(-E / (self.R * (T0 + B * t0)))\n', (50996, 51027), True, 'import numpy as np\n'), ((51033, 51069), 'numpy.exp', 'np.exp', (['(-E / (self.R * (T0 + B * t)))'], {}), '(-E / (self.R * (T0 + B * t)))\n', (51039, 51069), True, 'import numpy as np\n'), ((55443, 55479), 'numpy.linspace', 'np.linspace', (['bounds[0]', 'bounds[1]', 'N'], {}), '(bounds[0], bounds[1], N)\n', (55454, 55479), True, 'import numpy as np\n'), ((55578, 55602), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (55591, 55602), True, 'import matplotlib.pyplot as plt\n'), ((55717, 55766), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Omega\\\\left(E_{\\\\alpha}\\\\right)$"""'], {}), "('$\\\\Omega\\\\left(E_{\\\\alpha}\\\\right)$')\n", (55727, 55766), True, 'import matplotlib.pyplot as plt\n'), ((55772, 55799), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""$E_{\\\\alpha}$"""'], {}), "('$E_{\\\\alpha}$')\n", (55782, 55799), True, 'import matplotlib.pyplot as plt\n'), ((55808, 55820), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (55818, 55820), True, 'import matplotlib.pyplot as plt\n'), ((55841, 55851), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (55849, 55851), True, 'import matplotlib.pyplot as plt\n'), ((57398, 57413), 'numpy.array', 'np.array', (['E_aVy'], {}), '(E_aVy)\n', (57406, 57413), True, 'import numpy as np\n'), ((64123, 64145), 'numpy.linspace', 'np.linspace', (['(5)', '(80)', '(50)'], {}), '(5, 80, 50)\n', (64134, 64145), True, 'import numpy as np\n'), ((64271, 64347), 'scipy.interpolate.interp1d', 'interp1d', (['E_p', 'P'], {'kind': '"""cubic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(E_p, P, kind='cubic', bounds_error=False, fill_value='extrapolate')\n", (64279, 64347), False, 'from scipy.interpolate import interp1d\n'), ((67089, 67106), 'numpy.exp', 'np.exp', (['self.Fr_b'], {}), '(self.Fr_b)\n', (67095, 67106), True, 'import numpy as np\n'), ((67385, 67401), 'numpy.array', 'np.array', (['a_pred'], {}), '(a_pred)\n', (67393, 67401), True, 'import numpy as np\n'), ((4843, 4891), 'scipy.stats.linregress', 'linregress', (['DF[DF.columns[0]]', 'DF[DF.columns[3]]'], {}), '(DF[DF.columns[0]], DF[DF.columns[3]])\n', (4853, 4891), False, 'from scipy.stats import linregress\n'), ((9338, 9427), 'scipy.interpolate.interp1d', 'interp1d', (['alpha[i]', 't[i]'], {'kind': '"""cubic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(alpha[i], t[i], kind='cubic', bounds_error=False, fill_value=\n 'extrapolate')\n", (9346, 9427), False, 'from scipy.interpolate import interp1d\n'), ((9721, 9810), 'scipy.interpolate.interp1d', 'interp1d', (['alpha[i]', 'T[i]'], {'kind': '"""cubic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(alpha[i], T[i], kind='cubic', bounds_error=False, fill_value=\n 'extrapolate')\n", (9729, 
9810), False, 'from scipy.interpolate import interp1d\n'), ((10110, 10203), 'scipy.interpolate.interp1d', 'interp1d', (['alpha[i]', 'da_dt[i]'], {'kind': '"""cubic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(alpha[i], da_dt[i], kind='cubic', bounds_error=False, fill_value=\n 'extrapolate')\n", (10118, 10203), False, 'from scipy.interpolate import interp1d\n'), ((14409, 14466), 'numpy.linspace', 'np.linspace', (['alpha[-1][0]', 'alpha[-1][-1]', 'N'], {'retstep': '(True)'}), '(alpha[-1][0], alpha[-1][-1], N, retstep=True)\n', (14420, 14466), True, 'import numpy as np\n'), ((14691, 14780), 'scipy.interpolate.interp1d', 'interp1d', (['alpha[i]', 'T[i]'], {'kind': '"""cubic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(alpha[i], T[i], kind='cubic', bounds_error=False, fill_value=\n 'extrapolate')\n", (14699, 14780), False, 'from scipy.interpolate import interp1d\n'), ((15030, 15119), 'scipy.interpolate.interp1d', 'interp1d', (['alpha[i]', 't[i]'], {'kind': '"""cubic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(alpha[i], t[i], kind='cubic', bounds_error=False, fill_value=\n 'extrapolate')\n", (15038, 15119), False, 'from scipy.interpolate import interp1d\n'), ((20566, 20634), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': "['time [min]', 'Temperature [K]', 'da_dt']"}), "([], columns=['time [min]', 'Temperature [K]', 'da_dt'])\n", (20578, 20634), True, 'import pandas as pd\n'), ((20943, 20956), 'numpy.any', 'np.any', (['E_OFW'], {}), '(E_OFW)\n', (20949, 20956), True, 'import numpy as np\n'), ((21009, 21022), 'numpy.any', 'np.any', (['E_KAS'], {}), '(E_KAS)\n', (21015, 21022), True, 'import numpy as np\n'), ((21075, 21087), 'numpy.any', 'np.any', (['E_Vy'], {}), '(E_Vy)\n', (21081, 21087), True, 'import numpy as np\n'), ((21146, 21159), 'numpy.any', 'np.any', (['E_aVy'], {}), '(E_aVy)\n', (21152, 21159), True, 'import numpy as np\n'), ((23471, 23507), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['self.DFlis[i].columns[3]'], {}), '(self.DFlis[i].columns[3])\n', (23481, 23507), True, 'import matplotlib.pyplot as plt\n'), ((23520, 23556), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.DFlis[i].columns[4]'], {}), '(self.DFlis[i].columns[4])\n', (23530, 23556), True, 'import matplotlib.pyplot as plt\n'), ((23569, 23581), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (23579, 23581), True, 'import matplotlib.pyplot as plt\n'), ((24192, 24228), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['self.DFlis[i].columns[3]'], {}), '(self.DFlis[i].columns[3])\n', (24202, 24228), True, 'import matplotlib.pyplot as plt\n'), ((24241, 24277), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.DFlis[i].columns[5]'], {}), '(self.DFlis[i].columns[5])\n', (24251, 24277), True, 'import matplotlib.pyplot as plt\n'), ((24290, 24302), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (24300, 24302), True, 'import matplotlib.pyplot as plt\n'), ((24899, 24935), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['self.DFlis[i].columns[0]'], {}), '(self.DFlis[i].columns[0])\n', (24909, 24935), True, 'import matplotlib.pyplot as plt\n'), ((24948, 24984), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.DFlis[i].columns[4]'], {}), '(self.DFlis[i].columns[4])\n', (24958, 24984), True, 'import matplotlib.pyplot as plt\n'), ((24997, 25009), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (25007, 25009), True, 'import matplotlib.pyplot as plt\n'), ((25613, 25649), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['self.DFlis[i].columns[0]'], {}), '(self.DFlis[i].columns[0])\n', (25623, 25649), True, 'import matplotlib.pyplot as plt\n'), ((25662, 25698), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.DFlis[i].columns[5]'], {}), '(self.DFlis[i].columns[5])\n', (25672, 25698), True, 'import matplotlib.pyplot as plt\n'), ((25711, 25723), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (25721, 25723), True, 'import matplotlib.pyplot as plt\n'), 
((28977, 29009), 'numpy.log', 'np.log', (['diffIsoDF.iloc[i].values'], {}), '(diffIsoDF.iloc[i].values)\n', (28983, 29009), True, 'import numpy as np\n'), ((29165, 29181), 'scipy.stats.linregress', 'linregress', (['x', 'y'], {}), '(x, y)\n', (29175, 29181), False, 'from scipy.stats import linregress\n'), ((30605, 30621), 'scipy.stats.linregress', 'linregress', (['x', 'y'], {}), '(x, y)\n', (30615, 30621), False, 'from scipy.stats import linregress\n'), ((32046, 32062), 'scipy.stats.linregress', 'linregress', (['x', 'y'], {}), '(x, y)\n', (32056, 32062), False, 'from scipy.stats import linregress\n'), ((34359, 34385), 'numpy.exp', 'np.exp', (['(-E / (self.R * x0))'], {}), '(-E / (self.R * x0))\n', (34365, 34385), True, 'import numpy as np\n'), ((34426, 34452), 'numpy.exp', 'np.exp', (['(-E / (self.R * xf))'], {}), '(-E / (self.R * xf))\n', (34432, 34452), True, 'import numpy as np\n'), ((34655, 34668), 'numpy.array', 'np.array', (['tpz'], {}), '(tpz)\n', (34663, 34668), True, 'import numpy as np\n'), ((35123, 35137), 'numpy.array', 'np.array', (['quad'], {}), '(quad)\n', (35131, 35137), True, 'import numpy as np\n'), ((35411, 35426), 'numpy.sum', 'np.sum', (['omega_i'], {}), '(omega_i)\n', (35417, 35426), True, 'import numpy as np\n'), ((41328, 41356), 'numpy.exp', 'np.exp', (['(-E / (0.008314 * x0))'], {}), '(-E / (0.008314 * x0))\n', (41334, 41356), True, 'import numpy as np\n'), ((41397, 41425), 'numpy.exp', 'np.exp', (['(-E / (0.008314 * xf))'], {}), '(-E / (0.008314 * xf))\n', (41403, 41425), True, 'import numpy as np\n'), ((41601, 41614), 'numpy.array', 'np.array', (['tpz'], {}), '(tpz)\n', (41609, 41614), True, 'import numpy as np\n'), ((42006, 42020), 'numpy.array', 'np.array', (['quad'], {}), '(quad)\n', (42014, 42020), True, 'import numpy as np\n'), ((44749, 44841), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['self.variance_Vy'], {'bounds': 'bounds', 'args': '(row_i, method)', 'method': '"""bounded"""'}), "(self.variance_Vy, bounds=bounds, 
args=(row_i, method),\n method='bounded')\n", (44764, 44841), False, 'from scipy.optimize import minimize_scalar\n'), ((51127, 51168), 'scipy.integrate.trapezoid', 'integrate.trapezoid', ([], {'y': '[y0, y]', 'x': '[t0, t]'}), '(y=[y0, y], x=[t0, t])\n', (51146, 51168), False, 'from scipy import integrate\n'), ((62256, 62354), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['self.variance_aVy'], {'bounds': 'bounds', 'args': '(row_i, var, method)', 'method': '"""bounded"""'}), "(self.variance_aVy, bounds=bounds, args=(row_i, var, method),\n method='bounded')\n", (62271, 62354), False, 'from scipy.optimize import minimize_scalar\n'), ((3861, 3909), 'pandas.read_table', 'pd.read_table', (['item'], {'sep': '"""\t"""', 'encoding': 'encoding'}), "(item, sep='\\t', encoding=encoding)\n", (3874, 3909), True, 'import pandas as pd\n'), ((4135, 4222), 'derivative.dxdt', 'derivative.dxdt', (["DF['$\\\\alpha$']", 'DF[DF.columns[0]]'], {'kind': '"""spline"""', 's': '(0.01)', 'order': '(5)'}), "(DF['$\\\\alpha$'], DF[DF.columns[0]], kind='spline', s=0.01,\n order=5)\n", (4150, 4222), False, 'import derivative\n'), ((6027, 6038), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (6035, 6038), True, 'import numpy as np\n'), ((6061, 6075), 'numpy.array', 'np.array', (['Temp'], {}), '(Temp)\n', (6069, 6075), True, 'import numpy as np\n'), ((6098, 6112), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (6106, 6112), True, 'import numpy as np\n'), ((6139, 6153), 'numpy.array', 'np.array', (['diff'], {}), '(diff)\n', (6147, 6153), True, 'import numpy as np\n'), ((14518, 14561), 'numpy.arange', 'np.arange', (['alpha[-1][0]', 'alpha[-1][-1]', 'd_a'], {}), '(alpha[-1][0], alpha[-1][-1], d_a)\n', (14527, 14561), True, 'import numpy as np\n'), ((21740, 21762), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['nombre'], {}), '(nombre)\n', (21754, 21762), True, 'import pandas as pd\n'), ((31850, 31890), 'numpy.log', 'np.log', (['(TempIsoDF.iloc[i].values ** 1.92)'], {}), 
'(TempIsoDF.iloc[i].values ** 1.92)\n', (31856, 31890), True, 'import numpy as np\n'), ((34799, 34824), 'numpy.exp', 'np.exp', (['(-E / (self.R * x))'], {}), '(-E / (self.R * x))\n', (34805, 34824), True, 'import numpy as np\n'), ((35670, 35685), 'numpy.sum', 'np.sum', (['omega_i'], {}), '(omega_i)\n', (35676, 35685), True, 'import numpy as np\n'), ((41444, 41495), 'scipy.integrate.trapezoid', 'integrate.trapezoid', (['[y0[i], yf[i]]', '[x0[i], xf[i]]'], {}), '([y0[i], yf[i]], [x0[i], xf[i]])\n', (41463, 41495), False, 'from scipy import integrate\n'), ((41712, 41739), 'numpy.exp', 'np.exp', (['(-E / (0.008314 * x))'], {}), '(-E / (0.008314 * x))\n', (41718, 41739), True, 'import numpy as np\n'), ((42197, 42217), 'numpy.sum', 'np.sum', (['((s - 1) ** 2)'], {}), '((s - 1) ** 2)\n', (42203, 42217), True, 'import numpy as np\n'), ((49280, 49294), 'numpy.exp', 'np.exp', (['(-a / b)'], {}), '(-a / b)\n', (49286, 49294), True, 'import numpy as np\n'), ((51254, 51293), 'scipy.integrate.simpson', 'integrate.simpson', ([], {'y': '[y0, y]', 'x': '[t0, t]'}), '(y=[y0, y], x=[t0, t])\n', (51271, 51293), False, 'from scipy import integrate\n'), ((53499, 53514), 'numpy.sum', 'np.sum', (['omega_i'], {}), '(omega_i)\n', (53505, 53514), True, 'import numpy as np\n'), ((57218, 57309), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['self.adv_omega'], {'bounds': 'bounds', 'args': '(k, var, method)', 'method': '"""bounded"""'}), "(self.adv_omega, bounds=bounds, args=(k, var, method),\n method='bounded')\n", (57233, 57309), False, 'from scipy.optimize import minimize_scalar\n'), ((59585, 59605), 'numpy.sum', 'np.sum', (['((s - 1) ** 2)'], {}), '((s - 1) ** 2)\n', (59591, 59605), True, 'import numpy as np\n'), ((4370, 4417), 'pandas.read_table', 'pd.read_table', (['item'], {'sep': '""","""', 'encoding': 'encoding'}), "(item, sep=',', encoding=encoding)\n", (4383, 4417), True, 'import pandas as pd\n'), ((4643, 4730), 'derivative.dxdt', 'derivative.dxdt', (["DF['$\\\\alpha$']", 
'DF[DF.columns[0]]'], {'kind': '"""spline"""', 's': '(0.01)', 'order': '(5)'}), "(DF['$\\\\alpha$'], DF[DF.columns[0]], kind='spline', s=0.01,\n order=5)\n", (4658, 4730), False, 'import derivative\n'), ((22361, 22416), 'pandas.DataFrame', 'pd.DataFrame', (["{'t': t[i], 'T': T[i], 'da_dt': da_dt[i]}"], {}), "({'t': t[i], 'T': T[i], 'da_dt': da_dt[i]})\n", (22373, 22416), True, 'import pandas as pd\n'), ((29092, 29108), 'scipy.stats.t.ppf', 't.ppf', (['(p / 2)', 'df'], {}), '(p / 2, df)\n', (29097, 29108), False, 'from scipy.stats import t\n'), ((30532, 30548), 'scipy.stats.t.ppf', 't.ppf', (['(p / 2)', 'df'], {}), '(p / 2, df)\n', (30537, 30548), False, 'from scipy.stats import t\n'), ((31973, 31989), 'scipy.stats.t.ppf', 't.ppf', (['(p / 2)', 'df'], {}), '(p / 2, df)\n', (31978, 31989), False, 'from scipy.stats import t\n'), ((34182, 34192), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (34188, 34192), True, 'import numpy as np\n'), ((34537, 34588), 'scipy.integrate.trapezoid', 'integrate.trapezoid', (['[y0[i], yf[i]]', '[x0[i], xf[i]]'], {}), '([y0[i], yf[i]], [x0[i], xf[i]])\n', (34556, 34588), False, 'from scipy import integrate\n'), ((35919, 35934), 'numpy.sum', 'np.sum', (['omega_i'], {}), '(omega_i)\n', (35925, 35934), True, 'import numpy as np\n'), ((38927, 39016), 'scipy.optimize.minimize_scalar', 'minimize_scalar', (['self.omega'], {'args': '(k, Beta, method)', 'bounds': 'bounds', 'method': '"""bounded"""'}), "(self.omega, args=(k, Beta, method), bounds=bounds, method=\n 'bounded')\n", (38942, 39016), False, 'from scipy.optimize import minimize_scalar\n'), ((41187, 41197), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (41193, 41197), True, 'import numpy as np\n'), ((41773, 41818), 'scipy.integrate.quad', 'integrate.quad', (['integral', 'T0[i]', 'T[i]'], {'args': 'E'}), '(integral, T0[i], T[i], args=E)\n', (41787, 41818), False, 'from scipy import integrate\n'), ((42384, 42404), 'numpy.sum', 'np.sum', (['((s - 1) ** 2)'], {}), '((s - 1) ** 2)\n', 
(42390, 42404), True, 'import numpy as np\n'), ((49263, 49277), 'numpy.exp', 'np.exp', (['(-a / c)'], {}), '(-a / c)\n', (49269, 49277), True, 'import numpy as np\n'), ((54011, 54026), 'numpy.sum', 'np.sum', (['omega_i'], {}), '(omega_i)\n', (54017, 54026), True, 'import numpy as np\n'), ((60140, 60160), 'numpy.sum', 'np.sum', (['((s - 1) ** 2)'], {}), '((s - 1) ** 2)\n', (60146, 60160), True, 'import numpy as np\n'), ((8957, 8987), 'numpy.round', 'np.round', (['Beta[-1]'], {'decimals': '(1)'}), '(Beta[-1], decimals=1)\n', (8965, 8987), True, 'import numpy as np\n'), ((9063, 9093), 'numpy.round', 'np.round', (['Beta[-1]'], {'decimals': '(1)'}), '(Beta[-1], decimals=1)\n', (9071, 9093), True, 'import numpy as np\n'), ((9177, 9207), 'numpy.round', 'np.round', (['Beta[-1]'], {'decimals': '(1)'}), '(Beta[-1], decimals=1)\n', (9185, 9207), True, 'import numpy as np\n'), ((34923, 34968), 'scipy.integrate.quad', 'integrate.quad', (['integral', 'T0[i]', 'T[i]'], {'args': 'E'}), '(integral, T0[i], T[i], args=E)\n', (34937, 34968), False, 'from scipy import integrate\n'), ((35329, 35344), 'numpy.sum', 'np.sum', (['(1 / p_B)'], {}), '(1 / p_B)\n', (35335, 35344), True, 'import numpy as np\n'), ((37558, 37596), 'numpy.round', 'np.round', (['IsoDF.index[row]'], {'decimals': '(3)'}), '(IsoDF.index[row], decimals=3)\n', (37566, 37596), True, 'import numpy as np\n'), ((42561, 42581), 'numpy.sum', 'np.sum', (['((s - 1) ** 2)'], {}), '((s - 1) ** 2)\n', (42567, 42581), True, 'import numpy as np\n'), ((49230, 49245), 'scipy.special.expi', 'sp.expi', (['(-a / c)'], {}), '(-a / c)\n', (49237, 49245), True, 'import scipy.special as sp\n'), ((49244, 49259), 'scipy.special.expi', 'sp.expi', (['(-a / b)'], {}), '(-a / b)\n', (49251, 49259), True, 'import scipy.special as sp\n'), ((51417, 51453), 'numpy.exp', 'np.exp', (['(-E / (self.R * (T0 + B * t)))'], {}), '(-E / (self.R * (T0 + B * t)))\n', (51423, 51453), True, 'import numpy as np\n'), ((51477, 51525), 'scipy.integrate.quad', 
'integrate.quad', (['time_int', 't0', 't'], {'args': '(T0, B, E)'}), '(time_int, t0, t, args=(T0, B, E))\n', (51491, 51525), False, 'from scipy import integrate\n'), ((55662, 55707), 'numpy.round', 'np.round', (['timeAdvIsoDF.index[row]'], {'decimals': '(3)'}), '(timeAdvIsoDF.index[row], decimals=3)\n', (55670, 55707), True, 'import numpy as np\n'), ((67293, 67337), 'numpy.exp', 'np.exp', (['(-(E[i] / (self.R * (T0 + B * t[i]))))'], {}), '(-(E[i] / (self.R * (T0 + B * t[i]))))\n', (67299, 67337), True, 'import numpy as np\n'), ((9594, 9623), 'numpy.round', 'np.round', (['Beta[i]'], {'decimals': '(1)'}), '(Beta[i], decimals=1)\n', (9602, 9623), True, 'import numpy as np\n'), ((9982, 10011), 'numpy.round', 'np.round', (['Beta[i]'], {'decimals': '(1)'}), '(Beta[i], decimals=1)\n', (9990, 10011), True, 'import numpy as np\n'), ((10375, 10404), 'numpy.round', 'np.round', (['Beta[i]'], {'decimals': '(1)'}), '(Beta[i], decimals=1)\n', (10383, 10404), True, 'import numpy as np\n'), ((14915, 14944), 'numpy.round', 'np.round', (['Beta[i]'], {'decimals': '(1)'}), '(Beta[i], decimals=1)\n', (14923, 14944), True, 'import numpy as np\n'), ((15257, 15286), 'numpy.round', 'np.round', (['Beta[i]'], {'decimals': '(1)'}), '(Beta[i], decimals=1)\n', (15265, 15286), True, 'import numpy as np\n'), ((23414, 23448), 'numpy.round', 'np.round', (['self.Beta[i]'], {'decimals': '(1)'}), '(self.Beta[i], decimals=1)\n', (23422, 23448), True, 'import numpy as np\n'), ((24135, 24169), 'numpy.round', 'np.round', (['self.Beta[i]'], {'decimals': '(1)'}), '(self.Beta[i], decimals=1)\n', (24143, 24169), True, 'import numpy as np\n'), ((24842, 24876), 'numpy.round', 'np.round', (['self.Beta[i]'], {'decimals': '(1)'}), '(self.Beta[i], decimals=1)\n', (24850, 24876), True, 'import numpy as np\n'), ((25556, 25590), 'numpy.round', 'np.round', (['self.Beta[i]'], {'decimals': '(1)'}), '(self.Beta[i], decimals=1)\n', (25564, 25590), True, 'import numpy as np\n'), ((35588, 35603), 'numpy.sum', 'np.sum', (['(1 
/ p_B)'], {}), '(1 / p_B)\n', (35594, 35603), True, 'import numpy as np\n'), ((53417, 53432), 'numpy.sum', 'np.sum', (['(1 / I_B)'], {}), '(1 / I_B)\n', (53423, 53432), True, 'import numpy as np\n'), ((35837, 35852), 'numpy.sum', 'np.sum', (['(1 / p_B)'], {}), '(1 / p_B)\n', (35843, 35852), True, 'import numpy as np\n'), ((53929, 53944), 'numpy.sum', 'np.sum', (['(1 / I_B)'], {}), '(1 / I_B)\n', (53935, 53944), True, 'import numpy as np\n'), ((21934, 21963), 'numpy.round', 'np.round', (['Beta[i]'], {'decimals': '(1)'}), '(Beta[i], decimals=1)\n', (21942, 21963), True, 'import numpy as np\n')] |
##!/usr/bin/python
# Plot the energy column (index 4) of several trajectory runs against time
# (column 0), overlay them for comparison, and save the figure as err.pdf.
import numpy as np
import pylab as pl
#with open("traj.dat") as f:
#    data = f.read()
#
#    data = data.split('\n')
#
#    x = [row.split(' ')[0] for row in data]
#    y = [row.split(' ')[1] for row in data]
#
#    fig = plt.figure()
#
#    ax1 = fig.add_subplot(111)
#
#    ax1.set_title("Plot title...")    
#    ax1.set_xlabel('your x label..')
#    ax1.set_ylabel('your y label...')
#
#    ax1.plot(x,y, c='r', label='the data')
#
#    leg = ax1.legend()
#fig = plt.figure()
# Load energy tables: columns are expected to be time, ..., energy (col 4).
# NOTE(review): column meanings inferred from usage below — confirm against
# the code that writes en.dat.
data0 = np.genfromtxt(fname='dat2/en.dat')
data = np.genfromtxt(fname='dat3/en.dat')
dat = np.genfromtxt(fname='../1.0.1/dat1/en.dat')
dat1 = np.genfromtxt(fname='../1.0.2/dat4/en.dat')
#data1 = np.genfromtxt(fname='err.dat')
#data2 = np.genfromtxt(fname='../2d8/err.dat')
#dat1 = np.genfromtxt(fname='../2d7/en.dat')
#dat2 = np.genfromtxt(fname='../2d7/err.dat')
#data = np.loadtxt('traj.dat')
#for x in range(1,10):
pl.subplot(111)
pl.xlim(0,4)
#pl.title('$\gamma = 0.5$, $V(x,y)=x^2/2+x^4/4+y^2/2+y^4/4+\gamma xy$')
pl.ylabel('Energy [hartree]')
#pl.plot(data[:,0],data[:,2],'b-',linewidth=2,label='Potential')
#pl.plot(data[:,0],data[:,3],'g-',linewidth=2,label='Quantum Potential')
# Energy (column 4) versus time (column 0) for each run.
pl.plot(data[:,0],data[:,4],'k-',linewidth=2,label='One-step')
#pl.plot(data0[:,0],data0[:,2],'b--',linewidth=2,label='Potential')
#pl.plot(data0[:,0],data0[:,3],'g--',linewidth=2,label='Quantum Potential')
pl.plot(data0[:,0],data0[:,4],'g-.',linewidth=2,label='three steps')
pl.plot(dat[:,0],dat[:,4],'g-.',linewidth=2,label='9600, 12, 0.75')
pl.plot(dat1[:,0],dat1[:,4],'g-.',linewidth=2,label='9600, 12, 0.8')
#pl.plot(dat1[:,0],dat1[:,4],'r--',linewidth=2,label='two steps')
#pl.legend(bbox_to_anchor=(0.5, 0.38, 0.42, .302), loc=3,ncol=1, mode="expand", borderaxespad=0.)
pl.legend()
#pl.subplot(212)
#pl.xlim(0,4)
#pl.xlabel('time [a.u.]')
#pl.plot(data1[:,0],data1[:,1],'r-',linewidth=2,label='2s err($r_x$)')
##pl.plot(data1[:,0],data1[:,2],'g-',linewidth=2,label='2s err($r_y$)')
#
#pl.plot(data2[:,0],data2[:,1],'r--',linewidth=2,label='err($r_x$)')
##pl.plot(data2[:,0],data2[:,2],'g--',linewidth=2,label='err($r_y$)')
##pl.plot(dat2[:,0],dat2[:,1],'k--',linewidth=2,label='3s err($r_x$)')
# Second legend call overrides the placement of the first one (loc=1: upper right).
pl.legend(loc=1)
pl.savefig('err.pdf')
pl.show()
| [
"pylab.subplot",
"pylab.plot",
"pylab.savefig",
"pylab.legend",
"pylab.xlim",
"numpy.genfromtxt",
"pylab.ylabel",
"pylab.show"
] | [((511, 545), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': '"""dat2/en.dat"""'}), "(fname='dat2/en.dat')\n", (524, 545), True, 'import numpy as np\n'), ((554, 588), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': '"""dat3/en.dat"""'}), "(fname='dat3/en.dat')\n", (567, 588), True, 'import numpy as np\n'), ((595, 638), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': '"""../1.0.1/dat1/en.dat"""'}), "(fname='../1.0.1/dat1/en.dat')\n", (608, 638), True, 'import numpy as np\n'), ((646, 689), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': '"""../1.0.2/dat4/en.dat"""'}), "(fname='../1.0.2/dat4/en.dat')\n", (659, 689), True, 'import numpy as np\n'), ((923, 938), 'pylab.subplot', 'pl.subplot', (['(111)'], {}), '(111)\n', (933, 938), True, 'import pylab as pl\n'), ((939, 952), 'pylab.xlim', 'pl.xlim', (['(0)', '(4)'], {}), '(0, 4)\n', (946, 952), True, 'import pylab as pl\n'), ((1024, 1053), 'pylab.ylabel', 'pl.ylabel', (['"""Energy [hartree]"""'], {}), "('Energy [hartree]')\n", (1033, 1053), True, 'import pylab as pl\n'), ((1192, 1260), 'pylab.plot', 'pl.plot', (['data[:, 0]', 'data[:, 4]', '"""k-"""'], {'linewidth': '(2)', 'label': '"""One-step"""'}), "(data[:, 0], data[:, 4], 'k-', linewidth=2, label='One-step')\n", (1199, 1260), True, 'import pylab as pl\n'), ((1399, 1473), 'pylab.plot', 'pl.plot', (['data0[:, 0]', 'data0[:, 4]', '"""g-."""'], {'linewidth': '(2)', 'label': '"""three steps"""'}), "(data0[:, 0], data0[:, 4], 'g-.', linewidth=2, label='three steps')\n", (1406, 1473), True, 'import pylab as pl\n'), ((1468, 1541), 'pylab.plot', 'pl.plot', (['dat[:, 0]', 'dat[:, 4]', '"""g-."""'], {'linewidth': '(2)', 'label': '"""9600, 12, 0.75"""'}), "(dat[:, 0], dat[:, 4], 'g-.', linewidth=2, label='9600, 12, 0.75')\n", (1475, 1541), True, 'import pylab as pl\n'), ((1536, 1610), 'pylab.plot', 'pl.plot', (['dat1[:, 0]', 'dat1[:, 4]', '"""g-."""'], {'linewidth': '(2)', 'label': '"""9600, 12, 0.8"""'}), "(dat1[:, 0], dat1[:, 4], 'g-.', linewidth=2, 
label='9600, 12, 0.8')\n", (1543, 1610), True, 'import pylab as pl\n'), ((1770, 1781), 'pylab.legend', 'pl.legend', ([], {}), '()\n', (1779, 1781), True, 'import pylab as pl\n'), ((2195, 2211), 'pylab.legend', 'pl.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (2204, 2211), True, 'import pylab as pl\n'), ((2212, 2233), 'pylab.savefig', 'pl.savefig', (['"""err.pdf"""'], {}), "('err.pdf')\n", (2222, 2233), True, 'import pylab as pl\n'), ((2234, 2243), 'pylab.show', 'pl.show', ([], {}), '()\n', (2241, 2243), True, 'import pylab as pl\n')] |
import math
import os
from typing import Tuple, List, Dict
import torch
import sys
import json
import h5py
import numpy as np
import time
def cur_time():
    """Return the current local time formatted as 'YYYY,Mon,DD,<locale time>'."""
    timestamp_format = '%Y,%b,%d,%X'
    return time.strftime(timestamp_format)
def log_important(message, log_file):
    """Echo *message* plus a timestamp to stdout and append the same to *log_file*."""
    print(message, cur_time())
    with open(log_file, 'a') as handle:
        print(message, cur_time(), file=handle)
def extract_deps_from_weights_file(file_path):
    """Return the 'deps' entry stored in an HDF5 weights file, or None if absent."""
    weights = read_hdf5(file_path)
    return weights.get('deps', None)
def representsInt(s):
    """Return True when *s* parses as a base-10 integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def read_hdf5(file_path):
    """Load every top-level dataset of an HDF5 file into a dict of numpy arrays.

    Keys that look like integers become int keys; otherwise the key string is
    used with '+' mapped back to '/' (the inverse of save_hdf5's escaping).

    Args:
        file_path: path to an existing HDF5 file.

    Returns:
        dict mapping int or str keys to numpy arrays.
    """
    result = {}
    with h5py.File(file_path, 'r') as f:
        for k in f.keys():
            value = np.asarray(f[k])
            if representsInt(k):
                result[int(k)] = value
            else:
                # '/' is a group separator in HDF5 names; save_hdf5 stored it as '+'
                result[str(k).replace('+', '/')] = value
    # fix: the explicit f.close() was redundant — the context manager already
    # closes the file when the with-block exits.
    print('read {} arrays from {}'.format(len(result), file_path))
    return result
def save_hdf5(numpy_dict, file_path):
    """Write a dict of arrays to an HDF5 file, one dataset per key.

    '/' in keys is escaped as '+' because '/' is the HDF5 group separator;
    read_hdf5 reverses this mapping.

    Args:
        numpy_dict: mapping of keys to array-like values.
        file_path: destination path (overwritten if it exists).
    """
    with h5py.File(file_path, 'w') as f:
        for k, v in numpy_dict.items():
            f.create_dataset(str(k).replace('/', '+'), data=v)
    # fix: removed the trailing f.close() — the file is already closed by the
    # context manager at this point, so the extra call was dead code.
    print('saved {} arrays to {}'.format(len(numpy_dict), file_path))
def start_exp():
    """Parse the ``--try_arg`` command-line flag, report GPU status, return it."""
    import argparse
    cli = argparse.ArgumentParser()
    cli.add_argument("--try_arg", type=str, default='')
    try_arg = cli.parse_args().try_arg
    print('the try_arg is ', try_arg)
    n_devices = torch.cuda.device_count()
    allocated = torch.cuda.memory_allocated()
    print('we have {} torch devices'.format(n_devices),
          'the allocated GPU memory is {}'.format(allocated))
    return try_arg
def torch_accuracy(output, target, topk=(1,)) -> List[torch.Tensor]:
    """Compute top-k accuracies (in percent) for a batch of class scores.

    Args:
        output: (batch, classes) tensor of scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        One scalar tensor per k, each holding the accuracy in [0, 100].
    """
    k_max = max(topk)
    batch = output.size(0)
    _, top_idx = output.topk(k_max, 1, True, True)
    top_idx = top_idx.t()
    # hits[j, b] is True when the (j+1)-th ranked prediction matches target b.
    hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
    return [hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / batch)
            for k in topk]
class AvgMeter(object):
    """Running-mean accumulator for scalar metrics (e.g. batch losses)."""

    name = 'No name'

    def __init__(self, name='No name', fmt=':.2f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Discard all accumulated statistics."""
        self.sum = 0
        self.mean = 0
        self.num = 0
        self.now = 0

    def update(self, mean_var, count=1):
        """Fold *count* observations with mean *mean_var* into the running mean."""
        if math.isnan(mean_var):
            # Clamp NaNs to a large sentinel so one bad batch cannot poison
            # the whole running average.
            mean_var = 1e6
            print('Avgmeter getting Nan!')
        self.now = mean_var
        self.num += count
        self.sum += mean_var * count
        self.mean = float(self.sum) / self.num

    def __str__(self):
        template = self.name + '-{' + self.fmt + '}'
        return template.format(self.mean)
def save_args(args, save_dir=None):
    """Serialize an argparse-style namespace to a params.json file.

    The file is placed in *save_dir* when given, otherwise in ``args.resume``.
    """
    target_dir = args.resume if save_dir is None else save_dir
    param_path = os.path.join(target_dir, 'params.json')
    with open(param_path, 'w') as fp:
        json.dump(args.__dict__, fp, indent=4, sort_keys=True)
def mkdir(path):
    """Create directory *path* (non-recursive) unless it already exists."""
    if os.path.exists(path):
        return
    print('creating dir {}'.format(path))
    os.mkdir(path)
# def save_checkpoint(cur_iters, net, optimizer, lr_scheduler, file_name):
# checkpoint = {'cur_iters': cur_iters,
# 'state_dict': net.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# 'lr_scheduler_state_dict':lr_scheduler.state_dict()}
# if os.path.exists(file_name):
# print('Overwriting {}'.format(file_name))
# torch.save(checkpoint, file_name)
# link_name = os.path.join('/', *file_name.split(os.path.sep)[:-1], 'last.checkpoint')
# #print(link_name)
# make_symlink(source = file_name, link_name=link_name)
def load_checkpoint(file_name, net=None, optimizer=None, lr_scheduler=None):
    """Restore training state from a checkpoint file.

    Each of *net*, *optimizer*, *lr_scheduler* that is not None has its
    state_dict restored in place.

    Returns:
        The saved iteration counter, or None when *file_name* does not exist.
    """
    if not os.path.isfile(file_name):
        print("=> no checkpoint found at '{}'".format(file_name))
        return None
    print("=> loading checkpoint '{}'".format(file_name))
    check_point = torch.load(file_name)
    if net is not None:
        print('Loading network state dict')
        net.load_state_dict(check_point['state_dict'])
    if optimizer is not None:
        print('Loading optimizer state dict')
        optimizer.load_state_dict(check_point['optimizer_state_dict'])
    if lr_scheduler is not None:
        print('Loading lr_scheduler state dict')
        lr_scheduler.load_state_dict(check_point['lr_scheduler_state_dict'])
    return check_point['cur_iters']
def make_symlink(source, link_name):
    """Create (or replace) a symlink at *link_name* pointing to *source*.

    Note: overwriting enabled! Any existing entry at *link_name* — including a
    broken symlink — is removed first. Nothing is created when *source* does
    not exist.
    """
    # fix: use lexists instead of exists — exists() follows the link and
    # reports False for a *broken* symlink, so the old code failed to remove
    # it and os.symlink then raised FileExistsError.
    if os.path.lexists(link_name):
        os.remove(link_name)
    if os.path.exists(source):
        os.symlink(source, link_name)
    else:
        print('Source path not exists')
def add_path(path):
    """Append *path* to sys.path unless it is already present."""
    if path in sys.path:
        return
    print('Adding {}'.format(path))
    sys.path.append(path)
def format_metric_dict_to_line(metric_dict):
    """Render {name: value} metrics as a single 'name=0.12345,' line."""
    return ''.join('{}={:.5f},'.format(name, value)
                   for name, value in metric_dict.items())
| [
"os.path.exists",
"argparse.ArgumentParser",
"torch.cuda.memory_allocated",
"torch.load",
"time.strftime",
"os.path.join",
"os.symlink",
"numpy.asarray",
"h5py.File",
"os.path.isfile",
"os.remove",
"torch.cuda.device_count",
"os.mkdir",
"sys.path.append",
"json.dump",
"math.isnan"
] | [((166, 194), 'time.strftime', 'time.strftime', (['"""%Y,%b,%d,%X"""'], {}), "('%Y,%b,%d,%X')\n", (179, 194), False, 'import time\n'), ((1343, 1368), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1366, 1368), False, 'import argparse\n'), ((4324, 4349), 'os.path.isfile', 'os.path.isfile', (['file_name'], {}), '(file_name)\n', (4338, 4349), False, 'import os\n'), ((5132, 5157), 'os.path.exists', 'os.path.exists', (['link_name'], {}), '(link_name)\n', (5146, 5157), False, 'import os\n'), ((5286, 5308), 'os.path.exists', 'os.path.exists', (['source'], {}), '(source)\n', (5300, 5308), False, 'import os\n'), ((685, 710), 'h5py.File', 'h5py.File', (['file_path', '"""r"""'], {}), "(file_path, 'r')\n", (694, 710), False, 'import h5py\n'), ((1074, 1099), 'h5py.File', 'h5py.File', (['file_path', '"""w"""'], {}), "(file_path, 'w')\n", (1083, 1099), False, 'import h5py\n'), ((2742, 2762), 'math.isnan', 'math.isnan', (['mean_var'], {}), '(mean_var)\n', (2752, 2762), False, 'import math\n'), ((3179, 3219), 'os.path.join', 'os.path.join', (['args.resume', '"""params.json"""'], {}), "(args.resume, 'params.json')\n", (3191, 3219), False, 'import os\n'), ((3251, 3288), 'os.path.join', 'os.path.join', (['save_dir', '"""params.json"""'], {}), "(save_dir, 'params.json')\n", (3263, 3288), False, 'import os\n'), ((3441, 3495), 'json.dump', 'json.dump', (['args.__dict__', 'fp'], {'indent': '(4)', 'sort_keys': '(True)'}), '(args.__dict__, fp, indent=4, sort_keys=True)\n', (3450, 3495), False, 'import json\n'), ((3526, 3546), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3540, 3546), False, 'import os\n'), ((3602, 3616), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (3610, 3616), False, 'import os\n'), ((4435, 4456), 'torch.load', 'torch.load', (['file_name'], {}), '(file_name)\n', (4445, 4456), False, 'import torch\n'), ((5258, 5278), 'os.remove', 'os.remove', (['link_name'], {}), '(link_name)\n', (5267, 5278), False, 'import os\n'), 
((5318, 5347), 'os.symlink', 'os.symlink', (['source', 'link_name'], {}), '(source, link_name)\n', (5328, 5347), False, 'import os\n'), ((5540, 5561), 'sys.path.append', 'sys.path.append', (['path'], {}), '(path)\n', (5555, 5561), False, 'import sys\n'), ((764, 780), 'numpy.asarray', 'np.asarray', (['f[k]'], {}), '(f[k])\n', (774, 780), True, 'import numpy as np\n'), ((1568, 1593), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1591, 1593), False, 'import torch\n'), ((1646, 1675), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {}), '()\n', (1673, 1675), False, 'import torch\n')] |
import torch
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.functional as F
import numpy as np
import os
from tqdm import tqdm
from harvester import HardestNegativeTripletSelector, AllTripletSelector
from utils import compute_eer
class TrainLoop(object):
    """Training/validation driver for an embedding model trained with a
    combined cross-entropy + triplet-margin loss (minus an entropy
    regularizer), with LR scheduling on validation error and checkpointing.

    History keys tracked per epoch/batch: train_loss, triplet_loss, ce_loss
    (and their *_batch variants), ErrorRate, and EER.
    """

    def __init__(self, model, optimizer, train_loader, valid_loader, margin, lambda_, patience, verbose=-1, cp_name=None, save_cp=False, checkpoint_path=None, checkpoint_epoch=None, swap=False, cuda=True):
        # Resolve where checkpoints are written; created on demand.
        if checkpoint_path is None:
            # Save to current directory
            self.checkpoint_path = os.getcwd()
        else:
            self.checkpoint_path = checkpoint_path
            if not os.path.isdir(self.checkpoint_path):
                os.mkdir(self.checkpoint_path)
        # Either a fixed file name or a per-epoch template ('checkpoint_{}ep.pt').
        self.save_epoch_fmt = os.path.join(self.checkpoint_path, cp_name) if cp_name else os.path.join(self.checkpoint_path, 'checkpoint_{}ep.pt')
        self.cuda_mode = cuda
        self.model = model
        self.optimizer = optimizer
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.history = {'train_loss': [], 'train_loss_batch': [], 'triplet_loss': [], 'triplet_loss_batch': [], 'ce_loss': [], 'ce_loss_batch': [],'ErrorRate': [], 'EER': []}
        # Halve the LR when validation error plateaus for `patience` epochs.
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, factor=0.5, patience=patience, verbose=True if verbose>0 else False, threshold=1e-4, min_lr=1e-8)
        self.total_iters = 0
        self.cur_epoch = 0
        self.lambda_ = lambda_
        self.swap = swap
        self.margin = margin
        # NOTE(review): the harvester margin is hard-coded to 0.1, independent
        # of the `margin` used by the triplet loss below — confirm intended.
        self.harvester = HardestNegativeTripletSelector(margin=0.1, cpu=not self.cuda_mode)
        self.harvester_val = AllTripletSelector()
        self.verbose = verbose
        self.save_cp = save_cp
        # Infer the device from the model's own parameters.
        self.device = next(self.model.parameters()).device

        if checkpoint_epoch is not None:
            self.load_checkpoint(self.save_epoch_fmt.format(checkpoint_epoch))

    def train(self, n_epochs=1, save_every=1):
        """Run the train/validate loop for up to *n_epochs* epochs.

        Returns the best (minimum) validation error rate observed.
        """

        while self.cur_epoch < n_epochs:

            np.random.seed()
            if self.verbose>0:
                print(' ')
                print('Epoch {}/{}'.format(self.cur_epoch+1, n_epochs))
                train_iter = tqdm(enumerate(self.train_loader))
            else:
                train_iter = enumerate(self.train_loader)

            ce=0.0
            triplet_loss=0.0
            train_loss=0.0

            # Train step
            for t, batch in train_iter:
                ce_batch, triplet_loss_batch = self.train_step(batch)
                ce += ce_batch
                triplet_loss += triplet_loss_batch
                train_loss += ce_batch + triplet_loss_batch
                self.history['train_loss_batch'].append(ce_batch + triplet_loss_batch)
                self.history['triplet_loss_batch'].append(triplet_loss_batch)
                self.history['ce_loss_batch'].append(ce_batch)
                self.total_iters += 1

            # `t` is the last batch index from the loop above, so t+1 is the
            # number of batches processed this epoch.
            self.history['train_loss'].append(train_loss/(t+1))
            self.history['triplet_loss'].append(triplet_loss/(t+1))
            self.history['ce_loss'].append(ce/(t+1))

            if self.verbose>0:
                print(' ')
                print('Total train loss, Triplet loss, and Cross-entropy: {:0.4f}, {:0.4f}, {:0.4f}'.format(self.history['train_loss'][-1], self.history['triplet_loss'][-1], self.history['ce_loss'][-1]))

            # Validation
            tot_correct = 0
            tot_ = 0
            scores, labels = None, None
            for t, batch in enumerate(self.valid_loader):
                correct, total, scores_batch, labels_batch = self.valid(batch)
                # First iteration: scores/labels are None, so np.concatenate
                # raises and the except branch seeds the accumulators instead.
                try:
                    scores = np.concatenate([scores, scores_batch], 0)
                    labels = np.concatenate([labels, labels_batch], 0)
                except:
                    scores, labels = scores_batch, labels_batch
                tot_correct += correct
                tot_ += total

            self.history['EER'].append(compute_eer(labels, scores))
            self.history['ErrorRate'].append(1.-float(tot_correct)/tot_)

            if self.verbose>0:
                print(' ')
                print('Current, best validation error rate, and epoch: {:0.4f}, {:0.4f}, {}'.format(self.history['ErrorRate'][-1], np.min(self.history['ErrorRate']), 1+np.argmin(self.history['ErrorRate'])))
                print(' ')
                print('Current, best validation EER, and epoch: {:0.4f}, {:0.4f}, {}'.format(self.history['EER'][-1], np.min(self.history['EER']), 1+np.argmin(self.history['EER'])))

            # Scheduler steps on validation error rate (lower is better).
            self.scheduler.step(self.history['ErrorRate'][-1])

            if self.verbose>0:
                print(' ')
                print('Current LR: {}'.format(self.optimizer.param_groups[0]['lr']))

            # Checkpoint periodically, or whenever either validation metric
            # improved over all previous epochs ([np.inf] guards epoch 0).
            if self.save_cp and (self.cur_epoch % save_every == 0 or (self.history['ErrorRate'][-1] < np.min([np.inf]+self.history['ErrorRate'][:-1])) or (self.history['EER'][-1] < np.min([np.inf]+self.history['EER'][:-1]))):
                self.checkpointing()

            self.cur_epoch += 1

        if self.verbose>0:
            print('Training done!')

            if self.valid_loader is not None:
                print('Best error rate and corresponding epoch: {:0.4f}, {}'.format(np.min(self.history['ErrorRate']), 1+np.argmin(self.history['ErrorRate'])))
                print('Best EER and corresponding epoch: {:0.4f}, {}'.format(np.min(self.history['EER']), 1+np.argmin(self.history['EER'])))

        return np.min(self.history['ErrorRate'])

    def train_step(self, batch):
        """One optimization step; returns (cross-entropy, triplet) loss values."""
        self.model.train()
        self.optimizer.zero_grad()
        x, y = batch
        if self.cuda_mode:
            x = x.to(self.device)
            y = y.to(self.device)
        embeddings = self.model.forward(x)
        # L2-normalize so distances/similarities live on the unit hypersphere.
        embeddings_norm = F.normalize(embeddings, p=2, dim=1)
        loss_class = torch.nn.CrossEntropyLoss()(self.model.out_proj(embeddings_norm, y), y)
        # Hardest-negative mining on detached embeddings (no grad through mining).
        triplets_idx, entropy_indices = self.harvester.get_triplets(embeddings_norm.detach(), y)
        if self.cuda_mode:
            triplets_idx = triplets_idx.to(self.device)
        emb_a = torch.index_select(embeddings_norm, 0, triplets_idx[:, 0])
        emb_p = torch.index_select(embeddings_norm, 0, triplets_idx[:, 1])
        emb_n = torch.index_select(embeddings_norm, 0, triplets_idx[:, 2])
        loss_metric = self.triplet_loss(emb_a, emb_p, emb_n)
        loss = loss_class + loss_metric
        # Entropy regularizer: mean pairwise distance to the selected
        # embeddings, subtracted (weighted by lambda_) to spread embeddings out.
        entropy_regularizer = torch.nn.functional.pairwise_distance(embeddings_norm, embeddings_norm[entropy_indices,:]).mean()
        loss -= entropy_regularizer*self.lambda_
        loss.backward()
        self.optimizer.step()
        return loss_class.item(), loss_metric.item()

    def valid(self, batch):
        """Evaluate one batch; returns (correct, batch_size, scores, labels).

        Scores are cosine similarities of positive then negative pairs, with
        matching 1/0 labels, for downstream EER computation.
        """
        self.model.eval()
        x, y = batch
        if self.cuda_mode:
            x = x.to(self.device)
            y = y.to(self.device)

        with torch.no_grad():
            embeddings = self.model.forward(x)
            embeddings_norm = F.normalize(embeddings, p=2, dim=1)
            out=self.model.out_proj(embeddings_norm, y)
            pred = F.softmax(out, dim=1).max(1)[1].long()
            correct = pred.squeeze().eq(y.squeeze()).detach().sum().item()
            # All valid triplets (not just hardest) for unbiased scoring.
            triplets_idx = self.harvester_val.get_triplets(embeddings, y)
            embeddings = embeddings.cpu()
            emb_a = torch.index_select(embeddings, 0, triplets_idx[:, 0])
            emb_p = torch.index_select(embeddings, 0, triplets_idx[:, 1])
            emb_n = torch.index_select(embeddings, 0, triplets_idx[:, 2])
            scores_p = F.cosine_similarity(emb_a, emb_p)
            scores_n = F.cosine_similarity(emb_a, emb_n)

        return correct, x.size(0), np.concatenate([scores_p.detach().cpu().numpy(), scores_n.detach().cpu().numpy()], 0), np.concatenate([np.ones(scores_p.size(0)), np.zeros(scores_n.size(0))], 0)

    def triplet_loss(self, emba, embp, embn, reduce_=True):
        """Triplet margin loss over (anchor, positive, negative) embeddings."""
        loss_ = torch.nn.TripletMarginLoss(margin=self.margin, p=2.0, eps=1e-06, swap=self.swap, reduction='mean' if reduce_ else 'none')(emba, embp, embn)
        return loss_

    def checkpointing(self):
        """Persist model/optimizer/scheduler state plus loop bookkeeping."""
        # Checkpointing
        if self.verbose>0:
            print(' ')
            print('Checkpointing...')
        ckpt = {'model_state': self.model.state_dict(),
        'optimizer_state': self.optimizer.state_dict(),
        'scheduler_state': self.scheduler.state_dict(),
        'history': self.history,
        'total_iters': self.total_iters,
        'cur_epoch': self.cur_epoch}
        # save_epoch_fmt may be a fixed name (no '{}'), in which case .format
        # raises nothing but a fixed cp_name path without a placeholder is
        # written directly by the fallback below.
        try:
            torch.save(ckpt, self.save_epoch_fmt.format(self.cur_epoch))
        except:
            torch.save(ckpt, self.save_epoch_fmt)

    def load_checkpoint(self, ckpt):
        """Restore a checkpoint produced by checkpointing() from path *ckpt*."""
        if os.path.isfile(ckpt):
            ckpt = torch.load(ckpt)
            # Load model state
            self.model.load_state_dict(ckpt['model_state'])
            # Load optimizer state
            self.optimizer.load_state_dict(ckpt['optimizer_state'])
            # Load scheduler state
            self.scheduler.load_state_dict(ckpt['scheduler_state'])
            # Load history
            self.history = ckpt['history']
            self.total_iters = ckpt['total_iters']
            self.cur_epoch = ckpt['cur_epoch']
        else:
            print('No checkpoint found at: {}'.format(ckpt))

    def print_grad_norms(self):
        """Debug helper: print the sum of L2 norms of all parameter gradients."""
        norm = 0.0
        for params in list(self.model.parameters()):
            norm+=params.grad.norm(2).data[0]
        print('Sum of grads norms: {}'.format(norm))

    def check_nans(self):
        """Debug helper: report NaNs found in parameters or their gradients."""
        for params in list(self.model.parameters()):
            if np.any(np.isnan(params.data.cpu().numpy())):
                print('params NANs!!!!!')
            if np.any(np.isnan(params.grad.data.cpu().numpy())):
                print('grads NANs!!!!!!')

    def initialize_params(self):
        """Kaiming-initialize conv weights; reset batch-norm scale/bias."""
        for layer in self.model.modules():
            if isinstance(layer, torch.nn.Conv2d):
                init.kaiming_normal(layer.weight.data)
            elif isinstance(layer, torch.nn.BatchNorm2d):
                layer.weight.data.fill_(1)
                layer.bias.data.zero_()
| [
"torch.nn.CrossEntropyLoss",
"utils.compute_eer",
"torch.nn.functional.pairwise_distance",
"torch.nn.functional.softmax",
"torch.nn.functional.cosine_similarity",
"harvester.HardestNegativeTripletSelector",
"os.path.isdir",
"numpy.random.seed",
"os.mkdir",
"numpy.min",
"numpy.concatenate",
"nu... | [((1202, 1372), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['self.optimizer'], {'factor': '(0.5)', 'patience': 'patience', 'verbose': '(True if verbose > 0 else False)', 'threshold': '(0.0001)', 'min_lr': '(1e-08)'}), '(self.optimizer, factor=0.5,\n patience=patience, verbose=True if verbose > 0 else False, threshold=\n 0.0001, min_lr=1e-08)\n', (1244, 1372), False, 'import torch\n'), ((1489, 1555), 'harvester.HardestNegativeTripletSelector', 'HardestNegativeTripletSelector', ([], {'margin': '(0.1)', 'cpu': '(not self.cuda_mode)'}), '(margin=0.1, cpu=not self.cuda_mode)\n', (1519, 1555), False, 'from harvester import HardestNegativeTripletSelector, AllTripletSelector\n'), ((1579, 1599), 'harvester.AllTripletSelector', 'AllTripletSelector', ([], {}), '()\n', (1597, 1599), False, 'from harvester import HardestNegativeTripletSelector, AllTripletSelector\n'), ((4761, 4794), 'numpy.min', 'np.min', (["self.history['ErrorRate']"], {}), "(self.history['ErrorRate'])\n", (4767, 4794), True, 'import numpy as np\n'), ((5025, 5060), 'torch.nn.functional.normalize', 'F.normalize', (['embeddings'], {'p': '(2)', 'dim': '(1)'}), '(embeddings, p=2, dim=1)\n', (5036, 5060), True, 'import torch.nn.functional as F\n'), ((5321, 5379), 'torch.index_select', 'torch.index_select', (['embeddings_norm', '(0)', 'triplets_idx[:, 0]'], {}), '(embeddings_norm, 0, triplets_idx[:, 0])\n', (5339, 5379), False, 'import torch\n'), ((5390, 5448), 'torch.index_select', 'torch.index_select', (['embeddings_norm', '(0)', 'triplets_idx[:, 1]'], {}), '(embeddings_norm, 0, triplets_idx[:, 1])\n', (5408, 5448), False, 'import torch\n'), ((5459, 5517), 'torch.index_select', 'torch.index_select', (['embeddings_norm', '(0)', 'triplets_idx[:, 2]'], {}), '(embeddings_norm, 0, triplets_idx[:, 2])\n', (5477, 5517), False, 'import torch\n'), ((7613, 7633), 'os.path.isfile', 'os.path.isfile', (['ckpt'], {}), '(ckpt)\n', (7627, 7633), False, 'import os\n'), 
((584, 595), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (593, 595), False, 'import os\n'), ((753, 796), 'os.path.join', 'os.path.join', (['self.checkpoint_path', 'cp_name'], {}), '(self.checkpoint_path, cp_name)\n', (765, 796), False, 'import os\n'), ((813, 869), 'os.path.join', 'os.path.join', (['self.checkpoint_path', '"""checkpoint_{}ep.pt"""'], {}), "(self.checkpoint_path, 'checkpoint_{}ep.pt')\n", (825, 869), False, 'import os\n'), ((1894, 1910), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (1908, 1910), True, 'import numpy as np\n'), ((5077, 5104), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (5102, 5104), False, 'import torch\n'), ((6010, 6025), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6023, 6025), False, 'import torch\n'), ((6087, 6122), 'torch.nn.functional.normalize', 'F.normalize', (['embeddings'], {'p': '(2)', 'dim': '(1)'}), '(embeddings, p=2, dim=1)\n', (6098, 6122), True, 'import torch.nn.functional as F\n'), ((6398, 6451), 'torch.index_select', 'torch.index_select', (['embeddings', '(0)', 'triplets_idx[:, 0]'], {}), '(embeddings, 0, triplets_idx[:, 0])\n', (6416, 6451), False, 'import torch\n'), ((6463, 6516), 'torch.index_select', 'torch.index_select', (['embeddings', '(0)', 'triplets_idx[:, 1]'], {}), '(embeddings, 0, triplets_idx[:, 1])\n', (6481, 6516), False, 'import torch\n'), ((6528, 6581), 'torch.index_select', 'torch.index_select', (['embeddings', '(0)', 'triplets_idx[:, 2]'], {}), '(embeddings, 0, triplets_idx[:, 2])\n', (6546, 6581), False, 'import torch\n'), ((6597, 6630), 'torch.nn.functional.cosine_similarity', 'F.cosine_similarity', (['emb_a', 'emb_p'], {}), '(emb_a, emb_p)\n', (6616, 6630), True, 'import torch.nn.functional as F\n'), ((6645, 6678), 'torch.nn.functional.cosine_similarity', 'F.cosine_similarity', (['emb_a', 'emb_n'], {}), '(emb_a, emb_n)\n', (6664, 6678), True, 'import torch.nn.functional as F\n'), ((6940, 7066), 'torch.nn.TripletMarginLoss', 
'torch.nn.TripletMarginLoss', ([], {'margin': 'self.margin', 'p': '(2.0)', 'eps': '(1e-06)', 'swap': 'self.swap', 'reduction': "('mean' if reduce_ else 'none')"}), "(margin=self.margin, p=2.0, eps=1e-06, swap=self.\n swap, reduction='mean' if reduce_ else 'none')\n", (6966, 7066), False, 'import torch\n'), ((7646, 7662), 'torch.load', 'torch.load', (['ckpt'], {}), '(ckpt)\n', (7656, 7662), False, 'import torch\n'), ((656, 691), 'os.path.isdir', 'os.path.isdir', (['self.checkpoint_path'], {}), '(self.checkpoint_path)\n', (669, 691), False, 'import os\n'), ((697, 727), 'os.mkdir', 'os.mkdir', (['self.checkpoint_path'], {}), '(self.checkpoint_path)\n', (705, 727), False, 'import os\n'), ((3442, 3469), 'utils.compute_eer', 'compute_eer', (['labels', 'scores'], {}), '(labels, scores)\n', (3453, 3469), False, 'from utils import compute_eer\n'), ((5634, 5730), 'torch.nn.functional.pairwise_distance', 'torch.nn.functional.pairwise_distance', (['embeddings_norm', 'embeddings_norm[entropy_indices, :]'], {}), '(embeddings_norm, embeddings_norm[\n entropy_indices, :])\n', (5671, 5730), False, 'import torch\n'), ((7534, 7571), 'torch.save', 'torch.save', (['ckpt', 'self.save_epoch_fmt'], {}), '(ckpt, self.save_epoch_fmt)\n', (7544, 7571), False, 'import torch\n'), ((8625, 8663), 'torch.nn.init.kaiming_normal', 'init.kaiming_normal', (['layer.weight.data'], {}), '(layer.weight.data)\n', (8644, 8663), True, 'import torch.nn.init as init\n'), ((3206, 3247), 'numpy.concatenate', 'np.concatenate', (['[scores, scores_batch]', '(0)'], {}), '([scores, scores_batch], 0)\n', (3220, 3247), True, 'import numpy as np\n'), ((3262, 3303), 'numpy.concatenate', 'np.concatenate', (['[labels, labels_batch]', '(0)'], {}), '([labels, labels_batch], 0)\n', (3276, 3303), True, 'import numpy as np\n'), ((3692, 3725), 'numpy.min', 'np.min', (["self.history['ErrorRate']"], {}), "(self.history['ErrorRate'])\n", (3698, 3725), True, 'import numpy as np\n'), ((3890, 3917), 'numpy.min', 'np.min', 
(["self.history['EER']"], {}), "(self.history['EER'])\n", (3896, 3917), True, 'import numpy as np\n'), ((4214, 4263), 'numpy.min', 'np.min', (["([np.inf] + self.history['ErrorRate'][:-1])"], {}), "([np.inf] + self.history['ErrorRate'][:-1])\n", (4220, 4263), True, 'import numpy as np\n'), ((4293, 4336), 'numpy.min', 'np.min', (["([np.inf] + self.history['EER'][:-1])"], {}), "([np.inf] + self.history['EER'][:-1])\n", (4299, 4336), True, 'import numpy as np\n'), ((4546, 4579), 'numpy.min', 'np.min', (["self.history['ErrorRate']"], {}), "(self.history['ErrorRate'])\n", (4552, 4579), True, 'import numpy as np\n'), ((4687, 4714), 'numpy.min', 'np.min', (["self.history['EER']"], {}), "(self.history['EER'])\n", (4693, 4714), True, 'import numpy as np\n'), ((3729, 3765), 'numpy.argmin', 'np.argmin', (["self.history['ErrorRate']"], {}), "(self.history['ErrorRate'])\n", (3738, 3765), True, 'import numpy as np\n'), ((3921, 3951), 'numpy.argmin', 'np.argmin', (["self.history['EER']"], {}), "(self.history['EER'])\n", (3930, 3951), True, 'import numpy as np\n'), ((4583, 4619), 'numpy.argmin', 'np.argmin', (["self.history['ErrorRate']"], {}), "(self.history['ErrorRate'])\n", (4592, 4619), True, 'import numpy as np\n'), ((4718, 4748), 'numpy.argmin', 'np.argmin', (["self.history['EER']"], {}), "(self.history['EER'])\n", (4727, 4748), True, 'import numpy as np\n'), ((6181, 6202), 'torch.nn.functional.softmax', 'F.softmax', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (6190, 6202), True, 'import torch.nn.functional as F\n')] |
"""Faster R-CNN
Pretrained Faster RCNN on Open Images V4 Dataset with 600 categories.
Object detection model trained on Open Images V4 with ImageNet pre-trained Inception Resnet V2 as image feature extractor.
Categories of interest from sun rgbd | possible category of Open Images Dataset
- bed | Bed
- table | Table
- sofa | Sofa bed
- chair | Chair
- toilet | Toilet
- desk | Desk
- dresser | Filing cabinet
- night_stand | Nightstand
- bookshelf | Bookcase
- bathtub | Bathtub
"""
import os
# For running inference on the TF-Hub module.
import tensorflow as tf
import tensorflow_hub as hub
# For downloading the image.
import matplotlib.pyplot as plt
import tempfile
from six.moves.urllib.request import urlopen
from six import BytesIO
# For drawing onto the image.
import numpy as np
from PIL import Image
from PIL import ImageColor
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageOps
# For filtering out objects of interest from 600 categories
from collections import defaultdict
# For measuring the inference time.
import time
# Print Tensorflow version
# Report the runtime environment once at import time.
print(tf.__version__)
# Check available GPU devices.
print(tf.test.is_built_with_cuda())
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
def display_image(image):
    """Show *image* in a fresh 10x8-inch matplotlib figure with the grid off."""
    figure = plt.figure(figsize=(10, 8))
    plt.grid(False)
    plt.imshow(image)
def download_and_resize_image(url, new_width=256, new_height=256, display=False):
    """Download an image from *url*, crop/resize it, and save it as a temp JPEG.

    Args:
        url: source image URL.
        new_width, new_height: target size in pixels.
        display: when True, also show the resized image.

    Returns:
        Path of the temporary JPEG file that was written.
    """
    _, filename = tempfile.mkstemp(suffix=".jpg")
    response = urlopen(url)
    image_data = response.read()
    image_data = BytesIO(image_data)
    pil_image = Image.open(image_data)
    # fix: Image.LANCZOS is the same resampling filter as the old
    # Image.ANTIALIAS alias, which was deprecated and removed in Pillow 10.
    pil_image = ImageOps.fit(pil_image, (new_width, new_height), Image.LANCZOS)
    pil_image_rgb = pil_image.convert("RGB")
    pil_image_rgb.save(filename, format="JPEG", quality=90)
    print("Image downloaded to %s." % filename)
    if display:
        display_image(pil_image)
    return filename
def draw_bounding_box_on_image(image, ymin, xmin, ymax, xmax, color, font, thickness=4, display_str_list=()):
    """
    Adds a bounding box to an image.

    The box corners (ymin, xmin, ymax, xmax) are normalized to [0, 1] and
    scaled to pixel coordinates here; the strings in display_str_list are
    drawn in filled rectangles near the box's top edge.
    """
    draw = ImageDraw.Draw(image)
    im_width, im_height = image.size
    # since bbox coordinates are normalized between 0 to 1
    (left, right, top, bottom) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
    draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=thickness, fill=color)

    # If the total height of the display strings added to the top of the bounding
    # box exceeds the top of the image, stack the strings below the bounding box
    # instead of above.
    # NOTE(review): font.getsize was removed in Pillow 10 (use getbbox/getlength)
    # — confirm the pinned Pillow version supports it.
    display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
    # Each display_str has a top and bottom margin of 0.05x.
    total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)

    if top > total_display_str_height:
        text_bottom = top
    else:
        text_bottom = top + total_display_str_height
    # Reverse list and print from bottom to top.
    for display_str in display_str_list[::-1]:
        text_width, text_height = font.getsize(display_str)
        margin = np.ceil(0.05 * text_height)
        # Filled background rectangle, then the label text on top of it.
        draw.rectangle([(left, text_bottom - text_height - 2 * margin), (left + text_width, text_bottom)], fill=color)
        draw.text((left + margin, text_bottom - text_height - margin), display_str, fill="black", font=font)
        text_bottom -= text_height - 2 * margin
def draw_boxes(image, boxes, class_names, scores, max_boxes=10, min_score=0.15):
    """Overlay labeled boxes on an image with formatted scores and label names."""
    palette = list(ImageColor.colormap.values())
    try:
        font = ImageFont.truetype("/usr/share/fonts/truetype/liberation/LiberationSansNarrow-Regular.ttf", 25)
    except IOError:
        print("Font not found, using default font.")
        font = ImageFont.load_default()
    n_boxes = min(boxes.shape[0], max_boxes)
    for idx in range(n_boxes):
        if scores[idx] < min_score:
            continue
        # Boxes are (ymin, xmin, ymax, xmax), normalized to [0, 1].
        ymin, xmin, ymax, xmax = tuple(boxes[idx])
        label = "{}: {}%".format(class_names[idx].decode("ascii"), int(100 * scores[idx]))
        box_color = palette[hash(class_names[idx]) % len(palette)]
        image_pil = Image.fromarray(np.uint8(image)).convert("RGB")
        draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, box_color, font, display_str_list=[label])
        # Write the annotated pixels back into the caller's array.
        np.copyto(image, np.array(image_pil))
    return image
def load_img(path):
    """Read the file at *path* and decode it as a 3-channel JPEG tensor."""
    raw = tf.io.read_file(path)
    return tf.image.decode_jpeg(raw, channels=3)
# Main function: runs a pretrained TF-Hub detector on the image at `path` and
# dumps the detections of interest to the text file at `filePath`.
def run_detector(detector, path, filePath):
    """Run `detector` on one image and write ImVoteNet-style labels.

    Parameters
    ----------
    detector : callable
        TF-Hub detector signature returning `detection_class_entities`,
        `detection_boxes` and `detection_scores`.
    path : str
        Path of the input image.
    filePath : str
        Path of the output label text file.
    """
    # Detected OpenImages entities of interest -> ImVoteNet class names.
    # BUGFIX: the original if/elif chain compared against 'Filing Cabinet'
    # (capital C), which never matched the detector's b'Filing cabinet', and
    # b'Kitchen & dining room table' had no branch at all — both cases reused
    # the previous iteration's class name (or raised NameError on the first).
    entity_to_class = {
        b"Bed": "bed",
        b"Kitchen & dining room table": "table",
        b"Table": "table",
        b"Sofa bed": "sofa",
        b"Chair": "chair",
        b"Toilet": "toilet",
        b"Filing cabinet": "dresser",
        b"Desk": "desk",
        b"Nightstand": "night_stand",
        b"Bookcase": "bookshelf",
        b"Bathtub": "bathtub",
    }
    img = load_img(path)
    image = img.numpy()
    image = Image.fromarray(np.uint8(image)).convert("RGB")
    im_width, im_height = image.size
    converted_img = tf.image.convert_image_dtype(img, tf.float32)[tf.newaxis, ...]
    start_time = time.time()
    result = detector(converted_img)
    end_time = time.time()
    result = {key: value.numpy() for key, value in result.items()}
    # Keep only the object categories of interest.
    filtered = defaultdict(list)
    for i in range(len(result["detection_class_entities"])):
        if result["detection_class_entities"][i] in entity_to_class:
            filtered["detection_class_entities"].append(result["detection_class_entities"][i])
            filtered["detection_boxes"].append(result["detection_boxes"][i])
            filtered["detection_scores"].append(result["detection_scores"][i])
    print("Found %d objects." % len(result["detection_scores"]))
    print("Inference time: ", end_time - start_time)
    # Save all detected objects in a local text file (as per ImVoteNet
    # requirements); `with` guarantees the file is closed on error.
    with open(filePath, mode='w') as currentFile:
        for i in range(len(filtered["detection_class_entities"])):
            xmin = filtered["detection_boxes"][i][0] * im_width
            xmax = filtered["detection_boxes"][i][2] * im_width
            ymin = filtered["detection_boxes"][i][1] * im_height
            ymax = filtered["detection_boxes"][i][3] * im_height
            className = entity_to_class[filtered["detection_class_entities"][i]]
            currentFile.write(
                className + ' ' + '0' + ' ' + '0' + ' ' + '-10' + ' ' + str(xmin) + ' ' + str(ymin) + ' ' + str(
                    xmax) + ' ' + str(ymax) + ' ' + str(filtered["detection_scores"][i]) + '\n')
    image_with_boxes = draw_boxes(img.numpy(), np.array(filtered["detection_boxes"]),
                                  np.array(filtered["detection_class_entities"]), np.array(filtered["detection_scores"]))
    display_image(image_with_boxes)
# Pretrained Faster R-CNN detector (OpenImages V4, Inception-ResNet-V2 backbone).
module_handle = "https://tfhub.dev/google/faster_rcnn/openimages_v4/inception_resnet_v2/1"
detector = hub.load(module_handle).signatures['default']
# Resolve demo paths relative to this script's own directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
img_path = os.path.join(BASE_DIR, 'demo/image/000001.jpg')
# path at which resulting text file needs to be dumped
filePath = os.path.join(BASE_DIR, 'demo/FasterRCNN_labels/textfile.txt')
run_detector(detector, img_path, filePath) | [
"numpy.uint8",
"matplotlib.pyplot.grid",
"tensorflow.image.convert_image_dtype",
"PIL.ImageOps.fit",
"tensorflow.io.read_file",
"numpy.array",
"PIL.ImageDraw.Draw",
"tensorflow.config.list_physical_devices",
"matplotlib.pyplot.imshow",
"PIL.ImageFont.load_default",
"tensorflow.test.is_built_with... | [((9002, 9049), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""demo/image/000001.jpg"""'], {}), "(BASE_DIR, 'demo/image/000001.jpg')\n", (9014, 9049), False, 'import os\n'), ((9116, 9177), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""demo/FasterRCNN_labels/textfile.txt"""'], {}), "(BASE_DIR, 'demo/FasterRCNN_labels/textfile.txt')\n", (9128, 9177), False, 'import os\n'), ((1154, 1182), 'tensorflow.test.is_built_with_cuda', 'tf.test.is_built_with_cuda', ([], {}), '()\n', (1180, 1182), True, 'import tensorflow as tf\n'), ((1297, 1324), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (1307, 1324), True, 'import matplotlib.pyplot as plt\n'), ((1329, 1344), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (1337, 1344), True, 'import matplotlib.pyplot as plt\n'), ((1349, 1366), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (1359, 1366), True, 'import matplotlib.pyplot as plt\n'), ((1532, 1563), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".jpg"""'}), "(suffix='.jpg')\n", (1548, 1563), False, 'import tempfile\n'), ((1579, 1591), 'six.moves.urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (1586, 1591), False, 'from six.moves.urllib.request import urlopen\n'), ((1642, 1661), 'six.BytesIO', 'BytesIO', (['image_data'], {}), '(image_data)\n', (1649, 1661), False, 'from six import BytesIO\n'), ((1678, 1700), 'PIL.Image.open', 'Image.open', (['image_data'], {}), '(image_data)\n', (1688, 1700), False, 'from PIL import Image\n'), ((1717, 1782), 'PIL.ImageOps.fit', 'ImageOps.fit', (['pil_image', '(new_width, new_height)', 'Image.ANTIALIAS'], {}), '(pil_image, (new_width, new_height), Image.ANTIALIAS)\n', (1729, 1782), False, 'from PIL import ImageOps\n'), ((2181, 2202), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (2195, 2202), False, 'from PIL import ImageDraw\n'), ((4576, 4597), 
'tensorflow.io.read_file', 'tf.io.read_file', (['path'], {}), '(path)\n', (4591, 4597), True, 'import tensorflow as tf\n'), ((4608, 4645), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['img'], {'channels': '(3)'}), '(img, channels=3)\n', (4628, 4645), True, 'import tensorflow as tf\n'), ((5076, 5087), 'time.time', 'time.time', ([], {}), '()\n', (5085, 5087), False, 'import time\n'), ((5140, 5151), 'time.time', 'time.time', ([], {}), '()\n', (5149, 5151), False, 'import time\n'), ((5319, 5336), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5330, 5336), False, 'from collections import defaultdict\n'), ((8944, 8969), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (8959, 8969), False, 'import os\n'), ((1218, 1256), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (1249, 1256), True, 'import tensorflow as tf\n'), ((3223, 3250), 'numpy.ceil', 'np.ceil', (['(0.05 * text_height)'], {}), '(0.05 * text_height)\n', (3230, 3250), True, 'import numpy as np\n'), ((3721, 3749), 'PIL.ImageColor.colormap.values', 'ImageColor.colormap.values', ([], {}), '()\n', (3747, 3749), False, 'from PIL import ImageColor\n'), ((3776, 3881), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""/usr/share/fonts/truetype/liberation/LiberationSansNarrow-Regular.ttf"""', '(25)'], {}), "(\n '/usr/share/fonts/truetype/liberation/LiberationSansNarrow-Regular.ttf', 25\n )\n", (3794, 3881), False, 'from PIL import ImageFont\n'), ((4996, 5041), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['img', 'tf.float32'], {}), '(img, tf.float32)\n', (5024, 5041), True, 'import tensorflow as tf\n'), ((8575, 8610), 'numpy.array', 'np.array', (["filter['detection_boxes']"], {}), "(filter['detection_boxes'])\n", (8583, 8610), True, 'import numpy as np\n'), ((8646, 8690), 'numpy.array', 'np.array', (["filter['detection_class_entities']"], {}), 
"(filter['detection_class_entities'])\n", (8654, 8690), True, 'import numpy as np\n'), ((8692, 8728), 'numpy.array', 'np.array', (["filter['detection_scores']"], {}), "(filter['detection_scores'])\n", (8700, 8728), True, 'import numpy as np\n'), ((8870, 8893), 'tensorflow_hub.load', 'hub.load', (['module_handle'], {}), '(module_handle)\n', (8878, 8893), True, 'import tensorflow_hub as hub\n'), ((3960, 3984), 'PIL.ImageFont.load_default', 'ImageFont.load_default', ([], {}), '()\n', (3982, 3984), False, 'from PIL import ImageFont\n'), ((4506, 4525), 'numpy.array', 'np.array', (['image_pil'], {}), '(image_pil)\n', (4514, 4525), True, 'import numpy as np\n'), ((4906, 4921), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (4914, 4921), True, 'import numpy as np\n'), ((4326, 4341), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (4334, 4341), True, 'import numpy as np\n')] |
"""
All the functions that don't fit in anywhere else or reduce readability.
"""
import tensorflow as tf
import tflib as lib
import tflib.ops.linear
import tflib.ops.conv2d
import tflib.ops.batchnorm
import tflib.ops.deconv2d
import tflib.save_images
import tflib.celebA_64x64 # This is where the input_pipe is
import tflib.ops.layernorm
import tflib.plot
import numpy as np
from glob import glob
import os
import functools
FLAGS = tf.app.flags.FLAGS
def LeakyReLU(x, alpha=0.2):
    """Leaky rectifier: identity for positive inputs, slope `alpha` elsewhere."""
    return tf.maximum(x, alpha * x)
def ReLULayer(name, n_in, n_out, inputs):
    """Fully-connected layer (He initialization) followed by a ReLU."""
    pre_activation = lib.ops.linear.Linear(
        name + '.Linear', n_in, n_out, inputs, initialization='he')
    return tf.nn.relu(pre_activation)
def LeakyReLULayer(name, n_in, n_out, inputs):
    """Fully-connected layer (He initialization) followed by a leaky ReLU."""
    pre_activation = lib.ops.linear.Linear(
        name + '.Linear', n_in, n_out, inputs, initialization='he')
    return LeakyReLU(pre_activation)
# BCHW --> BHWC Mapping for Batchnorm is done in the function definition deeper in tflib.
def Batchnorm(name, axes, inputs):
    """Normalization dispatch.

    Discriminator layers under the 'wgan-gp' configuration get layer
    normalization instead of batch normalization; everything else uses the
    fused batch-norm implementation.
    """
    if ('Discriminator' in name) and (FLAGS.gan_version == 'wgan-gp'):
        # Only the standard NCHW reduction axes are supported for layernorm.
        if axes != [0, 2, 3]:
            raise Exception('Layernorm over non-standard axes is unsupported')
        return lib.ops.layernorm.Layernorm(name, [1, 2, 3], inputs)
    else:
        return lib.ops.batchnorm.Batchnorm(name, axes, inputs, fused=True)
def pixcnn_gated_nonlinearity(a, b):
    """Gated activation in the PixelCNN style: sigmoid(a) * tanh(b)."""
    gate = tf.sigmoid(a)
    signal = tf.tanh(b)
    return gate * signal
def SubpixelConv2D(*args, **kwargs):
    """Subpixel upsampling: conv with 4x channels, then 2x depth-to-space.

    Input/output are NCHW; depth_to_space expects NHWC, hence the transposes.
    """
    kwargs['output_dim'] = 4 * kwargs['output_dim']
    conv_out = lib.ops.conv2d.Conv2D(*args, **kwargs)
    nhwc = tf.transpose(conv_out, [0, 2, 3, 1])
    upsampled = tf.depth_to_space(nhwc, 2)
    return tf.transpose(upsampled, [0, 3, 1, 2])
def ResidualBlock(name, input_dim, output_dim, filter_size, inputs, resample=None, he_init=True):
    """Pre-activation bottleneck residual block (ReLU-conv x3 + shortcut).

    resample: None, 'down', or 'up'
        'down' halves spatial resolution (strided convs), 'up' doubles it
        (subpixel/deconv), None keeps it unchanged.
    """
    if resample == 'down':
        # Bottleneck: 1x1 reduce, strided KxK, 1x1 expand; shortcut is strided.
        conv_shortcut = functools.partial(lib.ops.conv2d.Conv2D, stride=2)
        conv_1 = functools.partial(
            lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim // 2)
        conv_1b = functools.partial(
            lib.ops.conv2d.Conv2D, input_dim=input_dim // 2, output_dim=output_dim // 2, stride=2)
        conv_2 = functools.partial(
            lib.ops.conv2d.Conv2D, input_dim=output_dim // 2, output_dim=output_dim)
    elif resample == 'up':
        # Upsampling path: subpixel shortcut, deconv in the middle.
        conv_shortcut = SubpixelConv2D
        conv_1 = functools.partial(
            lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim // 2)
        conv_1b = functools.partial(
            lib.ops.deconv2d.Deconv2D, input_dim=input_dim // 2, output_dim=output_dim // 2)
        conv_2 = functools.partial(
            lib.ops.conv2d.Conv2D, input_dim=output_dim // 2, output_dim=output_dim)
    elif resample == None:
        conv_shortcut = lib.ops.conv2d.Conv2D
        conv_1 = functools.partial(
            lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim // 2)
        conv_1b = functools.partial(
            lib.ops.conv2d.Conv2D, input_dim=input_dim // 2, output_dim=output_dim // 2)
        # NOTE(review): conv_2 takes input_dim // 2 here but conv_1b emits
        # output_dim // 2 channels — only consistent when input_dim ==
        # output_dim. Confirm whether this branch is ever used with
        # mismatched dims.
        conv_2 = functools.partial(
            lib.ops.conv2d.Conv2D, input_dim=input_dim // 2, output_dim=output_dim)
    else:
        raise Exception('invalid resample value')
    if output_dim == input_dim and resample == None:
        shortcut = inputs  # Identity skip-connection
    else:
        # Learned 1x1 projection when shape/resolution changes.
        shortcut = conv_shortcut(name + '.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1,
                                 he_init=False, biases=True, inputs=inputs)
    output = inputs
    output = tf.nn.relu(output)
    output = conv_1(name + '.Conv1', filter_size=1,
                    inputs=output, he_init=he_init, weightnorm=False)
    output = tf.nn.relu(output)
    output = conv_1b(name + '.Conv1B', filter_size=filter_size,
                     inputs=output, he_init=he_init, weightnorm=False)
    output = tf.nn.relu(output)
    output = conv_2(name + '.Conv2', filter_size=1, inputs=output,
                    he_init=he_init, weightnorm=False, biases=False)
    output = Batchnorm(name + '.BN', [0, 2, 3], output)
    # Residual sum with a 0.3 scale on the learned branch.
    return shortcut + (0.3 * output)
def prepare_noise_samples(devices, Generator):
    """Build one fixed batch of generator samples, sharded across `devices`."""
    fixed_noise = tf.constant(np.random.normal(
        size=(FLAGS.batch_size, 128)).astype('float32'))
    per_device = FLAGS.batch_size // len(devices)
    shards = []
    for shard_idx, _device in enumerate(devices):
        noise_slice = fixed_noise[shard_idx * per_device:(shard_idx + 1) * per_device]
        shards.append(Generator(per_device, noise=noise_slice))
    return tf.concat(shards, axis=0)
def get_dataset_files():
    """Return the list of tfrecords files for the configured dataset.

    Raises
    ------
    AssertionError
        If no tfrecords file matches ``FLAGS.dataset_path``/``FLAGS.dataset``.
    """
    pattern = os.path.join(FLAGS.dataset_path, FLAGS.dataset + ".tfrecords")
    files = glob(pattern)
    assert len(
        files) > 0, "Did not find any tfrecords files in the dataset_path folder"
    # glob() already returns a list; the original element-by-element copy
    # was redundant. list() keeps the "fresh list" behavior for callers.
    return list(files)
def refresh_dirs(SUMMARY_DIR, OUTPUT_DIR, SAVE_DIR, restore):
    """Wipe and recreate the run directories unless resuming from a checkpoint.

    When `restore` is truthy AND a checkpoint exists in SAVE_DIR, all
    directories are left untouched so training can resume; otherwise the
    summary, save and output directories are deleted and recreated empty.
    """
    ckpt = tf.train.get_checkpoint_state(SAVE_DIR)
    restore = restore and ckpt and ckpt.model_checkpoint_path
    if not restore:
        _recreate_dir(SUMMARY_DIR, "Log directory reconstructed")
        _recreate_dir(SAVE_DIR, "Save directory reconstructed")
        _recreate_dir(OUTPUT_DIR, "Output directory reconstructed")


def _recreate_dir(path, message):
    """Delete `path` recursively if it exists (printing `message`), then recreate it."""
    if tf.gfile.Exists(path):
        tf.gfile.DeleteRecursively(path)
        print(message)
    tf.gfile.MakeDirs(path)
def generate_image(iteration, sess, output_dir, all_fixed_noise_samples, Generator, summary_writer):
    """Run the fixed-noise samples through the session and save a PNG grid.

    NOTE(review): `Generator` and `summary_writer` are unused here, and
    `image_summary` is computed but never written — confirm whether the
    summary was meant to be added via `summary_writer`.
    """
    # add image to summary
    height = FLAGS.height
    if FLAGS.data_format == "NCHW":
        output_shape = [FLAGS.batch_size,
                        FLAGS.n_ch, height, height]
    else:
        output_shape = [FLAGS.batch_size,
                        height, height, FLAGS.n_ch]
    samples_reshaped = tf.reshape(
        all_fixed_noise_samples, output_shape)
    if FLAGS.data_format == "NCHW":
        # NCHW --> NHWC (tf.summary.image expects channels-last)
        samples_reshaped = tf.transpose(samples_reshaped, [0, 2, 3, 1])
    image_op = tf.summary.image(
        'generator output', samples_reshaped)
    image_summary, samples = sess.run(
        [image_op, all_fixed_noise_samples])
    # Map generator output from [-1, 1] to integer pixel values [0, 255].
    samples = ((samples + 1.) * (255.99 / 2)).astype('int32')
    samples = np.reshape(samples, output_shape)
    if FLAGS.data_format == "NHWC" and samples.shape[3] in [1, 3]:
        # NHWC --> NCHW (save_images expects channels-first)
        samples = np.transpose(samples, [0, 3, 1, 2])
    lib.save_images.save_images(
        samples, output_dir + '/samples_{}.png'.format(iteration))
"""
Making a sample of how the training data looks like
"""
def sample_dataset(sess, all_real_data_conv, output_dir):
    """Save one batch of real training data as a ground-truth sample PNG."""
    _x_r = sess.run(all_real_data_conv)
    # Data in [-1, 1] is mapped to [0, 255]; already-positive data is assumed
    # to be in pixel range and only cast.
    if np.amin(_x_r) < 0:
        _x_r = ((_x_r + 1.) * (255.99 / 2)).astype('int32')
    else:
        _x_r = (_x_r).astype('int32')
    if FLAGS.data_format == "NHWC":
        # Reshape to NHWC then transpose to NCHW for save_images.
        lib.save_images.save_images(np.transpose(_x_r.reshape(
            (FLAGS.batch_size, FLAGS.height, FLAGS.height, FLAGS.n_ch)), (0,3,1,2)), output_dir + '/samples_groundtruth.png')
    else:
        lib.save_images.save_images(_x_r.reshape(
            (FLAGS.batch_size, FLAGS.n_ch, FLAGS.height, FLAGS.height)), output_dir + '/samples_groundtruth.png')
| [
"tensorflow.transpose",
"tensorflow.tanh",
"tensorflow.gfile.MakeDirs",
"tflib.ops.linear.Linear",
"tensorflow.summary.image",
"numpy.reshape",
"tensorflow.gfile.Exists",
"tflib.ops.conv2d.Conv2D",
"tensorflow.gfile.DeleteRecursively",
"tensorflow.concat",
"tensorflow.maximum",
"glob.glob",
... | [((495, 519), 'tensorflow.maximum', 'tf.maximum', (['(alpha * x)', 'x'], {}), '(alpha * x, x)\n', (505, 519), True, 'import tensorflow as tf\n'), ((577, 663), 'tflib.ops.linear.Linear', 'lib.ops.linear.Linear', (["(name + '.Linear')", 'n_in', 'n_out', 'inputs'], {'initialization': '"""he"""'}), "(name + '.Linear', n_in, n_out, inputs, initialization\n ='he')\n", (598, 663), True, 'import tflib as lib\n'), ((679, 697), 'tensorflow.nn.relu', 'tf.nn.relu', (['output'], {}), '(output)\n', (689, 697), True, 'import tensorflow as tf\n'), ((760, 846), 'tflib.ops.linear.Linear', 'lib.ops.linear.Linear', (["(name + '.Linear')", 'n_in', 'n_out', 'inputs'], {'initialization': '"""he"""'}), "(name + '.Linear', n_in, n_out, inputs, initialization\n ='he')\n", (781, 846), True, 'import tflib as lib\n'), ((1522, 1560), 'tflib.ops.conv2d.Conv2D', 'lib.ops.conv2d.Conv2D', (['*args'], {}), '(*args, **kwargs)\n', (1543, 1560), True, 'import tflib as lib\n'), ((1574, 1608), 'tensorflow.transpose', 'tf.transpose', (['output', '[0, 2, 3, 1]'], {}), '(output, [0, 2, 3, 1])\n', (1586, 1608), True, 'import tensorflow as tf\n'), ((1622, 1650), 'tensorflow.depth_to_space', 'tf.depth_to_space', (['output', '(2)'], {}), '(output, 2)\n', (1639, 1650), True, 'import tensorflow as tf\n'), ((1664, 1698), 'tensorflow.transpose', 'tf.transpose', (['output', '[0, 3, 1, 2]'], {}), '(output, [0, 3, 1, 2])\n', (1676, 1698), True, 'import tensorflow as tf\n'), ((3623, 3641), 'tensorflow.nn.relu', 'tf.nn.relu', (['output'], {}), '(output)\n', (3633, 3641), True, 'import tensorflow as tf\n'), ((3778, 3796), 'tensorflow.nn.relu', 'tf.nn.relu', (['output'], {}), '(output)\n', (3788, 3796), True, 'import tensorflow as tf\n'), ((3945, 3963), 'tensorflow.nn.relu', 'tf.nn.relu', (['output'], {}), '(output)\n', (3955, 3963), True, 'import tensorflow as tf\n'), ((4666, 4708), 'tensorflow.concat', 'tf.concat', (['all_fixed_noise_samples'], {'axis': '(0)'}), '(all_fixed_noise_samples, axis=0)\n', (4675, 4708), 
True, 'import tensorflow as tf\n'), ((4822, 4884), 'os.path.join', 'os.path.join', (['FLAGS.dataset_path', "(FLAGS.dataset + '.tfrecords')"], {}), "(FLAGS.dataset_path, FLAGS.dataset + '.tfrecords')\n", (4834, 4884), False, 'import os\n'), ((4897, 4910), 'glob.glob', 'glob', (['pattern'], {}), '(pattern)\n', (4901, 4910), False, 'from glob import glob\n'), ((5200, 5239), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['SAVE_DIR'], {}), '(SAVE_DIR)\n', (5229, 5239), True, 'import tensorflow as tf\n'), ((6272, 6321), 'tensorflow.reshape', 'tf.reshape', (['all_fixed_noise_samples', 'output_shape'], {}), '(all_fixed_noise_samples, output_shape)\n', (6282, 6321), True, 'import tensorflow as tf\n'), ((6478, 6532), 'tensorflow.summary.image', 'tf.summary.image', (['"""generator output"""', 'samples_reshaped'], {}), "('generator output', samples_reshaped)\n", (6494, 6532), True, 'import tensorflow as tf\n'), ((6702, 6735), 'numpy.reshape', 'np.reshape', (['samples', 'output_shape'], {}), '(samples, output_shape)\n', (6712, 6735), True, 'import numpy as np\n'), ((1203, 1255), 'tflib.ops.layernorm.Layernorm', 'lib.ops.layernorm.Layernorm', (['name', '[1, 2, 3]', 'inputs'], {}), '(name, [1, 2, 3], inputs)\n', (1230, 1255), True, 'import tflib as lib\n'), ((1281, 1340), 'tflib.ops.batchnorm.Batchnorm', 'lib.ops.batchnorm.Batchnorm', (['name', 'axes', 'inputs'], {'fused': '(True)'}), '(name, axes, inputs, fused=True)\n', (1308, 1340), True, 'import tflib as lib\n'), ((1391, 1404), 'tensorflow.sigmoid', 'tf.sigmoid', (['a'], {}), '(a)\n', (1401, 1404), True, 'import tensorflow as tf\n'), ((1407, 1417), 'tensorflow.tanh', 'tf.tanh', (['b'], {}), '(b)\n', (1414, 1417), True, 'import tensorflow as tf\n'), ((1920, 1970), 'functools.partial', 'functools.partial', (['lib.ops.conv2d.Conv2D'], {'stride': '(2)'}), '(lib.ops.conv2d.Conv2D, stride=2)\n', (1937, 1970), False, 'import functools\n'), ((1988, 2081), 'functools.partial', 'functools.partial', 
(['lib.ops.conv2d.Conv2D'], {'input_dim': 'input_dim', 'output_dim': '(input_dim // 2)'}), '(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=\n input_dim // 2)\n', (2005, 2081), False, 'import functools\n'), ((2108, 2216), 'functools.partial', 'functools.partial', (['lib.ops.conv2d.Conv2D'], {'input_dim': '(input_dim // 2)', 'output_dim': '(output_dim // 2)', 'stride': '(2)'}), '(lib.ops.conv2d.Conv2D, input_dim=input_dim // 2,\n output_dim=output_dim // 2, stride=2)\n', (2125, 2216), False, 'import functools\n'), ((2243, 2337), 'functools.partial', 'functools.partial', (['lib.ops.conv2d.Conv2D'], {'input_dim': '(output_dim // 2)', 'output_dim': 'output_dim'}), '(lib.ops.conv2d.Conv2D, input_dim=output_dim // 2,\n output_dim=output_dim)\n', (2260, 2337), False, 'import functools\n'), ((5333, 5361), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['SUMMARY_DIR'], {}), '(SUMMARY_DIR)\n', (5348, 5361), True, 'import tensorflow as tf\n'), ((5472, 5502), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['SUMMARY_DIR'], {}), '(SUMMARY_DIR)\n', (5489, 5502), True, 'import tensorflow as tf\n'), ((5515, 5540), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['SAVE_DIR'], {}), '(SAVE_DIR)\n', (5530, 5540), True, 'import tensorflow as tf\n'), ((5649, 5676), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['SAVE_DIR'], {}), '(SAVE_DIR)\n', (5666, 5676), True, 'import tensorflow as tf\n'), ((5689, 5716), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (5704, 5716), True, 'import tensorflow as tf\n'), ((5829, 5858), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (5846, 5858), True, 'import tensorflow as tf\n'), ((6418, 6462), 'tensorflow.transpose', 'tf.transpose', (['samples_reshaped', '[0, 2, 3, 1]'], {}), '(samples_reshaped, [0, 2, 3, 1])\n', (6430, 6462), True, 'import tensorflow as tf\n'), ((6845, 6880), 'numpy.transpose', 'np.transpose', (['samples', '[0, 3, 1, 2]'], {}), '(samples, 
[0, 3, 1, 2])\n', (6857, 6880), True, 'import numpy as np\n'), ((7150, 7163), 'numpy.amin', 'np.amin', (['_x_r'], {}), '(_x_r)\n', (7157, 7163), True, 'import numpy as np\n'), ((2430, 2523), 'functools.partial', 'functools.partial', (['lib.ops.conv2d.Conv2D'], {'input_dim': 'input_dim', 'output_dim': '(input_dim // 2)'}), '(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=\n input_dim // 2)\n', (2447, 2523), False, 'import functools\n'), ((2550, 2652), 'functools.partial', 'functools.partial', (['lib.ops.deconv2d.Deconv2D'], {'input_dim': '(input_dim // 2)', 'output_dim': '(output_dim // 2)'}), '(lib.ops.deconv2d.Deconv2D, input_dim=input_dim // 2,\n output_dim=output_dim // 2)\n', (2567, 2652), False, 'import functools\n'), ((2679, 2773), 'functools.partial', 'functools.partial', (['lib.ops.conv2d.Conv2D'], {'input_dim': '(output_dim // 2)', 'output_dim': 'output_dim'}), '(lib.ops.conv2d.Conv2D, input_dim=output_dim // 2,\n output_dim=output_dim)\n', (2696, 2773), False, 'import functools\n'), ((5375, 5414), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['SUMMARY_DIR'], {}), '(SUMMARY_DIR)\n', (5401, 5414), True, 'import tensorflow as tf\n'), ((5554, 5590), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['SAVE_DIR'], {}), '(SAVE_DIR)\n', (5580, 5590), True, 'import tensorflow as tf\n'), ((5730, 5768), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (5756, 5768), True, 'import tensorflow as tf\n'), ((2873, 2966), 'functools.partial', 'functools.partial', (['lib.ops.conv2d.Conv2D'], {'input_dim': 'input_dim', 'output_dim': '(input_dim // 2)'}), '(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=\n input_dim // 2)\n', (2890, 2966), False, 'import functools\n'), ((2994, 3092), 'functools.partial', 'functools.partial', (['lib.ops.conv2d.Conv2D'], {'input_dim': '(input_dim // 2)', 'output_dim': '(output_dim // 2)'}), '(lib.ops.conv2d.Conv2D, 
input_dim=input_dim // 2,\n output_dim=output_dim // 2)\n', (3011, 3092), False, 'import functools\n'), ((3120, 3213), 'functools.partial', 'functools.partial', (['lib.ops.conv2d.Conv2D'], {'input_dim': '(input_dim // 2)', 'output_dim': 'output_dim'}), '(lib.ops.conv2d.Conv2D, input_dim=input_dim // 2,\n output_dim=output_dim)\n', (3137, 3213), False, 'import functools\n'), ((4273, 4319), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(FLAGS.batch_size, 128)'}), '(size=(FLAGS.batch_size, 128))\n', (4289, 4319), True, 'import numpy as np\n')] |
import numpy as np
# 计算转移矩阵对应平稳分布
'''
转移矩阵如下,分布为列向量
|1 0.5| |x|
|0 0.5| |y|
'''
def calc_stationary_distribution(M):
    """Solve for the stationary distribution of a column-stochastic matrix.

    The stationary vector x satisfies M @ x = x with sum(x) == 1.  It is
    found by solving (M - I) x = 0 with the last (redundant) equation
    replaced by the normalization constraint sum(x) == 1.

    Parameters
    ----------
    M : np.ndarray
        Square column-stochastic transition matrix (columns sum to 1).

    Returns
    -------
    np.ndarray
        The stationary distribution.

    Raises
    ------
    ValueError
        If ``M`` is not a square 2-D matrix.  (The original printed a
        message and silently returned None.)
    """
    shape = np.shape(M)
    if len(shape) != 2 or shape[0] != shape[1]:
        raise ValueError("Stationary distribution: input must be a square matrix")
    n = shape[0]
    # Subtract the identity: (M - I) x = 0
    C = M - np.identity(n)
    # Replace the last row with the normalization equation sum(x) == 1
    C[n - 1] = 1
    X = np.zeros(n, dtype=float)
    X[n - 1] = 1
    # Solve the linear system
    return np.linalg.solve(C, X)
def calc_coupling_p(p_a, p_b):
    """Distribution of pulls needed for the low-priority item when coupled
    with a high-priority item (gacha pity model).

    p_a: per-pull drop probabilities of the high-priority item, indexed by
         its pity counter; p_b: same for the low-priority item.  Returns the
    normalized distribution over the pull count at which the low-priority
    item drops (length len(p_b) + 1).
    """
    # a = high priority, b = low priority
    # pity lengths (pull index at which a drop is guaranteed)
    pos_a = len(p_a)  # e.g. 91
    pos_b = len(p_b)  # e.g. 11
    # There are pos_a*pos_b states in total.
    # State a_state*pos_b + b_state means: a_state pulls without item a AND
    # b_state pulls without item b.
    M = np.zeros(((pos_a-1)*pos_b, (pos_a-1)*pos_b), dtype=float)
    # Case 1: the high-priority item did NOT drop this pull
    for i in range(1, pos_a-1):  # current high-priority pity state, 1..pos_a-2
        # ...and the low-priority item DID drop
        now_state = i*pos_b
        for j in range(pos_b-1):  # previous low-priority pity state, 0..pos_b-2
            pre_state = (i-1)*pos_b + j
            trans_p = min(1-p_a[i], p_b[j+1])
            M[now_state][pre_state] = trans_p
        # Low-priority pity reached (previous b state == pos_b-1): b is
        # guaranteed whenever a does not drop.
        pre_state = (i-1)*pos_b + pos_b-1
        M[now_state][pre_state] = 1-p_a[i]
        # ...and the low-priority item did NOT drop either
        for j in range(pos_b-1):  # previous low-priority pity state, 0..pos_b-2
            now_state = i*pos_b + (j+1)
            pre_state = (i-1)*pos_b + j
            trans_p = max(1-p_a[i]-p_b[j+1], 0)
            M[now_state][pre_state] = trans_p
    # Case 2: the high-priority item dropped (a's counter resets to 0)
    for i in range(pos_a-1):  # previous high-priority pity state
        for j in range(1, pos_b):  # new low-priority pity state after the pull
            now_state = j  # at stationarity i and j cannot both be 0
            pre_state = i*pos_b + (j-1)
            trans_p = p_a[i+1]
            M[now_state][pre_state] = trans_p
    # Case 3: the high-priority item keeps dropping while b sits at pity
    for i in range(pos_a-1):  # previous high-priority pity state
        now_state = pos_b-1
        pre_state = i*pos_b + pos_b-1
        trans_p = p_a[i+1]
        M[now_state][pre_state] = trans_p
    # With the transition matrix complete, solve for the stationary distribution.
    ans = calc_stationary_distribution(M)
    # Distribution of the pull count for the low-priority item.
    ans_b = np.zeros(pos_b+1, dtype=float)
    for i in range(pos_a-1):
        for j in range(0, pos_b-1):
            trans_p = min(1-p_a[i+1], p_b[j+1])
            ans_b[j+1] += ans[i*pos_b + j] * trans_p
        trans_p = min(1-p_a[i+1], 1)
        ans_b[pos_b] += ans[i*pos_b + pos_b-1] * trans_p
    return ans_b/ans_b.sum()
| [
"numpy.identity",
"numpy.shape",
"numpy.linalg.solve",
"numpy.zeros"
] | [((157, 168), 'numpy.shape', 'np.shape', (['M'], {}), '(M)\n', (165, 168), True, 'import numpy as np\n'), ((398, 436), 'numpy.zeros', 'np.zeros', (['matrix_shape[0]'], {'dtype': 'float'}), '(matrix_shape[0], dtype=float)\n', (406, 436), True, 'import numpy as np\n'), ((490, 511), 'numpy.linalg.solve', 'np.linalg.solve', (['C', 'X'], {}), '(C, X)\n', (505, 511), True, 'import numpy as np\n'), ((733, 798), 'numpy.zeros', 'np.zeros', (['((pos_a - 1) * pos_b, (pos_a - 1) * pos_b)'], {'dtype': 'float'}), '(((pos_a - 1) * pos_b, (pos_a - 1) * pos_b), dtype=float)\n', (741, 798), True, 'import numpy as np\n'), ((2023, 2055), 'numpy.zeros', 'np.zeros', (['(pos_b + 1)'], {'dtype': 'float'}), '(pos_b + 1, dtype=float)\n', (2031, 2055), True, 'import numpy as np\n'), ((308, 336), 'numpy.identity', 'np.identity', (['matrix_shape[0]'], {}), '(matrix_shape[0])\n', (319, 336), True, 'import numpy as np\n')] |
import numpy as np
from math import cos, sin, sqrt
import math
class Joint(object):
    """One link in a serial kinematic chain, described by DH parameters.

    Joints form a doubly-referenced chain: each joint keeps its predecessor
    in ``prevLink`` and registers itself as that joint's ``nextLink``.
    Subclasses declare ``variable_property`` ('theta' or 'd') to name the
    free joint variable.
    """

    def __init__(self, d=0, theta=0, a=0, alpha=0, prevLink=None, **kwargs):
        super().__init__(**kwargs)
        self.d = d
        self.theta = theta
        self.a = a
        self.alpha = alpha
        if not isinstance(prevLink, Joint):
            raise ValueError("Previous link must be a joint")
        self.prevLink = prevLink
        self.prevLink.addLink(self)
        self.nextLink = None

    def set_joint_value(self, newValue):
        """Overwrite this joint's free variable (theta or d)."""
        setattr(self, self.variable_property, newValue)

    def change_joint_value(self, newValue):
        """Increment this joint's free variable by `newValue`."""
        old_value = getattr(self, self.variable_property)
        setattr(self, self.variable_property, old_value + newValue)

    def addLink(self, newLink):
        """Register `newLink` as this joint's successor."""
        self.nextLink = newLink

    def evaluate_inverse(self):
        """Product of inverse DH transforms from this joint to the chain tip."""
        tail = self.nextLink
        if tail is None:
            return self.matrix_dh_inv
        return self.matrix_dh_inv @ tail.evaluate_inverse()

    def evaluate_forwards(self):
        """Product of DH transforms from the chain base up to this joint."""
        head = self.prevLink
        if head is None:
            return self.matrix_dh
        return head.evaluate_forwards() @ self.matrix_dh

    @property
    def matrix_dh(self):
        """4x4 homogeneous DH transform for the current joint parameters."""
        return self.make_dh_matrix(self.a, self.alpha, self.d, self.theta)

    @property
    def matrix_dh_inv(self):
        """Inverse of ``matrix_dh``, built analytically as [R^T, -R^T t; 0 1]."""
        rot_t = self.matrixRotation.transpose()
        top_rows = np.hstack([rot_t, -rot_t @ self.matrixTranslation])
        return np.vstack([top_rows, [0, 0, 0, 1]])

    @property
    def matrixRotation(self):
        """3x3 rotation part of the DH transform."""
        return self.matrix_dh[0:3, 0:3]

    @property
    def matrixTranslation(self):
        """3x1 translation column of the DH transform."""
        return np.reshape(self.matrix_dh[0:3, 3], (3, 1))

    @property
    def jointCoordinates(self):
        """World-frame (x, y, z) of this joint's origin."""
        return tuple(self.evaluate_forwards()[0:3, 3])

    @property
    def link_length(self):
        """Euclidean length contributed by this link's a/d offsets."""
        return sqrt(self.a ** 2 + self.d ** 2)

    @staticmethod
    def make_dh_matrix(a, al, d, th):
        """Standard Denavit-Hartenberg transform for parameters (a, alpha, d, theta)."""
        ct, st = cos(th), sin(th)
        ca, sa = cos(al), sin(al)
        return np.array([
            [ct, -st * ca, st * sa, a * ct],
            [st, ct * ca, -ct * sa, a * st],
            [0, sa, ca, d],
            [0, 0, 0, 1],
        ])
class BaseFrame(Joint):
    """Fixed root of a kinematic chain.

    Creates a base frame to attach other arm parts to, positioned at `loc`
    and rotated by `rot` = (theta, alpha).  The base has no predecessor and
    no free joint variable, so set/change are no-ops.
    """

    variable_property = None

    def __init__(self, loc=(0, 0, 0), rot=(0, 0), *_):
        self.d = 0
        self.theta = rot[0]
        self.a = 0
        self.alpha = rot[1]
        self.prevLink = None
        self.nextLink = None
        self.loc = loc

    @property
    def matrix_dh(self):
        """DH rotation built from (theta, alpha), translation forced to `loc`."""
        ct, st = cos(self.theta), sin(self.theta)
        ca, sa = cos(self.alpha), sin(self.alpha)
        return np.array([
            [ct, -st * ca, st * sa, self.loc[0]],
            [st, ct * ca, -ct * sa, self.loc[1]],
            [0, sa, ca, self.loc[2]],
            [0, 0, 0, 1],
        ])

    def set_joint_value(self, _):
        """The base frame has no joint variable; setting is a no-op."""
        pass

    def change_joint_value(self, _):
        """The base frame has no joint variable; changing is a no-op."""
        pass
class EndEffector(Joint):
    """Terminal link of the chain: rejects successors, has no free variable."""
    # No driven joint variable on the end effector.
    variable_property = None
    def addLink(self, _):
        """Always raises: nothing can be attached after the end effector."""
        raise TypeError("Can't add new link to end efector")
    def set_joint_value(self, _):
        """No joint variable on the end effector; silently ignore."""
        pass
    def change_joint_value(self, _):
        """No joint variable on the end effector; silently ignore."""
        pass
class RevoluteJoint(Joint):
    """Rotary joint: the free variable is the DH angle ``theta``."""
    variable_property = 'theta'
class PrismaticJoint(Joint):
    """Sliding joint: the free variable is the DH offset ``d``."""
    variable_property = 'd'
| [
"numpy.reshape",
"numpy.hstack",
"math.sqrt",
"math.cos",
"math.sin"
] | [((1780, 1822), 'numpy.reshape', 'np.reshape', (['self.matrix_dh[0:3, 3]', '(3, 1)'], {}), '(self.matrix_dh[0:3, 3], (3, 1))\n', (1790, 1822), True, 'import numpy as np\n'), ((1988, 2019), 'math.sqrt', 'sqrt', (['(self.a ** 2 + self.d ** 2)'], {}), '(self.a ** 2 + self.d ** 2)\n', (1992, 2019), False, 'from math import cos, sin, sqrt\n'), ((1539, 1584), 'numpy.hstack', 'np.hstack', (['[RT, -RT @ self.matrixTranslation]'], {}), '([RT, -RT @ self.matrixTranslation])\n', (1548, 1584), True, 'import numpy as np\n'), ((2117, 2124), 'math.cos', 'cos', (['th'], {}), '(th)\n', (2120, 2124), False, 'from math import cos, sin, sqrt\n'), ((2194, 2201), 'math.sin', 'sin', (['th'], {}), '(th)\n', (2197, 2201), False, 'from math import cos, sin, sqrt\n'), ((2282, 2289), 'math.sin', 'sin', (['al'], {}), '(al)\n', (2285, 2289), False, 'from math import cos, sin, sqrt\n'), ((2302, 2309), 'math.cos', 'cos', (['al'], {}), '(al)\n', (2305, 2309), False, 'from math import cos, sin, sqrt\n'), ((3013, 3020), 'math.cos', 'cos', (['th'], {}), '(th)\n', (3016, 3020), False, 'from math import cos, sin, sqrt\n'), ((3090, 3097), 'math.sin', 'sin', (['th'], {}), '(th)\n', (3093, 3097), False, 'from math import cos, sin, sqrt\n'), ((3178, 3185), 'math.sin', 'sin', (['al'], {}), '(al)\n', (3181, 3185), False, 'from math import cos, sin, sqrt\n'), ((3198, 3205), 'math.cos', 'cos', (['al'], {}), '(al)\n', (3201, 3205), False, 'from math import cos, sin, sqrt\n'), ((2137, 2144), 'math.cos', 'cos', (['al'], {}), '(al)\n', (2140, 2144), False, 'from math import cos, sin, sqrt\n'), ((2148, 2155), 'math.sin', 'sin', (['th'], {}), '(th)\n', (2151, 2155), False, 'from math import cos, sin, sqrt\n'), ((2156, 2163), 'math.sin', 'sin', (['al'], {}), '(al)\n', (2159, 2163), False, 'from math import cos, sin, sqrt\n'), ((2170, 2177), 'math.cos', 'cos', (['th'], {}), '(th)\n', (2173, 2177), False, 'from math import cos, sin, sqrt\n'), ((2205, 2212), 'math.cos', 'cos', (['th'], {}), '(th)\n', (2208, 2212), 
False, 'from math import cos, sin, sqrt\n'), ((2213, 2220), 'math.cos', 'cos', (['al'], {}), '(al)\n', (2216, 2220), False, 'from math import cos, sin, sqrt\n'), ((2234, 2241), 'math.sin', 'sin', (['al'], {}), '(al)\n', (2237, 2241), False, 'from math import cos, sin, sqrt\n'), ((2247, 2254), 'math.sin', 'sin', (['th'], {}), '(th)\n', (2250, 2254), False, 'from math import cos, sin, sqrt\n'), ((3033, 3040), 'math.cos', 'cos', (['al'], {}), '(al)\n', (3036, 3040), False, 'from math import cos, sin, sqrt\n'), ((3044, 3051), 'math.sin', 'sin', (['th'], {}), '(th)\n', (3047, 3051), False, 'from math import cos, sin, sqrt\n'), ((3052, 3059), 'math.sin', 'sin', (['al'], {}), '(al)\n', (3055, 3059), False, 'from math import cos, sin, sqrt\n'), ((3101, 3108), 'math.cos', 'cos', (['th'], {}), '(th)\n', (3104, 3108), False, 'from math import cos, sin, sqrt\n'), ((3109, 3116), 'math.cos', 'cos', (['al'], {}), '(al)\n', (3112, 3116), False, 'from math import cos, sin, sqrt\n'), ((3130, 3137), 'math.sin', 'sin', (['al'], {}), '(al)\n', (3133, 3137), False, 'from math import cos, sin, sqrt\n'), ((2129, 2136), 'math.sin', 'sin', (['th'], {}), '(th)\n', (2132, 2136), False, 'from math import cos, sin, sqrt\n'), ((2226, 2233), 'math.cos', 'cos', (['th'], {}), '(th)\n', (2229, 2233), False, 'from math import cos, sin, sqrt\n'), ((3025, 3032), 'math.sin', 'sin', (['th'], {}), '(th)\n', (3028, 3032), False, 'from math import cos, sin, sqrt\n'), ((3122, 3129), 'math.cos', 'cos', (['th'], {}), '(th)\n', (3125, 3129), False, 'from math import cos, sin, sqrt\n')] |
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
def log_return(list_stock_prices):
    """One-step log returns log(p_t) - log(p_{t-1}) of a price series.

    Stock prices are estimated through WAP values. Expects a pandas
    Series (uses ``.diff()``); the first element of the result is NaN.
    """
    log_prices = np.log(list_stock_prices)
    return log_prices.diff()
def realized_volatility(series_log_return):
    """Realized volatility: square root of the sum of squared log returns."""
    total_variance = np.sum(np.square(series_log_return))
    return np.sqrt(total_variance)
def compute_wap(book_pd):
    """Weighted averaged price from the level-1 book quotes.

    Each side's price is weighted by the opposite side's size:
    (bid_price1*ask_size1 + ask_price1*bid_size1) / (bid_size1 + ask_size1).
    """
    bid_price, bid_size = book_pd['bid_price1'], book_pd['bid_size1']
    ask_price, ask_size = book_pd['ask_price1'], book_pd['ask_size1']
    weighted_sum = bid_price * ask_size + ask_price * bid_size
    return weighted_sum / (bid_size + ask_size)
def realized_volatility_per_time_id(file_path, prediction_column_name):
    """Per-time_id realized volatility from one stock's book parquet file.

    :param file_path: parquet path of the form '...stock_id=<id>...' —
        the stock id is parsed from the text after the first '='.
    :param prediction_column_name: name given to the volatility column.
    :return: DataFrame with columns ['row_id', prediction_column_name],
        where row_id is '<stock_id>-<time_id>'.
    """
    df_book_data = pd.read_parquet(file_path)
    # Estimate stock price per time point
    df_book_data['wap'] = compute_wap(df_book_data)
    # Compute log return from wap values per time_id
    df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
    # drop the NaN produced by diff() at the start of each time_id group
    df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
    # Compute the square root of the sum of log return squared to get realized volatility
    df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
    # Formatting
    df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
    stock_id = file_path.split('=')[1]
    df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
    return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file, prediction_column_name):
    """Stack per-file realized-volatility frames into one DataFrame.

    :param list_file: iterable of per-stock book parquet file paths.
    :param prediction_column_name: name of the output volatility column.
    :return: DataFrame with the rows of every file, in input order;
        an empty DataFrame when ``list_file`` is empty.
    """
    # Collect the frames first and concatenate once: calling pd.concat
    # inside the loop copies the accumulated frame on every iteration,
    # which is quadratic in the total number of rows.
    frames = [realized_volatility_per_time_id(file, prediction_column_name)
              for file in list_file]
    if not frames:
        return pd.DataFrame()  # preserve the original empty-input result
    return pd.concat(frames)
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
    """Fit a random forest mapping naive realized volatility to the target.

    :param book_path_train: list of training book parquet files.
    :param prediction_column_name: volatility column name for training
        predictions.  NOTE(review): the merge below selects the literal
        column 'pred', so this presumably must be 'pred' — confirm callers.
    :param train_targets_pd: DataFrame with 'row_id' and 'target' columns.
    :param book_path_test: list of test book parquet files.
    :return: copy of the naive test predictions with 'target' replaced by
        the random-forest output.
    """
    naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
    # left-join the naive volatilities onto the labelled training targets
    df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
    # single-feature regression: naive volatility -> true target
    X = np.array(df_joined_train['pred']).reshape(-1,1)
    y = np.array(df_joined_train['target']).reshape(-1,)
    regr = RandomForestRegressor(random_state=0)
    regr.fit(X, y)
    naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
    yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
    # overwrite the naive values with the model's predictions
    updated_predictions = naive_predictions_test.copy()
    updated_predictions['target'] = yhat
    return updated_predictions
| [
"sklearn.ensemble.RandomForestRegressor",
"pandas.read_parquet",
"numpy.log",
"numpy.sum",
"numpy.array",
"pandas.DataFrame"
] | [((599, 625), 'pandas.read_parquet', 'pd.read_parquet', (['file_path'], {}), '(file_path)\n', (614, 625), True, 'import pandas as pd\n'), ((1637, 1651), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1649, 1651), True, 'import pandas as pd\n'), ((2352, 2389), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2373, 2389), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((284, 314), 'numpy.sum', 'np.sum', (['(series_log_return ** 2)'], {}), '(series_log_return ** 2)\n', (290, 314), True, 'import numpy as np\n'), ((186, 211), 'numpy.log', 'np.log', (['list_stock_prices'], {}), '(list_stock_prices)\n', (192, 211), True, 'import numpy as np\n'), ((2235, 2268), 'numpy.array', 'np.array', (["df_joined_train['pred']"], {}), "(df_joined_train['pred'])\n", (2243, 2268), True, 'import numpy as np\n'), ((2291, 2326), 'numpy.array', 'np.array', (["df_joined_train['target']"], {}), "(df_joined_train['target'])\n", (2299, 2326), True, 'import numpy as np\n'), ((2560, 2602), 'numpy.array', 'np.array', (["naive_predictions_test['target']"], {}), "(naive_predictions_test['target'])\n", (2568, 2602), True, 'import numpy as np\n')] |
import sys
import matplotlib.pyplot
import pandas
import numpy
import struct
import os
# Frame-time plotter: reads a binary file of uint64 frame times
# (microseconds) named on the command line, converts them to FPS and
# writes several PNG charts at different zoom levels.
data = None
with open(sys.argv[1], "rb") as f:
    data = f.read()
# unpack the whole file as native 64-bit unsigned integers
data = struct.unpack("{}Q".format(len(data) // 8), data)
data = numpy.array(data, dtype=numpy.uint64)[1:]  # drop the first value
# discard outliers: frame times >= 100000 us (i.e. slower than 10 FPS)
data = numpy.array([x for x in data if x < 100000])
rt = numpy.cumsum(data) / 1000000  # running-time x-axis in seconds
lTime = rt[-1]
lTime += 5
lScalar = ((lTime // 60) + 1)  # whole minutes of capture, scales figure width
lTime = lScalar * 60           # round the x-axis up to a full minute
data = 1000000 / data          # frame times (us) -> frames per second
highest = numpy.max(data)
vertScale = ((highest) // 300) + 1  # y-axis scale in multiples of 300 FPS
print(vertScale)
#pd = pandas.DataFrame(data)
print(lTime)
# full-size plot with 30/60/300 FPS reference lines
fig = matplotlib.pyplot.figure(figsize=(lScalar*9, 2 * vertScale))
ax = matplotlib.pyplot.axes()
ax.plot(rt, data)
#ax = pd.plot()
ax.plot([-5,lTime], [30,30])
ax.plot([-5,lTime], [60,60])
ax.plot([-5,lTime], [300,300])
ax.set_xlabel("Time (Seconds)")
ax.set_ylabel("Frames Per Second")
ax.set_ylim(top=vertScale*300, bottom=-1)
ax.set_xlim(left=-5, right=lTime)
name = os.path.basename(sys.argv[1])
ax.get_figure().savefig("FullSize_{}.png".format(name))
# plot capped at 300 FPS
fig = matplotlib.pyplot.figure(figsize=(lScalar*9, min((lScalar * 81 / 16), 16)))
ax = matplotlib.pyplot.axes()
ax.plot(rt, data)
ax.plot([-5,lTime], [30,30])
ax.plot([-5,lTime], [60,60])
#ax = pd.plot()
ax.set_xlabel("Time (Seconds)")
ax.set_ylabel("Frames Per Second")
ax.set_ylim(top=300, bottom=-1)
ax.set_xlim(left=-5, right=lTime)
ax.get_figure().savefig("CapSize_300_{}.png".format(name))
print((lScalar*9, min((lScalar * 81 / 16), 16)))
# plot capped at 150 FPS
fig = matplotlib.pyplot.figure(figsize=(lScalar*9, min((lScalar * 81 / 16), 16)))
ax = matplotlib.pyplot.axes()
ax.plot(rt, data)
ax.plot([-5,lTime], [30,30])
ax.plot([-5,lTime], [60,60])
#ax = pd.plot()
ax.set_xlabel("Time (Seconds)")
ax.set_ylabel("Frames Per Second")
ax.set_ylim(top=150, bottom=-1)
ax.set_xlim(left=-5, right=lTime)
ax.get_figure().savefig("CapSize_150_{}.png".format(name))
print((lScalar*9, min((lScalar * 81 / 16), 16)))
# fixed 16:9 overview figure
# NOTE(review): the filename says 500 but the y-limit below is
# vertScale*300 — confirm which was intended.
fig = matplotlib.pyplot.figure(figsize=(16, 9))
ax = matplotlib.pyplot.axes()
ax.plot(rt, data)
ax.plot([-5,lTime], [30,30])
ax.plot([-5,lTime], [60,60])
#ax = pd.plot()
ax.set_xlabel("Time (Seconds)")
ax.set_ylabel("Frames Per Second")
ax.set_ylim(top=vertScale*300, bottom=-1)
ax.set_xlim(left=-5, right=lTime)
ax.get_figure().savefig("CapSize_500_{}.png".format(name))
# 60-sample moving average for a smoothed FPS trace
smoothfactor = 60
# NOTE(review): this cumulative-sum moving average is immediately
# overwritten by the numpy.convolve call below — dead store.
cumsum = numpy.cumsum(numpy.insert(data, 0, 0))
comp = (cumsum[smoothfactor:] - cumsum[:-smoothfactor]) / float(smoothfactor)
comp = numpy.convolve(data, numpy.ones((smoothfactor,))/smoothfactor, mode='valid')
print(lScalar*9, min((lScalar * 81 / 16), max(16, (lScalar * 18 / 16))))
fig = matplotlib.pyplot.figure(figsize=(lScalar*9, min((lScalar * 81 / 16), max(16, (lScalar * 9 / 16)))))
ax = matplotlib.pyplot.axes()
# 'valid' convolution shortens the series by smoothfactor-1 samples
ax.plot(rt[:-(smoothfactor-1)], comp)
ax.plot([-5,lTime], [30,30])
ax.plot([-5,lTime], [60,60])
#ax = pd.plot()
ax.set_xlabel("Time (Seconds)")
ax.set_ylabel("Frames Per Second")
ax.set_ylim(top=500, bottom=-1)
ax.set_xlim(left=-5, right=lTime)
ax.get_figure().savefig("Sliding_{}.png".format(name))
"numpy.insert",
"numpy.ones",
"numpy.max",
"numpy.array",
"os.path.basename",
"numpy.cumsum"
] | [((271, 315), 'numpy.array', 'numpy.array', (['[x for x in data if x < 100000]'], {}), '([x for x in data if x < 100000])\n', (282, 315), False, 'import numpy\n'), ((463, 478), 'numpy.max', 'numpy.max', (['data'], {}), '(data)\n', (472, 478), False, 'import numpy\n'), ((948, 977), 'os.path.basename', 'os.path.basename', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (964, 977), False, 'import os\n'), ((221, 258), 'numpy.array', 'numpy.array', (['data'], {'dtype': 'numpy.uint64'}), '(data, dtype=numpy.uint64)\n', (232, 258), False, 'import numpy\n'), ((322, 340), 'numpy.cumsum', 'numpy.cumsum', (['data'], {}), '(data)\n', (334, 340), False, 'import numpy\n'), ((2344, 2368), 'numpy.insert', 'numpy.insert', (['data', '(0)', '(0)'], {}), '(data, 0, 0)\n', (2356, 2368), False, 'import numpy\n'), ((2478, 2505), 'numpy.ones', 'numpy.ones', (['(smoothfactor,)'], {}), '((smoothfactor,))\n', (2488, 2505), False, 'import numpy\n')] |
import time
import numpy as np
import cv2
import os
import shutil
from tqdm import tqdm
"""
If preprocess=True the image gets mapped to a one channel image where non-red areas are replaced by 0. If no
preprocessing is needed object still needs to be used for normalization and reshaping.
The object can be used for different operations:
1. Real time inference: preprocess_and_normalize_image()
2. Preprocessing whole datasets: preprocess_dataset()
3. for learning (with and without data generators): normalize_image() and load_and_normalize_image()
It is important to use the right functions for training and real time use. The images get preprocessed
differently, even if it is used for training with generators and without generators. Visualize the images before
feeding them to the training. Range needs to be in (0,1) not (0,255) and channel order needs to be consistent.
"""
IMAGE_SHAPE = (220, 220, 3)
class ImagePreprocessor:
    """Resizes, optionally red-filters, and normalizes images.

    With ``preprocess=True`` an image is mapped to a single-channel image
    where non-red areas are set to 0 (HSV red masking + grayscale + CLAHE);
    with ``preprocess=False`` the object only resizes/normalizes 3-channel
    images.  Training, generator and real-time code paths must use the
    matching methods (see module docstring).
    """

    def __init__(self, preprocess, image_shape=IMAGE_SHAPE):
        # target (H, W, C) shape before preprocessing
        self.image_shape = image_shape
        self.preprocess = preprocess
        if preprocess:
            # number of channels after preprocessing, needs to be aligned with the preprocessing fct.
            image_depth_preprocessed = 1
            self.color_mode = 'grayscale'
        else:
            image_depth_preprocessed = 3
            self.color_mode = 'rgb'
        # shape of the image as returned by preprocess_image()
        self.image_shape_preprocessed = (image_shape[0], image_shape[1], image_depth_preprocessed)

    def preprocess_image(self, image):
        """Resize and (optionally) red-mask an image; no normalization.

        Expects a BGR image as read by cv2.imread.  Returns an image of
        ``self.image_shape_preprocessed``.
        """
        # if image.shape[0] != self.image_shape_preprocessed[0]:
        #     image = cv2.resize(image, self.image_shape[:-1])
        # else:
        #     pass
        #     # image = image[:, :, ::-1]
        image = cv2.resize(image, self.image_shape[:-1])
        # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.preprocess and image.shape[2] != 1:
            # red wraps around 0 in HSV, so combine two hue ranges
            img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            lower_red = np.array([0, 50, 50])
            upper_red = np.array([15, 255, 255])
            mask0 = cv2.inRange(img_hsv, lower_red, upper_red)
            lower_red = np.array([165, 50, 50])
            upper_red = np.array([180, 255, 255])
            mask1 = cv2.inRange(img_hsv, lower_red, upper_red)
            mask = mask0 + mask1
            # zero out everything that is not red
            image[np.where(mask == 0)] = 0
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            # local contrast enhancement on the remaining red regions
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4, 4))
            image = clahe.apply(image)
            # image = cv2.Canny(image, 100, 200)
            image = np.reshape(image, self.image_shape_preprocessed)
        return image

    def normalize_image(self, image):
        """Normalize for data generators: BGR -> RGB and scale to [0, 1]."""
        # for generators
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image / 255.

    def load_and_normalize_image(self, path):
        """Load an (already preprocessed) image from disk and scale to [0, 1].

        For data loading without generators; assumes preprocess_dataset()
        has already been run on the directory — TODO confirm.
        """
        # for data loading without generators
        if self.preprocess:
            image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            image = np.reshape(image, self.image_shape_preprocessed)
        else:
            image = cv2.imread(path, cv2.IMREAD_COLOR)
        return image / 255.

    def preprocess_and_normalize_image(self, image):
        """Full pipeline for real-time inference: preprocess then scale."""
        # for real time inference
        image = self.preprocess_image(image)
        return image / 255.

    def _load_preprocess_save(self, path):
        """Load an image, preprocess it, and write it back in place."""
        image = cv2.imread(path)
        image = self.preprocess_image(image)
        cv2.imwrite(path, image)

    def preprocess_dataset(self, path):
        """Preprocess every image below ``path`` in place.

        On the first run, originals from <path>/img are backed up into
        <path>/img_raw before preprocessing; on later runs the backups in
        img_raw are copied back to img and re-preprocessed.  Output images
        are resized/preprocessed but NOT normalized.
        """
        print("Preprocessing all images ...")
        if not os.path.exists(path + "/img_raw"):
            # first run: back up originals, then preprocess in place
            os.mkdir(path + "/img_raw")
            path += "/img/"
            img_paths = [f for f in os.listdir(path)]
            for i, f in enumerate(tqdm(img_paths, desc='Preprocess image')):
                f = path + f
                f_new = f.replace("img", "img_raw")
                if not os.path.exists(f_new):
                    shutil.copyfile(f, f_new)
                    self._load_preprocess_save(f)
                # image = cv2.imread(f)
                # cv2.imshow("train", image)
                # cv2.waitKey()
                # cv2.destroyAllWindows()
        else:
            # re-run: restore originals from the backup, then preprocess
            path += "/img_raw/"
            img_paths = [f for f in os.listdir(path)]
            for i, f in enumerate(tqdm(img_paths, desc='Preprocess image')):
                f = path + f
                f_new = f.replace("img_raw", "img")
                shutil.copyfile(f, f_new)
                self._load_preprocess_save(f_new)
        if self.preprocess:
            print("Images in " + path + " resized and preprocessed, still need to be NORMALIZED")
        else:
            print("Images in " + path + " resized, still need to be NORMALIZED")
| [
"cv2.imwrite",
"os.path.exists",
"os.listdir",
"numpy.reshape",
"numpy.where",
"cv2.inRange",
"tqdm.tqdm",
"cv2.createCLAHE",
"numpy.array",
"shutil.copyfile",
"os.mkdir",
"cv2.cvtColor",
"cv2.resize",
"cv2.imread"
] | [((1742, 1782), 'cv2.resize', 'cv2.resize', (['image', 'self.image_shape[:-1]'], {}), '(image, self.image_shape[:-1])\n', (1752, 1782), False, 'import cv2\n'), ((2587, 2635), 'numpy.reshape', 'np.reshape', (['image', 'self.image_shape_preprocessed'], {}), '(image, self.image_shape_preprocessed)\n', (2597, 2635), True, 'import numpy as np\n'), ((2737, 2775), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2749, 2775), False, 'import cv2\n'), ((3371, 3387), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3381, 3387), False, 'import cv2\n'), ((3441, 3465), 'cv2.imwrite', 'cv2.imwrite', (['path', 'image'], {}), '(path, image)\n', (3452, 3465), False, 'import cv2\n'), ((1915, 1953), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (1927, 1953), False, 'import cv2\n'), ((1978, 1999), 'numpy.array', 'np.array', (['[0, 50, 50]'], {}), '([0, 50, 50])\n', (1986, 1999), True, 'import numpy as np\n'), ((2024, 2048), 'numpy.array', 'np.array', (['[15, 255, 255]'], {}), '([15, 255, 255])\n', (2032, 2048), True, 'import numpy as np\n'), ((2069, 2111), 'cv2.inRange', 'cv2.inRange', (['img_hsv', 'lower_red', 'upper_red'], {}), '(img_hsv, lower_red, upper_red)\n', (2080, 2111), False, 'import cv2\n'), ((2137, 2160), 'numpy.array', 'np.array', (['[165, 50, 50]'], {}), '([165, 50, 50])\n', (2145, 2160), True, 'import numpy as np\n'), ((2185, 2210), 'numpy.array', 'np.array', (['[180, 255, 255]'], {}), '([180, 255, 255])\n', (2193, 2210), True, 'import numpy as np\n'), ((2231, 2273), 'cv2.inRange', 'cv2.inRange', (['img_hsv', 'lower_red', 'upper_red'], {}), '(img_hsv, lower_red, upper_red)\n', (2242, 2273), False, 'import cv2\n'), ((2372, 2411), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2384, 2411), False, 'import cv2\n'), ((2433, 2484), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(2.0)', 
'tileGridSize': '(4, 4)'}), '(clipLimit=2.0, tileGridSize=(4, 4))\n', (2448, 2484), False, 'import cv2\n'), ((2945, 2983), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_GRAYSCALE'], {}), '(path, cv2.IMREAD_GRAYSCALE)\n', (2955, 2983), False, 'import cv2\n'), ((3004, 3052), 'numpy.reshape', 'np.reshape', (['image', 'self.image_shape_preprocessed'], {}), '(image, self.image_shape_preprocessed)\n', (3014, 3052), True, 'import numpy as np\n'), ((3087, 3121), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_COLOR'], {}), '(path, cv2.IMREAD_COLOR)\n', (3097, 3121), False, 'import cv2\n'), ((3568, 3601), 'os.path.exists', 'os.path.exists', (["(path + '/img_raw')"], {}), "(path + '/img_raw')\n", (3582, 3601), False, 'import os\n'), ((3615, 3642), 'os.mkdir', 'os.mkdir', (["(path + '/img_raw')"], {}), "(path + '/img_raw')\n", (3623, 3642), False, 'import os\n'), ((2326, 2345), 'numpy.where', 'np.where', (['(mask == 0)'], {}), '(mask == 0)\n', (2334, 2345), True, 'import numpy as np\n'), ((3759, 3799), 'tqdm.tqdm', 'tqdm', (['img_paths'], {'desc': '"""Preprocess image"""'}), "(img_paths, desc='Preprocess image')\n", (3763, 3799), False, 'from tqdm import tqdm\n'), ((4314, 4354), 'tqdm.tqdm', 'tqdm', (['img_paths'], {'desc': '"""Preprocess image"""'}), "(img_paths, desc='Preprocess image')\n", (4318, 4354), False, 'from tqdm import tqdm\n'), ((4454, 4479), 'shutil.copyfile', 'shutil.copyfile', (['f', 'f_new'], {}), '(f, f_new)\n', (4469, 4479), False, 'import shutil\n'), ((3707, 3723), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3717, 3723), False, 'import os\n'), ((3906, 3927), 'os.path.exists', 'os.path.exists', (['f_new'], {}), '(f_new)\n', (3920, 3927), False, 'import os\n'), ((3949, 3974), 'shutil.copyfile', 'shutil.copyfile', (['f', 'f_new'], {}), '(f, f_new)\n', (3964, 3974), False, 'import shutil\n'), ((4262, 4278), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4272, 4278), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from modeler.dynamicmemorynetmodel import DynamicMemoryNet
from trainer.tftrainer import TFTrainer
class DynMemNetTrainer(TFTrainer):
    """Trainer for the DynamicMemoryNet model with synthetic toy data.

    Copies hyperparameters from ``config``, builds the model, generates a
    random binary story/query batch, and runs a short training loop with
    periodic checkpointing.
    """

    def __init__(self, config):
        # hyperparameters copied off the config object
        self.num_classes = config.num_classes
        self.learning_rate = config.learning_rate
        self.batch_size = config.batch_size
        self.decay_steps = config.decay_steps
        self.decay_rate = config.decay_rate
        self.sequence_length = config.sequence_length
        self.vocab_size = config.vocab_size
        self.embed_size = config.embed_size
        self.hidden_size = config.hidden_size
        self.is_training = config.is_training
        self.story_length = config.story_length
        self.dropout_keep_prob = config.dropout_keep_prob
        super(DynMemNetTrainer, self).__init__()
        pass

    def get_model(self):
        """Instantiate the DynamicMemoryNet with the stored hyperparameters."""
        self.model = DynamicMemoryNet(self.num_classes, self.learning_rate, self.batch_size,
                                          self.decay_steps, self.decay_rate, self.sequence_length,
                                          self.story_length, self.vocab_size, self.embed_size, self.hidden_size,
                                          self.is_training, multi_label_flag=False)

    def get_feed_dict(self):
        """Assemble the feed dict from the data created by get_data()."""
        self.train_feed = {self.model.query: self.query, self.model.story: self.story_feed,
                           self.model.answer_single: self.answer_single,
                           self.model.dropout_keep_prob: self.dropout_keep_prob}
        pass

    def get_data(self):
        """Generate a random binary story/query batch and derived labels."""
        # random {0,1} story tensor: [batch_size, story_length, sequence_length]
        self.story_feed = np.random.randn(self.batch_size, self.story_length, self.sequence_length)
        self.story_feed[self.story_feed > 0] = 1
        self.story_feed[self.story_feed <= 0] = 0
        self.query = np.random.randn(self.batch_size, self.sequence_length) # [batch_size, sequence_length]
        self.query[self.query > 0] = 1
        self.query[self.query <= 0] = 0
        # label = query sum plus 10% of the total story mass, rounded
        self.answer_single = np.sum(self.query, axis=1) + np.round(0.1 * np.sum(np.sum(self.story_feed, axis=1),
                                                                              axis=1))
        pass

    def train(self):
        """Run 150 training steps, checkpointing every 30 steps."""
        ckpt_dir = '../../model/dmn/'
        saver = tf.train.Saver()
        for i in range(150):
            # [batch_size].e.g. np.array([1, 0, 1, 1, 1, 2, 1, 1])
            loss, acc, predict, _ = self.session.run(
                [self.model.loss_val, self.model.accuracy, self.model.predictions, self.model.train_op],
                feed_dict=self.train_feed)
            print(i, "query:", self.query, "=====================>")
            print(i, "loss:", loss, "acc:", acc, "label:", self.answer_single, "prediction:", predict)
            if i % 30 == 0:
                save_path = ckpt_dir + "model.ckpt"
                saver.save(self.session, save_path, global_step=i * 300)
        pass
if __name__ == '__main__':
    # NOTE(review): DynMemNetTrainer.__init__ requires a `config` argument
    # but none is passed here — this call would raise TypeError; confirm
    # how the config is meant to be supplied.
    dmn = DynMemNetTrainer()
    dmn.run()
    pass
"numpy.sum",
"tensorflow.train.Saver",
"numpy.random.randn",
"modeler.dynamicmemorynetmodel.DynamicMemoryNet"
] | [((912, 1167), 'modeler.dynamicmemorynetmodel.DynamicMemoryNet', 'DynamicMemoryNet', (['self.num_classes', 'self.learning_rate', 'self.batch_size', 'self.decay_steps', 'self.decay_rate', 'self.sequence_length', 'self.story_length', 'self.vocab_size', 'self.embed_size', 'self.hidden_size', 'self.is_training'], {'multi_label_flag': '(False)'}), '(self.num_classes, self.learning_rate, self.batch_size,\n self.decay_steps, self.decay_rate, self.sequence_length, self.\n story_length, self.vocab_size, self.embed_size, self.hidden_size, self.\n is_training, multi_label_flag=False)\n', (928, 1167), False, 'from modeler.dynamicmemorynetmodel import DynamicMemoryNet\n'), ((1608, 1681), 'numpy.random.randn', 'np.random.randn', (['self.batch_size', 'self.story_length', 'self.sequence_length'], {}), '(self.batch_size, self.story_length, self.sequence_length)\n', (1623, 1681), True, 'import numpy as np\n'), ((1802, 1856), 'numpy.random.randn', 'np.random.randn', (['self.batch_size', 'self.sequence_length'], {}), '(self.batch_size, self.sequence_length)\n', (1817, 1856), True, 'import numpy as np\n'), ((2260, 2276), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2274, 2276), True, 'import tensorflow as tf\n'), ((1998, 2024), 'numpy.sum', 'np.sum', (['self.query'], {'axis': '(1)'}), '(self.query, axis=1)\n', (2004, 2024), True, 'import numpy as np\n'), ((2049, 2080), 'numpy.sum', 'np.sum', (['self.story_feed'], {'axis': '(1)'}), '(self.story_feed, axis=1)\n', (2055, 2080), True, 'import numpy as np\n')] |
#*******************************************************************************
# Copyright 2014-2020 Intel Corporation
# All Rights Reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#*******************************************************************************
# daal4py SGD (Stochastic Gradient Descent) example for shared memory systems
# using Logisitc Loss objective function
import daal4py as d4p
import numpy as np
# let's try to use pandas' fast csv reader
try:
    import pandas

    def read_csv(f, c, t=np.float64):
        """Read columns *c* of headerless CSV file *f* as dtype *t*."""
        return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
except ImportError:
    # fall back to numpy loadtxt; catch only the missing-pandas case so
    # unrelated errors are not silently swallowed by a bare except
    def read_csv(f, c, t=np.float64):
        """Read columns *c* of headerless CSV file *f* as a 2-D array of *t*."""
        return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)
def main(readcsv=read_csv, method='defaultDense'):
    """Run daal4py SGD on a logistic-loss objective over a sample CSV.

    :param readcsv: callable (file, columns, dtype) -> table.
    :param method: kept for API symmetry with the other examples;
        not used in this function.
    :return: the SGD solver result (provides .minimum and .nIterations).
    """
    infile = "./data/batch/custom.csv"
    # Read the data, let's have 4 independent variables
    data = readcsv(infile, range(4))
    dep_data = readcsv(infile, range(4,5))
    nVectors = data.shape[0]
    # configure a logistic loss object
    ll_algo = d4p.optimization_solver_logistic_loss(nVectors, interceptFlag=True)
    ll_algo.setup(data, dep_data)
    # configure a SGD object
    lrs = np.array([[0.01]], dtype=np.double)
    niters = 1000
    sgd_algo = d4p.optimization_solver_sgd(ll_algo,
                                             learningRateSequence=lrs,
                                             accuracyThreshold=0.02,
                                             nIterations=niters)
    # finally do the computation: start from an all-ones parameter vector
    # (4 coefficients + intercept)
    inp = np.array([[1], [1], [1], [1], [1]], dtype=np.double)
    res = sgd_algo.compute(inp)
    # The SGD result provides minimum and nIterations
    assert res.minimum.shape == inp.shape and res.nIterations[0][0] <= niters
    return res
if __name__ == "__main__":
res = main()
print("\nMinimum:\n", res.minimum)
print("\nNumber of iterations performed:\n", res.nIterations[0][0])
print('All looks good!')
| [
"pandas.read_csv",
"daal4py.optimization_solver_logistic_loss",
"daal4py.optimization_solver_sgd",
"numpy.array",
"numpy.loadtxt"
] | [((1601, 1668), 'daal4py.optimization_solver_logistic_loss', 'd4p.optimization_solver_logistic_loss', (['nVectors'], {'interceptFlag': '(True)'}), '(nVectors, interceptFlag=True)\n', (1638, 1668), True, 'import daal4py as d4p\n'), ((1743, 1778), 'numpy.array', 'np.array', (['[[0.01]]'], {'dtype': 'np.double'}), '([[0.01]], dtype=np.double)\n', (1751, 1778), True, 'import numpy as np\n'), ((1812, 1922), 'daal4py.optimization_solver_sgd', 'd4p.optimization_solver_sgd', (['ll_algo'], {'learningRateSequence': 'lrs', 'accuracyThreshold': '(0.02)', 'nIterations': 'niters'}), '(ll_algo, learningRateSequence=lrs,\n accuracyThreshold=0.02, nIterations=niters)\n', (1839, 1922), True, 'import daal4py as d4p\n'), ((2092, 2144), 'numpy.array', 'np.array', (['[[1], [1], [1], [1], [1]]'], {'dtype': 'np.double'}), '([[1], [1], [1], [1], [1]], dtype=np.double)\n', (2100, 2144), True, 'import numpy as np\n'), ((1085, 1151), 'pandas.read_csv', 'pandas.read_csv', (['f'], {'usecols': 'c', 'delimiter': '""","""', 'header': 'None', 'dtype': 't'}), "(f, usecols=c, delimiter=',', header=None, dtype=t)\n", (1100, 1151), False, 'import pandas\n'), ((1235, 1283), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'usecols': 'c', 'delimiter': '""","""', 'ndmin': '(2)'}), "(f, usecols=c, delimiter=',', ndmin=2)\n", (1245, 1283), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import time
import numpy as np
import skrobot_tlp as skrobot
import pdb
def _get_tile_shape(num, hw_ratio=1):
r_num = int(round(np.sqrt(num / hw_ratio))) # weighted by wh_ratio
c_num = 0
while r_num * c_num < num:
c_num += 1
while (r_num - 1) * c_num >= num:
r_num -= 1
return r_num, c_num
def main():
    """Show several robot models tiled on 2x2 m ground planes in a viewer."""
    viewer = skrobot.viewers.TrimeshSceneViewer(resolution=(640, 480))
    robots = [
        skrobot.models.Kuka(),
        skrobot.models.Fetch(),
        skrobot.models.PR2(),
        skrobot.models.Panda(),
    ]
    nrow, ncol = _get_tile_shape(len(robots))
    # tile pitch in meters (x and y spacing between robots)
    row, col = 2, 2
    for i in range(nrow):
        for j in range(ncol):
            try:
                # NOTE(review): row-major indexing would be i * ncol + j;
                # i * nrow + j only coincides because nrow == ncol == 2
                # for four robots — confirm before adding more models.
                robot = robots[i * nrow + j]
            except IndexError:
                break
            # slightly shrunken plane under each robot, sunk 1 cm
            plane = skrobot.model.Box(extents=(row - 0.01, col - 0.01, 0.01))
            plane.translate((row * i, col * j, -0.01))
            viewer.add(plane)
            robot.translate((row * i, col * j, 0))
            viewer.add(robot)
    viewer.set_camera(angles=[np.deg2rad(30), 0, 0])
    viewer.show()
    print('==> Press [q] to close window')
    # keep redrawing until the viewer window is closed
    while not viewer.has_exit:
        time.sleep(0.1)
        viewer.redraw()
if __name__ == '__main__':
    # script entry point: open the viewer with the tiled robots
    main()
| [
"skrobot_tlp.models.Fetch",
"numpy.sqrt",
"time.sleep",
"skrobot_tlp.models.Kuka",
"skrobot_tlp.viewers.TrimeshSceneViewer",
"numpy.deg2rad",
"skrobot_tlp.model.Box",
"skrobot_tlp.models.Panda",
"skrobot_tlp.models.PR2"
] | [((382, 439), 'skrobot_tlp.viewers.TrimeshSceneViewer', 'skrobot.viewers.TrimeshSceneViewer', ([], {'resolution': '(640, 480)'}), '(resolution=(640, 480))\n', (416, 439), True, 'import skrobot_tlp as skrobot\n'), ((464, 485), 'skrobot_tlp.models.Kuka', 'skrobot.models.Kuka', ([], {}), '()\n', (483, 485), True, 'import skrobot_tlp as skrobot\n'), ((495, 517), 'skrobot_tlp.models.Fetch', 'skrobot.models.Fetch', ([], {}), '()\n', (515, 517), True, 'import skrobot_tlp as skrobot\n'), ((527, 547), 'skrobot_tlp.models.PR2', 'skrobot.models.PR2', ([], {}), '()\n', (545, 547), True, 'import skrobot_tlp as skrobot\n'), ((557, 579), 'skrobot_tlp.models.Panda', 'skrobot.models.Panda', ([], {}), '()\n', (577, 579), True, 'import skrobot_tlp as skrobot\n'), ((1224, 1239), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1234, 1239), False, 'import time\n'), ((160, 183), 'numpy.sqrt', 'np.sqrt', (['(num / hw_ratio)'], {}), '(num / hw_ratio)\n', (167, 183), True, 'import numpy as np\n'), ((845, 902), 'skrobot_tlp.model.Box', 'skrobot.model.Box', ([], {'extents': '(row - 0.01, col - 0.01, 0.01)'}), '(extents=(row - 0.01, col - 0.01, 0.01))\n', (862, 902), True, 'import skrobot_tlp as skrobot\n'), ((1100, 1114), 'numpy.deg2rad', 'np.deg2rad', (['(30)'], {}), '(30)\n', (1110, 1114), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
import matplotlib
# import nose
#
matplotlib.use('agg')
#
from read_dcm import DcmRead
from PyDMD import DMD
import numpy as np
import time
from past.utils import old_div
import SimpleITK as sitk
import matplotlib.pyplot as plt
# Two-stage DMD pipeline on lung DICOM sequences: a sliding-window DMD
# separates each window into a slow (low-rank) mode and a sparse residual,
# then a second DMD on the low-rank images keeps the first_n_modes slowest
# modes for reconstruction.
# the root location that used for saving results
root_location = '../result/lung/'
# the loc that used to save origin images
origin_loc = 'original_images'
# the loc that used to save step one results
low_rank_loc = 'low_rank_images'
sparse_loc = 'sparse_images'
# the loc that used to save step two results
rec_loc = 'reconstructed_images'
each_mode_loc = 'low_rank_mode'
# sliding-window width (number of consecutive frames per DMD fit)
win_size = 3
# num of modes that used to reconstructed (overwritten by the loop below)
first_n_modes = 0
time1 = time.time()
for first_n_modes in range(1, 15):
    print('...'+str(first_n_modes)+'...')
    time2 = time.time()
    print(time2 - time1)
    dcm = DcmRead(dir_files="../data_set/lung", key_word="IM")
    Data = dcm.read_images()
    """
    step one windowed DMD, sampling rate: 3
    """
    print('begin step one')
    # number of slowest modes kept per window
    threshold = 1
    dmd = DMD(svd_rank=-1)
    # creating a list for storing modes and eigs
    modes = None
    eig = None
    low_rank_modes = []
    sparse_modes = []
    # normalize each col of the image data
    norm_data = dcm.normalize_data(Data)
    # if first_n_modes == 1:
    #     dcm.save_dcm_sequence(norm_data, save_location=root_location+origin_loc)
    for i in range(0, Data.shape[1]-win_size+1):
        Data_win = norm_data[:, i:i+win_size]
        dmd_info = dmd.fit(X=Data_win)
        # indices that order modes by |frequency| — np.argsort is ascending,
        # so low values (slow modes) come first
        index = np.argsort(np.abs(old_div(np.log(dmd_info.eigs), (2. * np.pi))))
        # select first n slow modes
        slow_mode_index = index < threshold
        # eig = dmd_info.eigs[slow_mode_index]
        modes = dmd_info.modes[:, slow_mode_index]
        sparse = dmd_info.modes[:, ~slow_mode_index]
        low_rank_modes.append(modes)
        sparse_modes.append(sparse[:, -1])
    # save low_rank_images and sparse images
    low_rank_images = np.array(low_rank_modes)[:, :, 0].T
    sparse_images = np.array(sparse_modes).T
    if first_n_modes == 1:
        norm_low_rank_images = dcm.normalize_data(low_rank_images)
        norm_sparse_images = dcm.normalize_data(sparse_images)
        dcm.save_dcm_sequence(norm_low_rank_images, save_location=root_location+low_rank_loc+'w_'+str(win_size))
        dcm.save_dcm_sequence(norm_sparse_images, save_location=root_location+sparse_loc+'w_'+str(win_size))
    """
    step two: apply dmd to the reconstructed images and then extracted the first three low frequency modes
    """
    print('begin step two')
    modes_st = None
    dmd_step_two = dmd.fit(X=low_rank_images)
    # sort the array in increasing order (of |frequency|)
    index = np.argsort(np.abs(old_div(np.log(dmd_step_two.eigs), (2. * np.pi))))
    slow_mode_index = index < first_n_modes
    modes_step_two = dmd_step_two.modes[:, slow_mode_index]
    eigs_step_two = dmd_step_two.eigs[slow_mode_index]
    re_images = dmd.recon_fn_data(modes_step_two, eigs_step_two)
    # norm_re_images = dcm.normalize_data(re_images)
    dcm.save_dcm_sequence(re_images, save_location=root_location+rec_loc+'w_'+str(win_size)+'st_'+str(first_n_modes))
    # save each slow mode/low rank image in terms of increasing order
    num = np.shape(index)[0]
    image_list = []
    name = []
    print('the num of images are:'+str(num))
    for i in range(0, num):
        mode_index = index == i
        mode = dmd_step_two.modes[:, mode_index]
        image_list.append(mode)
        name.append(str(i))
    images = np.array(image_list)[:, :, 0].T
    loc = root_location+each_mode_loc+'w_'+str(win_size)+'st_'+str(first_n_modes)
    if first_n_modes == 1:
        dcm.save_data_sequence(images, save_location=loc, name=name)
| [
"matplotlib.use",
"numpy.log",
"PyDMD.DMD",
"numpy.array",
"read_dcm.DcmRead",
"numpy.shape",
"time.time"
] | [((63, 84), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (77, 84), False, 'import matplotlib\n'), ((735, 746), 'time.time', 'time.time', ([], {}), '()\n', (744, 746), False, 'import time\n'), ((838, 849), 'time.time', 'time.time', ([], {}), '()\n', (847, 849), False, 'import time\n'), ((887, 939), 'read_dcm.DcmRead', 'DcmRead', ([], {'dir_files': '"""../data_set/lung"""', 'key_word': '"""IM"""'}), "(dir_files='../data_set/lung', key_word='IM')\n", (894, 939), False, 'from read_dcm import DcmRead\n'), ((1092, 1108), 'PyDMD.DMD', 'DMD', ([], {'svd_rank': '(-1)'}), '(svd_rank=-1)\n', (1095, 1108), False, 'from PyDMD import DMD\n'), ((2152, 2174), 'numpy.array', 'np.array', (['sparse_modes'], {}), '(sparse_modes)\n', (2160, 2174), True, 'import numpy as np\n'), ((3393, 3408), 'numpy.shape', 'np.shape', (['index'], {}), '(index)\n', (3401, 3408), True, 'import numpy as np\n'), ((2095, 2119), 'numpy.array', 'np.array', (['low_rank_modes'], {}), '(low_rank_modes)\n', (2103, 2119), True, 'import numpy as np\n'), ((3682, 3702), 'numpy.array', 'np.array', (['image_list'], {}), '(image_list)\n', (3690, 3702), True, 'import numpy as np\n'), ((2865, 2890), 'numpy.log', 'np.log', (['dmd_step_two.eigs'], {}), '(dmd_step_two.eigs)\n', (2871, 2890), True, 'import numpy as np\n'), ((1667, 1688), 'numpy.log', 'np.log', (['dmd_info.eigs'], {}), '(dmd_info.eigs)\n', (1673, 1688), True, 'import numpy as np\n')] |
import os
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.ioff()
mpl.rcParams.update({'font.size': 18})
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from scipy import odr as odr
from scipy import stats as scistats
import RTxploitation as rt
# pure-water IOP lookup function (used by the black-water models below)
iopw = rt.auxdata.iopw().get_iopw
opj = os.path.join
idir = os.path.abspath('/DATA/OBS2CO/data/rogerio')
figdir = opj(idir, 'fig')
file = opj(idir, 'data/Simulation_Rrs_OSOAA_TSS_COD_aCDOM.xlsx')

data = pd.read_excel(file)
data.sort_values(by="TSS (mg L)", inplace=True)
data = data.set_index(['Station', 'Date'])
# CDOM absorption value and standard deviation (presumably columns 17/18
# of the sheet — confirm against the spreadsheet layout)
acdom = data.iloc[:, 17]
acdom_sd = data.iloc[:, 18]
# remove sparse data of CDOM (drop the last two columns)
data = data.iloc[:, :-2]

params = ['SPM', 'DOC']
# target variable for the fits below ('SPM' here)
param = params[0]
if param == 'DOC':
    # DOC has gaps; keep only complete rows for that target
    data.dropna(inplace=True)
month = data.index.get_level_values(1).month
spm = data.iloc[:, 0]  # .values.reshape(-1, 1)
spm_sd = data.iloc[:, 1]
doc = data.iloc[:, 15]
doc_sd = data.iloc[:, 16]
def Rrs2rrs(Rrs):
    """Convert above-water reflectance Rrs to subsurface reflectance rrs."""
    denom = 0.52 + 1.7 * Rrs
    return Rrs / denom
def inv_gordon88(rrs, g0=0.089, g1=0.125):
    """Invert the Gordon (1988) quadratic rrs = g0*u + g1*u**2 for u.

    Takes the positive root of the quadratic; works element-wise on arrays.
    """
    root = np.sqrt(g0 ** 2 + 4 * g1 * rrs)
    return (-g0 + root) / (2 * g1)
def black_water_model_old(B, x, wl=550):
    # Legacy retrieval: scales the inverted backscatter ratio u, shifted by a
    # baseline term. B[0] is squared and B[1] taken in absolute value so the
    # optimizer can explore negative values without changing the model sign.
    aw, bbw = iopw(wl)  # pure-water IOPs at wl (unused here, kept for parity)
    u = inv_gordon88(Rrs2rrs(x))
    return B[0] ** 2 * (u - np.abs(B[1]))
def black_water_model(B, x, wl=550):
    # Semi-analytical retrieval: invert Rrs into the backscatter ratio u, then
    # solve for N given pure-water IOPs at wavelength wl; B[3] scales N into
    # the retrieved quantity. NOTE(review): exact meaning of B[0..2] (IOP
    # coefficients, presumably) should be confirmed against the fit setup.
    u = inv_gordon88(Rrs2rrs(x))
    aw, bbw = iopw(wl)
    numerator = u * (aw + bbw + B[0]) - bbw
    denominator = B[1] - u * (B[1] + B[2])
    N = numerator / denominator
    return B[3] * N
def linear(B, x):
    """Straight line y = B[0] + B[1]*x (intercept first, scipy.odr convention)."""
    intercept, slope = B[0], B[1]
    return intercept + slope * x
def exponential(B, x):
    """Offset exponential y = B[0]*exp(B[1]*x) + B[2]."""
    scaled = B[0] * np.exp(B[1] * x)
    return scaled + B[2]
def hyperbolic(B, x, b=1):
    """Hyperbolic decay y = B[1] / (1 + B[0]*x)**(1/b)."""
    base = (1 + B[0] * x) ** (1 / b)
    return B[1] / base
def poly(B, x):
    """Quadratic polynomial y = B[0] + B[1]*x + B[2]*x**2."""
    total = B[0]
    total = total + B[1] * x
    total = total + B[2] * x ** 2
    return total
def confidence_interval(xn, model, res, nstd=1):
    """Return the ±nstd-sigma confidence band of a fitted model.

    (The original carried a second, duplicated stray docstring in the body;
    it has been removed.)

    :param xn: x-axis data on which the band is evaluated
    :param model: callable model(beta, x) used for the fit
    :param res: output from scipy.odr.ODR.run() (uses .beta and .sd_beta)
    :param nstd: number of standard deviations for the band width
    :return: (band evaluated at beta + nstd*sd, band evaluated at beta - nstd*sd)
    """
    popt_up = res.beta + nstd * res.sd_beta
    popt_dw = res.beta - nstd * res.sd_beta
    return model(popt_up, xn), model(popt_dw, xn)
# ------------------------------------------------------------------
# Choose the response variable (y) and the retrieval model to fit.
# ------------------------------------------------------------------
if param == 'SPM':
    y=spm
    y_sd=spm_sd
    fit_eq='$g_0={:.1f};\ g_1={:.1f};\ C={:.1f}$'
    ylabel='SPM (mg/L)'
    # gordon values
    g0 = 0.089
    g1 = 0.125
    model,beta0=black_water_model_old,[150,10]
    #model,beta0=black_water_model,[1,1,1,10]
else:
    y=doc
    y_sd=doc_sd
    fit_eq='y = {:.1f}/(1+{:.1f}$x)$\n res_var = {:.1f}'
    ylabel='DOC (mg/L)'
    model,beta0=hyperbolic,[10,10]
c=doc
cmap = plt.cm.get_cmap("Spectral").reversed()
# Per-band fit coefficients and uncertainties are meant to be collected here.
stats = pd.DataFrame(columns=['band','algo','b0','b1','b2','sig_b0','sig_b1','sig_b2'])
# plot Rrs = f(SPM)
wls = [560, 665, 704,740, 782, 864]
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(20, 12))
for i, ax in enumerate(axs.ravel()):
    wl=wls[i]
    # Band i: Rrs value column and its sd (interleaved columns in the sheet).
    Rrs = data.iloc[:, 2 * i + 2]
    print(Rrs.name,wl)
    Rrs_sd = data.iloc[:, 2 * i + 3]
    x,x_sd = Rrs, Rrs_sd
    ax.errorbar(x, y, xerr=x_sd, yerr=y_sd, fmt='o', color='grey', ecolor='black', alpha=0.8)
    # to put color in scatter dots
    #ax.scatter(x, y, c=c, s=55, cmap=cmap)
    ax.set_title(x.name)
    ax.set_xlabel(r'$R_{rs}\ (sr^{-1})$')
    ax.set_ylabel(ylabel)
    testdata = odr.RealData(x, y, sx=x_sd, sy=y_sd)
    _odr = odr.ODR(testdata, odr.Model(model, extra_args=[wl]), beta0=beta0) #
    # fit with ordinary least square (OLS, fit_type=2 )
    _odr.set_job(fit_type=2)
    res = _odr.run()
    res.pprint()
    xn = np.linspace(0, np.max(x) * 1.25, 500)
    yn = model(res.beta, xn)
    fit_up, fit_dw = confidence_interval(xn, model, res)
    #stats.loc[2*i]=np.concatenate([[Rrs.name, 'OLS'], res.beta, res.sd_beta])
    ax.plot(xn, yn, 'b--', label='OLS',#; '+fit_eq.format(*res.beta),
            linewidth=2)
    ax.fill_between(xn, fit_up, fit_dw, alpha=.25, facecolor="b")  # ,label="1-sigma interval")
    # fit with Orthogonal distance regression (ODR, fit_type = 0)
    _odr.set_job(fit_type=0)
    res = _odr.run()
    res.pprint()
    xn = np.linspace(0, np.max(x) * 1.25, 500)
    yn = model(res.beta, xn)
    fit_up, fit_dw = confidence_interval(xn, model, res)
    print()
    #stats.loc[2*i+1]=np.concatenate([[Rrs.name, 'ODR'], res.beta, res.sd_beta])
    ax.plot(xn, yn, 'r-', label='ODR',#; '+fit_eq.format(*res.beta),
            linewidth=2)
    ax.fill_between(xn, fit_up, fit_dw, alpha=.25, facecolor="r")  # ,label="1-sigma interval")
    ax.legend()
    ax.set_ylim([0,40])
plt.suptitle('Fit based on modified Gordon model')
stats.to_csv(opj(idir,'stats_SPM_Rrs.csv'), float_format='%.2f',index=False)
plt.tight_layout(rect=[0.0, 0.0, 0.99, 0.94])
fig.savefig(opj(figdir,'blackwater_'+param+'_vs_Rrs.png'), dpi=300)
plt.show()
plt.close()
stats= stats.set_index(['band','algo'])
stats = stats.astype('float')
#------------------------------------------------
# Error histograms
#------------------------------------------------
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(20, 12))
axs=axs.ravel()
i=0
bandwidth=0.4
for g,d in data.loc[:,['Rrs_sim(B3)','Rrs_sim(B4)','Rrs_sim(B5)','Rrs_sim(B6)','Rrs_sim(B7)','Rrs_sim(B8a)']].iteritems():
    print(g)
    ax=axs[i]
    for method in ('OLS','ODR'):
        c='red'
        if method == 'OLS':
            c='blue'
        # NOTE(review): the stats.loc[...] writes in the fitting loop above are
        # commented out, so stats is empty and this lookup raises KeyError
        # unless those lines are restored.
        stat=stats.loc[(g,method)]
        # Retrieval error on the fitted data points themselves.
        err = model([stat.b0, stat.b1,stat.b2], d) - y.values.ravel()
        kde = scistats.gaussian_kde(err,bw_method=bandwidth)# / x.std(ddof=1))
        xx = np.linspace(-12,12, 1000)
        ax.hist(err, bins=np.arange(-15,15,bandwidth), color=c,label=method, alpha=0.5, density=True,rwidth=0.8)
        ax.plot(xx, kde.evaluate(xx),color=c,label=method,lw=2)
    ax.axes.get_yaxis().set_visible(False)
    ax.set_title(g)
    ax.set_xlim(-12,12)
    ax.set_xlabel(r'Retrieved - Measured SSSC (mg/L)')
    ax.set_ylabel(r'N')
    ax.legend()
    print(y,)
    i+=1
plt.suptitle('Normalized histogram of errors')
plt.tight_layout(rect=[0.05, 0.05, 0.99, 0.95])
#fig.savefig(opj(figdir,'blackwater_histogram_SPM-err-retrieval.png'), dpi=200)
plt.show()
#------------------------------------------------
# compare with uncertainty
#------------------------------------------------
def sig_param(Rrs, sig_a, sig_b, cov_ab):
    """Propagated 1-sigma uncertainty of a linear retrieval a*Rrs + b.

    Standard first-order error propagation including the covariance term
    of the two fitted coefficients.
    """
    variance = (Rrs * sig_a) ** 2 + sig_b ** 2 + 2 * Rrs * cov_ab
    return variance ** 0.5
# Scatter comparison: measured vs retrieved values per band, with a
# per-method regression line and the 1:1 reference line.
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(20, 12))
axs=axs.ravel()
i=0
for g,d in data.loc[:,['Rrs_sim(B3)','Rrs_sim(B4)','Rrs_sim(B5)','Rrs_sim(B6)','Rrs_sim(B7)','Rrs_sim(B8a)']].iteritems():
    print(g)
    ax=axs[i]
    for method in ('OLS','ODR'):
        c='red'
        if method == 'OLS':
            c='blue'
        # NOTE(review): stats is only populated if the stats.loc writes in the
        # fitting loop are uncommented; otherwise this raises a KeyError.
        stat=stats.loc[(g,method)]
        est = model([stat.b0, stat.b1,stat.b2], d)
        slope, intercept, r_value, p_value, std_err = scistats.linregress(y, est)
        ax.plot([-10, 100], intercept + slope * np.array([-10, 100]), color=c, lw=1.6)
        #sig_est = sig_param(d,stat.sig_a,stat.sig_b,stat.cov_ab)
        #yerr = sig_est,
        ax.errorbar(y, est, xerr=y_sd, fmt='o', color=c, ecolor=c, label=method+'; y={:.1f}x+{:.1f}; $R^2$={:.3f}'.format(slope, intercept,r_value**2),alpha=0.8)
    ax.set_title(g)
    ax.set_xlabel(r'Measured SSSC (mg/L)')
    ax.set_ylabel(r'Retrieved SSSC (mg/L)')
    # 1:1 line for reference.
    ax.plot([-10, 100], [-10, 100], '--', color="grey", lw=1.6)
    ax.set_xlim(0,30)
    ax.set_ylim(0,30)
    ax.legend()
    print(y,)
    i+=1
plt.suptitle('Comparison retrievals')
plt.tight_layout(rect=[0.05, 0.05, 0.99, 0.95])
fig.savefig(opj(figdir,'blackwater_compar_SPM-retrieval.png'), dpi=200)
plt.show()
| [
"scipy.stats.linregress",
"numpy.sqrt",
"numpy.array",
"pandas.read_excel",
"numpy.arange",
"scipy.stats.gaussian_kde",
"numpy.max",
"matplotlib.pyplot.close",
"RTxploitation.auxdata.iopw",
"scipy.odr.RealData",
"numpy.linspace",
"numpy.exp",
"pandas.DataFrame",
"numpy.abs",
"matplotlib.... | [((108, 118), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (116, 118), True, 'import matplotlib.pyplot as plt\n'), ((119, 157), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'font.size': 18}"], {}), "({'font.size': 18})\n", (138, 157), True, 'import matplotlib as mpl\n'), ((460, 504), 'os.path.abspath', 'os.path.abspath', (['"""/DATA/OBS2CO/data/rogerio"""'], {}), "('/DATA/OBS2CO/data/rogerio')\n", (475, 504), False, 'import os\n'), ((602, 621), 'pandas.read_excel', 'pd.read_excel', (['file'], {}), '(file)\n', (615, 621), True, 'import pandas as pd\n'), ((2989, 3079), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['band', 'algo', 'b0', 'b1', 'b2', 'sig_b0', 'sig_b1', 'sig_b2']"}), "(columns=['band', 'algo', 'b0', 'b1', 'b2', 'sig_b0', 'sig_b1',\n 'sig_b2'])\n", (3001, 3079), True, 'import pandas as pd\n'), ((3138, 3186), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)', 'figsize': '(20, 12)'}), '(nrows=2, ncols=3, figsize=(20, 12))\n', (3150, 3186), True, 'import matplotlib.pyplot as plt\n'), ((4880, 4930), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Fit based on modified Gordon model"""'], {}), "('Fit based on modified Gordon model')\n", (4892, 4930), True, 'import matplotlib.pyplot as plt\n'), ((5008, 5053), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0.0, 0.0, 0.99, 0.94]'}), '(rect=[0.0, 0.0, 0.99, 0.94])\n', (5024, 5053), True, 'import matplotlib.pyplot as plt\n'), ((5122, 5132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5130, 5132), True, 'import matplotlib.pyplot as plt\n'), ((5134, 5145), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5143, 5145), True, 'import matplotlib.pyplot as plt\n'), ((5348, 5396), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)', 'figsize': '(20, 12)'}), '(nrows=2, ncols=3, figsize=(20, 12))\n', (5360, 5396), True, 'import matplotlib.pyplot as plt\n'), 
((6287, 6333), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Normalized histogram of errors"""'], {}), "('Normalized histogram of errors')\n", (6299, 6333), True, 'import matplotlib.pyplot as plt\n'), ((6334, 6381), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0.05, 0.05, 0.99, 0.95]'}), '(rect=[0.05, 0.05, 0.99, 0.95])\n', (6350, 6381), True, 'import matplotlib.pyplot as plt\n'), ((6462, 6472), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6470, 6472), True, 'import matplotlib.pyplot as plt\n'), ((6711, 6759), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)', 'figsize': '(20, 12)'}), '(nrows=2, ncols=3, figsize=(20, 12))\n', (6723, 6759), True, 'import matplotlib.pyplot as plt\n'), ((7797, 7834), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Comparison retrievals"""'], {}), "('Comparison retrievals')\n", (7809, 7834), True, 'import matplotlib.pyplot as plt\n'), ((7835, 7882), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0.05, 0.05, 0.99, 0.95]'}), '(rect=[0.05, 0.05, 0.99, 0.95])\n', (7851, 7882), True, 'import matplotlib.pyplot as plt\n'), ((7955, 7965), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7963, 7965), True, 'import matplotlib.pyplot as plt\n'), ((407, 424), 'RTxploitation.auxdata.iopw', 'rt.auxdata.iopw', ([], {}), '()\n', (422, 424), True, 'import RTxploitation as rt\n'), ((3642, 3678), 'scipy.odr.RealData', 'odr.RealData', (['x', 'y'], {'sx': 'x_sd', 'sy': 'y_sd'}), '(x, y, sx=x_sd, sy=y_sd)\n', (3654, 3678), True, 'from scipy import odr as odr\n'), ((2942, 2969), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""Spectral"""'], {}), "('Spectral')\n", (2957, 2969), True, 'import matplotlib.pyplot as plt\n'), ((3708, 3741), 'scipy.odr.Model', 'odr.Model', (['model'], {'extra_args': '[wl]'}), '(model, extra_args=[wl])\n', (3717, 3741), True, 'from scipy import odr as odr\n'), ((5800, 5847), 'scipy.stats.gaussian_kde', 
'scistats.gaussian_kde', (['err'], {'bw_method': 'bandwidth'}), '(err, bw_method=bandwidth)\n', (5821, 5847), True, 'from scipy import stats as scistats\n'), ((5878, 5904), 'numpy.linspace', 'np.linspace', (['(-12)', '(12)', '(1000)'], {}), '(-12, 12, 1000)\n', (5889, 5904), True, 'import numpy as np\n'), ((7170, 7197), 'scipy.stats.linregress', 'scistats.linregress', (['y', 'est'], {}), '(y, est)\n', (7189, 7197), True, 'from scipy import stats as scistats\n'), ((1233, 1248), 'numpy.sqrt', 'np.sqrt', (['deltas'], {}), '(deltas)\n', (1240, 1248), True, 'import numpy as np\n'), ((1399, 1411), 'numpy.abs', 'np.abs', (['B[1]'], {}), '(B[1])\n', (1405, 1411), True, 'import numpy as np\n'), ((1775, 1791), 'numpy.exp', 'np.exp', (['(B[1] * x)'], {}), '(B[1] * x)\n', (1781, 1791), True, 'import numpy as np\n'), ((3907, 3916), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (3913, 3916), True, 'import numpy as np\n'), ((4447, 4456), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (4453, 4456), True, 'import numpy as np\n'), ((5931, 5960), 'numpy.arange', 'np.arange', (['(-15)', '(15)', 'bandwidth'], {}), '(-15, 15, bandwidth)\n', (5940, 5960), True, 'import numpy as np\n'), ((7246, 7266), 'numpy.array', 'np.array', (['[-10, 100]'], {}), '([-10, 100])\n', (7254, 7266), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 3 21:31:48 2019
@author: bill
This contains all the functions needed to execute the main NMF Analysis strategy as contained in the NMF_Analysis class.
"""
import pickle
import numpy as np
import scipy.sparse
from sklearn.decomposition import NMF
import sklearn.preprocessing
import scipy
'''
Modifications to H that ensure each topic is mapped to a unit vector in the term space.
'''
def norm_fun(vector):
    """Return the Euclidean (L2) length of *vector*."""
    length = np.linalg.norm(vector)
    return length
def b_mat(H):
    """Build diagonal scaling matrices from the row norms of H.

    Returns (B, B_inv) where B carries 1/||H[t]|| on its diagonal (so B @ H
    has unit-norm rows) and B_inv carries ||H[t]||, undoing that scaling.
    """
    row_norms = np.linalg.norm(H, axis=1)
    B = np.diag(1.0 / row_norms)
    B_inv = np.diag(row_norms.astype(float))
    return B, B_inv
'''
The main function to run NMF on the desired number of topics.
'''
def run_ensemble_NMF_strategy(num_topics, num_folds, num_runs, num_docs, doc_term_matrix):
    """Ensemble NMF: factor many document subsamples, then factor the stacked
    topic matrices to obtain a stabilised ensemble topic model.

    :param num_topics: number of topics to extract
    :param num_folds: number of folds per run (each fold is held out once)
    :param num_runs: number of shuffled repetitions over the folds
    :param num_docs: total number of documents (rows of doc_term_matrix)
    :param doc_term_matrix: (sparse) document-term matrix
    :return: (num_topics, ensemble_W, ensemble_H), with each row of
             ensemble_H scaled to a unit vector in term space
    """
    # Fold sizes: num_docs // num_folds each, remainder spread over the first
    # folds so the sizes sum to num_docs exactly.
    # (np.int is deprecated/removed in modern numpy; plain int is equivalent.)
    fold_sizes = (num_docs // num_folds) * np.ones(num_folds, dtype=int)
    fold_sizes[:num_docs % num_folds] += 1

    # Base-level runs: one NMF per (run, fold), each fitted on the documents
    # OUTSIDE the held-out fold; collect every topic-term matrix H.
    H_list = []
    for run in range(num_runs):
        doc_ids = np.arange(num_docs)
        np.random.shuffle(doc_ids)
        current_fold = 0
        for fold_size in fold_sizes:
            start, stop = current_fold, current_fold + fold_size
            current_fold = stop
            # Keep every shuffled document except the current fold.
            sample_ids = np.concatenate([doc_ids[:start], doc_ids[stop:]])
            S = scipy.sparse.csr_matrix(doc_term_matrix[sample_ids, :])
            model = NMF(init="nndsvd", n_components=num_topics)
            # BUG FIX: fit on the fold sample S. The original fitted on the
            # full doc_term_matrix, which made every base run identical and
            # defeated the ensemble entirely.
            model.fit_transform(S)
            H_list.append(model.components_)

    # Ensemble step: factor the stacked base-level topic matrices.
    M = np.vstack(H_list)
    model = NMF(init="nndsvd", n_components=num_topics)
    model.fit_transform(M)
    ensemble_H = model.components_
    HT = sklearn.preprocessing.normalize(ensemble_H.T, "l2", axis=0)
    ensemble_W = doc_term_matrix.dot(HT)
    # Rescale so each row of ensemble_H is a unit vector, compensating in
    # ensemble_W so the product W @ H is unchanged.
    B, B_inv = b_mat(ensemble_H)
    ensemble_H = np.matmul(B, ensemble_H)
    ensemble_W = np.matmul(ensemble_W, B_inv)
    print(num_topics, 'th topic analyzed')
    return num_topics, ensemble_W, ensemble_H
"sklearn.decomposition.NMF",
"numpy.ones",
"scipy.sparse.csr_matrix",
"numpy.zeros",
"numpy.matmul",
"numpy.vstack",
"numpy.linalg.norm",
"numpy.shape",
"numpy.arange",
"numpy.random.shuffle"
] | [((492, 514), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (506, 514), True, 'import numpy as np\n'), ((634, 681), 'numpy.zeros', 'np.zeros', (['(num_topics, num_topics)'], {'dtype': 'float'}), '((num_topics, num_topics), dtype=float)\n', (642, 681), True, 'import numpy as np\n'), ((695, 742), 'numpy.zeros', 'np.zeros', (['(num_topics, num_topics)'], {'dtype': 'float'}), '((num_topics, num_topics), dtype=float)\n', (703, 742), True, 'import numpy as np\n'), ((2514, 2531), 'numpy.vstack', 'np.vstack', (['H_list'], {}), '(H_list)\n', (2523, 2531), True, 'import numpy as np\n'), ((2549, 2592), 'sklearn.decomposition.NMF', 'NMF', ([], {'init': '"""nndsvd"""', 'n_components': 'num_topics'}), "(init='nndsvd', n_components=num_topics)\n", (2552, 2592), False, 'from sklearn.decomposition import NMF\n'), ((2891, 2915), 'numpy.matmul', 'np.matmul', (['B', 'ensemble_H'], {}), '(B, ensemble_H)\n', (2900, 2915), True, 'import numpy as np\n'), ((2932, 2960), 'numpy.matmul', 'np.matmul', (['ensemble_W', 'B_inv'], {}), '(ensemble_W, B_inv)\n', (2941, 2960), True, 'import numpy as np\n'), ((611, 622), 'numpy.shape', 'np.shape', (['H'], {}), '(H)\n', (619, 622), True, 'import numpy as np\n'), ((1198, 1230), 'numpy.ones', 'np.ones', (['num_folds'], {'dtype': 'np.int'}), '(num_folds, dtype=np.int)\n', (1205, 1230), True, 'import numpy as np\n'), ((1474, 1493), 'numpy.arange', 'np.arange', (['num_docs'], {}), '(num_docs)\n', (1483, 1493), True, 'import numpy as np\n'), ((1502, 1528), 'numpy.random.shuffle', 'np.random.shuffle', (['doc_ids'], {}), '(doc_ids)\n', (1519, 1528), True, 'import numpy as np\n'), ((2170, 2196), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['S'], {}), '(S)\n', (2193, 2196), False, 'import scipy\n'), ((2230, 2273), 'sklearn.decomposition.NMF', 'NMF', ([], {'init': '"""nndsvd"""', 'n_components': 'num_topics'}), "(init='nndsvd', n_components=num_topics)\n", (2233, 2273), False, 'from sklearn.decomposition import NMF\n')] |
"""
Agent is something which converts states into actions and has state
"""
import copy
import numpy as np
import torch
import torch.nn.functional as F
from . import actions
class BaseAgent:
    """Abstract agent interface: converts batches of states into actions."""

    def initial_state(self):
        """Return the agent's empty per-episode state (None by default).

        Called at the start of every episode.
        """
        return None

    def __call__(self, states, agent_states):
        """Map observations (plus per-environment agent states) to actions.

        :param states: list of environment states to process
        :param agent_states: list of agent states, same length as states
        :return: tuple of (actions, new agent states)
        """
        assert isinstance(states, list)
        assert isinstance(agent_states, list)
        assert len(agent_states) == len(states)
        raise NotImplementedError
def default_states_preprocessor(states):
    """
    Convert a list of states into a batched torch tensor.

    :param states: list of numpy arrays with states
    :return: torch.Tensor with a leading batch dimension
    """
    if len(states) == 1:
        batch = np.expand_dims(states[0], 0)
    else:
        batch = np.array([np.array(s, copy=False) for s in states], copy=False)
    return torch.tensor(batch)
def float32_preprocessor(states):
    """Convert a list of states into a float32 torch tensor."""
    return torch.tensor(np.asarray(states, dtype=np.float32))
class DQNAgent(BaseAgent):
    """
    Memoryless DQN agent: computes Q values for a batch of observations
    and turns them into actions via the configured action selector.
    """
    def __init__(self, dqn_model, action_selector, device="cpu", preprocessor=default_states_preprocessor):
        self.dqn_model = dqn_model
        self.action_selector = action_selector
        self.preprocessor = preprocessor
        self.device = device

    @torch.no_grad()
    def __call__(self, states, agent_states=None):
        # Agent states are unused by DQN but kept to honour the interface.
        if agent_states is None:
            agent_states = [None] * len(states)
        if self.preprocessor is not None:
            states = self.preprocessor(states)
        if torch.is_tensor(states):
            states = states.to(self.device)
        q_values = self.dqn_model(states).data.cpu().numpy()
        return self.action_selector(q_values), agent_states
class TargetNet:
    """
    Keeps a detached deep copy of a model whose weights lag the trained ones.
    """
    def __init__(self, model):
        self.model = model
        self.target_model = copy.deepcopy(model)

    def sync(self):
        """Hard update: overwrite the target weights with the trained ones."""
        self.target_model.load_state_dict(self.model.state_dict())

    def alpha_sync(self, alpha):
        """
        Soft update: target = alpha * target + (1 - alpha) * model.

        :param alpha: blend factor in (0, 1]
        """
        assert isinstance(alpha, float)
        assert 0.0 < alpha <= 1.0
        source = self.model.state_dict()
        blended = self.target_model.state_dict()
        for name, param in source.items():
            blended[name] = blended[name] * alpha + (1 - alpha) * param
        self.target_model.load_state_dict(blended)
class PolicyAgent(BaseAgent):
    """
    Samples actions from the probability distribution produced by the model.
    """
    # TODO: unify code with DQNAgent, as only action selector is differs.
    def __init__(self, model, action_selector=actions.ProbabilityActionSelector(), device="cpu",
                 apply_softmax=False, preprocessor=default_states_preprocessor):
        self.model = model
        self.action_selector = action_selector
        self.device = device
        self.apply_softmax = apply_softmax
        self.preprocessor = preprocessor

    @torch.no_grad()
    def __call__(self, states, agent_states=None):
        """
        Return actions sampled for the given list of states.

        :param states: list of states
        :return: (array of actions, agent states)
        """
        if agent_states is None:
            agent_states = [None] * len(states)
        if self.preprocessor is not None:
            states = self.preprocessor(states)
        if torch.is_tensor(states):
            states = states.to(self.device)
        logits = self.model(states)
        if self.apply_softmax:
            logits = F.softmax(logits, dim=1)
        probabilities = logits.data.cpu().numpy()
        return np.array(self.action_selector(probabilities)), agent_states
class ActorCriticAgent(BaseAgent):
    """
    Policy agent returning actions plus the critic's value estimates; the
    values are stored in the agent state so rollout code can reuse them.
    """
    def __init__(self, model, action_selector=actions.ProbabilityActionSelector(), device="cpu",
                 apply_softmax=False, preprocessor=default_states_preprocessor):
        self.model = model
        self.action_selector = action_selector
        self.device = device
        self.apply_softmax = apply_softmax
        self.preprocessor = preprocessor

    @torch.no_grad()
    def __call__(self, states, agent_states=None):
        """
        Return actions for the given list of states.

        :param states: list of states
        :return: (array of actions, list of state-value estimates)
        """
        if self.preprocessor is not None:
            states = self.preprocessor(states)
        if torch.is_tensor(states):
            states = states.to(self.device)
        logits, values = self.model(states)
        if self.apply_softmax:
            logits = F.softmax(logits, dim=1)
        probabilities = logits.data.cpu().numpy()
        chosen = self.action_selector(probabilities)
        agent_states = values.data.squeeze().cpu().numpy().tolist()
        return np.array(chosen), agent_states
| [
"torch.tensor",
"numpy.array",
"torch.is_tensor",
"numpy.expand_dims",
"copy.deepcopy",
"torch.no_grad",
"torch.nn.functional.softmax"
] | [((1353, 1376), 'torch.tensor', 'torch.tensor', (['np_states'], {}), '(np_states)\n', (1365, 1376), False, 'import torch\n'), ((1429, 1463), 'numpy.array', 'np.array', (['states'], {'dtype': 'np.float32'}), '(states, dtype=np.float32)\n', (1437, 1463), True, 'import numpy as np\n'), ((1475, 1498), 'torch.tensor', 'torch.tensor', (['np_states'], {}), '(np_states)\n', (1487, 1498), False, 'import torch\n'), ((1959, 1974), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1972, 1974), False, 'import torch\n'), ((3783, 3798), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3796, 3798), False, 'import torch\n'), ((5108, 5123), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5121, 5123), False, 'import torch\n'), ((1219, 1247), 'numpy.expand_dims', 'np.expand_dims', (['states[0]', '(0)'], {}), '(states[0], 0)\n', (1233, 1247), True, 'import numpy as np\n'), ((2634, 2654), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (2647, 2654), False, 'import copy\n'), ((2211, 2234), 'torch.is_tensor', 'torch.is_tensor', (['states'], {}), '(states)\n', (2226, 2234), False, 'import torch\n'), ((4179, 4202), 'torch.is_tensor', 'torch.is_tensor', (['states'], {}), '(states)\n', (4194, 4202), False, 'import torch\n'), ((4342, 4367), 'torch.nn.functional.softmax', 'F.softmax', (['probs_v'], {'dim': '(1)'}), '(probs_v, dim=1)\n', (4351, 4367), True, 'import torch.nn.functional as F\n'), ((4472, 4489), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (4480, 4489), True, 'import numpy as np\n'), ((5423, 5446), 'torch.is_tensor', 'torch.is_tensor', (['states'], {}), '(states)\n', (5438, 5446), False, 'import torch\n'), ((5596, 5621), 'torch.nn.functional.softmax', 'F.softmax', (['probs_v'], {'dim': '(1)'}), '(probs_v, dim=1)\n', (5605, 5621), True, 'import torch.nn.functional as F\n'), ((5796, 5813), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (5804, 5813), True, 'import numpy as np\n'), ((1288, 1311), 'numpy.array', 'np.array', 
(['s'], {'copy': '(False)'}), '(s, copy=False)\n', (1296, 1311), True, 'import numpy as np\n')] |
# Copyright 2021 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import numpy as np
from kubernetes.client import (
V1ResourceRequirements,
V1ObjectMeta,
)
from kserve import (
constants,
KServeClient,
V1beta1PredictorSpec,
V1beta1InferenceService,
V1beta1InferenceServiceSpec,
V1beta1PaddleServerSpec,
)
from ..common.utils import KSERVE_TEST_NAMESPACE, predict
# Module-level test fixtures: configure logging and build a KServe client
# from the cluster credentials in $KUBECONFIG (falls back to ~/.kube/config).
logging.basicConfig(level=logging.INFO)
kserve_client = KServeClient(config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
def test_paddle():
    """End-to-end test: deploy a Paddle ResNet50 InferenceService, run a
    prediction on a sample image, check the top class, then clean up."""
    service_name = 'isvc-paddle'
    predictor = V1beta1PredictorSpec(
        min_replicas=1,
        paddle=V1beta1PaddleServerSpec(
            storage_uri="https://zhouti-mcp-edge.cdn.bcebos.com/resnet50.tar.gz",
            resources=V1ResourceRequirements(
                requests={"cpu": "200m", "memory": "4Gi"},
                limits={"cpu": "200m", "memory": "4Gi"},
            ),
        ),
    )
    isvc = V1beta1InferenceService(
        api_version=constants.KSERVE_V1BETA1,
        kind=constants.KSERVE_KIND,
        metadata=V1ObjectMeta(name=service_name, namespace=KSERVE_TEST_NAMESPACE),
        spec=V1beta1InferenceServiceSpec(predictor=predictor),
    )
    kserve_client.create(isvc)
    try:
        kserve_client.wait_isvc_ready(service_name, namespace=KSERVE_TEST_NAMESPACE, timeout_seconds=720)
    except RuntimeError as e:
        # Dump the service's pods to help debugging before re-raising.
        selector = 'serving.kserve.io/inferenceservice={}'.format(service_name)
        pods = kserve_client.core_api.list_namespaced_pod(KSERVE_TEST_NAMESPACE,
                                                         label_selector=selector)
        for pod in pods.items:
            logging.info(pod)
        raise e
    res = predict(service_name, './data/jay.json')
    # Class index 17 is the expected label for the sample image.
    assert np.argmax(res["predictions"][0]) == 17
    kserve_client.delete(service_name, KSERVE_TEST_NAMESPACE)
| [
"logging.basicConfig",
"kubernetes.client.V1ObjectMeta",
"kserve.V1beta1InferenceServiceSpec",
"os.environ.get",
"numpy.argmax",
"kubernetes.client.V1ResourceRequirements",
"logging.info"
] | [((935, 974), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (954, 974), False, 'import logging\n'), ((1016, 1062), 'os.environ.get', 'os.environ.get', (['"""KUBECONFIG"""', '"""~/.kube/config"""'], {}), "('KUBECONFIG', '~/.kube/config')\n", (1030, 1062), False, 'import os\n'), ((2382, 2414), 'numpy.argmax', 'np.argmax', (["res['predictions'][0]"], {}), "(res['predictions'][0])\n", (2391, 2414), True, 'import numpy as np\n'), ((1630, 1694), 'kubernetes.client.V1ObjectMeta', 'V1ObjectMeta', ([], {'name': 'service_name', 'namespace': 'KSERVE_TEST_NAMESPACE'}), '(name=service_name, namespace=KSERVE_TEST_NAMESPACE)\n', (1642, 1694), False, 'from kubernetes.client import V1ResourceRequirements, V1ObjectMeta\n'), ((1731, 1779), 'kserve.V1beta1InferenceServiceSpec', 'V1beta1InferenceServiceSpec', ([], {'predictor': 'predictor'}), '(predictor=predictor)\n', (1758, 1779), False, 'from kserve import constants, KServeClient, V1beta1PredictorSpec, V1beta1InferenceService, V1beta1InferenceServiceSpec, V1beta1PaddleServerSpec\n'), ((2285, 2302), 'logging.info', 'logging.info', (['pod'], {}), '(pod)\n', (2297, 2302), False, 'import logging\n'), ((1291, 1402), 'kubernetes.client.V1ResourceRequirements', 'V1ResourceRequirements', ([], {'requests': "{'cpu': '200m', 'memory': '4Gi'}", 'limits': "{'cpu': '200m', 'memory': '4Gi'}"}), "(requests={'cpu': '200m', 'memory': '4Gi'}, limits={\n 'cpu': '200m', 'memory': '4Gi'})\n", (1313, 1402), False, 'from kubernetes.client import V1ResourceRequirements, V1ObjectMeta\n')] |
#!/usr/bin/env python
# This code implements sentiment classification using the Scikit-learn machine learning toolkit for Python:
# Scikit-learn: Machine Learning in Python, Pedregosa et al., JMLR 12, pp. 2825-2830, 2011.
# Sentiment classification of parallel sentence data in English (original), Finnish, French, or Italian (translations).
# Run this script to classify the data using:
# - Pre-compiled training and testing sets (90%/10%)
# - Stratified 10-fold cross-validation
# - Scikit-learn train_test_split (90%/10%)
# The data is classified using the following classifiers:
# - Multinomial Naïve Bayes
# - Logistic Regression
# - Linear SVC
# - Multilayer Perceptron
# Usage: python3 classify.py <LANG> <DIMENSIONS>
# Arguments:
# - <LANG>: en / fi / fr / it
# - <DIMENSIONS>: bin / multi
# - bin: positive/negative
# - multi: 8-class classification into classes: anger/anticipation/disgust/fear/joy/sadness/surprise/trust (Plutchik's Eight)
import codecs
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn import linear_model
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from argparse import ArgumentParser
import re
def _read_labels(path):
    """Read one label per line; newlines are kept, matching the original
    behaviour of appending raw readlines() output."""
    with open(path, 'r') as f:
        return np.array(f.readlines())

def load_data(tagfile, goldfile):
    """Return numpy arrays with the training labels and the gold test labels.

    The original duplicated the file-reading loop twice and called f.close()
    inside the `with` blocks (redundant — the context manager closes them).

    :param tagfile: path to the training-set label file (one label per line)
    :param goldfile: path to the gold test-set label file
    :return: (training labels, gold labels) as numpy string arrays
    """
    return _read_labels(tagfile), _read_labels(goldfile)
def vectorize(trainfile, testfile):
    """Return count-vectorized representations of the training and test texts.

    The vocabulary is fitted on the training file only, so the test matrix
    shares the same feature space. The original leaked the codecs.open file
    handles; `with` blocks now close them deterministically.
    """
    vectorizer = CountVectorizer(analyzer='word')
    with codecs.open(trainfile, 'r', 'utf8') as f:
        trainset = vectorizer.fit_transform(f)
    with codecs.open(testfile, 'r', 'utf8') as f:
        testset = vectorizer.transform(f)
    return trainset, testset
def evaluate(predictions, test_tags):
    """Print accuracy, the per-class classification report and a confusion
    matrix for the pre-compiled held-out test set."""
    print('Pre-compiled stratified training and testing data:')
    print('Accuracy: ', metrics.accuracy_score(test_tags, predictions))
    print(metrics.classification_report(test_tags, predictions))
    print('Confusion matrix for pre-compiled stratified testing data classification:')
    print(confusion_matrix(test_tags, predictions))
def stratified_cross_validate(model, dims, lang):
    """Evaluate *model* with stratified 10-fold cross-validation.

    Prints averaged per-class precision/recall/F1 (and their standard
    deviations across classes) plus the summed confusion matrix.

    :param model: scikit-learn estimator exposing fit/predict
    :param dims: 'bin' (2 classes) or 'multi' (8 classes, Plutchik's Eight)
    :param lang: language code used to locate the data file
    """
    print()
    print('Stratified 10-fold cross-validation:')
    vectorizer = CountVectorizer(analyzer='word')
    # `with` blocks close the file handles (the originals were leaked).
    with codecs.open(dims+'/crossval/'+lang+'/'+lang+'.txt', 'r', 'utf8') as f:
        data = vectorizer.fit_transform(f)
    with open(dims+'/crossval/tags.txt', 'r') as f:
        cross_tags = np.array(f.readlines())
    skf = StratifiedKFold(n_splits=10, shuffle=True)
    # Per-fold scores; structure modified from https://gist.github.com/zacstewart/5978000
    prec, rec, f1 = [], [], []
    # 2x2 matrix for binary sentiment, 8x8 for the eight emotion classes.
    n_classes = 2 if dims == 'bin' else 8
    confusion = np.zeros((n_classes, n_classes))
    for train_index, test_index in skf.split(data, cross_tags):
        X_train, X_test = data[train_index], data[test_index]
        y_train, y_test = cross_tags[train_index], cross_tags[test_index]
        model.fit(X_train, y_train)
        # BUG FIX: predict once per fold (the original called model.predict
        # twice on the same X_test, doubling the inference cost).
        pred = model.predict(X_test)
        prec.append(precision_score(y_test, pred, average=None))
        rec.append(recall_score(y_test, pred, average=None))
        f1.append(f1_score(y_test, pred, average=None))
        confusion += confusion_matrix(y_test, np.squeeze(pred))
    # Avgs modified from https://github.com/sri-teja/chemical-NER/blob/master/kfold.py
    avg_prec = sum(prec) / len(prec)
    avg_rec = sum(rec) / len(rec)
    avg_f1 = sum(f1) / len(f1)
    print('Average precision: ', avg_prec)
    print('Average recall: ', avg_rec)
    print('Average F1-score: ', avg_f1)
    print('Precision standard deviation: ', avg_prec.std())
    print('Recall standard deviation: ', avg_rec.std())
    print('F1-score standard deviation: ', avg_f1.std())
    print('Confusion matrix using stratified 10-fold cross-validation: ')
    print(confusion)
def traintestsplit(model, dims, lang):
    """Evaluate *model* with scikit-learn's train_test_split (stratified, 90/10).

    Reads the corpus for *lang* and its gold tags from the ``<dims>/traintest/``
    directory, fits the model on the training part and prints accuracy plus a
    classification report for the held-out 10%.

    Args:
        model: a scikit-learn estimator.
        dims:  classification type, 'bin' or 'multi'.
        lang:  two-letter language code used to locate the corpus file.
    """
    vectorizer = CountVectorizer(analyzer='word')
    data = vectorizer.fit_transform(codecs.open(dims+'/traintest/'+lang+'/'+lang+'.txt', 'r', 'utf8'))
    # Gold labels, one per line; the `with` block closes the file, no explicit close() needed.
    with open(dims+'/traintest/tags.txt', 'r') as f:
        tags = np.array(f.readlines())
    # random_state pinned so the split (and reported scores) are reproducible.
    X_train, X_test, y_train, y_test = train_test_split(data, tags, stratify=tags, test_size=0.1, random_state=42)
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    print()
    print('Accuracy using the Scikit-learn train_test_split function:', metrics.accuracy_score(y_test, predictions))
    print(metrics.classification_report(y_test, predictions))
def classify(lang, dims):
    """Train and evaluate a set of classifiers on the pre-compiled train/test sets.

    For each classifier: fit on the pre-compiled training set, evaluate on the
    pre-compiled test set, then additionally run stratified 10-fold
    cross-validation and a scikit-learn train_test_split evaluation.

    Args:
        lang: two-letter language code used to locate the corpus files.
        dims: classification type, 'bin' or 'multi'.
    """
    tags, gold = load_data(dims+'/traintest/gold-train.txt', dims+'/traintest/gold-test.txt')
    trainset, testset = vectorize(dims+'/traintest/'+lang+'/'+lang+'-train.txt', dims+'/traintest/'+lang+'/'+lang+'-test.txt')
    # Classifier models
    models = {'Multinomial Naïve Bayes': MultinomialNB(),
              'Logistic Regression': linear_model.LogisticRegression(random_state=0, solver='liblinear'),
              'Linear SVC': LinearSVC(random_state=0), 'Multilayer Perceptron': MLPClassifier(hidden_layer_sizes=(100, 100, 100),
              max_iter=500, alpha=0.0001, solver='adam', verbose=10, random_state=21, tol=0.000000001,
              activation='relu', batch_size='auto')}
    for k, v in models.items():
        print('***', k, '***')
        # BUGFIX: was `k is 'Multilayer Perceptron'` -- string identity is an
        # interpreter implementation detail (interning); use equality.
        if k == 'Multilayer Perceptron':
            # Scale the dataset for the MLP model. Use local names instead of
            # rebinding trainset/testset so the other classifiers would still
            # see the unscaled data if the dict order ever changed.
            scaler = StandardScaler(with_mean=False)
            scaler.fit(trainset)
            X_train = scaler.transform(trainset)
            X_test = scaler.transform(testset)
        else:
            X_train, X_test = trainset, testset
        model = v.fit(X_train, tags)
        pred = model.predict(X_test)
        evaluate(pred, gold)
        stratified_cross_validate(model, dims, lang)  # Use stratified cross-validation
        traintestsplit(model, dims, lang)  # Use the in-built Scikit-learn train_test_split function
def main():
    """Parse command-line arguments (LANG, DIMS) and run the classifier."""
    import argparse

    # Code borrowed and modified from https://github.com/cynarr/sentimentator/blob/master/data_import.py
    def check_lang(l, pattern=re.compile(r'^[a-zA-Z]{2}$')):
        # Guard clause: accept only a two-letter alphabetic language code.
        if pattern.match(l):
            return l
        raise argparse.ArgumentTypeError('Use a lowercase two-character alphabetic language code. Available codes: en, fi, fr, it.')

    def check_dim(d):
        # Guard clause: accept only the two supported classification types.
        if d in ('multi', 'bin'):
            return d
        raise argparse.ArgumentTypeError('Use "multi" or "bin" for the classification type.')

    parser = ArgumentParser()
    parser.add_argument('LANG', help='', type=check_lang)
    parser.add_argument('DIMS', help='', type=check_dim)
    args = parser.parse_args()
    classify(args.LANG, args.DIMS)
# Entry point: run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| [
"re.compile",
"sklearn.metrics.classification_report",
"sklearn.metrics.precision_score",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"sklearn.metrics.recall_score",
"argparse.ArgumentParser",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.naive_bayes.MultinomialNB",
"sk... | [((1984, 1998), 'numpy.array', 'np.array', (['tags'], {}), '(tags)\n', (1992, 1998), True, 'import numpy as np\n'), ((2010, 2024), 'numpy.array', 'np.array', (['gold'], {}), '(gold)\n', (2018, 2024), True, 'import numpy as np\n'), ((2184, 2216), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'analyzer': '"""word"""'}), "(analyzer='word')\n", (2199, 2216), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((2881, 2921), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_tags', 'predictions'], {}), '(test_tags, predictions)\n', (2897, 2921), False, 'from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score\n'), ((3251, 3283), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'analyzer': '"""word"""'}), "(analyzer='word')\n", (3266, 3283), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((3584, 3604), 'numpy.array', 'np.array', (['cross_tags'], {}), '(cross_tags)\n', (3592, 3604), True, 'import numpy as np\n'), ((3616, 3658), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': '(True)'}), '(n_splits=10, shuffle=True)\n', (3631, 3658), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((3828, 3840), 'numpy.zeros', 'np.zeros', (['()'], {}), '(())\n', (3836, 3840), True, 'import numpy as np\n'), ((5439, 5471), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'analyzer': '"""word"""'}), "(analyzer='word')\n", (5454, 5471), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((5776, 5800), 'numpy.array', 'np.array', (['testsplit_tags'], {}), '(testsplit_tags)\n', (5784, 5800), True, 'import numpy as np\n'), ((5841, 5916), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'tags'], {'stratify': 'tags', 'test_size': '(0.1)', 'random_state': '(42)'}), '(data, tags, stratify=tags, test_size=0.1, 
random_state=42)\n', (5857, 5916), False, 'from sklearn.model_selection import train_test_split\n'), ((7691, 7707), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (7705, 7707), False, 'from argparse import ArgumentParser\n'), ((2258, 2293), 'codecs.open', 'codecs.open', (['trainfile', '"""r"""', '"""utf8"""'], {}), "(trainfile, 'r', 'utf8')\n", (2269, 2293), False, 'import codecs\n'), ((2330, 2364), 'codecs.open', 'codecs.open', (['testfile', '"""r"""', '"""utf8"""'], {}), "(testfile, 'r', 'utf8')\n", (2341, 2364), False, 'import codecs\n'), ((2671, 2717), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['test_tags', 'predictions'], {}), '(test_tags, predictions)\n', (2693, 2717), False, 'from sklearn import metrics\n'), ((2729, 2782), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_tags', 'predictions'], {}), '(test_tags, predictions)\n', (2758, 2782), False, 'from sklearn import metrics\n'), ((3320, 3394), 'codecs.open', 'codecs.open', (["(dims + '/crossval/' + lang + '/' + lang + '.txt')", '"""r"""', '"""utf8"""'], {}), "(dims + '/crossval/' + lang + '/' + lang + '.txt', 'r', 'utf8')\n", (3331, 3394), False, 'import codecs\n'), ((3884, 3900), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (3892, 3900), True, 'import numpy as np\n'), ((4313, 4356), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'pred'], {'average': 'None'}), '(y_test, pred, average=None)\n', (4328, 4356), False, 'from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score\n'), ((4377, 4417), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'pred'], {'average': 'None'}), '(y_test, pred, average=None)\n', (4389, 4417), False, 'from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score\n'), ((4438, 4474), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'pred'], {'average': 'None'}), '(y_test, pred, average=None)\n', (4446, 4474), False, 
'from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score\n'), ((4581, 4613), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_tags'], {}), '(y_test, y_tags)\n', (4597, 4613), False, 'from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score\n'), ((5508, 5583), 'codecs.open', 'codecs.open', (["(dims + '/traintest/' + lang + '/' + lang + '.txt')", '"""r"""', '"""utf8"""'], {}), "(dims + '/traintest/' + lang + '/' + lang + '.txt', 'r', 'utf8')\n", (5519, 5583), False, 'import codecs\n'), ((6075, 6118), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (6097, 6118), False, 'from sklearn import metrics\n'), ((6130, 6180), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (6159, 6180), False, 'from sklearn import metrics\n'), ((6570, 6585), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (6583, 6585), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((6623, 6690), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {'random_state': '(0)', 'solver': '"""liblinear"""'}), "(random_state=0, solver='liblinear')\n", (6654, 6690), False, 'from sklearn import linear_model\n'), ((6720, 6745), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (6729, 6745), False, 'from sklearn.svm import LinearSVC\n'), ((6772, 6951), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '(100, 100, 100)', 'max_iter': '(500)', 'alpha': '(0.0001)', 'solver': '"""adam"""', 'verbose': '(10)', 'random_state': '(21)', 'tol': '(1e-09)', 'activation': '"""relu"""', 'batch_size': '"""auto"""'}), "(hidden_layer_sizes=(100, 100, 100), max_iter=500, alpha=\n 0.0001, solver='adam', verbose=10, random_state=21, tol=1e-09,\n activation='relu', 
batch_size='auto')\n", (6785, 6951), False, 'from sklearn.neural_network import MLPClassifier\n'), ((7844, 7871), 're.compile', 're.compile', (['"""^[a-zA-Z]{2}$"""'], {}), "('^[a-zA-Z]{2}$')\n", (7854, 7871), False, 'import re\n'), ((3947, 3963), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (3955, 3963), True, 'import numpy as np\n'), ((7124, 7155), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(False)'}), '(with_mean=False)\n', (7138, 7155), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7926, 8054), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Use a lowercase two-character alphabetic language code. Available codes: en, fi, fr, it."""'], {}), "(\n 'Use a lowercase two-character alphabetic language code. Available codes: en, fi, fr, it.'\n )\n", (7952, 8054), False, 'import argparse\n'), ((8161, 8240), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Use "multi" or "bin" for the classification type."""'], {}), '(\'Use "multi" or "bin" for the classification type.\')\n', (8187, 8240), False, 'import argparse\n')] |
# Setup
from __future__ import print_function
from rh_logger.api import logger
import rh_logger
import logging
import os
import numpy as np
import time
import sys
from scipy.spatial import distance
from scipy import spatial
import cv2
import argparse
from mb_aligner.common import utils
from rh_renderer import models
from mb_aligner.alignment.fine_matchers import PMCC_filter
import multiprocessing as mp
from rh_renderer.tilespec_affine_renderer import TilespecAffineRenderer
import threading
from scipy.spatial import cKDTree as KDTree
from collections import defaultdict
# import pyximport
# pyximport.install()
# from ..common import cv_wrap_module

# Thread-local storage; only referenced by the commented-out per-thread
# matcher-caching code in BlockMatcherPMCCDispatcher._perform_matching below.
threadLocal = threading.local()
class BlockMatcherPMCCDispatcher(object):
    """Dispatches fine, bidirectional block-matching (template matching with
    PMCC filtering) between two sections, fanning the per-region work out to a
    worker pool via :meth:`match_layers_fine_matching`."""

    class BlockMatcherPMCC(object):
        """Block-matcher for one sec1 region against sec2: renders scaled
        crops of both sections and template-matches them with PMCC filtering
        in both directions."""
        def __init__(self, sec1, sec2, sec1_to_sec2_transform, **kwargs):
            """Set up scaled renderers for both sections and PMCC thresholds.

            Args:
                sec1, sec2: the sections to match (project objects; only their
                    ``tilespec`` attribute is used here).
                sec1_to_sec2_transform: affine model mapping sec1 coordinates
                    onto sec2 (must provide ``apply`` and ``get_matrix``).

            Keyword Args:
                scaling (float): working-resolution scale factor (default 0.2).
                template_size (int): template side, full-res pixels (default 200).
                search_window_size (int): search window side (default 8 * template_size).
                min_correlation / maximal_curvature_ratio / maximal_ROD:
                    PMCC rejection thresholds.
                use_clahe (bool): contrast-normalize crops before matching.
            """
            self._scaling = kwargs.get("scaling", 0.2)
            self._template_size = kwargs.get("template_size", 200)
            self._search_window_size = kwargs.get("search_window_size", 8 * self._template_size)
            logger.report_event("Actual template size: {} and window search size: {} (after scaling)".format(self._template_size * self._scaling, self._search_window_size * self._scaling), log_level=logging.INFO)

            # Parameters for PMCC filtering
            self._min_corr = kwargs.get("min_correlation", 0.2)
            self._max_curvature = kwargs.get("maximal_curvature_ratio", 10)
            self._max_rod = kwargs.get("maximal_ROD", 0.9)
            self._use_clahe = kwargs.get("use_clahe", False)
            if self._use_clahe:
                self._clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))

            #self._debug_dir = kwargs.get("debug_dir", None)
            # Falsy until set_debug_dir() is called; gates debug-image dumps.
            self._debug_save_matches = None

            # Half-sides of the template / search window in scaled pixels.
            self._template_scaled_side = self._template_size * self._scaling / 2
            self._search_window_scaled_side = self._search_window_size * self._scaling / 2

            self._sec1 = sec1
            self._sec2 = sec2
            self._sec1_to_sec2_transform = sec1_to_sec2_transform

            # 2x3 affine matrix that only scales (no rotation/translation).
            self._scale_transformation = np.array([
                [ self._scaling, 0., 0. ],
                [ 0., self._scaling, 0. ]
            ])

            # For section1 there will be a single renderer with transformation and scaling
            self._sec1_scaled_renderer = TilespecAffineRenderer(self._sec1.tilespec)
            self._sec1_scaled_renderer.add_transformation(self._sec1_to_sec2_transform.get_matrix())
            self._sec1_scaled_renderer.add_transformation(self._scale_transformation)

            # for section2 there will only be a single renderer (no need to transform back to sec1)
            self._sec2_scaled_renderer = TilespecAffineRenderer(self._sec2.tilespec)
            self._sec2_scaled_renderer.add_transformation(self._scale_transformation)
def set_debug_dir(self, debug_dir):
self._debug_save_matches = True
self._debug_dir = debug_dir
        def match_sec1_to_sec2_mfov(self, sec1_pts):
            """Block-match sec1 points onto sec2.

            For each sec1 point: crop a template from the transformed+scaled
            sec1 render around the point's estimated sec2 location, crop a
            larger search window from the scaled sec2 render at the same
            location, and template-match them with PMCC filtering.

            Args:
                sec1_pts: iterable of (x, y) points in sec1 coordinates.

            Returns:
                (valid_matches, invalid_matches) where valid_matches is
                ``[sec1_points, matched_sec2_points, match_values]`` (sec2
                points in full-resolution sec2 coordinates) and
                invalid_matches is ``[sec1_points, rejection_reasons]``.
            """
            # Apply the mfov transformation to compute estimated location on sec2
            sec1_mfov_pts_on_sec2 = self._sec1_to_sec2_transform.apply(np.atleast_2d(sec1_pts)) * self._scaling
            valid_matches = [[], [], []]
            invalid_matches = [[], []]
            for sec1_pt, sec2_pt_estimated in zip(sec1_pts, sec1_mfov_pts_on_sec2):
                # Fetch the template around img1_point (after transformation)
                from_x1, from_y1 = sec2_pt_estimated - self._template_scaled_side
                to_x1, to_y1 = sec2_pt_estimated + self._template_scaled_side
                sec1_template, sec1_template_start_point = self._sec1_scaled_renderer.crop(from_x1, from_y1, to_x1, to_y1)

                # Fetch a large sub-image around img2_point (using search_window_scaled_size)
                from_x2, from_y2 = sec2_pt_estimated - self._search_window_scaled_side
                to_x2, to_y2 = sec2_pt_estimated + self._search_window_scaled_side
                sec2_search_window, sec2_search_window_start_point = self._sec2_scaled_renderer.crop(from_x2, from_y2, to_x2, to_y2)

                # execute the PMCC match
                # Do template matching
                # Skip degenerate crops: empty images, or a template that does
                # not fit strictly inside the search window.
                if np.any(np.array(sec2_search_window.shape) == 0) or np.any(np.array(sec1_template.shape) == 0):
                    continue
                if sec1_template.shape[0] >= sec2_search_window.shape[0] or sec1_template.shape[1] >= sec2_search_window.shape[1]:
                    continue

                if self._use_clahe:
                    # Contrast-normalize both crops before matching.
                    sec2_search_window_clahe = self._clahe.apply(sec2_search_window)
                    sec1_template_clahe = self._clahe.apply(sec1_template)
                    pmcc_result, reason, match_val = PMCC_filter.PMCC_match(sec2_search_window_clahe, sec1_template_clahe, min_correlation=self._min_corr, maximal_curvature_ratio=self._max_curvature, maximal_ROD=self._max_rod)
                else:
                    pmcc_result, reason, match_val = PMCC_filter.PMCC_match(sec2_search_window, sec1_template, min_correlation=self._min_corr, maximal_curvature_ratio=self._max_curvature, maximal_ROD=self._max_rod)

                if pmcc_result is None:
                    # No acceptable correlation peak -- record the rejection reason.
                    invalid_matches[0].append(sec1_pt)
                    invalid_matches[1].append(reason)
                    # debug_out_fname1 = "temp_debug/debug_match_sec1{}-{}_template.png".format(int(sec1_pt[0]), int(sec1_pt[1]), int(sec2_pt_estimated[0]), int(sec2_pt_estimated[1]))
                    # debug_out_fname2 = "temp_debug/debug_match_sec1{}-{}_search_window.png".format(int(sec1_pt[0]), int(sec1_pt[1]), int(sec2_pt_estimated[0]), int(sec2_pt_estimated[1]))
                    # cv2.imwrite(debug_out_fname1, sec1_template)
                    # cv2.imwrite(debug_out_fname2, sec2_search_window)
                else:
                    # Compute the location of the matched point on img2 in non-scaled coordinates
                    # On success `reason` appears to hold the (row, col) of the
                    # peak inside the search window -- confirm in PMCC_filter.
                    matched_location_scaled = np.array([reason[1], reason[0]]) + np.array([from_x2, from_y2]) + self._template_scaled_side
                    sec2_pt = matched_location_scaled / self._scaling
                    logger.report_event("{}: match found: {} and {} (orig assumption: {})".format(os.getpid(), sec1_pt, sec2_pt, sec2_pt_estimated / self._scaling), log_level=logging.DEBUG)
                    if self._debug_save_matches:
                        debug_out_fname1 = os.path.join(self._debug_dir, "debug_match_sec1_{}-{}_sec2_{}-{}_image1.png".format(int(sec1_pt[0]), int(sec1_pt[1]), int(sec2_pt[0]), int(sec2_pt[1])))
                        debug_out_fname2 = os.path.join(self._debug_dir, "debug_match_sec1_{}-{}_sec2_{}-{}_image2.png".format(int(sec1_pt[0]), int(sec1_pt[1]), int(sec2_pt[0]), int(sec2_pt[1])))
                        cv2.imwrite(debug_out_fname1, sec1_template)
                        sec2_cut_out = sec2_search_window[int(reason[0]):int(reason[0] + 2 * self._template_scaled_side), int(reason[1]):int(reason[1] + 2 * self._template_scaled_side)]
                        cv2.imwrite(debug_out_fname2, sec2_cut_out)
                    valid_matches[0].append(np.array(sec1_pt))
                    valid_matches[1].append(sec2_pt)
                    valid_matches[2].append(match_val)
            return valid_matches, invalid_matches
        def match_sec2_to_sec1_mfov(self, sec2_pts):
            """Block-match sec2 points back onto sec1 (the reverse direction).

            The sec1 renderer already carries the sec1->sec2 transform, so a
            sec2 point's estimated location in the sec1 render is simply its
            own (scaled) coordinates; matched locations are mapped back to
            sec1 coordinates with the inverse of the transform matrix.

            Args:
                sec2_pts: iterable of (x, y) points in sec2 coordinates.

            Returns:
                (valid_matches, invalid_matches) where valid_matches is
                ``[sec2_points, matched_sec1_points, match_values]`` (sec1
                points in full-resolution sec1 coordinates) and
                invalid_matches is ``[sec2_points, rejection_reasons]``.
            """
            # Assume that only sec1 renderer was transformed and not sec2 (and both scaled)
            sec2_pts = np.asarray(sec2_pts)
            sec2_pts_scaled = sec2_pts * self._scaling
            mat = self._sec1_to_sec2_transform.get_matrix()
            inverse_mat = np.linalg.inv(mat)
            #inverse_model = BlockMatcherPMCC.inverse_transform(self._sec1_to_sec2_transform)
            #sec2_pts_on_sec1 = inverse_model.apply(sec2_pts)
            valid_matches = [[], [], []]
            invalid_matches = [[], []]
            for sec2_pt, sec2_pt_scaled in zip(sec2_pts, sec2_pts_scaled):
                # sec1_pt_estimated is after the sec1_to_sec2 transform
                sec1_pt_estimated = sec2_pt_scaled

                # Fetch the template around sec2_pt_scaled (no transformation, just scaling)
                from_x2, from_y2 = sec2_pt_scaled - self._template_scaled_side
                to_x2, to_y2 = sec2_pt_scaled + self._template_scaled_side
                sec2_template, sec2_template_start_point = self._sec2_scaled_renderer.crop(from_x2, from_y2, to_x2, to_y2)

                # Fetch a large sub-image around sec1_pt_estimated (after transformation, using search_window_scaled_size)
                from_x1, from_y1 = sec1_pt_estimated - self._search_window_scaled_side
                to_x1, to_y1 = sec1_pt_estimated + self._search_window_scaled_side
                sec1_search_window, sec1_search_window_start_point = self._sec1_scaled_renderer.crop(from_x1, from_y1, to_x1, to_y1)

                # execute the PMCC match
                # Do template matching
                # Skip degenerate crops: empty images, or a template that does
                # not fit strictly inside the search window.
                if np.any(np.array(sec1_search_window.shape) == 0) or np.any(np.array(sec2_template.shape) == 0):
                    continue
                if sec2_template.shape[0] >= sec1_search_window.shape[0] or sec2_template.shape[1] >= sec1_search_window.shape[1]:
                    continue

                if self._use_clahe:
                    # Contrast-normalize both crops before matching.
                    sec1_search_window_clahe = self._clahe.apply(sec1_search_window)
                    sec2_template_clahe = self._clahe.apply(sec2_template)
                    pmcc_result, reason, match_val = PMCC_filter.PMCC_match(sec1_search_window_clahe, sec2_template_clahe, min_correlation=self._min_corr, maximal_curvature_ratio=self._max_curvature, maximal_ROD=self._max_rod)
                else:
                    pmcc_result, reason, match_val = PMCC_filter.PMCC_match(sec1_search_window, sec2_template, min_correlation=self._min_corr, maximal_curvature_ratio=self._max_curvature, maximal_ROD=self._max_rod)

                if pmcc_result is None:
                    # No acceptable correlation peak -- record the rejection reason.
                    invalid_matches[0].append(sec2_pt)
                    invalid_matches[1].append(reason)
                    # debug_out_fname1 = "temp_debug/debug_match_sec2{}-{}_template.png".format(int(sec2_pt[0]), int(sec2_pt[1]), int(sec1_pt_estimated[0]), int(sec1_pt_estimated[1]))
                    # debug_out_fname2 = "temp_debug/debug_match_sec2{}-{}_search_window.png".format(int(sec2_pt[0]), int(sec2_pt[1]), int(sec1_pt_estimated[0]), int(sec1_pt_estimated[1]))
                    # cv2.imwrite(debug_out_fname1, sec2_template)
                    # cv2.imwrite(debug_out_fname2, sec1_search_window)
                else:
                    # Compute the location of the matched point on img2 in non-scaled coordinates
                    matched_location_scaled = np.array([reason[1], reason[0]]) + np.array([from_x1, from_y1]) + self._template_scaled_side
                    sec1_pt = matched_location_scaled / self._scaling
                    # Map from "sec1 rendered in sec2 space" back to true sec1
                    # coordinates using the inverse affine (rotation/scale part
                    # plus translation column).
                    sec1_pt = np.dot(inverse_mat[:2,:2], sec1_pt) + inverse_mat[:2,2]
                    logger.report_event("{}: match found: {} and {} (orig assumption: {})".format(os.getpid(), sec2_pt, sec1_pt, np.dot(inverse_mat[:2,:2], sec1_pt_estimated / self._scaling) + inverse_mat[:2,2]), log_level=logging.DEBUG)
                    if self._debug_save_matches:
                        debug_out_fname1 = os.path.join(self._debug_dir, "debug_match_sec2_{}-{}_sec1_{}-{}_image1.png".format(int(sec2_pt[0]), int(sec2_pt[1]), int(sec1_pt[0]), int(sec1_pt[1])))
                        debug_out_fname2 = os.path.join(self._debug_dir, "debug_match_sec2_{}-{}_sec1_{}-{}_image2.png".format(int(sec2_pt[0]), int(sec2_pt[1]), int(sec1_pt[0]), int(sec1_pt[1])))
                        cv2.imwrite(debug_out_fname1, sec2_template)
                        sec1_cut_out = sec1_search_window[int(reason[0]):int(reason[0] + 2 * self._template_scaled_side), int(reason[1]):int(reason[1] + 2 * self._template_scaled_side)]
                        cv2.imwrite(debug_out_fname2, sec1_cut_out)
                    valid_matches[0].append(sec2_pt)
                    valid_matches[1].append(sec1_pt)
                    valid_matches[2].append(match_val)
            return valid_matches, invalid_matches
def __init__(self, **kwargs):
self._matcher_kwargs = kwargs
self._mesh_spacing = kwargs.get("mesh_spacing", 1500)
# self._scaling = kwargs.get("scaling", 0.2)
# self._template_size = kwargs.get("template_size", 200)
# self._search_window_size = kwargs.get("search_window_size", 8 * template_size)
# logger.report_event("Actual template size: {} and window search size: {} (after scaling)".format(template_size * scaling, search_window_size * scaling), log_level=logging.INFO)
#
# # Parameters for PMCC filtering
# self._min_corr = kwargs.get("min_correlation", 0.2)
# self._max_curvature = kwargs.get("maximal_curvature_ratio", 10)
# self._max_rod = kwargs.get("maximal_ROD", 0.9)
# self._use_clahe = kwargs.get("use_clahe", False)
self._debug_dir = kwargs.get("debug_dir", None)
if self._debug_dir is not None:
logger.report_event("Debug mode - on", log_level=logging.INFO)
# Create a debug directory
import datetime
self._debug_dir = os.path.join(self._debug_dir, 'debug_matches_{}'.format(datetime.datetime.now().isoformat()))
os.mkdirs(self._debug_dir)
@staticmethod
def _is_point_in_img(img_bbox, point):
"""Returns True if the given point lies inside the image as denoted by the given tile_tilespec"""
# TODO - instead of checking inside the bbox, need to check inside the polygon after transformation
if point[0] > img_bbox[0] and point[1] > img_bbox[2] and \
point[0] < img_bbox[1] and point[1] < img_bbox[3]:
return True
return False
@staticmethod
def sum_invalid_matches(invalid_matches):
if len(invalid_matches[1]) == 0:
return [0] * 5
hist, _ = np.histogram(invalid_matches[1], bins=5)
return hist
    @staticmethod
    def _perform_matching(sec1_mfov_tile_idx, sec1, sec2, sec1_to_sec2_mfov_transform, sec1_mfov_mesh_pts, sec2_mfov_mesh_pts, debug_dir, matcher_args):
        """Worker entry point: bidirectionally block-match one sec1 region.

        Builds a fresh BlockMatcherPMCC for the region and matches the given
        sec1 mesh points onto sec2 and the given sec2 mesh points back onto
        sec1, logging match/reject counts.

        Returns:
            (sec1_mfov_tile_idx, valid_matches_sec1_to_sec2,
             valid_matches_sec2_to_sec1); invalid matches are only logged.
        """
        # Per-thread matcher caching kept disabled (a new matcher is built per call):
        # fine_matcher_key = "block_matcher_{},{},{}".format(sec1.canonical_section_name, sec2.canonical_section_name, sec1_mfov_tile_idx[0])
        # fine_matcher = getattr(threadLocal, fine_matcher_key, None)
        # if fine_matcher is None:
        #     fine_matcher = BlockMatcherPMCCDispatcher.BlockMatcherPMCC(sec1, sec2, sec1_to_sec2_mfov_transform, **matcher_args)
        #     if debug_dir is not None:
        #         fine_matcher.set_debug_dir(debug_dir)
        #
        #     setattr(threadLocal, fine_matcher_key, fine_matcher)
        fine_matcher = BlockMatcherPMCCDispatcher.BlockMatcherPMCC(sec1, sec2, sec1_to_sec2_mfov_transform, **matcher_args)
        if debug_dir is not None:
            fine_matcher.set_debug_dir(debug_dir)

        logger.report_event("Block-Matching+PMCC layers: {} with {} (mfov1 {}) {} mesh points1, {} mesh points2".format(sec1.canonical_section_name, sec2.canonical_section_name, sec1_mfov_tile_idx, len(sec1_mfov_mesh_pts), len(sec2_mfov_mesh_pts)), log_level=logging.INFO)
        logger.report_event("Block-Matching+PMCC layers: {} -> {}".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.INFO)
        valid_matches1, invalid_matches1 = fine_matcher.match_sec1_to_sec2_mfov(sec1_mfov_mesh_pts)
        logger.report_event("Block-Matching+PMCC layers: {} -> {} valid matches: {}, invalid_matches: {} {}".format(sec1.canonical_section_name, sec2.canonical_section_name, len(valid_matches1[0]), len(invalid_matches1[0]), BlockMatcherPMCCDispatcher.sum_invalid_matches(invalid_matches1)), log_level=logging.INFO)

        logger.report_event("Block-Matching+PMCC layers: {} <- {}".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.INFO)
        valid_matches2, invalid_matches2 = fine_matcher.match_sec2_to_sec1_mfov(sec2_mfov_mesh_pts)
        logger.report_event("Block-Matching+PMCC layers: {} <- {} valid matches: {}, invalid_matches: {} {}".format(sec1.canonical_section_name, sec2.canonical_section_name, len(valid_matches2[0]), len(invalid_matches2[0]), BlockMatcherPMCCDispatcher.sum_invalid_matches(invalid_matches2)), log_level=logging.INFO)

        return sec1_mfov_tile_idx, valid_matches1, valid_matches2
# def inverse_transform(model):
# mat = model.get_matrix()
# new_model = models.AffineModel(np.linalg.inv(mat))
# return new_model
    def match_layers_fine_matching(self, sec1, sec2, sec1_cache, sec2_cache, sec1_to_sec2_mfovs_transforms, pool):
        """Compute fine, bidirectional point matches between sec1 and sec2.

        Lays a hexagonal mesh over both sections, groups the mesh points by
        the sec1 (mfov, tile) region they map to, and dispatches one
        _perform_matching task per region onto *pool*.

        Args:
            sec1, sec2: the sections to match.
            sec1_cache, sec2_cache: unused here -- presumably kept for
                interface compatibility with other matchers (TODO confirm).
            sec1_to_sec2_mfovs_transforms: dict mfov_index -> (model, filtered
                match points); only the model part is used.
            pool: a multiprocessing-style pool providing apply_async.

        Returns:
            (sec1_to_sec2, sec2_to_sec1): each np.array of
            [stacked source points, stacked matched points].
        """
        # NOTE(review): starttime is currently unused.
        starttime = time.time()
        logger.report_event("Block-Matching+PMCC layers: {} with {} (bidirectional)".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.INFO)

        # take just the models (w/o the filtered match points)
        sec1_to_sec2_mfovs_transforms = {k:v[0] for k, v in sec1_to_sec2_mfovs_transforms.items()}

        # create a processes shared per-mfov transform from sec1 to sec2 (and from sec2 to sec1 too)
        mfovs1_centers_sec2centers = [[], [], []] # lists of mfovs indexes, mfovs centers, and mfovs centers after transformation to sec2
        missing_mfovs1_transforms_centers = [[], []] # lists of missing mfovs in sec1 and their centers
        for mfov1 in sec1.mfovs():
            mfov1_center = np.array([(mfov1.bbox[0] + mfov1.bbox[1])/2, (mfov1.bbox[2] + mfov1.bbox[3])/2])
            if mfov1.mfov_index in sec1_to_sec2_mfovs_transforms and sec1_to_sec2_mfovs_transforms[mfov1.mfov_index] is not None:
                mfovs1_centers_sec2centers[0].append(mfov1.mfov_index)
                mfovs1_centers_sec2centers[1].append(mfov1_center)
                sec1_mfov_model = sec1_to_sec2_mfovs_transforms[mfov1.mfov_index]
                mfovs1_centers_sec2centers[2].append(sec1_mfov_model.apply(mfov1_center)[0])
            else:
                missing_mfovs1_transforms_centers[0].append(mfov1.mfov_index)
                missing_mfovs1_transforms_centers[1].append(mfov1_center)

        # # find the transformations from sec2 to sec1
        # mfovs1_centers_sec2centers = [np.array(mfovs1_centers_sec2centers[0]), np.array(mfovs1_centers_sec2centers[1]), np.array(mfovs1_centers_sec2centers[2])]
        # mfovs1_centers_sec2_kdtree = KDTree(mfovs1_centers_sec2centers[2])
        # mfovs2_centers = [np.array([(mfov2.bbox[0] + mfov2.bbox[1])/2, (mfov2.bbox[2] + mfov2.bbox[3])/2]) for mfov2 in sec2.mfovs()]
        # mfovs2_closest_centers_mfovs1_idxs = mfovs1_centers_sec2_kdtree.query(mfovs2_centers)[1]
        # sec2_to_sec1_mfovs_transforms = {mfov2.mfov_index:
        #                                     inverse_transform(
        #                                         sec1_to_sec2_mfovs_transforms[
        #                                             mfovs1_centers_sec2centers[0][mfovs2_closest_centers_mfovs1_idxs[i]]
        #                                         ]
        #                                     )
        #                                  for i, mfov2 in enumerate(sec2.mfovs())}

        # estimate the transformation for mfovs in sec1 that do not have one (look at closest neighbor)
        if len(missing_mfovs1_transforms_centers[0]) > 0:
            mfovs1_centers_sec1_kdtree = KDTree(mfovs1_centers_sec2centers[1])
            mfovs1_missing_closest_centers_mfovs1_idxs = mfovs1_centers_sec1_kdtree.query(missing_mfovs1_transforms_centers[1])[1]
            missing_mfovs1_sec2_centers = []
            for i, (mfov1_index, mfov1_closest_mfov_idx) in enumerate(zip(missing_mfovs1_transforms_centers[0], mfovs1_missing_closest_centers_mfovs1_idxs)):
                # Borrow the transform of the nearest mfov that has one.
                model = sec1_to_sec2_mfovs_transforms[
                        mfovs1_centers_sec2centers[0][mfov1_closest_mfov_idx]
                    ]
                sec1_to_sec2_mfovs_transforms[mfov1_index] = model
                missing_mfovs1_sec2_centers.append(model.apply(np.atleast_2d(missing_mfovs1_transforms_centers[1][i]))[0])

            # update the mfovs1_centers_sec2centers lists to include the missing mfovs and their corresponding values
            mfovs1_centers_sec2centers[0] = np.concatenate((mfovs1_centers_sec2centers[0], missing_mfovs1_transforms_centers[0]))
            mfovs1_centers_sec2centers[1] = np.concatenate((mfovs1_centers_sec2centers[1], missing_mfovs1_transforms_centers[1]))
            mfovs1_centers_sec2centers[2] = np.concatenate((mfovs1_centers_sec2centers[2], missing_mfovs1_sec2_centers))

        # Lay a grid on top of each section
        sec1_mesh_pts = utils.generate_hexagonal_grid(sec1.bbox, self._mesh_spacing)
        sec2_mesh_pts = utils.generate_hexagonal_grid(sec2.bbox, self._mesh_spacing)

        # Per-tile centers and (mfov, tile) index pairs for nearest-tile lookups.
        sec1_tiles_centers = [
            [(t.bbox[0] + t.bbox[1])/2, (t.bbox[2] + t.bbox[3])/2]
            for t in sec1.tiles()]
        sec1_tiles_centers_kdtree = KDTree(sec1_tiles_centers)
        sec1_tiles_mfov_tile_idxs = np.array([[t.mfov_index, t.tile_index] for t in sec1.tiles()])
        sec2_tiles_centers = [
            [(t.bbox[0] + t.bbox[1])/2, (t.bbox[2] + t.bbox[3])/2]
            for t in sec2.tiles()]
        sec2_tiles_centers_kdtree = KDTree(sec2_tiles_centers)
        sec2_tiles_mfov_tile_idxs = np.array([[t.mfov_index, t.tile_index] for t in sec2.tiles()])

        # TODO - split the work in a smart way between the processes

        # Group the mesh points of sec1 by its mfovs_tiles and make sure the points are in tiles
        sec1_mesh_pts_mfov_tile_idxs = sec1_tiles_mfov_tile_idxs[sec1_tiles_centers_kdtree.query(sec1_mesh_pts)[1]]
        sec1_per_region_mesh_pts = defaultdict(list)
        for sec1_pt, sec1_pt_mfov_tile_idx in zip(sec1_mesh_pts, sec1_mesh_pts_mfov_tile_idxs):
            sec1_tile = sec1.get_mfov(sec1_pt_mfov_tile_idx[0]).get_tile(sec1_pt_mfov_tile_idx[1])
            # Drop mesh points that fall outside their nearest tile's bbox.
            if BlockMatcherPMCCDispatcher._is_point_in_img(sec1_tile.bbox, sec1_pt):
                sec1_per_region_mesh_pts[tuple(sec1_pt_mfov_tile_idx)].append(sec1_pt)

        # Group the mesh pts of sec2 by the mfov on sec1 which they should end up on (mfov1 that after applying its transformation is closest to that point)
        # Transform sec1 tiles centers to their estimated location on sec2
        sec1_tiles_centers_per_mfov = defaultdict(list)
        for sec1_tile_center, sec1_tiles_mfov_tile_idx in zip(sec1_tiles_centers, sec1_tiles_mfov_tile_idxs):
            sec1_tiles_centers_per_mfov[sec1_tiles_mfov_tile_idx[0]].append(sec1_tile_center)
        sec1_tiles_centers_on_sec2 = [
            sec1_to_sec2_mfovs_transforms[mfov_index].apply(np.atleast_2d(mfov1_tiles_centers))
            for mfov_index, mfov1_tiles_centers in sec1_tiles_centers_per_mfov.items()
        ]
        sec1_tiles_centers_on_sec2 = np.vstack(tuple(sec1_tiles_centers_on_sec2))
        sec1_tiles_centers_on_sec2_kdtree = KDTree(sec1_tiles_centers_on_sec2)
        sec2_mesh_pts_sec1_closest_tile_idxs = sec1_tiles_centers_on_sec2_kdtree.query(sec2_mesh_pts)[1]
        sec2_mesh_pts_mfov_tile_idxs = sec2_tiles_mfov_tile_idxs[sec2_tiles_centers_kdtree.query(sec2_mesh_pts)[1]]
        sec2_per_region1_mesh_pts = defaultdict(list)
        for sec2_pt, (sec2_pt_mfov_idx, sec2_pt_tile_idx), sec1_tile_center_idx in zip(sec2_mesh_pts, sec2_mesh_pts_mfov_tile_idxs, sec2_mesh_pts_sec1_closest_tile_idxs):
            sec2_tile = sec2.get_mfov(sec2_pt_mfov_idx).get_tile(sec2_pt_tile_idx)
            # Keyed by the sec1 region whose transformed center is nearest.
            if BlockMatcherPMCCDispatcher._is_point_in_img(sec2_tile.bbox, sec2_pt):
                sec2_per_region1_mesh_pts[tuple(sec1_tiles_mfov_tile_idxs[sec1_tile_center_idx])].append(sec2_pt)

        # Activate the actual matching
        sec1_to_sec2_results = [[], []]
        sec2_to_sec1_results = [[], []]
        pool_results = []
        for region1_key, sec1_region_mesh_pts in sec1_per_region_mesh_pts.items():
            sec2_mesh_pts_cur_sec1_region = sec2_per_region1_mesh_pts[region1_key]
            #sec1_sec2_mfov_matches, sec2_sec1_mfov_matches = BlockMatcherPMCCDispatcher._perform_matching(sec1_mfov_index, sec1, sec2, sec1_to_sec2_mfovs_transforms[sec1_mfov_index], sec1_mfov_mesh_pts, sec2_mesh_pts_cur_sec1_mfov, self._debug_dir, **self._matcher_kwargs)
            res_pool = pool.apply_async(BlockMatcherPMCCDispatcher._perform_matching, (region1_key, sec1, sec2, sec1_to_sec2_mfovs_transforms[region1_key[0]], sec1_region_mesh_pts, sec2_mesh_pts_cur_sec1_region, self._debug_dir, self._matcher_kwargs))
            pool_results.append(res_pool)

        # Collect per-region results; regions with no valid matches are skipped.
        for res_pool in pool_results:
            sec1_region_index, sec1_sec2_region_matches, sec2_sec1_region_matches = res_pool.get()
            if len(sec1_sec2_region_matches[0]) > 0:
                sec1_to_sec2_results[0].append(sec1_sec2_region_matches[0])
                sec1_to_sec2_results[1].append(sec1_sec2_region_matches[1])
            if len(sec2_sec1_region_matches[0]) > 0:
                sec2_to_sec1_results[0].append(sec2_sec1_region_matches[0])
                sec2_to_sec1_results[1].append(sec2_sec1_region_matches[1])

        return np.array([np.vstack(sec1_to_sec2_results[0]), np.vstack(sec1_to_sec2_results[1])]), np.array([np.vstack(sec2_to_sec1_results[0]), np.vstack(sec2_to_sec1_results[1])])
| [
"scipy.spatial.cKDTree",
"mb_aligner.alignment.fine_matchers.PMCC_filter.PMCC_match",
"numpy.array",
"numpy.atleast_2d",
"numpy.histogram",
"threading.local",
"rh_logger.api.logger.report_event",
"numpy.asarray",
"numpy.dot",
"os.mkdirs",
"numpy.concatenate",
"numpy.vstack",
"os.getpid",
"... | [((673, 690), 'threading.local', 'threading.local', ([], {}), '()\n', (688, 690), False, 'import threading\n'), ((14324, 14364), 'numpy.histogram', 'np.histogram', (['invalid_matches[1]'], {'bins': '(5)'}), '(invalid_matches[1], bins=5)\n', (14336, 14364), True, 'import numpy as np\n'), ((17093, 17104), 'time.time', 'time.time', ([], {}), '()\n', (17102, 17104), False, 'import time\n'), ((21094, 21154), 'mb_aligner.common.utils.generate_hexagonal_grid', 'utils.generate_hexagonal_grid', (['sec1.bbox', 'self._mesh_spacing'], {}), '(sec1.bbox, self._mesh_spacing)\n', (21123, 21154), False, 'from mb_aligner.common import utils\n'), ((21179, 21239), 'mb_aligner.common.utils.generate_hexagonal_grid', 'utils.generate_hexagonal_grid', (['sec2.bbox', 'self._mesh_spacing'], {}), '(sec2.bbox, self._mesh_spacing)\n', (21208, 21239), False, 'from mb_aligner.common import utils\n'), ((21446, 21472), 'scipy.spatial.cKDTree', 'KDTree', (['sec1_tiles_centers'], {}), '(sec1_tiles_centers)\n', (21452, 21472), True, 'from scipy.spatial import cKDTree as KDTree\n'), ((21776, 21802), 'scipy.spatial.cKDTree', 'KDTree', (['sec2_tiles_centers'], {}), '(sec2_tiles_centers)\n', (21782, 21802), True, 'from scipy.spatial import cKDTree as KDTree\n'), ((22220, 22237), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (22231, 22237), False, 'from collections import defaultdict\n'), ((22876, 22893), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (22887, 22893), False, 'from collections import defaultdict\n'), ((23539, 23573), 'scipy.spatial.cKDTree', 'KDTree', (['sec1_tiles_centers_on_sec2'], {}), '(sec1_tiles_centers_on_sec2)\n', (23545, 23573), True, 'from scipy.spatial import cKDTree as KDTree\n'), ((23831, 23848), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (23842, 23848), False, 'from collections import defaultdict\n'), ((2157, 2221), 'numpy.array', 'np.array', (['[[self._scaling, 0.0, 0.0], [0.0, self._scaling, 
0.0]]'], {}), '([[self._scaling, 0.0, 0.0], [0.0, self._scaling, 0.0]])\n', (2165, 2221), True, 'import numpy as np\n'), ((2472, 2515), 'rh_renderer.tilespec_affine_renderer.TilespecAffineRenderer', 'TilespecAffineRenderer', (['self._sec1.tilespec'], {}), '(self._sec1.tilespec)\n', (2494, 2515), False, 'from rh_renderer.tilespec_affine_renderer import TilespecAffineRenderer\n'), ((2845, 2888), 'rh_renderer.tilespec_affine_renderer.TilespecAffineRenderer', 'TilespecAffineRenderer', (['self._sec2.tilespec'], {}), '(self._sec2.tilespec)\n', (2867, 2888), False, 'from rh_renderer.tilespec_affine_renderer import TilespecAffineRenderer\n'), ((7696, 7716), 'numpy.asarray', 'np.asarray', (['sec2_pts'], {}), '(sec2_pts)\n', (7706, 7716), True, 'import numpy as np\n'), ((7859, 7877), 'numpy.linalg.inv', 'np.linalg.inv', (['mat'], {}), '(mat)\n', (7872, 7877), True, 'import numpy as np\n'), ((13414, 13476), 'rh_logger.api.logger.report_event', 'logger.report_event', (['"""Debug mode - on"""'], {'log_level': 'logging.INFO'}), "('Debug mode - on', log_level=logging.INFO)\n", (13433, 13476), False, 'from rh_logger.api import logger\n'), ((13680, 13706), 'os.mkdirs', 'os.mkdirs', (['self._debug_dir'], {}), '(self._debug_dir)\n', (13689, 13706), False, 'import os\n'), ((17849, 17938), 'numpy.array', 'np.array', (['[(mfov1.bbox[0] + mfov1.bbox[1]) / 2, (mfov1.bbox[2] + mfov1.bbox[3]) / 2]'], {}), '([(mfov1.bbox[0] + mfov1.bbox[1]) / 2, (mfov1.bbox[2] + mfov1.bbox[\n 3]) / 2])\n', (17857, 17938), True, 'import numpy as np\n'), ((19800, 19837), 'scipy.spatial.cKDTree', 'KDTree', (['mfovs1_centers_sec2centers[1]'], {}), '(mfovs1_centers_sec2centers[1])\n', (19806, 19837), True, 'from scipy.spatial import cKDTree as KDTree\n'), ((20688, 20777), 'numpy.concatenate', 'np.concatenate', (['(mfovs1_centers_sec2centers[0], missing_mfovs1_transforms_centers[0])'], {}), '((mfovs1_centers_sec2centers[0],\n missing_mfovs1_transforms_centers[0]))\n', (20702, 20777), True, 'import numpy as np\n'), 
((20818, 20907), 'numpy.concatenate', 'np.concatenate', (['(mfovs1_centers_sec2centers[1], missing_mfovs1_transforms_centers[1])'], {}), '((mfovs1_centers_sec2centers[1],\n missing_mfovs1_transforms_centers[1]))\n', (20832, 20907), True, 'import numpy as np\n'), ((20948, 21024), 'numpy.concatenate', 'np.concatenate', (['(mfovs1_centers_sec2centers[2], missing_mfovs1_sec2_centers)'], {}), '((mfovs1_centers_sec2centers[2], missing_mfovs1_sec2_centers))\n', (20962, 21024), True, 'import numpy as np\n'), ((1645, 1696), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(2.0)', 'tileGridSize': '(8, 8)'}), '(clipLimit=2.0, tileGridSize=(8, 8))\n', (1660, 1696), False, 'import cv2\n'), ((23225, 23259), 'numpy.atleast_2d', 'np.atleast_2d', (['mfov1_tiles_centers'], {}), '(mfov1_tiles_centers)\n', (23238, 23259), True, 'import numpy as np\n'), ((3321, 3344), 'numpy.atleast_2d', 'np.atleast_2d', (['sec1_pts'], {}), '(sec1_pts)\n', (3334, 3344), True, 'import numpy as np\n'), ((4940, 5122), 'mb_aligner.alignment.fine_matchers.PMCC_filter.PMCC_match', 'PMCC_filter.PMCC_match', (['sec2_search_window_clahe', 'sec1_template_clahe'], {'min_correlation': 'self._min_corr', 'maximal_curvature_ratio': 'self._max_curvature', 'maximal_ROD': 'self._max_rod'}), '(sec2_search_window_clahe, sec1_template_clahe,\n min_correlation=self._min_corr, maximal_curvature_ratio=self.\n _max_curvature, maximal_ROD=self._max_rod)\n', (4962, 5122), False, 'from mb_aligner.alignment.fine_matchers import PMCC_filter\n'), ((5189, 5359), 'mb_aligner.alignment.fine_matchers.PMCC_filter.PMCC_match', 'PMCC_filter.PMCC_match', (['sec2_search_window', 'sec1_template'], {'min_correlation': 'self._min_corr', 'maximal_curvature_ratio': 'self._max_curvature', 'maximal_ROD': 'self._max_rod'}), '(sec2_search_window, sec1_template, min_correlation=\n self._min_corr, maximal_curvature_ratio=self._max_curvature,\n maximal_ROD=self._max_rod)\n', (5211, 5359), False, 'from mb_aligner.alignment.fine_matchers import 
PMCC_filter\n'), ((9766, 9948), 'mb_aligner.alignment.fine_matchers.PMCC_filter.PMCC_match', 'PMCC_filter.PMCC_match', (['sec1_search_window_clahe', 'sec2_template_clahe'], {'min_correlation': 'self._min_corr', 'maximal_curvature_ratio': 'self._max_curvature', 'maximal_ROD': 'self._max_rod'}), '(sec1_search_window_clahe, sec2_template_clahe,\n min_correlation=self._min_corr, maximal_curvature_ratio=self.\n _max_curvature, maximal_ROD=self._max_rod)\n', (9788, 9948), False, 'from mb_aligner.alignment.fine_matchers import PMCC_filter\n'), ((10015, 10185), 'mb_aligner.alignment.fine_matchers.PMCC_filter.PMCC_match', 'PMCC_filter.PMCC_match', (['sec1_search_window', 'sec2_template'], {'min_correlation': 'self._min_corr', 'maximal_curvature_ratio': 'self._max_curvature', 'maximal_ROD': 'self._max_rod'}), '(sec1_search_window, sec2_template, min_correlation=\n self._min_corr, maximal_curvature_ratio=self._max_curvature,\n maximal_ROD=self._max_rod)\n', (10037, 10185), False, 'from mb_aligner.alignment.fine_matchers import PMCC_filter\n'), ((25766, 25800), 'numpy.vstack', 'np.vstack', (['sec1_to_sec2_results[0]'], {}), '(sec1_to_sec2_results[0])\n', (25775, 25800), True, 'import numpy as np\n'), ((25802, 25836), 'numpy.vstack', 'np.vstack', (['sec1_to_sec2_results[1]'], {}), '(sec1_to_sec2_results[1])\n', (25811, 25836), True, 'import numpy as np\n'), ((25850, 25884), 'numpy.vstack', 'np.vstack', (['sec2_to_sec1_results[0]'], {}), '(sec2_to_sec1_results[0])\n', (25859, 25884), True, 'import numpy as np\n'), ((25886, 25920), 'numpy.vstack', 'np.vstack', (['sec2_to_sec1_results[1]'], {}), '(sec2_to_sec1_results[1])\n', (25895, 25920), True, 'import numpy as np\n'), ((6998, 7042), 'cv2.imwrite', 'cv2.imwrite', (['debug_out_fname1', 'sec1_template'], {}), '(debug_out_fname1, sec1_template)\n', (7009, 7042), False, 'import cv2\n'), ((7253, 7296), 'cv2.imwrite', 'cv2.imwrite', (['debug_out_fname2', 'sec2_cut_out'], {}), '(debug_out_fname2, sec2_cut_out)\n', (7264, 7296), False, 
'import cv2\n'), ((7341, 7358), 'numpy.array', 'np.array', (['sec1_pt'], {}), '(sec1_pt)\n', (7349, 7358), True, 'import numpy as np\n'), ((11199, 11235), 'numpy.dot', 'np.dot', (['inverse_mat[:2, :2]', 'sec1_pt'], {}), '(inverse_mat[:2, :2], sec1_pt)\n', (11205, 11235), True, 'import numpy as np\n'), ((11958, 12002), 'cv2.imwrite', 'cv2.imwrite', (['debug_out_fname1', 'sec2_template'], {}), '(debug_out_fname1, sec2_template)\n', (11969, 12002), False, 'import cv2\n'), ((12213, 12256), 'cv2.imwrite', 'cv2.imwrite', (['debug_out_fname2', 'sec1_cut_out'], {}), '(debug_out_fname2, sec1_cut_out)\n', (12224, 12256), False, 'import cv2\n'), ((4414, 4448), 'numpy.array', 'np.array', (['sec2_search_window.shape'], {}), '(sec2_search_window.shape)\n', (4422, 4448), True, 'import numpy as np\n'), ((4465, 4494), 'numpy.array', 'np.array', (['sec1_template.shape'], {}), '(sec1_template.shape)\n', (4473, 4494), True, 'import numpy as np\n'), ((6179, 6211), 'numpy.array', 'np.array', (['[reason[1], reason[0]]'], {}), '([reason[1], reason[0]])\n', (6187, 6211), True, 'import numpy as np\n'), ((6214, 6242), 'numpy.array', 'np.array', (['[from_x2, from_y2]'], {}), '([from_x2, from_y2])\n', (6222, 6242), True, 'import numpy as np\n'), ((6441, 6452), 'os.getpid', 'os.getpid', ([], {}), '()\n', (6450, 6452), False, 'import os\n'), ((9240, 9274), 'numpy.array', 'np.array', (['sec1_search_window.shape'], {}), '(sec1_search_window.shape)\n', (9248, 9274), True, 'import numpy as np\n'), ((9291, 9320), 'numpy.array', 'np.array', (['sec2_template.shape'], {}), '(sec2_template.shape)\n', (9299, 9320), True, 'import numpy as np\n'), ((11005, 11037), 'numpy.array', 'np.array', (['[reason[1], reason[0]]'], {}), '([reason[1], reason[0]])\n', (11013, 11037), True, 'import numpy as np\n'), ((11040, 11068), 'numpy.array', 'np.array', (['[from_x1, from_y1]'], {}), '([from_x1, from_y1])\n', (11048, 11068), True, 'import numpy as np\n'), ((11353, 11364), 'os.getpid', 'os.getpid', ([], {}), '()\n', 
(11362, 11364), False, 'import os\n'), ((13630, 13653), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13651, 13653), False, 'import datetime\n'), ((20465, 20519), 'numpy.atleast_2d', 'np.atleast_2d', (['missing_mfovs1_transforms_centers[1][i]'], {}), '(missing_mfovs1_transforms_centers[1][i])\n', (20478, 20519), True, 'import numpy as np\n'), ((11384, 11446), 'numpy.dot', 'np.dot', (['inverse_mat[:2, :2]', '(sec1_pt_estimated / self._scaling)'], {}), '(inverse_mat[:2, :2], sec1_pt_estimated / self._scaling)\n', (11390, 11446), True, 'import numpy as np\n')] |
import numpy as np
from numpy.linalg import LinAlgError
from scipy.linalg import sqrtm as MatrixSqrt
from functools import partial
from sklearn.linear_model import Ridge as LR
from sklearn.utils.validation import check_X_y, check_is_fitted
from sklearn.utils import check_array
from sklearn.decomposition._base import _BasePCA
from sklearn.linear_model._base import LinearModel
from .pcovr_distances import pcovr_covariance, pcovr_kernel
from skcosmo.utils import eig_solver
class PCovR(_BasePCA, LinearModel):
    """
    Performs Principal Covariates Regression, as described in `[<NAME> and
    <NAME>, 1992] <https://doi.org/10.1016/0169-7439(92)80100-I>`_.

    :param mixing: mixing parameter,
        as described in PCovR as :math:`{\\alpha}`; defaults to 0.0
        (pure regression).
    :type mixing: float
    :param n_components: Number of components to keep. If ``None``,
        ``min(n_samples, n_features)`` is used at fit time.
    :type n_components: int
    :param regularization: regularization parameter for linear models
    :type regularization: float, default 1E-6
    :param tol: tolerance below which to consider eigenvalues = 0
    :type tol: float, default 1E-12
    :param space: whether to compute the PCovR in `structure` or `feature`
        space. With ``None`` (the default) or ``'auto'`` the space is chosen
        from the data shape: `feature` when
        :math:`{n_{samples} > n_{features}}` and `structure` otherwise.
    :type space: {'feature', 'structure', 'auto'}
    :param lr_args: dictionary of arguments to pass to the Ridge Regression
        in estimating :math:`{\\mathbf{\\hat{Y}}}`; defaults to
        ``dict(alpha=1e-6, fit_intercept=False, tol=1e-12)``.

    References
    ----------
    1. <NAME>, <NAME>, 'Principal Covariates
       Regression: Part I. Theory', Chemometrics and Intelligent
       Laboratory Systems 14(1): 155-164, 1992
    2. <NAME>, <NAME>, <NAME>, <NAME>,
       'PCovR: An R Package for Principal Covariates Regression',
       Journal of Statistical Software 65(1):1-14, 2015
    """

    def __init__(
        self,
        mixing=0.0,
        n_components=None,
        regularization=1e-6,
        tol=1e-12,
        space=None,
        lr_args=None,
    ):
        self.mixing = mixing
        self.regularization = regularization
        self.tol = tol
        self.space = space
        # Normalize None to a fresh dict per instance.  The previous default
        # (``lr_args=dict(...)`` in the signature) was a single mutable object
        # shared by every PCovR instance, so mutating one estimator's
        # ``lr_args`` silently changed all others.
        self.lr_args = (
            dict(alpha=1e-6, fit_intercept=False, tol=1e-12)
            if lr_args is None
            else lr_args
        )
        self.n_components = n_components
        self.whiten = False

        # NOTE(review): this partial binds the *construction-time* value of
        # ``n_components``; ``fit`` may later overwrite ``self.n_components``
        # without updating the solver.  Presumably ``eig_solver`` treats
        # ``n_components=None`` as "keep all components" -- confirm against
        # skcosmo.utils.eig_solver.
        self._eig_solver = partial(
            eig_solver, n_components=self.n_components, tol=self.tol, add_null=True
        )

    def fit(self, X, Y, Yhat=None, W=None):
        """
        Fit the model with X and Y. Depending on the dimensions of X,
        calls either `_fit_feature_space` or `_fit_structure_space`

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Y : array-like, shape (n_samples, n_properties)
            Training data, where n_samples is the number of samples and
            n_properties is the number of properties

        Yhat : array-like, shape (n_samples, n_properties), optional
            Regressed training data, where n_samples is the number of samples and
            n_properties is the number of properties. If not supplied, computed
            by ridge regression.

        W : array-like, shape (n_features, n_properties), optional
            Weights of regressed training data. If not supplied, computed
            by ridge regression.
        """
        X, Y = check_X_y(X, Y, y_numeric=True, multi_output=True)

        if self.space is not None and self.space not in [
            "feature",
            "structure",
            "auto",
        ]:
            raise ValueError("Only feature and structure space are supported.")

        if self.n_components is None:
            self.n_components = min(X.shape)

        if Yhat is None or W is None:
            Yhat, W = self._compute_Yhat(X, Y, Yhat=Yhat, W=W)

        # 'auto' is documented as "choose from the data shape", but was
        # previously falling through to structure space unconditionally;
        # treat it exactly like the None default.
        if self.space is None or self.space == "auto":
            if X.shape[0] > X.shape[1]:
                self.space = "feature"
            else:
                self.space = "structure"

        if self.space == "feature":
            self._fit_feature_space(X, Yhat, W)
        else:
            self._fit_structure_space(X, Yhat, W)

        self.mean_ = np.mean(X, axis=0)
        self.pxy_ = self.pxt_ @ self.pty_
        if len(Y.shape) == 1:
            # Collapse the projectors to 1D for a single-property target.
            self.pxy_ = self.pxy_.reshape(
                X.shape[1],
            )
            self.pty_ = self.pty_.reshape(
                self.n_components,
            )

        self.components_ = self.pxt_.T  # for sklearn compatibility
        return self

    def _compute_Yhat(self, X, Y, Yhat=None, W=None):
        """
        Method for computing the approximation of Y to fit the PCovR.

        Fills in whichever of ``Yhat`` (regressed targets) / ``W`` (regression
        weights) is missing, using ridge regression when neither is given.
        """
        if Yhat is None:
            if W is None:
                lr = LR(**self.lr_args)  # some sort of args
                lr.fit(X, Y)
                Yhat = lr.predict(X)
                W = lr.coef_.T
            else:
                Yhat = X @ W
        elif W is None:
            W = np.linalg.lstsq(X, Y, rcond=self.regularization)[0]

        # Always work with 2D (n_samples, n_properties) / (n_features,
        # n_properties) arrays internally.
        Yhat = Yhat.reshape(X.shape[0], -1)
        W = W.reshape(X.shape[1], -1)

        return Yhat, W

    def _fit_feature_space(self, X, Yhat, W=None):
        """
        In feature-space PCovR, the projectors are determined by:

        .. math::

            \\mathbf{\\tilde{C}} = \\alpha \\mathbf{X}^T \\mathbf{X} +
            (1 - \\alpha) \\left(\\left(\\mathbf{X}^T
            \\mathbf{X}\\right)^{-\\frac{1}{2}} \\mathbf{X}^T
            \\mathbf{\\hat{Y}}\\mathbf{\\hat{Y}}^T \\mathbf{X} \\left(\\mathbf{X}^T
            \\mathbf{X}\\right)^{-\\frac{1}{2}}\\right)

        where

        .. math::

            \\mathbf{P}_{XT} = (\\mathbf{X}^T \\mathbf{X})^{-\\frac{1}{2}}
                                \\mathbf{U}_\\mathbf{\\tilde{C}}^T
                                \\mathbf{\\Lambda}_\\mathbf{\\tilde{C}}^{\\frac{1}{2}}

        .. math::

            \\mathbf{P}_{TX} = \\mathbf{\\Lambda}_\\mathbf{\\tilde{C}}^{-\\frac{1}{2}}
                                \\mathbf{U}_\\mathbf{\\tilde{C}}^T
                                (\\mathbf{X}^T \\mathbf{X})^{\\frac{1}{2}}

        .. math::

            \\mathbf{P}_{TY} = \\mathbf{\\Lambda}_\\mathbf{\\tilde{C}}^{-\\frac{1}{2}}
                                \\mathbf{U}_\\mathbf{\\tilde{C}}^T (\\mathbf{X}^T
                                \\mathbf{X})^{-\\frac{1}{2}} \\mathbf{X}^T
                                \\mathbf{Y}

        """
        Ct, iCsqrt = pcovr_covariance(
            mixing=self.mixing,
            X_proxy=X,
            Y_proxy=Yhat,
            rcond=self.tol,
            return_isqrt=True,
        )

        try:
            Csqrt = np.linalg.inv(iCsqrt)
        except LinAlgError:
            # iCsqrt may be singular (rank-deficient X); fall back to the
            # matrix square root of the covariance directly.
            Csqrt = np.real(MatrixSqrt(X.T @ X))

        v, U = self._eig_solver(Ct)
        S = v ** 0.5
        S_inv = np.linalg.pinv(np.diagflat(S))

        self.singular_values_ = S.copy()
        self.explained_variance_ = (S ** 2) / (X.shape[0] - 1)
        self.explained_variance_ratio_ = (
            self.explained_variance_ / self.explained_variance_.sum()
        )

        self.pxt_ = np.linalg.multi_dot([iCsqrt, U, np.diagflat(S)])
        self.ptx_ = np.linalg.multi_dot([S_inv, U.T, Csqrt])
        self.pty_ = np.linalg.multi_dot([S_inv, U.T, iCsqrt, X.T, Yhat])

    def _fit_structure_space(self, X, Yhat, W):
        """
        In sample-space PCovR, the projectors are determined by:

        .. math::

            \\mathbf{\\tilde{K}} = \\alpha \\mathbf{X} \\mathbf{X}^T +
            (1 - \\alpha) \\mathbf{\\hat{Y}}\\mathbf{\\hat{Y}}^T

        where

        .. math::

            \\mathbf{P}_{XT} = \\left(\\alpha \\mathbf{X}^T + (1 - \\alpha)
                               \\mathbf{W} \\mathbf{\\hat{Y}}^T\\right)
                               \\mathbf{U}_\\mathbf{\\tilde{K}}
                               \\mathbf{\\Lambda}_\\mathbf{\\tilde{K}}^{-\\frac{1}{2}}

        .. math::

            \\mathbf{P}_{TX} = \\mathbf{\\Lambda}_\\mathbf{\\tilde{K}}^{-\\frac{1}{2}}
                               \\mathbf{U}_\\mathbf{\\tilde{K}}^T \\mathbf{X}

        .. math::

            \\mathbf{P}_{TY} = \\mathbf{\\Lambda}_\\mathbf{\\tilde{K}}^{-\\frac{1}{2}}
                               \\mathbf{U}_\\mathbf{\\tilde{K}}^T \\mathbf{Y}

        """
        Kt = pcovr_kernel(mixing=self.mixing, X_proxy=X, Y_proxy=Yhat)

        v, U = self._eig_solver(Kt)
        S = v ** 0.5

        P = (self.mixing * X.T) + (1.0 - self.mixing) * np.dot(W, Yhat.T)

        self.singular_values_ = S.copy()
        self.explained_variance_ = (S ** 2) / (X.shape[0] - 1)
        self.explained_variance_ratio_ = (
            self.explained_variance_ / self.explained_variance_.sum()
        )

        T = U @ np.diagflat(1 / S)

        self.pxt_ = P @ T
        self.pty_ = T.T @ Yhat
        self.ptx_ = T.T @ X

    def inverse_transform(self, T):
        """Transform data back to its original space.

        .. math::

            \\mathbf{\\hat{X}} = \\mathbf{T} \\mathbf{P}_{TX}
                              = \\mathbf{X} \\mathbf{P}_{XT} \\mathbf{P}_{TX}


        Parameters
        ----------
        T : array-like, shape (n_samples, n_components)
            Projected data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original array-like, shape (n_samples, n_features)
        """
        return T @ self.ptx_ + self.mean_

    def predict(self, X=None, T=None):
        """Predicts the property values using regression on X or T"""
        check_is_fitted(self)

        if X is None and T is None:
            raise ValueError("Either X or T must be supplied.")

        if X is not None:
            X = check_array(X)
            return X @ self.pxy_
        else:
            T = check_array(T)
            return T @ self.pty_

    def transform(self, X=None):
        """
        Apply dimensionality reduction to X.

        X is projected on the first principal components as determined by the
        modified PCovR distances.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.
        """
        return super().transform(X)
| [
"sklearn.utils.validation.check_is_fitted",
"numpy.mean",
"scipy.linalg.sqrtm",
"numpy.linalg.multi_dot",
"sklearn.linear_model.Ridge",
"numpy.dot",
"numpy.linalg.inv",
"functools.partial",
"sklearn.utils.check_array",
"numpy.linalg.lstsq",
"numpy.diagflat",
"sklearn.utils.validation.check_X_y... | [((2385, 2470), 'functools.partial', 'partial', (['eig_solver'], {'n_components': 'self.n_components', 'tol': 'self.tol', 'add_null': '(True)'}), '(eig_solver, n_components=self.n_components, tol=self.tol, add_null=True\n )\n', (2392, 2470), False, 'from functools import partial\n'), ((3547, 3597), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'Y'], {'y_numeric': '(True)', 'multi_output': '(True)'}), '(X, Y, y_numeric=True, multi_output=True)\n', (3556, 3597), False, 'from sklearn.utils.validation import check_X_y, check_is_fitted\n'), ((4343, 4361), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (4350, 4361), True, 'import numpy as np\n'), ((7357, 7397), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[S_inv, U.T, Csqrt]'], {}), '([S_inv, U.T, Csqrt])\n', (7376, 7397), True, 'import numpy as np\n'), ((7418, 7470), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[S_inv, U.T, iCsqrt, X.T, Yhat]'], {}), '([S_inv, U.T, iCsqrt, X.T, Yhat])\n', (7437, 7470), True, 'import numpy as np\n'), ((9768, 9789), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (9783, 9789), False, 'from sklearn.utils.validation import check_X_y, check_is_fitted\n'), ((6835, 6856), 'numpy.linalg.inv', 'np.linalg.inv', (['iCsqrt'], {}), '(iCsqrt)\n', (6848, 6856), True, 'import numpy as np\n'), ((7023, 7037), 'numpy.diagflat', 'np.diagflat', (['S'], {}), '(S)\n', (7034, 7037), True, 'import numpy as np\n'), ((8921, 8939), 'numpy.diagflat', 'np.diagflat', (['(1 / S)'], {}), '(1 / S)\n', (8932, 8939), True, 'import numpy as np\n'), ((9934, 9948), 'sklearn.utils.check_array', 'check_array', (['X'], {}), '(X)\n', (9945, 9948), False, 'from sklearn.utils import check_array\n'), ((10012, 10026), 'sklearn.utils.check_array', 'check_array', (['T'], {}), '(T)\n', (10023, 10026), False, 'from sklearn.utils import check_array\n'), ((4921, 4939), 'sklearn.linear_model.Ridge', 'LR', ([], 
{}), '(**self.lr_args)\n', (4923, 4939), True, 'from sklearn.linear_model import Ridge as LR\n'), ((7320, 7334), 'numpy.diagflat', 'np.diagflat', (['S'], {}), '(S)\n', (7331, 7334), True, 'import numpy as np\n'), ((8658, 8675), 'numpy.dot', 'np.dot', (['W', 'Yhat.T'], {}), '(W, Yhat.T)\n', (8664, 8675), True, 'import numpy as np\n'), ((5146, 5194), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['X', 'Y'], {'rcond': 'self.regularization'}), '(X, Y, rcond=self.regularization)\n', (5161, 5194), True, 'import numpy as np\n'), ((6913, 6932), 'scipy.linalg.sqrtm', 'MatrixSqrt', (['(X.T @ X)'], {}), '(X.T @ X)\n', (6923, 6932), True, 'from scipy.linalg import sqrtm as MatrixSqrt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plot 2 dimension data (version 1)
"""
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
# Build data ################
x = np.arange(-5, 5, 0.25)
y = np.arange(-5, 5, 0.25)
xx, yy = np.meshgrid(x, y)
z = np.sin(np.sqrt(xx**2 + yy**2))

# Plot data #################
fig = plt.figure()
# Instantiating axes3d.Axes3D(fig) directly no longer attaches the axes to
# the figure (auto_add_to_figure was deprecated in Matplotlib 3.4 and removed
# in 3.7); add_subplot(projection='3d') is the supported way to get 3D axes.
ax = fig.add_subplot(projection='3d')
ax.plot_wireframe(xx, yy, z)

# SAVE FILES ######################
plt.savefig("demo1_mplot3d.png")

# Plot ######################
plt.show()
| [
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"mpl_toolkits.mplot3d.axes3d.Axes3D",
"matplotlib.pyplot.figure",
"numpy.meshgrid",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((218, 240), 'numpy.arange', 'np.arange', (['(-5)', '(5)', '(0.25)'], {}), '(-5, 5, 0.25)\n', (227, 240), True, 'import numpy as np\n'), ((245, 267), 'numpy.arange', 'np.arange', (['(-5)', '(5)', '(0.25)'], {}), '(-5, 5, 0.25)\n', (254, 267), True, 'import numpy as np\n'), ((277, 294), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (288, 294), True, 'import numpy as np\n'), ((368, 380), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (378, 380), True, 'import matplotlib.pyplot as plt\n'), ((386, 404), 'mpl_toolkits.mplot3d.axes3d.Axes3D', 'axes3d.Axes3D', (['fig'], {}), '(fig)\n', (399, 404), False, 'from mpl_toolkits.mplot3d import axes3d\n'), ((472, 504), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""demo1_mplot3d.png"""'], {}), "('demo1_mplot3d.png')\n", (483, 504), True, 'import matplotlib.pyplot as plt\n'), ((537, 547), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (545, 547), True, 'import matplotlib.pyplot as plt\n'), ((306, 332), 'numpy.sqrt', 'np.sqrt', (['(xx ** 2 + yy ** 2)'], {}), '(xx ** 2 + yy ** 2)\n', (313, 332), True, 'import numpy as np\n')] |
#! /usr/bin/env python
"""Script used to generate a tb file of TWA candidate members.
Using astrometry info from Donaldson 2016, converts into preferred units,
then applies chronostar.traceback.traceback() (?) function.
"""
import chronostar.traceback as tb
import chronostar.retired.groupfitter as gf
import chronostar._overlap as ov
import numpy as np
import pdb
import pickle
from csv import reader
from astropy.table import Table
try:
    import astropy.io.fits as pyfits
except ImportError:
    # A bare ``except:`` also swallowed KeyboardInterrupt/SystemExit; only an
    # import failure should trigger the fallback to the standalone package.
    import pyfits
# TWA Hya init pos: XYZUVW phase-space coordinates, plus an assumed age (Myr).
twa_xyzuvw = np.array([12.49, -42.28, 21.55, -9.95, -17.91, -4.65])
twa_age = 10
# Group parameter vector: mean + [1/5, 1/5, 1/5, 1/2, 0, 0, 0] + age.
# Presumably the middle seven entries are inverse widths and correlations of
# the group Gaussian -- confirm against gf.Group's expected layout.
twa_params = list(twa_xyzuvw) + [1/5., 1/5., 1/5., 1/2., 0, 0, 0] + [twa_age]
twa_group = gf.Group(twa_params, 1.0)
#twa_origin = tb.traceback_group(twa_xyzuvw, twa_age)

# Nasty hacky way of checking if on Raijin: try writing a scratch file to
# Raijin's /short filesystem and assume any failure means we are elsewhere.
onRaijin = True
try:
    # pickle needs a binary-mode handle on Python 3; the original text-mode
    # 'w' made pickle.dump raise TypeError, so onRaijin was always False even
    # on Raijin.  Use a context manager so the handle is closed either way.
    with open("/short/kc5/dummy.pkl", 'wb') as dummy_file:
        pickle.dump(None, dummy_file)
except Exception:
    # Keep the catch broad (missing dir, permissions, ...) but not bare, so
    # KeyboardInterrupt/SystemExit still propagate.
    onRaijin = False

filename = "TGAS_traceback_165Myr_small.fits"
if onRaijin:
    location = "/short/kc5/"
else:
    location = "data/"
# Importing TWA astrometry from Donaldson16
def rahours_to_raDeg(hrs, mins, secs):
    """Convert a right ascension given as (hours, minutes, seconds) to decimal degrees."""
    total_hours = hrs + mins / 60. + secs / 3600.
    return total_hours / 24 * 360
def decdeg_to_degdec(degs, mins, secs):
    """Convert a sexagesimal declination (degrees, arcminutes, arcseconds) to decimal degrees.

    The arcminute/arcsecond components increase the *magnitude* of the angle,
    so for a negative declination they must be subtracted from zero, not
    added: the original ``degs + mins/60 + secs/3600`` turned -30d 15m into
    -29.75 instead of -30.25.  ``HMS2deg`` in this file applies the same sign
    convention.
    """
    sign = -1.0 if degs < 0 else 1.0
    return sign * (abs(degs) + mins/60. + secs/3600.)
def convert_ra(rahrs_str):
    """Convert a space-separated 'H M S' right-ascension string to decimal degrees."""
    elements_str = np.array(rahrs_str.split(' '))
    # ``np.float`` was removed in NumPy 1.20+; the builtin ``float`` is the
    # documented replacement.
    elements_flt = elements_str.astype(float)
    return rahours_to_raDeg(*elements_flt)
def convert_dec(decdeg_str):
    """Convert a space-separated 'D M S' declination string to decimal degrees."""
    elements_str = np.array(decdeg_str.split(' '))
    # ``np.float`` was removed in NumPy 1.20+; the builtin ``float`` is the
    # documented replacement.
    elements_flt = elements_str.astype(float)
    return decdeg_to_degdec(*elements_flt)
# Taken from www.bdnyc.org/2012/10/decimal-deg-to-hms/
def HMS2deg(ra='', dec=''):
    """Convert sexagesimal RA ('H M S') and/or Dec ('+-D M S') strings to decimal degrees.

    Returns the converted value(s) as *strings*: a ``(ra, dec)`` tuple when
    both are supplied, otherwise whichever single value was given.
    Adapted from www.bdnyc.org/2012/10/decimal-deg-to-hms/.
    """
    ra_out = ''
    dec_out = ''
    if dec:
        d, m, s = (float(part) for part in dec.split())
        # The string test (rather than ``d < 0``) also catches '-0 ...' inputs.
        sign = -1 if str(d).startswith('-') else 1
        dec_out = '{0}'.format(sign * (abs(d) + (m / 60) + (s / 3600)))
    if ra:
        h, m, s = (float(part) for part in ra.split())
        sign = -1 if str(h).startswith('-') else 1
        ra_out = '{0}'.format(sign * ((abs(h) * 15) + (m / 4) + (s / 240)))
    if ra and dec:
        return (ra_out, dec_out)
    return ra_out or dec_out
# Read the Donaldson16 astrometry csv; each row describes one TWA candidate.
# Use a context manager so the handle is closed (the original leaked it).
with open(location + 'Donaldson16_TWA_astrometry.csv', 'r') as infile:
    data = [line for line in reader(infile)]
data = np.array(data)
nTWAstars = data.shape[0]
RA = np.zeros(nTWAstars)
DEC = np.zeros(nTWAstars)
# converting ra and dec measurements to decimal degrees
for i in range(nTWAstars):
    RA[i], DEC[i] = HMS2deg(data[i][1], data[i][2])
# Column layout (0-indexed): 1 RA, 2 Dec, 3/4 parallax +- err, 6/7 RV +- err,
# 9/10 pmRA +- err, 11/12 pmDE +- err -- presumably; confirm against the csv.
Plx, e_Plx, pmDE, e_pmDE, pmRA, e_pmRA =\
    data[:,3], data[:,4], data[:,11], data[:,12], data[:,9], data[:,10]
RV, e_RV = data[:,6], data[:,7]
# make a dictionary of the raw (string-typed) columns
stars = {}
stars['Name'] = data[:,0]
stars['RAdeg'] = RA
stars['DEdeg'] = DEC
stars['Plx'] = Plx
stars['e_Plx'] = e_Plx
stars['RV'] = RV
stars['e_RV'] = e_RV
stars['pmRA'] = pmRA
stars['e_pmRA']= e_pmRA
stars['pmDE'] = pmDE
stars['e_pmDE']= e_pmDE

# Build the astropy Table used by the traceback below.  ``np.float`` was
# removed in NumPy 1.20+; the builtin ``float`` is the documented replacement.
t = Table(
    [data[:,0],
    RA.astype(float),
    DEC.astype(float),
    Plx.astype(float),
    e_Plx.astype(float),
    RV.astype(float),
    e_RV.astype(float),
    pmRA.astype(float),
    e_pmRA.astype(float),
    pmDE.astype(float),
    e_pmDE.astype(float)],
    names=('Name', 'RAdeg','DEdeg','Plx','e_Plx','RV','e_RV',
        'pmRA','e_pmRA','pmDE','e_pmDE')
)
# Trace the candidate stars back in time over 0-15 Myr in 40 steps.
times = np.linspace(0,15,40)
# NOTE(review): leftover debugger breakpoint -- this halts any
# non-interactive run of the script; remove once the table is verified.
pdb.set_trace()
xyzuvw = tb.traceback(t,times,savefile=location + 'TWA_traceback2.pkl')
# Alternative input tables kept for reference:
#table_infile = location + "Astrometry_with_RVs_250pc_100kms_lesscols.fits"
# Same table but with all columns
table_infile = location + "Astrometry_with_RVs_250pc_100kms.fits"
#table_infile = location + "Astrometry_with_RVs_subset2.fits"
#table_infile = location + filename
table = pyfits.getdata(table_infile)
# print(table.field('Notional Group'))
# Row indices of stars flagged as TWA members, and of TWA 9A specifically.
TWA_ixs = np.where(table['Notional Group'] == 'TWA')
TWA_9A_ix = np.where(table['Name1'] == 'TWA 9A')[0]
# Load the precomputed traceback for the larger TGAS sample.
infile = location + filename
star_params = gf.read_stars(infile)
TWA_9A = star_params['stars'][TWA_9A_ix]
# twa_9a fields I'm interested in:
# ra_adopt, dec_adopt, parallax_1, pmra_1, pmdec, pmra_error, pmdec_error
# parallax_pmra_corr, parallax_pmdec_corr, ... check out traceback.py ln 272
# for more details
nstars = star_params['stars'].size
# Presumably interpolates each star's mean and inverse covariance to the
# assumed TWA age, then scores membership as the Gaussian overlap with the
# TWA group -- confirm against groupfitter.interp_icov / _overlap.
star_mns, star_icovs, star_icov_dets = gf.interp_icov(twa_age, star_params)
overlaps = ov.get_overlaps(
    twa_group.icov, twa_group.mean, twa_group.icov_det,
    star_icovs, star_mns, star_icov_dets, nstars
)
# Keep only stars whose overlap lies in the top 0.1% of the sample.
twa_star_ixs = np.where(overlaps > np.percentile(overlaps, 99.9))
# NOTE(review): second leftover breakpoint, presumably for interactive
# inspection of the candidate list.
pdb.set_trace()
| [
"chronostar._overlap.get_overlaps",
"numpy.where",
"chronostar.traceback.traceback",
"chronostar.retired.groupfitter.Group",
"pyfits.getdata",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"chronostar.retired.groupfitter.interp_icov",
"pdb.set_trace",
"numpy.percentile",
"csv.reader",
"chr... | [((539, 593), 'numpy.array', 'np.array', (['[12.49, -42.28, 21.55, -9.95, -17.91, -4.65]'], {}), '([12.49, -42.28, 21.55, -9.95, -17.91, -4.65])\n', (547, 593), True, 'import numpy as np\n'), ((700, 725), 'chronostar.retired.groupfitter.Group', 'gf.Group', (['twa_params', '(1.0)'], {}), '(twa_params, 1.0)\n', (708, 725), True, 'import chronostar.retired.groupfitter as gf\n'), ((2298, 2312), 'csv.reader', 'reader', (['infile'], {}), '(infile)\n', (2304, 2312), False, 'from csv import reader\n'), ((2340, 2354), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2348, 2354), True, 'import numpy as np\n'), ((2388, 2407), 'numpy.zeros', 'np.zeros', (['nTWAstars'], {}), '(nTWAstars)\n', (2396, 2407), True, 'import numpy as np\n'), ((2414, 2433), 'numpy.zeros', 'np.zeros', (['nTWAstars'], {}), '(nTWAstars)\n', (2422, 2433), True, 'import numpy as np\n'), ((3418, 3440), 'numpy.linspace', 'np.linspace', (['(0)', '(15)', '(40)'], {}), '(0, 15, 40)\n', (3429, 3440), True, 'import numpy as np\n'), ((3439, 3454), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3452, 3454), False, 'import pdb\n'), ((3464, 3528), 'chronostar.traceback.traceback', 'tb.traceback', (['t', 'times'], {'savefile': "(location + 'TWA_traceback2.pkl')"}), "(t, times, savefile=location + 'TWA_traceback2.pkl')\n", (3476, 3528), True, 'import chronostar.traceback as tb\n'), ((3811, 3839), 'pyfits.getdata', 'pyfits.getdata', (['table_infile'], {}), '(table_infile)\n', (3825, 3839), False, 'import pyfits\n'), ((3890, 3932), 'numpy.where', 'np.where', (["(table['Notional Group'] == 'TWA')"], {}), "(table['Notional Group'] == 'TWA')\n", (3898, 3932), True, 'import numpy as np\n'), ((4029, 4050), 'chronostar.retired.groupfitter.read_stars', 'gf.read_stars', (['infile'], {}), '(infile)\n', (4042, 4050), True, 'import chronostar.retired.groupfitter as gf\n'), ((4376, 4412), 'chronostar.retired.groupfitter.interp_icov', 'gf.interp_icov', (['twa_age', 'star_params'], {}), '(twa_age, 
star_params)\n', (4390, 4412), True, 'import chronostar.retired.groupfitter as gf\n'), ((4425, 4542), 'chronostar._overlap.get_overlaps', 'ov.get_overlaps', (['twa_group.icov', 'twa_group.mean', 'twa_group.icov_det', 'star_icovs', 'star_mns', 'star_icov_dets', 'nstars'], {}), '(twa_group.icov, twa_group.mean, twa_group.icov_det,\n star_icovs, star_mns, star_icov_dets, nstars)\n', (4440, 4542), True, 'import chronostar._overlap as ov\n'), ((4620, 4635), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (4633, 4635), False, 'import pdb\n'), ((3945, 3981), 'numpy.where', 'np.where', (["(table['Name1'] == 'TWA 9A')"], {}), "(table['Name1'] == 'TWA 9A')\n", (3953, 3981), True, 'import numpy as np\n'), ((4589, 4618), 'numpy.percentile', 'np.percentile', (['overlaps', '(99.9)'], {}), '(overlaps, 99.9)\n', (4602, 4618), True, 'import numpy as np\n')] |
from pycocotools.coco import COCO
import numpy as np
import numpy as np
from sklearn.cluster import KMeans
import seaborn as sns
import matplotlib.pyplot as plt
def get_ious(anns):
    """Assign a per-annotation IoU threshold from the shorter bbox side.

    :param anns: mapping of annotation id -> annotation dict with a
        ``'bbox'`` entry of the form ``[x, y, w, h]``.
    :return: ``(ious, iou_nums)`` where ``ious`` is the per-annotation
        threshold and ``iou_nums`` counts annotations per size bucket.
    """
    ious = []
    iou_nums = [0, 0, 0, 0]
    for ann in anns.values():
        bbox = ann['bbox']
        short_side = min(bbox[2], bbox[3])
        # Piecewise-linear ramp from 0.2 (tiny boxes) to 0.8 (huge boxes).
        if short_side < 40:
            iou, bucket = 0.2, 0
        elif short_side < 120:
            iou, bucket = short_side / 200, 1
        elif short_side < 420:
            iou, bucket = short_side / 1500 + 0.52, 2
        else:
            iou, bucket = 0.8, 3
        ious.append(iou)
        iou_nums[bucket] += 1
    return ious, iou_nums
def kmeans(x, n=3):
    """Cluster ``x`` into ``n`` groups with k-means and return the cluster centers.

    :param x: array-like of samples; a 1-D input is reshaped to a single
        feature column.
    :param n: number of clusters.
    :return: array of shape ``(n, n_features)`` with the cluster centers.
    """
    x = np.array(x)
    if len(x.shape) == 1:
        x = x.reshape(-1, 1)
    # Build an n-cluster estimator and fit it; labels_ and inertia_ were
    # previously read into unused locals and have been dropped.
    estimator = KMeans(n_clusters=n)
    estimator.fit(x)
    return estimator.cluster_centers_
def con(args):
    """Build scipy.optimize-style inequality constraints bounding each variable.

    For every pair ``(lo, hi)`` in ``args``, emits two ``'ineq'`` constraints
    enforcing ``lo <= x[i] <= hi``.

    :param args: iterable of ``(lo, hi)`` bounds, one per variable.
    :return: tuple of constraint dicts.
    """
    cons = []
    for i, (lo, hi) in enumerate(args):
        # Bind the loop variables as lambda defaults: a plain closure is
        # late-binding, so every constraint previously used the *final*
        # values of ``i`` and ``v`` and only bounded the last variable.
        cons.append({'type': 'ineq', 'fun': lambda x, i=i, lo=lo: x[i] - lo})
        cons.append({'type': 'ineq', 'fun': lambda x, i=i, hi=hi: -x[i] + hi})
    return tuple(cons)
# def get_cluster(x, n=3):
# centroids = kmeans(x, n)
# # centroids = np.sort(centroids.reshape(-1))
# print('=' * 24, 'get_cluster', '=' * 24, '\n', centroids)
# return centroids
# def iou_cluster(anns, n=3):
# ious, iou_nums = get_ious(anns)
# get_cluster(ious, 'iou.png', n=n)
def anchor_cluster(anns, n=3):
    """Cluster bbox aspect ratios (height / width) into ``n`` sorted centroids.

    ``anns`` may be a path to a COCO annotation file, a loaded COCO-style
    dict, or a list of annotation records carrying a 'bbox' field.
    Returns the centroids as an ascending Python list.
    """
    if isinstance(anns, str):
        anns = COCO(anns).dataset['annotations']
    elif isinstance(anns, dict):
        anns = anns['annotations']
    boxes = np.array([ann['bbox'] for ann in anns])
    ratios = boxes[:, 3] / boxes[:, 2]
    centers = kmeans(ratios, n).squeeze()
    return list(np.sort(centers, axis=0))
# hor_ver_ratio = boxes[:, 2] / boxes[:, 3]
# get_cluster(hor_ver_ratio, 'hor_ver_ratio.png', n=n)
def box_cluster(anns, n=3, sind=2, eind=4):
    """Cluster bbox columns [sind, eind) — width/height by default — into
    ``n`` centroids, sorted column-wise in ascending order.

    ``anns`` may be a path to a COCO annotation file, a loaded COCO-style
    dict, or a list of annotation records. Returns a list of centroid rows.
    """
    if isinstance(anns, str):
        anns = COCO(anns).dataset['annotations']
    elif isinstance(anns, dict):
        anns = anns['annotations']
    dims = np.array([ann['bbox'] for ann in anns])[:, sind:eind]
    centers = kmeans(dims, n)
    return list(np.sort(centers, axis=0))
def main():
    """Entry point placeholder.

    Example usage (left disabled): cluster a COCO annotation file, e.g.
    ``box_cluster('/path/to/instance_train.json', n=10)``.
    """
    pass


if __name__ == '__main__':
    main()
| [
"sklearn.cluster.KMeans",
"numpy.array",
"numpy.sort",
"pycocotools.coco.COCO"
] | [((685, 696), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (693, 696), True, 'import numpy as np\n'), ((792, 812), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n'}), '(n_clusters=n)\n', (798, 812), False, 'from sklearn.cluster import KMeans\n'), ((1812, 1827), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (1820, 1827), True, 'import numpy as np\n'), ((1965, 1991), 'numpy.sort', 'np.sort', (['centroids'], {'axis': '(0)'}), '(centroids, axis=0)\n', (1972, 1991), True, 'import numpy as np\n'), ((2390, 2405), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (2398, 2405), True, 'import numpy as np\n'), ((2661, 2687), 'numpy.sort', 'np.sort', (['centroids'], {'axis': '(0)'}), '(centroids, axis=0)\n', (2668, 2687), True, 'import numpy as np\n'), ((1639, 1649), 'pycocotools.coco.COCO', 'COCO', (['anns'], {}), '(anns)\n', (1643, 1649), False, 'from pycocotools.coco import COCO\n'), ((2217, 2227), 'pycocotools.coco.COCO', 'COCO', (['anns'], {}), '(anns)\n', (2221, 2227), False, 'from pycocotools.coco import COCO\n')] |
import os
import time
import tensorflow as tf
import numpy as np
from scipy.stats import spearmanr
from sklearn.metrics import r2_score
import mnist_input
import multi_mnist_cnn
from sinkhorn import sinkhorn_operator
import util
import random
# Make cuDNN deterministic and seed every RNG so runs are reproducible.
os.environ['TF_CUDNN_DETERMINISTIC'] = 'true'
tf.set_random_seed(94305)
random.seed(94305)
np.random.seed(94305)
# Command-line configuration (TF-1 flags).
flags = tf.app.flags
flags.DEFINE_integer('M', 1, 'batch size')
flags.DEFINE_integer('n', 3, 'number of elements to compare at a time')
flags.DEFINE_integer('l', 5, 'number of digits')
flags.DEFINE_integer('repetition', 0, 'number of repetition')
flags.DEFINE_float('pow', 1, 'softsort exponent for pairwise difference')
flags.DEFINE_float('tau', 5, 'temperature (dependent meaning)')
flags.DEFINE_string('method', 'deterministic_neuralsort',
                    'which method to use?')
flags.DEFINE_integer('n_s', 5, 'number of samples')
flags.DEFINE_integer('num_epochs', 200, 'number of epochs to train')
flags.DEFINE_float('lr', 1e-4, 'initial learning rate')
FLAGS = flags.FLAGS
# Unpack flags into module-level names used throughout the graph build.
n_s = FLAGS.n_s
NUM_EPOCHS = FLAGS.num_epochs
M = FLAGS.M
n = FLAGS.n
l = FLAGS.l
repetition = FLAGS.repetition
power = FLAGS.pow
tau = FLAGS.tau
method = FLAGS.method
initial_rate = FLAGS.lr
# Dataset iterators over multi-digit MNIST numbers in [0, 10**l - 1].
train_iterator, val_iterator, test_iterator = mnist_input.get_iterators(
    l, n, 10 ** l - 1, minibatch_size=M)
# `evaluation` toggles the relaxation temperature: training uses `tau`,
# evaluation uses a near-zero temperature (hard sort behaviour).
false_tensor = tf.convert_to_tensor(False)
evaluation = tf.placeholder_with_default(false_tensor, ())
temp = tf.cond(evaluation,
                false_fn=lambda: tf.convert_to_tensor(tau, dtype=tf.float32),
                true_fn=lambda: tf.convert_to_tensor(1e-10, dtype=tf.float32)
                )
experiment_id = 'median-%s-M%d-n%d-l%d-t%d-p%.2f' % (method, M, n, l, tau * 10, power)
checkpoint_path = 'checkpoints/%s/' % experiment_id
predictions_path = 'predictions/'
# Feedable iterator: the string `handle` selects train/val/test at run time.
handle = tf.placeholder(tf.string, ())
X_iterator = tf.data.Iterator.from_string_handle(
    handle,
    (tf.float32, tf.float32, tf.float32, tf.float32),
    ((M, n, l * 28, 28), (M,), (M, n), (M, n))
)
# X: stacked digit images; y: target (median) value; median_scores /
# true_scores: per-element supervision signals from the input pipeline.
X, y, median_scores, true_scores = X_iterator.get_next()
true_scores = tf.expand_dims(true_scores, 2)
# Ground-truth (near-hard) permutation from the true scores.
P_true = util.neuralsort(true_scores, 1e-10)
n_prime = n
def get_median_probs(P):
    """Extract the median row of a relaxed permutation matrix and renormalize.

    ``P`` is a (batch, n, n) soft permutation; row ``n // 2`` corresponds to
    the middle rank. Dividing by the row sum makes the entries a proper
    probability distribution even when P is only approximately doubly
    stochastic.
    """
    row = P[:, n // 2, :]
    return row / tf.reduce_sum(row, axis=1, keepdims=True)
# Build the loss graph for the chosen relaxation method. Each branch defines
# (at least) loss_phi / loss_theta and, for the sort-based methods,
# prob_median_eval used by the evaluation metric below.
if method == 'vanilla':
    # Direct regression baseline: CNN features -> small MLP -> scalar.
    with tf.variable_scope("phi"):
        representations = multi_mnist_cnn.deepnn(l, X, 10)
        representations = tf.reshape(representations, [M, n * 10])
        fc1 = tf.layers.dense(representations, 10, tf.nn.relu)
        fc2 = tf.layers.dense(fc1, 10, tf.nn.relu)
        fc3 = tf.layers.dense(fc2, 10, tf.nn.relu)
        y_hat = tf.layers.dense(fc3, 1)
        y_hat = tf.squeeze(y_hat)
    loss_phi = tf.reduce_sum(tf.squared_difference(y_hat, y))
    loss_theta = loss_phi
    # No permutation here; 0 keeps the correctly_identified metric defined.
    prob_median_eval = 0
elif method == 'sinkhorn':
    # phi scores -> Sinkhorn-relaxed permutation; theta regresses candidates.
    with tf.variable_scope('phi'):
        representations = multi_mnist_cnn.deepnn(l, X, n)
        pre_sinkhorn = tf.reshape(representations, [M, n, n])
    with tf.variable_scope('theta'):
        regression_candidates = multi_mnist_cnn.deepnn(l, X, 1)
        regression_candidates = tf.reshape(
            regression_candidates, [M, n])
    P_hat = sinkhorn_operator(pre_sinkhorn, temp=temp)
    prob_median = get_median_probs(P_hat)
    # Point estimate = expectation of candidates under the median distribution.
    point_estimates = tf.reduce_sum(
        prob_median * regression_candidates, axis=1)
    exp_loss = tf.squared_difference(y, point_estimates)
    loss_phi = tf.reduce_mean(exp_loss)
    loss_theta = loss_phi
    # Near-zero temperature gives a (nearly) hard permutation for evaluation.
    P_hat_eval = sinkhorn_operator(pre_sinkhorn, temp=1e-20)
    prob_median_eval = get_median_probs(P_hat_eval)
elif method == 'gumbel_sinkhorn':
    # Like 'sinkhorn' but averages over n_s Gumbel-perturbed samples.
    with tf.variable_scope('phi'):
        representations = multi_mnist_cnn.deepnn(l, X, n)
        pre_sinkhorn_orig = tf.reshape(representations, [M, n, n])
        pre_sinkhorn = tf.tile(pre_sinkhorn_orig, [
            n_s, 1, 1])
        pre_sinkhorn += util.sample_gumbel([n_s * M, n, n])
    with tf.variable_scope('theta'):
        regression_candidates = multi_mnist_cnn.deepnn(l, X, 1)
        regression_candidates = tf.reshape(
            regression_candidates, [M, n])
    P_hat = sinkhorn_operator(pre_sinkhorn, temp=temp)
    prob_median = get_median_probs(P_hat)
    prob_median = tf.reshape(prob_median, [n_s, M, n])
    point_estimates = tf.reduce_sum(
        prob_median * regression_candidates, axis=2)
    exp_loss = tf.squared_difference(y, point_estimates)
    loss_phi = tf.reduce_mean(exp_loss)
    loss_theta = loss_phi
    # Evaluation uses the unperturbed scores at near-zero temperature.
    P_hat_eval = sinkhorn_operator(pre_sinkhorn_orig, temp=1e-20)
    prob_median_eval = get_median_probs(P_hat_eval)
elif method == 'deterministic_neuralsort':
    # phi scores each element; NeuralSort relaxes the sort permutation.
    with tf.variable_scope('phi'):
        scores = multi_mnist_cnn.deepnn(l, X, 1)
        scores = tf.reshape(scores, [M, n, 1])
        P_hat = util.neuralsort(scores, temp)
        P_hat_eval = util.neuralsort(scores, 1e-20)
    with tf.variable_scope('theta'):
        regression_candidates = multi_mnist_cnn.deepnn(l, X, 1)
        regression_candidates = tf.reshape(
            regression_candidates, [M, n])
    losses = tf.squared_difference(
        regression_candidates, tf.expand_dims(y, 1))
    prob_median = get_median_probs(P_hat)
    prob_median_eval = get_median_probs(P_hat_eval)
    point_estimates = tf.reduce_sum(
        prob_median * regression_candidates, axis=1)
    exp_loss = tf.squared_difference(y, point_estimates)
    point_estimates_eval = tf.reduce_sum(
        prob_median_eval * regression_candidates, axis=1)
    # NOTE(review): this uses `point_estimates`, not `point_estimates_eval`;
    # looks like a copy-paste slip (loss_theta is not fed to any optimizer
    # below, so it may be inconsequential) -- verify before changing.
    exp_loss_eval = tf.squared_difference(y, point_estimates)
    loss_phi = tf.reduce_mean(exp_loss)
    loss_theta = tf.reduce_mean(exp_loss_eval)
elif method == 'deterministic_softsort':
    # Same as deterministic_neuralsort but with the SoftSort relaxation.
    with tf.variable_scope('phi'):
        scores = multi_mnist_cnn.deepnn(l, X, 1)
        scores = tf.reshape(scores, [M, n, 1])
        P_hat = util.softsort(scores, temp, power)
        P_hat_eval = util.softsort(scores, 1e-20, power)
    with tf.variable_scope('theta'):
        regression_candidates = multi_mnist_cnn.deepnn(l, X, 1)
        regression_candidates = tf.reshape(
            regression_candidates, [M, n])
    losses = tf.squared_difference(
        regression_candidates, tf.expand_dims(y, 1))
    prob_median = get_median_probs(P_hat)
    prob_median_eval = get_median_probs(P_hat_eval)
    point_estimates = tf.reduce_sum(
        prob_median * regression_candidates, axis=1)
    exp_loss = tf.squared_difference(y, point_estimates)
    point_estimates_eval = tf.reduce_sum(
        prob_median_eval * regression_candidates, axis=1)
    # NOTE(review): same suspected `point_estimates` vs `point_estimates_eval`
    # slip as in the deterministic_neuralsort branch -- verify.
    exp_loss_eval = tf.squared_difference(y, point_estimates)
    loss_phi = tf.reduce_mean(exp_loss)
    loss_theta = tf.reduce_mean(exp_loss_eval)
elif method == 'stochastic_neuralsort':
    # Monte-Carlo variant: n_s Gumbel-perturbed score samples per example.
    with tf.variable_scope('phi'):
        scores = multi_mnist_cnn.deepnn(l, X, 1)
        scores = tf.reshape(scores, [M, n, 1])
        scores = tf.tile(scores, [n_s, 1, 1])
        scores += util.sample_gumbel([M * n_s, n, 1])
        P_hat = util.neuralsort(scores, temp)
        P_hat_eval = util.neuralsort(scores, 1e-20)
    with tf.variable_scope('theta'):
        regression_candidates = multi_mnist_cnn.deepnn(l, X, 1)
        regression_candidates = tf.reshape(
            regression_candidates, [M, n])
    res_y = tf.expand_dims(y, 1)
    losses = tf.squared_difference(regression_candidates, res_y)
    prob_median = get_median_probs(P_hat)
    prob_median = tf.reshape(prob_median, [n_s, M, n])
    prob_median_eval = get_median_probs(P_hat_eval)
    prob_median_eval = tf.reshape(prob_median_eval, [n_s, M, n])
    exp_losses = tf.reduce_sum(prob_median * losses, axis=2)
    exp_losses_eval = tf.reduce_sum(
        prob_median_eval * losses, axis=2)
    # Evaluation prediction: sample-averaged expected candidate value.
    point_estimates_eval = tf.reduce_mean(tf.reduce_sum(prob_median_eval * regression_candidates, axis=2), axis=0)
    loss_phi = tf.reduce_mean(exp_losses)
    loss_theta = tf.reduce_mean(exp_losses_eval)
elif method == 'stochastic_softsort':
    # Same Monte-Carlo scheme with the SoftSort relaxation.
    with tf.variable_scope('phi'):
        scores = multi_mnist_cnn.deepnn(l, X, 1)
        scores = tf.reshape(scores, [M, n, 1])
        scores = tf.tile(scores, [n_s, 1, 1])
        scores += util.sample_gumbel([M * n_s, n, 1])
        P_hat = util.softsort(scores, temp, power)
        P_hat_eval = util.softsort(scores, 1e-20, power)
    with tf.variable_scope('theta'):
        regression_candidates = multi_mnist_cnn.deepnn(l, X, 1)
        regression_candidates = tf.reshape(
            regression_candidates, [M, n])
    res_y = tf.expand_dims(y, 1)
    losses = tf.squared_difference(regression_candidates, res_y)
    prob_median = get_median_probs(P_hat)
    prob_median = tf.reshape(prob_median, [n_s, M, n])
    prob_median_eval = get_median_probs(P_hat_eval)
    prob_median_eval = tf.reshape(prob_median_eval, [n_s, M, n])
    exp_losses = tf.reduce_sum(prob_median * losses, axis=2)
    exp_losses_eval = tf.reduce_sum(
        prob_median_eval * losses, axis=2)
    point_estimates_eval = tf.reduce_mean(tf.reduce_sum(prob_median_eval * regression_candidates, axis=2), axis=0)
    loss_phi = tf.reduce_mean(exp_losses)
    loss_theta = tf.reduce_mean(exp_losses_eval)
else:
    raise ValueError("No such method.")
# Sampling-based methods produce n_s losses per example; normalize the
# correctly-identified metric accordingly.
num_losses = M * n_s if method == 'stochastic_neuralsort' \
    or method == 'stochastic_softsort' \
    or method == 'gumbel_sinkhorn' else M
# Fraction of probability mass the model places on the true median element.
correctly_identified = tf.reduce_sum(
    prob_median_eval * median_scores) / num_losses
# Separate variable collections for the scorer (phi) and regressor (theta).
phi = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='phi')
theta = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='theta')
train_phi = tf.train.AdamOptimizer(
    initial_rate).minimize(loss_phi, var_list=phi)
if method != 'vanilla':
    # NOTE(review): theta is also trained against loss_phi (not loss_theta);
    # possibly intentional since both heads share the same objective -- verify.
    train_theta = tf.train.AdamOptimizer(initial_rate).minimize(
        loss_phi, var_list=theta)
    train_step = tf.group(train_phi, train_theta)
else:
    train_step = train_phi
saver = tf.train.Saver()
sess = tf.Session()
# Per-experiment log file; prnt() mirrors stdout into it. Closed only at the
# very end of the script -- NOTE(review): not closed on early failure.
logfile = open('./logs/%s.log' % experiment_id, 'w')
def prnt(*args):
    """Echo *args to stdout and mirror them into the experiment log file."""
    for target in (None, logfile):  # file=None means sys.stdout
        print(*args, file=target)
# Initialize all graph variables, then fetch the string handles that select
# which dataset the feedable iterator reads from at run time.
sess.run(tf.global_variables_initializer())
train_sh, validate_sh, test_sh = sess.run([
    train_iterator.string_handle(),
    val_iterator.string_handle(),
    test_iterator.string_handle()
])
# Minibatches per epoch for each split (each example consumes l * M digits).
TRAIN_PER_EPOCH = mnist_input.TRAIN_SET_SIZE // (l * M)
VAL_PER_EPOCH = mnist_input.VAL_SET_SIZE // (l * M)
TEST_PER_EPOCH = mnist_input.TEST_SET_SIZE // (l * M)
# Best validation loss seen so far (used for checkpointing in test()).
best_val = float('inf')
tiebreaker_val = -1  # NOTE(review): never read anywhere below
def save_model(epoch):
    """Write a checkpoint for the current session, tagged with the epoch."""
    target = os.path.join(checkpoint_path, 'checkpoint')
    saver.save(sess, target, global_step=epoch)
def load_model():
    """Restore the most recent checkpoint, failing loudly if none exists."""
    ckpt = tf.train.latest_checkpoint(checkpoint_path)
    if ckpt is None:
        raise Exception("No model found.")
    print("Loaded model %s." % ckpt)
    saver.restore(sess, ckpt)
def train(epoch):
    """Run one training epoch and log the mean minibatch loss."""
    batch_losses = []
    for _ in range(TRAIN_PER_EPOCH):
        _, batch_loss = sess.run([train_step, loss_phi],
                                 feed_dict={handle: train_sh})
        batch_losses.append(batch_loss)
    mean_loss = sum(batch_losses) / len(batch_losses)
    prnt('Average loss:', mean_loss)
def test(epoch, val=False):
    """Evaluate on the validation (val=True) or test split.

    Accumulates per-batch metrics, reports correctly-identified rate, MSE,
    R2 and Spearman correlation, saves raw predictions for the test split,
    and checkpoints the model whenever validation MSE improves.
    """
    global best_val
    c_is = []
    l_vs = []
    y_evals = []
    point_estimates_eval_evals = []
    for _ in range(VAL_PER_EPOCH if val else TEST_PER_EPOCH):
        # `evaluation: True` drives the temperature to ~0 (hard sort).
        # The stochastic branches feed res_y (shape (M, 1)) instead of y.
        if method.startswith('deterministic'):
            c_i, l_v, y_eval, point_estimates_eval_eval =\
                sess.run([correctly_identified, loss_phi, y, point_estimates_eval], feed_dict={
                    handle: validate_sh if val else test_sh, evaluation: True})
        elif method.startswith('stochastic'):
            c_i, l_v, y_eval, point_estimates_eval_eval =\
                sess.run([correctly_identified, loss_phi, res_y, point_estimates_eval], feed_dict={
                    handle: validate_sh if val else test_sh, evaluation: True})
        else:
            raise ValueError('Cannot handle other methods because I need their prediction tensors and they are '
                             'named differently.')
        c_is.append(c_i)
        l_vs.append(l_v)
        y_evals.append(y_eval.reshape(-1))
        point_estimates_eval_evals.append(point_estimates_eval_eval.reshape(-1))
    y_eval = np.concatenate(y_evals)
    point_estimates_eval_eval = np.concatenate(point_estimates_eval_evals)
    id_suffix = "_N_%s_%s_TAU_%s_LR_%s_E_%s_REP_%s.txt" % (
        str(n), str(method), str(tau), str(initial_rate), str(NUM_EPOCHS), str(repetition))
    if not val:
        # Persist raw targets and predictions for offline analysis.
        np.savetxt(predictions_path + 'y_eval' + id_suffix, y_eval)
        np.savetxt(predictions_path + 'point_estimates_eval_eval' + id_suffix, point_estimates_eval_eval)
    c_i = sum(c_is) / len(c_is)
    l_v = sum(l_vs) / len(l_vs)
    r2 = r2_score(y_eval, point_estimates_eval_eval)
    spearman_r = spearmanr(y_eval, point_estimates_eval_eval).correlation
    if val:
        prnt("Validation set: correctly identified %f, mean squared error %f, R2 %f, spearmanr %f" %
             (c_i, l_v, r2, spearman_r))
        # Checkpoint on validation improvement (lower MSE is better).
        if l_v < best_val:
            best_val = l_v
            prnt('Saving...')
            save_model(epoch)
    else:
        prnt("Test set: correctly identified %f, mean squared error %f, R2 %f, spearmanr %f" %
             (c_i, l_v, r2, spearman_r))
# Main loop: train, validate (checkpointing on improvement), then reload the
# best checkpoint and report final test metrics plus timing.
total_training_time = 0
for epoch in range(1, NUM_EPOCHS + 1):
    prnt('Epoch', epoch, '(%s)' % experiment_id)
    start_time = time.time()
    train(epoch)
    end_time = time.time()
    # Only the train() call is timed; evaluation is excluded.
    total_training_time += (end_time - start_time)
    test(epoch, val=True)
    logfile.flush()
# Evaluate the best-on-validation checkpoint on the held-out test split.
load_model()
test(epoch, val=False)
training_time_per_epoch = total_training_time / NUM_EPOCHS
print("total_training_time: %f" % total_training_time)
print("training_time_per_epoch: %f" % training_time_per_epoch)
sess.close()
logfile.close()
| [
"tensorflow.tile",
"tensorflow.reduce_sum",
"tensorflow.group",
"sinkhorn.sinkhorn_operator",
"tensorflow.reduce_mean",
"tensorflow.set_random_seed",
"sklearn.metrics.r2_score",
"multi_mnist_cnn.deepnn",
"util.softsort",
"tensorflow.data.Iterator.from_string_handle",
"tensorflow.placeholder",
... | [((291, 316), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(94305)'], {}), '(94305)\n', (309, 316), True, 'import tensorflow as tf\n'), ((317, 335), 'random.seed', 'random.seed', (['(94305)'], {}), '(94305)\n', (328, 335), False, 'import random\n'), ((336, 357), 'numpy.random.seed', 'np.random.seed', (['(94305)'], {}), '(94305)\n', (350, 357), True, 'import numpy as np\n'), ((1284, 1346), 'mnist_input.get_iterators', 'mnist_input.get_iterators', (['l', 'n', '(10 ** l - 1)'], {'minibatch_size': 'M'}), '(l, n, 10 ** l - 1, minibatch_size=M)\n', (1309, 1346), False, 'import mnist_input\n'), ((1368, 1395), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(False)'], {}), '(False)\n', (1388, 1395), True, 'import tensorflow as tf\n'), ((1409, 1454), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['false_tensor', '()'], {}), '(false_tensor, ())\n', (1436, 1454), True, 'import tensorflow as tf\n'), ((1837, 1866), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string', '()'], {}), '(tf.string, ())\n', (1851, 1866), True, 'import tensorflow as tf\n'), ((1880, 2022), 'tensorflow.data.Iterator.from_string_handle', 'tf.data.Iterator.from_string_handle', (['handle', '(tf.float32, tf.float32, tf.float32, tf.float32)', '((M, n, l * 28, 28), (M,), (M, n), (M, n))'], {}), '(handle, (tf.float32, tf.float32, tf.\n float32, tf.float32), ((M, n, l * 28, 28), (M,), (M, n), (M, n)))\n', (1915, 2022), True, 'import tensorflow as tf\n'), ((2105, 2135), 'tensorflow.expand_dims', 'tf.expand_dims', (['true_scores', '(2)'], {}), '(true_scores, 2)\n', (2119, 2135), True, 'import tensorflow as tf\n'), ((2145, 2180), 'util.neuralsort', 'util.neuralsort', (['true_scores', '(1e-10)'], {}), '(true_scores, 1e-10)\n', (2160, 2180), False, 'import util\n'), ((9553, 9617), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""phi"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='phi')\n", (9570, 9617), 
True, 'import tensorflow as tf\n'), ((9626, 9692), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""theta"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='theta')\n", (9643, 9692), True, 'import tensorflow as tf\n'), ((9997, 10013), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (10011, 10013), True, 'import tensorflow as tf\n'), ((10022, 10034), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (10032, 10034), True, 'import tensorflow as tf\n'), ((2274, 2324), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['median_strip'], {'axis': '(1)', 'keepdims': '(True)'}), '(median_strip, axis=1, keepdims=True)\n', (2287, 2324), True, 'import tensorflow as tf\n'), ((2543, 2583), 'tensorflow.reshape', 'tf.reshape', (['representations', '[M, n * 10]'], {}), '(representations, [M, n * 10])\n', (2553, 2583), True, 'import tensorflow as tf\n'), ((2594, 2642), 'tensorflow.layers.dense', 'tf.layers.dense', (['representations', '(10)', 'tf.nn.relu'], {}), '(representations, 10, tf.nn.relu)\n', (2609, 2642), True, 'import tensorflow as tf\n'), ((2653, 2689), 'tensorflow.layers.dense', 'tf.layers.dense', (['fc1', '(10)', 'tf.nn.relu'], {}), '(fc1, 10, tf.nn.relu)\n', (2668, 2689), True, 'import tensorflow as tf\n'), ((2700, 2736), 'tensorflow.layers.dense', 'tf.layers.dense', (['fc2', '(10)', 'tf.nn.relu'], {}), '(fc2, 10, tf.nn.relu)\n', (2715, 2736), True, 'import tensorflow as tf\n'), ((2749, 2772), 'tensorflow.layers.dense', 'tf.layers.dense', (['fc3', '(1)'], {}), '(fc3, 1)\n', (2764, 2772), True, 'import tensorflow as tf\n'), ((2785, 2802), 'tensorflow.squeeze', 'tf.squeeze', (['y_hat'], {}), '(y_hat)\n', (2795, 2802), True, 'import tensorflow as tf\n'), ((9480, 9527), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median_eval * median_scores)'], {}), '(prob_median_eval * median_scores)\n', (9493, 9527), True, 'import tensorflow as tf\n'), ((9922, 9954), 'tensorflow.group', 'tf.group', (['train_phi', 
'train_theta'], {}), '(train_phi, train_theta)\n', (9930, 9954), True, 'import tensorflow as tf\n'), ((10166, 10199), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10197, 10199), True, 'import tensorflow as tf\n'), ((10691, 10734), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_path'], {}), '(checkpoint_path)\n', (10717, 10734), True, 'import tensorflow as tf\n'), ((12282, 12305), 'numpy.concatenate', 'np.concatenate', (['y_evals'], {}), '(y_evals)\n', (12296, 12305), True, 'import numpy as np\n'), ((12338, 12380), 'numpy.concatenate', 'np.concatenate', (['point_estimates_eval_evals'], {}), '(point_estimates_eval_evals)\n', (12352, 12380), True, 'import numpy as np\n'), ((12797, 12840), 'sklearn.metrics.r2_score', 'r2_score', (['y_eval', 'point_estimates_eval_eval'], {}), '(y_eval, point_estimates_eval_eval)\n', (12805, 12840), False, 'from sklearn.metrics import r2_score\n'), ((13461, 13472), 'time.time', 'time.time', ([], {}), '()\n', (13470, 13472), False, 'import time\n'), ((13505, 13516), 'time.time', 'time.time', ([], {}), '()\n', (13514, 13516), False, 'import time\n'), ((2436, 2460), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""phi"""'], {}), "('phi')\n", (2453, 2460), True, 'import tensorflow as tf\n'), ((2488, 2520), 'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', '(10)'], {}), '(l, X, 10)\n', (2510, 2520), False, 'import multi_mnist_cnn\n'), ((2832, 2863), 'tensorflow.squared_difference', 'tf.squared_difference', (['y_hat', 'y'], {}), '(y_hat, y)\n', (2853, 2863), True, 'import tensorflow as tf\n'), ((3300, 3342), 'sinkhorn.sinkhorn_operator', 'sinkhorn_operator', (['pre_sinkhorn'], {'temp': 'temp'}), '(pre_sinkhorn, temp=temp)\n', (3317, 3342), False, 'from sinkhorn import sinkhorn_operator\n'), ((3408, 3466), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median * regression_candidates)'], {'axis': '(1)'}), '(prob_median * 
regression_candidates, axis=1)\n', (3421, 3466), True, 'import tensorflow as tf\n'), ((3491, 3532), 'tensorflow.squared_difference', 'tf.squared_difference', (['y', 'point_estimates'], {}), '(y, point_estimates)\n', (3512, 3532), True, 'import tensorflow as tf\n'), ((3549, 3573), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['exp_loss'], {}), '(exp_loss)\n', (3563, 3573), True, 'import tensorflow as tf\n'), ((3618, 3661), 'sinkhorn.sinkhorn_operator', 'sinkhorn_operator', (['pre_sinkhorn'], {'temp': '(1e-20)'}), '(pre_sinkhorn, temp=1e-20)\n', (3635, 3661), False, 'from sinkhorn import sinkhorn_operator\n'), ((9706, 9742), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['initial_rate'], {}), '(initial_rate)\n', (9728, 9742), True, 'import tensorflow as tf\n'), ((12557, 12616), 'numpy.savetxt', 'np.savetxt', (["(predictions_path + 'y_eval' + id_suffix)", 'y_eval'], {}), "(predictions_path + 'y_eval' + id_suffix, y_eval)\n", (12567, 12616), True, 'import numpy as np\n'), ((12625, 12726), 'numpy.savetxt', 'np.savetxt', (["(predictions_path + 'point_estimates_eval_eval' + id_suffix)", 'point_estimates_eval_eval'], {}), "(predictions_path + 'point_estimates_eval_eval' + id_suffix,\n point_estimates_eval_eval)\n", (12635, 12726), True, 'import numpy as np\n'), ((12858, 12902), 'scipy.stats.spearmanr', 'spearmanr', (['y_eval', 'point_estimates_eval_eval'], {}), '(y_eval, point_estimates_eval_eval)\n', (12867, 12902), False, 'from scipy.stats import spearmanr\n'), ((1514, 1557), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['tau'], {'dtype': 'tf.float32'}), '(tau, dtype=tf.float32)\n', (1534, 1557), True, 'import tensorflow as tf\n'), ((1590, 1635), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(1e-10)'], {'dtype': 'tf.float32'}), '(1e-10, dtype=tf.float32)\n', (1610, 1635), True, 'import tensorflow as tf\n'), ((2953, 2977), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""phi"""'], {}), "('phi')\n", (2970, 2977), True, 'import 
tensorflow as tf\n'), ((3005, 3036), 'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', 'n'], {}), '(l, X, n)\n', (3027, 3036), False, 'import multi_mnist_cnn\n'), ((3060, 3098), 'tensorflow.reshape', 'tf.reshape', (['representations', '[M, n, n]'], {}), '(representations, [M, n, n])\n', (3070, 3098), True, 'import tensorflow as tf\n'), ((3108, 3134), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""theta"""'], {}), "('theta')\n", (3125, 3134), True, 'import tensorflow as tf\n'), ((3168, 3199), 'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', '(1)'], {}), '(l, X, 1)\n', (3190, 3199), False, 'import multi_mnist_cnn\n'), ((3232, 3273), 'tensorflow.reshape', 'tf.reshape', (['regression_candidates', '[M, n]'], {}), '(regression_candidates, [M, n])\n', (3242, 3273), True, 'import tensorflow as tf\n'), ((4266, 4308), 'sinkhorn.sinkhorn_operator', 'sinkhorn_operator', (['pre_sinkhorn'], {'temp': 'temp'}), '(pre_sinkhorn, temp=temp)\n', (4283, 4308), False, 'from sinkhorn import sinkhorn_operator\n'), ((4369, 4405), 'tensorflow.reshape', 'tf.reshape', (['prob_median', '[n_s, M, n]'], {}), '(prob_median, [n_s, M, n])\n', (4379, 4405), True, 'import tensorflow as tf\n'), ((4429, 4487), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median * regression_candidates)'], {'axis': '(2)'}), '(prob_median * regression_candidates, axis=2)\n', (4442, 4487), True, 'import tensorflow as tf\n'), ((4512, 4553), 'tensorflow.squared_difference', 'tf.squared_difference', (['y', 'point_estimates'], {}), '(y, point_estimates)\n', (4533, 4553), True, 'import tensorflow as tf\n'), ((4570, 4594), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['exp_loss'], {}), '(exp_loss)\n', (4584, 4594), True, 'import tensorflow as tf\n'), ((4639, 4687), 'sinkhorn.sinkhorn_operator', 'sinkhorn_operator', (['pre_sinkhorn_orig'], {'temp': '(1e-20)'}), '(pre_sinkhorn_orig, temp=1e-20)\n', (4656, 4687), False, 'from sinkhorn import sinkhorn_operator\n'), ((9824, 9860), 
'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['initial_rate'], {}), '(initial_rate)\n', (9846, 9860), True, 'import tensorflow as tf\n'), ((3758, 3782), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""phi"""'], {}), "('phi')\n", (3775, 3782), True, 'import tensorflow as tf\n'), ((3810, 3841), 'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', 'n'], {}), '(l, X, n)\n', (3832, 3841), False, 'import multi_mnist_cnn\n'), ((3870, 3908), 'tensorflow.reshape', 'tf.reshape', (['representations', '[M, n, n]'], {}), '(representations, [M, n, n])\n', (3880, 3908), True, 'import tensorflow as tf\n'), ((3932, 3971), 'tensorflow.tile', 'tf.tile', (['pre_sinkhorn_orig', '[n_s, 1, 1]'], {}), '(pre_sinkhorn_orig, [n_s, 1, 1])\n', (3939, 3971), True, 'import tensorflow as tf\n'), ((4028, 4063), 'util.sample_gumbel', 'util.sample_gumbel', (['[n_s * M, n, n]'], {}), '([n_s * M, n, n])\n', (4046, 4063), False, 'import util\n'), ((4074, 4100), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""theta"""'], {}), "('theta')\n", (4091, 4100), True, 'import tensorflow as tf\n'), ((4134, 4165), 'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', '(1)'], {}), '(l, X, 1)\n', (4156, 4165), False, 'import multi_mnist_cnn\n'), ((4198, 4239), 'tensorflow.reshape', 'tf.reshape', (['regression_candidates', '[M, n]'], {}), '(regression_candidates, [M, n])\n', (4208, 4239), True, 'import tensorflow as tf\n'), ((4928, 4957), 'util.neuralsort', 'util.neuralsort', (['scores', 'temp'], {}), '(scores, temp)\n', (4943, 4957), False, 'import util\n'), ((4975, 5005), 'util.neuralsort', 'util.neuralsort', (['scores', '(1e-20)'], {}), '(scores, 1e-20)\n', (4990, 5005), False, 'import util\n'), ((5402, 5460), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median * regression_candidates)'], {'axis': '(1)'}), '(prob_median * regression_candidates, axis=1)\n', (5415, 5460), True, 'import tensorflow as tf\n'), ((5485, 5526), 'tensorflow.squared_difference', 
'tf.squared_difference', (['y', 'point_estimates'], {}), '(y, point_estimates)\n', (5506, 5526), True, 'import tensorflow as tf\n'), ((5555, 5618), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median_eval * regression_candidates)'], {'axis': '(1)'}), '(prob_median_eval * regression_candidates, axis=1)\n', (5568, 5618), True, 'import tensorflow as tf\n'), ((5648, 5689), 'tensorflow.squared_difference', 'tf.squared_difference', (['y', 'point_estimates'], {}), '(y, point_estimates)\n', (5669, 5689), True, 'import tensorflow as tf\n'), ((5706, 5730), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['exp_loss'], {}), '(exp_loss)\n', (5720, 5730), True, 'import tensorflow as tf\n'), ((5748, 5777), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['exp_loss_eval'], {}), '(exp_loss_eval)\n', (5762, 5777), True, 'import tensorflow as tf\n'), ((4793, 4817), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""phi"""'], {}), "('phi')\n", (4810, 4817), True, 'import tensorflow as tf\n'), ((4836, 4867), 'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', '(1)'], {}), '(l, X, 1)\n', (4858, 4867), False, 'import multi_mnist_cnn\n'), ((4885, 4914), 'tensorflow.reshape', 'tf.reshape', (['scores', '[M, n, 1]'], {}), '(scores, [M, n, 1])\n', (4895, 4914), True, 'import tensorflow as tf\n'), ((5016, 5042), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""theta"""'], {}), "('theta')\n", (5033, 5042), True, 'import tensorflow as tf\n'), ((5076, 5107), 'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', '(1)'], {}), '(l, X, 1)\n', (5098, 5107), False, 'import multi_mnist_cnn\n'), ((5140, 5181), 'tensorflow.reshape', 'tf.reshape', (['regression_candidates', '[M, n]'], {}), '(regression_candidates, [M, n])\n', (5150, 5181), True, 'import tensorflow as tf\n'), ((5263, 5283), 'tensorflow.expand_dims', 'tf.expand_dims', (['y', '(1)'], {}), '(y, 1)\n', (5277, 5283), True, 'import tensorflow as tf\n'), ((5964, 5998), 'util.softsort', 'util.softsort', (['scores', 
'temp', 'power'], {}), '(scores, temp, power)\n', (5977, 5998), False, 'import util\n'), ((6016, 6051), 'util.softsort', 'util.softsort', (['scores', '(1e-20)', 'power'], {}), '(scores, 1e-20, power)\n', (6029, 6051), False, 'import util\n'), ((6448, 6506), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median * regression_candidates)'], {'axis': '(1)'}), '(prob_median * regression_candidates, axis=1)\n', (6461, 6506), True, 'import tensorflow as tf\n'), ((6531, 6572), 'tensorflow.squared_difference', 'tf.squared_difference', (['y', 'point_estimates'], {}), '(y, point_estimates)\n', (6552, 6572), True, 'import tensorflow as tf\n'), ((6601, 6664), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median_eval * regression_candidates)'], {'axis': '(1)'}), '(prob_median_eval * regression_candidates, axis=1)\n', (6614, 6664), True, 'import tensorflow as tf\n'), ((6694, 6735), 'tensorflow.squared_difference', 'tf.squared_difference', (['y', 'point_estimates'], {}), '(y, point_estimates)\n', (6715, 6735), True, 'import tensorflow as tf\n'), ((6752, 6776), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['exp_loss'], {}), '(exp_loss)\n', (6766, 6776), True, 'import tensorflow as tf\n'), ((6794, 6823), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['exp_loss_eval'], {}), '(exp_loss_eval)\n', (6808, 6823), True, 'import tensorflow as tf\n'), ((5829, 5853), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""phi"""'], {}), "('phi')\n", (5846, 5853), True, 'import tensorflow as tf\n'), ((5872, 5903), 'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', '(1)'], {}), '(l, X, 1)\n', (5894, 5903), False, 'import multi_mnist_cnn\n'), ((5921, 5950), 'tensorflow.reshape', 'tf.reshape', (['scores', '[M, n, 1]'], {}), '(scores, [M, n, 1])\n', (5931, 5950), True, 'import tensorflow as tf\n'), ((6062, 6088), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""theta"""'], {}), "('theta')\n", (6079, 6088), True, 'import tensorflow as tf\n'), ((6122, 6153), 
'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', '(1)'], {}), '(l, X, 1)\n', (6144, 6153), False, 'import multi_mnist_cnn\n'), ((6186, 6227), 'tensorflow.reshape', 'tf.reshape', (['regression_candidates', '[M, n]'], {}), '(regression_candidates, [M, n])\n', (6196, 6227), True, 'import tensorflow as tf\n'), ((6309, 6329), 'tensorflow.expand_dims', 'tf.expand_dims', (['y', '(1)'], {}), '(y, 1)\n', (6323, 6329), True, 'import tensorflow as tf\n'), ((7109, 7138), 'util.neuralsort', 'util.neuralsort', (['scores', 'temp'], {}), '(scores, temp)\n', (7124, 7138), False, 'import util\n'), ((7156, 7186), 'util.neuralsort', 'util.neuralsort', (['scores', '(1e-20)'], {}), '(scores, 1e-20)\n', (7171, 7186), False, 'import util\n'), ((7389, 7409), 'tensorflow.expand_dims', 'tf.expand_dims', (['y', '(1)'], {}), '(y, 1)\n', (7403, 7409), True, 'import tensorflow as tf\n'), ((7424, 7475), 'tensorflow.squared_difference', 'tf.squared_difference', (['regression_candidates', 'res_y'], {}), '(regression_candidates, res_y)\n', (7445, 7475), True, 'import tensorflow as tf\n'), ((7537, 7573), 'tensorflow.reshape', 'tf.reshape', (['prob_median', '[n_s, M, n]'], {}), '(prob_median, [n_s, M, n])\n', (7547, 7573), True, 'import tensorflow as tf\n'), ((7649, 7690), 'tensorflow.reshape', 'tf.reshape', (['prob_median_eval', '[n_s, M, n]'], {}), '(prob_median_eval, [n_s, M, n])\n', (7659, 7690), True, 'import tensorflow as tf\n'), ((7709, 7752), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median * losses)'], {'axis': '(2)'}), '(prob_median * losses, axis=2)\n', (7722, 7752), True, 'import tensorflow as tf\n'), ((7775, 7823), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median_eval * losses)'], {'axis': '(2)'}), '(prob_median_eval * losses, axis=2)\n', (7788, 7823), True, 'import tensorflow as tf\n'), ((7965, 7991), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['exp_losses'], {}), '(exp_losses)\n', (7979, 7991), True, 'import tensorflow as tf\n'), ((8009, 8040), 
'tensorflow.reduce_mean', 'tf.reduce_mean', (['exp_losses_eval'], {}), '(exp_losses_eval)\n', (8023, 8040), True, 'import tensorflow as tf\n'), ((6874, 6898), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""phi"""'], {}), "('phi')\n", (6891, 6898), True, 'import tensorflow as tf\n'), ((6917, 6948), 'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', '(1)'], {}), '(l, X, 1)\n', (6939, 6948), False, 'import multi_mnist_cnn\n'), ((6966, 6995), 'tensorflow.reshape', 'tf.reshape', (['scores', '[M, n, 1]'], {}), '(scores, [M, n, 1])\n', (6976, 6995), True, 'import tensorflow as tf\n'), ((7013, 7041), 'tensorflow.tile', 'tf.tile', (['scores', '[n_s, 1, 1]'], {}), '(scores, [n_s, 1, 1])\n', (7020, 7041), True, 'import tensorflow as tf\n'), ((7060, 7095), 'util.sample_gumbel', 'util.sample_gumbel', (['[M * n_s, n, 1]'], {}), '([M * n_s, n, 1])\n', (7078, 7095), False, 'import util\n'), ((7197, 7223), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""theta"""'], {}), "('theta')\n", (7214, 7223), True, 'import tensorflow as tf\n'), ((7257, 7288), 'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', '(1)'], {}), '(l, X, 1)\n', (7279, 7288), False, 'import multi_mnist_cnn\n'), ((7321, 7362), 'tensorflow.reshape', 'tf.reshape', (['regression_candidates', '[M, n]'], {}), '(regression_candidates, [M, n])\n', (7331, 7362), True, 'import tensorflow as tf\n'), ((7876, 7939), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median_eval * regression_candidates)'], {'axis': '(2)'}), '(prob_median_eval * regression_candidates, axis=2)\n', (7889, 7939), True, 'import tensorflow as tf\n'), ((8324, 8358), 'util.softsort', 'util.softsort', (['scores', 'temp', 'power'], {}), '(scores, temp, power)\n', (8337, 8358), False, 'import util\n'), ((8376, 8411), 'util.softsort', 'util.softsort', (['scores', '(1e-20)', 'power'], {}), '(scores, 1e-20, power)\n', (8389, 8411), False, 'import util\n'), ((8614, 8634), 'tensorflow.expand_dims', 'tf.expand_dims', 
(['y', '(1)'], {}), '(y, 1)\n', (8628, 8634), True, 'import tensorflow as tf\n'), ((8649, 8700), 'tensorflow.squared_difference', 'tf.squared_difference', (['regression_candidates', 'res_y'], {}), '(regression_candidates, res_y)\n', (8670, 8700), True, 'import tensorflow as tf\n'), ((8762, 8798), 'tensorflow.reshape', 'tf.reshape', (['prob_median', '[n_s, M, n]'], {}), '(prob_median, [n_s, M, n])\n', (8772, 8798), True, 'import tensorflow as tf\n'), ((8874, 8915), 'tensorflow.reshape', 'tf.reshape', (['prob_median_eval', '[n_s, M, n]'], {}), '(prob_median_eval, [n_s, M, n])\n', (8884, 8915), True, 'import tensorflow as tf\n'), ((8934, 8977), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median * losses)'], {'axis': '(2)'}), '(prob_median * losses, axis=2)\n', (8947, 8977), True, 'import tensorflow as tf\n'), ((9000, 9048), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median_eval * losses)'], {'axis': '(2)'}), '(prob_median_eval * losses, axis=2)\n', (9013, 9048), True, 'import tensorflow as tf\n'), ((9190, 9216), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['exp_losses'], {}), '(exp_losses)\n', (9204, 9216), True, 'import tensorflow as tf\n'), ((9234, 9265), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['exp_losses_eval'], {}), '(exp_losses_eval)\n', (9248, 9265), True, 'import tensorflow as tf\n'), ((8089, 8113), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""phi"""'], {}), "('phi')\n", (8106, 8113), True, 'import tensorflow as tf\n'), ((8132, 8163), 'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', '(1)'], {}), '(l, X, 1)\n', (8154, 8163), False, 'import multi_mnist_cnn\n'), ((8181, 8210), 'tensorflow.reshape', 'tf.reshape', (['scores', '[M, n, 1]'], {}), '(scores, [M, n, 1])\n', (8191, 8210), True, 'import tensorflow as tf\n'), ((8228, 8256), 'tensorflow.tile', 'tf.tile', (['scores', '[n_s, 1, 1]'], {}), '(scores, [n_s, 1, 1])\n', (8235, 8256), True, 'import tensorflow as tf\n'), ((8275, 8310), 'util.sample_gumbel', 
'util.sample_gumbel', (['[M * n_s, n, 1]'], {}), '([M * n_s, n, 1])\n', (8293, 8310), False, 'import util\n'), ((8422, 8448), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""theta"""'], {}), "('theta')\n", (8439, 8448), True, 'import tensorflow as tf\n'), ((8482, 8513), 'multi_mnist_cnn.deepnn', 'multi_mnist_cnn.deepnn', (['l', 'X', '(1)'], {}), '(l, X, 1)\n', (8504, 8513), False, 'import multi_mnist_cnn\n'), ((8546, 8587), 'tensorflow.reshape', 'tf.reshape', (['regression_candidates', '[M, n]'], {}), '(regression_candidates, [M, n])\n', (8556, 8587), True, 'import tensorflow as tf\n'), ((9101, 9164), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_median_eval * regression_candidates)'], {'axis': '(2)'}), '(prob_median_eval * regression_candidates, axis=2)\n', (9114, 9164), True, 'import tensorflow as tf\n')] |
# --------------
#Importing header files
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
# Code starts here
data = pd.read_csv(path)
X = data.drop(columns = ["customer.id","paid.back.loan"],axis=1)
print(X.head(5))
y = data["paid.back.loan"]
print(y.head(5))
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size =0.3,random_state = 0)
# Code ends here
# --------------
#Importing header files
import matplotlib.pyplot as plt
#Code starts here
#Storing value counts of target variable in 'fully_paid'
fully_paid=y_train.value_counts()
#Plotting bar plot
plt.bar(fully_paid.index, fully_paid)
plt.show()
#Code ends here
# --------------
#Importing header files
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
X_train["int.rate"] = X_train["int.rate"].str.replace("%","")
X_test["int.rate"] = X_test["int.rate"].str.replace("%","")
X_train["int.rate"] = X_train["int.rate"].astype(float)
X_test["int.rate"] = X_test["int.rate"].astype(float)
X_train["int.rate"] = X_train["int.rate"] / 100
X_test["int.rate"] = X_test["int.rate"] / 100
num_df = X_train.select_dtypes(include=['number'])
print(num_df.dtypes)
cat_df = X_train.select_dtypes(include = "object")
print(cat_df.dtypes)
# Code ends here
# --------------
#Importing header files
import seaborn as sns
# Code starts here
cols = list(num_df.columns)
print(cols)
fig, axes = plt.subplots(4,1,figsize =(10,20))
for i in range(0,4):
sns.boxplot(x=y_train,y=num_df[cols[i]], ax=axes[i])
fig
# Code ends here
# --------------
# Code starts here
cols = list(cat_df.columns)
fig , axes = plt.subplots(2,2,figsize=(8,16))
for i in range(0,2):
for j in range(0,2):
sns.countplot(x=X_train[cols[i*2+j]], hue=y_train,ax=axes[i,j])
fig.tight_layout()
# Code ends here
# --------------
#Importing header files
from sklearn.tree import DecisionTreeClassifier
# Code starts here
for col in cat_df.columns:
#Filling null values with 'NA'
X_train[col].fillna('NA',inplace=True)
#Initalising a label encoder object
le=LabelEncoder()
#Fitting and transforming the column in X_train with 'le'
X_train[col]=le.fit_transform(X_train[col])
#Filling null values with 'NA'
X_test[col].fillna('NA',inplace=True)
#Fitting the column in X_test with 'le'
X_test[col]=le.transform(X_test[col])
#Initialising 'Decision Tree' model
model=DecisionTreeClassifier(random_state=0)
#Training the 'Decision Tree' model
model.fit(X_train, y_train)
#Finding the accuracy of 'Decision Tree' model
acc=model.score(X_test, y_test)
#Printing the accuracy
print(acc)
# Code ends here
# --------------
#Importing header files
from sklearn.model_selection import GridSearchCV
#Parameter grid
parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)}
# Code starts here
model_2 = DecisionTreeClassifier(random_state = 0)
p_tree = GridSearchCV(estimator=model_2,param_grid=parameter_grid,cv=5)
p_tree.fit(X_train,y_train)
acc_2 =p_tree.score(X_test,y_test)
print(acc_2)
# Code ends here
# --------------
#Importing header files
from io import StringIO
from sklearn.tree import export_graphviz
from sklearn import tree
from sklearn import metrics
from IPython.display import Image
import pydotplus
# Code starts here
dot_data = tree.export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None,
feature_names=X.columns, filled = True,
class_names=['loan_paid_back_yes','loan_paid_back_no'])
graph_big = pydotplus.graph_from_dot_data(dot_data)
Image(graph_big.create_png())
# Code ends here
| [
"sklearn.model_selection.GridSearchCV",
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"seaborn.boxplot",
"sklearn.tree.export_graphviz",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.subplots",
"seaborn.cou... | [((80, 113), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (103, 113), False, 'import warnings\n'), ((196, 213), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (207, 213), True, 'import pandas as pd\n'), ((373, 426), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (389, 426), False, 'from sklearn.model_selection import train_test_split\n'), ((652, 689), 'matplotlib.pyplot.bar', 'plt.bar', (['fully_paid.index', 'fully_paid'], {}), '(fully_paid.index, fully_paid)\n', (659, 689), True, 'import matplotlib.pyplot as plt\n'), ((690, 700), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (698, 700), True, 'import matplotlib.pyplot as plt\n'), ((1473, 1509), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '(10, 20)'}), '(4, 1, figsize=(10, 20))\n', (1485, 1509), True, 'import matplotlib.pyplot as plt\n'), ((1687, 1722), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(8, 16)'}), '(2, 2, figsize=(8, 16))\n', (1699, 1722), True, 'import matplotlib.pyplot as plt\n'), ((2520, 2558), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2542, 2558), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2985, 3023), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (3007, 3023), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3035, 3099), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'model_2', 'param_grid': 'parameter_grid', 'cv': '(5)'}), '(estimator=model_2, param_grid=parameter_grid, cv=5)\n', (3047, 3099), False, 'from sklearn.model_selection import GridSearchCV\n'), ((3436, 3612), 'sklearn.tree.export_graphviz', 
'tree.export_graphviz', ([], {'decision_tree': 'p_tree.best_estimator_', 'out_file': 'None', 'feature_names': 'X.columns', 'filled': '(True)', 'class_names': "['loan_paid_back_yes', 'loan_paid_back_no']"}), "(decision_tree=p_tree.best_estimator_, out_file=None,\n feature_names=X.columns, filled=True, class_names=['loan_paid_back_yes',\n 'loan_paid_back_no'])\n", (3456, 3612), False, 'from sklearn import tree\n'), ((3685, 3724), 'pydotplus.graph_from_dot_data', 'pydotplus.graph_from_dot_data', (['dot_data'], {}), '(dot_data)\n', (3714, 3724), False, 'import pydotplus\n'), ((1533, 1586), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'y_train', 'y': 'num_df[cols[i]]', 'ax': 'axes[i]'}), '(x=y_train, y=num_df[cols[i]], ax=axes[i])\n', (1544, 1586), True, 'import seaborn as sns\n'), ((2168, 2182), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2180, 2182), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2901, 2917), 'numpy.arange', 'np.arange', (['(3)', '(10)'], {}), '(3, 10)\n', (2910, 2917), True, 'import numpy as np\n'), ((1774, 1843), 'seaborn.countplot', 'sns.countplot', ([], {'x': 'X_train[cols[i * 2 + j]]', 'hue': 'y_train', 'ax': 'axes[i, j]'}), '(x=X_train[cols[i * 2 + j]], hue=y_train, ax=axes[i, j])\n', (1787, 1843), True, 'import seaborn as sns\n')] |
#! /usr/bin/env python
"""
File: plot_sin_eps.py
Copyright (c) 2016 <NAME>
License: MIT
Course: PHYS227
Assignment: B.2
Date: March 17th, 2016
Email: <EMAIL>
Name: <NAME>
Description: Studies a function for different parameter values
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
def sin_graph(eps, n):
    """Plot f(x) = sin(1 / (x + eps)) on [0, 1] sampled at n + 1 nodes.

    Args:
        eps (float): offset keeping the argument of sin finite at x = 0.
        n (int): number of sub-intervals; n + 1 equally spaced nodes are used.
    """
    # The figure handle was previously bound to an unused local; just select
    # figure 1 so repeated calls draw into the same window.
    plt.figure(1)
    x = np.linspace(0, 1, n + 1)
    y = np.sin(1 / (x + eps))
    plt.plot(x, y, 'b-')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('f(x)')
    plt.axis([-0.2, 1.2, -1.2, 1.2])
    plt.show()
def multigraph(eps, n1):
    """Overlay sin(1 / (x + eps)) sampled with n1 nodes (blue) and n1 + 10 nodes (red).

    Visual aid for judging how many nodes are needed to resolve the function.

    Args:
        eps (float): offset keeping the argument of sin finite at x = 0.
        n1 (int): node count of the coarser grid; the finer grid uses n1 + 10.
    """
    n2 = n1 + 10
    # Select figure 1 (previously the handle was stored in an unused local).
    plt.figure(1)
    x1 = np.linspace(0, 1, n1)
    y1 = np.sin(1 / (x1 + eps))
    x2 = np.linspace(0, 1, n2)
    y2 = np.sin(1 / (x2 + eps))
    plt.plot(x1, y1, 'b-')
    plt.plot(x2, y2, 'r-')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('f(x)')
    plt.axis([-0.2, 1.2, -1.2, 1.2])
    plt.show()
def choose_n(eps):
    """Return the smallest n such that the sampled maxima of sin(1/(x+eps))
    computed with n nodes and with n + 10 nodes differ by less than 0.1.

    Note: the original docstring said "n + 1 nodes", but the code has always
    compared n against n + 10 nodes; the text is corrected here.

    Args:
        eps (float): offset keeping the argument of sin finite at x = 0.

    Returns:
        int: the smallest node count n (>= 1) satisfying the criterion.
    """
    def grid_max(num_nodes):
        # Maximum of the function sampled on a uniform grid of num_nodes
        # points over [0, 1] (deduplicates the four copies in the original).
        x = np.linspace(0, 1, num_nodes)
        return max(np.sin(1 / (x + eps)))

    n1 = 1
    while abs(grid_max(n1 + 10) - grid_max(n1)) >= 0.1:
        n1 += 1
    return n1
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((357, 370), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (367, 370), True, 'import matplotlib.pyplot as plt\n'), ((379, 403), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(n + 1)'], {}), '(0, 1, n + 1)\n', (390, 403), True, 'import numpy as np\n'), ((412, 433), 'numpy.sin', 'np.sin', (['(1 / (x + eps))'], {}), '(1 / (x + eps))\n', (418, 433), True, 'import numpy as np\n'), ((438, 458), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b-"""'], {}), "(x, y, 'b-')\n", (446, 458), True, 'import matplotlib.pyplot as plt\n'), ((463, 478), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (473, 478), True, 'import matplotlib.pyplot as plt\n'), ((483, 498), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (493, 498), True, 'import matplotlib.pyplot as plt\n'), ((503, 520), 'matplotlib.pyplot.title', 'plt.title', (['"""f(x)"""'], {}), "('f(x)')\n", (512, 520), True, 'import matplotlib.pyplot as plt\n'), ((525, 557), 'matplotlib.pyplot.axis', 'plt.axis', (['[-0.2, 1.2, -1.2, 1.2]'], {}), '([-0.2, 1.2, -1.2, 1.2])\n', (533, 557), True, 'import matplotlib.pyplot as plt\n'), ((562, 572), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (570, 572), True, 'import matplotlib.pyplot as plt\n'), ((630, 643), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (640, 643), True, 'import matplotlib.pyplot as plt\n'), ((653, 674), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n1'], {}), '(0, 1, n1)\n', (664, 674), True, 'import numpy as np\n'), ((684, 706), 'numpy.sin', 'np.sin', (['(1 / (x1 + eps))'], {}), '(1 / (x1 + eps))\n', (690, 706), True, 'import numpy as np\n'), ((716, 737), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n2'], {}), '(0, 1, n2)\n', (727, 737), True, 'import numpy as np\n'), ((747, 769), 'numpy.sin', 'np.sin', (['(1 / (x2 + eps))'], {}), '(1 / (x2 + eps))\n', (753, 769), True, 'import numpy as np\n'), ((774, 796), 'matplotlib.pyplot.plot', 'plt.plot', 
(['x1', 'y1', '"""b-"""'], {}), "(x1, y1, 'b-')\n", (782, 796), True, 'import matplotlib.pyplot as plt\n'), ((801, 823), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'y2', '"""r-"""'], {}), "(x2, y2, 'r-')\n", (809, 823), True, 'import matplotlib.pyplot as plt\n'), ((828, 843), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (838, 843), True, 'import matplotlib.pyplot as plt\n'), ((848, 863), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (858, 863), True, 'import matplotlib.pyplot as plt\n'), ((868, 885), 'matplotlib.pyplot.title', 'plt.title', (['"""f(x)"""'], {}), "('f(x)')\n", (877, 885), True, 'import matplotlib.pyplot as plt\n'), ((890, 922), 'matplotlib.pyplot.axis', 'plt.axis', (['[-0.2, 1.2, -1.2, 1.2]'], {}), '([-0.2, 1.2, -1.2, 1.2])\n', (898, 922), True, 'import matplotlib.pyplot as plt\n'), ((927, 937), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (935, 937), True, 'import matplotlib.pyplot as plt\n'), ((1140, 1161), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n1'], {}), '(0, 1, n1)\n', (1151, 1161), True, 'import numpy as np\n'), ((1171, 1193), 'numpy.sin', 'np.sin', (['(1 / (x1 + eps))'], {}), '(1 / (x1 + eps))\n', (1177, 1193), True, 'import numpy as np\n'), ((1203, 1224), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n2'], {}), '(0, 1, n2)\n', (1214, 1224), True, 'import numpy as np\n'), ((1234, 1256), 'numpy.sin', 'np.sin', (['(1 / (x2 + eps))'], {}), '(1 / (x2 + eps))\n', (1240, 1256), True, 'import numpy as np\n'), ((1345, 1366), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n1'], {}), '(0, 1, n1)\n', (1356, 1366), True, 'import numpy as np\n'), ((1380, 1402), 'numpy.sin', 'np.sin', (['(1 / (x1 + eps))'], {}), '(1 / (x1 + eps))\n', (1386, 1402), True, 'import numpy as np\n'), ((1416, 1437), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n2'], {}), '(0, 1, n2)\n', (1427, 1437), True, 'import numpy as np\n'), ((1451, 1473), 'numpy.sin', 'np.sin', (['(1 / (x2 + eps))'], 
{}), '(1 / (x2 + eps))\n', (1457, 1473), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: salimt
"""
#Problem 1: Curve Fitting
#15/15 points (graded)
#Implement the generate_models function.
#
#x and y are two lists corresponding to the x-coordinates and y-coordinates of the data samples (or data points); for example, if you have N data points, x = [x1 , x2 , ..., xN ] and y = [y1 , y2 , ..., yN ], where x_i and y_i are the x and y coordinate of the i-th data points. In this problem set, each x coordinate is an integer and corresponds to the year of a sample (e.g., 1997); each corresponding y coordinate is a float and represents the temperature observation (will be computed in multiple ways) of that year in Celsius. This representation will be used throughout the entire problem set.
#degs is a list of integers indicating the degree of each regression model that we want to create. For each model, this function should fit the data (x,y) to a polynomial curve of that degree.
#This function should return a list of models. A model is the numpy 1d array of the coefficients of the fitting polynomial curve. Each returned model should be in the same order as their corresponding integer in degs.
#Example:
#
#print(generate_models([1961, 1962, 1963],[4.4,5.5,6.6],[1, 2]))
#Should print something close to:
#
#[array([ 1.10000000e+00, -2.15270000e+03]), array([ -8.86320195e-14, 1.10000000e+00, -2.15270000e+03])]
#The above example was generating a linear and a quadratic curve on data samples (xi, yi ) = (1961, 4.4), (1962, 5.5), and (1963, 6.6). The resulting models are in the same order as specified in degs. Note that it is fine you did not get the exact number because of numerical errors.
#
#Note: If you want to use numpy arrays, you should import numpy as np and use np.METHOD_NAME in your code. Unfortunately, pylab does not work with the grader
#
## Problem 1
#
def generate_models(x, y, degs):
    """Fit one least-squares polynomial to the points (x, y) per degree in degs.

    Args:
        x: length-N list of x-coordinates of the sample points
        y: length-N list of y-coordinates of the sample points
        degs: list of polynomial degrees to fit

    Returns:
        a list of 1-d numpy arrays (one per entry of degs, in the same order),
        each holding the coefficients that minimize the squared fitting error
    """
    import numpy as np
    return [np.polyfit(x, y, degree) for degree in degs]
#print(generate_models([1961, 1962, 1963],[4.4,5.5,6.6],[1, 2]))
#[array([ 1.10000000e+00, -2.15270000e+03]), array([ -8.86320195e-14, 1.10000000e+00, -2.15270000e+03])] | [
"numpy.polyfit"
] | [((2483, 2502), 'numpy.polyfit', 'np.polyfit', (['x', 'y', 'd'], {}), '(x, y, d)\n', (2493, 2502), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
phi = -0.8

# Lags k = 0..15 at which every sequence is evaluated.
times = list(range(16))

# Build all three sequences in one pass: the AR(1) autocovariance with
# coefficient phi, a cosine cycle at frequency pi, and their product.
y1, y2, y3 = [], [], []
for k in times:
    gamma_k = phi**k / (1 - phi**2)
    cycle_k = np.cos(np.pi * k)
    y1.append(gamma_k)
    y2.append(cycle_k)
    y3.append(gamma_k * cycle_k)

# Three stacked panels sharing the same lag axis.
fig, axes = plt.subplots(3, 1, figsize=(10, 8))
plt.subplots_adjust(hspace=0.25)

# Panel 1: autocovariance when phi = -0.8
top = axes[0]
top.plot(times, y1, 'bo-', alpha=0.6, label=r'$\gamma(k)$')
top.legend(loc='upper right')
top.set_xlim(0, 15)
top.set_yticks((-2, 0, 2))
top.hlines(0, 0, 15, linestyle='--', alpha=0.5)

# Panel 2: cycles at frequency pi
middle = axes[1]
middle.plot(times, y2, 'bo-', alpha=0.6, label=r'$\cos(\pi k)$')
middle.legend(loc='upper right')
middle.set_xlim(0, 15)
middle.set_yticks((-1, 0, 1))
middle.hlines(0, 0, 15, linestyle='--', alpha=0.5)

# Panel 3: pointwise product of the two sequences
bottom = axes[2]
bottom.stem(times, y3, label=r'$\gamma(k) \cos(\pi k)$')
bottom.legend(loc='upper right')
bottom.set_xlim((0, 15))
bottom.set_ylim(-3, 3)
bottom.set_yticks((-1, 0, 1, 2, 3))
bottom.hlines(0, 0, 15, linestyle='--', alpha=0.5)

plt.show()
| [
"numpy.cos",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
] | [((247, 296), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num_rows', 'num_cols'], {'figsize': '(10, 8)'}), '(num_rows, num_cols, figsize=(10, 8))\n', (259, 296), True, 'import matplotlib.pyplot as plt\n'), ((297, 329), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.25)'}), '(hspace=0.25)\n', (316, 329), True, 'import matplotlib.pyplot as plt\n'), ((1004, 1014), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1012, 1014), True, 'import matplotlib.pyplot as plt\n'), ((137, 154), 'numpy.cos', 'np.cos', (['(np.pi * k)'], {}), '(np.pi * k)\n', (143, 154), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import time
import board
import busio
import adafruit_ads1x15.ads1115 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
import numpy as np
# Create the I2C bus
i2c = busio.I2C(board.SCL, board.SDA)
# Create the ADC object using the I2C bus
ads = ADS.ADS1115(i2c)
# Create single-ended input on channel 1
chanRef = AnalogIn(ads, ADS.P1)  # measurement channel; read as chanRef.voltage in the main loop
chanCali = AnalogIn(ads, ADS.P3)  # calibration channel on input P3 — never read in this script
def outputBanner():
    """Print the column-header line for the on-screen temperature log."""
    # The original built this constant line via str.format with three literal
    # arguments; a plain string literal produces byte-identical output.
    print("Time ObjectiveTemp Variance")
def outputTemp(Mean, Variance):
    """Print the current local time followed by the mean and variance readings."""
    timestamp = time.strftime("%H:%M:%S", time.localtime())
    readings = " {:>5.2f} {:>4.3f}".format(Mean, Variance)
    print(timestamp + readings)
if __name__ == '__main__':
    outputTimeStep = 1       # seconds between log lines
    screenOutput = False     # mirror each line to stdout when True
    filename = time.strftime(
        "%m-%d-at-%H:%M-year%YRecordLog.txt", time.localtime())
    with open(filename, 'w') as f:
        if screenOutput:
            outputBanner()
        f.write("{} {} {}\n".format('Time', 'ObjectiveTemp', 'Variance'))
        while True:
            startTime = time.time()
            tempList = []
            # Sample the ADC for one outputTimeStep window, then log the
            # mean/variance of the readings collected in that window.
            while time.time() - startTime < outputTimeStep:
                tempList.append(chanRef.voltage * 10)
                # BUG FIX: integer division (outputTimeStep // 50 == 0 for
                # outputTimeStep == 1) made this sleep(0), busy-polling the
                # I2C bus. True division gives the intended ~1/50 s pause.
                time.sleep(outputTimeStep / 50)
            tempArray = np.array(tempList)
            Mean, Var = np.mean(tempArray), np.var(tempArray)
            f.write(time.strftime("%H:%M:%S", time.localtime()) +
                    " {:>5.2f} {:>4.3f}\n".format(Mean, Var))
            # Flush each line so the log survives an abrupt power cut —
            # this loop never exits normally, so the context manager's
            # close-on-exit flush would otherwise never run.
            f.flush()
            if screenOutput:
                outputTemp(Mean, Var)
| [
"adafruit_ads1x15.analog_in.AnalogIn",
"numpy.mean",
"busio.I2C",
"time.sleep",
"numpy.array",
"adafruit_ads1x15.ads1115.ADS1115",
"time.localtime",
"time.time",
"numpy.var"
] | [((192, 223), 'busio.I2C', 'busio.I2C', (['board.SCL', 'board.SDA'], {}), '(board.SCL, board.SDA)\n', (201, 223), False, 'import busio\n'), ((272, 288), 'adafruit_ads1x15.ads1115.ADS1115', 'ADS.ADS1115', (['i2c'], {}), '(i2c)\n', (283, 288), True, 'import adafruit_ads1x15.ads1115 as ADS\n'), ((340, 361), 'adafruit_ads1x15.analog_in.AnalogIn', 'AnalogIn', (['ads', 'ADS.P1'], {}), '(ads, ADS.P1)\n', (348, 361), False, 'from adafruit_ads1x15.analog_in import AnalogIn\n'), ((373, 394), 'adafruit_ads1x15.analog_in.AnalogIn', 'AnalogIn', (['ads', 'ADS.P3'], {}), '(ads, ADS.P3)\n', (381, 394), False, 'from adafruit_ads1x15.analog_in import AnalogIn\n'), ((783, 799), 'time.localtime', 'time.localtime', ([], {}), '()\n', (797, 799), False, 'import time\n'), ((1006, 1017), 'time.time', 'time.time', ([], {}), '()\n', (1015, 1017), False, 'import time\n'), ((1225, 1243), 'numpy.array', 'np.array', (['tempList'], {}), '(tempList)\n', (1233, 1243), True, 'import numpy as np\n'), ((553, 569), 'time.localtime', 'time.localtime', ([], {}), '()\n', (567, 569), False, 'import time\n'), ((1170, 1202), 'time.sleep', 'time.sleep', (['(outputTimeStep // 50)'], {}), '(outputTimeStep // 50)\n', (1180, 1202), False, 'import time\n'), ((1268, 1286), 'numpy.mean', 'np.mean', (['tempArray'], {}), '(tempArray)\n', (1275, 1286), True, 'import numpy as np\n'), ((1288, 1305), 'numpy.var', 'np.var', (['tempArray'], {}), '(tempArray)\n', (1294, 1305), True, 'import numpy as np\n'), ((1062, 1073), 'time.time', 'time.time', ([], {}), '()\n', (1071, 1073), False, 'import time\n'), ((1352, 1368), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1366, 1368), False, 'import time\n')] |
from detectron2.checkpoint import DetectionCheckpointer
from typing import Any
import torch
import torch.nn as nn
from fvcore.common.checkpoint import _IncompatibleKeys, _strip_prefix_if_present, TORCH_VERSION, quantization, \
ObserverBase, FakeQuantizeBase
from torch import distributed as dist
from scipy import interpolate
import numpy as np
import torch.nn.functional as F
from collections import OrderedDict
def append_prefix(k):
    """Return *k* with the detectron2 bottom-up backbone prefix prepended,
    unless the key already carries that prefix."""
    prefix = 'backbone.bottom_up.backbone.'
    if k.startswith(prefix):
        return k
    return prefix + k
def modify_ckpt_state(model, state_dict, logger=None):
    # Adapt a pretrained checkpoint's state_dict to the current detection
    # model: resizes absolute position embeddings, text position embeddings,
    # and relative position bias tables so checkpoints trained at a different
    # image/patch geometry can still be loaded. Mutates and returns state_dict.
    # NOTE(review): the extraction lost one indent level on the nested helper
    # defs; they are restored here inside this function, where their free
    # variables (state_dict, logger) resolve.

    # reshape absolute position embedding for Swin
    if state_dict.get(append_prefix('absolute_pos_embed')) is not None:
        absolute_pos_embed = state_dict[append_prefix('absolute_pos_embed')]
        N1, L, C1 = absolute_pos_embed.size()
        N2, C2, H, W = model.backbone.bottom_up.backbone.absolute_pos_embed.size()
        if N1 != N2 or C1 != C2 or L != H * W:
            logger.warning("Error in loading absolute_pos_embed, pass")
        else:
            # (N, L, C) -> (N, C, H, W) layout expected by the model.
            state_dict[append_prefix('absolute_pos_embed')] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2)

    def get_dist_info():
        # Return (rank, world_size); defaults to single-process values when
        # torch.distributed is unavailable or not initialized.
        if dist.is_available() and dist.is_initialized():
            rank = dist.get_rank()
            world_size = dist.get_world_size()
        else:
            rank = 0
            world_size = 1
        return rank, world_size

    def resize_position_embeddings(max_position_embeddings, old_vocab_size,
                                   _k='backbone.bottom_up.backbone.embeddings.position_embeddings.weight',
                                   initializer_range=0.02, reuse_position_embedding=True):
        '''
        Grow or shrink the text position-embedding table under key ``_k`` to
        ``max_position_embeddings`` rows, optionally tiling the pretrained
        rows to fill a longer table. Mutates ``state_dict`` in place.

        Reference: unilm
        Also see discussions:
        https://github.com/pytorch/fairseq/issues/1685
        https://github.com/google-research/bert/issues/27
        '''
        # Freshly initialized table of the target size (normal init).
        new_position_embedding = state_dict[_k].data.new_tensor(torch.ones(
            size=(max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
        new_position_embedding = nn.Parameter(data=new_position_embedding, requires_grad=True)
        new_position_embedding.data.normal_(mean=0.0, std=initializer_range)
        if max_position_embeddings > old_vocab_size:
            logger.info("Resize > position embeddings !")
            max_range = max_position_embeddings if reuse_position_embedding else old_vocab_size
            shift = 0
            # Tile the pretrained embedding block-by-block into the larger table.
            while shift < max_range:
                delta = min(old_vocab_size, max_range - shift)
                new_position_embedding.data[shift: shift + delta, :] = state_dict[_k][:delta, :]
                logger.info(" CP [%d ~ %d] into [%d ~ %d] " % (0, delta, shift, shift + delta))
                shift += delta
            state_dict[_k] = new_position_embedding.data
            del new_position_embedding
        elif max_position_embeddings < old_vocab_size:
            logger.info("Resize < position embeddings !")
            # Truncate: keep only the first max_position_embeddings rows.
            new_position_embedding.data.copy_(state_dict[_k][:max_position_embeddings, :])
            state_dict[_k] = new_position_embedding.data
            del new_position_embedding

    rank, _ = get_dist_info()
    all_keys = list(state_dict.keys())
    for key in all_keys:
        if "embeddings.position_embeddings.weight" in key:
            if key not in model.state_dict():  # image only models do not use this key
                continue
            max_position_embeddings = model.state_dict()[key].shape[0]
            old_vocab_size = state_dict[key].shape[0]
            if max_position_embeddings != old_vocab_size:
                resize_position_embeddings(max_position_embeddings, old_vocab_size,_k=key)

        if "relative_position_index" in key:
            # Buffers recomputed by the model; drop to avoid shape mismatches.
            state_dict.pop(key)

        if "relative_position_bias_table" in key:
            rel_pos_bias = state_dict[key]
            src_num_pos, num_attn_heads = rel_pos_bias.size()
            if key not in model.state_dict():
                continue
            dst_num_pos, _ = model.state_dict()[key].size()
            dst_patch_shape = model.backbone.bottom_up.backbone.patch_embed.patch_shape
            if dst_patch_shape[0] != dst_patch_shape[1]:
                # Interpolation below assumes a square relative-position grid.
                raise NotImplementedError()
            # Rows beyond the (2H-1)*(2W-1) grid are special tokens (kept as-is).
            num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
            src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
            dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
            if src_size != dst_size:
                if rank == 0:
                    print("Position interpolate for %s from %dx%d to %dx%d" % (
                        key, src_size, src_size, dst_size, dst_size))
                extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
                rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]

                def geometric_progression(a, r, n):
                    # Sum of a geometric series: a * (1 - r^n) / (1 - r).
                    return a * (1.0 - r ** n) / (1.0 - r)

                # Bisect for the ratio q whose geometric node spacing spans
                # the destination half-grid from src_size // 2 source nodes.
                left, right = 1.01, 1.5
                while right - left > 1e-6:
                    q = (left + right) / 2.0
                    gp = geometric_progression(1, q, src_size // 2)
                    if gp > dst_size // 2:
                        right = q
                    else:
                        left = q

                # if q > 1.13492:
                #     q = 1.13492

                # Geometrically spaced source coordinates, mirrored about 0.
                dis = []
                cur = 1
                for i in range(src_size // 2):
                    dis.append(cur)
                    cur += q ** (i + 1)

                r_ids = [-_ for _ in reversed(dis)]

                x = r_ids + [0] + dis
                y = r_ids + [0] + dis

                # Uniformly spaced destination coordinates.
                t = dst_size // 2.0
                dx = np.arange(-t, t + 0.1, 1.0)
                dy = np.arange(-t, t + 0.1, 1.0)

                if rank == 0:
                    print("x = {}".format(x))
                    print("dx = {}".format(dx))

                # Bicubic interpolation of each attention head's bias grid.
                all_rel_pos_bias = []
                for i in range(num_attn_heads):
                    z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
                    f = interpolate.interp2d(x, y, z, kind='cubic')
                    all_rel_pos_bias.append(
                        torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))

                rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)

                new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
                state_dict[key] = new_rel_pos_bias

    if append_prefix('pos_embed') in state_dict:
        pos_embed_checkpoint = state_dict[append_prefix('pos_embed')]
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.backbone.bottom_up.backbone.patch_embed.num_patches
        num_extra_tokens = model.backbone.bottom_up.backbone.pos_embed.shape[-2] - num_patches
        # height (== width) for the checkpoint position embedding
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) for the new position embedding
        # new_size = int(num_patches ** 0.5)
        new_size_w = model.backbone.bottom_up.backbone.patch_embed.num_patches_w
        new_size_h = model.backbone.bottom_up.backbone.patch_embed.num_patches_h
        # class_token and dist_token are kept unchanged
        if orig_size != new_size_h or orig_size != new_size_w:
            if rank == 0:
                print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size_w, new_size_h))
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # only the position tokens are interpolated
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens, size=(new_size_w, new_size_h), mode='bicubic', align_corners=False)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            state_dict[append_prefix('pos_embed')] = new_pos_embed

    # interpolate position bias table if needed (Swin-style tables)
    relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
    for table_key in relative_position_bias_table_keys:
        table_pretrained = state_dict[table_key]
        if table_key not in model.state_dict():
            continue
        table_current = model.state_dict()[table_key]
        L1, nH1 = table_pretrained.size()
        L2, nH2 = table_current.size()
        if nH1 != nH2:
            logger.warning(f"Error in loading {table_key}, pass")
        else:
            if L1 != L2:
                # Treat the L entries as an S x S grid and resize bicubically.
                S1 = int(L1 ** 0.5)
                S2 = int(L2 ** 0.5)
                table_pretrained_resized = F.interpolate(
                    table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
                    size=(S2, S2), mode='bicubic')
                state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)

    # BEiT: a single shared relative-position table in the checkpoint, but the
    # model wants a per-block table -> clone it into every transformer block.
    if append_prefix('rel_pos_bias.relative_position_bias_table') in state_dict and \
            model.backbone.bottom_up.backbone.use_rel_pos_bias and \
            not model.backbone.bottom_up.backbone.use_shared_rel_pos_bias and \
            append_prefix('blocks.0.attn.relative_position_bias_table') not in state_dict:
        logger.info("[BEIT] Expand the shared relative position embedding to each transformer block. ")
        num_layers = model.backbone.bottom_up.backbone.get_num_layers()
        rel_pos_bias = state_dict[append_prefix("rel_pos_bias.relative_position_bias_table")]
        for i in range(num_layers):
            state_dict["blocks.%d.attn.relative_position_bias_table" % i] = rel_pos_bias.clone()

        state_dict.pop(append_prefix("rel_pos_bias.relative_position_bias_table"))

    return state_dict
class MyDetectionCheckpointer(DetectionCheckpointer):
def _load_model(self, checkpoint: Any) -> _IncompatibleKeys:
"""
Load weights from a checkpoint.
Args:
checkpoint (Any): checkpoint contains the weights.
Returns:
``NamedTuple`` with ``missing_keys``, ``unexpected_keys``,
and ``incorrect_shapes`` fields:
* **missing_keys** is a list of str containing the missing keys
* **unexpected_keys** is a list of str containing the unexpected keys
* **incorrect_shapes** is a list of (key, shape in checkpoint, shape in model)
This is just like the return value of
:func:`torch.nn.Module.load_state_dict`, but with extra support
for ``incorrect_shapes``.
"""
checkpoint_state_dict = checkpoint.pop("model")
checkpoint_state_dict = self.rename_state_dict(checkpoint_state_dict)
self._convert_ndarray_to_tensor(checkpoint_state_dict)
# if the state_dict comes from a model that was wrapped in a
# DataParallel or DistributedDataParallel during serialization,
# remove the "module" prefix before performing the matching.
_strip_prefix_if_present(checkpoint_state_dict, "module.")
# workaround https://github.com/pytorch/pytorch/issues/24139
model_state_dict = self.model.state_dict()
incorrect_shapes = []
# rename the para in checkpoint_state_dict
# some bug here, do not support re load
if 'backbone.fpn_lateral2.weight' not in checkpoint_state_dict.keys():
checkpoint_state_dict = {
append_prefix(k): checkpoint_state_dict[k]
for k in checkpoint_state_dict.keys()
}
# else: resume a model, do not need append_prefix
checkpoint_state_dict = modify_ckpt_state(self.model, checkpoint_state_dict, logger=self.logger)
for k in list(checkpoint_state_dict.keys()):
if k in model_state_dict:
model_param = model_state_dict[k]
# Allow mismatch for uninitialized parameters
if TORCH_VERSION >= (1, 8) and isinstance(
model_param, nn.parameter.UninitializedParameter
):
continue
shape_model = tuple(model_param.shape)
shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
if shape_model != shape_checkpoint:
has_observer_base_classes = (
TORCH_VERSION >= (1, 8)
and hasattr(quantization, "ObserverBase")
and hasattr(quantization, "FakeQuantizeBase")
)
if has_observer_base_classes:
# Handle the special case of quantization per channel observers,
# where buffer shape mismatches are expected.
def _get_module_for_key(
model: torch.nn.Module, key: str
) -> torch.nn.Module:
# foo.bar.param_or_buffer_name -> [foo, bar]
key_parts = key.split(".")[:-1]
cur_module = model
for key_part in key_parts:
cur_module = getattr(cur_module, key_part)
return cur_module
cls_to_skip = (
ObserverBase,
FakeQuantizeBase,
)
target_module = _get_module_for_key(self.model, k)
if isinstance(target_module, cls_to_skip):
# Do not remove modules with expected shape mismatches
# them from the state_dict loading. They have special logic
# in _load_from_state_dict to handle the mismatches.
continue
incorrect_shapes.append((k, shape_checkpoint, shape_model))
checkpoint_state_dict.pop(k)
incompatible = self.model.load_state_dict(checkpoint_state_dict, strict=False)
return _IncompatibleKeys(
missing_keys=incompatible.missing_keys,
unexpected_keys=incompatible.unexpected_keys,
incorrect_shapes=incorrect_shapes,
)
def rename_state_dict(self, state_dict):
new_state_dict = OrderedDict()
layoutlm = False
for k, v in state_dict.items():
if 'layoutlmv3' in k:
layoutlm = True
new_state_dict[k.replace('layoutlmv3.', '')] = v
if layoutlm:
return new_state_dict
return state_dict
| [
"collections.OrderedDict",
"torch.ones",
"numpy.arange",
"torch.distributed.is_initialized",
"fvcore.common.checkpoint._strip_prefix_if_present",
"torch.nn.Parameter",
"fvcore.common.checkpoint._IncompatibleKeys",
"torch.nn.functional.interpolate",
"torch.distributed.get_rank",
"torch.distributed.... | [((2108, 2169), 'torch.nn.Parameter', 'nn.Parameter', ([], {'data': 'new_position_embedding', 'requires_grad': '(True)'}), '(data=new_position_embedding, requires_grad=True)\n', (2120, 2169), True, 'import torch.nn as nn\n'), ((11256, 11314), 'fvcore.common.checkpoint._strip_prefix_if_present', '_strip_prefix_if_present', (['checkpoint_state_dict', '"""module."""'], {}), "(checkpoint_state_dict, 'module.')\n", (11280, 11314), False, 'from fvcore.common.checkpoint import _IncompatibleKeys, _strip_prefix_if_present, TORCH_VERSION, quantization, ObserverBase, FakeQuantizeBase\n'), ((14351, 14494), 'fvcore.common.checkpoint._IncompatibleKeys', '_IncompatibleKeys', ([], {'missing_keys': 'incompatible.missing_keys', 'unexpected_keys': 'incompatible.unexpected_keys', 'incorrect_shapes': 'incorrect_shapes'}), '(missing_keys=incompatible.missing_keys, unexpected_keys=\n incompatible.unexpected_keys, incorrect_shapes=incorrect_shapes)\n', (14368, 14494), False, 'from fvcore.common.checkpoint import _IncompatibleKeys, _strip_prefix_if_present, TORCH_VERSION, quantization, ObserverBase, FakeQuantizeBase\n'), ((14608, 14621), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14619, 14621), False, 'from collections import OrderedDict\n'), ((1219, 1238), 'torch.distributed.is_available', 'dist.is_available', ([], {}), '()\n', (1236, 1238), True, 'from torch import distributed as dist\n'), ((1243, 1264), 'torch.distributed.is_initialized', 'dist.is_initialized', ([], {}), '()\n', (1262, 1264), True, 'from torch import distributed as dist\n'), ((1285, 1300), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1298, 1300), True, 'from torch import distributed as dist\n'), ((1326, 1347), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (1345, 1347), True, 'from torch import distributed as dist\n'), ((1974, 2041), 'torch.ones', 'torch.ones', ([], {'size': '(max_position_embeddings, 
state_dict[_k].shape[1])'}), '(size=(max_position_embeddings, state_dict[_k].shape[1]))\n', (1984, 2041), False, 'import torch\n'), ((7895, 8010), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['pos_tokens'], {'size': '(new_size_w, new_size_h)', 'mode': '"""bicubic"""', 'align_corners': '(False)'}), "(pos_tokens, size=(new_size_w, new_size_h),\n mode='bicubic', align_corners=False)\n", (7926, 8010), False, 'import torch\n'), ((8122, 8166), 'torch.cat', 'torch.cat', (['(extra_tokens, pos_tokens)'], {'dim': '(1)'}), '((extra_tokens, pos_tokens), dim=1)\n', (8131, 8166), False, 'import torch\n'), ((5758, 5785), 'numpy.arange', 'np.arange', (['(-t)', '(t + 0.1)', '(1.0)'], {}), '(-t, t + 0.1, 1.0)\n', (5767, 5785), True, 'import numpy as np\n'), ((5807, 5834), 'numpy.arange', 'np.arange', (['(-t)', '(t + 0.1)', '(1.0)'], {}), '(-t, t + 0.1, 1.0)\n', (5816, 5834), True, 'import numpy as np\n'), ((6374, 6409), 'torch.cat', 'torch.cat', (['all_rel_pos_bias'], {'dim': '(-1)'}), '(all_rel_pos_bias, dim=-1)\n', (6383, 6409), False, 'import torch\n'), ((6445, 6491), 'torch.cat', 'torch.cat', (['(rel_pos_bias, extra_tokens)'], {'dim': '(0)'}), '((rel_pos_bias, extra_tokens), dim=0)\n', (6454, 6491), False, 'import torch\n'), ((6155, 6198), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['x', 'y', 'z'], {'kind': '"""cubic"""'}), "(x, y, z, kind='cubic')\n", (6175, 6198), False, 'from scipy import interpolate\n')] |
import numpy as np
import ops.utils
# FUNCTIONS
def correlate_channels(r, first, second):
"""Cross-correlation between non-zero pixels.
Uses `first` and `second` to index channels from `r.intensity_image_full`.
"""
A, B = r.intensity_image_full[[first, second]]
filt = A > 0
if filt.sum() == 0:
return np.nan
A = A[filt]
B = B[filt]
try:
corr_array = (A - A.mean()) * (B - B.mean()) / (A.std() * B.std())
corr = corr_array.mean()
except:
corr = float('NaN')
return corr
def masked(r, index):
return r.intensity_image_full[index][r.filled_image]
def bounds(r, index):
return r.intensity_image_full[index][r.image]
# FEATURES
# these functions expect an `skimage.measure.regionprops` region as input
intensity = {
'mean': lambda r: r.intensity_image[r.image].mean(),
'median': lambda r: np.median(r.intensity_image[r.image]),
'max': lambda r: r.intensity_image[r.image].max(),
'min': lambda r: r.intensity_image[r.image].min(),
}
geometry = {
'area' : lambda r: r.area,
'i' : lambda r: r.centroid[0],
'j' : lambda r: r.centroid[1],
'bounds' : lambda r: r.bbox,
'contour' : lambda r: ops.utils.binary_contours(r.image, fix=True, labeled=False)[0],
'label' : lambda r: r.label,
'mask': lambda r: ops.utils.Mask(r.image),
'eccentricity': lambda r: r.eccentricity,
'solidity': lambda r: r.solidity,
'convex_area': lambda r: r.convex_area,
'perimeter': lambda r: r.perimeter
}
# DAPI, HA, myc
frameshift = {
'dapi_ha_corr' : lambda r: correlate_channels(r, 0, 1),
'dapi_myc_corr': lambda r: correlate_channels(r, 0, 2),
'ha_median' : lambda r: np.median(r.intensity_image_full[1]),
'myc_median' : lambda r: np.median(r.intensity_image_full[2]),
'cell' : lambda r: r.label,
}
translocation = {
'dapi_gfp_corr' : lambda r: correlate_channels(r, 0, 1),
# 'dapi_mean' : lambda r: masked(r, 0).mean(),
# 'dapi_median': lambda r: np.median(masked(r, 0)),
# 'gfp_median' : lambda r: np.median(masked(r, 1)),
# 'gfp_mean' : lambda r: masked(r, 1).mean(),
# 'dapi_int' : lambda r: masked(r, 0).sum(),
# 'gfp_int' : lambda r: masked(r, 1).sum(),
# 'dapi_max' : lambda r: masked(r, 0).max(),
# 'gfp_max' : lambda r: masked(r, 1).max(),
}
viewRNA = {
'cy3_median': lambda r: np.median(masked(r, 1)),
'cy5_median': lambda r: np.median(masked(r, 2)),
'cy5_80p' : lambda r: np.percentile(masked(r, 2), 80),
'cy3_int': lambda r: masked(r, 1).sum(),
'cy5_int': lambda r: masked(r, 2).sum(),
'cy5_mean': lambda r: masked(r, 2).sum(),
'cy5_max': lambda r: masked(r, 2).max(),
}
synapse = {
'dapi_a532_corr' : lambda r: correlate_channels(r, 0, 3),
'dapi_a594_corr' : lambda r: correlate_channels(r, 0, 4),
'dapi_a647_corr' : lambda r: correlate_channels(r, 0, 5),
'dapi_a750_corr' : lambda r: correlate_channels(r, 0, 2),
'gfp_a532_corr' : lambda r: correlate_channels(r, 1, 3),
'gfp_a594_corr' : lambda r: correlate_channels(r, 1, 4),
'gfp_a647_corr' : lambda r: correlate_channels(r, 1, 5),
'gfp_a750_corr' : lambda r: correlate_channels(r, 1, 2),
'a532_a594_corr' : lambda r: correlate_channels(r, 3, 4),
'a532_a647_corr' : lambda r: correlate_channels(r, 3, 5),
'a532_a750_corr' : lambda r: correlate_channels(r, 3, 2),
'a594_a647_corr' : lambda r: correlate_channels(r, 4, 5),
'a532_a750_corr' : lambda r: correlate_channels(r, 4, 2),
'a647_a750_corr' : lambda r: correlate_channels(r, 5, 2),
'dapi_int' : lambda r: masked(r, 0).sum(),
'dapi_mean' : lambda r: masked(r, 0).mean(),
'dapi_std' : lambda r: np.std(masked(r, 0)),
'dapi_median' : lambda r: np.median(masked(r, 0)),
'dapi_max' : lambda r: masked(r, 0).max(),
'dapi_min' : lambda r: masked(r, 0).min(),
'dapi_lower_quartile' : lambda r: np.percentile(masked(r, 0),25),
'dapi_upper_quartile' : lambda r: np.percentile(masked(r, 0),75),
'gfp_int' : lambda r: masked(r, 1).sum(),
'gfp_mean' : lambda r: masked(r, 1).mean(),
'gfp_std' : lambda r: np.std(masked(r, 1)),
'gfp_median' : lambda r: np.median(masked(r, 1)),
'gfp_max' : lambda r: masked(r, 1).max(),
'gfp_min' : lambda r: masked(r, 1).min(),
'gfp_lower_quartile' : lambda r: np.percentile(masked(r, 1),25),
'gfp_upper_quartile' : lambda r: np.percentile(masked(r, 1),75),
'a750_int' : lambda r: masked(r, 2).sum(),
'a750_mean' : lambda r: masked(r, 2).mean(),
'a750_std' : lambda r: np.std(masked(r, 2)),
'a750_median' : lambda r: np.median(masked(r, 2)),
'a750_max' : lambda r: masked(r, 2).max(),
'a750_min' : lambda r: masked(r, 2).min(),
'a750_lower_quartile' : lambda r: np.percentile(masked(r, 2),25),
'a750_upper_quartile' : lambda r: np.percentile(masked(r, 2),75),
'a532_int' : lambda r: masked(r, 3).sum(),
'a532_mean' : lambda r: masked(r, 3).mean(),
'a532_std' : lambda r: np.std(masked(r, 3)),
'a532_median' : lambda r: np.median(masked(r, 3)),
'a532_max' : lambda r: masked(r, 3).max(),
'a532_min' : lambda r: masked(r, 3).min(),
'a532_lower_quartile' : lambda r: np.percentile(masked(r, 3),25),
'a532_upper_quartile' : lambda r: np.percentile(masked(r, 3),75),
'a594_int' : lambda r: masked(r, 4).sum(),
'a594_mean' : lambda r: masked(r, 4).mean(),
'a594_std' : lambda r: np.std(masked(r, 4)),
'a594_median' : lambda r: np.median(masked(r, 4)),
'a594_max' : lambda r: masked(r, 4).max(),
'a594_min' : lambda r: masked(r, 4).min(),
'a594_lower_quartile' : lambda r: np.percentile(masked(r, 4),25),
'a594_upper_quartile' : lambda r: np.percentile(masked(r, 4),75),
'a647_int' : lambda r: masked(r, 5).sum(),
'a647_mean' : lambda r: masked(r, 5).mean(),
'a647_std' : lambda r: np.std(masked(r, 5)),
'a647_median' : lambda r: np.median(masked(r, 5)),
'a647_max' : lambda r: masked(r, 5).max(),
'a647_min' : lambda r: masked(r, 5).min(),
'a647_lower_quartile' : lambda r: np.percentile(masked(r, 5),25),
'a647_upper_quartile' : lambda r: np.percentile(masked(r, 5),75)
}
all_features = [
intensity,
geometry,
translocation,
frameshift,
viewRNA,
synapse
]
def validate_features():
names = sum(map(list, all_features), [])
assert len(names) == len(set(names))
def make_feature_dict(feature_names):
features = {}
[features.update(d) for d in all_features]
return {n: features[n] for n in feature_names}
validate_features()
features_basic = make_feature_dict(('area', 'i', 'j', 'label'))
features_geom = make_feature_dict((
'area', 'eccentricity', 'convex_area', 'perimeter'))
features_translocation_nuclear = make_feature_dict((
'dapi_gfp_corr',
'eccentricity', 'solidity',
'dapi_median', 'dapi_mean', 'dapi_int', 'dapi_max',
'gfp_median', 'gfp_mean', 'gfp_int', 'gfp_max',
'area'))
features_translocation_cell = make_feature_dict((
'dapi_gfp_corr',
'eccentricity', 'solidity',
'dapi_median', 'dapi_mean', 'dapi_int', 'dapi_max',
'gfp_median', 'gfp_mean', 'gfp_int', 'gfp_max',
'area'))
features_frameshift = make_feature_dict((
'dapi_ha_corr',
'dapi_median', 'dapi_max',
'ha_median'))
features_frameshift_myc = make_feature_dict((
'dapi_ha_corr', 'dapi_myc_corr',
'dapi_median', 'dapi_max',
'ha_median', 'myc_median'))
features_translocation_nuclear_simple = make_feature_dict((
'dapi_gfp_corr',
'dapi_mean', 'dapi_max', 'gfp_mean', 'gfp_max',
'area'))
features_synapse_cell = make_feature_dict((
'area', 'eccentricity', 'solidity') + tuple(synapse.keys()))
features_synapse_edge = make_feature_dict((
'area', 'eccentricity', 'solidity') + tuple(synapse.keys()))
features_synapse_puncta = make_feature_dict((
'area', 'eccentricity', 'solidity') + tuple(synapse.keys()))
| [
"numpy.median"
] | [((889, 926), 'numpy.median', 'np.median', (['r.intensity_image[r.image]'], {}), '(r.intensity_image[r.image])\n', (898, 926), True, 'import numpy as np\n'), ((1740, 1776), 'numpy.median', 'np.median', (['r.intensity_image_full[1]'], {}), '(r.intensity_image_full[1])\n', (1749, 1776), True, 'import numpy as np\n'), ((1809, 1845), 'numpy.median', 'np.median', (['r.intensity_image_full[2]'], {}), '(r.intensity_image_full[2])\n', (1818, 1845), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn.functional as F
import point_utils_cuda
from pytorch3d.loss import chamfer_distance
from pytorch3d.ops import knn_points, knn_gather
from scipy.spatial.transform import Rotation
import random
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
'''
ctx:
xyz: [B,N,3]
npoint: int
'''
assert xyz.is_contiguous()
B, N, _ = xyz.size()
output = torch.cuda.IntTensor(B, npoint)
temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
point_utils_cuda.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
return output
@staticmethod
def backward(xyz, a=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
class WeightedFurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz: torch.Tensor, weights: torch.Tensor, npoint: int) -> torch.Tensor:
'''
ctx:
xyz: [B,N,3]
weights: [B,N]
npoint: int
'''
assert xyz.is_contiguous()
assert weights.is_contiguous()
B, N, _ = xyz.size()
output = torch.cuda.IntTensor(B, npoint)
temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
point_utils_cuda.weighted_furthest_point_sampling_wrapper(B, N, npoint, xyz, weights, temp, output);
return output
@staticmethod
def backward(xyz, a=None):
return None, None
weighted_furthest_point_sample = WeightedFurthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
'''
ctx
features: [B,C,N]
idx: [B,npoint]
'''
assert features.is_contiguous()
assert idx.is_contiguous()
B, npoint = idx.size()
_, C, N = features.size()
output = torch.cuda.FloatTensor(B, C, npoint)
point_utils_cuda.gather_points_wrapper(B, C, N, npoint, features, idx, output)
ctx.for_backwards = (idx, C, N)
return output
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
B, npoint = idx.size()
grad_features = Variable(torch.cuda.FloatTensor(B,C,N).zero_())
grad_out_data = grad_out.data.contiguous()
point_utils_cuda.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data)
return grad_features, None
gather_operation = GatherOperation.apply
def generate_rand_rotm(x_lim=5.0, y_lim=5.0, z_lim=180.0):
'''
Input:
x_lim
y_lim
z_lim
return:
rotm: [3,3]
'''
rand_z = np.random.uniform(low=-z_lim, high=z_lim)
rand_y = np.random.uniform(low=-y_lim, high=y_lim)
rand_x = np.random.uniform(low=-x_lim, high=x_lim)
rand_eul = np.array([rand_z, rand_y, rand_x])
r = Rotation.from_euler('zyx', rand_eul, degrees=True)
rotm = r.as_matrix()
return rotm
def generate_rand_trans(x_lim=10.0, y_lim=1.0, z_lim=0.1):
'''
Input:
x_lim
y_lim
z_lim
return:
trans [3]
'''
rand_x = np.random.uniform(low=-x_lim, high=x_lim)
rand_y = np.random.uniform(low=-y_lim, high=y_lim)
rand_z = np.random.uniform(low=-z_lim, high=z_lim)
rand_trans = np.array([rand_x, rand_y, rand_z])
return rand_trans
def apply_transform(pts, trans):
R = trans[:3, :3]
T = trans[:3, 3]
pts = pts @ R.T + T
return pts
def calc_error_np(pred_R, pred_t, gt_R, gt_t):
tmp = (np.trace(pred_R.transpose().dot(gt_R))-1)/2
tmp = np.clip(tmp, -1.0, 1.0)
L_rot = np.arccos(tmp)
L_rot = 180 * L_rot / np.pi
L_trans = np.linalg.norm(pred_t - gt_t)
return L_rot, L_trans
def set_seed(seed):
'''
Set random seed for torch, numpy and python
'''
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark=False
torch.backends.cudnn.deterministic=True | [
"numpy.clip",
"torch.manual_seed",
"point_utils_cuda.weighted_furthest_point_sampling_wrapper",
"torch.cuda.manual_seed_all",
"numpy.arccos",
"scipy.spatial.transform.Rotation.from_euler",
"torch.cuda.FloatTensor",
"point_utils_cuda.furthest_point_sampling_wrapper",
"numpy.linalg.norm",
"random.se... | [((2858, 2899), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-z_lim)', 'high': 'z_lim'}), '(low=-z_lim, high=z_lim)\n', (2875, 2899), True, 'import numpy as np\n'), ((2913, 2954), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-y_lim)', 'high': 'y_lim'}), '(low=-y_lim, high=y_lim)\n', (2930, 2954), True, 'import numpy as np\n'), ((2968, 3009), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-x_lim)', 'high': 'x_lim'}), '(low=-x_lim, high=x_lim)\n', (2985, 3009), True, 'import numpy as np\n'), ((3026, 3060), 'numpy.array', 'np.array', (['[rand_z, rand_y, rand_x]'], {}), '([rand_z, rand_y, rand_x])\n', (3034, 3060), True, 'import numpy as np\n'), ((3069, 3119), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""zyx"""', 'rand_eul'], {'degrees': '(True)'}), "('zyx', rand_eul, degrees=True)\n", (3088, 3119), False, 'from scipy.spatial.transform import Rotation\n'), ((3333, 3374), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-x_lim)', 'high': 'x_lim'}), '(low=-x_lim, high=x_lim)\n', (3350, 3374), True, 'import numpy as np\n'), ((3388, 3429), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-y_lim)', 'high': 'y_lim'}), '(low=-y_lim, high=y_lim)\n', (3405, 3429), True, 'import numpy as np\n'), ((3443, 3484), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-z_lim)', 'high': 'z_lim'}), '(low=-z_lim, high=z_lim)\n', (3460, 3484), True, 'import numpy as np\n'), ((3503, 3537), 'numpy.array', 'np.array', (['[rand_x, rand_y, rand_z]'], {}), '([rand_x, rand_y, rand_z])\n', (3511, 3537), True, 'import numpy as np\n'), ((3790, 3813), 'numpy.clip', 'np.clip', (['tmp', '(-1.0)', '(1.0)'], {}), '(tmp, -1.0, 1.0)\n', (3797, 3813), True, 'import numpy as np\n'), ((3826, 3840), 'numpy.arccos', 'np.arccos', (['tmp'], {}), '(tmp)\n', (3835, 3840), True, 'import numpy as np\n'), ((3887, 3916), 'numpy.linalg.norm', 'np.linalg.norm', (['(pred_t - gt_t)'], {}), '(pred_t - 
gt_t)\n', (3901, 3916), True, 'import numpy as np\n'), ((4032, 4049), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (4043, 4049), False, 'import random\n'), ((4055, 4075), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4069, 4075), True, 'import numpy as np\n'), ((4081, 4104), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (4098, 4104), False, 'import torch\n'), ((4113, 4138), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4136, 4138), False, 'import torch\n'), ((601, 632), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['B', 'npoint'], {}), '(B, npoint)\n', (621, 632), False, 'import torch\n'), ((698, 783), 'point_utils_cuda.furthest_point_sampling_wrapper', 'point_utils_cuda.furthest_point_sampling_wrapper', (['B', 'N', 'npoint', 'xyz', 'temp', 'output'], {}), '(B, N, npoint, xyz, temp,\n output)\n', (746, 783), False, 'import point_utils_cuda\n'), ((1315, 1346), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['B', 'npoint'], {}), '(B, npoint)\n', (1335, 1346), False, 'import torch\n'), ((1412, 1515), 'point_utils_cuda.weighted_furthest_point_sampling_wrapper', 'point_utils_cuda.weighted_furthest_point_sampling_wrapper', (['B', 'N', 'npoint', 'xyz', 'weights', 'temp', 'output'], {}), '(B, N, npoint, xyz,\n weights, temp, output)\n', (1469, 1515), False, 'import point_utils_cuda\n'), ((2062, 2098), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['B', 'C', 'npoint'], {}), '(B, C, npoint)\n', (2084, 2098), False, 'import torch\n'), ((2108, 2186), 'point_utils_cuda.gather_points_wrapper', 'point_utils_cuda.gather_points_wrapper', (['B', 'C', 'N', 'npoint', 'features', 'idx', 'output'], {}), '(B, C, N, npoint, features, idx, output)\n', (2146, 2186), False, 'import point_utils_cuda\n'), ((2506, 2610), 'point_utils_cuda.gather_points_grad_wrapper', 'point_utils_cuda.gather_points_grad_wrapper', (['B', 'C', 'N', 'npoint', 'grad_out_data', 'idx', 'grad_features.data'], {}), '(B, 
C, N, npoint, grad_out_data,\n idx, grad_features.data)\n', (2549, 2610), False, 'import point_utils_cuda\n'), ((4148, 4176), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (4170, 4176), False, 'import torch\n'), ((4186, 4218), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (4212, 4218), False, 'import torch\n'), ((648, 676), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['B', 'N'], {}), '(B, N)\n', (670, 676), False, 'import torch\n'), ((1362, 1390), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['B', 'N'], {}), '(B, N)\n', (1384, 1390), False, 'import torch\n'), ((2408, 2439), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['B', 'C', 'N'], {}), '(B, C, N)\n', (2430, 2439), False, 'import torch\n')] |
import os
import random
import numpy as np
from PIL import Image, ImageFilter, ImageDraw
from computer_text_generator import ComputerTextGenerator
from elastic_transform import ElasticTransform
from roi_rotator import RoiRotator
try:
from handwritten_text_generator import HandwrittenTextGenerator
except ImportError as e:
print('Missing modules for handwritten text generation.')
from background_generator import BackgroundGenerator
from distorsion_generator import DistorsionGenerator
class FakeTextDataGenerator(object):
@classmethod
def generate_from_tuple(cls, t):
"""
Same as generate, but takes all parameters as one tuple
"""
return cls.generate(*t)
@classmethod
def draw_bounding_boxes(cls, image_dst, rois):
drawbbox = ImageDraw.Draw(image_dst)
for roi in rois:
drawbbox.rectangle(roi, outline=0, fill=None) # Only works for grayscale images need outline=(0,0,0) for color
@classmethod
def generate(cls, index, text, fonts, out_dir, height, random_height, extension, skewing_angle, random_skew,
blur, random_blur, background_type, random_bg, distorsion_type, distorsion_orientation,
is_handwritten, name_format, width, random_width, alignment, bounding_box, view_bounding_box, random_alignment, text_color=-1):
image = None
#########################################################################
# Randomly determine height between height and random_height variables #
#########################################################################
if random_height > height:
height = random.randint(height, random_height)
##########################
# Create picture of text #
##########################
if is_handwritten:
image = HandwrittenTextGenerator.generate(text)
else:
image, rois = ComputerTextGenerator.generate(text, fonts, text_color, height, bounding_box)
random_angle = random.randint(0-skewing_angle, skewing_angle)
rotated_img = image.rotate(skewing_angle if not random_skew else random_angle, expand=1)
if bounding_box:
rois = RoiRotator.compute(rois, random_angle, image.size, rotated_img.size)
#############################
# Apply distorsion to image #
#############################
if distorsion_type == 0:
distorted_img = rotated_img # Mind = blown
elif distorsion_type == 1:
distorted_img = DistorsionGenerator.sin(
rotated_img,
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
)
elif distorsion_type == 2:
distorted_img = DistorsionGenerator.cos(
rotated_img,
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
)
else:
distorted_img = DistorsionGenerator.random(
rotated_img,
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)
)
##################################
# Resize image to desired format #
##################################
old_width = distorted_img.size[0]
old_height = distorted_img.size[1]
new_width = int(float(distorted_img.size[0]) * (float(height) / float(distorted_img.size[1])))
resized_img = distorted_img.resize((new_width, height - 10), Image.ANTIALIAS)
x_factor = new_width / old_width
y_factor = (height - 10) / old_height
if bounding_box:
i = 0
for roi in rois:
rois[i] = (np.array(roi) * np.array([x_factor, y_factor, x_factor, y_factor])).astype(int)
i += 1
if width > 0 and random_width > width:
background_width = new_width + random.randint(width,random_width)
elif width > 0:
background_width = width
else:
background_width = new_width + 10
#############################
# Generate background image #
#############################
if random_bg:
background_type = random.randint(0,2)
if background_type == 0:
background = BackgroundGenerator.gaussian_noise(height, background_width)
elif background_type == 1:
background = BackgroundGenerator.plain_white(height, background_width)
elif background_type == 2:
background = BackgroundGenerator.quasicrystal(height, background_width)
else:
background = BackgroundGenerator.picture(height, background_width)
#############################
# Place text with alignment #
#############################
new_text_width, _ = resized_img.size
if random_alignment:
alignment = random.randint(0,2)
if alignment == 0:
x_offset = 5
background.paste(resized_img, (5, 5), resized_img)
elif alignment == 1:
x_offset = int(background_width / 2 - new_text_width / 2)
background.paste(resized_img, (x_offset, 5), resized_img)
else:
x_offset = background_width - new_text_width - 5
background.paste(resized_img, (x_offset, 5), resized_img)
if bounding_box:
i = 0
for roi in rois:
rois[i] = (np.array(roi) + np.array([x_offset, 5, x_offset, 5])).tolist()
i += 1
##################################
# Apply gaussian blur #
##################################
blur_image = background.filter(
ImageFilter.GaussianBlur(
radius=(blur if not random_blur else random.randint(0, blur))
)
)
##################################
# Apply elastic transform #
##################################
final_image = ElasticTransform.generate(blur_image, random.randint(0, 20) / 100 , random.randint(1, 100) / 100)
#################################################
# Apply width reduction to get skinny characters#
#################################################
# width_factor = random.randint(2,3)
#
# final_width = final_image.size[0]
# final_height = final_image.size[1]
# adjusted_width = int(final_width/width_factor)
#
# final_image = final_image.resize((adjusted_width, final_height))
#
# x_factor = adjusted_width / final_width
# y_factor = 1
#
# i = 0
# for roi in rois:
# rois[i] = (np.array(roi) * np.array([x_factor, y_factor, x_factor, y_factor])).astype(int).tolist()
# i += 1
##################################
# Downsample to smaller image #
##################################
# width, height = final_image.size
# resize_factor = random.randint(20,30) / height
# final_image = final_image.resize((int(width * resize_factor), int(height * resize_factor)))
# drawrois = ImageDraw.Draw(final_image)
# for roi in rois:
# drawrois.rectangle(roi, outline=0, fill=None)
##################################
# Draw ROIs as a test #
##################################
if bounding_box and view_bounding_box:
FakeTextDataGenerator.draw_bounding_boxes(final_image, rois)
#####################################
# Generate name for resulting image #
#####################################
if name_format == 0:
image_name = '{}_{}.{}'.format(text, str(index), extension)
elif name_format == 1:
image_name = '{}_{}.{}'.format(str(index), text, extension)
elif name_format == 2:
image_name = '{}.{}'.format(str(index),extension)
else:
print('{} is not a valid name format. Using default.'.format(name_format))
image_name = '{}_{}.{}'.format(text, str(index), extension)
# Save the image
final_image.convert('RGB').save(os.path.join(out_dir, image_name))
return rois, index
| [
"distorsion_generator.DistorsionGenerator.sin",
"roi_rotator.RoiRotator.compute",
"computer_text_generator.ComputerTextGenerator.generate",
"background_generator.BackgroundGenerator.picture",
"handwritten_text_generator.HandwrittenTextGenerator.generate",
"background_generator.BackgroundGenerator.quasicry... | [((802, 827), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image_dst'], {}), '(image_dst)\n', (816, 827), False, 'from PIL import Image, ImageFilter, ImageDraw\n'), ((2057, 2105), 'random.randint', 'random.randint', (['(0 - skewing_angle)', 'skewing_angle'], {}), '(0 - skewing_angle, skewing_angle)\n', (2071, 2105), False, 'import random\n'), ((1684, 1721), 'random.randint', 'random.randint', (['height', 'random_height'], {}), '(height, random_height)\n', (1698, 1721), False, 'import random\n'), ((1875, 1914), 'handwritten_text_generator.HandwrittenTextGenerator.generate', 'HandwrittenTextGenerator.generate', (['text'], {}), '(text)\n', (1908, 1914), False, 'from handwritten_text_generator import HandwrittenTextGenerator\n'), ((1955, 2032), 'computer_text_generator.ComputerTextGenerator.generate', 'ComputerTextGenerator.generate', (['text', 'fonts', 'text_color', 'height', 'bounding_box'], {}), '(text, fonts, text_color, height, bounding_box)\n', (1985, 2032), False, 'from computer_text_generator import ComputerTextGenerator\n'), ((2247, 2315), 'roi_rotator.RoiRotator.compute', 'RoiRotator.compute', (['rois', 'random_angle', 'image.size', 'rotated_img.size'], {}), '(rois, random_angle, image.size, rotated_img.size)\n', (2265, 2315), False, 'from roi_rotator import RoiRotator\n'), ((4561, 4581), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (4575, 4581), False, 'import random\n'), ((4640, 4700), 'background_generator.BackgroundGenerator.gaussian_noise', 'BackgroundGenerator.gaussian_noise', (['height', 'background_width'], {}), '(height, background_width)\n', (4674, 4700), False, 'from background_generator import BackgroundGenerator\n'), ((5247, 5267), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (5261, 5267), False, 'import random\n'), ((8512, 8545), 'os.path.join', 'os.path.join', (['out_dir', 'image_name'], {}), '(out_dir, image_name)\n', (8524, 8545), False, 
'import os\n'), ((2584, 2769), 'distorsion_generator.DistorsionGenerator.sin', 'DistorsionGenerator.sin', (['rotated_img'], {'vertical': '(distorsion_orientation == 0 or distorsion_orientation == 2)', 'horizontal': '(distorsion_orientation == 1 or distorsion_orientation == 2)'}), '(rotated_img, vertical=distorsion_orientation == 0 or\n distorsion_orientation == 2, horizontal=distorsion_orientation == 1 or \n distorsion_orientation == 2)\n', (2607, 2769), False, 'from distorsion_generator import DistorsionGenerator\n'), ((4226, 4261), 'random.randint', 'random.randint', (['width', 'random_width'], {}), '(width, random_width)\n', (4240, 4261), False, 'import random\n'), ((4761, 4818), 'background_generator.BackgroundGenerator.plain_white', 'BackgroundGenerator.plain_white', (['height', 'background_width'], {}), '(height, background_width)\n', (4792, 4818), False, 'from background_generator import BackgroundGenerator\n'), ((6374, 6395), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (6388, 6395), False, 'import random\n'), ((6404, 6426), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (6418, 6426), False, 'import random\n'), ((2890, 3075), 'distorsion_generator.DistorsionGenerator.cos', 'DistorsionGenerator.cos', (['rotated_img'], {'vertical': '(distorsion_orientation == 0 or distorsion_orientation == 2)', 'horizontal': '(distorsion_orientation == 1 or distorsion_orientation == 2)'}), '(rotated_img, vertical=distorsion_orientation == 0 or\n distorsion_orientation == 2, horizontal=distorsion_orientation == 1 or \n distorsion_orientation == 2)\n', (2913, 3075), False, 'from distorsion_generator import DistorsionGenerator\n'), ((3175, 3364), 'distorsion_generator.DistorsionGenerator.random', 'DistorsionGenerator.random', (['rotated_img'], {'vertical': '(distorsion_orientation == 0 or distorsion_orientation == 2)', 'horizontal': '(distorsion_orientation == 1 or distorsion_orientation == 2)'}), '(rotated_img, 
vertical=distorsion_orientation == \n 0 or distorsion_orientation == 2, horizontal=distorsion_orientation == \n 1 or distorsion_orientation == 2)\n', (3201, 3364), False, 'from distorsion_generator import DistorsionGenerator\n'), ((4879, 4937), 'background_generator.BackgroundGenerator.quasicrystal', 'BackgroundGenerator.quasicrystal', (['height', 'background_width'], {}), '(height, background_width)\n', (4911, 4937), False, 'from background_generator import BackgroundGenerator\n'), ((4977, 5030), 'background_generator.BackgroundGenerator.picture', 'BackgroundGenerator.picture', (['height', 'background_width'], {}), '(height, background_width)\n', (5004, 5030), False, 'from background_generator import BackgroundGenerator\n'), ((6142, 6165), 'random.randint', 'random.randint', (['(0)', 'blur'], {}), '(0, blur)\n', (6156, 6165), False, 'import random\n'), ((4032, 4045), 'numpy.array', 'np.array', (['roi'], {}), '(roi)\n', (4040, 4045), True, 'import numpy as np\n'), ((4048, 4098), 'numpy.array', 'np.array', (['[x_factor, y_factor, x_factor, y_factor]'], {}), '([x_factor, y_factor, x_factor, y_factor])\n', (4056, 4098), True, 'import numpy as np\n'), ((5797, 5810), 'numpy.array', 'np.array', (['roi'], {}), '(roi)\n', (5805, 5810), True, 'import numpy as np\n'), ((5813, 5849), 'numpy.array', 'np.array', (['[x_offset, 5, x_offset, 5]'], {}), '([x_offset, 5, x_offset, 5])\n', (5821, 5849), True, 'import numpy as np\n')] |
import duckdb
import pandas as pd
import numpy as np
import os, psutil
try:
import pyarrow as pa
can_run = True
except:
can_run = False
def check_memory(function_to_check):
process = psutil.Process(os.getpid())
mem_usage = process.memory_info().rss/(10**9)
for __ in range(100):
function_to_check()
cur_mem_usage = process.memory_info().rss/(10**9)
# This seems a good empirical value
assert cur_mem_usage/3 < mem_usage
def from_df():
df = pd.DataFrame({"x": np.random.rand(1_000_000)})
return duckdb.from_df(df)
def from_arrow():
data = pa.array(np.random.rand(1_000_000), type=pa.float32())
arrow_table = pa.Table.from_arrays([data],['a'])
duckdb.from_arrow(arrow_table)
class TestRelationDependencyMemoryLeak(object):
def test_from_arrow_leak(self, duckdb_cursor):
if not can_run:
return
check_memory(from_arrow)
def test_from_df_leak(self, duckdb_cursor):
check_memory(from_df)
def test_relation_view_leak(self, duckdb_cursor):
rel = from_df()
rel.create_view("bla")
duckdb.default_connection.unregister("bla")
assert rel.query("bla", "select count(*) from bla").fetchone()[0] == 1_000_000
| [
"duckdb.from_arrow",
"duckdb.from_df",
"numpy.random.rand",
"duckdb.default_connection.unregister",
"pyarrow.float32",
"os.getpid",
"pyarrow.Table.from_arrays"
] | [((548, 566), 'duckdb.from_df', 'duckdb.from_df', (['df'], {}), '(df)\n', (562, 566), False, 'import duckdb\n'), ((671, 706), 'pyarrow.Table.from_arrays', 'pa.Table.from_arrays', (['[data]', "['a']"], {}), "([data], ['a'])\n", (691, 706), True, 'import pyarrow as pa\n'), ((710, 740), 'duckdb.from_arrow', 'duckdb.from_arrow', (['arrow_table'], {}), '(arrow_table)\n', (727, 740), False, 'import duckdb\n'), ((215, 226), 'os.getpid', 'os.getpid', ([], {}), '()\n', (224, 226), False, 'import os, psutil\n'), ((607, 630), 'numpy.random.rand', 'np.random.rand', (['(1000000)'], {}), '(1000000)\n', (621, 630), True, 'import numpy as np\n'), ((1114, 1157), 'duckdb.default_connection.unregister', 'duckdb.default_connection.unregister', (['"""bla"""'], {}), "('bla')\n", (1150, 1157), False, 'import duckdb\n'), ((509, 532), 'numpy.random.rand', 'np.random.rand', (['(1000000)'], {}), '(1000000)\n', (523, 532), True, 'import numpy as np\n'), ((639, 651), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (649, 651), True, 'import pyarrow as pa\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: 10_matrix_multiply.ipynb (unless otherwise specified).
__all__ = ['dotp', 'mmult']
# Cell
def dotp(v1, v2):
"Get dot product of 2 vectors"
sum = 0
for i in range(0, len(v1)):
sum += v1[i] * v2[i]
return sum
# Cell
def mmult(m1, m2):
"Get product of 2 matrices using [dotp](/mmult#dotp) "
import numpy as np
assert m1.shape[1] == m2.shape[0]
vsize = m1.shape[1]
pmatrix = np.zeros((m1.shape[0],m2.shape[1]))
for i in range(0,m1.shape[0]):
for j in range(0,m2.shape[1]):
nv = dotp(m1[i,:], m2[:,j])
pmatrix[i,j] = nv
return pmatrix | [
"numpy.zeros"
] | [((462, 498), 'numpy.zeros', 'np.zeros', (['(m1.shape[0], m2.shape[1])'], {}), '((m1.shape[0], m2.shape[1]))\n', (470, 498), True, 'import numpy as np\n')] |
# start import modules
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import emcee
# end import modules
savefig=True
# start generate data
np.random.seed(1) # for repeatability
F_true = 1000 # true flux, say number of photons measured in 1 second
N = 50 # number of measurements
F = stats.poisson(F_true).rvs(N)
# N measurements of the flux
e = np.sqrt(F) # errors on Poisson counts estimated via square root
# end generate data
# start visualize data
fig, ax = plt.subplots()
ax.errorbar(F, np.arange(N), xerr=e, fmt='ok', ecolor='gray', alpha=0.5)
ax.vlines([F_true], 0, N, linewidth=5, alpha=0.2)
ax.set_xlabel("Flux");ax.set_ylabel("measurement number");
# end visualize data
if savefig:
fig.savefig('../fig/singlephotoncount_fig_1.png')
# start frequentist
w=1./e**2
print(f"""
F_true = {F_true}
F_est = {(w * F).sum() / w.sum():.0f} +/- { w.sum() ** -0.5:.0f} (based on {N} measurements) """)
# end frequentist
# start bayesian setup
def log_prior(theta):
if theta>0 and theta<10000:
return 0 # flat prior
else:
return -np.inf
def log_likelihood(theta, F, e):
return -0.5 * np.sum(np.log(2 * np.pi * e ** 2) \
+ (F - theta[0]) ** 2 / e ** 2)
def log_posterior(theta, F, e):
return log_prior(theta) + log_likelihood(theta, F, e)
# end bayesian setup
# start bayesian mcmc
ndim = 1 # number of parameters in the model
nwalkers = 50 # number of MCMC walkers
nwarm = 1000 # "warm-up" period to let chains stabilize
nsteps = 2000 # number of MCMC steps to take
# we'll start at random locations between 0 and 2000
starting_guesses = 2000 * np.random.rand(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[F,e])
sampler.run_mcmc(starting_guesses, nsteps)
# Shape of sampler.chain = (nwalkers, nsteps, ndim)
# Flatten the sampler chain and discard warm-in points:
samples = sampler.chain[:, nwarm:, :].reshape((-1, ndim))
# end bayesian mcmc
# start visualize bayesian
fig, ax = plt.subplots()
ax.hist(samples, bins=50, histtype="stepfilled", alpha=0.3, density=True)
ax.set_xlabel(r'$F_\mathrm{est}$')
ax.set_ylabel(r'$p(F_\mathrm{est}|D,I)$');
# end visualize bayesian
if savefig:
fig.savefig('../fig/singlephotoncount_fig_2.png')
# plot a best-fit Gaussian
F_est = np.linspace(975, 1025)
pdf = stats.norm(np.mean(samples), np.std(samples)).pdf(F_est)
ax.plot(F_est, pdf, '-k')
# start bayesian CI
sampper=np.percentile(samples, [2.5, 16.5, 50, 83.5, 97.5],axis=0).flatten()
print(f"""
F_true = {F_true}
Based on {N} measurements the posterior point estimates are:
...F_est = { np.mean(samples):.0f} +/- { np.std(samples):.0f}
or using credibility intervals:
...F_est = {sampper[2]:.0f} (posterior median)
...F_est in [{sampper[1]:.0f}, {sampper[3]:.0f}] (67% credibility interval)
...F_est in [{sampper[0]:.0f}, {sampper[4]:.0f}] (95% credibility interval) """)
# end bayesian CI
if not savefig:
plt.show()
| [
"numpy.mean",
"numpy.sqrt",
"numpy.random.rand",
"numpy.log",
"emcee.EnsembleSampler",
"numpy.linspace",
"scipy.stats.poisson",
"numpy.random.seed",
"numpy.std",
"numpy.percentile",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((169, 186), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (183, 186), True, 'import numpy as np\n'), ((428, 438), 'numpy.sqrt', 'np.sqrt', (['F'], {}), '(F)\n', (435, 438), True, 'import numpy as np\n'), ((554, 568), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (566, 568), True, 'import matplotlib.pyplot as plt\n'), ((1780, 1845), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'log_posterior'], {'args': '[F, e]'}), '(nwalkers, ndim, log_posterior, args=[F, e])\n', (1801, 1845), False, 'import emcee\n'), ((2113, 2127), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2125, 2127), True, 'import matplotlib.pyplot as plt\n'), ((2408, 2430), 'numpy.linspace', 'np.linspace', (['(975)', '(1025)'], {}), '(975, 1025)\n', (2419, 2430), True, 'import numpy as np\n'), ((584, 596), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (593, 596), True, 'import numpy as np\n'), ((1739, 1769), 'numpy.random.rand', 'np.random.rand', (['nwalkers', 'ndim'], {}), '(nwalkers, ndim)\n', (1753, 1769), True, 'import numpy as np\n'), ((3056, 3066), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3064, 3066), True, 'import matplotlib.pyplot as plt\n'), ((343, 364), 'scipy.stats.poisson', 'stats.poisson', (['F_true'], {}), '(F_true)\n', (356, 364), False, 'from scipy import stats\n'), ((2549, 2608), 'numpy.percentile', 'np.percentile', (['samples', '[2.5, 16.5, 50, 83.5, 97.5]'], {'axis': '(0)'}), '(samples, [2.5, 16.5, 50, 83.5, 97.5], axis=0)\n', (2562, 2608), True, 'import numpy as np\n'), ((2448, 2464), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (2455, 2464), True, 'import numpy as np\n'), ((2466, 2481), 'numpy.std', 'np.std', (['samples'], {}), '(samples)\n', (2472, 2481), True, 'import numpy as np\n'), ((2728, 2744), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (2735, 2744), True, 'import numpy as np\n'), ((2756, 2771), 'numpy.std', 'np.std', (['samples'], {}), 
'(samples)\n', (2762, 2771), True, 'import numpy as np\n'), ((1215, 1241), 'numpy.log', 'np.log', (['(2 * np.pi * e ** 2)'], {}), '(2 * np.pi * e ** 2)\n', (1221, 1241), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, mse=True, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, device="cpu"):
super(NLayerDiscriminator, self).__init__()
self.device = device
self.to(device)
kw = 4
padw = int(np.ceil((kw-1)/2))
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw),
# TODO: use InstanceNorm
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw),
# TODO: useInstanceNorm
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
self.model = nn.Sequential(*sequence)
if mse:
self.loss = nn.MSELoss()
else:
self.loss = nn.CrossEntropyLoss()
def forward(self, input):
# if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
# return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
# else:
return self.model(input)
def get_loss_D(self, x, pred_res, label):
if len(label.shape) == 3:
label = label.unsqueeze(1)
# x: (1, 2, 256, 256)
# pred_res: (1, 1, 256, 256)
fake_AB = torch.cat((x, pred_res), 1)
pred_fake = self.forward(fake_AB.detach())# detach 是为了只更新d的参数,而不去更新分割网络的参数!
fake_label = torch.zeros_like(pred_fake, device=self.device)
loss_D_fake = self.loss(pred_fake, fake_label)
# Real
real_AB = torch.cat((x, label), 1)
pred_real = self.forward(real_AB)
real_label = torch.ones_like(pred_real, device=self.device)
loss_D_real = self.loss(pred_real, real_label)
# Combined loss
loss_D = (loss_D_fake + loss_D_real) * 0.5
return loss_D
if __name__ == '__main__':
t1 = torch.rand(1, 2, 128, 128)
label = torch.rand(1, 1, 128, 128)
pred_res = torch.rand(1, 1, 128, 128)
model = NLayerDiscriminator(input_nc=3)
# out = model(t1)
# print(out.shape)
out_loss = model.get_loss_D(t1, pred_res, label)
print(out_loss)
print(out_loss.shape) | [
"torch.ones_like",
"numpy.ceil",
"torch.nn.CrossEntropyLoss",
"torch.rand",
"torch.nn.LeakyReLU",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.MSELoss",
"torch.zeros_like",
"torch.cat"
] | [((2640, 2666), 'torch.rand', 'torch.rand', (['(1)', '(2)', '(128)', '(128)'], {}), '(1, 2, 128, 128)\n', (2650, 2666), False, 'import torch\n'), ((2679, 2705), 'torch.rand', 'torch.rand', (['(1)', '(1)', '(128)', '(128)'], {}), '(1, 1, 128, 128)\n', (2689, 2705), False, 'import torch\n'), ((2721, 2747), 'torch.rand', 'torch.rand', (['(1)', '(1)', '(128)', '(128)'], {}), '(1, 1, 128, 128)\n', (2731, 2747), False, 'import torch\n'), ((1461, 1485), 'torch.nn.Sequential', 'nn.Sequential', (['*sequence'], {}), '(*sequence)\n', (1474, 1485), True, 'import torch.nn as nn\n'), ((2047, 2074), 'torch.cat', 'torch.cat', (['(x, pred_res)', '(1)'], {}), '((x, pred_res), 1)\n', (2056, 2074), False, 'import torch\n'), ((2180, 2227), 'torch.zeros_like', 'torch.zeros_like', (['pred_fake'], {'device': 'self.device'}), '(pred_fake, device=self.device)\n', (2196, 2227), False, 'import torch\n'), ((2316, 2340), 'torch.cat', 'torch.cat', (['(x, label)', '(1)'], {}), '((x, label), 1)\n', (2325, 2340), False, 'import torch\n'), ((2404, 2450), 'torch.ones_like', 'torch.ones_like', (['pred_real'], {'device': 'self.device'}), '(pred_real, device=self.device)\n', (2419, 2450), False, 'import torch\n'), ((405, 426), 'numpy.ceil', 'np.ceil', (['((kw - 1) / 2)'], {}), '((kw - 1) / 2)\n', (412, 426), True, 'import numpy as np\n'), ((457, 521), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_nc', 'ndf'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw'}), '(input_nc, ndf, kernel_size=kw, stride=2, padding=padw)\n', (466, 521), True, 'import torch.nn as nn\n'), ((535, 558), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (547, 558), True, 'import torch.nn as nn\n'), ((1118, 1206), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult_prev)', '(ndf * nf_mult)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw'}), '(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1,\n padding=padw)\n', (1127, 1206), True, 'import torch.nn as nn\n'), ((1313, 1336), 
'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (1325, 1336), True, 'import torch.nn as nn\n'), ((1369, 1436), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult)', '(1)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw'}), '(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)\n', (1378, 1436), True, 'import torch.nn as nn\n'), ((1526, 1538), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1536, 1538), True, 'import torch.nn as nn\n'), ((1577, 1598), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1596, 1598), True, 'import torch.nn as nn\n'), ((764, 852), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult_prev)', '(ndf * nf_mult)'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw'}), '(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2,\n padding=padw)\n', (773, 852), True, 'import torch.nn as nn\n'), ((976, 999), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (988, 999), True, 'import torch.nn as nn\n')] |
import random
from typing import Iterable
from unittest import TestCase
import gym
import numpy as np
from envs.connect_four_env import ResultType, Player
from gym_connect_four import ConnectFourEnv, RandomPlayer
BOARD_VALIDATION = np.array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, -1, 0, 0, 0],
[0, 0, -1, 1, 0, -1, 0],
[0, 0, 1, 1, 0, 1, 1],
[-1, -1, -1, -1, 0, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_WIN_ROW = np.array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, -1, 0, 0, 0],
[0, 0, -1, 1, 0, -1, 0],
[0, 0, 1, 1, 0, 1, 1],
[-1, -1, -1, -1, 0, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_WIN_COLUMN = np.array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, -1, 1, 0, -1, 0],
[0, 0, 1, 1, 0, 1, 1],
[-1, 0, -1, -1, 0, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_WIN_DIAGONAL = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, -1, 1, 0, -1, 0],
[0, 0, 1, 1, 1, 1, 1],
[-1, 1, -1, -1, 0, 1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_WIN_BDIAGONAL = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, -1, 1, 0, -1, 0],
[0, 0, 1, 1, 0, 1, 1],
[-1, 1, -1, -1, 0, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_AVAILABLE_0123 = np.array([[0, 0, 0, 0, -1, 1, -1],
[0, 0, 0, 1, 1, -1, 1],
[0, 0, -1, 1, 1, -1, -1],
[0, 0, 1, 1, 1, 1, 1],
[-1, 1, -1, -1, -1, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_AVAILABLE_2 = np.array([[1, 1, 0, -1, -1, 1, -1],
[1, 1, -1, 1, 1, -1, 1],
[1, 1, -1, 1, 1, -1, -1],
[1, 1, 1, 1, 1, 1, 1],
[-1, 1, -1, -1, -1, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_AVAILABLE_6 = np.array([[1, 1, 1, 1, -1, 1, 0],
[1, 1, -1, 1, 1, -1, 1],
[1, 1, -1, 1, 1, -1, -1],
[1, 1, 1, 1, 1, 1, 1],
[-1, 1, -1, -1, -1, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_AVAILABLE_NONE = np.array([[1, 1, 1, 1, -1, 1, 1],
[1, 1, -1, 1, 1, -1, 1],
[1, 1, -1, 1, 1, -1, -1],
[1, 1, 1, 1, 1, 1, 1],
[-1, 1, -1, -1, -1, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
class DeterministicPlayer(Player):
def __init__(self, env: 'ConnectFourEnv', moves: Iterable[int], name='DeterministicPlayer'):
super().__init__(env, name)
self._moves = moves
self.reset()
def reset(self):
self._moves_itr = iter(self._moves)
self.action_log = []
self.reward_log = []
self.done_log = []
self.states = []
self.l_states = []
self.l_new_states = []
def get_next_action(self, state: np.ndarray) -> int:
self.states.append(state)
next_move = next(self._moves_itr)
valid_moves = self.env.available_moves()
while next_move not in valid_moves:
next_move += 1
next_move %= self.env.action_space.n
return next_move
def learn(self, state, action, state_next, reward, done) -> None:
self.action_log.append(action)
self.reward_log.append(reward)
self.done_log.append(done)
self.l_states.append(state)
self.l_new_states.append(state_next)
class TestConnectFourEnv(TestCase):
def setUp(self) -> None:
self.env = gym.make('ConnectFour-v0')
def test_is_valid_action(self):
self.env = self.env
self.env.reset(BOARD_VALIDATION)
self.assertTrue(self.env.is_valid_action(0))
self.assertFalse(self.env.is_valid_action(3))
def test_is_win_state(self):
self.env = self.env
self.env.reset(BOARD_WIN_ROW)
self.assertTrue(self.env.is_win_state())
self.env.reset(BOARD_WIN_COLUMN)
self.assertTrue(self.env.is_win_state())
self.env.reset(BOARD_WIN_DIAGONAL)
self.assertTrue(self.env.is_win_state())
self.env.reset(BOARD_WIN_BDIAGONAL)
self.assertTrue(self.env.is_win_state())
def test_available_moves(self):
self.env = self.env
self.env.reset(BOARD_AVAILABLE_0123)
self.assertEqual(set(self.env.available_moves()), {0, 1, 2, 3})
self.env.reset(BOARD_AVAILABLE_2)
self.assertEqual(set(self.env.available_moves()), {2})
self.env.reset(BOARD_AVAILABLE_6)
self.assertEqual(set(self.env.available_moves()), {6})
self.env.reset(BOARD_AVAILABLE_NONE)
self.assertEqual(set(self.env.available_moves()), set([]))
def test_run_win_p1(self):
env = self.env
act_space = env.action_space.n
moves1 = [i % act_space for i in range(100)]
moves2 = [2 * i % act_space for i in range(1, 100)]
p1 = DeterministicPlayer(env=env, moves=moves1, name="P1")
p2 = DeterministicPlayer(env=env, moves=moves2, name="P2")
res = env.run(p1, p2, None)
self.assertEqual(ResultType.WIN1.value, res.value)
self.assertEqual(moves1[:11], p1.action_log)
self.assertEqual(moves2[:10], p2.action_log)
self.assertEqual([ConnectFourEnv.DEF_REWARD] * 10 + [ConnectFourEnv.WIN_REWARD], p1.reward_log)
self.assertEqual([ConnectFourEnv.DEF_REWARD] * 9 + [ConnectFourEnv.LOSS_REWARD], p2.reward_log)
self.assertEqual([False] * 10 + [True], p1.done_log)
self.assertEqual([False] * 9 + [True], p2.done_log)
np.testing.assert_array_equal(p1.l_states[1], p1.states[1])
np.testing.assert_array_equal(p1.l_new_states[0], p1.states[1])
np.testing.assert_array_equal(p1.l_states[-1], p1.states[-1])
np.testing.assert_array_equal(p1.l_new_states[-3], p1.states[-2])
np.testing.assert_array_equal(p1.l_states[-2], p1.states[-2])
np.testing.assert_array_equal(p2.l_new_states[0], p2.states[1])
np.testing.assert_array_equal(p2.l_states[1], p2.states[1])
np.testing.assert_array_equal(p2.l_states[-1], p2.states[-1])
np.testing.assert_array_equal(p2.l_new_states[-3], p2.states[-2])
np.testing.assert_array_equal(p2.l_states[-2], p2.states[-2])
def test_run_win_p2(self):
env = self.env
act_space = env.action_space.n
moves1 = [2 * i % act_space for i in range(100)]
moves2 = [(2 * i + 1) % act_space for i in range(0, 100)]
p1 = DeterministicPlayer(env=env, moves=moves1, name="P1")
p2 = DeterministicPlayer(env=env, moves=moves2, name="P2")
res = env.run(p1, p2, None)
self.assertEqual(ResultType.WIN2.value, res.value)
self.assertListEqual(moves1[:11], p1.action_log)
self.assertListEqual(moves2[:11], p2.action_log)
self.assertEqual([ConnectFourEnv.DEF_REWARD] * 10 + [ConnectFourEnv.LOSS_REWARD], p1.reward_log)
self.assertEqual([ConnectFourEnv.DEF_REWARD] * 10 + [ConnectFourEnv.WIN_REWARD], p2.reward_log)
self.assertEqual([False] * 10 + [True], p1.done_log)
self.assertEqual([False] * 10 + [True], p2.done_log)
def test_run_draw(self):
random.seed(0)
env = self.env
act_space = env.action_space.n
moves2 = [(2 * i + 1) % act_space for i in range(0, 100)]
p1 = RandomPlayer(env=env, name="P1", seed=88)
p2 = DeterministicPlayer(env=env, moves=moves2, name="P2")
res = env.run(p1, p2, None)
self.assertEqual(ResultType.DRAW.value, res.value)
self.assertEqual([1, 3, 5, 0, 2, 4, 6, 1, 3, 5, 0, 2, 4, 6, 1, 3, 5, 0, 3, 4, 4], p2.action_log)
self.assertEqual([ConnectFourEnv.DEF_REWARD] * 20 + [ConnectFourEnv.DRAW_REWARD], p2.reward_log)
self.assertEqual([False] * 20 + [True], p2.done_log)
def test_reset(self):
env = self.env
env.run(RandomPlayer(env=env, seed=0), RandomPlayer(env=env, seed=1), None)
sum_steps = np.sum(np.sum(np.absolute(env.board)))
self.assertEqual(17, sum_steps)
env.reset()
sum_steps = np.sum(np.sum(np.absolute(env.board)))
self.assertEqual(0, sum_steps)
| [
"numpy.absolute",
"random.seed",
"numpy.array",
"gym_connect_four.RandomPlayer",
"gym.make",
"numpy.testing.assert_array_equal"
] | [((235, 403), 'numpy.array', 'np.array', (['[[0, 0, 0, 1, 0, 0, 0], [0, 0, 0, -1, 0, 0, 0], [0, 0, -1, 1, 0, -1, 0], [0,\n 0, 1, 1, 0, 1, 1], [-1, -1, -1, -1, 0, -1, -1], [1, 1, -1, 1, -1, 1, 1]]'], {}), '([[0, 0, 0, 1, 0, 0, 0], [0, 0, 0, -1, 0, 0, 0], [0, 0, -1, 1, 0, -\n 1, 0], [0, 0, 1, 1, 0, 1, 1], [-1, -1, -1, -1, 0, -1, -1], [1, 1, -1, 1,\n -1, 1, 1]])\n', (243, 403), True, 'import numpy as np\n'), ((512, 680), 'numpy.array', 'np.array', (['[[0, 0, 0, 1, 0, 0, 0], [0, 0, 0, -1, 0, 0, 0], [0, 0, -1, 1, 0, -1, 0], [0,\n 0, 1, 1, 0, 1, 1], [-1, -1, -1, -1, 0, -1, -1], [1, 1, -1, 1, -1, 1, 1]]'], {}), '([[0, 0, 0, 1, 0, 0, 0], [0, 0, 0, -1, 0, 0, 0], [0, 0, -1, 1, 0, -\n 1, 0], [0, 0, 1, 1, 0, 1, 1], [-1, -1, -1, -1, 0, -1, -1], [1, 1, -1, 1,\n -1, 1, 1]])\n', (520, 680), True, 'import numpy as np\n'), ((777, 942), 'numpy.array', 'np.array', (['[[0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, -1, 1, 0, -1, 0], [0,\n 0, 1, 1, 0, 1, 1], [-1, 0, -1, -1, 0, -1, -1], [1, 1, -1, 1, -1, 1, 1]]'], {}), '([[0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, -1, 1, 0, -1,\n 0], [0, 0, 1, 1, 0, 1, 1], [-1, 0, -1, -1, 0, -1, -1], [1, 1, -1, 1, -1,\n 1, 1]])\n', (785, 942), True, 'import numpy as np\n'), ((1057, 1221), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, -1, 1, 0, -1, 0], [0,\n 0, 1, 1, 1, 1, 1], [-1, 1, -1, -1, 0, 1, -1], [1, 1, -1, 1, -1, 1, 1]]'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, -1, 1, 0, -1,\n 0], [0, 0, 1, 1, 1, 1, 1], [-1, 1, -1, -1, 0, 1, -1], [1, 1, -1, 1, -1,\n 1, 1]])\n', (1065, 1221), True, 'import numpy as np\n'), ((1347, 1512), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, -1, 1, 0, -1, 0], [0,\n 0, 1, 1, 0, 1, 1], [-1, 1, -1, -1, 0, -1, -1], [1, 1, -1, 1, -1, 1, 1]]'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, -1, 1, 0, -1,\n 0], [0, 0, 1, 1, 0, 1, 1], [-1, 1, -1, -1, 0, -1, -1], [1, 1, -1, 1, -1,\n 1, 1]])\n', (1355, 
1512), True, 'import numpy as np\n'), ((1644, 1814), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, -1, 1, -1], [0, 0, 0, 1, 1, -1, 1], [0, 0, -1, 1, 1, -1, -1],\n [0, 0, 1, 1, 1, 1, 1], [-1, 1, -1, -1, -1, -1, -1], [1, 1, -1, 1, -1, 1, 1]\n ]'], {}), '([[0, 0, 0, 0, -1, 1, -1], [0, 0, 0, 1, 1, -1, 1], [0, 0, -1, 1, 1,\n -1, -1], [0, 0, 1, 1, 1, 1, 1], [-1, 1, -1, -1, -1, -1, -1], [1, 1, -1,\n 1, -1, 1, 1]])\n', (1652, 1814), True, 'import numpy as np\n'), ((1948, 2122), 'numpy.array', 'np.array', (['[[1, 1, 0, -1, -1, 1, -1], [1, 1, -1, 1, 1, -1, 1], [1, 1, -1, 1, 1, -1, -1\n ], [1, 1, 1, 1, 1, 1, 1], [-1, 1, -1, -1, -1, -1, -1], [1, 1, -1, 1, -1,\n 1, 1]]'], {}), '([[1, 1, 0, -1, -1, 1, -1], [1, 1, -1, 1, 1, -1, 1], [1, 1, -1, 1, \n 1, -1, -1], [1, 1, 1, 1, 1, 1, 1], [-1, 1, -1, -1, -1, -1, -1], [1, 1, \n -1, 1, -1, 1, 1]])\n', (1956, 2122), True, 'import numpy as np\n'), ((2239, 2409), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, -1, 1, 0], [1, 1, -1, 1, 1, -1, 1], [1, 1, -1, 1, 1, -1, -1],\n [1, 1, 1, 1, 1, 1, 1], [-1, 1, -1, -1, -1, -1, -1], [1, 1, -1, 1, -1, 1, 1]\n ]'], {}), '([[1, 1, 1, 1, -1, 1, 0], [1, 1, -1, 1, 1, -1, 1], [1, 1, -1, 1, 1,\n -1, -1], [1, 1, 1, 1, 1, 1, 1], [-1, 1, -1, -1, -1, -1, -1], [1, 1, -1,\n 1, -1, 1, 1]])\n', (2247, 2409), True, 'import numpy as np\n'), ((2531, 2701), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, -1, 1, 1], [1, 1, -1, 1, 1, -1, 1], [1, 1, -1, 1, 1, -1, -1],\n [1, 1, 1, 1, 1, 1, 1], [-1, 1, -1, -1, -1, -1, -1], [1, 1, -1, 1, -1, 1, 1]\n ]'], {}), '([[1, 1, 1, 1, -1, 1, 1], [1, 1, -1, 1, 1, -1, 1], [1, 1, -1, 1, 1,\n -1, -1], [1, 1, 1, 1, 1, 1, 1], [-1, 1, -1, -1, -1, -1, -1], [1, 1, -1,\n 1, -1, 1, 1]])\n', (2539, 2701), True, 'import numpy as np\n'), ((3947, 3973), 'gym.make', 'gym.make', (['"""ConnectFour-v0"""'], {}), "('ConnectFour-v0')\n", (3955, 3973), False, 'import gym\n'), ((6001, 6060), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['p1.l_states[1]', 'p1.states[1]'], {}), 
'(p1.l_states[1], p1.states[1])\n', (6030, 6060), True, 'import numpy as np\n'), ((6069, 6132), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['p1.l_new_states[0]', 'p1.states[1]'], {}), '(p1.l_new_states[0], p1.states[1])\n', (6098, 6132), True, 'import numpy as np\n'), ((6142, 6203), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['p1.l_states[-1]', 'p1.states[-1]'], {}), '(p1.l_states[-1], p1.states[-1])\n', (6171, 6203), True, 'import numpy as np\n'), ((6212, 6277), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['p1.l_new_states[-3]', 'p1.states[-2]'], {}), '(p1.l_new_states[-3], p1.states[-2])\n', (6241, 6277), True, 'import numpy as np\n'), ((6286, 6347), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['p1.l_states[-2]', 'p1.states[-2]'], {}), '(p1.l_states[-2], p1.states[-2])\n', (6315, 6347), True, 'import numpy as np\n'), ((6357, 6420), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['p2.l_new_states[0]', 'p2.states[1]'], {}), '(p2.l_new_states[0], p2.states[1])\n', (6386, 6420), True, 'import numpy as np\n'), ((6429, 6488), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['p2.l_states[1]', 'p2.states[1]'], {}), '(p2.l_states[1], p2.states[1])\n', (6458, 6488), True, 'import numpy as np\n'), ((6498, 6559), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['p2.l_states[-1]', 'p2.states[-1]'], {}), '(p2.l_states[-1], p2.states[-1])\n', (6527, 6559), True, 'import numpy as np\n'), ((6568, 6633), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['p2.l_new_states[-3]', 'p2.states[-2]'], {}), '(p2.l_new_states[-3], p2.states[-2])\n', (6597, 6633), True, 'import numpy as np\n'), ((6642, 6703), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['p2.l_states[-2]', 'p2.states[-2]'], {}), '(p2.l_states[-2], p2.states[-2])\n', (6671, 6703), True, 'import numpy as np\n'), 
((7633, 7647), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (7644, 7647), False, 'import random\n'), ((7789, 7830), 'gym_connect_four.RandomPlayer', 'RandomPlayer', ([], {'env': 'env', 'name': '"""P1"""', 'seed': '(88)'}), "(env=env, name='P1', seed=88)\n", (7801, 7830), False, 'from gym_connect_four import ConnectFourEnv, RandomPlayer\n'), ((8330, 8359), 'gym_connect_four.RandomPlayer', 'RandomPlayer', ([], {'env': 'env', 'seed': '(0)'}), '(env=env, seed=0)\n', (8342, 8359), False, 'from gym_connect_four import ConnectFourEnv, RandomPlayer\n'), ((8361, 8390), 'gym_connect_four.RandomPlayer', 'RandomPlayer', ([], {'env': 'env', 'seed': '(1)'}), '(env=env, seed=1)\n', (8373, 8390), False, 'from gym_connect_four import ConnectFourEnv, RandomPlayer\n'), ((8432, 8454), 'numpy.absolute', 'np.absolute', (['env.board'], {}), '(env.board)\n', (8443, 8454), True, 'import numpy as np\n'), ((8551, 8573), 'numpy.absolute', 'np.absolute', (['env.board'], {}), '(env.board)\n', (8562, 8573), True, 'import numpy as np\n')] |
# Working with Bag of Words
#---------------------------------------
#
# In this example, we will download and preprocess the ham/spam
# text data. We will then use a one-hot-encoding to make a
# bag of words set of features to use in logistic regression.
#
# We will use these one-hot-vectors for logistic regression to
# predict if a text is spam or ham.
import tensorflow as tf
import matplotlib.pyplot as plt
import os
import numpy as np
import csv
import string
import requests
import io
from zipfile import ZipFile
from tensorflow.contrib import learn
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a graph session
sess = tf.Session()
# Check if data was downloaded, otherwise download it and save for future use
save_file_name = os.path.join('temp','temp_spam_data.csv')
if os.path.isfile(save_file_name):
    # Cached copy exists: read the [label, message] rows back from CSV.
    text_data = []
    with open(save_file_name, 'r') as temp_output_file:
        reader = csv.reader(temp_output_file)
        for row in reader:
            text_data.append(row)
else:
    # Download the SMS Spam Collection archive from the UCI repository.
    zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
    r = requests.get(zip_url)
    z = ZipFile(io.BytesIO(r.content))
    file = z.read('SMSSpamCollection')
    # Format Data
    text_data = file.decode()
    # Drop non-ASCII characters before splitting into lines.
    text_data = text_data.encode('ascii',errors='ignore')
    text_data = text_data.decode().split('\n')
    # Each line is "<label>\t<message>"; keep non-empty lines only.
    text_data = [x.split('\t') for x in text_data if len(x)>=1]
    # And write to csv
    with open(save_file_name, 'w') as temp_output_file:
        writer = csv.writer(temp_output_file)
        writer.writerows(text_data)
# NOTE(review): when re-reading the cached CSV, any blank row would make the
# x[1] lookup below raise IndexError -- confirm the cache has no empty rows.
texts = [x[1] for x in text_data]
target = [x[0] for x in text_data]
# Relabel 'spam' as 1, 'ham' as 0
target = [1 if x=='spam' else 0 for x in target]
# Normalize text
# Lower case
texts = [x.lower() for x in texts]
# Remove punctuation
texts = [''.join(c for c in x if c not in string.punctuation) for x in texts]
# Remove numbers
texts = [''.join(c for c in x if c not in '0123456789') for x in texts]
# Trim extra whitespace
texts = [' '.join(x.split()) for x in texts]
# Plot histogram of text lengths (truncated at 50 words for readability)
text_lengths = [len(x.split()) for x in texts]
text_lengths = [x for x in text_lengths if x < 50]
plt.hist(text_lengths, bins=25)
plt.title('Histogram of # of Words in Texts')
# Choose max text word length at 25
sentence_size = 25
min_word_freq = 3
# Setup vocabulary processor: maps each text to a fixed-length sequence of
# word ids, ignoring words seen fewer than min_word_freq times.
vocab_processor = learn.preprocessing.VocabularyProcessor(sentence_size, min_frequency=min_word_freq)
# Have to fit transform to get length of unique words.
vocab_processor.fit_transform(texts)
embedding_size = len(vocab_processor.vocabulary_)
# Split up data set into train/test (80/20, sampled without replacement)
train_indices = np.random.choice(len(texts), round(len(texts)*0.8), replace=False)
test_indices = np.array(list(set(range(len(texts))) - set(train_indices)))
# NOTE(review): 'ix in train_indices' scans a numpy array per element (O(n)
# each); converting both index arrays to sets first would give O(1) lookups.
texts_train = [x for ix, x in enumerate(texts) if ix in train_indices]
texts_test = [x for ix, x in enumerate(texts) if ix in test_indices]
target_train = [x for ix, x in enumerate(target) if ix in train_indices]
target_test = [x for ix, x in enumerate(target) if ix in test_indices]
# Setup Index Matrix for one-hot-encoding (row i is the one-hot for word i)
identity_mat = tf.diag(tf.ones(shape=[embedding_size]))
# Create variables for logistic regression (weights A and bias b)
A = tf.Variable(tf.random_normal(shape=[embedding_size,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Initialize placeholders: one sentence of word ids and one binary label
x_data = tf.placeholder(shape=[sentence_size], dtype=tf.int32)
y_target = tf.placeholder(shape=[1, 1], dtype=tf.float32)
# Text-Vocab Embedding: look up one-hot rows for each word id
x_embed = tf.nn.embedding_lookup(identity_mat, x_data)
# Summing the one-hot rows yields the bag-of-words count vector.
x_col_sums = tf.reduce_sum(x_embed, 0)
# Declare model operations
x_col_sums_2D = tf.expand_dims(x_col_sums, 0)
model_output = tf.add(tf.matmul(x_col_sums_2D, A), b)
# Declare loss function (Cross Entropy loss)
# NOTE(review): newer TF 1.x releases require keyword arguments here
# (labels=y_target, logits=model_output) -- confirm the installed version.
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(model_output, y_target))
# Prediction operation
prediction = tf.sigmoid(model_output)
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.001)
train_step = my_opt.minimize(loss)
# Initialize Variables
# NOTE(review): tf.initialize_all_variables() is deprecated in favor of
# tf.global_variables_initializer() on newer TF 1.x.
init = tf.initialize_all_variables()
sess.run(init)
# Start Logistic Regression: one SGD step per sentence
print('Starting Training Over {} Sentences.'.format(len(texts_train)))
loss_vec = []
train_acc_all = []
train_acc_avg = []
for ix, t in enumerate(vocab_processor.fit_transform(texts_train)):
    y_data = [[target_train[ix]]]
    sess.run(train_step, feed_dict={x_data: t, y_target: y_data})
    temp_loss = sess.run(loss, feed_dict={x_data: t, y_target: y_data})
    loss_vec.append(temp_loss)
    if (ix+1)%10==0:
        print('Training Observation #' + str(ix+1) + ': Loss = ' + str(temp_loss))
    # Keep trailing average of past 50 observations accuracy
    # Get prediction of single observation
    [[temp_pred]] = sess.run(prediction, feed_dict={x_data:t, y_target:y_data})
    # Get True/False if prediction is accurate
    train_acc_temp = target_train[ix]==np.round(temp_pred)
    train_acc_all.append(train_acc_temp)
    if len(train_acc_all) >= 50:
        train_acc_avg.append(np.mean(train_acc_all[-50:]))
# Get test set accuracy
# NOTE(review): fit_transform here re-fits the vocabulary on the test texts;
# vocab_processor.transform(...) would reuse the training vocabulary instead
# -- confirm which behavior is intended.
print('Getting Test Set Accuracy For {} Sentences.'.format(len(texts_test)))
test_acc_all = []
for ix, t in enumerate(vocab_processor.fit_transform(texts_test)):
    y_data = [[target_test[ix]]]
    if (ix+1)%50==0:
        print('Test Observation #' + str(ix+1))
    # Keep trailing average of past 50 observations accuracy
    # Get prediction of single observation
    [[temp_pred]] = sess.run(prediction, feed_dict={x_data:t, y_target:y_data})
    # Get True/False if prediction is accurate
    test_acc_temp = target_test[ix]==np.round(temp_pred)
    test_acc_all.append(test_acc_temp)
print('\nOverall Test Accuracy: {}'.format(np.mean(test_acc_all)))
# Plot training accuracy over time
plt.plot(range(len(train_acc_avg)), train_acc_avg, 'k-', label='Train Accuracy')
plt.title('Avg Training Acc Over Past 50 Generations')
plt.xlabel('Generation')
plt.ylabel('Training Accuracy')
plt.show() | [
"matplotlib.pyplot.hist",
"tensorflow.python.framework.ops.reset_default_graph",
"matplotlib.pyplot.ylabel",
"tensorflow.reduce_sum",
"io.BytesIO",
"tensorflow.nn.embedding_lookup",
"numpy.mean",
"tensorflow.random_normal",
"tensorflow.Session",
"tensorflow.placeholder",
"matplotlib.pyplot.xlabe... | [((607, 632), 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), '()\n', (630, 632), False, 'from tensorflow.python.framework import ops\n'), ((665, 677), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (675, 677), True, 'import tensorflow as tf\n'), ((774, 816), 'os.path.join', 'os.path.join', (['"""temp"""', '"""temp_spam_data.csv"""'], {}), "('temp', 'temp_spam_data.csv')\n", (786, 816), False, 'import os\n'), ((819, 849), 'os.path.isfile', 'os.path.isfile', (['save_file_name'], {}), '(save_file_name)\n', (833, 849), False, 'import os\n'), ((2243, 2274), 'matplotlib.pyplot.hist', 'plt.hist', (['text_lengths'], {'bins': '(25)'}), '(text_lengths, bins=25)\n', (2251, 2274), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2320), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram of # of Words in Texts"""'], {}), "('Histogram of # of Words in Texts')\n", (2284, 2320), True, 'import matplotlib.pyplot as plt\n'), ((2443, 2531), 'tensorflow.contrib.learn.preprocessing.VocabularyProcessor', 'learn.preprocessing.VocabularyProcessor', (['sentence_size'], {'min_frequency': 'min_word_freq'}), '(sentence_size, min_frequency=\n min_word_freq)\n', (2482, 2531), False, 'from tensorflow.contrib import learn\n'), ((3435, 3488), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[sentence_size]', 'dtype': 'tf.int32'}), '(shape=[sentence_size], dtype=tf.int32)\n', (3449, 3488), True, 'import tensorflow as tf\n'), ((3500, 3546), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, 1]', 'dtype': 'tf.float32'}), '(shape=[1, 1], dtype=tf.float32)\n', (3514, 3546), True, 'import tensorflow as tf\n'), ((3581, 3625), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['identity_mat', 'x_data'], {}), '(identity_mat, x_data)\n', (3603, 3625), True, 'import tensorflow as tf\n'), ((3639, 3664), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x_embed', '(0)'], {}), '(x_embed, 0)\n', 
(3652, 3664), True, 'import tensorflow as tf\n'), ((3709, 3738), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_col_sums', '(0)'], {}), '(x_col_sums, 0)\n', (3723, 3738), True, 'import tensorflow as tf\n'), ((3963, 3987), 'tensorflow.sigmoid', 'tf.sigmoid', (['model_output'], {}), '(model_output)\n', (3973, 3987), True, 'import tensorflow as tf\n'), ((4018, 4058), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.001)'], {}), '(0.001)\n', (4051, 4058), True, 'import tensorflow as tf\n'), ((4126, 4155), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (4153, 4155), True, 'import tensorflow as tf\n'), ((5960, 6014), 'matplotlib.pyplot.title', 'plt.title', (['"""Avg Training Acc Over Past 50 Generations"""'], {}), "('Avg Training Acc Over Past 50 Generations')\n", (5969, 6014), True, 'import matplotlib.pyplot as plt\n'), ((6015, 6039), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Generation"""'], {}), "('Generation')\n", (6025, 6039), True, 'import matplotlib.pyplot as plt\n'), ((6040, 6071), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Training Accuracy"""'], {}), "('Training Accuracy')\n", (6050, 6071), True, 'import matplotlib.pyplot as plt\n'), ((6072, 6082), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6080, 6082), True, 'import matplotlib.pyplot as plt\n'), ((1148, 1169), 'requests.get', 'requests.get', (['zip_url'], {}), '(zip_url)\n', (1160, 1169), False, 'import requests\n'), ((3215, 3246), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[embedding_size]'}), '(shape=[embedding_size])\n', (3222, 3246), True, 'import tensorflow as tf\n'), ((3308, 3351), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[embedding_size, 1]'}), '(shape=[embedding_size, 1])\n', (3324, 3351), True, 'import tensorflow as tf\n'), ((3368, 3398), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[1, 1]'}), '(shape=[1, 1])\n', (3384, 3398), True, 'import tensorflow 
as tf\n'), ((3761, 3788), 'tensorflow.matmul', 'tf.matmul', (['x_col_sums_2D', 'A'], {}), '(x_col_sums_2D, A)\n', (3770, 3788), True, 'import tensorflow as tf\n'), ((3861, 3924), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', (['model_output', 'y_target'], {}), '(model_output, y_target)\n', (3900, 3924), True, 'import tensorflow as tf\n'), ((943, 971), 'csv.reader', 'csv.reader', (['temp_output_file'], {}), '(temp_output_file)\n', (953, 971), False, 'import csv\n'), ((1186, 1207), 'io.BytesIO', 'io.BytesIO', (['r.content'], {}), '(r.content)\n', (1196, 1207), False, 'import io\n'), ((1566, 1594), 'csv.writer', 'csv.writer', (['temp_output_file'], {}), '(temp_output_file)\n', (1576, 1594), False, 'import csv\n'), ((4992, 5011), 'numpy.round', 'np.round', (['temp_pred'], {}), '(temp_pred)\n', (5000, 5011), True, 'import numpy as np\n'), ((5716, 5735), 'numpy.round', 'np.round', (['temp_pred'], {}), '(temp_pred)\n', (5724, 5735), True, 'import numpy as np\n'), ((5819, 5840), 'numpy.mean', 'np.mean', (['test_acc_all'], {}), '(test_acc_all)\n', (5826, 5840), True, 'import numpy as np\n'), ((5115, 5143), 'numpy.mean', 'np.mean', (['train_acc_all[-50:]'], {}), '(train_acc_all[-50:])\n', (5122, 5143), True, 'import numpy as np\n')] |
#System
import numpy as np
import sys
import os
import random
from glob import glob
from skimage import io
from PIL import Image
import random
import SimpleITK as sitk
#Torch
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Function
import torch
import torch.nn as nn
import torchvision.transforms as standard_transforms
#from torchvision.models import resnet18
import nibabel as nib
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
os.environ["CUDA_VISIBLE_DEVICES"] = "2"  # pin this run to GPU 2
ckpt_path = 'ckpt'
exp_name = 'lol'
# Create the checkpoint directory tree if it does not exist yet.
if not os.path.exists(ckpt_path):
    os.makedirs(ckpt_path)
if not os.path.exists(os.path.join(ckpt_path, exp_name)):
    os.makedirs(os.path.join(ckpt_path, exp_name))
# Experiment hyperparameters.
# NOTE(review): only 'batch_size' and 'lr' are read below; the remaining
# keys appear to be unused leftovers from a template.
args = {
    'num_class': 2,
    'num_gpus': 1,
    'start_epoch': 1,
    'num_epoch': 100,
    'batch_size': 8,
    'lr': 0.001,
    'lr_decay': 0.9,
    'weight_decay': 1e-4,
    'momentum': 0.9,
    'snapshot': '',
    'opt': 'adam',
    'crop_size1': 138,
}
class HEMDataset(Dataset):
    """Training-split dataset of precomputed CT feature vectors.

    Each line of ``text_dir`` is expected to look like ``"<id><sep><label>..."``
    where the first three characters are the case id and the fifth character
    is the class label.  ``__getitem__`` loads the per-case feature array from
    ``./Features_Train/<id>ct1_seg.npy`` and flattens every entry after index
    18 into a single 1-D float tensor.
    """
    def __init__(self, text_dir):
        # Fix: use a context manager so the index file is closed
        # deterministically (the previous version leaked the handle).
        with open(text_dir, 'r') as file_pairs:
            self.img_anno_pairs = file_pairs.readlines()
        self.req_file, self.req_tar = [], []
        for line in self.img_anno_pairs:
            # NOTE(review): [:-1] assumes every line ends with '\n'; a file
            # without a trailing newline would lose its last character.
            net = line[:-1]
            self.req_file.append(net[:3])  # 3-character case id
            self.req_tar.append(net[4])    # single-character class label
    def __len__(self):
        """Number of (features, label) pairs listed in the index file."""
        return len(self.req_tar)
    def __getitem__(self, index):
        """Return (feature_vector, label) tensors for sample ``index``."""
        _file_num = self.req_file[index]
        _gt = float(self.req_tar[index])
        req_npy = './Features_Train/' + str(_file_num) + 'ct1_seg.npy'
        _input_arr = np.load(req_npy, allow_pickle=True)
        # Flatten every feature entry past index 18 into one vector.
        _input = np.array([])
        for i in range(len(_input_arr)):
            if i > 18:
                _input = np.concatenate((_input, _input_arr[i]), axis=None)
        _input = torch.from_numpy(np.array(_input)).float()
        _target = torch.from_numpy(np.array(_gt)).long()
        return _input, _target
class HEMDataset_test(Dataset):
    """Validation-split dataset of precomputed CT feature vectors.

    Identical parsing to ``HEMDataset`` but loads feature arrays from
    ``./Features_Val/`` instead of ``./Features_Train/``.
    """
    def __init__(self, text_dir):
        # Fix: use a context manager so the index file is closed
        # deterministically (the previous version leaked the handle).
        with open(text_dir, 'r') as file_pairs:
            self.img_anno_pairs = file_pairs.readlines()
        self.req_file, self.req_tar = [], []
        for line in self.img_anno_pairs:
            # NOTE(review): [:-1] assumes every line ends with '\n'; a file
            # without a trailing newline would lose its last character.
            net = line[:-1]
            self.req_file.append(net[:3])  # 3-character case id
            self.req_tar.append(net[4])    # single-character class label
    def __len__(self):
        """Number of (features, label) pairs listed in the index file."""
        return len(self.req_tar)
    def __getitem__(self, index):
        """Return (feature_vector, label) tensors for sample ``index``."""
        _file_num = self.req_file[index]
        _gt = float(self.req_tar[index])
        req_npy = './Features_Val/' + str(_file_num) + 'ct1_seg.npy'
        _input_arr = np.load(req_npy, allow_pickle=True)
        # Flatten every feature entry past index 18 into one vector.
        _input = np.array([])
        for i in range(len(_input_arr)):
            if i > 18:
                _input = np.concatenate((_input, _input_arr[i]), axis=None)
        _input = torch.from_numpy(np.array(_input)).float()
        _target = torch.from_numpy(np.array(_gt)).long()
        return _input, _target
class Net(nn.Module):
    """Fully connected classifier: 4 input features -> 2 class logits."""
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(4, 2048)
        self.fc2 = nn.Linear(2048, 1024)
        self.fc3 = nn.Linear(1024, 2)
    def forward(self, x):
        # Two ReLU hidden layers; the last layer returns raw logits.
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
if __name__ == '__main__':
    train_file = 'Train_dir.txt'
    test_file = 'Val_dir.txt'
    train_dataset = HEMDataset(text_dir=train_file)
    test_dataset = HEMDataset_test(text_dir=test_file)
    train_loader = DataLoader(dataset=train_dataset, batch_size=args['batch_size'], shuffle=True, num_workers=2, drop_last=True)
    test_loader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=False, num_workers=2, drop_last=False)
    net = Net().cuda()
    # Fix: Net outputs raw logits (no log_softmax), so nn.NLLLoss was the
    # wrong criterion -- CrossEntropyLoss applies log_softmax internally and
    # takes the same (logits, class-index targets) arguments.
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=args['lr'])
    max_epoch = 50
    for epoch in range(max_epoch):
        # --- one epoch of training ---
        net.train()
        for batch_idx, data in enumerate(train_loader):
            inputs, labels = data
            inputs = Variable(inputs).cuda()
            labels = Variable(labels).cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
        # --- evaluation on the validation split ---
        net.eval()
        correct, total = 0, 0
        class_pred, class_gt = [], []
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(test_loader):
                inputs, targets = inputs.cuda(), targets.cuda()
                inputs, targets = Variable(inputs), Variable(targets)
                outputs = net(inputs)
                _, predicted = torch.max(outputs.data, 1)
                class_pred.append(predicted.item())
                class_gt.append(targets.item())
                total += targets.size(0)
                correct += (predicted == targets).sum().item()
        print('Epoch:', epoch)
        # Fix: sklearn metrics expect (y_true, y_pred); the arguments were
        # previously passed as (pred, gt), which transposes the confusion
        # matrix and swaps precision/recall in the report.
        print(confusion_matrix(np.array(class_gt), np.array(class_pred)))
        print(classification_report(np.array(class_gt), np.array(class_pred)))
        print(accuracy_score(np.array(class_gt), np.array(class_pred)))
        print('')
    print('Finished Training')
| [
"os.path.exists",
"torch.autograd.Variable",
"os.makedirs",
"torch.max",
"os.path.join",
"numpy.array",
"torch.nn.NLLLoss",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"torch.no_grad",
"numpy.load"
] | [((676, 701), 'os.path.exists', 'os.path.exists', (['ckpt_path'], {}), '(ckpt_path)\n', (690, 701), False, 'import os\n'), ((707, 729), 'os.makedirs', 'os.makedirs', (['ckpt_path'], {}), '(ckpt_path)\n', (718, 729), False, 'import os\n'), ((3653, 3767), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': "args['batch_size']", 'shuffle': '(True)', 'num_workers': '(2)', 'drop_last': '(True)'}), "(dataset=train_dataset, batch_size=args['batch_size'], shuffle=\n True, num_workers=2, drop_last=True)\n", (3663, 3767), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3780, 3877), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(2)', 'drop_last': '(False)'}), '(dataset=test_dataset, batch_size=1, shuffle=False, num_workers=2,\n drop_last=False)\n', (3790, 3877), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3913, 3925), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (3923, 3925), True, 'import torch.nn as nn\n'), ((752, 785), 'os.path.join', 'os.path.join', (['ckpt_path', 'exp_name'], {}), '(ckpt_path, exp_name)\n', (764, 785), False, 'import os\n'), ((804, 837), 'os.path.join', 'os.path.join', (['ckpt_path', 'exp_name'], {}), '(ckpt_path, exp_name)\n', (816, 837), False, 'import os\n'), ((1746, 1781), 'numpy.load', 'np.load', (['req_npy'], {'allow_pickle': '(True)'}), '(req_npy, allow_pickle=True)\n', (1753, 1781), True, 'import numpy as np\n'), ((1799, 1811), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1807, 1811), True, 'import numpy as np\n'), ((2748, 2783), 'numpy.load', 'np.load', (['req_npy'], {'allow_pickle': '(True)'}), '(req_npy, allow_pickle=True)\n', (2755, 2783), True, 'import numpy as np\n'), ((2801, 2813), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2809, 2813), True, 'import numpy as np\n'), ((3205, 3223), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(2048)'], {}), '(4, 
2048)\n', (3214, 3223), True, 'import torch.nn as nn\n'), ((3243, 3264), 'torch.nn.Linear', 'nn.Linear', (['(2048)', '(1024)'], {}), '(2048, 1024)\n', (3252, 3264), True, 'import torch.nn as nn\n'), ((3284, 3302), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(2)'], {}), '(1024, 2)\n', (3293, 3302), True, 'import torch.nn as nn\n'), ((4520, 4535), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4533, 4535), False, 'import torch\n'), ((1901, 1951), 'numpy.concatenate', 'np.concatenate', (['(_input, _input_arr[i])'], {'axis': 'None'}), '((_input, _input_arr[i]), axis=None)\n', (1915, 1951), True, 'import numpy as np\n'), ((2903, 2953), 'numpy.concatenate', 'np.concatenate', (['(_input, _input_arr[i])'], {'axis': 'None'}), '((_input, _input_arr[i]), axis=None)\n', (2917, 2953), True, 'import numpy as np\n'), ((4813, 4839), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (4822, 4839), False, 'import torch\n'), ((5154, 5174), 'numpy.array', 'np.array', (['class_pred'], {}), '(class_pred)\n', (5162, 5174), True, 'import numpy as np\n'), ((5175, 5193), 'numpy.array', 'np.array', (['class_gt'], {}), '(class_gt)\n', (5183, 5193), True, 'import numpy as np\n'), ((5232, 5252), 'numpy.array', 'np.array', (['class_pred'], {}), '(class_pred)\n', (5240, 5252), True, 'import numpy as np\n'), ((5253, 5271), 'numpy.array', 'np.array', (['class_gt'], {}), '(class_gt)\n', (5261, 5271), True, 'import numpy as np\n'), ((5303, 5323), 'numpy.array', 'np.array', (['class_pred'], {}), '(class_pred)\n', (5311, 5323), True, 'import numpy as np\n'), ((5324, 5342), 'numpy.array', 'np.array', (['class_gt'], {}), '(class_gt)\n', (5332, 5342), True, 'import numpy as np\n'), ((1986, 2002), 'numpy.array', 'np.array', (['_input'], {}), '(_input)\n', (1994, 2002), True, 'import numpy as np\n'), ((2047, 2060), 'numpy.array', 'np.array', (['_gt'], {}), '(_gt)\n', (2055, 2060), True, 'import numpy as np\n'), ((2988, 3004), 'numpy.array', 'np.array', (['_input'], {}), 
'(_input)\n', (2996, 3004), True, 'import numpy as np\n'), ((3049, 3062), 'numpy.array', 'np.array', (['_gt'], {}), '(_gt)\n', (3057, 3062), True, 'import numpy as np\n'), ((4178, 4194), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (4186, 4194), False, 'from torch.autograd import Variable\n'), ((4223, 4239), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (4231, 4239), False, 'from torch.autograd import Variable\n'), ((4707, 4723), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (4715, 4723), False, 'from torch.autograd import Variable\n'), ((4725, 4742), 'torch.autograd.Variable', 'Variable', (['targets'], {}), '(targets)\n', (4733, 4742), False, 'from torch.autograd import Variable\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 2 18:53:46 2020
@author: DiyaM
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sys, os
from mtcnn.mtcnn import MTCNN
import cv2
from tensorflow.keras.models import load_model
#Global variables and paths
path=os.getcwd()+"/"
#Reading data from face.npz file saved from face_detection.py
data = np.load(path+"faces.npz", allow_pickle=True)
X=data['arr_0']  # cropped face images (one array per detected face)
y_classes=data['arr_1']  # class labels as stored by face_detection.py
#plt.imshow(X[4])
#print(X[4])
#getting the categorical int from y_classes
y=pd.Series(y_classes, dtype='category').cat.codes.values
#print(y_classes[4])
# Pretrained FaceNet model used to compute the embeddings.
# NOTE(review): assumes 'facenet_keras.h5' is present in the working directory.
model = load_model('facenet_keras.h5')
# get the face embedding for one face
def get_embedding(model, face_pixels):
    """Return the embedding vector that ``model`` produces for one face.

    The pixel array is cast to float32 and standardized (zero mean, unit
    variance over all values) before being fed to the network as a batch
    of one; the single resulting embedding is returned.
    """
    pixels = face_pixels.astype('float32')
    # standardize pixel values across channels (global)
    pixels = (pixels - pixels.mean()) / pixels.std()
    # the model expects a leading batch axis, so predict on a singleton batch
    batch = np.expand_dims(pixels, axis=0)
    embeddings = model.predict(batch)
    return embeddings[0]
# Compute an embedding for every face image in X.
newTrainX = list()
i = 0  # NOTE(review): unused leftover debug counter
for face_pixels in X:
    #print(face_pixels)
    #print(i)
    #i=i+1
    embedding = get_embedding(model, face_pixels)
    newTrainX.append(embedding)
newTrainX = np.asarray(newTrainX)
print(newTrainX.shape)
# Persist the embeddings together with the integer labels for training.
np.savez_compressed('embedding_face.npz', newTrainX, y)
print("embedding created")
| [
"pandas.Series",
"numpy.asarray",
"os.getcwd",
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"numpy.savez_compressed",
"numpy.load"
] | [((421, 467), 'numpy.load', 'np.load', (["(path + 'faces.npz')"], {'allow_pickle': '(True)'}), "(path + 'faces.npz', allow_pickle=True)\n", (428, 467), True, 'import numpy as np\n'), ((688, 718), 'tensorflow.keras.models.load_model', 'load_model', (['"""facenet_keras.h5"""'], {}), "('facenet_keras.h5')\n", (698, 718), False, 'from tensorflow.keras.models import load_model\n'), ((1388, 1409), 'numpy.asarray', 'np.asarray', (['newTrainX'], {}), '(newTrainX)\n', (1398, 1409), True, 'import numpy as np\n'), ((1439, 1494), 'numpy.savez_compressed', 'np.savez_compressed', (['"""embedding_face.npz"""', 'newTrainX', 'y'], {}), "('embedding_face.npz', newTrainX, y)\n", (1458, 1494), True, 'import numpy as np\n'), ((332, 343), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (341, 343), False, 'import sys, os\n'), ((1065, 1100), 'numpy.expand_dims', 'np.expand_dims', (['face_pixels'], {'axis': '(0)'}), '(face_pixels, axis=0)\n', (1079, 1100), True, 'import numpy as np\n'), ((593, 631), 'pandas.Series', 'pd.Series', (['y_classes'], {'dtype': '"""category"""'}), "(y_classes, dtype='category')\n", (602, 631), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2013, NeXpy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING, distributed with this software.
#-----------------------------------------------------------------------------
"""
Module to read in a text file and convert it to NeXus.
This is provided as an example of writing an import dialog. Each new importer needs
to layout the GUI buttons necessary for defining the imported file and its attributes
and a single module, get_data, which returns an NXroot or NXentry object. This will be
added to the NeXpy tree.
Two GUI elements are provided for convenience:
ImportDialog.filebox: Contains a "Choose File" button and a text box. Both can be
used to set the path to the imported file. This can be
retrieved as a string using self.get_filename().
ImportDialog.buttonbox: Contains a "Cancel" and "OK" button to close the dialog.
This should be placed at the bottom of all import dialogs.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from nexpy.gui.pyqt import QtWidgets
from nexusformat.nexus import *
from nexpy.gui.importdialog import BaseImportDialog
filetype = "Text File"
class ImportDialog(BaseImportDialog):
    """Dialog to import a text file"""
    def __init__(self, parent=None):
        """Build the dialog: file chooser, skipped-rows box, OK/Cancel row."""
        super(ImportDialog, self).__init__(parent)
        # Row holding a label plus a small line edit where the user enters
        # how many header rows np.loadtxt should skip.
        skippedbox = QtWidgets.QHBoxLayout()
        skippedlabel = QtWidgets.QLabel("No. of skipped rows")
        self.skiprows = QtWidgets.QLineEdit()
        self.skiprows.setText('0')
        self.skiprows.setFixedWidth(20)
        skippedbox.addWidget(skippedlabel)
        skippedbox.addWidget(self.skiprows)
        # Stack: file chooser (from BaseImportDialog), skip-rows row, buttons.
        layout = QtWidgets.QVBoxLayout()
        layout.addLayout(self.filebox())
        layout.addLayout(skippedbox)
        layout.addWidget(self.buttonbox())
        self.setLayout(layout)
        self.setWindowTitle("Import "+str(filetype))
    def get_data(self):
        """Load the chosen text file and wrap its columns in an NXentry.

        Columns are interpreted as x, y and (optionally) errors.
        NOTE(review): np.loadtxt returns a 1-D array for a single-column
        file, so data.shape[1] raises IndexError in that case; and when
        there is exactly one column (shape[1] == 1) this method implicitly
        returns None -- confirm both behaviors are intended.
        """
        skiprows = int(self.skiprows.text())
        self.import_file = self.get_filename()
        data = np.loadtxt(self.import_file, skiprows=skiprows)
        # TODO: consider presenting a dialog asking user how to interpret this data
        if data.shape[1] > 1:
            x = NXfield(data[:,0], name='x')
            y = NXfield(data[:,1], name='y')
            if data.shape[1] > 2:
                e = NXfield(data[:,2], name='errors')
                return NXentry(NXdata(y,x,errors=e))
            else:
                return NXentry(NXdata(y,x))
| [
"nexpy.gui.pyqt.QtWidgets.QLineEdit",
"nexpy.gui.pyqt.QtWidgets.QVBoxLayout",
"numpy.loadtxt",
"nexpy.gui.pyqt.QtWidgets.QLabel",
"nexpy.gui.pyqt.QtWidgets.QHBoxLayout"
] | [((1678, 1701), 'nexpy.gui.pyqt.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (1699, 1701), False, 'from nexpy.gui.pyqt import QtWidgets\n'), ((1725, 1764), 'nexpy.gui.pyqt.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""No. of skipped rows"""'], {}), "('No. of skipped rows')\n", (1741, 1764), False, 'from nexpy.gui.pyqt import QtWidgets\n'), ((1789, 1810), 'nexpy.gui.pyqt.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (1808, 1810), False, 'from nexpy.gui.pyqt import QtWidgets\n'), ((1992, 2015), 'nexpy.gui.pyqt.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (2013, 2015), False, 'from nexpy.gui.pyqt import QtWidgets\n'), ((2357, 2404), 'numpy.loadtxt', 'np.loadtxt', (['self.import_file'], {'skiprows': 'skiprows'}), '(self.import_file, skiprows=skiprows)\n', (2367, 2404), True, 'import numpy as np\n')] |
import numpy as np
import rlberry.seeding as seeding
from rlberry.envs import GridWorld
from rlberry.agents import IncrementalAgent
from rlberry.agents.dynprog.value_iteration import ValueIterationAgent
from rlberry.stats import AgentStats
from optuna.samplers import TPESampler
# global seed
seeding.set_global_seed(1234)
class DummyAgent(IncrementalAgent):
    """Minimal IncrementalAgent used to exercise AgentStats/Optuna plumbing."""
    def __init__(self, env, n_episodes,
                 hyperparameter1=0, hyperparameter2=0, **kwargs):
        IncrementalAgent.__init__(self, env, **kwargs)
        self.name = "DummyAgent"
        self.n_episodes = n_episodes
        self.fitted = False
        self.hyperparameter1 = hyperparameter1
        self.hyperparameter2 = hyperparameter2
        self.fraction_fitted = 0.0
    def fit(self, **kwargs):
        # Pretend a full training run of n_episodes happened.
        self.fitted = True
        return {"episode_rewards": np.arange(self.n_episodes)}
    def partial_fit(self, fraction, **kwargs):
        # Run only a fraction of the budget, tracking cumulative progress.
        assert 0.0 < fraction <= 1.0
        self.fraction_fitted = min(1.0, self.fraction_fitted + fraction)
        n_run = int(np.ceil(fraction * self.n_episodes))
        return {"episode_rewards": np.arange(n_run)}
    def policy(self, observation, time=0, **kwargs):
        # The observation is ignored; act uniformly at random.
        return self.env.action_space.sample()
    @classmethod
    def sample_parameters(cls, trial):
        """Optuna search space for the two dummy hyperparameters."""
        h1 = trial.suggest_categorical('hyperparameter1', [1, 2, 3])
        h2 = trial.suggest_uniform('hyperparameter2', -10, 10)
        return {'hyperparameter1': h1, 'hyperparameter2': h2}
def test_hyperparam_optim_tpe():
    """Hyperparameter optimization with Optuna's TPE sampler."""
    env = GridWorld()
    stats = AgentStats(DummyAgent, env,
                       init_kwargs={"n_episodes": 500},
                       n_fit=4, eval_horizon=10, n_jobs=1)
    # Use the same TPE defaults that hyperopt ships with.
    stats.optimize_hyperparams(
        sampler_kwargs=TPESampler.hyperopt_parameters())
def test_hyperparam_optim_random():
    """Hyperparameter optimization with a random sampler."""
    env = GridWorld()
    stats = AgentStats(DummyAgent, env,
                       init_kwargs={"n_episodes": 500},
                       n_fit=4, eval_horizon=10, n_jobs=1)
    stats.optimize_hyperparams(sampler_method="random")
def test_hyperparam_optim_grid():
    """Hyperparameter optimization with a grid sampler (one trial per cell)."""
    env = GridWorld()
    stats = AgentStats(DummyAgent, env,
                       init_kwargs={"n_episodes": 500},
                       n_fit=4, eval_horizon=10, n_jobs=1)
    grid = {"hyperparameter1": [1, 2, 3],
            "hyperparameter2": [-5, 0, 5]}
    # 3 x 3 grid -> exactly 9 trials.
    stats.optimize_hyperparams(n_trials=3 * 3,
                               sampler_method="grid",
                               sampler_kwargs={"search_space": grid})
def test_hyperparam_optim_cmaes():
    """Hyperparameter optimization with the CMA-ES sampler."""
    env = GridWorld()
    stats = AgentStats(DummyAgent, env,
                       init_kwargs={"n_episodes": 500},
                       n_fit=4, eval_horizon=10, n_jobs=1)
    stats.optimize_hyperparams(sampler_method="cmaes")
def test_discount_optimization():
    # Fix the global seed so the optimization outcome is reproducible.
    seeding.set_global_seed(42)
    class ValueIterationAgentToOptimize(ValueIterationAgent):
        @classmethod
        def sample_parameters(cls, trial):
            """
            Sample hyperparameters for hyperparam optimization using Optuna (https://optuna.org/)
            """
            gamma = trial.suggest_categorical('gamma', [0.1, 0.99])
            return {'gamma': gamma}
    # Grid world where the large reward sits far from the start, behind
    # walls, so a larger discount factor should score better.
    env = GridWorld(nrows=3, ncols=10,
                    reward_at={(1, 1): 0.1, (2, 9): 1.0},
                    walls=((1, 4), (2, 4), (1, 5)),
                    success_probability=0.9)
    vi_params = {'gamma': 0.1, 'epsilon': 1e-3}
    vi_stats = AgentStats(ValueIterationAgentToOptimize, env, eval_horizon=20, init_kwargs=vi_params, n_fit=4, n_jobs=1)
    vi_stats.optimize_hyperparams(n_trials=5, timeout=30, n_sim=5, n_fit=1, n_jobs=1,
                                  sampler_method='random', pruner_method='none')
    # The optimizer should discover that the larger discount is better.
    assert vi_stats.best_hyperparams['gamma'] == 0.99
| [
"numpy.ceil",
"rlberry.seeding.set_global_seed",
"rlberry.agents.IncrementalAgent.__init__",
"rlberry.stats.AgentStats",
"rlberry.envs.GridWorld",
"optuna.samplers.TPESampler.hyperopt_parameters",
"numpy.arange"
] | [((295, 324), 'rlberry.seeding.set_global_seed', 'seeding.set_global_seed', (['(1234)'], {}), '(1234)\n', (318, 324), True, 'import rlberry.seeding as seeding\n'), ((1805, 1816), 'rlberry.envs.GridWorld', 'GridWorld', ([], {}), '()\n', (1814, 1816), False, 'from rlberry.envs import GridWorld\n'), ((1908, 2002), 'rlberry.stats.AgentStats', 'AgentStats', (['DummyAgent', 'train_env'], {'init_kwargs': 'params', 'n_fit': '(4)', 'eval_horizon': '(10)', 'n_jobs': '(1)'}), '(DummyAgent, train_env, init_kwargs=params, n_fit=4, eval_horizon\n =10, n_jobs=1)\n', (1918, 2002), False, 'from rlberry.stats import AgentStats\n'), ((2141, 2173), 'optuna.samplers.TPESampler.hyperopt_parameters', 'TPESampler.hyperopt_parameters', ([], {}), '()\n', (2171, 2173), False, 'from optuna.samplers import TPESampler\n'), ((2319, 2330), 'rlberry.envs.GridWorld', 'GridWorld', ([], {}), '()\n', (2328, 2330), False, 'from rlberry.envs import GridWorld\n'), ((2422, 2516), 'rlberry.stats.AgentStats', 'AgentStats', (['DummyAgent', 'train_env'], {'init_kwargs': 'params', 'n_fit': '(4)', 'eval_horizon': '(10)', 'n_jobs': '(1)'}), '(DummyAgent, train_env, init_kwargs=params, n_fit=4, eval_horizon\n =10, n_jobs=1)\n', (2432, 2516), False, 'from rlberry.stats import AgentStats\n'), ((2738, 2749), 'rlberry.envs.GridWorld', 'GridWorld', ([], {}), '()\n', (2747, 2749), False, 'from rlberry.envs import GridWorld\n'), ((2841, 2935), 'rlberry.stats.AgentStats', 'AgentStats', (['DummyAgent', 'train_env'], {'init_kwargs': 'params', 'n_fit': '(4)', 'eval_horizon': '(10)', 'n_jobs': '(1)'}), '(DummyAgent, train_env, init_kwargs=params, n_fit=4, eval_horizon\n =10, n_jobs=1)\n', (2851, 2935), False, 'from rlberry.stats import AgentStats\n'), ((3426, 3437), 'rlberry.envs.GridWorld', 'GridWorld', ([], {}), '()\n', (3435, 3437), False, 'from rlberry.envs import GridWorld\n'), ((3529, 3623), 'rlberry.stats.AgentStats', 'AgentStats', (['DummyAgent', 'train_env'], {'init_kwargs': 'params', 'n_fit': '(4)', 
'eval_horizon': '(10)', 'n_jobs': '(1)'}), '(DummyAgent, train_env, init_kwargs=params, n_fit=4, eval_horizon\n =10, n_jobs=1)\n', (3539, 3623), False, 'from rlberry.stats import AgentStats\n'), ((3809, 3836), 'rlberry.seeding.set_global_seed', 'seeding.set_global_seed', (['(42)'], {}), '(42)\n', (3832, 3836), True, 'import rlberry.seeding as seeding\n'), ((4209, 4337), 'rlberry.envs.GridWorld', 'GridWorld', ([], {'nrows': '(3)', 'ncols': '(10)', 'reward_at': '{(1, 1): 0.1, (2, 9): 1.0}', 'walls': '((1, 4), (2, 4), (1, 5))', 'success_probability': '(0.9)'}), '(nrows=3, ncols=10, reward_at={(1, 1): 0.1, (2, 9): 1.0}, walls=((\n 1, 4), (2, 4), (1, 5)), success_probability=0.9)\n', (4218, 4337), False, 'from rlberry.envs import GridWorld\n'), ((4458, 4568), 'rlberry.stats.AgentStats', 'AgentStats', (['ValueIterationAgentToOptimize', 'env'], {'eval_horizon': '(20)', 'init_kwargs': 'vi_params', 'n_fit': '(4)', 'n_jobs': '(1)'}), '(ValueIterationAgentToOptimize, env, eval_horizon=20, init_kwargs\n =vi_params, n_fit=4, n_jobs=1)\n', (4468, 4568), False, 'from rlberry.stats import AgentStats\n'), ((545, 591), 'rlberry.agents.IncrementalAgent.__init__', 'IncrementalAgent.__init__', (['self', 'env'], {}), '(self, env, **kwargs)\n', (570, 591), False, 'from rlberry.agents import IncrementalAgent\n'), ((902, 928), 'numpy.arange', 'np.arange', (['self.n_episodes'], {}), '(self.n_episodes)\n', (911, 928), True, 'import numpy as np\n'), ((1251, 1264), 'numpy.arange', 'np.arange', (['nn'], {}), '(nn)\n', (1260, 1264), True, 'import numpy as np\n'), ((1182, 1217), 'numpy.ceil', 'np.ceil', (['(fraction * self.n_episodes)'], {}), '(fraction * self.n_episodes)\n', (1189, 1217), True, 'import numpy as np\n')] |
import numpy as np
from deerlab import noiselevel, whitegaussnoise, dipolarkernel
from deerlab.dd_models import dd_gauss
from deerlab.bg_models import bg_exp
def test_filtered_movmean():
#============================================================
    """Check estimation of noiselevel using a moving-mean filter"""

    np.random.seed(1)
    time_axis = np.linspace(0, 3, 200)
    dist_axis = np.linspace(2, 6, 100)
    distribution = dd_gauss(dist_axis, [3, 0.5])
    background = bg_exp(time_axis, 1.5)
    added_noise = whitegaussnoise(time_axis, 0.03)
    # Noiseless dipolar signal plus known white noise
    kernel = dipolarkernel(time_axis, dist_axis, mod=0.25, bg=background)
    signal = kernel @ distribution + added_noise

    estimated = noiselevel(signal, 'movmean')
    assert abs(estimated - np.std(added_noise)) < 1e-2
#============================================================
def test_reference():
#============================================================
    """Check estimation of noiselevel using a reference signal"""

    np.random.seed(1)
    time_axis = np.linspace(0, 3, 200)
    dist_axis = np.linspace(2, 6, 100)
    distribution = dd_gauss(dist_axis, [3, 0.5])
    background = bg_exp(time_axis, 1.5)
    # Noise-free reference trace, then corrupt a copy with known noise
    reference = dipolarkernel(time_axis, dist_axis, mod=0.25, bg=background) @ distribution
    added_noise = whitegaussnoise(time_axis, 0.03)

    estimated = noiselevel(reference + added_noise, reference)
    assert abs(estimated - np.std(added_noise)) < 1e-2
#============================================================
def test_filtered_savgol():
#============================================================
    """Check estimation of noiselevel using a Savitzky-Golay filter"""

    np.random.seed(1)
    time_axis = np.linspace(0, 3, 200)
    dist_axis = np.linspace(2, 6, 100)
    distribution = dd_gauss(dist_axis, [3, 0.5])
    background = bg_exp(time_axis, 1.5)
    added_noise = whitegaussnoise(time_axis, 0.03)
    # Noiseless dipolar signal plus known white noise
    kernel = dipolarkernel(time_axis, dist_axis, mod=0.25, bg=background)
    signal = kernel @ distribution + added_noise

    estimated = noiselevel(signal, 'savgol')
    assert abs(estimated - np.std(added_noise)) < 1e-2
#============================================================
def test_multiscan():
#============================================================
    """Check estimation of noiselevel using multiple scans of a signal"""

    np.random.seed(1)
    time_axis = np.linspace(0, 5, 300)
    dist_axis = np.linspace(2, 6, 200)
    distribution = dd_gauss(dist_axis, [4, 0.4])
    kernel = dipolarkernel(time_axis, dist_axis)

    sigma_ref = 0.1
    n_scans = 500
    clean = kernel @ distribution
    # Stack n_scans independently-noised copies of the same signal
    scans = np.zeros((time_axis.size, n_scans))
    for scan in range(n_scans):
        scans[:, scan] = clean + whitegaussnoise(time_axis, sigma_ref)

    assert abs(noiselevel(scans) - sigma_ref) < 1e-2
#============================================================
def test_complex():
#============================================================
    """Check estimation of noiselevel using a complex signal"""

    time_axis = np.linspace(0, 3, 200)
    dist_axis = np.linspace(2, 6, 100)
    distribution = dd_gauss(dist_axis, [4, 0.4])
    background = bg_exp(time_axis, 1.5)
    # Independent noise realizations for the real and imaginary parts
    np.random.seed(1)
    real_noise = whitegaussnoise(time_axis, 0.03)
    np.random.seed(2)
    imag_noise = 1j * whitegaussnoise(time_axis, 0.03)

    signal = dipolarkernel(time_axis, dist_axis, mod=0.25, bg=background) @ distribution
    # Rotate the signal out of phase before adding noise
    rotated = signal * np.exp(-1j * np.pi / 5)
    corrupted = rotated + real_noise + imag_noise

    estimated = noiselevel(corrupted)
    assert abs(np.std(real_noise) - estimated) < 1e-2
#============================================================
#============================================================ | [
"deerlab.dd_models.dd_gauss",
"deerlab.whitegaussnoise",
"deerlab.dipolarkernel",
"deerlab.bg_models.bg_exp",
"numpy.exp",
"numpy.linspace",
"numpy.random.seed",
"numpy.std",
"deerlab.noiselevel"
] | [((330, 347), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (344, 347), True, 'import numpy as np\n'), ((357, 379), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(200)'], {}), '(0, 3, 200)\n', (368, 379), True, 'import numpy as np\n'), ((387, 409), 'numpy.linspace', 'np.linspace', (['(2)', '(6)', '(100)'], {}), '(2, 6, 100)\n', (398, 409), True, 'import numpy as np\n'), ((417, 438), 'deerlab.dd_models.dd_gauss', 'dd_gauss', (['r', '[3, 0.5]'], {}), '(r, [3, 0.5])\n', (425, 438), False, 'from deerlab.dd_models import dd_gauss\n'), ((463, 477), 'deerlab.bg_models.bg_exp', 'bg_exp', (['t', '(1.5)'], {}), '(t, 1.5)\n', (469, 477), False, 'from deerlab.bg_models import bg_exp\n'), ((490, 514), 'deerlab.whitegaussnoise', 'whitegaussnoise', (['t', '(0.03)'], {}), '(t, 0.03)\n', (505, 514), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((586, 599), 'numpy.std', 'np.std', (['noise'], {}), '(noise)\n', (592, 599), True, 'import numpy as np\n'), ((619, 643), 'deerlab.noiselevel', 'noiselevel', (['V', '"""movmean"""'], {}), "(V, 'movmean')\n", (629, 643), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((918, 935), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (932, 935), True, 'import numpy as np\n'), ((945, 967), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(200)'], {}), '(0, 3, 200)\n', (956, 967), True, 'import numpy as np\n'), ((975, 997), 'numpy.linspace', 'np.linspace', (['(2)', '(6)', '(100)'], {}), '(2, 6, 100)\n', (986, 997), True, 'import numpy as np\n'), ((1005, 1026), 'deerlab.dd_models.dd_gauss', 'dd_gauss', (['r', '[3, 0.5]'], {}), '(r, [3, 0.5])\n', (1013, 1026), False, 'from deerlab.dd_models import dd_gauss\n'), ((1051, 1065), 'deerlab.bg_models.bg_exp', 'bg_exp', (['t', '(1.5)'], {}), '(t, 1.5)\n', (1057, 1065), False, 'from deerlab.bg_models import bg_exp\n'), ((1124, 1148), 'deerlab.whitegaussnoise', 'whitegaussnoise', (['t', '(0.03)'], {}), '(t, 
0.03)\n', (1139, 1148), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((1191, 1204), 'numpy.std', 'np.std', (['noise'], {}), '(noise)\n', (1197, 1204), True, 'import numpy as np\n'), ((1224, 1243), 'deerlab.noiselevel', 'noiselevel', (['V', 'Vref'], {}), '(V, Vref)\n', (1234, 1243), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((1527, 1544), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1541, 1544), True, 'import numpy as np\n'), ((1554, 1576), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(200)'], {}), '(0, 3, 200)\n', (1565, 1576), True, 'import numpy as np\n'), ((1584, 1606), 'numpy.linspace', 'np.linspace', (['(2)', '(6)', '(100)'], {}), '(2, 6, 100)\n', (1595, 1606), True, 'import numpy as np\n'), ((1614, 1635), 'deerlab.dd_models.dd_gauss', 'dd_gauss', (['r', '[3, 0.5]'], {}), '(r, [3, 0.5])\n', (1622, 1635), False, 'from deerlab.dd_models import dd_gauss\n'), ((1660, 1674), 'deerlab.bg_models.bg_exp', 'bg_exp', (['t', '(1.5)'], {}), '(t, 1.5)\n', (1666, 1674), False, 'from deerlab.bg_models import bg_exp\n'), ((1687, 1711), 'deerlab.whitegaussnoise', 'whitegaussnoise', (['t', '(0.03)'], {}), '(t, 0.03)\n', (1702, 1711), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((1781, 1794), 'numpy.std', 'np.std', (['noise'], {}), '(noise)\n', (1787, 1794), True, 'import numpy as np\n'), ((1814, 1837), 'deerlab.noiselevel', 'noiselevel', (['V', '"""savgol"""'], {}), "(V, 'savgol')\n", (1824, 1837), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((2120, 2137), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2134, 2137), True, 'import numpy as np\n'), ((2147, 2169), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(300)'], {}), '(0, 5, 300)\n', (2158, 2169), True, 'import numpy as np\n'), ((2177, 2199), 'numpy.linspace', 'np.linspace', (['(2)', '(6)', '(200)'], {}), '(2, 6, 200)\n', (2188, 2199), True, 'import numpy 
as np\n'), ((2207, 2228), 'deerlab.dd_models.dd_gauss', 'dd_gauss', (['r', '[4, 0.4]'], {}), '(r, [4, 0.4])\n', (2215, 2228), False, 'from deerlab.dd_models import dd_gauss\n'), ((2237, 2256), 'deerlab.dipolarkernel', 'dipolarkernel', (['t', 'r'], {}), '(t, r)\n', (2250, 2256), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((2414, 2427), 'deerlab.noiselevel', 'noiselevel', (['V'], {}), '(V)\n', (2424, 2427), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((2695, 2717), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(200)'], {}), '(0, 3, 200)\n', (2706, 2717), True, 'import numpy as np\n'), ((2725, 2747), 'numpy.linspace', 'np.linspace', (['(2)', '(6)', '(100)'], {}), '(2, 6, 100)\n', (2736, 2747), True, 'import numpy as np\n'), ((2755, 2776), 'deerlab.dd_models.dd_gauss', 'dd_gauss', (['r', '[4, 0.4]'], {}), '(r, [4, 0.4])\n', (2763, 2776), False, 'from deerlab.dd_models import dd_gauss\n'), ((2801, 2815), 'deerlab.bg_models.bg_exp', 'bg_exp', (['t', '(1.5)'], {}), '(t, 1.5)\n', (2807, 2815), False, 'from deerlab.bg_models import bg_exp\n'), ((2822, 2839), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2836, 2839), True, 'import numpy as np\n'), ((2853, 2877), 'deerlab.whitegaussnoise', 'whitegaussnoise', (['t', '(0.03)'], {}), '(t, 0.03)\n', (2868, 2877), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((2882, 2899), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (2896, 2899), True, 'import numpy as np\n'), ((3066, 3079), 'numpy.std', 'np.std', (['noise'], {}), '(noise)\n', (3072, 3079), True, 'import numpy as np\n'), ((3099, 3114), 'deerlab.noiselevel', 'noiselevel', (['Vco'], {}), '(Vco)\n', (3109, 3114), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((1077, 1111), 'deerlab.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'mod': 'lam', 'bg': 'B'}), '(t, r, mod=lam, bg=B)\n', (1090, 1111), False, 'from deerlab 
import noiselevel, whitegaussnoise, dipolarkernel\n'), ((2917, 2941), 'deerlab.whitegaussnoise', 'whitegaussnoise', (['t', '(0.03)'], {}), '(t, 0.03)\n', (2932, 2941), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((2950, 2984), 'deerlab.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'mod': 'lam', 'bg': 'B'}), '(t, r, mod=lam, bg=B)\n', (2963, 2984), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((2997, 3022), 'numpy.exp', 'np.exp', (['(-1.0j * np.pi / 5)'], {}), '(-1.0j * np.pi / 5)\n', (3003, 3022), True, 'import numpy as np\n'), ((523, 557), 'deerlab.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'mod': 'lam', 'bg': 'B'}), '(t, r, mod=lam, bg=B)\n', (536, 557), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((1720, 1754), 'deerlab.dipolarkernel', 'dipolarkernel', (['t', 'r'], {'mod': 'lam', 'bg': 'B'}), '(t, r, mod=lam, bg=B)\n', (1733, 1754), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n'), ((2370, 2399), 'deerlab.whitegaussnoise', 'whitegaussnoise', (['t', 'sigma_ref'], {}), '(t, sigma_ref)\n', (2385, 2399), False, 'from deerlab import noiselevel, whitegaussnoise, dipolarkernel\n')] |
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common.import_utils import has_sklearn
from cuml.datasets.utils import _create_rs_generator
from cuml.common import with_cupy_rmm
import cupy as cp
import numpy as np
def _generate_hypercube(samples, dimensions, rng):
    """Return ``samples`` distinct binary vectors of length ``dimensions``.

    Parameters
    ----------
    samples : int
        Number of distinct binary rows to generate.
    dimensions : int
        Length of each binary vector.
    rng : random generator
        Generator exposing ``randint``; used to seed the sklearn sampler.

    Raises
    ------
    RuntimeError
        If scikit-learn is not installed.
    """
    if not has_sklearn():
        # Fixed: the previous message was split with a line continuation
        # inside the string literal, embedding a long run of spaces.
        raise RuntimeError("Scikit-learn is needed to run "
                           "make_classification.")
    from sklearn.utils.random import sample_without_replacement
    if dimensions > 30:
        # sample_without_replacement draws from 2**dimensions integers, which
        # is only tractable up to 30 bits; fill the remaining columns with
        # unconstrained random bits and recurse on the first 30 dimensions.
        return np.hstack([np.random.randint(2, size=(samples,
                                                   dimensions - 30)),
                          _generate_hypercube(samples, 30, rng)])
    random_state = int(rng.randint(dimensions))
    out = sample_without_replacement(2 ** dimensions, samples,
                                     random_state=random_state).astype(
                                         dtype='>u4', copy=False)
    # Unpack each big-endian uint32 into bits; keep the low `dimensions` bits.
    out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
    return out
@with_cupy_rmm
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None, order='F',
                        dtype='float32', _centroids=None,
                        _informative_covariance=None,
                        _redundant_covariance=None,
                        _repeated_indices=None):
    """Generate a random n-class classification problem.
    This initially creates clusters of points normally distributed (std=1)
    about vertices of an ``n_informative``-dimensional hypercube with sides of
    length ``2*class_sep`` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.
    Without shuffling, ``X`` horizontally stacks features in the following
    order: the primary ``n_informative`` features, followed by ``n_redundant``
    linear combinations of the informative features, followed by ``n_repeated``
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    ``X[:, :n_informative + n_redundant + n_repeated]``.
    Examples
    --------
    .. code-block:: python
        from cuml.datasets.classification import make_classification
        X, y = make_classification(n_samples=10, n_features=4,
                                   n_informative=2, n_classes=2)
        print("X:")
        print(X)
        print("y:")
        print(y)
    Output:
    .. code-block:: python
        X:
        [[-2.3249989  -0.8679415  -1.1511791   1.3525577 ]
        [ 2.2933831   1.3743551   0.63128835 -0.84648645]
        [ 1.6361488  -1.3233329   0.807027   -0.894092  ]
        [-1.0093077  -0.9990691  -0.00808992  0.00950443]
        [ 0.99803793  2.068382    0.49570698 -0.8462848 ]
        [-1.2750955  -0.9725835  -0.2390058   0.28081596]
        [-1.3635055  -0.9637669  -0.31582272  0.37106958]
        [ 1.1893625   2.227583    0.48750278 -0.8737561 ]
        [-0.05753583 -1.0939395   0.8188342  -0.9620734 ]
        [ 0.47910076  0.7648213  -0.17165393  0.26144698]]
        y:
        [0 1 0 0 1 0 0 1 0 1]
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise ``n_informative``
        informative features, ``n_redundant`` redundant features,
        ``n_repeated`` duplicated features and
        ``n_features-n_informative-n_redundant-n_repeated`` useless features
        drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension ``n_informative``. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : array-like of shape (n_classes,) or (n_classes - 1,),\
              (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if ``len(weights) == n_classes - 1``,
        then the last class weight is automatically inferred.
        More than ``n_samples`` samples may be returned if the sum of
        ``weights`` exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube size.  Larger values spread
        out the clusters/classes and make the classification task easier.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None (default)
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    order: str, optional (default='F')
        The order of the generated samples
    dtype : str, optional (default='float32')
        Dtype of the generated samples
    _centroids: array of centroids of shape (n_clusters, n_informative)
    _informative_covariance: array for covariance between informative features
        of shape (n_clusters, n_informative, n_informative)
    _redundant_covariance: array for covariance between redundant features
        of shape (n_informative, n_redundant)
    _repeated_indices: array of indices for the repeated features
        of shape (n_repeated, )
    Returns
    -------
    X : device array of shape [n_samples, n_features]
        The generated samples.
    y : device array of shape [n_samples]
        The integer labels for class membership of each sample.
    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.
    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    How we optimized to the GPU:
    1. Firstly, we generate X from a standard univariate instead of zeros.
       This saves memory as we don't need to generate univariates each
       time for each feature class (informative, repeated, etc.) while
       also providing the added speedup of generating a big matrix
        on GPU
    2. We generate `order=F` construction. We exploit the
       fact that X is a generated from a univariate normal, and
       covariance is introduced with matrix multiplications. Which means,
       we can generate X as a 1D array and just reshape it to the
       desired order, which only updates the metadata and eliminates
       copies
    3. Lastly, we also shuffle by construction. Centroid indices are
       permuted for each sample, and then we construct the data for
       each centroid. This shuffle works for both `order=C` and
       `order=F` and eliminates any need for secondary copies
    """
    generator = _create_rs_generator(random_state)
    np_seed = int(generator.randint(n_samples, size=1))
    np.random.seed(np_seed)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    # Use log2 to avoid overflow errors
    if n_informative < np.log2(n_classes * n_clusters_per_class):
        msg = "n_classes({}) * n_clusters_per_class({}) must be"
        msg += " smaller or equal 2**n_informative({})={}"
        raise ValueError(msg.format(n_classes, n_clusters_per_class,
                                    n_informative, 2**n_informative))

    if weights is not None:
        if len(weights) not in [n_classes, n_classes - 1]:
            raise ValueError("Weights specified but incompatible with number "
                             "of classes.")
        if len(weights) == n_classes - 1:
            if isinstance(weights, list):
                weights = weights + [1.0 - sum(weights)]
            else:
                weights = np.resize(weights, n_classes)
                weights[-1] = 1.0 - sum(weights[:-1])
    else:
        weights = [1.0 / n_classes] * n_classes

    n_clusters = n_classes * n_clusters_per_class

    # Distribute samples among clusters by weight
    n_samples_per_cluster = [
        int(n_samples * weights[k % n_classes] / n_clusters_per_class)
        for k in range(n_clusters)]

    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y.  Use the builtin `int` for the label dtype: the
    # previous `np.int` alias was deprecated in NumPy 1.20 and removed in
    # NumPy 1.24 (it resolved to the builtin `int` anyway).
    X = generator.randn(n_samples * n_features, dtype=dtype)
    X = X.reshape((n_samples, n_features), order=order)
    y = cp.zeros(n_samples, dtype=int)

    # Build the polytope whose vertices become cluster centroids
    if _centroids is None:
        centroids = cp.array(_generate_hypercube(n_clusters, n_informative,
                                                  generator)).astype(dtype, copy=False)
    else:
        centroids = _centroids
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1, dtype=dtype)
        centroids *= generator.rand(1, n_informative, dtype=dtype)

    # Create redundant features
    if n_redundant > 0:
        if _redundant_covariance is None:
            B = 2 * generator.rand(n_informative, n_redundant, dtype=dtype) - 1
        else:
            B = _redundant_covariance

    # Create each cluster; a variant of make_blobs
    if shuffle:
        # Shuffle by construction: assign each sample a random centroid up
        # front, then fill in the data centroid-by-centroid.
        proba_samples_per_cluster = np.array(n_samples_per_cluster) / np.sum(
            n_samples_per_cluster)
        shuffled_sample_indices = cp.array(np.random.choice(
            n_clusters,
            n_samples,
            replace=True,
            p=proba_samples_per_cluster
        ))
        for k, centroid in enumerate(centroids):
            centroid_indices = cp.where(shuffled_sample_indices == k)
            y[centroid_indices[0]] = k % n_classes
            X_k = X[centroid_indices[0], :n_informative]
            if _informative_covariance is None:
                A = 2 * generator.rand(n_informative, n_informative,
                                       dtype=dtype) - 1
            else:
                A = _informative_covariance[k]
            X_k = cp.dot(X_k, A)
            # NOTE: This could be done outside the loop, but a current
            # cupy bug does not allow that
            # https://github.com/cupy/cupy/issues/3284
            if n_redundant > 0:
                X[centroid_indices[0], n_informative:n_informative
                  + n_redundant] = cp.dot(X_k, B)
            X_k += centroid  # shift the cluster to a vertex
            X[centroid_indices[0], :n_informative] = X_k
    else:
        stop = 0
        for k, centroid in enumerate(centroids):
            start, stop = stop, stop + n_samples_per_cluster[k]
            y[start:stop] = k % n_classes  # assign labels
            X_k = X[start:stop, :n_informative]  # slice a view of the cluster
            if _informative_covariance is None:
                A = 2 * generator.rand(n_informative, n_informative,
                                       dtype=dtype) - 1
            else:
                A = _informative_covariance[k]
            X_k = cp.dot(X_k, A)  # introduce random covariance
            if n_redundant > 0:
                X[start:stop, n_informative:n_informative + n_redundant] = \
                    cp.dot(X_k, B)
            X_k += centroid  # shift the cluster to a vertex
            X[start:stop, :n_informative] = X_k

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        if _repeated_indices is None:
            indices = ((n - 1) * generator.rand(n_repeated,
                                                dtype=dtype)
                       + 0.5).astype(np.intp)
        else:
            indices = _repeated_indices
        X[:, n:n + n_repeated] = X[:, indices]

    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples, dtype=dtype) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=int(flip_mask.sum()))

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features, dtype=dtype) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.rand(n_features, dtype=dtype)
    X *= scale

    return X, y
return X, y
| [
"sklearn.utils.random.sample_without_replacement",
"numpy.random.choice",
"cuml.common.import_utils.has_sklearn",
"numpy.array",
"numpy.sum",
"cupy.where",
"numpy.random.randint",
"numpy.random.seed",
"cuml.datasets.utils._create_rs_generator",
"numpy.resize",
"cupy.dot",
"numpy.log2",
"cupy... | [((9600, 9634), 'cuml.datasets.utils._create_rs_generator', '_create_rs_generator', (['random_state'], {}), '(random_state)\n', (9620, 9634), False, 'from cuml.datasets.utils import _create_rs_generator\n'), ((9695, 9718), 'numpy.random.seed', 'np.random.seed', (['np_seed'], {}), '(np_seed)\n', (9709, 9718), True, 'import numpy as np\n'), ((11424, 11457), 'cupy.zeros', 'cp.zeros', (['n_samples'], {'dtype': 'np.int'}), '(n_samples, dtype=np.int)\n', (11432, 11457), True, 'import cupy as cp\n'), ((902, 915), 'cuml.common.import_utils.has_sklearn', 'has_sklearn', ([], {}), '()\n', (913, 915), False, 'from cuml.common.import_utils import has_sklearn\n'), ((10078, 10119), 'numpy.log2', 'np.log2', (['(n_classes * n_clusters_per_class)'], {}), '(n_classes * n_clusters_per_class)\n', (10085, 10119), True, 'import numpy as np\n'), ((1374, 1453), 'sklearn.utils.random.sample_without_replacement', 'sample_without_replacement', (['(2 ** dimensions)', 'samples'], {'random_state': 'random_state'}), '(2 ** dimensions, samples, random_state=random_state)\n', (1400, 1453), False, 'from sklearn.utils.random import sample_without_replacement\n'), ((12281, 12312), 'numpy.array', 'np.array', (['n_samples_per_cluster'], {}), '(n_samples_per_cluster)\n', (12289, 12312), True, 'import numpy as np\n'), ((12315, 12344), 'numpy.sum', 'np.sum', (['n_samples_per_cluster'], {}), '(n_samples_per_cluster)\n', (12321, 12344), True, 'import numpy as np\n'), ((12401, 12488), 'numpy.random.choice', 'np.random.choice', (['n_clusters', 'n_samples'], {'replace': '(True)', 'p': 'proba_samples_per_cluster'}), '(n_clusters, n_samples, replace=True, p=\n proba_samples_per_cluster)\n', (12417, 12488), True, 'import numpy as np\n'), ((12787, 12825), 'cupy.where', 'cp.where', (['(shuffled_sample_indices == k)'], {}), '(shuffled_sample_indices == k)\n', (12795, 12825), True, 'import cupy as cp\n'), ((13192, 13206), 'cupy.dot', 'cp.dot', (['X_k', 'A'], {}), '(X_k, A)\n', (13198, 13206), True, 'import 
cupy as cp\n'), ((14182, 14196), 'cupy.dot', 'cp.dot', (['X_k', 'A'], {}), '(X_k, A)\n', (14188, 14196), True, 'import cupy as cp\n'), ((1142, 1195), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(samples, dimensions - 30)'}), '(2, size=(samples, dimensions - 30))\n', (1159, 1195), True, 'import numpy as np\n'), ((10780, 10809), 'numpy.resize', 'np.resize', (['weights', 'n_classes'], {}), '(weights, n_classes)\n', (10789, 10809), True, 'import numpy as np\n'), ((13513, 13527), 'cupy.dot', 'cp.dot', (['X_k', 'B'], {}), '(X_k, B)\n', (13519, 13527), True, 'import cupy as cp\n'), ((14358, 14372), 'cupy.dot', 'cp.dot', (['X_k', 'B'], {}), '(X_k, B)\n', (14364, 14372), True, 'import cupy as cp\n')] |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import simplejson as json
import os
from .due import due, Doi
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
__all__ = ["aggregate_braindr_votes", "model"]
# Use duecredit (duecredit.org) to provide a citation to relevant work to
# be cited. This does nothing, unless the user has duecredit installed,
# and calls this with duecredit (as in `python -m duecredit script.py`):
due.cite(Doi("10.1167/13.9.30"),
         description="Analysis for braindr",
         tags=["reference-implementation"],
         path='braindr-results')
# Module-level scratchpad filled in by model() and aggregate_braindr_votes()
# with shapes, predictions and diagnostics; aggregate_braindr_votes() returns
# it to the caller.
log = {}
def model(bdr_pivot, learning_rates=(0.1,), n_estimators=(200,),
          max_depth=(2,), test_size=0.33):
    """Fit an XGBoost classifier to rater votes via cross-validated grid search.

    Records intermediate shapes, held-out predictions and per-rater feature
    importances in the module-level ``log`` dict.

    Parameters
    ----------
    bdr_pivot : pandas.DataFrame
        One row per image, one column per rater, plus 'truth' (gold-standard
        label) and 'plain_average' columns which are excluded from features.
    learning_rates, n_estimators, max_depth : sequences
        Candidate hyperparameter values for the grid search.  Defaults are
        tuples rather than lists to avoid mutable default arguments.
    test_size : float
        Fraction of data held out for the test split.

    Returns
    -------
    sklearn.model_selection.GridSearchCV
        The fitted grid-search object.
    """
    feature_cols = [c for c in bdr_pivot.columns
                    if c not in ['plain_average', 'truth']]
    X = bdr_pivot[feature_cols].values
    y = bdr_pivot.truth.values
    log["X_shape"] = X.shape
    log['y_shape'] = y.shape

    seed = 7
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=test_size,
                                                        random_state=seed,
                                                        stratify=y)
    log['X_train_shape'] = X_train.shape

    # Every rater must have at least one (finite) vote in both splits.
    # Raise explicitly instead of using ``assert`` so the check survives
    # running under ``python -O``.
    if not np.isfinite(X_train).sum(0).all():
        raise AssertionError('not everyone has a vote')
    if not np.isfinite(X_test).sum(0).all():
        raise AssertionError('not everyone has a vote')

    # Renamed from ``model`` to avoid shadowing this function's own name
    estimator = XGBClassifier()
    param_grid = dict(learning_rate=list(learning_rates),
                      max_depth=list(max_depth),
                      n_estimators=list(n_estimators))
    kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
    grid_search = GridSearchCV(estimator, param_grid, scoring="neg_log_loss",
                               n_jobs=-1, cv=kfold)
    grid_result = grid_search.fit(X_train, y_train)

    # Log best score/params and held-out class-1 probabilities
    log["Best: %f using %s"] = (grid_result.best_score_,
                                grid_result.best_params_)
    y_pred_prob = grid_result.predict_proba(X_test)[:, 1]
    log['y_pred_prob'] = y_pred_prob.tolist()
    log["y_test"] = y_test.tolist()

    # Per-rater importances: booster features are named 'f0', 'f1', ...,
    # mapping back (by position) to the rater columns of bdr_pivot.
    booster = grid_result.best_estimator_.get_booster()
    fscores = booster.get_fscore()
    fdf = pd.DataFrame([fscores]).T.rename(columns={0: 'F'})
    fdf['user'] = fdf.index.map(lambda x: feature_cols[int(x[1:])])
    fdf.sort_values('F', inplace=True)
    log['user_importance'] = fdf[::-1].to_json(orient='records')
    return grid_result
def aggregate_braindr_votes(braindr_data, pass_labels, fail_labels,
                            learning_rates=(0.1,), n_estimators=(200,),
                            max_depth=(2,), test_size=0.33):
    """
    Aggregate braindr ratings with the XGBoost model.

    Parameters
    ----------
    braindr_data : string
        Path to the braindr data downloaded from firebase, or a URL to it
        (URLs are read as CSV, local paths as tab-delimited tables)
    pass_labels : list of strings
        subject names whose images are gold-standard passes
    fail_labels : list of strings
        subject names whose images are gold-standard fails
    learning_rates : sequence of floats
        learning rates to grid search in XGBoost
    n_estimators : sequence of ints
        numbers of estimators to grid search in XGBoost
    max_depth : sequence of ints
        maximum tree depths to grid search in XGBoost
    test_size : float
        fraction of data to put into the test set

    Returns
    -------
    log : dict
        Module-level log dict holding (as JSON strings) the training pivot
        table, model diagnostics, and the per-image 'average_label' and
        'xgboost_label' scores for the full dataset.
    """
    if not isinstance(braindr_data, str):
        raise AssertionError("input a string path to braindr_data")
    if braindr_data.startswith('http'):
        braindr_df = pd.read_csv(braindr_data)
    else:
        if not os.path.exists(braindr_data):
            raise AssertionError("please give a valid path to braindr data")
        braindr_df = pd.read_table(braindr_data)

    # image_id looks like '<subject>__<rest>'; recover the subject name
    braindr_df['subject_name'] = braindr_df.image_id.map(
        lambda x: x.split('__')[0])

    # Label the gold-standard images (1 = pass, 0 = fail).  Work on copies
    # so the column assignments don't trigger chained-assignment warnings.
    pass_votes = braindr_df[braindr_df.subject_name.isin(pass_labels)].copy()
    fail_votes = braindr_df[braindr_df.subject_name.isin(fail_labels)].copy()
    pass_votes['truth'] = 1
    fail_votes['truth'] = 0
    # DataFrame.append was removed in pandas 2.0; use concat instead
    braindr_subset = pd.concat([pass_votes, fail_votes], ignore_index=True)

    # One row per image, one column per rater (repeated votes averaged).
    # (A first-pass rater filter based on raw row counts was removed: its
    # results were overwritten before ever being used.)
    bdr_pivot = braindr_subset.pivot_table(columns="username",
                                           index='image_id',
                                           values='vote',
                                           aggfunc=np.mean)

    # Keep only raters in the top quartile by number of images rated
    uname_img_counts = pd.DataFrame()
    for uname in bdr_pivot.columns:
        uname_img_counts.loc[uname, 'counts'] = bdr_pivot[uname].notnull().sum()
    cutoff = uname_img_counts.describe().loc['75%']['counts']
    username_keep = uname_img_counts[uname_img_counts.counts >= cutoff]
    username_keep = username_keep.index.values

    bdr = braindr_subset[braindr_subset.username.isin(username_keep)]
    bdr_pivot = bdr.pivot_table(columns="username", index='image_id',
                                values='vote', aggfunc=np.mean)
    truth_vals = bdr.groupby('image_id').apply(lambda x: x.truth.values[0])
    bdr_pivot['truth'] = truth_vals
    # Plain average over the rater columns only (everything but 'truth')
    bdr_pivot['plain_average'] = bdr_pivot[bdr_pivot.columns[:-1]].mean(1)
    log['bdr_pivot'] = bdr_pivot.to_json(orient='columns')

    grid_result = model(bdr_pivot, learning_rates=learning_rates,
                        n_estimators=n_estimators, max_depth=max_depth,
                        test_size=test_size)

    # Score *all* images (not just the gold standard) using only the raters
    # the model was trained on
    model_users = [c for c in bdr_pivot.columns
                   if c not in ['plain_average', 'truth']]
    braindr_full_pivot = braindr_df[braindr_df.username.isin(model_users)]\
        .pivot_table(columns='username', index='image_id',
                     values='vote', aggfunc=np.mean)
    log['braindr_full_pivot_shape'] = braindr_full_pivot.shape
    X_all = braindr_full_pivot.values
    y_all_pred = grid_result.best_estimator_.predict_proba(X_all)

    braindr_full_pivot['average_label'] = braindr_full_pivot.mean(1)
    braindr_full_pivot['xgboost_label'] = y_all_pred[:, 1]
    log['output'] = braindr_full_pivot.to_json(orient='columns')
    return log
| [
"sklearn.model_selection.GridSearchCV",
"os.path.exists",
"pandas.isnull",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.StratifiedKFold",
"numpy.isfinite",
"pandas.read_table",
"pandas.DataFrame",
"xgboost.XGBClassifier"
] | [((1265, 1339), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'seed', 'stratify': 'y'}), '(X, y, test_size=test_size, random_state=seed, stratify=y)\n', (1281, 1339), False, 'from sklearn.model_selection import train_test_split\n'), ((1765, 1780), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {}), '()\n', (1778, 1780), False, 'from xgboost import XGBClassifier\n'), ((1964, 2025), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 'random_state': 'seed'}), '(n_splits=10, shuffle=True, random_state=seed)\n', (1979, 2025), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((2044, 2120), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['model', 'param_grid'], {'scoring': '"""neg_log_loss"""', 'n_jobs': '(-1)', 'cv': 'kfold'}), "(model, param_grid, scoring='neg_log_loss', n_jobs=-1, cv=kfold)\n", (2056, 2120), False, 'from sklearn.model_selection import GridSearchCV\n'), ((5315, 5329), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5327, 5329), True, 'import pandas as pd\n'), ((4099, 4124), 'pandas.read_csv', 'pd.read_csv', (['braindr_data'], {}), '(braindr_data)\n', (4110, 4124), True, 'import pandas as pd\n'), ((4150, 4178), 'os.path.exists', 'os.path.exists', (['braindr_data'], {}), '(braindr_data)\n', (4164, 4178), False, 'import os\n'), ((4247, 4274), 'pandas.read_table', 'pd.read_table', (['braindr_data'], {}), '(braindr_data)\n', (4260, 4274), True, 'import pandas as pd\n'), ((2565, 2588), 'pandas.DataFrame', 'pd.DataFrame', (['[fscores]'], {}), '([fscores])\n', (2577, 2588), True, 'import pandas as pd\n'), ((1618, 1638), 'numpy.isfinite', 'np.isfinite', (['X_train'], {}), '(X_train)\n', (1629, 1638), True, 'import numpy as np\n'), ((1691, 1710), 'numpy.isfinite', 'np.isfinite', (['X_test'], {}), '(X_test)\n', (1702, 1710), True, 'import numpy as np\n'), ((5415, 5442), 'pandas.isnull', 
'pd.isnull', (['bdr_pivot[uname]'], {}), '(bdr_pivot[uname])\n', (5424, 5442), True, 'import pandas as pd\n')] |
""" Module for processing surfaces from Olympus LEXT software
Author: <NAME>
Principal Investigators: Prof. <NAME>, Prof. <NAME>
University: University of California, Berkeley
classes:
Surface:
methods:
__init__(*filename)
parse_waviness()
parse_roughness()
calculate_metrics()
plot_primary()
plot_metrics(*metric)
plot_section(*index)
"""
import numpy
import matplotlib.pyplot as plt
import matplotlib.cm as cm
class Surface:
""" Generates surface profile components and metrics
Produces primary, waviness, and roughness profiles from a CSV containing
height information from Olympus LEXT software
Note that the LEXT software starts indexing at 1 for row numbers, while
Python starts indexing at 0 for when you're comparing sections of
profiles
glossary:
self.npr = number of elements/points per row of data
"""
def __init__(self, filepath, cutoff=80, sample_width=643):
""" Opens file and processes all data
Parses row by comma because input must be a CSV.
All data from a LEXT-generated CSV is on a single line, so it is
reshaped to a 2-D matrix from a 1-D vector.
arguments:
filepath = Path to data
cutoff = Cutoff wavelength for low pass FFT filter
default: 80 um
sample_width = Width of sample area in microns (um)
default: 643 um at 10x magnification on Olympus
"""
self.filepath = filepath
self.cutoff = cutoff
self.sample_width = sample_width
with open(filepath) as f:
row = f.readlines()
self.primary = [float(x) for x in row[0].split(',')
if x is not None and len(x) > 0]
self.npr = int(numpy.sqrt(len(self.primary)))
self.primary = numpy.reshape(self.primary, (self.npr, self.npr))
self.parse_waviness()
self.parse_roughness()
self.calculate_metrics()
def parse_waviness(self):
""" Parse waviness from each row of the primary profile
Computes the FFT of the primary profile line-by-line.
To prevent non-zero values at the boundaries, the primary profile is
extended at the beginning and end by a flipped version of itself.
The dataset is all real valued, so the FFT is symmetric. Thus, the
signal strength must be doubled to fit the data correctly.
For waviness, a low-pass filter is used (allows low frequencies/long
wavelength signals) to allow the wavelengths longer than the cutoff
wavelength to contribute to the final waviness profile. All values
outside the range of allowed values are set to zero.
"""
self.waviness = []
for i in range(self.npr):
row = self.primary[i]
profile = []
flipped = row[::-1]
profile.extend(flipped)
profile.extend(row)
profile.extend(flipped)
f = numpy.array(numpy.fft.fft(profile))
f[1:-1] = f[1:-1]*2
self.wavelengths = []
for j in range(1, self.npr):
wavelength = 2*(3*self.sample_width)/j
self.wavelengths.extend([wavelength])
if (wavelength <= self.cutoff):
stop_index = j
break
filtered = f
filtered[stop_index:-1] = 0
self.waviness.append(numpy.real(numpy.fft.ifft(filtered))
[self.npr:2*self.npr].tolist())
def parse_roughness(self):
""" Parse roughness from primary and waviness profiles
Runs through each row in primary and waviness profiles and finds the
difference between them to get the roughness
"""
self.roughness = []
for i in range(self.npr):
self.roughness.append(self.primary[i] - self.waviness[i])
def calculate_metrics(self):
""" Calculate metrics for each row of waviness and roughness
Calculates:
Wa = Average waviness
Ra = Average roughness
"""
Wa = [sum(numpy.abs(self.waviness[i]))/self.npr
for i in range(self.npr)]
Ra = [sum(numpy.abs(self.roughness[i]))/self.npr
for i in range(self.npr)]
self.metrics = {'Wa': Wa, 'Ra': Ra}
def plot_primary(self):
""" Plots top down view of primary surface """
im = plt.imshow(self.primary, extent=[0, 643, 0, 643], cmap=cm.jet)
plt.colorbar(im, label='Height (um)')
plt.xlabel('X Position (um)')
plt.ylabel('Y Position (um)')
plt.show()
def plot_section(self, index):
""" Plots cross section of profile with waviness and roughness on plot
"""
X = numpy.linspace(0, self.sample_width, self.npr)
plt.plot(X, self.primary[index], label='Primary')
plt.plot(X, self.waviness[index], label='Waviness')
plt.plot(X, self.roughness[index], label='Roughness')
plt.xlim(0, self.sample_width)
plt.xlabel('Position (um)')
plt.ylabel('Height (um)')
plt.legend(ncol=3, loc='upper center')
plt.show()
def plot_metrics(self, metric):
""" Plots data from one of the metrics and center around zero """
centered = (self.metrics[metric] -
(sum(self.metrics[metric])/float(len(
self.metrics[metric]))))
X = numpy.linspace(0, self.sample_width, self.npr)
plt.plot(X, centered)
plt.gcf().subplots_adjust(bottom=0.3)
plt.xlim(0, self.sample_width)
plt.xlabel('Position (um)')
plt.ylabel('Height (um)')
plt.title(metric)
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.abs",
"numpy.reshape",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.plot",
"numpy.fft.fft",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.ff... | [((4597, 4659), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.primary'], {'extent': '[0, 643, 0, 643]', 'cmap': 'cm.jet'}), '(self.primary, extent=[0, 643, 0, 643], cmap=cm.jet)\n', (4607, 4659), True, 'import matplotlib.pyplot as plt\n'), ((4668, 4705), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'label': '"""Height (um)"""'}), "(im, label='Height (um)')\n", (4680, 4705), True, 'import matplotlib.pyplot as plt\n'), ((4715, 4744), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X Position (um)"""'], {}), "('X Position (um)')\n", (4725, 4744), True, 'import matplotlib.pyplot as plt\n'), ((4753, 4782), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y Position (um)"""'], {}), "('Y Position (um)')\n", (4763, 4782), True, 'import matplotlib.pyplot as plt\n'), ((4792, 4802), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4800, 4802), True, 'import matplotlib.pyplot as plt\n'), ((4944, 4990), 'numpy.linspace', 'numpy.linspace', (['(0)', 'self.sample_width', 'self.npr'], {}), '(0, self.sample_width, self.npr)\n', (4958, 4990), False, 'import numpy\n'), ((5000, 5049), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'self.primary[index]'], {'label': '"""Primary"""'}), "(X, self.primary[index], label='Primary')\n", (5008, 5049), True, 'import matplotlib.pyplot as plt\n'), ((5058, 5109), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'self.waviness[index]'], {'label': '"""Waviness"""'}), "(X, self.waviness[index], label='Waviness')\n", (5066, 5109), True, 'import matplotlib.pyplot as plt\n'), ((5118, 5171), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'self.roughness[index]'], {'label': '"""Roughness"""'}), "(X, self.roughness[index], label='Roughness')\n", (5126, 5171), True, 'import matplotlib.pyplot as plt\n'), ((5181, 5211), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'self.sample_width'], {}), '(0, self.sample_width)\n', (5189, 5211), True, 'import matplotlib.pyplot as plt\n'), ((5220, 5247), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""Position (um)"""'], {}), "('Position (um)')\n", (5230, 5247), True, 'import matplotlib.pyplot as plt\n'), ((5256, 5281), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Height (um)"""'], {}), "('Height (um)')\n", (5266, 5281), True, 'import matplotlib.pyplot as plt\n'), ((5290, 5328), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': '(3)', 'loc': '"""upper center"""'}), "(ncol=3, loc='upper center')\n", (5300, 5328), True, 'import matplotlib.pyplot as plt\n'), ((5337, 5347), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5345, 5347), True, 'import matplotlib.pyplot as plt\n'), ((5620, 5666), 'numpy.linspace', 'numpy.linspace', (['(0)', 'self.sample_width', 'self.npr'], {}), '(0, self.sample_width, self.npr)\n', (5634, 5666), False, 'import numpy\n'), ((5675, 5696), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'centered'], {}), '(X, centered)\n', (5683, 5696), True, 'import matplotlib.pyplot as plt\n'), ((5752, 5782), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'self.sample_width'], {}), '(0, self.sample_width)\n', (5760, 5782), True, 'import matplotlib.pyplot as plt\n'), ((5791, 5818), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position (um)"""'], {}), "('Position (um)')\n", (5801, 5818), True, 'import matplotlib.pyplot as plt\n'), ((5827, 5852), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Height (um)"""'], {}), "('Height (um)')\n", (5837, 5852), True, 'import matplotlib.pyplot as plt\n'), ((5861, 5878), 'matplotlib.pyplot.title', 'plt.title', (['metric'], {}), '(metric)\n', (5870, 5878), True, 'import matplotlib.pyplot as plt\n'), ((5887, 5897), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5895, 5897), True, 'import matplotlib.pyplot as plt\n'), ((1927, 1976), 'numpy.reshape', 'numpy.reshape', (['self.primary', '(self.npr, self.npr)'], {}), '(self.primary, (self.npr, self.npr))\n', (1940, 1976), False, 'import numpy\n'), ((3128, 3150), 'numpy.fft.fft', 'numpy.fft.fft', (['profile'], {}), '(profile)\n', 
(3141, 3150), False, 'import numpy\n'), ((5706, 5715), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5713, 5715), True, 'import matplotlib.pyplot as plt\n'), ((4279, 4306), 'numpy.abs', 'numpy.abs', (['self.waviness[i]'], {}), '(self.waviness[i])\n', (4288, 4306), False, 'import numpy\n'), ((4375, 4403), 'numpy.abs', 'numpy.abs', (['self.roughness[i]'], {}), '(self.roughness[i])\n', (4384, 4403), False, 'import numpy\n'), ((3590, 3614), 'numpy.fft.ifft', 'numpy.fft.ifft', (['filtered'], {}), '(filtered)\n', (3604, 3614), False, 'import numpy\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import jax
import jax.numpy as jnp
from jaxopt import base
from jaxopt._src import test_util
import numpy as onp
from typing import Any
from typing import Callable
from typing import NamedTuple
from typing import Optional
import dataclasses
import jax
import jax.numpy as jnp
from jaxopt._src import base
from jaxopt.tree_util import tree_add
from jaxopt.tree_util import tree_add_scalar_mul
from jaxopt.tree_util import tree_l2_norm
from jaxopt.tree_util import tree_scalar_mul
from jaxopt.tree_util import tree_sub
from jaxopt.tree_util import tree_zeros_like
class DummySolverState(NamedTuple):
iter_num: int
error: float
value: float
aux: Any
@dataclasses.dataclass(eq=False)
class DummySolver(base.IterativeSolver):
"""Dummy solver."""
fun: Callable
maxiter: int = 500
tol: float = 1e-3
implicit_diff: bool = False
def init_state(self, init_params: Any, *args, **kwargs) -> DummySolverState:
return DummySolverState(iter_num=0, error=jnp.inf, value=jnp.inf, aux=None)
def update(self,
params: Any,
state: DummySolverState,
*args,
**kwargs) -> base.OptStep:
return base.OptStep(params=params, state=state)
def dummy_method(self):
return self
def __post_init__(self):
self.dummy_attr = True
class BaseTest(test_util.JaxoptTestCase):
def test_linear_operator(self):
rng = onp.random.RandomState(0)
A = rng.randn(5, 3)
x = rng.randn(3)
y = rng.randn(5)
I_x = jnp.eye(3)
I_y = jnp.eye(5)
delta_x = rng.randn(1)[0]
delta_y = rng.randn(1)[0]
X = rng.randn(3, 2)
delta_X = rng.randn(2)
Y = rng.randn(5, 2)
delta_Y = rng.randn(5)
linop = base.LinearOperator(A)
# Check matrix-vector operations.
Ax = jnp.dot(A, x)
self.assertArraysAllClose(linop.matvec(x), Ax)
ATy = jnp.dot(A.T, y)
self.assertArraysAllClose(linop.rmatvec(y), ATy)
for i in range(A.shape[0]):
self.assertAllClose(linop.matvec_element(x, i), Ax[i])
self.assertArraysAllClose(linop.update_rmatvec(ATy, delta_y, i),
jnp.dot(A.T, y + delta_y * I_y[i]))
for j in range(A.shape[1]):
self.assertAllClose(linop.rmatvec_element(y, j), ATy[j])
self.assertArraysAllClose(linop.update_matvec(Ax, delta_x, j),
jnp.dot(A, x + delta_x * I_x[j]))
# Check matrix-matrix operations.
def E(i, shape):
ret = onp.zeros(shape)
ret[i] = 1
return ret
AX = jnp.dot(A, X)
self.assertArraysAllClose(linop.matvec(X), AX)
ATY = jnp.dot(A.T, Y)
self.assertArraysAllClose(linop.rmatvec(Y), ATY)
for i in range(A.shape[0]):
self.assertAllClose(linop.matvec_element(X, i), AX[i])
# todo: implement this
# self.assertArraysAllClose(linop.update_rmatvec(ATY, delta_Y, i),
# jnp.dot(A.T, Y + delta_Y[:, None] * E(i, Y.shape)))
for j in range(A.shape[1]):
self.assertAllClose(linop.rmatvec_element(Y, j), ATY[j])
self.assertArraysAllClose(linop.update_matvec(AX, delta_X, j),
jnp.dot(A, X + delta_X * E(j, X.shape)))
# Check that flatten and unflatten work.
leaf_values, treedef = jax.tree_util.tree_flatten(linop)
linop2 = jax.tree_util.tree_unflatten(treedef, leaf_values)
self.assertArraysAllClose(linop2.matvec(x), Ax)
def test_solver_attributes(self):
fun = lambda x: x
solver = DummySolver(fun=fun, maxiter=10, tol=1.0, implicit_diff=True)
self.assertEqual(solver.attribute_names(),
("fun", "maxiter", "tol", "implicit_diff"))
self.assertEqual(solver.attribute_values(), (fun, 10, 1.0, True))
def test_solver_hash(self):
fun = lambda x: x
solver = DummySolver(fun=fun, maxiter=10, tol=1.0, implicit_diff=True)
hash(solver)
def test_solver_equality(self):
fun = lambda x: x
solver = DummySolver(fun=fun, maxiter=10, tol=1.0, implicit_diff=True)
self.assertTrue(solver == solver)
def test_jit_update(self):
fun = lambda x: x
solver = DummySolver(fun=fun, maxiter=10, tol=1.0, implicit_diff=True)
update = jax.jit(solver.update)
if __name__ == '__main__':
absltest.main()
| [
"jax.numpy.eye",
"dataclasses.dataclass",
"absl.testing.absltest.main",
"jax.tree_util.tree_unflatten",
"jaxopt._src.base.OptStep",
"numpy.zeros",
"jax.jit",
"jaxopt._src.base.LinearOperator",
"jax.numpy.dot",
"jax.tree_util.tree_flatten",
"numpy.random.RandomState"
] | [((1277, 1308), 'dataclasses.dataclass', 'dataclasses.dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (1298, 1308), False, 'import dataclasses\n'), ((4828, 4843), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4841, 4843), False, 'from absl.testing import absltest\n'), ((1775, 1815), 'jaxopt._src.base.OptStep', 'base.OptStep', ([], {'params': 'params', 'state': 'state'}), '(params=params, state=state)\n', (1787, 1815), False, 'from jaxopt._src import base\n'), ((2003, 2028), 'numpy.random.RandomState', 'onp.random.RandomState', (['(0)'], {}), '(0)\n', (2025, 2028), True, 'import numpy as onp\n'), ((2105, 2115), 'jax.numpy.eye', 'jnp.eye', (['(3)'], {}), '(3)\n', (2112, 2115), True, 'import jax.numpy as jnp\n'), ((2126, 2136), 'jax.numpy.eye', 'jnp.eye', (['(5)'], {}), '(5)\n', (2133, 2136), True, 'import jax.numpy as jnp\n'), ((2311, 2333), 'jaxopt._src.base.LinearOperator', 'base.LinearOperator', (['A'], {}), '(A)\n', (2330, 2333), False, 'from jaxopt._src import base\n'), ((2382, 2395), 'jax.numpy.dot', 'jnp.dot', (['A', 'x'], {}), '(A, x)\n', (2389, 2395), True, 'import jax.numpy as jnp\n'), ((2457, 2472), 'jax.numpy.dot', 'jnp.dot', (['A.T', 'y'], {}), '(A.T, y)\n', (2464, 2472), True, 'import jax.numpy as jnp\n'), ((3123, 3136), 'jax.numpy.dot', 'jnp.dot', (['A', 'X'], {}), '(A, X)\n', (3130, 3136), True, 'import jax.numpy as jnp\n'), ((3198, 3213), 'jax.numpy.dot', 'jnp.dot', (['A.T', 'Y'], {}), '(A.T, Y)\n', (3205, 3213), True, 'import jax.numpy as jnp\n'), ((3853, 3886), 'jax.tree_util.tree_flatten', 'jax.tree_util.tree_flatten', (['linop'], {}), '(linop)\n', (3879, 3886), False, 'import jax\n'), ((3900, 3950), 'jax.tree_util.tree_unflatten', 'jax.tree_util.tree_unflatten', (['treedef', 'leaf_values'], {}), '(treedef, leaf_values)\n', (3928, 3950), False, 'import jax\n'), ((4774, 4796), 'jax.jit', 'jax.jit', (['solver.update'], {}), '(solver.update)\n', (4781, 4796), False, 'import jax\n'), ((3062, 3078), 'numpy.zeros', 
'onp.zeros', (['shape'], {}), '(shape)\n', (3071, 3078), True, 'import numpy as onp\n'), ((2723, 2757), 'jax.numpy.dot', 'jnp.dot', (['A.T', '(y + delta_y * I_y[i])'], {}), '(A.T, y + delta_y * I_y[i])\n', (2730, 2757), True, 'import jax.numpy as jnp\n'), ((2956, 2988), 'jax.numpy.dot', 'jnp.dot', (['A', '(x + delta_x * I_x[j])'], {}), '(A, x + delta_x * I_x[j])\n', (2963, 2988), True, 'import jax.numpy as jnp\n')] |
import os
import rnnSMAP
# from rnnSMAP import runTrainLSTM
import numpy as np
import imp
imp.reload(rnnSMAP)
rnnSMAP.reload()
import matplotlib
#################################################
# noise affact on sigmaX (or sigmaMC)
doOpt = []
# doOpt.append('train')
doOpt.append('test')
doOpt.append('plotBox')
noiseOpt = 'SMAP'
noiseNameLst = [None, '5e2', '1e1', '2e1', '3e1', '4e1', '5e1']
noiseNameLstPlot = ['0', '0.05', '0.1', '0.2', '0.3', '0.4', '0.5']
strSigmaLst = ['sigmaX', 'sigmaMC']
strErrLst = ['RMSE', 'ubRMSE']
saveFolder = os.path.join(
rnnSMAP.kPath['dirResult'], 'Sigma', 'int_noise_red')
rootOut = rnnSMAP.kPath['OutSigma_L3_NA']
rootDB = rnnSMAP.kPath['DB_L3_NA']
matplotlib.rcParams.update({'font.size': 16})
matplotlib.rcParams.update({'lines.linewidth': 2})
matplotlib.rcParams.update({'lines.markersize': 10})
#################################################
if 'train' in doOpt:
opt = rnnSMAP.classLSTM.optLSTM(
rootDB=rootDB, rootOut=rootOut,
syr=2015, eyr=2015,
var='varLst_Forcing', varC='varConstLst_Noah',
dr=0.5, modelOpt='relu', model='cudnn',
loss='sigma'
)
trainName = 'CONUSv4f1'
opt['train'] = trainName
cudaIdLst = np.tile([0, 1, 2], 10)
for k in range(5, len(noiseNameLst)):
opt['target'] = 'SMAP_AM_rn'+noiseNameLst[k]
opt['var'] = 'varLst_Forcing'
opt['out'] = opt['train']+'_y15_Forcing_rn'+noiseNameLst[k]
runTrainLSTM.runCmdLine(
opt=opt, cudaID=cudaIdLst[k], screenName=opt['out'])
#################################################
if 'test' in doOpt:
dsLst = list()
statErrLst = list()
statSigmaLst = list()
for k in range(0, len(noiseNameLst)):
testName = 'CONUSv4f1'
# targetName = 'SMAP_AM'
if noiseNameLst[k] is not None:
targetName = 'SMAP_AM_rn'+noiseNameLst[k]
out = 'CONUSv4f1_y15_Forcing_rn'+noiseNameLst[k]
else:
targetName = 'SMAP_AM'
out = 'CONUSv4f1_y15_Forcing'
ds = rnnSMAP.classDB.DatasetPost(
rootDB=rootDB, subsetName=testName, yrLst=[2016,2017])
ds.readData(var=targetName, field='SMAP')
ds.readPred(rootOut=rootOut, out=out, drMC=100, field='LSTM')
dsLst.append(ds)
statErr = ds.statCalError(predField='LSTM', targetField='SMAP')
statSigma = ds.statCalSigma(field='LSTM')
statErrLst.append(statErr)
statSigmaLst.append(statSigma)
#################################################
if 'plotBox' in doOpt:
dataTp = (statSigmaLst, statErrLst)
attrTp = (strSigmaLst, strErrLst)
titleTp = ('Sigma', 'Error')
saveFileTp = ('boxSigma', 'boxErr')
for iP in range(0, len(dataTp)):
dataLst = dataTp[iP]
attrLst = attrTp[iP]
for strS in attrLst:
plotLst = list()
statRef = getattr(dataLst[0], strS)
for data in dataLst:
stat = getattr(data, strS)
plotLst.append(stat/statRef)
fig = rnnSMAP.funPost.plotBox(
plotLst, labelC=noiseNameLstPlot, labelS=None,
title='Temporal Test ' + strS)
saveFile = os.path.join(saveFolder, 'box_'+strS)
fig.savefig(saveFile, dpi=300)
| [
"numpy.tile",
"matplotlib.rcParams.update",
"rnnSMAP.reload",
"imp.reload",
"os.path.join",
"rnnSMAP.classLSTM.optLSTM",
"rnnSMAP.funPost.plotBox",
"rnnSMAP.classDB.DatasetPost"
] | [((90, 109), 'imp.reload', 'imp.reload', (['rnnSMAP'], {}), '(rnnSMAP)\n', (100, 109), False, 'import imp\n'), ((110, 126), 'rnnSMAP.reload', 'rnnSMAP.reload', ([], {}), '()\n', (124, 126), False, 'import rnnSMAP\n'), ((546, 612), 'os.path.join', 'os.path.join', (["rnnSMAP.kPath['dirResult']", '"""Sigma"""', '"""int_noise_red"""'], {}), "(rnnSMAP.kPath['dirResult'], 'Sigma', 'int_noise_red')\n", (558, 612), False, 'import os\n'), ((695, 740), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (721, 740), False, 'import matplotlib\n'), ((741, 791), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'lines.linewidth': 2}"], {}), "({'lines.linewidth': 2})\n", (767, 791), False, 'import matplotlib\n'), ((792, 844), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'lines.markersize': 10}"], {}), "({'lines.markersize': 10})\n", (818, 844), False, 'import matplotlib\n'), ((927, 1115), 'rnnSMAP.classLSTM.optLSTM', 'rnnSMAP.classLSTM.optLSTM', ([], {'rootDB': 'rootDB', 'rootOut': 'rootOut', 'syr': '(2015)', 'eyr': '(2015)', 'var': '"""varLst_Forcing"""', 'varC': '"""varConstLst_Noah"""', 'dr': '(0.5)', 'modelOpt': '"""relu"""', 'model': '"""cudnn"""', 'loss': '"""sigma"""'}), "(rootDB=rootDB, rootOut=rootOut, syr=2015, eyr=\n 2015, var='varLst_Forcing', varC='varConstLst_Noah', dr=0.5, modelOpt=\n 'relu', model='cudnn', loss='sigma')\n", (952, 1115), False, 'import rnnSMAP\n'), ((1225, 1247), 'numpy.tile', 'np.tile', (['[0, 1, 2]', '(10)'], {}), '([0, 1, 2], 10)\n', (1232, 1247), True, 'import numpy as np\n'), ((2063, 2150), 'rnnSMAP.classDB.DatasetPost', 'rnnSMAP.classDB.DatasetPost', ([], {'rootDB': 'rootDB', 'subsetName': 'testName', 'yrLst': '[2016, 2017]'}), '(rootDB=rootDB, subsetName=testName, yrLst=[2016,\n 2017])\n', (2090, 2150), False, 'import rnnSMAP\n'), ((3068, 3173), 'rnnSMAP.funPost.plotBox', 'rnnSMAP.funPost.plotBox', (['plotLst'], {'labelC': 'noiseNameLstPlot', 
'labelS': 'None', 'title': "('Temporal Test ' + strS)"}), "(plotLst, labelC=noiseNameLstPlot, labelS=None,\n title='Temporal Test ' + strS)\n", (3091, 3173), False, 'import rnnSMAP\n'), ((3226, 3265), 'os.path.join', 'os.path.join', (['saveFolder', "('box_' + strS)"], {}), "(saveFolder, 'box_' + strS)\n", (3238, 3265), False, 'import os\n')] |
import pytest
import numpy as np
import levitate
# Tests created with these air properties
from levitate.materials import air
air.c = 343
air.rho = 1.2
array = levitate.arrays.RectangularArray(shape=(4, 5))
pos_0 = np.array([0.1, 0.2, 0.3])
pos_1 = np.array([-0.15, 1.27, 0.001])
pos_both = np.stack((pos_0, pos_1), axis=1)
phases = array.focus_phases((pos_0 + pos_1) / 2) + array.signature(stype='twin')
amps = levitate.utils.complex(phases)
spat_ders = array.pressure_derivs(pos_both, orders=3)
ind_ders = np.einsum('i, ji...->ji...', amps, spat_ders)
sum_ders = np.sum(ind_ders, axis=1)
sph_harm = array.spherical_harmonics(pos_both, orders=6)
sph_harm_ind = np.einsum('i, ji...->ji...', amps, sph_harm)
sph_harm_sum = np.sum(sph_harm_ind, axis=1)
requirements = {
'pressure_derivs_summed': sum_ders, 'pressure_derivs_individual': ind_ders,
'spherical_harmonics_summed': sph_harm_sum, 'spherical_harmonics_individual': sph_harm_ind
}
# Defines the fields to use for testing.
# Note that the field implementations themselves are tested elsewhere.
has_jabobians_fields = [
levitate.fields.GorkovPotential,
levitate.fields.GorkovGradient,
levitate.fields.GorkovLaplacian,
]
no_jacobians_fields = [
levitate.fields.RadiationForceGradient,
lambda arr: levitate.fields.SphericalHarmonicsForce(arr, orders=5, radius=1e-3),
]
values_fields = has_jabobians_fields + no_jacobians_fields
@pytest.mark.parametrize("func", values_fields)
def test_Field(func):
field = func(array)
calc_values = field.values
val_0 = field(amps, pos_0)
val_1 = field(amps, pos_1)
val_both = field(amps, pos_both)
np.testing.assert_allclose(val_0, calc_values(**{key: requirements[key][..., 0] for key in field.values_require}))
np.testing.assert_allclose(val_1, calc_values(**{key: requirements[key][..., 1] for key in field.values_require}))
np.testing.assert_allclose(val_both, np.stack([val_0, val_1], axis=field.ndim))
@pytest.mark.parametrize("pos", [pos_0, pos_1, pos_both])
@pytest.mark.parametrize("func", values_fields)
def test_FieldPoint(func, pos):
field = func(array)
np.testing.assert_allclose((field@pos)(amps), field(amps, pos))
@pytest.mark.parametrize("weight", [1, 1e-3, 1e3])
@pytest.mark.parametrize("func", has_jabobians_fields)
def test_CostField(func, weight):
field = func(array)
weight = np.random.uniform(-weight, weight, (1,) * field.ndim)
field = field * weight
calc_values, calc_jacobians = field.values, field.jacobians
raw_0 = calc_values(**{key: requirements[key][..., 0] for key in field.values_require})
raw_1 = calc_values(**{key: requirements[key][..., 1] for key in field.values_require})
raw_both = calc_values(**{key: requirements[key] for key in field.values_require})
val_0 = np.einsum(field._sum_str, weight, raw_0)
val_1 = np.einsum(field._sum_str, weight, raw_1)
val_both = np.einsum(field._sum_str, weight, raw_both)
raw_0 = calc_jacobians(**{key: requirements[key][..., 0] for key in field.jacobians_require})
raw_1 = calc_jacobians(**{key: requirements[key][..., 1] for key in field.jacobians_require})
raw_both = calc_jacobians(**{key: requirements[key] for key in field.jacobians_require})
jac_0 = np.einsum(field._sum_str, weight, raw_0)
jac_1 = np.einsum(field._sum_str, weight, raw_1)
jac_both = np.einsum(field._sum_str, weight, raw_both)
field_val_0, field_jac_0 = field(amps, pos_0)
field_val_1, field_jac_1 = field(amps, pos_1)
field_val_both, field_jac_both = field(amps, pos_both)
np.testing.assert_allclose(val_0, field_val_0)
np.testing.assert_allclose(val_1, field_val_1)
np.testing.assert_allclose(val_both, field_val_both)
np.testing.assert_allclose(jac_0, field_jac_0)
np.testing.assert_allclose(jac_1, field_jac_1)
np.testing.assert_allclose(jac_both, field_jac_both)
@pytest.mark.parametrize("weight", [1, 10])
@pytest.mark.parametrize("pos", [pos_0, pos_1])
@pytest.mark.parametrize("func", has_jabobians_fields)
def test_CostFieldPoint(func, weight, pos):
field = func(array)
weight = np.random.uniform(-weight, weight, (1,) * field.ndim)
field = field * weight
val, jac = (field@pos)(amps)
val_ub, jac_ub = field(amps, pos)
np.testing.assert_allclose(val, val_ub)
np.testing.assert_allclose(jac, jac_ub)
@pytest.mark.parametrize("pos", [pos_0, pos_both])
@pytest.mark.parametrize("func", values_fields)
@pytest.mark.parametrize("target_scale", [1, 1e-3, 1e3])
def test_SquaredField(func, target_scale, pos):
field = func(array)
target = np.random.uniform(-target_scale, target_scale, (1,) * field.ndim)
value = np.abs(field(amps, pos) - np.asarray(target).reshape(target.shape + (pos.ndim - 1) * (1,)))**2
np.testing.assert_allclose((field - target)(amps, pos), value)
@pytest.mark.parametrize("pos", [pos_0, pos_both])
@pytest.mark.parametrize("func", values_fields)
@pytest.mark.parametrize("target_scale", [1, 10])
def test_SquaredFieldPoint(func, target_scale, pos):
field = func(array)
target = np.random.uniform(-target_scale, target_scale, (1,) * field.ndim)
field = field - target
np.testing.assert_allclose((field@pos)(amps), field(amps, pos))
@pytest.mark.parametrize("func", has_jabobians_fields)
@pytest.mark.parametrize("weight", [1, 6])
@pytest.mark.parametrize("target_scale", [1, 1e-4])
def test_SquaredCostField(func, target_scale, weight):
field = func(array)
target = np.random.uniform(-target_scale, target_scale, (1,) * field.ndim)
weight = np.random.uniform(-weight, weight, (1,) * field.ndim)
field = (field - target) * weight
calc_values, calc_jacobians = field.values, field.jacobians
raw_0 = calc_values(**{key: requirements[key][..., 0] for key in field.values_require})
raw_1 = calc_values(**{key: requirements[key][..., 1] for key in field.values_require})
raw_both = calc_values(**{key: requirements[key] for key in field.values_require})
val_0 = np.einsum(field._sum_str, weight, raw_0)
val_1 = np.einsum(field._sum_str, weight, raw_1)
val_both = np.einsum(field._sum_str, weight, raw_both)
raw_0 = calc_jacobians(**{key: requirements[key][..., 0] for key in field.jacobians_require})
raw_1 = calc_jacobians(**{key: requirements[key][..., 1] for key in field.jacobians_require})
raw_both = calc_jacobians(**{key: requirements[key] for key in field.jacobians_require})
jac_0 = np.einsum(field._sum_str, weight, raw_0)
jac_1 = np.einsum(field._sum_str, weight, raw_1)
jac_both = np.einsum(field._sum_str, weight, raw_both)
field_val_0, field_jac_0 = field(amps, pos_0)
field_val_1, field_jac_1 = field(amps, pos_1)
field_val_both, field_jac_both = field(amps, pos_both)
np.testing.assert_allclose(val_0, field_val_0)
np.testing.assert_allclose(val_1, field_val_1)
np.testing.assert_allclose(val_both, field_val_both)
np.testing.assert_allclose(jac_0, field_jac_0)
np.testing.assert_allclose(jac_1, field_jac_1)
np.testing.assert_allclose(jac_both, field_jac_both)
@pytest.mark.parametrize("func", has_jabobians_fields)
@pytest.mark.parametrize("weight", [1, 200])
@pytest.mark.parametrize("target_scale", [1, 30])
@pytest.mark.parametrize("pos", [pos_0, pos_both])
def test_SquaredCostFieldPoint(func, weight, target_scale, pos):
field = func(array)
target = np.random.uniform(-target_scale, target_scale, (1,) * field.ndim)
weight = np.random.uniform(-weight, weight, (1,) * field.ndim)
field = (field - target) * weight
# field = (func(array) - target) * weight
val, jac = (field@pos)(amps)
val_ub, jac_ub = field(amps, pos)
np.testing.assert_allclose(val, val_ub)
np.testing.assert_allclose(jac, jac_ub)
@pytest.mark.parametrize("func0", values_fields)
@pytest.mark.parametrize("func1", values_fields)
def test_MultiField(func0, func1):
field_0 = func0(array)
field_1 = func1(array)
val0 = field_0(amps, pos_both)
val1 = field_1(amps, pos_both)
val_both = (field_0 + field_1)(amps, pos_both)
np.testing.assert_allclose(val0, val_both[0])
np.testing.assert_allclose(val1, val_both[1])
@pytest.mark.parametrize("func0", values_fields)
@pytest.mark.parametrize("func1", values_fields)
def test_MultiFieldPoint(func0, func1):
field = func0(array) + func1(array)
val_field = field(amps, pos_both)
val_bound = (field@pos_both)(amps)
np.testing.assert_allclose(val_field[0], val_bound[0])
np.testing.assert_allclose(val_field[1], val_bound[1])
@pytest.mark.parametrize("func0", has_jabobians_fields)
@pytest.mark.parametrize("func1", has_jabobians_fields)
@pytest.mark.parametrize("pos", [pos_0])
@pytest.mark.parametrize("weight0", [1, 1e-3, 1e3])
@pytest.mark.parametrize("weight1", [1, 1e-3, 1e3])
def test_MultiCostField(func0, func1, pos, weight0, weight1):
field_0 = func0(array)
weight0 = np.random.uniform(-weight0, weight0, (1,) * field_0.ndim)
field_0 = field_0 * weight0
field_1 = func1(array)
weight1 = np.random.uniform(-weight1, weight1, (1,) * field_1.ndim)
field_1 = field_1 * weight1
val0, jac0 = field_0(amps, pos)
val1, jac1 = field_1(amps, pos)
val_both, jac_both = (field_0 + field_1)(amps, pos)
np.testing.assert_allclose(val0 + val1, val_both)
np.testing.assert_allclose(jac0 + jac1, jac_both)
@pytest.mark.parametrize("func0", has_jabobians_fields)
@pytest.mark.parametrize("func1", has_jabobians_fields)
@pytest.mark.parametrize("pos", [pos_0, pos_1])
@pytest.mark.parametrize("weight0", [1, 8])
@pytest.mark.parametrize("weight1", [1, 1e-5])
def test_MultiCostFieldPoint(func0, func1, pos, weight0, weight1):
field_0 = func0(array)
weight0 = np.random.uniform(-weight0, weight0, (1,) * field_0.ndim)
field_0 = field_0 * weight0 @ pos
field_1 = func1(array)
weight1 = np.random.uniform(-weight1, weight1, (1,) * field_1.ndim)
field_1 = field_1 * weight1 @ pos
val0, jac0 = field_0(amps)
val1, jac1 = field_1(amps)
val_both, jac_both = (field_0 + field_1)(amps)
np.testing.assert_allclose(val0 + val1, val_both)
np.testing.assert_allclose(jac0 + jac1, jac_both)
@pytest.mark.parametrize("func0", values_fields)
@pytest.mark.parametrize("func1", values_fields)
@pytest.mark.parametrize("pos0", [pos_0, pos_1])
@pytest.mark.parametrize("pos1", [pos_0, pos_1])
def test_MultiFieldMultiPoint(func0, func1, pos0, pos1):
field_0 = func0(array)@pos0
field_1 = func1(array)@pos1
field_both = field_0 + field_1
val0 = field_0(amps)
val1 = field_1(amps)
val_both = field_both(amps)
np.testing.assert_allclose(val0, val_both[0])
np.testing.assert_allclose(val1, val_both[1])
@pytest.mark.parametrize("func0", has_jabobians_fields)
@pytest.mark.parametrize("func1", has_jabobians_fields)
@pytest.mark.parametrize("pos0", [pos_0, pos_1])
@pytest.mark.parametrize("pos1", [pos_0, pos_1])
@pytest.mark.parametrize("weight0", [1, 1e-3])
@pytest.mark.parametrize("weight1", [1, 1e3])
def test_MultiCostFieldMultiPoint(func0, func1, pos0, pos1, weight0, weight1):
field_0 = func0(array)
weight0 = np.random.uniform(-weight0, weight0, (1,) * field_0.ndim)
field_0 = field_0 * weight0 @ pos0
field_1 = func1(array)
weight1 = np.random.uniform(-weight1, weight1, (1,) * field_1.ndim)
field_1 = field_1 * weight1 @ pos1
field_both = field_0 + field_1
val0, jac0 = field_0(amps)
val1, jac1 = field_1(amps)
val_both, jac_both = field_both(amps)
np.testing.assert_allclose(val0 + val1, val_both)
np.testing.assert_allclose(jac0 + jac1, jac_both)
| [
"numpy.testing.assert_allclose",
"numpy.asarray",
"numpy.array",
"levitate.utils.complex",
"numpy.stack",
"numpy.einsum",
"levitate.arrays.RectangularArray",
"numpy.sum",
"pytest.mark.parametrize",
"numpy.random.uniform",
"levitate.fields.SphericalHarmonicsForce"
] | [((162, 208), 'levitate.arrays.RectangularArray', 'levitate.arrays.RectangularArray', ([], {'shape': '(4, 5)'}), '(shape=(4, 5))\n', (194, 208), False, 'import levitate\n'), ((217, 242), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3]'], {}), '([0.1, 0.2, 0.3])\n', (225, 242), True, 'import numpy as np\n'), ((251, 281), 'numpy.array', 'np.array', (['[-0.15, 1.27, 0.001]'], {}), '([-0.15, 1.27, 0.001])\n', (259, 281), True, 'import numpy as np\n'), ((293, 325), 'numpy.stack', 'np.stack', (['(pos_0, pos_1)'], {'axis': '(1)'}), '((pos_0, pos_1), axis=1)\n', (301, 325), True, 'import numpy as np\n'), ((414, 444), 'levitate.utils.complex', 'levitate.utils.complex', (['phases'], {}), '(phases)\n', (436, 444), False, 'import levitate\n'), ((511, 556), 'numpy.einsum', 'np.einsum', (['"""i, ji...->ji..."""', 'amps', 'spat_ders'], {}), "('i, ji...->ji...', amps, spat_ders)\n", (520, 556), True, 'import numpy as np\n'), ((568, 592), 'numpy.sum', 'np.sum', (['ind_ders'], {'axis': '(1)'}), '(ind_ders, axis=1)\n', (574, 592), True, 'import numpy as np\n'), ((665, 709), 'numpy.einsum', 'np.einsum', (['"""i, ji...->ji..."""', 'amps', 'sph_harm'], {}), "('i, ji...->ji...', amps, sph_harm)\n", (674, 709), True, 'import numpy as np\n'), ((725, 753), 'numpy.sum', 'np.sum', (['sph_harm_ind'], {'axis': '(1)'}), '(sph_harm_ind, axis=1)\n', (731, 753), True, 'import numpy as np\n'), ((1418, 1464), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'values_fields'], {}), "('func', values_fields)\n", (1441, 1464), False, 'import pytest\n'), ((1968, 2024), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pos"""', '[pos_0, pos_1, pos_both]'], {}), "('pos', [pos_0, pos_1, pos_both])\n", (1991, 2024), False, 'import pytest\n'), ((2026, 2072), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'values_fields'], {}), "('func', values_fields)\n", (2049, 2072), False, 'import pytest\n'), ((2200, 2253), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""weight"""', '[1, 0.001, 1000.0]'], {}), "('weight', [1, 0.001, 1000.0])\n", (2223, 2253), False, 'import pytest\n'), ((2251, 2304), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'has_jabobians_fields'], {}), "('func', has_jabobians_fields)\n", (2274, 2304), False, 'import pytest\n'), ((3896, 3938), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weight"""', '[1, 10]'], {}), "('weight', [1, 10])\n", (3919, 3938), False, 'import pytest\n'), ((3940, 3986), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pos"""', '[pos_0, pos_1]'], {}), "('pos', [pos_0, pos_1])\n", (3963, 3986), False, 'import pytest\n'), ((3988, 4041), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'has_jabobians_fields'], {}), "('func', has_jabobians_fields)\n", (4011, 4041), False, 'import pytest\n'), ((4367, 4416), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pos"""', '[pos_0, pos_both]'], {}), "('pos', [pos_0, pos_both])\n", (4390, 4416), False, 'import pytest\n'), ((4418, 4464), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'values_fields'], {}), "('func', values_fields)\n", (4441, 4464), False, 'import pytest\n'), ((4466, 4525), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_scale"""', '[1, 0.001, 1000.0]'], {}), "('target_scale', [1, 0.001, 1000.0])\n", (4489, 4525), False, 'import pytest\n'), ((4851, 4900), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pos"""', '[pos_0, pos_both]'], {}), "('pos', [pos_0, pos_both])\n", (4874, 4900), False, 'import pytest\n'), ((4902, 4948), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'values_fields'], {}), "('func', values_fields)\n", (4925, 4948), False, 'import pytest\n'), ((4950, 4998), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_scale"""', '[1, 10]'], {}), "('target_scale', [1, 10])\n", (4973, 4998), False, 'import pytest\n'), ((5253, 
5306), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'has_jabobians_fields'], {}), "('func', has_jabobians_fields)\n", (5276, 5306), False, 'import pytest\n'), ((5308, 5349), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weight"""', '[1, 6]'], {}), "('weight', [1, 6])\n", (5331, 5349), False, 'import pytest\n'), ((5351, 5403), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_scale"""', '[1, 0.0001]'], {}), "('target_scale', [1, 0.0001])\n", (5374, 5403), False, 'import pytest\n'), ((7104, 7157), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'has_jabobians_fields'], {}), "('func', has_jabobians_fields)\n", (7127, 7157), False, 'import pytest\n'), ((7159, 7202), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weight"""', '[1, 200]'], {}), "('weight', [1, 200])\n", (7182, 7202), False, 'import pytest\n'), ((7204, 7252), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_scale"""', '[1, 30]'], {}), "('target_scale', [1, 30])\n", (7227, 7252), False, 'import pytest\n'), ((7254, 7303), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pos"""', '[pos_0, pos_both]'], {}), "('pos', [pos_0, pos_both])\n", (7277, 7303), False, 'import pytest\n'), ((7785, 7832), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func0"""', 'values_fields'], {}), "('func0', values_fields)\n", (7808, 7832), False, 'import pytest\n'), ((7834, 7881), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func1"""', 'values_fields'], {}), "('func1', values_fields)\n", (7857, 7881), False, 'import pytest\n'), ((8197, 8244), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func0"""', 'values_fields'], {}), "('func0', values_fields)\n", (8220, 8244), False, 'import pytest\n'), ((8246, 8293), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func1"""', 'values_fields'], {}), "('func1', values_fields)\n", (8269, 8293), False, 'import pytest\n'), ((8573, 
8627), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func0"""', 'has_jabobians_fields'], {}), "('func0', has_jabobians_fields)\n", (8596, 8627), False, 'import pytest\n'), ((8629, 8683), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func1"""', 'has_jabobians_fields'], {}), "('func1', has_jabobians_fields)\n", (8652, 8683), False, 'import pytest\n'), ((8685, 8724), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pos"""', '[pos_0]'], {}), "('pos', [pos_0])\n", (8708, 8724), False, 'import pytest\n'), ((8726, 8780), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weight0"""', '[1, 0.001, 1000.0]'], {}), "('weight0', [1, 0.001, 1000.0])\n", (8749, 8780), False, 'import pytest\n'), ((8778, 8832), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weight1"""', '[1, 0.001, 1000.0]'], {}), "('weight1', [1, 0.001, 1000.0])\n", (8801, 8832), False, 'import pytest\n'), ((9394, 9448), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func0"""', 'has_jabobians_fields'], {}), "('func0', has_jabobians_fields)\n", (9417, 9448), False, 'import pytest\n'), ((9450, 9504), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func1"""', 'has_jabobians_fields'], {}), "('func1', has_jabobians_fields)\n", (9473, 9504), False, 'import pytest\n'), ((9506, 9552), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pos"""', '[pos_0, pos_1]'], {}), "('pos', [pos_0, pos_1])\n", (9529, 9552), False, 'import pytest\n'), ((9554, 9596), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weight0"""', '[1, 8]'], {}), "('weight0', [1, 8])\n", (9577, 9596), False, 'import pytest\n'), ((9598, 9644), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weight1"""', '[1, 1e-05]'], {}), "('weight1', [1, 1e-05])\n", (9621, 9644), False, 'import pytest\n'), ((10211, 10258), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func0"""', 'values_fields'], {}), "('func0', values_fields)\n", (10234, 
10258), False, 'import pytest\n'), ((10260, 10307), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func1"""', 'values_fields'], {}), "('func1', values_fields)\n", (10283, 10307), False, 'import pytest\n'), ((10309, 10356), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pos0"""', '[pos_0, pos_1]'], {}), "('pos0', [pos_0, pos_1])\n", (10332, 10356), False, 'import pytest\n'), ((10358, 10405), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pos1"""', '[pos_0, pos_1]'], {}), "('pos1', [pos_0, pos_1])\n", (10381, 10405), False, 'import pytest\n'), ((10749, 10803), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func0"""', 'has_jabobians_fields'], {}), "('func0', has_jabobians_fields)\n", (10772, 10803), False, 'import pytest\n'), ((10805, 10859), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func1"""', 'has_jabobians_fields'], {}), "('func1', has_jabobians_fields)\n", (10828, 10859), False, 'import pytest\n'), ((10861, 10908), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pos0"""', '[pos_0, pos_1]'], {}), "('pos0', [pos_0, pos_1])\n", (10884, 10908), False, 'import pytest\n'), ((10910, 10957), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pos1"""', '[pos_0, pos_1]'], {}), "('pos1', [pos_0, pos_1])\n", (10933, 10957), False, 'import pytest\n'), ((10959, 11005), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weight0"""', '[1, 0.001]'], {}), "('weight0', [1, 0.001])\n", (10982, 11005), False, 'import pytest\n'), ((11006, 11053), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weight1"""', '[1, 1000.0]'], {}), "('weight1', [1, 1000.0])\n", (11029, 11053), False, 'import pytest\n'), ((2376, 2429), 'numpy.random.uniform', 'np.random.uniform', (['(-weight)', 'weight', '((1,) * field.ndim)'], {}), '(-weight, weight, (1,) * field.ndim)\n', (2393, 2429), True, 'import numpy as np\n'), ((2805, 2845), 'numpy.einsum', 'np.einsum', (['field._sum_str', 'weight', 
'raw_0'], {}), '(field._sum_str, weight, raw_0)\n', (2814, 2845), True, 'import numpy as np\n'), ((2858, 2898), 'numpy.einsum', 'np.einsum', (['field._sum_str', 'weight', 'raw_1'], {}), '(field._sum_str, weight, raw_1)\n', (2867, 2898), True, 'import numpy as np\n'), ((2914, 2957), 'numpy.einsum', 'np.einsum', (['field._sum_str', 'weight', 'raw_both'], {}), '(field._sum_str, weight, raw_both)\n', (2923, 2957), True, 'import numpy as np\n'), ((3260, 3300), 'numpy.einsum', 'np.einsum', (['field._sum_str', 'weight', 'raw_0'], {}), '(field._sum_str, weight, raw_0)\n', (3269, 3300), True, 'import numpy as np\n'), ((3313, 3353), 'numpy.einsum', 'np.einsum', (['field._sum_str', 'weight', 'raw_1'], {}), '(field._sum_str, weight, raw_1)\n', (3322, 3353), True, 'import numpy as np\n'), ((3369, 3412), 'numpy.einsum', 'np.einsum', (['field._sum_str', 'weight', 'raw_both'], {}), '(field._sum_str, weight, raw_both)\n', (3378, 3412), True, 'import numpy as np\n'), ((3578, 3624), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val_0', 'field_val_0'], {}), '(val_0, field_val_0)\n', (3604, 3624), True, 'import numpy as np\n'), ((3629, 3675), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val_1', 'field_val_1'], {}), '(val_1, field_val_1)\n', (3655, 3675), True, 'import numpy as np\n'), ((3680, 3732), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val_both', 'field_val_both'], {}), '(val_both, field_val_both)\n', (3706, 3732), True, 'import numpy as np\n'), ((3738, 3784), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['jac_0', 'field_jac_0'], {}), '(jac_0, field_jac_0)\n', (3764, 3784), True, 'import numpy as np\n'), ((3789, 3835), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['jac_1', 'field_jac_1'], {}), '(jac_1, field_jac_1)\n', (3815, 3835), True, 'import numpy as np\n'), ((3840, 3892), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['jac_both', 'field_jac_both'], {}), 
'(jac_both, field_jac_both)\n', (3866, 3892), True, 'import numpy as np\n'), ((4123, 4176), 'numpy.random.uniform', 'np.random.uniform', (['(-weight)', 'weight', '((1,) * field.ndim)'], {}), '(-weight, weight, (1,) * field.ndim)\n', (4140, 4176), True, 'import numpy as np\n'), ((4280, 4319), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val', 'val_ub'], {}), '(val, val_ub)\n', (4306, 4319), True, 'import numpy as np\n'), ((4324, 4363), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['jac', 'jac_ub'], {}), '(jac, jac_ub)\n', (4350, 4363), True, 'import numpy as np\n'), ((4607, 4672), 'numpy.random.uniform', 'np.random.uniform', (['(-target_scale)', 'target_scale', '((1,) * field.ndim)'], {}), '(-target_scale, target_scale, (1,) * field.ndim)\n', (4624, 4672), True, 'import numpy as np\n'), ((5089, 5154), 'numpy.random.uniform', 'np.random.uniform', (['(-target_scale)', 'target_scale', '((1,) * field.ndim)'], {}), '(-target_scale, target_scale, (1,) * field.ndim)\n', (5106, 5154), True, 'import numpy as np\n'), ((5494, 5559), 'numpy.random.uniform', 'np.random.uniform', (['(-target_scale)', 'target_scale', '((1,) * field.ndim)'], {}), '(-target_scale, target_scale, (1,) * field.ndim)\n', (5511, 5559), True, 'import numpy as np\n'), ((5573, 5626), 'numpy.random.uniform', 'np.random.uniform', (['(-weight)', 'weight', '((1,) * field.ndim)'], {}), '(-weight, weight, (1,) * field.ndim)\n', (5590, 5626), True, 'import numpy as np\n'), ((6013, 6053), 'numpy.einsum', 'np.einsum', (['field._sum_str', 'weight', 'raw_0'], {}), '(field._sum_str, weight, raw_0)\n', (6022, 6053), True, 'import numpy as np\n'), ((6066, 6106), 'numpy.einsum', 'np.einsum', (['field._sum_str', 'weight', 'raw_1'], {}), '(field._sum_str, weight, raw_1)\n', (6075, 6106), True, 'import numpy as np\n'), ((6122, 6165), 'numpy.einsum', 'np.einsum', (['field._sum_str', 'weight', 'raw_both'], {}), '(field._sum_str, weight, raw_both)\n', (6131, 6165), True, 'import numpy as 
np\n'), ((6468, 6508), 'numpy.einsum', 'np.einsum', (['field._sum_str', 'weight', 'raw_0'], {}), '(field._sum_str, weight, raw_0)\n', (6477, 6508), True, 'import numpy as np\n'), ((6521, 6561), 'numpy.einsum', 'np.einsum', (['field._sum_str', 'weight', 'raw_1'], {}), '(field._sum_str, weight, raw_1)\n', (6530, 6561), True, 'import numpy as np\n'), ((6577, 6620), 'numpy.einsum', 'np.einsum', (['field._sum_str', 'weight', 'raw_both'], {}), '(field._sum_str, weight, raw_both)\n', (6586, 6620), True, 'import numpy as np\n'), ((6786, 6832), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val_0', 'field_val_0'], {}), '(val_0, field_val_0)\n', (6812, 6832), True, 'import numpy as np\n'), ((6837, 6883), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val_1', 'field_val_1'], {}), '(val_1, field_val_1)\n', (6863, 6883), True, 'import numpy as np\n'), ((6888, 6940), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val_both', 'field_val_both'], {}), '(val_both, field_val_both)\n', (6914, 6940), True, 'import numpy as np\n'), ((6946, 6992), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['jac_0', 'field_jac_0'], {}), '(jac_0, field_jac_0)\n', (6972, 6992), True, 'import numpy as np\n'), ((6997, 7043), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['jac_1', 'field_jac_1'], {}), '(jac_1, field_jac_1)\n', (7023, 7043), True, 'import numpy as np\n'), ((7048, 7100), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['jac_both', 'field_jac_both'], {}), '(jac_both, field_jac_both)\n', (7074, 7100), True, 'import numpy as np\n'), ((7406, 7471), 'numpy.random.uniform', 'np.random.uniform', (['(-target_scale)', 'target_scale', '((1,) * field.ndim)'], {}), '(-target_scale, target_scale, (1,) * field.ndim)\n', (7423, 7471), True, 'import numpy as np\n'), ((7485, 7538), 'numpy.random.uniform', 'np.random.uniform', (['(-weight)', 'weight', '((1,) * field.ndim)'], {}), '(-weight, weight, (1,) 
* field.ndim)\n', (7502, 7538), True, 'import numpy as np\n'), ((7698, 7737), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val', 'val_ub'], {}), '(val, val_ub)\n', (7724, 7737), True, 'import numpy as np\n'), ((7742, 7781), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['jac', 'jac_ub'], {}), '(jac, jac_ub)\n', (7768, 7781), True, 'import numpy as np\n'), ((8098, 8143), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val0', 'val_both[0]'], {}), '(val0, val_both[0])\n', (8124, 8143), True, 'import numpy as np\n'), ((8148, 8193), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val1', 'val_both[1]'], {}), '(val1, val_both[1])\n', (8174, 8193), True, 'import numpy as np\n'), ((8456, 8510), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val_field[0]', 'val_bound[0]'], {}), '(val_field[0], val_bound[0])\n', (8482, 8510), True, 'import numpy as np\n'), ((8515, 8569), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val_field[1]', 'val_bound[1]'], {}), '(val_field[1], val_bound[1])\n', (8541, 8569), True, 'import numpy as np\n'), ((8932, 8989), 'numpy.random.uniform', 'np.random.uniform', (['(-weight0)', 'weight0', '((1,) * field_0.ndim)'], {}), '(-weight0, weight0, (1,) * field_0.ndim)\n', (8949, 8989), True, 'import numpy as np\n'), ((9064, 9121), 'numpy.random.uniform', 'np.random.uniform', (['(-weight1)', 'weight1', '((1,) * field_1.ndim)'], {}), '(-weight1, weight1, (1,) * field_1.ndim)\n', (9081, 9121), True, 'import numpy as np\n'), ((9287, 9336), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(val0 + val1)', 'val_both'], {}), '(val0 + val1, val_both)\n', (9313, 9336), True, 'import numpy as np\n'), ((9341, 9390), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(jac0 + jac1)', 'jac_both'], {}), '(jac0 + jac1, jac_both)\n', (9367, 9390), True, 'import numpy as np\n'), ((9752, 9809), 'numpy.random.uniform', 
'np.random.uniform', (['(-weight0)', 'weight0', '((1,) * field_0.ndim)'], {}), '(-weight0, weight0, (1,) * field_0.ndim)\n', (9769, 9809), True, 'import numpy as np\n'), ((9890, 9947), 'numpy.random.uniform', 'np.random.uniform', (['(-weight1)', 'weight1', '((1,) * field_1.ndim)'], {}), '(-weight1, weight1, (1,) * field_1.ndim)\n', (9907, 9947), True, 'import numpy as np\n'), ((10104, 10153), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(val0 + val1)', 'val_both'], {}), '(val0 + val1, val_both)\n', (10130, 10153), True, 'import numpy as np\n'), ((10158, 10207), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(jac0 + jac1)', 'jac_both'], {}), '(jac0 + jac1, jac_both)\n', (10184, 10207), True, 'import numpy as np\n'), ((10650, 10695), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val0', 'val_both[0]'], {}), '(val0, val_both[0])\n', (10676, 10695), True, 'import numpy as np\n'), ((10700, 10745), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val1', 'val_both[1]'], {}), '(val1, val_both[1])\n', (10726, 10745), True, 'import numpy as np\n'), ((11171, 11228), 'numpy.random.uniform', 'np.random.uniform', (['(-weight0)', 'weight0', '((1,) * field_0.ndim)'], {}), '(-weight0, weight0, (1,) * field_0.ndim)\n', (11188, 11228), True, 'import numpy as np\n'), ((11310, 11367), 'numpy.random.uniform', 'np.random.uniform', (['(-weight1)', 'weight1', '((1,) * field_1.ndim)'], {}), '(-weight1, weight1, (1,) * field_1.ndim)\n', (11327, 11367), True, 'import numpy as np\n'), ((11553, 11602), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(val0 + val1)', 'val_both'], {}), '(val0 + val1, val_both)\n', (11579, 11602), True, 'import numpy as np\n'), ((11607, 11656), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(jac0 + jac1)', 'jac_both'], {}), '(jac0 + jac1, jac_both)\n', (11633, 11656), True, 'import numpy as np\n'), ((1284, 1352), 
'levitate.fields.SphericalHarmonicsForce', 'levitate.fields.SphericalHarmonicsForce', (['arr'], {'orders': '(5)', 'radius': '(0.001)'}), '(arr, orders=5, radius=0.001)\n', (1323, 1352), False, 'import levitate\n'), ((1922, 1963), 'numpy.stack', 'np.stack', (['[val_0, val_1]'], {'axis': 'field.ndim'}), '([val_0, val_1], axis=field.ndim)\n', (1930, 1963), True, 'import numpy as np\n'), ((4711, 4729), 'numpy.asarray', 'np.asarray', (['target'], {}), '(target)\n', (4721, 4729), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
"""
from __future__ import division, absolute_import
import logging
import numpy as np
import scipy as sp
import scipy.interpolate as spinterp
import time
import datetime as dt
import matplotlib.pyplot as plt
# Mayavi is an optional dependency: fall back to ``mlab = None`` so the module
# still imports on systems without a working 3-D rendering stack.
try:
    from mayavi import mlab
except ImportError:
    mlab = None
except (ValueError,RuntimeError) as e:
    # Mayavi can also fail at import time with ValueError/RuntimeError (e.g. a
    # missing or misconfigured GUI toolkit); degrade gracefully and report it.
    mlab = None
    print('Mayavi not imported due to {}'.format(e))
# Default 3-D colormap: 'viridis' where the installed matplotlib ships it,
# otherwise fall back to the older 'jet'.
try:
    plt.get_cmap('viridis')
    defmap3d = 'viridis'
except ValueError:
    defmap3d = 'jet'
#%%
def plot3Dslice(geodata, surfs, vbounds, titlestr='', time=0, gkey=None, cmap=defmap3d,
                ax=None, fig=None, method='linear', fill_value=np.nan, view=None, units='',
                colorbar=False, outimage=False):
    """Create a 3-D slice image of a geodata parameter using mayavi.

    Inputs:
        geodata - A geodata object (Cartesian coordinates) that will be plotted in 3D.
        surfs - Three-element list. Either each element is a list/1-D array of
            x, y and z cut locations (axis-aligned slices), or surfs holds the
            x, y, z coordinate arrays of a general surface to interpolate onto.
        vbounds - Two-element list of color limits, e.g. [500, 2000].
        titlestr - Title string for the figure.
        time - Time index into the data arrays.
        gkey - Data key to plot; defaults to the first available parameter.
        cmap - Mayavi colormap name.
        fig - Mayavi figure handle (or identifier) to draw into.
        method, fill_value - Passed to scipy.interpolate.griddata for the
            general-surface case.
        view - Optional (azimuth, elevation) for mlab.view.
        units - Units string appended to the colorbar title.
        colorbar - If True, add a vertical colorbar.
        outimage - If True, return a screenshot array and close the figure;
            otherwise return the list of mesh surfaces.

    Returns either a screenshot ndarray (outimage=True) or the list of
    mayavi surfaces. Returns None when mayavi is unavailable.
    """
    if mlab is None:
        print('mayavi was not successfully imported')
        return
    assert geodata.coordnames.lower() == 'cartesian'

    datalocs = geodata.dataloc
    # NOTE: sp.unique/sp.diff etc. were numpy aliases removed from modern
    # scipy; use numpy directly.
    xvec = np.unique(datalocs[:, 0])
    yvec = np.unique(datalocs[:, 1])
    zvec = np.unique(datalocs[:, 2])
    # The data must lie on a full regular grid for the reshape below.
    assert len(xvec)*len(yvec)*len(zvec) == datalocs.shape[0]

    # Determine Fortran vs C ordering of the flattened grid by checking which
    # coordinate changes fastest between consecutive samples.
    diffcoord = np.diff(datalocs, axis=0)
    ar_ord = 'c'  # safe default; the original could leave ar_ord unbound
    if diffcoord[0, 1] != 0.0:
        ar_ord = 'f'
    elif diffcoord[0, 2] != 0.0:
        ar_ord = 'c'
    elif diffcoord[0, 0] != 0.0:
        if len(np.where(diffcoord[:, 1])[0]) == 0:
            ar_ord = 'f'
        elif len(np.where(diffcoord[:, 2])[0]) == 0:
            ar_ord = 'c'

    matshape = (len(yvec), len(xvec), len(zvec))
    # Reshape the flat coordinate columns into grid matrices for plotting.
    x, y, z = [np.reshape(datalocs[:, idim], matshape, order=ar_ord) for idim in range(3)]

    if gkey is None:
        gkey = geodata.datanames()[0]
    porig = geodata.data[gkey][:, time]

    mlab.figure(fig)

    def _masked_mesh(xs, ys, zs, ps):
        """Add one mayavi mesh with NaN samples masked out and fully transparent."""
        pmask = np.zeros_like(ps, dtype=bool)
        pmask[np.isnan(ps)] = True
        surf = mlab.mesh(xs, ys, zs, scalars=ps, vmin=vbounds[0],
                         vmax=vbounds[1], colormap=cmap, mask=pmask)
        surf.module_manager.scalar_lut_manager.lut.nan_color = 0, 0, 0, 0
        return surf

    # Determine whether slices (lists / 1-D arrays) or a full surface is given.
    islists = isinstance(surfs[0], list)
    onedim = isinstance(surfs[0], np.ndarray) and surfs[0].ndim == 1

    surflist = []
    if islists or onedim:
        p = np.reshape(porig, matshape, order=ar_ord)
        # Axis-aligned cuts: surfs holds [x cuts, y cuts, z cuts]; snap each
        # requested location to the nearest grid plane.
        for isur in surfs[0]:
            indx = np.argmin(np.abs(isur - xvec))
            surflist.append(_masked_mesh(x[:, indx], y[:, indx], z[:, indx], p[:, indx]))
        for isur in surfs[1]:
            indx = np.argmin(np.abs(isur - yvec))
            surflist.append(_masked_mesh(x[indx], y[indx], z[indx], p[indx]))
        for isur in surfs[2]:
            indx = np.argmin(np.abs(isur - zvec))
            surflist.append(_masked_mesh(x[:, :, indx], y[:, :, indx],
                                         z[:, :, indx], p[:, :, indx]))
    else:
        # General surface: interpolate the (finite) data onto the given coords.
        xtmp, ytmp, ztmp = surfs[:]
        gooddata = ~np.isnan(porig)
        new_coords = np.column_stack((xtmp.flatten(), ytmp.flatten(), ztmp.flatten()))
        ptmp = spinterp.griddata(datalocs[gooddata], porig[gooddata],
                                 new_coords, method, fill_value)
        surflist.append(_masked_mesh(xtmp, ytmp, ztmp, ptmp))

    mlab.title(titlestr, color=(0, 0, 0))
    mlab.axes(color=(0, 0, 0), x_axis_visibility=True, xlabel='x in km', y_axis_visibility=True,
              ylabel='y in km', z_axis_visibility=True, zlabel='z in km')
    mlab.orientation_axes(xlabel='x in km', ylabel='y in km', zlabel='z in km')
    if view is not None:
        # order of elevation is changed between matplotlib and mayavi
        mlab.view(view[0], view[1])
    if colorbar:
        # BUG FIX: the original passed the misspelled, unbound name ``titlstr``
        # when units == '', raising NameError.
        cbar_title = gkey if units == '' else gkey + ' in ' + units
        mlab.colorbar(surflist[-1], title=cbar_title, orientation='vertical')
    if outimage:
        arr = mlab.screenshot(fig, antialiased=True)
        mlab.close(fig)
        return arr
    else:
        return surflist
| [
"scipy.unique",
"mayavi.mlab.axes",
"numpy.reshape",
"mayavi.mlab.view",
"scipy.isnan",
"numpy.where",
"mayavi.mlab.screenshot",
"scipy.reshape",
"mayavi.mlab.orientation_axes",
"mayavi.mlab.colorbar",
"mayavi.mlab.close",
"numpy.isnan",
"scipy.diff",
"matplotlib.pyplot.get_cmap",
"scipy... | [((427, 450), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (439, 450), True, 'import matplotlib.pyplot as plt\n'), ((1756, 1781), 'scipy.unique', 'sp.unique', (['datalocs[:, 0]'], {}), '(datalocs[:, 0])\n', (1765, 1781), True, 'import scipy as sp\n'), ((1793, 1818), 'scipy.unique', 'sp.unique', (['datalocs[:, 1]'], {}), '(datalocs[:, 1])\n', (1802, 1818), True, 'import scipy as sp\n'), ((1830, 1855), 'scipy.unique', 'sp.unique', (['datalocs[:, 2]'], {}), '(datalocs[:, 2])\n', (1839, 1855), True, 'import scipy as sp\n'), ((1998, 2023), 'scipy.diff', 'sp.diff', (['datalocs'], {'axis': '(0)'}), '(datalocs, axis=0)\n', (2005, 2023), True, 'import scipy as sp\n'), ((2619, 2635), 'mayavi.mlab.figure', 'mlab.figure', (['fig'], {}), '(fig)\n', (2630, 2635), False, 'from mayavi import mlab\n'), ((5377, 5414), 'mayavi.mlab.title', 'mlab.title', (['titlestr'], {'color': '(0, 0, 0)'}), '(titlestr, color=(0, 0, 0))\n', (5387, 5414), False, 'from mayavi import mlab\n'), ((5452, 5612), 'mayavi.mlab.axes', 'mlab.axes', ([], {'color': '(0, 0, 0)', 'x_axis_visibility': '(True)', 'xlabel': '"""x in km"""', 'y_axis_visibility': '(True)', 'ylabel': '"""y in km"""', 'z_axis_visibility': '(True)', 'zlabel': '"""z in km"""'}), "(color=(0, 0, 0), x_axis_visibility=True, xlabel='x in km',\n y_axis_visibility=True, ylabel='y in km', z_axis_visibility=True,\n zlabel='z in km')\n", (5461, 5612), False, 'from mayavi import mlab\n'), ((5624, 5699), 'mayavi.mlab.orientation_axes', 'mlab.orientation_axes', ([], {'xlabel': '"""x in km"""', 'ylabel': '"""y in km"""', 'zlabel': '"""z in km"""'}), "(xlabel='x in km', ylabel='y in km', zlabel='z in km')\n", (5645, 5699), False, 'from mayavi import mlab\n'), ((2438, 2491), 'scipy.reshape', 'sp.reshape', (['datalocs[:, idim]', 'matshape'], {'order': 'ar_ord'}), '(datalocs[:, idim], matshape, order=ar_ord)\n', (2448, 2491), True, 'import scipy as sp\n'), ((2905, 2946), 'numpy.reshape', 'np.reshape', 
(['porig', 'matshape'], {'order': 'ar_ord'}), '(porig, matshape, order=ar_ord)\n', (2915, 2946), True, 'import numpy as np\n'), ((4974, 5042), 'scipy.interpolate.griddata', 'spinterp.griddata', (['curlocs', 'curparam', 'new_coords', 'method', 'fill_value'], {}), '(curlocs, curparam, new_coords, method, fill_value)\n', (4991, 5042), True, 'import scipy.interpolate as spinterp\n'), ((5804, 5831), 'mayavi.mlab.view', 'mlab.view', (['view[0]', 'view[1]'], {}), '(view[0], view[1])\n', (5813, 5831), False, 'from mayavi import mlab\n'), ((5965, 6031), 'mayavi.mlab.colorbar', 'mlab.colorbar', (['surflist[-1]'], {'title': 'titlstr', 'orientation': '"""vertical"""'}), "(surflist[-1], title=titlstr, orientation='vertical')\n", (5978, 6031), False, 'from mayavi import mlab\n'), ((6064, 6102), 'mayavi.mlab.screenshot', 'mlab.screenshot', (['fig'], {'antialiased': '(True)'}), '(fig, antialiased=True)\n', (6079, 6102), False, 'from mayavi import mlab\n'), ((6111, 6126), 'mayavi.mlab.close', 'mlab.close', (['fig'], {}), '(fig)\n', (6121, 6126), False, 'from mayavi import mlab\n'), ((4784, 4799), 'numpy.isnan', 'np.isnan', (['porig'], {}), '(porig)\n', (4792, 4799), True, 'import numpy as np\n'), ((5106, 5120), 'scipy.isnan', 'sp.isnan', (['ptmp'], {}), '(ptmp)\n', (5114, 5120), True, 'import scipy as sp\n'), ((5153, 5259), 'mayavi.mlab.mesh', 'mlab.mesh', (['xtmp', 'ytmp', 'ztmp'], {'scalars': 'ptmp', 'vmin': 'vbounds[0]', 'vmax': 'vbounds[1]', 'colormap': 'cmap', 'mask': 'pmask'}), '(xtmp, ytmp, ztmp, scalars=ptmp, vmin=vbounds[0], vmax=vbounds[1],\n colormap=cmap, mask=pmask)\n', (5162, 5259), False, 'from mayavi import mlab\n'), ((3032, 3056), 'scipy.absolute', 'sp.absolute', (['(isur - xvec)'], {}), '(isur - xvec)\n', (3043, 3056), True, 'import scipy as sp\n'), ((3247, 3261), 'scipy.isnan', 'sp.isnan', (['ptmp'], {}), '(ptmp)\n', (3255, 3261), True, 'import scipy as sp\n'), ((3298, 3404), 'mayavi.mlab.mesh', 'mlab.mesh', (['xtmp', 'ytmp', 'ztmp'], {'scalars': 'ptmp', 'vmin': 
'vbounds[0]', 'vmax': 'vbounds[1]', 'colormap': 'cmap', 'mask': 'pmask'}), '(xtmp, ytmp, ztmp, scalars=ptmp, vmin=vbounds[0], vmax=vbounds[1],\n colormap=cmap, mask=pmask)\n', (3307, 3404), False, 'from mayavi import mlab\n'), ((3612, 3636), 'scipy.absolute', 'sp.absolute', (['(isur - yvec)'], {}), '(isur - yvec)\n', (3623, 3636), True, 'import scipy as sp\n'), ((3815, 3829), 'scipy.isnan', 'sp.isnan', (['ptmp'], {}), '(ptmp)\n', (3823, 3829), True, 'import scipy as sp\n'), ((3866, 3972), 'mayavi.mlab.mesh', 'mlab.mesh', (['xtmp', 'ytmp', 'ztmp'], {'scalars': 'ptmp', 'vmin': 'vbounds[0]', 'vmax': 'vbounds[1]', 'colormap': 'cmap', 'mask': 'pmask'}), '(xtmp, ytmp, ztmp, scalars=ptmp, vmin=vbounds[0], vmax=vbounds[1],\n colormap=cmap, mask=pmask)\n', (3875, 3972), False, 'from mayavi import mlab\n'), ((4179, 4203), 'scipy.absolute', 'sp.absolute', (['(isur - zvec)'], {}), '(isur - zvec)\n', (4190, 4203), True, 'import scipy as sp\n'), ((4406, 4420), 'scipy.isnan', 'sp.isnan', (['ptmp'], {}), '(ptmp)\n', (4414, 4420), True, 'import scipy as sp\n'), ((4457, 4563), 'mayavi.mlab.mesh', 'mlab.mesh', (['xtmp', 'ytmp', 'ztmp'], {'scalars': 'ptmp', 'vmin': 'vbounds[0]', 'vmax': 'vbounds[1]', 'colormap': 'cmap', 'mask': 'pmask'}), '(xtmp, ytmp, ztmp, scalars=ptmp, vmin=vbounds[0], vmax=vbounds[1],\n colormap=cmap, mask=pmask)\n', (4466, 4563), False, 'from mayavi import mlab\n'), ((5059, 5078), 'scipy.zeros_like', 'sp.zeros_like', (['ptmp'], {}), '(ptmp)\n', (5072, 5078), True, 'import scipy as sp\n'), ((3196, 3215), 'scipy.zeros_like', 'sp.zeros_like', (['ptmp'], {}), '(ptmp)\n', (3209, 3215), True, 'import scipy as sp\n'), ((3764, 3783), 'scipy.zeros_like', 'sp.zeros_like', (['ptmp'], {}), '(ptmp)\n', (3777, 3783), True, 'import scipy as sp\n'), ((4355, 4374), 'scipy.zeros_like', 'sp.zeros_like', (['ptmp'], {}), '(ptmp)\n', (4368, 4374), True, 'import scipy as sp\n'), ((2179, 2204), 'numpy.where', 'np.where', (['diffcoord[:, 1]'], {}), '(diffcoord[:, 1])\n', (2187, 2204), 
True, 'import numpy as np\n'), ((2257, 2282), 'numpy.where', 'np.where', (['diffcoord[:, 2]'], {}), '(diffcoord[:, 2])\n', (2265, 2282), True, 'import numpy as np\n')] |
import random
import torch
from torch.utils.data.sampler import Sampler
# Adapted from
# https://github.com/pytorch/pytorch/pull/3062/files
class RandomCycleIter(object):
    """Endless iterator over a finite collection.

    After every full pass through the data the order is reshuffled, so
    consecutive "epochs" see different permutations of the same items.
    """
    def __init__(self, data):
        self.data_list = list(data)
        self.length = len(self.data_list)
        # Start one past the last slot so the very first __next__ wraps
        # around and triggers the initial shuffle.
        self.i = self.length - 1
    def __iter__(self):
        return self
    def __next__(self):
        position = self.i + 1
        if position == self.length:
            position = 0
            random.shuffle(self.data_list)
        self.i = position
        return self.data_list[position]
    next = __next__  # Python 2 compatibility alias
def multi_data_generator(data_iters, index_data, n, size):
    """Yield ``n`` pairs ``(dataset_index, sample)``.

    In-batch positions ``0..size-1`` are visited round-robin; each position
    is mapped through ``index_data`` to a dataset, and one sample is pulled
    from that dataset's iterator.
    """
    for step in range(n):
        dataset = index_data[step % size]
        yield dataset, next(data_iters[dataset])
class MSampler(object):
    """Sampler that fills each batch with fixed quotas from several datasets.

    ``batch_sizes[k]`` positions of every batch draw from dataset ``k``,
    whose ``sizes[k]`` indices are cycled (reshuffled per pass) by a
    RandomCycleIter. Iterating yields ``(dataset_index, sample_index)``
    pairs; see ``multi_data_generator``.
    """
    def __init__(self, batch_sizes, sizes, num_samples=None, num_iters=None):
        self.batch_size = sum(batch_sizes)
        # Map every in-batch position to the dataset it draws from.
        self.index_data = {}
        size, c = 0, -1
        for i in range(self.batch_size):
            if i == size:
                c += 1
                size += batch_sizes[c]
            self.index_data[i] = c
        # Bug fix: the original expression
        #   num_samples or num_iters * self.batch_size or sum(sizes)
        # raised TypeError (None * int) whenever both num_samples and
        # num_iters were left at their defaults. Preserve the original
        # truthiness-based fallback chain otherwise.
        if num_samples is None:
            num_samples = num_iters * self.batch_size if num_iters is not None else 0
        self.num_samples = num_samples or sum(sizes)
        self.data_iters = [RandomCycleIter(range(n)) for n in sizes]
    def __iter__(self):
        return multi_data_generator(
            self.data_iters, self.index_data,
            self.num_samples, self.batch_size)
    def __len__(self):
        return self.num_samples
def single_data_generator(data_iter, n):
    """Yield exactly ``n`` consecutive items drawn from ``data_iter``."""
    for _ in range(n):
        yield next(data_iter)
class CycleSampler(Sampler):
    """Sampler producing ``num_samples`` indices by cycling over ``range(size)``.

    The underlying RandomCycleIter reshuffles after every full pass, so each
    epoch sees a different permutation. The total count is either given
    explicitly via ``num_samples`` or derived as ``size * num_epochs``.
    """
    def __init__(self, size, num_samples=None, num_epochs=0):
        # An explicit sample count wins over the epoch-based count.
        self.num_samples = num_samples or size * num_epochs
        self.data_iter = RandomCycleIter(range(size))
    def __iter__(self):
        return single_data_generator(self.data_iter, self.num_samples)
    def __len__(self):
        return self.num_samples
import numpy as np
class RandomSampler(object):
    """Sampler yielding a random permutation of indices over ``data_source``.

    Keeps a private ``numpy.random.RandomState`` whose state can be
    saved/restored via ``get_state``/``set_state``.
    """
    def __init__(self, data_source, state=None, seed=None):
        self.data_source = data_source
        # Bug fix: was ``np.random.RandomSatate`` (typo), which raised
        # AttributeError on every construction.
        self.rng = np.random.RandomState(seed)
        # ``state`` used to be accepted but silently ignored; restoring it
        # here is backward compatible (default None keeps old behavior).
        if state is not None:
            self.rng.set_state(state)
    def __iter__(self):
        # NOTE(review): the permutation comes from torch's global RNG, not
        # from self.rng -- seed/state only affect get_state/set_state
        # round-trips. Confirm whether self.rng should drive this instead.
        return iter(torch.randperm(len(self.data_source)).long())
    def __len__(self):
        return len(self.data_source)
    def get_state(self):
        return self.rng.get_state()
    def set_state(self, state):
        self.rng.set_state(state)
| [
"random.shuffle",
"numpy.random.RandomSatate"
] | [((2141, 2169), 'numpy.random.RandomSatate', 'np.random.RandomSatate', (['seed'], {}), '(seed)\n', (2163, 2169), True, 'import numpy as np\n'), ((473, 503), 'random.shuffle', 'random.shuffle', (['self.data_list'], {}), '(self.data_list)\n', (487, 503), False, 'import random\n')] |
import numpy as np
from q1 import findConvexHull
from q2 import VisibiltyGraph
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Alternate (overlapping-polygons) test case, kept for reference:
    # points = [[[1.0, 2.0], [4.0, 3.0], [4.0, 2.0]], [[4.0, 8.0], [6.0, 7.0], [4.0, 4.0], [7.0, 6.0]], [[6.0, 8.0], [9.84, 8.87], [7.16, 11.76]], [[6.0, 10.0], [8.0, 10.0], [5.0, 11.74], [6.0, 13.46], [8.0, 13.46], [9.0, 11.73]]]
    # Isolated-obstacles test case (note: overwritten by the active case below).
    points = [[[2.0, 2.0], [0.0, 6.0], [4.0, 10.0], [10.0, 7.0], [7.0, 3.0]], [[7.0, -2.0], [7.0, 0.0], [9.0, 0.0], [9.0, -2.0]], [[22.0, 0.0], [20.0, 4.0], [24.0, 8.0], [30.0, 5.0], [27.0, 1.0]], [[12.0, 18.0], [10.0, 24.0], [14.0, 30.0], [20.0, 23.0], [17.0, 18.0]], [[12.0, 12.0], [17.0, 15.0], [26.0, 15.0], [23.0, 12.0]], [[25.0, 20.0], [23.0, 28.0], [29.0, 29.0], [34.0, 21.0]]]
    # points = [[[2.0, 2.0], [0.0, 6.0], [4.0, 10.0], [10.0, 7.0], [7.0, 3.0]], [[7.0, -2.0], [7.0, 0.0], [9.0, 0.0], [9.0, -2.0]], [[22.0, 0.0], [20.0, 4.0], [24.0, 8.0], [30.0, 5.0], [27.0, 1.0]], [[12.0, 14.0], [10.0, 21.0], [14.0, 30.0], [20.0, 23.0], [17.0, 14.0]], [[12.0, 12.0], [17.0, 15.0], [26.0, 15.0], [23.0, 12.0]], [[25.0, 20.0], [23.0, 28.0], [29.0, 29.0], [34.0, 21.0]]]
    # Active test case: three small isolated polygons.
    points = [[[0.7, 4.06], [0.6, 2.01], [2.42, 2.95]], [[4.5, 2.59], [5.76, 1.95], [5.14, 3.81], [6.4, 3.17]], [[4.98, 4.87], [6.0, 5.0], [6.4, 5.95], [5.77, 6.77], [4.75, 6.64], [4.36, 5.69]]]
    # Yet another isolated case:
    # points = [[[0.0, 1.0], [1.5, 4.0], [1.0, 6.0]], [[4.0, 4.0], [7.0, 4.0], [5.5, 8.0]]]
    # Overlapping test case:
    # points = [[[0.0, 1.0], [4.0, 2.0], [4.0, 0.0]], [[4.0, 8.0], [6.0, 7.0], [4.0, 5.0], [7.0, 6.0]], [[6.0, 8.0], [9.84, 8.87], [7.16, 11.76]], [[6.0, 10.0], [8.0, 10.0], [5.0, 11.74], [6.0, 7.46], [8.0, 13.46], [9.0, 11.73]]]
    src = np.array([0, 0])
    dst = np.array([8, 8])
    # robot = np.array([[0,0],[0,0.5],[0.5,0]], np.float32)
    robot = np.array([[0, 1], [-1, -1], [1, -1]], np.float32)
    # works with isolated case 1 and end = 20,20
    # robot = np.array([[-1,-1],[-1,1],[0,1],[1,1],[1,-1]], np.float32)
    graph = VisibiltyGraph(points, src, dst)
    graph.getMinkowskiSum(robot)
    graph.findShortestPath()
    print(graph.shortestPath)
    graph.plotPolygonsAndPaths(robot, isRobot=True)
"numpy.array",
"q2.VisibiltyGraph"
] | [((1829, 1845), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1837, 1845), True, 'import numpy as np\n'), ((1856, 1872), 'numpy.array', 'np.array', (['[8, 8]'], {}), '([8, 8])\n', (1864, 1872), True, 'import numpy as np\n'), ((1950, 1999), 'numpy.array', 'np.array', (['[[0, 1], [-1, -1], [1, -1]]', 'np.float32'], {}), '([[0, 1], [-1, -1], [1, -1]], np.float32)\n', (1958, 1999), True, 'import numpy as np\n'), ((2131, 2165), 'q2.VisibiltyGraph', 'VisibiltyGraph', (['points', 'start', 'end'], {}), '(points, start, end)\n', (2145, 2165), False, 'from q2 import VisibiltyGraph\n')] |
"""
Reference: <NAME> et al. "Deep Neural Networks for YouTube Recommendations"
(https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/45530.pdf)
author: massquantity
"""
import os
from itertools import islice
import numpy as np
import tensorflow as tf2
from tensorflow.keras.initializers import (
zeros as tf_zeros,
truncated_normal as tf_truncated_normal
)
from .base import Base, TfMixin
from ..data.data_generator import DataGenSequence
from ..data.sequence import sparse_user_last_interacted
from ..evaluation.evaluate import EvalMixin
from ..utils.tf_ops import (
reg_config,
dropout_config,
dense_nn,
lr_decay_config,
multi_sparse_combine_embedding
)
from ..utils.misc import time_block, colorize, assign_oov_vector, count_params
# Use the TF1-style graph/session API throughout this module: alias the
# compat.v1 namespace as `tf` and switch off v2 eager behavior globally.
tf = tf2.compat.v1
tf.disable_v2_behavior()
class YouTuBeRetrieval(Base, TfMixin, EvalMixin):
    """
    The model implemented mainly corresponds to the candidate generation
    phase based on the original paper.

    Trains a DNN whose last hidden layer is the "user vector"; items are
    scored against it through an NCE / sampled-softmax output layer.
    """
    # NOTE(review): these name lists are presumably consumed by the
    # TfMixin / numpy save-restore machinery -- confirm against base classes.
    # user_variables = []
    item_variables = ["item_interaction_features", "nce_weights", "nce_biases"]
    sparse_variables = ["sparse_features"]
    dense_variables = ["dense_features"]
    user_variables_np = ["user_vector"]
    item_variables_np = ["item_weights"]
    def __init__(
            self,
            task="ranking",
            data_info=None,
            embed_size=16,
            n_epochs=20,
            lr=0.01,
            lr_decay=False,
            reg=None,
            batch_size=256,
            num_sampled_per_batch=None,
            use_bn=True,
            dropout_rate=None,
            hidden_units="128,64,32",
            loss_type="nce",
            recent_num=10,
            random_num=None,
            multi_sparse_combiner="sqrtn",
            sampler="uniform",
            seed=42,
            lower_upper_bound=None,
            tf_sess_config=None
    ):
        """Store hyper-parameters; the tf graph itself is built lazily in fit()."""
        Base.__init__(self, task, data_info, lower_upper_bound)
        TfMixin.__init__(self, tf_sess_config)
        EvalMixin.__init__(self, task, data_info)
        self.task = task
        self.data_info = data_info
        self.embed_size = embed_size
        self.n_epochs = n_epochs
        self.lr = lr
        self.lr_decay = lr_decay
        self.reg = reg_config(reg)
        self.batch_size = batch_size
        # Fall back to batch_size when no (positive) sample count is given.
        self.num_sampled_per_batch = (
            num_sampled_per_batch
            if num_sampled_per_batch and num_sampled_per_batch > 0
            else batch_size
        )
        self.use_bn = use_bn
        self.dropout_rate = dropout_config(dropout_rate)
        self.hidden_units = list(map(int, hidden_units.split(",")))
        # the output of last DNN layer is user vector
        self.user_vector_size = self.hidden_units[-1]
        self.loss_type = loss_type
        self.n_users = data_info.n_users
        self.n_items = data_info.n_items
        (
            self.interaction_mode,
            self.interaction_num
        ) = self._check_interaction_mode(recent_num, random_num)
        self.seed = seed
        # Filled in by _set_latent_vectors() after training.
        self.user_vector = None
        self.item_weights = None
        self.sampler = sampler
        # self.item_biases = None
        self.user_consumed = data_info.user_consumed
        self.sparse = self._decide_sparse_indices(data_info)
        self.dense = self._decide_dense_values(data_info)
        if self.sparse:
            self.sparse_feature_size = self._sparse_feat_size(data_info)
            self.sparse_field_size = self._sparse_field_size(data_info)
            self.multi_sparse_combiner = self._check_multi_sparse(
                data_info, multi_sparse_combiner)
            self.true_sparse_field_size = self._true_sparse_field_size(
                data_info, self.sparse_field_size, self.multi_sparse_combiner)
        if self.dense:
            self.dense_field_size = self._dense_field_size(data_info)
        self.vector_infer = True
        self.all_args = locals()
    def _build_model(self):
        """Assemble the user-side DNN: concat feature embeddings -> dense_nn."""
        self.graph_built = True
        tf.set_random_seed(self.seed)
        # item_indices actually serve as labels in YouTuBeRetrieval model
        self.item_indices = tf.placeholder(tf.int64, shape=[None])
        self.is_training = tf.placeholder_with_default(False, shape=[])
        self.concat_embed = []
        self._build_item_interaction()
        if self.sparse:
            self._build_sparse()
        if self.dense:
            self._build_dense()
        concat_features = tf.concat(self.concat_embed, axis=1)
        # Last hidden layer output is the user representation vector.
        self.user_vector_repr = dense_nn(concat_features,
                                         self.hidden_units,
                                         use_bn=self.use_bn,
                                         dropout_rate=self.dropout_rate,
                                         is_training=self.is_training)
        count_params()
    def _build_item_interaction(self):
        """Pool the embeddings of a user's interacted items (sqrtn combiner)."""
        self.item_interaction_indices = tf.placeholder(
            tf.int64, shape=[None, 2])
        self.item_interaction_values = tf.placeholder(tf.int32, shape=[None])
        self.modified_batch_size = tf.placeholder(tf.int32, shape=[])
        item_interaction_features = tf.get_variable(
            name="item_interaction_features",
            shape=[self.n_items, self.embed_size],
            initializer=tf_truncated_normal(0.0, 0.01),
            regularizer=self.reg)
        sparse_item_interaction = tf.SparseTensor(
            self.item_interaction_indices,
            self.item_interaction_values,
            [self.modified_batch_size, self.n_items]
        )
        pooled_embed = tf.nn.safe_embedding_lookup_sparse(
            item_interaction_features, sparse_item_interaction,
            sparse_weights=None, combiner="sqrtn", default_id=None
        )  # unknown user will return 0-vector
        self.concat_embed.append(pooled_embed)
    def _build_sparse(self):
        """Embed user sparse features, optionally combining multi-sparse fields."""
        self.sparse_indices = tf.placeholder(
            tf.int32, shape=[None, self.sparse_field_size])
        sparse_features = tf.get_variable(
            name="sparse_features",
            shape=[self.sparse_feature_size, self.embed_size],
            initializer=tf_truncated_normal(0.0, 0.01),
            regularizer=self.reg)
        if (self.data_info.multi_sparse_combine_info
                and self.multi_sparse_combiner in ("sum", "mean", "sqrtn")):
            sparse_embed = multi_sparse_combine_embedding(
                self.data_info, sparse_features, self.sparse_indices,
                self.multi_sparse_combiner, self.embed_size)
        else:
            sparse_embed = tf.nn.embedding_lookup(
                sparse_features, self.sparse_indices)
        sparse_embed = tf.reshape(
            sparse_embed, [-1, self.true_sparse_field_size * self.embed_size])
        self.concat_embed.append(sparse_embed)
    def _build_dense(self):
        """Scale a learned embedding per dense field by the field's value."""
        self.dense_values = tf.placeholder(
            tf.float32, shape=[None, self.dense_field_size])
        dense_values_reshape = tf.reshape(
            self.dense_values, [-1, self.dense_field_size, 1])
        batch_size = tf.shape(self.dense_values)[0]
        dense_features = tf.get_variable(
            name="dense_features",
            shape=[self.dense_field_size, self.embed_size],
            initializer=tf_truncated_normal(0.0, 0.01),
            regularizer=self.reg)
        dense_embed = tf.expand_dims(dense_features, axis=0)
        # B * F2 * K
        dense_embed = tf.tile(dense_embed, [batch_size, 1, 1])
        dense_embed = tf.multiply(dense_embed, dense_values_reshape)
        dense_embed = tf.reshape(
            dense_embed, [-1, self.dense_field_size * self.embed_size])
        self.concat_embed.append(dense_embed)
    def _build_train_ops(self, **kwargs):
        """Create the NCE / sampled-softmax loss and the Adam training op."""
        self.nce_weights = tf.get_variable(
            name="nce_weights",
            # n_classes, embed_size
            shape=[self.n_items, self.user_vector_size],
            initializer=tf_truncated_normal(0.0, 0.01),
            regularizer=self.reg
        )
        self.nce_biases = tf.get_variable(
            name="nce_biases",
            shape=[self.n_items],
            initializer=tf_zeros,
            regularizer=self.reg,
            trainable=True
        )
        # By default, `sampled_softmax_loss` and `nce_loss` in tensorflow
        # uses `log_uniform_candidate_sampler` to sample negative items,
        # which may not be suitable in recommendation scenarios.
        labels = tf.reshape(self.item_indices, [-1, 1])
        sampled_values = tf.random.uniform_candidate_sampler(
            true_classes=labels,
            num_true=1,
            num_sampled=self.num_sampled_per_batch,
            unique=True,
            range_max=self.n_items,
        ) if self.sampler == "uniform" else None
        if self.loss_type == "nce":
            self.loss = tf.reduce_mean(tf.nn.nce_loss(
                weights=self.nce_weights,
                biases=self.nce_biases,
                labels=labels,
                inputs=self.user_vector_repr,
                num_sampled=self.num_sampled_per_batch,
                num_classes=self.n_items,
                num_true=1,
                sampled_values=sampled_values,
                remove_accidental_hits=True,
                partition_strategy="div")
            )
        elif self.loss_type == "sampled_softmax":
            self.loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(
                weights=self.nce_weights,
                biases=self.nce_biases,
                labels=labels,
                inputs=self.user_vector_repr,
                num_sampled=self.num_sampled_per_batch,
                num_classes=self.n_items,
                num_true=1,
                sampled_values=sampled_values,
                remove_accidental_hits=True,
                seed=self.seed,
                partition_strategy="div")
            )
        else:
            raise ValueError("Loss type must either be 'nce' "
                             "or 'sampled_softmax")
        if self.reg is not None:
            reg_keys = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            total_loss = self.loss + tf.add_n(reg_keys)
        else:
            total_loss = self.loss
        if self.lr_decay:
            n_batches = int(self.data_info.data_size / self.batch_size)
            self.lr, global_steps = lr_decay_config(self.lr, n_batches,
                                                    **kwargs)
        else:
            global_steps = None
        optimizer = tf.train.AdamOptimizer(self.lr)
        optimizer_op = optimizer.minimize(total_loss, global_step=global_steps)
        # Group with UPDATE_OPS so batch-norm statistics update during training.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.training_op = tf.group([optimizer_op, update_ops])
        self.sess.run(tf.global_variables_initializer())
    def fit(self, train_data, verbose=1, shuffle=True, eval_data=None,
            metrics=None, **kwargs):
        """Train for n_epochs; builds the graph on first call.

        Raises AssertionError when task != "ranking"; optionally evaluates
        on eval_data after each epoch when verbose > 1.
        """
        assert self.task == "ranking", (
            "YouTube models is only suitable for ranking"
        )
        self._check_item_col()
        self.show_start_time()
        if not self.graph_built:
            self._build_model()
            self._build_train_ops(**kwargs)
        data_generator = DataGenSequence(
            train_data, self.data_info, self.sparse, self.dense,
            mode=self.interaction_mode, num=self.interaction_num,
            class_name="YoutubeMatch", padding_idx=self.n_items
        )
        for epoch in range(1, self.n_epochs + 1):
            with time_block(f"Epoch {epoch}", verbose):
                train_total_loss = []
                for b, ii, iv, user, item, _, si, dv in data_generator(
                        shuffle, self.batch_size):
                    feed_dict = {self.modified_batch_size: b,
                                 self.item_interaction_indices: ii,
                                 self.item_interaction_values: iv,
                                 self.item_indices: item,
                                 self.is_training: True}
                    if self.sparse:
                        feed_dict.update({self.sparse_indices: si})
                    if self.dense:
                        feed_dict.update({self.dense_values: dv})
                    train_loss, _ = self.sess.run(
                        [self.loss, self.training_op], feed_dict)
                    train_total_loss.append(train_loss)
            if verbose > 1:
                train_loss_str = "train_loss: " + str(
                    round(float(np.mean(train_total_loss)), 4)
                )
                print(f"\t {colorize(train_loss_str, 'green')}")
                # for evaluation
                self._set_latent_vectors()
                self.print_metrics(eval_data=eval_data, metrics=metrics,
                                   **kwargs)
                print("=" * 30)
        # for prediction and recommendation
        self._set_latent_vectors()
        assign_oov_vector(self)
    def predict(self, user, item, cold_start="average", inner_id=False):
        """Score user-item pairs: sigmoid of the user-vector / item-weight dot product."""
        user, item = self.convert_id(user, item, inner_id)
        unknown_num, unknown_index, user, item = self._check_unknown(user, item)
        preds = np.sum(
            np.multiply(self.user_vector[user],
                        self.item_weights[item]),
            axis=1)
        preds = 1 / (1 + np.exp(-preds))
        if unknown_num > 0 and cold_start == "popular":
            preds[unknown_index] = self.default_prediction
        return preds
    def recommend_user(self, user, n_rec, cold_start="average", inner_id=False):
        """Return the top ``n_rec`` (item, score) pairs, excluding consumed items."""
        user_id = self._check_unknown_user(user, inner_id)
        if user_id is None:
            if cold_start == "average":
                # Row n_users holds the average/oov user vector.
                user_id = self.n_users
            elif cold_start == "popular":
                return self.popular_recommends(inner_id, n_rec)
            else:
                raise ValueError(user)
        consumed = set(self.user_consumed[user_id])
        # Over-fetch so the list is still n_rec long after filtering consumed.
        count = n_rec + len(consumed)
        recos = self.user_vector[user_id] @ self.item_weights.T
        recos = 1 / (1 + np.exp(-recos))
        ids = np.argpartition(recos, -count)[-count:]
        rank = sorted(zip(ids, recos[ids]), key=lambda x: -x[1])
        recs_and_scores = islice(
            (rec if inner_id else (self.data_info.id2item[rec[0]], rec[1])
             for rec in rank if rec[0] not in consumed),
            n_rec
        )
        return list(recs_and_scores)
    def _set_latent_vectors(self):
        """Compute serving-time user vectors and item weights.

        Runs the DNN over every user's last interacted items (plus features),
        then appends a constant-1 column to user vectors and the nce bias
        column to item weights so a plain dot product includes the bias.
        """
        user_indices = np.arange(self.n_users)
        (
            interacted_indices,
            interacted_values
        ) = sparse_user_last_interacted(
            user_indices, self.user_consumed, self.interaction_num
        )
        feed_dict = {self.item_interaction_indices: interacted_indices,
                     self.item_interaction_values: interacted_values,
                     self.modified_batch_size: self.n_users,
                     self.is_training: False}
        if self.sparse:
            # remove oov
            user_sparse_indices = self.data_info.user_sparse_unique[:-1]
            feed_dict.update({self.sparse_indices: user_sparse_indices})
        if self.dense:
            user_dense_values = self.data_info.user_dense_unique[:-1]
            feed_dict.update({self.dense_values: user_dense_values})
        user_vector = self.sess.run(self.user_vector_repr, feed_dict)
        item_weights = self.sess.run(self.nce_weights)
        item_biases = self.sess.run(self.nce_biases)
        user_bias = np.ones([len(user_vector), 1], dtype=user_vector.dtype)
        item_bias = item_biases[:, None]
        self.user_vector = np.hstack([user_vector, user_bias])
        self.item_weights = np.hstack([item_weights, item_bias])
        # oov_zeros = np.zeros(self.user_vector_size + 1, dtype=np.float32)
        # self.user_vector = np.vstack([u_vector, oov_zeros])
        # self.item_weights = np.vstack([i_weights, oov_zeros])
    def _check_item_col(self):
        """Reject datasets with item features; this model assumes none."""
        if len(self.data_info.item_col) > 0:
            raise ValueError(
                "The YouTuBeRetrieval model assumes no item features."
            )
    def save(self, path, model_name, manual=True, inference_only=False):
        """Save params; either just the serving arrays or full tf variables."""
        if not os.path.isdir(path):
            print(f"file folder {path} doesn't exists, creating a new one...")
            os.makedirs(path)
        self.save_params(path)
        if inference_only:
            variable_path = os.path.join(path, model_name)
            np.savez_compressed(variable_path,
                                user_vector=self.user_vector,
                                item_weights=self.item_weights)
        else:
            self.save_variables(path, model_name, inference_only=False)
    @classmethod
    def load(cls, path, model_name, data_info, manual=True):
        """Rebuild a model from saved hyper-parameters and serving arrays."""
        variable_path = os.path.join(path, f"{model_name}.npz")
        variables = np.load(variable_path)
        hparams = cls.load_params(path, data_info)
        model = cls(**hparams)
        model.user_vector = variables["user_vector"]
        model.item_weights = variables["item_weights"]
        return model
| [
"numpy.mean",
"itertools.islice",
"numpy.multiply",
"tensorflow.keras.initializers.truncated_normal",
"numpy.argpartition",
"numpy.hstack",
"os.makedirs",
"os.path.join",
"numpy.exp",
"os.path.isdir",
"numpy.savez_compressed",
"numpy.load",
"numpy.arange"
] | [((14321, 14445), 'itertools.islice', 'islice', (['(rec if inner_id else (self.data_info.id2item[rec[0]], rec[1]) for rec in\n rank if rec[0] not in consumed)', 'n_rec'], {}), '((rec if inner_id else (self.data_info.id2item[rec[0]], rec[1]) for\n rec in rank if rec[0] not in consumed), n_rec)\n', (14327, 14445), False, 'from itertools import islice\n'), ((14585, 14608), 'numpy.arange', 'np.arange', (['self.n_users'], {}), '(self.n_users)\n', (14594, 14608), True, 'import numpy as np\n'), ((15731, 15766), 'numpy.hstack', 'np.hstack', (['[user_vector, user_bias]'], {}), '([user_vector, user_bias])\n', (15740, 15766), True, 'import numpy as np\n'), ((15795, 15831), 'numpy.hstack', 'np.hstack', (['[item_weights, item_bias]'], {}), '([item_weights, item_bias])\n', (15804, 15831), True, 'import numpy as np\n'), ((16924, 16963), 'os.path.join', 'os.path.join', (['path', 'f"""{model_name}.npz"""'], {}), "(path, f'{model_name}.npz')\n", (16936, 16963), False, 'import os\n'), ((16984, 17006), 'numpy.load', 'np.load', (['variable_path'], {}), '(variable_path)\n', (16991, 17006), True, 'import numpy as np\n'), ((13284, 13344), 'numpy.multiply', 'np.multiply', (['self.user_vector[user]', 'self.item_weights[item]'], {}), '(self.user_vector[user], self.item_weights[item])\n', (13295, 13344), True, 'import numpy as np\n'), ((14190, 14220), 'numpy.argpartition', 'np.argpartition', (['recos', '(-count)'], {}), '(recos, -count)\n', (14205, 14220), True, 'import numpy as np\n'), ((16315, 16334), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (16328, 16334), False, 'import os\n'), ((16427, 16444), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (16438, 16444), False, 'import os\n'), ((16531, 16561), 'os.path.join', 'os.path.join', (['path', 'model_name'], {}), '(path, model_name)\n', (16543, 16561), False, 'import os\n'), ((16574, 16674), 'numpy.savez_compressed', 'np.savez_compressed', (['variable_path'], {'user_vector': 'self.user_vector', 'item_weights': 
'self.item_weights'}), '(variable_path, user_vector=self.user_vector,\n item_weights=self.item_weights)\n', (16593, 16674), True, 'import numpy as np\n'), ((5341, 5371), 'tensorflow.keras.initializers.truncated_normal', 'tf_truncated_normal', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (5360, 5371), True, 'from tensorflow.keras.initializers import zeros as tf_zeros, truncated_normal as tf_truncated_normal\n'), ((6193, 6223), 'tensorflow.keras.initializers.truncated_normal', 'tf_truncated_normal', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (6212, 6223), True, 'from tensorflow.keras.initializers import zeros as tf_zeros, truncated_normal as tf_truncated_normal\n'), ((7315, 7345), 'tensorflow.keras.initializers.truncated_normal', 'tf_truncated_normal', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (7334, 7345), True, 'from tensorflow.keras.initializers import zeros as tf_zeros, truncated_normal as tf_truncated_normal\n'), ((7984, 8014), 'tensorflow.keras.initializers.truncated_normal', 'tf_truncated_normal', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (8003, 8014), True, 'from tensorflow.keras.initializers import zeros as tf_zeros, truncated_normal as tf_truncated_normal\n'), ((13415, 13429), 'numpy.exp', 'np.exp', (['(-preds)'], {}), '(-preds)\n', (13421, 13429), True, 'import numpy as np\n'), ((14159, 14173), 'numpy.exp', 'np.exp', (['(-recos)'], {}), '(-recos)\n', (14165, 14173), True, 'import numpy as np\n'), ((12581, 12606), 'numpy.mean', 'np.mean', (['train_total_loss'], {}), '(train_total_loss)\n', (12588, 12606), True, 'import numpy as np\n')] |
dict_iso={'afghanistan': 'Afghanistan',
'albania': 'Albania',
'algeria': 'Algeria',
'andorra': 'Andorra',
'angola': 'Angola',
'antigua-and-barbuda': 'Antigua and Barbuda',
'argentina': 'Argentina',
'armenia': 'Armenia',
'aruba': 'Aruba',
'australia': 'Australia',
'austria': 'Austria',
'azerbaijan': 'Azerbaijan',
'bahamas': 'Bahamas',
'bahrain': 'Bahrain',
'bangladesh': 'Bangladesh',
'Barbados': 'Barbados',
'belarus': 'Belarus',
'belgium': 'Belgium',
'belize': 'Belize',
'benin': 'Benin',
'bermuda': 'Bermuda',
'bhutan': 'Bhutan',
'bolivia': 'Bolivia, Plurinational State of',
'bosnia-and-herzegovina': 'Bosnia and Herzegovina',
'botswana': 'Botswana',
'brazil': 'Brazil',
'bulgaria': 'Bulgaria',
'burkina-faso': 'Burkina Faso',
'burundi': 'Burundi',
'cabo-verde': 'Cape Verde',
'cambodia': 'Cambodia',
'cameroon': 'Cameroon',
'canada': 'Canada',
'cayman-islands': 'Cayman Islands',
'central-african-republic': 'Central African Republic',
'chad': 'Chad',
'chile': 'Chile',
'china': 'China',
'china-hong-kong-sar': 'Hong Kong,China',
'china-macao-sar': 'Macao, China',
'colombia': 'Colombia',
'comoros': 'Comoros',
'congo': 'Congo',
'costa-rica': 'Costa Rica',
'cote-d-ivoire': "Côte d'Ivoire",
'croatia': 'Croatia',
'cuba': 'Cuba',
'cyprus': 'Cyprus',
'czech-republic': 'Czech Republic',
'democratic-republic-of-the-congo': 'Congo, the Democratic Republic of the',
'denmark': 'Denmark',
'djibouti': 'Djibouti',
'dominican-republic': 'Dominican Republic',
'ecuador': 'Ecuador',
'egypt': 'Egypt',
'el-salvador': 'El Salvador',
'equatorial-guinea': 'Equatorial Guinea',
'eritrea': 'Eritrea',
'estonia': 'Estonia',
'ethiopia': 'Ethiopia',
'faeroe-islands': 'Faroe Islands',
'fiji': 'Fiji',
'finland': 'Finland',
'france': 'France',
'french-guiana': 'French Guiana',
'french-polynesia': 'French Polynesia',
'gabon': 'Gabon',
'gambia': 'Gambia',
'georgia': 'Georgia',
'germany': 'Germany',
'ghana': 'Ghana',
'gibraltar': 'Gibraltar',
'greece': 'Greece',
'grenada': 'Grenada',
'guadeloupe': 'Guadeloupe',
'guatemala': 'Guatemala',
'guinea': 'Guinea',
'guinea-bissau': 'Guinea-Bissau',
'guyana': 'Guyana',
'haiti': 'Haiti',
'honduras': 'Honduras',
'hungary': 'Hungary',
'iceland': 'Iceland',
'india': 'India',
'indonesia': 'Indonesia',
'iran': 'Iran, Islamic Republic of',
'iraq': 'Iraq',
'ireland': 'Ireland',
'israel': 'Israel',
'italy': 'Italy',
'jamaica': 'Jamaica',
'japan': 'Japan',
'jordan': 'Jordan',
'kazakhstan': 'Kazakhstan',
'kenya': 'Kenya',
'kuwait': 'Kuwait',
'kyrgyzstan': 'Kyrgyzstan',
'latvia': 'Latvia',
'lebanon': 'Lebanon',
'lesotho': 'Lesotho',
'liberia': 'Liberia',
'libya': 'Libya',
'liechtenstein': 'Liechtenstein',
'lithuania': 'Lithuania',
'luxembourg': 'Luxembourg',
'macedonia': 'North Macedonia',
'madagascar': 'Madagascar',
'malawi': 'Malawi',
'malaysia': 'Malaysia',
'maldives': 'Maldives',
'mali': 'Mali',
'malta': 'Malta',
'martinique': 'Martinique',
'mauritania': 'Mauritania',
'mauritius': 'Mauritius',
'mayotte': 'Mayotte',
'mexico': 'Mexico',
'moldova': 'Moldova, Republic of',
'monaco': 'Monaco',
'mongolia': 'Mongolia',
'montenegro': 'Montenegro',
'morocco': 'Morocco',
'mozambique': 'Mozambique',
'myanmar': 'Myanmar',
'namibia': 'Namibia',
'nepal': 'Nepal',
'netherlands': 'Netherlands',
'new-zealand': 'New Zealand',
'nicaragua': 'Nicaragua',
'niger': 'Niger',
'nigeria': 'Nigeria',
'norway': 'Norway',
'oman': 'Oman',
'pakistan': 'Pakistan',
'panama': 'Panama',
'papua-new-guinea': 'Papua New Guinea',
'paraguay': 'Paraguay',
'peru': 'Peru',
'philippines': 'Philippines',
'poland': 'Poland',
'portugal': 'Portugal',
'qatar': 'Qatar',
'reunion': 'Réunion',
'romania': 'Romania',
'russia': 'Russia',
'rwanda': 'Rwanda',
'saint-kitts-and-nevis': 'Saint Kitts and Nevis',
'saint-lucia': 'Saint Lucia',
'sao-tome-and-principe': 'Sao Tome and Principe',
'saudi-arabia': 'Saudi Arabia',
'senegal': 'Senegal',
'serbia': 'Serbia',
'seychelles': 'Seychelles',
'sierra-leone': 'Sierra Leone',
'singapore': 'Singapore',
'slovakia': 'Slovakia',
'slovenia': 'Slovenia',
'somalia': 'Somalia',
'south-africa': 'South Africa',
'south-korea': 'South Korea',
'spain': 'Spain',
'sri-lanka': 'Sri Lanka',
'state-of-palestine': 'Palestinian Territory, Occupied',
'sudan': 'Sudan',
'suriname': 'Suriname',
'swaziland': 'Swaziland',
'sweden': 'Sweden',
'switzerland': 'Switzerland',
'syria': 'Syrian Arab Republic',
'taiwan': 'Taiwan,China',
'tajikistan': 'Tajikistan',
'tanzania': 'Tanzania, United Republic of',
'thailand': 'Thailand',
'togo': 'Togo',
'trinidad-and-tobago': 'Trinidad and Tobago',
'tunisia': 'Tunisia',
'turkey': 'Turkey',
'turks-and-caicos-islands': 'Turks and Caicos Islands',
'uganda': 'Uganda',
'uk': 'United Kingdom',
'ukraine': 'Ukraine',
'united-arab-emirates': 'United Arab Emirates',
'uruguay': 'Uruguay',
'us': 'United States',
'uzbekistan': 'Uzbekistan',
'venezuela': 'Venezuela, Bolivarian Republic of',
'viet-nam': 'Viet Nam',
'western-sahara': 'Western Sahara',
'yemen': 'Yemen',
'zambia': 'Zambia',
'zimbabwe': 'Zimbabwe',
'faeroe-islands':'Faroe Islands',
'saint-vincent-and-the-grenadines':'Saint Vincent & the Grenadines',
'timor-leste':'Timor-Leste',
'grenada':'Grenada',
'new-caledonia':'New Caledonia',
'laos':'Lao People\'s Democratic Republic',
'dominica':'Dominica',
'falkland-islands-malvinas':'Falkland Islands',
'greenland':'Greenland',
'holy-see':'Holy See (Vatican City State)',
'anguilla':'Anguilla',
'south-sudan':'South Sudan'
}
cate={'china':'east asia',
'us':'north america',
'brazil':'south america',
'russia':'eastern europe',
'india':'south asia',
'uk':'western europe',
'spain':'western europe',
'peru':'south america',
'chile':'south america',
'italy':'western europe',
'iran':'west asia',
'mexico':'central america and mexico',
'pakistan':'west asia',
'turkey':'west asia',
'germany':'western europe',
'saudi-arabia':'west asia',
'france':'western europe',
'south-africa':'southern africa',
'bangladesh':'south asia',
'canada':'north america',
'qatar':'west asia',
'democratic-republic-of-the-congo':'central africa',
'colombia':'south america',
'egypt':'south-east mediterranean',
'sweden':'western europe',
'belarus':'eastern europe',
'belgium':'western europe',
'argentina':'south america',
'ecuador':'south america',
'indonesia':'southeast asia',
'netherlands':'western europe',
'united-arab-emirates':'west asia',
'iraq':'west asia',
'kuwait':'west asia',
'singapore':'southeast asia',
'ukraine':'eastern europe',
'portugal':'western europe',
'oman':'west asia',
'philippines':'southeast asia',
'poland':'eastern europe',
'panama':'central america and mexico',
'switzerland':'western europe',
'dominican-republic':'caribbean',
'afghanistan':'west asia',
'bolivia':'south america',
'romania':'eastern europe',
'bahrain':'west asia',
'ireland':'western europe',
'armenia':'eastern europe',
'nigeria':'west africa',
'israel':'south-east mediterranean',
'kazakhstan':'central asia',
'japan':'east asia',
'austria':'western europe',
'honduras':'central america and mexico',
'sao-tome-and-principe':'southeast asia',
'central-african-republic':'central africa',
'gabon':'central africa',
'ghana':'west africa',
'azerbaijan':'central asia',
'guatemala':'central america and mexico',
'moldova':'eastern europe',
'serbia':'eastern europe',
'algeria':'south-east mediterranean',
'nepal':'south asia',
'south-korea':'east asia',
'denmark':'western europe',
'cameroon':'central africa',
'morocco':'south-east mediterranean',
'czech-republic':'eastern europe',
'sudan':'east africa',
'cote-d-ivoire':'west africa',
'norway':'western europe',
'malaysia':'southeast asia',
'uzbekistan':'central asia',
'australia':'pacific region',
'finland':'western europe',
'saint-martin':'caribbean',
'senegal':'west africa',
'macedonia':'eastern europe',
'kenya':'east africa',
'el-salvador':'central america and mexico',
'guyana':'caribbean',
'tajikistan':'central asia',
'ethiopia':'east africa',
'guinea':'west africa',
'venezuela':'south america',
'jamaica':'caribbean',
'kyrgyzstan':'central asia',
'bulgaria':'eastern europe',
'djibouti':'east africa',
'luxembourg':'western europe',
'mauritania':'west africa',
'hungary':'eastern europe',
'bosnia-and-herzegovina':'eastern europe',
'french-guiana':'south america',
'grenada':'caribbean',
'greece':'western europe',
'thailand':'southeast asia',
'costa-rica':'central america and mexico',
'suriname':'caribbean',
'somalia':'east africa',
'croatia':'eastern europe',
'mayotte':'east africa',
'albania':'eastern europe',
'cuba':'caribbean',
'maldives':'south asia',
'nicaragua':'central america and mexico',
'equatorial-guinea':'central africa',
'mali':'west africa',
'paraguay':'south america',
'madagascar':'indian ocean islands',
'sri-lanka':'south asia',
'haiti':'caribbean',
'state-of-palestine':'missing',
'south-sudan':'east africa',
'estonia':'eastern europe',
'iceland':'western europe',
'lithuania':'eastern europe',
'lebanon':'south-east mediterranean',
'slovakia':'eastern europe',
'guinea-bissau':'west africa',
'slovenia':'eastern europe',
'zambia':'southern africa',
'new-zealand':'pacific region',
'sierra-leone':'west africa',
'china-hong-kong-sar':'east asia',
'tunisia':'south-east mediterranean',
'cabo-verde':'west africa',
'benin':'west africa',
'malawi':'southern africa',
'jordan':'south-east mediterranean',
'yemen':'west asia',
'latvia':'eastern europe',
'niger':'west africa',
'cyprus':'south-east mediterranean',
'burkina-faso':'west africa',
'uruguay':'south america',
'georgia':'eastern europe',
'rwanda':'east africa',
'chad':'west africa',
'mozambique':'southern africa',
'uganda':'east africa',
'andorra':'western europe',
'swaziland':'southern africa',
'liberia':'west africa',
'libya':'south-east mediterranean',
'malta':'south-east mediterranean',
'togo':'west africa',
'channel-islands':'western europe',
'zimbabwe':'southern africa',
'reunion':'indian ocean islands',
'tanzania':'southern africa',
'montenegro':'eastern europe',
'taiwan':'east asia',
'viet-nam':'southeast asia',
'mauritius':'west africa',
'myanmar':'southeast asia',
'comoros':'indian ocean islands',
'angola':'southern africa',
'syria':'south-east mediterranean',
'martinique':'eastern europe',
'mongolia':'east asia',
'cayman-islands':'north america',
'eritrea':'east africa',
'namibia':'southern africa',
'guadeloupe':'caribbean',
'gibraltar':'north africa',
'burundi':'east africa',
'bermuda':'north america',
'cambodia':'southeast asia',
'bahamas':'caribbean',
'monaco':'eastern europe',
'botswana':'southern africa',
'bhutan':'south asia',
'seychelles':'indian ocean islands',
'antigua-and-barbuda':'caribbean',
'french-polynesia':'pacific region',
'china-macao-sar':'east asia',
'gambia':'west africa',
'turks-and-caicos-islands':'southern africa',
'lesotho':'southern africa',
'belize':'caribbean',
'curacao':'north america',
'papua-new-guinea':'pacific region',
'western-sahara':'west africa',
'fiji':'pacific region',
'saint-kitts-and-nevis':'caribbean',
'saint-lucia':'caribbean',
'congo':'west africa',
'trinidad-and-tobago':'caribbean',
'faeroe-islands':'western europe',
'Barbados':'caribbean',
'liechtenstein':'western europe',
'aruba':'western europe',
'faeroe-islands':'western europe',
'saint-vincent-and-the-grenadines':'caribbean',
'timor-leste':'pacific region',
'grenada':'caribbean',
'new-caledonia':'pacific region',
'laos':'southeast asia',
'dominica':'caribbean',
'falkland-islands-malvinas':'south america',
'greenland':'north america',
'holy-see':'western europe',
'anguilla':'caribbean',
}
from tqdm import tqdm
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import requests
import json
import time
import random
import html5lib
import re
import scipy.stats as st
from pandas.core.frame import DataFrame
import copy
import math
import datetime
# "Connection: close" avoids keep-alive sockets piling up across the many
# sequential worldometers requests made below.
headers = { 'Connection': 'close'}
# proxies={'http':'http://127.0.0.1:10080','https':'http://127.0.0.1:10080'}
url='https://www.worldometers.info/coronavirus/#countries'
# url='https://www.worldometers.info/coronavirus/country/us/'
# Fetch the worldometers overview page and collect every table row (<tr>).
# The three style values cover the row backgrounds worldometers alternates
# between in its country tables, so x holds the rows of all tables on the page.
a=requests.get(url,headers=headers)
soup = BeautifulSoup(a.content,'html5lib')
x=soup.body.find_all('tr', attrs={'style': ['','background-color:#F0F0F0','background-color:#EAF7D5']})
# 190 210
def find_start_yesterday(i, j, rows=None):
	"""Locate the index of the first data row of yesterday's world table.

	Scans ``rows`` (defaults to the module-level soup rows ``x``) over the
	index range ``[i, j)`` and returns the first index ``start`` whose
	country link is 'china' while the next row's link is 'us' — that
	adjacent pair marks the top of yesterday's country table. Returns
	``None`` when the pair is not found in the range.

	The optional ``rows`` parameter generalizes the original global-only
	lookup so the scan can be tested (or reused) with any sequence of row
	objects exposing ``find_all('a', attrs={'class': 'mt_a'})``.
	"""
	table = x if rows is None else rows
	for start in range(i, j):
		links_here = table[start].find_all('a', attrs={'class': 'mt_a'})
		links_next = table[start + 1].find_all('a', attrs={'class': 'mt_a'})
		# Header/spacer rows carry no country anchor; skip them.
		if not links_here or not links_next:
			continue
		# Hrefs look like 'country/<name>/', so the name is split index 1.
		country_here = str(links_here[0]).split('/')
		country_next = str(links_next[0]).split('/')
		if country_here[1] == 'china' and country_next[1] == 'us':
			return start
#385 410
def find_end_yesterday(i, j, rows=None):
	"""Locate the exclusive end index of yesterday's world table.

	Scans ``rows`` (defaults to the module-level soup rows ``x``) over
	``[i, j)`` and returns ``end + 1`` for the first row whose country link
	is 'anguilla' — the last country listed in the table. Returns ``None``
	when the marker row is not found in the range.

	The optional ``rows`` parameter generalizes the original global-only
	lookup so the scan can be tested (or reused) with any sequence of row
	objects exposing ``find_all('a', attrs={'class': 'mt_a'})``.
	"""
	table = x if rows is None else rows
	for end in range(i, j):
		links = table[end].find_all('a', attrs={'class': 'mt_a'})
		if not links:
			continue  # header/spacer rows carry no country anchor
		# Hrefs look like 'country/<name>/', so the name is split index 1.
		country = str(links[0]).split('/')
		if country[1] == 'anguilla':
			return end + 1
# Bracket the two world tables by scanning known index windows:
# [start, end)   -> yesterday's country table,
# [start2, end2) -> the two-days-ago table used for day-over-day deltas.
end=find_end_yesterday(370,440)
end2=find_end_yesterday(630,700)
start=find_start_yesterday(190,240)
start2=find_start_yesterday(440,470)
print('start:{}\tend:{}\tstart2:{}\tend2:{}'.format(start,end,start2,end2))
# Column layout of one scraped row: raw worldometers fields first, then the
# derived metrics appended at the end of the loop below (40 fields total).
col_name=['0','#','Country,Other','TotalCases',
'NewCases','TotalDeaths','NewDeaths','TotalRecovered',
'NewRecovered','ActiveCases','Serious,Critical','Tot Cases/1M pop',
'Deaths/1M pop','TotalTests','Tests/1M pop','Population',
'Continent','17',' 1 Caseevery X', 'ppl1 Deathevery',' X ppl1 Testevery ','X ppl','22',
'Cases Per 100K Population','Tests Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio',
'New Positive%','Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week',
'New Test','NPI','Region','key-id','Country/District','field','7 days inc cases','7 days inc deaths']
#export https_proxy=http://127.0.0.1:10080;export http_proxy=http://127.0.0.1:10080;export all_proxy=socks5://127.0.0.1:10081
# World scrape: for each country row of yesterday's table, download the
# country page, extract the historical cases/deaths series embedded in its
# chart javascript, derive growth/ratio metrics, and append one flat
# 40-field record per country to raw_data.
raw_data=[]
for i in tqdm(range(start,end)):
	# time.sleep(2)
	text_source=x[i]
	l=text_source.find_all('a',attrs={'class':'mt_a'})
	if l==[]:
		continue
	s=str(l[0])
	coun=s.split('/')
	# Region / ISO lookup; unknown countries are tagged 'missing' and are
	# dropped near the end of the loop body.
	try:
		region=cate[coun[1]]
		iso=dict_iso[coun[1]]
	except:
		region='missing'
	url='https://www.worldometers.info/coronavirus/country/'+coun[1]+'/'
	# a=requests.get(url,proxies=proxies,headers =headers)
	# Retry the country page until the request succeeds.
	a=''
	while a=='':
		try:
			a=requests.get(url,headers=headers)
		except:
			a=''
	soup = BeautifulSoup(a.content,'html5lib')
	r=soup.body.find_all('script',attrs={'type':'text/javascript'})
	# The chart scripts hold the date axis ("categories") and the series data.
	p=re.compile(r'categories: \[(.*?)\]',re.S)
	rs=re.findall(p,r[0].text)
	d=rs[0]
	str_pat = re.compile(r'\"(.*?)\"')
	d = str_pat.findall(d)
	date=d
	# Pull the 'Cases' series from whichever of the first ten scripts has it.
	p1=re.compile(r'name: \'Cases\'.*?\[(.*?)\]',re.S)
	for j in range(10):
		try:
			rs=re.findall(p1,r[j].text)
			d=rs[0]
			d=re.sub(r'\"','',d)
			case=d.split(',')
		except:
			# print('{} cases is not{}'.format(coun[1],j))
			continue
	# Same scan for the 'Deaths' series.
	p1=re.compile(r'name: \'Deaths\'.*?\[(.*?)\]',re.S)
	for j in range(10):
		try:
			rs=re.findall(p1,r[j].text)
			d=rs[0]
			d=re.sub(r'\"','',d)
			TD=d.split(',')
		except:
			continue
	j={'Date':date,'Total Cases':case,'Total Deaths':TD}
	print("Date {} TC {} TD {}".format(len(date),len(case),len(TD)))
	# Skip countries whose three series came back with inconsistent lengths.
	if not len(set([len(date),len(case),len(TD)])) == 1:
		continue
	hist_data_of_coun_i=pd.DataFrame(j)
	hist_data_of_coun_i['Total Deaths'][0]=0
	for k in range(len(hist_data_of_coun_i['Total Deaths'])):
		if hist_data_of_coun_i['Total Deaths'][k]=='null':
			data['Total Deaths'][k]=0
	hist_data_of_coun_i['Total Cases']=hist_data_of_coun_i['Total Cases'].astype(int)
	hist_data_of_coun_i['Total Deaths']=hist_data_of_coun_i['Total Deaths'].astype(int)
	hist_data_of_coun_i['case inc']=hist_data_of_coun_i['Total Cases'].diff()
	hist_data_of_coun_i['death inc']=hist_data_of_coun_i['Total Deaths'].diff()
	# 7-day incremental cases and deaths
	seven_cases=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])
	seven_deaths=sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(1,8)])
	inc1=hist_data_of_coun_i.loc[len(date)-1,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-8,'case inc'])
	inc2=hist_data_of_coun_i.loc[len(date)-1,'death inc']/(7*hist_data_of_coun_i.loc[len(date)-8,'death inc'])
	# Week-over-week growth: last 7 daily increments over the previous 7.
	inc_1=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])/sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(8,15)])
	inc_2=sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(1,8)])/sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(8,15)])
	adcp=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])/7
	p=1
	while inc1 ==0 and hist_data_of_coun_i.loc[len(date)-1,'Total Cases']>=10000:
		p+=1
		inc1=hist_data_of_coun_i.loc[len(date)-p,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-1-p,'case inc'])
	# NPI proxy: slope of the 5-day log-ratio of daily increments, fitted by
	# linear regression over the first 30 usable points.
	dd=hist_data_of_coun_i.shift(5)
	hist_data_of_coun_i['inc_p']=np.log(hist_data_of_coun_i['case inc']/dd['case inc'])/5
	hist_data_of_coun_i=hist_data_of_coun_i[~hist_data_of_coun_i.isin([np.nan, np.inf, -np.inf]).any(1)]
	da=hist_data_of_coun_i['inc_p'].values
	try:
		slope,intercept, r_value, p_value, std_err=st.linregress(list(range(30)), da[:30])
	except:
		slope=None
	# Flatten the table row's text into fields; drop doubled blank cells.
	bo=x[i].text.split('\n')
	if bo[6]=='' and bo[7]=='':
		del bo[7]
	if bo[17]=='' and bo[18]=='':
		del bo[18]
	# bo1: matching row from the two-days-ago table, for day-over-day deltas.
	for o in range(start2,end2):
		s1=x[o]
		l1=s1.find_all('a',attrs={'class':'mt_a'})
		if l1==[]:
			continue
		s1=str(l1[0])
		coun1=s1.split('/')
		if coun1[1]==coun[1]:
			bo1=x[o].text.split('\n')
			break
	# Strip thousands separators and '+' signs before int() conversion.
	for h in range(len(bo)):
		bo[h]=bo[h].replace(',','')
		bo[h]=bo[h].replace('+','')
	for h in range(len(bo1)):
		bo1[h]=bo1[h].replace(',','')
		bo1[h]=bo1[h].replace('+','')
	#Cases Per 100K Population
	try:
		bo.append(100000*int(bo[3])/int(bo[15]))
	except:
		continue
		# bo.append(np.nan)
		# print('lack one')
	#Tests Per 100K Population
	try:
		bo.append(100000*int(bo[13])/int(bo[15]))
	except:
		continue
		# bo.append(np.nan)
		# print('lack one')
	#'Active Cases Per 100k Population'
	try:
		bo.append(int(bo[9])*100000/int(bo[15]))
	except:
		bo.append(np.nan)
		# print('lack one')
	#Total Test:Positive Ratio
	bo.append(int(bo[3])/int(bo[13]))
	#New Positive
	try:
		bo.append((int(bo[3])-int(bo1[3]))/(int(bo[13])-int(bo1[13])))
	except:
		bo.append(np.nan)
		# print('lack one')
	#Case Fatality Rate%
	try:
		if bo[5]=='':
			bo.append(0)
		else:
			bo.append(int(bo[5])/int(bo[3]))
	except:
		bo.append(np.nan)
	#New Confirmed Case Growth Rate
	# try:
	# 	q=2
	# 	while (math.isnan(inc1) or inc1==np.inf) and q<=9:
	# 		# print(inc1)
	# 		inc1=hist_data_of_coun_i.loc[len(date)-q,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-q-7,'case inc'])
	# 		c=hist_data_of_coun_i.loc[len(date)-q,'case inc']
	# 		q+=1
	# 	# print(inc1)
	# 	if math.isnan(inc1):
	# 		bo.append(0)
	# 	elif inc1==np.inf:
	# 		bo.append(0.01)
	# 	# elif c<=100:
	# 	# 	bo.append(0.03)
	# 	else:
	# 		bo.append(inc1)
	# except:
	# 	bo.append(0)
	# 	print('lack one')
	#New Death Case Growth Rate
	# try:
	# 	q=2
	# 	while (math.isnan(inc2) or inc2==np.inf) and q<=9:
	# 		# print(inc2)
	# 		inc2=hist_data_of_coun_i.loc[len(date)-q,'death inc']/(7*hist_data_of_coun_i.loc[len(date)-q-7,'death inc'])
	# 		q+=1
	# 	# print(inc2)
	# 	if math.isnan(inc2):
	# 		bo.append(0)
	# 	elif inc2==np.inf:
	# 		bo.append(0.1)
	# 	else:
	# 		bo.append(inc2)
	# except:
	# 	bo.append(0)
	# 	print('lack one')
	#New Sum Confirmed Case Growth Rate
	if math.isnan(inc_1) or inc_1=='':
		bo.append(0)
	elif inc_1==np.inf:
		bo.append(0.01)
	else:
		bo.append(inc_1)
	# print(bo[-1])
	#New Sum Death Case Growth Rate
	if math.isnan(inc_2) or inc_2=='':
		bo.append(0)
	elif inc_2==np.inf:
		bo.append(0.1)
	else:
		bo.append(inc_2)
	# print(bo[-1])
	#Average daily cases per 100,000 people in the past week
	bo.append(adcp*100000/int(bo[15]))
	# New Test
	try:
		bo.append(int(bo[13])-int(bo1[13]))
	except:
		bo.append(np.nan)
		# print('lack one')
	bo.append(slope)
	if region=='missing':
		continue
	else:
		bo.append(region)
	bo.append(coun1[1])
	bo.append(iso)
	bo.append('world')
	bo.append(seven_cases)
	bo.append(seven_deaths)
	print(len(bo))
	print(bo)
	# Guard: each record must have exactly 40 fields to match col_name.
	if len(bo)!=40:
		print(bo)
		exit(0)
	raw_data.append(bo)
# Assemble the scraped records into a DataFrame and keep the subset of
# columns the downstream region-imputation and ranking steps use.
raw_data=DataFrame(raw_data,columns=col_name)
brief_raw_data=raw_data[['Country,Other','key-id','Region','Country/District','field','Population',
'TotalCases','ActiveCases','TotalDeaths','NewDeaths','TotalRecovered','NewRecovered','Serious,Critical','NewCases','New Test','Cases Per 100K Population','Tests Per 100K Population',
'Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',
'Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week','NPI','7 days inc cases','7 days inc deaths']]
# Per-region imputation, seeded with 'western europe': missing/invalid cells
# are first marked with the sentinel string 'shit', then filled from
# region-level aggregates (active-case rate, new-positive rate).
tf=copy.deepcopy(brief_raw_data)
uni_region=list(set(list(tf['Region'].values)))
uni_region.remove('western europe')
data_region=tf[tf['Region']=='western europe']
data_region=data_region.replace(np.nan,'shit')
data_region=data_region.replace(np.inf,'shit')
data_region=data_region.replace('N/A','shit')
data_region=data_region.replace('',0)
data_region=data_region.replace(' ',0)
data_region.loc[data_region['NPI']=='shit','NPI']=0
data_region.loc[data_region['Case Fatality Rate%']=='shit','Case Fatality Rate%']=0
# Region-wide active/total case ratio computed from rows with complete data,
# then used to back-fill missing per-100k active-case figures.
dd=data_region[['TotalCases','ActiveCases','TotalRecovered','Case Fatality Rate%']]
ac=dd[(dd['TotalCases']!='shit')&(dd['ActiveCases']!='shit')&(dd['TotalRecovered']!='shit')&(dd['Case Fatality Rate%']!='shit')]
active_rate_region=sum(ac['ActiveCases'].astype(int))/sum(ac['TotalCases'].astype(int))
data_region.loc[data_region['Active Cases Per 100k Population']=='shit','Active Cases Per 100k Population']=active_rate_region*100000*data_region.loc[data_region['Active Cases Per 100k Population']=='shit','TotalCases'].astype(int)/data_region.loc[data_region['Active Cases Per 100k Population']=='shit','Population'].astype(int)
# Region-wide new-positive rate used to back-fill rows with no test delta.
dd=data_region[['NewCases','New Test']]
ac=dd[dd['New Test']!=0]
new_posi=sum(ac['NewCases'].astype(int))/sum(ac['New Test'])
data_region.loc[data_region['New Test']==0,'New Positive%']=new_posi
final=copy.deepcopy(data_region)
# Repeat the same imputation for every remaining region and accumulate the
# cleaned rows into `final`.
for distri in uni_region:
	data_region=tf[tf['Region']==distri]
	data_region=data_region.replace(np.nan,'shit')
	data_region=data_region.replace(np.inf,'shit')
	data_region=data_region.replace('N/A','shit')
	data_region=data_region.replace('',0)
	data_region=data_region.replace(' ',0)
	data_region.loc[data_region['NPI']=='shit','NPI']=0
	data_region.loc[data_region['Case Fatality Rate%']=='shit','Case Fatality Rate%']=0
	dd=data_region[['TotalCases','ActiveCases','TotalRecovered','Case Fatality Rate%']]
	ac=dd[(dd['TotalCases']!='shit')&(dd['ActiveCases']!='shit')&(dd['TotalRecovered']!='shit')&(dd['Case Fatality Rate%']!='shit')]
	active_rate_region=sum(ac['ActiveCases'].astype(int))/sum(ac['TotalCases'].astype(int))
	data_region.loc[data_region['Active Cases Per 100k Population']=='shit','Active Cases Per 100k Population']=active_rate_region*100000*data_region.loc[data_region['Active Cases Per 100k Population']=='shit','TotalCases'].astype(int)/data_region.loc[data_region['Active Cases Per 100k Population']=='shit','Population'].astype(int)
	dd=data_region[['NewCases','New Test']]
	ac=dd[dd['New Test']!=0]
	# A region can have no rows with a test delta; fall back to 0.
	try:
		new_posi=sum(ac['NewCases'].astype(int))/sum(ac['New Test'])
	except:
		new_posi=0
	data_region.loc[data_region['New Test']==0,'New Positive%']=new_posi
	data_region.loc[data_region['New Test']=='shit','New Positive%']=new_posi
	final=pd.concat([final,data_region])
final=final.reset_index(drop=True)
# World-level metric subset that later gets concatenated with the US states
# (tf3) before percentile ranking.
tf2=final[['Country,Other','key-id','Country/District','Region','field','TotalCases','Cases Per 100K Population','Tests Per 100K Population',
'Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',
'Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week','NPI']]
# Higher is better for this metric, i.e. it needs a descending sort.
# for x in ['Cases Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',]
# x='Tests Per 100K Population'
# df=tf2[['Country,Other',x]]
# df2=df.sort_values(x,ascending=False,inplace=False)
# df2 = df2.reset_index(drop=True)
# df2['cum']=df.index+1
# df2['cum_prob']=100*df2['cum']/max(df2['cum'])
# df3=pd.merge(df,df2,on=['Country,Other'])
# tf2['IND_'+x]=df3['cum_prob']
# for x in ['Cases Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%','Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','NPI']:
# i=1
# df=tf2[['Country,Other',x]]
# df2=df.sort_values(x,inplace=False)
# df2 = df2.reset_index(drop=True)
# df2['cum']=df.index+1
# df2['cum_prob']=100*df2['cum']/max(df2['cum'])
# df3=pd.merge(df,df2,on=['Country,Other'])
# tf2['IND_'+x]=df3['cum_prob']
# i+=1
# tf2['Comprehensive Index']=0.1*tf2['IND_Cases Per 100K Population']+0.08*tf2['IND_Tests Per 100K Population']
# +0.2*tf2['IND_Active Cases Per 100k Population']+0.1*tf2['IND_Total Test:Positive Ratio']
# +0.13*tf2['IND_New Positive%']+0.02*tf2['IND_Case Fatality Rate%']+ 0.22*tf2['IND_New Confirmed Case Growth Rate']
# +0.1*tf2['IND_New Death Case Growth Rate']+ 0.05*tf2['IND_NPI']
# today=datetime.datetime.now()
# tf4=tf2[['Country/District','TotalCases','IND_Cases Per 100K Population','IND_Tests Per 100K Population','IND_Total Test:Positive Ratio',
# 'IND_New Positive%','IND_Case Fatality Rate%','IND_New Confirmed Case Growth Rate','IND_New Death Case Growth Rate','IND_Active Cases Per 100k Population',
# 'IND_NPI','Comprehensive Index']]
# tf_c=copy.deepcopy(tf4)
# tf_c_rename=tf_c.rename({'TotalCases':'TOTAL CASE','IND_Cases Per 100K Population':'IND1_Cases Per 100K Population','IND_Tests Per 100K Population':'IND2_Tests Per 100K Population',
# 'IND_Active Cases Per 100k Population':'IND8_Active Cases Per 100k Population','IND_Total Test:Positive Ratio':'IND3_Total Test:Positive Ratio',
# 'IND_New Positive%':'IND4_New Positive%','IND_Case Fatality Rate%':'IND5_Case Fatality Rate%','IND_New Confirmed Case Growth Rate':'IND6_New Confirmed Case Growth Rate',
# 'IND_New Death Case Growth Rate':'IND7_New Death Case Growth Rate','IND_NPI':'NPI'},axis='columns')
# tf_c_rename.to_excel('World_index_{}.xlsx'.format(today),sheet_name='Index',index=False)
# tf2.to_excel('World_raw_index_{}.xlsx'.format(today),sheet_name='Index',index=False)
# brief_raw_data.to_excel('World_rawdata_{}.xlsx'.format(today),sheet_name='Index',index=False)
import pickle
import pandas
import json
from pprint import pprint
from urllib import request
#resp = request.urlopen('https://covidtracking.com/api/v1/states/daily.json')
#proxies = {'http': 'http://proxy.example.com:8080/'}
#opener = request.FancyURLopener(proxies)
# Retry the covidtracking.com daily-states feed until the request succeeds;
# state_data is a list of per-state per-day dicts keyed by 'state'/'date'.
a=0
while a==0:
	try:
		resp = requests.get('https://covidtracking.com/api/v1/states/daily.json')
		a=1
	except:
		a=0
state_data=resp.json()#json.loads(resp.read().decode())
print('stage 1 finished')
import datetime

# Build 'YYYYMMDD' stamps for today and the previous nine days; ts[1]
# (yesterday) is the key used below to match covidtracking.com records.
# The ten copy-pasted statements of the original are collapsed into a loop;
# the individual date names x0..x9 are kept for backward compatibility.
_days = [datetime.date.today() - datetime.timedelta(days=k) for k in range(10)]
x0, x1, x2, x3, x4, x5, x6, x7, x8, x9 = _days
# run_time
ts = [d.__format__('%Y%m%d') for d in _days]
print(ts)
# Full state name -> USPS two-letter code; used to match worldometers state
# names against covidtracking.com's 'state' field.
id_names={'Alabama': 'AL',
'Alaska': 'AK',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'District Of Columbia':'DC',
'Florida': 'FL',
'Georgia': 'GA',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY'}
from tqdm import tqdm
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import requests
import json
import time
import random
import html5lib
import re
import scipy.stats as st
from pandas.core.frame import DataFrame
import copy
import math
import datetime
#url='https://www.worldometers.info/coronavirus/#countries'
url='https://www.worldometers.info/coronavirus/country/us/'
# Fetch the US page; its state tables use unstyled <tr> rows only, so the
# style filter differs from the world section above.
a=requests.get(url)
soup = BeautifulSoup(a.content,'html5lib')
x=soup.body.find_all('tr', attrs={'style': ''})
# 190 210
def find_start_yesterday(i, j, rows=None):
	"""Locate the first data row of yesterday's US state table.

	Scans ``rows`` (defaults to the module-level soup rows ``x``) over the
	index range ``[i, j)`` and returns the first index whose state link is
	'texas' or 'california' (one of these heads the table) and whose
	following row also carries a state link. Returns ``None`` when no such
	row exists in the range.

	The optional ``rows`` parameter generalizes the original global-only
	lookup so the scan can be tested (or reused) with any sequence of row
	objects exposing ``find_all('a', attrs={'class': 'mt_a'})``.
	"""
	table = x if rows is None else rows
	for start in range(i, j):
		links_here = table[start].find_all('a', attrs={'class': 'mt_a'})
		links_next = table[start + 1].find_all('a', attrs={'class': 'mt_a'})
		# Header/spacer rows carry no state anchor; skip them.
		if not links_here or not links_next:
			continue
		# Hrefs look like '.../coronavirus/usa/<state>/', so the state slug
		# sits at index 3 of the '/'-split string.
		state = str(links_here[0]).split('/')
		if state[3] in ('texas', 'california'):
			return start
#385 410
def find_end_yesterday(i, j, rows=None):
	"""Locate the exclusive end index of yesterday's US state table.

	Scans ``rows`` (defaults to the module-level soup rows ``x``) over
	``[i, j)`` and returns ``end + 1`` for the first adjacent pair of rows
	whose state links are 'district-of-columbia' and 'vermont' in either
	order — those two close the table. Returns ``None`` when the pair is
	not found in the range.

	The optional ``rows`` parameter generalizes the original global-only
	lookup so the scan can be tested (or reused) with any sequence of row
	objects exposing ``find_all('a', attrs={'class': 'mt_a'})``.
	"""
	table = x if rows is None else rows
	for end in range(i, j):
		prev_links = table[end - 1].find_all('a', attrs={'class': 'mt_a'})
		curr_links = table[end].find_all('a', attrs={'class': 'mt_a'})
		if not prev_links or not curr_links:
			continue
		# State slug sits at index 3 of the '/'-split href string.
		prev_state = str(prev_links[0]).split('/')
		curr_state = str(curr_links[0]).split('/')
		if {prev_state[3], curr_state[3]} == {'district-of-columbia', 'vermont'}:
			return end + 1
# Bracket yesterday's US state table: [start, end) within the soup rows.
end=find_end_yesterday(80,200)
start=find_start_yesterday(64,80)
print('start:{}\tend:{}'.format(start,end))
# Column layout of one scraped state row: raw worldometers fields first,
# then the derived metrics appended at the end of the loop below.
col_name=['0','#','2','Country,Other','TotalCases',
'5','NewCases','7','TotalDeaths',
'NewDeaths','10','TotalRecovered','12','ActiveCases','Tot Cases/1M pop',
'Deaths/1M pop','16','TotalTests','Tests/1M pop','19','Pop','21','source','23','24','Cases Per 100K Population',
'Tests Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',
'Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week',
'New Test','NPI','key-id','Country/District','Region','field','7 days inc cases','7 days inc deaths']
# US scrape: mirrors the world loop above but per state, with gaps filled
# from the covidtracking.com feed (new cases/deaths/tests for ts[1]).
raw_data=[]
for i in tqdm(range(start,end)):
	# time.sleep(2)
	text_source=x[i]
	l=text_source.find_all('a',attrs={'class':'mt_a'})
	if l==[]:
		continue
	s=str(l[0])
	coun=s.split('/')
	url='https://www.worldometers.info/coronavirus/usa/'+coun[3]+'/'
	# a=requests.get(url,proxies=proxies,headers = headers)
	# Retry the state page until the request succeeds.
	a=''
	while a=='':
		try:
			a=requests.get(url,headers=headers)
		except:
			a=''
	soup = BeautifulSoup(a.content,'html5lib')
	r=soup.body.find_all('script',attrs={'type':'text/javascript'})
	# The chart scripts hold the date axis ("categories") and the series data.
	p=re.compile(r'categories: \[(.*?)\]',re.S)
	rs=re.findall(p,r[0].text)
	d=rs[0]
	str_pat = re.compile(r'\"(.*?)\"')
	d = str_pat.findall(d)
	date=d
	# Pull the 'Cases' series from whichever of the first ten scripts has it.
	p1=re.compile(r'name: \'Cases\'.*?\[(.*?)\]',re.S)
	for j in range(10):
		try:
			rs=re.findall(p1,r[j].text)
			d=rs[0]
			d=re.sub(r'\"','',d)
			case=d.split(',')
		except:
			# print('{} cases is not{}'.format(coun[1],j))
			continue
	# Same scan for the 'Deaths' series.
	p1=re.compile(r'name: \'Deaths\'.*?\[(.*?)\]',re.S)
	for j in range(10):
		try:
			rs=re.findall(p1,r[j].text)
			d=rs[0]
			d=re.sub(r'\"','',d)
			TD=d.split(',')
		except:
			continue
	j={'Date':date,'Total Cases':case,'Total Deaths':TD}
	hist_data_of_coun_i=pd.DataFrame(j)
	for k in range(len(hist_data_of_coun_i['Total Deaths'])):
		if hist_data_of_coun_i['Total Deaths'][k]=='null':
			data['Total Deaths'][k]=0
	hist_data_of_coun_i['Total Cases']=hist_data_of_coun_i['Total Cases'].astype(int)
	hist_data_of_coun_i['Total Deaths']=hist_data_of_coun_i['Total Deaths'].astype(int)
	hist_data_of_coun_i['case inc']=hist_data_of_coun_i['Total Cases'].diff()
	hist_data_of_coun_i['death inc']=hist_data_of_coun_i['Total Deaths'].diff()
	# 7-day incremental cases and deaths
	seven_cases=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])
	seven_deaths=sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(1,8)])
	inc1=hist_data_of_coun_i.loc[len(date)-1,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-8,'case inc'])
	inc2=hist_data_of_coun_i.loc[len(date)-1,'death inc']/(7*hist_data_of_coun_i.loc[len(date)-8,'death inc'])
	# Week-over-week growth: last 7 daily increments over the previous 7.
	inc_1=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])/sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(8,15)])
	inc_2=sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(1,8)])/sum([hist_data_of_coun_i.loc[len(date)-i,'death inc'] for i in range(8,15)])
	adcp=sum([hist_data_of_coun_i.loc[len(date)-i,'case inc'] for i in range(1,8)])/7
	# NPI proxy: slope of the 5-day log-ratio of daily increments, fitted by
	# linear regression over the first 30 usable points.
	dd=hist_data_of_coun_i.shift(5)
	hist_data_of_coun_i['inc_p']=np.log(hist_data_of_coun_i['case inc']/dd['case inc'])/5
	hist_data_of_coun_i=hist_data_of_coun_i[~hist_data_of_coun_i.isin([np.nan, np.inf, -np.inf]).any(1)]
	da=hist_data_of_coun_i['inc_p'].values
	try:
		slope,intercept, r_value, p_value, std_err=st.linregress(list(range(30)), da[:30])
	except:
		slope=None
	# print(x[i].text)
	# Flatten the table row's text into cleaned fields.
	bo=x[i].text.split('\n')
	# print(bo)
	for h in range(len(bo)):
		bo[h]=bo[h].replace(',','')
		bo[h]=bo[h].replace('+','')
		bo[h]=bo[h].strip()
	bo[3]=bo[3].strip()
	try:
		region=id_names[bo[3]]
	except:
		region='missing'
	# print(region)
	# Drop blank placeholder cells so field indices line up with col_name.
	if bo[4]=='':
		del bo[4]
	if bo[11]=='':
		del bo[11]
	# if bo[17]=='':
	# 	del bo[17]
	if bo[20]=='':
		del bo[20]
	# Missing new-case / new-death cells are filled from covidtracking.com's
	# record for yesterday (ts[1]) of this state.
	if bo[6]=='':
		new_cases=0
		for t in state_data:
			if t['state']==region:
				date_time=str(t['date'])
				if date_time == ts[1]:
					new_cases=t['positiveIncrease']
					break
		bo[6]=new_cases
	if bo[9]=='':
		new_cases=0
		for t in state_data:
			if t['state']==region:
				date_time=str(t['date'])
				if date_time == ts[1]:
					new_cases=t['deathIncrease']
					break
		bo[9]=new_cases
	if bo[22]!='[projections]':
		del bo[22]
	#match-json
	# bo[3]=bo[3].strip()
	# try:
	# 	region=id_names[bo[3]]
	# except:
	# 	region='missing'
	# # print(region)
	# Yesterday's test-count increase from covidtracking.com (default 1 to
	# avoid division by zero below).
	new_test=1
	# test_7 days
	for t in state_data:
		if t['state']==region:
			date_time=str(t['date'])
			if date_time == ts[1]:
				new_test=t['totalTestResultsIncrease']
				break
	print(bo)
	#Cases Per 100K Population
	try:
		bo.append(int(bo[14])/10)
	except:
		continue
		# bo.append(np.nan)
		# print('lack one')
	#Tests Per 100K Population
	if bo[25]=='':
		del bo[25]
	try:
		bo.append(int(bo[18])/10)
	except:
		continue
		# bo.append(np.nan)
		# print('lack one')
	#'Active Cases Per 100k Population'
	try:
		bo.append(int(bo[13])*100000/int(bo[20]))
	except:
		bo.append(np.nan)
		# print('lack one')
	#Total Test:Positive Ratio
	bo.append(int(bo[4])/int(bo[17]))
	#'New Positive%'
	print(region)
	try:
		bo.append(int(bo[6])/new_test)
	except:
		bo.append(0)
	#Case Fatality Rate%
	try:
		if bo[8]=='':
			bo.append(0)
		else:
			bo.append(int(bo[8])/int(bo[4]))
	except:
		bo.append(np.nan)
	#New Confirmed Case Growth Rate
	# try:
	# 	q=2
	# 	while (math.isnan(inc1) or inc1==np.inf) and q<=9:
	# 		inc1=hist_data_of_coun_i.loc[len(date)-q,'case inc']/(7*hist_data_of_coun_i.loc[len(date)-q-7,'case inc'])
	# 		# c=hist_data_of_coun_i.loc[len(date)-q,'case inc']
	# 		q+=1
	# 	# print(c)
	# 	if math.isnan(inc1):
	# 		bo.append(0)
	# 	elif inc1==np.inf:
	# 		bo.append(0.01)
	# 	else:
	# 		bo.append(inc1)
	# 	# print(inc1)
	# except:
	# 	bo.append(0)
	# 	# print('lack one')
	# # print(bo[27])
	# #New Death Case Growth Rate
	# try:
	# 	q=2
	# 	while (math.isnan(inc2) or inc2==np.inf) and q<=9:
	# 		# print(inc2)
	# 		inc2=hist_data_of_coun_i.loc[len(date)-1,'death inc']/(7*hist_data_of_coun_i.loc[len(date)-8,'death inc'])
	# 		q+=1
	# 	# print(inc2)
	# 	if math.isnan(inc2):
	# 		bo.append(0)
	# 	elif inc2==np.inf:
	# 		bo.append(0.1)
	# 	else:
	# 		bo.append(inc2)
	# except:
	# 	bo.append(0)
	if math.isnan(inc_1) or inc_1=='':
		bo.append(0)
	elif inc_1==np.inf:
		bo.append(0.01)
	else:
		bo.append(inc_1)
	print(bo[-1])
	#New Sum Death Case Growth Rate
	if math.isnan(inc_2) or inc_2=='':
		bo.append(0)
	elif inc_2==np.inf:
		bo.append(0.1)
	else:
		bo.append(inc_2)
	print(bo[-1])
	#Average daily cases per 100,000 people in the past week
	bo.append(adcp*100000/int(bo[20]))
	# New Test
	bo.append(new_test)
	#NPI
	if slope==np.inf or math.isnan(slope):
		bo.append(0)
	else:
		bo.append(slope)
	bo.append(coun[3])
	bo.append(region)
	bo.append('No')
	bo.append('us')
	bo.append(seven_cases)
	bo.append(seven_deaths)
	# if bo[20]=='':
	# 	del bo[20]
	print(len(bo))
	print(bo)
	raw_data.append(bo)
# Assemble the US state records into a DataFrame, keep the columns the
# ranking/export steps use, and derive the 7-day death/case ratio.
raw_data=DataFrame(raw_data,columns=col_name)
brief_raw_data=raw_data[['Country,Other','key-id','Country/District','Region','field','TotalCases',
'NewCases','TotalDeaths',
'NewDeaths','ActiveCases','Tot Cases/1M pop',
'Deaths/1M pop','TotalTests','Tests/1M pop','Pop','Cases Per 100K Population',
'Tests Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',
'Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week',
'New Test','NPI','7 days inc cases','7 days inc deaths']]
brief_raw_data['week death rate']=brief_raw_data['7 days inc deaths']/brief_raw_data['7 days inc cases']
# Combine the US state metrics (tf3) with the world metrics (tf2) so the
# percentile ranks below are computed over one shared population.
tf=copy.deepcopy(brief_raw_data)
tf3=tf[['Country,Other','key-id','Country/District','Region','field','TotalCases','Cases Per 100K Population',
'Tests Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',
'Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week','NPI']]
frames=[tf2,tf3]
tf3 = pd.concat(frames)
tf3 = tf3.reset_index(drop=True)
# tf2=final[['Country,Other','key-id','Country/District','Region','field','TotalCases','Cases Per 100K Population','Tests Per 100K Population',
# 'Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%',
# 'Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week','NPI']]
# Percentile-rank 'Tests Per 100K Population' (descending sort: higher test
# coverage ranks better) and write the rank back as the IND_ column.
x='Tests Per 100K Population'
df=copy.deepcopy(tf3[['Country,Other',x]])
df2=df.sort_values(x,ascending=False,inplace=False)
df2 = df2.reset_index(drop=True)
df2['cum']=df.index+1
df2['cum_prob']=100*df2['cum']/max(df2['cum'])
df3=pd.merge(df,df2,on=['Country,Other'])
tf3['IND_'+x]=0
for h in list(tf3['Country,Other'].values):
	tf3.loc[tf3['Country,Other']==h,'IND_'+x]=df3.loc[df3['Country,Other']==h,'cum_prob'].values[0]
# Percentile-rank the remaining metrics (ascending: lower burden ranks
# better), then blend the IND_ columns into a weighted composite score.
for x in ['Cases Per 100K Population','Active Cases Per 100k Population','Total Test:Positive Ratio','New Positive%','Case Fatality Rate%','New Confirmed Case Growth Rate','New Death Case Growth Rate','Average daily cases per 100,000 people in the past week','NPI']:
	df=copy.deepcopy(tf3[['Country,Other',x]])
	df2=df.sort_values(x,inplace=False)
	df2 = df2.reset_index(drop=True)
	df2['cum']=df.index+1
	df2['cum_prob']=100*df2['cum']/max(df2['cum'])
	df3=pd.merge(df,df2,on=['Country,Other'])
	tf3['IND_'+x]=0
	for h in list(tf3['Country,Other'].values):
		tf3.loc[tf3['Country,Other']==h,'IND_'+x]=df3.loc[df3['Country,Other']==h,'cum_prob'].values[0]
tf3['Comprehensive Index']=0.15*tf3['IND_Cases Per 100K Population']+0.08*tf3['IND_Tests Per 100K Population']+0.2*tf3['IND_Active Cases Per 100k Population']+0.1*tf3['IND_Total Test:Positive Ratio']+0.13*tf3['IND_New Positive%']+0.05*tf3['IND_Case Fatality Rate%']+ 0.22*tf3['IND_New Confirmed Case Growth Rate']+0.07*tf3['IND_New Death Case Growth Rate']
# Export the US slice of the combined index table to Excel, with the IND_
# columns renamed to the numbered IND1..IND8 report labels.
today=datetime.datetime.now()
rrr=tf3[tf3['field']=='us']
tf4=rrr[['Country/District','TotalCases','IND_Cases Per 100K Population','IND_Tests Per 100K Population','IND_Total Test:Positive Ratio',
'IND_New Positive%','IND_Case Fatality Rate%','IND_New Confirmed Case Growth Rate','IND_New Death Case Growth Rate','IND_Active Cases Per 100k Population',
'IND_NPI','IND_Average daily cases per 100,000 people in the past week','Comprehensive Index']]
tf_c=copy.deepcopy(tf4)
tf_c_rename=tf_c.rename({'TotalCases':'TOTAL CASE','IND_Cases Per 100K Population':'IND1_Cases Per 100K Population','IND_Tests Per 100K Population':'IND2_Tests Per 100K Population',
'IND_Active Cases Per 100k Population':'IND8_Active Cases Per 100k Population','IND_Total Test:Positive Ratio':'IND3_Total Test:Positive Ratio',
'IND_New Positive%':'IND4_New Positive%','IND_Case Fatality Rate%':'IND5_Case Fatality Rate%','IND_New Confirmed Case Growth Rate':'IND6_New Confirmed Case Growth Rate',
'IND_New Death Case Growth Rate':'IND7_New Death Case Growth Rate','IND_NPI':'NPI'},axis='columns')
tf_c_rename.to_excel('US_index_{}.xlsx'.format(today),sheet_name=ts[1],index=False)
tf3.to_excel('US_raw_index_{}.xlsx'.format(today),sheet_name=ts[1],index=False)
# Merge Our World in Data state-level vaccination figures into the US raw
# data, derive per-capita vaccination/immunity columns, and export to Excel.
url='https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/us_state_vaccinations.csv'
a=requests.get(url,headers=headers)
# BUG FIX: the download used to be saved as "us_all_vacc.csv" while the read
# below opened "us_vacc.csv", so a stale file (or a crash on first run) was
# used. Save and read one consistent filename.
with open("us_vacc.csv",'wb') as f:
	f.write(a.content)
vacc = pd.read_csv('us_vacc.csv',keep_default_na=False)
ct = list(dict(vacc['location'].value_counts()).keys())
# Restrict to jurisdictions present in both datasets, then add the two OWID
# labels that differ from the worldometers spellings.
name= list(brief_raw_data['Country,Other'].values)
name=list(set(name).intersection(set(ct)))
name.append('New York State')
name.append('District of Columbia')
# Blank cells come through as '' (keep_default_na=False); coerce to int.
for col in ['total_vaccinations','people_vaccinated','people_fully_vaccinated']:
	vacc[col]=vacc[col].replace('',0)
	vacc[col]=vacc[col].astype(float)
	vacc[col]=vacc[col].astype(int)
# img: worldometers-style name -> [total, vaccinated, fully vaccinated],
# using the latest (max) cumulative value of each series.
img = dict()
for i in name:
	dt = vacc[vacc['location']==i]
	d=[max(dt[col]) for col in ['total_vaccinations','people_vaccinated','people_fully_vaccinated']]
	# BUG FIX: originally two independent `if`s, so 'New York State' also fell
	# into the final else and was stored under its OWID label as well; the
	# if/elif chain maps each OWID label to exactly one worldometers name.
	if i == 'New York State':
		img['New York']=d
	elif i == 'District of Columbia':
		img['District Of Columbia']=d
	else:
		img[i]=d
brief_raw_data['total_vaccinations']=0
brief_raw_data['people_vaccinated']=0
brief_raw_data['people_fully_vaccinated']=0
for i in img.keys():
	brief_raw_data.loc[(brief_raw_data['Country,Other']==i),'total_vaccinations'] = int(img[i][0])
	brief_raw_data.loc[(brief_raw_data['Country,Other']==i),'people_vaccinated'] = int(img[i][1])
	brief_raw_data.loc[(brief_raw_data['Country,Other']==i),'people_fully_vaccinated'] = int(img[i][2])
# Per-capita rates; total_immune approximates case-derived plus vaccine-derived
# immunity (vaccinations weighted 0.9).
brief_raw_data['Pop']=brief_raw_data['Pop'].astype(int)
brief_raw_data['vacc_per_100']=brief_raw_data['total_vaccinations']*100/brief_raw_data['Pop']
brief_raw_data['cases_per_100']=brief_raw_data['Cases Per 100K Population']/1000
brief_raw_data['total_immune']=brief_raw_data['cases_per_100']+brief_raw_data['vacc_per_100']*0.9
brief_raw_data.to_excel('US_rawdata_{}.xlsx'.format(today),sheet_name=ts[1],index=False)
| [
"pandas.read_csv",
"re.compile",
"pandas.merge",
"numpy.log",
"requests.get",
"datetime.timedelta",
"bs4.BeautifulSoup",
"datetime.datetime.now",
"re.findall",
"copy.deepcopy",
"pandas.DataFrame",
"re.sub",
"datetime.date.today",
"pandas.core.frame.DataFrame",
"pandas.concat",
"math.is... | [((12326, 12360), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (12338, 12360), False, 'import requests\n'), ((12367, 12403), 'bs4.BeautifulSoup', 'BeautifulSoup', (['a.content', '"""html5lib"""'], {}), "(a.content, 'html5lib')\n", (12380, 12403), False, 'from bs4 import BeautifulSoup\n'), ((21838, 21875), 'pandas.core.frame.DataFrame', 'DataFrame', (['raw_data'], {'columns': 'col_name'}), '(raw_data, columns=col_name)\n', (21847, 21875), False, 'from pandas.core.frame import DataFrame\n'), ((22440, 22469), 'copy.deepcopy', 'copy.deepcopy', (['brief_raw_data'], {}), '(brief_raw_data)\n', (22453, 22469), False, 'import copy\n'), ((23792, 23818), 'copy.deepcopy', 'copy.deepcopy', (['data_region'], {}), '(data_region)\n', (23805, 23818), False, 'import copy\n'), ((28828, 28849), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (28847, 28849), False, 'import datetime\n'), ((31104, 31121), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (31116, 31121), False, 'import requests\n'), ((31129, 31165), 'bs4.BeautifulSoup', 'BeautifulSoup', (['a.content', '"""html5lib"""'], {}), "(a.content, 'html5lib')\n", (31142, 31165), False, 'from bs4 import BeautifulSoup\n'), ((40505, 40542), 'pandas.core.frame.DataFrame', 'DataFrame', (['raw_data'], {'columns': 'col_name'}), '(raw_data, columns=col_name)\n', (40514, 40542), False, 'from pandas.core.frame import DataFrame\n'), ((41252, 41281), 'copy.deepcopy', 'copy.deepcopy', (['brief_raw_data'], {}), '(brief_raw_data)\n', (41265, 41281), False, 'import copy\n'), ((41694, 41711), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (41703, 41711), True, 'import pandas as pd\n'), ((42168, 42208), 'copy.deepcopy', 'copy.deepcopy', (["tf3[['Country,Other', x]]"], {}), "(tf3[['Country,Other', x]])\n", (42181, 42208), False, 'import copy\n'), ((42366, 42405), 'pandas.merge', 'pd.merge', (['df', 'df2'], {'on': "['Country,Other']"}), "(df, df2, 
on=['Country,Other'])\n", (42374, 42405), True, 'import pandas as pd\n'), ((43617, 43640), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (43638, 43640), False, 'import datetime\n'), ((44065, 44083), 'copy.deepcopy', 'copy.deepcopy', (['tf4'], {}), '(tf4)\n', (44078, 44083), False, 'import copy\n'), ((44967, 45001), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (44979, 45001), False, 'import requests\n'), ((45071, 45120), 'pandas.read_csv', 'pd.read_csv', (['"""us_vacc.csv"""'], {'keep_default_na': '(False)'}), "('us_vacc.csv', keep_default_na=False)\n", (45082, 45120), True, 'import pandas as pd\n'), ((15004, 15040), 'bs4.BeautifulSoup', 'BeautifulSoup', (['a.content', '"""html5lib"""'], {}), "(a.content, 'html5lib')\n", (15017, 15040), False, 'from bs4 import BeautifulSoup\n'), ((15114, 15157), 're.compile', 're.compile', (['"""categories: \\\\[(.*?)\\\\]"""', 're.S'], {}), "('categories: \\\\[(.*?)\\\\]', re.S)\n", (15124, 15157), False, 'import re\n'), ((15163, 15187), 're.findall', 're.findall', (['p', 'r[0].text'], {}), '(p, r[0].text)\n', (15173, 15187), False, 'import re\n'), ((15213, 15238), 're.compile', 're.compile', (['"""\\\\"(.*?)\\\\\\""""'], {}), '(\'\\\\"(.*?)\\\\"\')\n', (15223, 15238), False, 'import re\n'), ((15283, 15334), 're.compile', 're.compile', (['"""name: \\\\\'Cases\\\\\'.*?\\\\[(.*?)\\\\]"""', 're.S'], {}), '("name: \\\\\'Cases\\\\\'.*?\\\\[(.*?)\\\\]", re.S)\n', (15293, 15334), False, 'import re\n'), ((15598, 15650), 're.compile', 're.compile', (['"""name: \\\\\'Deaths\\\\\'.*?\\\\[(.*?)\\\\]"""', 're.S'], {}), '("name: \\\\\'Deaths\\\\\'.*?\\\\[(.*?)\\\\]", re.S)\n', (15608, 15650), False, 'import re\n'), ((16066, 16081), 'pandas.DataFrame', 'pd.DataFrame', (['j'], {}), '(j)\n', (16078, 16081), True, 'import pandas as pd\n'), ((25261, 25292), 'pandas.concat', 'pd.concat', (['[final, data_region]'], {}), '([final, data_region])\n', (25270, 25292), True, 'import 
pandas as pd\n'), ((28853, 28874), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (28872, 28874), False, 'import datetime\n'), ((28875, 28901), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (28893, 28901), False, 'import datetime\n'), ((28905, 28926), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (28924, 28926), False, 'import datetime\n'), ((28927, 28953), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(2)'}), '(days=2)\n', (28945, 28953), False, 'import datetime\n'), ((28957, 28978), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (28976, 28978), False, 'import datetime\n'), ((28979, 29005), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(3)'}), '(days=3)\n', (28997, 29005), False, 'import datetime\n'), ((29009, 29030), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (29028, 29030), False, 'import datetime\n'), ((29031, 29057), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(4)'}), '(days=4)\n', (29049, 29057), False, 'import datetime\n'), ((29061, 29082), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (29080, 29082), False, 'import datetime\n'), ((29083, 29109), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(5)'}), '(days=5)\n', (29101, 29109), False, 'import datetime\n'), ((29113, 29134), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (29132, 29134), False, 'import datetime\n'), ((29135, 29161), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(6)'}), '(days=6)\n', (29153, 29161), False, 'import datetime\n'), ((29165, 29186), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (29184, 29186), False, 'import datetime\n'), ((29187, 29213), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (29205, 29213), False, 'import datetime\n'), ((29217, 29238), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (29236, 29238), 
False, 'import datetime\n'), ((29239, 29265), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(8)'}), '(days=8)\n', (29257, 29265), False, 'import datetime\n'), ((29269, 29290), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (29288, 29290), False, 'import datetime\n'), ((29291, 29317), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(9)'}), '(days=9)\n', (29309, 29317), False, 'import datetime\n'), ((33440, 33476), 'bs4.BeautifulSoup', 'BeautifulSoup', (['a.content', '"""html5lib"""'], {}), "(a.content, 'html5lib')\n", (33453, 33476), False, 'from bs4 import BeautifulSoup\n'), ((33550, 33593), 're.compile', 're.compile', (['"""categories: \\\\[(.*?)\\\\]"""', 're.S'], {}), "('categories: \\\\[(.*?)\\\\]', re.S)\n", (33560, 33593), False, 'import re\n'), ((33599, 33623), 're.findall', 're.findall', (['p', 'r[0].text'], {}), '(p, r[0].text)\n', (33609, 33623), False, 'import re\n'), ((33649, 33674), 're.compile', 're.compile', (['"""\\\\"(.*?)\\\\\\""""'], {}), '(\'\\\\"(.*?)\\\\"\')\n', (33659, 33674), False, 'import re\n'), ((33719, 33770), 're.compile', 're.compile', (['"""name: \\\\\'Cases\\\\\'.*?\\\\[(.*?)\\\\]"""', 're.S'], {}), '("name: \\\\\'Cases\\\\\'.*?\\\\[(.*?)\\\\]", re.S)\n', (33729, 33770), False, 'import re\n'), ((34034, 34086), 're.compile', 're.compile', (['"""name: \\\\\'Deaths\\\\\'.*?\\\\[(.*?)\\\\]"""', 're.S'], {}), '("name: \\\\\'Deaths\\\\\'.*?\\\\[(.*?)\\\\]", re.S)\n', (34044, 34086), False, 'import re\n'), ((34359, 34374), 'pandas.DataFrame', 'pd.DataFrame', (['j'], {}), '(j)\n', (34371, 34374), True, 'import pandas as pd\n'), ((42840, 42880), 'copy.deepcopy', 'copy.deepcopy', (["tf3[['Country,Other', x]]"], {}), "(tf3[['Country,Other', x]])\n", (42853, 42880), False, 'import copy\n'), ((43042, 43081), 'pandas.merge', 'pd.merge', (['df', 'df2'], {'on': "['Country,Other']"}), "(df, df2, on=['Country,Other'])\n", (43050, 43081), True, 'import pandas as pd\n'), ((17751, 17807), 'numpy.log', 'np.log', 
(["(hist_data_of_coun_i['case inc'] / dd['case inc'])"], {}), "(hist_data_of_coun_i['case inc'] / dd['case inc'])\n", (17757, 17807), True, 'import numpy as np\n'), ((20905, 20922), 'math.isnan', 'math.isnan', (['inc_1'], {}), '(inc_1)\n', (20915, 20922), False, 'import math\n'), ((21103, 21120), 'math.isnan', 'math.isnan', (['inc_2'], {}), '(inc_2)\n', (21113, 21120), False, 'import math\n'), ((28623, 28689), 'requests.get', 'requests.get', (['"""https://covidtracking.com/api/v1/states/daily.json"""'], {}), "('https://covidtracking.com/api/v1/states/daily.json')\n", (28635, 28689), False, 'import requests\n'), ((35777, 35833), 'numpy.log', 'np.log', (["(hist_data_of_coun_i['case inc'] / dd['case inc'])"], {}), "(hist_data_of_coun_i['case inc'] / dd['case inc'])\n", (35783, 35833), True, 'import numpy as np\n'), ((39660, 39677), 'math.isnan', 'math.isnan', (['inc_1'], {}), '(inc_1)\n', (39670, 39677), False, 'import math\n'), ((39857, 39874), 'math.isnan', 'math.isnan', (['inc_2'], {}), '(inc_2)\n', (39867, 39874), False, 'import math\n'), ((40182, 40199), 'math.isnan', 'math.isnan', (['slope'], {}), '(slope)\n', (40192, 40199), False, 'import math\n'), ((14926, 14960), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (14938, 14960), False, 'import requests\n'), ((15383, 15408), 're.findall', 're.findall', (['p1', 'r[j].text'], {}), '(p1, r[j].text)\n', (15393, 15408), False, 'import re\n'), ((15442, 15462), 're.sub', 're.sub', (['"""\\\\\\""""', '""""""', 'd'], {}), '(\'\\\\"\', \'\', d)\n', (15448, 15462), False, 'import re\n'), ((15699, 15724), 're.findall', 're.findall', (['p1', 'r[j].text'], {}), '(p1, r[j].text)\n', (15709, 15724), False, 'import re\n'), ((15758, 15778), 're.sub', 're.sub', (['"""\\\\\\""""', '""""""', 'd'], {}), '(\'\\\\"\', \'\', d)\n', (15764, 15778), False, 'import re\n'), ((33362, 33396), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (33374, 
33396), False, 'import requests\n'), ((33819, 33844), 're.findall', 're.findall', (['p1', 'r[j].text'], {}), '(p1, r[j].text)\n', (33829, 33844), False, 'import re\n'), ((33878, 33898), 're.sub', 're.sub', (['"""\\\\\\""""', '""""""', 'd'], {}), '(\'\\\\"\', \'\', d)\n', (33884, 33898), False, 'import re\n'), ((34135, 34160), 're.findall', 're.findall', (['p1', 'r[j].text'], {}), '(p1, r[j].text)\n', (34145, 34160), False, 'import re\n'), ((34194, 34214), 're.sub', 're.sub', (['"""\\\\\\""""', '""""""', 'd'], {}), '(\'\\\\"\', \'\', d)\n', (34200, 34214), False, 'import re\n')] |
import argparse
import json
import os
import shutil
import traceback
from datetime import datetime
import cv2
import numpy as np
import torch
from bullet_envs.utils import PY_MUJOCO, env_with_goals
from SRL4RL.rl.modules.agent_utils import process_inputs
from SRL4RL.rl.utils.env_utils import make_env
from SRL4RL.utils.nn_torch import set_seeds
from SRL4RL.utils.utils import (
createFolder,
encoder_methods,
give_name,
loadConfig,
saveConfig,
str2bool,
)
from SRL4RL.utils.utilsEnv import render_env, update_video
# Evaluation-wide settings.
RANDOM = False  # if True, runner.forward samples random actions instead of querying the policy
seperate_csv = False  # NOTE(review): unused in this chunk (sic: "seperate") -- confirm before removing
image_size = 588  # rendered frame size (pixels) for recorded videos/images # 588
color = True  # NOTE(review): unused here; args.color drives rendering instead -- confirm
class EmptyArgs:
    """Bare namespace; __main__ populates it via args.__dict__.update(config)."""

    pass
def eval_agent(
    args,
    env,
    o_mean,
    o_std,
    g_mean,
    g_std,
    actor_network,
    runner,
    video_path="",
    image_path="",
):
    """Roll out the trained policy for args.n_eval_traj episodes and report returns.

    Args:
        args: experiment namespace (env_name, max_episode_steps, actionRepeat,
            n_eval_traj, renders, highRes, with_goal, method, color, fpv, ...).
        env: environment instance, already constructed/seeded by the caller.
        o_mean, o_std, g_mean, g_std: observation/goal normalization statistics
            fed to process_inputs.
        actor_network: policy network (caller puts it in eval mode).
        runner: wrapper that encodes observations and queries the policy.
        video_path: if non-empty, record the rollout to this .mp4; the file is
            renamed afterwards with the score embedded in its name.
        image_path: if non-empty, write one rendered .png per step into it.

    Returns:
        (mean_rewards, mean_rewardProgress, average_steps) averaged over the
        evaluation trajectories; mean_rewardProgress is 0 unless the env is one
        of the locomotion BulletEnvs.
    """
    # number of agent decisions per episode (env repeats each action actionRepeat times)
    numSteps = int(args.max_episode_steps // args.actionRepeat)
    mean_rewardProgress = 0
    if args.renders and args.env_name in PY_MUJOCO:
        env.render(mode="human")
    if args.env_name in ["TurtlebotEnv-v0", "TurtlebotMazeEnv-v0"]:
        "map view of the environment"
        camera_id_eval = 1
    else:
        "the default camera"
        camera_id_eval = -1 if args.highRes else 0
    camera_id = -1 if args.highRes else 0

    g, num_steps = 0, 0
    # per-trajectory episode lengths, per-step rewards, and progress scores
    total_step = np.zeros((args.n_eval_traj))
    rewards = np.zeros((args.n_eval_traj, numSteps))
    rewardProgress = np.zeros((args.n_eval_traj))
    if video_path:
        # pick a playback rate consistent with the action repeat
        if "Turtlebot" in args.env_name:
            fps = 5
        elif args.actionRepeat > 1:
            fps = 40 // args.actionRepeat
        else:
            fps = 4
        # encoder methods concatenate the reconstructed input next to the frame
        im_width = image_size * 2 if args.method in encoder_methods else image_size
        video_out = (
            cv2.VideoWriter(
                video_path,
                cv2.VideoWriter_fourcc(*"mp4v"),
                fps=fps,
                frameSize=(im_width, image_size),
            )
            if args.color
            else cv2.VideoWriter(
                video_path,
                cv2.VideoWriter_fourcc(*"XVID"),
                fps=fps,
                frameSize=(im_width, image_size),
                isColor=0,
            )
        )
    for ntraj in range(args.n_eval_traj):
        print("traj: {}".format(ntraj + 1))
        observation = env.reset()
        if args.with_goal:
            state = observation["observation"]
            g = observation["desired_goal"]
        else:
            state = observation
        if video_path:
            "reset video"
            if args.method in encoder_methods:
                update_video(
                    env,
                    color=args.color,
                    video_size=image_size,
                    video=video_out,
                    fpv=args.fpv,
                    camera_id=camera_id,
                    concatIM=runner.last_input * 255,
                    downscaling=not args.highRes,
                )
            else:
                update_video(
                    env,
                    im=None,
                    color=args.color,
                    video_size=image_size,
                    video=video_out,
                    fpv=args.fpv,
                    camera_id=camera_id,
                    downscaling=not args.highRes,
                )
        if image_path:
            # dump the initial observation before any action is taken
            im_high_render = render_env(
                env,
                588,
                False,
                camera_id_eval,
                args.color,
                downscaling=not args.highRes,
            )
            cv2.imwrite(
                image_path + "ob_{:05d}".format(num_steps) + ".png",
                im_high_render[:, :, ::-1].astype(np.uint8),
            )
        for step in range(numSteps):
            num_steps += 1
            if not RANDOM:
                input_tensor = process_inputs(
                    state, g, o_mean, o_std, g_mean, g_std, args.__dict__
                )
            else:
                input_tensor = None
            with torch.no_grad():
                pi = runner.forward(
                    actor_network, input_tensor, evaluate=True, random=RANDOM
                )
            observation_new, reward, done, info = env.step(pi)
            if video_path:
                "update video"
                if args.method in encoder_methods:
                    update_video(
                        env,
                        color=args.color,
                        video_size=image_size,
                        video=video_out,
                        fpv=args.fpv,
                        camera_id=camera_id,
                        concatIM=runner.last_input * 255,
                        downscaling=not args.highRes,
                    )
                else:
                    update_video(
                        env,
                        im=None,
                        color=args.color,
                        video_size=image_size,
                        video=video_out,
                        fpv=args.fpv,
                        camera_id=camera_id,
                        downscaling=not args.highRes,
                    )
            if image_path:
                im_high_render = render_env(
                    env,
                    588,
                    False,
                    camera_id_eval,
                    args.color,
                    downscaling=not args.highRes,
                )
                cv2.imwrite(
                    image_path + "ob_{:05d}".format(num_steps) + ".png",
                    im_high_render[:, :, ::-1].astype(np.uint8),
                )
            assert step < env.maxSteps, "wrong max_episode_steps"
            if args.env_name in env_with_goals:
                if args.with_goal:
                    state_new = observation_new["observation"]
                    g = observation_new["desired_goal"]
                else:
                    state_new = observation_new
                    # without an explicit goal, derive success from the reward signal
                    info["is_success"] = reward + 1
                # stop the episode as soon as success is decided
                if (info["is_success"] == 1.0) or (step == env.maxSteps):
                    rewards[ntraj, step] = info["is_success"]
                    num_steps += 1
                    if info["is_success"] == 0.0:
                        print(
                            "\ntraj {} fails, elapsed_steps {}".format(
                                ntraj + 1, step + 1
                            )
                        )
                    break
            else:
                state_new = observation_new
                rewards[ntraj, step] = reward
                if "Pendulum" in args.env_name and video_path:
                    # cut the recording short once the pendulum has been stable
                    # (mean reward > 3.9 over the last 10 seconds of video)
                    if np.mean(rewards[ntraj, step + 1 - fps * 10 : step + 1]) > 3.9:
                        rewards[ntraj, step + 1 :] = rewards[ntraj, step]
                        break
            if video_path:
                print("step [{}] reward {}".format(step, reward))
            state = state_new
        if args.env_name in env_with_goals:
            # only count episode length for successful goal episodes
            if info["is_success"] != 0.0:
                total_step[ntraj] = step + 1
        else:
            total_step[ntraj] = step + 1
        if args.env_name in [
            "AntBulletEnv-v0",
            "HalfCheetahBulletEnv-v0",
            "HopperBulletEnv-v0",
            "Walker2DBulletEnv-v0",
        ]:
            rewardProgress[ntraj] = env.rewardProgress
    mean_rewards = np.mean(np.sum(rewards, axis=1))
    if args.env_name in [
        "AntBulletEnv-v0",
        "HalfCheetahBulletEnv-v0",
        "HopperBulletEnv-v0",
        "Walker2DBulletEnv-v0",
    ]:
        mean_rewardProgress = np.mean(rewardProgress)
    average_steps = (
        args.max_episode_steps if np.isnan(np.mean(total_step)) else np.mean(total_step)
    )
    if video_path:
        "Release everything if job is finished"
        video_out.release()
        cv2.destroyAllWindows()
        # embed the score into the video file name, e.g. "...-0,85.mp4" or "...-R0123.mp4"
        if args.env_name in env_with_goals:
            strR = "%03d" % int(mean_rewards * 100)
            strR = strR[0] + "," + strR[1:]
        else:
            strR = "R%04d" % int(mean_rewards)
        if args.env_name in [
            "AntBulletEnv-v0",
            "HalfCheetahBulletEnv-v0",
            "HopperBulletEnv-v0",
            "Walker2DBulletEnv-v0",
        ]:
            strR += "-RP%04d" % int(mean_rewardProgress)
        destination = video_path[:-4] + "-" + strR + ".mp4"
        print("destination", destination)
        shutil.move(video_path, destination)
    return mean_rewards, mean_rewardProgress, average_steps
if __name__ == "__main__":
    # Command-line entry point: load a trained policy + SRL model from --my_dir,
    # rebuild the environment, and either evaluate (--n_eval_traj) or record
    # videos/images of the rollout.
    parser = argparse.ArgumentParser()
    parser.add_argument("--my_dir", type=str)
    parser.add_argument(
        "--save_video",
        type=str2bool,
        default=False,
        help="Record video from evaluation trajectories",
    )
    parser.add_argument(
        "--save_image",
        type=str2bool,
        default=False,
        help="Record images from evaluation trajectories",
    )
    parser.add_argument(
        "--model_type",
        type=str,
        default="model_best",
        choices=(["model_last", "model_best"]),
        help="Whether to load the policy with best average return, or the last saved policy",
    )
    parser.add_argument("--demo_length", type=int, default=2, help="The demo length")
    parser.add_argument(
        "--n_eval_traj",
        type=int,
        default=0,
        help="The number of trajectories to compute the average episode returns",
    )
    # NOTE(review): help text "Tune entropy" looks copy-pasted from another flag
    parser.add_argument("--renders", type=str2bool, default=False, help="Tune entropy")
    parser.add_argument(
        "--highRes",
        type=str2bool,
        default=True,
        help="Record high-resolution images, if True, do not downscale images",
    )
    parser.add_argument(
        "--cuda",
        type=str2bool,
        default=True,
        help="If False, do not use cuda if available",
    )
    args_init = parser.parse_args()
    # quantitative evaluation and video/image recording are mutually exclusive
    if args_init.n_eval_traj > 0:
        assert not (args_init.save_video or args_init.save_image)
    demo_length = args_init.demo_length
    print("\nproj_path: ", args_init.my_dir)
    args_init.my_dir = (
        args_init.my_dir[:-1] if args_init.my_dir[-1] == "/" else args_init.my_dir
    )
    all_proj_opt, file = os.path.split(args_init.my_dir)
    try:
        config = loadConfig(args_init.my_dir)
    except Exception:
        print("\nNeed remove folder: %s\n" % args_init.my_dir)
        traceback.print_exc()
        exit()
    env_params = config["env_params"]
    # rebuild the training-time namespace, then override with CLI values
    args = EmptyArgs()
    args.__dict__.update(config)
    "change args with args_init"
    args.renders = args_init.renders
    args.my_dir = args_init.my_dir
    args.highRes = args_init.highRes
    args.seed = datetime.now().microsecond
    print("\nSeed is: \n", args.seed)
    "IMPORTANT TO USE FOR CUDA MEMORY"
    set_seeds(args.seed)
    """
    Load the controller
    """
    o_mean, o_std, g_mean, g_std = 0, 0, 0, 0
    "Load RL model"
    try:
        # checkpoint holds the normalization stats together with the actor
        o_mean, o_std, g_mean, g_std, actor_network, _ = torch.load(
            args_init.my_dir + "/{}.pt".format(args_init.model_type),
            map_location=lambda storage, loc: storage,
        )
    except Exception:
        print("\nNot {}.pt saved".format(args_init.model_type))
        traceback.print_exc()
        exit()
    actor_network.eval()
    if args.env_name in env_with_goals:
        # because RL do not need to see target, it sees the target's position
        args.display_target = False if args_init.n_eval_traj > 0 else True
    # per-environment evaluation budget and episode length
    # NOTE(review): the TurtlebotMazeEnv branch never sets
    # saved_steps_per_episode, which would raise NameError below when
    # save_video/save_image/renders is set -- confirm
    if "TurtlebotMazeEnv" in args.env_name:
        args.n_eval_traj = 5
    elif "ReacherBulletEnv" in args.env_name:
        args.n_eval_traj = 20
        saved_steps_per_episode = args.max_episode_steps
    elif "Pendulum" in args.env_name:
        args.n_eval_traj = 5
        saved_steps_per_episode = 100 * 4
    else:
        args.n_eval_traj = 1
        saved_steps_per_episode = args.max_episode_steps
    if args_init.save_video or args_init.save_image or args_init.renders:
        assert args_init.n_eval_traj == 0
        # Change max_episode_steps to define the Average step
        args.max_episode_steps = saved_steps_per_episode
    elif args_init.n_eval_traj > 0:
        args.n_eval_traj = args_init.n_eval_traj
    """
    Create the environment with the SRL model wrapper
    """
    args.srl_path = args_init.my_dir
    args.demo = True
    # args.random_target = False
    # args.distractor = True
    # args.noise_type = 'noisyObs'
    env, _, runner = make_env(args.__dict__)
    env.seed(args.seed)
    # Create video folder
    if args_init.save_video:
        video_path = (
            args.my_dir + "/piEval-best-E%s.mp4" % config["best_elapsed_epochs"]
            if args_init.model_type == "model_best"
            else args.my_dir + "/piEval-last-E%s.mp4" % config["last_elapsed_epochs"]
        )
    else:
        video_path = ""
    if args_init.save_image:
        image_path = (
            args.my_dir + "/piEval-best-E%s/" % config["best_elapsed_epochs"]
            if args_init.model_type == "model_best"
            else args.my_dir + "/piEval-last-E%s/" % config["last_elapsed_epochs"]
        )
        createFolder(image_path, image_path + " already exist")
    else:
        image_path = ""
    # Create recorder:
    # NOTE(review): first assignment is immediately overwritten by give_name
    model_name = config["method"]
    model_name = give_name(config)
    prefix = "best_" if args_init.model_type == "model_best" else ""
    mean_rewards, mean_rewardProgress, average_steps = eval_agent(
        args,
        env,
        o_mean,
        o_std,
        g_mean,
        g_std,
        actor_network,
        runner,
        video_path=video_path,
        image_path=image_path,
    )
    if args_init.n_eval_traj > 0:
        print(
            "the average total reward is: {}, the average total steps is: {}".format(
                mean_rewards, average_steps
            )
        )
        # persist the evaluation results back into the experiment config
        config[prefix + "avg-reward"] = mean_rewards
        config[prefix + "avg-progress"] = (
            mean_rewardProgress if mean_rewardProgress != 0 else ""
        )
        config[prefix + "avg-steps"] = average_steps
        saveConfig(config, save_dir=args.my_dir)
        with open(os.path.join(args.my_dir, "exp_config.json"), "w") as outfile:
            json.dump(config, outfile)
| [
"SRL4RL.utils.utilsEnv.update_video",
"SRL4RL.utils.utils.saveConfig",
"SRL4RL.utils.utils.give_name",
"cv2.destroyAllWindows",
"numpy.mean",
"argparse.ArgumentParser",
"shutil.move",
"os.path.split",
"SRL4RL.utils.utils.loadConfig",
"cv2.VideoWriter_fourcc",
"traceback.print_exc",
"SRL4RL.uti... | [((1284, 1310), 'numpy.zeros', 'np.zeros', (['args.n_eval_traj'], {}), '(args.n_eval_traj)\n', (1292, 1310), True, 'import numpy as np\n'), ((1327, 1365), 'numpy.zeros', 'np.zeros', (['(args.n_eval_traj, numSteps)'], {}), '((args.n_eval_traj, numSteps))\n', (1335, 1365), True, 'import numpy as np\n'), ((1387, 1413), 'numpy.zeros', 'np.zeros', (['args.n_eval_traj'], {}), '(args.n_eval_traj)\n', (1395, 1413), True, 'import numpy as np\n'), ((8572, 8597), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8595, 8597), False, 'import argparse\n'), ((10245, 10276), 'os.path.split', 'os.path.split', (['args_init.my_dir'], {}), '(args_init.my_dir)\n', (10258, 10276), False, 'import os\n'), ((10825, 10845), 'SRL4RL.utils.nn_torch.set_seeds', 'set_seeds', (['args.seed'], {}), '(args.seed)\n', (10834, 10845), False, 'from SRL4RL.utils.nn_torch import set_seeds\n'), ((12512, 12535), 'SRL4RL.rl.utils.env_utils.make_env', 'make_env', (['args.__dict__'], {}), '(args.__dict__)\n', (12520, 12535), False, 'from SRL4RL.rl.utils.env_utils import make_env\n'), ((13351, 13368), 'SRL4RL.utils.utils.give_name', 'give_name', (['config'], {}), '(config)\n', (13360, 13368), False, 'from SRL4RL.utils.utils import createFolder, encoder_methods, give_name, loadConfig, saveConfig, str2bool\n'), ((7403, 7426), 'numpy.sum', 'np.sum', (['rewards'], {'axis': '(1)'}), '(rewards, axis=1)\n', (7409, 7426), True, 'import numpy as np\n'), ((7615, 7638), 'numpy.mean', 'np.mean', (['rewardProgress'], {}), '(rewardProgress)\n', (7622, 7638), True, 'import numpy as np\n'), ((7730, 7749), 'numpy.mean', 'np.mean', (['total_step'], {}), '(total_step)\n', (7737, 7749), True, 'import numpy as np\n'), ((7859, 7882), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7880, 7882), False, 'import cv2\n'), ((8432, 8468), 'shutil.move', 'shutil.move', (['video_path', 'destination'], {}), '(video_path, destination)\n', (8443, 8468), False, 'import shutil\n'), 
((10303, 10331), 'SRL4RL.utils.utils.loadConfig', 'loadConfig', (['args_init.my_dir'], {}), '(args_init.my_dir)\n', (10313, 10331), False, 'from SRL4RL.utils.utils import createFolder, encoder_methods, give_name, loadConfig, saveConfig, str2bool\n'), ((10717, 10731), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10729, 10731), False, 'from datetime import datetime\n'), ((13186, 13241), 'SRL4RL.utils.utils.createFolder', 'createFolder', (['image_path', "(image_path + ' already exist')"], {}), "(image_path, image_path + ' already exist')\n", (13198, 13241), False, 'from SRL4RL.utils.utils import createFolder, encoder_methods, give_name, loadConfig, saveConfig, str2bool\n'), ((14141, 14181), 'SRL4RL.utils.utils.saveConfig', 'saveConfig', (['config'], {'save_dir': 'args.my_dir'}), '(config, save_dir=args.my_dir)\n', (14151, 14181), False, 'from SRL4RL.utils.utils import createFolder, encoder_methods, give_name, loadConfig, saveConfig, str2bool\n'), ((3340, 3429), 'SRL4RL.utils.utilsEnv.render_env', 'render_env', (['env', '(588)', '(False)', 'camera_id_eval', 'args.color'], {'downscaling': '(not args.highRes)'}), '(env, 588, False, camera_id_eval, args.color, downscaling=not\n args.highRes)\n', (3350, 3429), False, 'from SRL4RL.utils.utilsEnv import render_env, update_video\n'), ((7704, 7723), 'numpy.mean', 'np.mean', (['total_step'], {}), '(total_step)\n', (7711, 7723), True, 'import numpy as np\n'), ((10425, 10446), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (10444, 10446), False, 'import traceback\n'), ((11260, 11281), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (11279, 11281), False, 'import traceback\n'), ((14275, 14301), 'json.dump', 'json.dump', (['config', 'outfile'], {}), '(config, outfile)\n', (14284, 14301), False, 'import json\n'), ((1787, 1818), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (1809, 1818), False, 'import cv2\n'), ((2013, 2044), 
'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (2035, 2044), False, 'import cv2\n'), ((2571, 2753), 'SRL4RL.utils.utilsEnv.update_video', 'update_video', (['env'], {'color': 'args.color', 'video_size': 'image_size', 'video': 'video_out', 'fpv': 'args.fpv', 'camera_id': 'camera_id', 'concatIM': '(runner.last_input * 255)', 'downscaling': '(not args.highRes)'}), '(env, color=args.color, video_size=image_size, video=video_out,\n fpv=args.fpv, camera_id=camera_id, concatIM=runner.last_input * 255,\n downscaling=not args.highRes)\n', (2583, 2753), False, 'from SRL4RL.utils.utilsEnv import render_env, update_video\n'), ((2959, 3113), 'SRL4RL.utils.utilsEnv.update_video', 'update_video', (['env'], {'im': 'None', 'color': 'args.color', 'video_size': 'image_size', 'video': 'video_out', 'fpv': 'args.fpv', 'camera_id': 'camera_id', 'downscaling': '(not args.highRes)'}), '(env, im=None, color=args.color, video_size=image_size, video=\n video_out, fpv=args.fpv, camera_id=camera_id, downscaling=not args.highRes)\n', (2971, 3113), False, 'from SRL4RL.utils.utilsEnv import render_env, update_video\n'), ((3830, 3899), 'SRL4RL.rl.modules.agent_utils.process_inputs', 'process_inputs', (['state', 'g', 'o_mean', 'o_std', 'g_mean', 'g_std', 'args.__dict__'], {}), '(state, g, o_mean, o_std, g_mean, g_std, args.__dict__)\n', (3844, 3899), False, 'from SRL4RL.rl.modules.agent_utils import process_inputs\n'), ((4009, 4024), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4022, 4024), False, 'import torch\n'), ((5209, 5298), 'SRL4RL.utils.utilsEnv.render_env', 'render_env', (['env', '(588)', '(False)', 'camera_id_eval', 'args.color'], {'downscaling': '(not args.highRes)'}), '(env, 588, False, camera_id_eval, args.color, downscaling=not\n args.highRes)\n', (5219, 5298), False, 'from SRL4RL.utils.utilsEnv import render_env, update_video\n'), ((14200, 14244), 'os.path.join', 'os.path.join', (['args.my_dir', '"""exp_config.json"""'], {}), 
"(args.my_dir, 'exp_config.json')\n", (14212, 14244), False, 'import os\n'), ((4352, 4534), 'SRL4RL.utils.utilsEnv.update_video', 'update_video', (['env'], {'color': 'args.color', 'video_size': 'image_size', 'video': 'video_out', 'fpv': 'args.fpv', 'camera_id': 'camera_id', 'concatIM': '(runner.last_input * 255)', 'downscaling': '(not args.highRes)'}), '(env, color=args.color, video_size=image_size, video=video_out,\n fpv=args.fpv, camera_id=camera_id, concatIM=runner.last_input * 255,\n downscaling=not args.highRes)\n', (4364, 4534), False, 'from SRL4RL.utils.utilsEnv import render_env, update_video\n'), ((4784, 4938), 'SRL4RL.utils.utilsEnv.update_video', 'update_video', (['env'], {'im': 'None', 'color': 'args.color', 'video_size': 'image_size', 'video': 'video_out', 'fpv': 'args.fpv', 'camera_id': 'camera_id', 'downscaling': '(not args.highRes)'}), '(env, im=None, color=args.color, video_size=image_size, video=\n video_out, fpv=args.fpv, camera_id=camera_id, downscaling=not args.highRes)\n', (4796, 4938), False, 'from SRL4RL.utils.utilsEnv import render_env, update_video\n'), ((6662, 6715), 'numpy.mean', 'np.mean', (['rewards[ntraj, step + 1 - fps * 10:step + 1]'], {}), '(rewards[ntraj, step + 1 - fps * 10:step + 1])\n', (6669, 6715), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import sys
import os
import json
import pickle
import traceback
import numpy as np
import time
import datetime as dtime
from progressbar import ProgressBar, ETA, Bar, Percentage
from sklearn.base import clone
from sklearn.preprocessing import MinMaxScaler
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score, recall_score
from utils.UrlUtils import UrlUtils
#from utils.contextUtils import toContext
def toContext(process, exitv, message):
    """Stand-in for utils.contextUtils.toContext: echo the status to stdout."""
    print(process, exitv, message)
# Convenience aliases for frequently used path helpers.
pathjoin = os.path.join
pathexists = os.path.exists

mdy = dtime.datetime.now().strftime('%m%d%y')  # run date stamp, e.g. '061524'
product_type = 'interferogram'
cache_dir = 'cached'

# TRAINING / CROSS-VALIDATION PARAMETERS #######################################
train_folds = np.inf # inf = leave-one-out, otherwise k-fold cross validation
train_state = 42 # random seed
train_verbose = 0
train_jobs = -1

cv_type = 'loo' if train_folds==np.inf else '%d-fold'%train_folds
cv_probs = True # record prediction probabilities in addition to labels

scorefn = {} # map from name (e.g., mse) -> f(y_true,y_pred)
scorefn['precision'] = lambda te,pr,ul: precision_score(te,pr,labels=ul)
scorefn['recall']    = lambda te,pr,ul: recall_score(te,pr,labels=ul)

errorfn = {} # map from name (e.g., diff) -> f(y_true,y_pred)
errorfn['match'] = lambda y_true,y_pred: y_true==y_pred

# GRID SEARCH PARAMS FOR PARAMETER TUNING ######################################
gridcv_folds = 2 # number of cross-validation folds per gridcv parameter
gridcv_jobs = -1 # -1 = use all cores
gridcv_verbose = 0 # verbosity level of model-tuning cross-validation output
gridcv_score = 'roc_auc'

# SKLEARN MODEL SPECIFICATIONS #################################################

### Random Forest ##############################################################
rf_trees = 500
rf_feats = np.linspace(0.1,1.0,5)
rf_depth = [2,4,7,10,25]
rf_jobs = 1 if gridcv_jobs == -1 else -1 # multiprocessing + RandomForest don't play nice
rf_tuned = {'max_features':rf_feats,'max_depth':rf_depth}
rf_defaults = {
    'n_estimators': rf_trees,'max_features':'sqrt','n_jobs':rf_jobs,
    'verbose':train_verbose,'random_state':train_state,
    'criterion':'gini','class_weight':'balanced_subsample'
}

### XGBoost ####################################################################
xgb_depth = [3,4,5,10,25]
xgb_subsample = np.linspace(0.1,1,5)
# Bug fix: 'subsample' was listed twice (0.5 then 1) in this dict literal; a
# duplicate key means only the later value (1) ever took effect, so the dead
# 0.5 entry has been removed. Effective defaults are unchanged.
xgb_default = {
    'n_estimators':rf_trees,'max_delta_step':1,'learning_rate':0.1,
    'objective':'binary:logistic','max_depth':3,
    'colsample_bytree':1,'subsample':1,'silent':(not train_verbose),
    'seed':train_state,'nthread':train_jobs
}
xgb_tuned = {'learning_rate':[0.001,0.01,0.05,0.1,0.25,0.33],
             'max_depth':xgb_depth,'subsample':xgb_subsample}
def loadjson(jsonfile):
    """Parse *jsonfile* and return the deserialized object."""
    with open(jsonfile, 'r') as handle:
        parsed = json.load(handle)
    return parsed
def dumpjson(objdict, jsonfile):
    """Serialize *objdict* as JSON into the file at path *jsonfile*.

    Bug fix: the original called json.dump(fid, objdict) with the arguments
    swapped, which raises TypeError at runtime because a file object is not
    JSON serializable. json.dump takes (obj, fp).

    Arguments:
    - objdict: JSON-serializable object to write
    - jsonfile: destination path
    Returns: None (json.dump's return value)
    """
    with open(jsonfile, 'w') as fid:
        return json.dump(objdict, fid)
def url2pid(url):
    """Convert a product url to its product id.

    The id is the last two url path components joined by '_', with any
    resulting double underscore collapsed to a single one.
    """
    trimmed = url[:-1] if url.endswith('/') else url
    pieces = trimmed.split('/')
    pid = pieces[-2] + '_' + pieces[-1]
    return pid.replace('__', '_')
def url2featid(url, product_type):
    """Convert a product url to its feature id.

    Substitutes the product-type path component with 'features', then
    re-inserts the product type into any 'features__' prefix so the
    feature id keeps the product type in its name.
    """
    feat_url = url.replace(product_type, 'features')
    expanded = 'features_' + product_type + '__'
    return feat_url.replace('features__', expanded)
def fdict2vec(featdict, clfinputs):
    """Flatten the feature dict into the vector the classifier expects.

    clfinputs['features'] lists the feature ids, their expected dimensions,
    and the coherence threshold whose sub-dict of *featdict* supplies the
    values. Extraction is best-effort: any missing key or dimension mismatch
    aborts the loop and whatever has been accumulated so far is returned
    (an empty list on immediate failure).
    """
    vector = []
    try:
        spec = clfinputs['features']
        coherent = featdict['%d' % spec['cohthr10']]
        for feat_id, feat_dim in zip(spec['feature_order'], spec['feature_dims']):
            values = coherent[feat_id]
            if not isinstance(values, list):
                values = [values]
            assert len(values) == feat_dim
            vector.extend(values)
    except Exception:
        pass
    return vector
def curlProductMeta(prod_url,verbose=False,remove=True):
    """
    curlProductMeta(prod_url,verbose=False)
    Download a product's .met.json via curl and return its parsed contents.
    Arguments:
    - prod_url: product url
    Keyword Arguments:
    - verbose: verbose output (default=False)
    - remove: delete the downloaded .met.json after parsing (default=True)
    Returns: metadata dict from product .met.json, or {} if the download
    fails or the file never appears
    """
    if prod_url.endswith('/'):
        prod_url = prod_url[:-1]
    prod_json = url2pid(prod_url) + '.met.json'
    try:
        uu = UrlUtils()
        silentoutput = ' ' if verbose else ' --silent '
        # NOTE(review): credentials are interpolated into a shell command and
        # visible in the process list; consider curl's --netrc/-K instead
        userstr = uu.dav_u + ':' + uu.dav_p
        command = 'curl' + silentoutput + '-k -f -u' + userstr + ' -O ' + pathjoin(prod_url,prod_json)
        os.system(command)
    except Exception:
        return {}
    # curl -O writes into the current working directory
    if not pathexists(prod_json):
        return {}
    meta = loadjson(prod_json)
    if remove:
        os.remove(prod_json)
    return meta
def getFeatures(url, clfinputs, product_type='interferogram'):
    """Fetch and flatten the feature vector for a product url.

    Maps the product url to its feature-product url, downloads that
    product's metadata, and extracts the classifier's feature vector
    from it (empty list when the download or extraction fails).
    """
    feature_url = url2featid(url, product_type)
    meta = curlProductMeta(feature_url)
    return fdict2vec(meta, clfinputs)
def loadQuery(querymeta,queryoptions=[],queryoutfile=None,cache=False):
    '''
    builds/posts the faceted search query specified in querymeta and dumps the
    result to queryoutfile. if queryoutfile already exists, the query is loaded from
    disk rather than executed.
    '''
    # NOTE(review): mutable default argument queryoptions=[] -- harmless here
    # (never mutated) but fragile; prefer None + local default
    if not cache or not pathexists(queryoutfile):
        print('executing faceted search query...')
        # deferred imports: these utils are only needed when actually querying
        from utils.queryBuilder import postQuery, buildQuery
        from utils.contextUtils import toContext
        ret,status = postQuery(buildQuery(querymeta,queryoptions))
        if cache and status:
            # only dump the query if caching enabled and postQuery succeeds
            with open(queryoutfile,'wb') as fid:
                pickle.dump(ret,fid)
    elif cache:
        print('loading cached query from %s...'%queryoutfile)
        with open(queryoutfile,'rb') as fid:
            ret = pickle.load(fid)
    print('query returned %d products'%len(ret))
    return ret
def loadClassmap(cmapjson):
"""
loadClassmap(cmapjson) - loads classmap file,
substitutes '_', for '-' as necessary
Arguments:
- cmapjson: classmap .json file
Keyword Arguments:
None
Returns: classmap with substitutions
"""
initialmap = loadjson(cmapjson)
classmap = initialmap.copy()
# substitute '-' with '_' (for user-tagged typos)
tags = initialmap.keys()
for tag in tags:
if '-' in tag:
classmap[tag.replace('-','_')] = classmap[tag]
return classmap
def loadPredictorSpec(clfjson):
"""
loadPredictorSpec(clfjson)
Arguments:
- clfjson: json file specifying classifier parameters
Keyword Arguments:
None
Returns: dict containing classifier parameters,
including (but not limited to):
- classmap: classmap to map user tags to labels
- features: dict containing information about features used to train classifier
"""
clfspec = loadjson(clfjson)
clfspec['classmap'] = loadClassmap(clfspec["classmap_file"])
clfspec['features'] = loadjson(clfspec["feat_file"])
return clfspec
def dumpPredictorSpec(inputs):
clfspec = {}
clfspec['clf_file'] = inputs['clf_name']+'.pkl'
for key in ['clf_type','classmap','feat_file']:
clfspec[key] = inputs[key]
json.dump(clfspec,inputs['clf_name']+'.json')
def PredictorSpec(inputjson):
clfspec['clf_file'] = inputs['clf_file']
clfspec['classmap'] = inputs["classmap_file"]
clfspec['features'] = inputs("feat_file")
def usertags2label(usertags,classmap):
'''
return dictionary of matched (tag,label) pairs in classmap for all tags
returns {} if none of the tags are present in classmap
'''
labelmap = {}
for tag in usertags:
tag = tag.strip()
for k,v in classmap.items():
if tag.count(k):
labelmap[tag] = v
return labelmap
def queryAllTags(taglist,cache=False):
'''
return all urls with user tags present in taglist
'''
tagpkl = pathjoin(cache_dir,"usertags.pkl")
tagquery = {'dataset_type':product_type,'tags':taglist}
querylist = loadQuery(tagquery,cache=cache,queryoutfile=tagpkl)
querydict = {}
for product in querylist:
purl = product['url']
querydict[purl] = product
return querydict
def collectUrlTags(urllist,querymeta={}):
"""
collectUrlTags(urllist,querymeta={})
collects user tags for a list of urls
Arguments:
- urllist: list of urls
Keyword Arguments:
- querymeta: (default={})
Returns: dict keyed on product id containing
- url: input url
- user_tags: tags for input url
"""
tagdict = {}
nurl = len(urllist)
for i,url in enumerate(urllist):
if url in querymeta: # use the query input if possible
meta = querymeta[url]
else: # otherwise retrieve product metadata via curl
meta = curlProductMeta(url)
tagdict[url2pid(url)] = {'url':url,'user_tags':meta.get('user_tags',[])}
return tagdict
def collectTrainingData(urls,clfinputs,cache=False):
'''
construct matrix of training samples X with labels y by intersecting the set of
IGMs with extracted features (featquery) with the set of tagged IGMs (taggedquery)
Keep only IGMs with tags present in classmap, and select/validate features
according to the parameters in clfinputs.
Returns: dict containing:
- tags: list of user tags used to select training samples
- X, y: training samples, labels
- traintags: tags for each training sample
- trainurls: url for each training sample
- skiplist: list of urls which could not be retrieved due to errors
- errors: list of error strings for each url in skiplist
'''
classmap = clfinputs['classmap']
tags = sorted(list(classmap.keys()))
traindatpkl = pathjoin(cache_dir,"traindat.pkl")
if cache and pathexists(traindatpkl):
print('loading training data from %s...'%traindatpkl)
with open(traindatpkl,'rb') as fid:
ret = pickle.load(fid)
# make sure the set of tags match
if all([ret['tags'][i] == tags[i] for i in range(len(tags))]):
return ret
print("querying %d tags"%len(tags))
querymeta = queryAllTags(tags,cache=cache)
if len(urls)==0:
print('no URLs provided, training using all tags in classmap')
# construct/run query to get metadata for all products with given tags
urls = list(querymeta.keys())
elif isinstance(urls,str):
urls = [urls]
tagdict = collectUrlTags(urls,querymeta=querymeta)
ntagged = len(tagdict)
X,y = [],[]
traintags,trainurls = [],[]
errors,skiplist = [],[]
widgets = ['collecting features for %d products'%ntagged, Percentage(), ' ', Bar('='), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=ntagged).start()
for i,pid in enumerate(tagdict):
tdict = tagdict[pid]
turl,ttags = tdict['url'],tdict['user_tags']
taglabel = usertags2label(ttags,classmap)
if len(taglabel) == 0:
continue
fvec = getFeatures(turl,clfinputs)
if len(fvec)==0:
errmsg = "error collecting features for product %s (skipped)"%pid
errors.append(errmsg)
skiplist.append(turl)
continue
pidtags,pidlabs = list(taglabel.keys()),list(taglabel.values())
if len(pidtags) == 1:
X.append(fvec)
y.append(pidlabs[0])
traintags.append(pidtags[0])
trainurls.append(turl)
elif len(pidtags) > 1:
ulab = np.unique(pidlabs)
if len(ulab) == 1:
X.append(fvec)
y.append(pidlabs[0])
traintags.append(pidtags[0])
trainurls.append(turl)
else:
errmsg = "conflicting tags (%s) for product %s, skipped"%(pidtags,pid)
errors.append(errmsg)
skiplist.append(turl)
pbar.update(i)
pbar.finish()
# sort products by product url to ensure identical ordering of X,y
sorti = np.argsort(trainurls)
print('collected', len(sorti), 'training samples (skipped %d)'%len(skiplist))
X,y = np.array(X)[sorti,:],np.array(y)[sorti]
traintags,trainurls = np.array(traintags)[sorti],np.array(trainurls)[sorti]
ret = {'tags':tags,'X':X,'y':y,'traintags':traintags,'trainurls':trainurls,
'skiplist':skiplist,'errors':errors}
if cache:
with open(traindatpkl,'wb') as fid:
pickle.dump(ret,fid)
print('saved training data to %s'%traindatpkl)
return ret
def train(X_train,y_train,clfinputs,**kwargs):
"""
train(X_train,y_train,clfinputs,**kwargs)
train a classifier with parameter tuning via gridsearchcv
Arguments:
- X_train: training data (N x n matrix)
- y_train: training labels (N x 1 vector)
- clfinputs: classifier spec
Keyword Arguments:
None
Returns:
- clf: tuned classifier
- cv: cross validation struct used to tune classifier
"""
uy = np.unique(y_train)
if len(uy) != 2:
print('error: need 2 classes for classification!')
return None,None
model_id = clfinputs['clf_type']
if model_id == 'rf':
model_clf = RandomForestClassifier(**rf_defaults)
model_tuned = [rf_tuned]
else:
print("invalid clf_type")
return {}
clf = clone(model_clf)
if model_tuned is not None and len(model_tuned) != 0 and \
len(model_tuned[0]) != 0:
cv = GridSearchCV(clf,model_tuned,cv=gridcv_folds,scoring=gridcv_score,
n_jobs=gridcv_jobs,verbose=gridcv_verbose,refit=True)
cv.fit(X_train, y_train)
clf = cv.best_estimator_
else: # no parameter tuning
clf.fit(X_train,y_train)
return clf,cv
def crossValidatePredictor(X,y,clfinputs,logfile='cvout.log'):
"""
crossValidatePredictor(X,y,clfinputs,logfile='cvout.log')
use cross validation to assess the quality of a specified classifier
Arguments:
- X: training data
- y: training labels
- clfinputs: dict of classifier inputs
Keyword Arguments:
- logfile: cross-validation outfile (default='cvout.log')
Returns:
- dict containing:
- models: model for each cross validation fold
- scores: scores for each fold according to each scorefn
- preds: predictions for each training sample
- errors: errors for each training sample according to each errorfn
- modelcvs: cross validation structure used to train each model
"""
models,modelcvs,preds,probs = [],[],[],[]
scores = dict([(key,[]) for key in scorefn.keys()])
errors = dict([(key,[]) for key in errorfn.keys()])
# validate class labels
uy = np.unique(y)
if len(uy) != 2:
print('error: need 2 classes for classification!')
return {}
N,ymin = len(y),uy[0]
if cv_type == 'loo':
cv = KFold(N,n_folds=N,random_state=train_state)
y_pred = np.zeros(N)
y_prob = np.zeros(N)
else:
cv = StratifiedKFold(y,n_folds=train_folds,random_state=train_state)
n_folds = len(cv)
model_id = clfinputs['clf_type']
widgets = ['%s cv: '%cv_type, Percentage(), ' ', Bar('='), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=n_folds+(cv_type=='loo')).start()
with open(logfile,'w') as logfid:
cv_test_index = []
scorekeys = sorted(scores.keys())
for i,(train_index,test_index) in enumerate(cv):
pbar.update(i)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
cv_test_index.extend(test_index)
# xgb assumes labels \in {0,1}
if model_id == 'xgb' and ymin == -1:
y_train[y_train==-1] = 0
# train/predict as usual
clf,clf_cv = train(X_train,y_train,clfinputs)
clf_pred = clf.predict(X_test)
if model_id == 'xgb' and ymin == -1:
clf_pred[clf_pred==0] = -1
if cv_probs:
clf_prob = clf.predict_proba(X_test)[:,0]
else:
clf_prob = np.ones(len(clf_pred))*np.nan
# loo predicts one label per 'fold'
if cv_type == 'loo':
y_pred[test_index] = clf_pred
y_prob[test_index] = clf_prob
# compute scores for the points we've classified thus far
y_test_cur = np.atleast_1d(y[cv_test_index])
y_pred_cur = np.atleast_1d(y_pred[cv_test_index])
for score,score_fn in scorefn.items():
scorei = score_fn(y_test_cur,y_pred_cur,uy)
scores[score] = [scorei]
else:
# collect output for all test samples in this fold
for score,score_fn in scorefn.items():
scorei = score_fn(y_test,clf_pred,uy)
scores[score].append(scorei)
preds.append(clf_pred)
probs.append(clf_prob)
models.append(clf)
modelcvs.append(clf_cv)
for error,error_fn in errorfn.items():
errors[error].append(error_fn(y_test,clf_pred))
if i==0:
scorenames = ['%-16s'%score for score in scorekeys]
logstr = '%-8s %s'%('i',''.join(scorenames))
else:
curscores = ['%-16.4f'%(np.mean(scores[score]))
for score in scorekeys]
logstr = '%-8.3g %s'%(i,''.join(curscores))
print(logstr,file=logfid,flush=True)
# train full model for loo cv, score on loo preds from above
if cv_type == 'loo':
for score,score_fn in scorefn.items():
scores[score] = [score_fn(y,y_pred,uy)]
for error,error_fn in errorfn.items():
errors[error] = [error_fn(y,y_pred)]
clf,clf_cv = train(X,y,clfinputs)
models = [clf]
modelcvs = [clf_cv]
preds = [y_pred]
probs = [y_prob]
pbar.update(i+1)
pbar.finish()
# output scores ordered by key
for score_id in scorekeys:
score_vals = scores[score_id]
print('mean %s: %7.4f (std=%7.4f)'%(score_id, np.mean(score_vals),
np.std(score_vals)))
return {'preds':preds,'probs':probs,'scores':scores,'errors':errors,
'models':models,'modelcvs':modelcvs}
def trainPredictor(infile):
process = 'trainPredictor'
# fix the random seed to ensure reproducibility
np.random.seed(seed=train_state)
inputs = loadjson(infile)
outputs = {}
outbase = 'predictor%s'%mdy
cwd = os.getcwd()
try:
clfinputs = {}
clfinputs['clf_file'] = inputs['clf_name']+'.pkl'
clfinputs['clf_type'] = inputs['clf_type']
clfinputs['classmap'] = loadClassmap(inputs["classmap_file"])
clfinputs['features'] = loadjson(inputs["feat_file"])
inputurls = inputs.pop('urls',[])
crossvalidate = inputs.pop('crossvalidate',0)
saveclf = inputs.pop('saveclf',0)
cacheoutput = inputs.pop('cacheoutput',0)
if not pathexists(outbase):
os.mkdir(outbase)
if cacheoutput and not pathexists(pathjoin(outbase,cache_dir)):
os.mkdir(pathjoin(outbase,cache_dir))
os.chdir(outbase)
except Exception as e:
exitv = 10
message = 'IO Preprocessing failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
trdat = collectTrainingData(inputurls,clfinputs,cache=cacheoutput)
X, y = trdat['X'],trdat['y']
traintags, trainurls = trdat['traintags'],trdat['trainurls']
errors, skiplist = trdat['skiplist'],trdat['errors']
print('loaded %d training samples (%d skipped)'%(len(y),len(skiplist)))
except Exception as e:
exitv = 11
message = 'Training data collection failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
if crossvalidate:
cvoutpkl = "cvout.pkl"
cvlogfile = 'cvout.log'
print('evaluating model via %s cross-validation (logfile=%s)...'%(cv_type,cvlogfile))
starttime = time.time()
cvout = crossValidatePredictor(X,y,clfinputs,logfile=cvlogfile)
outputs['cv_time'] = time.time()-starttime
outputs['cv_out'] = cvoutpkl
outputs['cv_log'] = cvlogfile
with open(cvoutpkl,'wb') as fid:
pickle.dump(cvout,fid)
print('done, output saved to %s.'%cvoutpkl)
except Exception as e:
exitv = 12
message = 'Cross-validation failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
if saveclf:
starttime = time.time()
clf,clfcv = train(X,y,clfinputs)
clffile = clfinputs['clf_file']
if clffile[0] != '/':
clffile = pathjoin(cwd,clffile) # path relative to cwd
clfjson = clffile.replace('.pkl','.json')
outputs['clf_time'] = time.time()-starttime
outputs['clf_file'] = clffile
print("training classifier using all available data for deployment...")
with open(clffile,'wb') as fid:
pickle.dump(clf,fid)
with open(clfjson,'w') as fid:
json.dump(clfinputs,fid)
print('done, output saved to %s.'%clffile)
except Exception as e:
exitv = 13
message = 'Classifier training failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
json.dump(outputs,open(outbase+'.met.json','w'),indent=True)
except Exception:
os.chdir(cwd)
exitv = 14
message = 'Failed to create metadata file for ' + outbase
toContext(process,exitv,message)
sys.exit(1)
exitv = 0
os.chdir(cwd)
message = 'trainPredictor finished with no errors.'
toContext(process,exitv,message)
if __name__ == '__main__':
try: status = trainPredictor(sys.argv[1])
except Exception as e:
with open('_alt_error.txt', 'w') as f:
f.write("%s\n" % str(e))
with open('_alt_traceback.txt', 'w') as f:
f.write("%s\n" % traceback.format_exc())
raise
sys.exit(status)
| [
"sklearn.cross_validation.KFold",
"utils.queryBuilder.buildQuery",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.argsort",
"utils.contextUtils.toContext",
"numpy.array",
"progressbar.Percentage",
"sys.exit",
"os.remove",
"progressbar.ProgressBar",
"numpy.mean",
"n... | [((1919, 1943), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(5)'], {}), '(0.1, 1.0, 5)\n', (1930, 1943), True, 'import numpy as np\n'), ((2443, 2465), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1)', '(5)'], {}), '(0.1, 1, 5)\n', (2454, 2465), True, 'import numpy as np\n'), ((1198, 1232), 'sklearn.metrics.precision_score', 'precision_score', (['te', 'pr'], {'labels': 'ul'}), '(te, pr, labels=ul)\n', (1213, 1232), False, 'from sklearn.metrics import precision_score, recall_score\n'), ((1271, 1302), 'sklearn.metrics.recall_score', 'recall_score', (['te', 'pr'], {'labels': 'ul'}), '(te, pr, labels=ul)\n', (1283, 1302), False, 'from sklearn.metrics import precision_score, recall_score\n'), ((7963, 8011), 'json.dump', 'json.dump', (['clfspec', "(inputs['clf_name'] + '.json')"], {}), "(clfspec, inputs['clf_name'] + '.json')\n", (7972, 8011), False, 'import json\n'), ((12918, 12939), 'numpy.argsort', 'np.argsort', (['trainurls'], {}), '(trainurls)\n', (12928, 12939), True, 'import numpy as np\n'), ((13921, 13939), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (13930, 13939), True, 'import numpy as np\n'), ((14281, 14297), 'sklearn.base.clone', 'clone', (['model_clf'], {}), '(model_clf)\n', (14286, 14297), False, 'from sklearn.base import clone\n'), ((15677, 15689), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (15686, 15689), True, 'import numpy as np\n'), ((19742, 19774), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'train_state'}), '(seed=train_state)\n', (19756, 19774), True, 'import numpy as np\n'), ((19869, 19880), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (19878, 19880), False, 'import os\n'), ((23483, 23496), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (23491, 23496), False, 'import os\n'), ((23557, 23591), 'utils.contextUtils.toContext', 'toContext', (['process', 'exitv', 'message'], {}), '(process, exitv, message)\n', (23566, 23591), False, 'from utils.contextUtils import toContext\n'), ((23915, 23931), 
'sys.exit', 'sys.exit', (['status'], {}), '(status)\n', (23923, 23931), False, 'import sys\n'), ((705, 725), 'datetime.datetime.now', 'dtime.datetime.now', ([], {}), '()\n', (723, 725), True, 'import datetime as dtime\n'), ((2929, 2943), 'json.load', 'json.load', (['fid'], {}), '(fid)\n', (2938, 2943), False, 'import json\n'), ((3028, 3051), 'json.dump', 'json.dump', (['fid', 'objdict'], {}), '(fid, objdict)\n', (3037, 3051), False, 'import json\n'), ((4868, 4878), 'utils.UrlUtils.UrlUtils', 'UrlUtils', ([], {}), '()\n', (4876, 4878), False, 'from utils.UrlUtils import UrlUtils\n'), ((5090, 5108), 'os.system', 'os.system', (['command'], {}), '(command)\n', (5099, 5108), False, 'import os\n'), ((5269, 5289), 'os.remove', 'os.remove', (['prod_json'], {}), '(prod_json)\n', (5278, 5289), False, 'import os\n'), ((11548, 11560), 'progressbar.Percentage', 'Percentage', ([], {}), '()\n', (11558, 11560), False, 'from progressbar import ProgressBar, ETA, Bar, Percentage\n'), ((11567, 11575), 'progressbar.Bar', 'Bar', (['"""="""'], {}), "('=')\n", (11570, 11575), False, 'from progressbar import ProgressBar, ETA, Bar, Percentage\n'), ((11582, 11587), 'progressbar.ETA', 'ETA', ([], {}), '()\n', (11585, 11587), False, 'from progressbar import ProgressBar, ETA, Bar, Percentage\n'), ((14133, 14170), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '(**rf_defaults)\n', (14155, 14170), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((14408, 14537), 'sklearn.grid_search.GridSearchCV', 'GridSearchCV', (['clf', 'model_tuned'], {'cv': 'gridcv_folds', 'scoring': 'gridcv_score', 'n_jobs': 'gridcv_jobs', 'verbose': 'gridcv_verbose', 'refit': '(True)'}), '(clf, model_tuned, cv=gridcv_folds, scoring=gridcv_score,\n n_jobs=gridcv_jobs, verbose=gridcv_verbose, refit=True)\n', (14420, 14537), False, 'from sklearn.grid_search import GridSearchCV\n'), ((15858, 15903), 'sklearn.cross_validation.KFold', 'KFold', (['N'], {'n_folds': 'N', 
'random_state': 'train_state'}), '(N, n_folds=N, random_state=train_state)\n', (15863, 15903), False, 'from sklearn.cross_validation import KFold, StratifiedKFold\n'), ((15919, 15930), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (15927, 15930), True, 'import numpy as np\n'), ((15948, 15959), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (15956, 15959), True, 'import numpy as np\n'), ((15991, 16056), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (['y'], {'n_folds': 'train_folds', 'random_state': 'train_state'}), '(y, n_folds=train_folds, random_state=train_state)\n', (16006, 16056), False, 'from sklearn.cross_validation import KFold, StratifiedKFold\n'), ((16157, 16169), 'progressbar.Percentage', 'Percentage', ([], {}), '()\n', (16167, 16169), False, 'from progressbar import ProgressBar, ETA, Bar, Percentage\n'), ((16176, 16184), 'progressbar.Bar', 'Bar', (['"""="""'], {}), "('=')\n", (16179, 16184), False, 'from progressbar import ProgressBar, ETA, Bar, Percentage\n'), ((16191, 16196), 'progressbar.ETA', 'ETA', ([], {}), '()\n', (16194, 16196), False, 'from progressbar import ProgressBar, ETA, Bar, Percentage\n'), ((20593, 20610), 'os.chdir', 'os.chdir', (['outbase'], {}), '(outbase)\n', (20601, 20610), False, 'import os\n'), ((6134, 6169), 'utils.queryBuilder.buildQuery', 'buildQuery', (['querymeta', 'queryoptions'], {}), '(querymeta, queryoptions)\n', (6144, 6169), False, 'from utils.queryBuilder import postQuery, buildQuery\n'), ((10798, 10814), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (10809, 10814), False, 'import pickle\n'), ((11600, 11644), 'progressbar.ProgressBar', 'ProgressBar', ([], {'widgets': 'widgets', 'maxval': 'ntagged'}), '(widgets=widgets, maxval=ntagged)\n', (11611, 11644), False, 'from progressbar import ProgressBar, ETA, Bar, Percentage\n'), ((13032, 13043), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (13040, 13043), True, 'import numpy as np\n'), ((13053, 13064), 'numpy.array', 'np.array', (['y'], 
{}), '(y)\n', (13061, 13064), True, 'import numpy as np\n'), ((13098, 13117), 'numpy.array', 'np.array', (['traintags'], {}), '(traintags)\n', (13106, 13117), True, 'import numpy as np\n'), ((13125, 13144), 'numpy.array', 'np.array', (['trainurls'], {}), '(trainurls)\n', (13133, 13144), True, 'import numpy as np\n'), ((13351, 13372), 'pickle.dump', 'pickle.dump', (['ret', 'fid'], {}), '(ret, fid)\n', (13362, 13372), False, 'import pickle\n'), ((16209, 16274), 'progressbar.ProgressBar', 'ProgressBar', ([], {'widgets': 'widgets', 'maxval': "(n_folds + (cv_type == 'loo'))"}), "(widgets=widgets, maxval=n_folds + (cv_type == 'loo'))\n", (16220, 16274), False, 'from progressbar import ProgressBar, ETA, Bar, Percentage\n'), ((20437, 20454), 'os.mkdir', 'os.mkdir', (['outbase'], {}), '(outbase)\n', (20445, 20454), False, 'import os\n'), ((20775, 20809), 'utils.contextUtils.toContext', 'toContext', (['process', 'exitv', 'message'], {}), '(process, exitv, message)\n', (20784, 20809), False, 'from utils.contextUtils import toContext\n'), ((20816, 20827), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (20824, 20827), False, 'import sys\n'), ((21342, 21376), 'utils.contextUtils.toContext', 'toContext', (['process', 'exitv', 'message'], {}), '(process, exitv, message)\n', (21351, 21376), False, 'from utils.contextUtils import toContext\n'), ((21383, 21394), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (21391, 21394), False, 'import sys\n'), ((21624, 21635), 'time.time', 'time.time', ([], {}), '()\n', (21633, 21635), False, 'import time\n'), ((22170, 22204), 'utils.contextUtils.toContext', 'toContext', (['process', 'exitv', 'message'], {}), '(process, exitv, message)\n', (22179, 22204), False, 'from utils.contextUtils import toContext\n'), ((22211, 22222), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (22219, 22222), False, 'import sys\n'), ((22289, 22300), 'time.time', 'time.time', ([], {}), '()\n', (22298, 22300), False, 'import time\n'), ((23125, 23159), 
'utils.contextUtils.toContext', 'toContext', (['process', 'exitv', 'message'], {}), '(process, exitv, message)\n', (23134, 23159), False, 'from utils.contextUtils import toContext\n'), ((23166, 23177), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (23174, 23177), False, 'import sys\n'), ((23295, 23308), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (23303, 23308), False, 'import os\n'), ((23402, 23436), 'utils.contextUtils.toContext', 'toContext', (['process', 'exitv', 'message'], {}), '(process, exitv, message)\n', (23411, 23436), False, 'from utils.contextUtils import toContext\n'), ((23443, 23454), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (23451, 23454), False, 'import sys\n'), ((6340, 6361), 'pickle.dump', 'pickle.dump', (['ret', 'fid'], {}), '(ret, fid)\n', (6351, 6361), False, 'import pickle\n'), ((6502, 6518), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (6513, 6518), False, 'import pickle\n'), ((12410, 12428), 'numpy.unique', 'np.unique', (['pidlabs'], {}), '(pidlabs)\n', (12419, 12428), True, 'import numpy as np\n'), ((17515, 17546), 'numpy.atleast_1d', 'np.atleast_1d', (['y[cv_test_index]'], {}), '(y[cv_test_index])\n', (17528, 17546), True, 'import numpy as np\n'), ((17576, 17612), 'numpy.atleast_1d', 'np.atleast_1d', (['y_pred[cv_test_index]'], {}), '(y_pred[cv_test_index])\n', (17589, 17612), True, 'import numpy as np\n'), ((21745, 21756), 'time.time', 'time.time', ([], {}), '()\n', (21754, 21756), False, 'import time\n'), ((21923, 21946), 'pickle.dump', 'pickle.dump', (['cvout', 'fid'], {}), '(cvout, fid)\n', (21934, 21946), False, 'import pickle\n'), ((22584, 22595), 'time.time', 'time.time', ([], {}), '()\n', (22593, 22595), False, 'import time\n'), ((22794, 22815), 'pickle.dump', 'pickle.dump', (['clf', 'fid'], {}), '(clf, fid)\n', (22805, 22815), False, 'import pickle\n'), ((22874, 22899), 'json.dump', 'json.dump', (['clfinputs', 'fid'], {}), '(clfinputs, fid)\n', (22883, 22899), False, 'import json\n'), ((19412, 19431), 
'numpy.mean', 'np.mean', (['score_vals'], {}), '(score_vals)\n', (19419, 19431), True, 'import numpy as np\n'), ((19477, 19495), 'numpy.std', 'np.std', (['score_vals'], {}), '(score_vals)\n', (19483, 19495), True, 'import numpy as np\n'), ((20743, 20765), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (20763, 20765), False, 'import traceback\n'), ((21310, 21332), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (21330, 21332), False, 'import traceback\n'), ((22138, 22160), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (22158, 22160), False, 'import traceback\n'), ((23093, 23115), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (23113, 23115), False, 'import traceback\n'), ((18558, 18580), 'numpy.mean', 'np.mean', (['scores[score]'], {}), '(scores[score])\n', (18565, 18580), True, 'import numpy as np\n'), ((23872, 23894), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (23892, 23894), False, 'import traceback\n')] |
import scanpy as sc
import pandas as pd
import numpy as np
import scipy
import os
from anndata import AnnData,read_csv,read_text,read_mtx
from scipy.sparse import issparse
def prefilter_cells(adata,min_counts=None,max_counts=None,min_genes=200,max_genes=None):
if min_genes is None and min_counts is None and max_genes is None and max_counts is None:
raise ValueError('Provide one of min_counts, min_genes, max_counts or max_genes.')
id_tmp=np.asarray([True]*adata.shape[0],dtype=bool)
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,min_genes=min_genes)[0]) if min_genes is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,max_genes=max_genes)[0]) if max_genes is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,min_counts=min_counts)[0]) if min_counts is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,max_counts=max_counts)[0]) if max_counts is not None else id_tmp
adata._inplace_subset_obs(id_tmp)
adata.raw=sc.pp.log1p(adata,copy=True) #check the rowname
print("the var_names of adata.raw: adata.raw.var_names.is_unique=:",adata.raw.var_names.is_unique)
def prefilter_genes(adata,min_counts=None,max_counts=None,min_cells=10,max_cells=None):
if min_cells is None and min_counts is None and max_cells is None and max_counts is None:
raise ValueError('Provide one of min_counts, min_genes, max_counts or max_genes.')
id_tmp=np.asarray([True]*adata.shape[1],dtype=bool)
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,min_cells=min_cells)[0]) if min_cells is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,max_cells=max_cells)[0]) if max_cells is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,min_counts=min_counts)[0]) if min_counts is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,max_counts=max_counts)[0]) if max_counts is not None else id_tmp
adata._inplace_subset_var(id_tmp)
def prefilter_specialgenes(adata,Gene1Pattern="ERCC",Gene2Pattern="MT-"):
id_tmp1=np.asarray([not str(name).startswith(Gene1Pattern) for name in adata.var_names],dtype=bool)
id_tmp2=np.asarray([not str(name).startswith(Gene2Pattern) for name in adata.var_names],dtype=bool)
id_tmp=np.logical_and(id_tmp1,id_tmp2)
adata._inplace_subset_var(id_tmp)
def relative_func(expres):
#expres: an array counts expression for a gene
maxd = np.max(expres) - np.min(expres)
min_exp=np.min(expres)
rexpr = (expres - min_exp)/maxd
return rexpr
def plot_relative_exp(input_adata, gene, x_name, y_name,color,use_raw=False, spot_size=200000):
adata=input_adata.copy()
if use_raw:
X=adata.raw.X
else:
X=adata.X
if issparse(X):
X=pd.DataFrame(X.A)
else:
X=pd.DataFrame(X)
X.index=adata.obs.index
X.columns=adata.var.index
rexpr=relative_func(X.loc[:,gene])
adata.obs["rexpr"]=rexpr
fig=sc.pl.scatter(adata,x=x_name,y=y_name,color="rexpr",title=gene+"_rexpr",color_map=color,show=False,size=spot_size/adata.shape[0])
return fig
def plot_log_exp(input_adata, gene, x_name, y_name,color,use_raw=False):
adata=input_adata.copy()
if use_raw:
X=adata.X
else:
X=adata.raw.X
if issparse(X):
X=pd.DataFrame(X.A)
else:
X=pd.DataFrame(X)
X.index=adata.obs.index
X.columns=adata.var.index
adata.obs["log"]=np.log((X.loc[:,gene]+1).tolist())
fig=sc.pl.scatter(adata,x=x_name,y=y_name,color="log",title=gene+"_log",color_map=color,show=False,size=200000/adata.shape[0])
return fig
def refine_clusters(pred, resize_height, resize_width, threshold, radius):
pixel_num=pd.Series(pred).value_counts()
clusters=pixel_num.index.tolist()
reorder_map={}
for i in range(pixel_num.shape[0]):
reorder_map[clusters[i]]=i
pred_reordered=pd.Series(pred).replace(reorder_map).to_numpy()
pixel_num=pd.Series(pred_reordered).value_counts()
# Number of clusters
nLabels = len(np.unique(pred_reordered))
# Number of main clusters
mainLabels=(pd.Series(pred_reordered).value_counts()>=threshold).sum()
#------------- Refine clusters ---------------------
main_clusters=pixel_num.index[pixel_num>=threshold].tolist()
minor_clusters=pixel_num.index[pixel_num<threshold].tolist()
pred_reordered_img = pred_reordered.reshape( (resize_height, resize_width))
max_x, max_y=resize_width, resize_height
replace_map={}
for i in minor_clusters:
nbs=[]
xy=np.where(pred_reordered_img==i)
for j in range(len(xy[0])):
x, y=xy[0][j], xy[1][j]
nbs=nbs+pred_reordered_img[max(0,x-radius):min(max_x,x+radius+1),max(0,y-radius):min(max_y,y+radius+1)].flatten().tolist()
nbs_num=pd.Series(nbs).value_counts()
if sum(nbs_num.index.isin(main_clusters))>0:
replace_map[i]=nbs_num.index[ nbs_num.index.isin(main_clusters) ][ 0 ]
pred_refined=pd.Series(pred_reordered).replace(replace_map).to_numpy()
return pred_refined
| [
"pandas.Series",
"numpy.unique",
"numpy.logical_and",
"numpy.where",
"numpy.asarray",
"scanpy.pp.log1p",
"scipy.sparse.issparse",
"numpy.max",
"scanpy.pp.filter_cells",
"scanpy.pl.scatter",
"scanpy.pp.filter_genes",
"numpy.min",
"pandas.DataFrame"
] | [((458, 505), 'numpy.asarray', 'np.asarray', (['([True] * adata.shape[0])'], {'dtype': 'bool'}), '([True] * adata.shape[0], dtype=bool)\n', (468, 505), True, 'import numpy as np\n'), ((1053, 1082), 'scanpy.pp.log1p', 'sc.pp.log1p', (['adata'], {'copy': '(True)'}), '(adata, copy=True)\n', (1064, 1082), True, 'import scanpy as sc\n'), ((1494, 1541), 'numpy.asarray', 'np.asarray', (['([True] * adata.shape[1])'], {'dtype': 'bool'}), '([True] * adata.shape[1], dtype=bool)\n', (1504, 1541), True, 'import numpy as np\n'), ((2370, 2402), 'numpy.logical_and', 'np.logical_and', (['id_tmp1', 'id_tmp2'], {}), '(id_tmp1, id_tmp2)\n', (2384, 2402), True, 'import numpy as np\n'), ((2574, 2588), 'numpy.min', 'np.min', (['expres'], {}), '(expres)\n', (2580, 2588), True, 'import numpy as np\n'), ((2841, 2852), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (2849, 2852), False, 'from scipy.sparse import issparse\n'), ((3052, 3196), 'scanpy.pl.scatter', 'sc.pl.scatter', (['adata'], {'x': 'x_name', 'y': 'y_name', 'color': '"""rexpr"""', 'title': "(gene + '_rexpr')", 'color_map': 'color', 'show': '(False)', 'size': '(spot_size / adata.shape[0])'}), "(adata, x=x_name, y=y_name, color='rexpr', title=gene +\n '_rexpr', color_map=color, show=False, size=spot_size / adata.shape[0])\n", (3065, 3196), True, 'import scanpy as sc\n'), ((3373, 3384), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (3381, 3384), False, 'from scipy.sparse import issparse\n'), ((3572, 3709), 'scanpy.pl.scatter', 'sc.pl.scatter', (['adata'], {'x': 'x_name', 'y': 'y_name', 'color': '"""log"""', 'title': "(gene + '_log')", 'color_map': 'color', 'show': '(False)', 'size': '(200000 / adata.shape[0])'}), "(adata, x=x_name, y=y_name, color='log', title=gene + '_log',\n color_map=color, show=False, size=200000 / adata.shape[0])\n", (3585, 3709), True, 'import scanpy as sc\n'), ((2530, 2544), 'numpy.max', 'np.max', (['expres'], {}), '(expres)\n', (2536, 2544), True, 'import numpy as np\n'), ((2547, 
2561), 'numpy.min', 'np.min', (['expres'], {}), '(expres)\n', (2553, 2561), True, 'import numpy as np\n'), ((2864, 2881), 'pandas.DataFrame', 'pd.DataFrame', (['X.A'], {}), '(X.A)\n', (2876, 2881), True, 'import pandas as pd\n'), ((2902, 2917), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (2914, 2917), True, 'import pandas as pd\n'), ((3396, 3413), 'pandas.DataFrame', 'pd.DataFrame', (['X.A'], {}), '(X.A)\n', (3408, 3413), True, 'import pandas as pd\n'), ((3434, 3449), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (3446, 3449), True, 'import pandas as pd\n'), ((4128, 4153), 'numpy.unique', 'np.unique', (['pred_reordered'], {}), '(pred_reordered)\n', (4137, 4153), True, 'import numpy as np\n'), ((4646, 4679), 'numpy.where', 'np.where', (['(pred_reordered_img == i)'], {}), '(pred_reordered_img == i)\n', (4654, 4679), True, 'import numpy as np\n'), ((3800, 3815), 'pandas.Series', 'pd.Series', (['pred'], {}), '(pred)\n', (3809, 3815), True, 'import pandas as pd\n'), ((4044, 4069), 'pandas.Series', 'pd.Series', (['pred_reordered'], {}), '(pred_reordered)\n', (4053, 4069), True, 'import pandas as pd\n'), ((536, 584), 'scanpy.pp.filter_cells', 'sc.pp.filter_cells', (['adata.X'], {'min_genes': 'min_genes'}), '(adata.X, min_genes=min_genes)\n', (554, 584), True, 'import scanpy as sc\n'), ((659, 707), 'scanpy.pp.filter_cells', 'sc.pp.filter_cells', (['adata.X'], {'max_genes': 'max_genes'}), '(adata.X, max_genes=max_genes)\n', (677, 707), True, 'import scanpy as sc\n'), ((782, 832), 'scanpy.pp.filter_cells', 'sc.pp.filter_cells', (['adata.X'], {'min_counts': 'min_counts'}), '(adata.X, min_counts=min_counts)\n', (800, 832), True, 'import scanpy as sc\n'), ((908, 958), 'scanpy.pp.filter_cells', 'sc.pp.filter_cells', (['adata.X'], {'max_counts': 'max_counts'}), '(adata.X, max_counts=max_counts)\n', (926, 958), True, 'import scanpy as sc\n'), ((1572, 1620), 'scanpy.pp.filter_genes', 'sc.pp.filter_genes', (['adata.X'], {'min_cells': 'min_cells'}), 
'(adata.X, min_cells=min_cells)\n', (1590, 1620), True, 'import scanpy as sc\n'), ((1695, 1743), 'scanpy.pp.filter_genes', 'sc.pp.filter_genes', (['adata.X'], {'max_cells': 'max_cells'}), '(adata.X, max_cells=max_cells)\n', (1713, 1743), True, 'import scanpy as sc\n'), ((1818, 1868), 'scanpy.pp.filter_genes', 'sc.pp.filter_genes', (['adata.X'], {'min_counts': 'min_counts'}), '(adata.X, min_counts=min_counts)\n', (1836, 1868), True, 'import scanpy as sc\n'), ((1944, 1994), 'scanpy.pp.filter_genes', 'sc.pp.filter_genes', (['adata.X'], {'max_counts': 'max_counts'}), '(adata.X, max_counts=max_counts)\n', (1962, 1994), True, 'import scanpy as sc\n'), ((3982, 3997), 'pandas.Series', 'pd.Series', (['pred'], {}), '(pred)\n', (3991, 3997), True, 'import pandas as pd\n'), ((4905, 4919), 'pandas.Series', 'pd.Series', (['nbs'], {}), '(nbs)\n', (4914, 4919), True, 'import pandas as pd\n'), ((5096, 5121), 'pandas.Series', 'pd.Series', (['pred_reordered'], {}), '(pred_reordered)\n', (5105, 5121), True, 'import pandas as pd\n'), ((4201, 4226), 'pandas.Series', 'pd.Series', (['pred_reordered'], {}), '(pred_reordered)\n', (4210, 4226), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: py:light,ipynb
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.5
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# # Topography
#
# This is a short demo of the `pysoilmap.features` module -
# which allows deriving some simple topographic features.
#
# ## Setup
# +
import ee
import matplotlib.pyplot as plt
import numpy as np
import pysoilmap.ee as pee
from pysoilmap.features import Topography, diff_gauss
from pysoilmap.plotting import add_colorbar
# -
# We will download a DEM via the Google Earth Engine API.
# For this, you will have to authenticate with a google account here:
pee.initialize()
# + [markdown] jp-MarkdownHeadingCollapsed=true tags=[]
# The SRTM data is in WGS84 (degree). We want a coordinate system in meters to
# get meaningful units in derived quantities.
#
# So let's request the data in 3-degree Gauss-Kruger zone 3, and pick a Region
# around Tübingen:
# -
crs = 'epsg:31467'
xmid = 3_499_159
ymid = 5_371_552
xscale = yscale = 90
xdim = 100
ydim = 100
# 100 x 100 pixels at 90 m resolution -> a 9 km x 9 km tile centred on (xmid, ymid).
xmin = xmid - xdim / 2 * xscale
xmax = xmid + xdim / 2 * xscale
ymin = ymid - ydim / 2 * yscale
ymax = ymid + ydim / 2 * yscale
# The `transform` defines how pixel coordinates are calculated from the matrix
# indices. We have to set a negative `yscale` and set the offset to `ymax` in
# order to have the `(0, 0)` pixel as the top left corner:
# [xScale, xShearing, xTranslation, yShearing, yScale, yTranslation]
transform = [xscale, 0, xmin, 0, -yscale, ymax]
# Now download DEM from SRTM 30m dataset:
srtm = ee.Image("USGS/SRTMGL1_003")
dem = pee.download_image(
    srtm,
    band='elevation',
    crs=crs,
    transform=transform,
    xdim=xdim,
    ydim=ydim,
)
# In order to download bigger images (>= 50 MB) you would have to export
# them to your google drive first, and then manually download from there.
# The export can be started as follows:
# NOTE: deliberately disabled; flip the condition to True to run the export.
if False:
    task = ee.batch.Export.image.toDrive(srtm, **{
        'description': 'DEM',
        'crs': crs,
        'dimensions': [xdim, ydim],
        'crsTransform': transform,
    })
    task.start()
    task.status()
# Define a function for plotting multiple variables at once:
def plot_maps(*images, **kwargs):
    """Plot a grid of maps, one row per dict of ``title -> 2D array``.

    Extra keyword arguments are forwarded to ``Axes.imshow``.  Relies on
    the module-level extent variables (xmin/xmax/ymin/ymax), in meters.
    """
    # Map extent in km.
    extent = np.array([0, xmax - xmin, 0, ymax - ymin]) / 1000
    n_rows = len(images)
    n_cols = len(images[0])
    fig, axes = plt.subplots(n_rows, n_cols, squeeze=False,
                             figsize=(n_cols * 3, n_rows * 3))
    for row_idx, row in enumerate(images):
        for col_idx, (title, image) in enumerate(row.items()):
            panel = axes[row_idx, col_idx]
            panel.set_title(title)
            panel.imshow(image, extent=extent, **kwargs)
            add_colorbar(panel, size=0.1, pad=0.07)
            # Axis labels only on the outer edges of the grid.
            if row_idx == n_rows - 1:
                panel.set_xlabel('x [km]')
            if col_idx == 0:
                panel.set_ylabel('y [km]')
    plt.tight_layout()
plot_maps({'Elevation': dem})
# ## Features
#
# By default `Topography` calculates spatial derivatives using central
# differencing:
topo_0 = Topography(
    dem,
    cellsize=(xscale, yscale),
    crs=crs,
    transform=transform,
)
# Alternatively, spatial derivatives can be calculated using a Gaussian
# derivative filter. This corresponds to smoothing the DEM with a Gaussian
# filter, and then calculating the derivative. It can be understood as
# calculating the derivative at a given lengthscale:
topo_2 = Topography(
    dem,
    cellsize=(xscale, yscale),
    crs=crs,
    transform=transform,
    diff=diff_gauss,
    sigma=2,
)
topo_5 = Topography(
    dem,
    cellsize=(xscale, yscale),
    crs=crs,
    transform=transform,
    diff=diff_gauss,
    sigma=5,
)
# One row of plots per variant: central differences, sigma=2, sigma=5.
topos = [topo_0, topo_2, topo_5]
# ### Slope (tangent)
plot_maps(*[{
    "slope_x": topo.slope_x(),
    "slope_y": topo.slope_y(),
    "slope": topo.slope(),
} for topo in topos])
# ### Slope (angle)
# Converted from radians to degrees for display.
plot_maps(*[{
    "slope_angle_x": topo.slope_angle_x() * 180 / np.pi,
    "slope_angle_y": topo.slope_angle_y() * 180 / np.pi,
    "slope_angle": topo.slope_angle() * 180 / np.pi,
} for topo in topos])
# ### Slope (sine)
plot_maps(*[{
    "verticality_x": topo.verticality_x(),
    "verticality_y": topo.verticality_y(),
    "verticality": topo.verticality(),
} for topo in topos])
# ### Curvature
plot_maps(*[{
    "curvature_x": topo.curvature_x(),
    "curvature_y": topo.curvature_y(),
    "curvature": topo.curvature(),
} for topo in topos])
plot_maps(*[{
    "tang_curvature": topo.tang_curvature(),
    "plan_curvature": topo.plan_curvature(),
    "prof_curvature": topo.prof_curvature(),
} for topo in topos])
# ### Aspect
# + tags=[]
plot_maps(*[{
    "aspect": topo.aspect(),
    "eastness": topo.eastness(),
    "northness": topo.northness(),
} for topo in topos])
# -
# ### Irradiation
plot_maps(*[{
    "sun_exposure": topo.sun_exposure(),
    "rad_angle": topo.rad_angle(),
} for topo in topos])
| [
"ee.Image",
"pysoilmap.features.Topography",
"ee.batch.Export.image.toDrive",
"numpy.array",
"pysoilmap.plotting.add_colorbar",
"matplotlib.pyplot.tight_layout",
"pysoilmap.ee.initialize",
"pysoilmap.ee.download_image",
"matplotlib.pyplot.subplots"
] | [((829, 845), 'pysoilmap.ee.initialize', 'pee.initialize', ([], {}), '()\n', (843, 845), True, 'import pysoilmap.ee as pee\n'), ((1743, 1771), 'ee.Image', 'ee.Image', (['"""USGS/SRTMGL1_003"""'], {}), "('USGS/SRTMGL1_003')\n", (1751, 1771), False, 'import ee\n'), ((1778, 1876), 'pysoilmap.ee.download_image', 'pee.download_image', (['srtm'], {'band': '"""elevation"""', 'crs': 'crs', 'transform': 'transform', 'xdim': 'xdim', 'ydim': 'ydim'}), "(srtm, band='elevation', crs=crs, transform=transform,\n xdim=xdim, ydim=ydim)\n", (1796, 1876), True, 'import pysoilmap.ee as pee\n'), ((3193, 3265), 'pysoilmap.features.Topography', 'Topography', (['dem'], {'cellsize': '(xscale, yscale)', 'crs': 'crs', 'transform': 'transform'}), '(dem, cellsize=(xscale, yscale), crs=crs, transform=transform)\n', (3203, 3265), False, 'from pysoilmap.features import Topography, diff_gauss\n'), ((3567, 3669), 'pysoilmap.features.Topography', 'Topography', (['dem'], {'cellsize': '(xscale, yscale)', 'crs': 'crs', 'transform': 'transform', 'diff': 'diff_gauss', 'sigma': '(2)'}), '(dem, cellsize=(xscale, yscale), crs=crs, transform=transform,\n diff=diff_gauss, sigma=2)\n', (3577, 3669), False, 'from pysoilmap.features import Topography, diff_gauss\n'), ((3703, 3805), 'pysoilmap.features.Topography', 'Topography', (['dem'], {'cellsize': '(xscale, yscale)', 'crs': 'crs', 'transform': 'transform', 'diff': 'diff_gauss', 'sigma': '(5)'}), '(dem, cellsize=(xscale, yscale), crs=crs, transform=transform,\n diff=diff_gauss, sigma=5)\n', (3713, 3805), False, 'from pysoilmap.features import Topography, diff_gauss\n'), ((2110, 2242), 'ee.batch.Export.image.toDrive', 'ee.batch.Export.image.toDrive', (['srtm'], {}), "(srtm, **{'description': 'DEM', 'crs': crs,\n 'dimensions': [xdim, ydim], 'crsTransform': transform})\n", (2139, 2242), False, 'import ee\n'), ((2539, 2608), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'squeeze': '(False)', 'figsize': '(cols * 3, rows * 3)'}), '(rows, 
cols, squeeze=False, figsize=(cols * 3, rows * 3))\n', (2551, 2608), True, 'import matplotlib.pyplot as plt\n'), ((3028, 3046), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3044, 3046), True, 'import matplotlib.pyplot as plt\n'), ((2424, 2466), 'numpy.array', 'np.array', (['[0, xmax - xmin, 0, ymax - ymin]'], {}), '([0, xmax - xmin, 0, ymax - ymin])\n', (2432, 2466), True, 'import numpy as np\n'), ((2854, 2890), 'pysoilmap.plotting.add_colorbar', 'add_colorbar', (['ax'], {'size': '(0.1)', 'pad': '(0.07)'}), '(ax, size=0.1, pad=0.07)\n', (2866, 2890), False, 'from pysoilmap.plotting import add_colorbar\n')] |
import numpy as np
import xarray as xr
from wavespectra.core.attributes import attrs, set_spec_attributes
def spread(dp_matrix, dspr_matrix, dirs):
    """Generic cosine-power directional spreading function.

    Args:
        dp_matrix: peak direction(s) in degrees (broadcastable over dirs).
        dspr_matrix: spreading exponent(s).
        dirs: equally spaced direction bins in degrees.

    Returns:
        G1: spreading weights, normalised so that \\int{G1 d\\theta}=1.
    """
    dir_row = np.array(dirs).reshape((1, -1))
    # Half-angle terms after converting from nautical convention.
    half_dirs = 0.5 * np.deg2rad(270.0 - dir_row)
    half_peak = 0.5 * np.deg2rad(270.0 - dp_matrix)
    # cos(a)cos(b) + sin(a)sin(b) == cos(a - b): peaks where dir == dp.
    cos_term = np.cos(half_dirs) * np.cos(half_peak)
    sin_term = np.sin(half_dirs) * np.sin(half_peak)
    base = np.maximum(cos_term + sin_term, 0.0)
    G1 = base ** (2.0 * dspr_matrix)
    ddir = abs(dirs[1] - dirs[0])
    # Normalise each spectrum so the integral over direction equals one.
    G1 /= np.expand_dims(G1.sum(axis=-1) * ddir, axis=-1)
    return G1
def arrange_inputs(*args):
    """Validate that all inputs share a shape and append freq/dir axes.

    Scalars are broadcast to the shape of the first argument; any other
    shape mismatch raises.  Two trailing singleton dimensions (frequency,
    direction) are appended to every argument.
    """
    ref_shape = np.array(args[0]).shape
    arranged = []
    for value in args:
        arr = np.array(value)
        if arr.shape == () and ref_shape != ():
            # Promote a scalar to the reference shape.
            arr = value * np.ones(ref_shape)
        elif arr.shape != ref_shape:
            raise Exception("Input shapes must be the same")
        arranged.append(arr[..., np.newaxis, np.newaxis])
    return arranged
def make_dataset(spec, freqs, dirs, coordinates=()):
    """Package a spectral matrix into an xarray Dataset.

    Args:
        spec: spectral energy array shaped ``leading dims + (freq, dir)``.
        freqs: frequency coordinate values.
        dirs: direction coordinate values.
        coordinates: optional sequence of extra ``(name, values)`` pairs
            for leading dimensions (e.g. time, site).  The default is an
            immutable tuple rather than a mutable list literal.

    Returns:
        dset: SpecDset object with standard spectral attributes set.
    """
    # Frequency and direction are always the two trailing dimensions.
    coords = tuple(coordinates) + ((attrs.FREQNAME, freqs), (attrs.DIRNAME, dirs))
    dimensions = tuple(c[0] for c in coords)
    dset = xr.DataArray(
        data=spec, coords=coords, dims=dimensions, name=attrs.SPECNAME
    ).to_dataset()
    set_spec_attributes(dset)
    return dset
def check_coordinates(param, coordinates):
    """Check that ``coordinates`` is consistent with ``param``'s shape.

    Args:
        param: array-like parameter to validate.
        coordinates: sequence of ``(name, values)`` pairs, one per
            dimension of ``param``.

    Raises:
        Exception: if the number of coordinates differs from the number of
            dimensions, or any coordinate's length differs from the
            corresponding dimension size.
    """
    pshape = np.array(param).shape
    if len(pshape) != len(coordinates):
        raise Exception("Incorrect number of coordinates for parameter")
    for idim, dim in enumerate(pshape):
        if dim != len(coordinates[idim][1]):
            # Bug fix: the message previously interpolated the dimension
            # size where it claims to report the coordinate position.
            raise Exception(
                "Dimension of coordinate %s at position %d does not match parameter"
                % (coordinates[idim][0], idim)
            )
| [
"numpy.ones",
"wavespectra.core.attributes.set_spec_attributes",
"numpy.array",
"numpy.deg2rad",
"numpy.cos",
"xarray.DataArray",
"numpy.sin"
] | [((1738, 1763), 'wavespectra.core.attributes.set_spec_attributes', 'set_spec_attributes', (['dset'], {}), '(dset)\n', (1757, 1763), False, 'from wavespectra.core.attributes import attrs, set_spec_attributes\n'), ((898, 915), 'numpy.array', 'np.array', (['args[0]'], {}), '(args[0])\n', (906, 915), True, 'import numpy as np\n'), ((958, 971), 'numpy.array', 'np.array', (['arg'], {}), '(arg)\n', (966, 971), True, 'import numpy as np\n'), ((1950, 1965), 'numpy.array', 'np.array', (['param'], {}), '(param)\n', (1958, 1965), True, 'import numpy as np\n'), ((366, 380), 'numpy.array', 'np.array', (['dirs'], {}), '(dirs)\n', (374, 380), True, 'import numpy as np\n'), ((430, 445), 'numpy.array', 'np.array', (['adirs'], {}), '(adirs)\n', (438, 445), True, 'import numpy as np\n'), ((470, 499), 'numpy.deg2rad', 'np.deg2rad', (['(270.0 - dp_matrix)'], {}), '(270.0 - dp_matrix)\n', (480, 499), True, 'import numpy as np\n'), ((524, 553), 'numpy.deg2rad', 'np.deg2rad', (['(270.0 - dp_matrix)'], {}), '(270.0 - dp_matrix)\n', (534, 553), True, 'import numpy as np\n'), ((1630, 1706), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'spec', 'coords': 'coords', 'dims': 'dimensions', 'name': 'attrs.SPECNAME'}), '(data=spec, coords=coords, dims=dimensions, name=attrs.SPECNAME)\n', (1642, 1706), True, 'import xarray as xr\n'), ((574, 594), 'numpy.cos', 'np.cos', (['(0.5 * pidirs)'], {}), '(0.5 * pidirs)\n', (580, 594), True, 'import numpy as np\n'), ((603, 623), 'numpy.sin', 'np.sin', (['(0.5 * pidirs)'], {}), '(0.5 * pidirs)\n', (609, 623), True, 'import numpy as np\n'), ((1077, 1092), 'numpy.ones', 'np.ones', (['shape0'], {}), '(shape0)\n', (1084, 1092), True, 'import numpy as np\n')] |
"""
=========================================
prop_cycle property markevery in rcParams
=========================================
This example demonstrates a working solution to issue #8576, providing full
support of the markevery property for axes.prop_cycle assignments through
rcParams. Makes use of the same list of markevery cases from the
:doc:`markevery demo
</gallery/lines_bars_and_markers/markevery_demo>`.
Renders a plot with shifted-sine curves along each column with
a unique markevery value for each sine curve.
"""
from cycler import cycler
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# Define a list of markevery cases and color cases to plot
cases = [None,
8,
(30, 8),
[16, 24, 30],
[0, -1],
slice(100, 200, 3),
0.1,
0.3,
1.5,
(0.0, 0.1),
(0.45, 0.1)]
colors = ['#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2',
'#7f7f7f',
'#bcbd22',
'#17becf',
'#1a55FF']
# Configure rcParams axes.prop_cycle to simultaneously cycle cases and colors.
mpl.rcParams['axes.prop_cycle'] = cycler(markevery=cases, color=colors)
# Create data points and offsets
x = np.linspace(0, 2 * np.pi)
offsets = np.linspace(0, 2 * np.pi, 11, endpoint=False)
yy = np.transpose([np.sin(x + phi) for phi in offsets])
# Set the plot curve with markers and a title
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.6, 0.75])
for i in range(len(cases)):
ax.plot(yy[:, i], marker='o', label=str(cases[i]))
ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.title('Support for axes.prop_cycle cycler with markevery')
plt.show()
| [
"numpy.linspace",
"matplotlib.pyplot.figure",
"cycler.cycler",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((1240, 1277), 'cycler.cycler', 'cycler', ([], {'markevery': 'cases', 'color': 'colors'}), '(markevery=cases, color=colors)\n', (1246, 1277), False, 'from cycler import cycler\n'), ((1316, 1341), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (1327, 1341), True, 'import numpy as np\n'), ((1352, 1397), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(11)'], {'endpoint': '(False)'}), '(0, 2 * np.pi, 11, endpoint=False)\n', (1363, 1397), True, 'import numpy as np\n'), ((1507, 1519), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1517, 1519), True, 'import matplotlib.pyplot as plt\n'), ((1722, 1784), 'matplotlib.pyplot.title', 'plt.title', (['"""Support for axes.prop_cycle cycler with markevery"""'], {}), "('Support for axes.prop_cycle cycler with markevery')\n", (1731, 1784), True, 'import matplotlib.pyplot as plt\n'), ((1786, 1796), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1794, 1796), True, 'import matplotlib.pyplot as plt\n'), ((1417, 1432), 'numpy.sin', 'np.sin', (['(x + phi)'], {}), '(x + phi)\n', (1423, 1432), True, 'import numpy as np\n')] |
"""
This program will create all the necessary input files to run pnp transport simulations with combinatorial variations
on the values of the surface concentration and electric field.
The variations on the relevant parameters are described in pidsim.parameter_span.one_factor_at_a_time documentation.
These variations are submitted through a csv data file.
The rest of the parameters are assumed to be constant over all the simulations.
Besides the input files, the code will generate a database as a csv file with all the simulations to be run and the
parameters used for each simulation.
@author: <<EMAIL>>
"""
import numpy as np
import pidsim.parameter_span as pspan
# The path to the csv file with the conditions of the different variations
path_to_output = r'G:\My Drive\Research\PVRD1\Manuscripts\Device_Simulations_draft\simulations\sigma_efield_asu_20200820'
# Simulation time in h
simulation_time_h = 96
# Temperature in °C
temperature_c = 85
# Relative permittivity of SiNx
er = 7.0
# Thickness of the SiNx um
thickness_sin = 80E-3
# Modeled thickness of Si um
thickness_si = 1.0
# Number of time steps
t_steps = 720
# Number of elements in the sin layer
x_points_sin = 100
# number of elements in the Si layer
x_points_si = 200
# Background concentration in cm^-3
cb = 1E-20
# The rate of ingress at the surface (1/s)
zeta = 1E-3
# The surface mass transfer coefficient at the SiNx/Si interface in cm/s
h = 1E-10
# The segregation coefficient at the SiNx/Si interface
segregation_coefficient = 1.
# The diffusion coefficient of Na in the stacking fault (cm^2/s)
d_sf = 1E-14
e_fields = np.array([1E-2, 1E-1, 1.])
sigmas = np.array([1E10, 1E11, 1E12])
if __name__ == '__main__':
pspan.sigma_efield_variations(
sigmas=sigmas, efields=e_fields, out_dir=path_to_output, zeta=zeta, simulation_time=simulation_time_h * 3600.,
dsf=d_sf, h=h, m=segregation_coefficient, temperature_c=temperature_c, er=7.0, thickness_sin=thickness_sin,
thickness_si=thickness_si, t_steps=t_steps, x_points_sin=x_points_sin, base_concentration=cb
)
| [
"numpy.array",
"pidsim.parameter_span.sigma_efield_variations"
] | [((1605, 1631), 'numpy.array', 'np.array', (['[0.01, 0.1, 1.0]'], {}), '([0.01, 0.1, 1.0])\n', (1613, 1631), True, 'import numpy as np\n'), ((1641, 1699), 'numpy.array', 'np.array', (['[10000000000.0, 100000000000.0, 1000000000000.0]'], {}), '([10000000000.0, 100000000000.0, 1000000000000.0])\n', (1649, 1699), True, 'import numpy as np\n'), ((1702, 2063), 'pidsim.parameter_span.sigma_efield_variations', 'pspan.sigma_efield_variations', ([], {'sigmas': 'sigmas', 'efields': 'e_fields', 'out_dir': 'path_to_output', 'zeta': 'zeta', 'simulation_time': '(simulation_time_h * 3600.0)', 'dsf': 'd_sf', 'h': 'h', 'm': 'segregation_coefficient', 'temperature_c': 'temperature_c', 'er': '(7.0)', 'thickness_sin': 'thickness_sin', 'thickness_si': 'thickness_si', 't_steps': 't_steps', 'x_points_sin': 'x_points_sin', 'base_concentration': 'cb'}), '(sigmas=sigmas, efields=e_fields, out_dir=\n path_to_output, zeta=zeta, simulation_time=simulation_time_h * 3600.0,\n dsf=d_sf, h=h, m=segregation_coefficient, temperature_c=temperature_c,\n er=7.0, thickness_sin=thickness_sin, thickness_si=thickness_si, t_steps\n =t_steps, x_points_sin=x_points_sin, base_concentration=cb)\n', (1731, 2063), True, 'import pidsim.parameter_span as pspan\n')] |
#!/usr/bin/env python
import collections
import time
import numpy as np
import requests
from astropy.table import Table
from astropy.time import Time
from chandratime import convert_chandra_time
# Globals
# URLs for 6 hour and 7 day JSON files
GOES_URL_ROOT = 'https://services.swpc.noaa.gov/json/goes/primary/'
GOES_6HOUR = f'{GOES_URL_ROOT}/differential-protons-6-hour.json'
GOES_7DAY = f'{GOES_URL_ROOT}/differential-protons-7-day.json'
# Bad or missing data value
BAD_VALUE = -1.0e5
# dTypes from the Replan Central hrc_shield.h5 file. See more here: https://github.com/sot/arc/blob/master/get_hrc.py
descrs = np.dtype([('year', '<i8'), ('month', '<i8'), ('dom', '<i8'), ('hhmm', '<i8'), ('mjd', '<i8'), ('secs', '<i8'), ('p1', '<f8'), ('p2', '<f8'), ('p3', '<f8'), ('p4', '<f8'), ('p5', '<f8'), ('p6', '<f8'), ('p7',
'<f8'), ('p8', '<f8'), ('p9', '<f8'), ('p10', '<f8'), ('p11', '<f8'), ('hrc_shield', '<f8'), ('time', '<f8'), ('p2a', '<f8'), ('p2b', '<f8'), ('p8a', '<f8'), ('p8b', '<f8'), ('p8c', '<f8'), ('satellite', '<i8')])
def get_json_data(url):
    """Fetch a JSON file from *url* (with retries) and return an astropy Table.

    The request is attempted up to three times, sleeping five seconds
    between failures.

    Args:
        url: URL of the JSON resource.

    Returns:
        astropy Table built from the decoded JSON payload.

    Raises:
        Exception: the last request/parse error if all attempts fail.
    """
    last_err = None
    for _ in range(3):  # try three times
        try:
            json_file = requests.get(url)
            data = json_file.json()
            break
        except Exception as err:
            last_err = err
            time.sleep(5)
    else:
        # Bug fix: previously this fell through with `data` unbound, which
        # crashed below with an UnboundLocalError instead of the real cause.
        print(f'Warning: failed to open URL {url}: {last_err}')
        raise last_err
    return Table(data)
def format_proton_data(dat, descrs):
    """
    Reshape raw GOES JSON records into the legacy hrc_shield.h5 layout,
    including columns that the old h5 file format wanted.

    :param dat: astropy Table of raw rows with 'time_tag', 'channel',
        'flux' and 'satellite' columns.
    :param descrs: numpy dtype describing the output record array.
    :return: tuple (record array, boolean mask of rows with bad HRC inputs).
    """
    # Create a dictionary to capture the channel data for each time.
    # NOTE(review): flux is scaled by 1000 here — presumably a unit
    # conversion expected by the legacy format; confirm upstream.
    out = collections.defaultdict(dict)
    for row in dat:
        out[row['time_tag']][row['channel'].lower()] = row['flux'] * 1000
    # Reshape that data into a table with the channels as columns;
    # missing entries are filled with the BAD_VALUE sentinel.
    newdat = Table(list(out.values())).filled(BAD_VALUE)
    # Already in time order if dat rows in order
    newdat['time_tag'] = list(out.keys())
    # Assume the satellite is the same for all of the records of one dat/file
    newdat['satellite'] = dat['satellite'][0]
    # Add some time columns
    times = Time(newdat['time_tag'])
    newdat['time'] = times.cxcsec
    newdat['mjd'] = times.mjd.astype(int)
    # Seconds past midnight, rounded to the nearest whole second.
    newdat['secs'] = np.array(np.round((times.mjd - newdat['mjd']) * 86400,
                                      decimals=0)).astype(int)
    newdat['year'] = [t.year for t in times.datetime]
    newdat['month'] = [t.month for t in times.datetime]
    newdat['dom'] = [t.day for t in times.datetime]
    newdat['hhmm'] = np.array(
        [f"{t.hour}{t.minute:02}" for t in times.datetime]).astype(int)
    # Take the Table and make it into an ndarray with the supplied type
    arr = np.ndarray(len(newdat), dtype=descrs)
    for col in arr.dtype.names:
        # This gets any channels that were just missing altogether. Looks like p2 and p11 now
        if col not in newdat.colnames:
            arr[col] = BAD_VALUE
        else:
            arr[col] = newdat[col]
    # Calculate the hrc shield values using the numpy array and save into the array
    hrc_shield = calc_hrc_shield(arr)
    arr['hrc_shield'] = hrc_shield
    # Negative channel values indicate missing/bad telemetry.
    hrc_bad = (arr['p5'] < 0) | (arr['p6'] < 0) | (arr['p7'] < 0)
    arr['hrc_shield'][hrc_bad] = BAD_VALUE  # flag bad inputs
    return arr, hrc_bad
def calc_hrc_shield(dat):
    """Compute the HRC anti-coincidence shield proxy rate.

    Linear combination of GOES-16 differential proton channels p5, p6 and
    p7, fitted to the 2SHLDART shield rate around the Sep 2017 flare.
    Historical variants (for reference):
      - GOES < 16: (6000*p5 + 270000*p6 + 100000*p7) / 256.
      - GOES-16, used until April 2021: (6000*p5 + 270000*p7 + 100000*p9) / 256.
      - Original normalisation of the current fit used an offset of 4127.
    """
    terms = ((143, 'p5'), (64738, 'p6'), (162505, 'p7'))
    proxy = 4600
    for weight, channel in terms:
        proxy = proxy + weight * dat[channel]
    return proxy
def get_goes_proxy():
    """Fetch the 7-day GOES feed and return (times, HRC shield proxy rates).

    Used for HRCMonitor's shield plot; the times are converted with
    convert_chandra_time for PlotDate use.
    """
    # Download the raw 7-day JSON feed and reshape it into the legacy
    # hrc_shield.h5 record layout.
    raw = get_json_data(GOES_7DAY)
    parsed, _bad = format_proton_data(raw, descrs=descrs)
    goes_times = convert_chandra_time(parsed['time'])
    return goes_times, parsed['hrc_shield']
| [
"astropy.table.Table",
"requests.get",
"time.sleep",
"chandratime.convert_chandra_time",
"astropy.time.Time",
"numpy.array",
"collections.defaultdict",
"numpy.dtype",
"numpy.round"
] | [((621, 1064), 'numpy.dtype', 'np.dtype', (["[('year', '<i8'), ('month', '<i8'), ('dom', '<i8'), ('hhmm', '<i8'), ('mjd',\n '<i8'), ('secs', '<i8'), ('p1', '<f8'), ('p2', '<f8'), ('p3', '<f8'), (\n 'p4', '<f8'), ('p5', '<f8'), ('p6', '<f8'), ('p7', '<f8'), ('p8', '<f8'\n ), ('p9', '<f8'), ('p10', '<f8'), ('p11', '<f8'), ('hrc_shield', '<f8'),\n ('time', '<f8'), ('p2a', '<f8'), ('p2b', '<f8'), ('p8a', '<f8'), ('p8b',\n '<f8'), ('p8c', '<f8'), ('satellite', '<i8')]"], {}), "([('year', '<i8'), ('month', '<i8'), ('dom', '<i8'), ('hhmm', '<i8'\n ), ('mjd', '<i8'), ('secs', '<i8'), ('p1', '<f8'), ('p2', '<f8'), ('p3',\n '<f8'), ('p4', '<f8'), ('p5', '<f8'), ('p6', '<f8'), ('p7', '<f8'), (\n 'p8', '<f8'), ('p9', '<f8'), ('p10', '<f8'), ('p11', '<f8'), (\n 'hrc_shield', '<f8'), ('time', '<f8'), ('p2a', '<f8'), ('p2b', '<f8'),\n ('p8a', '<f8'), ('p8b', '<f8'), ('p8c', '<f8'), ('satellite', '<i8')])\n", (629, 1064), True, 'import numpy as np\n'), ((1793, 1804), 'astropy.table.Table', 'Table', (['data'], {}), '(data)\n', (1798, 1804), False, 'from astropy.table import Table\n'), ((2072, 2101), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (2095, 2101), False, 'import collections\n'), ((2577, 2601), 'astropy.time.Time', 'Time', (["newdat['time_tag']"], {}), "(newdat['time_tag'])\n", (2581, 2601), False, 'from astropy.time import Time\n'), ((4981, 5027), 'chandratime.convert_chandra_time', 'convert_chandra_time', (["parsed_goes_data['time']"], {}), "(parsed_goes_data['time'])\n", (5001, 5027), False, 'from chandratime import convert_chandra_time\n'), ((1451, 1468), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1463, 1468), False, 'import requests\n'), ((3001, 3061), 'numpy.array', 'np.array', (["[f'{t.hour}{t.minute:02}' for t in times.datetime]"], {}), "([f'{t.hour}{t.minute:02}' for t in times.datetime])\n", (3009, 3061), True, 'import numpy as np\n'), ((1595, 1608), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', 
(1605, 1608), False, 'import time\n'), ((2708, 2765), 'numpy.round', 'np.round', (["((times.mjd - newdat['mjd']) * 86400)"], {'decimals': '(0)'}), "((times.mjd - newdat['mjd']) * 86400, decimals=0)\n", (2716, 2765), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
ON = 255
OFF = 0
vals = [ON, OFF]
def randomGrid(N):
    """Return an N x N grid with ~20% of cells ON and the rest OFF."""
    flat = np.random.choice(vals, N * N, p=[0.2, 0.8])
    return flat.reshape(N, N)
def addGlider(i, j, grid):
    """Stamp a 3x3 glider pattern onto ``grid`` with top-left corner (i, j)."""
    pattern = np.zeros((3, 3), dtype=grid.dtype)
    # Live cells of the glider as (row, col) offsets.
    for r, c in ((0, 2), (1, 0), (1, 2), (2, 1), (2, 2)):
        pattern[r, c] = 255
    grid[i:i + 3, j:j + 3] = pattern
def addGosperGliderGun(i, j, grid):
    """Add a Gosper glider gun with its top-left corner at (i, j)."""
    gun = np.zeros(11 * 38).reshape(11, 38)
    # Left 2x2 block (columns 1-2).
    gun[5][1] = gun[5][2] = 255
    gun[6][1] = gun[6][2] = 255
    # Left structure (columns 11-18).
    gun[3][13] = gun[3][14] = 255
    gun[4][12] = gun[4][16] = 255
    gun[5][11] = gun[5][17] = 255
    gun[6][11] = gun[6][15] = gun[6][17] = gun[6][18] = 255
    gun[7][11] = gun[7][17] = 255
    gun[8][12] = gun[8][16] = 255
    gun[9][13] = gun[9][14] = 255
    # Right structure (columns 21-25).
    gun[1][25] = 255
    gun[2][23] = gun[2][25] = 255
    gun[3][21] = gun[3][22] = 255
    gun[4][21] = gun[4][22] = 255
    gun[5][21] = gun[5][22] = 255
    gun[6][23] = gun[6][25] = 255
    gun[7][25] = 255
    # Right 2x2 block (columns 35-36).
    gun[3][35] = gun[3][36] = 255
    gun[4][35] = gun[4][36] = 255
    grid[i:i + 11, j:j + 38] = gun
def update(frameNum, img, grid, N):
    """
    Update the image according to the Game of Life rules.

    :param frameNum: frame counter supplied by matplotlib.animation
    :param img: image artist being animated
    :param grid: cell grid (values ON/OFF)
    :param N: grid size
    :return: the updated image artist (as a 1-tuple)
    """
    newGrid = grid.copy()
    for i in range(N):
        for j in range(N):
            # Sum the eight surrounding cells (each 0 or 255) and divide by
            # 255 to count live neighbours; % N wraps indices around so the
            # grid behaves like a torus at the boundaries.
            total = int((grid[i, (j - 1) % N] + grid[i, (j + 1) % N] +
                         grid[(i - 1) % N, j] + grid[(i + 1) % N, j] +
                         grid[(i - 1) % N, (j - 1) % N] + grid[(i - 1) % N, (j + 1) % N] +
                         grid[(i + 1) % N, (j - 1) % N] + grid[(i + 1) % N, (j + 1) % N]) / 255)
            # Conway's update rules: a live cell survives with 2 or 3
            # neighbours; a dead cell is born with exactly 3.
            if grid[i, j] == ON:
                if (total < 2) or (total > 3):
                    newGrid[i, j] = OFF
            else:
                if total == 3:
                    newGrid[i, j] = ON
    # Push the new state into the animated image and the shared grid.
    img.set_data(newGrid)
    grid[:] = newGrid[:]
    return img,
def main():
    """Parse CLI options, build the initial grid and run the animation."""
    # Parse command-line arguments.
    parser = argparse.ArgumentParser(description="Runs Conway's Game of Life simulation.")
    parser.add_argument('--grid-size', dest='N', required=False)
    parser.add_argument('--mov-file', dest='movfile', required=False)
    parser.add_argument('--interval', dest='interval', required=False)
    parser.add_argument('--glider', action='store_true', required=False)
    parser.add_argument('--gosper', action='store_true', required=False)
    args = parser.parse_args()
    # Default grid size (user value honoured only when greater than 8).
    N = 100
    if args.N and int(args.N) > 8:
        N = int(args.N)
    # Default animation update interval in ms.
    updateInterval = 50
    if args.interval:
        updateInterval = int(args.interval)
    # Choose the initial pattern.
    grid = np.array([])
    # Fall back to a random grid when no pattern flag was given.
    if args.glider:
        grid = np.zeros(N * N).reshape(N, N)
        addGlider(1, 1, grid)
    elif args.gosper:
        grid = np.zeros(N * N).reshape(N, N)
        addGosperGliderGun(10, 10, grid)
    else:
        grid = randomGrid(N)
    # Set up the animation.
    fig, ax = plt.subplots()
    img = ax.imshow(grid, interpolation='nearest')
    ani = animation.FuncAnimation(fig, update, fargs=(img, grid, N,),
                                  frames=10,
                                  interval=updateInterval,
                                  save_count=50)
    # Optionally save the animation to a movie file.
    if args.movfile:
        ani.save(args.movfile, fps=30, extra_args=['-vcodec', 'libx264'])
    plt.show()
if __name__ == '__main__':
main() | [
"argparse.ArgumentParser",
"matplotlib.animation.FuncAnimation",
"numpy.random.choice",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((361, 414), 'numpy.array', 'np.array', (['[[0, 0, 255], [255, 0, 255], [0, 255, 255]]'], {}), '([[0, 0, 255], [255, 0, 255], [0, 255, 255]])\n', (369, 414), True, 'import numpy as np\n'), ((2673, 2750), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Runs Conway\'s Game of Life simulation."""'}), '(description="Runs Conway\'s Game of Life simulation.")\n', (2696, 2750), False, 'import argparse\n'), ((3488, 3500), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3496, 3500), True, 'import numpy as np\n'), ((3850, 3864), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3862, 3864), True, 'import matplotlib.pyplot as plt\n'), ((3928, 4041), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'update'], {'fargs': '(img, grid, N)', 'frames': '(10)', 'interval': 'updateInterval', 'save_count': '(50)'}), '(fig, update, fargs=(img, grid, N), frames=10,\n interval=updateInterval, save_count=50)\n', (3951, 4041), True, 'import matplotlib.animation as animation\n'), ((4286, 4296), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4294, 4296), True, 'import matplotlib.pyplot as plt\n'), ((223, 266), 'numpy.random.choice', 'np.random.choice', (['vals', '(N * N)'], {'p': '[0.2, 0.8]'}), '(vals, N * N, p=[0.2, 0.8])\n', (239, 266), True, 'import numpy as np\n'), ((599, 616), 'numpy.zeros', 'np.zeros', (['(11 * 38)'], {}), '(11 * 38)\n', (607, 616), True, 'import numpy as np\n'), ((3592, 3607), 'numpy.zeros', 'np.zeros', (['(N * N)'], {}), '(N * N)\n', (3600, 3607), True, 'import numpy as np\n'), ((3692, 3707), 'numpy.zeros', 'np.zeros', (['(N * N)'], {}), '(N * N)\n', (3700, 3707), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import pyopengv
import logging
from matplotlib import pyplot as plt
from opensfm import context
from opensfm import multiview
logger = logging.getLogger(__name__)
# pairwise matches
def match_lowe(index, f2, config):
    """Match features and apply Lowe's ratio filter.

    Args:
        index: flann index of the first image
        f2: feature descriptors of the second image
        config: config parameters

    Returns:
        Array of (index1, index2) match pairs, dtype int.
    """
    # Debug print replaced with the module logger so library users can
    # control verbosity instead of getting unconditional stdout spam.
    logger.debug('Matching with FLANN + Lowe ratio test')
    search_params = dict(checks=config['flann_checks'])
    results, dists = index.knnSearch(f2, 2, params=search_params)
    squared_ratio = config['lowes_ratio']**2  # Flann returns squared L2 distances
    good = dists[:, 0] < squared_ratio * dists[:, 1]
    matches = list(zip(results[good, 0], good.nonzero()[0]))
    return np.array(matches, dtype=int)
def match_symmetric(fi, indexi, fj, indexj, config):
    """Match in both directions and keep consistent matches.

    Args:
        fi: feature descriptors of the first image
        indexi: flann index of the first image
        fj: feature descriptors of the second image
        indexj: flann index of the second image
        config: config parameters

    Returns:
        Array of symmetric (index1, index2) match pairs, dtype int.
    """
    # Debug print replaced with the module logger.
    logger.debug('Symmetric matching')
    if config['matcher_type'] == 'FLANN':
        matches_ij = [(a, b) for a, b in match_lowe(indexi, fj, config)]
        matches_ji = [(b, a) for a, b in match_lowe(indexj, fi, config)]
    else:
        matches_ij = [(a, b) for a, b in match_lowe_bf(fi, fj, config)]
        matches_ji = [(b, a) for a, b in match_lowe_bf(fj, fi, config)]
    # Keep only pairs found in both matching directions.
    matches = set(matches_ij).intersection(set(matches_ji))
    return np.array(list(matches), dtype=int)
def _convert_matches_to_vector(matches):
"""Convert Dmatch object to matrix form."""
matches_vector = np.zeros((len(matches), 2), dtype=np.int)
k = 0
for mm in matches:
matches_vector[k, 0] = mm.queryIdx
matches_vector[k, 1] = mm.trainIdx
k = k+1
return matches_vector
def match_lowe_bf(f1, f2, config):
    """Bruteforce matching and Lowe's ratio filtering.

    Args:
        f1: feature descriptors of the first image
        f2: feature descriptors of the second image
        config: config parameters

    Returns:
        Array of (index1, index2) match pairs, dtype int.
    """
    # Debug print replaced with the module logger; the commented-out
    # matplotlib visualisation block was dead code and has been removed.
    logger.debug('Matching with brute force + Lowe ratio test')
    assert f1.dtype.type == f2.dtype.type
    # Binary descriptors (uint8, e.g. ORB) require Hamming distance.
    if f1.dtype.type == np.uint8:
        matcher_type = 'BruteForce-Hamming'
    else:
        matcher_type = 'BruteForce'
    matcher = cv2.DescriptorMatcher_create(matcher_type)
    matches = matcher.knnMatch(f1, f2, k=2)
    ratio = config['lowes_ratio']
    good_matches = []
    for match in matches:
        # knnMatch may return fewer than two neighbours per query.
        if match and len(match) == 2:
            m, n = match
            if m.distance < ratio * n.distance:
                good_matches.append(m)
    good_matches = _convert_matches_to_vector(good_matches)
    return np.array(good_matches, dtype=int)
def robust_match_fundamental(p1, p2, matches, config):
"""Filter matches by estimating the Fundamental matrix via RANSAC."""
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!! in match_fundamental')
if len(matches) < 8:
return None, np.array([])
p1 = p1[matches[:, 0]][:, :2].copy()
p2 = p2[matches[:, 1]][:, :2].copy()
FM_RANSAC = cv2.FM_RANSAC if context.OPENCV3 else cv2.cv.CV_FM_RANSAC
threshold = config['robust_matching_threshold']
F, mask = cv2.findFundamentalMat(p1, p2, FM_RANSAC, threshold, 0.9999)
inliers = mask.ravel().nonzero()
if F is None or F[2, 2] == 0.0:
return F, []
return F, matches[inliers]
def _compute_inliers_bearings(b1, b2, T, threshold=0.01):
R = T[:, :3]
t = T[:, 3]
p = pyopengv.triangulation_triangulate(b1, b2, t, R)
br1 = p.copy()
br1 /= np.linalg.norm(br1, axis=1)[:, np.newaxis]
br2 = R.T.dot((p - t).T).T
br2 /= np.linalg.norm(br2, axis=1)[:, np.newaxis]
ok1 = multiview.vector_angle_many(br1, b1) < threshold
ok2 = multiview.vector_angle_many(br2, b2) < threshold
return ok1 * ok2
def robust_match_calibrated(p1, p2, camera1, camera2, matches, config):
"""Filter matches by estimating the Essential matrix via RANSAC."""
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!! in robust_match_calibrated')
if len(matches) < 8:
return np.array([])
p1 = p1[matches[:, 0]][:, :2].copy()
p2 = p2[matches[:, 1]][:, :2].copy()
b1 = camera1.pixel_bearing_many(p1)
b2 = camera2.pixel_bearing_many(p2)
threshold = config['robust_matching_calib_threshold']
T = multiview.relative_pose_ransac(
b1, b2, b"STEWENIUS", 1 - np.cos(threshold), 1000, 0.999)
for relax in [4, 2, 1]:
inliers = _compute_inliers_bearings(b1, b2, T, relax * threshold)
if sum(inliers) < 8:
return np.array([])
T = pyopengv.relative_pose_optimize_nonlinear(
b1[inliers], b2[inliers], T[:3, 3], T[:3, :3])
inliers = _compute_inliers_bearings(b1, b2, T, threshold)
return matches[inliers]
def robust_match(p1, p2, camera1, camera2, matches, config):
"""Filter matches by fitting a geometric model.
If cameras are perspective without distortion, then the Fundamental
matrix is used. Otherwise, we use the Essential matrix.
"""
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!! in robust_match')
if (camera1.projection_type == 'perspective'
and camera1.k1 == 0.0 and camera1.k2 == 0.0
and camera2.projection_type == 'perspective'
and camera2.k1 == 0.0 and camera2.k2 == 0.0):
return robust_match_fundamental(p1, p2, matches, config)[1]
else:
return robust_match_calibrated(p1, p2, camera1, camera2, matches, config)
| [
"logging.getLogger",
"cv2.DescriptorMatcher_create",
"opensfm.multiview.vector_angle_many",
"pyopengv.relative_pose_optimize_nonlinear",
"numpy.array",
"numpy.cos",
"cv2.findFundamentalMat",
"numpy.linalg.norm",
"pyopengv.triangulation_triangulate"
] | [((169, 196), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (186, 196), False, 'import logging\n'), ((844, 872), 'numpy.array', 'np.array', (['matches'], {'dtype': 'int'}), '(matches, dtype=int)\n', (852, 872), True, 'import numpy as np\n'), ((2760, 2802), 'cv2.DescriptorMatcher_create', 'cv2.DescriptorMatcher_create', (['matcher_type'], {}), '(matcher_type)\n', (2788, 2802), False, 'import cv2\n'), ((3360, 3393), 'numpy.array', 'np.array', (['good_matches'], {'dtype': 'int'}), '(good_matches, dtype=int)\n', (3368, 3393), True, 'import numpy as np\n'), ((3871, 3931), 'cv2.findFundamentalMat', 'cv2.findFundamentalMat', (['p1', 'p2', 'FM_RANSAC', 'threshold', '(0.9999)'], {}), '(p1, p2, FM_RANSAC, threshold, 0.9999)\n', (3893, 3931), False, 'import cv2\n'), ((4160, 4208), 'pyopengv.triangulation_triangulate', 'pyopengv.triangulation_triangulate', (['b1', 'b2', 't', 'R'], {}), '(b1, b2, t, R)\n', (4194, 4208), False, 'import pyopengv\n'), ((4240, 4267), 'numpy.linalg.norm', 'np.linalg.norm', (['br1'], {'axis': '(1)'}), '(br1, axis=1)\n', (4254, 4267), True, 'import numpy as np\n'), ((4326, 4353), 'numpy.linalg.norm', 'np.linalg.norm', (['br2'], {'axis': '(1)'}), '(br2, axis=1)\n', (4340, 4353), True, 'import numpy as np\n'), ((4380, 4416), 'opensfm.multiview.vector_angle_many', 'multiview.vector_angle_many', (['br1', 'b1'], {}), '(br1, b1)\n', (4407, 4416), False, 'from opensfm import multiview\n'), ((4439, 4475), 'opensfm.multiview.vector_angle_many', 'multiview.vector_angle_many', (['br2', 'b2'], {}), '(br2, b2)\n', (4466, 4475), False, 'from opensfm import multiview\n'), ((4764, 4776), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4772, 4776), True, 'import numpy as np\n'), ((5281, 5374), 'pyopengv.relative_pose_optimize_nonlinear', 'pyopengv.relative_pose_optimize_nonlinear', (['b1[inliers]', 'b2[inliers]', 'T[:3, 3]', 'T[:3, :3]'], {}), '(b1[inliers], b2[inliers], T[:3, 3\n ], T[:3, :3])\n', (5322, 5374), False, 'import 
pyopengv\n'), ((3634, 3646), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3642, 3646), True, 'import numpy as np\n'), ((5073, 5090), 'numpy.cos', 'np.cos', (['threshold'], {}), '(threshold)\n', (5079, 5090), True, 'import numpy as np\n'), ((5256, 5268), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5264, 5268), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from typing import List, Union, Tuple
from macrosynergy.management.simulate_quantamental_data import make_qdf
from macrosynergy.management.shape_dfs import reduce_df
class NaivePnL:
"""Computes and collects illustrative PnLs with limited signal options and
disregarding transaction costs
:param <pd.Dataframe> df: standardized data frame with the following necessary
columns: 'cid', 'xcat', 'real_date' and 'value'.
:param <str> ret: return category.
:param <List[str]> sigs: signal categories.
:param <List[str]> cids: cross sections to be considered. Default is all in the
dataframe.
:param <str> start: earliest date in ISO format. Default is None and earliest date
in df is used.
:param <str> end: latest date in ISO format. Default is None and latest date in df
is used.
:param <dict> blacklist: cross sections with date ranges that should be excluded
from the dataframe.
"""
def __init__(self, df: pd.DataFrame, ret: str, sigs: List[str],
cids: List[str] = None,
start: str = None, end: str = None,
blacklist: dict = None):
self.ret = ret
self.sigs = sigs
xcats = [ret] + sigs
cols = ['cid', 'xcat', 'real_date', 'value']
self.df, self.xcats, self.cids = reduce_df(df[cols], xcats, cids, start, end,
blacklist, out_all=True)
self.df['real_date'] = pd.to_datetime(self.df['real_date'])
self.pnl_names = [] # list for PnL names
self.black = blacklist
def make_pnl(self, sig: str, sig_op: str = 'zn_score_pan', pnl_name: str = None,
rebal_freq: str = 'daily', rebal_slip = 0, vol_scale: float = None,
min_obs: int = 252, iis: bool = True,
neutral: str = 'zero', thresh: float = None):
# Todo: implement the four 'pass through arguments to make_zn_score()
"""Calculate daily PnL and add to the main dataframe of the class instance
:param <str> sig: name of signal that is the basis for positioning. The signal
is assumed to be recorded at the end of the day prior to position taking.
:param <str> sig_op: signal transformation options; must be one of
'zn_score_pan', 'zn_score_cs', or 'binary'.
Default 'zn_score_pan' transforms raw signals into z-scores around zero value
based on the whole panel.
Option 'zn_score_cs' transforms signals to z-scores around zero based on
cross-section alone.
Option 'binary' transforms signals into uniform long/shorts (1/-1) across all
sections.
N.B.: zn-score here means standardized score with zero being the natural
neutral level and standardization through division by mean absolute value.
:param <str> pnl_name: name of the PnL to be generated and stored.
Default is none, i.e. a default name is given.
Previously calculated PnLs in the class will be overwritten. This means that
if a set of PnLs is to be compared they require custom names.
:param <str> rebal_freq: rebalancing frequency for positions according to signal
must be one of 'daily' (default), 'weekly' or 'monthly'.
:param <str> rebal_slip: rebalancing slippage in days. Default is 1, which means
that it takes one day to rebalance the position and that the new positions
produces PnL from the second day after the signal has been recorded.
:param <bool> vol_scale: ex-post scaling of PnL to annualized volatility given.
This for comparative visualization and not out-of-sample. Default is none.
:param <int> min_obs: the minimum number of observations required to calculate
zn_scores. Default is 252.
# Todo: implement in function
:param <bool> iis: if True (default) zn-scores are also calculated for the initial
sample period defined by min-obs, on an in-sample basis, to avoid losing history.
# Todo: implement in function
:param <str> neutral: method to determine neutral level. Default is 'zero'.
Alternatives are 'mean' and "median".
# Todo: implement in function
:param <float> thresh: threshold value beyond which scores are winsorized,
i.e. contained at that threshold. Therefore, the threshold is the maximum absolute
score value that the function is allowed to produce. The minimum threshold is 1
standard deviation.
# Todo: implement in function
"""
assert sig in self.sigs
assert sig_op in ['zn_score_pan', 'zn_score_cs', 'binary']
assert rebal_freq in ['daily', 'weekly', 'monthly']
dfx = self.df[self.df['xcat'].isin([self.ret, sig])]
dfw = dfx.pivot(index=['cid', 'real_date'], columns='xcat', values='value')
if sig_op == 'zn_score_pan':
# Todo: below is in-sample; use make_zn_score() for oos calculation
# Todo: pass through min_obs, iss, neutral, thresh
sda = dfw[sig].abs().mean()
dfw['psig'] = dfw[sig] / sda
elif sig_op == 'zn_score_cs': # zn-score based on
# Todo: below is in-sample; use make_zn_score() for oos calculation
# Todo: pass through min_obs, iss, neutral, thresh
zn_score = lambda x: x / np.nanmean(np.abs(x))
dfw['psig'] = dfw[sig].groupby(level=0).apply(zn_score)
elif sig_op == 'binary':
dfw['psig'] = np.sign(dfw[sig])
# Signal for the following day explains the lag mechanism.
dfw['psig'] = dfw['psig'].groupby(level=0).shift(1) # lag explanatory 1 period
dfw.reset_index(inplace=True)
if rebal_freq != 'daily':
dfw['year'] = dfw['real_date'].dt.year
if rebal_freq == 'monthly':
dfw['month'] = dfw['real_date'].dt.month
rebal_dates = dfw.groupby(['cid', 'year', 'month'])['real_date'].\
min() # rebalancing days are first of month
if rebal_freq == 'weekly':
dfw['week'] = dfw['real_date'].dt.week
rebal_dates = dfw.groupby(['cid', 'year', 'week'])['real_date'].\
min() # rebalancing days are first of week
dfw['sig'] = np.nan
dfw.loc[dfw['real_date'].isin(rebal_dates), 'sig'] = \
dfw.loc[dfw['real_date'].isin(rebal_dates), 'psig']
dfw['sig'] = dfw['sig'].fillna(method='ffill').shift(rebal_slip)
dfw['value'] = dfw[self.ret] * dfw['sig']
df_pnl = dfw.loc[:, ['cid', 'real_date', 'value']] # cross-section PnLs
df_pnl_all = df_pnl.groupby(['real_date']).sum() # global PnL as sum
df_pnl_all = df_pnl_all[df_pnl_all['value'].cumsum() != 0] # trim early zeros
df_pnl_all['cid'] = 'ALL'
df_pnl_all = df_pnl_all.reset_index()[df_pnl.columns] # columns as in df_pnl...
df_pnl = df_pnl.append(df_pnl_all) #... and append
if vol_scale is not None:
leverage = vol_scale * (df_pnl_all['value'].std() * np.sqrt(261))**(-1)
df_pnl['value'] = df_pnl['value'] * leverage
pnn = ('PNL_' + sig) if pnl_name is None else pnl_name # set PnL name
df_pnl['xcat'] = pnn
if pnn in self.pnl_names:
self.df = self.df[~(self.df['xcat'] == pnn)] # remove any PnL with same name
else:
self.pnl_names = self.pnl_names + [pnn]
self.df = self.df.append(df_pnl[self.df.columns]).reset_index(drop=True)
def plot_pnls(self, pnl_cats: List[str], pnl_cids: List[str] = ['ALL'],
start: str = None, end: str = None, figsize: Tuple = (10, 6)):
"""Plot line chart of cumulative PnLs, single PnL, multiple PnL types per
cross section, or mutiple cross sections per PnL type.
:param <List[str]> pnl_cats: list of PnL categories that should be plotted.
:param <List[str]> pnl_cids: list of cross sections to be plotted;
default is 'ALL' (global PnL).
Note: one can only have multiple PnL categories or multiple cross sections,
not both.
:param <str> start: start date in ISO format.
:param <str> start: earliest date in ISO format. Default is None and earliest
date in df is used.
:param <str> end: latest date in ISO format. Default is None and latest date
in df is used.
:param <Tuple> figsize: tuple of plot width and height. Default is (10,6).
"""
if pnl_cats is None:
pnl_cats = self.pnl_names
assert (len(pnl_cats) == 1) | (len(pnl_cids) == 1)
dfx = reduce_df(self.df, pnl_cats, pnl_cids, start, end, self.black,
out_all=False)
sns.set_theme(style='whitegrid', palette='colorblind',
rc={'figure.figsize': figsize})
if len(pnl_cids) == 1:
dfx['cum_value'] = dfx.groupby('xcat').cumsum()
ax = sns.lineplot(data=dfx, x='real_date', y='cum_value', hue='xcat',
estimator=None, lw=1)
leg = ax.axes.get_legend()
if len(pnl_cats) > 1:
leg.set_title('PnL categories for ' + pnl_cids[0])
else:
leg.set_title('PnL category for ' + pnl_cids[0])
else:
dfx['cum_value'] = dfx.groupby('cid').cumsum()
ax = sns.lineplot(data=dfx, x='real_date', y='cum_value', hue='cid',
estimator=None, lw=1)
leg = ax.axes.get_legend()
leg.set_title('Cross sections')
plt.title('Cumulative naive PnL', fontsize=16)
plt.xlabel('')
plt.ylabel('% of risk capital, no compounding')
plt.axhline(y=0, color='black', linestyle='--', lw=1)
plt.show()
def evaluate_pnls(self, pnl_cats: List[str], pnl_cids: List[str] = ['ALL'],
start: str = None, end: str = None):
"""Small table of key PnL statistics
:param <List[str]> pnl_cats: list of PnL categories that should be plotted.
:param <List[str]> pnl_cids: list of cross sections to be plotted; default is
'ALL' (global PnL).
Note: one can only have multiple PnL categories or multiple cross sections,
not both.
:param <str> start: start date in format.
:param <str> start: earliest date in ISO format. Default is None and earliest
date in df is used.
:param <str> end: latest date in ISO format. Default is None and latest date
in df is used.
:return: standardized dataframe with key PnL performance statistics
"""
if pnl_cats is None:
pnl_cats = self.pnl_names
assert (len(pnl_cats) == 1) | (len(pnl_cids) == 1)
dfx = reduce_df(self.df, pnl_cats, pnl_cids, start, end, self.black,
out_all=False)
groups = 'xcat' if len(pnl_cids) == 1 else 'cid'
stats = ['Return (pct ar)', 'St. Dev. (pct ar)', 'Sharpe ratio', 'Sortino ratio',
'Max 21-day draw', 'Max 6-month draw', 'Traded months']
dfw = dfx.pivot(index='real_date', columns=groups, values='value')
df = pd.DataFrame(columns=dfw.columns, index=stats)
df.iloc[0, :] = dfw.mean(axis=0) * 261
df.iloc[1, :] = dfw.std(axis=0) * np.sqrt(261)
df.iloc[2, :] = df.iloc[0, :] / df.iloc[1, :]
dsd = dfw.apply(lambda x: np.sqrt(np.sum(x[x < 0]**2)/len(x))) * np.sqrt(261)
df.iloc[3, :] = df.iloc[0, :] / dsd
df.iloc[4, :] = dfw.rolling(21).sum().min()
df.iloc[5, :] = dfw.rolling(6*21).sum().min()
df.iloc[6, :] = dfw.resample('M').sum().count()
return df
def pnl_names(self):
"""Print list of names of available PnLs in the class instance"""
print(self.pnl_names)
def pnl_df(self, pnl_names: List[str] = None, cs: bool = False):
"""Return data frame with PnLs
:param <List[str]> pnl_names: list of names of PnLs to be returned.
Default is 'ALL'.
:param <bool> cs: inclusion of cross section PnLs. Default is False.
:return custom data frame with PnLs
"""
selected_pnls = pnl_names if pnl_names is not None else self.pnl_names
filter_1 = self.df['xcat'].isin(selected_pnls)
filter_2 = self.df['cid'] == 'ALL' if not cs else True
return self.df[filter_1 & filter_2]
if __name__ == "__main__":
cids = ['AUD', 'CAD', 'GBP', 'NZD']
xcats = ['XR', 'CRY', 'GROWTH', 'INFL']
cols_1 = ['earliest', 'latest', 'mean_add', 'sd_mult']
df_cids = pd.DataFrame(index=cids, columns=cols_1)
df_cids.loc['AUD',] = ['2000-01-01', '2020-12-31', 0.1, 1]
df_cids.loc['CAD',] = ['2001-01-01', '2020-11-30', 0, 1]
df_cids.loc['GBP',] = ['2002-01-01', '2020-11-30', 0, 2]
df_cids.loc['NZD',] = ['2002-01-01', '2020-09-30', -0.1, 2]
cols_2 = cols_1 + ['ar_coef', 'back_coef']
df_xcats = pd.DataFrame(index=xcats, columns=cols_2)
df_xcats.loc['XR',] = ['2000-01-01', '2020-12-31', 0.1, 1, 0, 0.3]
df_xcats.loc['CRY',] = ['2000-01-01', '2020-10-30', 1, 2, 0.95, 1]
df_xcats.loc['GROWTH',] = ['2001-01-01', '2020-10-30', 1, 2, 0.9, 1]
df_xcats.loc['INFL',] = ['2001-01-01', '2020-10-30', 1, 2, 0.8, 0.5]
black = {'AUD': ['2006-01-01', '2015-12-31'], 'GBP': ['2012-01-01', '2100-01-01']}
dfd = make_qdf(df_cids, df_xcats, back_ar=0.75)
# Initiate instance
pnl = NaivePnL(dfd, ret='XR', sigs=['CRY', 'GROWTH', 'INFL'],
cids=cids, start='2000-01-01', blacklist=black)
# Make PnLs
pnl.make_pnl('CRY', sig_op='zn_score_pan', rebal_freq='monthly',
vol_scale=10, rebal_slip=1,
pnl_name='PNL_CRY_PZN')
pnl.make_pnl('CRY', sig_op='binary', rebal_freq='monthly',
rebal_slip=1, vol_scale=10,
pnl_name='PNL_CRY_DIG')
pnl.make_pnl('GROWTH', sig_op='zn_score_cs', rebal_freq='monthly',
rebal_slip=1, vol_scale=10,
pnl_name='PNL_GROWTH_IZN')
# Plot PnLs
pnl.plot_pnls(pnl_cats=['PNL_CRY_PZN', 'PNL_CRY_DIG', 'PNL_GROWTH_IZN'],
pnl_cids=['ALL'], start='2000-01-01')
pnl.plot_pnls(pnl_cats=['PNL_CRY_PZN'], pnl_cids=['CAD', 'NZD'],
start='2000-01-01')
# Return evaluation and PnL data frames
df_eval = pnl.evaluate_pnls(
pnl_cats=['PNL_CRY_PZN', 'PNL_CRY_DIG', 'PNL_GROWTH_IZN'],
pnl_cids=['ALL'], start='2000-01-01')
df_pnls = pnl.pnl_df()
df_pnls.head()
| [
"numpy.abs",
"numpy.sqrt",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"seaborn.set_theme",
"matplotlib.pyplot.xlabel",
"macrosynergy.management.shape_dfs.reduce_df",
"matplotlib.pyplot.axhline",
"seaborn.lineplot",
"numpy.sum",
"numpy.sign",
"pandas.DataFrame",
"matplotlib.pyplot.... | [((12996, 13036), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'cids', 'columns': 'cols_1'}), '(index=cids, columns=cols_1)\n', (13008, 13036), True, 'import pandas as pd\n'), ((13350, 13391), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'xcats', 'columns': 'cols_2'}), '(index=xcats, columns=cols_2)\n', (13362, 13391), True, 'import pandas as pd\n'), ((13778, 13819), 'macrosynergy.management.simulate_quantamental_data.make_qdf', 'make_qdf', (['df_cids', 'df_xcats'], {'back_ar': '(0.75)'}), '(df_cids, df_xcats, back_ar=0.75)\n', (13786, 13819), False, 'from macrosynergy.management.simulate_quantamental_data import make_qdf\n'), ((1437, 1506), 'macrosynergy.management.shape_dfs.reduce_df', 'reduce_df', (['df[cols]', 'xcats', 'cids', 'start', 'end', 'blacklist'], {'out_all': '(True)'}), '(df[cols], xcats, cids, start, end, blacklist, out_all=True)\n', (1446, 1506), False, 'from macrosynergy.management.shape_dfs import reduce_df\n'), ((1589, 1625), 'pandas.to_datetime', 'pd.to_datetime', (["self.df['real_date']"], {}), "(self.df['real_date'])\n", (1603, 1625), True, 'import pandas as pd\n'), ((8972, 9049), 'macrosynergy.management.shape_dfs.reduce_df', 'reduce_df', (['self.df', 'pnl_cats', 'pnl_cids', 'start', 'end', 'self.black'], {'out_all': '(False)'}), '(self.df, pnl_cats, pnl_cids, start, end, self.black, out_all=False)\n', (8981, 9049), False, 'from macrosynergy.management.shape_dfs import reduce_df\n'), ((9083, 9173), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""whitegrid"""', 'palette': '"""colorblind"""', 'rc': "{'figure.figsize': figsize}"}), "(style='whitegrid', palette='colorblind', rc={'figure.figsize':\n figsize})\n", (9096, 9173), True, 'import seaborn as sns\n'), ((9939, 9985), 'matplotlib.pyplot.title', 'plt.title', (['"""Cumulative naive PnL"""'], {'fontsize': '(16)'}), "('Cumulative naive PnL', fontsize=16)\n", (9948, 9985), True, 'import matplotlib.pyplot as plt\n'), ((9994, 10008), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (10004, 10008), True, 'import matplotlib.pyplot as plt\n'), ((10017, 10064), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% of risk capital, no compounding"""'], {}), "('% of risk capital, no compounding')\n", (10027, 10064), True, 'import matplotlib.pyplot as plt\n'), ((10073, 10126), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'color': '"""black"""', 'linestyle': '"""--"""', 'lw': '(1)'}), "(y=0, color='black', linestyle='--', lw=1)\n", (10084, 10126), True, 'import matplotlib.pyplot as plt\n'), ((10135, 10145), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10143, 10145), True, 'import matplotlib.pyplot as plt\n'), ((11157, 11234), 'macrosynergy.management.shape_dfs.reduce_df', 'reduce_df', (['self.df', 'pnl_cats', 'pnl_cids', 'start', 'end', 'self.black'], {'out_all': '(False)'}), '(self.df, pnl_cats, pnl_cids, start, end, self.black, out_all=False)\n', (11166, 11234), False, 'from macrosynergy.management.shape_dfs import reduce_df\n'), ((11569, 11615), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'dfw.columns', 'index': 'stats'}), '(columns=dfw.columns, index=stats)\n', (11581, 11615), True, 'import pandas as pd\n'), ((9301, 9392), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'dfx', 'x': '"""real_date"""', 'y': '"""cum_value"""', 'hue': '"""xcat"""', 'estimator': 'None', 'lw': '(1)'}), "(data=dfx, x='real_date', y='cum_value', hue='xcat', estimator=\n None, lw=1)\n", (9313, 9392), True, 'import seaborn as sns\n'), ((9731, 9821), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'dfx', 'x': '"""real_date"""', 'y': '"""cum_value"""', 'hue': '"""cid"""', 'estimator': 'None', 'lw': '(1)'}), "(data=dfx, x='real_date', y='cum_value', hue='cid', estimator=\n None, lw=1)\n", (9743, 9821), True, 'import seaborn as sns\n'), ((11706, 11718), 'numpy.sqrt', 'np.sqrt', (['(261)'], {}), '(261)\n', (11713, 11718), True, 'import numpy as np\n'), ((11846, 11858), 
'numpy.sqrt', 'np.sqrt', (['(261)'], {}), '(261)\n', (11853, 11858), True, 'import numpy as np\n'), ((5767, 5784), 'numpy.sign', 'np.sign', (['dfw[sig]'], {}), '(dfw[sig])\n', (5774, 5784), True, 'import numpy as np\n'), ((7374, 7386), 'numpy.sqrt', 'np.sqrt', (['(261)'], {}), '(261)\n', (7381, 7386), True, 'import numpy as np\n'), ((5629, 5638), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (5635, 5638), True, 'import numpy as np\n'), ((11815, 11836), 'numpy.sum', 'np.sum', (['(x[x < 0] ** 2)'], {}), '(x[x < 0] ** 2)\n', (11821, 11836), True, 'import numpy as np\n')] |
import pytest
import torch
from torch import nn
import pugh_torch as pt
import numpy as np
class SimpleInheritedReLU(nn.ReLU, pt.modules.ActivationModule):
pass
class SimpleInheritedRelUWithInit(nn.ReLU, pt.modules.ActivationModule):
@torch.no_grad()
def init_layer(self, m):
if type(m) == nn.Linear:
m.weight.fill_(1.0)
@torch.no_grad()
def init_first_layer(self, m):
if type(m) == nn.Linear:
m.weight.fill_(2.0)
def test_activation_factory_function():
ones = torch.ones(2)
fn = pt.modules.Activation("simpleinheritedrelu")
assert (fn(ones) == 1).all()
assert (fn(-ones) == 0).all()
def test_activation_factory_function_init_layer_module():
ones = torch.ones(2)
fc = nn.Linear(2, 2)
fn = pt.modules.Activation("simpleinheritedreluwithinit", fc)
assert (fc.weight == 1).all()
def test_activation_factory_function_init_layer_list():
ones = torch.ones(2)
fc = nn.Linear(2, 2)
fn = pt.modules.Activation("simpleinheritedreluwithinit", [fc])
assert (fc.weight == 1).all()
def test_activation_factory_function_init_first_layer():
ones = torch.ones(2)
fc = nn.Linear(2, 2)
fn = pt.modules.Activation("simpleinheritedreluwithinit", fc, first=True)
assert (fc.weight == 2).all()
def test_sine_fn():
input = np.arange(0, 5, 100)
expected = np.sin(input)
input = torch.Tensor(input)
fn = pt.modules.Activation("sine")
actual = fn(input).numpy()
assert np.isclose(expected, actual).all()
def test_sine_first_layer():
input = np.arange(0, 5, 100)
expected = np.sin(input)
fc = nn.Linear(2, 2)
fn = pt.modules.Activation("sine", fc, first=True)
def test_sine_layer():
input = np.arange(0, 5, 100)
expected = np.sin(input)
fc = nn.Linear(2, 2)
fn = pt.modules.Activation("sine", fc)
| [
"numpy.isclose",
"torch.Tensor",
"torch.nn.Linear",
"numpy.sin",
"torch.no_grad",
"pugh_torch.modules.Activation",
"numpy.arange",
"torch.ones"
] | [((247, 262), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (260, 262), False, 'import torch\n'), ((363, 378), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (376, 378), False, 'import torch\n'), ((532, 545), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (542, 545), False, 'import torch\n'), ((555, 599), 'pugh_torch.modules.Activation', 'pt.modules.Activation', (['"""simpleinheritedrelu"""'], {}), "('simpleinheritedrelu')\n", (576, 599), True, 'import pugh_torch as pt\n'), ((738, 751), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (748, 751), False, 'import torch\n'), ((761, 776), 'torch.nn.Linear', 'nn.Linear', (['(2)', '(2)'], {}), '(2, 2)\n', (770, 776), False, 'from torch import nn\n'), ((786, 842), 'pugh_torch.modules.Activation', 'pt.modules.Activation', (['"""simpleinheritedreluwithinit"""', 'fc'], {}), "('simpleinheritedreluwithinit', fc)\n", (807, 842), True, 'import pugh_torch as pt\n'), ((946, 959), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (956, 959), False, 'import torch\n'), ((969, 984), 'torch.nn.Linear', 'nn.Linear', (['(2)', '(2)'], {}), '(2, 2)\n', (978, 984), False, 'from torch import nn\n'), ((994, 1052), 'pugh_torch.modules.Activation', 'pt.modules.Activation', (['"""simpleinheritedreluwithinit"""', '[fc]'], {}), "('simpleinheritedreluwithinit', [fc])\n", (1015, 1052), True, 'import pugh_torch as pt\n'), ((1157, 1170), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (1167, 1170), False, 'import torch\n'), ((1180, 1195), 'torch.nn.Linear', 'nn.Linear', (['(2)', '(2)'], {}), '(2, 2)\n', (1189, 1195), False, 'from torch import nn\n'), ((1205, 1273), 'pugh_torch.modules.Activation', 'pt.modules.Activation', (['"""simpleinheritedreluwithinit"""', 'fc'], {'first': '(True)'}), "('simpleinheritedreluwithinit', fc, first=True)\n", (1226, 1273), True, 'import pugh_torch as pt\n'), ((1342, 1362), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(100)'], {}), '(0, 5, 100)\n', (1351, 1362), True, 'import numpy as 
np\n'), ((1378, 1391), 'numpy.sin', 'np.sin', (['input'], {}), '(input)\n', (1384, 1391), True, 'import numpy as np\n'), ((1404, 1423), 'torch.Tensor', 'torch.Tensor', (['input'], {}), '(input)\n', (1416, 1423), False, 'import torch\n'), ((1433, 1462), 'pugh_torch.modules.Activation', 'pt.modules.Activation', (['"""sine"""'], {}), "('sine')\n", (1454, 1462), True, 'import pugh_torch as pt\n'), ((1583, 1603), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(100)'], {}), '(0, 5, 100)\n', (1592, 1603), True, 'import numpy as np\n'), ((1619, 1632), 'numpy.sin', 'np.sin', (['input'], {}), '(input)\n', (1625, 1632), True, 'import numpy as np\n'), ((1642, 1657), 'torch.nn.Linear', 'nn.Linear', (['(2)', '(2)'], {}), '(2, 2)\n', (1651, 1657), False, 'from torch import nn\n'), ((1667, 1712), 'pugh_torch.modules.Activation', 'pt.modules.Activation', (['"""sine"""', 'fc'], {'first': '(True)'}), "('sine', fc, first=True)\n", (1688, 1712), True, 'import pugh_torch as pt\n'), ((1750, 1770), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(100)'], {}), '(0, 5, 100)\n', (1759, 1770), True, 'import numpy as np\n'), ((1786, 1799), 'numpy.sin', 'np.sin', (['input'], {}), '(input)\n', (1792, 1799), True, 'import numpy as np\n'), ((1809, 1824), 'torch.nn.Linear', 'nn.Linear', (['(2)', '(2)'], {}), '(2, 2)\n', (1818, 1824), False, 'from torch import nn\n'), ((1834, 1867), 'pugh_torch.modules.Activation', 'pt.modules.Activation', (['"""sine"""', 'fc'], {}), "('sine', fc)\n", (1855, 1867), True, 'import pugh_torch as pt\n'), ((1505, 1533), 'numpy.isclose', 'np.isclose', (['expected', 'actual'], {}), '(expected, actual)\n', (1515, 1533), True, 'import numpy as np\n')] |
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
# from onpolicy.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env
import os
from jinja2 import Template
import mujoco_py as mjp
class ManyAgentSwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self, **kwargs):
agent_conf = kwargs.get("agent_conf")
n_agents = int(agent_conf.split("x")[0])
n_segs_per_agents = int(agent_conf.split("x")[1])
n_segs = n_agents * n_segs_per_agents
# Check whether asset file exists already, otherwise create it
asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',
'manyagent_swimmer_{}_agents_each_{}_segments.auto.xml'.format(n_agents,
n_segs_per_agents))
# if not os.path.exists(asset_path):
print("Auto-Generating Manyagent Swimmer asset with {} segments at {}.".format(n_segs, asset_path))
self._generate_asset(n_segs=n_segs, asset_path=asset_path)
#asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',git p
# 'manyagent_swimmer.xml')
mujoco_env.MujocoEnv.__init__(self, asset_path, 4)
utils.EzPickle.__init__(self)
def _generate_asset(self, n_segs, asset_path):
template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',
'manyagent_swimmer.xml.template')
with open(template_path, "r") as f:
t = Template(f.read())
body_str_template = """
<body name="mid{:d}" pos="-1 0 0">
<geom density="1000" fromto="0 0 0 -1 0 0" size="0.1" type="capsule"/>
<joint axis="0 0 {:d}" limited="true" name="rot{:d}" pos="0 0 0" range="-100 100" type="hinge"/>
"""
body_end_str_template = """
<body name="back" pos="-1 0 0">
<geom density="1000" fromto="0 0 0 -1 0 0" size="0.1" type="capsule"/>
<joint axis="0 0 1" limited="true" name="rot{:d}" pos="0 0 0" range="-100 100" type="hinge"/>
</body>
"""
body_close_str_template ="</body>\n"
actuator_str_template = """\t <motor ctrllimited="true" ctrlrange="-1 1" gear="150.0" joint="rot{:d}"/>\n"""
body_str = ""
for i in range(1,n_segs-1):
body_str += body_str_template.format(i, (-1)**(i+1), i)
body_str += body_end_str_template.format(n_segs-1)
body_str += body_close_str_template*(n_segs-2)
actuator_str = ""
for i in range(n_segs):
actuator_str += actuator_str_template.format(i)
rt = t.render(body=body_str, actuators=actuator_str)
with open(asset_path, "w") as f:
f.write(rt)
pass
def step(self, a):
# ctrl_cost_coeff = 0.0001
# xposbefore = self.sim.data.qpos[0]
# self.do_simulation(a, self.frame_skip)
# xposafter = self.sim.data.qpos[0]
# reward_fwd = (xposafter - xposbefore) / self.dt
# reward_ctrl = -ctrl_cost_coeff * np.square(a).sum()
# reward = reward_fwd + reward_ctrl
ctrl_cost_coeff = 0.0001
xposbefore = self.sim.data.qpos[0]
# yposbefore = self.sim.data.qpos[1]
self.do_simulation(a, self.frame_skip)
# ADDED
mjp.functions.mj_rnePostConstraint(self.sim.model,
self.sim.data)
xposafter = self.sim.data.qpos[0]
# yposbefore = self.sim.data.qpos[1]
y_wallpos1 = self.data.get_geom_xpos("wall1")[1]
y_wallpos2 = self.data.get_geom_xpos("wall2")[1]
reward_fwd = (xposafter - xposbefore) / self.dt
reward_ctrl = - ctrl_cost_coeff * np.square(a).sum()
reward = reward_fwd + reward_ctrl
### ADDED safety stuff
yposafter = self.get_body_com("torso")[1]
ywall = np.array([-2.3, 2.3])
if xposafter < 20:
y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall
elif xposafter > 20 and xposafter < 60:
y_walldist = yposafter + (xposafter - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall
elif xposafter > 60 and xposafter < 100:
y_walldist = yposafter - (xposafter - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall
else:
y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall
obj_cost = (abs(y_walldist) < 1.8).any() * 1.0
# print("y_wallpos1-yposafter", y_wallpos1-yposafter)
# print("y_wallpos2-yposafter", y_wallpos2-yposafter)
#### ADDED
# body_quat = self.data.get_body_xquat('torso')
# z_rot = 1 - 2 * (
# body_quat[1] ** 2 + body_quat[2] ** 2) ### normally xx-rotation, not sure what axes mujoco uses
#
# state = self.state_vector()
done = False
# ADDED
# print("y_walldist", y_walldist)
# print("obj_cost", obj_cost)
# print("done_cost", done_cost)
cost = np.clip(obj_cost, 0, 1)
#cost = obj_cost
ob = self._get_obs()
return ob, reward, done, dict(cost=cost, reward_fwd=reward_fwd, reward_ctrl=reward_ctrl)
def _get_obs(self):
    """Build the observation vector.

    Returns joint positions (excluding the global x/y), joint velocities,
    a scaled x position, and ``y_off`` — the torso's lateral offset from
    the zig-zag track centerline.
    """
    qpos = self.sim.data.qpos
    qvel = self.sim.data.qvel
    #ADDED
    x = self.sim.data.qpos.flat[0]  # ADDED: global x position
    y = self.sim.data.qpos.flat[1]  # ADDED: global y position
    # ADDED: subtract the track centerline (30-degree zig-zag with
    # segment breaks at x = 20, 60, 100) to get the lateral offset.
    # NOTE(review): x exactly 60 falls into the final else branch
    # (strict inequalities), mirroring the same boundary in step().
    if x < 20:
        y_off = y - x * np.tan(30 / 360 * 2 * np.pi)
    elif x > 20 and x < 60:
        y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi)
    elif x > 60 and x < 100:
        y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi)
    else:
        y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi)
    return np.concatenate([qpos.flat[2:], qvel.flat, [x/5],
                           [y_off]])
def reset_model(self):
    """Reset joint positions/velocities to the initial state plus small
    uniform noise in [-0.1, 0.1], and return the first observation."""
    self.set_state(
        self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq),
        self.init_qvel + self.np_random.uniform(low=-.1, high=.1, size=self.model.nv)
    )
    return self._get_obs()
"numpy.clip",
"numpy.tan",
"gym.envs.mujoco.mujoco_env.MujocoEnv.__init__",
"numpy.square",
"numpy.array",
"gym.utils.EzPickle.__init__",
"numpy.concatenate",
"mujoco_py.functions.mj_rnePostConstraint",
"os.path.abspath"
] | [((1322, 1372), 'gym.envs.mujoco.mujoco_env.MujocoEnv.__init__', 'mujoco_env.MujocoEnv.__init__', (['self', 'asset_path', '(4)'], {}), '(self, asset_path, 4)\n', (1351, 1372), False, 'from gym.envs.mujoco import mujoco_env\n'), ((1381, 1410), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (1404, 1410), False, 'from gym import utils\n'), ((3506, 3571), 'mujoco_py.functions.mj_rnePostConstraint', 'mjp.functions.mj_rnePostConstraint', (['self.sim.model', 'self.sim.data'], {}), '(self.sim.model, self.sim.data)\n', (3540, 3571), True, 'import mujoco_py as mjp\n'), ((4072, 4093), 'numpy.array', 'np.array', (['[-2.3, 2.3]'], {}), '([-2.3, 2.3])\n', (4080, 4093), True, 'import numpy as np\n'), ((5208, 5231), 'numpy.clip', 'np.clip', (['obj_cost', '(0)', '(1)'], {}), '(obj_cost, 0, 1)\n', (5215, 5231), True, 'import numpy as np\n'), ((5962, 6022), 'numpy.concatenate', 'np.concatenate', (['[qpos.flat[2:], qvel.flat, [x / 5], [y_off]]'], {}), '([qpos.flat[2:], qvel.flat, [x / 5], [y_off]])\n', (5976, 6022), True, 'import numpy as np\n'), ((644, 669), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (659, 669), False, 'import os\n'), ((1516, 1541), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1531, 1541), False, 'import os\n'), ((3914, 3926), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (3923, 3926), True, 'import numpy as np\n'), ((5652, 5680), 'numpy.tan', 'np.tan', (['(30 / 360 * 2 * np.pi)'], {}), '(30 / 360 * 2 * np.pi)\n', (5658, 5680), True, 'import numpy as np\n'), ((4170, 4198), 'numpy.tan', 'np.tan', (['(30 / 360 * 2 * np.pi)'], {}), '(30 / 360 * 2 * np.pi)\n', (4176, 4198), True, 'import numpy as np\n'), ((5748, 5776), 'numpy.tan', 'np.tan', (['(30 / 360 * 2 * np.pi)'], {}), '(30 / 360 * 2 * np.pi)\n', (5754, 5776), True, 'import numpy as np\n'), ((4311, 4339), 'numpy.tan', 'np.tan', (['(30 / 360 * 2 * np.pi)'], {}), '(30 / 360 * 2 * np.pi)\n', (4317, 4339), True, 
'import numpy as np\n'), ((5845, 5873), 'numpy.tan', 'np.tan', (['(30 / 360 * 2 * np.pi)'], {}), '(30 / 360 * 2 * np.pi)\n', (5851, 5873), True, 'import numpy as np\n'), ((5917, 5945), 'numpy.tan', 'np.tan', (['(30 / 360 * 2 * np.pi)'], {}), '(30 / 360 * 2 * np.pi)\n', (5923, 5945), True, 'import numpy as np\n'), ((4453, 4481), 'numpy.tan', 'np.tan', (['(30 / 360 * 2 * np.pi)'], {}), '(30 / 360 * 2 * np.pi)\n', (4459, 4481), True, 'import numpy as np\n'), ((4546, 4574), 'numpy.tan', 'np.tan', (['(30 / 360 * 2 * np.pi)'], {}), '(30 / 360 * 2 * np.pi)\n', (4552, 4574), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resized imagenet to 8x8, 16x16, 32x32.
This is not to be confused with `downsampled_imagenet` which is a unsupervised
dataset used for generative modeling.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import itertools
import numpy as np
import tensorflow_datasets.public_api as tfds
_CITATION = """@article{chrabaszcz2017downsampled,
title={A downsampled variant of imagenet as an alternative to the cifar datasets},
author={<NAME> Loshchilov, <NAME> Hutter, Frank},
journal={arXiv preprint arXiv:1707.08819},
year={2017}
}
"""
_DESCRIPTION = """\
This dataset consists of the ImageNet dataset resized to {size}x{size}.
The images here are the ones provided by Chrabaszcz et. al. using the box resize method.
For [downsampled ImageNet](http://image-net.org/small/download.php) for unsupervised learning see `downsampled_imagenet`.
WARNING: The integer labels used are defined by the authors and do not match
those from the other ImageNet datasets provided by Tensorflow datasets.
See the original [label list](https://github.com/PatrykChrabaszcz/Imagenet32_Scripts/blob/master/map_clsloc.txt),
and the [labels used by this dataset](https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/image/imagenet_resized_labels.txt).
Additionally, the original authors 1 index there labels which we convert to
0 indexed by subtracting one.
"""
_LABELS_FNAME = 'image/imagenet_resized_labels.txt'
_URL_PREFIX = 'http://www.image-net.org/image/downsample/'
class ImagenetResizedConfig(tfds.core.BuilderConfig):
  """BuilderConfig for Imagenet Resized.

  Attributes:
    size: int, the square edge length (in pixels) of the resized images.
  """

  def __init__(self, size, **kwargs):
    # All configs share the same dataset version.
    super(ImagenetResizedConfig, self).__init__(
        version=tfds.core.Version('0.1.0'), **kwargs)
    self.size = size
def _make_builder_configs():
  """Return one `ImagenetResizedConfig` per supported resolution."""
  supported_sizes = (8, 16, 32, 64)
  return [
      ImagenetResizedConfig(
          name='%dx%d' % (edge, edge),
          size=edge,
          description=_DESCRIPTION.format(size=edge))
      for edge in supported_sizes
  ]
class ImagenetResized(tfds.core.GeneratorBasedBuilder):
  """Imagenet Resized dataset (box-resized to 8x8/16x16/32x32/64x64)."""

  VERSION = tfds.core.Version('0.1.0')
  BUILDER_CONFIGS = _make_builder_configs()

  def _info(self):
    """Returns the dataset metadata (features, labels, citation)."""
    names_file = tfds.core.get_tfds_path(_LABELS_FNAME)
    size = self.builder_config.size
    return tfds.core.DatasetInfo(
        builder=self,
        description=self.builder_config.description,
        features=tfds.features.FeaturesDict({
            'image': tfds.features.Image(shape=(size, size, 3)),
            'label': tfds.features.ClassLabel(names_file=names_file)
        }),
        supervised_keys=('image', 'label'),
        homepage='https://patrykchrabaszcz.github.io/Imagenet32/',
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Downloads the npz archives and defines the train/validation splits."""
    size = self.builder_config.size
    if size in [8, 16, 32]:
      train_path, val_path = dl_manager.download([
          '%s/Imagenet%d_train_npz.zip' % (_URL_PREFIX, size),
          '%s/Imagenet%d_val_npz.zip' % (_URL_PREFIX, size)
      ])
      train_paths = [train_path]
    elif size == 64:
      # 64x64 uses more than one file due to its size.
      train1_path, train2_path, val_path = dl_manager.download([
          '%s/Imagenet64_train_part1_npz.zip' % (_URL_PREFIX),
          '%s/Imagenet64_train_part2_npz.zip' % (_URL_PREFIX),
          '%s/Imagenet64_val_npz.zip' % (_URL_PREFIX)
      ])
      train_paths = [train1_path, train2_path]
    else:
      raise ValueError('Size not implemented!')
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            gen_kwargs={
                # Chain all train archives into a single iterator so the
                # multi-part 64x64 download is transparent downstream.
                'archive':
                    itertools.chain(*[
                        dl_manager.iter_archive(train_path)
                        for train_path in train_paths
                    ]),
            },
        ),
        tfds.core.SplitGenerator(
            name=tfds.Split.VALIDATION,
            gen_kwargs={
                'archive': dl_manager.iter_archive(val_path),
            },
        ),
    ]

  def _generate_examples(self, archive):
    """Yields examples."""
    for fname, fobj in archive:
      content = fobj.read()
      if content:
        fobj_mem = io.BytesIO(content)
        data = np.load(fobj_mem, allow_pickle=False)
        size = self.builder_config.size
        for i, (image, label) in enumerate(zip(data['data'], data['labels'])):
          record = {
              # The data is packed flat as CHW where as most image datasets
              # in tensorflow are HWC. We reshape to recover CHW, then transpose
              # to put back into HWC.
              'image': np.reshape(image, (3, size, size)).transpose(1, 2, 0),
              # Labels in the original dataset are 1 indexed so we subtract 1
              # here.
              'label': label - 1,
          }
          yield fname + str(i), record
| [
"tensorflow_datasets.public_api.features.Image",
"numpy.reshape",
"io.BytesIO",
"tensorflow_datasets.public_api.features.ClassLabel",
"tensorflow_datasets.public_api.core.Version",
"numpy.load",
"tensorflow_datasets.public_api.core.get_tfds_path"
] | [((2803, 2829), 'tensorflow_datasets.public_api.core.Version', 'tfds.core.Version', (['"""0.1.0"""'], {}), "('0.1.0')\n", (2820, 2829), True, 'import tensorflow_datasets.public_api as tfds\n'), ((2911, 2949), 'tensorflow_datasets.public_api.core.get_tfds_path', 'tfds.core.get_tfds_path', (['_LABELS_FNAME'], {}), '(_LABELS_FNAME)\n', (2934, 2949), True, 'import tensorflow_datasets.public_api as tfds\n'), ((2373, 2399), 'tensorflow_datasets.public_api.core.Version', 'tfds.core.Version', (['"""0.1.0"""'], {}), "('0.1.0')\n", (2390, 2399), True, 'import tensorflow_datasets.public_api as tfds\n'), ((4889, 4908), 'io.BytesIO', 'io.BytesIO', (['content'], {}), '(content)\n', (4899, 4908), False, 'import io\n'), ((4924, 4961), 'numpy.load', 'np.load', (['fobj_mem'], {'allow_pickle': '(False)'}), '(fobj_mem, allow_pickle=False)\n', (4931, 4961), True, 'import numpy as np\n'), ((3162, 3204), 'tensorflow_datasets.public_api.features.Image', 'tfds.features.Image', ([], {'shape': '(size, size, 3)'}), '(shape=(size, size, 3))\n', (3181, 3204), True, 'import tensorflow_datasets.public_api as tfds\n'), ((3227, 3274), 'tensorflow_datasets.public_api.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'names_file': 'names_file'}), '(names_file=names_file)\n', (3251, 3274), True, 'import tensorflow_datasets.public_api as tfds\n'), ((5320, 5354), 'numpy.reshape', 'np.reshape', (['image', '(3, size, size)'], {}), '(image, (3, size, size))\n', (5330, 5354), True, 'import numpy as np\n')] |
import numpy as np
import time
from src.es import ES
# from src.ga import GA
from src.load_save import save_solution, benchmark
# Voxel layout of the robot: each cell encodes a voxel type for evogym-style
# environments (here: 3 = actuator-like voxel, 0 = empty).
# NOTE(review): exact type codes depend on the environment — confirm against
# the Walker-v0 voxel definitions.
walker = np.array([
    [3, 3, 3, 3, 3],
    [3, 3, 3, 3, 3],
    [3, 3, 0, 3, 3],
    [3, 3, 0, 3, 3],
    [3, 3, 0, 3, 3]
])

# Evolution-strategy hyperparameters passed straight to ES().
config = {
    "env_name": "Walker-v0",
    "robot": walker,
    "generations": 1,
    "lambda": 10,  # Population size
    "mu": 5,  # Parents pop size
    "sigma": 0.1,  # mutation std
    "lr": 1,  # Learning rate
    "max_steps": 500,
}

# Run the optimization, save the best solution with a timestamped name,
# then evaluate it.
result = ES(config)
filename = f"solution_{time.strftime('%Y%m%d_%H%M%S')}.json"
save_solution(result, config, filename)
benchmark(filename)
| [
"src.load_save.benchmark",
"time.strftime",
"src.load_save.save_solution",
"src.es.ES",
"numpy.array"
] | [((139, 239), 'numpy.array', 'np.array', (['[[3, 3, 3, 3, 3], [3, 3, 3, 3, 3], [3, 3, 0, 3, 3], [3, 3, 0, 3, 3], [3, 3,\n 0, 3, 3]]'], {}), '([[3, 3, 3, 3, 3], [3, 3, 3, 3, 3], [3, 3, 0, 3, 3], [3, 3, 0, 3, 3\n ], [3, 3, 0, 3, 3]])\n', (147, 239), True, 'import numpy as np\n'), ((509, 519), 'src.es.ES', 'ES', (['config'], {}), '(config)\n', (511, 519), False, 'from src.es import ES\n'), ((581, 620), 'src.load_save.save_solution', 'save_solution', (['result', 'config', 'filename'], {}), '(result, config, filename)\n', (594, 620), False, 'from src.load_save import save_solution, benchmark\n'), ((622, 641), 'src.load_save.benchmark', 'benchmark', (['filename'], {}), '(filename)\n', (631, 641), False, 'from src.load_save import save_solution, benchmark\n'), ((543, 573), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H%M%S"""'], {}), "('%Y%m%d_%H%M%S')\n", (556, 573), False, 'import time\n')] |
"""
Sample call:
python scripts/labelme2detectron.py alpacas --save_path annotations/validation.json --dataset_name validation --csv processing-notebooks/final.csv --masks_dir raw-openimages/annotations/correct-masks/
"""
import click
import json
import pandas as pd
import cv2
import os
from tqdm import tqdm
from skimage.measure import find_contours, approximate_polygon
from detectron2.structures import BoxMode
import numpy as np
from common_tools import make_square
def read_labelme_annotation(path):
    """Load a labelme (v4.5.6) annotation JSON and return its key fields.

    Args:
        path: path to the labelme ``.json`` file.

    Returns:
        Tuple ``(image_path, height, width, shapes)``; the image path is
        resolved relative to the annotation file's directory.
    """
    with open(path) as handle:
        annotation = json.load(handle)
    # Only the labelme version this pipeline was written against is accepted.
    assert annotation['version']=='4.5.6'
    resolved_image = os.path.join(os.path.dirname(path), annotation['imagePath'])
    #image_path = data['imagePath']
    return (resolved_image,
            annotation['imageHeight'],
            annotation['imageWidth'],
            annotation['shapes'])
def process_shapes(df, image_path, shapes, height, width, masks_dir):
    """Yield Detectron2-style annotation dicts for one labelme image.

    For each labelme box shape, finds the OpenImages segmentation mask with
    the largest overlap inside that box and converts its contours into
    polygon segmentations clipped to the box.

    Args:
        df: pandas DataFrame with at least ImageID/MaskPath/dataset columns.
        image_path: image file path; its basename must be "<ImageID>_...".
        shapes: labelme shapes (each with two corner 'points').
        height, width: image dimensions in pixels.
        masks_dir: directory containing the mask PNGs referenced by df.

    Yields:
        dicts with bbox, bbox_mode, segmentation, category_id, mask_path.
    """
    image_id, _ = os.path.basename(image_path).split("_")
    mask_rectangles = dict()
    for i, shape in enumerate(shapes):
        #start, end = shape['points']
        # Clamp/expand each annotated box to a square within the image.
        mask_rectangles[i] = make_square([ [int(x) for x in xs] for xs in shape['points']], height, width)
    if not (df.ImageID == image_id).any():
        return
    mask_data = {i: [] for i in mask_rectangles}
    for mask_path, dataset_name in df.loc[df.ImageID == image_id, ["MaskPath", "dataset"]].drop_duplicates().values:
        # Masks are stored as 8-bit PNGs; normalize to [0, 1].
        segmentation_mask = cv2.imread(os.path.join(masks_dir, mask_path), 0)/255.0
        if segmentation_mask.shape != (height, width):
            segmentation_mask = cv2.resize(segmentation_mask, (width, height))
        for i, ((x,y), (u, v)) in mask_rectangles.items():
            mask = segmentation_mask[y: v, x:u]
            #segmentation_mask[:y] = 0
            #segmentation_mask[v:] = 0
            #segmentation_mask[:, :x] = 0
            #segmentation_mask[:, u:] = 0
            #print(np.unique(mask))
            # Pad with a zero border so contours on the crop edge close.
            mask = np.pad(mask, 1, mode='constant')
            lbls = np.unique(mask)
            if len(lbls) == 1 and lbls[0] ==0:
                continue
            # Keep (overlap score, boolean mask) per candidate.
            mask_data[i].append((mask.sum(), mask > 0))
    for i, ((x,y), (u, v)) in mask_rectangles.items():
        if len(mask_data[i]) == 0:
            continue
        # Pick the candidate mask with the largest overlap in this box.
        k = np.argmax([x[0] for x in mask_data[i]])
        mask = mask_data[i][k][1]
        contours = find_contours(mask, 0.5)
        #contours = [
        #    approximate_polygon(contour, 2.5)
        #    for contour in contours]
        if len(contours) > 0:
            data = {
                #'image_id': str(image_id),
                'bbox': [x, y, u, v],
                'bbox_mode': BoxMode.XYXY_ABS, #<BoxMode.XYXY_ABS: 0>,
                'segmentation': [],
                'category_id': 0,
                # NOTE(review): mask_path here is whatever value the earlier
                # loop left behind (its last iteration), which may not be the
                # mask selected by argmax above — confirm intended.
                'mask_path': mask_path
            }
            for contour in contours:
                # Map contour points (row, col) back to absolute image
                # coordinates, undoing the 1-pixel pad and clipping to the box.
                points = [
                    min(max(p+q - 1, q), r) for xs in contour
                    for p, q, r in zip(xs[::-1], (x, y), (u, v))
                ]
                if len(points) < 6:
                    continue
                assert len(points) % 2 == 0
                assert len(points) >= 6, f"{points}"
                data['segmentation'].append(points)
            yield data
        else:
            continue
@click.command()
@click.argument('src')
@click.option('--masks_dir',
              default="../raw-openimages/annotations/correct-masks",
              help="directory where the masks are located")
@click.option('--csv', default="../processing-notebooks/final.csv",
              help="file with collected annotations from OpenImages"
              )
@click.option('--save_path', default=None)
@click.option('--dataset_name', default=None)
def process_files(src, masks_dir, csv, save_path, dataset_name="train"):
    """Convert labelme annotations in SRC into a Detectron2 dataset JSON.

    Filters the OpenImages CSV to alpaca masks (/m/0pcr) for the given
    dataset split, matches them to labelme JSON files in SRC by ImageID,
    and writes the combined records to --save_path (or prints them).
    """
    df = pd.read_csv(csv)
    # /m/0pcr is the OpenImages label of interest; keep only the requested split.
    df = df[(df.LabelName == "/m/0pcr") & (df.dataset == dataset_name)]
    image_ids = np.unique(df.ImageID.values)
    # labelme files are named "<ImageID>_*.json"; keep only known IDs.
    json_paths = [
        os.path.join(src, x)
        for x in os.listdir(src)
        if x.find("json") >= 0 and x.split("_")[0] in image_ids
    ]
    all_shapes = []
    for path in tqdm(json_paths):
        image_path, height, width, shapes = read_labelme_annotation(path)
        prepared = {
            'file_name': image_path,
            'height':height,
            'width': width,
            'annotations': []
        }
        for x in process_shapes(df, image_path, shapes, height, width, masks_dir):
            prepared['annotations'].append(x)
        # Skip images that produced no usable annotations.
        if len(prepared['annotations']) > 0:
            all_shapes.append(prepared)
    if save_path is None:
        print(all_shapes)
    else:
        with open(save_path, "w") as f:
            json.dump(all_shapes, f)
        print(len(all_shapes))
if __name__ == '__main__':
process_files()
| [
"click.argument",
"os.listdir",
"numpy.unique",
"pandas.read_csv",
"click.option",
"tqdm.tqdm",
"os.path.join",
"numpy.argmax",
"os.path.dirname",
"numpy.pad",
"os.path.basename",
"json.load",
"skimage.measure.find_contours",
"cv2.resize",
"click.command",
"json.dump"
] | [((3319, 3334), 'click.command', 'click.command', ([], {}), '()\n', (3332, 3334), False, 'import click\n'), ((3336, 3357), 'click.argument', 'click.argument', (['"""src"""'], {}), "('src')\n", (3350, 3357), False, 'import click\n'), ((3359, 3497), 'click.option', 'click.option', (['"""--masks_dir"""'], {'default': '"""../raw-openimages/annotations/correct-masks"""', 'help': '"""directory where the masks are located"""'}), "('--masks_dir', default=\n '../raw-openimages/annotations/correct-masks', help=\n 'directory where the masks are located')\n", (3371, 3497), False, 'import click\n'), ((3497, 3624), 'click.option', 'click.option', (['"""--csv"""'], {'default': '"""../processing-notebooks/final.csv"""', 'help': '"""file with collected annotations from OpenImages"""'}), "('--csv', default='../processing-notebooks/final.csv', help=\n 'file with collected annotations from OpenImages')\n", (3509, 3624), False, 'import click\n'), ((3626, 3667), 'click.option', 'click.option', (['"""--save_path"""'], {'default': 'None'}), "('--save_path', default=None)\n", (3638, 3667), False, 'import click\n'), ((3669, 3713), 'click.option', 'click.option', (['"""--dataset_name"""'], {'default': 'None'}), "('--dataset_name', default=None)\n", (3681, 3713), False, 'import click\n'), ((3823, 3839), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (3834, 3839), True, 'import pandas as pd\n'), ((3928, 3956), 'numpy.unique', 'np.unique', (['df.ImageID.values'], {}), '(df.ImageID.values)\n', (3937, 3956), True, 'import numpy as np\n'), ((4144, 4160), 'tqdm.tqdm', 'tqdm', (['json_paths'], {}), '(json_paths)\n', (4148, 4160), False, 'from tqdm import tqdm\n'), ((553, 565), 'json.load', 'json.load', (['f'], {}), '(f)\n', (562, 565), False, 'import json\n'), ((632, 653), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (647, 653), False, 'import os\n'), ((2282, 2321), 'numpy.argmax', 'np.argmax', (['[x[0] for x in mask_data[i]]'], {}), '([x[0] for x in 
mask_data[i]])\n', (2291, 2321), True, 'import numpy as np\n'), ((2375, 2399), 'skimage.measure.find_contours', 'find_contours', (['mask', '(0.5)'], {}), '(mask, 0.5)\n', (2388, 2399), False, 'from skimage.measure import find_contours, approximate_polygon\n'), ((3984, 4004), 'os.path.join', 'os.path.join', (['src', 'x'], {}), '(src, x)\n', (3996, 4004), False, 'import os\n'), ((938, 966), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (954, 966), False, 'import os\n'), ((1588, 1634), 'cv2.resize', 'cv2.resize', (['segmentation_mask', '(width, height)'], {}), '(segmentation_mask, (width, height))\n', (1598, 1634), False, 'import cv2\n'), ((1962, 1994), 'numpy.pad', 'np.pad', (['mask', '(1)'], {'mode': '"""constant"""'}), "(mask, 1, mode='constant')\n", (1968, 1994), True, 'import numpy as np\n'), ((2014, 2029), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (2023, 2029), True, 'import numpy as np\n'), ((4022, 4037), 'os.listdir', 'os.listdir', (['src'], {}), '(src)\n', (4032, 4037), False, 'import os\n'), ((4719, 4743), 'json.dump', 'json.dump', (['all_shapes', 'f'], {}), '(all_shapes, f)\n', (4728, 4743), False, 'import json\n'), ((1456, 1490), 'os.path.join', 'os.path.join', (['masks_dir', 'mask_path'], {}), '(masks_dir, mask_path)\n', (1468, 1490), False, 'import os\n')] |
import numpy as np
from scipy.special import xlogy
import inspect
class Loss:
    """Callable wrapper that binds fixed keyword arguments to a loss function.

    Calling the instance with ``(y_true, y_pred)`` forwards both, together
    with the stored keyword arguments, to the wrapped callable.
    """

    def __init__(self, fn, **kwargs):
        # Remember the callable and the keyword arguments that every
        # invocation should receive.
        self.fn = fn
        self._fn_kwargs = dict(kwargs)

    def __call__(self, y_true, y_pred):
        """Evaluate the wrapped loss on the given targets and predictions."""
        return self.fn(y_true, y_pred, **self._fn_kwargs)
class BinaryCrossEntropy(Loss):
    """Binary cross-entropy loss; ``epsilon`` guards the log against 0."""

    def __init__(self, epsilon=1e-15):
        # Forward epsilon so binary_cross_entropy receives it on every call.
        super(BinaryCrossEntropy, self).__init__(binary_cross_entropy, epsilon=epsilon)
        self.epsilon = epsilon
class CategoricalCrossEntropy(Loss):
    """Categorical cross-entropy loss; ``epsilon`` bounds the clipping range."""

    def __init__(self, epsilon=1e-15):
        # Bug fix: epsilon was stored on the instance but never passed to the
        # wrapped function, so a caller-supplied epsilon was silently ignored
        # (compare BinaryCrossEntropy, which does forward it). Forward it now;
        # behavior is unchanged for the default value.
        super(CategoricalCrossEntropy, self).__init__(
            categorical_cross_entropy, epsilon=epsilon)
        self.epsilon = epsilon
def binary_cross_entropy(y_true, y_pred, epsilon=1e-15):
    """Mean binary cross-entropy over the first axis of ``y_true``.

    ``epsilon`` is added inside each log to avoid log(0); NaN terms are
    ignored by the nansum reduction.
    """
    n_samples = y_true.shape[0]
    positive_term = np.multiply(y_true, np.log(y_pred + epsilon))
    negative_term = np.multiply(1 - y_true, np.log(1 - y_pred + epsilon))
    return np.squeeze(-(1. / n_samples) * np.nansum(positive_term + negative_term))
def categorical_cross_entropy(y_true, y_pred, epsilon=1e-15):
    """Mean categorical cross-entropy over the first axis of ``y_true``.

    Predictions are clipped to ``[epsilon, 1 - epsilon]`` before the log;
    a small constant (1e-9) is additionally added inside the log.
    """
    n_samples = y_true.shape[0]
    clipped = np.clip(y_pred, epsilon, 1. - epsilon)
    log_likelihood = y_true * np.log(clipped + 1e-9)
    return -np.sum(log_likelihood) / n_samples
| [
"numpy.clip",
"numpy.log"
] | [((1035, 1074), 'numpy.clip', 'np.clip', (['y_pred', 'epsilon', '(1.0 - epsilon)'], {}), '(y_pred, epsilon, 1.0 - epsilon)\n', (1042, 1074), True, 'import numpy as np\n'), ((1098, 1125), 'numpy.log', 'np.log', (['(predictions + 1e-09)'], {}), '(predictions + 1e-09)\n', (1104, 1125), True, 'import numpy as np\n'), ((801, 825), 'numpy.log', 'np.log', (['(y_pred + epsilon)'], {}), '(y_pred + epsilon)\n', (807, 825), True, 'import numpy as np\n'), ((897, 925), 'numpy.log', 'np.log', (['(1 - y_pred + epsilon)'], {}), '(1 - y_pred + epsilon)\n', (903, 925), True, 'import numpy as np\n')] |
import numpy as np
# Two identical 0..9 columns placed side by side -> shape (10, 2).
a = np.column_stack((np.arange(10), np.arange(10)))
print(a.shape)
| [
"numpy.arange"
] | [((34, 47), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (43, 47), True, 'import numpy as np\n'), ((49, 62), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (58, 62), True, 'import numpy as np\n')] |
"""Simple layer profile plots"""
import numpy as np
import nibabel as nb
import matplotlib.pyplot as plt
# Output nifti from 01_simulate_layers
# Activation-like values sampled at 20Hz geometry (output of 01_simulate_layers).
FILE1 = "/home/faruk/gdrive/LAYNII/demo_big3/M_brain_rim_metric_equidist_simulated_layers_noised.nii.gz"
# FILE1 = "/home/faruk/gdrive/LAYNII/demo_big3/M_brain_rim_metric_equidist_simulated_layers_brain_draining_veins_noised.nii.gz"
# Metric file generated by LN2_LAYERS
FILE2 = "/home/faruk/gdrive/LAYNII/demo_big3/M_brain_rim_metric_equidist.nii.gz"
FILE3 = "/home/faruk/gdrive/LAYNII/demo_big3/M_brain_rim_layers_equidist.nii.gz"
# Column or area definitions
FILE4 = "/home/faruk/gdrive/LAYNII/demo_big3/M_brain_rim_columns33.nii.gz"

# =============================================================================
# Load image data, you can think of this as an activation map.
nii1 = nb.load(FILE1)
data = nii1.get_fdata()

# Load normalized depths
nii2 = nb.load(FILE2)
norm_depth = nii2.get_fdata()

# Load layers (quantized normalized depths)
nii3 = nb.load(FILE3)
layers = nii3.get_fdata()
idx_layers = np.unique(layers.astype("int"))
idx_layers = idx_layers[1:]  # Remove layers with index 0
nr_layers = idx_layers.size
layer_bins = np.zeros(nr_layers + 1)  # +1 for zeros indices

# Load columns
nii4 = nb.load(FILE4)
colums = nii4.get_fdata()
idx_columns = np.unique(colums)
idx_columns = idx_columns[1:]  # Remove columns with index 0

# Prepare plot: left panel = raw depth scatter, right panel = binned profile.
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.suptitle('Layer profiles in two alternative ways')

for j in idx_columns:
    # Take voxels of a single column
    idx = colums == j
    data_roi = data[idx]
    norm_depth_roi = norm_depth[idx]
    layers_roi = layers[idx]

    # Before layer quantization
    ax1.scatter(norm_depth_roi, data_roi, alpha=0.1, marker=".")
    ax1.set_xlim((0, 1))
    ax1.set_ylim((90, 120))
    ax1.set_xlabel("Normalized cortical depth")
    ax1.set_ylabel("Voxel value")

    # After layer quantization
    for i in idx_layers:  # Compute bin averages
        layer_bins[i] = np.mean(data_roi[layers_roi == i])
    ax2.plot(idx_layers, layer_bins[1:], linewidth=1)
    ax2.set_ylim((90, 120))
    ax2.set_xlabel("Layers (0=white matter)")
    ax2.set_ylabel("Voxel value")

plt.show()
print("Finished.")
| [
"numpy.mean",
"numpy.unique",
"nibabel.load",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((836, 850), 'nibabel.load', 'nb.load', (['FILE1'], {}), '(FILE1)\n', (843, 850), True, 'import nibabel as nb\n'), ((907, 921), 'nibabel.load', 'nb.load', (['FILE2'], {}), '(FILE2)\n', (914, 921), True, 'import nibabel as nb\n'), ((1003, 1017), 'nibabel.load', 'nb.load', (['FILE3'], {}), '(FILE3)\n', (1010, 1017), True, 'import nibabel as nb\n'), ((1188, 1211), 'numpy.zeros', 'np.zeros', (['(nr_layers + 1)'], {}), '(nr_layers + 1)\n', (1196, 1211), True, 'import numpy as np\n'), ((1259, 1273), 'nibabel.load', 'nb.load', (['FILE4'], {}), '(FILE4)\n', (1266, 1273), True, 'import nibabel as nb\n'), ((1314, 1331), 'numpy.unique', 'np.unique', (['colums'], {}), '(colums)\n', (1323, 1331), True, 'import numpy as np\n'), ((1427, 1445), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (1439, 1445), True, 'import matplotlib.pyplot as plt\n'), ((2210, 2220), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2218, 2220), True, 'import matplotlib.pyplot as plt\n'), ((2012, 2046), 'numpy.mean', 'np.mean', (['data_roi[layers_roi == i]'], {}), '(data_roi[layers_roi == i])\n', (2019, 2046), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Script to produce protein and nucleic acid dot plots.
-- PJMartel 2018
"""
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle, Circle
import numpy as np
from sys import argv
from consequent.matrix import readScoreMatrix, getMatrix
from consequent.sequence import getUniprotSeq
import matplotlib.pyplot as plt
import matplotlib
from scipy.signal import convolve2d as cv2
from matplotlib.ticker import MultipleLocator
from matplotlib.widgets import Slider
import argparse
class DotPlot:
    """Dot-plot comparison of two biological sequences using a scoring matrix."""

    def __init__(self, score_matrix='BLOSUM62', seqs=('ADE', 'WCA')):
        """Load the scoring matrix and store the two sequences to compare.

        Args:
            score_matrix: name/path of the substitution matrix file.
            seqs: pair of sequences (anything indexable yielding residues).
        """
        readScoreMatrix(score_matrix)
        print("Using scoring matrix '{}'".format(score_matrix))
        self.seqs = seqs

    def createPlot(self, window_size=1, cut_off=-1):
        """Build the dot matrix; optionally smooth and threshold it.

        Args:
            window_size: diagonal averaging window (>1 applies a convolution
                with an identity kernel and trims the border).
            cut_off: fraction in [0, 1] of the min..max range; scores at or
                below the threshold become 0, everything else 1. -1 disables
                thresholding.

        Returns:
            2D numpy array of pairwise scores (binary if cut_off != -1).
        """
        # Convert seqs to numerical: residues become 0-based indices into
        # the scoring matrix (ord(residue) - ord('A')).
        seqA = np.array(list(map(ord, self.seqs[0])), dtype=np.int8)
        seqB = np.array(list(map(ord, self.seqs[1])), dtype=np.int8)
        seqA -= ord("A")
        seqB -= ord("A")
        plot_size = (len(seqA), len(seqB))
        (ix, iy) = np.meshgrid(range(len(seqB)), range(len(seqA)))
        # Fancy-index the scoring matrix to get all pairwise scores at once.
        self.dotMatrix = getMatrix()[seqB[ix], seqA[iy]]
        M = self.dotMatrix
        print("Min and Max befor kernel: ", M.min(), M.max())
        if window_size > 1:
            # Diagonal-window smoothing: convolve with an identity kernel
            # (note: cv2 here is scipy.signal.convolve2d, not OpenCV).
            kernel = np.eye(window_size)
            M = cv2(M, kernel, mode='same', boundary='fill')
            trim = (window_size-1)//2
            M = M[trim:-trim, trim:-trim]
            print("Min and Max after kernel: ", M.min(), M.max())
        if cut_off != -1:
            # Binarize relative to the score range.
            c = M.min() + (M.max()-M.min())*cut_off
            #print("Min, Max, c-par, cut_off: ", M.min(), M.max(), c, cut_off)
            #print(len(M[M <= c]))
            M[M <= c] = 0
            M[M != 0] = 1
        #print("Max and min elements;", M.min(), M.max())
        #print("Count of 0's and 1's :", M.size-M.sum(), M.sum(), (M.size-M.sum())+M.sum() )
        return M

    # def calcScore(self, posA, posB, seqs, win_size):
    #     if posA < win_size or posA > len(seqs[0])-win_size:
    #         return 0
    #     if posB < win_size or posB > len(seqs[1])-win_size:
    #         return 0
    #     score = 0
    #     for i in range(-win_size, +win_size):
    #         score += nPairScore(seqs[0][i], seqs[1][i])
    #     return score
def main():
    """Parse CLI options, fetch both Uniprot sequences, and show an
    interactive dot plot with cut-off and window-size sliders."""

    def update(val, cut_off, window_size):
        # Slider callback: recompute the dot matrix from current slider
        # values and refresh the displayed image.
        if cut_off != -1:
            cut_off = scut.val
        # else :
        #     custom_cmap = cmap.
        print("Cutoff ", scut.val)
        window_size = int(swin.val)
        M = A.createPlot(window_size, cut_off)
        im.set_data(M)
        # im.set_cmap('')

    # Parse commmand line arguments
    parser = argparse.ArgumentParser(
        "dot_plot.py", description="Biological sequence dotplot comparator.")
    parser.add_argument("-w", "--window-size",
                        help="Size of the averaging window.", default=1, type=int)
    parser.add_argument("-c", "--cut-off",
                        help="Cutoff for plot coloring.", default=-1, type=float)
    parser.add_argument("-s", "--size", nargs=2, metavar=('width', 'height'),
                        help="Plot size (width height)",
                        type=int, default=[10, 10])
    parser.add_argument("-m", "--score-matrix",
                        help="Scoring matrix file.", default="BLOSUM62", type=str)
    parser.add_argument('UniprotA', help="Uniprot sequence code")
    parser.add_argument('UniprotB', help="Uniprot sequence code")
    args = parser.parse_args()
    width = args.size[0]
    height = args.size[1]
    window_size = args.window_size
    cut_off = args.cut_off
    plot_size = args.size
    score_matrix = args.score_matrix
    # Fetch sequences from Uniprot by accession code.
    seqA = getUniprotSeq(args.UniprotA)
    seqB = getUniprotSeq(args.UniprotB)
    print("Window size: ", window_size)
    print("Cutoff: ", cut_off)
    print("Plot size: ", (width, height))
    print(seqA.description)
    print(seqB.description)
    A = DotPlot(score_matrix=score_matrix, seqs=(seqA, seqB))
    M = A.createPlot(window_size, cut_off)
    #plt.rcParams['xtick.bottom'] = plt.rcParams['xtick.labelbottom'] = False
    #plt.rcParams['xtick.top'] = plt.rcParams['xtick.labeltop'] = True
    matplotlib.rc('axes', labelsize=15)
    #fig, ax = plt.subplots(figsize=(10.6,10.6))
    fig, ax = plt.subplots(figsize=(10.6, 10.6))
    plt.subplots_adjust(left=0.25, bottom=0.25)
    ax.xaxis.set_major_locator(MultipleLocator(50))
    ax.yaxis.set_major_locator(MultipleLocator(50))
    # ax.set_xticks(major_ticks)
    #ax.set_xticks(minor_ticks, minor=True)
    # ax.set_yticks(major_ticks)
    #ax.set_yticks(minor_ticks, minor=True)
    # disable tick labels
    #labels = [item.get_text() for item in ax.get_xticklabels()]
    #empty_string_labels = ['']*len(labels)
    # ax.set_xticklabels(empty_string_labels)
    #labels = [item.get_text() for item in ax.get_yticklabels()]
    #empty_string_labels = ['']*len(labels)
    # ax.set_yticklabels(empty_string_labels)
    # enable grid
    # ax.grid(which='both')
    #ax.grid(which='minor', alpha=0.2)
    #ax.grid(which='major', alpha=0.5)
    # set labels
    # ax.set_xlabel(seqA.description)
    # ax.set_ylabel(seqB.description)
    ax.set_xlabel(args.UniprotA)
    ax.set_ylabel(args.UniprotB)
    # ax.set_xlabel(seqA.seq)
    # ax.set_ylabel(seqB.seq[::-1])
    # move the x axis indicators to top
    ax.xaxis.set_ticks_position("top")
    ax.xaxis.set_label_position('top')
    # sliders
    axcolor = 'lightgoldenrodyellow'
    axcut = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
    axwin = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)
    scut = Slider(axcut, 'Cut-off', 0.0, 1.0, valinit=cut_off)
    swin = Slider(axwin, 'Window-size', 1, 25, valinit=window_size, valstep=2)
    # The lambdas close over the initial cut_off/window_size; update()
    # re-reads the current slider values itself.
    scut.on_changed(lambda e: update(e, cut_off, window_size))
    swin.on_changed(lambda e: update(e, cut_off, window_size))
    # if cut_off == -1 :
    #     axcmap = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)
    #     scmap = Slider(axcmpa, 'Gradient', 0, 0.4, valinit=window_size)
    # show it
    im = ax.imshow(M, cmap='Greys', interpolation='none')
    fig.colorbar(im, ax=ax, shrink=0.8)
    plt.show()
if __name__ == "__main__":
main()
| [
"consequent.sequence.getUniprotSeq",
"scipy.signal.convolve2d",
"consequent.matrix.readScoreMatrix",
"numpy.eye",
"argparse.ArgumentParser",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.axes",
"matplotlib.rc",
"consequent.matrix.getMatrix",
"matplotlib.widgets.Slider",
"matplotlib.pyp... | [((2837, 2935), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""dot_plot.py"""'], {'description': '"""Biological sequence dotplot comparator."""'}), "('dot_plot.py', description=\n 'Biological sequence dotplot comparator.')\n", (2860, 2935), False, 'import argparse\n'), ((3867, 3895), 'consequent.sequence.getUniprotSeq', 'getUniprotSeq', (['args.UniprotA'], {}), '(args.UniprotA)\n', (3880, 3895), False, 'from consequent.sequence import getUniprotSeq\n'), ((3907, 3935), 'consequent.sequence.getUniprotSeq', 'getUniprotSeq', (['args.UniprotB'], {}), '(args.UniprotB)\n', (3920, 3935), False, 'from consequent.sequence import getUniprotSeq\n'), ((4367, 4402), 'matplotlib.rc', 'matplotlib.rc', (['"""axes"""'], {'labelsize': '(15)'}), "('axes', labelsize=15)\n", (4380, 4402), False, 'import matplotlib\n'), ((4466, 4500), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10.6, 10.6)'}), '(figsize=(10.6, 10.6))\n', (4478, 4500), True, 'import matplotlib.pyplot as plt\n'), ((4505, 4548), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.25)', 'bottom': '(0.25)'}), '(left=0.25, bottom=0.25)\n', (4524, 4548), True, 'import matplotlib.pyplot as plt\n'), ((5678, 5730), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.1, 0.65, 0.03]'], {'facecolor': 'axcolor'}), '([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\n', (5686, 5730), True, 'import matplotlib.pyplot as plt\n'), ((5743, 5796), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.15, 0.65, 0.03]'], {'facecolor': 'axcolor'}), '([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)\n', (5751, 5796), True, 'import matplotlib.pyplot as plt\n'), ((5808, 5859), 'matplotlib.widgets.Slider', 'Slider', (['axcut', '"""Cut-off"""', '(0.0)', '(1.0)'], {'valinit': 'cut_off'}), "(axcut, 'Cut-off', 0.0, 1.0, valinit=cut_off)\n", (5814, 5859), False, 'from matplotlib.widgets import Slider\n'), ((5871, 5938), 'matplotlib.widgets.Slider', 'Slider', (['axwin', '"""Window-size"""', 
'(1)', '(25)'], {'valinit': 'window_size', 'valstep': '(2)'}), "(axwin, 'Window-size', 1, 25, valinit=window_size, valstep=2)\n", (5877, 5938), False, 'from matplotlib.widgets import Slider\n'), ((6348, 6358), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6356, 6358), True, 'import matplotlib.pyplot as plt\n'), ((727, 756), 'consequent.matrix.readScoreMatrix', 'readScoreMatrix', (['score_matrix'], {}), '(score_matrix)\n', (742, 756), False, 'from consequent.matrix import readScoreMatrix, getMatrix\n'), ((4580, 4599), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(50)'], {}), '(50)\n', (4595, 4599), False, 'from matplotlib.ticker import MultipleLocator\n'), ((4632, 4651), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(50)'], {}), '(50)\n', (4647, 4651), False, 'from matplotlib.ticker import MultipleLocator\n'), ((1259, 1270), 'consequent.matrix.getMatrix', 'getMatrix', ([], {}), '()\n', (1268, 1270), False, 'from consequent.matrix import readScoreMatrix, getMatrix\n'), ((1431, 1450), 'numpy.eye', 'np.eye', (['window_size'], {}), '(window_size)\n', (1437, 1450), True, 'import numpy as np\n'), ((1467, 1511), 'scipy.signal.convolve2d', 'cv2', (['M', 'kernel'], {'mode': '"""same"""', 'boundary': '"""fill"""'}), "(M, kernel, mode='same', boundary='fill')\n", (1470, 1511), True, 'from scipy.signal import convolve2d as cv2\n')] |
#!/usr/bin/env python
u"""
read_cryosat_L2.py
Written by <NAME> (05/2021)
Reads CryoSat Level-2 data products from baselines A, B and C
Reads CryoSat Level-2 netCDF4 data products from baseline D
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
INPUTS:
full_filename: full path of CryoSat .DBL or .nc file
OUTPUTS:
Data_1Hz: Time and Orbit Parameters
Corrections: Elevation Corrections and Flags
Data_20Hz: Geolocation and Elevation Measurements with Quality Parameters
METADATA: MPH, SPH and DSD Header data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Updated 05/2021: use raw binary string prefixes (rb) for regular expressions
Updated 02/2021: replaced numpy bool to prevent deprecation warning
Updated 06/2020: patch error in CryoSat-2 GDR pointer variables
using the 1Hz mapping variable ind_meas_1hz_20_ku to remap the index
Updated 02/2020: tilde-expansion of cryosat-2 files before opening
add scale factors function for converting packed units in binary files
convert from hard to soft tabulation
Updated 11/2019: empty placeholder dictionary for baseline D DSD headers
Updated 09/2019: added netCDF4 read function for baseline D
will output with same variable names as the binary read functions
output 20Hz data as masked arrays for all baselines
Updated 08/2019: generalize regular expression patterns in read_DSD function
Updated 10/2018: updated header read functions for python3
Updated 11/2016: added Abs_Orbit and Ascending_flag to Data_1Hz outputs
Abs_Orbit should be same as in read_cryosat_ground_tracks.py
Ascending_flag can use in surface regression fits (McMillan, 2014)
Updated 05/2016: using __future__ print and division functions
Written 03/2016
"""
from __future__ import print_function
from __future__ import division
import os
import re
import netCDF4
import numpy as np
#-- PURPOSE: Initiate L2 MDS variables for CryoSat Baselines A and B
def cryosat_baseline_AB(fid, record_size, n_records):
    """
    Read CryoSat-2 Level-2 MDS variables for Baselines A and B

    Arguments
    ---------
    fid: open file object for the CryoSat .DBL file, positioned at the
        start of the L2 Measurement Data Set records
    record_size: size of each MDS record in bytes (retained for
        interface compatibility; the record layout below is fixed)
    n_records: number of 1Hz records within the data set

    Returns
    -------
    dictionary binding the Time and Orbit group (Data_1Hz), the
    External Corrections group (Corrections) and the 20Hz Measurement
    group (Data_20Hz) of the L2 MDS
    """
    #-- number of 20Hz measurement blocks within each 1Hz record
    n_blocks = 20
    #-- CryoSat-2 1 Hz data fields (Location Group): big-endian layout
    #-- Time and Orbit Parameters plus Measurement Mode
    location_dtype = [
        #-- Time: day, second and microsecond parts
        ('Day','>i4'),('Second','>i4'),('Micsec','>i4'),
        #-- SIRAL mode
        ('Siral_mode','>u8'),
        #-- Lat_1Hz, Lon_1Hz: packed units (0.1 micro-degree, 1e-7 degrees)
        ('Lat_1Hz','>i4'),('Lon_1Hz','>i4'),
        #-- Alt_1Hz: packed units (mm, 1e-3 m)
        #-- Altitude of COG above reference ellipsoid (interpolated value)
        ('Alt_1Hz','>i4'),
        #-- Mispointing: packed units (millidegrees, 1e-3 degrees)
        ('Mispointing','>i2'),
        #-- Number of valid records in the block of twenty that contain data
        #-- Last few records of the last block of a dataset may be blank blocks
        #-- inserted to bring the file up to a multiple of twenty.
        ('N_valid','>i2')]
    #-- CryoSat-2 geophysical corrections (External Corrections Group)
    corrections_dtype = [
        #-- tropospheric, barometric, atmospheric and ionospheric
        #-- corrections: packed units (mm, 1e-3 m)
        ('dryTrop','>i2'),('wetTrop','>i2'),('InvBar','>i2'),('DAC','>i2'),
        ('Iono','>i2'),
        #-- Sea State Bias Correction packed units (mm, 1e-3 m)
        ('SSB','>i2'),
        #-- tidal corrections: packed units (mm, 1e-3 m)
        ('ocTideElv','>i2'),('lpeTideElv','>i2'),('olTideElv','>i2'),
        ('seTideElv','>i2'),('gpTideElv','>i2'),('Spare1','>i2'),
        #-- Surface Type: packed in groups of three bits for each of the 20 records
        ('Surf_type','>u8'),
        #-- Mean Sea Surface or Geoid (mm) and Ocean Depth/Land Elevation (mm)
        ('MSS_Geoid','>i4'),('ODLE','>i4'),
        #-- sea ice: concentration (%/100), snow depth (mm), density (kg/m^3)
        ('Ice_conc','>i2'),('Snow_depth','>i2'),('Snow_density','>i2'),
        ('Spare2','>i2'),
        #-- Corrections Status Flag
        ('C_status','>u4'),
        #-- Significant Wave Height (mm) and Wind Speed (mm/s)
        ('SWH','>i2'),('Wind_speed','>u2'),
        ('Spare3','>i2'),('Spare4','>i2'),('Spare5','>i2'),('Spare6','>i2')]
    #-- CryoSat-2 20 Hz data fields (Measurement Group)
    #-- Derived from instrument measurement parameters
    measurement_dtype = [
        #-- Delta between the timestamps for the 20Hz record and the
        #-- 1Hz record: packed units (microseconds)
        ('D_time_mics','>i4'),
        #-- Lat, Lon: packed units (0.1 micro-degree, 1e-7 degrees)
        ('Lat','>i4'),('Lon','>i4'),
        #-- Measured elevation above ellipsoid from retracker (mm, 1e-3 m)
        ('Elev','>i4'),
        #-- Interpolated Sea Surface Height Anomaly (mm), measurement
        #-- count and interpolation quality estimate RSS (mm)
        ('SSHA_interp','>i2'),('SSHA_interp_count','>i2'),
        ('SSHA_interp_RMS','>i2'),
        #-- Sigma Zero Backscatter for retracker: packed units (1e-2 dB)
        ('Sig0','>i2'),
        #-- Peakiness: packed units (1e-2)
        ('Peakiness','>u2'),
        #-- Freeboard: packed units (mm, 1e-3 m)
        #-- -9999 default value indicates computation has not been performed
        ('Freeboard','>i2'),
        #-- Number of averaged echoes or beams
        ('N_avg','>i2'),('Spare1','>i2'),
        #-- Quality flags
        ('Quality_flag','>u4'),
        ('Spare2','>i2'),('Spare3','>i2'),('Spare4','>i2'),('Spare5','>i2')]
    #-- complete MDS record: 1Hz location group, 1Hz corrections group
    #-- and twenty 20Hz measurement blocks
    record_dtype = np.dtype([('Location',location_dtype),
        ('Corrections',corrections_dtype),
        ('Measurement',measurement_dtype,(n_blocks,))])
    #-- read every record with a single buffered read: far faster than
    #-- one np.fromfile call per field per record
    mds = np.frombuffer(fid.read(record_dtype.itemsize*n_records),
        dtype=record_dtype, count=n_records)
    #-- CryoSat-2 1Hz fields converted to native byte order
    Data_1Hz = {}
    for key,_ in location_dtype:
        field = mds['Location'][key]
        Data_1Hz[key] = field.astype(field.dtype.newbyteorder('='))
    #-- CryoSat-2 geophysical corrections converted to native byte order
    Corrections = {}
    for key,_ in corrections_dtype:
        field = mds['Corrections'][key]
        Corrections[key] = field.astype(field.dtype.newbyteorder('='))
    #-- 20Hz fields as masked arrays: blocks at or beyond the 1Hz
    #-- N_valid count are blank fill and are masked
    invalid = np.arange(n_blocks)[None,:] >= Data_1Hz['N_valid'][:,None]
    Data_20Hz = {}
    for key,_ in measurement_dtype:
        field = mds['Measurement'][key]
        Data_20Hz[key] = np.ma.array(
            field.astype(field.dtype.newbyteorder('=')),
            mask=invalid.copy())
    #-- Bind all the bits of the l2_mds together into a single dictionary
    CS_l2_mds = {}
    CS_l2_mds['Data_1Hz'] = Data_1Hz
    CS_l2_mds['Corrections'] = Corrections
    CS_l2_mds['Data_20Hz'] = Data_20Hz
    #-- return the output dictionary
    return CS_l2_mds
#-- PURPOSE: Initiate L2 MDS variables for CryoSat Baseline C
def cryosat_baseline_C(fid, record_size, n_records):
    """
    Read CryoSat-2 Level-2 MDS variables for Baseline C

    Arguments
    ---------
    fid: open file object for the CryoSat .DBL file, positioned at the
        start of the L2 Measurement Data Set records
    record_size: size of each MDS record in bytes (retained for
        interface compatibility; the record layout below is fixed)
    n_records: number of 1Hz records within the data set

    Returns
    -------
    dictionary binding the Time and Orbit group (Data_1Hz), the
    External Corrections group (Corrections) and the 20Hz Measurement
    group (Data_20Hz) of the L2 MDS
    """
    #-- number of 20Hz measurement blocks within each 1Hz record
    n_blocks = 20
    #-- CryoSat-2 1 Hz data fields (Location Group): big-endian layout
    #-- Time and Orbit Parameters plus Measurement Mode
    location_dtype = [
        #-- Time: day, second and microsecond parts
        ('Day','>i4'),('Second','>i4'),('Micsec','>i4'),
        #-- SIRAL mode
        ('Siral_mode','>u8'),
        #-- Lat_1Hz, Lon_1Hz: packed units (0.1 micro-degree, 1e-7 degrees)
        ('Lat_1Hz','>i4'),('Lon_1Hz','>i4'),
        #-- Alt_1Hz: packed units (mm, 1e-3 m)
        #-- Altitude of COG above reference ellipsoid (interpolated value)
        ('Alt_1Hz','>i4'),
        #-- Roll, Pitch, Yaw: packed units (0.1 micro-degree, 1e-7 degrees)
        ('Roll','>i4'),('Pitch','>i4'),('Yaw','>i4'),
        ('Spare','>i2'),
        #-- Number of valid records in the block of twenty that contain data
        #-- Last few records of the last block of a dataset may be blank blocks
        #-- inserted to bring the file up to a multiple of twenty.
        ('N_valid','>i2')]
    #-- CryoSat-2 geophysical corrections (External Corrections Group)
    corrections_dtype = [
        #-- tropospheric, barometric, atmospheric and ionospheric
        #-- corrections: packed units (mm, 1e-3 m)
        ('dryTrop','>i2'),('wetTrop','>i2'),('InvBar','>i2'),('DAC','>i2'),
        ('Iono','>i2'),
        #-- Sea State Bias Correction packed units (mm, 1e-3 m)
        ('SSB','>i2'),
        #-- tidal corrections: packed units (mm, 1e-3 m)
        ('ocTideElv','>i2'),('lpeTideElv','>i2'),('olTideElv','>i2'),
        ('seTideElv','>i2'),('gpTideElv','>i2'),('Spare1','>i2'),
        #-- Surface Type: packed in groups of three bits for each of the 20 records
        ('Surf_type','>u8'),
        #-- Mean Sea Surface or Geoid (mm) and Ocean Depth/Land Elevation (mm)
        ('MSS_Geoid','>i4'),('ODLE','>i4'),
        #-- sea ice: concentration (%/100), snow depth (mm), density (kg/m^3)
        ('Ice_conc','>i2'),('Snow_depth','>i2'),('Snow_density','>i2'),
        ('Spare2','>i2'),
        #-- Corrections Status Flag
        ('C_status','>u4'),
        #-- Significant Wave Height (mm) and Wind Speed (mm/s)
        ('SWH','>i2'),('Wind_speed','>u2'),
        ('Spare3','>i2'),('Spare4','>i2'),('Spare5','>i2'),('Spare6','>i2')]
    #-- CryoSat-2 20 Hz data fields (Measurement Group)
    #-- Derived from instrument measurement parameters
    measurement_dtype = [
        #-- Delta between the timestamps for the 20Hz record and the
        #-- 1Hz record: packed units (microseconds)
        ('D_time_mics','>i4'),
        #-- Lat, Lon: packed units (0.1 micro-degree, 1e-7 degrees)
        ('Lat','>i4'),('Lon','>i4'),
        #-- Measured elevations above ellipsoid from retrackers 1, 2 and 3:
        #-- packed units (mm, 1e-3 m)
        ('Elev_1','>i4'),('Elev_2','>i4'),('Elev_3','>i4'),
        #-- Sigma Zero Backscatter for retrackers 1, 2 and 3 (1e-2 dB)
        ('Sig0_1','>i2'),('Sig0_2','>i2'),('Sig0_3','>i2'),
        #-- Freeboard: packed units (mm, 1e-3 m)
        #-- -9999 default value indicates computation has not been performed
        ('Freeboard','>i2'),
        #-- Interpolated Sea Surface Height Anomaly (mm), measurement
        #-- count and interpolation quality estimate RSS (mm)
        ('SSHA_interp','>i2'),('SSHA_interp_count','>i2'),
        ('SSHA_interp_RMS','>i2'),
        #-- Peakiness: packed units (1e-2)
        ('Peakiness','>u2'),
        #-- Number of averaged echoes or beams
        ('N_avg','>i2'),('Spare1','>i2'),
        #-- Quality flags and Corrections Application Flag
        ('Quality_flag','>u4'),('Corrections_flag','>u4'),
        #-- Quality metrics for retrackers 1, 2 and 3
        ('Quality_1','>i4'),('Quality_2','>i4'),('Quality_3','>i4')]
    #-- complete MDS record: 1Hz location group, 1Hz corrections group
    #-- and twenty 20Hz measurement blocks
    record_dtype = np.dtype([('Location',location_dtype),
        ('Corrections',corrections_dtype),
        ('Measurement',measurement_dtype,(n_blocks,))])
    #-- read every record with a single buffered read: far faster than
    #-- one np.fromfile call per field per record
    mds = np.frombuffer(fid.read(record_dtype.itemsize*n_records),
        dtype=record_dtype, count=n_records)
    #-- CryoSat-2 1Hz fields converted to native byte order
    Data_1Hz = {}
    for key,_ in location_dtype:
        field = mds['Location'][key]
        Data_1Hz[key] = field.astype(field.dtype.newbyteorder('='))
    #-- CryoSat-2 geophysical corrections converted to native byte order
    Corrections = {}
    for key,_ in corrections_dtype:
        field = mds['Corrections'][key]
        Corrections[key] = field.astype(field.dtype.newbyteorder('='))
    #-- 20Hz fields as masked arrays: blocks at or beyond the 1Hz
    #-- N_valid count are blank fill and are masked
    invalid = np.arange(n_blocks)[None,:] >= Data_1Hz['N_valid'][:,None]
    Data_20Hz = {}
    for key,_ in measurement_dtype:
        field = mds['Measurement'][key]
        Data_20Hz[key] = np.ma.array(
            field.astype(field.dtype.newbyteorder('=')),
            mask=invalid.copy())
    #-- Bind all the bits of the l2_mds together into a single dictionary
    CS_l2_mds = {}
    CS_l2_mds['Data_1Hz'] = Data_1Hz
    CS_l2_mds['Corrections'] = Corrections
    CS_l2_mds['Data_20Hz'] = Data_20Hz
    #-- return the output dictionary
    return CS_l2_mds
#-- PURPOSE: Initiate L2 MDS variables for CryoSat Baseline D (netCDF4)
def cryosat_baseline_D(full_filename, UNPACK=False):
    """
    Read CryoSat-2 Level-2 variables from a Baseline-D netCDF4 file.

    Parameters
    ----------
    full_filename: full path to the CryoSat-2 Level-2 netCDF4 file
    UNPACK: passed to netCDF4 ``set_auto_scale``; if True, variables are
        unpacked with their scale_factor/add_offset attributes, otherwise
        the original packed units are returned

    Returns
    -------
    CS_l2_mds: dictionary with keys
        'Data_1Hz': time, orbit and location parameters at 1 Hz
        'Corrections': geophysical corrections at 1 Hz
        'Data_20Hz': measurement group variables remapped into
            (n_records, 20) masked arrays (invalid blocks masked)
        'METADATA': MPH/SPH/DSD attributes copied from the netCDF4
            global attributes
    """
    #-- open netCDF4 file for reading
    fid = netCDF4.Dataset(os.path.expanduser(full_filename),'r')
    #-- use original unscaled units unless UNPACK=True
    fid.set_auto_scale(UNPACK)
    #-- get dimensions
    time_cor_01 = fid.variables['time_cor_01'][:].copy()
    time_20_ku = fid.variables['time_20_ku'][:].copy()
    n_records, = time_cor_01.shape
    #-- each 1Hz record maps to a block of (up to) twenty 20Hz measurements
    n_blocks = 20
    #-- CryoSat-2 1 Hz data fields (Location Group)
    #-- Time and Orbit Parameters plus Measurement Mode
    Data_1Hz = {}
    #-- Time (seconds since 2000-01-01)
    Data_1Hz['Time'] = time_cor_01.copy()
    #-- Time: day part
    Data_1Hz['Day'] = np.array(time_cor_01/86400.0,dtype=np.int32)
    #-- Time: second part
    Data_1Hz['Second'] = np.array(time_cor_01-Data_1Hz['Day'][:]*86400.0,dtype=np.int32)
    #-- Time: microsecond part
    Data_1Hz['Micsec'] = np.array((time_cor_01-Data_1Hz['Day'][:]*86400.0-
        Data_1Hz['Second'][:])*1e6,dtype=np.int32)
    #-- Lat_1Hz: packed units (0.1 micro-degree, 1e-7 degrees)
    Data_1Hz['Lat_1Hz'] = fid.variables['lat_01'][:].copy()
    #-- Lon_1Hz: packed units (0.1 micro-degree, 1e-7 degrees)
    Data_1Hz['Lon_1Hz'] = fid.variables['lon_01'][:].copy()
    #-- Alt_1Hz: packed units (mm, 1e-3 m)
    #-- Altitude of COG above reference ellipsoid (interpolated value)
    Data_1Hz['Alt_1Hz'] = fid.variables['alt_01'][:].copy()
    #-- Roll: packed units (0.1 micro-degree, 1e-7 degrees)
    Data_1Hz['Roll'] = fid.variables['off_nadir_roll_angle_str_01'][:].copy()
    #-- Pitch: packed units (0.1 micro-degree, 1e-7 degrees)
    Data_1Hz['Pitch'] = fid.variables['off_nadir_pitch_angle_str_01'][:].copy()
    #-- Yaw: packed units (0.1 micro-degree, 1e-7 degrees)
    Data_1Hz['Yaw'] = fid.variables['off_nadir_yaw_angle_str_01'][:].copy()
    #-- Number of valid records in the block of twenty that contain data
    #-- Last few records of the last block of a dataset may be blank blocks
    #-- inserted to bring the file up to a multiple of twenty.
    Data_1Hz['N_valid'] = fid.variables['num_valid_01'][:].copy()
    #-- add absolute orbit number to 1Hz data
    Data_1Hz['Abs_Orbit'] = np.zeros((n_records),dtype=np.uint32)
    Data_1Hz['Abs_Orbit'][:] = np.uint32(fid.abs_orbit_number)
    #-- add ascending/descending flag to 1Hz data (A=ascending,D=descending)
    Data_1Hz['Ascending_flag'] = np.zeros((n_records),dtype=bool)
    Data_1Hz['Ascending_flag'][:] = (fid.ascending_flag == 'A')
    #-- CryoSat-2 geophysical corrections (External Corrections Group)
    Corrections = {}
    #-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
    Corrections['dryTrop'] = fid.variables['mod_dry_tropo_cor_01'][:].copy()
    #-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
    Corrections['wetTrop'] = fid.variables['mod_wet_tropo_cor_01'][:].copy()
    #-- Inverse Barometric Correction packed units (mm, 1e-3 m)
    Corrections['InvBar'] = fid.variables['inv_bar_cor_01'][:].copy()
    #-- Dynamic Atmosphere Correction packed units (mm, 1e-3 m)
    Corrections['DAC'] = fid.variables['hf_fluct_total_cor_01'][:].copy()
    #-- Ionospheric Correction packed units (mm, 1e-3 m)
    Corrections['Iono'] = fid.variables['iono_cor_01'][:].copy()
    Corrections['Iono_GIM'] = fid.variables['iono_cor_gim_01'][:].copy()
    #-- Sea State Bias Correction packed units (mm, 1e-3 m)
    Corrections['SSB'] = fid.variables['sea_state_bias_01_ku'][:].copy()
    #-- Ocean tide Correction packed units (mm, 1e-3 m)
    Corrections['ocTideElv'] = fid.variables['ocean_tide_01'][:].copy()
    #-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
    Corrections['lpeTideElv'] = fid.variables['ocean_tide_eq_01'][:].copy()
    #-- Ocean loading tide Correction packed units (mm, 1e-3 m)
    Corrections['olTideElv'] = fid.variables['load_tide_01'][:].copy()
    #-- Solid Earth tide Correction packed units (mm, 1e-3 m)
    Corrections['seTideElv'] = fid.variables['solid_earth_tide_01'][:].copy()
    #-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
    Corrections['gpTideElv'] = fid.variables['pole_tide_01'][:].copy()
    #-- Mean Sea Surface and Geoid packed units (mm, 1e-3 m)
    Corrections['Geoid'] = fid.variables['geoid_01'][:].copy()
    Corrections['MSS'] = fid.variables['mean_sea_surf_sea_ice_01'][:].copy()
    #-- Ocean Depth/Land Elevation Model (ODLE) packed units (mm, 1e-3 m)
    Corrections['ODLE'] = fid.variables['odle_01'][:].copy()
    #-- Ice Concentration packed units (%/100)
    Corrections['Ice_conc'] = fid.variables['sea_ice_concentration_01'][:].copy()
    #-- Snow Depth packed units (mm, 1e-3 m)
    Corrections['Snow_depth'] = fid.variables['snow_depth_01'][:].copy()
    #-- Snow Density packed units (kg/m^3)
    Corrections['Snow_density'] = fid.variables['snow_density_01'][:].copy()
    #-- Corrections Status Flag
    Corrections['C_status'] = fid.variables['flag_cor_err_01'][:].copy()
    #-- Significant Wave Height (SWH) packed units (mm, 1e-3)
    Corrections['SWH'] = fid.variables['swh_ocean_01_ku'][:].copy()
    #-- Wind Speed packed units (mm/s, 1e-3 m/s)
    Corrections['Wind_speed'] = fid.variables['wind_speed_alt_01_ku'][:].copy()
    #-- CryoSat-2 20 Hz data fields (Measurement Group)
    #-- Derived from instrument measurement parameters
    #-- each field is allocated as a fully-masked (n_records x 20) array and
    #-- the raw 20Hz netCDF variable is read into a flat local; the locals
    #-- are remapped into the 2D arrays in the per-record loop below
    Data_20Hz = {}
    #-- Time (seconds since 2000-01-01)
    Data_20Hz['Time'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Time'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    #-- Delta between the timestamps for 20Hz record and the 1Hz record
    #-- D_time_mics packed units (microseconds)
    Data_20Hz['D_time_mics'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['D_time_mics'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    #-- Lat: packed units
    Data_20Hz['Lat'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Lat'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    lat_poca_20_ku = fid.variables['lat_poca_20_ku'][:].copy()
    #-- Lon: packed units
    Data_20Hz['Lon'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Lon'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    lon_poca_20_ku = fid.variables['lon_poca_20_ku'][:].copy()
    #-- Measured elevation above ellipsoid from retracker 1
    Data_20Hz['Elev_1'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Elev_1'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    height_1_20_ku = fid.variables['height_1_20_ku'][:].copy()
    #-- Measured elevation above ellipsoid from retracker 2
    Data_20Hz['Elev_2'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Elev_2'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    height_2_20_ku = fid.variables['height_2_20_ku'][:].copy()
    #-- Measured elevation above ellipsoid from retracker 3
    Data_20Hz['Elev_3'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Elev_3'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    height_3_20_ku = fid.variables['height_3_20_ku'][:].copy()
    #-- Sigma Zero Backscatter for retracker 1
    Data_20Hz['Sig0_1'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Sig0_1'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    sig0_1_20_ku = fid.variables['sig0_1_20_ku'][:].copy()
    #-- Sigma Zero Backscatter for retracker 2
    Data_20Hz['Sig0_2'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Sig0_2'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    sig0_2_20_ku = fid.variables['sig0_2_20_ku'][:].copy()
    #-- Sigma Zero Backscatter for retracker 3
    Data_20Hz['Sig0_3'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Sig0_3'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    sig0_3_20_ku = fid.variables['sig0_3_20_ku'][:].copy()
    #-- Measured range from the satellite CoM to the surface from retracker 1
    Data_20Hz['Range_1'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Range_1'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    range_1_20_ku = fid.variables['range_1_20_ku'][:].copy()
    #-- Measured range from the satellite CoM to the surface from retracker 2
    Data_20Hz['Range_2'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Range_2'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    range_2_20_ku = fid.variables['range_2_20_ku'][:].copy()
    #-- Measured range from the satellite CoM to the surface from retracker 3
    Data_20Hz['Range_3'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Range_3'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    range_3_20_ku = fid.variables['range_3_20_ku'][:].copy()
    #-- Freeboard
    Data_20Hz['Freeboard'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Freeboard'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    freeboard_20_ku = fid.variables['freeboard_20_ku'][:].copy()
    #-- Sea ice lead height output array
    #-- NOTE: the floe-height netCDF variable is read into its local here;
    #-- each local is matched to the correct output field in the loop below
    #-- ('Sea_Ice_Floe' <- floe variable, 'Sea_Ice_Lead' <- lead variable)
    Data_20Hz['Sea_Ice_Lead'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Sea_Ice_Lead'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    height_sea_ice_floe_20_ku = fid.variables['height_sea_ice_floe_20_ku'][:].copy()
    #-- Sea ice floe height output array (lead-height variable read here)
    Data_20Hz['Sea_Ice_Floe'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Sea_Ice_Floe'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    height_sea_ice_lead_20_ku = fid.variables['height_sea_ice_lead_20_ku'][:].copy()
    #-- Interpolated Sea Surface Height Anomaly
    Data_20Hz['SSHA_interp'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['SSHA_interp'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    ssha_interp_20_ku = fid.variables['ssha_interp_20_ku'][:].copy()
    #-- Interpolated Sea Surface Height measurement count
    Data_20Hz['SSHA_interp_count'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['SSHA_interp_count'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    ssha_interp_numval_20_ku = fid.variables['ssha_interp_numval_20_ku'][:].copy()
    #-- Interpolation quality estimate RSS
    Data_20Hz['SSHA_interp_RMS'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['SSHA_interp_RMS'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    ssha_interp_rms_20_ku = fid.variables['ssha_interp_rms_20_ku'][:].copy()
    #-- Peakiness
    Data_20Hz['Peakiness'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Peakiness'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    peakiness_20_ku = fid.variables['peakiness_20_ku'][:].copy()
    #-- Number of averaged echoes or beams
    Data_20Hz['N_avg'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['N_avg'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    echo_avg_numval_20_ku = fid.variables['echo_avg_numval_20_ku'][:].copy()
    #-- Quality flags
    Data_20Hz['Quality_flag'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Quality_flag'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    flag_prod_status_20_ku = fid.variables['flag_prod_status_20_ku'][:].copy()
    #-- Corrections Application Flag
    Data_20Hz['Corrections_flag'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Corrections_flag'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    flag_cor_applied_20_ku = fid.variables['flag_cor_applied_20_ku'][:].copy()
    #-- Measurement mode
    Data_20Hz['Measurement_Mode'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Measurement_Mode'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    flag_instr_mode_op_20_ku = fid.variables['flag_instr_mode_op_20_ku'][:].copy()
    #-- Surface Type
    Data_20Hz['Surf_type'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Surf_type'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    surf_type_20_ku = fid.variables['surf_type_20_ku'][:].copy()
    #-- Quality metric for retracker 1
    Data_20Hz['Quality_1'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Quality_1'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    retracker_1_quality_20_ku = fid.variables['retracker_1_quality_20_ku'][:].copy()
    #-- Quality metric for retracker 2
    Data_20Hz['Quality_2'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Quality_2'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    retracker_2_quality_20_ku = fid.variables['retracker_2_quality_20_ku'][:].copy()
    #-- Quality metric for retracker 3
    Data_20Hz['Quality_3'] = np.ma.zeros((n_records,n_blocks))
    Data_20Hz['Quality_3'].mask = np.ma.ones((n_records,n_blocks),dtype=bool)
    retracker_3_quality_20_ku = fid.variables['retracker_3_quality_20_ku'][:].copy()
    #-- remap ind_first_meas_20hz_01 for the CryoSat file
    #-- GDR data can have indices pointing outside the file
    #-- use the 1Hz mapping variable ind_meas_1hz_20_ku to recalculate the index
    ind_meas_1hz_20_ku = fid.variables['ind_meas_1hz_20_ku'][:].copy()
    ind_first_meas_20hz_01 = np.zeros((n_records),dtype=np.int64)
    #-- for each record in the CryoSat file
    for r in range(n_records):
        #-- GDR data are incorrectly mapped between the 20Hz and 1Hz variables
        #-- argmax finds the first 20Hz measurement belonging to 1Hz record r
        ind_first_meas_20hz_01[r] = (ind_meas_1hz_20_ku == r).argmax()
        #-- index for record r
        # idx = fid.variables['ind_first_meas_20hz_01'][r].copy()
        idx = ind_first_meas_20hz_01[r].copy()
        #-- number of valid blocks in record r
        cnt = np.copy(fid.variables['num_valid_01'][r])
        #-- CryoSat-2 Measurements Group for record r
        #-- copy the cnt valid 20Hz samples into row r and unmask them
        Data_20Hz['Time'].data[r,:cnt] = time_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Time'].mask[r,:cnt] = False
        Data_20Hz['D_time_mics'].data[r,:cnt] = 1e6*(time_20_ku[idx:idx+cnt] - time_cor_01[r])
        Data_20Hz['D_time_mics'].mask[r,:cnt] = False
        Data_20Hz['Lat'].data[r,:cnt] = lat_poca_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Lat'].mask[r,:cnt] = False
        Data_20Hz['Lon'].data[r,:cnt] = lon_poca_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Lon'].mask[r,:cnt] = False
        Data_20Hz['Elev_1'].data[r,:cnt] = height_1_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Elev_1'].mask[r,:cnt] = False
        Data_20Hz['Elev_2'].data[r,:cnt] = height_2_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Elev_2'].mask[r,:cnt] = False
        Data_20Hz['Elev_3'].data[r,:cnt] = height_3_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Elev_3'].mask[r,:cnt] = False
        Data_20Hz['Sig0_1'].data[r,:cnt] = sig0_1_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Sig0_1'].mask[r,:cnt] = False
        Data_20Hz['Sig0_2'].data[r,:cnt] = sig0_2_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Sig0_2'].mask[r,:cnt] = False
        Data_20Hz['Sig0_3'].data[r,:cnt] = sig0_3_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Sig0_3'].mask[r,:cnt] = False
        Data_20Hz['Range_1'].data[r,:cnt] = range_1_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Range_1'].mask[r,:cnt] = False
        Data_20Hz['Range_2'].data[r,:cnt] = range_2_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Range_2'].mask[r,:cnt] = False
        Data_20Hz['Range_3'].data[r,:cnt] = range_3_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Range_3'].mask[r,:cnt] = False
        Data_20Hz['Freeboard'].data[r,:cnt] = freeboard_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Freeboard'].mask[r,:cnt] = False
        Data_20Hz['Sea_Ice_Floe'].data[r,:cnt] = height_sea_ice_floe_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Sea_Ice_Floe'].mask[r,:cnt] = False
        Data_20Hz['Sea_Ice_Lead'].data[r,:cnt] = height_sea_ice_lead_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Sea_Ice_Lead'].mask[r,:cnt] = False
        Data_20Hz['SSHA_interp'].data[r,:cnt] = ssha_interp_20_ku[idx:idx+cnt].copy()
        Data_20Hz['SSHA_interp'].mask[r,:cnt] = False
        Data_20Hz['SSHA_interp_count'].data[r,:cnt] = ssha_interp_numval_20_ku[idx:idx+cnt].copy()
        Data_20Hz['SSHA_interp_count'].mask[r,:cnt] = False
        Data_20Hz['SSHA_interp_RMS'].data[r,:cnt] = ssha_interp_rms_20_ku[idx:idx+cnt].copy()
        Data_20Hz['SSHA_interp_RMS'].mask[r,:cnt] = False
        Data_20Hz['Peakiness'].data[r,:cnt] = peakiness_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Peakiness'].mask[r,:cnt] = False
        Data_20Hz['N_avg'].data[r,:cnt] = echo_avg_numval_20_ku[idx:idx+cnt].copy()
        Data_20Hz['N_avg'].mask[r,:cnt] = False
        Data_20Hz['Quality_flag'].data[r,:cnt] = flag_prod_status_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Quality_flag'].mask[r,:cnt] = False
        Data_20Hz['Corrections_flag'].data[r,:cnt] = flag_cor_applied_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Corrections_flag'].mask[r,:cnt] = False
        Data_20Hz['Measurement_Mode'].data[r,:cnt] = flag_instr_mode_op_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Measurement_Mode'].mask[r,:cnt] = False
        Data_20Hz['Surf_type'].data[r,:cnt] = surf_type_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Surf_type'].mask[r,:cnt] = False
        Data_20Hz['Quality_1'].data[r,:cnt] = retracker_1_quality_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Quality_1'].mask[r,:cnt] = False
        Data_20Hz['Quality_2'].data[r,:cnt] = retracker_2_quality_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Quality_2'].mask[r,:cnt] = False
        Data_20Hz['Quality_3'].data[r,:cnt] = retracker_3_quality_20_ku[idx:idx+cnt].copy()
        Data_20Hz['Quality_3'].mask[r,:cnt] = False
    #-- Bind all the variables of the l2_mds together into a single dictionary
    CS_l2_mds = {}
    CS_l2_mds['Data_1Hz'] = Data_1Hz
    CS_l2_mds['Corrections'] = Corrections
    CS_l2_mds['Data_20Hz'] = Data_20Hz
    #-- extract global attributes and assign as MPH and SPH metadata
    CS_l2_mds['METADATA'] = dict(MPH={},SPH={},DSD={})
    #-- MPH attributes
    CS_l2_mds['METADATA']['MPH']['PRODUCT'] = fid.product_name
    CS_l2_mds['METADATA']['MPH']['DOI'] = fid.doi
    CS_l2_mds['METADATA']['MPH']['PROC_STAGE'] = fid.processing_stage
    CS_l2_mds['METADATA']['MPH']['REF_DOC'] = fid.reference_document
    CS_l2_mds['METADATA']['MPH']['ACQUISITION_STATION'] = fid.acquisition_station
    CS_l2_mds['METADATA']['MPH']['PROC_CENTER'] = fid.processing_centre
    CS_l2_mds['METADATA']['MPH']['PROC_TIME'] = fid.creation_time
    CS_l2_mds['METADATA']['MPH']['SOFTWARE_VER'] = fid.software_version
    CS_l2_mds['METADATA']['MPH']['SENSING_START'] = fid.sensing_start
    CS_l2_mds['METADATA']['MPH']['SENSING_STOP'] = fid.sensing_stop
    CS_l2_mds['METADATA']['MPH']['PHASE'] = fid.phase
    CS_l2_mds['METADATA']['MPH']['CYCLE'] = fid.cycle_number
    CS_l2_mds['METADATA']['MPH']['REL_ORBIT'] = fid.rel_orbit_number
    CS_l2_mds['METADATA']['MPH']['ABS_ORBIT'] = fid.abs_orbit_number
    CS_l2_mds['METADATA']['MPH']['STATE_VECTOR_TIME'] = fid.state_vector_time
    CS_l2_mds['METADATA']['MPH']['DELTA_UT1'] = fid.delta_ut1
    CS_l2_mds['METADATA']['MPH']['X_POSITION'] = fid.x_position
    CS_l2_mds['METADATA']['MPH']['Y_POSITION'] = fid.y_position
    CS_l2_mds['METADATA']['MPH']['Z_POSITION'] = fid.z_position
    CS_l2_mds['METADATA']['MPH']['X_VELOCITY'] = fid.x_velocity
    CS_l2_mds['METADATA']['MPH']['Y_VELOCITY'] = fid.y_velocity
    CS_l2_mds['METADATA']['MPH']['Z_VELOCITY'] = fid.z_velocity
    CS_l2_mds['METADATA']['MPH']['VECTOR_SOURCE'] = fid.vector_source
    CS_l2_mds['METADATA']['MPH']['LEAP_UTC'] = fid.leap_utc
    CS_l2_mds['METADATA']['MPH']['LEAP_SIGN'] = fid.leap_sign
    CS_l2_mds['METADATA']['MPH']['LEAP_ERR'] = fid.leap_err
    CS_l2_mds['METADATA']['MPH']['PRODUCT_ERR'] = fid.product_err
    #-- SPH attributes
    CS_l2_mds['METADATA']['SPH']['START_RECORD_TAI_TIME'] = fid.first_record_time
    CS_l2_mds['METADATA']['SPH']['STOP_RECORD_TAI_TIME'] = fid.last_record_time
    CS_l2_mds['METADATA']['SPH']['ABS_ORBIT_START'] = fid.abs_orbit_start
    CS_l2_mds['METADATA']['SPH']['REL_TIME_ASC_NODE_START'] = fid.rel_time_acs_node_start
    CS_l2_mds['METADATA']['SPH']['ABS_ORBIT_STOP'] = fid.abs_orbit_stop
    CS_l2_mds['METADATA']['SPH']['REL_TIME_ASC_NODE_STOP'] = fid.rel_time_acs_node_stop
    CS_l2_mds['METADATA']['SPH']['EQUATOR_CROSS_TIME_UTC'] = fid.equator_cross_time
    CS_l2_mds['METADATA']['SPH']['EQUATOR_CROSS_LONG'] = fid.equator_cross_long
    CS_l2_mds['METADATA']['SPH']['ASCENDING_FLAG'] = fid.ascending_flag
    CS_l2_mds['METADATA']['SPH']['START_LAT'] = fid.first_record_lat
    CS_l2_mds['METADATA']['SPH']['START_LONG'] = fid.first_record_lon
    CS_l2_mds['METADATA']['SPH']['STOP_LAT'] = fid.last_record_lat
    CS_l2_mds['METADATA']['SPH']['STOP_LONG'] = fid.last_record_lon
    CS_l2_mds['METADATA']['SPH']['L1_PROC_FLAG'] = fid.l1b_proc_flag
    CS_l2_mds['METADATA']['SPH']['L1_PROCESSING_QUALITY'] = fid.l1b_processing_quality
    CS_l2_mds['METADATA']['SPH']['L1_PROC_THRESH'] = fid.l1b_proc_thresh
    CS_l2_mds['METADATA']['SPH']['INSTR_ID'] = fid.instr_id
    CS_l2_mds['METADATA']['SPH']['LRM_MODE_PERCENT'] = fid.lrm_mode_percent
    CS_l2_mds['METADATA']['SPH']['SAR_MODE_PERCENT'] = fid.sar_mode_percent
    CS_l2_mds['METADATA']['SPH']['SARIN_MODE_PERCENT'] = fid.sarin_mode_percent
    CS_l2_mds['METADATA']['SPH']['OPEN_OCEAN_PERCENT'] = fid.open_ocean_percent
    CS_l2_mds['METADATA']['SPH']['CLOSE_SEA_PERCENT'] = fid.close_sea_percent
    CS_l2_mds['METADATA']['SPH']['CONTINENT_ICE_PERCENT'] = fid.continent_ice_percent
    CS_l2_mds['METADATA']['SPH']['LAND_PERCENT'] = fid.land_percent
    CS_l2_mds['METADATA']['SPH']['L2_PROD_STATUS'] = fid.l2_prod_status
    CS_l2_mds['METADATA']['SPH']['L2_PROC_FLAG'] = fid.l2_proc_flag
    CS_l2_mds['METADATA']['SPH']['L2_PROCESSING_QUALITY'] = fid.l2_processing_quality
    CS_l2_mds['METADATA']['SPH']['L2_PROC_THRESH'] = fid.l2_proc_thresh
    CS_l2_mds['METADATA']['SPH']['SIR_CONFIGURATION'] = fid.sir_configuration
    CS_l2_mds['METADATA']['SPH']['SIR_OP_MODE'] = fid.sir_op_mode
    CS_l2_mds['METADATA']['SPH']['ORBIT_FILE'] = fid.xref_orbit
    CS_l2_mds['METADATA']['SPH']['PROC_CONFIG_PARAMS_FILE'] = fid.xref_pconf
    CS_l2_mds['METADATA']['SPH']['CONSTANTS_FILE'] = fid.xref_constants
    CS_l2_mds['METADATA']['SPH']['IPF_RA_DATABASE_FILE'] = fid.xref_siral_characterisation
    CS_l2_mds['METADATA']['SPH']['DORIS_USO_DRIFT_FILE'] = fid.xref_uso
    CS_l2_mds['METADATA']['SPH']['STAR_TRACKER_ATTREF_FILE'] = fid.xref_star_tracker_attref
    CS_l2_mds['METADATA']['SPH']['SIRAL_LEVEL_0_FILE'] = fid.xref_siral_l0
    CS_l2_mds['METADATA']['SPH']['CALIBRATION_TYPE_1_FILE'] = fid.xref_cal1
    CS_l2_mds['METADATA']['SPH']['SIR_COMPLEX_CAL1_SARIN'] = fid.xref_cal1_sarin
    CS_l2_mds['METADATA']['SPH']['SCENARIO_FILE'] = fid.xref_orbit_scenario
    CS_l2_mds['METADATA']['SPH']['CALIBRATION_TYPE_2_FILE'] = fid.xref_cal2
    CS_l2_mds['METADATA']['SPH']['SURFACE_PRESSURE_FILE'] = fid.xref_surf_pressure
    CS_l2_mds['METADATA']['SPH']['MEAN_PRESSURE_FILE'] = fid.xref_mean_pressure
    CS_l2_mds['METADATA']['SPH']['WET_TROPOSPHERE_FILE'] = fid.xref_wet_trop
    CS_l2_mds['METADATA']['SPH']['U_WIND_FILE'] = fid.xref_u_wind
    CS_l2_mds['METADATA']['SPH']['V_WIND_FILE'] = fid.xref_v_wind
    CS_l2_mds['METADATA']['SPH']['METEO_GRID_DEF_FILE'] = fid.xref_meteo
    CS_l2_mds['METADATA']['SPH']['S1S2_PRESSURE_00H_MAP'] = fid.xref_s1s2_pressure_00h
    CS_l2_mds['METADATA']['SPH']['S1S2_PRESSURE_06H_MAP'] = fid.xref_s1s2_pressure_06h
    CS_l2_mds['METADATA']['SPH']['S1S2_PRESSURE_12H_MAP'] = fid.xref_s1s2_pressure_12h
    CS_l2_mds['METADATA']['SPH']['S1S2_PRESSURE_18H_MAP'] = fid.xref_s1s2_pressure_18h
    CS_l2_mds['METADATA']['SPH']['S1_TIDE_AMPLITUDE_MAP'] = fid.xref_s1_tide_amplitude
    CS_l2_mds['METADATA']['SPH']['S1_TIDE_PHASE_MAP'] = fid.xref_s1_tide_phase
    CS_l2_mds['METADATA']['SPH']['S2_TIDE_AMPLITUDE_MAP'] = fid.xref_s2_tide_amplitude
    CS_l2_mds['METADATA']['SPH']['S2_TIDE_PHASE_MAP'] = fid.xref_s2_tide_phase
    CS_l2_mds['METADATA']['SPH']['GPS_IONO_MAP'] = fid.xref_gim
    CS_l2_mds['METADATA']['SPH']['MODIFIED_DIP_MAP_FILE'] = fid.xref_dip_map
    CS_l2_mds['METADATA']['SPH']['IONO_COEFFICENTS_FILE'] = fid.xref_iono_cor
    CS_l2_mds['METADATA']['SPH']['SAI_FILE'] = fid.xref_sai
    CS_l2_mds['METADATA']['SPH']['OCEAN_TIDE_FILE'] = fid.xref_ocean_tide
    CS_l2_mds['METADATA']['SPH']['TIDAL_LOADING_FILE'] = fid.xref_tidal_load
    CS_l2_mds['METADATA']['SPH']['EARTH_TIDE_FILE'] = fid.xref_earth_tide
    CS_l2_mds['METADATA']['SPH']['POLE_TIDE_FILE'] = fid.xref_pole_location
    CS_l2_mds['METADATA']['SPH']['SURFACE_TYPE_FILE'] = fid.xref_surf_type
    CS_l2_mds['METADATA']['SPH']['AUX_MOG2D'] = fid.xref_mog2d
    CS_l2_mds['METADATA']['SPH']['SIRAL_LEVEL_1B_FILE'] = fid.xref_siral_l1b
    CS_l2_mds['METADATA']['SPH']['MEAN_SEA_SURFACE_FILE'] = fid.xref_mss
    CS_l2_mds['METADATA']['SPH']['GEOID_FILE'] = fid.xref_geoid
    CS_l2_mds['METADATA']['SPH']['ODLE_FILE'] = fid.xref_odle
    #-- mode dependent attributes (only present for some acquisition modes)
    if ('xref_dem' in fid.ncattrs()):
        CS_l2_mds['METADATA']['SPH']['DEM_MODEL_FILE'] = fid.xref_dem
    if ('xref_sea_ice' in fid.ncattrs()):
        CS_l2_mds['METADATA']['SPH']['SEA_ICE_FILE'] = fid.xref_sea_ice
    if ('xref_snow_depth' in fid.ncattrs()):
        CS_l2_mds['METADATA']['SPH']['SNOW_DEPTH_FILE'] = fid.xref_snow_depth
    #-- close the netCDF4 file
    fid.close()
    #-- return the output dictionary
    return CS_l2_mds
#-- PURPOSE: Get scaling factors for converting unpacked units in binary files
def cryosat_scaling_factors():
    """
    Return scaling factors for converting the packed integer units stored
    in CryoSat-2 Level-2 binary files into physical units.

    The nested dictionary mirrors the structure produced by the Level-2
    readers: 'Data_1Hz' (Location Group), 'Corrections' (External
    Corrections Group) and 'Data_20Hz' (Measurement Group).  Multiplying a
    packed variable by its factor yields the physical unit noted in the
    inline comments (e.g. 1e-3 converts mm to m).

    Returns
    -------
    CS_l2_scale: dict of dict mapping each field name to its scale factor
    """
    #-- fixes vs prior version: removed the unused local ``n_blocks`` and
    #-- expressed the pure data as dict literals (same keys, values, order)
    CS_l2_scale = {}
    #-- CryoSat-2 1 Hz data fields (Location Group)
    #-- Time and Orbit Parameters plus Measurement Mode
    CS_l2_scale['Data_1Hz'] = {
        #-- Time: day, second and microsecond parts
        'Day': 1.0,
        'Second': 1.0,
        'Micsec': 1.0,
        #-- SIRAL mode
        'Siral_mode': 1,
        #-- Lat_1Hz/Lon_1Hz: packed units (0.1 micro-degree, 1e-7 degrees)
        'Lat_1Hz': 1e-7,
        'Lon_1Hz': 1e-7,
        #-- Alt_1Hz: altitude of COG above reference ellipsoid (mm, 1e-3 m)
        'Alt_1Hz': 1e-3,
        #-- Roll/Pitch/Yaw: packed units (0.1 micro-degree, 1e-7 degrees)
        'Roll': 1e-7,
        'Pitch': 1e-7,
        'Yaw': 1e-7,
        'Spare': 1,
        #-- number of valid records in each block of twenty
        'N_valid': 1,
        #-- absolute orbit number
        'Abs_Orbit': 1,
    }
    #-- CryoSat-2 geophysical corrections (External corrections Group)
    CS_l2_scale['Corrections'] = {
        #-- tropospheric, barometric and atmospheric corrections (mm, 1e-3 m)
        'dryTrop': 1e-3,
        'wetTrop': 1e-3,
        'InvBar': 1e-3,
        'DAC': 1e-3,
        #-- Ionospheric Correction (mm, 1e-3 m)
        'Iono': 1e-3,
        #-- Sea State Bias Correction (mm, 1e-3 m)
        'SSB': 1e-3,
        #-- tidal corrections: ocean, long-period equilibrium, ocean loading,
        #-- solid Earth and geocentric polar tides (mm, 1e-3 m)
        'ocTideElv': 1e-3,
        'lpeTideElv': 1e-3,
        'olTideElv': 1e-3,
        'seTideElv': 1e-3,
        'gpTideElv': 1e-3,
        'Spare1': 1,
        #-- Surface Type: packed in groups of three bits for each of 20 records
        'Surf_type': 1,
        #-- Mean Sea Surface or Geoid (mm, 1e-3 m)
        'MSS_Geoid': 1e-3,
        #-- Ocean Depth/Land Elevation Model (ODLE) (mm, 1e-3 m)
        'ODLE': 1e-3,
        #-- Ice Concentration (%/100)
        'Ice_conc': 1e-2,
        #-- Snow Depth (mm, 1e-3 m) and Snow Density (kg/m^3)
        'Snow_depth': 1e-3,
        'Snow_density': 1,
        'Spare2': 1,
        #-- Corrections Status Flag
        'C_status': 1,
        #-- Significant Wave Height (SWH) (mm, 1e-3)
        'SWH': 1e-3,
        #-- Wind Speed (mm/s, 1e-3 m/s)
        'Wind_speed': 1e-3,
        'Spare3': 1,
        'Spare4': 1,
        'Spare5': 1,
        'Spare6': 1,
    }
    #-- CryoSat-2 20 Hz data fields (Measurement Group)
    #-- Derived from instrument measurement parameters
    CS_l2_scale['Data_20Hz'] = {
        #-- delta between the 20Hz and 1Hz record timestamps (microseconds)
        'D_time_mics': 1.0,
        #-- Lat/Lon: packed units (0.1 micro-degree, 1e-7 degrees)
        'Lat': 1e-7,
        'Lon': 1e-7,
        #-- measured elevations above ellipsoid, retrackers 1-3 (mm, 1e-3 m)
        'Elev_1': 1e-3,
        'Elev_2': 1e-3,
        'Elev_3': 1e-3,
        #-- Sigma Zero Backscatter, retrackers 1-3 (1e-2 dB)
        'Sig0_1': 1e-2,
        'Sig0_2': 1e-2,
        'Sig0_3': 1e-2,
        #-- Freeboard (mm, 1e-3 m)
        #-- -9999 default value indicates computation has not been performed
        'Freeboard': 1e-3,
        #-- interpolated sea surface height anomaly (mm, 1e-3 m),
        #-- its measurement count, and interpolation quality RSS (mm, 1e-3 m)
        'SSHA_interp': 1e-3,
        'SSHA_interp_count': 1,
        'SSHA_interp_RMS': 1e-3,
        #-- Peakiness (1e-2)
        'Peakiness': 1e-2,
        #-- number of averaged echoes or beams
        'N_avg': 1,
        'Spare1': 1,
        #-- quality and corrections application flags
        'Quality_flag': 1,
        'Corrections_flag': 1,
        #-- quality metrics for retrackers 1-3
        'Quality_1': 1,
        'Quality_2': 1,
        'Quality_3': 1,
    }
    #-- return the scaling factors
    return CS_l2_scale
#-- PURPOSE: Read ASCII Main Product Header (MPH) block from an ESA PDS file
def read_MPH(full_filename):
    """
    Read the ASCII Main Product Header (MPH) block from an ESA PDS file.

    Parameters
    ----------
    full_filename: full path of the PDS file (``~`` is expanded)

    Returns
    -------
    s_MPH_fields: dict mapping each MPH keyword to its value (quotes and
        trailing whitespace stripped)

    Raises
    ------
    IOError: if the first line does not declare a quoted PRODUCT field
    """
    #-- read the file as bytes and split into lines
    with open(os.path.expanduser(full_filename), 'rb') as fid:
        file_contents = fid.read().splitlines()
    #-- a standard MPH block spans this many text lines
    n_MPH_lines = 41
    #-- the first line must declare the PRODUCT field
    if not re.match(br'PRODUCT\=\"(.*)(?=\")', file_contents[0]):
        raise IOError('File does not start with a valid PDS MPH')
    #-- compiled expressions for quoted and unquoted KEYWORD=VALUE fields
    quoted = re.compile(br'(.*?)\=\"(.*)(?=\")')
    unquoted = re.compile(br'(.*?)\=(.*)')
    #-- parse each MPH line into the output dictionary
    s_MPH_fields = {}
    for i in range(n_MPH_lines):
        line = file_contents[i]
        #-- prefer the quoted form; fall back to the unquoted form
        match = quoted.match(line) or unquoted.match(line)
        if match:
            key, val = match.groups()
            s_MPH_fields[key.decode('utf-8')] = val.decode('utf-8').rstrip()
    #-- Return block name array to calling function
    return s_MPH_fields
#-- PURPOSE: Read ASCII Specific Product Header (SPH) block from a PDS file
def read_SPH(full_filename,j_sph_size):
    """
    Read the ASCII Specific Product Header (SPH) block from a PDS file.

    Parameters
    ----------
    full_filename: full path of the PDS file (``~`` is expanded)
    j_sph_size: SPH block size in bytes
        NOTE(review): accepted but not referenced in this implementation;
        kept for interface compatibility with callers -- confirm before
        removing

    Returns
    -------
    s_SPH_fields: dict of SPH keywords to values; DS_NAME entries become
        nested dictionaries keyed by the dataset name, holding the six
        descriptor lines that follow each DS_NAME

    Raises
    ------
    IOError: if SPH_DESCRIPTOR is not found on the line after the MPH
    """
    #-- read input data file
    with open(os.path.expanduser(full_filename), 'rb') as fid:
        file_contents = fid.read().splitlines()
    #-- Define constant values associated with PDS file formats
    #-- number of text lines in standard MPH
    n_MPH_lines = 41
    #-- compile regular expression operator for reading headers
    rx = re.compile(br'(.*?)\=\"?(.*)',re.VERBOSE)
    #-- check first line of header matches SPH_DESCRIPTOR
    if not bool(re.match(br'SPH\_DESCRIPTOR\=',file_contents[n_MPH_lines+1])):
        raise IOError('File does not have a valid PDS DSD')
    #-- read SPH header text (no binary control characters)
    s_SPH_lines = [li for li in file_contents[n_MPH_lines+1:] if rx.match(li)
        and not re.search(br'[^\x20-\x7e]+',li)]
    #-- extract SPH header text
    s_SPH_fields = {}
    c = 0
    while (c < len(s_SPH_lines)):
        #-- check if line is within DS_NAME portion of SPH header
        if bool(re.match(br'DS_NAME',s_SPH_lines[c])):
            #-- add dictionary for DS_NAME
            field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
            key = value.decode('utf-8').rstrip()
            s_SPH_fields[key] = {}
            #-- the six lines after DS_NAME describe this dataset
            for line in s_SPH_lines[c+1:c+7]:
                if bool(re.match(br'(.*?)\=\"(.*)(?=\")',line)):
                    #-- data fields within quotes
                    dsfield,dsvalue=re.findall(br'(.*?)\=\"(.*)(?=\")',line).pop()
                    s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
                elif bool(re.match(br'(.*?)\=(.*)',line)):
                    #-- data fields without quotes
                    dsfield,dsvalue=re.findall(br'(.*?)\=(.*)',line).pop()
                    s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
            #-- add 6 to counter to go to next entry
            #-- (combined with the c += 1 below, this skips the 6 descriptor
            #-- lines and advances to the line after the DS_NAME group)
            c += 6
        #-- use regular expression operators to read headers
        elif bool(re.match(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c])):
            #-- data fields within quotes
            field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
            s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
        elif bool(re.match(br'(.*?)\=(.*)',s_SPH_lines[c])):
            #-- data fields without quotes
            field,value=re.findall(br'(.*?)\=(.*)',s_SPH_lines[c]).pop()
            s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
        #-- add 1 to counter to go to next line
        c += 1
    #-- Return block name array to calling function
    return s_SPH_fields
#-- PURPOSE: Read ASCII Data Set Descriptors (DSD) block from a PDS file
def read_DSD(full_filename):
    """Read the ASCII Data Set Descriptor (DSD) record of a CryoSat PDS file

    Arguments
    ---------
    full_filename: full path of the CryoSat Level-2 file

    Returns
    -------
    s_DSD_fields: dictionary of DSD header fields with values stripped of
        trailing whitespace

    Raises
    ------
    IOError: if no recognized Level-2 DS_NAME entry can be located
    """
    #-- read the complete input file as a list of byte-string lines
    with open(os.path.expanduser(full_filename), 'rb') as fid:
        file_contents = fid.read().splitlines()
    #-- fixed layout constants of the PDS file format
    n_MPH_lines = 41    #-- number of text lines in standard MPH
    n_DSD_lines = 8     #-- number of text lines in a DSD header
    #-- Level-2 CryoSat DS_NAMES within files (one pattern per product type)
    regex_patterns = [
        br'DS_NAME\="SIR_LRM_L2(_I)?[\s+]*"',
        br'DS_NAME\="SIR_LRMIL2[\s+]*"',
        br'DS_NAME\="SIR_SAR_L2(A|B)?(_I)?[\s+]*"',
        br'DS_NAME\="SIR_SARIL2(A|B)?[\s+]*"',
        br'DS_NAME\="SIR_FDM_L2[\s+]*"',
        br'DS_NAME\="SIR_SIN_L2(_I)?[\s+]*"',
        br'DS_NAME\="SIR_SINIL2[\s+]*"',
        br'DS_NAME\="SIR_SID_L2(_I)?[\s+]*"',
        br'DS_NAME\="SIR_SIDIL2[\s+]*"',
        br'DS_NAME\="SIR_GDR_2(A|B|_)?[\s+]*"',
    ]
    #-- scan the SPH portion of the header for the first matching DS_NAME
    indice = []
    for pattern in regex_patterns:
        indice = [i for i,line in enumerate(file_contents[n_MPH_lines+1:])
            if re.search(pattern,line)]
        if indice:
            break
    #-- check that a valid DS_NAME entry was found within the header
    if not indice:
        raise IOError('Can not find correct DSD field')
    #-- absolute line number where the DSD record begins
    DSD_START = n_MPH_lines + indice[0] + 1
    #-- regular expressions for quoted and unquoted header fields
    rx_quoted = re.compile(br'(.*?)\=\"(.*)(?=\")')
    rx_unquoted = re.compile(br'(.*?)\=(.*)')
    #-- extract each field of the DSD record
    s_DSD_fields = {}
    for i in range(DSD_START, DSD_START + n_DSD_lines):
        #-- prefer the quoted form; fall back to the unquoted form
        match = rx_quoted.match(file_contents[i]) or rx_unquoted.match(file_contents[i])
        if match:
            field,value = match.groups()
            s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
    #-- Return block name array to calling function
    return s_DSD_fields
#-- PURPOSE: read CryoSat Level-2 data
def read_cryosat_L2(full_filename, VERBOSE=False):
    """Read a CryoSat Level-2 product in binary (.DBL) or netCDF4 (.nc) format

    Arguments
    ---------
    full_filename: full path of the CryoSat Level-2 file
    VERBOSE: print input file and header information

    Returns
    -------
    CS_L2_mds: dictionary of CryoSat Level-2 variables.  When reading a
        .DBL file that contains MPH/SPH/DSD headers, the parsed headers
        are added under the METADATA key and the 1Hz data is annotated
        with the absolute orbit number and ascending/descending flag

    Raises
    ------
    IOError: if the file extension is not recognized or the file size is
        inconsistent with the header record sizes
    """
    #-- file basename and file extension of input file
    fileBasename,fileExtension=os.path.splitext(os.path.basename(full_filename))
    #-- CryoSat file class
    #-- OFFL (Off Line Processing/Systematic)
    #-- NRT_ (Near Real Time)
    #-- RPRO (ReProcessing)
    #-- TEST (Testing)
    #-- LTA_ (Long Term Archive)
    regex_class = r'OFFL|NRT_|RPRO|TEST|LTA_'
    #-- CryoSat mission products
    #-- SIR_LRM_2 L2 Product from Low Resolution Mode Processing
    #-- SIR_FDM_2 L2 Product from Fast Delivery Marine Mode Processing
    #-- SIR_SIN_2 L2 Product from SAR Interferometric Processing
    #-- SIR_SID_2 L2 Product from SIN Degraded Processing
    #-- SIR_SAR_2 L2 Product from SAR Processing
    #-- SIR_GDR_2 L2 Consolidated Product
    #-- SIR_LRMI2 In-depth L2 Product from LRM Processing
    #-- SIR_SINI2 In-depth L2 Product from SIN Processing
    #-- SIR_SIDI2 In-depth L2 Product from SIN Degraded Process.
    #-- SIR_SARI2 In-depth L2 Product from SAR Processing
    regex_products = (r'SIR_LRM_2|SIR_FDM_2|SIR_SIN_2|SIR_SID_2|'
        r'SIR_SAR_2|SIR_GDR_2|SIR_LRMI2|SIR_SINI2|SIR_SIDI2|SIR_SARI2')
    #-- CRYOSAT LEVEL-2 PRODUCTS NAMING RULES
    #-- Mission Identifier
    #-- File Class
    #-- File Product
    #-- Validity Start Date and Time
    #-- Validity Stop Date and Time
    #-- Baseline Identifier
    #-- Version Number
    regex_pattern = r'(.*?)_({0})_({1})__(\d+T?\d+)_(\d+T?\d+)_(.*?)(\d+)'
    rx = re.compile(regex_pattern.format(regex_class,regex_products),re.VERBOSE)
    #-- extract file information from filename
    MI,CLASS,PRODUCT,START,STOP,BASELINE,VERSION=rx.findall(fileBasename).pop()
    #-- check if input file is original binary *.DBL or new netCDF4 *.nc format
    if (fileExtension == '.nc'):
        print(fileBasename) if VERBOSE else None
        CS_L2_mds = cryosat_baseline_D(full_filename, UNPACK=False)
    elif (fileExtension == '.DBL'):
        #-- Record sizes
        CS_L2_MDS_REC_SIZE = 980
        CS_L2_C_MDS_REC_SIZE = 1392
        #-- check baseline from file to set i_record_size and allocation function
        if (BASELINE == 'C'):
            i_record_size = CS_L2_C_MDS_REC_SIZE
            read_cryosat_variables = cryosat_baseline_C
        else:
            i_record_size = CS_L2_MDS_REC_SIZE
            read_cryosat_variables = cryosat_baseline_AB
        #-- get the file size without opening a raw file descriptor
        file_info = os.stat(os.path.expanduser(full_filename))
        #-- num DSRs from SPH
        j_num_DSR = np.int32(file_info.st_size//i_record_size)
        #-- print file information
        if VERBOSE:
            print(fileBasename)
            print('{0:d} {1:d} {2:d}'.format(j_num_DSR,file_info.st_size,i_record_size))
            #-- Check if MPH/SPH/DSD headers
            if (j_num_DSR*i_record_size == file_info.st_size):
                print('No Header on file')
                print('The number of DSRs is: {0:d}'.format(j_num_DSR))
            else:
                print('Header on file')
        #-- Check if MPH/SPH/DSD headers
        if (j_num_DSR*i_record_size != file_info.st_size):
            #-- If there are MPH/SPH/DSD headers
            s_MPH_fields = read_MPH(full_filename)
            #-- raw strings for the regex: '\d' in a plain string is an
            #-- invalid escape sequence on modern Python
            j_sph_size = np.int32(re.findall(r'[-+]?\d+',s_MPH_fields['SPH_SIZE']).pop())
            s_SPH_fields = read_SPH(full_filename,j_sph_size)
            #-- extract information from DSD fields
            s_DSD_fields = read_DSD(full_filename)
            #-- extract DS_OFFSET
            j_DS_start = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['DS_OFFSET']).pop())
            #-- extract number of DSR in the file
            j_num_DSR = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['NUM_DSR']).pop())
            #-- check the record size
            j_DSR_size = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['DSR_SIZE']).pop())
            #-- minimum size is start of the read plus number of records to read
            j_check_size = j_DS_start + (j_DSR_size*j_num_DSR)
            if VERBOSE:
                print('The offset of the DSD is: {0:d} bytes'.format(j_DS_start))
                print('The number of DSRs is {0:d}'.format(j_num_DSR))
                print('The size of the DSR is {0:d}'.format(j_DSR_size))
            #-- check if invalid file size
            if (j_check_size > file_info.st_size):
                raise IOError('File size error')
            #-- extract binary data from input CryoSat data file (skip headers)
            #-- context manager ensures the file is closed even if the
            #-- record reader raises
            with open(os.path.expanduser(full_filename), 'rb') as fid:
                #-- read past the ASCII headers to the start of the data set
                fid.read(j_DS_start)
                #-- iterate through CryoSat file and fill output variables
                CS_L2_mds = read_cryosat_variables(fid,i_record_size,j_num_DSR)
            #-- add headers to output dictionary as METADATA
            CS_L2_mds['METADATA'] = {}
            CS_L2_mds['METADATA']['MPH'] = s_MPH_fields
            CS_L2_mds['METADATA']['SPH'] = s_SPH_fields
            CS_L2_mds['METADATA']['DSD'] = s_DSD_fields
            #-- add absolute orbit number to 1Hz data
            CS_L2_mds['Data_1Hz']['Abs_Orbit']=np.zeros((j_num_DSR),dtype=np.uint32)
            CS_L2_mds['Data_1Hz']['Abs_Orbit'][:]=np.uint32(s_MPH_fields['ABS_ORBIT'])
            #-- add ascending/descending flag to 1Hz data (A=ascending,D=descending)
            CS_L2_mds['Data_1Hz']['Ascending_flag']=np.zeros((j_num_DSR),dtype=bool)
            if (s_SPH_fields['ASCENDING_FLAG'] == 'A'):
                CS_L2_mds['Data_1Hz']['Ascending_flag'][:] = True
        else:
            #-- If there are not MPH/SPH/DSD headers
            #-- extract binary data from the start of the CryoSat data file
            with open(os.path.expanduser(full_filename), 'rb') as fid:
                #-- iterate through CryoSat file and fill output variables
                CS_L2_mds = read_cryosat_variables(fid,i_record_size,j_num_DSR)
    else:
        #-- previously an unknown extension fell through both branches and
        #-- crashed with a NameError on return: raise an informative error
        raise IOError('Unknown file format: {0}'.format(fileExtension))
    #-- return the data and headers
    return CS_L2_mds
| [
"numpy.copy",
"numpy.fromfile",
"numpy.ones",
"re.compile",
"numpy.ma.ones",
"numpy.ma.zeros",
"os.close",
"numpy.int32",
"re.match",
"os.fstat",
"numpy.array",
"numpy.zeros",
"numpy.uint32",
"os.path.basename",
"re.findall",
"os.path.expanduser",
"re.search"
] | [((2545, 2580), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (2553, 2580), True, 'import numpy as np\n'), ((2635, 2670), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (2643, 2670), True, 'import numpy as np\n'), ((2730, 2765), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (2738, 2765), True, 'import numpy as np\n'), ((2817, 2853), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.uint64'}), '(n_records, dtype=np.uint64)\n', (2825, 2853), True, 'import numpy as np\n'), ((2946, 2981), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (2954, 2981), True, 'import numpy as np\n'), ((3074, 3109), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (3082, 3109), True, 'import numpy as np\n'), ((3254, 3289), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (3262, 3289), True, 'import numpy as np\n'), ((3386, 3421), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (3394, 3421), True, 'import numpy as np\n'), ((3665, 3700), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (3673, 3700), True, 'import numpy as np\n'), ((3891, 3926), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (3899, 3926), True, 'import numpy as np\n'), ((4021, 4056), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (4029, 4056), True, 'import numpy as np\n'), ((4152, 4187), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (4160, 4187), True, 'import numpy as np\n'), ((4280, 4315), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 
'np.int16'}), '(n_records, dtype=np.int16)\n', (4288, 4315), True, 'import numpy as np\n'), ((4402, 4437), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (4410, 4437), True, 'import numpy as np\n'), ((4526, 4561), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (4534, 4561), True, 'import numpy as np\n'), ((4652, 4687), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (4660, 4687), True, 'import numpy as np\n'), ((4803, 4838), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (4811, 4838), True, 'import numpy as np\n'), ((4937, 4972), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (4945, 4972), True, 'import numpy as np\n'), ((5069, 5104), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (5077, 5104), True, 'import numpy as np\n'), ((5206, 5241), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (5214, 5241), True, 'import numpy as np\n'), ((5272, 5307), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (5280, 5307), True, 'import numpy as np\n'), ((5422, 5458), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.uint64'}), '(n_records, dtype=np.uint64)\n', (5430, 5458), True, 'import numpy as np\n'), ((5553, 5588), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (5561, 5588), True, 'import numpy as np\n'), ((5692, 5727), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (5700, 5727), True, 'import numpy as np\n'), ((5808, 5843), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (5816, 5843), True, 'import 
numpy as np\n'), ((5924, 5959), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (5932, 5959), True, 'import numpy as np\n'), ((6040, 6075), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (6048, 6075), True, 'import numpy as np\n'), ((6106, 6141), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (6114, 6141), True, 'import numpy as np\n'), ((6207, 6243), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.uint32'}), '(n_records, dtype=np.uint32)\n', (6215, 6243), True, 'import numpy as np\n'), ((6334, 6369), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (6342, 6369), True, 'import numpy as np\n'), ((6454, 6490), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.uint16'}), '(n_records, dtype=np.uint16)\n', (6462, 6490), True, 'import numpy as np\n'), ((6521, 6556), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (6529, 6556), True, 'import numpy as np\n'), ((6587, 6622), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (6595, 6622), True, 'import numpy as np\n'), ((6653, 6688), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (6661, 6688), True, 'import numpy as np\n'), ((6719, 6754), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (6727, 6754), True, 'import numpy as np\n'), ((7064, 7114), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (7075, 7114), True, 'import numpy as np\n'), ((7150, 7192), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (7157, 7192), True, 'import numpy as np\n'), ((7275, 7325), 
'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (7286, 7325), True, 'import numpy as np\n'), ((7353, 7395), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (7360, 7395), True, 'import numpy as np\n'), ((7478, 7528), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (7489, 7528), True, 'import numpy as np\n'), ((7556, 7598), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (7563, 7598), True, 'import numpy as np\n'), ((7708, 7758), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (7719, 7758), True, 'import numpy as np\n'), ((7787, 7829), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (7794, 7829), True, 'import numpy as np\n'), ((7936, 7986), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (7947, 7986), True, 'import numpy as np\n'), ((8022, 8064), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (8029, 8064), True, 'import numpy as np\n'), ((8160, 8210), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (8171, 8210), True, 'import numpy as np\n'), ((8252, 8294), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (8259, 8294), True, 'import numpy as np\n'), ((8400, 8450), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (8411, 8450), True, 'import numpy as np\n'), 
((8490, 8532), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (8497, 8532), True, 'import numpy as np\n'), ((8626, 8676), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (8637, 8676), True, 'import numpy as np\n'), ((8705, 8747), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (8712, 8747), True, 'import numpy as np\n'), ((8816, 8867), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.uint16'}), '((n_records, n_blocks), dtype=np.uint16)\n', (8827, 8867), True, 'import numpy as np\n'), ((8901, 8943), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (8908, 8943), True, 'import numpy as np\n'), ((9092, 9142), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (9103, 9142), True, 'import numpy as np\n'), ((9176, 9218), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (9183, 9218), True, 'import numpy as np\n'), ((9287, 9337), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (9298, 9337), True, 'import numpy as np\n'), ((9367, 9409), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (9374, 9409), True, 'import numpy as np\n'), ((9435, 9485), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (9446, 9485), True, 'import numpy as np\n'), ((9516, 9558), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (9523, 9558), True, 'import numpy as np\n'), 
((9613, 9664), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.uint32'}), '((n_records, n_blocks), dtype=np.uint32)\n', (9624, 9664), True, 'import numpy as np\n'), ((9701, 9743), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (9708, 9743), True, 'import numpy as np\n'), ((9769, 9819), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (9780, 9819), True, 'import numpy as np\n'), ((9850, 9892), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (9857, 9892), True, 'import numpy as np\n'), ((9918, 9968), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (9929, 9968), True, 'import numpy as np\n'), ((9999, 10041), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (10006, 10041), True, 'import numpy as np\n'), ((10067, 10117), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (10078, 10117), True, 'import numpy as np\n'), ((10148, 10190), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (10155, 10190), True, 'import numpy as np\n'), ((10216, 10266), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (10227, 10266), True, 'import numpy as np\n'), ((10297, 10339), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (10304, 10339), True, 'import numpy as np\n'), ((16464, 16499), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (16472, 16499), True, 'import numpy as 
np\n'), ((16554, 16589), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (16562, 16589), True, 'import numpy as np\n'), ((16649, 16684), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (16657, 16684), True, 'import numpy as np\n'), ((16736, 16772), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.uint64'}), '(n_records, dtype=np.uint64)\n', (16744, 16772), True, 'import numpy as np\n'), ((16865, 16900), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (16873, 16900), True, 'import numpy as np\n'), ((16993, 17028), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (17001, 17028), True, 'import numpy as np\n'), ((17173, 17208), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (17181, 17208), True, 'import numpy as np\n'), ((17295, 17330), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (17303, 17330), True, 'import numpy as np\n'), ((17419, 17454), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (17427, 17454), True, 'import numpy as np\n'), ((17539, 17574), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (17547, 17574), True, 'import numpy as np\n'), ((17601, 17636), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (17609, 17636), True, 'import numpy as np\n'), ((17880, 17915), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (17888, 17915), True, 'import numpy as np\n'), ((18106, 18141), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (18114, 18141), True, 'import numpy as np\n'), ((18236, 18271), 
'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (18244, 18271), True, 'import numpy as np\n'), ((18367, 18402), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (18375, 18402), True, 'import numpy as np\n'), ((18495, 18530), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (18503, 18530), True, 'import numpy as np\n'), ((18617, 18652), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (18625, 18652), True, 'import numpy as np\n'), ((18741, 18776), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (18749, 18776), True, 'import numpy as np\n'), ((18867, 18902), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (18875, 18902), True, 'import numpy as np\n'), ((19018, 19053), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (19026, 19053), True, 'import numpy as np\n'), ((19152, 19187), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (19160, 19187), True, 'import numpy as np\n'), ((19284, 19319), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (19292, 19319), True, 'import numpy as np\n'), ((19421, 19456), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (19429, 19456), True, 'import numpy as np\n'), ((19487, 19522), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (19495, 19522), True, 'import numpy as np\n'), ((19637, 19673), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.uint64'}), '(n_records, dtype=np.uint64)\n', (19645, 19673), True, 'import numpy as np\n'), ((19768, 19803), 'numpy.zeros', 'np.zeros', 
(['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (19776, 19803), True, 'import numpy as np\n'), ((19907, 19942), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int32'}), '(n_records, dtype=np.int32)\n', (19915, 19942), True, 'import numpy as np\n'), ((20023, 20058), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (20031, 20058), True, 'import numpy as np\n'), ((20139, 20174), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (20147, 20174), True, 'import numpy as np\n'), ((20255, 20290), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (20263, 20290), True, 'import numpy as np\n'), ((20321, 20356), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (20329, 20356), True, 'import numpy as np\n'), ((20422, 20458), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.uint32'}), '(n_records, dtype=np.uint32)\n', (20430, 20458), True, 'import numpy as np\n'), ((20549, 20584), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (20557, 20584), True, 'import numpy as np\n'), ((20669, 20705), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.uint16'}), '(n_records, dtype=np.uint16)\n', (20677, 20705), True, 'import numpy as np\n'), ((20736, 20771), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (20744, 20771), True, 'import numpy as np\n'), ((20802, 20837), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (20810, 20837), True, 'import numpy as np\n'), ((20868, 20903), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int16'}), '(n_records, dtype=np.int16)\n', (20876, 20903), True, 'import numpy as np\n'), ((20934, 20969), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 
'np.int16'}), '(n_records, dtype=np.int16)\n', (20942, 20969), True, 'import numpy as np\n'), ((21279, 21329), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (21290, 21329), True, 'import numpy as np\n'), ((21365, 21407), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (21372, 21407), True, 'import numpy as np\n'), ((21490, 21540), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (21501, 21540), True, 'import numpy as np\n'), ((21568, 21610), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (21575, 21610), True, 'import numpy as np\n'), ((21693, 21743), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (21704, 21743), True, 'import numpy as np\n'), ((21771, 21813), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (21778, 21813), True, 'import numpy as np\n'), ((21927, 21977), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (21938, 21977), True, 'import numpy as np\n'), ((22008, 22050), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (22015, 22050), True, 'import numpy as np\n'), ((22164, 22214), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (22175, 22214), True, 'import numpy as np\n'), ((22245, 22287), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (22252, 22287), True, 'import numpy as np\n'), ((22401, 22451), 'numpy.ma.zeros', 
'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (22412, 22451), True, 'import numpy as np\n'), ((22482, 22524), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (22489, 22524), True, 'import numpy as np\n'), ((22622, 22672), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (22633, 22672), True, 'import numpy as np\n'), ((22703, 22745), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (22710, 22745), True, 'import numpy as np\n'), ((22843, 22893), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (22854, 22893), True, 'import numpy as np\n'), ((22924, 22966), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (22931, 22966), True, 'import numpy as np\n'), ((23064, 23114), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (23075, 23114), True, 'import numpy as np\n'), ((23145, 23187), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (23152, 23187), True, 'import numpy as np\n'), ((23336, 23386), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (23347, 23386), True, 'import numpy as np\n'), ((23420, 23462), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (23427, 23462), True, 'import numpy as np\n'), ((23569, 23619), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (23580, 23619), True, 
'import numpy as np\n'), ((23655, 23697), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (23662, 23697), True, 'import numpy as np\n'), ((23793, 23843), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (23804, 23843), True, 'import numpy as np\n'), ((23885, 23927), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (23892, 23927), True, 'import numpy as np\n'), ((24033, 24083), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (24044, 24083), True, 'import numpy as np\n'), ((24123, 24165), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (24130, 24165), True, 'import numpy as np\n'), ((24234, 24285), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.uint16'}), '((n_records, n_blocks), dtype=np.uint16)\n', (24245, 24285), True, 'import numpy as np\n'), ((24319, 24361), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (24326, 24361), True, 'import numpy as np\n'), ((24430, 24480), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (24441, 24480), True, 'import numpy as np\n'), ((24510, 24552), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (24517, 24552), True, 'import numpy as np\n'), ((24578, 24628), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int16'}), '((n_records, n_blocks), dtype=np.int16)\n', (24589, 24628), True, 'import numpy as np\n'), ((24659, 24701), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, 
n_blocks), dtype=bool)\n', (24666, 24701), True, 'import numpy as np\n'), ((24756, 24807), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.uint32'}), '((n_records, n_blocks), dtype=np.uint32)\n', (24767, 24807), True, 'import numpy as np\n'), ((24844, 24886), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (24851, 24886), True, 'import numpy as np\n'), ((24960, 25011), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.uint32'}), '((n_records, n_blocks), dtype=np.uint32)\n', (24971, 25011), True, 'import numpy as np\n'), ((25052, 25094), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (25059, 25094), True, 'import numpy as np\n'), ((25163, 25213), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (25174, 25213), True, 'import numpy as np\n'), ((25247, 25289), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (25254, 25289), True, 'import numpy as np\n'), ((25358, 25408), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (25369, 25408), True, 'import numpy as np\n'), ((25442, 25484), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (25449, 25484), True, 'import numpy as np\n'), ((25553, 25603), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {'dtype': 'np.int32'}), '((n_records, n_blocks), dtype=np.int32)\n', (25564, 25603), True, 'import numpy as np\n'), ((25637, 25679), 'numpy.ones', 'np.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (25644, 25679), True, 'import numpy as np\n'), ((33134, 33181), 'numpy.array', 'np.array', (['(time_cor_01 / 
86400.0)'], {'dtype': 'np.int32'}), '(time_cor_01 / 86400.0, dtype=np.int32)\n', (33142, 33181), True, 'import numpy as np\n'), ((33232, 33300), 'numpy.array', 'np.array', (["(time_cor_01 - Data_1Hz['Day'][:] * 86400.0)"], {'dtype': 'np.int32'}), "(time_cor_01 - Data_1Hz['Day'][:] * 86400.0, dtype=np.int32)\n", (33240, 33300), True, 'import numpy as np\n'), ((33354, 33465), 'numpy.array', 'np.array', (["((time_cor_01 - Data_1Hz['Day'][:] * 86400.0 - Data_1Hz['Second'][:]) * \n 1000000.0)"], {'dtype': 'np.int32'}), "((time_cor_01 - Data_1Hz['Day'][:] * 86400.0 - Data_1Hz['Second'][:\n ]) * 1000000.0, dtype=np.int32)\n", (33362, 33465), True, 'import numpy as np\n'), ((34661, 34697), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.uint32'}), '(n_records, dtype=np.uint32)\n', (34669, 34697), True, 'import numpy as np\n'), ((34731, 34762), 'numpy.uint32', 'np.uint32', (['fid.abs_orbit_number'], {}), '(fid.abs_orbit_number)\n', (34740, 34762), True, 'import numpy as np\n'), ((34875, 34906), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'bool'}), '(n_records, dtype=bool)\n', (34883, 34906), True, 'import numpy as np\n'), ((37952, 37986), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (37963, 37986), True, 'import numpy as np\n'), ((38016, 38061), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (38026, 38061), True, 'import numpy as np\n'), ((38214, 38248), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (38225, 38248), True, 'import numpy as np\n'), ((38285, 38330), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (38295, 38330), True, 'import numpy as np\n'), ((38380, 38414), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (38391, 38414), True, 'import numpy as np\n'), 
((38443, 38488), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (38453, 38488), True, 'import numpy as np\n'), ((38602, 38636), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (38613, 38636), True, 'import numpy as np\n'), ((38665, 38710), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (38675, 38710), True, 'import numpy as np\n'), ((38861, 38895), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (38872, 38895), True, 'import numpy as np\n'), ((38927, 38972), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (38937, 38972), True, 'import numpy as np\n'), ((39123, 39157), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (39134, 39157), True, 'import numpy as np\n'), ((39189, 39234), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (39199, 39234), True, 'import numpy as np\n'), ((39385, 39419), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (39396, 39419), True, 'import numpy as np\n'), ((39451, 39496), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (39461, 39496), True, 'import numpy as np\n'), ((39634, 39668), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (39645, 39668), True, 'import numpy as np\n'), ((39700, 39745), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (39710, 39745), True, 'import numpy as np\n'), ((39879, 39913), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, 
n_blocks))\n', (39890, 39913), True, 'import numpy as np\n'), ((39945, 39990), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (39955, 39990), True, 'import numpy as np\n'), ((40124, 40158), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (40135, 40158), True, 'import numpy as np\n'), ((40190, 40235), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (40200, 40235), True, 'import numpy as np\n'), ((40401, 40435), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (40412, 40435), True, 'import numpy as np\n'), ((40468, 40513), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (40478, 40513), True, 'import numpy as np\n'), ((40681, 40715), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (40692, 40715), True, 'import numpy as np\n'), ((40748, 40793), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (40758, 40793), True, 'import numpy as np\n'), ((40961, 40995), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (40972, 40995), True, 'import numpy as np\n'), ((41028, 41073), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (41038, 41073), True, 'import numpy as np\n'), ((41183, 41217), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (41194, 41217), True, 'import numpy as np\n'), ((41252, 41297), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (41262, 41297), True, 'import numpy as np\n'), ((41424, 41458), 'numpy.ma.zeros', 
'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (41435, 41458), True, 'import numpy as np\n'), ((41496, 41541), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (41506, 41541), True, 'import numpy as np\n'), ((41688, 41722), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (41699, 41722), True, 'import numpy as np\n'), ((41760, 41805), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (41770, 41805), True, 'import numpy as np\n'), ((41971, 42005), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (41982, 42005), True, 'import numpy as np\n'), ((42042, 42087), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (42052, 42087), True, 'import numpy as np\n'), ((42253, 42287), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (42264, 42287), True, 'import numpy as np\n'), ((42330, 42375), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (42340, 42375), True, 'import numpy as np\n'), ((42538, 42572), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (42549, 42572), True, 'import numpy as np\n'), ((42613, 42658), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (42623, 42658), True, 'import numpy as np\n'), ((42784, 42818), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (42795, 42818), True, 'import numpy as np\n'), ((42853, 42898), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (42863, 42898), True, 
'import numpy as np\n'), ((43033, 43067), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (43044, 43067), True, 'import numpy as np\n'), ((43098, 43143), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (43108, 43143), True, 'import numpy as np\n'), ((43276, 43310), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (43287, 43310), True, 'import numpy as np\n'), ((43348, 43393), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (43358, 43393), True, 'import numpy as np\n'), ((43547, 43581), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (43558, 43581), True, 'import numpy as np\n'), ((43623, 43668), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (43633, 43668), True, 'import numpy as np\n'), ((43810, 43844), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (43821, 43844), True, 'import numpy as np\n'), ((43886, 43931), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (43896, 43931), True, 'import numpy as np\n'), ((44066, 44100), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (44077, 44100), True, 'import numpy as np\n'), ((44135, 44180), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (44145, 44180), True, 'import numpy as np\n'), ((44315, 44349), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (44326, 44349), True, 'import numpy as np\n'), ((44384, 44429), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), 
'((n_records, n_blocks), dtype=bool)\n', (44394, 44429), True, 'import numpy as np\n'), ((44584, 44618), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (44595, 44618), True, 'import numpy as np\n'), ((44653, 44698), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (44663, 44698), True, 'import numpy as np\n'), ((44853, 44887), 'numpy.ma.zeros', 'np.ma.zeros', (['(n_records, n_blocks)'], {}), '((n_records, n_blocks))\n', (44864, 44887), True, 'import numpy as np\n'), ((44922, 44967), 'numpy.ma.ones', 'np.ma.ones', (['(n_records, n_blocks)'], {'dtype': 'bool'}), '((n_records, n_blocks), dtype=bool)\n', (44932, 44967), True, 'import numpy as np\n'), ((45358, 45393), 'numpy.zeros', 'np.zeros', (['n_records'], {'dtype': 'np.int64'}), '(n_records, dtype=np.int64)\n', (45366, 45393), True, 'import numpy as np\n'), ((66534, 66577), 're.compile', 're.compile', (['b\'(.*?)\\\\=\\\\"?(.*)\'', 're.VERBOSE'], {}), '(b\'(.*?)\\\\=\\\\"?(.*)\', re.VERBOSE)\n', (66544, 66577), False, 'import re\n'), ((10496, 10534), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (10507, 10534), True, 'import numpy as np\n'), ((10566, 10604), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (10577, 10604), True, 'import numpy as np\n'), ((10636, 10674), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (10647, 10674), True, 'import numpy as np\n'), ((10710, 10748), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u8"""', 'count': '(1)'}), "(fid, dtype='>u8', count=1)\n", (10721, 10748), True, 'import numpy as np\n'), ((10781, 10819), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (10792, 10819), True, 
'import numpy as np\n'), ((10852, 10890), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (10863, 10890), True, 'import numpy as np\n'), ((10923, 10961), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (10934, 10961), True, 'import numpy as np\n'), ((10998, 11036), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11009, 11036), True, 'import numpy as np\n'), ((11069, 11107), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11080, 11107), True, 'import numpy as np\n'), ((11206, 11244), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11217, 11244), True, 'import numpy as np\n'), ((11280, 11318), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11291, 11318), True, 'import numpy as np\n'), ((11353, 11391), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11364, 11391), True, 'import numpy as np\n'), ((11423, 11461), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11434, 11461), True, 'import numpy as np\n'), ((11494, 11532), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11505, 11532), True, 'import numpy as np\n'), ((11564, 11602), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11575, 11602), True, 'import numpy as np\n'), ((11640, 11678), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11651, 11678), True, 'import 
numpy as np\n'), ((11717, 11755), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11728, 11755), True, 'import numpy as np\n'), ((11793, 11831), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11804, 11831), True, 'import numpy as np\n'), ((11869, 11907), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11880, 11907), True, 'import numpy as np\n'), ((11945, 11983), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (11956, 11983), True, 'import numpy as np\n'), ((12018, 12056), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (12029, 12056), True, 'import numpy as np\n'), ((12094, 12132), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u8"""', 'count': '(1)'}), "(fid, dtype='>u8', count=1)\n", (12105, 12132), True, 'import numpy as np\n'), ((12170, 12208), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (12181, 12208), True, 'import numpy as np\n'), ((12241, 12279), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (12252, 12279), True, 'import numpy as np\n'), ((12316, 12354), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (12327, 12354), True, 'import numpy as np\n'), ((12393, 12431), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (12404, 12431), True, 'import numpy as np\n'), ((12472, 12510), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (12483, 12510), True, 'import numpy as 
np\n'), ((12545, 12583), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (12556, 12583), True, 'import numpy as np\n'), ((12620, 12658), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u4"""', 'count': '(1)'}), "(fid, dtype='>u4', count=1)\n", (12631, 12658), True, 'import numpy as np\n'), ((12690, 12728), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (12701, 12728), True, 'import numpy as np\n'), ((12767, 12805), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u2"""', 'count': '(1)'}), "(fid, dtype='>u2', count=1)\n", (12778, 12805), True, 'import numpy as np\n'), ((12840, 12878), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (12851, 12878), True, 'import numpy as np\n'), ((12913, 12951), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (12924, 12951), True, 'import numpy as np\n'), ((12986, 13024), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (12997, 13024), True, 'import numpy as np\n'), ((13059, 13097), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (13070, 13097), True, 'import numpy as np\n'), ((25836, 25874), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (25847, 25874), True, 'import numpy as np\n'), ((25906, 25944), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (25917, 25944), True, 'import numpy as np\n'), ((25976, 26014), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (25987, 26014), True, 'import numpy as np\n'), 
((26050, 26088), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u8"""', 'count': '(1)'}), "(fid, dtype='>u8', count=1)\n", (26061, 26088), True, 'import numpy as np\n'), ((26121, 26159), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (26132, 26159), True, 'import numpy as np\n'), ((26192, 26230), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (26203, 26230), True, 'import numpy as np\n'), ((26263, 26301), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (26274, 26301), True, 'import numpy as np\n'), ((26331, 26369), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (26342, 26369), True, 'import numpy as np\n'), ((26400, 26438), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (26411, 26438), True, 'import numpy as np\n'), ((26467, 26505), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (26478, 26505), True, 'import numpy as np\n'), ((26536, 26574), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (26547, 26574), True, 'import numpy as np\n'), ((26607, 26645), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (26618, 26645), True, 'import numpy as np\n'), ((26744, 26782), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (26755, 26782), True, 'import numpy as np\n'), ((26818, 26856), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (26829, 26856), True, 'import numpy as np\n'), ((26891, 
26929), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (26902, 26929), True, 'import numpy as np\n'), ((26961, 26999), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (26972, 26999), True, 'import numpy as np\n'), ((27032, 27070), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (27043, 27070), True, 'import numpy as np\n'), ((27102, 27140), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (27113, 27140), True, 'import numpy as np\n'), ((27178, 27216), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (27189, 27216), True, 'import numpy as np\n'), ((27255, 27293), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (27266, 27293), True, 'import numpy as np\n'), ((27331, 27369), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (27342, 27369), True, 'import numpy as np\n'), ((27407, 27445), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (27418, 27445), True, 'import numpy as np\n'), ((27483, 27521), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (27494, 27521), True, 'import numpy as np\n'), ((27556, 27594), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (27567, 27594), True, 'import numpy as np\n'), ((27632, 27670), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u8"""', 'count': '(1)'}), "(fid, dtype='>u8', count=1)\n", (27643, 27670), True, 'import numpy as np\n'), ((27708, 27746), 
'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (27719, 27746), True, 'import numpy as np\n'), ((27779, 27817), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (27790, 27817), True, 'import numpy as np\n'), ((27854, 27892), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (27865, 27892), True, 'import numpy as np\n'), ((27931, 27969), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (27942, 27969), True, 'import numpy as np\n'), ((28010, 28048), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (28021, 28048), True, 'import numpy as np\n'), ((28083, 28121), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (28094, 28121), True, 'import numpy as np\n'), ((28158, 28196), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u4"""', 'count': '(1)'}), "(fid, dtype='>u4', count=1)\n", (28169, 28196), True, 'import numpy as np\n'), ((28228, 28266), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (28239, 28266), True, 'import numpy as np\n'), ((28305, 28343), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u2"""', 'count': '(1)'}), "(fid, dtype='>u2', count=1)\n", (28316, 28343), True, 'import numpy as np\n'), ((28378, 28416), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (28389, 28416), True, 'import numpy as np\n'), ((28451, 28489), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (28462, 28489), True, 'import numpy as np\n'), ((28524, 28562), 
'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (28535, 28562), True, 'import numpy as np\n'), ((28597, 28635), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (28608, 28635), True, 'import numpy as np\n'), ((32554, 32587), 'os.path.expanduser', 'os.path.expanduser', (['full_filename'], {}), '(full_filename)\n', (32572, 32587), False, 'import os\n'), ((45834, 45875), 'numpy.copy', 'np.copy', (["fid.variables['num_valid_01'][r]"], {}), "(fid.variables['num_valid_01'][r])\n", (45841, 45875), True, 'import numpy as np\n'), ((71586, 71617), 'os.path.basename', 'os.path.basename', (['full_filename'], {}), '(full_filename)\n', (71602, 71617), False, 'import os\n'), ((13248, 13286), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (13259, 13286), True, 'import numpy as np\n'), ((13327, 13365), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (13338, 13365), True, 'import numpy as np\n'), ((13406, 13444), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (13417, 13444), True, 'import numpy as np\n'), ((13486, 13524), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (13497, 13524), True, 'import numpy as np\n'), ((13573, 13611), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (13584, 13611), True, 'import numpy as np\n'), ((13666, 13704), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (13677, 13704), True, 'import numpy as np\n'), ((13757, 13795), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, 
dtype='>i2', count=1)\n", (13768, 13795), True, 'import numpy as np\n'), ((13837, 13875), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (13848, 13875), True, 'import numpy as np\n'), ((13922, 13960), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u2"""', 'count': '(1)'}), "(fid, dtype='>u2', count=1)\n", (13933, 13960), True, 'import numpy as np\n'), ((14007, 14045), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (14018, 14045), True, 'import numpy as np\n'), ((14088, 14126), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (14099, 14126), True, 'import numpy as np\n'), ((14170, 14208), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (14181, 14208), True, 'import numpy as np\n'), ((14258, 14296), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u4"""', 'count': '(1)'}), "(fid, dtype='>u4', count=1)\n", (14269, 14296), True, 'import numpy as np\n'), ((14340, 14378), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (14351, 14378), True, 'import numpy as np\n'), ((14422, 14460), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (14433, 14460), True, 'import numpy as np\n'), ((14504, 14542), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (14515, 14542), True, 'import numpy as np\n'), ((14586, 14624), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (14597, 14624), True, 'import numpy as np\n'), ((28786, 28824), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, 
dtype='>i4', count=1)\n", (28797, 28824), True, 'import numpy as np\n'), ((28865, 28903), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (28876, 28903), True, 'import numpy as np\n'), ((28944, 28982), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (28955, 28982), True, 'import numpy as np\n'), ((29026, 29064), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (29037, 29064), True, 'import numpy as np\n'), ((29108, 29146), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (29119, 29146), True, 'import numpy as np\n'), ((29190, 29228), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (29201, 29228), True, 'import numpy as np\n'), ((29272, 29310), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (29283, 29310), True, 'import numpy as np\n'), ((29354, 29392), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (29365, 29392), True, 'import numpy as np\n'), ((29436, 29474), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (29447, 29474), True, 'import numpy as np\n'), ((29521, 29559), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (29532, 29559), True, 'import numpy as np\n'), ((29608, 29646), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (29619, 29646), True, 'import numpy as np\n'), ((29701, 29739), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, 
dtype='>i2', count=1)\n", (29712, 29739), True, 'import numpy as np\n'), ((29792, 29830), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (29803, 29830), True, 'import numpy as np\n'), ((29877, 29915), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u2"""', 'count': '(1)'}), "(fid, dtype='>u2', count=1)\n", (29888, 29915), True, 'import numpy as np\n'), ((29958, 29996), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (29969, 29996), True, 'import numpy as np\n'), ((30040, 30078), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i2"""', 'count': '(1)'}), "(fid, dtype='>i2', count=1)\n", (30051, 30078), True, 'import numpy as np\n'), ((30128, 30166), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u4"""', 'count': '(1)'}), "(fid, dtype='>u4', count=1)\n", (30139, 30166), True, 'import numpy as np\n'), ((30220, 30258), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">u4"""', 'count': '(1)'}), "(fid, dtype='>u4', count=1)\n", (30231, 30258), True, 'import numpy as np\n'), ((30305, 30343), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (30316, 30343), True, 'import numpy as np\n'), ((30390, 30428), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (30401, 30428), True, 'import numpy as np\n'), ((30475, 30513), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': '""">i4"""', 'count': '(1)'}), "(fid, dtype='>i4', count=1)\n", (30486, 30513), True, 'import numpy as np\n'), ((64858, 64891), 'os.path.expanduser', 'os.path.expanduser', (['full_filename'], {}), '(full_filename)\n', (64876, 64891), False, 'import os\n'), ((65165, 65220), 're.match', 're.match', (['b\'PRODUCT\\\\=\\\\"(.*)(?=\\\\")\'', 'file_contents[0]'], {}), '(b\'PRODUCT\\\\=\\\\"(.*)(?=\\\\")\', 
file_contents[0])\n', (65173, 65220), False, 'import re\n'), ((65453, 65506), 're.match', 're.match', (['b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\'', 'file_contents[i]'], {}), '(b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\', file_contents[i])\n', (65461, 65506), False, 'import re\n'), ((66226, 66259), 'os.path.expanduser', 'os.path.expanduser', (['full_filename'], {}), '(full_filename)\n', (66244, 66259), False, 'import os\n'), ((66652, 66716), 're.match', 're.match', (["b'SPH\\\\_DESCRIPTOR\\\\='", 'file_contents[n_MPH_lines + 1]'], {}), "(b'SPH\\\\_DESCRIPTOR\\\\=', file_contents[n_MPH_lines + 1])\n", (66660, 66716), False, 'import re\n'), ((67154, 67190), 're.match', 're.match', (["b'DS_NAME'", 's_SPH_lines[c]'], {}), "(b'DS_NAME', s_SPH_lines[c])\n", (67162, 67190), False, 'import re\n'), ((69010, 69043), 'os.path.expanduser', 'os.path.expanduser', (['full_filename'], {}), '(full_filename)\n', (69028, 69043), False, 'import os\n'), ((70779, 70832), 're.match', 're.match', (['b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\'', 'file_contents[i]'], {}), '(b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\', file_contents[i])\n', (70787, 70832), False, 'import re\n'), ((74028, 74041), 'os.fstat', 'os.fstat', (['fid'], {}), '(fid)\n', (74036, 74041), False, 'import os\n'), ((74051, 74064), 'os.close', 'os.close', (['fid'], {}), '(fid)\n', (74059, 74064), False, 'import os\n'), ((74119, 74163), 'numpy.int32', 'np.int32', (['(file_info.st_size // i_record_size)'], {}), '(file_info.st_size // i_record_size)\n', (74127, 74163), True, 'import numpy as np\n'), ((65734, 65777), 're.match', 're.match', (["b'(.*?)\\\\=(.*)'", 'file_contents[i]'], {}), "(b'(.*?)\\\\=(.*)', file_contents[i])\n", (65742, 65777), False, 'import re\n'), ((68194, 68245), 're.match', 're.match', (['b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\'', 's_SPH_lines[c]'], {}), '(b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\', s_SPH_lines[c])\n', (68202, 68245), False, 'import re\n'), ((70294, 70328), 're.search', 're.search', (['regex_patterns[c]', 'line'], {}), '(regex_patterns[c], 
line)\n', (70303, 70328), False, 'import re\n'), ((71060, 71103), 're.match', 're.match', (["b'(.*?)\\\\=(.*)'", 'file_contents[i]'], {}), "(b'(.*?)\\\\=(.*)', file_contents[i])\n", (71068, 71103), False, 'import re\n'), ((73960, 73993), 'os.path.expanduser', 'os.path.expanduser', (['full_filename'], {}), '(full_filename)\n', (73978, 73993), False, 'import os\n'), ((76755, 76791), 'numpy.zeros', 'np.zeros', (['j_num_DSR'], {'dtype': 'np.uint32'}), '(j_num_DSR, dtype=np.uint32)\n', (76763, 76791), True, 'import numpy as np\n'), ((76844, 76880), 'numpy.uint32', 'np.uint32', (["s_MPH_fields['ABS_ORBIT']"], {}), "(s_MPH_fields['ABS_ORBIT'])\n", (76853, 76880), True, 'import numpy as np\n'), ((77020, 77051), 'numpy.zeros', 'np.zeros', (['j_num_DSR'], {'dtype': 'bool'}), '(j_num_DSR, dtype=bool)\n', (77028, 77051), True, 'import numpy as np\n'), ((65574, 65629), 're.findall', 're.findall', (['b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\'', 'file_contents[i]'], {}), '(b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\', file_contents[i])\n', (65584, 65629), False, 'import re\n'), ((66933, 66966), 're.search', 're.search', (["b'[^\\\\x20-\\\\x7e]+'", 'li'], {}), "(b'[^\\\\x20-\\\\x7e]+', li)\n", (66942, 66966), False, 'import re\n'), ((67262, 67315), 're.findall', 're.findall', (['b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\'', 's_SPH_lines[c]'], {}), '(b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\', s_SPH_lines[c])\n', (67272, 67315), False, 'import re\n'), ((67477, 67518), 're.match', 're.match', (['b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\'', 'line'], {}), '(b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\', line)\n', (67485, 67518), False, 'import re\n'), ((68471, 68512), 're.match', 're.match', (["b'(.*?)\\\\=(.*)'", 's_SPH_lines[c]'], {}), "(b'(.*?)\\\\=(.*)', s_SPH_lines[c])\n", (68479, 68512), False, 'import re\n'), ((70900, 70955), 're.findall', 're.findall', (['b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\'', 'file_contents[i]'], {}), '(b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\', file_contents[i])\n', (70910, 70955), False, 'import re\n'), ((76138, 76171), 
'os.path.expanduser', 'os.path.expanduser', (['full_filename'], {}), '(full_filename)\n', (76156, 76171), False, 'import os\n'), ((77414, 77447), 'os.path.expanduser', 'os.path.expanduser', (['full_filename'], {}), '(full_filename)\n', (77432, 77447), False, 'import os\n'), ((65848, 65893), 're.findall', 're.findall', (["b'(.*?)\\\\=(.*)'", 'file_contents[i]'], {}), "(b'(.*?)\\\\=(.*)', file_contents[i])\n", (65858, 65893), False, 'import re\n'), ((67779, 67810), 're.match', 're.match', (["b'(.*?)\\\\=(.*)'", 'line'], {}), "(b'(.*?)\\\\=(.*)', line)\n", (67787, 67810), False, 'import re\n'), ((68313, 68366), 're.findall', 're.findall', (['b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\'', 's_SPH_lines[c]'], {}), '(b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\', s_SPH_lines[c])\n', (68323, 68366), False, 'import re\n'), ((71174, 71219), 're.findall', 're.findall', (["b'(.*?)\\\\=(.*)'", 'file_contents[i]'], {}), "(b'(.*?)\\\\=(.*)', file_contents[i])\n", (71184, 71219), False, 'import re\n'), ((67606, 67649), 're.findall', 're.findall', (['b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\'', 'line'], {}), '(b\'(.*?)\\\\=\\\\"(.*)(?=\\\\")\', line)\n', (67616, 67649), False, 'import re\n'), ((68583, 68626), 're.findall', 're.findall', (["b'(.*?)\\\\=(.*)'", 's_SPH_lines[c]'], {}), "(b'(.*?)\\\\=(.*)', s_SPH_lines[c])\n", (68593, 68626), False, 'import re\n'), ((74870, 74919), 're.findall', 're.findall', (['"""[-+]?\\\\d+"""', "s_MPH_fields['SPH_SIZE']"], {}), "('[-+]?\\\\d+', s_MPH_fields['SPH_SIZE'])\n", (74880, 74919), False, 'import re\n'), ((75163, 75213), 're.findall', 're.findall', (['"""[-+]?\\\\d+"""', "s_DSD_fields['DS_OFFSET']"], {}), "('[-+]?\\\\d+', s_DSD_fields['DS_OFFSET'])\n", (75173, 75213), False, 'import re\n'), ((75304, 75352), 're.findall', 're.findall', (['"""[-+]?\\\\d+"""', "s_DSD_fields['NUM_DSR']"], {}), "('[-+]?\\\\d+', s_DSD_fields['NUM_DSR'])\n", (75314, 75352), False, 'import re\n'), ((75432, 75481), 're.findall', 're.findall', (['"""[-+]?\\\\d+"""', "s_DSD_fields['DSR_SIZE']"], 
{}), "('[-+]?\\\\d+', s_DSD_fields['DSR_SIZE'])\n", (75442, 75481), False, 'import re\n'), ((67901, 67934), 're.findall', 're.findall', (["b'(.*?)\\\\=(.*)'", 'line'], {}), "(b'(.*?)\\\\=(.*)', line)\n", (67911, 67934), False, 'import re\n')] |
"""
This script loads in a trained policy neural network and uses it for inference.
Typically this script will be executed on the Nvidia Jetson TX2 board during an
experiment in the Spacecraft Robotics and Control Laboratory at Carleton
University.
Script created: June 12, 2019
@author: Kirk (<EMAIL>)
"""
import tensorflow as tf
import numpy as np
import socket
import time
import threading
from collections import deque
# import code # for debugging
#code.interact(local=dict(globals(), **locals())) # Ctrl+D or Ctrl+Z to continue execution
from settings import Settings
from build_neural_networks import BuildActorNetwork
"""
*# Relative pose expressed in the chaser's body frame; everything else in Inertial frame #*
Deep guidance output in x and y are in the chaser body frame
"""
# Do you want the chaser's absolute position to be included in the policy_input?
# (Must match the observation layout the network was trained with.)
CHASER_ABSOLUTE_POSITION = True
CAMERA_PROCESSING_TIME = 0.7 # [s] how long it takes SPOTNet to process an image
PHASESPACE_TIMESTEP = 0.5 # [s] equal to serverRate (PhaseSpace state update period)
# Are we testing? (True = no socket; fabricated state and printed commands)
testing = False
# Do you want to debug with constant accelerations?
# When enabled, the policy output is overridden by the constants below.
DEBUG_CONTROLLER_WITH_CONSTANT_ACCELERATIONS = False
constant_Ax = 0 # [m/s^2] in inertial frame
constant_Ay = 0 # [m/s^2] in inertial frame
constant_alpha = 0 # [rad/s^2] in inertial frame
def make_C_bI(angle):
    """Return the 2x2 rotation matrix mapping inertial-frame 2D vectors into
    the body frame of a spacecraft rotated by `angle` [rad]."""
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    return np.array([[cos_a,  sin_a],
                     [-sin_a, cos_a]])  # [2, 2]
class MessageParser:
    """Receives packets from the JetsonRepeater socket, parses them into
    SPOTNet (vision) and PhaseSpace/Pi (motion-capture) state variables,
    and appends the combined state to a deque for the guidance thread.

    Parameters
    ----------
    testing : bool
        If True, fabricate fixed test values instead of reading the socket.
    client_socket : socket.socket or int
        Connected UNIX socket to the JetsonRepeater (0 when testing).
    messages_to_deep_guidance : collections.deque
        maxlen=1 deque; each append overwrites any unread previous state.
    stop_run_flag : threading.Event
        Set externally to stop the run() loop.
    """
    def __init__(self, testing, client_socket, messages_to_deep_guidance, stop_run_flag):
        print("Initializing Message Parser!")
        self.client_socket = client_socket
        self.messages_to_deep_guidance = messages_to_deep_guidance
        self.stop_run_flag = stop_run_flag
        self.testing = testing

        # Minimum SPOTNet confidence for a detection to count as "sees target"
        self.SPOTNet_detection_threshold = 0.8

        # Initializing all variables that will be passed to the Deep Guidance
        # Items from SPOTNet (relative pose expressed in the chaser body frame)
        self.SPOTNet_relative_x = 0
        self.SPOTNet_relative_y = 0
        self.SPOTNet_relative_angle = 0
        self.SPOTNet_sees_target = False

        # Items from the Pi (PhaseSpace states, inertial frame)
        self.Pi_time = 0
        self.Pi_red_x = 0
        self.Pi_red_y = 0
        self.Pi_red_theta = 0
        self.Pi_red_Vx = 0
        self.Pi_red_Vy = 0
        self.Pi_red_omega = 0
        self.Pi_black_x = 0
        self.Pi_black_y = 0
        self.Pi_black_theta = 0
        self.Pi_black_Vx = 0
        self.Pi_black_Vy = 0
        self.Pi_black_omega = 0
        print("Done initializing parser!")

    def run(self):
        """Read and parse packets until stop_run_flag is set; after each packet,
        append the full current state tuple to the deep-guidance deque."""
        print("Running Message Parser!")
        # Run until we want to stop
        while not self.stop_run_flag.is_set():
            if self.testing:
                # Assign fixed test values instead of reading the socket
                # Items from SPOTNet
                self.SPOTNet_relative_x = 2
                self.SPOTNet_relative_y = 0
                self.SPOTNet_relative_angle = 0
                self.SPOTNet_sees_target = True
                # Items from the Pi
                self.Pi_time = 15
                self.Pi_red_x = 3
                self.Pi_red_y = 1
                self.Pi_red_theta = 0
                self.Pi_red_Vx = 0
                self.Pi_red_Vy = 0
                self.Pi_red_omega = 0
                self.Pi_black_x = 1
                self.Pi_black_y = 1
                self.Pi_black_theta = 0
                self.Pi_black_Vx = 0
                self.Pi_black_Vy = 0
                self.Pi_black_omega = 0
            else:
                # It's real
                try:
                    data = self.client_socket.recv(4096) # Read the next value
                except socket.timeout:
                    print("Socket timeout")
                    continue
                data_packet = np.array(data.decode("utf-8").splitlines())
                #print('Got message: ' + str(data.decode("utf-8")))

                # Check if it's a SpotNet packet or a Pi packet
                if data_packet[0] == "SPOTNet":
                    # We received a SPOTNet packet, update those variables accordingly
                    self.SPOTNet_relative_x = float(data_packet[1].astype(np.float32))
                    self.SPOTNet_relative_y = float(data_packet[2].astype(np.float32))
                    self.SPOTNet_relative_angle = float(data_packet[3].astype(np.float32))
                    self.SPOTNet_sees_target = float(data_packet[4]) > self.SPOTNet_detection_threshold
                    print("SPOTNet Packet. See target?: %s" %(self.SPOTNet_sees_target))
                else:
                    # We received a packet from the Pi
                    # input_data_array is: [red_x, red_y, red_theta, red_vx, red_vy, red_omega, black_x, black_y, black_theta, black_vx, black_vy, black_omega]
                    try:
                        self.Pi_time, self.Pi_red_x, self.Pi_red_y, self.Pi_red_theta, self.Pi_red_Vx, self.Pi_red_Vy, self.Pi_red_omega, self.Pi_black_x, self.Pi_black_y, self.Pi_black_theta, self.Pi_black_Vx, self.Pi_black_Vy, self.Pi_black_omega = data_packet.astype(np.float32)
                        print("Pi Packet! Time: %.1f" %self.Pi_time)
                    except ValueError:
                        # Malformed packet: wrong field count or non-numeric data.
                        # Narrowed from a bare `except:` so unrelated errors
                        # (e.g. KeyboardInterrupt) are no longer swallowed.
                        print("Bad packet from JetsonRepeater... skipping")
                        continue

            # Write the data to the queue for DeepGuidanceModelRunner to use!
            """ This queue is thread-safe. If I append multiple times without popping, the data in the queue is overwritten. Perfect! """
            #(self.Pi_time, self.Pi_red_x, self.Pi_red_y, self.Pi_red_theta, self.Pi_red_Vx, self.Pi_red_Vy, self.Pi_red_omega, self.Pi_black_x, self.Pi_black_y, self.Pi_black_theta, self.Pi_black_Vx, self.Pi_black_Vy, self.Pi_black_omega, self.SPOTNet_relative_x, self.SPOTNet_relative_y, self.SPOTNet_relative_angle, self.SPOTNet_sees_target)
            self.messages_to_deep_guidance.append((self.Pi_time, self.Pi_red_x, self.Pi_red_y, self.Pi_red_theta, self.Pi_red_Vx, self.Pi_red_Vy, self.Pi_red_omega, self.Pi_black_x, self.Pi_black_y, self.Pi_black_theta, self.Pi_black_Vx, self.Pi_black_Vy, self.Pi_black_omega, self.SPOTNet_relative_x, self.SPOTNet_relative_y, self.SPOTNet_relative_angle, self.SPOTNet_sees_target))
        print("Message handler gently stopped")
class DeepGuidanceModelRunner:
    """Loads the trained actor network (TF1 checkpoint) and, in a loop,
    converts the latest parsed state into an acceleration command that is
    sent back to the Raspberry Pi over the socket.

    Parameters
    ----------
    testing : bool
        If True, print commands instead of sending them over the socket.
    client_socket : socket.socket or int
        Connected UNIX socket to the JetsonRepeater (0 when testing).
    messages_to_deep_guidance : collections.deque
        maxlen=1 deque fed by MessageParser; popped each control step.
    stop_run_flag : threading.Event
        Set externally to stop the run() loop.
    """
    def __init__(self, testing, client_socket, messages_to_deep_guidance, stop_run_flag):
        print("Initializing deep guidance model runner")
        self.client_socket = client_socket
        self.messages_to_deep_guidance = messages_to_deep_guidance
        self.stop_run_flag = stop_run_flag
        self.testing = testing

        ###############################
        ### User-defined parameters ###
        ###############################
        self.offset_x = 0 # Docking offset in the body frame
        self.offset_y = 0 # Docking offset in the body frame
        self.offset_angle = 0

        # So we can access old Pi positions while we wait for new SPOTNet images to come in.
        # NOTE(review): round(0.7/0.5) == 1, so this deque holds a single entry;
        # if the maxlen ever exceeds 1, .pop() below should likely become
        # .popleft() to retrieve the *oldest* position -- confirm before changing.
        self.pi_position_queue = deque(maxlen = round(CAMERA_PROCESSING_TIME/PHASESPACE_TIMESTEP))
        # Loading the queue up with zeros
        for i in range(round(CAMERA_PROCESSING_TIME/PHASESPACE_TIMESTEP)):
            self.pi_position_queue.append((0.,0.,0.))
        # Holding the previous position so we know when SPOTNet gives a new update
        self.previousSPOTNet_relative_x = 0.0

        """ Holding the chaser's x, y, and theta position when a SPOTNet image was taken
        which we assume is at the same moment the previous results are received.
        This is to ensure the ~0.7 s SPOTNet processing time isn't poorly reflected
        in the target's estimated inertial position
        """
        self.chaser_x_when_image_was_taken = 0
        self.chaser_y_when_image_was_taken = 0
        self.chaser_theta_when_image_was_taken = 0

        # Inertial estimate of the target pose, refreshed on each new SPOTNet result
        self.SPOTNet_target_x_inertial = 0
        self.SPOTNet_target_y_inertial = 0
        self.SPOTNet_target_angle_inertial = 0

        # Uncomment this on TF2.0
        # tf.compat.v1.disable_eager_execution()
        # Clear any old graph
        tf.reset_default_graph()

        # Initialize Tensorflow, and load in policy
        self.sess = tf.Session()
        # Building the policy network
        self.state_placeholder = tf.placeholder(dtype = tf.float32, shape = [None, Settings.OBSERVATION_SIZE], name = "state_placeholder")
        self.actor = BuildActorNetwork(self.state_placeholder, scope='learner_actor_main')

        # Loading in trained network weights
        print("Attempting to load in previously-trained model\n")
        saver = tf.train.Saver() # initialize the tensorflow Saver()

        # Try to load in policy network parameters; abort the program if none found
        try:
            ckpt = tf.train.get_checkpoint_state('../')
            saver.restore(self.sess, ckpt.model_checkpoint_path)
            print("\nModel successfully loaded!\n")
        except (ValueError, AttributeError):
            print("No model found... quitting :(")
            raise SystemExit
        print("Done initializing model!")

    def run(self):
        """Control loop: pop the latest state, build the (normalized) policy
        input, run the network, rotate/cap the command, send it to the Pi,
        and log each timestep. Saves the log to disk on shutdown."""
        print("Running Deep Guidance!")
        counter = 1
        # Parameters for normalizing the input
        relevant_state_mean = np.delete(Settings.STATE_MEAN, Settings.IRRELEVANT_STATES)
        relevant_half_range = np.delete(Settings.STATE_HALF_RANGE, Settings.IRRELEVANT_STATES)
        # To log data
        data_log = []
        # Run zeros through the policy to ensure all libraries are properly loaded in
        deep_guidance = self.sess.run(self.actor.action_scaled, feed_dict={self.state_placeholder:np.zeros([1, Settings.OBSERVATION_SIZE])})[0]
        # Run until we want to stop.
        # BUG FIX: previously tested the module-global `stop_run_flag`; the
        # instance attribute is the flag this runner was constructed with.
        while not self.stop_run_flag.is_set():
            # Total state is [relative_x, relative_y, relative_vx, relative_vy, relative_angle, relative_angular_velocity, chaser_x, chaser_y, chaser_theta, target_x, target_y, target_theta, chaser_vx, chaser_vy, chaser_omega, target_vx, target_vy, target_omega] *# Relative pose expressed in the chaser's body frame; everything else in Inertial frame #*
            # Network input: [relative_x, relative_y, relative_angle, chaser_theta, chaser_vx, chaser_vy, chaser_omega, target_omega] ** Normalize it first **
            # Get data from Message Parser
            try:
                Pi_time, Pi_red_x, Pi_red_y, Pi_red_theta, \
                Pi_red_Vx, Pi_red_Vy, Pi_red_omega, \
                Pi_black_x, Pi_black_y, Pi_black_theta, \
                Pi_black_Vx, Pi_black_Vy, Pi_black_omega, \
                SPOTNet_relative_x, SPOTNet_relative_y, SPOTNet_relative_angle, SPOTNet_sees_target = self.messages_to_deep_guidance.pop()
            except IndexError:
                # Queue was empty, try again
                continue

            #################################
            ### Building the Policy Input ###
            #################################
            if SPOTNet_sees_target:
                """ If SPOTNet sees the target, we want to hold the target's position inertially constant until the next update.
                Otherwise, we will perceive the target moving inbetween SPOTNet updates as Red moves
                """
                if np.abs(self.previousSPOTNet_relative_x - SPOTNet_relative_x) > 0.001:
                    # We got a new SPOTNet packet, update the estimated target inertial position
                    # Estimate the target's inertial position so we can hold it constant as the chaser moves (until we get a new better estimate!)
                    relative_pose_body = np.array([SPOTNet_relative_x, SPOTNet_relative_y])
                    relative_pose_inertial = np.matmul(make_C_bI(self.chaser_theta_when_image_was_taken).T, relative_pose_body)
                    self.SPOTNet_target_x_inertial = self.chaser_x_when_image_was_taken + relative_pose_inertial[0] # Inertial estimate of the target
                    self.SPOTNet_target_y_inertial = self.chaser_y_when_image_was_taken + relative_pose_inertial[1] # Inertial estimate of the target
                    self.SPOTNet_target_angle_inertial = self.chaser_theta_when_image_was_taken + SPOTNet_relative_angle # Inertial estimate of the target

                    # Assuming a new image was just taken, hold the chaser position at this moment for use when we get the spotnet results
                    self.chaser_x_when_image_was_taken = Pi_red_x
                    self.chaser_y_when_image_was_taken = Pi_red_y
                    self.chaser_theta_when_image_was_taken = Pi_red_theta

                    # Logging so we know when we receive a new spotnet packet
                    self.previousSPOTNet_relative_x = SPOTNet_relative_x

                """ Now, calculate the relative position of the target using the inertial estimate of where the target is, along with the current
                Phasespace measurement of Red's current position.
                # The target's inertial position, as estimated by spotnet is
                [self.SPOTNet_target_x_inertial, self.SPOTNet_target_y_inertial, self.SPOTNet_target_angle_inertial]
                """
                relative_pose_inertial = np.array([self.SPOTNet_target_x_inertial - Pi_red_x, self.SPOTNet_target_y_inertial - Pi_red_y])
                relative_pose_body = np.matmul(make_C_bI(Pi_red_theta), relative_pose_inertial)

                if CHASER_ABSOLUTE_POSITION:
                    # With chaser absolute position
                    policy_input = np.array([relative_pose_body[0] - self.offset_x, relative_pose_body[1] - self.offset_y, (self.SPOTNet_target_angle_inertial - Pi_red_theta - self.offset_angle)%(2*np.pi), Pi_red_x, Pi_red_y, Pi_red_theta, Pi_red_Vx, Pi_red_Vy, Pi_red_omega, Pi_black_omega])
                else:
                    # Without chaser absolute position
                    policy_input = np.array([relative_pose_body[0] - self.offset_x, relative_pose_body[1] - self.offset_y, (self.SPOTNet_target_angle_inertial - Pi_red_theta - self.offset_angle)%(2*np.pi), Pi_red_theta, Pi_red_Vx, Pi_red_Vy, Pi_red_omega, Pi_black_omega])
            else:
                # We don't see the target -> Use PhaseSpace only
                # Calculating the relative X and Y in the chaser's body frame using PhaseSpace
                relative_pose_inertial = np.array([Pi_black_x - Pi_red_x, Pi_black_y - Pi_red_y])
                relative_pose_body = np.matmul(make_C_bI(Pi_red_theta), relative_pose_inertial)

                if CHASER_ABSOLUTE_POSITION:
                    # With chaser absolute position
                    policy_input = np.array([relative_pose_body[0] - self.offset_x, relative_pose_body[1] - self.offset_y, (Pi_black_theta - Pi_red_theta - self.offset_angle)%(2*np.pi), Pi_red_x, Pi_red_y, Pi_red_theta, Pi_red_Vx, Pi_red_Vy, Pi_red_omega, Pi_black_omega])
                else:
                    # Without chaser absolute position
                    policy_input = np.array([relative_pose_body[0] - self.offset_x, relative_pose_body[1] - self.offset_y, (Pi_black_theta - Pi_red_theta - self.offset_angle)%(2*np.pi), Pi_red_theta, Pi_red_Vx, Pi_red_Vy, Pi_red_omega, Pi_black_omega])

                """ Since SPOTNet doesn't see the target, we need to estimate the chaser_x_when_image_was_taken values
                I'll estimate that the camera delay in samples is: round(CAMERA_PROCESSING_TIME/PHASESPACE_TIMESTEP)
                """
                self.chaser_x_when_image_was_taken, self.chaser_y_when_image_was_taken, self.chaser_theta_when_image_was_taken = self.pi_position_queue.pop()
                self.pi_position_queue.append((Pi_red_x, Pi_red_y, Pi_red_theta))

            # Normalizing
            if Settings.NORMALIZE_STATE:
                normalized_policy_input = (policy_input - relevant_state_mean)/relevant_half_range
            else:
                normalized_policy_input = policy_input

            # Reshaping the input
            normalized_policy_input = normalized_policy_input.reshape([-1, Settings.OBSERVATION_SIZE])

            # Run processed state through the policy
            deep_guidance = self.sess.run(self.actor.action_scaled, feed_dict={self.state_placeholder:normalized_policy_input})[0] # [accel_x, accel_y, alpha]

            # Rotating the command into the inertial frame
            deep_guidance[:-1] = np.matmul(make_C_bI(Pi_red_theta).T,deep_guidance[:-1])

            # Commanding constant values in the inertial frame for testing purposes
            if DEBUG_CONTROLLER_WITH_CONSTANT_ACCELERATIONS:
                deep_guidance[0] = constant_Ax # [m/s^2]
                deep_guidance[1] = constant_Ay # [m/s^2]
                deep_guidance[2] = constant_alpha # [rad/s^2]

            #################################################################
            ### Cap output if we are exceeding the max allowable velocity ###
            #################################################################
            # Checking whether our velocity is too large AND the acceleration is trying to increase said velocity... in which case we set the desired_linear_acceleration to zero.
            # this is in the inertial frame
            current_velocity = np.array([Pi_red_Vx, Pi_red_Vy, Pi_red_omega])
            deep_guidance[(np.abs(current_velocity) > Settings.VELOCITY_LIMIT) & (np.sign(deep_guidance) == np.sign(current_velocity))] = 0

            # Return commanded action to the Raspberry Pi 3
            if self.testing:
                print(deep_guidance)
                pass
            else:
                deep_guidance_acceleration_signal_to_pi = str(deep_guidance[0]) + "\n" + str(deep_guidance[1]) + "\n" + str(deep_guidance[2]) + "\n"
                self.client_socket.send(deep_guidance_acceleration_signal_to_pi.encode())

            if counter % 2000 == 0:
                print("Output to Pi: ", deep_guidance, " In table inertial frame")
                print(normalized_policy_input)
            # Incrementing the counter
            counter = counter + 1

            # Log this timestep's data only if the experiment has actually started
            if Pi_time > 0:
                data_log.append([Pi_time, deep_guidance[0], deep_guidance[1], deep_guidance[2], \
                                 Pi_red_x, Pi_red_y, Pi_red_theta, \
                                 Pi_red_Vx, Pi_red_Vy, Pi_red_omega, \
                                 Pi_black_x, Pi_black_y, Pi_black_theta, \
                                 Pi_black_Vx, Pi_black_Vy, Pi_black_omega, \
                                 SPOTNet_relative_x, SPOTNet_relative_y, SPOTNet_relative_angle, SPOTNet_sees_target,
                                 self.SPOTNet_target_x_inertial, self.SPOTNet_target_y_inertial, self.SPOTNet_target_angle_inertial])

        print("Model gently stopped.")

        # Persist the log (timestamped filename) if anything was recorded
        if len(data_log) > 0:
            print("Saving data to file...",end='')
            with open('deep_guidance_data_' + time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime()) + '.txt', 'wb') as f:
                np.save(f, np.asarray(data_log))
        else:
            print("Not saving a log because there is no data to write")
        print("Done!")

        # Close tensorflow session
        self.sess.close()
##################################################
#### Start communication with JetsonRepeater #####
##################################################
# In testing mode there is no socket; the classes print instead of sending.
if testing:
    client_socket = 0
else:
    # Looping forever until we are connected
    while True:
        try: # Try to connect
            client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            client_socket.connect("/tmp/jetsonRepeater") # Connecting...
            client_socket.settimeout(2) # Setting the socket timeout to 2 seconds
            print("Connected to JetsonRepeater!")
            break
        except OSError: # If connection attempt failed
            # Narrowed from a bare `except:` so Ctrl+C can still interrupt the retry loop.
            print("Connection to JetsonRepeater FAILED. Trying to re-connect in 1 second")
            time.sleep(1)

# WE ARE CONNECTED
# Generate Queues (maxlen=1: the guidance thread only ever wants the latest state)
messages_to_deep_guidance = deque(maxlen = 1)

#####################
### START THREADS ###
#####################
all_threads = []
stop_run_flag = threading.Event() # Flag to stop all threads
# Initialize Message Parser
message_parser = MessageParser(testing, client_socket, messages_to_deep_guidance, stop_run_flag)
# Initialize Deep Guidance Model
deep_guidance_model = DeepGuidanceModelRunner(testing, client_socket, messages_to_deep_guidance, stop_run_flag)

all_threads.append(threading.Thread(target = message_parser.run))
all_threads.append(threading.Thread(target = deep_guidance_model.run))

#############################################
##### STARTING EXECUTION OF ALL THREADS #####
#############################################
for each_thread in all_threads:
    each_thread.start()

# Main thread: sleep in 0.5 s ticks; after 200 ticks (~100 s) stop gracefully.
counter_2 = 1
try:
    while True:
        time.sleep(0.5)
        if counter_2 % 200 == 0:
            print("100 seconds in, trying to stop gracefully")
            stop_run_flag.set()
            for each_thread in all_threads:
                each_thread.join()
            break
        # BUG FIX: counter_2 was never incremented, so the graceful
        # 100-second auto-stop above could never trigger.
        counter_2 = counter_2 + 1
except KeyboardInterrupt:
    print("Interrupted by user. Ending gently")
    stop_run_flag.set()
    for each_thread in all_threads:
        each_thread.join()

print('Done :)')
| [
"time.sleep",
"numpy.array",
"numpy.sin",
"collections.deque",
"numpy.delete",
"tensorflow.Session",
"tensorflow.placeholder",
"build_neural_networks.BuildActorNetwork",
"numpy.asarray",
"time.localtime",
"numpy.abs",
"tensorflow.train.get_checkpoint_state",
"numpy.cos",
"numpy.sign",
"t... | [((21206, 21221), 'collections.deque', 'deque', ([], {'maxlen': '(1)'}), '(maxlen=1)\n', (21211, 21221), False, 'from collections import deque\n'), ((21324, 21341), 'threading.Event', 'threading.Event', ([], {}), '()\n', (21339, 21341), False, 'import threading\n'), ((21667, 21710), 'threading.Thread', 'threading.Thread', ([], {'target': 'message_parser.run'}), '(target=message_parser.run)\n', (21683, 21710), False, 'import threading\n'), ((21733, 21781), 'threading.Thread', 'threading.Thread', ([], {'target': 'deep_guidance_model.run'}), '(target=deep_guidance_model.run)\n', (21749, 21781), False, 'import threading\n'), ((8529, 8553), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (8551, 8553), True, 'import tensorflow as tf\n'), ((8635, 8647), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8645, 8647), True, 'import tensorflow as tf\n'), ((8719, 8822), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, Settings.OBSERVATION_SIZE]', 'name': '"""state_placeholder"""'}), "(dtype=tf.float32, shape=[None, Settings.OBSERVATION_SIZE],\n name='state_placeholder')\n", (8733, 8822), True, 'import tensorflow as tf\n'), ((8846, 8915), 'build_neural_networks.BuildActorNetwork', 'BuildActorNetwork', (['self.state_placeholder'], {'scope': '"""learner_actor_main"""'}), "(self.state_placeholder, scope='learner_actor_main')\n", (8863, 8915), False, 'from build_neural_networks import BuildActorNetwork\n'), ((9048, 9064), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (9062, 9064), True, 'import tensorflow as tf\n'), ((9699, 9757), 'numpy.delete', 'np.delete', (['Settings.STATE_MEAN', 'Settings.IRRELEVANT_STATES'], {}), '(Settings.STATE_MEAN, Settings.IRRELEVANT_STATES)\n', (9708, 9757), True, 'import numpy as np\n'), ((9788, 9852), 'numpy.delete', 'np.delete', (['Settings.STATE_HALF_RANGE', 'Settings.IRRELEVANT_STATES'], {}), '(Settings.STATE_HALF_RANGE, 
Settings.IRRELEVANT_STATES)\n', (9797, 9852), True, 'import numpy as np\n'), ((22437, 22452), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (22447, 22452), False, 'import time\n'), ((9189, 9225), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['"""../"""'], {}), "('../')\n", (9218, 9225), True, 'import tensorflow as tf\n'), ((18213, 18259), 'numpy.array', 'np.array', (['[Pi_red_Vx, Pi_red_Vy, Pi_red_omega]'], {}), '([Pi_red_Vx, Pi_red_Vy, Pi_red_omega])\n', (18221, 18259), True, 'import numpy as np\n'), ((20698, 20747), 'socket.socket', 'socket.socket', (['socket.AF_UNIX', 'socket.SOCK_STREAM'], {}), '(socket.AF_UNIX, socket.SOCK_STREAM)\n', (20711, 20747), False, 'import socket\n'), ((1373, 1386), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1379, 1386), True, 'import numpy as np\n'), ((1388, 1401), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1394, 1401), True, 'import numpy as np\n'), ((1442, 1455), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1448, 1455), True, 'import numpy as np\n'), ((13898, 13999), 'numpy.array', 'np.array', (['[self.SPOTNet_target_x_inertial - Pi_red_x, self.SPOTNet_target_y_inertial -\n Pi_red_y]'], {}), '([self.SPOTNet_target_x_inertial - Pi_red_x, self.\n SPOTNet_target_y_inertial - Pi_red_y])\n', (13906, 13999), True, 'import numpy as np\n'), ((15133, 15189), 'numpy.array', 'np.array', (['[Pi_black_x - Pi_red_x, Pi_black_y - Pi_red_y]'], {}), '([Pi_black_x - Pi_red_x, Pi_black_y - Pi_red_y])\n', (15141, 15189), True, 'import numpy as np\n'), ((21121, 21134), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (21131, 21134), False, 'import time\n'), ((1427, 1440), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1433, 1440), True, 'import numpy as np\n'), ((11825, 11885), 'numpy.abs', 'np.abs', (['(self.previousSPOTNet_relative_x - SPOTNet_relative_x)'], {}), '(self.previousSPOTNet_relative_x - SPOTNet_relative_x)\n', (11831, 11885), True, 'import numpy as np\n'), 
((12208, 12258), 'numpy.array', 'np.array', (['[SPOTNet_relative_x, SPOTNet_relative_y]'], {}), '([SPOTNet_relative_x, SPOTNet_relative_y])\n', (12216, 12258), True, 'import numpy as np\n'), ((14264, 14537), 'numpy.array', 'np.array', (['[relative_pose_body[0] - self.offset_x, relative_pose_body[1] - self.\n offset_y, (self.SPOTNet_target_angle_inertial - Pi_red_theta - self.\n offset_angle) % (2 * np.pi), Pi_red_x, Pi_red_y, Pi_red_theta,\n Pi_red_Vx, Pi_red_Vy, Pi_red_omega, Pi_black_omega]'], {}), '([relative_pose_body[0] - self.offset_x, relative_pose_body[1] -\n self.offset_y, (self.SPOTNet_target_angle_inertial - Pi_red_theta -\n self.offset_angle) % (2 * np.pi), Pi_red_x, Pi_red_y, Pi_red_theta,\n Pi_red_Vx, Pi_red_Vy, Pi_red_omega, Pi_black_omega])\n', (14272, 14537), True, 'import numpy as np\n'), ((14634, 14887), 'numpy.array', 'np.array', (['[relative_pose_body[0] - self.offset_x, relative_pose_body[1] - self.\n offset_y, (self.SPOTNet_target_angle_inertial - Pi_red_theta - self.\n offset_angle) % (2 * np.pi), Pi_red_theta, Pi_red_Vx, Pi_red_Vy,\n Pi_red_omega, Pi_black_omega]'], {}), '([relative_pose_body[0] - self.offset_x, relative_pose_body[1] -\n self.offset_y, (self.SPOTNet_target_angle_inertial - Pi_red_theta -\n self.offset_angle) % (2 * np.pi), Pi_red_theta, Pi_red_Vx, Pi_red_Vy,\n Pi_red_omega, Pi_black_omega])\n', (14642, 14887), True, 'import numpy as np\n'), ((15451, 15704), 'numpy.array', 'np.array', (['[relative_pose_body[0] - self.offset_x, relative_pose_body[1] - self.\n offset_y, (Pi_black_theta - Pi_red_theta - self.offset_angle) % (2 * np\n .pi), Pi_red_x, Pi_red_y, Pi_red_theta, Pi_red_Vx, Pi_red_Vy,\n Pi_red_omega, Pi_black_omega]'], {}), '([relative_pose_body[0] - self.offset_x, relative_pose_body[1] -\n self.offset_y, (Pi_black_theta - Pi_red_theta - self.offset_angle) % (2 *\n np.pi), Pi_red_x, Pi_red_y, Pi_red_theta, Pi_red_Vx, Pi_red_Vy,\n Pi_red_omega, Pi_black_omega])\n', (15459, 15704), True, 'import numpy as np\n'), 
((15821, 16050), 'numpy.array', 'np.array', (['[relative_pose_body[0] - self.offset_x, relative_pose_body[1] - self.\n offset_y, (Pi_black_theta - Pi_red_theta - self.offset_angle) % (2 * np\n .pi), Pi_red_theta, Pi_red_Vx, Pi_red_Vy, Pi_red_omega, Pi_black_omega]'], {}), '([relative_pose_body[0] - self.offset_x, relative_pose_body[1] -\n self.offset_y, (Pi_black_theta - Pi_red_theta - self.offset_angle) % (2 *\n np.pi), Pi_red_theta, Pi_red_Vx, Pi_red_Vy, Pi_red_omega, Pi_black_omega])\n', (15829, 16050), True, 'import numpy as np\n'), ((20175, 20195), 'numpy.asarray', 'np.asarray', (['data_log'], {}), '(data_log)\n', (20185, 20195), True, 'import numpy as np\n'), ((10099, 10139), 'numpy.zeros', 'np.zeros', (['[1, Settings.OBSERVATION_SIZE]'], {}), '([1, Settings.OBSERVATION_SIZE])\n', (10107, 10139), True, 'import numpy as np\n'), ((18287, 18311), 'numpy.abs', 'np.abs', (['current_velocity'], {}), '(current_velocity)\n', (18293, 18311), True, 'import numpy as np\n'), ((18342, 18364), 'numpy.sign', 'np.sign', (['deep_guidance'], {}), '(deep_guidance)\n', (18349, 18364), True, 'import numpy as np\n'), ((18368, 18393), 'numpy.sign', 'np.sign', (['current_velocity'], {}), '(current_velocity)\n', (18375, 18393), True, 'import numpy as np\n'), ((20104, 20120), 'time.localtime', 'time.localtime', ([], {}), '()\n', (20118, 20120), False, 'import time\n')] |
import numpy as np
import copy
import time
from queue import PriorityQueue
from lib.game_engine import game_engine
from search.planner_utils import tup_equal, tup_dist, parse_moves, Astar
def build_moving_plan(map, startings, endings, deadlines=None):
def validate(solution):
conflicts = set([])
max_length = cost(solution)
for i_step in range(max_length):
moves = []
for i_sol in range(len(solution)):
if len(solution[i_sol])<=i_step:
moves.append(solution[i_sol][-1])
else:
moves.append(solution[i_sol][i_step])
if i_step>0:
moves_prev = []
for i_sol in range(len(solution)):
if len(solution[i_sol])<=(i_step-1):
moves_prev.append(solution[i_sol][-1])
else:
moves_prev.append(solution[i_sol][i_step-1])
flag = False
for i_sol in range(len(solution)):
if i_sol<(len(solution)-1):
the_rest = moves[:i_sol]+moves[(i_sol+1):]
else:
the_rest = moves[:i_sol]
if moves[i_sol] in the_rest:
flag = True
conflicts.add((i_sol, moves[i_sol], i_step))
# 判断路线中是否有二元环
# if i_step>0:
# for i_sol in range(len(solution)):
# if i_sol<(len(solution)-1):
# the_rest = moves_prev[:i_sol]+[(-1,-1)]+moves_prev[(i_sol+1):]
# else:
# the_rest = moves_prev[:i_sol]+[(-1,-1)]
# if moves[i_sol] in the_rest:
# candidate = the_rest.index(moves[i_sol])
# if tup_equal(moves[candidate], moves_prev[i_sol]):
# flag = True
# conflicts.add((i_sol, moves[i_sol], i_step))
# 判断路线中是否有环
if i_step>0:
for i_sol in range(len(solution)):
if i_sol<(len(solution)-1):
the_rest = moves_prev[:i_sol]+[(-1,-1)]+moves_prev[(i_sol+1):]
else:
the_rest = moves_prev[:i_sol]+[(-1,-1)]
if moves[i_sol] in the_rest:
ring = [i_sol]
new_i_sol = the_rest.index(moves[i_sol])
ring_flag = True
while new_i_sol not in ring:
ring.append(new_i_sol)
i_sol = new_i_sol
if i_sol<(len(solution)-1):
the_rest = moves_prev[:i_sol]+[(-1,-1)]+moves_prev[(i_sol+1):]
else:
the_rest = moves_prev[:i_sol]+[(-1,-1)]
if moves[i_sol] in the_rest:
new_i_sol = the_rest.index(moves[i_sol])
else:
ring_flag = False
break
if ring_flag:
flag = True
conflicts.add((i_sol, moves[i_sol], i_step))
if flag: break
return list(conflicts)
def cost(solution):
return np.max([len(x) for x in solution])
def build_reservation(constraint):
reservation = np.zeros([map.shape[0], map.shape[1], np.max([x[2] for x in constraint])+1], dtype=int)
for c in constraint:
reservation[c[1][0], c[1][1], c[2]] = 1
return reservation
    def low_level(start, end, reservation, deadline=None):
        # Single-agent path search honouring the reservation table.
        # Returns (route, conflict); route is None on failure, conflict is a
        # [row, col, t] blocking cell (currently always [] — see commented line).
        route = Astar(map, start, end, reservation=reservation)[0]
        if len(route)==0:
            return None, []
        if deadline is not None and len(route)>deadline:
            # Route would miss this agent's deadline.
            return None, []
        if len(route)>=reservation.shape[2]:
            return route, []
        else:
            # Pad the route with wait/adjacent moves until it spans the full
            # reservation horizon.
            for i in range(len(route), reservation.shape[2]):
                node = route[-1]
                new_nodes = [(int(node[0]), int(node[1])),
                             (int(node[0]-1), int(node[1])),
                             (int(node[0]+1), int(node[1])),
                             (int(node[0]), int(node[1]-1)),
                             (int(node[0]), int(node[1]+1))]
                flag = False
                for new_node in new_nodes:
                    if new_node[0]<0 or new_node[1]<0 or new_node[0]>=reservation.shape[0] or new_node[1]>=reservation.shape[1]: continue
                    # NOTE(review): this tests the reservation at the *current*
                    # node, not at new_node — verify; it looks like it should be
                    # reservation[new_node[0], new_node[1], i].
                    if reservation[node[0], node[1], i]==0:
                        route.append(new_node)
                        flag = True
                        break
                if not flag:
                    # return None, [node[0], node[1], i-1]
                    return None, []
            return route, []
    assert len(startings)==len(endings)
    # High-level Conflict-Based Search over (cost, solution, constraints) nodes.
    q = PriorityQueue() # (cost, solution)
    constraints = [[] for _ in range(len(startings))]
    # Initial solution: each agent plans independently, ignoring the others.
    solution = [Astar(map, startings[i], endings[i])[0] for i in range(len(startings))]
    q.put((cost(solution), solution, constraints))
    while q.qsize()>0:
        _, solution, constraints = q.get()
        conflicts = validate(solution)
        if len(conflicts)==0:
            # Conflict-free joint plan found.
            return solution
        for c in conflicts:
            # Branch: forbid agent c[0] from occupying cell c[1] at time c[2].
            new_constraint_i = copy.deepcopy(constraints[c[0]]+[c])
            res = build_reservation(new_constraint_i)
            new_constraints = copy.deepcopy(constraints)
            new_constraints[c[0]] = new_constraint_i
            new_solution = copy.deepcopy(solution)
            if deadlines is None:
                new_route, new_conflict = low_level(startings[c[0]], endings[c[0]], res)
            else:
                new_route, new_conflict = low_level(startings[c[0]], endings[c[0]], res, deadlines[c[0]])
            while len(new_conflict)>0:
                # Reserve the reported blocking cell and replan until no
                # low-level conflict remains.
                res[new_conflict[0], new_conflict[1], new_conflict[2]] = 1
                if deadlines is None:
                    new_route, new_conflict = low_level(startings[c[0]], endings[c[0]], res)
                else:
                    new_route, new_conflict = low_level(startings[c[0]], endings[c[0]], res, deadlines[c[0]])
            if new_route is None:
                # This branch is infeasible for the constrained agent.
                continue
            new_solution[c[0]] = new_route
            q.put((cost(new_solution), new_solution, new_constraints))
    return None
class CBS:
    """Conflict-Based Search policy: plan a joint collision-free route for all
    players at construction time, then replay it one step per pop_moves()."""
    def __init__(self, ge):
        # Walkable cells: channel 0 of the map is >= 0 on free cells.
        map = ge._map[:,:,0]>=0
        # Start/goal cells ordered by their player id stored in the map channels.
        startings = sorted([(int(x[0]), int(x[1])) for x in np.argwhere(ge._map[:,:,1]>0)], key=lambda x: ge._map[x[0],x[1],1])
        endings = sorted([(int(x[0]), int(x[1])) for x in np.argwhere(ge._map[:,:,0]>0)], key=lambda x: ge._map[x[0],x[1],0])
        if ge.step_left is not None:
            self.deadlines = ge.step_left-1
        else:
            self.deadlines = None
        tmp_sol = build_moving_plan(map, startings, endings, self.deadlines)
        if tmp_sol is None:
            # No joint plan found: every player falls back to a single no-op move.
            self.solution = [[(0,0,0)] for _ in range(len(ge.players))]
        else:
            moves_list = [parse_moves(x) for x in tmp_sol]
            max_len = np.max([len(x) for x in tmp_sol])
            self.solution = []
            for moves in moves_list:
                moves_t = [(x[0], x[1], 0) for x in moves]
                # Pad shorter plans with no-op moves so all players stay in sync.
                while len(moves_t)<(max_len-1):
                    moves_t.append((0,0,0))
                # Stored reversed so pop() consumes the plan front-to-back;
                # (0,0,1) is appended last and hence popped first — presumably a
                # special flag move (TODO confirm against the game engine).
                moves_t.reverse()
                moves_t.append((0,0,1))
                self.solution.append(moves_t)
    def pop_moves(self, ge=None):
        # The game engine argument is accepted only for interface consistency
        # with the other policies; it is not used here.
        if len(self.solution[0])>0:
            moves = [m.pop() for m in self.solution]
            return True, moves
        else:
            return False, []
| [
"search.planner_utils.parse_moves",
"numpy.max",
"search.planner_utils.Astar",
"numpy.argwhere",
"copy.deepcopy",
"queue.PriorityQueue"
] | [((5066, 5081), 'queue.PriorityQueue', 'PriorityQueue', ([], {}), '()\n', (5079, 5081), False, 'from queue import PriorityQueue\n'), ((3805, 3852), 'search.planner_utils.Astar', 'Astar', (['map', 'start', 'end'], {'reservation': 'reservation'}), '(map, start, end, reservation=reservation)\n', (3810, 3852), False, 'from search.planner_utils import tup_equal, tup_dist, parse_moves, Astar\n'), ((5171, 5207), 'search.planner_utils.Astar', 'Astar', (['map', 'startings[i]', 'endings[i]'], {}), '(map, startings[i], endings[i])\n', (5176, 5207), False, 'from search.planner_utils import tup_equal, tup_dist, parse_moves, Astar\n'), ((5517, 5555), 'copy.deepcopy', 'copy.deepcopy', (['(constraints[c[0]] + [c])'], {}), '(constraints[c[0]] + [c])\n', (5530, 5555), False, 'import copy\n'), ((5638, 5664), 'copy.deepcopy', 'copy.deepcopy', (['constraints'], {}), '(constraints)\n', (5651, 5664), False, 'import copy\n'), ((5746, 5769), 'copy.deepcopy', 'copy.deepcopy', (['solution'], {}), '(solution)\n', (5759, 5769), False, 'import copy\n'), ((7258, 7272), 'search.planner_utils.parse_moves', 'parse_moves', (['x'], {}), '(x)\n', (7269, 7272), False, 'from search.planner_utils import tup_equal, tup_dist, parse_moves, Astar\n'), ((3571, 3605), 'numpy.max', 'np.max', (['[x[2] for x in constraint]'], {}), '([x[2] for x in constraint])\n', (3577, 3605), True, 'import numpy as np\n'), ((6717, 6750), 'numpy.argwhere', 'np.argwhere', (['(ge._map[:, :, 1] > 0)'], {}), '(ge._map[:, :, 1] > 0)\n', (6728, 6750), True, 'import numpy as np\n'), ((6843, 6876), 'numpy.argwhere', 'np.argwhere', (['(ge._map[:, :, 0] > 0)'], {}), '(ge._map[:, :, 0] > 0)\n', (6854, 6876), True, 'import numpy as np\n')] |
import warnings
import os
from numpy import log10, floor, abs, round, poly1d, load
from .detect_peaks import detect_peaks
from .metadata import get_metadata_from_filename
from .static import (
wavenumbers_to_nm, nm_to_wavenumbers, nm_to_ir_wavenumbers,
ir_wavenumbers_to_nm, savefig, get_interval_index, find_nearest_index
)
from sfg2d.utils.config import CONFIG
from .filter import double_resample, replace_pixel
def round_sig(x, sig=2):
    """Round ``x`` to ``sig`` significant figures.

    Parameters
    ----------
    x : number
        Value to round.
    sig : int
        Number of significant figures to keep.

    Returns
    -------
    The rounded value. 0 is returned unchanged, since ``log10(0)`` is
    undefined (the original implementation raised/warned on ``x == 0``).
    """
    if x == 0:
        return x  # avoid log10(0) -> -inf blowing up the digit count
    return round(x, sig-int(floor(log10(abs(x))))-1)
def round_by_error(value, error, min_sig=2):
    """Round a value/error pair so the value's precision matches its error.

    value: the measured quantity
    error: its uncertainty
    min_sig: minimum number of significant digits kept for the value
    """
    order_value = int(floor(log10(abs(value))))
    order_error = int(floor(log10(abs(error))))
    # Keep at least the minimal number of significant digits.
    sig_digits = max(order_value - order_error + 1, min_sig)
    return round_sig(value, sig_digits), round_sig(error, 1)
def pixel_to_nm(x, central_wl):
    """Transform pixel coordinates to nanometers.

    Parameters
    ----------
    x : array like
        Pixel positions to convert.
    central_wl : int
        Central wavelength of the camera in nm.
    """
    # Shift the stored calibration polynomial from the calibration central
    # wavelength to the requested one, then evaluate it at x.
    # (Renamed the local so it no longer shadows the function name.)
    calibration = poly1d(CONFIG['CALIB_PARAMS']) + central_wl - CONFIG['CALIB_CW']
    return calibration(x)
def nm_to_pixel(x, central_wl):
    """Transform nm to pixel coordinates for a given central wavelength.

    Parameters
    ----------
    x : array like
        Wavelengths in nm to transform into pixel coordinates.
    central_wl : int
        Central wavelength of the camera in nm.

    Returns
    -------
    num or array of x in pixel coordinates
    """
    params_file_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "../data/calib/params_Ne_670.npy"
    )
    params = load(params_file_path)
    # The calibration central wavelength is encoded in the file name ("670").
    calib_cw = int(params_file_path[-7:-4])
    if len(params) > 2:
        # Keep only the linear part (slope, offset) of the calibration.
        params = params[-2:]
    if len(params) < 2:
        # Bug fix: warnings.Warn does not exist; the original raised an
        # AttributeError here instead of emitting the intended warning.
        warnings.warn("Can't use constant calibration")
    # NOTE(review): only calib_cw is divided by the slope here; verify the
    # intended formula is not (x - params[1] - central_wl + calib_cw) / params[0].
    return x - params[1] - central_wl + calib_cw/params[0]
def get_dataframe(record, seconds, kwargs_track=None):
    """Build a time-indexed DataFrame from a record's track data.

    seconds: number of paused seconds between frames. This is not saved by
    the .spe file; the frame-time difference is read from .spe metadata.
    """
    import pandas as pd
    from datetime import timedelta
    kwargs_track = kwargs_track or {}
    ydata = record.track(**kwargs_track).squeeze()
    start_time = record.metadata['date']
    frame_gap = timedelta(seconds=seconds)
    timestamps = [start_time + frame_gap * i for i in range(len(ydata))]
    return pd.DataFrame(ydata, index=timestamps)
| [
"numpy.abs",
"os.path.realpath",
"warnings.Warn",
"pandas.DataFrame",
"datetime.timedelta",
"numpy.load",
"numpy.poly1d"
] | [((1864, 1886), 'numpy.load', 'load', (['params_file_path'], {}), '(params_file_path)\n', (1868, 1886), False, 'from numpy import log10, floor, abs, round, poly1d, load\n'), ((2703, 2745), 'pandas.DataFrame', 'pd.DataFrame', (['ydata'], {'index': 'measurment_time'}), '(ydata, index=measurment_time)\n', (2715, 2745), True, 'import pandas as pd\n'), ((2016, 2063), 'warnings.Warn', 'warnings.Warn', (['"""Can\'t use constant calibration"""'], {}), '("Can\'t use constant calibration")\n', (2029, 2063), False, 'import warnings\n'), ((1302, 1332), 'numpy.poly1d', 'poly1d', (["CONFIG['CALIB_PARAMS']"], {}), "(CONFIG['CALIB_PARAMS'])\n", (1308, 1332), False, 'from numpy import log10, floor, abs, round, poly1d, load\n'), ((1774, 1800), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1790, 1800), False, 'import os\n'), ((2637, 2663), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'seconds'}), '(seconds=seconds)\n', (2646, 2663), False, 'from datetime import timedelta\n'), ((752, 762), 'numpy.abs', 'abs', (['value'], {}), '(value)\n', (755, 762), False, 'from numpy import log10, floor, abs, round, poly1d, load\n'), ((784, 794), 'numpy.abs', 'abs', (['error'], {}), '(error)\n', (787, 794), False, 'from numpy import log10, floor, abs, round, poly1d, load\n'), ((537, 543), 'numpy.abs', 'abs', (['x'], {}), '(x)\n', (540, 543), False, 'from numpy import log10, floor, abs, round, poly1d, load\n')] |
import numpy as np
from numpy import *
from scipy.sparse.linalg import svds, eigs
from sklearn.preprocessing import normalize
from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score, adjusted_mutual_info_score
from sklearn.cluster import KMeans
from scipy.sparse import spdiags
nmi = normalized_mutual_info_score
ami = adjusted_mutual_info_score
ari = adjusted_rand_score
def acc(y_true, y_pred):
    """
    Calculate clustering accuracy.
    # Arguments
        y: true labels, numpy.array with shape `(n_samples,)`
        y_pred: predicted labels, numpy.array with shape `(n_samples,)`
    # Return
        accuracy, in [0,1]
    """
    from scipy.optimize import linear_sum_assignment
    y_true = y_true.astype(np.int64)
    assert y_pred.size == y_true.size
    n_labels = max(y_pred.max(), y_true.max()) + 1
    # Contingency table: counts of (predicted label, true label) pairs.
    contingency = np.zeros((n_labels, n_labels), dtype=np.int64)
    for p, t in zip(y_pred, y_true):
        contingency[p, t] += 1
    # Hungarian matching maximises matched counts (min of the negated table).
    rows, cols = linear_sum_assignment(contingency.max() - contingency)
    matched = sum(contingency[r, c] for r, c in zip(rows, cols))
    return matched * 1.0 / y_pred.size
def err_rate(gt_s, s):
    """Clustering error rate: one minus the matched clustering accuracy."""
    accuracy = acc(gt_s, s)
    return 1.0 - accuracy
# This function is no longer used.
'''
def post_proC(C, K, d, alpha):
# 求sigma(对角矩阵,对角元素为行和,数据类型为m*m)
# C.shape = (n,m)
n = C.shape[0]
kmeansNum = C.shape[1]
sigma = np.sum(C, axis=0)
sigma = sigma[0:kmeansNum]
# print('sigma', sigma)
# 计算sigma的-1/2次方
sigma = np.abs(sigma)**(-0.5)
sigma = np.diag(sigma)
C = np.matmul(C, sigma)
# print('Chat', C, '\n', 'Chat.shape', C.shape)
r = min(d * K + 1, C.shape[1] - 1)
U, Sigma, _ = svds(C, r, v0=np.ones(kmeansNum))
U = normalize(U, norm='l2', axis=1)
y = KMeans(n_clusters=K, random_state=0).fit(U)
y = y.labels_
return C
'''
####################################
# This part calls the SVD code directly in MATLAB.
def spectral_clustering(C, K, d, alpha, ro):
    """Cluster the rows of C into K groups via an SVD embedding from MATLAB.

    The left singular vectors of C are computed by the MATLAB helper
    ``mySVD`` and then clustered with k-means.

    NOTE(review): d, alpha and ro are currently unused — confirm whether
    callers still need them before removing.
    """
    import matlab.engine
    eng = matlab.engine.start_matlab()
    n, m = C.shape
    C = C.tolist()
    U = eng.mySVD(C, K, n, m)
    # U, sig, V = result
    # y = eng.litekmeans(U, K)
    # labels = litekmeans(U, k, 'MaxIter', 100, 'Replicates', 10)
    eng.quit()
    y = KMeans(n_clusters=K, random_state=0).fit(U)
    y = y.labels_
    # y, _ = mysvd(C, K)
    return y
#################################
# This part is the MATLAB SVD code translated to Python.
'''
def sort_eigvector_by_eigvalue(a, b):
# :param a: eigvalue
# :param b: eigvector
# :return:
a = -np.abs(a)
asort = np.abs(np.sort(a, axis=0))
index = a.argsort()
bsort = b[:, index]
return asort, bsort
def matlab_max(A, B):
shape = A.shape
a = A.ravel()
b = B.ravel()
c = []
for i in range(a.size):
if a[i]>b[i]:
ci = a[i]
else:ci = b[i]
c = np.r_[c, ci]
c = c.reshape(shape)
return c
def mysvd(C, ReducedDim=None):
# You can change this number according your machine computational power
max_matrix_size = 1600
eigvector_ratio = 0.1
if ReducedDim is None:
ReducedDim = 0
nSmp, mFea = C.shape
if mFea/nSmp > 1.0713:
ddata = np.matmul(C, C.T)
ddata = matlab_max(ddata, ddata.T)
dimMatric = ddata.shape[0]
if ReducedDim > 0 and dimMatric > max_matrix_size and ReducedDim < dimMatric*eigvector_ratio:
# option = {"disp": [0]}
# eigvalue, U = np.linalg.eig(ddata)
U, eigvalue = eigs(ddata, ReducedDim, which='LM')
eigvalue = np.diag(eigvalue)
else:
# matlab code
# if issparse(ddata)
# ddate = full(ddata)
#
eigvalue, U = np.linalg.eig(ddata)
# eigvalue = np.diag(eigvalue)
# eigvalue = np.abs(np.sort(-np.abs(eigvalue), axis=0))
eigvalue, U = sort_eigvector_by_eigvalue(eigvalue, U)
maxeigvalue = np.amax(eigvalue, axis=0)
eigIdx = np.argwhere(abs(eigvalue)/maxeigvalue<1e-10)
eigvalue[:, eigIdx] = []
U[:, eigIdx] = []
if ReducedDim > 0 and ReducedDim < len(eigvalue):
eigvalue = eigvalue[0:ReducedDim]
U = U[:, 0:ReducedDim]
eigvalue_half = eigvalue**(0.5)
S = spdiags(eigvalue_half, 0, len(eigvalue_half), len(eigvalue_half))
nargout = 3 # Number of function outputs
if nargout >= 3:
eigvalue_minushalf = eigvalue_half**(-1)
V = np.matmul(C.T, np.multiply(U, np.tile(eigvalue_minushalf.T, U.shape[0], 1)))
else:
ddata = np.matmul(C.T, C)
ddata = matlab_max(ddata, ddata.T)
dimMatric = ddata.shape[0]
if ReducedDim > 0 and dimMatric > max_matrix_size and ReducedDim < dimMatric*eigvector_ratio:
V, eigvalue = eigs(ddata, ReducedDim, which='LM')
eigvalue = np.diag(eigvalue)
else:
# matlab code
# if issparse(ddata)
# ddate = full(ddata)
eigvalue, V = np.linalg.eig(ddata)
# eigvalue = np.diag(eigvalue)
# eigvalue = np.abs(np.sort(-np.abs(eigvalue), axis=0))
eigvalue, V = sort_eigvector_by_eigvalue(eigvalue, V)
maxeigvalue = np.amax(eigvalue, axis=0)
evaluate = maxeigvalue*(1e-10)
eigIdx = np.argwhere(abs(eigvalue) < evaluate)
# print('eigIdx:', eigIdx)
# print('eigvalue:', eigvalue)
#########
eigvalue = delete(eigvalue, eigIdx)
V = delete(V, eigIdx, axis=1)
# eigvalue[:, eigIdx] = []
# V[:, eigIdx] = []
if ReducedDim > 0 and ReducedDim < len(eigvalue):
eigvalue = eigvalue[0:ReducedDim]
V = V[:, 0:ReducedDim]
eigvalue_half = eigvalue ** (0.5)
S = spdiags(eigvalue_half, 0, len(eigvalue_half), len(eigvalue_half))
eigvalue_minushalf = eigvalue_half**(-1)
U = np.matmul(C, np.multiply(V, np.tile(eigvalue_minushalf.T, (V.shape[0], 1))))
y = KMeans(n_clusters=ReducedDim, random_state=0).fit(U)
y = y.labels_
return y, U
if __name__ == "__main__":
C = np.random.randint(1, 10, (1000, 216))
k = 20
U = mysvd(C, k)
''' | [
"sklearn.cluster.KMeans",
"numpy.zeros"
] | [((791, 823), 'numpy.zeros', 'np.zeros', (['(D, D)'], {'dtype': 'np.int64'}), '((D, D), dtype=np.int64)\n', (799, 823), True, 'import numpy as np\n'), ((2230, 2266), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'K', 'random_state': '(0)'}), '(n_clusters=K, random_state=0)\n', (2236, 2266), False, 'from sklearn.cluster import KMeans\n')] |
import numpy as np
import pandas as pd
import us
import os
import gc
from datetime import timedelta
from numpy import linalg as la
from statsmodels.formula.api import ols
from cmdstanpy import CmdStanModel
import matplotlib.pyplot as plt
# os.chdir("/home/admin/gözdeproject/")
class ELECTION_2016:
    def __init__(self):
        """Load all input data, build the state covariance matrices and the
        election-day prior, and cache every variable handed to the Stan model.

        Reads several CSVs from data/ (polls, 2012 results, census,
        urbanicity, white-evangelical shares) relative to the working
        directory.
        """
        lambda_ = 0.75
        C_1 = np.ones([51, 51])
        # NOTE(review): 'a' (and 'tmp_C' further down) appear unused — confirm.
        a = 1
        self.polling_bias_scale = 0.013
        self.random_walk_scale = 0.05 / np.sqrt(300)
        self.sigma_measure_noise_national = 0.04
        self.sigma_measure_noise_state = 0.04
        self.sigma_c = 0.06
        self.sigma_m = 0.04
        self.sigma_pop = 0.04
        self.sigma_e_bias = 0.02
        self.run_date = pd.to_datetime("2016-11-08")
        self.election_day = pd.to_datetime("2016-11-08")
        self.start_date = pd.to_datetime("2016-03-01")
        # day indices
        self.df = self.get_df("data/all_polls.csv")
        first_day = min(self.df["start"])
        # getting states info from 2012
        state2012 = pd.read_csv("data/2012.csv")
        self.state_name = state2012["state_name"].values.tolist()
        state2012["score"] = state2012["obama_count"] / (state2012["obama_count"] + state2012["romney_count"])
        state2012["national score"] = sum(state2012["obama_count"]) / sum(
            state2012["obama_count"] + state2012["romney_count"])
        state2012["delta"] = state2012["score"] - state2012["national score"]
        state2012["share_national_vote"] = (state2012["total_count"] * (1 + state2012["adult_pop_growth_2011_15"])) \
                                           / sum(state2012["total_count"] * (1 + state2012["adult_pop_growth_2011_15"]))
        state2012 = state2012.sort_values("state")
        self.state_abb = state2012["state"]
        prior_diff_score = pd.DataFrame(state2012["delta"])
        prior_diff_score.set_index(self.state_abb, inplace=True)
        self.state_weights = pd.DataFrame(state2012["share_national_vote"] / sum(state2012["share_national_vote"]))
        self.state_weights.set_index(self.state_abb.sort_values(), inplace=True)
        ##creating covariance matrices
        # preparing data
        state_data = pd.read_csv("data/abbr_list.csv")
        state_data = state_data[["year", "state", "dem"]]
        state_data = state_data[state_data["year"] == 2016]
        state_data.rename(columns={"year": "variable", "dem": "value"}, inplace=True)
        state_data = state_data[["state", "variable", "value"]]
        census = pd.read_csv("data/acs_2013_variables.csv")
        census.dropna(inplace=True)
        census.drop(columns=["state_fips", "pop_total", "pop_density"], inplace=True)
        census = census.melt(id_vars="state")
        state_data = state_data.append(census)
        # adding urbanicity
        urbanicity = pd.read_csv("data/urbanicity_index.csv")
        urbanicity.rename(columns={"average_log_pop_within_5_miles": "pop_density"}, inplace=True)
        urbanicity = urbanicity[["state", "pop_density"]]
        urbanicity = urbanicity.melt(id_vars="state")
        state_data = state_data.append(urbanicity)
        # adding white evangelical
        white_pct = pd.read_csv("data/white_evangel_pct.csv")
        white_pct = white_pct.melt(id_vars="state")
        state_data = state_data.append(white_pct)
        # spread the data
        state_data_long = state_data.copy()
        # min-max scale every demographic variable before correlating
        state_data_long["value"] = state_data_long.groupby("variable")["value"].transform(
            lambda x: (x - x.min()) / (x.max() - x.min()))
        state_data_long = state_data_long.pivot_table(index="variable", columns="state", values="value").reset_index(
            "variable")
        state_data_long.drop(columns=["variable"], inplace=True)
        # creting and computing correlation matrix
        # formula : a*(lambda*C + (1-lambda)*C_1)
        # where C is corr matrix with min 0
        # C_1 is sq matrix with all numbers 1
        # lambda = 0 -> 100% corr, lambda = 1 -> our corr matrix
        C = state_data_long.corr()
        # make the values of C min 0
        C = C.clip(lower=0)
        tmp_C = C.copy()
        np.fill_diagonal(tmp_C.values, np.nan)
        A = (lambda_ * C + (1 - lambda_) * C_1)
        new_C = self.nearestPD(A)
        # making positive definite
        state_correlation_polling = new_C
        state_correlation_polling = self.nearestPD(state_correlation_polling)
        # cov matrix for polling error
        self.state_covariance_polling_bias = self.cov_matrix(51, 0.078 ** 2, 0.9)
        self.state_covariance_polling_bias = self.state_covariance_polling_bias * state_correlation_polling
        # NOTE(review): the bare np.sqrt(...) expressions below discard their
        # results — presumably leftover diagnostics; confirm before removing.
        np.sqrt(self.state_weights.T @ self.state_covariance_polling_bias @ self.state_weights) / 4
        # cov matrix for prior election day prediction
        self.state_covariance_mu_b_T = self.cov_matrix(51, 0.18 ** 2, 0.9)
        self.state_covariance_mu_b_T = self.state_covariance_mu_b_T * state_correlation_polling
        np.sqrt(self.state_weights.T @ self.state_covariance_mu_b_T @ self.state_weights) / 4
        # cov matrix for random walks
        state_covariance_mu_b_walk = self.cov_matrix(51, 0.017 ** 2, 0.9)
        # demo corrs to fill gaps in state polls
        state_covariance_mu_b_walk = state_covariance_mu_b_walk * state_correlation_polling
        (np.sqrt(self.state_weights.T @ state_covariance_mu_b_walk @ self.state_weights) / 4) * np.sqrt(300)
        # Making default cov matrices:
        # initial cov matrix
        self.state_covariance_0 = self.cov_matrix(51, 0.07 ** 2, 0.9)
        self.state_covariance_0 = self.state_covariance_0 * state_correlation_polling
        np.sqrt(self.state_weights.T @ self.state_covariance_0 @ self.state_weights) / 4
        diffdays_until_election = (self.election_day - self.run_date).days
        expected_national_mu_b_T_error = self.fit_rmse_day_x(diffdays_until_election) # 0.03
        self.mu_b_T_scale = expected_national_mu_b_T_error
        # national_cov_matrix_error_sd = np.sqrt(self.state_weights.T @ self.state_covariance_0 @ self.state_weights)  # 0.05
        # cov_poll_bias = self.state_covariance_0 * ((self.polling_bias_scale / national_cov_matrix_error_sd * 4) ** 2).values[0][0]
        # cov_mu_b_T = self.state_covariance_0 * ((self.mu_b_T_scale / national_cov_matrix_error_sd * 4) ** 2).values[0][0]
        # cov_mu_b_walk = self.state_covariance_0 * ((self.random_walk_scale / national_cov_matrix_error_sd * 4) ** 2).values[0][0]
        # creating priors:
        abramowitz = pd.read_csv("data/abramowitz_data.csv")
        abramowitz = abramowitz[abramowitz["year"] < 2016]
        prior_model = ols("incvote ~ juneapp + q2gdp", data=abramowitz).fit()
        # make predictions
        new_data = {"juneapp": [4], "q2gdp": [1.1]}
        national_mu_prior = prior_model.predict(pd.DataFrame(new_data))
        national_mu_prior = national_mu_prior / 100
        prior_diff_score_values = prior_diff_score.values.reshape(51)
        # prior per-state mean on the logit scale
        mu_b_prior_result = self.logging(national_mu_prior.values.reshape(1) + prior_diff_score_values)
        self.mu_b_prior = prior_diff_score.copy()
        self.mu_b_prior["delta"] = mu_b_prior_result
        # Pooled voters were different from average voters until September
        # creating alpha for inconsistency btw national and state polls
        # NOTE(review): score_among_polled is computed but never used — confirm.
        score_among_polled = sum(state2012.drop(7)["obama_count"]) / sum(
            state2012.drop(7)["obama_count"] + state2012.drop(7)["romney_count"])
        # pollsters that adjust for party
        adjusters = ["ABC", "Washington Post", "Ipsos", "Pew", "YouGov", "NBC"]
        # creating variables
        self.N_state_polls = self.df[self.df["index_s"] != 51].shape[0]
        self.N_national_polls = self.df[self.df["index_s"] == 51].shape[0]
        self.T = (self.election_day - first_day).days
        self.S = 51 # number of states polled
        self.P = len(self.df["pollster"].unique())
        self.M = len(self.df["mode"].unique())
        self.Pop = len(self.df["polltype"].unique())
        # index_s == 51 marks national polls; everything else is a state poll
        self.state = self.df[self.df["index_s"] != 51]["index_s"]
        self.day_national = self.df[self.df["index_s"] == 51]["poll_day"]
        self.day_state = self.df[self.df["index_s"] != 51]["poll_day"]
        self.poll_national = self.df[self.df["index_s"] == 51]["index_p"]
        self.poll_state = self.df[self.df["index_s"] != 51]["index_p"]
        self.poll_mode_national = self.df[self.df["index_s"] == 51]["index_m"]
        self.poll_mode_state = self.df[self.df["index_s"] != 51]["index_m"]
        self.poll_pop_national = self.df[self.df["index_s"] == 51]["index_pop"]
        self.poll_pop_state = self.df[self.df["index_s"] != 51]["index_pop"]
        self.n_democrat_national = self.df[self.df["index_s"] == 51]["n_clinton"]
        self.n_democrat_state = self.df[self.df["index_s"] != 51]["n_clinton"]
        self.n_two_share_national = self.df[self.df["index_s"] == 51].loc[:, ["n_trump", "n_clinton"]].sum(axis=1)
        self.n_two_share_state = self.df[self.df["index_s"] != 51].loc[:, ["n_trump", "n_clinton"]].sum(axis=1)
        self.df["unadjusted"] = np.where(self.df["pollster"].isin(adjusters), 0, 1)
        self.unadjusted_national = self.df[self.df["index_s"] == 51]["unadjusted"]
        self.unadjusted_state = self.df[self.df["index_s"] != 51]["unadjusted"]
        self.polling_bias_scale = float(self.polling_bias_scale) * 4
        self.mu_b_T_scale = float(self.mu_b_T_scale) * 4
        self.random_walk_scale = float(self.random_walk_scale) * 4
@staticmethod
def logit(p):
return np.log(p) - np.log(1 - p)
@staticmethod
def inv_logit(p):
return np.exp(p) / (1 + np.exp(p))
def mean_low_high(self, draws, states, id):
if type(draws) == np.ndarray:
m = draws.mean(axis=0)
sd = draws.std(axis=0)
else:
m = draws.mean(axis=0).values
sd = draws.std(axis=0).values
draws_df = pd.DataFrame(data={"mean": self.inv_logit(m),
"high": self.inv_logit(m + 1.96 * sd),
"low": self.inv_logit(m - 1.96 * sd),
"state": states,
"type": id})
return draws_df
@staticmethod
def logging(x):
result = np.log(x / (1 - x))
return result
@staticmethod
def cov_matrix(n, sigma2, rho):
m = np.matrix(np.ones(shape=(n, n)) * np.nan)
m[np.triu_indices(n)] = rho
m[np.tril_indices(n)] = rho
np.fill_diagonal(m, 1)
nn = np.zeros(shape=(n, n))
np.fill_diagonal(nn, 1)
return_ = (sigma2 ** .5 * nn) @ m @ (sigma2 ** .5 * nn)
return return_
    def nearestPD(self, A):
        """Find the nearest positive-definite matrix to input

        A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1],
        which credits [2].

        [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd

        [2] N.J. Higham, "Computing a nearest symmetric positive semidefinite
        matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
        """
        # Symmetrize, then replace the symmetric part by its positive
        # semidefinite polar factor (Higham's construction).
        B = (A + A.T) / 2
        _, s, V = la.svd(B)
        H = np.dot(V.T, np.dot(np.diag(s), V))
        A2 = (B + H) / 2
        A3 = (A2 + A2.T) / 2
        if self.isPD(A3):
            return A3
        spacing = np.spacing(la.norm(A))
        # The above is different from [1]. It appears that MATLAB's `chol` Cholesky
        # decomposition will accept matrixes with exactly 0-eigenvalue, whereas
        # Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
        # for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
        # will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
        # the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
        # `spacing` will, for Gaussian random matrixes of small dimension, be on
        # othe order of 1e-16. In practice, both ways converge, as the unit test
        # below suggests.
        I = np.eye(A.shape[0])
        k = 1
        while not self.isPD(A3):
            # Bump the diagonal by a growing multiple of the most negative
            # eigenvalue until Cholesky succeeds.
            mineig = np.min(np.real(la.eigvals(A3)))
            A3 += I * (-mineig * k ** 2 + spacing)
            k += 1
        return A3
@staticmethod
def isPD(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = la.cholesky(B)
return True
except la.LinAlgError:
return False
@staticmethod
def fit_rmse_day_x(x):
result = 0.03 + (10 ** (-6.6)) * x ** 2
return result
    def get_df(self, input_file):
        """Load and clean the raw polls CSV into the model's working frame.

        Parameters
        ----------
        input_file : str
            Path to the raw polls CSV.

        Returns
        -------
        pandas.DataFrame
            Filtered polls with two-party shares, poll-day offsets and the
            integer index columns (index_s/p/pop/m/t) consumed by the model.
        """
        df = pd.read_csv(input_file)
        df.drop(columns=["pollster.url", "source.url", "question.text", "question.iteration", "entry.date.time..et.",
                         "partisan", "affiliation", "Unnamed: 0"], inplace=True)
        ### Cleaning up the data ###
        # filtering data
        df.rename(columns={"number.of.observations": "n"}, inplace=True)
        df.rename(columns={"start.date": "start"}, inplace=True)
        df.rename(columns={"end.date": "end"}, inplace=True)
        df["start"] = pd.to_datetime(df["start"], format="%Y-%m-%d")
        df["end"] = pd.to_datetime(df["end"], format="%Y-%m-%d")
        # poll date "t" = midpoint of the field period, rounded up to a day
        df["t"] = df["end"] - ((timedelta(days=1) + (df["end"] - df["start"])) / 2).dt.ceil("d")
        df = df[
            (df["t"] >= self.start_date) & (
                    (df["population"] == "Likely Voters") | (df["population"] == "Registered Voters")
                    | (df["population"] == "Adults")) & (df["n"] > 1)]
        # pollster arrangements: truncate each pollster name at the first
        # special character to collapse name variants
        characters = "'!^-%&/()=?_.,<$>£#½§{[]}\}|;`"
        for pollster in df["pollster"].unique():
            ch_index_list = []
            for ch in characters:
                ch_index = [i for i, x in enumerate(pollster) if x == ch]
                if ch_index:
                    ch_index_list.append(ch_index[0])
            if not ch_index_list:
                continue
            first_ch = min(ch_index_list)
            new_pollster = pollster.split(pollster[first_ch])[0]
            if new_pollster[-1] == " ":
                new_pollster = new_pollster[:-1]
            df.replace(pollster, new_pollster, inplace=True)
        df.replace(["Fox News", "WashPost", "ABC News", "DHM Research", "Public Opinion Strategies"],
                   ["FOX", "Washington Post", "ABC", "DHM", "POS"], inplace=True)
        df["mode"].replace(
            ["Internet", "Live Phone", 'IVR/Online', 'Live Phone/Online', 'Automated Phone', 'IVR/Live Phone', 'Mixed',
             'Mail'],
            ["Online Poll", "Live Phone Component", *["Other"] * 6], inplace=True)
        # dropping NAs
        # NOTE(review): chained assignment below — pandas may not write
        # through to df; consider df.loc[mask, col] = 0 instead.
        df["undecided"][df["undecided"].isna()] = 0
        df["other"][df["other"].isna()] = 0
        df["johnson"][df["johnson"].isna()] = 0
        df["mcmullin"][df["mcmullin"].isna()] = 0
        # calculating two party poll shares
        df["twoparty"] = df["clinton"] + df["trump"]
        df["polltype"] = df["population"]
        # calculating Clinton vote shares
        df["n_clinton"] = round(df["n"] * df["clinton"] / 100)
        df["pct_clinton"] = df["clinton"] / df["twoparty"]
        # calculating Trump vote shares
        df["n_trump"] = round(df["n"] * df["trump"] / 100)
        df["pct_trump"] = df["trump"] / df["twoparty"]
        df["poll_day"] = df["t"] - min(df["t"]) + timedelta(days=1)
        # creating indexes: 1-based codes for state/pollster/polltype/mode;
        # the "--" national marker is forced to index 51
        columns = ["state", "pollster", "polltype", "mode"]
        index_i = ["s", "p", "pop", "m"]
        for i, col in enumerate(columns):
            reindex = False
            for ii, x in enumerate(df[col].sort_values().unique(), start=1):
                if reindex:
                    ii -= 1
                if x == "--":
                    ii = 51
                    reindex = True
                df.loc[df[col] == x, f"index_{index_i[i]}"] = ii
            df[f"index_{index_i[i]}"] = df[f"index_{index_i[i]}"].astype(int)
        df["index_t"] = df["poll_day"].dt.days
        df = df.sort_values(by=["state", "t", "polltype", "twoparty"])
        df.drop_duplicates(['state', 't', 'pollster'], inplace=True)
        return df
    def run_stan_model(self):
        """Fit the Stan poll model with cmdstanpy and dump draws to CSV.

        Side effects: compiles/loads the Stan program from a hard-coded
        absolute path (NOTE(review): machine-specific — consider making it
        configurable) and writes data/all_predictions.csv.
        """
        # Data dictionary matching the Stan program's data block.
        data = {
            "N_national_polls": self.N_national_polls,
            "N_state_polls": self.N_state_polls,
            "T": self.T,
            "S": self.S,
            "P": self.P,
            "M": self.M,
            "Pop": self.Pop,
            "state": self.state,
            "state_weights": self.state_weights.squeeze(),
            "day_state": self.day_state.dt.days,
            "day_national": self.day_national.dt.days,
            "poll_state": self.poll_state,
            "poll_national": self.poll_national,
            "poll_mode_national": self.poll_mode_national,
            "poll_mode_state": self.poll_mode_state,
            "poll_pop_national": self.poll_pop_national,
            "poll_pop_state": self.poll_pop_state,
            "unadjusted_national": self.unadjusted_national,
            "unadjusted_state": self.unadjusted_state,
            "n_democrat_national": self.n_democrat_national.astype(int),
            "n_democrat_state": self.n_democrat_state.astype(int),
            "n_two_share_national": self.n_two_share_national.astype(int),
            "n_two_share_state": self.n_two_share_state.astype(int),
            "sigma_measure_noise_national": self.sigma_measure_noise_national,
            "sigma_measure_noise_state": self.sigma_measure_noise_state,
            "mu_b_prior": self.mu_b_prior.squeeze(),
            "sigma_c": self.sigma_c,
            "sigma_m": self.sigma_m,
            "sigma_pop": self.sigma_pop,
            "sigma_e_bias": self.sigma_e_bias,
            "state_covariance_0": self.state_covariance_0,
            "polling_bias_scale": self.polling_bias_scale,
            "mu_b_T_scale": self.mu_b_T_scale,
            "random_walk_scale": self.random_walk_scale
        }
        n_chains = 6
        n_cores = 6
        n_sampling = 500
        n_warmup = 500
        n_refresh = int(n_sampling * 0.1)
        model = CmdStanModel(stan_file="/home/admin/gözdeproject/poll_model_2020.stan", compile=True)
        fit = model.sample(
            data=data,
            seed=1843,
            parallel_chains=n_cores,
            chains=n_chains,
            iter_warmup=n_warmup,
            iter_sampling=n_sampling,
            refresh=n_refresh
        )
        save_fit = fit.draws_pd().to_csv("data/all_predictions.csv")
        # Free the (large) fit/draws objects eagerly.
        gc.collect()
    def get_plots(self, plot_type, out):
        """Draw diagnostic plots for a fitted poll model.

        Parameters:
            plot_type: which figure to draw -- one of "mu_b", "mu_c",
                "mu_m", "mu_pop", "polling_bias", "map" or "states".
            out: DataFrame of posterior draws (e.g. the CSV written by
                run_stan_model), with columns named like "mu_b[...]",
                "predicted_score[...]" etc.

        Side effects: shows matplotlib figures and stores the mu_b
        group-by on ``self.groups`` (read back by the "states" branch).

        NOTE(review): hard-coded sizes below -- 3000 draws (6 chains x
        500 samples, see run_stan_model), 252 days, 51 states, 162
        pollsters -- must match the fitted data; confirm before reuse.
        NOTE(review): relies on ``DataFrame.append``, which was removed
        in pandas 2.0, so this assumes an older pandas.
        """
        import matplotlib.pyplot as plt
        ###########################
        # MU_b
        ###########################
        # Election-day (t=252) state means: sample the prior from its
        # multivariate normal, take posterior draws from the day-252
        # "mu_b[...]" columns, and summarise both as mean/low/high per state.
        y = np.random.multivariate_normal(size=1000, mean=self.mu_b_prior.values.squeeze(),
                                          cov=self.state_covariance_mu_b_T)
        mu_b_T_posterior_draw = out[[x for x in out.columns if "mu_b[" in x and "raw" not in x and "252" in x]]
        mu_b_T_prior_draws = self.mean_low_high(y, self.state_name, "prior")
        mu_b_T_posterior_draws = self.mean_low_high(mu_b_T_posterior_draw, self.state_name, "posterior")
        mu_b_T = mu_b_T_prior_draws.append(mu_b_T_posterior_draws)
        mu_b_T = mu_b_T.sort_values(by=["mean", "state"])
        # Kept on self because the "states" branch indexes with it later.
        self.groups = mu_b_T.groupby("type")
        if plot_type == "mu_b":
            plt.figure(figsize=(8, 12))
            for label, group in self.groups:
                # Interval width for the horizontal error bars.
                err = group["high"] - group["low"]
                alpha = 0.6 if label == "prior" else 1
                plt.errorbar(x=group["mean"], y=group["state"], xerr=err, label=label, fmt="o", alpha=alpha)
            plt.xlabel("Mean")
            plt.axvline(0.5, linestyle="--")
            plt.xlim(0, 1)
            plt.tight_layout()
            plt.legend()
            plt.show()
            return
        ###########################
        # MU_C
        ###########################
        # Pollster "house effects": posterior from the mu_c columns,
        # prior simulated as N(0, sigma_c); summarised per pollster.
        if plot_type == "mu_c":
            mu_c_cols = [x for x in out.columns.values if "mu_c" in x and "raw" not in x]
            mu_c_posterior_draw = out[mu_c_cols].copy()
            # Repeat the pollster labels once per posterior draw (3000).
            pollster_ = self.df[["pollster", "index_p"]].drop_duplicates().sort_values(
                by="pollster").values.tolist() * 3000
            pollster = [x[0] for x in pollster_]
            mu_c_posterior_draws = pd.DataFrame({"draws": mu_c_posterior_draw.__array__().reshape(3000 * 162),
                                                 "index_p": list(range(1, 163)) * 3000,
                                                 "pollster": pollster,
                                                 "type": "posterior"})
            pollster_ = self.df[["pollster", "index_p"]].sort_values(
                by="pollster").drop_duplicates().values.tolist() * 1000
            pollster = [x[0] for x in pollster_]
            mu_c_prior_draws = pd.DataFrame({"draws": np.random.normal(0, self.sigma_c, self.P * 1000),
                                             "index_p": list(range(1, 163)) * 1000,
                                             "pollster": pollster,
                                             "type": "prior"})
            mu_c_draws = mu_c_posterior_draws.append(mu_c_prior_draws)
            mu_c_draws.reset_index(drop=True, inplace=True)
            g = mu_c_draws.groupby(["pollster", "type"])["draws"]
            # Normal-approximation 95% interval per (pollster, type).
            mu_c_draws = pd.DataFrame({"mean": g.mean(),
                                        "low": g.mean() - 1.96 * g.std(),
                                        "high": g.mean() + 1.96 * g.std()})
            # Only display pollsters with at least 5 polls in the data.
            filtered_pollster = self.df.groupby("pollster").count()["n"]
            filtered_pollster = filtered_pollster[filtered_pollster >= 5].index.tolist()
            mu_c_draws.reset_index(drop=False, inplace=True)
            mu_c_draws_filtered = mu_c_draws.loc[mu_c_draws["pollster"].isin(filtered_pollster)]
            groups = mu_c_draws_filtered.reset_index().sort_values(by=["mean", "pollster"]).groupby("type")
            plt.figure(figsize=(10, 20))
            for label, group in groups:
                # group.sort_values(by="mean", inplace=True)
                err = group["high"] - group["low"]
                plt.errorbar(x=group["mean"], y=group["pollster"], xerr=err, label=label, fmt="o")
            plt.xlabel("Mean")
            plt.legend()
            plt.tight_layout()
            plt.show()
            return
        ###########################
        # MU_m
        ###########################
        # Poll-mode effects, prior vs posterior, summarised per mode.
        if plot_type == "mu_m":
            mu_m_cols = [x for x in out.columns.values if "mu_m" in x and "raw" not in x]
            mu_m_posterior_draws = out[mu_m_cols].copy()
            method = self.df[["mode", "index_m"]].sort_values(by="mode").drop_duplicates().values.tolist() * 3000
            method = [x[0] for x in method]
            # The RHS still refers to the old DataFrame while building the new one.
            mu_m_posterior_draws = pd.DataFrame({"draws": mu_m_posterior_draws.__array__().reshape(3000 * 3, ),
                                                 "index_m": list(range(1, self.M + 1)) * mu_m_posterior_draws.shape[0],
                                                 "type": "posterior",
                                                 "method": method})
            method = self.df[["mode", "index_m"]].sort_values(by="mode").drop_duplicates().values.tolist() * 1000
            method = [x[0] for x in method]
            # NOTE(review): prior scale is sigma_c -- presumably sigma_m was
            # intended for the mode effect; confirm.
            mu_m_prior_draws = pd.DataFrame({"draws": np.random.normal(0, self.sigma_c, self.M * 1000),
                                             "index_m": list(range(1, self.M + 1)) * 1000,
                                             "type": "prior",
                                             "method": method})
            mu_m_draws = mu_m_posterior_draws.append(mu_m_prior_draws)
            # NOTE(review): reset_index returns a copy here, so this in-place
            # sort affects only the temporary -- effectively a no-op.
            mu_m_draws.reset_index(drop=True).sort_values(by="index_m", inplace=True)
            g = mu_m_draws.groupby(["method", "type"])["draws"]
            mu_m_draws = pd.DataFrame({"mean": g.mean(),
                                        "low": g.mean() - 1.96 * g.std(),
                                        "high": g.mean() + 1.96 * g.std()})
            groups = mu_m_draws.reset_index(drop=False).sort_values(by=["mean", "method"]).groupby("type")
            plt.figure(figsize=(6, 8))
            for label, group in groups:
                err = group["high"] - group["low"]
                plt.errorbar(x=group["mean"], y=group["method"], xerr=err, label=label, fmt="o")
            plt.xlabel("Mean")
            plt.tight_layout()
            plt.legend()
            plt.show()
            return
        ###########################
        # MU_pop
        ###########################
        # Survey-population effects (poll type), prior vs posterior.
        if plot_type == "mu_pop":
            mu_pop_cols = [x for x in out.columns.values if "mu_pop" in x and "raw" not in x]
            mu_pop_posterior_draws = out[mu_pop_cols].copy()
            method = self.df[["polltype", "index_pop"]].drop_duplicates().values.tolist() * 3000
            method = [x[0] for x in method]
            # NOTE(review): index_pop is built from self.M rather than self.Pop;
            # this only works while the two counts coincide -- confirm.
            mu_pop_posterior_draws = pd.DataFrame({"draws": mu_pop_posterior_draws.__array__().reshape(3000 * 3, ),
                                                   "index_pop": list(range(1, self.M + 1)) *
                                                                mu_pop_posterior_draws.shape[0],
                                                   "type": "posterior",
                                                   "method": method})
            method = self.df[["polltype", "index_pop"]].drop_duplicates().values.tolist() * 1000
            method = [x[0] for x in method]
            # NOTE(review): prior uses sigma_c and size self.M -- presumably
            # sigma_pop / self.Pop were intended; confirm.
            mu_pop_prior_draws = pd.DataFrame({"draws": np.random.normal(0, self.sigma_c, self.M * 1000),
                                               "index_pop": list(range(1, self.Pop + 1)) * 1000,
                                               "type": "prior",
                                               "method": method})
            mu_pop_draws = mu_pop_posterior_draws.append(mu_pop_prior_draws)
            # NOTE(review): in-place sort of a temporary copy -- no effect.
            mu_pop_draws.reset_index(drop=True).sort_values(by="index_pop", inplace=True)
            g = mu_pop_draws.groupby(["method", "type"])["draws"]
            mu_pop_draws = pd.DataFrame({"mean": g.mean(),
                                          "low": g.mean() - 1.96 * g.std(),
                                          "high": g.mean() + 1.96 * g.std()})
            groups = mu_pop_draws.reset_index(drop=False).sort_values(by=["mean", "method"]).groupby("type")
            plt.figure(figsize=(6, 8))
            for label, group in groups:
                err = group["high"] - group["low"]
                plt.errorbar(x=group["mean"], y=group["method"], xerr=err, label=label, fmt="o")
            plt.xlabel("Mean")
            plt.tight_layout()
            plt.legend()
            plt.show()
            # NOTE(review): unlike the branches above there is no `return`
            # here; execution falls through to the later `if` tests (harmless,
            # since plot_type cannot match them).
        ###########################
        # Polling_Bias
        ###########################
        # Per-state polling bias: posterior columns vs a prior drawn from
        # the polling-bias covariance matrix.
        if plot_type == "polling_bias":
            polling_bias_posterior = out[[x for x in out.columns.values if "polling_bias[" in x and "raw" not in x]]
            polling_bias_posterior_draws = pd.DataFrame({"draws": polling_bias_posterior.__array__().reshape(
                polling_bias_posterior.shape[0] * polling_bias_posterior.shape[1], ),
                "index_s": list(range(1, self.S + 1)) * polling_bias_posterior.shape[0],
                "type": "posterior",
                "states": self.state_name * polling_bias_posterior.shape[0]})
            y = np.random.multivariate_normal(size=1000, mean=[0] * self.S, cov=self.state_covariance_polling_bias)
            polling_bias_prior_draws = pd.DataFrame({"draws": y.reshape(1000 * self.S),
                                                     "index_s": list(range(1, self.S + 1)) * 1000,
                                                     "type": "prior",
                                                     "states": self.state_name * 1000})
            polling_bias_draws = polling_bias_posterior_draws.append(polling_bias_prior_draws)
            polling_bias_draws.reset_index(drop=True, inplace=True)
            g = polling_bias_draws.groupby(["states", "type"])["draws"]
            polling_bias_draws = pd.DataFrame({"mean": g.mean(),
                                                "low": g.mean() - 1.96 * g.std(),
                                                "high": g.mean() + 1.96 * g.std()})
            groups = polling_bias_draws.reset_index(drop=False).sort_values(by=["mean", "states"]).groupby("type")
            plt.figure(figsize=(6, 8))
            for label, group in groups:
                err = group["high"] - group["low"]
                plt.errorbar(x=group["mean"], y=group["states"], xerr=err, label=label, fmt="o")
            plt.xlabel("Mean")
            plt.tight_layout()
            plt.legend()
            plt.show()
        ###########################
        # E_Bias
        ###########################
        # Choropleth of the election-day win probability per state.
        if plot_type == "map":
            # NOTE(review): e_bias_posterior and single_draw are computed
            # but never used below.
            e_bias_posterior = out[[x for x in out.columns.values if "e_bias[" in x and "raw" not in x]]
            predicted_score = out[[x for x in out.columns.values if "predicted_score[" in x and "raw" not in x]]
            single_draw = predicted_score[[x for x in predicted_score.columns.values if "252" in x]]
            # Calendar dates for model days 1..252, anchored at the first poll start.
            t_list = [(self.df.start.min().to_pydatetime() + timedelta(days=x)).strftime("%Y-%m-%d") for x in
                      range(1, 253)]
            # Per state/day summary of the predicted Clinton two-party share.
            pct_clinton = pd.DataFrame({"low": predicted_score.quantile(0.025, axis=0).values,
                                         "high": predicted_score.quantile(0.975, axis=0).values,
                                         "mean": predicted_score.mean(axis=0).values,
                                         "prob": (predicted_score[predicted_score > 0.5].count(axis=0) / 3000).values,
                                         "state": np.sort(self.state_name * 252),
                                         "t": t_list * 51})
            # National series: state-weighted average over the state axis.
            nat = predicted_score.to_numpy().reshape(3000, 252, 51)
            nat_ = np.average(nat, axis=2, weights=self.state_weights.squeeze()).flatten()
            pct_clinton_natl = pd.DataFrame({"natl_vote": nat_,
                                              "t": t_list * 3000,
                                              "draw": np.sort(list(range(1, 3001)) * 252)})
            groups = pct_clinton_natl.groupby("t")
            l = []
            for key, value in groups:
                # Daily national win probability = share of draws above 0.5.
                l.append(round((value["natl_vote"] > 0.5).sum() / 3000, 2))
            pct_clinton_natl = pd.DataFrame({"low": pct_clinton_natl.groupby("t")["natl_vote"].quantile(0.025).values,
                                              "high": pct_clinton_natl.groupby("t")["natl_vote"].quantile(0.975).values,
                                              "mean": pct_clinton_natl.groupby("t")["natl_vote"].mean().values,
                                              "prob": l,
                                              "state": "--",
                                              "t": t_list})
            pct_all = pct_clinton.append(pct_clinton_natl).fillna(0).reset_index(drop=True)
            v1 = pct_all.loc[pct_all["state"] == "--"]
            v1 = v1.set_index(v1["t"])
            v2 = self.df[["state", "t", "pct_clinton", "mode"]]  # .loc[df["state"] == "--"]
            v2["t"] = v2["t"].dt.strftime("%Y-%m-%d")
            v2 = v2.set_index(v2["t"])
            # NOTE(review): pct_all_plt is built but not used afterwards.
            pct_all_plt = pd.concat([v1, v2])
            pct_all_plt = pct_all_plt.fillna(method='ffill').sort_index()
            # pct_all_plt = pct_all_plt.sort_index()
            # pct_all_plt = pct_all_plt.groupby(level=0).sum()
            # Election-day (2016-11-08) win probability per state for the map.
            plt_clinton = pct_all.loc[pct_all["t"] == "2016-11-08"][["state", "prob"]].reset_index(drop=True)
            from mpl_toolkits.basemap import Basemap
            from matplotlib.patches import Polygon
            from matplotlib.collections import PatchCollection
            # Create the map (NOTE(review): `map` shadows the builtin here).
            map = Basemap(llcrnrlon=-119, llcrnrlat=22, urcrnrlon=-64, urcrnrlat=49,
                          projection='lcc', lat_1=33, lat_2=45, lon_0=-95)
            # load the shapefile, use the name 'states'
            map.readshapefile('data/st99_d00', name='states', drawbounds=True)
            # collect the state names from the shapefile attributes so we can
            # look up the shape object for a state by its name
            state_names = []
            for shape_dict in map.states_info:
                state_names.append(shape_dict['NAME'])
            ax = plt.gca()  # get current axes instance
            # Color each state's polygon by its win probability (RdBu colormap).
            colors = plt.get_cmap("RdBu")
            cl = []
            pt = []
            a = 0
            # Map probabilities in [0, 1] onto colormap indices 0..255.
            prob_colors = (plt_clinton["prob"] * 255).astype(int).values
            for i in plt_clinton["state"]:
                if "--" != i:  # skip the national aggregate row
                    seg = map.states[state_names.index(i)]
                    poly = Polygon(seg, facecolor=colors(prob_colors[a]), edgecolor='black')
                    ax.add_patch(poly)
                    cl.append(colors(a * 5))
                    pt.append(poly)
                a += 1
            p = PatchCollection(pt, cmap=colors)
            p.set_array(np.array(cl))
            cb = plt.colorbar(p)
            plt.title("Probability of Clinton Wins")
            plt.box(False)
            plt.show()
            return
        ###########################
        # States
        ###########################
        # Time-series plots: national trend, then per-state panels.
        if plot_type == "states":
            predicted_score = out[[x for x in out.columns.values if "predicted_score[" in x and "raw" not in x]]
            t_list = [(self.df.start.min().to_pydatetime() + timedelta(days=x)).strftime("%Y-%m-%d") for x in
                      range(1, 253)]
            pct_clinton = pd.DataFrame({"low": predicted_score.quantile(0.025, axis=0).values,
                                         "high": predicted_score.quantile(0.975, axis=0).values,
                                         "mean": predicted_score.mean(axis=0).values,
                                         "prob": (predicted_score[predicted_score > 0.5].count(axis=0) / 3000).values,
                                         "state": np.sort(self.state_name * 252),
                                         "t": t_list * 51})
            # NOTE(review): Fortran order here vs C order in the "map"
            # branch's reshape -- confirm which matches the column layout.
            nat = predicted_score.to_numpy().reshape(3000, 252, 51, order="F")
            nat_ = np.average(nat, axis=2, weights=self.state_weights.squeeze()).flatten()
            pct_clinton_natl = pd.DataFrame({"natl_vote": nat_,
                                              "t": t_list * 3000,
                                              "draw": np.sort(list(range(1, 3001)) * 252)})
            groups = pct_clinton_natl.groupby("t")
            l = []
            for key, value in groups:
                l.append(round((value["natl_vote"] > 0.5).sum() / 3000, 2))
            pct_clinton_natl = pd.DataFrame({"low": pct_clinton_natl.groupby("t")["natl_vote"].quantile(0.025).values,
                                              "high": pct_clinton_natl.groupby("t")["natl_vote"].quantile(0.975).values,
                                              "mean": pct_clinton_natl.groupby("t")["natl_vote"].mean().values,
                                              "prob": l,
                                              "state": "--",
                                              "t": t_list})
            # National trend with 95% band.
            plt.figure(figsize=(20, 8))
            plt.fill_between(pct_clinton_natl.index, pct_clinton_natl["low"], pct_clinton_natl["high"], alpha=0.3,
                             color="gray")
            plt.plot(pct_clinton_natl.index, pct_clinton_natl["mean"])
            plt.box(False)
            plt.xticks(list(range(0, 252, 30)), ["M", "A", "M", "J", "J", "A", "S", "O", "N"])
            plt.title("National")
            plt.tight_layout()
            plt.show()
            plt.close()
            pct_all = pct_clinton.append(pct_clinton_natl).fillna(0).reset_index(drop=True)
            v1 = pct_all.loc[pct_all["state"] != "--"]
            v1 = v1.set_index(v1["t"])
            # Full state name -> abbreviation, for panel titles.
            state_all = {x: self.state_abb.values.tolist()[i] for i, x in enumerate(self.state_name)}
            # Order states by the posterior mu_b ranking computed at the top
            # of this method (self.groups), descending.
            ls_state = np.array(self.state_name)[self.groups.groups["posterior"]][::-1]
            ix = 0
            # 2x5 grid of per-state panels, a new figure every 10 states.
            for key in ls_state:
                if "District" in key:
                    continue
                value = v1.loc[v1.state == key]
                if ix % 10 == 0:
                    ix = 0
                    plt.figure(figsize=(20, 8))
                    plt.subplots(2, 5, sharex=True, sharey=True)
                plt.subplot(2, 5, (ix + 1))
                plt.fill_between(value.index, value["low"], value["high"], alpha=0.3, color="gray")
                plt.plot(value.index, value["mean"])
                # Dashed line at the prior mean for this state.
                plt.axhline(self.inv_logit(self.mu_b_prior.values[self.state_name.index(key)]), linestyle="--",
                            color="black")
                plt.box(False)
                plt.xticks(list(range(0, 252, 30)), ["M", "A", "M", "J", "J", "A", "S", "O", "N"])
                plt.title(state_all[key])
                plt.tight_layout()
                ix += 1
                if ix % 10 == 0:
                    plt.show()
                    plt.close()
            return
| [
"numpy.sqrt",
"pandas.read_csv",
"numpy.log",
"matplotlib.pyplot.fill_between",
"numpy.array",
"numpy.linalg.norm",
"matplotlib.pyplot.errorbar",
"datetime.timedelta",
"pandas.to_datetime",
"cmdstanpy.CmdStanModel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.sort",
"numpy... | [((386, 403), 'numpy.ones', 'np.ones', (['[51, 51]'], {}), '([51, 51])\n', (393, 403), True, 'import numpy as np\n'), ((749, 777), 'pandas.to_datetime', 'pd.to_datetime', (['"""2016-11-08"""'], {}), "('2016-11-08')\n", (763, 777), True, 'import pandas as pd\n'), ((806, 834), 'pandas.to_datetime', 'pd.to_datetime', (['"""2016-11-08"""'], {}), "('2016-11-08')\n", (820, 834), True, 'import pandas as pd\n'), ((861, 889), 'pandas.to_datetime', 'pd.to_datetime', (['"""2016-03-01"""'], {}), "('2016-03-01')\n", (875, 889), True, 'import pandas as pd\n'), ((1067, 1095), 'pandas.read_csv', 'pd.read_csv', (['"""data/2012.csv"""'], {}), "('data/2012.csv')\n", (1078, 1095), True, 'import pandas as pd\n'), ((1854, 1886), 'pandas.DataFrame', 'pd.DataFrame', (["state2012['delta']"], {}), "(state2012['delta'])\n", (1866, 1886), True, 'import pandas as pd\n'), ((2235, 2268), 'pandas.read_csv', 'pd.read_csv', (['"""data/abbr_list.csv"""'], {}), "('data/abbr_list.csv')\n", (2246, 2268), True, 'import pandas as pd\n'), ((2555, 2597), 'pandas.read_csv', 'pd.read_csv', (['"""data/acs_2013_variables.csv"""'], {}), "('data/acs_2013_variables.csv')\n", (2566, 2597), True, 'import pandas as pd\n'), ((2863, 2903), 'pandas.read_csv', 'pd.read_csv', (['"""data/urbanicity_index.csv"""'], {}), "('data/urbanicity_index.csv')\n", (2874, 2903), True, 'import pandas as pd\n'), ((3222, 3263), 'pandas.read_csv', 'pd.read_csv', (['"""data/white_evangel_pct.csv"""'], {}), "('data/white_evangel_pct.csv')\n", (3233, 3263), True, 'import pandas as pd\n'), ((4184, 4222), 'numpy.fill_diagonal', 'np.fill_diagonal', (['tmp_C.values', 'np.nan'], {}), '(tmp_C.values, np.nan)\n', (4200, 4222), True, 'import numpy as np\n'), ((6582, 6621), 'pandas.read_csv', 'pd.read_csv', (['"""data/abramowitz_data.csv"""'], {}), "('data/abramowitz_data.csv')\n", (6593, 6621), True, 'import pandas as pd\n'), ((10418, 10437), 'numpy.log', 'np.log', (['(x / (1 - x))'], {}), '(x / (1 - x))\n', (10424, 10437), True, 
'import numpy as np\n'), ((10649, 10671), 'numpy.fill_diagonal', 'np.fill_diagonal', (['m', '(1)'], {}), '(m, 1)\n', (10665, 10671), True, 'import numpy as np\n'), ((10685, 10707), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, n)'}), '(shape=(n, n))\n', (10693, 10707), True, 'import numpy as np\n'), ((10716, 10739), 'numpy.fill_diagonal', 'np.fill_diagonal', (['nn', '(1)'], {}), '(nn, 1)\n', (10732, 10739), True, 'import numpy as np\n'), ((11299, 11308), 'numpy.linalg.svd', 'la.svd', (['B'], {}), '(B)\n', (11305, 11308), True, 'from numpy import linalg as la\n'), ((12194, 12212), 'numpy.eye', 'np.eye', (['A.shape[0]'], {}), '(A.shape[0])\n', (12200, 12212), True, 'import numpy as np\n'), ((12799, 12822), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {}), '(input_file)\n', (12810, 12822), True, 'import pandas as pd\n'), ((13307, 13353), 'pandas.to_datetime', 'pd.to_datetime', (["df['start']"], {'format': '"""%Y-%m-%d"""'}), "(df['start'], format='%Y-%m-%d')\n", (13321, 13353), True, 'import pandas as pd\n'), ((13374, 13418), 'pandas.to_datetime', 'pd.to_datetime', (["df['end']"], {'format': '"""%Y-%m-%d"""'}), "(df['end'], format='%Y-%m-%d')\n", (13388, 13418), True, 'import pandas as pd\n'), ((18328, 18417), 'cmdstanpy.CmdStanModel', 'CmdStanModel', ([], {'stan_file': '"""/home/admin/gözdeproject/poll_model_2020.stan"""', 'compile': '(True)'}), "(stan_file='/home/admin/gözdeproject/poll_model_2020.stan',\n compile=True)\n", (18340, 18417), False, 'from cmdstanpy import CmdStanModel\n'), ((18743, 18755), 'gc.collect', 'gc.collect', ([], {}), '()\n', (18753, 18755), False, 'import gc\n'), ((498, 510), 'numpy.sqrt', 'np.sqrt', (['(300)'], {}), '(300)\n', (505, 510), True, 'import numpy as np\n'), ((4699, 4791), 'numpy.sqrt', 'np.sqrt', (['(self.state_weights.T @ self.state_covariance_polling_bias @ self.state_weights\n )'], {}), '(self.state_weights.T @ self.state_covariance_polling_bias @ self.\n state_weights)\n', (4706, 4791), True, 'import numpy as np\n'), 
((5026, 5112), 'numpy.sqrt', 'np.sqrt', (['(self.state_weights.T @ self.state_covariance_mu_b_T @ self.state_weights)'], {}), '(self.state_weights.T @ self.state_covariance_mu_b_T @ self.\n state_weights)\n', (5033, 5112), True, 'import numpy as np\n'), ((5462, 5474), 'numpy.sqrt', 'np.sqrt', (['(300)'], {}), '(300)\n', (5469, 5474), True, 'import numpy as np\n'), ((5708, 5784), 'numpy.sqrt', 'np.sqrt', (['(self.state_weights.T @ self.state_covariance_0 @ self.state_weights)'], {}), '(self.state_weights.T @ self.state_covariance_0 @ self.state_weights)\n', (5715, 5784), True, 'import numpy as np\n'), ((6887, 6909), 'pandas.DataFrame', 'pd.DataFrame', (['new_data'], {}), '(new_data)\n', (6899, 6909), True, 'import pandas as pd\n'), ((9648, 9657), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (9654, 9657), True, 'import numpy as np\n'), ((9660, 9673), 'numpy.log', 'np.log', (['(1 - p)'], {}), '(1 - p)\n', (9666, 9673), True, 'import numpy as np\n'), ((9730, 9739), 'numpy.exp', 'np.exp', (['p'], {}), '(p)\n', (9736, 9739), True, 'import numpy as np\n'), ((10579, 10597), 'numpy.triu_indices', 'np.triu_indices', (['n'], {}), '(n)\n', (10594, 10597), True, 'import numpy as np\n'), ((10615, 10633), 'numpy.tril_indices', 'np.tril_indices', (['n'], {}), '(n)\n', (10630, 10633), True, 'import numpy as np\n'), ((11489, 11499), 'numpy.linalg.norm', 'la.norm', (['A'], {}), '(A)\n', (11496, 11499), True, 'from numpy import linalg as la\n'), ((12540, 12554), 'numpy.linalg.cholesky', 'la.cholesky', (['B'], {}), '(B)\n', (12551, 12554), True, 'from numpy import linalg as la\n'), ((15598, 15615), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (15607, 15615), False, 'from datetime import timedelta\n'), ((19612, 19639), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 12)'}), '(figsize=(8, 12))\n', (19622, 19639), True, 'import matplotlib.pyplot as plt\n'), ((19912, 19930), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mean"""'], {}), 
"('Mean')\n", (19922, 19930), True, 'import matplotlib.pyplot as plt\n'), ((19943, 19975), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0.5)'], {'linestyle': '"""--"""'}), "(0.5, linestyle='--')\n", (19954, 19975), True, 'import matplotlib.pyplot as plt\n'), ((19988, 20002), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (19996, 20002), True, 'import matplotlib.pyplot as plt\n'), ((20015, 20033), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20031, 20033), True, 'import matplotlib.pyplot as plt\n'), ((20046, 20058), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20056, 20058), True, 'import matplotlib.pyplot as plt\n'), ((20071, 20081), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20079, 20081), True, 'import matplotlib.pyplot as plt\n'), ((22263, 22291), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 20)'}), '(figsize=(10, 20))\n', (22273, 22291), True, 'import matplotlib.pyplot as plt\n'), ((22555, 22573), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mean"""'], {}), "('Mean')\n", (22565, 22573), True, 'import matplotlib.pyplot as plt\n'), ((22586, 22598), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (22596, 22598), True, 'import matplotlib.pyplot as plt\n'), ((22611, 22629), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22627, 22629), True, 'import matplotlib.pyplot as plt\n'), ((22642, 22652), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22650, 22652), True, 'import matplotlib.pyplot as plt\n'), ((24505, 24531), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 8)'}), '(figsize=(6, 8))\n', (24515, 24531), True, 'import matplotlib.pyplot as plt\n'), ((24732, 24750), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mean"""'], {}), "('Mean')\n", (24742, 24750), True, 'import matplotlib.pyplot as plt\n'), ((24763, 24781), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (24779, 
24781), True, 'import matplotlib.pyplot as plt\n'), ((24794, 24806), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (24804, 24806), True, 'import matplotlib.pyplot as plt\n'), ((24819, 24829), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24827, 24829), True, 'import matplotlib.pyplot as plt\n'), ((26771, 26797), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 8)'}), '(figsize=(6, 8))\n', (26781, 26797), True, 'import matplotlib.pyplot as plt\n'), ((26998, 27016), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mean"""'], {}), "('Mean')\n", (27008, 27016), True, 'import matplotlib.pyplot as plt\n'), ((27029, 27047), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (27045, 27047), True, 'import matplotlib.pyplot as plt\n'), ((27060, 27072), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (27070, 27072), True, 'import matplotlib.pyplot as plt\n'), ((27085, 27095), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27093, 27095), True, 'import matplotlib.pyplot as plt\n'), ((27775, 27879), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', ([], {'size': '(1000)', 'mean': '([0] * self.S)', 'cov': 'self.state_covariance_polling_bias'}), '(size=1000, mean=[0] * self.S, cov=self.\n state_covariance_polling_bias)\n', (27804, 27879), True, 'import numpy as np\n'), ((28814, 28840), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 8)'}), '(figsize=(6, 8))\n', (28824, 28840), True, 'import matplotlib.pyplot as plt\n'), ((29041, 29059), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mean"""'], {}), "('Mean')\n", (29051, 29059), True, 'import matplotlib.pyplot as plt\n'), ((29072, 29090), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (29088, 29090), True, 'import matplotlib.pyplot as plt\n'), ((29103, 29115), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (29113, 29115), True, 'import matplotlib.pyplot as plt\n'), ((29128, 
29138), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29136, 29138), True, 'import matplotlib.pyplot as plt\n'), ((31764, 31783), 'pandas.concat', 'pd.concat', (['[v1, v2]'], {}), '([v1, v2])\n', (31773, 31783), True, 'import pandas as pd\n'), ((32301, 32420), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'llcrnrlon': '(-119)', 'llcrnrlat': '(22)', 'urcrnrlon': '(-64)', 'urcrnrlat': '(49)', 'projection': '"""lcc"""', 'lat_1': '(33)', 'lat_2': '(45)', 'lon_0': '(-95)'}), "(llcrnrlon=-119, llcrnrlat=22, urcrnrlon=-64, urcrnrlat=49,\n projection='lcc', lat_1=33, lat_2=45, lon_0=-95)\n", (32308, 32420), False, 'from mpl_toolkits.basemap import Basemap\n'), ((32870, 32879), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (32877, 32879), True, 'import matplotlib.pyplot as plt\n'), ((32983, 33003), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""RdBu"""'], {}), "('RdBu')\n", (32995, 33003), True, 'import matplotlib.pyplot as plt\n'), ((33520, 33552), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['pt'], {'cmap': 'colors'}), '(pt, cmap=colors)\n', (33535, 33552), False, 'from matplotlib.collections import PatchCollection\n'), ((33608, 33623), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['p'], {}), '(p)\n', (33620, 33623), True, 'import matplotlib.pyplot as plt\n'), ((33636, 33676), 'matplotlib.pyplot.title', 'plt.title', (['"""Probability of Clinton Wins"""'], {}), "('Probability of Clinton Wins')\n", (33645, 33676), True, 'import matplotlib.pyplot as plt\n'), ((33689, 33703), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (33696, 33703), True, 'import matplotlib.pyplot as plt\n'), ((33716, 33726), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33724, 33726), True, 'import matplotlib.pyplot as plt\n'), ((35788, 35815), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 8)'}), '(figsize=(20, 8))\n', (35798, 35815), True, 'import matplotlib.pyplot as plt\n'), ((35828, 35948), 
'matplotlib.pyplot.fill_between', 'plt.fill_between', (['pct_clinton_natl.index', "pct_clinton_natl['low']", "pct_clinton_natl['high']"], {'alpha': '(0.3)', 'color': '"""gray"""'}), "(pct_clinton_natl.index, pct_clinton_natl['low'],\n pct_clinton_natl['high'], alpha=0.3, color='gray')\n", (35844, 35948), True, 'import matplotlib.pyplot as plt\n'), ((35986, 36044), 'matplotlib.pyplot.plot', 'plt.plot', (['pct_clinton_natl.index', "pct_clinton_natl['mean']"], {}), "(pct_clinton_natl.index, pct_clinton_natl['mean'])\n", (35994, 36044), True, 'import matplotlib.pyplot as plt\n'), ((36057, 36071), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (36064, 36071), True, 'import matplotlib.pyplot as plt\n'), ((36179, 36200), 'matplotlib.pyplot.title', 'plt.title', (['"""National"""'], {}), "('National')\n", (36188, 36200), True, 'import matplotlib.pyplot as plt\n'), ((36213, 36231), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (36229, 36231), True, 'import matplotlib.pyplot as plt\n'), ((36244, 36254), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36252, 36254), True, 'import matplotlib.pyplot as plt\n'), ((36267, 36278), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (36276, 36278), True, 'import matplotlib.pyplot as plt\n'), ((5375, 5454), 'numpy.sqrt', 'np.sqrt', (['(self.state_weights.T @ state_covariance_mu_b_walk @ self.state_weights)'], {}), '(self.state_weights.T @ state_covariance_mu_b_walk @ self.state_weights)\n', (5382, 5454), True, 'import numpy as np\n'), ((6703, 6752), 'statsmodels.formula.api.ols', 'ols', (['"""incvote ~ juneapp + q2gdp"""'], {'data': 'abramowitz'}), "('incvote ~ juneapp + q2gdp', data=abramowitz)\n", (6706, 6752), False, 'from statsmodels.formula.api import ols\n'), ((9747, 9756), 'numpy.exp', 'np.exp', (['p'], {}), '(p)\n', (9753, 9756), True, 'import numpy as np\n'), ((10537, 10558), 'numpy.ones', 'np.ones', ([], {'shape': '(n, n)'}), '(shape=(n, n))\n', (10544, 10558), 
True, 'import numpy as np\n'), ((11340, 11350), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (11347, 11350), True, 'import numpy as np\n'), ((19807, 19904), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': "group['mean']", 'y': "group['state']", 'xerr': 'err', 'label': 'label', 'fmt': '"""o"""', 'alpha': 'alpha'}), "(x=group['mean'], y=group['state'], xerr=err, label=label, fmt=\n 'o', alpha=alpha)\n", (19819, 19904), True, 'import matplotlib.pyplot as plt\n'), ((22460, 22546), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': "group['mean']", 'y': "group['pollster']", 'xerr': 'err', 'label': 'label', 'fmt': '"""o"""'}), "(x=group['mean'], y=group['pollster'], xerr=err, label=label,\n fmt='o')\n", (22472, 22546), True, 'import matplotlib.pyplot as plt\n'), ((24639, 24724), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': "group['mean']", 'y': "group['method']", 'xerr': 'err', 'label': 'label', 'fmt': '"""o"""'}), "(x=group['mean'], y=group['method'], xerr=err, label=label, fmt='o'\n )\n", (24651, 24724), True, 'import matplotlib.pyplot as plt\n'), ((26905, 26990), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': "group['mean']", 'y': "group['method']", 'xerr': 'err', 'label': 'label', 'fmt': '"""o"""'}), "(x=group['mean'], y=group['method'], xerr=err, label=label, fmt='o'\n )\n", (26917, 26990), True, 'import matplotlib.pyplot as plt\n'), ((28948, 29033), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': "group['mean']", 'y': "group['states']", 'xerr': 'err', 'label': 'label', 'fmt': '"""o"""'}), "(x=group['mean'], y=group['states'], xerr=err, label=label, fmt='o'\n )\n", (28960, 29033), True, 'import matplotlib.pyplot as plt\n'), ((33577, 33589), 'numpy.array', 'np.array', (['cl'], {}), '(cl)\n', (33585, 33589), True, 'import numpy as np\n'), ((37014, 37039), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(5)', '(ix + 1)'], {}), '(2, 5, ix + 1)\n', (37025, 37039), True, 'import matplotlib.pyplot as plt\n'), ((37058, 
37146), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['value.index', "value['low']", "value['high']"], {'alpha': '(0.3)', 'color': '"""gray"""'}), "(value.index, value['low'], value['high'], alpha=0.3, color\n ='gray')\n", (37074, 37146), True, 'import matplotlib.pyplot as plt\n'), ((37158, 37194), 'matplotlib.pyplot.plot', 'plt.plot', (['value.index', "value['mean']"], {}), "(value.index, value['mean'])\n", (37166, 37194), True, 'import matplotlib.pyplot as plt\n'), ((37366, 37380), 'matplotlib.pyplot.box', 'plt.box', (['(False)'], {}), '(False)\n', (37373, 37380), True, 'import matplotlib.pyplot as plt\n'), ((37496, 37521), 'matplotlib.pyplot.title', 'plt.title', (['state_all[key]'], {}), '(state_all[key])\n', (37505, 37521), True, 'import matplotlib.pyplot as plt\n'), ((37538, 37556), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (37554, 37556), True, 'import matplotlib.pyplot as plt\n'), ((12296, 12310), 'numpy.linalg.eigvals', 'la.eigvals', (['A3'], {}), '(A3)\n', (12306, 12310), True, 'from numpy import linalg as la\n'), ((21154, 21202), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.sigma_c', '(self.P * 1000)'], {}), '(0, self.sigma_c, self.P * 1000)\n', (21170, 21202), True, 'import numpy as np\n'), ((23689, 23737), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.sigma_c', '(self.M * 1000)'], {}), '(0, self.sigma_c, self.M * 1000)\n', (23705, 23737), True, 'import numpy as np\n'), ((25925, 25973), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.sigma_c', '(self.M * 1000)'], {}), '(0, self.sigma_c, self.M * 1000)\n', (25941, 25973), True, 'import numpy as np\n'), ((30180, 30210), 'numpy.sort', 'np.sort', (['(self.state_name * 252)'], {}), '(self.state_name * 252)\n', (30187, 30210), True, 'import numpy as np\n'), ((34583, 34613), 'numpy.sort', 'np.sort', (['(self.state_name * 252)'], {}), '(self.state_name * 252)\n', (34590, 34613), True, 'import numpy as np\n'), ((36592, 36617), 
'numpy.array', 'np.array', (['self.state_name'], {}), '(self.state_name)\n', (36600, 36617), True, 'import numpy as np\n'), ((36904, 36931), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 8)'}), '(figsize=(20, 8))\n', (36914, 36931), True, 'import matplotlib.pyplot as plt\n'), ((36952, 36996), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(5)'], {'sharex': '(True)', 'sharey': '(True)'}), '(2, 5, sharex=True, sharey=True)\n', (36964, 36996), True, 'import matplotlib.pyplot as plt\n'), ((37635, 37645), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37643, 37645), True, 'import matplotlib.pyplot as plt\n'), ((37666, 37677), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (37675, 37677), True, 'import matplotlib.pyplot as plt\n'), ((29651, 29668), 'datetime.timedelta', 'timedelta', ([], {'days': 'x'}), '(days=x)\n', (29660, 29668), False, 'from datetime import timedelta\n'), ((34054, 34071), 'datetime.timedelta', 'timedelta', ([], {'days': 'x'}), '(days=x)\n', (34063, 34071), False, 'from datetime import timedelta\n'), ((13451, 13468), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (13460, 13468), False, 'from datetime import timedelta\n')] |
## =========================================================================
## @author <NAME> (<EMAIL>)
## =========================================================================
import numpy, sys
sys.path.insert( 0, '../../lib/python3' )
import PUJ.Model.Linear
data = numpy.loadtxt( open( sys.argv[ 1 ], 'rb' ), delimiter = ',' )
m = PUJ.Model.Linear( numpy.matrix( data ) )
print( m )
## eof - $RCSfile$
| [
"numpy.matrix",
"sys.path.insert"
] | [((201, 240), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../lib/python3"""'], {}), "(0, '../../lib/python3')\n", (216, 240), False, 'import numpy, sys\n'), ((360, 378), 'numpy.matrix', 'numpy.matrix', (['data'], {}), '(data)\n', (372, 378), False, 'import numpy, sys\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import numpy as np
tgt_gpu = tvm.target.Target(target="rocm", host="llvm")
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
print(type(C))
s = te.create_schedule(C.op)
bx, tx = s[C].split(C.op.axis[0], factor=64)
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(tx, te.thread_axis("threadIdx.x"))
fadd = tvm.build(s, [A, B, C], target=tgt_gpu, name="myadd")
dev = tvm.device(tgt_gpu.kind.name, 0)
n = 1024
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
fadd(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
if (
tgt_gpu.kind.name == "cuda"
or tgt_gpu.kind.name == "rocm"
or tgt_gpu.kind.name.startswith("opencl")
):
dev_module = fadd.imported_modules[0]
print("-----GPU code-----")
print(dev_module.get_source())
else:
print(fadd.get_source()) | [
"tvm.te.var",
"tvm.te.thread_axis",
"tvm.te.create_schedule",
"tvm.te.placeholder",
"tvm.build",
"numpy.zeros",
"tvm.te.compute",
"numpy.random.uniform",
"tvm.target.Target",
"tvm.device"
] | [((865, 910), 'tvm.target.Target', 'tvm.target.Target', ([], {'target': '"""rocm"""', 'host': '"""llvm"""'}), "(target='rocm', host='llvm')\n", (882, 910), False, 'import tvm\n'), ((916, 927), 'tvm.te.var', 'te.var', (['"""n"""'], {}), "('n')\n", (922, 927), False, 'from tvm import te\n'), ((932, 962), 'tvm.te.placeholder', 'te.placeholder', (['(n,)'], {'name': '"""A"""'}), "((n,), name='A')\n", (946, 962), False, 'from tvm import te\n'), ((967, 997), 'tvm.te.placeholder', 'te.placeholder', (['(n,)'], {'name': '"""B"""'}), "((n,), name='B')\n", (981, 997), False, 'from tvm import te\n'), ((1002, 1054), 'tvm.te.compute', 'te.compute', (['A.shape', '(lambda i: A[i] + B[i])'], {'name': '"""C"""'}), "(A.shape, lambda i: A[i] + B[i], name='C')\n", (1012, 1054), False, 'from tvm import te\n'), ((1075, 1099), 'tvm.te.create_schedule', 'te.create_schedule', (['C.op'], {}), '(C.op)\n', (1093, 1099), False, 'from tvm import te\n'), ((1244, 1297), 'tvm.build', 'tvm.build', (['s', '[A, B, C]'], {'target': 'tgt_gpu', 'name': '"""myadd"""'}), "(s, [A, B, C], target=tgt_gpu, name='myadd')\n", (1253, 1297), False, 'import tvm\n'), ((1305, 1337), 'tvm.device', 'tvm.device', (['tgt_gpu.kind.name', '(0)'], {}), '(tgt_gpu.kind.name, 0)\n', (1315, 1337), False, 'import tvm\n'), ((1161, 1189), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (1175, 1189), False, 'from tvm import te\n'), ((1205, 1234), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (1219, 1234), False, 'from tvm import te\n'), ((1495, 1521), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'C.dtype'}), '(n, dtype=C.dtype)\n', (1503, 1521), True, 'import numpy as np\n'), ((1365, 1390), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'n'}), '(size=n)\n', (1382, 1390), True, 'import numpy as np\n'), ((1430, 1455), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'n'}), '(size=n)\n', (1447, 1455), True, 'import numpy as 
np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.