code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""unittest cases for dvh."""
# test_dvh.py
# Copyright (c) 2016 <NAME>
import unittest
import os
from dicompylercore import dvh, dicomparser
from numpy import array, arange
from numpy.testing import assert_array_equal
# Matplotlib is an optional dependency: plotting tests are skipped when it is
# missing.  Catch only ImportError -- the previous bare ``except:`` also
# swallowed SystemExit/KeyboardInterrupt raised during import.
mpl_available = True
try:
    import matplotlib.pyplot as plt
except ImportError:
    mpl_available = False
else:
    plt.ioff()  # disable interactive mode so tests never open figure windows
# Location of the DICOM test fixtures used by the test cases below.
basedata_dir = "tests/testdata"
example_data = os.path.join(basedata_dir, "example_data")
class TestDVH(unittest.TestCase):
    """Unit tests for the DVH module."""

    @classmethod
    def setUpClass(cls):
        """Setup files for common case testing."""
        # Shared fixture: the DVH of ROI #9 from the example RT Dose file.
        # Expected numeric values in the tests below are oracles derived
        # from this specific fixture.
        rtdose_dcm = os.path.join(example_data, "rtdose.dcm")
        rtdose = dicomparser.DicomParser(rtdose_dcm)
        cls.dvh = dvh.DVH.from_dicom_dvh(rtdose.ds, 9)
        # Prescription dose (Gy) used by the relative/absolute dose tests.
        cls.rx_dose = 14

    def test_raw_data_dvh(self):
        """Test if a DVH can be created from raw data."""
        self.assertEqual(dvh.DVH.from_data(1, 1), dvh.DVH([1], [1]))
        self.assertEqual(
            repr(dvh.DVH.from_data(1, 1)),
            "DVH(cumulative, 1 bins: [0:1] Gy, volume: 1 cm3, "
            "name: None, rx_dose: 0 Gy)")
        # Bin edges generated for scalar data and for a custom bin width.
        assert_array_equal(dvh.DVH.from_data(0, 1).bins, array([0, 0]))
        assert_array_equal(dvh.DVH.from_data(5, 2).bins, array([0, 2, 4, 5]))

    def test_raw_data_dvh_max_bins(self):
        """Test if a DVH can be created from raw data with [0, 5] bin."""
        max_dvh = dvh.DVH.from_data([0, 5])
        assert_array_equal(max_dvh.counts, array([1, 0, 0, 0, 1]))
        assert_array_equal(max_dvh.bins, arange(0, 6))

    def test_differential_dvh(self):
        """Test if a cumulative DVH can be converted to a differential DVH."""
        # Total differential volume must equal the cumulative maximum, and
        # differentiating twice must be idempotent.
        self.assertAlmostEqual(
            self.dvh.counts.max(), self.dvh.differential.counts.sum())
        self.assertEqual(
            self.dvh.differential, self.dvh.differential.differential)

    def test_cumulative_dvh(self):
        """Test if a differential DVH can be converted to a cumulative DVH."""
        self.assertEqual(
            self.dvh.cumulative, self.dvh.differential.cumulative)

    def test_absolute_relative_dose_dvh(self):
        """Test if an absolute and relative dose DVH can be generated."""
        # Round-tripping between absolute and relative dose must be lossless.
        self.assertEqual(
            self.dvh.absolute_dose(self.rx_dose),
            self.dvh.relative_dose(self.rx_dose).absolute_dose(self.rx_dose))
        self.assertEqual(
            self.dvh.relative_dose(self.rx_dose).relative_dose(self.rx_dose),
            self.dvh.absolute_dose(self.rx_dose).relative_dose(self.rx_dose))

    def test_absolute_relative_volume_dvh(self):
        """Test if an absolute and relative volume DVH can be generated."""
        self.assertEqual(
            self.dvh.absolute_volume(self.dvh.volume),
            self.dvh.relative_volume.absolute_volume(self.dvh.volume))
        self.assertEqual(
            self.dvh.relative_volume.relative_volume,
            self.dvh.absolute_volume(self.dvh.volume).relative_volume)

    def test_absolute_relative_full_conversion(self):
        """Test if an abs / relative volume / dose DVH can be generated."""
        # Every ordering of differential/cumulative and relative/absolute
        # volume conversions must produce the same DVH.
        a = self.dvh
        b = a.differential.relative_volume.absolute_volume(a.volume).cumulative
        c = a.relative_volume.differential.absolute_volume(a.volume).cumulative
        d = a.relative_volume.absolute_volume(a.volume).differential.cumulative
        e = a.differential.relative_volume.cumulative.absolute_volume(a.volume)
        self.assertEqual(a, b)
        self.assertEqual(b, c)
        self.assertEqual(c, d)
        self.assertEqual(d, e)
        # Same invariance when dose conversions are interleaved as well.
        rx = self.rx_dose
        f = b.relative_dose(rx).absolute_dose(rx).differential.relative_volume
        g = b.relative_dose(rx).relative_volume.differential.absolute_dose(rx)
        h = b.relative_volume.differential.relative_dose(rx).absolute_dose(rx)
        i = b.differential.relative_dose(rx).relative_volume.absolute_dose(rx)
        self.assertEqual(f, g)
        self.assertEqual(g, h)
        self.assertEqual(h, i)
        # Test if rx_dose is included in initial constructor
        i.rx_dose = rx
        j = i.relative_dose().absolute_dose().differential.relative_volume
        k = i.relative_dose().relative_volume.differential.absolute_dose()
        l = i.relative_volume.differential.relative_dose().absolute_dose()
        m = i.differential.relative_dose().relative_volume.absolute_dose()
        self.assertEqual(i, j)
        self.assertEqual(j, k)
        self.assertEqual(k, l)
        self.assertEqual(l, m)
        # Test if rx_dose is not included in initial constructor
        # but is accessed as if it is provided
        with self.assertRaises(AttributeError):
            self.dvh.relative_dose()
        with self.assertRaises(AttributeError):
            self.dvh.relative_dose(14).absolute_dose()

    def test_dvh_properties(self):
        """Test if the DVH properties can be derived."""
        # Oracle values computed from the rtdose.dcm fixture.
        self.assertEqual(self.dvh.max, 14.579999999999734)
        self.assertEqual(self.dvh.min, 14.069999999999744)
        self.assertEqual(self.dvh.mean, 14.285830178442307)
        self.assertEqual(self.dvh.volume, 12.809180549338803)

    def test_dvh_value(self):
        """Test if the DVHValue class works as expected."""
        self.assertEqual(str(dvh.DVHValue(100)), '100.00')
        self.assertEqual(str(dvh.DVHValue(100, 'Gy')), '100.00 Gy')
        self.assertEqual(
            repr(dvh.DVHValue(100, 'Gy')),
            "dvh.DVHValue(100, 'Gy')")

    def test_dvh_statistics(self):
        """Test if the DVH statistics can be calculated."""
        self.dvh.rx_dose = self.rx_dose
        # Volume constraints (Vxx): volume receiving at least the given dose,
        # expressed either as % of rx_dose or as an absolute dose in Gy.
        self.assertEqual(
            self.dvh.volume_constraint(0),
            dvh.DVHValue(12.809180549338601, 'cm3'))
        self.assertEqual(
            self.dvh.volume_constraint(100),
            dvh.DVHValue(12.809180549338601, 'cm3'))
        self.assertEqual(
            self.dvh.volume_constraint(105),
            dvh.DVHValue(0.0, 'cm3'))
        self.assertEqual(
            self.dvh.volume_constraint(14, 'Gy'),
            dvh.DVHValue(12.809180549338601, 'cm3'))
        self.assertEqual(
            self.dvh.volume_constraint(100, 'Gy'),
            dvh.DVHValue(0.0, 'cm3'))
        # Dose constraints (Dxx): minimum dose to the hottest given volume,
        # expressed either as % of total volume or as an absolute cc value.
        self.assertEqual(
            self.dvh.dose_constraint(100),
            dvh.DVHValue(14.059999999999745, 'Gy'))
        self.assertEqual(
            self.dvh.dose_constraint(90),
            dvh.DVHValue(14.169999999999742, 'Gy'))
        self.assertEqual(
            self.dvh.dose_constraint(0.02, 'cc'),
            dvh.DVHValue(14.529999999999735, 'Gy'))
        self.assertEqual(
            self.dvh.dose_constraint(15, 'cc'),
            dvh.DVHValue(0.0, 'Gy'))

    def test_dvh_statistics_shorthand(self):
        """Test if the DVH statistics can be accessed via shorthand."""
        # Attribute-style shorthands (v100, v14Gy, D90, d2cc) are resolved
        # dynamically and are case-insensitive on the leading letter.
        self.assertEqual(
            self.dvh.v100, dvh.DVHValue(12.809180549338601, 'cm3'))
        self.assertEqual(
            self.dvh.v14Gy, dvh.DVHValue(12.809180549338601, 'cm3'))
        self.assertEqual(
            self.dvh.D90, dvh.DVHValue(14.169999999999742, 'Gy'))
        self.assertEqual(
            self.dvh.d2cc, dvh.DVHValue(14.389999999999738, 'Gy'))

    def test_dvh_statistics_shorthand_fail(self):
        """Test if the DVH statistics shorthand fail on invalid syntaxes."""
        with self.assertRaises(AttributeError):
            self.dvh.v100agy

    def test_dvh_describe(self):
        """Test if the DVH statistics summary can be generated."""
        # describe() prints a summary and returns None.
        self.assertEqual(self.dvh.describe(), None)
        self.assertEqual(self.dvh.relative_dose(self.rx_dose).describe(), None)

    def test_dvh_compare(self):
        """Test if the DVH comparsion summary can be generated."""
        self.dvh.name = "test"
        self.assertEqual(self.dvh.compare(self.dvh), None)
        self.assertEqual(self.dvh.relative_dose(
            self.rx_dose).compare(
            self.dvh.relative_dose(self.rx_dose)), None)
        # Comparing DVHs with mismatched dose units must fail.
        with self.assertRaises(AttributeError):
            self.dvh.relative_dose(self.rx_dose).compare(self.dvh)

    @unittest.skipUnless(mpl_available, "Matplotlib not installed")
    def test_plotting(self):
        """Test if the DVH can be plotted."""
        # plot() returns self for chaining, with and without a name set.
        self.assertEqual(self.dvh.plot(), self.dvh)
        self.dvh.name = "test"
        self.assertEqual(self.dvh.plot(), self.dvh)

    def test_dvh_statistics_with_no_counts(self):
        # Degenerate DVH with an empty counts array: stats fall back to 0.
        subject = dvh.DVH(array([]), array([0]))
        self.assertEqual(subject.max, 0)
        self.assertEqual(subject.min, 0)
        self.assertEqual(subject.mean, 0)

    def test_dose_constraint_with_no_counts(self):
        # Must not raise on an empty DVH (smoke test, no return value check).
        subject = dvh.DVH(array([]), array([0]))
        subject.dose_constraint(1)

    def test_dvh_statistics_with_zero_volume(self):
        # DVH whose counts are all zero: stats fall back to 0.
        subject = dvh.DVH(array([0, 0]), array([0, 1]))
        self.assertEqual(subject.max, 0)
        self.assertEqual(subject.min, 0)
        self.assertEqual(subject.mean, 0)

    def test_dose_constraint_with_zero_volume(self):
        # Must not raise on a zero-volume DVH (smoke test).
        subject = dvh.DVH(array([0, 0]), array([0, 1]))
        subject.dose_constraint(1)
if __name__ == '__main__':
    # Allow running this module directly; the process exit status reflects
    # the unittest result.
    import sys
    sys.exit(unittest.main())
| [
"dicompylercore.dvh.DVH.from_data",
"os.path.join",
"matplotlib.pyplot.ioff",
"unittest.skipUnless",
"dicompylercore.dicomparser.DicomParser",
"unittest.main",
"dicompylercore.dvh.DVH",
"numpy.array",
"dicompylercore.dvh.DVH.from_dicom_dvh",
"dicompylercore.dvh.DVHValue",
"numpy.arange"
] | [((433, 475), 'os.path.join', 'os.path.join', (['basedata_dir', '"""example_data"""'], {}), "(basedata_dir, 'example_data')\n", (445, 475), False, 'import os\n'), ((374, 384), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (382, 384), True, 'import matplotlib.pyplot as plt\n'), ((8126, 8188), 'unittest.skipUnless', 'unittest.skipUnless', (['mpl_available', '"""Matplotlib not installed"""'], {}), "(mpl_available, 'Matplotlib not installed')\n", (8145, 8188), False, 'import unittest\n'), ((668, 708), 'os.path.join', 'os.path.join', (['example_data', '"""rtdose.dcm"""'], {}), "(example_data, 'rtdose.dcm')\n", (680, 708), False, 'import os\n'), ((726, 761), 'dicompylercore.dicomparser.DicomParser', 'dicomparser.DicomParser', (['rtdose_dcm'], {}), '(rtdose_dcm)\n', (749, 761), False, 'from dicompylercore import dvh, dicomparser\n'), ((781, 817), 'dicompylercore.dvh.DVH.from_dicom_dvh', 'dvh.DVH.from_dicom_dvh', (['rtdose.ds', '(9)'], {}), '(rtdose.ds, 9)\n', (803, 817), False, 'from dicompylercore import dvh, dicomparser\n'), ((1464, 1489), 'dicompylercore.dvh.DVH.from_data', 'dvh.DVH.from_data', (['[0, 5]'], {}), '([0, 5])\n', (1481, 1489), False, 'from dicompylercore import dvh, dicomparser\n'), ((9194, 9209), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9207, 9209), False, 'import unittest\n'), ((960, 983), 'dicompylercore.dvh.DVH.from_data', 'dvh.DVH.from_data', (['(1)', '(1)'], {}), '(1, 1)\n', (977, 983), False, 'from dicompylercore import dvh, dicomparser\n'), ((985, 1002), 'dicompylercore.dvh.DVH', 'dvh.DVH', (['[1]', '[1]'], {}), '([1], [1])\n', (992, 1002), False, 'from dicompylercore import dvh, dicomparser\n'), ((1236, 1249), 'numpy.array', 'array', (['[0, 0]'], {}), '([0, 0])\n', (1241, 1249), False, 'from numpy import array, arange\n'), ((1308, 1327), 'numpy.array', 'array', (['[0, 2, 4, 5]'], {}), '([0, 2, 4, 5])\n', (1313, 1327), False, 'from numpy import array, arange\n'), ((1533, 1555), 'numpy.array', 'array', (['[1, 0, 0, 0, 1]'], 
{}), '([1, 0, 0, 0, 1])\n', (1538, 1555), False, 'from numpy import array, arange\n'), ((1598, 1610), 'numpy.arange', 'arange', (['(0)', '(6)'], {}), '(0, 6)\n', (1604, 1610), False, 'from numpy import array, arange\n'), ((5742, 5779), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(12.8091805493386)', '"""cm3"""'], {}), "(12.8091805493386, 'cm3')\n", (5754, 5779), False, 'from dicompylercore import dvh, dicomparser\n'), ((5866, 5903), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(12.8091805493386)', '"""cm3"""'], {}), "(12.8091805493386, 'cm3')\n", (5878, 5903), False, 'from dicompylercore import dvh, dicomparser\n'), ((5990, 6014), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(0.0)', '"""cm3"""'], {}), "(0.0, 'cm3')\n", (6002, 6014), False, 'from dicompylercore import dvh, dicomparser\n'), ((6104, 6141), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(12.8091805493386)', '"""cm3"""'], {}), "(12.8091805493386, 'cm3')\n", (6116, 6141), False, 'from dicompylercore import dvh, dicomparser\n'), ((6234, 6258), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(0.0)', '"""cm3"""'], {}), "(0.0, 'cm3')\n", (6246, 6258), False, 'from dicompylercore import dvh, dicomparser\n'), ((6341, 6379), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(14.059999999999745)', '"""Gy"""'], {}), "(14.059999999999745, 'Gy')\n", (6353, 6379), False, 'from dicompylercore import dvh, dicomparser\n'), ((6461, 6499), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(14.169999999999742)', '"""Gy"""'], {}), "(14.169999999999742, 'Gy')\n", (6473, 6499), False, 'from dicompylercore import dvh, dicomparser\n'), ((6589, 6627), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(14.529999999999735)', '"""Gy"""'], {}), "(14.529999999999735, 'Gy')\n", (6601, 6627), False, 'from dicompylercore import dvh, dicomparser\n'), ((6715, 6738), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(0.0)', '"""Gy"""'], {}), "(0.0, 'Gy')\n", (6727, 6738), False, 'from dicompylercore import dvh, 
dicomparser\n'), ((6911, 6948), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(12.8091805493386)', '"""cm3"""'], {}), "(12.8091805493386, 'cm3')\n", (6923, 6948), False, 'from dicompylercore import dvh, dicomparser\n'), ((7006, 7043), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(12.8091805493386)', '"""cm3"""'], {}), "(12.8091805493386, 'cm3')\n", (7018, 7043), False, 'from dicompylercore import dvh, dicomparser\n'), ((7099, 7137), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(14.169999999999742)', '"""Gy"""'], {}), "(14.169999999999742, 'Gy')\n", (7111, 7137), False, 'from dicompylercore import dvh, dicomparser\n'), ((7192, 7230), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(14.389999999999738)', '"""Gy"""'], {}), "(14.389999999999738, 'Gy')\n", (7204, 7230), False, 'from dicompylercore import dvh, dicomparser\n'), ((8476, 8485), 'numpy.array', 'array', (['[]'], {}), '([])\n', (8481, 8485), False, 'from numpy import array, arange\n'), ((8487, 8497), 'numpy.array', 'array', (['[0]'], {}), '([0])\n', (8492, 8497), False, 'from numpy import array, arange\n'), ((8701, 8710), 'numpy.array', 'array', (['[]'], {}), '([])\n', (8706, 8710), False, 'from numpy import array, arange\n'), ((8712, 8722), 'numpy.array', 'array', (['[0]'], {}), '([0])\n', (8717, 8722), False, 'from numpy import array, arange\n'), ((8838, 8851), 'numpy.array', 'array', (['[0, 0]'], {}), '([0, 0])\n', (8843, 8851), False, 'from numpy import array, arange\n'), ((8853, 8866), 'numpy.array', 'array', (['[0, 1]'], {}), '([0, 1])\n', (8858, 8866), False, 'from numpy import array, arange\n'), ((9072, 9085), 'numpy.array', 'array', (['[0, 0]'], {}), '([0, 0])\n', (9077, 9085), False, 'from numpy import array, arange\n'), ((9087, 9100), 'numpy.array', 'array', (['[0, 1]'], {}), '([0, 1])\n', (9092, 9100), False, 'from numpy import array, arange\n'), ((1047, 1070), 'dicompylercore.dvh.DVH.from_data', 'dvh.DVH.from_data', (['(1)', '(1)'], {}), '(1, 1)\n', (1064, 1070), False, 'from 
dicompylercore import dvh, dicomparser\n'), ((1206, 1229), 'dicompylercore.dvh.DVH.from_data', 'dvh.DVH.from_data', (['(0)', '(1)'], {}), '(0, 1)\n', (1223, 1229), False, 'from dicompylercore import dvh, dicomparser\n'), ((1278, 1301), 'dicompylercore.dvh.DVH.from_data', 'dvh.DVH.from_data', (['(5)', '(2)'], {}), '(5, 2)\n', (1295, 1301), False, 'from dicompylercore import dvh, dicomparser\n'), ((5319, 5336), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(100)'], {}), '(100)\n', (5331, 5336), False, 'from dicompylercore import dvh, dicomparser\n'), ((5378, 5401), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(100)', '"""Gy"""'], {}), "(100, 'Gy')\n", (5390, 5401), False, 'from dicompylercore import dvh, dicomparser\n'), ((5460, 5483), 'dicompylercore.dvh.DVHValue', 'dvh.DVHValue', (['(100)', '"""Gy"""'], {}), "(100, 'Gy')\n", (5472, 5483), False, 'from dicompylercore import dvh, dicomparser\n')] |
import time
import numpy as np
from sklearn.neighbors import NearestCentroid
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn import decomposition
def run(x_train, y_train, x_test, y_test, clf):
    """Fit *clf* on the training split, score it on the test split, and
    print the accuracy along with wall-clock fit and score times."""
    t0 = time.time()
    clf.fit(x_train, y_train)
    fit_seconds = time.time() - t0
    t0 = time.time()
    accuracy = clf.score(x_test, y_test)
    score_seconds = time.time() - t0
    print("score = %0.4f (time, train=%8.3f, test=%8.3f)"
          % (accuracy, fit_seconds, score_seconds))
def train(x_train, y_train, x_test, y_test):
    """Benchmark a fixed roster of scikit-learn classifiers on one split.

    For every classifier an aligned label is printed, followed on the
    same line by the score/time report emitted by run().
    """
    roster = [
        (" Nearest centroid : ", lambda: NearestCentroid()),
        (" k-NN classifier (k=3) : ", lambda: KNeighborsClassifier(n_neighbors=3)),
        (" k-NN classifier (k=7) : ", lambda: KNeighborsClassifier(n_neighbors=7)),
        (" Naive Bayes (Gaussian) : ", lambda: GaussianNB()),
        (" Decision Tree : ", lambda: DecisionTreeClassifier()),
        (" Random Forest (trees= 5) : ", lambda: RandomForestClassifier(n_estimators=5)),
        (" Random Forest (trees= 50) : ", lambda: RandomForestClassifier(n_estimators=50)),
        (" Random Forest (trees=500) : ", lambda: RandomForestClassifier(n_estimators=500)),
        (" Random Forest (trees=1000): ", lambda: RandomForestClassifier(n_estimators=1000)),
        (" LinearSVM (C=0.01) : ", lambda: LinearSVC(C=0.01)),
        (" LinearSVM (C=0.1) : ", lambda: LinearSVC(C=0.1)),
        (" LinearSVM (C=1.0) : ", lambda: LinearSVC(C=1.0)),
        (" LinearSVM (C=10.0) : ", lambda: LinearSVC(C=10.0)),
    ]
    for label, make_clf in roster:
        print(label, end='')
        run(x_train, y_train, x_test, y_test, make_clf())
def main():
    """Load the MNIST vectors and benchmark the classifier roster on four
    representations: raw [0,255], scaled [0,1), standardized, and the
    first 15 PCA components of the standardized data."""
    x_train = np.load("../data/mnist/mnist_train_vectors.npy").astype("float64")
    y_train = np.load("../data/mnist/mnist_train_labels.npy")
    x_test = np.load("../data/mnist/mnist_test_vectors.npy").astype("float64")
    y_test = np.load("../data/mnist/mnist_test_labels.npy")

    print("Models trained on raw [0,255] images:")
    train(x_train, y_train, x_test, y_test)

    print("Models trained on raw [0,1) images:")
    train(x_train/256.0, y_train, x_test/256.0, y_test)

    # Standardize per pixel; the small epsilon guards against division by
    # zero for pixels that are constant across the training set.
    mean_vec = x_train.mean(axis=0)
    std_vec = x_train.std(axis=0) + 1e-8
    train_std = (x_train - mean_vec) / std_vec
    test_std = (x_test - mean_vec) / std_vec
    print("Models trained on normalized images:")
    train(train_std, y_train, test_std, y_test)

    # PCA is fitted on the standardized training data only.
    pca = decomposition.PCA(n_components=15)
    pca.fit(train_std)
    print("Models trained on first 15 PCA components of normalized images:")
    train(pca.transform(train_std), y_train, pca.transform(test_std), y_test)
main()
| [
"sklearn.decomposition.PCA",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.svm.LinearSVC",
"sklearn.neighbors.NearestCentroid",
"sklearn.naive_bayes.GaussianNB",
"numpy.load",
"time.time"
] | [((411, 422), 'time.time', 'time.time', ([], {}), '()\n', (420, 422), False, 'import time\n'), ((492, 503), 'time.time', 'time.time', ([], {}), '()\n', (501, 503), False, 'import time\n'), ((2433, 2480), 'numpy.load', 'np.load', (['"""../data/mnist/mnist_train_labels.npy"""'], {}), "('../data/mnist/mnist_train_labels.npy')\n", (2440, 2480), True, 'import numpy as np\n'), ((2573, 2619), 'numpy.load', 'np.load', (['"""../data/mnist/mnist_test_labels.npy"""'], {}), "('../data/mnist/mnist_test_labels.npy')\n", (2580, 2619), True, 'import numpy as np\n'), ((3059, 3093), 'sklearn.decomposition.PCA', 'decomposition.PCA', ([], {'n_components': '(15)'}), '(n_components=15)\n', (3076, 3093), False, 'from sklearn import decomposition\n'), ((467, 478), 'time.time', 'time.time', ([], {}), '()\n', (476, 478), False, 'import time\n'), ((555, 566), 'time.time', 'time.time', ([], {}), '()\n', (564, 566), False, 'import time\n'), ((800, 817), 'sklearn.neighbors.NearestCentroid', 'NearestCentroid', ([], {}), '()\n', (815, 817), False, 'from sklearn.neighbors import NearestCentroid\n'), ((915, 950), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(3)'}), '(n_neighbors=3)\n', (935, 950), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1048, 1083), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(7)'}), '(n_neighbors=7)\n', (1068, 1083), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1181, 1193), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1191, 1193), False, 'from sklearn.naive_bayes import GaussianNB, MultinomialNB\n'), ((1291, 1315), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1313, 1315), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1413, 1451), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(5)'}), '(n_estimators=5)\n', (1435, 1451), 
False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1549, 1588), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(50)'}), '(n_estimators=50)\n', (1571, 1588), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1686, 1726), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(500)'}), '(n_estimators=500)\n', (1708, 1726), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1824, 1865), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(1000)'}), '(n_estimators=1000)\n', (1846, 1865), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1963, 1980), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(0.01)'}), '(C=0.01)\n', (1972, 1980), False, 'from sklearn.svm import LinearSVC\n'), ((2078, 2094), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(0.1)'}), '(C=0.1)\n', (2087, 2094), False, 'from sklearn.svm import LinearSVC\n'), ((2192, 2208), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(1.0)'}), '(C=1.0)\n', (2201, 2208), False, 'from sklearn.svm import LinearSVC\n'), ((2306, 2323), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(10.0)'}), '(C=10.0)\n', (2315, 2323), False, 'from sklearn.svm import LinearSVC\n'), ((2352, 2400), 'numpy.load', 'np.load', (['"""../data/mnist/mnist_train_vectors.npy"""'], {}), "('../data/mnist/mnist_train_vectors.npy')\n", (2359, 2400), True, 'import numpy as np\n'), ((2494, 2541), 'numpy.load', 'np.load', (['"""../data/mnist/mnist_test_vectors.npy"""'], {}), "('../data/mnist/mnist_test_vectors.npy')\n", (2501, 2541), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import random
ENG_INPUT_PATH = 'eng\English Wordlist.csv'
DEU_INPUT_PATH = 'deu\GoetheA1.csv'
JAP_INPUT_PATH = 'eng\AdvanceIELTS.csv'
file_lst = ['eng\English Wordlist.csv','deu\GoetheA1.csv','eng\AdvanceIELTS.csv', 'deu\Duolingo.csv']
class WordGenerator:
    """Draws batches of unlearned vocabulary words from a CSV word list.

    The CSV must contain a 'Checked' column: 0 marks a word as not yet
    learned, while a positive integer records the generation round in which
    the word was drawn.  Every mutating operation writes the DataFrame
    straight back to the CSV so progress survives restarts.
    """

    # Class-level fallbacks, left in place for backward compatibility in
    # case __init__ fails before assigning the instance attributes.
    input_df = ''
    input_path = ''

    def __init__(self, input):
        """Load the word-list CSV at *input* into ``self.input_df``.

        Note: the parameter name shadows the built-in input(); it is kept
        unchanged so keyword callers keep working.
        """
        try:
            self.input_df = pd.DataFrame()
            self.input_df = pd.read_csv(input, header=0, encoding='latin-1')
            self.input_path = input
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        except Exception:
            print('Error when parsing input to dataframe')

    def GeneratedWord(self, k):
        """Sample *k* not-yet-learned words, print them, and mark them learned."""
        valid_df = self.input_df[self.input_df['Checked'] == 0]
        r_choices = valid_df.sample(n=k)
        print(self.input_path)
        print('')
        print(r_choices.head(k).sort_index())
        self.SaveLearnedWord(r_choices.index)

    def SaveLearnedWord(self, ids):
        """Stamp the rows in *ids* with the next generation number and persist."""
        next_val = self.input_df['Checked'].max() + 1
        for row_id in ids:  # renamed from ``id`` (shadowed the builtin)
            self.input_df.loc[row_id, 'Checked'] = next_val
        self.SaveToFile()

    def ResetWords(self):
        """Reset every word to the unlearned state (Checked == 0) and persist."""
        self.input_df.loc[:, 'Checked'] = np.zeros(self.input_df.shape[0])
        self.SaveToFile()

    def ResetLasLearnedtWords(self):
        """Undo the most recent generation round.

        The misspelled name is kept because external callers use it.
        """
        max_val = self.input_df['Checked'].max()
        if max_val == 0:
            return
        last_ids = self.input_df[self.input_df['Checked'] == max_val].index
        for row_id in last_ids:  # renamed from ``id`` (shadowed the builtin)
            self.input_df.loc[row_id, 'Checked'] = 0
            print(str(row_id) + ' ' + str(self.input_df.loc[row_id, 'Checked']))
        self.SaveToFile()

    def RerollWords(self, k):
        """Discard the last generated batch and draw a fresh one of size *k*."""
        self.ResetLasLearnedtWords()
        self.GeneratedWord(k)

    def ShowLastGeneratedWords(self):
        """Print the words drawn in the most recent generation round."""
        max_val = self.input_df['Checked'].max()
        if max_val == 0:
            print('You have not take any words')
            return
        print('You generated time(s): ' + str(max_val))
        print(self.input_df[self.input_df['Checked'] == max_val])

    def SaveToFile(self):
        """Write the current DataFrame back to the source CSV."""
        self.input_df.to_csv(self.input_path, index=False)

    def Stats(self):
        """Print how many words remain unlearned out of the total."""
        uncheck = self.input_df[self.input_df['Checked'] == 0].shape[0]
        total = self.input_df.shape[0]
        print('Total: ' + str(total))
        print('Words remaining: ' + str(uncheck))
# Interactive menu loop driving the word generator.  The active word list
# starts as the English one and can be switched via option 7.
WordGen = WordGenerator(ENG_INPUT_PATH)
MENU_LINES = (
    '1. Take words',
    '2. Reroll Words',
    '3. Reset Last Learned Words',
    '4. Reset words',
    '5. Stats',
    '6. Show your newest words',
    '7. ChangeList',
    'Other.Quit',
)
while True:
    for menu_line in MENU_LINES:
        print(menu_line)
    x = input()
    choice = int(x)
    if choice == 1:
        WordGen.GeneratedWord(15)
    elif choice == 2:
        WordGen.RerollWords(15)
    elif choice == 3:
        WordGen.ResetLasLearnedtWords()
    elif choice == 4:
        WordGen.ResetWords()
    elif choice == 5:
        WordGen.Stats()
    elif choice == 6:
        WordGen.ShowLastGeneratedWords()
    elif choice == 7:
        print('Chooose File:')
        n_files = len(file_lst)
        for idx, list_name in enumerate(file_lst, start=1):
            print(str(idx) + '. ' + list_name)
        print(str(n_files + 1) + '. Return')
        y = input()
        if 0 < int(y) <= n_files:
            WordGen = WordGenerator(file_lst[int(y) - 1])
        else:
            print('Oki back to the last')
    else:
        break
"pandas.DataFrame",
"numpy.zeros",
"pandas.read_csv"
] | [((1292, 1324), 'numpy.zeros', 'np.zeros', (['self.input_df.shape[0]'], {}), '(self.input_df.shape[0])\n', (1300, 1324), True, 'import numpy as np\n'), ((407, 421), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (419, 421), True, 'import pandas as pd\n'), ((450, 498), 'pandas.read_csv', 'pd.read_csv', (['input'], {'header': '(0)', 'encoding': '"""latin-1"""'}), "(input, header=0, encoding='latin-1')\n", (461, 498), True, 'import pandas as pd\n')] |
import cv2, glob, scipy
import numpy as np
import sys
import os
from scipy import signal
from PIL import Image
from tqdm import tqdm
import copy
if '__main__' == __name__:
    # Build a side-by-side comparison video: sys.argv[1..N] are directories of
    # per-frame "Cam_On_Image_*.png" images; each directory contributes one
    # 256x256 column per output frame, written to an XVID .avi file.
    # L = os.listdir('temp/')
    # for n in L:
    #     os.remove(os.path.join('temp', n))
    print("sys.argv", sys.argv)
    if 1 < len(sys.argv):
        # __path = os.path.join(os.getcwd(), str(sys.argv[1]))
        __path = str(sys.argv[1])
        new_filename = str(sys.argv[1])
        type_output_num = len(sys.argv) -1
        output_path = "output_test_comparison"+str(type_output_num)+".avi"
    else:
        print("ERROR: yezheng: input path")
        exit(1)
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # Overlay-text parameters shared by every cv2.putText call below.
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (10,30)
    fontScale = 1
    fontColor = (128,128,128)
    lineType = 2
    print("os.path.join(__path,'/*')", __path)
    img_list = glob.glob(__path+'/Cam_On_Image_*.png')
    # print("img_list", img_list)
    # img_list = [path_name for path_name in img_list if '.png' ==path_name[-4:] and not 'z' == path_name.split('/')[-1][0]]
    # Keep only .png files whose basename does not start with 'z'.
    img_list = [path_name for path_name in img_list if '.png' ==path_name[-4:] and not 'z' == path_name.split('/')[-1][0]]
    # print("img_list", img_list)
    img_list.sort()
    # img_list = img_list[]
    # print("img_list", img_list)
    # if 1 == type_output_num:
    #     output_path = os.path.join(__path,'output_zhang7.avi')
    # elif 2 == type_output_num:
    #     output_path = os.path.join(__path,output_name) #'output_zhang7.avi'
    output_path = os.path.join(new_filename,output_path)
    print("output_path", output_path)
    # out = cv2.VideoWriter(output_path, fourcc, 20.0, (256 * int(type_output_num/int(type_output_num/3)), 256*int(type_output_num/3)))#(256*, 256)
    # Frame width is one 256-px column per argv directory.
    out = cv2.VideoWriter(output_path, fourcc, 20.0, (256 * type_output_num, 256))#(256*, 256)
    count = 0
    for img_path_idx in tqdm(range(len(img_list))):
        img_path = img_list[img_path_idx]
        # print("img_path.split('/')[-1][:-4]", img_path.split('/')[-1][:-4])
        # Fourth underscore-separated token of the filename carries the frame
        # number, e.g. "Cam_On_Image_0000123.png".
        splits = img_path.split('/')[-1].split('_')[3]#[:-4].split('_')[0].split('e')
        # print("[make_video] splits", splits, splits.split())
        index = int(''.join([s for s in splits if s.isdigit()]))
        # print("[make_video] index", index)
        # if "Frame"== splits[0]:
        # 1 == img
        img = cv2.imread(img_path)
        # print("index",index)
        # cv2.imshow("img", img)
        # cv2.waitKey()
        # if index >10:
        #     exit()
        # NOTE(review): when imread fails this only logs -- img stays None and
        # np.max(img) below will raise; consider skipping the frame instead.
        if None is img:
            print("img_path", img_path)
        else:
            # print("img_path", img_path)
            pass
        # Heuristic: a near-black image is treated as a label mask (values are
        # class ids, not intensities) and rescaled for visibility.
        if np.max(img) <10:
            img = (1 == img) *1.0
            # print("img.shape", img.shape, "np.unique(img)", np.unique(img), np.sum(img[...,0] == img[...,1]) )
            img = 250*img.astype(np.float64)
        # print("img",np.sum(img)/120)
        img = cv2.resize(img, #dsize = img_comparison.shape
            (256,256))
        # if 1 == type_output_num:
        #     text = '{:05d}'.format(index)
        # else:
        #     text = '{:05d}(new)'.format(index)
        text = __path.split('_')[2] +"({})".format(index)
        cv2.putText(img,text,
            bottomLeftCornerOfText,
            font,
            fontScale,
            fontColor,
            lineType)
        # print("img_comparison", np.max(img_comparison), np.min(img_comparison),img_comparison.shape)
        # print("img",img.shape, "img_comparison",img_comparison.shape)
        # try:
        #     del new_img
        # except:
        #     pass
        # NOTE(review): the deepcopy result is immediately discarded by the
        # next assignment -- the first line appears to be dead code.
        new_img = copy.deepcopy(img)
        new_img = img
        # del img
        # Prepend one comparison column for each additional argv directory.
        for i in range(2,type_output_num+1):
            # compare_filename = "/Users/yezheng/medical_img/maskrcnn_single_img/clip_num_4/iter0/"+"{:05d}.png".format(index)
            # compare_prefix = '/Users/yezheng/medical_img/data/test/mask_label/iter0'
            compare_prefix = sys.argv[i]#'/Users/yezheng/medical_img/box_result/test/clip_num_4'
            img_com_list = glob.glob(os.path.join(compare_prefix,"Cam_On_Image_*.png"))
            img_com_list.sort()
            # First try 7-digit zero-padded frame numbers, then 4-digit.
            img_com_list = [n for n in img_com_list if "{:07d}".format(index) in n]
            if 0 == len(img_com_list):
                img_com_list = glob.glob(os.path.join(compare_prefix,"Cam_On_Image_*.png"))
                img_com_list = [n for n in img_com_list if "{:04d}".format(index) in n]
                img_com_list.sort()
            # print('[make_video] img_com_list', img_com_list)
            if 0 == len(img_com_list):
                compare_filename = None
                # Missing frame: substitute a white 256x256 placeholder.
                img_comparison = np.zeros((256,256,3))+255
            else:
                compare_filename = img_com_list[0]#compare_prefix+'/{:05d}.png'.format(index)#"/Frame{:04d}_ordered.png".format(index)
                # print("compare_filename", compare_filename)
                img_comparison = cv2.imread(compare_filename)
            # NOTE(review): ``is not`` compares identity, so this condition is
            # always True (and .shape is a 3-tuple anyway); the resize always
            # runs.  Probably intended: img_comparison.shape[:2] != (256, 256).
            if (256,256) is not img_comparison.shape:
                img_comparison = cv2.resize(img_comparison, (256,256))
            if np.max(img_comparison) <10:
                if i == 3:
                    img_comparison = (i == img_comparison) *1.0
                # NOTE(review): ``blood_idx`` is never defined anywhere in this
                # script -- this branch raises NameError if reached.  Confirm
                # the intended class index before fixing.
                if i == 2:
                    img_comparison = (blood_idx == img_comparison) *1.0
                if i == 4:
                    img_comparison = (2 == img_comparison) *1.0
                img_comparison = 120*img_comparison#.astype(np.int64)
            # NOTE(review): unreachable when the glob found no file (the white
            # placeholder branch above already replaced None).
            if img_comparison is None:
                count += 1
                print("yezheng's Exception", img_path, img.shape, img_comparison,"index", index, "compare_filename", compare_filename)
                # if count >10:
                #     break
                continue
            text = compare_prefix.split('_')[2]
            cv2.putText(img_comparison,text, bottomLeftCornerOfText,
                font,
                fontScale,
                fontColor,
                lineType)
            try:
                new_img = np.concatenate((img_comparison, new_img),axis=1)
            except Exception as e:
                print("Exception", e, img.shape, img_comparison.shape,compare_filename)
                continue
            del img_comparison
        # print("new_img", new_img.shape)
        # new_img= np.uint8(new_img)
        out.write(np.uint8(new_img))
    # cv2.imshow("new_img", new_img)
    # cv2.waitKey()
    out.release()
| [
"numpy.uint8",
"os.path.join",
"cv2.VideoWriter",
"cv2.putText",
"numpy.max",
"numpy.zeros",
"cv2.VideoWriter_fourcc",
"copy.deepcopy",
"numpy.concatenate",
"cv2.resize",
"cv2.imread",
"glob.glob"
] | [((655, 686), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (677, 686), False, 'import cv2, glob, scipy\n'), ((925, 966), 'glob.glob', 'glob.glob', (["(__path + '/Cam_On_Image_*.png')"], {}), "(__path + '/Cam_On_Image_*.png')\n", (934, 966), False, 'import cv2, glob, scipy\n'), ((1549, 1588), 'os.path.join', 'os.path.join', (['new_filename', 'output_path'], {}), '(new_filename, output_path)\n', (1561, 1588), False, 'import os\n'), ((1776, 1848), 'cv2.VideoWriter', 'cv2.VideoWriter', (['output_path', 'fourcc', '(20.0)', '(256 * type_output_num, 256)'], {}), '(output_path, fourcc, 20.0, (256 * type_output_num, 256))\n', (1791, 1848), False, 'import cv2, glob, scipy\n'), ((2320, 2340), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (2330, 2340), False, 'import cv2, glob, scipy\n'), ((2771, 2798), 'cv2.resize', 'cv2.resize', (['img', '(256, 256)'], {}), '(img, (256, 256))\n', (2781, 2798), False, 'import cv2, glob, scipy\n'), ((3002, 3090), 'cv2.putText', 'cv2.putText', (['img', 'text', 'bottomLeftCornerOfText', 'font', 'fontScale', 'fontColor', 'lineType'], {}), '(img, text, bottomLeftCornerOfText, font, fontScale, fontColor,\n lineType)\n', (3013, 3090), False, 'import cv2, glob, scipy\n'), ((3329, 3347), 'copy.deepcopy', 'copy.deepcopy', (['img'], {}), '(img)\n', (3342, 3347), False, 'import copy\n'), ((2544, 2555), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (2550, 2555), True, 'import numpy as np\n'), ((5144, 5243), 'cv2.putText', 'cv2.putText', (['img_comparison', 'text', 'bottomLeftCornerOfText', 'font', 'fontScale', 'fontColor', 'lineType'], {}), '(img_comparison, text, bottomLeftCornerOfText, font, fontScale,\n fontColor, lineType)\n', (5155, 5243), False, 'import cv2, glob, scipy\n'), ((5547, 5564), 'numpy.uint8', 'np.uint8', (['new_img'], {}), '(new_img)\n', (5555, 5564), True, 'import numpy as np\n'), ((3728, 3778), 'os.path.join', 'os.path.join', (['compare_prefix', 
'"""Cam_On_Image_*.png"""'], {}), "(compare_prefix, 'Cam_On_Image_*.png')\n", (3740, 3778), False, 'import os\n'), ((4449, 4477), 'cv2.imread', 'cv2.imread', (['compare_filename'], {}), '(compare_filename)\n', (4459, 4477), False, 'import cv2, glob, scipy\n'), ((5280, 5329), 'numpy.concatenate', 'np.concatenate', (['(img_comparison, new_img)'], {'axis': '(1)'}), '((img_comparison, new_img), axis=1)\n', (5294, 5329), True, 'import numpy as np\n'), ((3936, 3986), 'os.path.join', 'os.path.join', (['compare_prefix', '"""Cam_On_Image_*.png"""'], {}), "(compare_prefix, 'Cam_On_Image_*.png')\n", (3948, 3986), False, 'import os\n'), ((4220, 4243), 'numpy.zeros', 'np.zeros', (['(256, 256, 3)'], {}), '((256, 256, 3))\n', (4228, 4243), True, 'import numpy as np\n'), ((4546, 4584), 'cv2.resize', 'cv2.resize', (['img_comparison', '(256, 256)'], {}), '(img_comparison, (256, 256))\n', (4556, 4584), False, 'import cv2, glob, scipy\n'), ((4591, 4613), 'numpy.max', 'np.max', (['img_comparison'], {}), '(img_comparison)\n', (4597, 4613), True, 'import numpy as np\n')] |
import argparse
import time
import numpy as np
import theano as th
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
import lasagne
import lasagne.layers as ll
from lasagne.init import Normal
from lasagne.layers import dnn
import nn
import sys
import cifar10_data
from checkpoints import save_weights,load_weights
# settings
# Command-line configuration for semi-supervised CIFAR-10 training of a
# random field.  Several flags (beta, gradient/noise coefficients, L,
# revison_method) configure the SGLD/SGHMC revision stage.
# NOTE(review): the '--revison_method' flag name has a typo ("revison");
# it is runtime-visible, so it is left unchanged here.
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=1) #random seed for theano operation
parser.add_argument('--seed_data', type=int, default=1) #random seed for picking labeled data
parser.add_argument('--count', type=int, default=400) #how much data one class
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--base_RF_loss_weight', type=float, default=0.01) #weight for base random field loss, i.e. f-E[f]
parser.add_argument('--lrd', type=float, default=1e-3) #learning rate for the random field (discriminator)
parser.add_argument('--lrg', type=float, default=1e-3) #learning rate for the generator
parser.add_argument('--potential_control_weight', default=1e-3 ,type=float) #weight for confidence loss
parser.add_argument('--beta', default=0.5 ,type=float) #beta for SGHMC
parser.add_argument('--gradient_coefficient', default=0.003,type=float) #coefficient for gradient term of SGLD/SGHMC
parser.add_argument('--noise_coefficient', default=0,type=float) #coefficient for noise term of SGLD/SGHMC
parser.add_argument('--L', default=10 ,type=int) #revision steps
parser.add_argument('--max_e', default=600 ,type=int) #max number of epochs
parser.add_argument('--revison_method', default='revision_x_sghmc' ,type=str) #revision method
parser.add_argument('--load', default='' ,type=str) #file name to load trained model
parser.add_argument('--data_dir', type=str, default='data/cifar-10-python/') #data folder to load
args = parser.parse_args()
print(args)
# fixed random seeds
# One RNG drives everything; theano and lasagne get sub-seeds drawn from it
# so a single --seed makes the whole run reproducible.
rng = np.random.RandomState(args.seed)
theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))
# load CIFAR data
def rescale(mat):
    """Map raw pixel values in [0, 255] to floatX values in [-1, 1] and
    reorder the array axes with a (3, 2, 0, 1) transpose.

    NOTE(review): the transpose presumably converts the loader's layout to
    (N, C, H, W) -- confirm against cifar10_data.load's output shape.
    """
    normalized = np.cast[th.config.floatX]((-127.5 + mat)/127.5)
    return np.transpose(normalized, (3, 2, 0, 1))
# Load the CIFAR-10 train/test splits from args.data_dir.
trainx, trainy = cifar10_data.load(args.data_dir, subset='train')
testx, testy = cifar10_data.load(args.data_dir, subset='test')
trainx_unl = np.array(trainx).copy() # unlabeled pool: a copy of the whole training set
nr_batches_train = int(trainx.shape[0]/args.batch_size) # truncates: drops any last partial batch
nr_batches_test = int(np.ceil(float(testx.shape[0])/args.batch_size)) # rounds up: keeps the last partial batch
# specify random field
# Conv net: three 128-filter 3x3 convs, pool+dropout, three 256-filter
# convs, pool+dropout, then a 512 conv + two NIN (1x1) layers, global
# pooling and a 10-way dense output.  Every layer is weight-normalised.
layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_1'),name='d_w1'))
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_2'),name='d_w2'))
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_3'),name='d_w3'))
layers.append(ll.MaxPool2DLayer(layers[-1],(2,2)))
layers.append(ll.DropoutLayer(layers[-1], p=0.5))
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_4'),name='d_w4'))
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_5'),name='d_w5'))
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_6'),name='d_w6'))
layers.append(ll.MaxPool2DLayer(layers[-1],(2,2)))
layers.append(ll.DropoutLayer(layers[-1], p=0.5))
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1],512, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_7'),name='d_w7'))
layers.append(nn.weight_norm(ll.NINLayer(layers[-1], num_units=256, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_8'),name='d_w8'))
layers.append(nn.weight_norm(ll.NINLayer(layers[-1], num_units=128, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_9'),name='d_w9'))
layers.append(ll.GlobalPoolLayer(layers[-1]))
# Final layer: no nonlinearity (raw logits); train_g/init_stdv tune the
# weight-norm data-based initialisation of this layer.
layers.append(nn.weight_norm(ll.DenseLayer(layers[-1], num_units=10, W=Normal(0.05), nonlinearity=None,name='d_10'), train_g=True, init_stdv=0.1,name='d_w10'))
labels = T.ivector()  # int labels of a labeled minibatch
x_lab = T.tensor4()   # labeled image minibatch
# Forward pass with init=True collects the weight-norm data-dependent
# initialisation updates from every layer.
temp = ll.get_output(layers[-1], x_lab, deterministic=False, init=True)
init_updates = [u for l in layers for u in getattr(l,'init_updates',[])]
output_before_softmax_lab = ll.get_output(layers[-1], x_lab, deterministic=False)
# Logit of each sample's true class.
logit_lab = output_before_softmax_lab[T.arange(T.shape(x_lab)[0]),labels]
# log-sum-exp of the logits: the log partition term of the softmax.
u_lab = T.mean(nn.log_sum_exp(output_before_softmax_lab))
#cross entropy loss of labeled data
loss_lab = -T.mean(logit_lab) + u_lab
train_err = T.mean(T.neq(T.argmax(output_before_softmax_lab,axis=1),labels))
# test error
output_before_softmax = ll.get_output(layers[-1], x_lab, deterministic=True)
test_err = T.mean(T.neq(T.argmax(output_before_softmax,axis=1),labels))
# Theano functions for training the random field
lr = T.scalar()  # learning rate fed per call
RF_params = ll.get_all_params(layers, trainable=True)
RF_param_updates = lasagne.updates.rmsprop(loss_lab, RF_params, learning_rate=lr)
train_RF = th.function(inputs=[x_lab,labels,lr], outputs=[loss_lab, train_err], updates=RF_param_updates)
#weight norm initialization
init_param = th.function(inputs=[x_lab], outputs=None, updates=init_updates)
#prediction on test data (deterministic: dropout disabled)
output_before_softmax = ll.get_output(layers[-1], x_lab, deterministic=True)
test_batch = th.function(inputs=[x_lab], outputs=output_before_softmax)
# select labeled data
# Shuffle the training set with the dedicated seed, then keep the first
# args.count samples of each of the 10 classes as the labeled subset.
rng_data = np.random.RandomState(args.seed_data)
inds = rng_data.permutation(trainx.shape[0])
trainx = trainx[inds]
trainy = trainy[inds]
txs = []
tys = []
for j in range(10):
    txs.append(trainx[trainy==j][:args.count])
    tys.append(trainy[trainy==j][:args.count])
txs = np.concatenate(txs, axis=0)
tys = np.concatenate(tys, axis=0)
# //////////// perform training //////////////
lr_D=args.lrd
lr_G=args.lrg  # NOTE(review): assigned but never used in this script
beta=args.beta  # NOTE(review): beta/gradient/noise coefficients and the
gradient_coefficient=args.gradient_coefficient  # base-RF/potential weights below are also unused here;
noise_coefficient=args.noise_coefficient        # presumably consumed by a generator/revision stage
base_RF_loss_weight = args.base_RF_loss_weight  # that is not part of this file -- confirm.
potential_control_weight=args.potential_control_weight
acc_all=[]   # per-epoch test error history
best_acc=1   # best (lowest) test error seen so far
for epoch in range(args.max_e):
    begin = time.time()
    # construct randomly permuted minibatches
    # Repeat the labeled subset (reshuffled each repetition) until it is at
    # least as large as the unlabeled pool.
    trainx = []
    trainy = []
    for t in range(int(np.ceil(trainx_unl.shape[0]/float(txs.shape[0])))):
        inds = rng.permutation(txs.shape[0])
        trainx.append(txs[inds])
        trainy.append(tys[inds])
    trainx = np.concatenate(trainx, axis=0)
    trainy = np.concatenate(trainy, axis=0)
    if epoch==0:
        init_param(trainx[:500]) # data based initialization
        if args.load:
            load_weights('cifar_model/%s.npy'%args.load, layers)
            print('loaded!')
        # load_weights('cifar_model/pretrain_nrf_ep150.npy' , layers)
    # train
    loss_lab = 0.
    loss_unl = 0.  # NOTE(review): never accumulated -- stays 0 and is never reported
    train_err = 0.
    for t in range(nr_batches_train):
        ran_from = t * args.batch_size
        ran_to = (t + 1) * args.batch_size
        # update the random field on one labeled minibatch
        lo_lab, tr_er = train_RF( trainx[ran_from:ran_to], trainy[ran_from:ran_to], lr_D)
        loss_lab += lo_lab
        train_err += tr_er
        # (generator update would go here)
    loss_lab /= nr_batches_train
    loss_unl /= nr_batches_train
    train_err /= nr_batches_train
    # test
    test_pred = np.zeros((len(testy), 10), dtype=th.config.floatX)
    for t in range(nr_batches_test):
        # Walk backwards from last_ind so the final (partial) batch is a
        # full-size window that overlaps the previous one.
        last_ind = np.minimum((t + 1) * args.batch_size, len(testy))
        first_ind = last_ind - args.batch_size
        test_pred[first_ind:last_ind] = test_batch(testx[first_ind:last_ind])
    test_err = np.mean(np.argmax(test_pred, axis=1) != testy)
    print(
        "epoch %d, time = %ds, loss_lab = %.4f, train err = %.4f, test err = %.4f, best_err = %.4f" % (
            epoch + 1, time.time() - begin, loss_lab, train_err, test_err, best_acc))
    sys.stdout.flush()
    acc_all.append(test_err)
    if acc_all[-1] < best_acc:
        best_acc = acc_all[-1]
    if (epoch + 1) % 50 == 0:
        # periodic checkpoint every 50 epochs
        import os
        if not os.path.exists('cifar_model'):
            os.mkdir('cifar_model')
        params = ll.get_all_params(layers)
        save_weights('cifar_model/finetune_nrf_data%d_ep%d.npy' % (args.seed_data, epoch + 1), params)
| [
"lasagne.updates.rmsprop",
"numpy.array",
"theano.tensor.mean",
"theano.tensor.argmax",
"lasagne.layers.MaxPool2DLayer",
"numpy.random.RandomState",
"lasagne.layers.get_all_params",
"os.path.exists",
"theano.tensor.shape",
"theano.function",
"argparse.ArgumentParser",
"lasagne.layers.DropoutLa... | [((364, 389), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (387, 389), False, 'import argparse\n'), ((1848, 1880), 'numpy.random.RandomState', 'np.random.RandomState', (['args.seed'], {}), '(args.seed)\n', (1869, 1880), True, 'import numpy as np\n'), ((2140, 2188), 'cifar10_data.load', 'cifar10_data.load', (['args.data_dir'], {'subset': '"""train"""'}), "(args.data_dir, subset='train')\n", (2157, 2188), False, 'import cifar10_data\n'), ((2204, 2251), 'cifar10_data.load', 'cifar10_data.load', (['args.data_dir'], {'subset': '"""test"""'}), "(args.data_dir, subset='test')\n", (2221, 2251), False, 'import cifar10_data\n'), ((4144, 4155), 'theano.tensor.ivector', 'T.ivector', ([], {}), '()\n', (4153, 4155), True, 'import theano.tensor as T\n'), ((4164, 4175), 'theano.tensor.tensor4', 'T.tensor4', ([], {}), '()\n', (4173, 4175), True, 'import theano.tensor as T\n'), ((4184, 4248), 'lasagne.layers.get_output', 'll.get_output', (['layers[-1]', 'x_lab'], {'deterministic': '(False)', 'init': '(True)'}), '(layers[-1], x_lab, deterministic=False, init=True)\n', (4197, 4248), True, 'import lasagne.layers as ll\n'), ((4351, 4404), 'lasagne.layers.get_output', 'll.get_output', (['layers[-1]', 'x_lab'], {'deterministic': '(False)'}), '(layers[-1], x_lab, deterministic=False)\n', (4364, 4404), True, 'import lasagne.layers as ll\n'), ((4731, 4783), 'lasagne.layers.get_output', 'll.get_output', (['layers[-1]', 'x_lab'], {'deterministic': '(True)'}), '(layers[-1], x_lab, deterministic=True)\n', (4744, 4783), True, 'import lasagne.layers as ll\n'), ((4912, 4922), 'theano.tensor.scalar', 'T.scalar', ([], {}), '()\n', (4920, 4922), True, 'import theano.tensor as T\n'), ((4935, 4976), 'lasagne.layers.get_all_params', 'll.get_all_params', (['layers'], {'trainable': '(True)'}), '(layers, trainable=True)\n', (4952, 4976), True, 'import lasagne.layers as ll\n'), ((4996, 5058), 'lasagne.updates.rmsprop', 'lasagne.updates.rmsprop', 
(['loss_lab', 'RF_params'], {'learning_rate': 'lr'}), '(loss_lab, RF_params, learning_rate=lr)\n', (5019, 5058), False, 'import lasagne\n'), ((5070, 5170), 'theano.function', 'th.function', ([], {'inputs': '[x_lab, labels, lr]', 'outputs': '[loss_lab, train_err]', 'updates': 'RF_param_updates'}), '(inputs=[x_lab, labels, lr], outputs=[loss_lab, train_err],\n updates=RF_param_updates)\n', (5081, 5170), True, 'import theano as th\n'), ((5205, 5268), 'theano.function', 'th.function', ([], {'inputs': '[x_lab]', 'outputs': 'None', 'updates': 'init_updates'}), '(inputs=[x_lab], outputs=None, updates=init_updates)\n', (5216, 5268), True, 'import theano as th\n'), ((5317, 5369), 'lasagne.layers.get_output', 'll.get_output', (['layers[-1]', 'x_lab'], {'deterministic': '(True)'}), '(layers[-1], x_lab, deterministic=True)\n', (5330, 5369), True, 'import lasagne.layers as ll\n'), ((5383, 5441), 'theano.function', 'th.function', ([], {'inputs': '[x_lab]', 'outputs': 'output_before_softmax'}), '(inputs=[x_lab], outputs=output_before_softmax)\n', (5394, 5441), True, 'import theano as th\n'), ((5477, 5514), 'numpy.random.RandomState', 'np.random.RandomState', (['args.seed_data'], {}), '(args.seed_data)\n', (5498, 5514), True, 'import numpy as np\n'), ((5742, 5769), 'numpy.concatenate', 'np.concatenate', (['txs'], {'axis': '(0)'}), '(txs, axis=0)\n', (5756, 5769), True, 'import numpy as np\n'), ((5776, 5803), 'numpy.concatenate', 'np.concatenate', (['tys'], {'axis': '(0)'}), '(tys, axis=0)\n', (5790, 5803), True, 'import numpy as np\n'), ((2449, 2487), 'lasagne.layers.InputLayer', 'll.InputLayer', ([], {'shape': '(None, 3, 32, 32)'}), '(shape=(None, 3, 32, 32))\n', (2462, 2487), True, 'import lasagne.layers as ll\n'), ((2920, 2957), 'lasagne.layers.MaxPool2DLayer', 'll.MaxPool2DLayer', (['layers[-1]', '(2, 2)'], {}), '(layers[-1], (2, 2))\n', (2937, 2957), True, 'import lasagne.layers as ll\n'), ((2971, 3005), 'lasagne.layers.DropoutLayer', 'll.DropoutLayer', (['layers[-1]'], {'p': 
'(0.5)'}), '(layers[-1], p=0.5)\n', (2986, 3005), True, 'import lasagne.layers as ll\n'), ((3438, 3475), 'lasagne.layers.MaxPool2DLayer', 'll.MaxPool2DLayer', (['layers[-1]', '(2, 2)'], {}), '(layers[-1], (2, 2))\n', (3455, 3475), True, 'import lasagne.layers as ll\n'), ((3489, 3523), 'lasagne.layers.DropoutLayer', 'll.DropoutLayer', (['layers[-1]'], {'p': '(0.5)'}), '(layers[-1], p=0.5)\n', (3504, 3523), True, 'import lasagne.layers as ll\n'), ((3941, 3971), 'lasagne.layers.GlobalPoolLayer', 'll.GlobalPoolLayer', (['layers[-1]'], {}), '(layers[-1])\n', (3959, 3971), True, 'import lasagne.layers as ll\n'), ((4497, 4538), 'nn.log_sum_exp', 'nn.log_sum_exp', (['output_before_softmax_lab'], {}), '(output_before_softmax_lab)\n', (4511, 4538), False, 'import nn\n'), ((6151, 6162), 'time.time', 'time.time', ([], {}), '()\n', (6160, 6162), False, 'import time\n'), ((6440, 6470), 'numpy.concatenate', 'np.concatenate', (['trainx'], {'axis': '(0)'}), '(trainx, axis=0)\n', (6454, 6470), True, 'import numpy as np\n'), ((6484, 6514), 'numpy.concatenate', 'np.concatenate', (['trainy'], {'axis': '(0)'}), '(trainy, axis=0)\n', (6498, 6514), True, 'import numpy as np\n'), ((7845, 7863), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7861, 7863), False, 'import sys\n'), ((2265, 2281), 'numpy.array', 'np.array', (['trainx'], {}), '(trainx)\n', (2273, 2281), True, 'import numpy as np\n'), ((4589, 4606), 'theano.tensor.mean', 'T.mean', (['logit_lab'], {}), '(logit_lab)\n', (4595, 4606), True, 'import theano.tensor as T\n'), ((4641, 4684), 'theano.tensor.argmax', 'T.argmax', (['output_before_softmax_lab'], {'axis': '(1)'}), '(output_before_softmax_lab, axis=1)\n', (4649, 4684), True, 'import theano.tensor as T\n'), ((4808, 4847), 'theano.tensor.argmax', 'T.argmax', (['output_before_softmax'], {'axis': '(1)'}), '(output_before_softmax, axis=1)\n', (4816, 4847), True, 'import theano.tensor as T\n'), ((8103, 8128), 'lasagne.layers.get_all_params', 'll.get_all_params', 
(['layers'], {}), '(layers)\n', (8120, 8128), True, 'import lasagne.layers as ll\n'), ((8137, 8236), 'checkpoints.save_weights', 'save_weights', (["('cifar_model/finetune_nrf_data%d_ep%d.npy' % (args.seed_data, epoch + 1))", 'params'], {}), "('cifar_model/finetune_nrf_data%d_ep%d.npy' % (args.seed_data, \n epoch + 1), params)\n", (8149, 8236), False, 'from checkpoints import save_weights, load_weights\n'), ((6627, 6681), 'checkpoints.load_weights', 'load_weights', (["('cifar_model/%s.npy' % args.load)", 'layers'], {}), "('cifar_model/%s.npy' % args.load, layers)\n", (6639, 6681), False, 'from checkpoints import save_weights, load_weights\n'), ((7601, 7629), 'numpy.argmax', 'np.argmax', (['test_pred'], {'axis': '(1)'}), '(test_pred, axis=1)\n', (7610, 7629), True, 'import numpy as np\n'), ((8019, 8048), 'os.path.exists', 'os.path.exists', (['"""cifar_model"""'], {}), "('cifar_model')\n", (8033, 8048), False, 'import os\n'), ((8062, 8085), 'os.mkdir', 'os.mkdir', (['"""cifar_model"""'], {}), "('cifar_model')\n", (8070, 8085), False, 'import os\n'), ((2566, 2578), 'lasagne.init.Normal', 'Normal', (['(0.05)'], {}), '(0.05)\n', (2572, 2578), False, 'from lasagne.init import Normal\n'), ((2705, 2717), 'lasagne.init.Normal', 'Normal', (['(0.05)'], {}), '(0.05)\n', (2711, 2717), False, 'from lasagne.init import Normal\n'), ((2844, 2856), 'lasagne.init.Normal', 'Normal', (['(0.05)'], {}), '(0.05)\n', (2850, 2856), False, 'from lasagne.init import Normal\n'), ((3084, 3096), 'lasagne.init.Normal', 'Normal', (['(0.05)'], {}), '(0.05)\n', (3090, 3096), False, 'from lasagne.init import Normal\n'), ((3223, 3235), 'lasagne.init.Normal', 'Normal', (['(0.05)'], {}), '(0.05)\n', (3229, 3235), False, 'from lasagne.init import Normal\n'), ((3362, 3374), 'lasagne.init.Normal', 'Normal', (['(0.05)'], {}), '(0.05)\n', (3368, 3374), False, 'from lasagne.init import Normal\n'), ((3601, 3613), 'lasagne.init.Normal', 'Normal', (['(0.05)'], {}), '(0.05)\n', (3607, 3613), False, 'from 
lasagne.init import Normal\n'), ((3733, 3745), 'lasagne.init.Normal', 'Normal', (['(0.05)'], {}), '(0.05)\n', (3739, 3745), False, 'from lasagne.init import Normal\n'), ((3865, 3877), 'lasagne.init.Normal', 'Normal', (['(0.05)'], {}), '(0.05)\n', (3871, 3877), False, 'from lasagne.init import Normal\n'), ((4044, 4056), 'lasagne.init.Normal', 'Normal', (['(0.05)'], {}), '(0.05)\n', (4050, 4056), False, 'from lasagne.init import Normal\n'), ((4454, 4468), 'theano.tensor.shape', 'T.shape', (['x_lab'], {}), '(x_lab)\n', (4461, 4468), True, 'import theano.tensor as T\n'), ((7778, 7789), 'time.time', 'time.time', ([], {}), '()\n', (7787, 7789), False, 'import time\n')] |
import math
import random
from settings import *
import pygame as pg
import time
import cv2
import statistics
import numpy as np
class Pad:
    """A player paddle whose vertical position is driven by locating a
    template in a camera frame.

    number 1 is the left paddle, number 2 the right one.  ``threshold`` is
    the minimum normalised cross-correlation score accepted as a template
    hit; ``scale`` and ``delta`` map camera-space coordinates onto window
    coordinates.
    """

    def __init__(self, number, threshold=0.3, scale=500, delta=250):
        self.score = 0
        self.image = pg.Surface((PAD_WIDTH*2, PAD_HEIGHT*2))
        self.image.fill((255, 0, 0))
        self.height = PAD_HEIGHT
        self.width = PAD_WIDTH
        self.threshold = threshold
        self.scale = scale
        self.delta = delta
        if number == 1:
            self.pos = [PAD_WIDTH, WINDOWS_HEIGHT/2]
        elif number == 2:
            self.pos = [WINDOWS_WIDTH-PAD_WIDTH, WINDOWS_HEIGHT/2]
        else:
            # Previously an invalid number silently left self.pos unset and
            # crashed later; fail fast instead.
            raise ValueError("number must be 1 (left) or 2 (right)")
        self.number = number

    def draw(self, screen):
        """Blit the paddle so that self.pos is its centre."""
        screen.blit(self.image, (self.pos[0]-self.width, self.pos[1]-self.height))

    def move(self, where, what, camera_res=480):
        """Re-position the paddle vertically by template-matching *what*
        inside the camera frame *where*.

        Keeps the previous position when no match clears the threshold.
        """
        res = cv2.matchTemplate(where, what, cv2.TM_CCOEFF_NORMED)
        h = what.shape[0]  # template height (rows); width is unused
        # np.where on the score map returns (row_idx, col_idx); the rows are
        # the y coordinates of every location above threshold.
        ys = list(np.where(res >= self.threshold)[0])
        if not ys:
            return
        mean_y = int(statistics.median(ys))
        # Map the match centre from camera space into window space.
        self.pos[1] = ((2*mean_y + h)/2/camera_res)*(WINDOWS_HEIGHT + self.scale) - self.delta
class Wall(Pad):
    # A full-height "paddle": effectively a wall the ball cannot get past
    # (e.g. for single-player practice).
    def __init__(self,number):
        super().__init__(number)
        self.image=pg.Surface((PAD_WIDTH*2,WINDOWS_HEIGHT*10))
        self.image.fill((255,0,0))
        self.height=WINDOWS_HEIGHT*10
        self.width=PAD_WIDTH
        # NOTE(review): pos[1] is the y coordinate everywhere else, yet it is
        # set from WINDOWS_WIDTH here -- looks like an axis mix-up; confirm
        # whether pos[0] (x) was intended.
        self.pos[1]=WINDOWS_WIDTH-self.width
class Ball:
    """The pong ball: position, velocity direction, collisions and scoring."""

    def __init__(self):
        self.pos = BALL_STARTPOS.copy()
        self.speed = BALL_SPEED
        self.generate_angle()
        self.acceleration = BALL_ACCELERATION
        self.image = pg.Surface((BALL_RADIUS*2, BALL_RADIUS*2))
        self.image.fill((0, 255, 0))
        # Paddle collisions are ignored for one second after this timestamp.
        # NOTE(review): it is set only here, never refreshed on a hit --
        # confirm whether collide() should update it.
        self.last_hit = time.time()
        self.radius = BALL_RADIUS

    def move(self, dt, pad1, pad2):
        """Advance the ball by dt (scaled by speed) and resolve collisions."""
        self.pos[0] += dt*self.speed*self.vec_speed[0]
        self.pos[1] += dt*self.speed*self.vec_speed[1]
        self.collide(pad1, pad2)

    def generate_angle(self):
        """Pick a random direction: |x| in [0.3, 0.9] with random sign, and
        y chosen so that |x| + |y| == 1."""
        x = random.randint(30, 90)/100*random.choice((-1, 1))
        if x > 0:
            y = 1 - x
        else:
            y = 1 + x
        self.vec_speed = [x, y]

    def respawn(self, winner):
        """Reset position/speed after a goal and credit *winner* a point."""
        self.generate_angle()
        self.speed = BALL_SPEED
        self.pos = BALL_STARTPOS.copy()
        winner.score += 1

    def update(self, screen):
        """Draw the ball so that self.pos is its centre."""
        screen.blit(self.image, (self.pos[0]-self.radius, self.pos[1]-self.radius))

    def collide(self, pad1, pad2):
        """Bounce off paddles and the top/bottom walls; score on left/right exits."""
        if time.time()-self.last_hit >= 1:
            # Right paddle (pad2).  Fixed swapped attribute: the original read
            # pad1.width here, which is only harmless while both pads share a
            # width.  (Same fix, mirrored, for the left paddle below.)
            if self.pos[0]+self.radius >= pad2.pos[0]-pad2.width:
                if self.pos[1]+self.radius > pad2.pos[1]-pad2.height and self.pos[1]-self.radius < pad2.pos[1]+pad2.height:
                    self.vec_speed[0] = -self.vec_speed[0]
                    self.speed += self.acceleration
            # Left paddle (pad1).
            if self.pos[0]-self.radius <= pad1.pos[0]+pad1.width:
                if self.pos[1]+self.radius > pad1.pos[1]-pad1.height and self.pos[1]-self.radius < pad1.pos[1]+pad1.height:
                    self.vec_speed[0] = -self.vec_speed[0]
                    self.speed += self.acceleration
        if self.pos[1] < WALLS[2]+self.radius:  # top wall
            self.vec_speed[1] = -self.vec_speed[1]
        if self.pos[1] > WALLS[3]-self.radius:  # bottom wall
            self.vec_speed[1] = -self.vec_speed[1]
        if self.pos[0] > WALLS[1]:  # out on the right: pad1 scores
            self.respawn(pad1)
            print(str(pad1.score)+":"+str(pad2.score))
        if self.pos[0] < WALLS[0]:  # out on the left: pad2 scores
            self.respawn(pad2)
            print(str(pad1.score)+":"+str(pad2.score))
| [
"random.choice",
"pygame.Surface",
"numpy.where",
"statistics.median",
"cv2.matchTemplate",
"time.time",
"random.randint"
] | [((245, 288), 'pygame.Surface', 'pg.Surface', (['(PAD_WIDTH * 2, PAD_HEIGHT * 2)'], {}), '((PAD_WIDTH * 2, PAD_HEIGHT * 2))\n', (255, 288), True, 'import pygame as pg\n'), ((849, 901), 'cv2.matchTemplate', 'cv2.matchTemplate', (['where', 'what', 'cv2.TM_CCOEFF_NORMED'], {}), '(where, what, cv2.TM_CCOEFF_NORMED)\n', (866, 901), False, 'import cv2\n'), ((954, 985), 'numpy.where', 'np.where', (['(res >= self.threshold)'], {}), '(res >= self.threshold)\n', (962, 985), True, 'import numpy as np\n'), ((1383, 1431), 'pygame.Surface', 'pg.Surface', (['(PAD_WIDTH * 2, WINDOWS_HEIGHT * 10)'], {}), '((PAD_WIDTH * 2, WINDOWS_HEIGHT * 10))\n', (1393, 1431), True, 'import pygame as pg\n'), ((1781, 1827), 'pygame.Surface', 'pg.Surface', (['(BALL_RADIUS * 2, BALL_RADIUS * 2)'], {}), '((BALL_RADIUS * 2, BALL_RADIUS * 2))\n', (1791, 1827), True, 'import pygame as pg\n'), ((1880, 1891), 'time.time', 'time.time', ([], {}), '()\n', (1889, 1891), False, 'import time\n'), ((1162, 1182), 'statistics.median', 'statistics.median', (['Y'], {}), '(Y)\n', (1179, 1182), False, 'import statistics\n'), ((2188, 2210), 'random.choice', 'random.choice', (['(-1, 1)'], {}), '((-1, 1))\n', (2201, 2210), False, 'import random\n'), ((2162, 2184), 'random.randint', 'random.randint', (['(30)', '(90)'], {}), '(30, 90)\n', (2176, 2184), False, 'import random\n'), ((2674, 2685), 'time.time', 'time.time', ([], {}), '()\n', (2683, 2685), False, 'import time\n')] |
# to help create semantic-signatures for the contents of a cell. Then match them with adjacent cells to detect tabular content from a csv.
# magnifying glass, tree matching, glo-op
# for each token get a signature
import pandas as pd
import jellyfish
import numpy as np
from collections import Counter
def find_ngrams(input_list, n):
    """Yield successive n-grams of *input_list* as tuples (lazily)."""
    shifted_views = [input_list[offset:] for offset in range(n)]
    return zip(*shifted_views)
def createsign(string):
    """Build a condensed character-class signature for *string*.

    Each character maps to 'A' (letter), 'n' (digit) or itself; then runs of
    three or more identical class characters are collapsed: letters to 'W'
    (word), digits to 'N' (number).  Shorter runs are kept verbatim.
    """
    def collapse(run):
        # Collapse one maximal run of identical class characters.
        if len(run) > 2 and run[0] == 'A':
            return 'W'
        if len(run) > 2 and run[0] == 'n':
            return 'N'
        return ''.join(run)

    text = str(string)
    raw = ''.join(
        'A' if ch.isalpha() else 'n' if ch.isnumeric() else ch
        for ch in text
    )
    condensed = ''
    run = []
    for ch in raw:
        if run and ch != run[-1]:
            condensed += collapse(run)
            run = []
        run.append(ch)
    if run:
        condensed += collapse(run)
    return condensed
#get cosine similarity between two document vectors
def get_cosine(vec1, vec2):
    """Cosine similarity between two sparse term-frequency vectors (dicts).

    Returns 0.0 when either vector has zero norm.
    """
    import math
    shared_keys = set(vec1) & set(vec2)
    dot_product = sum(vec1[key] * vec2[key] for key in shared_keys)
    norm1 = math.sqrt(sum(value ** 2 for value in vec1.values()))
    norm2 = math.sqrt(sum(value ** 2 for value in vec2.values()))
    norm_product = norm1 * norm2
    if not norm_product:
        return 0.0
    return float(dot_product) / norm_product
def text_to_vector(text):
    """Turn an iterable of tokens/characters into a frequency vector."""
    frequencies = Counter(text)
    return frequencies
#get cosine similarity between texts
def CosineSim(text1, text2):
    """Cosine similarity between two texts treated as token multisets."""
    return get_cosine(text_to_vector(text1), text_to_vector(text2))
def levenshtein_similarity(s, t):
    """Normalised Levenshtein similarity in [0, 1]."""
    longest = float(max(len(s), len(t)))
    return 1.0 - jellyfish.levenshtein_distance(s, t) / longest
def jaro_winkler_similarity(s, t):
    """Jaro-Winkler similarity of the two strings."""
    return jellyfish.jaro_winkler(s, t)
def similarity(a, b):
    """Levenshtein similarity between the semantic signatures of a and b."""
    return levenshtein_similarity(createsign(a), createsign(b))
def similarity2(a, b):
    """Cosine similarity between the signature character multisets of a and b."""
    return CosineSim(list(createsign(a)), list(createsign(b)))
def simpleNN(column, clusternum, threshold=0.9):
    """Greedy adjacent-neighbour clustering over *column*.

    Walks the column top to bottom and starts a new cluster (incrementing
    *clusternum*) whenever the signature similarity between two adjacent
    values drops below *threshold*.  Returns (per-row cluster ids, the list
    of clusters, the last cluster id used).
    """
    labels = [clusternum]
    groups = [[column[0]]]
    previous = column[0]
    for idx in range(1, len(column)):
        current = column[idx]
        if similarity2(current, previous) >= threshold:
            groups[-1].append(current)
        else:
            clusternum += 1
            groups.append([current])
        labels.append(clusternum)
        previous = current
    return labels, groups, clusternum
# examples:
string='55 P - PUMPING (NON-FRAC)'
print(''.join(createsign(string)))  # ''.join is redundant: createsign already returns a str
# read table:
# Cluster every column of the csv by signature similarity, keeping a global
# running cluster counter across columns, then replace each cell with its
# signature.  table_matrix holds one cluster-id row per csv row.
csv=pd.read_csv('../../docs/demo2.png.csv')
clusternum=-1
table_matrix=[]
for i,column in enumerate(csv.keys()):
    clusternums, clusters, clusternum=simpleNN(csv[column],clusternum+1)
    csv[column]=csv[column].apply(lambda x:createsign(x))
    table_matrix.append(clusternums)
table_matrix=np.array(np.transpose(table_matrix))  # shape: (rows, columns)
print(table_matrix)
print(csv)
| [
"pandas.read_csv",
"math.sqrt",
"collections.Counter",
"jellyfish.jaro_winkler",
"jellyfish.levenshtein_distance",
"numpy.transpose"
] | [((3007, 3046), 'pandas.read_csv', 'pd.read_csv', (['"""../../docs/demo2.png.csv"""'], {}), "('../../docs/demo2.png.csv')\n", (3018, 3046), True, 'import pandas as pd\n'), ((1712, 1725), 'collections.Counter', 'Counter', (['text'], {}), '(text)\n', (1719, 1725), False, 'from collections import Counter\n'), ((2192, 2220), 'jellyfish.jaro_winkler', 'jellyfish.jaro_winkler', (['s', 't'], {}), '(s, t)\n', (2214, 2220), False, 'import jellyfish\n'), ((3308, 3334), 'numpy.transpose', 'np.transpose', (['table_matrix'], {}), '(table_matrix)\n', (3320, 3334), True, 'import numpy as np\n'), ((1541, 1556), 'math.sqrt', 'math.sqrt', (['sum1'], {}), '(sum1)\n', (1550, 1556), False, 'import math\n'), ((1559, 1574), 'math.sqrt', 'math.sqrt', (['sum2'], {}), '(sum2)\n', (1568, 1574), False, 'import math\n'), ((2028, 2064), 'jellyfish.levenshtein_distance', 'jellyfish.levenshtein_distance', (['s', 't'], {}), '(s, t)\n', (2058, 2064), False, 'import jellyfish\n')] |
import numpy as np
from screws.freeze.main import FrozenOnly
class NumBasis(FrozenOnly):
    """Numbers of local basis functions for the 2d CSCG form spaces.

    Every ``_2dCSCG_*Form_*`` property returns ``(num_basis,
    num_basis_components)``; the ``_2dCSCG_*Trace_*`` properties return
    ``(num_basis, num_basis_components, num_basis_onsides)``.

    The inner and outer counts coincide for the node (0-form), volume
    (2-form) and both trace spaces, so the duplicated bodies of the
    original implementation are factored into shared private helpers.
    """
    def __init__(self, FS):
        """"""
        assert FS.ndim == 2, " <NumBasis> "
        self._FS_ = FS
        self._freeze_self_()

    def _num_node_form(self):
        # One tensor-product component with prod_i (p_i + 1) nodal functions.
        _basis_ = 1
        for p_i in self._FS_.p:
            _basis_ *= p_i + 1
        return _basis_, (_basis_,)

    def _num_volume_form(self):
        # One component with prod_i p_i volume functions.
        _basis_ = np.prod(self._FS_.p)
        return _basis_, (_basis_,)

    def _num_edge_form(self, lower_along_axis):
        # 1-forms have one component per axis.  For the inner orientation the
        # degree is lowered along the component axis (lower_along_axis=True);
        # for the outer orientation it is raised instead.
        components = ()
        ndim = self._FS_.ndim
        for i in range(ndim):
            if lower_along_axis:
                p = [self._FS_.p[j] + 1 for j in range(ndim)]
                p[i] -= 1
            else:
                p = [self._FS_.p[j] for j in range(ndim)]
                p[i] += 1
            components += (np.prod(p),)
        return np.sum(components), components

    def _num_trace(self, nodal):
        # Per-side trace counts: U/D sides count with p[1], L/R with p[0];
        # nodal traces (0-trace) carry one extra function per side (p + 1).
        p = self._FS_.p
        extra = 1 if nodal else 0
        n_ud = p[1] + extra
        n_lr = p[0] + extra
        _basis_ = 2 * (n_ud + n_lr)
        _basis_components_ = {'U': (n_ud,), 'D': (n_ud,),
                              'L': (n_lr,), 'R': (n_lr,)}
        _basis_onsides_ = {'U': n_ud, 'D': n_ud,
                           'L': n_lr, 'R': n_lr}
        return _basis_, _basis_components_, _basis_onsides_

    @property
    def _2dCSCG_0Form_Inner(self):
        """(total, components) for the inner-oriented 0-form."""
        return self._num_node_form()

    @property
    def _2dCSCG_1Form_Inner(self):
        """(total, components) for the inner-oriented 1-form."""
        return self._num_edge_form(True)

    @property
    def _2dCSCG_2Form_Inner(self):
        """(total, components) for the inner-oriented 2-form."""
        return self._num_volume_form()

    @property
    def _2dCSCG_0Form_Outer(self):
        """(total, components) for the outer-oriented 0-form."""
        return self._num_node_form()

    @property
    def _2dCSCG_1Form_Outer(self):
        """(total, components) for the outer-oriented 1-form."""
        return self._num_edge_form(False)

    @property
    def _2dCSCG_2Form_Outer(self):
        """(total, components) for the outer-oriented 2-form."""
        return self._num_volume_form()

    @property
    def _2dCSCG_0Trace_Inner(self):
        """(total, components, per-side counts) for the inner 0-trace."""
        return self._num_trace(True)

    @property
    def _2dCSCG_1Trace_Inner(self):
        """(total, components, per-side counts) for the inner 1-trace."""
        return self._num_trace(False)

    @property
    def _2dCSCG_0Trace_Outer(self):
        """(total, components, per-side counts) for the outer 0-trace."""
        return self._num_trace(True)

    @property
    def _2dCSCG_1Trace_Outer(self):
        """(total, components, per-side counts) for the outer 1-trace."""
        return self._num_trace(False)
"numpy.sum",
"numpy.prod"
] | [((770, 796), 'numpy.sum', 'np.sum', (['_basis_components_'], {}), '(_basis_components_)\n', (776, 796), True, 'import numpy as np\n'), ((924, 944), 'numpy.prod', 'np.prod', (['self._FS_.p'], {}), '(self._FS_.p)\n', (931, 944), True, 'import numpy as np\n'), ((1553, 1579), 'numpy.sum', 'np.sum', (['_basis_components_'], {}), '(_basis_components_)\n', (1559, 1579), True, 'import numpy as np\n'), ((1707, 1727), 'numpy.prod', 'np.prod', (['self._FS_.p'], {}), '(self._FS_.p)\n', (1714, 1727), True, 'import numpy as np\n'), ((739, 749), 'numpy.prod', 'np.prod', (['p'], {}), '(p)\n', (746, 749), True, 'import numpy as np\n'), ((1522, 1532), 'numpy.prod', 'np.prod', (['p'], {}), '(p)\n', (1529, 1532), True, 'import numpy as np\n')] |
import pytest
from .fixtures import *
import numpy as np
def _invalid_table_type():
    # Raw ndarray where a table object is expected.
    frame = make_table(astype='pandas')
    return [TABLE_NAME, frame.values], {}


def _invalid_index_dtype():
    # Floats are not a supported index dtype.
    frame = make_table(astype='pandas')
    frame = frame.set_index(np.random.random(size=30))
    return [TABLE_NAME, frame], {}


def _duplicate_index():
    # Concatenating two 15-row tables duplicates every index value.
    first_half = make_table(astype='pandas', rows=15)
    second_half = make_table(astype='pandas', rows=15)
    return [TABLE_NAME, pd.concat([first_half, second_half])], {}


def _index_not_in_cols():
    # Request an index column ('c2') the table does not contain.
    frame = make_table(cols=2, astype='polars')
    frame.column_names = ['c0', 'c1']
    return [TABLE_NAME, frame], {'index': 'c2'}


def _invalid_col_names_dtype():
    # One non-string column name (3) among strings.
    frame = make_table(astype='pandas')
    frame.columns = ['c0', 'c1', 'c2', 3, 'c4']
    return [TABLE_NAME, frame], {}


def _forbidden_col_name():
    # 'like' is a forbidden column name.
    frame = make_table(cols=1, astype='pandas')
    frame.columns = ['like']
    return [TABLE_NAME, frame], {}


def _duplicate_col_names():
    frame = make_table(cols=2, astype='pandas')
    frame.columns = ['c0', 'c0']
    return [TABLE_NAME, frame], {}


def _invalid_warnings_arg():
    return [TABLE_NAME, make_table()], {'warnings': 'abcd'}


def _invalid_errors_arg():
    return [TABLE_NAME, make_table()], {'errors': 'abcd'}


def _invalid_partition_size_dtype():
    return [TABLE_NAME, make_table()], {'partition_size': 'abcd'}
# Each case supplies invalid (args, kwargs) for write_table together with
# the exception type it must raise.
@pytest.mark.parametrize(
    ("arguments", "exception"),
    [
        (_invalid_table_type(), TypeError),
        (_invalid_index_dtype(), TypeError),
        (_duplicate_index(), IndexError),
        (_index_not_in_cols(), IndexError),
        (_invalid_col_names_dtype(), TypeError),
        (_forbidden_col_name(), ValueError),
        (_duplicate_col_names(), IndexError),
        (_invalid_warnings_arg(), ValueError),
        (_invalid_errors_arg(), ValueError),
        (_invalid_partition_size_dtype(), TypeError),
    ],
    ids=[
        "_invalid_table_type",
        "_invalid_index_dtype",
        "_duplicate_index",
        "_index_not_in_cols",
        "_invalid_col_names_dtype",
        "_forbidden_col_name",
        "_duplicate_col_names",
        "_invalid_warnings_arg",
        "_invalid_errors_arg",
        "_invalid_partition_size_dtype",
    ],
)
def test_can_write(store, arguments, exception):
    """write_table must reject each kind of invalid input with the expected exception."""
    # Arrange
    arguments, kwargs = arguments
    # Act
    with pytest.raises(exception) as e:
        store.write_table(*arguments, **kwargs)
    # Assert
    assert isinstance(e.type(), exception)
def test_trying_to_overwrite_existing_table(store):
    """Writing over an existing table with errors='raise' must fail."""
    # Arrange
    original_df = make_table()
    store.write_table(TABLE_NAME, make_table())
    table = store.select_table(TABLE_NAME)
    # Act & Assert
    with pytest.raises(FileExistsError) as exc_info:
        table.write(original_df, errors='raise')
    assert isinstance(exc_info.type(), FileExistsError)
def test_overwriting_existing_table(store):
    """errors='ignore' silently replaces an existing table."""
    # Arrange
    expected = make_table()
    store.write_table(TABLE_NAME, make_table())
    table = store.select_table(TABLE_NAME)
    # Act
    table.write(expected, errors='ignore')
    # Assert
    assert table.read_arrow().equals(expected)
def _invalid_table_name_dtype():
return 21, dict()
def _invalid_row_dtype():
return TABLE_NAME, {'rows': 14}
def _invalid_row_elements_dtype():
return TABLE_NAME, {'rows': [5, 'ab', 7.13]}
def _rows_not_in_table():
return TABLE_NAME, {'rows': [0, 1, 3334]}
def _invalid_col_dtype():
return TABLE_NAME, {'cols': 14}
def _invalid_col_elements_dtype():
return TABLE_NAME, {'cols': ['c1', 7.13]}
def _cols_not_in_table():
    """Keyword args naming a column that does not exist in the table."""
    return TABLE_NAME, dict(cols=['c0', 'c1', 'c3334'])
@pytest.mark.parametrize(
    ("arguments", "exception"),
    [
        (_invalid_table_name_dtype(), TypeError),
        (_invalid_row_dtype(), TypeError),
        (_invalid_row_elements_dtype(), TypeError),
        (_rows_not_in_table(), IndexError),
        (_invalid_col_dtype(), TypeError),
        (_invalid_col_elements_dtype(), TypeError),
        (_cols_not_in_table(), IndexError),
    ],
    ids=[
        "_invalid_table_name_dtype",
        "_invalid_row_dtype",
        "_invalid_row_elements_dtype",
        "_rows_not_in_table",
        "_invalid_col_dtype",
        "_invalid_col_elements_dtype",
        "_cols_not_in_table",
    ],
)
def test_can_read(store, arguments, exception):
    """read_pandas must reject each flavour of malformed read request."""
    table_name, kwargs = arguments
    store.write_table(TABLE_NAME, make_table())
    with pytest.raises(exception) as excinfo:
        store.read_pandas(table_name, **kwargs)
    assert isinstance(excinfo.type(), exception)
def test_read_when_no_table_exists(store):
    """Reading a never-written table raises FileNotFoundError."""
    expected_error = FileNotFoundError
    table = store.select_table(TABLE_NAME)
    with pytest.raises(expected_error) as excinfo:
        table.read_pandas()
    assert not table.exists()
    assert isinstance(excinfo.type(), expected_error)
| [
"numpy.random.random",
"pytest.raises"
] | [((282, 307), 'numpy.random.random', 'np.random.random', ([], {'size': '(30)'}), '(size=30)\n', (298, 307), True, 'import numpy as np\n'), ((2787, 2811), 'pytest.raises', 'pytest.raises', (['exception'], {}), '(exception)\n', (2800, 2811), False, 'import pytest\n'), ((3163, 3187), 'pytest.raises', 'pytest.raises', (['EXCEPTION'], {}), '(EXCEPTION)\n', (3176, 3187), False, 'import pytest\n'), ((4953, 4977), 'pytest.raises', 'pytest.raises', (['exception'], {}), '(exception)\n', (4966, 4977), False, 'import pytest\n'), ((5229, 5253), 'pytest.raises', 'pytest.raises', (['EXCEPTION'], {}), '(EXCEPTION)\n', (5242, 5253), False, 'import pytest\n')] |
import numpy as np
import torch
import torch.nn as nn
def aggr_by_one(model, index_list=None):
    """Prune one more weight per kernel: zero the smallest-magnitude entry
    that is still alive in every (out, in) 3x3 kernel of the given conv
    layers, and record the pruned positions in ``model.aggr_mask``.

    If ``index_list`` is None the interior conv layers
    (``model.conv_index[1:-1]``) are pruned.
    """
    if not hasattr(model, 'aggr_mask'):
        model.aggr_mask = dict()
    if index_list is None:
        index_list = model.conv_index[1:-1]
    for layer_idx in index_list:
        weights = model.features[layer_idx].weight.data.cpu().numpy()
        if layer_idx not in model.aggr_mask.keys():
            model.aggr_mask[layer_idx] = np.ones_like(weights)
        n_out, n_in, ksize, _ = weights.shape
        for out_c in range(n_out):
            for in_c in range(n_in):
                magnitudes = np.squeeze(np.abs(weights[out_c, in_c, ...]))
                # push already-pruned (exact zero) entries out of the argmin
                magnitudes[magnitudes == 0] = 1000.
                flat_idx = np.argmin(magnitudes)
                row, col = divmod(int(flat_idx), ksize)
                weights[out_c, in_c, row, col] = 0.
                model.aggr_mask[layer_idx][out_c, in_c, row, col] = 0.
        model.features[layer_idx].weight = nn.Parameter(torch.from_numpy(weights).cuda())
def mask_aggr_gradient(model, index_list=None):
    """Zero the gradient at every pruned weight position so optimizer steps
    cannot revive weights that ``aggr_by_one`` has removed.

    Layers without an entry in ``model.aggr_mask`` are left untouched.
    """
    if index_list is None:
        # the first and last conv layers are never pruned
        index_list = model.conv_index[1:-1]
    for layer_idx in index_list:
        if layer_idx not in model.aggr_mask.keys():
            continue  # layer not aggregated yet -> nothing to mask
        mask = model.aggr_mask[layer_idx]
        if type(mask) == np.ndarray:
            mask = torch.from_numpy(mask).cuda()
        grad = model.features[layer_idx].weight.grad
        grad.data = torch.mul(grad.data, mask)
def aggr_select_layer(model, index, aggr_method='max', mode='cpu', get_mask=False):
    """Collapse every 3x3 kernel of conv layer ``index`` to a single non-zero value.

    aggr_method:
        'max'      -- keep the largest-|w| entry at its own position.
        'sum'      -- put the kernel's sum at the largest-|w| position.
        'weighted' -- put the kernel's sum at the weight-centroid position.
    If get_mask is True, a 0/1 tensor marking the kept positions is stored in
    model.aggr_mask[index].  Only mode='cpu' is implemented.
    """
    if not hasattr(model, 'aggr_mask'):
        model.aggr_mask = dict()
    W = model.features[index].weight.data
    if mode == 'cpu':
        W_arr = W.cpu().numpy()
        if get_mask:
            mask = np.zeros_like(W_arr)
        ch_out, ch_in, ksize, _ = W_arr.shape
        assert ksize == 3
        for i in range(ch_out):
            for j in range(ch_in):
                # flat index of the largest-magnitude weight in this kernel
                m_ind = np.argmax(np.abs(W_arr[i, j, ...]))
                m_row = int(m_ind / ksize)
                m_col = m_ind % ksize
                if aggr_method == 'max':
                    m_val = W_arr[i, j, m_row, m_col]
                elif aggr_method == 'sum':
                    m_val = np.sum(W_arr[i, j, ...]) # TODO
                elif aggr_method == 'weighted':
                    # weight-centroid of the kernel; NOTE(review): with negative
                    # weights the rounded centroid may fall outside [0, ksize) --
                    # confirm inputs before relying on this branch
                    ss_x = 0.
                    ss_y = 0.
                    for k_i in range(ksize):
                        for k_j in range(ksize):
                            ss_x += k_i * W_arr[i, j, k_i, k_j]
                            ss_y += k_j * W_arr[i, j, k_i, k_j]
                    ss_x /= np.sum(W_arr[i, j, ...])
                    ss_y /= np.sum(W_arr[i, j, ...])
                    m_row = int(round(ss_x))
                    m_col = int(round(ss_y))
                    m_val = np.sum(W_arr[i, j, ...])
                else:
                    raise NotImplementedError
                if get_mask:
                    mask[i, j, m_row, m_col] = 1. # only the selected position is preserved
                W_arr[i, j, ...] = np.zeros([ksize, ksize])
                W_arr[i, j, m_row, m_col] = m_val
        del model.features[index].weight
        model.features[index].weight = nn.Parameter(torch.from_numpy(W_arr).cuda())
        if get_mask:
            model.aggr_mask[index] = torch.from_numpy(mask).cuda()
    else:
        raise NotImplementedError
def reset_aggr_mask(model):
    """Discard any pruning bookkeeping on ``model`` and install a fresh,
    empty ``aggr_mask`` dictionary."""
    if hasattr(model, 'aggr_mask'):
        del model.aggr_mask
    model.aggr_mask = {}
| [
"torch.mul",
"numpy.ones_like",
"numpy.abs",
"torch.from_numpy",
"numpy.sum",
"numpy.zeros",
"numpy.argmin",
"numpy.zeros_like"
] | [((1552, 1605), 'torch.mul', 'torch.mul', (['model.features[ind].weight.grad.data', 'mask'], {}), '(model.features[ind].weight.grad.data, mask)\n', (1561, 1605), False, 'import torch\n'), ((426, 445), 'numpy.ones_like', 'np.ones_like', (['W_arr'], {}), '(W_arr)\n', (438, 445), True, 'import numpy as np\n'), ((1901, 1921), 'numpy.zeros_like', 'np.zeros_like', (['W_arr'], {}), '(W_arr)\n', (1914, 1921), True, 'import numpy as np\n'), ((704, 726), 'numpy.argmin', 'np.argmin', (['this_kernel'], {}), '(this_kernel)\n', (713, 726), True, 'import numpy as np\n'), ((3197, 3221), 'numpy.zeros', 'np.zeros', (['[ksize, ksize]'], {}), '([ksize, ksize])\n', (3205, 3221), True, 'import numpy as np\n'), ((600, 624), 'numpy.abs', 'np.abs', (['W_arr[i, j, ...]'], {}), '(W_arr[i, j, ...])\n', (606, 624), True, 'import numpy as np\n'), ((967, 990), 'torch.from_numpy', 'torch.from_numpy', (['W_arr'], {}), '(W_arr)\n', (983, 990), False, 'import torch\n'), ((1475, 1497), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (1491, 1497), False, 'import torch\n'), ((2095, 2119), 'numpy.abs', 'np.abs', (['W_arr[i, j, ...]'], {}), '(W_arr[i, j, ...])\n', (2101, 2119), True, 'import numpy as np\n'), ((3365, 3388), 'torch.from_numpy', 'torch.from_numpy', (['W_arr'], {}), '(W_arr)\n', (3381, 3388), False, 'import torch\n'), ((3455, 3477), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (3471, 3477), False, 'import torch\n'), ((2368, 2392), 'numpy.sum', 'np.sum', (['W_arr[i, j, ...]'], {}), '(W_arr[i, j, ...])\n', (2374, 2392), True, 'import numpy as np\n'), ((2759, 2783), 'numpy.sum', 'np.sum', (['W_arr[i, j, ...]'], {}), '(W_arr[i, j, ...])\n', (2765, 2783), True, 'import numpy as np\n'), ((2812, 2836), 'numpy.sum', 'np.sum', (['W_arr[i, j, ...]'], {}), '(W_arr[i, j, ...])\n', (2818, 2836), True, 'import numpy as np\n'), ((2955, 2979), 'numpy.sum', 'np.sum', (['W_arr[i, j, ...]'], {}), '(W_arr[i, j, ...])\n', (2961, 2979), True, 'import numpy as np\n')] 
|
import numpy
def reshape(a1, a2):
    """Transpose ``a2``, reshape it to the (rows, cols) given in ``a1`` and
    print it; then print its flattened, sorted contents.

    a1 -- two-element sequence with the target shape.
    a2 -- nested sequence convertible to an integer matrix.
    """
    # FIX: the original named this local ``np``, shadowing the conventional
    # NumPy alias -- renamed for clarity (behaviour unchanged).
    shape = numpy.array(a1, int)
    mat = numpy.array(a2, int)
    mat = numpy.transpose(mat)
    mat = mat.reshape((int(shape[0]), int(shape[1])))
    print(mat)
    flat = mat.flatten()
    flat.sort()  # in-place sort of the flattened copy
    print(flat)
if __name__ == "__main__":
    # Read whitespace-separated values from stdin until EOF: the first line
    # holds the target shape, every later line one row of the matrix.
    header = []
    rows = []
    line_no = 0
    while True:
        try:
            line = input()
        except EOFError:
            break
        if line_no == 0:
            header = line.strip().split(" ")
        else:
            rows.append(line.strip().split(" "))
        line_no += 1
    data = numpy.array(rows, int)
    # FIX: the original called data.transpose() and discarded the result
    # (ndarray.transpose is not in-place), so the call was a no-op; removed.
    # Unused accumulators c1/c2 were dropped as well.
    reshape(header, data)
| [
"numpy.array",
"numpy.transpose"
] | [((44, 64), 'numpy.array', 'numpy.array', (['a2', 'int'], {}), '(a2, int)\n', (55, 64), False, 'import numpy\n'), ((74, 94), 'numpy.array', 'numpy.array', (['a1', 'int'], {}), '(a1, int)\n', (85, 94), False, 'import numpy\n'), ((104, 123), 'numpy.transpose', 'numpy.transpose', (['np'], {}), '(np)\n', (119, 123), False, 'import numpy\n'), ((612, 631), 'numpy.array', 'numpy.array', (['c', 'int'], {}), '(c, int)\n', (623, 631), False, 'import numpy\n')] |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(r'D:\DeepLearning\Kaggle\Datahandling')
import utils_for_datasets
import glob
import numpy as np
import cv2
import re
import os.path
import scipy
import time
from skimage.measure import label
import skimage.transform as ski_transform
import matplotlib.pyplot as plt
# Dataset location constants (Windows paths; several are non-raw strings whose
# backslashes happen not to form escape sequences -- edit with care).
DATASETROOT = 'CVSP\Cameratrap'
DATASETROOT_CVL = 'CVSP\CVL'
UNETROOT = 'D:\DeepLearning\Semantic_segmentation\Cameratrap_Dataset'
UNETROOT_CVL = 'D:\DeepLearning\Semantic_segmentation\CVL_Dataset'
DATASET_FOLDER_TISQUANT = r'D:\DeepLearning\SCCHCode\TisQuantValidation\data'
#DATASET_FOLDER_KAGGLE = r'D:\\DeepLearning\\SCCHCode\\data\\kaggle-dsbowl-2018-dataset-fixes-master\\stage1_train'
#DATASET_FOLDER_KAGGLE = r'D:\\DeepLearning\\SCCHCode\\data\\Kaggle\\stage1_train'
from scipy.io import loadmat, savemat
from tifffile import tifffile
from Config.Config import UNETSettings
from tqdm import tqdm
class TisquantDataset(utils_for_datasets.Dataset):
    """TisQuant nuclei dataset read from pre-sliced 256x256 .mat files."""
    def load_data(self,width=None,height=None,ids=None,mode=1):
        """Load images and labelled masks from the evaluation .mat file.

        mode 1 loads the training set (ids shuffled); any other mode loads
        the test set in file order.  Returns the resulting id order.
        """
        self.add_class("Nuclei",1,'Nucleus')
        if (mode==1):
            data_file = "256x256_TisQuantTrainingData_Evaluation1_new.mat"
        else:
            data_file = "256x256_TisQuantTestData_Evaluation1_new.mat"
        print('... LOADING DATA')
        Images, Labels, FileNames = [], [], []
        raw_data = loadmat(os.path.join(DATASET_FOLDER_TISQUANT, data_file), struct_as_record=True)
        if (mode==1):
            raw_data = raw_data['trainingset']
        else:
            raw_data = raw_data['testset']
        Images, Masks = [], []
        slice_size = 256
        masks = raw_data['groundtruth'][0]
        raw_images = raw_data['rawimage'][0]
        n_images = len(raw_images)
        for i,img in enumerate(raw_images):
            #img_new = np.zeros((3, img.shape[0], img.shape[1]))
            #img_new[0] = img
            #img_new[1] = img
            #img_new[2] = img
            #Images.append(img_new / 255.0)
            #Images.append(img / 255.0)
            #Images.append(img / 255.0)
            Images.append(img)
            #Masks.append(label(masks[i]>0))
            Masks.append(masks[i])
        # convert to conv net format
        img_size = Images[0].shape
        #Images = np.asarray(Images, dtype=np.float32).reshape(-1, img_size[0], img_size[1],img_size[2])
        #Images = np.transpose(Images, (0, 2, 3, 1))
        Images = np.asarray(Images, dtype=np.float32).reshape(-1, img_size[0], img_size[1])
        #Masks = np.asarray(Masks, dtype=np.float32).reshape(-1, 1, img_size[1], img_size[2])
        #Masks = np.transpose(Masks, (0, 2, 3, 1))
        Masks = np.asarray(Masks, dtype=np.float32).reshape(-1, img_size[0], img_size[1])
        train_val = 0.8
        ret_val = 0
        n_tr = int(round(Images.shape[0] * 0.8))
        ids = np.arange(Images.__len__())
        if (mode == 1): # Trainingset: shuffle so train/val split is random
            np.random.shuffle(ids)
        self.images = Images
        self.masks = Masks
        for i in range(self.images.shape[0]):
            self.add_image("Nuclei", image_id=i, path=None,width=width, height=height)
        # first 80% of the (shuffled) images are treated as training data
        self.train_cnt = int(self.images.__len__()*0.8)
        #self.images = np.transpose(self.images,(0,3,1,2))
        #self.masks = np.transpose(self.masks,(0,3,1,2))
        return ids
    def getMeanMaskObjectSize(self, image_id):
        """Mean pixel area over the instance layers of this image (0 if empty)."""
        masks = self.load_mask(image_id)
        masks_new = masks[0][:, :, 1:]  # drop the background layer
        print("Summe: {0}, Laenge: {1}".format(masks_new.sum(), masks_new.shape[2]))
        if (np.isnan(masks_new.sum() / masks_new.shape[2])):
            return 0
        else:
            return int(masks_new.sum() / masks_new.shape[2])
    def load_image(self, image_id):
        # images were fully materialised in load_data
        return self.images[image_id]
    def image_reference(self, image_id):
        """Return the shapes data of the image."""
        info = self.image_info[image_id]
        if info["source"] == "shapes":
            return info["shapes"]
        else:
            super(self.__class__).image_reference(self, image_id)
    def load_mask(self, image_id):
        """Generate instance masks for shapes of the given image ID.

        Returns an HxWx(count+1) uint8 stack (layer per label value, layer 0
        is the background) plus the matching class-id vector.
        """
        info = self.image_info[image_id]
        mask = self.masks[image_id]
        count = int(mask.max())
        mask_new = np.zeros([info['height'], info['width'], count+1], dtype=np.uint8) # one more for background
        for i in range(count+1):
            #mask_new[:, :, i:i+1] = (mask == i).transpose(1, 2, 0)
            mask_new[:, :, i:i + 1] = (mask==i).reshape(mask.shape[0], mask.shape[1], -1)
            # mask_new[:, :, i:i+1] = (mask==i).transpose(1,2,0)
        # Map class names to class IDs.
        class_ids = np.ones(count+1) # one more fore background
        #add Background
        #class_ids[count] = 0 # add Background
        #mask_new[:, :, count:count + 1] = (mask == 0).transpose(1, 2, 0)
        #class_ids[count] = 0 # add Background
        class_ids[0] = 0 # add Background
        # End add Background
        return mask_new, class_ids.astype(np.int32)
    def load_mask_one_layer(self,image_id):
        # labelled (integer) mask with all instances in a single 2-D layer
        return self.masks[image_id]#[0]
class KaggleDataset(utils_for_datasets.Dataset):
    """Kaggle DSB-2018-style dataset: one folder per sample containing an
    ``images/`` png and a ``masks/`` directory with one png per instance."""

    def load_data(self, width=None, height=None, ids=None, mode=1, folders=None):
        """Collect sample paths under ``folders`` and register them in a
        deterministic shuffled order.  Returns the shuffled ids."""
        self.image_path = []
        self.mask_path = []
        self.add_class("Nucleus", 1, 'Nucleus')
        self.setImagePaths(folders)
        ids = np.arange(self.image_path.__len__())
        np.random.seed(1)  # deterministic shuffle across runs
        np.random.shuffle(ids)
        self.ids = ids
        for i in self.ids:
            self.add_image("Nucleus", image_id=i, path=None)
        return ids

    def load_image(self, image_id):
        """Read the BGR image belonging to (shuffled) ``image_id``."""
        img = cv2.imread(self.image_path[self.ids[image_id]])
        return img

    def setImagePaths(self, folders=""):
        """For every sample folder record each image png and the folder's
        shared masks directory (one mask-dir entry per image)."""
        for folder in os.listdir(folders):
            file_pattern = os.path.join(folders, folder, 'images', "*.png")
            img_files = glob.glob(file_pattern)
            for i in img_files:
                self.image_path.append(i)
                self.mask_path.append(os.path.join(folders, folder, 'masks'))

    def image_reference(self, image_id):
        """Return the shapes data of the image."""
        info = self.image_info[image_id]
        if info["source"] == "shapes":
            return info["shapes"]
        else:
            super(self.__class__).image_reference(self, image_id)

    def load_mask(self, image_id):
        """Generate instance masks for shapes of the given image ID.

        Layer 0 is the background, layers 1..N hold one binary mask each.
        """
        mask_path = self.mask_path[self.ids[image_id]]
        file_pattern = os.path.join(mask_path, "*.png")
        info = self.image_info[image_id]
        mask_files = glob.glob(file_pattern)
        mask_new = np.zeros([info['height'], info['width'], mask_files.__len__() + 1], dtype=np.uint8)  # one more for background
        count = 1
        mask_total = 0
        for i in mask_files:
            mask = cv2.imread(i)
            mask = mask[:, :, 1] / 255.0  # masks are stored as 0/255 pngs
            mask_new[:, :, count] = (mask)
            mask_total = mask_total + (mask > 0) * count
            count = count + 1
        # Map class names to class IDs.
        class_ids = np.ones(count)
        class_ids[0] = 0  # background
        # FIX: np.bool was removed in NumPy 1.24 -- use the builtin bool
        mask_new[:, :, 0] = np.invert(mask_total.astype(bool))
        return mask_new, class_ids.astype(np.int32)

    def load_mask_one_layer(self, image_id):
        """Single labelled 2-D mask: pixel value = instance index; later
        instances overwrite earlier ones on overlap."""
        mask_path = self.mask_path[self.ids[image_id]]
        file_pattern = os.path.join(mask_path, "*.png")
        info = self.image_info[image_id]
        mask_files = glob.glob(file_pattern)
        # NOTE(review): width/height are swapped relative to load_mask -- only
        # safe for square images; confirm the intended layout before changing.
        mask_new = np.zeros([info['width'], info['height'], mask_files.__len__() + 1], dtype=np.uint8)
        count = 1
        mask_total = 0
        for i in mask_files:
            mask = cv2.imread(i)
            mask = mask[:, :, 1] / 255.0
            mask_new[:, :, count] = (mask)
            mask_total = mask_total * (mask == 0)  # clear overlaps first
            mask_total = mask_total + (mask > 0) * count
            count = count + 1
        return mask_total

    def getMeanMaskObjectSize(self, image_id):
        """Mean foreground pixel count over this sample's mask files."""
        mask_path = self.mask_path[self.ids[image_id]]
        file_pattern = os.path.join(mask_path, "*.png")
        mask_files = glob.glob(file_pattern)
        total_sum = 0
        for i in mask_files:
            mask = cv2.imread(i)
            total_sum = total_sum + (mask > 0).sum()
        return (total_sum / mask_files.__len__()).astype(np.int16)

    def pre_process_img(self, img, color):
        """Convert a BGR image to gray/RGB and scale to [0, 1] float32."""
        # FIX: compare strings with '==' ('is' relies on CPython interning)
        if color == 'gray':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        elif color == 'rgb':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        else:
            pass
        img = img.astype(np.float32)
        img /= 255.0
        return img
class ArtificialNucleiDataset(utils_for_datasets.Dataset):
    """Synthetic-nuclei dataset reading Img_<n>-outputs.png / Mask_<n>.tif
    file pairs from the directories named in the active UNET settings."""

    img_prefix = 'Img_'
    img_postfix = '-outputs.png'
    mask_prefix = 'Mask_'
    mask_postfix = '.tif'
    settings = UNETSettings()  # evaluated once, at class-definition time

    def load_data(self, width=256, height=256, ids=None, mode=1):
        """Populate image/mask path lists from the configured directories.

        In 'train' mode both train and val directories are loaded and their
        id ranges shuffled separately; otherwise only the test directories
        are loaded in order.
        """
        self.image_path = []
        self.mask_path = []
        self.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
        train_cnt = 0
        val_cnt = 0
        print("Loading train data ...")
        if self.settings.network_info["traintestmode"] == 'train':
            for i in self.settings.network_info["dataset_dirs_train"].split(';'):
                img_range = self.setImagePaths(folders=[i + "\\images"])
                self.setMaskPaths(folders=[i + "\\masks"], img_range=img_range)
            print("Checking train path ...")
            self.checkPath()
            print("Loading val data ...")
            train_cnt = self.image_path.__len__()
            for i in self.settings.network_info["dataset_dirs_val"].split(';'):
                img_range = self.setImagePaths(folders=[i + "\\images"])
                self.setMaskPaths(folders=[i + "\\masks"], img_range=img_range)
            print("Checking val path ...")
            self.checkPath()
            val_cnt += self.image_path.__len__() - train_cnt
            ids_train = np.arange(0, train_cnt)
            ids_val = np.arange(train_cnt, train_cnt + val_cnt)
            self.train_cnt = train_cnt
            self.val_cnt = val_cnt
            np.random.shuffle(ids_train)
            np.random.shuffle(ids_val)
            self.ids = np.concatenate((ids_train, ids_val), axis=0)
        else:
            for i in self.settings.network_info["dataset_dirs_test"].split(';'):
                img_range = self.setImagePaths(folders=[i + "\\images"])
                self.setMaskPaths(folders=[i + "\\masks"], img_range=img_range)
            print("Checking train path ...")
            self.checkPath()
            self.ids = np.arange(0, self.image_path.__len__())
        for i in self.ids:
            self.add_image("ArtificialNuclei", image_id=i, path=None, width=width, height=height)
        return ids

    def checkPath(self):
        """Drop path entries whose image file does not exist on disk."""
        to_delete = []
        for index, i in tqdm(enumerate(self.image_path)):
            if not os.path.exists(i):
                to_delete.append(index)
        to_delete.sort(reverse=True)  # delete back-to-front so indices stay valid
        for i in to_delete:
            del self.image_path[i]
            del self.mask_path[i]

    def load_image(self, image_id):
        """Read one image; 2-D grayscale, or HxWx3 when Mask R-CNN is active."""
        img_final = cv2.imread(self.image_path[self.ids[image_id]])
        try:
            img_final = img_final[:, :, 0]
        # FIX: was a bare except (caught SystemExit/KeyboardInterrupt too)
        except Exception:
            pass  # already single-channel
        if self.settings.network_info["netinfo"] == 'maskrcnn':  # mask rcnn needs an rgb image
            img_new = np.zeros((img_final.shape[0], img_final.shape[1], 3))
            img_new[:, :, 0] = img_new[:, :, 1] = img_new[:, :, 2] = img_final
            img_final = img_new
        return img_final

    def setImagePaths(self, folders=""):
        """Glob Img_<n>-outputs.png files and append their paths in index
        order.  Returns the index range of the last folder processed."""
        for folder in folders:
            file_pattern = os.path.join(folder, self.img_prefix + "*" + self.img_postfix)
            print(file_pattern)
            img_files = glob.glob(file_pattern)
            img_files.sort()
            img_range = range(0, img_files.__len__())
            for i in img_range:
                self.image_path.append(os.path.join(folder, self.img_prefix + str(i) + self.img_postfix))
        return img_range

    def setMaskPaths(self, folders="", img_range=None):
        """Append the mask path matching each image index in ``img_range``."""
        for folder in folders:
            file_pattern = os.path.join(folder, self.mask_prefix + "*" + self.mask_postfix)
            print(file_pattern)
            img_files = glob.glob(file_pattern)
            img_files.sort()
            for i in img_range:
                self.mask_path.append(os.path.join(folder, self.mask_prefix + str(i) + self.mask_postfix))

    def image_reference(self, image_id):
        """Return the shapes data of the image."""
        info = self.image_info[image_id]
        if info["source"] == "shapes":
            return info["shapes"]
        else:
            super(self.__class__).image_reference(self, image_id)

    def load_mask(self, image_id):
        """Generate instance masks for shapes of the given image ID.

        Returns an HxWxN uint8 stack with one layer per non-zero label value.
        """
        info = self.image_info[image_id]
        mask = tifffile.imread(self.mask_path[self.ids[image_id]])
        labels = np.unique(mask)  # hoisted: was recomputed three times
        if labels.__len__() > 1:
            count = labels.__len__() - 1  # one less because of background label 0
            mask_new = np.zeros([info['height'], info['width'], count], dtype=np.uint8)
            running = 0
            for i in labels:
                if ((i > 0) & ((mask == i).sum() > 0)):
                    mask_new[:, :, running] = (mask == i)
                    running = running + 1
            # Map class names to class IDs.
            class_ids = np.ones(count)
        else:
            mask_new = np.zeros([info['height'], info['width'], 1], dtype=np.uint8)
            class_ids = np.zeros([1])
        return mask_new, class_ids.astype(np.int32)

    def load_mask_one_layer(self, image_id, relabel=False):
        """Return the labelled mask as one 2-D array; with ``relabel`` the
        labels are renumbered consecutively starting at 1."""
        mask = tifffile.imread(self.mask_path[self.ids[image_id]])
        if (mask.ndim > 2):
            mask = mask[:, :, 0]
        if (relabel):
            mask_tmp = np.zeros((mask.shape[0], mask.shape[1]))
            running = 1
            for i in np.unique(mask):
                if i > 0:
                    mask_tmp = mask_tmp + running * (mask == i)
                    running = running + 1
            # FIX: np.float was removed in NumPy 1.24 -- use the builtin float
            mask = mask_tmp.astype(float)
        return mask

    def pre_process_img(self, img, color):
        """Convert a BGR image to gray/RGB and scale to [0, 1] float32."""
        # FIX: compare strings with '==' ('is' relies on CPython interning)
        if color == 'gray':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        elif color == 'rgb':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        else:
            pass
        img = img.astype(np.float32)
        img /= 255.0
        return img

    def split_train_test(self, width=256, height=256):
        """Split this dataset's collected paths into two prepared datasets
        (train = first train_cnt paths, val = the rest)."""
        dataset_train = ArtificialNucleiDataset()
        dataset_test = ArtificialNucleiDataset()
        dataset_train.image_path = []
        dataset_train.mask_path = []
        dataset_train.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
        dataset_test.image_path = []
        dataset_test.mask_path = []
        dataset_test.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
        self.ids = []
        dataset_train.image_path.extend(self.image_path[0:self.train_cnt])
        dataset_train.mask_path.extend(self.mask_path[0:self.train_cnt])
        dataset_train.train_cnt = self.image_path.__len__()
        dataset_test.image_path.extend(self.image_path[self.train_cnt:])
        dataset_test.mask_path.extend(self.mask_path[self.train_cnt:])
        dataset_test.train_cnt = self.image_path.__len__() - self.train_cnt
        ids_train = np.arange(0, self.train_cnt)
        ids_val = np.arange(0, self.val_cnt)
        np.random.shuffle(ids_train)
        np.random.shuffle(ids_val)
        dataset_train.ids = ids_train
        dataset_test.ids = ids_val
        for i in dataset_train.ids:
            dataset_train.add_image("ArtificialNuclei", image_id=i, path=None, width=width, height=height)
        for i in dataset_test.ids:
            dataset_test.add_image("ArtificialNuclei", image_id=i, path=None, width=width, height=height)
        dataset_train.prepare()
        dataset_test.prepare()
        return dataset_train, dataset_test
class TisquantDatasetNew(ArtificialNucleiDataset):
    """TisQuant data laid out as Img_<parent>_<n>.jpg / Mask_<parent>_<n>.tif,
    where <parent> is the grandparent directory name."""

    def setImagePaths(self, folders=""):
        """Collect image paths; the file prefix embeds the parent folder name."""
        self.img_postfix = ".jpg"
        for folder in folders:
            parts = folder.split('\\')
            # the grandparent directory name is baked into every file name
            self.img_prefix = "Img_" + parts[len(parts) - 2] + "_"
            pattern = os.path.join(folder, self.img_prefix + "*" + self.img_postfix)
            print(pattern)
            matches = glob.glob(pattern)
            matches.sort()
            img_range = range(0, len(matches))
            for idx in img_range:
                self.image_path.append(os.path.join(folder, self.img_prefix + str(idx) + self.img_postfix))
        return img_range

    def setMaskPaths(self, folders="", img_range=None):
        """Append the mask path matching each image index in ``img_range``."""
        self.mask_postfix = ".tif"
        for folder in folders:
            parts = folder.split('\\')
            self.mask_prefix = "Mask_" + parts[len(parts) - 2] + "_"
            pattern = os.path.join(folder, self.mask_prefix + "*" + self.mask_postfix)
            print(pattern)
            matches = glob.glob(pattern)
            matches.sort()
            for idx in img_range:
                self.mask_path.append(os.path.join(folder, self.mask_prefix + str(idx) + self.mask_postfix))
class SpecificNucleiDataset(ArtificialNucleiDataset):
    """Variant with fixed file prefixes: Specific_<n>-outputs.png images and
    Specific_Mask_<n>.tif masks."""

    def setImagePaths(self, folders=""):
        """Collect Specific_<n>-outputs.png image paths in index order."""
        self.img_postfix = "-outputs.png"
        for folder in folders:
            self.img_prefix = "Specific_"
            pattern = os.path.join(folder, self.img_prefix + "*" + self.img_postfix)
            print(pattern)
            matches = glob.glob(pattern)
            matches.sort()
            img_range = range(0, len(matches))
            for idx in img_range:
                self.image_path.append(os.path.join(folder, self.img_prefix + str(idx) + self.img_postfix))
        return img_range

    def setMaskPaths(self, folders="", img_range=None):
        """Append the Specific_Mask_<n>.tif path for each index in img_range."""
        self.mask_postfix = ".tif"
        for folder in folders:
            self.mask_prefix = "Specific_Mask_"
            pattern = os.path.join(folder, self.mask_prefix + "*" + self.mask_postfix)
            print(pattern)
            matches = glob.glob(pattern)
            matches.sort()
            for idx in img_range:
                self.mask_path.append(os.path.join(folder, self.mask_prefix + str(idx) + self.mask_postfix))
class MergedDataset(ArtificialNucleiDataset):
    """Concatenation of several datasets: all their train splits first,
    followed by all their val splits; each id range is shuffled separately."""

    def __init__(self, datasets):
        # NOTE(review): passing `self` into the base __init__ looks unintended
        # (kept for behavioural compatibility) -- verify against the base class.
        super(MergedDataset, self).__init__(self)
        self.image_path = []
        self.mask_path = []
        self.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
        self.ids = []
        # train portions of every dataset come first ...
        for dataset in datasets:
            self.image_path.extend(dataset.image_path[0:dataset.train_cnt])
            self.mask_path.extend(dataset.mask_path[0:dataset.train_cnt])
        self.train_cnt = self.image_path.__len__()
        # ... then the val portions
        for dataset in datasets:
            self.image_path.extend(dataset.image_path[dataset.train_cnt:])
            self.mask_path.extend(dataset.mask_path[dataset.train_cnt:])
        self.val_cnt = self.image_path.__len__() - self.train_cnt
        ids_train = np.arange(0, self.train_cnt)
        ids_val = np.arange(self.train_cnt, self.train_cnt + self.val_cnt)
        np.random.shuffle(ids_train)
        np.random.shuffle(ids_val)
        self.ids = np.concatenate((ids_train, ids_val), axis=0)

    def load_data(self, width=256, height=256, ids=None, mode=1):
        """Register every merged path as an image; paths were set in __init__."""
        for i in self.ids:
            self.add_image("ArtificialNuclei", image_id=i, path=None, width=width, height=height)

    def load_image(self, image_id):
        """Read one image, crop to the left 256 columns, expand to RGB for
        Mask R-CNN if configured."""
        img_final = cv2.imread(self.image_path[self.ids[image_id]])
        try:
            img_final = img_final[:, :, 0]
        # FIX: was a bare except; keeps the best-effort channel selection
        except Exception:
            pass
        try:
            img_final = img_final[:, 0:256]
        # FIX: was a bare except; keeps the best-effort crop
        except Exception:
            pass
        if self.settings.network_info["netinfo"] == 'maskrcnn':  # mask rcnn needs an rgb image
            img_new = np.zeros((img_final.shape[0], img_final.shape[1], 3))
            img_new[:, :, 0] = img_new[:, :, 1] = img_new[:, :, 2] = img_final
            img_final = img_new
        return img_final
class ArtificialNucleiDatasetNotConverted(ArtificialNucleiDataset):
    """Variant reading unconverted Img_<n>.jpg inputs; only the left 256
    columns of each image are used."""

    img_prefix = 'Img_'
    img_postfix = '.jpg'  # '-inputs.png'
    mask_prefix = "Mask_"

    def setImagePaths(self, folders=""):
        """Glob Img_<n>.jpg files and append their paths in index order."""
        for folder in folders:
            file_pattern = os.path.join(folder, self.img_prefix + "*" + self.img_postfix)
            print(file_pattern)
            img_files = glob.glob(file_pattern)
            img_files.sort()
            img_range = range(0, img_files.__len__())
            for i in img_range:
                self.image_path.append(os.path.join(folder, self.img_prefix + str(i) + self.img_postfix))
        return img_range

    def setMaskPaths(self, folders="", img_range=None):
        """Append the mask path matching each image index in ``img_range``."""
        self.mask_postfix = ".tif"
        for folder in folders:
            file_pattern = os.path.join(folder, self.mask_prefix + "*" + self.mask_postfix)
            print(file_pattern)
            img_files = glob.glob(file_pattern)
            img_files.sort()
            for i in img_range:
                self.mask_path.append(os.path.join(folder, self.mask_prefix + str(i) + self.mask_postfix))

    def load_image(self, image_id):
        """Read one image, keep the left 256 columns, expand to RGB if needed."""
        img_final = cv2.imread(self.image_path[self.ids[image_id]])
        try:
            img_final = img_final[:, :, 0]
        # FIX: was a bare except (caught SystemExit/KeyboardInterrupt too)
        except Exception:
            pass
        img_final = img_final[:, 0:256]
        if self.settings.network_info["netinfo"] == 'maskrcnn':  # mask rcnn needs an rgb image
            img_new = np.zeros((img_final.shape[0], img_final.shape[1], 3))
            img_new[:, :, 0] = img_new[:, :, 1] = img_new[:, :, 2] = img_final
            img_final = img_new
        return img_final

    def load_mask_one_layer(self, image_id, relabel=False):
        """Return the labelled mask as one 2-D array; with ``relabel`` the
        labels are renumbered consecutively starting at 1."""
        mask = tifffile.imread(self.mask_path[self.ids[image_id]])
        if (mask.ndim > 2):
            mask = mask[:, :, 0]
        if (relabel):
            mask_tmp = np.zeros((mask.shape[0], mask.shape[1]))
            running = 1
            for i in np.unique(mask):
                if i > 0:
                    mask_tmp = mask_tmp + running * (mask == i)
                    running = running + 1
            # FIX: np.float was removed in NumPy 1.24 -- use the builtin float
            mask = mask_tmp.astype(float)
        return mask
class SampleInference(ArtificialNucleiDataset):
    """Inference-only dataset: both images (Img*.jpg) and masks (Img*.tif)
    are taken directly from the sorted glob results."""

    def setImagePaths(self, folders=""):
        """Collect every Img*.jpg found in the given folders (sorted)."""
        self.img_postfix = ".jpg"
        for folder in folders:
            self.img_prefix = "Img"
            pattern = os.path.join(folder, self.img_prefix + "*" + self.img_postfix)
            print(pattern)
            matches = glob.glob(pattern)
            matches.sort()
            img_range = range(0, len(matches))
            for idx in img_range:
                self.image_path.append(matches[idx])
        return img_range

    def setMaskPaths(self, folders="", img_range=None):
        """Collect the Img*.tif mask files matching the given index range."""
        self.mask_postfix = ".tif"
        for folder in folders:
            self.mask_prefix = "Img"
            pattern = os.path.join(folder, self.mask_prefix + "*" + self.mask_postfix)
            print(pattern)
            matches = glob.glob(pattern)
            matches.sort()
            for idx in img_range:
                self.mask_path.append(matches[idx])

    def load_data(self, width=256, height=256, ids=None, mode=1):
        """Load only the configured test directories (no train/val split)."""
        self.image_path = []
        self.mask_path = []
        self.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
        print("Loading train data ...")
        for entry in self.settings.network_info["dataset_dirs_test"].split(';'):
            img_range = self.setImagePaths(folders=[entry + "\\images"])
            self.setMaskPaths(folders=[entry + "\\masks"], img_range=img_range)
        print("Checking train path ...")
        self.checkPath()
        self.ids = np.arange(0, self.image_path.__len__())
        for idx in self.ids:
            self.add_image("ArtificialNuclei", image_id=idx, path=None, width=width, height=height)
        return ids
class DataLoading:
    """Facade for selecting, loading and preparing the configured dataset."""

    def load(self, phase='train'):
        """Instantiate the dataset named in the UNET settings and load it.

        Args:
            phase: 'train' loads the training split (mode=1); any other
                value loads the evaluation split (mode=2).

        Returns:
            The prepared dataset instance.

        Exits the process via sys.exit when the configured dataset name
        is unknown (same as the original behaviour).
        """
        # Read the settings once instead of re-instantiating UNETSettings()
        # for every branch of the dispatch below (the original also left a
        # dead `settings` local behind).
        settings = UNETSettings()
        dataset_name = settings.network_info["dataset"]
        # Load Dataset
        print("Load dataset ...")
        if dataset_name == 'tisquant':
            dataset = TisquantDatasetNew()
        elif dataset_name == 'artificialNuclei':
            dataset = ArtificialNucleiDataset()
        elif dataset_name == 'artificialNucleiNotConverted':
            dataset = ArtificialNucleiDatasetNotConverted()
        elif dataset_name == 'mergeTisquantArtificialNotConverted':
            dataset = self._merge(TisquantDatasetNew(),
                                  ArtificialNucleiDatasetNotConverted())
        elif dataset_name == 'mergeTisquantArtificial':
            dataset = self._merge(TisquantDatasetNew(),
                                  ArtificialNucleiDataset())
        else:
            print('Dataset not valid')
            sys.exit("Error")
        # Load the requested split and finalise the dataset.
        dataset.load_data(mode=1 if phase == 'train' else 2)
        dataset.prepare()
        return dataset

    def _merge(self, *parts):
        """Load the training split of each sub-dataset and merge them."""
        for part in parts:
            part.load_data(mode=1)
        return MergedDataset(list(parts))

    def getID(self):
        """Return the configured network description string."""
        settings = UNETSettings()
        return settings.network_info["net_description"]

    def getResultsPath(self):
        """Return the configured results folder path."""
        settings = UNETSettings()
        return settings.network_info["results_folder"]
"tifffile.tifffile.imread",
"Config.Config.UNETSettings",
"sys.exit",
"sys.path.append",
"numpy.arange",
"os.path.exists",
"os.listdir",
"numpy.asarray",
"numpy.random.seed",
"numpy.concatenate",
"glob.glob",
"numpy.ones",
"os.path.dirname",
"cv2.cvtColor",
"cv2.imread",
"numpy.unique"... | [((87, 144), 'sys.path.append', 'sys.path.append', (['"""D:\\\\DeepLearning\\\\Kaggle\\\\Datahandling"""'], {}), "('D:\\\\DeepLearning\\\\Kaggle\\\\Datahandling')\n", (102, 144), False, 'import sys\n'), ((9966, 9980), 'Config.Config.UNETSettings', 'UNETSettings', ([], {}), '()\n', (9978, 9980), False, 'from Config.Config import UNETSettings\n'), ((52, 77), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (67, 77), False, 'import os\n'), ((4472, 4540), 'numpy.zeros', 'np.zeros', (["[info['height'], info['width'], count + 1]"], {'dtype': 'np.uint8'}), "([info['height'], info['width'], count + 1], dtype=np.uint8)\n", (4480, 4540), True, 'import numpy as np\n'), ((4886, 4904), 'numpy.ones', 'np.ones', (['(count + 1)'], {}), '(count + 1)\n', (4893, 4904), True, 'import numpy as np\n'), ((5682, 5699), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (5696, 5699), True, 'import numpy as np\n'), ((5709, 5731), 'numpy.random.shuffle', 'np.random.shuffle', (['ids'], {}), '(ids)\n', (5726, 5731), True, 'import numpy as np\n'), ((5962, 6009), 'cv2.imread', 'cv2.imread', (['self.image_path[self.ids[image_id]]'], {}), '(self.image_path[self.ids[image_id]])\n', (5972, 6009), False, 'import cv2\n'), ((6187, 6206), 'os.listdir', 'os.listdir', (['folders'], {}), '(folders)\n', (6197, 6206), False, 'import os\n'), ((7014, 7046), 'os.path.join', 'os.path.join', (['mask_path', '"""*.png"""'], {}), "(mask_path, '*.png')\n", (7026, 7046), False, 'import os\n'), ((7111, 7134), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (7120, 7134), False, 'import glob\n'), ((7749, 7763), 'numpy.ones', 'np.ones', (['count'], {}), '(count)\n', (7756, 7763), True, 'import numpy as np\n'), ((8221, 8253), 'os.path.join', 'os.path.join', (['mask_path', '"""*.png"""'], {}), "(mask_path, '*.png')\n", (8233, 8253), False, 'import os\n'), ((8318, 8341), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (8327, 
8341), False, 'import glob\n'), ((9102, 9134), 'os.path.join', 'os.path.join', (['mask_path', '"""*.png"""'], {}), "(mask_path, '*.png')\n", (9114, 9134), False, 'import os\n'), ((9157, 9180), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (9166, 9180), False, 'import glob\n'), ((12494, 12541), 'cv2.imread', 'cv2.imread', (['self.image_path[self.ids[image_id]]'], {}), '(self.image_path[self.ids[image_id]])\n', (12504, 12541), False, 'import cv2\n'), ((14741, 14792), 'tifffile.tifffile.imread', 'tifffile.imread', (['self.mask_path[self.ids[image_id]]'], {}), '(self.mask_path[self.ids[image_id]])\n', (14756, 14792), False, 'from tifffile import tifffile\n'), ((15625, 15676), 'tifffile.tifffile.imread', 'tifffile.imread', (['self.mask_path[self.ids[image_id]]'], {}), '(self.mask_path[self.ids[image_id]])\n', (15640, 15676), False, 'from tifffile import tifffile\n'), ((17575, 17603), 'numpy.arange', 'np.arange', (['(0)', 'self.train_cnt'], {}), '(0, self.train_cnt)\n', (17584, 17603), True, 'import numpy as np\n'), ((17622, 17648), 'numpy.arange', 'np.arange', (['(0)', 'self.val_cnt'], {}), '(0, self.val_cnt)\n', (17631, 17648), True, 'import numpy as np\n'), ((17657, 17685), 'numpy.random.shuffle', 'np.random.shuffle', (['ids_train'], {}), '(ids_train)\n', (17674, 17685), True, 'import numpy as np\n'), ((17695, 17721), 'numpy.random.shuffle', 'np.random.shuffle', (['ids_val'], {}), '(ids_val)\n', (17712, 17721), True, 'import numpy as np\n'), ((22852, 22880), 'numpy.arange', 'np.arange', (['(0)', 'self.train_cnt'], {}), '(0, self.train_cnt)\n', (22861, 22880), True, 'import numpy as np\n'), ((22899, 22955), 'numpy.arange', 'np.arange', (['self.train_cnt', '(self.train_cnt + self.val_cnt)'], {}), '(self.train_cnt, self.train_cnt + self.val_cnt)\n', (22908, 22955), True, 'import numpy as np\n'), ((22963, 22991), 'numpy.random.shuffle', 'np.random.shuffle', (['ids_train'], {}), '(ids_train)\n', (22980, 22991), True, 'import numpy as np\n'), 
((23001, 23027), 'numpy.random.shuffle', 'np.random.shuffle', (['ids_val'], {}), '(ids_val)\n', (23018, 23027), True, 'import numpy as np\n'), ((23048, 23092), 'numpy.concatenate', 'np.concatenate', (['(ids_train, ids_val)'], {'axis': '(0)'}), '((ids_train, ids_val), axis=0)\n', (23062, 23092), True, 'import numpy as np\n'), ((23391, 23438), 'cv2.imread', 'cv2.imread', (['self.image_path[self.ids[image_id]]'], {}), '(self.image_path[self.ids[image_id]])\n', (23401, 23438), False, 'import cv2\n'), ((25820, 25867), 'cv2.imread', 'cv2.imread', (['self.image_path[self.ids[image_id]]'], {}), '(self.image_path[self.ids[image_id]])\n', (25830, 25867), False, 'import cv2\n'), ((26415, 26466), 'tifffile.tifffile.imread', 'tifffile.imread', (['self.mask_path[self.ids[image_id]]'], {}), '(self.mask_path[self.ids[image_id]])\n', (26430, 26466), False, 'from tifffile import tifffile\n'), ((29484, 29498), 'Config.Config.UNETSettings', 'UNETSettings', ([], {}), '()\n', (29496, 29498), False, 'from Config.Config import UNETSettings\n'), ((31159, 31173), 'Config.Config.UNETSettings', 'UNETSettings', ([], {}), '()\n', (31171, 31173), False, 'from Config.Config import UNETSettings\n'), ((31284, 31298), 'Config.Config.UNETSettings', 'UNETSettings', ([], {}), '()\n', (31296, 31298), False, 'from Config.Config import UNETSettings\n'), ((1459, 1507), 'os.path.join', 'os.path.join', (['DATASET_FOLDER_TISQUANT', 'data_file'], {}), '(DATASET_FOLDER_TISQUANT, data_file)\n', (1471, 1507), False, 'import os\n'), ((3055, 3077), 'numpy.random.shuffle', 'np.random.shuffle', (['ids'], {}), '(ids)\n', (3072, 3077), True, 'import numpy as np\n'), ((6236, 6284), 'os.path.join', 'os.path.join', (['folders', 'folder', '"""images"""', '"""*.png"""'], {}), "(folders, folder, 'images', '*.png')\n", (6248, 6284), False, 'import os\n'), ((6341, 6364), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (6350, 6364), False, 'import glob\n'), ((7403, 7416), 'cv2.imread', 'cv2.imread', 
(['i'], {}), '(i)\n', (7413, 7416), False, 'import cv2\n'), ((8610, 8623), 'cv2.imread', 'cv2.imread', (['i'], {}), '(i)\n', (8620, 8623), False, 'import cv2\n'), ((9255, 9268), 'cv2.imread', 'cv2.imread', (['i'], {}), '(i)\n', (9265, 9268), False, 'import cv2\n'), ((9534, 9571), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (9546, 9571), False, 'import cv2\n'), ((11216, 11239), 'numpy.arange', 'np.arange', (['(0)', 'train_cnt'], {}), '(0, train_cnt)\n', (11225, 11239), True, 'import numpy as np\n'), ((11262, 11303), 'numpy.arange', 'np.arange', (['train_cnt', '(train_cnt + val_cnt)'], {}), '(train_cnt, train_cnt + val_cnt)\n', (11271, 11303), True, 'import numpy as np\n'), ((11391, 11419), 'numpy.random.shuffle', 'np.random.shuffle', (['ids_train'], {}), '(ids_train)\n', (11408, 11419), True, 'import numpy as np\n'), ((11433, 11459), 'numpy.random.shuffle', 'np.random.shuffle', (['ids_val'], {}), '(ids_val)\n', (11450, 11459), True, 'import numpy as np\n'), ((11484, 11528), 'numpy.concatenate', 'np.concatenate', (['(ids_train, ids_val)'], {'axis': '(0)'}), '((ids_train, ids_val), axis=0)\n', (11498, 11528), True, 'import numpy as np\n'), ((12786, 12839), 'numpy.zeros', 'np.zeros', (['(img_final.shape[0], img_final.shape[1], 3)'], {}), '((img_final.shape[0], img_final.shape[1], 3))\n', (12794, 12839), True, 'import numpy as np\n'), ((13075, 13137), 'os.path.join', 'os.path.join', (['folder', "(self.img_prefix + '*' + self.img_postfix)"], {}), "(folder, self.img_prefix + '*' + self.img_postfix)\n", (13087, 13137), False, 'import os\n'), ((13218, 13241), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (13227, 13241), False, 'import glob\n'), ((13790, 13854), 'os.path.join', 'os.path.join', (['folder', "(self.mask_prefix + '*' + self.mask_postfix)"], {}), "(folder, self.mask_prefix + '*' + self.mask_postfix)\n", (13802, 13854), False, 'import os\n'), ((13928, 13951), 'glob.glob', 'glob.glob', 
(['file_pattern'], {}), '(file_pattern)\n', (13937, 13951), False, 'import glob\n'), ((14937, 15001), 'numpy.zeros', 'np.zeros', (["[info['height'], info['width'], count]"], {'dtype': 'np.uint8'}), "([info['height'], info['width'], count], dtype=np.uint8)\n", (14945, 15001), True, 'import numpy as np\n'), ((15076, 15091), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (15085, 15091), True, 'import numpy as np\n'), ((15340, 15354), 'numpy.ones', 'np.ones', (['count'], {}), '(count)\n', (15347, 15354), True, 'import numpy as np\n'), ((15394, 15454), 'numpy.zeros', 'np.zeros', (["[info['height'], info['width'], 1]"], {'dtype': 'np.uint8'}), "([info['height'], info['width'], 1], dtype=np.uint8)\n", (15402, 15454), True, 'import numpy as np\n'), ((15480, 15493), 'numpy.zeros', 'np.zeros', (['[1]'], {}), '([1])\n', (15488, 15493), True, 'import numpy as np\n'), ((15785, 15825), 'numpy.zeros', 'np.zeros', (['(mask.shape[0], mask.shape[1])'], {}), '((mask.shape[0], mask.shape[1]))\n', (15793, 15825), True, 'import numpy as np\n'), ((15870, 15885), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (15879, 15885), True, 'import numpy as np\n'), ((16256, 16293), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (16268, 16293), False, 'import cv2\n'), ((18588, 18650), 'os.path.join', 'os.path.join', (['folder', "(self.img_prefix + '*' + self.img_postfix)"], {}), "(folder, self.img_prefix + '*' + self.img_postfix)\n", (18600, 18650), False, 'import os\n'), ((18731, 18754), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (18740, 18754), False, 'import glob\n'), ((19580, 19644), 'os.path.join', 'os.path.join', (['folder', "(self.mask_prefix + '*' + self.mask_postfix)"], {}), "(folder, self.mask_prefix + '*' + self.mask_postfix)\n", (19592, 19644), False, 'import os\n'), ((19718, 19741), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (19727, 19741), False, 'import 
glob\n'), ((20412, 20474), 'os.path.join', 'os.path.join', (['folder', "(self.img_prefix + '*' + self.img_postfix)"], {}), "(folder, self.img_prefix + '*' + self.img_postfix)\n", (20424, 20474), False, 'import os\n'), ((20555, 20578), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (20564, 20578), False, 'import glob\n'), ((21364, 21428), 'os.path.join', 'os.path.join', (['folder', "(self.mask_prefix + '*' + self.mask_postfix)"], {}), "(folder, self.mask_prefix + '*' + self.mask_postfix)\n", (21376, 21428), False, 'import os\n'), ((21502, 21525), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (21511, 21525), False, 'import glob\n'), ((23775, 23828), 'numpy.zeros', 'np.zeros', (['(img_final.shape[0], img_final.shape[1], 3)'], {}), '((img_final.shape[0], img_final.shape[1], 3))\n', (23783, 23828), True, 'import numpy as np\n'), ((24338, 24400), 'os.path.join', 'os.path.join', (['folder', "(self.img_prefix + '*' + self.img_postfix)"], {}), "(folder, self.img_prefix + '*' + self.img_postfix)\n", (24350, 24400), False, 'import os\n'), ((24481, 24504), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (24490, 24504), False, 'import glob\n'), ((25243, 25307), 'os.path.join', 'os.path.join', (['folder', "(self.mask_prefix + '*' + self.mask_postfix)"], {}), "(folder, self.mask_prefix + '*' + self.mask_postfix)\n", (25255, 25307), False, 'import os\n'), ((25381, 25404), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (25390, 25404), False, 'import glob\n'), ((26152, 26205), 'numpy.zeros', 'np.zeros', (['(img_final.shape[0], img_final.shape[1], 3)'], {}), '((img_final.shape[0], img_final.shape[1], 3))\n', (26160, 26205), True, 'import numpy as np\n'), ((26607, 26647), 'numpy.zeros', 'np.zeros', (['(mask.shape[0], mask.shape[1])'], {}), '((mask.shape[0], mask.shape[1]))\n', (26615, 26647), True, 'import numpy as np\n'), ((26692, 26707), 'numpy.unique', 'np.unique', (['mask'], {}), 
'(mask)\n', (26701, 26707), True, 'import numpy as np\n'), ((27269, 27331), 'os.path.join', 'os.path.join', (['folder', "(self.img_prefix + '*' + self.img_postfix)"], {}), "(folder, self.img_prefix + '*' + self.img_postfix)\n", (27281, 27331), False, 'import os\n'), ((27412, 27435), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (27421, 27435), False, 'import glob\n'), ((28157, 28221), 'os.path.join', 'os.path.join', (['folder', "(self.mask_prefix + '*' + self.mask_postfix)"], {}), "(folder, self.mask_prefix + '*' + self.mask_postfix)\n", (28169, 28221), False, 'import os\n'), ((28295, 28318), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (28304, 28318), False, 'import glob\n'), ((2545, 2581), 'numpy.asarray', 'np.asarray', (['Images'], {'dtype': 'np.float32'}), '(Images, dtype=np.float32)\n', (2555, 2581), True, 'import numpy as np\n'), ((2786, 2821), 'numpy.asarray', 'np.asarray', (['Masks'], {'dtype': 'np.float32'}), '(Masks, dtype=np.float32)\n', (2796, 2821), True, 'import numpy as np\n'), ((9621, 9657), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (9633, 9657), False, 'import cv2\n'), ((12194, 12211), 'os.path.exists', 'os.path.exists', (['i'], {}), '(i)\n', (12208, 12211), False, 'import os\n'), ((16343, 16379), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (16355, 16379), False, 'import cv2\n'), ((6480, 6518), 'os.path.join', 'os.path.join', (['folders', 'folder', '"""masks"""'], {}), "(folders, folder, 'masks')\n", (6492, 6518), False, 'import os\n'), ((14807, 14822), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (14816, 14822), True, 'import numpy as np\n'), ((29570, 29584), 'Config.Config.UNETSettings', 'UNETSettings', ([], {}), '()\n', (29582, 29584), False, 'from Config.Config import UNETSettings\n'), ((14859, 14874), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (14868, 14874), 
True, 'import numpy as np\n'), ((29725, 29739), 'Config.Config.UNETSettings', 'UNETSettings', ([], {}), '()\n', (29737, 29739), False, 'from Config.Config import UNETSettings\n'), ((29850, 29864), 'Config.Config.UNETSettings', 'UNETSettings', ([], {}), '()\n', (29862, 29864), False, 'from Config.Config import UNETSettings\n'), ((30897, 30914), 'sys.exit', 'sys.exit', (['"""Error"""'], {}), "('Error')\n", (30905, 30914), False, 'import sys\n'), ((29999, 30013), 'Config.Config.UNETSettings', 'UNETSettings', ([], {}), '()\n', (30011, 30013), False, 'from Config.Config import UNETSettings\n'), ((30433, 30447), 'Config.Config.UNETSettings', 'UNETSettings', ([], {}), '()\n', (30445, 30447), False, 'from Config.Config import UNETSettings\n')] |
"""
Individual.py
"""
import time
import copy as cp
import random as r
import numpy as np
import eugene.Config
from eugene.Tree import random_tree
from eugene.Primatives import UNARIES, BINARIES, CONSTS, EPHEMERAL # NARIES,
class Individual(object):
    """
    Defines an 'individual' via a set of chromosomes, along with gene
    expression and mating functions.
    """

    def __init__(self, chromosomes=None):
        # `chromosomes` is a tree (see eugene.Tree); None yields an empty individual.
        self.chromosomes = chromosomes

    @property
    def size(self):
        """
        Return size of individual (number of nodes in the chromosome tree).
        """
        return self.chromosomes.node_num

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return str(self.chromosomes)

    def display(self, stdout=True):
        """
        Display helper; delegates to the chromosome tree.
        """
        return self.chromosomes.display(stdout=stdout)

    # @profile
    def compute_gene_expression(self, error_function=None, target=None):
        """
        Compute gene expression by evaluating the function stored in the
        tree, keeping track of evaluation time.

        Args:
            error_function: callable(output, target) -> scalar error.
            target: target values passed to `error_function`.

        Returns:
            np.array([error, time_complexity, physical_complexity]).
        """
        # evaluate function and time to compute
        t0 = time.time()
        output = self.chromosomes.evaluate()
        t1 = time.time()
        # calculate error of result and time complexity
        error = error_function(output, target)
        time_complexity = t1 - t0
        physical_complexity = self.chromosomes.complexity
        return np.array([error, time_complexity, physical_complexity])

    # @profile
    def crossover(self, spouse=None):
        """
        Randomly cross over two chromosomes; returns two child Individuals.
        """
        # create random crossover points
        x1 = r.randint(0, self.size - 1)
        x2 = r.randint(0, spouse.size - 1)
        # clone parent chromosomes
        c1 = cp.deepcopy(self.chromosomes)
        c2 = cp.deepcopy(spouse.chromosomes)
        # get nodes to cross
        c1n = c1.get_node(x1)
        c2n = c2.get_node(x2)
        # transfer nodes (only when the fetched node is truthy)
        if c2n:
            c1.set_node(x1, c2n)
        if c1n:
            c2.set_node(x2, c1n)
        return (Individual(c1), Individual(c2))

    # @profile
    def mutate(self, pruning=False):
        """
        Alter a random node in chromosomes.

        With probability 0.5 the node is replaced by a random subtree;
        otherwise only its value is swapped for a compatible alternative.

        Args:
            pruning: if True, prune inefficiencies after a subtree mutation.
        """
        # randomly select node to mutate
        mpoint = r.randint(0, self.size - 1)
        # mutate whole node by replacing children with random subtree
        if r.random() >= 0.5:
            rand_tree = random_tree(2)
            x2 = r.randint(0, rand_tree.node_num - 1)
            node = rand_tree.get_node(x2)
            self.chromosomes.set_node(mpoint, node)
            # check and prune tree with new subtree for inefficiencies
            if pruning:
                self.chromosomes.prune()
        # or just mutate node value based on current type
        else:
            node = self.chromosomes.get_node(mpoint)
            # constant
            if node.value in CONSTS:
                mutated_value = CONSTS[r.randint(0, len(CONSTS) - 1)]
            # variable
            elif node.value in eugene.Config.VAR:
                # Bug fix: dict views are not indexable on Python 3, so the
                # keys must be materialized with list() before indexing.
                var_names = list(eugene.Config.VAR.keys())
                mutated_value = var_names[r.randint(0, len(var_names) - 1)]
            # a unary operator
            elif node.value in UNARIES:
                mutated_value = UNARIES[r.randint(0, len(UNARIES) - 1)]
            # a binary operator
            elif node.value in BINARIES:
                mutated_value = BINARIES[r.randint(0, len(BINARIES) - 1)]
            # a n-ary operator
            # elif node.value in NARIES:
            #    mutated_value = NARIES[r.randint(0, len(NARIES) - 1)]
            # EPHEMERAL constant random ( 0:1, uniform -500:500, or normal -500:500 )
            else:
                # NOTE: index deliberately starts at 1, skipping EPHEMERAL[0]
                # (preserved from the original implementation).
                mutated_value = EPHEMERAL[r.randint(1, len(EPHEMERAL) - 1)]()
            # mutate node value (keeps children, if applicable)
            node.value = mutated_value
            self.chromosomes.set_node(mpoint, node)
| [
"eugene.Tree.random_tree",
"numpy.array",
"copy.deepcopy",
"random.random",
"time.time",
"random.randint"
] | [((1116, 1127), 'time.time', 'time.time', ([], {}), '()\n', (1125, 1127), False, 'import time\n'), ((1186, 1197), 'time.time', 'time.time', ([], {}), '()\n', (1195, 1197), False, 'import time\n'), ((1410, 1465), 'numpy.array', 'np.array', (['[error, time_complexity, physical_complexity]'], {}), '([error, time_complexity, physical_complexity])\n', (1418, 1465), True, 'import numpy as np\n'), ((1643, 1670), 'random.randint', 'r.randint', (['(0)', '(self.size - 1)'], {}), '(0, self.size - 1)\n', (1652, 1670), True, 'import random as r\n'), ((1684, 1713), 'random.randint', 'r.randint', (['(0)', '(spouse.size - 1)'], {}), '(0, spouse.size - 1)\n', (1693, 1713), True, 'import random as r\n'), ((1763, 1792), 'copy.deepcopy', 'cp.deepcopy', (['self.chromosomes'], {}), '(self.chromosomes)\n', (1774, 1792), True, 'import copy as cp\n'), ((1806, 1837), 'copy.deepcopy', 'cp.deepcopy', (['spouse.chromosomes'], {}), '(spouse.chromosomes)\n', (1817, 1837), True, 'import copy as cp\n'), ((2281, 2308), 'random.randint', 'r.randint', (['(0)', '(self.size - 1)'], {}), '(0, self.size - 1)\n', (2290, 2308), True, 'import random as r\n'), ((2391, 2401), 'random.random', 'r.random', ([], {}), '()\n', (2399, 2401), True, 'import random as r\n'), ((2434, 2448), 'eugene.Tree.random_tree', 'random_tree', (['(2)'], {}), '(2)\n', (2445, 2448), False, 'from eugene.Tree import random_tree\n'), ((2466, 2502), 'random.randint', 'r.randint', (['(0)', '(rand_tree.node_num - 1)'], {}), '(0, rand_tree.node_num - 1)\n', (2475, 2502), True, 'import random as r\n')] |
"""
Extract beats from mitdb dataset with size = 2 * window_size
and compute temporal features from each beat
Author: <NAME>
VARPA
University of A Coruna
April 2017
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import csv
import pickle
import numpy as np
import matplotlib.pyplot as plt
import os.path
import pywt
class temp_features:
    """Per-beat temporal (RR-interval) feature container.

    Each attribute holds one value per selected beat:
    pre_R/post_R are the distances to the previous/next R peak,
    local_R averages the last 10 RR intervals, and global_R averages
    the RR intervals over the last 5 minutes of signal.
    """

    def __init__(self):
        # Start every feature sequence empty; they are filled during extraction.
        self.pre_R, self.post_R = [], []
        self.local_R, self.global_R = [], []
class mit_data:
    """Container for the parsed MIT-BIH dataset.

    All attributes are parallel lists with one element per record file,
    except window_size, which stores the half-beat window used during
    extraction.
    """

    def __init__(self):
        # Everything starts empty and is filled by the extraction script.
        (self.filenames,
         self.patients,
         self.signals,
         self.classes,
         self.selected_R,
         self.temporal_features,
         self.window_size) = ([], [], [], [], [], [], [])
# Given a list of patients, extract their beats from an already parsed
# mit_data container, optionally apply the wavelet decomposition, and
# return (data, labels) ready for classification.  Relies on the
# module-level flags `compute_wavelets` / `compute_RR_interval_feature`
# and the `superclass` grouping defined elsewhere in this script.
def get_data_label_mitdb( list_patient, mit_db ):
    """Return (data, labels) matrices for the given patients.

    Each beat contributes one feature row (raw signal or db8 level-4
    wavelet detail coefficients, optionally with the four RR-interval
    features appended) and one integer label in [0, 4] given by its
    superclass.
    """
    labels = np.array([], dtype=np.int32)
    data = np.array([], dtype=float)
    for patient in list_patient:
        rec = mit_db.patients.index(str(patient))
        tfeat = mit_db.temporal_features[rec]
        for b in range(len(mit_db.classes[rec])):
            rr_features = [tfeat.pre_R[b], tfeat.post_R[b],
                           tfeat.local_R[b], tfeat.global_R[b]]
            signal = mit_db.signals[rec][b]
            beat_type = mit_db.classes[rec][b]
            # Map the MIT-BIH symbol onto one of the 5 superclasses
            # ['N', 'L', 'R', 'e', 'j', 'A', 'a', 'J', 'S', 'V', 'E', 'F', 'P', '/', 'f', 'u']
            for group_idx in range(5):
                if beat_type in superclass[group_idx]:
                    class_n = group_idx
                    break  # exit loop
            labels = np.append(labels, class_n)
            # Feature vector: raw beat, or db8 wavelet coefficients.
            if compute_wavelets:
                coeffs = pywt.wavedec(signal, pywt.Wavelet('db8'), level=4)
                features = coeffs[1]
            else:
                features = signal
            # Optionally append the RR-interval features.
            if compute_RR_interval_feature:
                features = np.append(features, rr_features)
            data = features if len(data) == 0 else np.vstack((data, features))
            #plt.subplot(211)
            #plt.plot(signal)
            #plt.subplot(212)
            #plt.plot(coeffs[1])
            #plt.show()
    return (data, labels)
# ---------------------------------------------------------------------------
# Script configuration
# ---------------------------------------------------------------------------
dataset = '/home/mondejar/dataset/ECG/mitdb/'
output_path = dataset + 'm_learning/'
window_size = 160                     # half-beat size: each beat spans 2*window_size samples
compute_RR_interval_feature = True    # append [pre, post, local, global] RR features
compute_wavelets = True               # use db8 wavelet coefficients instead of the raw beat

# MIT-BIH symbols that are kept, and their grouping into 5 superclasses.
list_classes = ['N', 'L', 'R', 'e', 'j', 'A', 'a', 'J', 'S', 'V', 'E', 'F', 'P', '/', 'f', 'u']
superclass = []
superclass.append(['N', 'L', 'R', 'e', 'j'])  # N
superclass.append(['A', 'a', 'J', 'S'])       # SVEB
superclass.append(['V', 'E'])                 # VEB
superclass.append(['F'])                      # F
superclass.append(['P', '/', 'f', 'u'])       # Q

mit_pickle_name = output_path + 'mit_db_' + str(window_size) + '.p'

if not os.path.exists(mit_pickle_name):
    # ------------------------------------------------------------------
    # Parse the exported .csv signals and their annotation files
    # ------------------------------------------------------------------
    filenames = next(os.walk(dataset + 'csv'))[2]
    records = []            # *.csv signal files
    annotation_files = []   # everything else (annotation exports)
    filenames.sort()
    for f in filenames:
        filename, file_extension = os.path.splitext(f)
        if file_extension == '.csv':
            records.append(dataset + 'csv/' + filename + file_extension)
        else:
            annotation_files.append(dataset + 'csv/' + filename + file_extension)

    # Per-record accumulators.
    signal_II_w = [np.array([np.array([])]) for i in range(len(records))]
    classes = [[] for i in range(len(records))]
    R_poses = [[] for i in range(len(records))]
    selected_R = [np.array([]) for i in range(len(records))]
    temporal_features = [temp_features() for i in range(len(records))]
    mit_db = mit_data()
    patients = []

    for r in range(len(records)):
        # Read channel II (second csv column); first row is the header.
        signal_II = []
        print(r)
        # NOTE(review): 'rb' + csv.reader is Python-2 style; under Python 3
        # this file should be opened in text mode.
        with open(records[r], 'rb') as csvfile:
            spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
            row_index = -1
            for row in spamreader:
                if row_index >= 0:
                    signal_II.insert(row_index, int(row[1]))
                row_index = row_index + 1
        # Patient id is encoded in the file name (e.g. .../100.csv -> '100').
        patients.append(records[r][-7:-4])

        # Read annotations: R-peak position and beat symbol.
        # (The original read this file twice; once is enough.)
        with open(annotation_files[r], 'r') as fileID:
            data = fileID.readlines()
        for d in range(1, len(data), 1):
            # list() is required so the filtered tokens are indexable
            # under Python 3 (filter() returns an iterator there).
            splitted = list(filter(None, data[d].split(' ')))
            pos = int(splitted[1])
            beat_symbol = splitted[2]  # renamed from 'type' (shadowed builtin)
            if beat_symbol in list_classes:
                if pos > window_size and pos < (len(signal_II) - window_size):
                    beat = signal_II[pos - window_size + 1:pos + window_size]
                    if np.size(signal_II_w[r]) == 0:
                        signal_II_w[r] = beat
                    else:
                        # Fixed: this previously wrote to the garbled name
                        # 'signamit_pickle_namel_II_w', raising NameError.
                        signal_II_w[r] = np.vstack([signal_II_w[r], beat])
                    classes[r].append(beat_symbol)
                    selected_R[r] = np.append(selected_R[r], 1)
                else:
                    # Too close to the record edge: keep the R position
                    # (needed for RR features) but skip the beat itself.
                    selected_R[r] = np.append(selected_R[r], 0)
                R_poses[r].append(pos)

        # ------------------------------------------------------------------
        # RR-interval features, computed over ALL annotated beats and then
        # restricted to the selected ones.
        # ------------------------------------------------------------------
        if compute_RR_interval_feature:
            pre_R = np.array([0])
            post_R = np.array([R_poses[r][1] - R_poses[r][0]])
            local_R = np.array([])   # average of the ten past R intervals
            global_R = np.array([])  # average over the last 5 minutes of the signal
            for i in range(1, len(R_poses[r]) - 1, 1):
                pre_R = np.insert(pre_R, i, R_poses[r][i] - R_poses[r][i - 1])
                post_R = np.insert(post_R, i, R_poses[r][i + 1] - R_poses[r][i])
            pre_R[0] = pre_R[1]
            pre_R = np.append(pre_R, R_poses[r][-1] - R_poses[r][-2])
            post_R = np.append(post_R, post_R[-1])

            # Local R: AVG of the up-to-10 previous RR intervals.
            for i in range(0, len(R_poses[r]), 1):
                avg_val = 0
                num_elems = 0
                for w in range(i - 10, i, 1):
                    if w >= 0:
                        avg_val = avg_val + pre_R[w]
                        num_elems = num_elems + 1
                if num_elems == 0:
                    local_R = np.append(local_R, 0)
                else:
                    local_R = np.append(local_R, avg_val / num_elems)

            # Global R: AVG over the past 5 minutes
            # (360 Hz -> 5 minutes = 108000 samples).
            for i in range(0, len(R_poses[r]), 1):
                avg_val = 0
                back = -1
                back_length = 0
                if R_poses[r][i] < 108000:
                    window = range(0, i, 1)
                else:
                    # Walk back until 5 minutes of signal are covered.
                    while (i + back) > 0 and back_length < 108000:
                        back_length = R_poses[r][i] - R_poses[r][i + back]
                        back = back - 1
                    window = range(max(0, back + i), i, 1)
                for w in window:
                    avg_val = avg_val + pre_R[w]
                if len(window) > 0:
                    avg_val = avg_val / len(window)
                else:
                    avg_val = 0
                global_R = np.append(global_R, avg_val)

            # Keep features only for beats that were actually stored above.
            keep = np.where(selected_R[r] == 1)[0]
            temporal_features[r].pre_R = pre_R[keep]
            temporal_features[r].post_R = post_R[keep]
            temporal_features[r].local_R = local_R[keep]
            temporal_features[r].global_R = global_R[keep]

    # EXPORT
    mit_db.filenames = records
    mit_db.patients = patients
    mit_db.signals = signal_II_w
    mit_db.classes = classes
    mit_db.selected_R = selected_R
    mit_db.temporal_features = temporal_features
    mit_db.window_size = window_size
    # Save data with pickle protocol 2: binary and efficient for new-style
    # classes, readable from Python >= 2.3.
    with open(mit_pickle_name, 'wb') as out_file:
        pickle.dump(mit_db, out_file, 2)
else:
    # Load previously extracted data.
    with open(mit_pickle_name, 'rb') as in_file:
        mit_db = pickle.load(in_file)

# Patient-wise split for training/evaluation.
list_train_pat = [101, 106, 108, 109, 112, 114, 115, 116, 118, 119, 122, 124, 201, 203, 205, 207, 208, 209, 215, 220, 223, 230]
list_test_pat = [100, 103, 105, 111, 113, 117, 121, 123, 200, 202, 210, 212, 213, 214, 219, 221, 222, 228, 231, 232, 233, 234]

#TODO export data and label directly like Tensorflow would require
train_data, train_labels = get_data_label_mitdb(list_train_pat, mit_db)
eval_data, eval_labels = get_data_label_mitdb(list_test_pat, mit_db)

# File-name suffix encodes the configuration used for this export.
extension = '_' + str(window_size)
if compute_wavelets:
    extension = extension + '_' + 'wv'
if compute_RR_interval_feature:
    extension = extension + '_' + 'RR'
extension = extension + '.csv'

# Export, save
np.savetxt(output_path + 'train_data' + extension, train_data, delimiter=",")
np.savetxt(output_path + 'train_label' + extension, train_labels, delimiter=",")
np.savetxt(output_path + 'eval_data' + extension, eval_data, delimiter=",")
np.savetxt(output_path + 'eval_label' + extension, eval_labels, delimiter=",")
| [
"numpy.insert",
"numpy.where",
"numpy.size",
"os.path.splitext",
"pywt.Wavelet",
"numpy.append",
"numpy.array",
"numpy.vstack",
"numpy.savetxt",
"pywt.wavedec",
"csv.reader",
"os.walk"
] | [((10178, 10255), 'numpy.savetxt', 'np.savetxt', (["(output_path + 'train_data' + extension)", 'train_data'], {'delimiter': '""","""'}), "(output_path + 'train_data' + extension, train_data, delimiter=',')\n", (10188, 10255), True, 'import numpy as np\n'), ((10256, 10341), 'numpy.savetxt', 'np.savetxt', (["(output_path + 'train_label' + extension)", 'train_labels'], {'delimiter': '""","""'}), "(output_path + 'train_label' + extension, train_labels, delimiter=','\n )\n", (10266, 10341), True, 'import numpy as np\n'), ((10337, 10412), 'numpy.savetxt', 'np.savetxt', (["(output_path + 'eval_data' + extension)", 'eval_data'], {'delimiter': '""","""'}), "(output_path + 'eval_data' + extension, eval_data, delimiter=',')\n", (10347, 10412), True, 'import numpy as np\n'), ((10413, 10491), 'numpy.savetxt', 'np.savetxt', (["(output_path + 'eval_label' + extension)", 'eval_labels'], {'delimiter': '""","""'}), "(output_path + 'eval_label' + extension, eval_labels, delimiter=',')\n", (10423, 10491), True, 'import numpy as np\n'), ((1065, 1093), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (1073, 1093), True, 'import numpy as np\n'), ((1105, 1130), 'numpy.array', 'np.array', (['[]'], {'dtype': 'float'}), '([], dtype=float)\n', (1113, 1130), True, 'import numpy as np\n'), ((3533, 3552), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (3549, 3552), False, 'import os\n'), ((4062, 4074), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4070, 4074), True, 'import numpy as np\n'), ((4391, 4440), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(csvfile, delimiter=',', quotechar='|')\n", (4401, 4440), False, 'import csv\n'), ((1874, 1900), 'numpy.append', 'np.append', (['labels', 'class_n'], {}), '(labels, class_n)\n', (1883, 1900), True, 'import numpy as np\n'), ((3326, 3350), 'os.walk', 'os.walk', (["(dataset + 'csv')"], {}), "(dataset + 'csv')\n", (3333, 3350), False, 'import 
os\n'), ((5966, 5979), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5974, 5979), True, 'import numpy as np\n'), ((6001, 6042), 'numpy.array', 'np.array', (['[R_poses[r][1] - R_poses[r][0]]'], {}), '([R_poses[r][1] - R_poses[r][0]])\n', (6009, 6042), True, 'import numpy as np\n'), ((6065, 6077), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6073, 6077), True, 'import numpy as np\n'), ((6139, 6151), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6147, 6151), True, 'import numpy as np\n'), ((6484, 6533), 'numpy.append', 'np.append', (['pre_R', '(R_poses[r][-1] - R_poses[r][-2])'], {}), '(pre_R, R_poses[r][-1] - R_poses[r][-2])\n', (6493, 6533), True, 'import numpy as np\n'), ((6555, 6584), 'numpy.append', 'np.append', (['post_R', 'post_R[-1]'], {}), '(post_R, post_R[-1])\n', (6564, 6584), True, 'import numpy as np\n'), ((2084, 2103), 'pywt.Wavelet', 'pywt.Wavelet', (['"""db8"""'], {}), "('db8')\n", (2096, 2103), False, 'import pywt\n'), ((2129, 2163), 'pywt.wavedec', 'pywt.wavedec', (['signal', 'db8'], {'level': '(4)'}), '(signal, db8, level=4)\n', (2141, 2163), False, 'import pywt\n'), ((2307, 2330), 'numpy.append', 'np.append', (['features', 'RR'], {}), '(features, RR)\n', (2316, 2330), True, 'import numpy as np\n'), ((2436, 2463), 'numpy.vstack', 'np.vstack', (['(data, features)'], {}), '((data, features))\n', (2445, 2463), True, 'import numpy as np\n'), ((3903, 3915), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3911, 3915), True, 'import numpy as np\n'), ((6287, 6341), 'numpy.insert', 'np.insert', (['pre_R', 'i', '(R_poses[r][i] - R_poses[r][i - 1])'], {}), '(pre_R, i, R_poses[r][i] - R_poses[r][i - 1])\n', (6296, 6341), True, 'import numpy as np\n'), ((6365, 6420), 'numpy.insert', 'np.insert', (['post_R', 'i', '(R_poses[r][i + 1] - R_poses[r][i])'], {}), '(post_R, i, R_poses[r][i + 1] - R_poses[r][i])\n', (6374, 6420), True, 'import numpy as np\n'), ((8111, 8139), 'numpy.append', 'np.append', (['global_R', 'avg_val'], {}), '(global_R, 
avg_val)\n', (8120, 8139), True, 'import numpy as np\n'), ((5710, 5737), 'numpy.append', 'np.append', (['selected_R[r]', '(1)'], {}), '(selected_R[r], 1)\n', (5719, 5737), True, 'import numpy as np\n'), ((5796, 5823), 'numpy.append', 'np.append', (['selected_R[r]', '(0)'], {}), '(selected_R[r], 0)\n', (5805, 5823), True, 'import numpy as np\n'), ((7050, 7071), 'numpy.append', 'np.append', (['local_R', '(0)'], {}), '(local_R, 0)\n', (7059, 7071), True, 'import numpy as np\n'), ((7177, 7204), 'numpy.append', 'np.append', (['local_R', 'avg_val'], {}), '(local_R, avg_val)\n', (7186, 7204), True, 'import numpy as np\n'), ((8362, 8390), 'numpy.where', 'np.where', (['(selected_R[r] == 1)'], {}), '(selected_R[r] == 1)\n', (8370, 8390), True, 'import numpy as np\n'), ((8444, 8472), 'numpy.where', 'np.where', (['(selected_R[r] == 1)'], {}), '(selected_R[r] == 1)\n', (8452, 8472), True, 'import numpy as np\n'), ((8528, 8556), 'numpy.where', 'np.where', (['(selected_R[r] == 1)'], {}), '(selected_R[r] == 1)\n', (8536, 8556), True, 'import numpy as np\n'), ((8614, 8642), 'numpy.where', 'np.where', (['(selected_R[r] == 1)'], {}), '(selected_R[r] == 1)\n', (8622, 8642), True, 'import numpy as np\n'), ((5413, 5436), 'numpy.size', 'np.size', (['signal_II_w[r]'], {}), '(signal_II_w[r])\n', (5420, 5436), True, 'import numpy as np\n'), ((5571, 5604), 'numpy.vstack', 'np.vstack', (['[signal_II_w[r], beat]'], {}), '([signal_II_w[r], beat])\n', (5580, 5604), True, 'import numpy as np\n')] |
import tensorflow as tf
# import tensorflow_probability as tfp
import numpy as np
# from research2018 import layers
import tf_contextual_prediction_with_expert_advice as cpea
from research2018 import data
class DataNormalizer(object):
def __init__(self, x):
self._abs_min = np.abs(x).min(axis=0, keepdims=True).astype('float32')
self._abs_max = np.abs(x).max(axis=0, keepdims=True).astype('float32')
self._mean = x.mean(axis=0, keepdims=True).astype('float32')
def __call__(self, x):
raise NotImplementedError("Please override.")
class SubtractMeanDivideByScaleDataNormalizer(DataNormalizer):
@tf.function
def __call__(self, x):
return data.subtract_mean_divide_by_scale(tf.cast(x, tf.float32),
self._abs_min, self._abs_max,
self._mean)
def perturbed_img(img, perturbation=None, max_change=100):
if perturbation is None:
perturbation = np.random.uniform(low=0,
high=max_change,
size=[1] +
list(img.shape)).astype('float32')
return np.minimum(np.maximum(img + perturbation, 0),
255).round().astype('uint8')
class ImageClassRewardDataset(data.Dataset):
@classmethod
def new(cls,
x,
y,
num_classes,
reward_fn=lambda y, yh: (y == yh).astype('float32'),
constant_reward=None):
if constant_reward is None:
rewards = np.array(
[reward_fn(y, y_hat) for y_hat in range(num_classes)],
dtype='float32').T
else:
rewards = np.array(
[reward_fn(y, y_hat) for y_hat in range(num_classes)] +
[np.full(y.shape, constant_reward)],
dtype='float32').T
return cls(x, y, rewards)
def perturbed(self, perturbation=None):
return self.__class__(perturbed_img(self.x, perturbation=perturbation),
self.y, self.reward)
@tf.function
def avg_prediction_mean_std(self, sample_models, batch_size=None, seed=42):
avg_mean = tf.zeros([self.reward.shape[1]])
avg_std = tf.zeros([self.reward.shape[1]])
if batch_size is None:
batch_size = len(self) // 10
data = self.clone()
data = data.batch(batch_size).repeat(1)
i = 0
for x, _ in data:
i += 1
samples = [model(x) for model in sample_models]
m = tf.reduce_mean(samples, axis=0)
s = tf.math.reduce_std(samples, axis=0)
avg_mean += (tf.reduce_mean(m, axis=0) - avg_mean) / float(i)
avg_std += (tf.reduce_mean(s, axis=0) - avg_std) / float(i)
return avg_mean, avg_std
@tf.function
def loss(self, model, batch_size=None):
if batch_size is None:
batch_size = len(self) // 10
data = self.clone()
data = data.batch(batch_size).repeat(1)
batch_loss = 0.0
i = 0
for x, rewards in data:
i += 1
next_reward = tf.reduce_mean(cpea.utility(model(x), rewards))
batch_loss += -(next_reward + batch_loss) / float(i)
return batch_loss
def new_training_dataset_and_normalizer(constant_reward=None):
(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
# Add color channel
x_train = np.expand_dims(x_train, -1)
dataset = ImageClassRewardDataset.new(x=x_train,
y=y_train,
num_classes=10,
constant_reward=constant_reward)
return dataset, SubtractMeanDivideByScaleDataNormalizer(x_train)
# class KernelPrior(object):
# def __init__(self, stddev=1):
# self.stddev = stddev
#
# def output(self, dtype, shape, name, trainable, add_variable_fn):
# scale = np.full(shape, self.stddev, dtype=dtype.as_numpy_dtype)
# dist = tfp.distributions.Normal(loc=tf.zeros(shape, dtype),
# scale=scale)
# batch_ndims = tf.size(dist.batch_shape_tensor())
# return tfp.distributions.Independent(
# dist, reinterpreted_batch_ndims=batch_ndims)
#
# def conv(self, dtype, shape, name, trainable, add_variable_fn):
# dist = tfp.distributions.Normal(loc=tf.zeros(shape, dtype),
# scale=dtype.as_numpy_dtype(
# self.stddev))
# batch_ndims = tf.size(dist.batch_shape_tensor())
# return tfp.distributions.Independent(
# dist, reinterpreted_batch_ndims=batch_ndims)
#
#
# def weighted_divergence_fn(log_weight):
# def divergence_fn(pos, pri):
# return (tf.exp(float(log_weight)) *
# tf.reduce_mean(pos.kl_divergence(pri)))
#
# return divergence_fn
#
#
# def new_deep_linear_bnn(data_normalizer,
# num_actions,
# filters=8,
# log_divergence_weight=-3,
# prior_stddev=1,
# residual_weight=0.1,
# log_log_divergence_weight=0.0):
# '''
# Creates a new Bayesian neural network on images.
#
# Arguments:
# - data_normalizer: A function that takes MNIST images preprocesses them.
# - filters: The number of convolutional filters in the two hidden
# convolutional layers. Defaults to 8 just because I saw another script
# use this many.
# - log_divergence_weight: The weight of the divergence penalty on each
# layer. Defaults to -3 since that worked best for me with MNIST.
# - prior_stddev: The standard deviation of the prior weight distributions.
# Defaults to 1 since that should probably be a good place to start.
# Might need to turn this up a lot though to get the layers to be more
# random, so you could set this as large as 20.
# - residual_weight: The weight on the residual term in the residual
# layers. Defaults to 0.1 since that worked best for me. You can set it
# to zero to make the layers non-residual.
# '''
# return tf.keras.Sequential([
# tf.keras.layers.Lambda(data_normalizer),
# layers.ResConvolution2D(
# filters=filters,
# kernel_size=3,
# padding='SAME',
# activation=tf.nn.relu,
# residual_weight=residual_weight,
# kernel_prior_fn=KernelPrior(prior_stddev).conv,
# kernel_divergence_fn=weighted_divergence_fn(log_divergence_weight),
# bias_posterior_fn=tfp.layers.default_mean_field_normal_fn(
# is_singular=False, loc_initializer=tf.zeros_initializer()),
# bias_divergence_fn=weighted_divergence_fn(log_divergence_weight)),
# layers.ResConvolution2D(
# filters=filters,
# kernel_size=5,
# padding='SAME',
# activation=tf.nn.relu,
# residual_weight=residual_weight,
# kernel_prior_fn=KernelPrior(prior_stddev).conv,
# kernel_divergence_fn=weighted_divergence_fn(
# log_log_divergence_weight),
# bias_posterior_fn=tfp.layers.default_mean_field_normal_fn(
# is_singular=False, loc_initializer=tf.zeros_initializer()),
# bias_divergence_fn=weighted_divergence_fn(log_divergence_weight)),
# tf.keras.layers.AveragePooling2D(pool_size=[2, 2],
# strides=[2, 2],
# padding='SAME'),
# tf.keras.layers.Flatten(),
# tfp.layers.DenseFlipout(
# num_actions,
# kernel_prior_fn=KernelPrior(prior_stddev).output,
# kernel_divergence_fn=weighted_divergence_fn(log_divergence_weight),
# bias_posterior_fn=tfp.layers.default_mean_field_normal_fn(
# is_singular=False, loc_initializer=tf.zeros_initializer()),
# bias_divergence_fn=weighted_divergence_fn(log_divergence_weight))
# ])
| [
"numpy.abs",
"numpy.full",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.math.reduce_std",
"numpy.expand_dims",
"tensorflow.reduce_mean",
"numpy.maximum",
"tensorflow.cast",
"research2018.data.batch",
"tensorflow.zeros"
] | [((3464, 3499), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (3497, 3499), True, 'import tensorflow as tf\n'), ((3539, 3566), 'numpy.expand_dims', 'np.expand_dims', (['x_train', '(-1)'], {}), '(x_train, -1)\n', (3553, 3566), True, 'import numpy as np\n'), ((2274, 2306), 'tensorflow.zeros', 'tf.zeros', (['[self.reward.shape[1]]'], {}), '([self.reward.shape[1]])\n', (2282, 2306), True, 'import tensorflow as tf\n'), ((2325, 2357), 'tensorflow.zeros', 'tf.zeros', (['[self.reward.shape[1]]'], {}), '([self.reward.shape[1]])\n', (2333, 2357), True, 'import tensorflow as tf\n'), ((733, 755), 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), '(x, tf.float32)\n', (740, 755), True, 'import tensorflow as tf\n'), ((2643, 2674), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (2657, 2674), True, 'import tensorflow as tf\n'), ((2691, 2726), 'tensorflow.math.reduce_std', 'tf.math.reduce_std', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (2709, 2726), True, 'import tensorflow as tf\n'), ((2474, 2496), 'research2018.data.batch', 'data.batch', (['batch_size'], {}), '(batch_size)\n', (2484, 2496), False, 'from research2018 import data\n'), ((3083, 3105), 'research2018.data.batch', 'data.batch', (['batch_size'], {}), '(batch_size)\n', (3093, 3105), False, 'from research2018 import data\n'), ((2752, 2777), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['m'], {'axis': '(0)'}), '(m, axis=0)\n', (2766, 2777), True, 'import tensorflow as tf\n'), ((2825, 2850), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['s'], {'axis': '(0)'}), '(s, axis=0)\n', (2839, 2850), True, 'import tensorflow as tf\n'), ((289, 298), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (295, 298), True, 'import numpy as np\n'), ((368, 377), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (374, 377), True, 'import numpy as np\n'), ((1245, 1278), 'numpy.maximum', 'np.maximum', (['(img + perturbation)', 
'(0)'], {}), '(img + perturbation, 0)\n', (1255, 1278), True, 'import numpy as np\n'), ((1876, 1909), 'numpy.full', 'np.full', (['y.shape', 'constant_reward'], {}), '(y.shape, constant_reward)\n', (1883, 1909), True, 'import numpy as np\n')] |
#--------------------------------------------------------------------------
# NAME : spiketrains_utils.py:
# DESCRIPTION : routines for generating and binning time series, extracting
# information and generating surrogates
# AUTHOR : <NAME>
# CREATED : September 12, 2012
#--------------------------------------------------------------------------
import numpy as np
import quantities as pq
import neo
def spike_dithering(x, dither, n=1, decimals=None, edges='['):
"""
Generates surrogates of a spike train by spike dithering.
The surrogates are obtained by uniformly dithering times around the
original position. The dithering is performed independently for each
surrogate.
The surrogates retain the t_start and t_stop of the original spike train.
Spikes moved beyond this range are lost or moved to the range's ends,
depending on the parameter edge.
Parameters
----------
x : SpikeTrain
the spike train from which to generate the surrogates
dither : Quantity
amount of dithering. A spike at time t is placed randomly within
]t-dither, t+dither[.
n : int (optional)
number of surrogates to be generated.
Default: 1
decimals : int or None (optional)
number of decimal points for every spike time in the surrogates
If None, machine precision is used.
Default: None
edges : str (optional)
For surrogate spikes falling outside the range [x.t_start, x.t_stop),
whether to drop them out (for edges = '[' or 'cliff') or set
that to the range's closest end (for edges = ']' or 'wall').
Default: '['
Returns
-------
list of SpikeTrain
a list of spike trains, each obtained from x by randomly dithering
its spikes. The range of the surrogate spike trains is the same as x.
Example
-------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
>>> print spike_dithering(st, dither = 20*pq.ms)
[<SpikeTrain(array([ 96.53801903, 248.57047376, 601.48865767,
815.67209811]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print spike_dithering(st, dither = 20*pq.ms, n=2)
[<SpikeTrain(array([ 104.24942044, 246.0317873 , 584.55938657,
818.84446913]) * ms, [0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 111.36693058, 235.15750163, 618.87388515,
786.1807108 ]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print spike_dithering(st, dither = 20*pq.ms, decimals=0)
[<SpikeTrain(array([ 81., 242., 595., 799.]) * ms,
[0.0 ms, 1000.0 ms])>]
"""
# Transform x into a Quantity object (needed for matrix algebra)
data = x.view(pq.Quantity)
# Main: generate the surrogates
surr = data.reshape((1, len(data))) + 2 * dither * \
np.random.random_sample((n, len(data))) - dither
# Round the surrogate data to decimal position, if requested
if decimals is not None:
surr = surr.round(decimals)
if edges in (']', 'wall'):
# Move all spikes outside [x.t_start, x.t_stop] to the range's ends
surr = np.minimum(np.maximum(surr.base,
(x.t_start / x.units).base), (x.t_stop / x.units).base) * x.units
elif edges in ('[', 'cliff'):
# Leave out all spikes outside [x.t_start, x.t_stop]
Tstart, Tstop = (x.t_start / x.units).base, (x.t_stop / x.units).base
surr = [s[np.all([s >= Tstart, s < Tstop], axis=0)] * x.units
for s in surr.base]
# Return the surrogates as SpikeTrains
return [neo.SpikeTrain(s, t_start=x.t_start, t_stop=x.t_stop).rescale(
x.units) for s in surr]
def spike_time_rand(x, n=1, decimals=None):
"""
Generates surrogates of a spike trains by spike time randomisation.
The surrogates are obtained by keeping the spike count of the original
spike train x, but placing them randomly into the interval
[x.t_start, x.t_stop].
This generates independent Poisson SpikeTrains (exponentially distributed
inter-spike intervals) while keeping the spike count as in x.
Parameters
----------
x : SpikeTrain
the spike train from which to generate the surrogates
n : int (optional)
number of surrogates to be generated.
Default: 1
decimals : int or None (optional)
number of decimal points for every spike time in the surrogates
If None, machine precision is used.
Default: None
Returns
-------
list of SpikeTrain
a list of spike trains, each obtained from x by randomly dithering
its spikes. The range of the surrogate spike trains is the same as x.
Example
-------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
>>> print spike_time_rand(st)
[<SpikeTrain(array([ 131.23574603, 262.05062963, 549.84371387,
940.80503832]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print spike_time_rand(st, n=2)
[<SpikeTrain(array([ 84.53274955, 431.54011743, 733.09605806,
852.32426583]) * ms, [0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 197.74596726, 528.93517359, 567.44599968,
775.97843799]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print spike_time_rand(st, decimals=0)
[<SpikeTrain(array([ 29., 667., 720., 774.]) * ms,
[0.0 ms, 1000.0 ms])>]
"""
# Create surrogate spike trains as rows of a Quantity array
sts = ((x.t_stop - x.t_start) * np.random.random(size=(n, len(x))) + \
x.t_start).rescale(x.units)
# Round the surrogate data to decimal position, if requested
if decimals is not None:
sts = sts.round(decimals)
# Convert the Quantity array to a list of SpikeTrains, and return them
return [neo.SpikeTrain(np.sort(st), t_start=x.t_start, t_stop=x.t_stop)
for st in sts]
def isi_shuffling(x, n=1, decimals=None):
"""
Generates surrogates of a spike trains by inter-spike-interval (ISI)
shuffling.
The surrogates are obtained by keeping the randomly sorting the ISIs of
the original spike train x.
This generates independent SpikeTrains with same ISI distribution
and spike count as in x, while destroying temporal dependencies and
firing rate profile.
Parameters
----------
x : SpikeTrain
the spike train from which to generate the surrogates
n : int (optional)
number of surrogates to be generated.
Default: 1
decimals : int or None (optional)
number of decimal points for every spike time in the surrogates
If None, machine precision is used.
Default: None
Returns
-------
list of SpikeTrain
a list of spike trains, each obtained from x by randomly ISI shuffling.
The range of the surrogate spike trains is the same as x.
Example
-------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
>>> print isi_shuffling(st)
[<SpikeTrain(array([ 200., 350., 700., 800.]) * ms,
[0.0 ms, 1000.0 ms])>]
>>> print isi_shuffling(st, n=2)
[<SpikeTrain(array([ 100., 300., 450., 800.]) * ms,
[0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 200., 350., 700., 800.]) * ms,
[0.0 ms, 1000.0 ms])>]
"""
# Compute ISIs of x as a numpy array (meant in units of x)
x_dl = x.magnitude
if len(x) > 0:
isi0 = x[0] - x.t_start
ISIs = np.hstack([isi0.magnitude, np.diff(x_dl)])
# Round the ISIs to decimal position, if requested
if decimals is not None:
ISIs = ISIs.round(decimals)
# Create list of surrogate spike trains by random ISI permutation
sts = []
for i in xrange(n):
surr_times = np.cumsum(np.random.permutation(ISIs)) * x.units + \
x.t_start
sts.append(neo.SpikeTrain(
surr_times, t_start=x.t_start, t_stop=x.t_stop))
else:
sts = []
empty_train = neo.SpikeTrain([]*x.units, t_start=x.t_start,
t_stop=x.t_stop)
for i in xrange(n):
sts.append(empty_train)
return sts
def train_shifting(x, shift, n=1, decimals=None, edges='['):
"""
Generates surrogates of a spike trains by spike train shifting.
The surrogates are obtained by shifting the whole spike train by a
random amount (independent for each surrogate). Thus, ISIs and temporal
correlations within the spike train are kept. For small shifts, the
firing rate profile is also kept with reasonable accuracy.
The surrogates retain the t_start and t_stop of the original spike train.
Spikes moved beyond this range are lost or moved to the range's ends,
depending on the parameter edge.
Parameters
----------
x : SpikeTrain
the spike train from which to generate the surrogates
shift : Quantity
amount of shift. x is shifted by a random amount uniformly drawn
from the range ]-shift, +shift[.
n : int (optional)
number of surrogates to be generated.
Default: 1
decimals : int or None (optional)
number of decimal points for every spike time in the surrogates
If None, machine precision is used.
Default: None
edges : str (optional)
For surrogate spikes falling outside the range [x.t_start, x.t_stop),
whether to drop them out (for edges = '[' or 'cliff') or set
that to the range's closest end (for edges = ']' or 'wall').
Default: '['
Returns
-------
list of SpikeTrain
a list of spike trains, each obtained from x by randomly dithering
its spikes. The range of the surrogate spike trains is the same as x.
Example
-------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
>>>
>>> print train_shifting(st, shift = 20*pq.ms)
[<SpikeTrain(array([ 96.53801903, 248.57047376, 601.48865767,
815.67209811]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print train_shifting(st, shift = 20*pq.ms, n=2)
[<SpikeTrain(array([ 92.89084054, 242.89084054, 592.89084054,
792.89084054]) * ms, [0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 84.61079043, 234.61079043, 584.61079043,
784.61079043]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print train_shifting(st, shift = 20*pq.ms, decimals=0)
[<SpikeTrain(array([ 82., 232., 582., 782.]) * ms,
[0.0 ms, 1000.0 ms])>]
"""
# Transform x into a Quantity object (needed for matrix algebra)
data = x.view(pq.Quantity)
# Main: generate the surrogates by spike train shifting
surr = data.reshape((1, len(data))) + 2 * shift * \
np.random.random_sample((n, 1)) - shift
# Round the surrogate data to decimal position, if requested
if decimals is not None:
surr = surr.round(decimals)
if edges in (']', 'wall'):
# Move all spikes outside [x.t_start, x.t_stop] to the range's ends
surr = np.minimum(np.maximum(surr.base,
(x.t_start / x.units).base), (x.t_stop / x.units).base) * x.units
elif edges in ('[', 'cliff'):
# Leave out all spikes outside [x.t_start, x.t_stop]
Tstart, Tstop = (x.t_start / x.units).base, (x.t_stop / x.units).base
surr = [s[np.all([s >= Tstart, s < Tstop], axis=0)] * x.units
for s in surr.base]
# Return the surrogates as SpikeTrains
return [neo.SpikeTrain(s, t_start=x.t_start, t_stop=x.t_stop).rescale(
x.units) for s in surr]
def spike_jittering(x, binsize, n=1, decimals=None, edges='['):
"""
Generates surrogates of a spike train by spike jittering.
The surrogates are obtained by defining adjacent time bins spanning the
spike train range, and random re-positioning (independently for each
surrogate) each spike in the time bin it falls into.
The surrogates retain the t_start and t_stop of the original spike train.
Note that within each time bin the surrogate spike trains are locally
Poissonian (the inter-spike-interval are exponentially distributed).
Parameters
----------
x : SpikeTrain
the spike train from which to generate the surrogates
binsize : Quantity
size of the time bins within which to randomise the spike times.
Note: the last bin arrives until x.t_stop and might have width different
than binsize.
n : int (optional)
number of surrogates to be generated.
Default: 1
decimals : int or None (optional)
number of decimal points for every spike time in the surrogates
If None, machine precision is used.
Default: None
Returns
-------
list of SpikeTrain
a list of spike trains, each obtained from x by randomly replacing its
spikes within bins of user-defined width.
The range of the surrogate spike trains is the same as x.
Example
-------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([80, 150, 320, 480]*pq.ms, t_stop=1*pq.s)
>>> print spike_jittering(st, binsize=100*pq.ms)
[<SpikeTrain(array([ 98.82898293, 178.45805954, 346.93993867,
461.34268507]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print spike_jittering(st, binsize=100*pq.ms, n=2)
[<SpikeTrain(array([ 97.15720041, 199.06945744, 397.51928207,
402.40065162]) * ms, [0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 80.74513157, 173.69371317, 338.05860962,
495.48869981]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print spike_jittering(st, binsize=100*pq.ms, decimals=0)
[<SpikeTrain(array([ 4.55064897e-01, 1.31927046e+02, 3.57846265e+02,
4.69370604e+02]) * ms, [0.0 ms, 1000.0 ms])>]
"""
# Define standard time unit; all time Quantities are converted to
# scalars after being rescaled to this unit, to use the power of numpy
std_unit = binsize.units
# Compute bin edges for the jittering procedure
# !: the last bin arrives until x.t_stop and might have size != binsize
start_dl = x.t_start.rescale(std_unit).magnitude
stop_dl = x.t_stop.rescale(std_unit).magnitude
bin_edges = start_dl + np.arange(start_dl, stop_dl, binsize.magnitude)
bin_edges = np.hstack([bin_edges, stop_dl])
# Create n surrogates with spikes randomly placed in the interval (0,1)
surr_poiss01 = np.random.random_sample((n, len(x)))
# Compute the bin id of each spike
bin_ids = np.array(
(x.view(pq.Quantity) / binsize).rescale(pq.dimensionless).magnitude,
dtype=int)
# Compute the size of each time bin (as a numpy array)
bin_sizes_dl = np.diff(bin_edges)
# For each spike compute its offset (the left end of the bin it falls
# into) and the size of the bin it falls into
offsets = start_dl + np.array([bin_edges[bin_id] for bin_id in bin_ids])
dilats = np.array([bin_sizes_dl[bin_id] for bin_id in bin_ids])
# Compute each surrogate by dilatating and shifting each spike s in the
# poisson 0-1 spike trains to dilat * s + offset. Attach time unit again
surr = np.sort(surr_poiss01 * dilats + offsets, axis=1) * std_unit
return [neo.SpikeTrain(s, t_start=x.t_start, t_stop=x.t_stop).rescale(
x.units) for s in surr] | [
"numpy.all",
"numpy.random.random_sample",
"neo.SpikeTrain",
"numpy.hstack",
"numpy.sort",
"numpy.diff",
"numpy.array",
"numpy.maximum",
"numpy.arange",
"numpy.random.permutation"
] | [((14529, 14560), 'numpy.hstack', 'np.hstack', (['[bin_edges, stop_dl]'], {}), '([bin_edges, stop_dl])\n', (14538, 14560), True, 'import numpy as np\n'), ((14933, 14951), 'numpy.diff', 'np.diff', (['bin_edges'], {}), '(bin_edges)\n', (14940, 14951), True, 'import numpy as np\n'), ((15167, 15221), 'numpy.array', 'np.array', (['[bin_sizes_dl[bin_id] for bin_id in bin_ids]'], {}), '([bin_sizes_dl[bin_id] for bin_id in bin_ids])\n', (15175, 15221), True, 'import numpy as np\n'), ((8226, 8290), 'neo.SpikeTrain', 'neo.SpikeTrain', (['([] * x.units)'], {'t_start': 'x.t_start', 't_stop': 'x.t_stop'}), '([] * x.units, t_start=x.t_start, t_stop=x.t_stop)\n', (8240, 8290), False, 'import neo\n'), ((14465, 14512), 'numpy.arange', 'np.arange', (['start_dl', 'stop_dl', 'binsize.magnitude'], {}), '(start_dl, stop_dl, binsize.magnitude)\n', (14474, 14512), True, 'import numpy as np\n'), ((15102, 15153), 'numpy.array', 'np.array', (['[bin_edges[bin_id] for bin_id in bin_ids]'], {}), '([bin_edges[bin_id] for bin_id in bin_ids])\n', (15110, 15153), True, 'import numpy as np\n'), ((15387, 15435), 'numpy.sort', 'np.sort', (['(surr_poiss01 * dilats + offsets)'], {'axis': '(1)'}), '(surr_poiss01 * dilats + offsets, axis=1)\n', (15394, 15435), True, 'import numpy as np\n'), ((5927, 5938), 'numpy.sort', 'np.sort', (['st'], {}), '(st)\n', (5934, 5938), True, 'import numpy as np\n'), ((3194, 3243), 'numpy.maximum', 'np.maximum', (['surr.base', '(x.t_start / x.units).base'], {}), '(surr.base, (x.t_start / x.units).base)\n', (3204, 3243), True, 'import numpy as np\n'), ((3625, 3678), 'neo.SpikeTrain', 'neo.SpikeTrain', (['s'], {'t_start': 'x.t_start', 't_stop': 'x.t_stop'}), '(s, t_start=x.t_start, t_stop=x.t_stop)\n', (3639, 3678), False, 'import neo\n'), ((7699, 7712), 'numpy.diff', 'np.diff', (['x_dl'], {}), '(x_dl)\n', (7706, 7712), True, 'import numpy as np\n'), ((8095, 8157), 'neo.SpikeTrain', 'neo.SpikeTrain', (['surr_times'], {'t_start': 'x.t_start', 't_stop': 'x.t_stop'}), 
'(surr_times, t_start=x.t_start, t_stop=x.t_stop)\n', (8109, 8157), False, 'import neo\n'), ((10983, 11014), 'numpy.random.random_sample', 'np.random.random_sample', (['(n, 1)'], {}), '((n, 1))\n', (11006, 11014), True, 'import numpy as np\n'), ((11288, 11337), 'numpy.maximum', 'np.maximum', (['surr.base', '(x.t_start / x.units).base'], {}), '(surr.base, (x.t_start / x.units).base)\n', (11298, 11337), True, 'import numpy as np\n'), ((11719, 11772), 'neo.SpikeTrain', 'neo.SpikeTrain', (['s'], {'t_start': 'x.t_start', 't_stop': 'x.t_stop'}), '(s, t_start=x.t_start, t_stop=x.t_stop)\n', (11733, 11772), False, 'import neo\n'), ((15460, 15513), 'neo.SpikeTrain', 'neo.SpikeTrain', (['s'], {'t_start': 'x.t_start', 't_stop': 'x.t_stop'}), '(s, t_start=x.t_start, t_stop=x.t_stop)\n', (15474, 15513), False, 'import neo\n'), ((3485, 3525), 'numpy.all', 'np.all', (['[s >= Tstart, s < Tstop]'], {'axis': '(0)'}), '([s >= Tstart, s < Tstop], axis=0)\n', (3491, 3525), True, 'import numpy as np\n'), ((8003, 8030), 'numpy.random.permutation', 'np.random.permutation', (['ISIs'], {}), '(ISIs)\n', (8024, 8030), True, 'import numpy as np\n'), ((11579, 11619), 'numpy.all', 'np.all', (['[s >= Tstart, s < Tstop]'], {'axis': '(0)'}), '([s >= Tstart, s < Tstop], axis=0)\n', (11585, 11619), True, 'import numpy as np\n')] |
"""
Inferring a binomial proportion via exact mathematical analysis.
"""
import sys
import numpy as np
from scipy.stats import beta
from scipy.special import beta as beta_func
import matplotlib.pyplot as plt
#from HDIofICDF import *
from scipy.optimize import fmin
#from scipy.stats import *
from scipy.stats import beta
from scipy import special
from scipy import stats
import random
from scipy.special.basic import bernoulli
import math
from pylab import mlab
import json
from code.proghist.gausordering.TwoBinsGausingOrderBetaParamProducer import PHBin,\
PHGauss
"""
1
down vote
For a generalized Beta distribution defined on the interval [a,b][a,b], you have the relations:
μ=aβ+bαα+β,σ2=αβ(b−a)2(α+β)2(1+α+β)
μ=aβ+bαα+β,σ2=αβ(b−a)2(α+β)2(1+α+β)
which can be inverted to give:
α=λμ−ab−a,β=λb−μb−a
α=λμ−ab−a,β=λb−μb−a
where
λ=(μ−a)(b−μ)σ2−1
λ=(μ−a)(b−μ)σ2−1
https://stats.stackexchange.com/questions/12232/calculating-the-parameters-of-a-beta-distribution-using-the-mean-and-variance
"""
#https://en.wikipedia.org/wiki/Normal_distribution
class AdaptableTwoBinsGausOrderingBetaParamProducer(object):
def __init__(self, hist=[ [0.2, 0.45, 10], [0.4, 1.0, 20] ], bins=None, data=None):
pass
#self.b1n = [.1, (0.1/3.)**2]
#self.b2n = [.25, (0.2/3.)**2]
self.data = data
#[lower-bound, upper-bound, count]
#self.hist = [ [0.2, 0.45, 10], [0.4, 1.0, 20] ]
#self.hist = [ [0.2, 0.6, 10] ]
if (bins==None):
self.hist = hist
self.bins = []
for i, h_ in enumerate(self.hist):
x1,x2,size = h_
bin = PHBin ("real", x1, x2, size)
self.bins.append(bin)
else:
self.bins=bins
#self.plotGausses()
# produce 0,1,2 categorical values which can be represented by beta-bernoulli tetas. argmax returns the index of max value.
def betaBernoulli3BinsRead(self, chunkSize=6):
origData = self.data
#categorized data
catData = []
for idx, x in enumerate(origData):
tetas=[]
for bin in self.bins:
if bin.type_=="intersection":
continue
tetas_ = [bin.gausses[i].norm.pdf(x) for i in range(len(bin.gausses))]
tetas = tetas + tetas_
catData.append( np.argmax(tetas))
#makes array chunked by chunkSize
origDataChunkedby6 = [origData[i:i+chunkSize] for i in range(len(origData))[::chunkSize]]
catDataChunkedby6 = np.array([catData[i:i+chunkSize] for i in range(len(catData))[::chunkSize]]).tolist()
freqs = self.prepareWeightedFreqs(catDataChunkedby6, [10, 20])
binSizes = [catData.count(i) for i in range(6)]
#print ("bin sizes", binSizes)
return {"binSizes":binSizes, "origDataChunkedBy6":origDataChunkedby6, "catDataChunkedBy6":catDataChunkedby6, "freqs":freqs};
def prepareWeightedFreqs(self, chunkedArr, binsHeights):
pass
ratio = ( 1.0 * binsHeights[0] ) / binsHeights[1]
freqs = []
for i, chunk in enumerate(chunkedArr):
freq=[]
for k,c in enumerate(chunk):
if (k>2):#if it is in second bin, apply ratio, this results in removing effect of bin height differencies on similarity check
freq.append(chunk.count(k) * ratio)
else:
freq.append(chunk.count(k))
freqs.append(freq)
return freqs
def normalize(self, bins):
s = sum(bins)
a = map(lambda x: float(x)/s, bins)
return list(a)
# replicate of test7. returns an array whose elements indicate the changes in bins and among 2 bins.
# for each chunk having 6 categorical value, this operation is done.
# [[bin1_change, bin2_change, bin1-bin2-change]]
def determineChangeBtwTwoBins(self, bins):
#between two bins
CHANGE_LABELS_BTW_BINS=["BECOMING_FAR", "SUPPORTS_INCREASE", "MERGING"]
CHANGE_LABELS_OF_BIN=["SPLITTING", "SUPPORTS_CONCEPT", "SPLITTING"]
#hist=[[0,0.2,10],[0.15,0.35,20],[0.3,0.40,30] ]
#first element added artificially. it shows baseline entropy. entropy higher than this value means there is more uncertainty,
#lower than that mean there is a definite move among bins.
#bins = [[0, 1.0, 0, 0, 1.0, 0.0], [0, 2, 0, 1.0, 1.0, 0.0], [0, 1, 0, 0.0, 2.5, 0.0], [0, 1, 0, 0.5, 2.0, 0.0], [0, 3, 0, 0.0, 1.5, 0.0]]
#bins = [[0, 1.0, 0, 0, 1.0, 0.0], [0, 2, 2, 2.0, 1.0, 0.0], [2, 1, 0, 0.0, 2.5, 4.0], [0, 1, 0, 0.5, 2.0, 0.0], [2, 0, 2, 0.0, 1.5, 0.0]]
#bins_probs = [[self.normalize(x)] for x in bins]
bins_probs =[]
for bin in bins:
if sum(bin)==0:
continue
n = self.normalize(bin)
bins_probs.append([n])
#bins_entropy = [stats.entropy(x) for x in bins_probs]
#bins = self.normalize(bins[0])
#print ("entropy",bins_entropy)
#f = np.vectorize(self.logTransform, otypes=[np.float])
changesInBins=[]
for bin in bins_probs:
binchanges=[]
X = np.array(bin)
Xt = np.transpose(X)
P = np.dot(Xt,X)
#P = f(P)
b1b2_corr = P[0:3,3:6]
b1b2 = np.fliplr(b1b2_corr)
diagonals = b1b2.diagonal()
if (len(diagonals)==0):
continue
argmax_idx = np.argmax(diagonals)
#print (CHANGE_LABELS_BTW_BINS[argmax_idx]) #
binchanges.append(CHANGE_LABELS_BTW_BINS[argmax_idx])
#np.apply_along_axis(self.logTransform,1, P )
b1_corr = P[0:3,0:3]
b1 = np.fliplr(b1_corr)
diagonals = b1.diagonal()
argmax_idx = np.argmax(diagonals)
#print (CHANGE_LABELS_OF_BIN[argmax_idx])
binchanges.append(CHANGE_LABELS_OF_BIN[argmax_idx])
b2_corr = P[3:6,3:6]
b2 = np.fliplr(b2_corr)
diagonals = b2.diagonal()
argmax_idx = np.argmax(diagonals)
#print (CHANGE_LABELS_OF_BIN[argmax_idx])
binchanges.append(CHANGE_LABELS_OF_BIN[argmax_idx])
changesInBins.append(binchanges)
return changesInBins
def twoBinsProgHistData(self, chunkSize=6):
data = self.betaBernoulli3BinsRead(chunkSize=chunkSize)
changes = self.determineChangeBtwTwoBins(data["freqs"]) #frequencies
data["changes"]=changes
return data
def start(self):
#self.test0()
#self.testB01B025()
#self.argmaxInBinsTest()
#self.read(datacount=10)
#self.betaBernoulli3BinsRvsTest()
#self.betaBernoulli3BinsReadTest()
data = self.twoBinsProgHistData(dataCount=10, chunkSize=6)
print (data)
# [[bin_lowerbound, bin_upperbound, bin_popul_size]]
#definition of histogram : [ [0.2, 0.45, 10], [0.4, 1.0, 20] ]
#bpp = TwoBinsGausOrderingBetaParamProducer(hist=[ [0.2, 0.45, 10], [0.4, 1.0, 20] ])
#bpp.start()
| [
"numpy.fliplr",
"numpy.argmax",
"code.proghist.gausordering.TwoBinsGausingOrderBetaParamProducer.PHBin",
"numpy.array",
"numpy.dot",
"numpy.transpose"
] | [((5337, 5350), 'numpy.array', 'np.array', (['bin'], {}), '(bin)\n', (5345, 5350), True, 'import numpy as np\n'), ((5368, 5383), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (5380, 5383), True, 'import numpy as np\n'), ((5400, 5413), 'numpy.dot', 'np.dot', (['Xt', 'X'], {}), '(Xt, X)\n', (5406, 5413), True, 'import numpy as np\n'), ((5502, 5522), 'numpy.fliplr', 'np.fliplr', (['b1b2_corr'], {}), '(b1b2_corr)\n', (5511, 5522), True, 'import numpy as np\n'), ((5649, 5669), 'numpy.argmax', 'np.argmax', (['diagonals'], {}), '(diagonals)\n', (5658, 5669), True, 'import numpy as np\n'), ((5915, 5933), 'numpy.fliplr', 'np.fliplr', (['b1_corr'], {}), '(b1_corr)\n', (5924, 5933), True, 'import numpy as np\n'), ((5997, 6017), 'numpy.argmax', 'np.argmax', (['diagonals'], {}), '(diagonals)\n', (6006, 6017), True, 'import numpy as np\n'), ((6200, 6218), 'numpy.fliplr', 'np.fliplr', (['b2_corr'], {}), '(b2_corr)\n', (6209, 6218), True, 'import numpy as np\n'), ((6282, 6302), 'numpy.argmax', 'np.argmax', (['diagonals'], {}), '(diagonals)\n', (6291, 6302), True, 'import numpy as np\n'), ((1679, 1706), 'code.proghist.gausordering.TwoBinsGausingOrderBetaParamProducer.PHBin', 'PHBin', (['"""real"""', 'x1', 'x2', 'size'], {}), "('real', x1, x2, size)\n", (1684, 1706), False, 'from code.proghist.gausordering.TwoBinsGausingOrderBetaParamProducer import PHBin, PHGauss\n'), ((2417, 2433), 'numpy.argmax', 'np.argmax', (['tetas'], {}), '(tetas)\n', (2426, 2433), True, 'import numpy as np\n')] |
import sys, os
import logging
sys.path.append("/workspace/WPBERT/Shared_files/*")
logger = logging.getLogger(__name__)
os.environ["CUDA_VISIBLE_DEVICES"]=f"{sys.argv[2]}"
import joblib
import ResDAVEnetVQ.run_utils as RDV
import ResDAVEnetVQ.dataloaders.utils as utils
import torch
import torch.nn as nn
import numpy as np
from cpc_feature_reader_MARG import CpcFeatureReader
class embed():
    """Joint token/phone embedding extractor.

    Combines a ResDAVEnet-VQ audio encoder (its VQ3 token stream) with CPC
    features quantized by a pre-trained k-means model, and merges the two
    streams into a single BERT-style input sequence framed by [CLS]/[SEP]
    vectors.
    """
    # Hard-coded paths to the pre-trained components (babymind setup).
    token_pretrain_path = "/data/babymind/pretrained_models/ResDAVEnet323"
    phone_pretrain_path = "/data/babymind/cpc_kmeans/cpc_recon_BCS_500k.pt"
    kmeans_path = "/data/kmeans/kmeans300.bin"
    audio_conf = {'window_stride': 0.01, 'use_raw_length': True}
    def __init__(self, device):
        """Load every pre-trained component onto `device`.

        Parameters
        ----------
        device : torch.device
            Device hosting the token encoder and the [CLS]/[SEP] vectors.
        """
        self.device = device
        # ResDAVEnet-VQ encoder that produces the VQ3 token stream.
        self.token_encoder = RDV.load_audio_model_and_state(
            exp_dir="/data/babymind/pretrained_models/ResDAVEnet323").to(self.device)
        self.token_encoder.eval()
        # Codebook of the 'quant3' layer as a CPU numpy array, used later to
        # map token embeddings back to integer codes in search_codebook().
        self.token_codebook = self.token_encoder.get_embedding(
            'quant3').cpu().numpy()
        # Fixed seed so the random [CLS]/[SEP] vectors are reproducible.
        np.random.seed(50)
        self.cls = torch.from_numpy(np.random.uniform(
            0, 5, size=256)).float().to(self.device)
        self.sep = torch.from_numpy(np.random.uniform(
            0, 5, size=256)).float().to(self.device)
        # Sentinel phone id used at the [CLS]/[SEP] positions.
        self.phone_padding = 1024
        self.phone_encoder = CpcFeatureReader(
            checkpoint_path=self.phone_pretrain_path, layer=None, device=self.device)
        self.kmeans_model = joblib.load(open(self.kmeans_path, "rb"))
        self.kmeans_model.verbose = False
        return
    ''' Compare using numpy
    def search_codebook(self, codebook, key):
        codebook_size = codebook.shape[0]
        for i in range(0, codebook_size):
            if np.array_equal(key.float(), codebook[i].float()):
                return i
        return -1
    '''
    '''
    # Compare using mse
    def search_codebook(self, codebook, key):
        codebook_size = codebook.shape[0]
        mean = 100000
        mean_index = -1
        for i in range(0, codebook_size):
            sum = torch.((codebook[i] - key)**2)
            if sum < mean:
                mean = sum
                mean_index = i
        return mean_index
    '''
    '''
    def search_codebook(self, codebook, key):
        codebook_size = codebook.shape[0]
        if torch.equal(self.cls, key): return 1024
        elif torch.equal(self.sep, key): return 1025
        for i in range(0, codebook_size):
            if torch.allclose(key, codebook[i], 0, 1e-2):
                return i
        return -1
    '''
    def search_codebook(self, codebook, key):
        # Map an embedding vector back to its integer code: 1024 for [CLS],
        # 1025 for [SEP], otherwise the index of the first codebook row that
        # matches within an absolute tolerance of 1e-2; -1 when none matches.
        # NOTE(review): self.cls/self.sep live on self.device while `key`
        # comes from a CPU numpy array -- confirm the np.array_equal
        # comparison is valid when running on CUDA.
        codebook_size = codebook.shape[0]
        if np.array_equal(self.cls, key): return 1024
        elif np.array_equal(self.sep, key): return 1025
        for i in range(0, codebook_size):
            if np.allclose(key, codebook[i], 0, 1e-2):
                return i
        return -1
    def forward(self, audio, file_path):
        """Encode one utterance into aligned token/phone id sequences.

        Parameters
        ----------
        audio : raw waveform (16 kHz is passed to compute_spectrogram)
        file_path : str
            Used only for logging when the streams cannot be aligned.

        Returns
        -------
        tuple of numpy arrays
            (input_integer, char_embed, start_ids, end_ids); four
            single-zero arrays when alignment fails.
        """
        vq_layer = 'quant3'
        # get token embedding
        token_input, n_frame = utils.compute_spectrogram(
            audio, 16000, self.audio_conf)
        token_input = token_input.unsqueeze(0).to(self.device)
        (_, q_out, preq_out, onehot) = self.token_encoder.get_vq_outputs(
            token_input, vq_layer, True)
        tmp = q_out.squeeze()
        token_embedding = tmp.transpose(0, 1).to(self.device)
        # get CPC feature
        cpc_embedding = self.phone_encoder.get_feats(audio)
        cpc_embedding = cpc_embedding.cpu().numpy()
        # Quantize Using Kmeans
        phone_embedding = self.kmeans_model.predict(cpc_embedding)
        # Align CPC embedding to VQ3 embedding
        # The phone stream is expected at 4x the token rate; repeat the
        # first/last phone frames to absorb length mismatches of 1..4.
        length_diff = 4*token_embedding.size()[0] - phone_embedding.shape[0]
        if length_diff == 3:
            phone_embedding = np.insert(
                phone_embedding, 0, phone_embedding[0], axis=0)
            phone_embedding = np.append(phone_embedding, np.array(
                [phone_embedding[-1], phone_embedding[-1]]))
        elif length_diff == 2:
            phone_embedding = np.insert(
                phone_embedding, 0, phone_embedding[0], axis=0)
            phone_embedding = np.append(phone_embedding, phone_embedding[-1])
        elif length_diff == 1:
            phone_embedding = np.append(phone_embedding, phone_embedding[-1])
        elif length_diff == 4:
            phone_embedding = np.insert(
                phone_embedding, 0, phone_embedding[0], axis=0)
            phone_embedding = np.insert(
                phone_embedding, 0, phone_embedding[0], axis=0)
            phone_embedding = np.append(phone_embedding, np.array(
                [phone_embedding[-1], phone_embedding[-1]]))
        # Truncate when the phone stream overshoots the 4-per-token budget.
        if phone_embedding.shape[0] > token_embedding.size()[0]*4:
            phone_embedding = phone_embedding[:token_embedding.size()[0]*4]
        # Still misaligned: log and return sentinel zeros for this file.
        if phone_embedding.shape[0] != token_embedding.size()[0]*4:
            print(token_embedding.shape)
            print(phone_embedding.shape)
            print(file_path)
            logger.log(logging.WARNING, file_path)
            return np.array([0]), np.array([0]), np.array([0]), np.array([0])
        # merge
        # Run-length merge: collapse consecutive identical token embeddings
        # into a single entry, and record for merged token k the span
        # [start_ids[k], end_ids[k]] of its de-duplicated phone codes.
        token_result = [self.cls]
        phone_result = [self.phone_padding]
        start_ids = [0]
        end_ids = [0]
        result_entry_phone = [phone_embedding[0]]
        result_entry_token = token_embedding[0]
        for i in range(0, len(token_embedding)):
            if not torch.equal(result_entry_token,token_embedding[i]):
                # Token changed: flush the accumulated run, start a new one.
                token_result.append(result_entry_token)
                result_entry_token = token_embedding[i]
                start_ids.append(len(phone_result))
                phone_result.extend(result_entry_phone)
                end_ids.append(len(phone_result)-1)
                result_entry_phone = []
                result_entry_phone.append(phone_embedding[4*i])
                for j in range(1, 4):
                    if result_entry_phone[-1]!=phone_embedding[4*i+j]:
                        result_entry_phone.append(phone_embedding[4*i+j])
            else:
                # Same token: keep appending de-duplicated phone codes.
                for j in range(0, 4):
                    if i == 0 and j == 0:
                        continue
                    if result_entry_phone[-1]!=phone_embedding[4*i+j]:
                        result_entry_phone.append(phone_embedding[4*i+j])
        # for last entry
        token_result.append(result_entry_token)
        start_ids.append(len(phone_result))
        phone_result.extend(result_entry_phone)
        end_ids.append(len(phone_result)-1)
        # for [SEP]
        token_result.append(self.sep)
        start_ids.append(len(phone_result))
        phone_result.append(self.phone_padding)
        end_ids.append(len(phone_result)-1)
        input_embed = torch.stack(token_result).cpu().numpy()
        char_embed = np.array(phone_result)
        start_ids = np.array(start_ids)
        end_ids = np.array(end_ids)
        # Convert token embeddings back to integer codebook ids
        # (1024 = [CLS], 1025 = [SEP] by position).
        input_integer = []
        for i in range(0, input_embed.shape[0]):
            if i==0: input_integer.append(1024)
            elif i==input_embed.shape[0]-1: input_integer.append(1025)
            else:
                input_integer.append(self.search_codebook(
                    self.token_codebook, input_embed[i]))
        '''
        # for debugging
        print(self.cls)
        print(self.sep)
        print(input_embed)
        print(char_embed)
        print(start_ids)
        print(end_ids)
        print(input_embed.shape)
        print(char_embed.shape)
        print(start_ids.shape)
        print(end_ids.shape)
        '''
        return np.array(input_integer), char_embed, start_ids, end_ids
if __name__ == "__main__":
    # readpath = f"/data/babymind/LibriSpeech_filenames_0.txt"
    # argv[1] selects the shard of the LibriSpeech filename list to process;
    # argv[2] was consumed earlier to pin CUDA_VISIBLE_DEVICES.
    readpath = f"/data/babymind/LibriSpeech_filenames_{sys.argv[1]}.txt"
    savepath = "/data/babymind/LibriSpeech_cpc_embed"
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    #device = torch.device(f"cuda:{sys.argv[2]}" if torch.cuda.is_available() else "cpu")
    embedding = embed(device = device)
    cnt = 0
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)
    log_path = savepath+'/log.txt'
    file_handler = logging.FileHandler(log_path)
    logger.addHandler(file_handler)
    read = open(readpath, 'r')
    while True:
        # One .npz path per line; stop at EOF.
        file_path = read.readline()
        if not file_path: break
        file_path = file_path.replace("\n", "")
        audiofile = np.load(file_path)
        audio = audiofile['audio']
        # input_embed, char_embed, start_ids, end_ids = embedding.forward(audio)
        input_embed, char_embed, start_ids, end_ids = embedding.forward(audio, file_path)
        # Mirror the source directory layout under LibriSpeech_cpc_embed.
        list_to_make_new_path = file_path.split('/')
        new_dir = ''
        for dir in list_to_make_new_path:
            if dir.find(".npz") != -1:
                continue
            if dir == '':
                continue
            new_dir = new_dir + '/' + dir
        new_dir = new_dir.replace(
            'LibriSpeech', 'LibriSpeech_cpc_embed')
        if not os.path.exists(new_dir):
            os.makedirs(new_dir)
        new_file_name = new_dir + '/' + list_to_make_new_path[-1]
        np.savez(new_file_name, input_embed=input_embed,
                 char_embed=char_embed, start_ids=start_ids, end_ids=end_ids)
        cnt += 1
        if cnt % 2000 == 0:
            logger.info("Read %d files", cnt)
| [
"logging.getLogger",
"numpy.array",
"torch.cuda.is_available",
"cpc_feature_reader_MARG.CpcFeatureReader",
"sys.path.append",
"os.path.exists",
"numpy.savez",
"ResDAVEnetVQ.dataloaders.utils.compute_spectrogram",
"logging.FileHandler",
"numpy.random.seed",
"ResDAVEnetVQ.run_utils.load_audio_mode... | [((31, 82), 'sys.path.append', 'sys.path.append', (['"""/workspace/WPBERT/Shared_files/*"""'], {}), "('/workspace/WPBERT/Shared_files/*')\n", (46, 82), False, 'import sys, os\n'), ((92, 119), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (109, 119), False, 'import logging\n'), ((8114, 8257), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (8133, 8257), False, 'import logging\n'), ((8350, 8379), 'logging.FileHandler', 'logging.FileHandler', (['log_path'], {}), '(log_path)\n', (8369, 8379), False, 'import logging\n'), ((1021, 1039), 'numpy.random.seed', 'np.random.seed', (['(50)'], {}), '(50)\n', (1035, 1039), True, 'import numpy as np\n'), ((1321, 1415), 'cpc_feature_reader_MARG.CpcFeatureReader', 'CpcFeatureReader', ([], {'checkpoint_path': 'self.phone_pretrain_path', 'layer': 'None', 'device': 'self.device'}), '(checkpoint_path=self.phone_pretrain_path, layer=None,\n device=self.device)\n', (1337, 1415), False, 'from cpc_feature_reader_MARG import CpcFeatureReader\n'), ((2665, 2694), 'numpy.array_equal', 'np.array_equal', (['self.cls', 'key'], {}), '(self.cls, key)\n', (2679, 2694), True, 'import numpy as np\n'), ((3038, 3094), 'ResDAVEnetVQ.dataloaders.utils.compute_spectrogram', 'utils.compute_spectrogram', (['audio', '(16000)', 'self.audio_conf'], {}), '(audio, 16000, self.audio_conf)\n', (3063, 3094), True, 'import ResDAVEnetVQ.dataloaders.utils as utils\n'), ((6847, 6869), 'numpy.array', 'np.array', (['phone_result'], {}), '(phone_result)\n', (6855, 6869), True, 'import numpy as np\n'), ((6890, 6909), 'numpy.array', 'np.array', (['start_ids'], {}), '(start_ids)\n', (6898, 6909), True, 'import numpy as np\n'), ((6928, 6945), 
'numpy.array', 'np.array', (['end_ids'], {}), '(end_ids)\n', (6936, 6945), True, 'import numpy as np\n'), ((8602, 8620), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (8609, 8620), True, 'import numpy as np\n'), ((9340, 9453), 'numpy.savez', 'np.savez', (['new_file_name'], {'input_embed': 'input_embed', 'char_embed': 'char_embed', 'start_ids': 'start_ids', 'end_ids': 'end_ids'}), '(new_file_name, input_embed=input_embed, char_embed=char_embed,\n start_ids=start_ids, end_ids=end_ids)\n', (9348, 9453), True, 'import numpy as np\n'), ((2721, 2750), 'numpy.array_equal', 'np.array_equal', (['self.sep', 'key'], {}), '(self.sep, key)\n', (2735, 2750), True, 'import numpy as np\n'), ((2822, 2860), 'numpy.allclose', 'np.allclose', (['key', 'codebook[i]', '(0)', '(0.01)'], {}), '(key, codebook[i], 0, 0.01)\n', (2833, 2860), True, 'import numpy as np\n'), ((3802, 3859), 'numpy.insert', 'np.insert', (['phone_embedding', '(0)', 'phone_embedding[0]'], {'axis': '(0)'}), '(phone_embedding, 0, phone_embedding[0], axis=0)\n', (3811, 3859), True, 'import numpy as np\n'), ((7617, 7640), 'numpy.array', 'np.array', (['input_integer'], {}), '(input_integer)\n', (7625, 7640), True, 'import numpy as np\n'), ((7930, 7955), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7953, 7955), False, 'import torch\n'), ((9206, 9229), 'os.path.exists', 'os.path.exists', (['new_dir'], {}), '(new_dir)\n', (9220, 9229), False, 'import sys, os\n'), ((9243, 9263), 'os.makedirs', 'os.makedirs', (['new_dir'], {}), '(new_dir)\n', (9254, 9263), False, 'import sys, os\n'), ((760, 853), 'ResDAVEnetVQ.run_utils.load_audio_model_and_state', 'RDV.load_audio_model_and_state', ([], {'exp_dir': '"""/data/babymind/pretrained_models/ResDAVEnet323"""'}), "(exp_dir=\n '/data/babymind/pretrained_models/ResDAVEnet323')\n", (790, 853), True, 'import ResDAVEnetVQ.run_utils as RDV\n'), ((3934, 3986), 'numpy.array', 'np.array', (['[phone_embedding[-1], phone_embedding[-1]]'], {}), 
'([phone_embedding[-1], phone_embedding[-1]])\n', (3942, 3986), True, 'import numpy as np\n'), ((4066, 4123), 'numpy.insert', 'np.insert', (['phone_embedding', '(0)', 'phone_embedding[0]'], {'axis': '(0)'}), '(phone_embedding, 0, phone_embedding[0], axis=0)\n', (4075, 4123), True, 'import numpy as np\n'), ((4171, 4218), 'numpy.append', 'np.append', (['phone_embedding', 'phone_embedding[-1]'], {}), '(phone_embedding, phone_embedding[-1])\n', (4180, 4218), True, 'import numpy as np\n'), ((5091, 5104), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5099, 5104), True, 'import numpy as np\n'), ((5106, 5119), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5114, 5119), True, 'import numpy as np\n'), ((5121, 5134), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5129, 5134), True, 'import numpy as np\n'), ((5136, 5149), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5144, 5149), True, 'import numpy as np\n'), ((5460, 5511), 'torch.equal', 'torch.equal', (['result_entry_token', 'token_embedding[i]'], {}), '(result_entry_token, token_embedding[i])\n', (5471, 5511), False, 'import torch\n'), ((4280, 4327), 'numpy.append', 'np.append', (['phone_embedding', 'phone_embedding[-1]'], {}), '(phone_embedding, phone_embedding[-1])\n', (4289, 4327), True, 'import numpy as np\n'), ((4389, 4446), 'numpy.insert', 'np.insert', (['phone_embedding', '(0)', 'phone_embedding[0]'], {'axis': '(0)'}), '(phone_embedding, 0, phone_embedding[0], axis=0)\n', (4398, 4446), True, 'import numpy as np\n'), ((4494, 4551), 'numpy.insert', 'np.insert', (['phone_embedding', '(0)', 'phone_embedding[0]'], {'axis': '(0)'}), '(phone_embedding, 0, phone_embedding[0], axis=0)\n', (4503, 4551), True, 'import numpy as np\n'), ((6786, 6811), 'torch.stack', 'torch.stack', (['token_result'], {}), '(token_result)\n', (6797, 6811), False, 'import torch\n'), ((1076, 1109), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(5)'], {'size': '(256)'}), '(0, 5, size=256)\n', (1093, 1109), True, 
'import numpy as np\n'), ((1184, 1217), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(5)'], {'size': '(256)'}), '(0, 5, size=256)\n', (1201, 1217), True, 'import numpy as np\n'), ((4626, 4678), 'numpy.array', 'np.array', (['[phone_embedding[-1], phone_embedding[-1]]'], {}), '([phone_embedding[-1], phone_embedding[-1]])\n', (4634, 4678), True, 'import numpy as np\n')] |
# Report near-duplicate image pairs from a prebuilt Annoy dot-product index.
# Usage: script <index_file> <vector_file> <image_list_file>
import numpy as np
from annoy import AnnoyIndex
import os
import sys

# Positional arguments: serialized Annoy index, vector matrix, image list.
index_filename = sys.argv[1]
vector_filename = sys.argv[2]
image_filename = sys.argv[3]

with open(image_filename) as fp:
    images = [line.strip() for line in fp]

# Memory-map the vectors; only the shape is needed here.
vectors = np.load(vector_filename, mmap_mode='r')
dim = vectors.shape[1]

index = AnnoyIndex(dim, metric='dot')
index.load(index_filename)

n = 10            # neighbours to query per item
threshold = 0.95  # minimum dot-product score to report a pair

for target in range(len(vectors)):
    indices, scores = index.get_nns_by_item(target, n, include_distances=True)
    for neighbour, score in zip(indices, scores):
        if neighbour != target and score > threshold:
            print(images[target], images[neighbour], score)
| [
"numpy.load",
"annoy.AnnoyIndex"
] | [((252, 291), 'numpy.load', 'np.load', (['vector_filename'], {'mmap_mode': '"""r"""'}), "(vector_filename, mmap_mode='r')\n", (259, 291), True, 'import numpy as np\n'), ((324, 353), 'annoy.AnnoyIndex', 'AnnoyIndex', (['dim'], {'metric': '"""dot"""'}), "(dim, metric='dot')\n", (334, 353), False, 'from annoy import AnnoyIndex\n')] |
from __future__ import annotations
import numpy
import numpy as np
from numpy.linalg import inv, det, slogdet
class UnivariateGaussian:
    """
    Class for univariate Gaussian Distribution Estimator
    """
    def __init__(self, biased_var: bool = False) -> UnivariateGaussian:
        """
        Estimator for univariate Gaussian mean and variance parameters
        Parameters
        ----------
        biased_var : bool, default=False
            Should fitted estimator of variance be a biased or unbiased estimator
        Attributes
        ----------
        fitted_ : bool
            Initialized as false indicating current estimator instance has not been fitted.
            To be set as True in `UnivariateGaussian.fit` function.
        mu_: float
            Estimated expectation initialized as None. To be set in `UnivariateGaussian.fit`
            function.
        var_: float
            Estimated variance initialized as None. To be set in `UnivariateGaussian.fit`
            function.
        """
        self.biased_ = biased_var
        self.fitted_, self.mu_, self.var_ = False, None, None

    def fit(self, X: np.ndarray) -> UnivariateGaussian:
        """
        Estimate Gaussian expectation and variance from given samples
        Parameters
        ----------
        X: ndarray of shape (n_samples, )
            Training data
        Returns
        -------
        self : returns an instance of self.
        Notes
        -----
        Sets `self.mu_`, `self.var_` attributes according to calculated estimation (where
        estimator is either biased or unbiased). Then sets `self.fitted_` attribute to `True`
        """
        self.mu_ = np.mean(X)
        # Honor the biased/unbiased choice made at construction: ddof=0
        # divides by n (biased), ddof=1 divides by n-1 (unbiased).  The
        # previous implementation always used the biased estimator.
        self.var_ = X.var(ddof=0 if self.biased_ else 1)
        self.fitted_ = True
        return self

    def pdf(self, X: np.ndarray) -> np.ndarray:
        """
        Calculate PDF of observations under Gaussian model with fitted estimators
        Parameters
        ----------
        X: ndarray of shape (n_samples, )
            Samples to calculate PDF for
        Returns
        -------
        pdfs: ndarray of shape (n_samples, )
            Calculated values of given samples for PDF function of N(mu_, var_)
        Raises
        ------
        ValueError: In case function was called prior fitting the model
        """
        if not self.fitted_:
            raise ValueError("Estimator must first be fitted before calling `pdf` function")
        X = np.asarray(X)
        # self.var_ holds a variance; the Gaussian pdf needs the standard
        # deviation sqrt(var) both in the coefficient and the exponent.
        # (The previous version used var_ directly as if it were the std.)
        sigma = np.sqrt(self.var_)
        coefficient = 1.0 / (sigma * np.sqrt(2 * np.pi))
        return coefficient * np.exp(-0.5 * ((X - self.mu_) / sigma) ** 2)

    @staticmethod
    def log_likelihood(mu: float, sigma: float, X: np.ndarray) -> float:
        """
        Calculate the log-likelihood of the data under a specified Gaussian model
        Parameters
        ----------
        mu : float
            Expectation of Gaussian
        sigma : float
            Variance of Gaussian
        X : ndarray of shape (n_samples, )
            Samples to calculate log-likelihood with
        Returns
        -------
        log_likelihood: float
            log-likelihood calculated
        """
        X = np.asarray(X)
        n = len(X)
        # log L = -(n/2) * log(2*pi*sigma) - sum((x - mu)^2) / (2*sigma),
        # with `sigma` interpreted as the variance per the parameter docs.
        # (The previous implementation was a stub returning -mean(X).)
        return -0.5 * n * np.log(2 * np.pi * sigma) - np.sum((X - mu) ** 2) / (2 * sigma)
class MultivariateGaussian:
    """
    Class for multivariate Gaussian Distribution Estimator
    """
    def __init__(self):
        """
        Initialize an instance of multivariate Gaussian estimator
        Attributes
        ----------
        fitted_ : bool
            Initialized as false indicating current estimator instance has not been fitted.
            To be set as True in `MultivariateGaussian.fit` function.
        mu_: ndarray of shape (n_features,)
            Estimated expectation initialized as None. To be set in `MultivariateGaussian.fit`
            function.
        cov_: ndarray of shape (n_features, n_features)
            Estimated covariance initialized as None. To be set in `MultivariateGaussian.fit`
            function.
        """
        self.mu_, self.cov_ = None, None
        self.fitted_ = False

    def fit(self, X: np.ndarray) -> MultivariateGaussian:
        """
        Estimate Gaussian expectation and covariance from given samples
        Parameters
        ----------
        X: ndarray of shape (n_samples, n_features)
            Training data
        Returns
        -------
        self : returns an instance of self
        Notes
        -----
        Sets `self.mu_`, `self.cov_` attributes according to calculated estimation.
        Then sets `self.fitted_` attribute to `True`
        """
        self.mu_ = np.mean(X, axis=0)
        self.cov_ = np.cov(X, None, rowvar=False)
        self.fitted_ = True
        return self

    def pdf(self, X: np.ndarray):
        """
        Calculate PDF of observations under Gaussian model with fitted estimators
        Parameters
        ----------
        X: ndarray of shape (n_samples, n_features)
            Samples to calculate PDF for
        Returns
        -------
        pdfs: ndarray of shape (n_samples, )
            Calculated values of given samples for PDF function of N(mu_, cov_)
        Raises
        ------
        ValueError: In case function was called prior fitting the model
        """
        if not self.fitted_:
            raise ValueError("Estimator must first be fitted before calling `pdf` function")
        cov_inv = np.linalg.inv(self.cov_)
        # det(2*pi*Sigma)^(-1/2) == (2*pi)^(-d/2) * det(Sigma)^(-1/2)
        norm_const = np.linalg.det(2 * np.pi * self.cov_) ** -0.5
        PDFs = []
        for x in X:
            # Mahalanobis quadratic form (x - mu)^T Sigma^-1 (x - mu).
            # Both factors must be centered and the exponent must carry -1/2
            # (the previous version used raw x on the right and exp(+val)).
            centered = np.asarray(x) - self.mu_
            mahalanobis = centered.dot(cov_inv).dot(centered)
            PDFs.append(norm_const * np.exp(-0.5 * mahalanobis))
        return PDFs

    @staticmethod
    def log_likelihood(mu: np.ndarray, cov: np.ndarray, X: np.ndarray) -> float:
        """
        Calculate the log-likelihood of the data under a specified Gaussian model
        Parameters
        ----------
        mu : ndarray of shape (n_features,)
            Expectation of Gaussian
        cov : ndarray of shape (n_features, n_features)
            covariance matrix of Gaussian
        X : ndarray of shape (n_samples, n_features)
            Samples to calculate log-likelihood with
        Returns
        -------
        log_likelihood: float
            log-likelihood calculated over all input data and under given parameters of Gaussian
        """
        no_of_samples, no_of_vars = X.shape
        # sum_i (x_i - mu)^T Sigma^-1 (x_i - mu), vectorized over samples.
        # (Previously the data was neither centered nor multiplied by the
        # inverse covariance, and the det term was missing its n factor.)
        centered = X - mu
        ss = np.sum(centered @ np.linalg.inv(cov) * centered)
        sign, logdet = np.linalg.slogdet(cov)
        return -0.5 * (no_of_samples * no_of_vars * np.log(2 * np.pi)
                       + no_of_samples * logdet
                       + ss)
"numpy.mean",
"numpy.sqrt",
"numpy.log",
"numpy.linalg.det",
"numpy.exp",
"numpy.array",
"numpy.sum",
"numpy.linalg.inv",
"numpy.cov"
] | [((1694, 1704), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (1701, 1704), True, 'import numpy as np\n'), ((4590, 4608), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (4597, 4608), True, 'import numpy as np\n'), ((4628, 4657), 'numpy.cov', 'np.cov', (['X', 'None'], {'rowvar': '(False)'}), '(X, None, rowvar=False)\n', (4634, 4657), True, 'import numpy as np\n'), ((5447, 5471), 'numpy.linalg.inv', 'np.linalg.inv', (['self.cov_'], {}), '(self.cov_)\n', (5460, 5471), True, 'import numpy as np\n'), ((6601, 6620), 'numpy.sum', 'np.sum', (['(X @ cov * X)'], {}), '(X @ cov * X)\n', (6607, 6620), True, 'import numpy as np\n'), ((5586, 5597), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5594, 5597), True, 'import numpy as np\n'), ((2506, 2554), 'numpy.exp', 'np.exp', (['(-0.5 * ((x - self.mu_) / self.var_) ** 2)'], {}), '(-0.5 * ((x - self.mu_) / self.var_) ** 2)\n', (2512, 2554), True, 'import numpy as np\n'), ((5515, 5537), 'numpy.array', 'np.array', (['(x - self.mu_)'], {}), '(x - self.mu_)\n', (5523, 5537), True, 'import numpy as np\n'), ((5551, 5562), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5559, 5562), True, 'import numpy as np\n'), ((5777, 5791), 'numpy.exp', 'np.exp', (['val[0]'], {}), '(val[0])\n', (5783, 5791), True, 'import numpy as np\n'), ((6771, 6788), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (6777, 6788), True, 'import numpy as np\n'), ((2470, 2505), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * self.var_ ** 2)'], {}), '(2 * np.pi * self.var_ ** 2)\n', (2477, 2505), True, 'import numpy as np\n'), ((5735, 5771), 'numpy.linalg.det', 'np.linalg.det', (['(2 * np.pi * self.cov_)'], {}), '(2 * np.pi * self.cov_)\n', (5748, 5771), True, 'import numpy as np\n'), ((6798, 6816), 'numpy.linalg.det', 'np.linalg.det', (['cov'], {}), '(cov)\n', (6811, 6816), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Author: tom-hydrogen
# @Date: 2018-03-07 15:18:34
# @Last Modified by: tom-hydrogen
# @Last Modified time: 2018-03-09 16:52:07
from copy import deepcopy
from itertools import product
import numpy as np
from .core import BaseSampler
class GridSampler(BaseSampler):
    """Grid optimization sampler
    Sample next location based on grid sampling
    Parameters
    ----------
    space: list(dict)
        Define search space. Each element has to the following key
        values: 'name', 'type', and 'domain' (,'num_grid' is optional).
    init_X: array-like(float), shape=(n_samples, n_dim)
        The list of parameters to initizlie sampler
    init_y: array-like(float), shape(n_samples,)
        The list of score of init_X
    num_grid: int, optional
        The default number of grid
    """
    sampler_name = "grid"
    def __init__(self, space, init_X=None, init_y=None, num_grid=None,
                 *args, **kwargs):
        super(GridSampler, self).__init__(space, init_X, init_y)
        # Cursor into self.params_list; advanced cyclically by sample().
        self.index = 0
        domains = []
        indices = []
        # Deep copy so the caller's search-space dicts are never mutated.
        _params_conf = deepcopy(self.params_conf)
        # Set default grid
        for i, conf in enumerate(_params_conf):
            # Set default grid value
            # Only (lower, upper) two-element domains receive the default.
            if "num_grid" not in conf and num_grid is not None:
                if len(conf["domain"]) == 2:
                    conf["num_grid"] = num_grid
            # Configure domain
            domain = conf["domain"]
            if conf["type"] in ["continuous", "integer"]:
                if "num_grid" in conf:
                    scale = conf.get("scale", None)
                    # Log-spaced grid when requested, linear otherwise.
                    if scale == 'log':
                        domain = np.logspace(np.log10(domain[0]),
                                             np.log10(domain[1]),
                                             conf["num_grid"])
                    else:
                        domain = np.linspace(domain[0],
                                             domain[1],
                                             conf["num_grid"])
                    if conf["type"] == "integer":
                        # Snap grid points to integers (truncation via astype).
                        domain = domain.astype(int)
                else:
                    # No grid requested: use the domain values as given.
                    domain = tuple(domain)
            elif conf["type"] == "fixed":
                # Fixed parameters contribute a single-value axis.
                domain = (domain,)
            else:
                # Categorical/discrete: enumerate the domain entries directly.
                domain = tuple(domain)
            domains.append(list(domain))
            indices.append(i)
        # Sample parameters from parameters stored in self.params_list
        # The full grid is the Cartesian product of all per-parameter axes.
        patterns = product(*domains)
        self.params_list = []
        for params_val in patterns:
            params_dict = dict()
            for i, idx in enumerate(indices):
                conf = _params_conf[idx]
                params_dict[conf["name"]] = params_val[i]
            self.params_list.append(params_dict)
    def sample(self, num_samples=1, *args, **kwargs):
        """Sample next location to evaluate based on grid.
        Everytime this function is called, it samples points not sampled yet.
        Parameters
        ---------
        num_samples: int
            The number of samples
        Returns
        -------
        Xs: list(dict), length is num_samples
        """
        Xs = []
        for i in range(num_samples):
            x = self.params_list[self.index]
            Xs.append(x)
            self.index += 1
            # Wrap around so sampling continues past one full grid sweep.
            self.index = self.index % len(self.params_list)
        return Xs
| [
"numpy.linspace",
"itertools.product",
"numpy.log10",
"copy.deepcopy"
] | [((1123, 1149), 'copy.deepcopy', 'deepcopy', (['self.params_conf'], {}), '(self.params_conf)\n', (1131, 1149), False, 'from copy import deepcopy\n'), ((2533, 2550), 'itertools.product', 'product', (['*domains'], {}), '(*domains)\n', (2540, 2550), False, 'from itertools import product\n'), ((1928, 1979), 'numpy.linspace', 'np.linspace', (['domain[0]', 'domain[1]', "conf['num_grid']"], {}), "(domain[0], domain[1], conf['num_grid'])\n", (1939, 1979), True, 'import numpy as np\n'), ((1719, 1738), 'numpy.log10', 'np.log10', (['domain[0]'], {}), '(domain[0])\n', (1727, 1738), True, 'import numpy as np\n'), ((1785, 1804), 'numpy.log10', 'np.log10', (['domain[1]'], {}), '(domain[1])\n', (1793, 1804), True, 'import numpy as np\n')] |
"""Tests for the TimeChangedBrownianMotion class."""
import numpy as np
import pytest
from processes.processes import TimeChangedBrownianMotion
@pytest.fixture(scope="module")
def process():
    """Module-scoped TimeChangedBrownianMotion driven by a tanh time change."""
    time_change = lambda s: np.tanh(s)
    return TimeChangedBrownianMotion(time_change=time_change)
@pytest.mark.parametrize("time_change", [1, "a", lambda x, t: x])
def test_init_errors(time_change):
    """Non-callables raise TypeError; callables with a bad signature, ValueError."""
    expected_error = ValueError if callable(time_change) else TypeError
    with pytest.raises(expected_error):
        TimeChangedBrownianMotion(time_change=time_change)
def test_post_init_modification(process):
    """Assigning an invalid time change after construction must be rejected."""
    invalid = lambda x, t: x
    with pytest.raises(ValueError):
        process.time_change = invalid
def test_sample(process, T, n_time_grid, n_paths):
    """Sampled paths are ndarrays with the documented shape, starting at x0."""
    start_value = 1
    paths = process.sample(
        T=T, n_time_grid=n_time_grid, x0=start_value, n_paths=n_paths
    )
    assert isinstance(paths, np.ndarray)
    if n_paths == 1:
        assert paths.shape == (n_time_grid,)
        assert paths[0] == start_value
    else:
        assert paths.shape == (n_paths, n_time_grid)
        assert np.all(paths[:, 0] == start_value)
@pytest.mark.parametrize(
    "time_change", [lambda t: np.exp(-t), lambda t: -np.exp(-t)]
)
def test_invalid_time_change(process, time_change, n_time_grid):
    """Decreasing or negative time changes must be rejected at sampling time."""
    process.time_change = time_change
    sample_kwargs = dict(T=1, n_time_grid=n_time_grid, x0=1, n_paths=1)
    with pytest.raises(ValueError):
        process.sample(**sample_kwargs)
| [
"processes.processes.TimeChangedBrownianMotion",
"numpy.tanh",
"numpy.exp",
"pytest.mark.parametrize",
"pytest.raises",
"pytest.fixture",
"numpy.all"
] | [((150, 180), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (164, 180), False, 'import pytest\n'), ((270, 334), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""time_change"""', "[1, 'a', lambda x, t: x]"], {}), "('time_change', [1, 'a', lambda x, t: x])\n", (293, 334), False, 'import pytest\n'), ((668, 693), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (681, 693), False, 'import pytest\n'), ((1100, 1125), 'numpy.all', 'np.all', (['(paths[:, 0] == x0)'], {}), '(paths[:, 0] == x0)\n', (1106, 1125), True, 'import numpy as np\n'), ((1333, 1358), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1346, 1358), False, 'import pytest\n'), ((413, 438), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (426, 438), False, 'import pytest\n'), ((452, 502), 'processes.processes.TimeChangedBrownianMotion', 'TimeChangedBrownianMotion', ([], {'time_change': 'time_change'}), '(time_change=time_change)\n', (477, 502), False, 'from processes.processes import TimeChangedBrownianMotion\n'), ((526, 550), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (539, 550), False, 'import pytest\n'), ((564, 614), 'processes.processes.TimeChangedBrownianMotion', 'TimeChangedBrownianMotion', ([], {'time_change': 'time_change'}), '(time_change=time_change)\n', (589, 614), False, 'from processes.processes import TimeChangedBrownianMotion\n'), ((1184, 1194), 'numpy.exp', 'np.exp', (['(-t)'], {}), '(-t)\n', (1190, 1194), True, 'import numpy as np\n'), ((255, 265), 'numpy.tanh', 'np.tanh', (['t'], {}), '(t)\n', (262, 265), True, 'import numpy as np\n'), ((1207, 1217), 'numpy.exp', 'np.exp', (['(-t)'], {}), '(-t)\n', (1213, 1217), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from collections import Counter
from itertools import combinations
'''
these functions are the general functions used by all information gain ratio
functions
'''
def is_categorical(x):
    '''
    INPUT
     - single data point x
    OUTPUT
     - boolean
    returns true if x is categorical else false
    '''
    # `unicode` exists only in Python 2; on Python 3 the old
    # `... or isinstance(x, unicode)` raised NameError for every
    # non-str/non-bool input. str already covers all text on Python 3.
    return isinstance(x, (str, bool))
def check_if_categorical(attribute, df):
    '''
    INPUT:
        - attribute: the feature inside the dataframe to check
        - df: the DataFrame itself
    OUTPUT:
        - boolean
    Returns True if feature in df is categorical else False
    '''
    # The vectorized checker was previously bound to a local variable that
    # shadowed this function's own name; give it a distinct name.
    vectorized_check = np.vectorize(is_categorical)
    # The column is categorical only when every single value is categorical.
    return bool(np.mean(vectorized_check(df[attribute].values)) == 1)
def entropy(y):
    '''
    INPUT:
        - y: 1d numpy array
    OUTPUT:
        - float
    Return the entropy of the array y.
    '''
    # Shannon entropy: -sum_v p(v) * log2(p(v)) over the distinct values.
    total = float(len(y))
    frequencies = Counter(y)
    return -sum(
        (count / total) * np.log2(count / total) for count in frequencies.values()
    )
def information_gain(y, y1, y2, impurity_criterion):
    '''
    INPUT:
        - y: 1d numpy array
        - y1: 1d numpy array (labels for subset 1)
        - y2: 1d numpy array (labels for subset 2)
    OUTPUT:
        - float
    Return the information gain of making the given split.
    '''
    # Gain = parent impurity minus the size-weighted child impurities.
    total = float(len(y))
    weighted_children = (
        len(y1) / total * impurity_criterion(y1)
        + len(y2) / total * impurity_criterion(y2)
    )
    return impurity_criterion(y) - weighted_children
'''
these are the helper functions for the continuous version of information gain
ratio
'''
def multiple_information_gain(y, y_list, impurity_criterion):
    '''
    INPUT:
        - y: 1d numpy array
        - y_list: list of y values [y1, y2, y3]
        - impurity_criterion: either gini or entropy
    OUTPUT:
        - float
    Return the information gain of making the given split.
    '''
    # Gain = parent impurity minus the size-weighted impurity of each part.
    total = float(len(y))
    weighted_children = sum(
        len(subset) / total * impurity_criterion(subset) for subset in y_list
    )
    return impurity_criterion(y) - weighted_children
# NOTE(review): this definition is dead code -- it is immediately shadowed by
# the re-definition of the same name directly below (which inlines the
# `possible_splits` list). Kept verbatim; consider deleting it.
def determine_optimal_continuous_split_values(attribute, df, y):
    '''
    INPUT
     - attribute: str, feature to check
     - df: pandas dataframe of features
     - y: 1d array, target
    OUTPUT
     - max_split: tuple of best values to split on
     - info_gain_array: numpy array of all information gains
     - possible_splits: list of all possible split values
    Returns tuple of split values that optimize information gain (min 1 max 3)
    '''
    attribute_value_array = df[attribute].values
    # Candidate thresholds: every distinct value except the largest.
    split_values = np.unique(sorted(attribute_value_array))[:-1]
    possible_splits = list(combinations(split_values, 1))
    max_info_gain = 0
    for split in possible_splits:
        X_list, y_list = make_multiple_split(attribute_value_array, y, split)
        # NOTE(review): the gain is computed twice per candidate, and
        # `max_split` stays unbound when no split beats zero gain.
        if multiple_information_gain(y, y_list, entropy) > max_info_gain:
            max_info_gain = multiple_information_gain(y, y_list, entropy)
            max_split = split
    return max_split
def determine_optimal_continuous_split_values(attribute, df, y):
    '''
    INPUT
     - attribute: str, feature to check
     - df: pandas dataframe of features
     - y: 1d array, target
    OUTPUT
     - max_split: tuple of the best value(s) to split on, or None when no
       candidate split yields positive information gain
    Returns the split-value tuple that maximizes information gain.
    '''
    attribute_value_array = df[attribute].values
    # np.unique already returns sorted values; drop the maximum because a
    # split there would leave the upper side empty.
    split_values = np.unique(attribute_value_array)[:-1]
    max_info_gain = 0
    # Initialized so the function returns None instead of raising
    # UnboundLocalError when no candidate improves on zero gain.
    max_split = None
    for split in combinations(split_values, 1):
        X_list, y_list = make_multiple_split(attribute_value_array, y, split)
        # Compute the gain once per candidate instead of twice.
        info_gain = multiple_information_gain(y, y_list, entropy)
        if info_gain > max_info_gain:
            max_info_gain = info_gain
            max_split = split
    return max_split
def split_list(doc_list, n_groups):
    '''
    INPUT
     - doc_list - a list of documents to be split up
     - n_groups - the number of groups to split doc_list into
    OUTPUT
     - list
    Return a list of n_groups contiguous sub-lists that split the original
    list as evenly as possible (fractional chunk boundaries are truncated).
    '''
    chunk = len(doc_list) / float(n_groups)
    groups = []
    position = 0.0
    while position < len(doc_list):
        groups.append(doc_list[int(position):int(position + chunk)])
        position += chunk
    return groups
def potential_attribute_information_gain_continuous(X_list):
    '''
    INPUT
     - X_list: list of optimally split attribute-value subsets
    OUTPUT
     - float
    Return the potential (split) information for a continuous split,
    per Ross Quinlan's information gain ratio formula in C4.5:
    -sum_i (|X_i|/|X|) * log2(|X_i|/|X|).
    '''
    total = sum(len(subset) for subset in X_list)
    fractions = [float(len(subset)) / total for subset in X_list]
    return -sum(fraction * np.log2(fraction) for fraction in fractions)
def make_multiple_split(X, y, split_value):
    '''
    INPUT:
        - X: 1d numpy array of attribute values
        - y: 1d numpy array of labels (same length as X)
        - split_value: tuple of one or more threshold values
    OUTPUT:
        - X_list: list of numpy arrays, one per interval
        - y_list: list of numpy arrays, the matching label subsets
    Return the subsets obtained by cutting the data at the given
    thresholds.  With k thresholds t1 < ... < tk the intervals are
    (-inf, t1], (t1, t2], ..., (tk, inf), so k+1 subsets come back.

    Generalized from the original 1-3 threshold special cases to any
    number of thresholds; results are identical for the 1-3 cases.
    '''
    if len(split_value) == 0:
        # The original silently returned None here, which crashed callers
        # on unpacking; fail loudly instead.
        raise ValueError("split_value must contain at least one threshold")
    # Sorting makes the intervals well-defined regardless of tuple order
    # (callers pass itertools.combinations output, which is already sorted).
    bounds = sorted(split_value)
    X_list, y_list = [], []
    previous = None
    for bound in bounds:
        if previous is None:
            mask = X <= bound
        else:
            mask = (X > previous) & (X <= bound)
        X_list.append(X[mask])
        y_list.append(y[mask])
        previous = bound
    # Final open-ended interval above the largest threshold.
    mask = X > bounds[-1]
    X_list.append(X[mask])
    y_list.append(y[mask])
    return X_list, y_list
def information_gain_ratio_continuous(attribute, df, y):
    '''
    INPUT
     - attribute: str, feature to check
     - df: pandas dataframe of features
     - y: 1d array, target
    OUTPUT
     - float
    Return the C4.5 information gain ratio (gain divided by potential
    split information) for a continuous feature.
    '''
    best_split = determine_optimal_continuous_split_values(attribute, df, y)
    X_subsets, y_subsets = make_multiple_split(df[attribute].values, y, best_split)
    gain = multiple_information_gain(y, y_subsets, entropy)
    potential = potential_attribute_information_gain_continuous(X_subsets)
    return gain / potential
'''
these functions below compute for information gain ratio for continuous
variables and work in numpy, thus could potentially be much faster than
the pandas version
'''
def information_gain_ratio_continuous_1d(X, y):
    '''
    INPUT
     - X: continuous feature, 1d array
     - y: 1d array, target
    OUTPUT
     - float
    Return the C4.5 information gain ratio for a continuous feature,
    operating on a bare numpy array instead of a dataframe column.
    '''
    best_split = determine_optimal_continuous_split_values_1d(X, y)
    X_subsets, y_subsets = make_multiple_split(X, y, best_split)
    gain = multiple_information_gain(y, y_subsets, entropy)
    potential = potential_attribute_information_gain_continuous(X_subsets)
    return gain / potential
def determine_optimal_continuous_split_values_1d(X, y):
    '''
    INPUT
     - X: continuous feature, 1d array
     - y: 1d array, target
    OUTPUT
     - max_split: tuple of the best value(s) to split on, or None when no
       candidate split yields positive information gain
    Returns the split-value tuple that maximizes information gain.
    '''
    # np.unique already returns sorted values; drop the maximum because a
    # split there would leave the upper side empty.
    split_values = np.unique(X)[:-1]
    max_info_gain = 0
    # Initialized so the function returns None instead of raising
    # UnboundLocalError when no candidate improves on zero gain.
    max_split = None
    for split in combinations(split_values, 1):
        X_list, y_list = make_multiple_split(X, y, split)
        # Compute the gain once per candidate instead of twice.
        info_gain = multiple_information_gain(y, y_list, entropy)
        if info_gain > max_info_gain:
            max_info_gain = info_gain
            max_split = split
    return max_split
'''
these are the categorical functions that work 100 percent correctly accdg to
ross quinlan's information gain ratio formulas from C4.5
'''
def information_gain_by_attribute_categorical(attribute, df, y):
    '''
    INPUT
     - attribute: string, column in the dataframe that IS categorical
     - df: dataframe of features
     - y: 1d array of targets
    OUTPUT
     - float
    Return the information gain for a specific attribute: the entropy of y
    minus the size-weighted entropy of y within each attribute value.
    '''
    attribute_value_array = df[attribute].values
    possible_attribute_values = np.unique(attribute_value_array)
    attribute_info_gain = 0
    # Counter gives |rows with value v| for the weighting term below.
    numerator_values = Counter(attribute_value_array)
    for possible_attribute_value in possible_attribute_values:
        # Accumulates sum_c p(c) * log2(p(c)) over the target classes c
        # within this attribute value (i.e. minus the subset entropy).
        value_info_gain = 0
        subset_of_y_values = \
            y[attribute_value_array == possible_attribute_value]
        y_outcomes = np.unique(subset_of_y_values)
        for y_outcome in y_outcomes:
            y_num_value = len(subset_of_y_values
                              [subset_of_y_values == y_outcome])
            value_info_gain += \
                float(y_num_value)/len(subset_of_y_values) \
                * np.log2(float(y_num_value)/len(subset_of_y_values))
        # Weight the subset entropy by |subset|/|y|; the "* -1" flips the
        # sign because value_info_gain accumulated the negative entropy.
        attribute_info_gain += \
            float(numerator_values[possible_attribute_value])/len(y) * \
            -1 * value_info_gain
    # Gain = H(y) - sum_v (|y_v|/|y|) * H(y_v); relies on the module-level
    # entropy() helper.
    return entropy(y) - attribute_info_gain
def potential_information_by_attribute_categorical(attribute, df, y):
    '''
    INPUT
     - attribute: str, feature to check
     - df: pandas dataframe of features
     - y: 1d array, target
    OUTPUT
     - float
    Return the potential (split) information for a categorical attribute
    per Quinlan's C4.5: -sum_v (|y_v|/|y|) * log2(|y_v|/|y|).
    '''
    values = df[attribute].values
    total = len(y)
    accumulated = 0
    for value in np.unique(values):
        fraction = float(len(y[values == value])) / total
        accumulated += fraction * np.log2(fraction)
    return -1 * accumulated
def information_gain_ratio_categorical(attribute, df, y):
    '''
    INPUT
     - attribute: str, feature to check
     - df: pandas dataframe of features
     - y: 1d array, target
    OUTPUT
     - float
    Return the C4.5 information gain ratio for a categorical attribute:
    information gain divided by the potential (split) information.
    '''
    gain = information_gain_by_attribute_categorical(attribute, df, y)
    potential = potential_information_by_attribute_categorical(attribute, df, y)
    return float(gain) / potential
'''
this function computes for information gain ratio, checks first if it is
categorical or continuous, and then calls the appropriate functions
currently works for dataframes only
'''
def information_gain_ratio(attribute, df, y):
    '''
    INPUT
     - attribute: str, feature to check
     - df: pandas dataframe of features
     - y: 1d array, target
    OUTPUT
     - float
    Return the C4.5 information gain ratio, dispatching to the categorical
    or continuous implementation based on the attribute's detected type.
    '''
    if not check_if_categorical(attribute, df):
        return information_gain_ratio_continuous(attribute, df, y)
    return information_gain_ratio_categorical(attribute, df, y)
'''
these functions load toy data to test the functions on
'''
def load_play_golf():
    '''
    INPUT
     - none
    OUTPUT
     - df: feature dataframe (columns lower-cased, target column removed)
     - X: 2d array of feature values
     - y: 1d array of target values
    Return the df, X features, y values for the playgolf.csv toy dataset.
    '''
    df = pd.read_csv('data/playgolf.csv')
    df.columns = [column.lower() for column in df.columns]
    y = df.pop('result').values
    return df, df.values, y
def load_labor_negotiations_data():
    '''
    INPUT
     - none
    OUTPUT
     - df: feature dataframe (target column removed)
     - X: 2d array of feature values
     - y: 1d array of target values
    Return the df, X features, y values for the labor-neg.data.txt dataset.
    '''
    column_names = ['dur', 'wage1', 'wage2', 'wage3', 'cola', 'hours',
                    'pension', 'stby_pay', 'shift_diff', 'educ_allw',
                    'holidays', 'vacation', 'lngtrm_disabil', 'dntl_ins',
                    'bereavement', 'empl_hpln', 'target']
    df = pd.read_csv('data/labor-neg.data.txt', header=None)
    df.columns = column_names
    y = df.pop('target').values
    return df, df.values, y
def load_contraceptive_data():
    '''
    INPUT
     - none
    OUTPUT
     - df: feature dataframe (label column removed)
     - X: 2d array of feature values
     - y: 1d array of target labels
    Return the df, X features, y values for the cmc.data.txt dataset.
    '''
    column_names = ['wife_age', 'wife_educ', 'hus_educ', 'num_kids',
                    'wife_rel', 'wife_work_status', 'hus_job',
                    'living_std', 'media_expo', 'label']
    df = pd.read_csv('data/cmc.data.txt', header=None)
    df.columns = column_names
    y = np.array(df.pop('label'))
    return df, df.values, y
if __name__ == "__main__":
    # Smoke-test the C4.5 helper functions on the play-golf toy dataset.
    df, X, y = load_play_golf()
    print('information_gain')
    for attribute in df.columns:
        print(attribute,
              information_gain_by_attribute_categorical(attribute, df, y))
    print('')
    print('split_information_gain')
    for attribute in df.columns:
        print(attribute,
              potential_information_by_attribute_categorical(attribute, df, y))
    print('')
    print('information_gain_ratio')
    for attribute in df.columns:
        print(attribute, information_gain_ratio_categorical(attribute, df, y))
    # NOTE(review): the banner says "temperature" but the column actually
    # tested below is 'humidity' — confirm which was intended.
    print('\ntest information gain for temperature')
    print(information_gain_ratio_continuous('humidity', df, y))
    print(information_gain_ratio_continuous_1d(df['humidity'].values, y))
| [
"numpy.unique",
"pandas.read_csv",
"collections.Counter",
"itertools.combinations",
"numpy.array",
"numpy.log2",
"numpy.vectorize"
] | [((730, 758), 'numpy.vectorize', 'np.vectorize', (['is_categorical'], {}), '(is_categorical)\n', (742, 758), True, 'import numpy as np\n'), ((1048, 1058), 'collections.Counter', 'Counter', (['y'], {}), '(y)\n', (1055, 1058), False, 'from collections import Counter\n'), ((3798, 3827), 'itertools.combinations', 'combinations', (['split_values', '(1)'], {}), '(split_values, 1)\n', (3810, 3827), False, 'from itertools import combinations\n'), ((8661, 8690), 'itertools.combinations', 'combinations', (['split_values', '(1)'], {}), '(split_values, 1)\n', (8673, 8690), False, 'from itertools import combinations\n'), ((9517, 9549), 'numpy.unique', 'np.unique', (['attribute_value_array'], {}), '(attribute_value_array)\n', (9526, 9549), True, 'import numpy as np\n'), ((9601, 9631), 'collections.Counter', 'Counter', (['attribute_value_array'], {}), '(attribute_value_array)\n', (9608, 9631), False, 'from collections import Counter\n'), ((10761, 10793), 'numpy.unique', 'np.unique', (['attribute_value_array'], {}), '(attribute_value_array)\n', (10770, 10793), True, 'import numpy as np\n'), ((12735, 12767), 'pandas.read_csv', 'pd.read_csv', (['"""data/playgolf.csv"""'], {}), "('data/playgolf.csv')\n", (12746, 12767), True, 'import pandas as pd\n'), ((13112, 13163), 'pandas.read_csv', 'pd.read_csv', (['"""data/labor-neg.data.txt"""'], {'header': 'None'}), "('data/labor-neg.data.txt', header=None)\n", (13123, 13163), True, 'import pandas as pd\n'), ((13713, 13758), 'pandas.read_csv', 'pd.read_csv', (['"""data/cmc.data.txt"""'], {'header': 'None'}), "('data/cmc.data.txt', header=None)\n", (13724, 13758), True, 'import pandas as pd\n'), ((13974, 13985), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (13982, 13985), True, 'import numpy as np\n'), ((2864, 2893), 'itertools.combinations', 'combinations', (['split_values', '(1)'], {}), '(split_values, 1)\n', (2876, 2893), False, 'from itertools import combinations\n'), ((9840, 9869), 'numpy.unique', 'np.unique', 
(['subset_of_y_values'], {}), '(subset_of_y_values)\n', (9849, 9869), True, 'import numpy as np\n'), ((1150, 1160), 'numpy.log2', 'np.log2', (['p'], {}), '(p)\n', (1157, 1160), True, 'import numpy as np\n'), ((5201, 5222), 'numpy.log2', 'np.log2', (['subset_ratio'], {}), '(subset_ratio)\n', (5208, 5222), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
# Training examples for the Find-S algorithm; the code below treats the
# last CSV column as the class label (compared against "Yes") and the
# remaining columns as attribute values.
data = pd.read_csv('finds.csv')
def train(concepts, target):
    """Run the Find-S algorithm and return the maximally specific hypothesis.

    Parameters
    ----------
    concepts : 2d numpy array
        One row of attribute values per training example.
    target : sequence of str
        Class labels; "Yes" marks positive examples.

    Returns
    -------
    1d numpy array
        The hypothesis: attribute values shared by all positive examples,
        with '?' where positive examples disagree.

    Raises
    ------
    ValueError
        If there is no positive example (the original raised
        UnboundLocalError in that case).
    """
    specific_h = None
    for i, label in enumerate(target):
        if label == "Yes":
            # Copy so the hypothesis updates below do not mutate the
            # caller's `concepts` array (the original wrote through a view).
            specific_h = concepts[i].copy()
            break
    if specific_h is None:
        raise ValueError("no positive ('Yes') example in target")
    for i, hypothesis in enumerate(concepts):
        if target[i] == "Yes":
            for x in range(len(specific_h)):
                # Generalize any attribute that disagrees with this
                # positive example.
                if hypothesis[x] != specific_h[x]:
                    specific_h[x] = "?"
    return specific_h
concepts = np.array(data.iloc[:,0:-1])  # all rows, every column except the last (the attribute values)
target = np.array(data.iloc[:,-1])  # all rows, last column only (the class labels)
print(train(concepts,target))
"numpy.array",
"pandas.read_csv"
] | [((46, 70), 'pandas.read_csv', 'pd.read_csv', (['"""finds.csv"""'], {}), "('finds.csv')\n", (57, 70), True, 'import pandas as pd\n'), ((509, 537), 'numpy.array', 'np.array', (['data.iloc[:, 0:-1]'], {}), '(data.iloc[:, 0:-1])\n', (517, 537), True, 'import numpy as np\n'), ((604, 630), 'numpy.array', 'np.array', (['data.iloc[:, -1]'], {}), '(data.iloc[:, -1])\n', (612, 630), True, 'import numpy as np\n')] |
import logging
import logging.config
import os
import tempfile
import threading
from collections import defaultdict
from http import HTTPStatus
from multiprocessing import shared_memory
from pathlib import Path
from typing import Any, List, Optional

import joblib
import numpy as np
from diskcache import Cache
from filelock import FileLock, Timeout
from flask import Flask, jsonify, request
from scipy.stats import rankdata
from sentence_transformers import SentenceTransformer
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
from sklearn.linear_model import Ridge
# NOTE(review): `logging.config` is a submodule that `import logging` alone
# does not guarantee to load — confirm `import logging.config` is present.
logging.config.fileConfig("logging.conf")
logger = logging.getLogger(__name__)
# On-disk locations for the sentence-embedding cache and per-user models,
# both resolved relative to this file.
PATH_CACHE = Path(__file__).parent.absolute() / "app_data" / "sbert_cache"
PATH_MODELS = Path(__file__).parent.absolute() / "app_data" / "models"
class Server:
    """Flask app that reranks texts per user with an online-trained model."""
    def __init__(self):
        """Wire up the HTTP routes, the sentence featurizer and caches."""
        self._app = Flask(__name__)
        # Per-user training locks live under the system temp directory.
        self._lock_directory: Path = Path(tempfile.gettempdir()) / ".locks"
        self._app.add_url_rule("/rerank/<user_id>", "sort", self._rerank, methods=["POST"])
        self._app.add_url_rule("/train/<user_id>", "train", self._train, methods=["POST"])
        self._featurizer = CachedSentenceTransformer("paraphrase-distilroberta-base-v1")
        # Counts how many times each user's model has been retrained;
        # cleared on startup, so counts are per-process.
        self._generations = Cache(PATH_CACHE / "generations.db")
        self._generations.clear()
    def start(self, debug: bool = False, host: str = "0.0.0.0", port: int = 5000):
        """Run the Flask development server (blocking)."""
        self._app.run(debug=debug, host=host, port=port)
    def _rerank(self, user_id: str):
        """POST /rerank/<user_id>: return index ranks for the posted texts.

        The request body is a JSON list of strings.  With a trained model,
        returns the indices that would sort the texts by predicted score
        (ascending, via np.argsort); without one, returns a random order.
        """
        logger.info("Got ranking request for [%s]", user_id)
        texts = request.get_json()
        model = self._load_model(user_id)
        if model is None:
            logger.info("No model for user [%s] yet", user_id)
            # Cold start: no model yet, shuffle uniformly at random.
            ranks = np.arange(len(texts))
            rng = np.random.default_rng()
            rng.shuffle(ranks)
        else:
            logger.info("Predicting for user [%s] with generation [%s]", user_id, self._generations.get(user_id, 0))
            Xf = self._featurizer.featurize(texts)
            y = model.predict(Xf)
            ranks = np.argsort(y)
        return jsonify([int(r) for r in ranks])
    def _train(self, user_id: str):
        """POST /train/<user_id>: fit a per-user model in the background.

        Expects JSON {"texts": [...], "times": [...]}.  Returns 204
        immediately; training runs in a daemon thread.  A file lock keeps
        at most one training per user — concurrent requests get 429.
        """
        logger.info("Got training request for [%s]", user_id)
        json_data = request.get_json()
        texts = json_data["texts"]
        times = json_data["times"]
        try:
            # The lock needs to be acquired out here, not in the fn scope, else it would
            # just throw the Timeout inside fn.
            lock = self._get_lock(user_id)
            lock.acquire()
            def _fn():
                try:
                    Xf = self._featurizer.featurize(texts)
                    model = GaussianProcessRegressor(kernel=(DotProduct() + WhiteKernel())).fit(Xf, times)
                    self._save_model(user_id, model)
                finally:
                    # Always release, even if featurize/fit raises.
                    lock.release()
            # We spawn a thread and run the training in there so that this HTTP request can return directly
            threading.Thread(target=_fn, daemon=True).start()
            return HTTPStatus.NO_CONTENT.description, HTTPStatus.NO_CONTENT.value
        except Timeout:
            logger.info("Already training for user [%s], skipping!", user_id)
            return HTTPStatus.TOO_MANY_REQUESTS.description, HTTPStatus.TOO_MANY_REQUESTS.value
    def _get_lock(self, user_id: str) -> FileLock:
        """Return the per-user training file lock (1-second acquire timeout)."""
        self._lock_directory.mkdir(parents=True, exist_ok=True)
        lock_path = self._lock_directory / f"{user_id}.lock"
        return FileLock(lock_path, timeout=1)
    def _load_model(self, user_id: str) -> Optional[Any]:
        """Load the user's persisted model, or None if none exists yet."""
        model_path = self._get_model_path(user_id)
        logger.info("Reading model from [%s]", model_path)
        if model_path.is_file():
            logger.debug("Model found for [%s]", model_path)
            return joblib.load(model_path)
        else:
            logger.debug("No model found for [%s]", model_path)
            return None
    def _save_model(self, user_id: str, model: Any):
        """Persist the model atomically and bump the user's generation."""
        model_path = self._get_model_path(user_id)
        logger.info("Writing model to [%s]", model_path)
        model_path.parent.mkdir(parents=True, exist_ok=True)
        # Write to a temp file then os.replace so readers never observe a
        # partially written model.
        tmp_model_path = model_path.with_suffix(".joblib.tmp")
        joblib.dump(model, tmp_model_path)
        os.replace(tmp_model_path, model_path)
        self._generations.incr(user_id)
    def _get_model_path(self, user_id: str) -> Path:
        """Return the on-disk path of the user's serialized model."""
        return PATH_MODELS / f"model_{user_id}.joblib"
class CachedSentenceTransformer:
    """SentenceTransformer wrapper with a persistent on-disk embedding cache."""
    def __init__(self, model_name: str):
        super().__init__()
        self._model = SentenceTransformer(model_name)
        self._cache = Cache(PATH_CACHE / model_name)
    def featurize(self, sentences: List[str]) -> np.ndarray:
        """Encode *sentences* to vectors, computing only the cache misses."""
        misses = [text for text in sentences if text not in self._cache]
        encoded = list(self._model.encode(misses))
        assert len(misses) == len(encoded)
        for text, vector in zip(misses, encoded):
            self._cache[text] = vector
        return np.array([self._cache[text].squeeze() for text in sentences])
server = Server()
# Module-level Flask app object — presumably exposed so a WSGI runner can
# import it directly; confirm against the deployment setup.
app = server._app
if __name__ == "__main__":
    server.start(debug=True, port=5000)
| [
"logging.getLogger",
"sentence_transformers.SentenceTransformer",
"numpy.random.default_rng",
"flask.Flask",
"pathlib.Path",
"sklearn.gaussian_process.kernels.DotProduct",
"filelock.FileLock",
"diskcache.Cache",
"os.replace",
"numpy.argsort",
"flask.request.get_json",
"logging.config.fileConfi... | [((629, 670), 'logging.config.fileConfig', 'logging.config.fileConfig', (['"""logging.conf"""'], {}), "('logging.conf')\n", (654, 670), False, 'import logging\n'), ((681, 708), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (698, 708), False, 'import logging\n'), ((916, 931), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (921, 931), False, 'from flask import Flask, jsonify, request\n'), ((1311, 1347), 'diskcache.Cache', 'Cache', (["(PATH_CACHE / 'generations.db')"], {}), "(PATH_CACHE / 'generations.db')\n", (1316, 1347), False, 'from diskcache import Cache\n'), ((1639, 1657), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1655, 1657), False, 'from flask import Flask, jsonify, request\n'), ((2324, 2342), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2340, 2342), False, 'from flask import Flask, jsonify, request\n'), ((3601, 3631), 'filelock.FileLock', 'FileLock', (['lock_path'], {'timeout': '(1)'}), '(lock_path, timeout=1)\n', (3609, 3631), False, 'from filelock import FileLock, Timeout\n'), ((4334, 4368), 'joblib.dump', 'joblib.dump', (['model', 'tmp_model_path'], {}), '(model, tmp_model_path)\n', (4345, 4368), False, 'import joblib\n'), ((4377, 4415), 'os.replace', 'os.replace', (['tmp_model_path', 'model_path'], {}), '(tmp_model_path, model_path)\n', (4387, 4415), False, 'import os\n'), ((4691, 4722), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_name'], {}), '(model_name)\n', (4710, 4722), False, 'from sentence_transformers import SentenceTransformer\n'), ((4745, 4775), 'diskcache.Cache', 'Cache', (['(PATH_CACHE / model_name)'], {}), '(PATH_CACHE / model_name)\n', (4750, 4775), False, 'from diskcache import Cache\n'), ((1851, 1874), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (1872, 1874), True, 'import numpy as np\n'), ((2142, 2155), 'numpy.argsort', 'np.argsort', (['y'], {}), '(y)\n', (2152, 
2155), True, 'import numpy as np\n'), ((3914, 3937), 'joblib.load', 'joblib.load', (['model_path'], {}), '(model_path)\n', (3925, 3937), False, 'import joblib\n'), ((974, 995), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (993, 995), False, 'import tempfile\n'), ((723, 737), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (727, 737), False, 'from pathlib import Path\n'), ((799, 813), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (803, 813), False, 'from pathlib import Path\n'), ((3079, 3120), 'threading.Thread', 'threading.Thread', ([], {'target': '_fn', 'daemon': '(True)'}), '(target=_fn, daemon=True)\n', (3095, 3120), False, 'import threading\n'), ((2799, 2811), 'sklearn.gaussian_process.kernels.DotProduct', 'DotProduct', ([], {}), '()\n', (2809, 2811), False, 'from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel\n'), ((2814, 2827), 'sklearn.gaussian_process.kernels.WhiteKernel', 'WhiteKernel', ([], {}), '()\n', (2825, 2827), False, 'from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel\n')] |
# Standard Library
import argparse
import random
import time
import uuid
# Third Party
import mxnet as mx
import numpy as np
from mxnet import autograd, gluon, init
from mxnet.gluon import nn
from mxnet.gluon.data.vision import datasets, transforms
# First Party
from smdebug.mxnet import Hook, SaveConfig, modes
def parse_args():
    """Parse the command-line options for the FashionMNIST training run.

    Returns the argparse namespace with batch size, output/smdebug S3
    locations, learning rate, random-seed flag and optional step limit.
    """
    parser = argparse.ArgumentParser(
        description="Train a mxnet gluon model for FashonMNIST dataset"
    )
    parser.add_argument("--batch-size", type=int, default=256, help="Batch size")
    default_output = f"s3://smdebug-testing/outputs/basic-mxnet-hook-{uuid.uuid4()}"
    parser.add_argument(
        "--output-uri",
        type=str,
        default=default_output,
        help="S3 URI of the bucket where tensor data will be stored.",
    )
    parser.add_argument(
        "--smdebug_path",
        type=str,
        default=None,
        help="S3 URI of the bucket where tensor data will be stored.",
    )
    parser.add_argument("--learning_rate", type=float, default=0.1)
    parser.add_argument("--random_seed", type=bool, default=False)
    parser.add_argument(
        "--num_steps",
        type=int,
        help="Reduce the number of training "
        "and evaluation steps to the give number if desired."
        "If this is not passed, trains for one epoch "
        "of training and validation data",
    )
    return parser.parse_args()
def acc(output, label):
    """Return the fraction of rows whose argmax prediction equals the label."""
    predicted = output.argmax(axis=1)
    hits = predicted == label.astype("float32")
    return hits.mean().asscalar()
def train_model(batch_size, net, train_data, valid_data, lr, hook, num_steps=None):
    """Train `net` for one epoch with SGD, switching the smdebug hook mode.

    Parameters
    ----------
    batch_size : int
        Mini-batch size (passed to trainer.step for gradient scaling).
    net : gluon network
        Model to train in place.
    train_data, valid_data : iterables of (data, label) batches
        Training and validation loaders.
    lr : float
        SGD learning rate.
    hook : smdebug Hook
        Debugger hook; set to TRAIN/EVAL mode around the respective loops.
    num_steps : int, optional
        If given, both loops stop once the batch index exceeds this value.
    """
    softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": lr})
    # Start the training.
    for epoch in range(1):
        train_loss, train_acc, valid_acc = 0.0, 0.0, 0.0
        tic = time.time()
        hook.set_mode(modes.TRAIN)
        for i, (data, label) in enumerate(train_data):
            if num_steps is not None and num_steps < i:
                break
            data = data.as_in_context(mx.cpu(0))
            # forward + backward
            with autograd.record():
                output = net(data)
                loss = softmax_cross_entropy(output, label)
            loss.backward()
            # update parameters
            trainer.step(batch_size)
            # calculate training metrics
            train_loss += loss.mean().asscalar()
            train_acc += acc(output, label)
        # calculate validation accuracy
        hook.set_mode(modes.EVAL)
        for i, (data, label) in enumerate(valid_data):
            if num_steps is not None and num_steps < i:
                break
            data = data.as_in_context(mx.cpu(0))
            valid_acc += acc(net(data), label)
        # Per-epoch summary: metrics are averaged over batch count, not
        # sample count, so they are mean-of-batch-means.
        print(
            "Epoch %d: loss %.3f, train acc %.3f, test acc %.3f, in %.1f sec"
            % (
                epoch,
                train_loss / len(train_data),
                train_acc / len(train_data),
                valid_acc / len(valid_data),
                time.time() - tic,
            )
        )
def prepare_data(batch_size):
    """Build the FashionMNIST train/validation DataLoaders.

    Parameters
    ----------
    batch_size : int
        Mini-batch size for both loaders.

    Returns
    -------
    tuple
        (train_data, valid_data): gluon DataLoaders yielding normalized
        image tensors and integer labels.

    Notes
    -----
    The original body contained dead code (a bare tuple expression that
    was a `print` missing its call, plus unused `X`, `y` and `text_labels`
    locals); it has been removed without changing the returned loaders.
    """
    transformer = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(0.13, 0.31)]
    )
    mnist_train = datasets.FashionMNIST(train=True).transform_first(transformer)
    train_data = gluon.data.DataLoader(
        mnist_train, batch_size=batch_size, shuffle=True, num_workers=4
    )
    mnist_valid = gluon.data.vision.FashionMNIST(train=False)
    valid_data = gluon.data.DataLoader(
        mnist_valid.transform_first(transformer), batch_size=batch_size, num_workers=4
    )
    return train_data, valid_data
# Create a model using gluon API. The hook is currently
# supports MXNet gluon models only.
def create_gluon_model():
    """Assemble a small LeNet-style gluon CNN and Xavier-initialize it on CPU."""
    layers = (
        nn.Conv2D(channels=6, kernel_size=5, activation="relu"),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Conv2D(channels=16, kernel_size=3, activation="relu"),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Flatten(),
        nn.Dense(120, activation="relu"),
        nn.Dense(84, activation="relu"),
        nn.Dense(10),
    )
    net = nn.HybridSequential()
    for layer in layers:
        net.add(layer)
    net.initialize(init=init.Xavier(), ctx=mx.cpu())
    return net
# Create a hook. The initialization of hook determines which tensors
# are logged while training is in progress.
# Following function shows the default initialization that enables logging of
# weights, biases and gradients in the model.
def create_hook(output_s3_uri):
    """Build an smdebug Hook that logs weights, biases and gradients.

    Tensors are saved for steps 1, 2 and 3 (step indexing starts at 0)
    under the given output location.
    """
    return Hook(
        out_dir=output_s3_uri,
        save_config=SaveConfig(save_steps=[1, 2, 3]),
        include_collections=["weights", "gradients", "biases"],
    )
def main():
    """Parse options, build the model and smdebug hook, and run training."""
    opt = parse_args()
    # These random seeds are only intended for test purposes.
    # For now, 128/12/2 promise no assert failure when running the tests;
    # if you change them, note that certain steps' tensor values may vary.
    if opt.random_seed:
        mx.random.seed(128)
        random.seed(12)
        np.random.seed(2)
    # Create a Gluon model.
    net = create_gluon_model()
    # Create a hook for logging the desired tensors.
    # output_uri is the URI of the S3 bucket where the tensors will be
    # saved; an explicit --smdebug_path takes precedence over --output-uri.
    output_uri = opt.smdebug_path if opt.smdebug_path is not None else opt.output_uri
    hook = create_hook(output_uri)
    net.hybridize()
    # Register the hook to the top block.
    hook.register_hook(net)
    # Start the training.
    batch_size = opt.batch_size
    train_data, valid_data = prepare_data(batch_size)
    train_model(batch_size, net, train_data, valid_data, opt.learning_rate, hook, opt.num_steps)
if __name__ == "__main__":
    main()
| [
"mxnet.autograd.record",
"mxnet.gluon.nn.Conv2D",
"mxnet.init.Xavier",
"mxnet.gluon.nn.HybridSequential",
"mxnet.gluon.nn.MaxPool2D",
"mxnet.gluon.nn.Flatten",
"mxnet.gluon.loss.SoftmaxCrossEntropyLoss",
"argparse.ArgumentParser",
"numpy.random.seed",
"mxnet.gluon.data.DataLoader",
"smdebug.mxne... | [((348, 441), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train a mxnet gluon model for FashonMNIST dataset"""'}), "(description=\n 'Train a mxnet gluon model for FashonMNIST dataset')\n", (371, 441), False, 'import argparse\n'), ((1604, 1640), 'mxnet.gluon.loss.SoftmaxCrossEntropyLoss', 'gluon.loss.SoftmaxCrossEntropyLoss', ([], {}), '()\n', (1638, 1640), False, 'from mxnet import autograd, gluon, init\n'), ((3149, 3182), 'mxnet.gluon.data.vision.datasets.FashionMNIST', 'datasets.FashionMNIST', ([], {'train': '(True)'}), '(train=True)\n', (3170, 3182), False, 'from mxnet.gluon.data.vision import datasets, transforms\n'), ((3674, 3764), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['mnist_train'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(4)'}), '(mnist_train, batch_size=batch_size, shuffle=True,\n num_workers=4)\n', (3695, 3764), False, 'from mxnet import autograd, gluon, init\n'), ((3793, 3836), 'mxnet.gluon.data.vision.FashionMNIST', 'gluon.data.vision.FashionMNIST', ([], {'train': '(False)'}), '(train=False)\n', (3823, 3836), False, 'from mxnet import autograd, gluon, init\n'), ((4162, 4183), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {}), '()\n', (4181, 4183), False, 'from mxnet.gluon import nn\n'), ((5021, 5053), 'smdebug.mxnet.SaveConfig', 'SaveConfig', ([], {'save_steps': '[1, 2, 3]'}), '(save_steps=[1, 2, 3])\n', (5031, 5053), False, 'from smdebug.mxnet import Hook, SaveConfig, modes\n'), ((5152, 5265), 'smdebug.mxnet.Hook', 'Hook', ([], {'out_dir': 'output_s3_uri', 'save_config': 'save_config', 'include_collections': "['weights', 'gradients', 'biases']"}), "(out_dir=output_s3_uri, save_config=save_config, include_collections=[\n 'weights', 'gradients', 'biases'])\n", (5156, 5265), False, 'from smdebug.mxnet import Hook, SaveConfig, modes\n'), ((1845, 1856), 'time.time', 'time.time', ([], {}), '()\n', (1854, 1856), False, 'import time\n'), ((4205, 
4260), 'mxnet.gluon.nn.Conv2D', 'nn.Conv2D', ([], {'channels': '(6)', 'kernel_size': '(5)', 'activation': '"""relu"""'}), "(channels=6, kernel_size=5, activation='relu')\n", (4214, 4260), False, 'from mxnet.gluon import nn\n'), ((4270, 4306), 'mxnet.gluon.nn.MaxPool2D', 'nn.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (4282, 4306), False, 'from mxnet.gluon import nn\n'), ((4316, 4372), 'mxnet.gluon.nn.Conv2D', 'nn.Conv2D', ([], {'channels': '(16)', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(channels=16, kernel_size=3, activation='relu')\n", (4325, 4372), False, 'from mxnet.gluon import nn\n'), ((4382, 4418), 'mxnet.gluon.nn.MaxPool2D', 'nn.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (4394, 4418), False, 'from mxnet.gluon import nn\n'), ((4428, 4440), 'mxnet.gluon.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (4438, 4440), False, 'from mxnet.gluon import nn\n'), ((4450, 4482), 'mxnet.gluon.nn.Dense', 'nn.Dense', (['(120)'], {'activation': '"""relu"""'}), "(120, activation='relu')\n", (4458, 4482), False, 'from mxnet.gluon import nn\n'), ((4492, 4523), 'mxnet.gluon.nn.Dense', 'nn.Dense', (['(84)'], {'activation': '"""relu"""'}), "(84, activation='relu')\n", (4500, 4523), False, 'from mxnet.gluon import nn\n'), ((4533, 4545), 'mxnet.gluon.nn.Dense', 'nn.Dense', (['(10)'], {}), '(10)\n', (4541, 4545), False, 'from mxnet.gluon import nn\n'), ((5622, 5641), 'mxnet.random.seed', 'mx.random.seed', (['(128)'], {}), '(128)\n', (5636, 5641), True, 'import mxnet as mx\n'), ((5650, 5665), 'random.seed', 'random.seed', (['(12)'], {}), '(12)\n', (5661, 5665), False, 'import random\n'), ((5674, 5691), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (5688, 5691), True, 'import numpy as np\n'), ((3540, 3561), 'mxnet.gluon.data.vision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3559, 3561), False, 'from mxnet.gluon.data.vision import datasets, 
transforms\n'), ((3563, 3595), 'mxnet.gluon.data.vision.transforms.Normalize', 'transforms.Normalize', (['(0.13)', '(0.31)'], {}), '(0.13, 0.31)\n', (3583, 3595), False, 'from mxnet.gluon.data.vision import datasets, transforms\n'), ((4577, 4590), 'mxnet.init.Xavier', 'init.Xavier', ([], {}), '()\n', (4588, 4590), False, 'from mxnet import autograd, gluon, init\n'), ((4596, 4604), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (4602, 4604), True, 'import mxnet as mx\n'), ((2063, 2072), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (2069, 2072), True, 'import mxnet as mx\n'), ((2124, 2141), 'mxnet.autograd.record', 'autograd.record', ([], {}), '()\n', (2139, 2141), False, 'from mxnet import autograd, gluon, init\n'), ((2714, 2723), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (2720, 2723), True, 'import mxnet as mx\n'), ((665, 677), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (675, 677), False, 'import uuid\n'), ((3056, 3067), 'time.time', 'time.time', ([], {}), '()\n', (3065, 3067), False, 'import time\n')] |
from distutils.version import LooseVersion
import pytest
import numpy as np
import astropy
from astropy.io import fits
from astropy import table
from astropy import wcs as fitswcs
from astropy.utils.data import get_pkg_data_filename
from astropy.modeling import polynomial
from astropy.modeling.models import (
Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial,
Identity, Mapping, Const1D, Scale
)
from astropy import units as u
from astropy import coordinates as coord
import tweakwcs
try:
import gwcs
if LooseVersion(gwcs.__version__) > '0.12.0':
from gwcs.geometry import SphericalToCartesian, CartesianToSpherical
from gwcs import coordinate_frames as cf
_GWCS_VER_GT_0P12 = True
else:
_GWCS_VER_GT_0P12 = False
except ImportError:
_GWCS_VER_GT_0P12 = False
# Feature gates: JWST-related tests need both astropy >= 4 and gwcs > 0.12.0.
_ASTROPY_VER_GE_4 = LooseVersion(astropy.__version__) >= '4.0'
_NO_JWST_SUPPORT = not (_ASTROPY_VER_GE_4 and _GWCS_VER_GT_0P12)
# Absolute tolerance: 1000x the machine epsilon of the default float dtype.
_ATOL = 1e3 * np.finfo(np.array([1.]).dtype).eps
# Conversion factors between radians and arcseconds.
_RAD2ARCSEC = 3600.0 * np.rad2deg(1.0)
_ARCSEC2RAD = 1.0 / _RAD2ARCSEC
def _make_gwcs_wcs(fits_hdr):
    """Build a ``gwcs.WCS`` equivalent to the SIP-distorted FITS WCS in *fits_hdr*.

    The header is loaded from package test data; its SIP distortion and the
    linear CD/CRVAL terms are recomposed into a gWCS pipeline
    detector -> v2v3 -> sky, and the result is sanity-checked against the
    original ``astropy.wcs.WCS`` at 100 random pixel positions.
    """
    hdr = fits.Header.fromfile(get_pkg_data_filename(fits_hdr))
    fw = fitswcs.WCS(hdr)
    # Collect the SIP polynomial coefficients present in the header.
    a_order = hdr['A_ORDER']
    a_coeff = {}
    for i in range(a_order + 1):
        for j in range(a_order + 1 - i):
            key = 'A_{:d}_{:d}'.format(i, j)
            if key in hdr:
                a_coeff[key] = hdr[key]
    b_order = hdr['B_ORDER']
    b_coeff = {}
    for i in range(b_order + 1):
        for j in range(b_order + 1 - i):
            key = 'B_{:d}_{:d}'.format(i, j)
            if key in hdr:
                b_coeff[key] = hdr[key]
    # SIP yields the distortion correction; '+ Identity(2)' adds it to the
    # input pixel coordinates (model arithmetic sums the outputs).
    distortion = polynomial.SIP(
        fw.wcs.crpix,
        fw.sip.a_order,
        fw.sip.b_order,
        a_coeff,
        b_coeff
    ) + Identity(2)
    # 2-D arcsec <-> degree unit conversions built from 1-D scalings.
    unit_conv = Scale(1.0 / 3600.0, name='arcsec_to_deg_1D')
    unit_conv = unit_conv & unit_conv
    unit_conv.name = 'arcsec_to_deg_2D'
    unit_conv_inv = Scale(3600.0, name='deg_to_arcsec_1D')
    unit_conv_inv = unit_conv_inv & unit_conv_inv
    unit_conv_inv.name = 'deg_to_arcsec_2D'
    c2s = CartesianToSpherical(name='c2s', wrap_lon_at=180)
    s2c = SphericalToCartesian(name='s2c', wrap_lon_at=180)
    # Project 3-D Cartesian coordinates onto the tangent plane:
    # divide (x, y, z) by x and keep (y/x, z/x).
    c2tan = ((Mapping((0, 1, 2), name='xyz') /
              Mapping((0, 0, 0), n_inputs=3, name='xxx')) |
             Mapping((1, 2), name='xtyt'))
    c2tan.name = 'Cartesian 3D to TAN'
    # Inverse: lift tangent-plane (xt, yt) back to (1, xt, yt) in 3-D.
    tan2c = (Mapping((0, 0, 1), n_inputs=2, name='xtyt2xyz') |
             (Const1D(1, name='one') & Identity(2, name='I(2D)')))
    tan2c.name = 'TAN to cartesian 3D'
    tan2c.inverse = c2tan
    c2tan.inverse = tan2c
    aff = AffineTransformation2D(matrix=fw.wcs.cd)
    offx = Shift(-fw.wcs.crpix[0])
    offy = Shift(-fw.wcs.crpix[1])
    s = 5e-6
    scale = Scale(s) & Scale(s)
    # Full detector -> v2v3 transform: SIP distortion, CRPIX offset,
    # small scaling, tangent-plane lift, spherical coords, then arcsec.
    distortion |= (offx & offy) | scale | tan2c | c2s | unit_conv_inv
    taninv = s2c | c2tan
    tan = Pix2Sky_TAN()
    n2c = RotateNative2Celestial(fw.wcs.crval[0], fw.wcs.crval[1], 180)
    # v2v3 -> sky: undo the intermediate scaling, apply CD matrix and
    # the TAN projection rotated to CRVAL.
    wcslin = unit_conv | taninv | scale.inverse | aff | tan | n2c
    sky_frm = cf.CelestialFrame(reference_frame=coord.ICRS())
    det_frm = cf.Frame2D(name='detector')
    v2v3_frm = cf.Frame2D(
        name="v2v3",
        unit=(u.arcsec, u.arcsec),
        axes_names=('x', 'y'),
        axes_order=(0, 1)
    )
    pipeline = [(det_frm, distortion), (v2v3_frm, wcslin), (sky_frm, None)]
    gw = gwcs.WCS(input_frame=det_frm, output_frame=sky_frm,
                  forward_transform=pipeline)
    gw.crpix = fw.wcs.crpix
    gw.crval = fw.wcs.crval
    # sanity check: the gWCS must reproduce the FITS WCS to ~1e-11 deg
    for _ in range(100):
        x = np.random.randint(1, fw.pixel_shape[0])
        y = np.random.randint(1, fw.pixel_shape[0])
        assert np.allclose(gw(x, y), fw.all_pix2world(x, y, 1),
                           rtol=0, atol=1e-11)
    return gw
def _match(x, y):
lenx = len(x)
leny = len(y)
if lenx == leny:
return (np.arange(lenx), np.arange(leny))
elif lenx < leny:
lenx, leny = leny, lenx
x, y = y, x
match = (np.arange(leny) + (0 if y.meta['name'] == 'ext1' else leny),
np.arange(leny))
return match
@pytest.mark.skipif(_NO_JWST_SUPPORT, reason="requires gwcs>=0.12.1")
def test_multichip_jwst_alignment():
    """End-to-end alignment of two chip catalogs to a reference catalog.

    Builds gWCS objects for two WFC3/UVIS chips, wraps them in
    ``tweakwcs.JWSTgWCS`` correctors sharing one ``group_id``, aligns both
    to a reference catalog with a 'general' linear fit, and checks the
    fitted scale, fit RMSE, and residuals of the aligned sky positions.
    """
    w1 = _make_gwcs_wcs('data/wfc3_uvis1.hdr')
    imcat1 = tweakwcs.JWSTgWCS(w1, {'v2_ref': 0, 'v3_ref': 0, 'roll_ref': 0})
    imcat1.meta['catalog'] = table.Table.read(
        get_pkg_data_filename('data/wfc3_uvis1.cat'),
        format='ascii.csv',
        delimiter=' ',
        names=['x', 'y']
    )
    # shift catalog positions by one pixel
    # (NOTE(review): presumably converting 0-based to 1-based — confirm)
    imcat1.meta['catalog']['x'] += 1
    imcat1.meta['catalog']['y'] += 1
    imcat1.meta['group_id'] = 1
    imcat1.meta['name'] = 'ext1'
    w2 = _make_gwcs_wcs('data/wfc3_uvis2.hdr')
    imcat2 = tweakwcs.JWSTgWCS(w2, {'v2_ref': 0, 'v3_ref': 0, 'roll_ref': 0})
    imcat2.meta['catalog'] = table.Table.read(
        get_pkg_data_filename('data/wfc3_uvis2.cat'),
        format='ascii.csv',
        delimiter=' ',
        names=['x', 'y']
    )
    imcat2.meta['catalog']['x'] += 1
    imcat2.meta['catalog']['y'] += 1
    # same group_id: both chips are fit together as one rigid group
    imcat2.meta['group_id'] = 1
    imcat2.meta['name'] = 'ext4'
    refcat = table.Table.read(
        get_pkg_data_filename('data/ref.cat'),
        format='ascii.csv', delimiter=' ',
        names=['RA', 'DEC']
    )
    # _match (above) pairs each chip's sources with its half of refcat
    tweakwcs.align_wcs([imcat1, imcat2], refcat, match=_match, nclip=None,
                       sigma=3, fitgeom='general')
    fi1 = imcat1.meta['fit_info']
    fi2 = imcat2.meta['fit_info']
    w1m = imcat1.wcs
    w2m = imcat2.wcs
    # aligned WCS must map each chip's CRPIX to the expected sky position
    assert np.allclose(w1m(*w1.crpix), (83.206917667519, -67.73275818507248), rtol=0)
    assert np.allclose(w2m(*w2.crpix), (83.15167050722597, -67.74220306069903), rtol=0)
    # fitted scale and fit RMSE for both chips
    assert np.allclose(fi1['<scale>'], 1.0025, rtol=0, atol=2e-8)
    assert np.allclose(fi2['<scale>'], 1.0025, rtol=0, atol=2e-8)
    assert fi1['rmse'] < 5e-5
    assert fi2['rmse'] < 5e-5
    # residuals of all aligned source positions against the reference
    ra1, dec1 = imcat1.wcs(imcat1.meta['catalog']['x'],
                           imcat1.meta['catalog']['y'])
    ra2, dec2 = imcat2.wcs(imcat2.meta['catalog']['x'],
                           imcat2.meta['catalog']['y'])
    ra = np.concatenate([ra1, ra2])
    dec = np.concatenate([dec1, dec2])
    rra = refcat['RA']
    rdec = refcat['DEC']
    rmse_ra = np.sqrt(np.mean((ra - rra)**2))
    rmse_dec = np.sqrt(np.mean((dec - rdec)**2))
    assert rmse_ra < 3e-9
    assert rmse_dec < 3e-10
| [
"numpy.array",
"astropy.modeling.models.Scale",
"numpy.arange",
"astropy.utils.data.get_pkg_data_filename",
"gwcs.geometry.CartesianToSpherical",
"numpy.mean",
"astropy.modeling.models.Identity",
"astropy.modeling.models.Shift",
"astropy.modeling.models.AffineTransformation2D",
"numpy.concatenate"... | [((4182, 4250), 'pytest.mark.skipif', 'pytest.mark.skipif', (['_NO_JWST_SUPPORT'], {'reason': '"""requires gwcs>=0.12.1"""'}), "(_NO_JWST_SUPPORT, reason='requires gwcs>=0.12.1')\n", (4200, 4250), False, 'import pytest\n'), ((860, 893), 'distutils.version.LooseVersion', 'LooseVersion', (['astropy.__version__'], {}), '(astropy.__version__)\n', (872, 893), False, 'from distutils.version import LooseVersion\n'), ((1042, 1057), 'numpy.rad2deg', 'np.rad2deg', (['(1.0)'], {}), '(1.0)\n', (1052, 1057), True, 'import numpy as np\n'), ((1195, 1211), 'astropy.wcs.WCS', 'fitswcs.WCS', (['hdr'], {}), '(hdr)\n', (1206, 1211), True, 'from astropy import wcs as fitswcs\n'), ((1852, 1896), 'astropy.modeling.models.Scale', 'Scale', (['(1.0 / 3600.0)'], {'name': '"""arcsec_to_deg_1D"""'}), "(1.0 / 3600.0, name='arcsec_to_deg_1D')\n", (1857, 1896), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((1996, 2034), 'astropy.modeling.models.Scale', 'Scale', (['(3600.0)'], {'name': '"""deg_to_arcsec_1D"""'}), "(3600.0, name='deg_to_arcsec_1D')\n", (2001, 2034), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((2140, 2189), 'gwcs.geometry.CartesianToSpherical', 'CartesianToSpherical', ([], {'name': '"""c2s"""', 'wrap_lon_at': '(180)'}), "(name='c2s', wrap_lon_at=180)\n", (2160, 2189), False, 'from gwcs.geometry import SphericalToCartesian, CartesianToSpherical\n'), ((2200, 2249), 'gwcs.geometry.SphericalToCartesian', 'SphericalToCartesian', ([], {'name': '"""s2c"""', 'wrap_lon_at': '(180)'}), "(name='s2c', wrap_lon_at=180)\n", (2220, 2249), False, 'from gwcs.geometry import SphericalToCartesian, CartesianToSpherical\n'), ((2673, 2713), 'astropy.modeling.models.AffineTransformation2D', 'AffineTransformation2D', ([], {'matrix': 'fw.wcs.cd'}), '(matrix=fw.wcs.cd)\n', 
(2695, 2713), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((2726, 2749), 'astropy.modeling.models.Shift', 'Shift', (['(-fw.wcs.crpix[0])'], {}), '(-fw.wcs.crpix[0])\n', (2731, 2749), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((2761, 2784), 'astropy.modeling.models.Shift', 'Shift', (['(-fw.wcs.crpix[1])'], {}), '(-fw.wcs.crpix[1])\n', (2766, 2784), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((2938, 2951), 'astropy.modeling.models.Pix2Sky_TAN', 'Pix2Sky_TAN', ([], {}), '()\n', (2949, 2951), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((2962, 3023), 'astropy.modeling.models.RotateNative2Celestial', 'RotateNative2Celestial', (['fw.wcs.crval[0]', 'fw.wcs.crval[1]', '(180)'], {}), '(fw.wcs.crval[0], fw.wcs.crval[1], 180)\n', (2984, 3023), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((3167, 3194), 'gwcs.coordinate_frames.Frame2D', 'cf.Frame2D', ([], {'name': '"""detector"""'}), "(name='detector')\n", (3177, 3194), True, 'from gwcs import coordinate_frames as cf\n'), ((3210, 3306), 'gwcs.coordinate_frames.Frame2D', 'cf.Frame2D', ([], {'name': '"""v2v3"""', 'unit': '(u.arcsec, u.arcsec)', 'axes_names': "('x', 'y')", 'axes_order': '(0, 1)'}), "(name='v2v3', unit=(u.arcsec, u.arcsec), axes_names=('x', 'y'),\n axes_order=(0, 1))\n", (3220, 3306), True, 'from gwcs import coordinate_frames as cf\n'), ((3427, 3506), 'gwcs.WCS', 'gwcs.WCS', ([], {'input_frame': 'det_frm', 'output_frame': 'sky_frm', 'forward_transform': 'pipeline'}), 
'(input_frame=det_frm, output_frame=sky_frm, forward_transform=pipeline)\n', (3435, 3506), False, 'import gwcs\n'), ((4349, 4413), 'tweakwcs.JWSTgWCS', 'tweakwcs.JWSTgWCS', (['w1', "{'v2_ref': 0, 'v3_ref': 0, 'roll_ref': 0}"], {}), "(w1, {'v2_ref': 0, 'v3_ref': 0, 'roll_ref': 0})\n", (4366, 4413), False, 'import tweakwcs\n'), ((4797, 4861), 'tweakwcs.JWSTgWCS', 'tweakwcs.JWSTgWCS', (['w2', "{'v2_ref': 0, 'v3_ref': 0, 'roll_ref': 0}"], {}), "(w2, {'v2_ref': 0, 'v3_ref': 0, 'roll_ref': 0})\n", (4814, 4861), False, 'import tweakwcs\n'), ((5345, 5447), 'tweakwcs.align_wcs', 'tweakwcs.align_wcs', (['[imcat1, imcat2]', 'refcat'], {'match': '_match', 'nclip': 'None', 'sigma': '(3)', 'fitgeom': '"""general"""'}), "([imcat1, imcat2], refcat, match=_match, nclip=None,\n sigma=3, fitgeom='general')\n", (5363, 5447), False, 'import tweakwcs\n'), ((5766, 5821), 'numpy.allclose', 'np.allclose', (["fi1['<scale>']", '(1.0025)'], {'rtol': '(0)', 'atol': '(2e-08)'}), "(fi1['<scale>'], 1.0025, rtol=0, atol=2e-08)\n", (5777, 5821), True, 'import numpy as np\n'), ((5832, 5887), 'numpy.allclose', 'np.allclose', (["fi2['<scale>']", '(1.0025)'], {'rtol': '(0)', 'atol': '(2e-08)'}), "(fi2['<scale>'], 1.0025, rtol=0, atol=2e-08)\n", (5843, 5887), True, 'import numpy as np\n'), ((6182, 6208), 'numpy.concatenate', 'np.concatenate', (['[ra1, ra2]'], {}), '([ra1, ra2])\n', (6196, 6208), True, 'import numpy as np\n'), ((6219, 6247), 'numpy.concatenate', 'np.concatenate', (['[dec1, dec2]'], {}), '([dec1, dec2])\n', (6233, 6247), True, 'import numpy as np\n'), ((543, 573), 'distutils.version.LooseVersion', 'LooseVersion', (['gwcs.__version__'], {}), '(gwcs.__version__)\n', (555, 573), False, 'from distutils.version import LooseVersion\n'), ((1153, 1184), 'astropy.utils.data.get_pkg_data_filename', 'get_pkg_data_filename', (['fits_hdr'], {}), '(fits_hdr)\n', (1174, 1184), False, 'from astropy.utils.data import get_pkg_data_filename\n'), ((1696, 1774), 'astropy.modeling.polynomial.SIP', 
'polynomial.SIP', (['fw.wcs.crpix', 'fw.sip.a_order', 'fw.sip.b_order', 'a_coeff', 'b_coeff'], {}), '(fw.wcs.crpix, fw.sip.a_order, fw.sip.b_order, a_coeff, b_coeff)\n', (1710, 1774), False, 'from astropy.modeling import polynomial\n'), ((1823, 1834), 'astropy.modeling.models.Identity', 'Identity', (['(2)'], {}), '(2)\n', (1831, 1834), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((2370, 2398), 'astropy.modeling.models.Mapping', 'Mapping', (['(1, 2)'], {'name': '"""xtyt"""'}), "((1, 2), name='xtyt')\n", (2377, 2398), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((2453, 2500), 'astropy.modeling.models.Mapping', 'Mapping', (['(0, 0, 1)'], {'n_inputs': '(2)', 'name': '"""xtyt2xyz"""'}), "((0, 0, 1), n_inputs=2, name='xtyt2xyz')\n", (2460, 2500), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((2811, 2819), 'astropy.modeling.models.Scale', 'Scale', (['s'], {}), '(s)\n', (2816, 2819), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((2822, 2830), 'astropy.modeling.models.Scale', 'Scale', (['s'], {}), '(s)\n', (2827, 2830), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((3639, 3678), 'numpy.random.randint', 'np.random.randint', (['(1)', 'fw.pixel_shape[0]'], {}), '(1, fw.pixel_shape[0])\n', (3656, 3678), True, 'import numpy as np\n'), ((3691, 3730), 'numpy.random.randint', 'np.random.randint', (['(1)', 'fw.pixel_shape[0]'], {}), '(1, fw.pixel_shape[0])\n', (3708, 3730), True, 'import numpy as np\n'), ((4145, 4160), 'numpy.arange', 'np.arange', (['leny'], 
{}), '(leny)\n', (4154, 4160), True, 'import numpy as np\n'), ((4469, 4513), 'astropy.utils.data.get_pkg_data_filename', 'get_pkg_data_filename', (['"""data/wfc3_uvis1.cat"""'], {}), "('data/wfc3_uvis1.cat')\n", (4490, 4513), False, 'from astropy.utils.data import get_pkg_data_filename\n'), ((4917, 4961), 'astropy.utils.data.get_pkg_data_filename', 'get_pkg_data_filename', (['"""data/wfc3_uvis2.cat"""'], {}), "('data/wfc3_uvis2.cat')\n", (4938, 4961), False, 'from astropy.utils.data import get_pkg_data_filename\n'), ((5224, 5261), 'astropy.utils.data.get_pkg_data_filename', 'get_pkg_data_filename', (['"""data/ref.cat"""'], {}), "('data/ref.cat')\n", (5245, 5261), False, 'from astropy.utils.data import get_pkg_data_filename\n'), ((6318, 6342), 'numpy.mean', 'np.mean', (['((ra - rra) ** 2)'], {}), '((ra - rra) ** 2)\n', (6325, 6342), True, 'import numpy as np\n'), ((6365, 6391), 'numpy.mean', 'np.mean', (['((dec - rdec) ** 2)'], {}), '((dec - rdec) ** 2)\n', (6372, 6391), True, 'import numpy as np\n'), ((2264, 2294), 'astropy.modeling.models.Mapping', 'Mapping', (['(0, 1, 2)'], {'name': '"""xyz"""'}), "((0, 1, 2), name='xyz')\n", (2271, 2294), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((2311, 2353), 'astropy.modeling.models.Mapping', 'Mapping', (['(0, 0, 0)'], {'n_inputs': '(3)', 'name': '"""xxx"""'}), "((0, 0, 0), n_inputs=3, name='xxx')\n", (2318, 2353), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((2517, 2539), 'astropy.modeling.models.Const1D', 'Const1D', (['(1)'], {'name': '"""one"""'}), "(1, name='one')\n", (2524, 2539), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((2542, 2567), 'astropy.modeling.models.Identity', 'Identity', (['(2)'], {'name': 
'"""I(2D)"""'}), "(2, name='I(2D)')\n", (2550, 2567), False, 'from astropy.modeling.models import Shift, AffineTransformation2D, Pix2Sky_TAN, RotateNative2Celestial, Identity, Mapping, Const1D, Scale\n'), ((3139, 3151), 'astropy.coordinates.ICRS', 'coord.ICRS', ([], {}), '()\n', (3149, 3151), True, 'from astropy import coordinates as coord\n'), ((3950, 3965), 'numpy.arange', 'np.arange', (['lenx'], {}), '(lenx)\n', (3959, 3965), True, 'import numpy as np\n'), ((3967, 3982), 'numpy.arange', 'np.arange', (['leny'], {}), '(leny)\n', (3976, 3982), True, 'import numpy as np\n'), ((4071, 4086), 'numpy.arange', 'np.arange', (['leny'], {}), '(leny)\n', (4080, 4086), True, 'import numpy as np\n'), ((992, 1007), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1000, 1007), True, 'import numpy as np\n')] |
# Done by Frannecklp
import cv2
import numpy as np
import win32gui, win32ui, win32con, win32api
import matplotlib.pyplot as plt
import pickle
import os,shutil
# Default capture region: (left, top, right, bottom) in screen pixels.
box = (45,37,912,511)
def grab_screen(region=box):
    """Capture a region of the Windows desktop and return it as an RGB image.

    Parameters
    ----------
    region : tuple or None
        (left, top, x2, y2) bounding box in screen pixels (inclusive);
        when falsy, the whole virtual screen is captured.

    Returns
    -------
    numpy.ndarray
        (height, width, 3) uint8 RGB image.
    """
    hwin = win32gui.GetDesktopWindow()
    if region:
        left, top, x2, y2 = region
        width = x2 - left + 1
        height = y2 - top + 1
    else:
        # No region given: capture the full virtual screen.
        width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
        height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
        left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
        top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
    hwindc = win32gui.GetWindowDC(hwin)
    srcdc = win32ui.CreateDCFromHandle(hwindc)
    memdc = srcdc.CreateCompatibleDC()
    bmp = win32ui.CreateBitmap()
    bmp.CreateCompatibleBitmap(srcdc, width, height)
    memdc.SelectObject(bmp)
    memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)
    signedIntsArray = bmp.GetBitmapBits(True)
    # np.fromstring is deprecated (and removed for binary input in newer
    # NumPy); frombuffer is the supported replacement.
    img = np.frombuffer(signedIntsArray, dtype='uint8')
    # frombuffer returns a read-only view, so reshape instead of
    # assigning to .shape; the bitmap is BGRA, 4 bytes per pixel.
    img = img.reshape((height, width, 4))
    # Release GDI resources to avoid handle leaks.
    srcdc.DeleteDC()
    memdc.DeleteDC()
    win32gui.ReleaseDC(hwin, hwindc)
    win32gui.DeleteObject(bmp.GetHandle())
    return cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
def im_show(img):
    """Display an OpenCV BGR image with matplotlib in a new figure."""
    plt.figure()
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(rgb)
def cv2_show(window, img, pos=(0, 384 + 30)):
    """Show *img* resized to 683x384 in an OpenCV window placed at *pos*."""
    frame = cv2.resize(img, (683, 384), interpolation=cv2.INTER_NEAREST)
    cv2.namedWindow(window)
    cv2.moveWindow(window, pos[0], pos[1])
    cv2.imshow(window, frame)
    # brief wait so the window actually refreshes
    cv2.waitKey(25)
def load_pickle(file):
    """Deserialize and return the object stored in pickle file *file*."""
    with open(file, 'rb') as handle:
        obj = pickle.load(handle)
    return obj
def save_pickle(x, path):
    """Serialize object *x* to *path* with pickle."""
    with open(path, 'wb') as out:
        pickle.dump(x, out)
def remove_content(folder):
    """Delete every file and subdirectory inside *folder*.

    The folder itself is kept; only its contents are removed.
    """
    for entry in os.listdir(folder):
        path = os.path.join(folder, entry)
        if os.path.isfile(path):
            os.unlink(path)
        elif os.path.isdir(path):
            # directories are removed recursively
            shutil.rmtree(path)
| [
"win32gui.GetDesktopWindow",
"cv2.imshow",
"win32ui.CreateBitmap",
"matplotlib.pyplot.imshow",
"cv2.moveWindow",
"os.listdir",
"win32gui.GetWindowDC",
"os.path.isdir",
"os.unlink",
"numpy.fromstring",
"cv2.waitKey",
"pickle.load",
"win32ui.CreateDCFromHandle",
"os.path.isfile",
"win32gui... | [((224, 251), 'win32gui.GetDesktopWindow', 'win32gui.GetDesktopWindow', ([], {}), '()\n', (249, 251), False, 'import win32gui, win32ui, win32con, win32api\n'), ((676, 702), 'win32gui.GetWindowDC', 'win32gui.GetWindowDC', (['hwin'], {}), '(hwin)\n', (696, 702), False, 'import win32gui, win32ui, win32con, win32api\n'), ((715, 749), 'win32ui.CreateDCFromHandle', 'win32ui.CreateDCFromHandle', (['hwindc'], {}), '(hwindc)\n', (741, 749), False, 'import win32gui, win32ui, win32con, win32api\n'), ((799, 821), 'win32ui.CreateBitmap', 'win32ui.CreateBitmap', ([], {}), '()\n', (819, 821), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1044, 1089), 'numpy.fromstring', 'np.fromstring', (['signedIntsArray'], {'dtype': '"""uint8"""'}), "(signedIntsArray, dtype='uint8')\n", (1057, 1089), True, 'import numpy as np\n'), ((1170, 1202), 'win32gui.ReleaseDC', 'win32gui.ReleaseDC', (['hwin', 'hwindc'], {}), '(hwin, hwindc)\n', (1188, 1202), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1258, 1295), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGRA2RGB'], {}), '(img, cv2.COLOR_BGRA2RGB)\n', (1270, 1295), False, 'import cv2\n'), ((1319, 1331), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1329, 1331), True, 'import matplotlib.pyplot as plt\n'), ((1342, 1378), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1354, 1378), False, 'import cv2\n'), ((1383, 1398), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1393, 1398), True, 'import matplotlib.pyplot as plt\n'), ((1455, 1515), 'cv2.resize', 'cv2.resize', (['img', '(683, 384)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(img, (683, 384), interpolation=cv2.INTER_NEAREST)\n', (1465, 1515), False, 'import cv2\n'), ((1517, 1540), 'cv2.namedWindow', 'cv2.namedWindow', (['window'], {}), '(window)\n', (1532, 1540), False, 'import cv2\n'), ((1553, 1591), 'cv2.moveWindow', 'cv2.moveWindow', (['window', 'pos[0]', 
'pos[1]'], {}), '(window, pos[0], pos[1])\n', (1567, 1591), False, 'import cv2\n'), ((1595, 1618), 'cv2.imshow', 'cv2.imshow', (['window', 'img'], {}), '(window, img)\n', (1605, 1618), False, 'import cv2\n'), ((1622, 1637), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (1633, 1637), False, 'import cv2\n'), ((1862, 1880), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1872, 1880), False, 'import os, shutil\n'), ((398, 452), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['win32con.SM_CXVIRTUALSCREEN'], {}), '(win32con.SM_CXVIRTUALSCREEN)\n', (423, 452), False, 'import win32gui, win32ui, win32con, win32api\n'), ((470, 524), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['win32con.SM_CYVIRTUALSCREEN'], {}), '(win32con.SM_CYVIRTUALSCREEN)\n', (495, 524), False, 'import win32gui, win32ui, win32con, win32api\n'), ((540, 593), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['win32con.SM_XVIRTUALSCREEN'], {}), '(win32con.SM_XVIRTUALSCREEN)\n', (565, 593), False, 'import win32gui, win32ui, win32con, win32api\n'), ((608, 661), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['win32con.SM_YVIRTUALSCREEN'], {}), '(win32con.SM_YVIRTUALSCREEN)\n', (633, 661), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1708, 1722), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1719, 1722), False, 'import pickle\n'), ((1792, 1809), 'pickle.dump', 'pickle.dump', (['x', 'f'], {}), '(x, f)\n', (1803, 1809), False, 'import pickle\n'), ((1902, 1928), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (1914, 1928), False, 'import os, shutil\n'), ((1940, 1965), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1954, 1965), False, 'import os, shutil\n'), ((1979, 1999), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (1988, 1999), False, 'import os, shutil\n'), ((2013, 2037), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), 
'(file_path)\n', (2026, 2037), False, 'import os, shutil\n'), ((2052, 2076), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (2065, 2076), False, 'import os, shutil\n')] |
import numpy as np
import threading
import gil_load
# Demo script: monitor GIL load while several threads run NumPy FFT work.
N_THREADS = 4
NPTS = 4096

gil_load.init()


def do_some_work():
    """CPU-heavy task: two rounds of a large random 2-D FFT."""
    for _ in range(2):
        data = np.random.randn(NPTS, NPTS)
        data[:] = np.fft.fft2(data).real


gil_load.start()

threads = []
for _ in range(N_THREADS):
    worker = threading.Thread(target=do_some_work, daemon=True)
    threads.append(worker)
    worker.start()

for worker in threads:
    worker.join()

gil_load.stop()
stats = gil_load.get()
print(gil_load.format(stats))
| [
"gil_load.get",
"gil_load.init",
"gil_load.stop",
"gil_load.format",
"numpy.fft.fft2",
"gil_load.start",
"threading.Thread",
"numpy.random.randn"
] | [((80, 95), 'gil_load.init', 'gil_load.init', ([], {}), '()\n', (93, 95), False, 'import gil_load\n'), ((216, 232), 'gil_load.start', 'gil_load.start', ([], {}), '()\n', (230, 232), False, 'import gil_load\n'), ((428, 443), 'gil_load.stop', 'gil_load.stop', ([], {}), '()\n', (441, 443), False, 'import gil_load\n'), ((453, 467), 'gil_load.get', 'gil_load.get', ([], {}), '()\n', (465, 467), False, 'import gil_load\n'), ((287, 337), 'threading.Thread', 'threading.Thread', ([], {'target': 'do_some_work', 'daemon': '(True)'}), '(target=do_some_work, daemon=True)\n', (303, 337), False, 'import threading\n'), ((474, 496), 'gil_load.format', 'gil_load.format', (['stats'], {}), '(stats)\n', (489, 496), False, 'import gil_load\n'), ((152, 179), 'numpy.random.randn', 'np.random.randn', (['NPTS', 'NPTS'], {}), '(NPTS, NPTS)\n', (167, 179), True, 'import numpy as np\n'), ((195, 209), 'numpy.fft.fft2', 'np.fft.fft2', (['x'], {}), '(x)\n', (206, 209), True, 'import numpy as np\n')] |
from hierarc.Likelihood.SneLikelihood.sne_likelihood import SneLikelihood
import pytest
import numpy as np
class TestSnePantheon(object):
    """Tests for ``SneLikelihood`` on a synthetic custom supernova sample."""

    def setup(self):
        """Generate a mock Sne sample drawn from a known cosmology."""
        np.random.seed(42)
        n_sne = 30  # number of supernovae
        # redshifts; heliocentric == CMB-frame for this mock sample
        zcmb = np.linspace(start=0.01, stop=0.8, num=n_sne)
        zhel = zcmb
        # the underlying "truth" cosmology
        from astropy.cosmology import FlatLambdaCDM
        om_mean, om_sigma = 0.284, 0.012
        cosmo_true = FlatLambdaCDM(H0=70, Om0=om_mean)
        m_apparent = 18  # apparent magnitude at the pivot redshift
        z_pivot = 0.1
        # luminosity-distance moduli, and the value at the pivot redshift
        angular_diameter_distances = cosmo_true.angular_diameter_distance(zcmb).value
        lum_dists_true = 5 * np.log10((1 + zhel) * (1 + zcmb) * angular_diameter_distances)
        angular_diameter_distance_pivot = cosmo_true.angular_diameter_distance(z_pivot).value
        lum_dist_pivot = 5 * np.log10((1 + z_pivot) * (1 + z_pivot) * angular_diameter_distance_pivot)
        # systematics covariance plus independent measurement scatter
        sigma_m_z = 0.1
        cov_mag = np.ones((n_sne, n_sne)) * 0.05 ** 2 + np.diag(
            np.ones(n_sne) * 0.1 ** 2)  # full covariance matrix of systematics
        cov_mag_measure = cov_mag + np.diag(np.ones(n_sne) * sigma_m_z ** 2)
        mags = m_apparent + lum_dists_true - lum_dist_pivot
        mag_mean = np.random.multivariate_normal(mags, cov_mag_measure)
        self.likelihood = SneLikelihood(
            sample_name='CUSTOM', mag_mean=mag_mean, cov_mag=cov_mag,
            zhel=zhel, zcmb=zcmb)
        self.lum_dists_true = lum_dists_true
        self.m_apparent_true = m_apparent
        self.sigma_m_true = sigma_m_z
        self.cosmo_true = cosmo_true
        self.z_anchor = z_pivot

    def test_log_likelihood(self):
        """The likelihood must peak at the true apparent magnitude."""
        kwargs = dict(sigma_m_z=self.sigma_m_true, z_anchor=self.z_anchor)
        logL = self.likelihood.log_likelihood(
            self.cosmo_true, apparent_m_z=self.m_apparent_true, **kwargs)
        logL_high = self.likelihood.log_likelihood(
            self.cosmo_true, apparent_m_z=self.m_apparent_true + 0.2, **kwargs)
        assert logL > logL_high
        logL_low = self.likelihood.log_likelihood(
            self.cosmo_true, apparent_m_z=self.m_apparent_true - 0.2, **kwargs)
        assert logL > logL_low
# Allow running this test module directly, outside a pytest invocation.
if __name__ == '__main__':
    pytest.main()
| [
"numpy.log10",
"numpy.ones",
"numpy.random.multivariate_normal",
"astropy.cosmology.FlatLambdaCDM",
"pytest.main",
"numpy.linspace",
"numpy.random.seed",
"hierarc.Likelihood.SneLikelihood.sne_likelihood.SneLikelihood"
] | [((2546, 2559), 'pytest.main', 'pytest.main', ([], {}), '()\n', (2557, 2559), False, 'import pytest\n'), ((170, 188), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (184, 188), True, 'import numpy as np\n'), ((265, 307), 'numpy.linspace', 'np.linspace', ([], {'start': '(0.01)', 'stop': '(0.8)', 'num': 'num'}), '(start=0.01, stop=0.8, num=num)\n', (276, 307), True, 'import numpy as np\n'), ((471, 504), 'astropy.cosmology.FlatLambdaCDM', 'FlatLambdaCDM', ([], {'H0': '(70)', 'Om0': 'om_mean'}), '(H0=70, Om0=om_mean)\n', (484, 504), False, 'from astropy.cosmology import FlatLambdaCDM\n'), ((1357, 1409), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mags', 'cov_mag_measure'], {}), '(mags, cov_mag_measure)\n', (1386, 1409), True, 'import numpy as np\n'), ((1540, 1600), 'hierarc.Likelihood.SneLikelihood.sne_likelihood.SneLikelihood', 'SneLikelihood', ([], {'sample_name': '"""CUSTOM"""'}), "(sample_name='CUSTOM', **kwargs_sne_likelihood)\n", (1553, 1600), False, 'from hierarc.Likelihood.SneLikelihood.sne_likelihood import SneLikelihood\n'), ((745, 807), 'numpy.log10', 'np.log10', (['((1 + zhel) * (1 + zcmb) * angular_diameter_distances)'], {}), '((1 + zhel) * (1 + zcmb) * angular_diameter_distances)\n', (753, 807), True, 'import numpy as np\n'), ((934, 1007), 'numpy.log10', 'np.log10', (['((1 + z_pivot) * (1 + z_pivot) * angular_diameter_distance_pivot)'], {}), '((1 + z_pivot) * (1 + z_pivot) * angular_diameter_distance_pivot)\n', (942, 1007), True, 'import numpy as np\n'), ((1081, 1100), 'numpy.ones', 'np.ones', (['(num, num)'], {}), '((num, num))\n', (1088, 1100), True, 'import numpy as np\n'), ((1136, 1148), 'numpy.ones', 'np.ones', (['num'], {}), '(num)\n', (1143, 1148), True, 'import numpy as np\n'), ((1246, 1258), 'numpy.ones', 'np.ones', (['num'], {}), '(num)\n', (1253, 1258), True, 'import numpy as np\n')] |
#!/usr/bin/python
from matplotlib import gridspec, cm, dates
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
## COLORS
# N colors distributed evenly across a color map
# see https://matplotlib.org/examples/color/colormaps_reference.html for color maps
def make_N_colors(cmap_name, N):
    """Return N color tuples evenly sampled from matplotlib colormap *cmap_name*.

    The alpha channel is dropped and ``np.fliplr`` reverses the order of
    the three remaining channels in each row.
    """
    colormap = cm.get_cmap(cmap_name, N)
    channels = colormap(np.arange(N))[:, 0:3]
    flipped = np.fliplr(channels)
    return [tuple(row) for row in flipped]
## BOXPLOTS from a list of vectors
def plotData(matrix,plotTitle,yLabelText,plotLabels):
    """Draw box-and-whisker plots (one box per vector in *matrix*) with the
    raw data scattered on top, and run pairwise significance tests.

    Parameters
    ----------
    matrix : list of vectors, one box per vector
    plotTitle : title text (shown only if the title line below is un-commented)
    yLabelText : y-axis label
    plotLabels : x tick labels (tiled when there are more than 2 boxes)
    """
    statTest = 'k'  # statistic key passed to statsFromBoxData ('k' = Kruskal-Wallis)
    palette = 'spring'
    boxData = matrix
    figWidth = 3
    if len(boxData) > 2:
        # Many boxes: color each from the palette and widen the figure.
        numPlots = len(boxData)
        plotColors = make_N_colors(palette,numPlots)
        plotColors = plotColors * numPlots
        plotLabels = plotLabels * numPlots
        figWidth = figWidth * numPlots * 0.5
    else:
        # One or two boxes: fixed blue / yellow.
        plotColors = ['b','y'] # make_N_colors(palette,numPlots)
    f = plt.figure(num=None, figsize=(figWidth, 4), dpi=80, facecolor='w', edgecolor='k')
    ax = f.add_subplot(111)
    b1 = ax.boxplot(boxData, widths=0.5, sym = '')
    b1 = formatBoxColors(b1,plotColors)
    # do some scatter plots to add the data
    xPos = 1
    for b in boxData:
        numPoints = len(b)
        # jitter the x positions so individual points do not overlap
        xPoints=wobbleAround(xPos,numPoints,0.1)
        # add the points!
        plt.scatter(xPoints,b,c=[plotColors[xPos-1]],edgecolor='',alpha=0.2,s=100)
        xPos += 1
    ax.set_xticklabels(plotLabels, fontsize = 18)
    ax.yaxis.label.set_size(18)
    #ax.set_ylim([-1.1,1.1])
    ax.set_ylabel(yLabelText,fontsize=18)
    if len(boxData) == 2:
        # Exactly two groups: a single p-value is reported.
        pval = statsFromBoxData(boxData,statTest)[0]
        print(pval)
        titleLabel = plotTitle + ('; p = %1.3f' % pval)
    else:
        statsFromBoxData(boxData,statTest)
        titleLabel = plotTitle
    # comment ON to show title with p-value
    #ax.set_title(titleLabel,fontsize=18)
    #f.set_tight_layout(True)
    #ax.set_ylim([0,205])
    plt.show()
# format colors of a boxplot object
def formatBoxColors(bp, plotColors):
    """Color the artists of boxplot dict *bp* box-by-box and return it.

    Boxes and medians take one color per box; whiskers and caps come in
    pairs per box, so each color is used twice, drawn semi-transparent.
    """
    width = 3
    for idx, artist in enumerate(bp['boxes']):
        artist.set(color=plotColors[idx], linewidth=width)
    for idx, artist in enumerate(bp['medians']):
        artist.set(color=plotColors[idx], linewidth=width)
    # Duplicate every color for the paired whisker/cap artists.
    doubled = [c for c in plotColors for _ in range(2)]
    for idx, artist in enumerate(bp['whiskers']):
        artist.set(color=doubled[idx], linewidth=width, alpha=0.5)
    for idx, artist in enumerate(bp['caps']):
        artist.set(color=doubled[idx], linewidth=width, alpha=0.5)
    return bp
# stats from boxplot data
def statsFromBoxData(boxData, statTest):
    """Run pairwise significance tests over the groups in *boxData*.

    If any group lies entirely within [0, 1] all groups are first
    logit-transformed.  *statTest* selects Kruskal-Wallis
    ('k'/'kruskal'/'kruskalwallis'/'kw') and/or an independent t-test
    ('t'/'tt'/'ttest').  Returns the list of p-values.
    """
    # Logit-transform when any group looks like proportions in [0, 1].
    needTransform = any(np.min(b) >= 0 and np.max(b) <= 1 for b in boxData)
    if needTransform:
        print('transformed the data!')
        boxData = logitBoxData(boxData)
    pvals = []
    nGroups = len(boxData)
    for i in range(nGroups):
        for j in range(i + 1, nGroups):
            if statTest in ('k', 'kruskal', 'kruskalwallis', 'kw'):
                _, p = stats.kruskal(boxData[i], boxData[j])
                print('%i vs. %i: %1.3f by Kruskal-Wallis' % (i + 1, j + 1, p))
                pvals.append(p)
            if statTest in ('t', 'tt', 'ttest'):
                _, p = stats.ttest_ind(boxData[i], boxData[j])
                print('%i vs. %i: %1.3f by ttest-ind' % (i + 1, j + 1, p))
                pvals.append(p)
    # MORE STAT TESTS?
    print('')
    return pvals
# points to scatter on boxplot
def wobbleAround(center, number, distAway):
    """Return *number* random x-positions uniformly jittered within
    ``center +/- distAway`` (used to scatter points over a boxplot)."""
    import random
    points = []
    while len(points) < number:
        points.append(random.uniform(center - distAway, center + distAway))
    return points
# logit transform
def logitVec(d):
    """Logit-transform vector *d*: log(p / (1 - p)) with a small offset.

    If the maximum exceeds 1 the vector is first rescaled to proportions
    by dividing by its maximum.  The smallest positive value is added to
    both numerator and denominator so exact 0's and 1's stay finite.
    """
    if np.max(d) > 1:
        # rescale to proportions
        d = d / float(max(d))
    eps = np.min(d[np.where(d > 0)])
    numerator = d + eps
    denominator = (1 - d) + eps
    return np.log(numerator / denominator)
def logitBoxData(boxData):
    """Logit-transform every group in `boxData`; returns a new list
    (the input groups are not modified)."""
    return [logitVec(group) for group in boxData]
| [
"random.uniform",
"numpy.where",
"numpy.fliplr",
"numpy.log",
"numpy.max",
"matplotlib.pyplot.figure",
"scipy.stats.ttest_ind",
"matplotlib.pyplot.scatter",
"numpy.min",
"scipy.stats.kruskal",
"matplotlib.cm.get_cmap",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((325, 350), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cmap_name', 'N'], {}), '(cmap_name, N)\n', (336, 350), False, 'from matplotlib import gridspec, cm, dates\n'), ((401, 416), 'numpy.fliplr', 'np.fliplr', (['cmap'], {}), '(cmap)\n', (410, 416), True, 'import numpy as np\n'), ((892, 977), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(figWidth, 4)', 'dpi': '(80)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(figWidth, 4), dpi=80, facecolor='w',\n edgecolor='k')\n", (902, 977), True, 'import matplotlib.pyplot as plt\n'), ((1809, 1819), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1817, 1819), True, 'import matplotlib.pyplot as plt\n'), ((3832, 3849), 'numpy.log', 'np.log', (['(num / dem)'], {}), '(num / dem)\n', (3838, 3849), True, 'import numpy as np\n'), ((1242, 1327), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xPoints', 'b'], {'c': '[plotColors[xPos - 1]]', 'edgecolor': '""""""', 'alpha': '(0.2)', 's': '(100)'}), "(xPoints, b, c=[plotColors[xPos - 1]], edgecolor='', alpha=0.2,\n s=100)\n", (1253, 1327), True, 'import matplotlib.pyplot as plt\n'), ((3653, 3662), 'numpy.max', 'np.max', (['d'], {}), '(d)\n', (3659, 3662), True, 'import numpy as np\n'), ((368, 380), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (377, 380), True, 'import numpy as np\n'), ((3507, 3559), 'random.uniform', 'random.uniform', (['(center - distAway)', '(center + distAway)'], {}), '(center - distAway, center + distAway)\n', (3521, 3559), False, 'import random\n'), ((3777, 3792), 'numpy.where', 'np.where', (['(d > 0)'], {}), '(d > 0)\n', (3785, 3792), True, 'import numpy as np\n'), ((2654, 2663), 'numpy.min', 'np.min', (['b'], {}), '(b)\n', (2660, 2663), True, 'import numpy as np\n'), ((2673, 2682), 'numpy.max', 'np.max', (['b'], {}), '(b)\n', (2679, 2682), True, 'import numpy as np\n'), ((2968, 3005), 'scipy.stats.kruskal', 'stats.kruskal', (['boxData[i]', 'boxData[j]'], {}), '(boxData[i], boxData[j])\n', 
(2981, 3005), False, 'from scipy import stats\n'), ((3135, 3174), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['boxData[i]', 'boxData[j]'], {}), '(boxData[i], boxData[j])\n', (3150, 3174), False, 'from scipy import stats\n')] |
#!/usr/bin/env python
"""
Extracts faces from images and recognize the persons inside each image
and returns the images the bounding boxes and the recognized faces
"""
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
import cv2
import random
from collections import Counter
from recognition.facenet.src import facenet
from detection.insightface.RetinaFace.retinaface import RetinaFace
def recognition_handler(args):
    """Detect, recognize and annotate faces in a video/image feed.

    Per frame: detect faces with RetinaFace, crop each detection with a
    margin, embed the crop with the FaceNet model, label it by a 3-NN
    vote against the saved dataset embeddings, then draw bounding boxes,
    landmarks and labels onto the frame.

    Fixes over the original:
      * labels are kept per-frame for drawing -- the old code indexed a
        list that accumulated across frames, so every frame after the
        first drew the wrong labels;
      * cv2.putText is given a string, not a (label, bbox) tuple;
      * a redundant detect_align call on the un-rotated frame (result
        discarded) is removed, as is the unused `counter` variable;
      * `.astype(np.int)` -> `.astype(int)` (np.int was removed in
        NumPy 1.24; behavior is identical).
    """
    # define detector; gpuid == -1 runs on CPU, fd_model is "path,epoch"
    gpuid = -1
    model_path = args.fd_model.split(',')
    detector = RetinaFace(model_path[0], int(model_path[1]), gpuid, 'net3')
    # Making sure output directory exists
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    cap = cv2.VideoCapture(args.input_image)
    # Check if video feed opened successfully
    if not cap.isOpened():
        print("Unable to read frames feed")
    with tf.Graph().as_default():
        with tf.Session() as sess:
            emb_array = np.load(args.dataset+'features_data.npy') #load saved dataset features for KNN
            labels = np.load(args.dataset+'labels.npy')
            recognized_labels = []  # cumulative (label, bbox) history across frames
            facenet.load_model(args.fr_model)
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            while True:  # place holder for the open connection
                ret, frame = cap.read()
                # if failed to read frame data
                if not ret:
                    break
                frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
                faces_bounding_boxes, landmarks = detect_align(frame, detector)
                nrof_faces = faces_bounding_boxes.shape[0]
                faces = np.zeros((nrof_faces, args.image_size, args.image_size, 3))
                if nrof_faces > 0:
                    det = faces_bounding_boxes[:, 0:4]
                    det_arr = []
                    img_size = np.asarray(frame.shape)[0:2]
                    crop_margin = 20
                    for i in range(nrof_faces):
                        det_arr.append(np.squeeze(det[i]))
                    for i, det in enumerate(det_arr):
                        det = np.squeeze(det)
                        # pad the box by half the margin, clipped to the frame
                        bb = np.zeros(4, dtype=np.int32)
                        bb[0] = np.maximum(det[0]-crop_margin/2, 0)
                        bb[1] = np.maximum(det[1]-crop_margin/2, 0)
                        bb[2] = np.minimum(det[2]+crop_margin/2, img_size[1])
                        bb[3] = np.minimum(det[3]+crop_margin/2, img_size[0])
                        cropped = frame[bb[1]:bb[3], bb[0]:bb[2], :]
                        faces[i] = cv2.resize(cropped, (args.image_size, args.image_size), interpolation=cv2.INTER_LINEAR)
                frame_labels = []  # BUGFIX: labels for THIS frame only
                for i in range(nrof_faces):
                    faces[i] = cv2.resize(faces[i], (args.image_size, args.image_size))
                    faces[i] = facenet.prewhiten(faces[i])
                    faces[i] = facenet.crop(faces[i], False, args.image_size)
                    faces[i] = facenet.flip(faces[i], False)
                    face = faces[i][None, :, :, :]
                    feed_dict = {images_placeholder: face, phase_train_placeholder: False}
                    face_embeddings = sess.run(embeddings, feed_dict=feed_dict)
                    frame_labels.append((KNN_predict(face_embeddings, emb_array, labels, k=3), faces_bounding_boxes[i]))
                recognized_labels.extend(frame_labels)
                print('recognized labels: ', recognized_labels)
                for i in range(nrof_faces):
                    box = faces_bounding_boxes[i].astype(int)
                    color = (0, 0, 255)
                    cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)
                    if landmarks is not None:
                        landmark5 = landmarks[i].astype(int)
                        for l in range(landmark5.shape[0]):
                            # eyes (points 0 and 3) in green, rest in red
                            color = (0, 0, 255)
                            if l == 0 or l == 3:
                                color = (0, 255, 0)
                            cv2.circle(frame, (landmark5[l][0], landmark5[l][1]), 1, color, 2)
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    bottomLeftCornerOfText = (box[2]-80, box[3]+15)
                    fontScale = 0.4
                    fontColor = (0, 255, 255)
                    lineType = 2
                    # BUGFIX: index the per-frame list and pass the label as a
                    # string (cv2.putText requires text, not a tuple)
                    cv2.putText(frame, str(frame_labels[i][0]),
                                bottomLeftCornerOfText,
                                font,
                                fontScale,
                                fontColor,
                                lineType)
            # cv2.imshow('output', img)
def KNN_predict(image_embeddings, data_embeddings, labels, k):
    """Label an embedding by majority vote over its k nearest neighbours.

    Distances are facenet Euclidean distances (distance_metric=0) to every
    dataset embedding.  Ties are broken in favour of the label whose
    nearest representative is closest.  Returns '' when the dataset is
    empty.
    """
    neighbours = sorted(
        ((facenet.distance(image_embeddings, data_embeddings[i], distance_metric=0), labels[i])
         for i in range(len(data_embeddings))),
        key=lambda pair: pair[0])
    votes = Counter(lab for _, lab in neighbours[:min(k, len(data_embeddings))])
    if not votes:
        return ''
    # most_common keeps insertion order on ties, i.e. nearest-first
    return votes.most_common(1)[0][0]
def detect_align(frame, detector):
    """Run RetinaFace detection on `frame` at a single adaptive scale.

    The scale is chosen so the short image side targets 1024 px; if that
    would push the long side past 1980 px, the long side is capped
    instead.  Grayscale (2-D) frames are promoted to RGB first.  Returns
    whatever detector.detect returns (bounding boxes, landmarks).
    """
    if frame.ndim < 2:
        print('Unable to align frame')
    if frame.ndim == 2:
        frame = facenet.to_rgb(frame)
    target_size, max_size = 1024, 1980
    short_side = np.min(frame.shape[0:2])
    long_side = np.max(frame.shape[0:2])
    im_scale = float(target_size) / float(short_side)
    # do not let the scaled long side overflow max_size
    if np.round(im_scale * long_side) > max_size:
        im_scale = float(max_size) / float(long_side)
    return detector.detect(frame, threshold=0.8, scales=[im_scale], do_flip=False)
def parse_arguments(argv):
    """Build the CLI parser for the recognition pipeline and parse `argv`."""
    parser = argparse.ArgumentParser()
    # five required positional paths, in pipeline order
    positional = [
        ('input_image', 'Absolute path to the input image'),
        ('output_dir', 'Directory path used to save output results'),
        ('fd_model', 'detection model path, epoch'),
        ('fr_model', 'recogntition model path'),
        ('dataset', 'Absolute path to Directory or file holding recognition dataset'),
    ]
    for name, desc in positional:
        parser.add_argument(name, type=str, help=desc)
    parser.add_argument('--image_size', type=int, help='Faces size in pixels.', default=160)
    return parser.parse_args(argv)
# Script entry point: parse CLI arguments and run the detection +
# recognition loop over the given input feed.
if __name__ == '__main__':
    recognition_handler(parse_arguments(sys.argv[1:]))
| [
"cv2.rectangle",
"recognition.facenet.src.facenet.flip",
"os.path.exists",
"tensorflow.Graph",
"argparse.ArgumentParser",
"tensorflow.Session",
"numpy.asarray",
"numpy.max",
"numpy.min",
"numpy.maximum",
"tensorflow.get_default_graph",
"numpy.round",
"recognition.facenet.src.facenet.load_mod... | [((1953, 1987), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.input_image'], {}), '(args.input_image)\n', (1969, 1987), False, 'import cv2\n'), ((7245, 7266), 'numpy.min', 'np.min', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (7251, 7266), True, 'import numpy as np\n'), ((7285, 7306), 'numpy.max', 'np.max', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (7291, 7306), True, 'import numpy as np\n'), ((7631, 7656), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7654, 7656), False, 'import argparse\n'), ((1872, 1903), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (1886, 1903), False, 'import os\n'), ((1913, 1941), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (1924, 1941), False, 'import os\n'), ((7099, 7120), 'recognition.facenet.src.facenet.to_rgb', 'facenet.to_rgb', (['frame'], {}), '(frame)\n', (7113, 7120), False, 'from recognition.facenet.src import facenet\n'), ((7369, 7401), 'numpy.round', 'np.round', (['(im_scale * im_size_max)'], {}), '(im_scale * im_size_max)\n', (7377, 7401), True, 'import numpy as np\n'), ((2158, 2170), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2168, 2170), True, 'import tensorflow as tf\n'), ((2204, 2247), 'numpy.load', 'np.load', (["(args.dataset + 'features_data.npy')"], {}), "(args.dataset + 'features_data.npy')\n", (2211, 2247), True, 'import numpy as np\n'), ((2316, 2352), 'numpy.load', 'np.load', (["(args.dataset + 'labels.npy')"], {}), "(args.dataset + 'labels.npy')\n", (2323, 2352), True, 'import numpy as np\n'), ((2399, 2432), 'recognition.facenet.src.facenet.load_model', 'facenet.load_model', (['args.fr_model'], {}), '(args.fr_model)\n', (2417, 2432), False, 'from recognition.facenet.src import facenet\n'), ((2120, 2130), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2128, 2130), True, 'import tensorflow as tf\n'), ((3122, 3164), 'cv2.rotate', 'cv2.rotate', 
(['frame', 'cv2.ROTATE_90_CLOCKWISE'], {}), '(frame, cv2.ROTATE_90_CLOCKWISE)\n', (3132, 3164), False, 'import cv2\n'), ((3344, 3403), 'numpy.zeros', 'np.zeros', (['(nrof_faces, args.image_size, args.image_size, 3)'], {}), '((nrof_faces, args.image_size, args.image_size, 3))\n', (3352, 3403), True, 'import numpy as np\n'), ((6548, 6621), 'recognition.facenet.src.facenet.distance', 'facenet.distance', (['image_embeddings', 'data_embeddings[i]'], {'distance_metric': '(0)'}), '(image_embeddings, data_embeddings[i], distance_metric=0)\n', (6564, 6621), False, 'from recognition.facenet.src import facenet\n'), ((2509, 2531), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2529, 2531), True, 'import tensorflow as tf\n'), ((2587, 2609), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2607, 2609), True, 'import tensorflow as tf\n'), ((2683, 2705), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2703, 2705), True, 'import tensorflow as tf\n'), ((4490, 4546), 'cv2.resize', 'cv2.resize', (['faces[i]', '(args.image_size, args.image_size)'], {}), '(faces[i], (args.image_size, args.image_size))\n', (4500, 4546), False, 'import cv2\n'), ((4577, 4604), 'recognition.facenet.src.facenet.prewhiten', 'facenet.prewhiten', (['faces[i]'], {}), '(faces[i])\n', (4594, 4604), False, 'from recognition.facenet.src import facenet\n'), ((4636, 4682), 'recognition.facenet.src.facenet.crop', 'facenet.crop', (['faces[i]', '(False)', 'args.image_size'], {}), '(faces[i], False, args.image_size)\n', (4648, 4682), False, 'from recognition.facenet.src import facenet\n'), ((4714, 4743), 'recognition.facenet.src.facenet.flip', 'facenet.flip', (['faces[i]', '(False)'], {}), '(faces[i], False)\n', (4726, 4743), False, 'from recognition.facenet.src import facenet\n'), ((5334, 5400), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(box[0], box[1])', '(box[2], box[3])', 'color', '(2)'], {}), '(frame, (box[0], box[1]), (box[2], 
box[3]), color, 2)\n', (5347, 5400), False, 'import cv2\n'), ((6051, 6157), 'cv2.putText', 'cv2.putText', (['frame', 'recognized_labels[i]', 'bottomLeftCornerOfText', 'font', 'fontScale', 'fontColor', 'lineType'], {}), '(frame, recognized_labels[i], bottomLeftCornerOfText, font,\n fontScale, fontColor, lineType)\n', (6062, 6157), False, 'import cv2\n'), ((3558, 3581), 'numpy.asarray', 'np.asarray', (['frame.shape'], {}), '(frame.shape)\n', (3568, 3581), True, 'import numpy as np\n'), ((3816, 3831), 'numpy.squeeze', 'np.squeeze', (['det'], {}), '(det)\n', (3826, 3831), True, 'import numpy as np\n'), ((3861, 3888), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': 'np.int32'}), '(4, dtype=np.int32)\n', (3869, 3888), True, 'import numpy as np\n'), ((3921, 3960), 'numpy.maximum', 'np.maximum', (['(det[0] - crop_margin / 2)', '(0)'], {}), '(det[0] - crop_margin / 2, 0)\n', (3931, 3960), True, 'import numpy as np\n'), ((3989, 4028), 'numpy.maximum', 'np.maximum', (['(det[1] - crop_margin / 2)', '(0)'], {}), '(det[1] - crop_margin / 2, 0)\n', (3999, 4028), True, 'import numpy as np\n'), ((4057, 4106), 'numpy.minimum', 'np.minimum', (['(det[2] + crop_margin / 2)', 'img_size[1]'], {}), '(det[2] + crop_margin / 2, img_size[1])\n', (4067, 4106), True, 'import numpy as np\n'), ((4135, 4184), 'numpy.minimum', 'np.minimum', (['(det[3] + crop_margin / 2)', 'img_size[0]'], {}), '(det[3] + crop_margin / 2, img_size[0])\n', (4145, 4184), True, 'import numpy as np\n'), ((4285, 4377), 'cv2.resize', 'cv2.resize', (['cropped', '(args.image_size, args.image_size)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(cropped, (args.image_size, args.image_size), interpolation=cv2.\n INTER_LINEAR)\n', (4295, 4377), False, 'import cv2\n'), ((5728, 5794), 'cv2.circle', 'cv2.circle', (['frame', '(landmark5[l][0], landmark5[l][1])', '(1)', 'color', '(2)'], {}), '(frame, (landmark5[l][0], landmark5[l][1]), 1, color, 2)\n', (5738, 5794), False, 'import cv2\n'), ((3711, 3729), 'numpy.squeeze', 'np.squeeze', 
(['det[i]'], {}), '(det[i])\n', (3721, 3729), True, 'import numpy as np\n')] |
import numpy
import pickle
from os import listdir
import imageio
import numpy as np
import os
def deletefromfolder(path):
    """Report duplicate .jpg images under `path`, recursively.

    Images are grouped by raw pixel content; for each duplicate group the
    first file seen is kept and every later copy is printed as a removal
    candidate (the actual os.remove stays commented out, as before).

    Fixes over the original:
      * the fingerprint is the image's raw bytes instead of a reshape to
        a hard-coded (2025,) vector -- the old code only worked for
        45x45 single-channel images;
      * the old key joined per-pixel digit strings with no separator,
        which can collide (e.g. "1"+"23" == "12"+"3");
      * duplicates found in subdirectories are now reported with their
        real path (the old code joined the top-level dir with the bare
        filename).

    Parameters
    ----------
    path : str
        Root directory to scan.
    """
    rmmap = dict()   # pixel fingerprint -> list of duplicate file paths
    total = 0        # every file visited (any extension)
    repeatcnt = 0    # duplicates found
    for root, directories, filenames in os.walk(path):
        for filename in filenames:
            total += 1
            # only .jpg files; skip macOS "._" resource-fork files
            if filename.endswith(".jpg") and not filename.startswith("._"):
                filei = os.path.join(root, filename)
                imi = imageio.imread(filei)
                key = np.asarray(imi).tobytes()
                if key in rmmap:
                    repeatcnt += 1
                    rmmap[key].append(filei)
                else:
                    rmmap[key] = list()  # first copy is kept, not listed
    # print('Repeat/Total: {}/{}'.format(repeatcnt, total))
    for key in rmmap:
        for item in rmmap[key]:
            print("For removal: ", item)  # os.remove(item)
if __name__ == "__main__":
    # Root folder holding one sub-directory per symbol class; the old
    # relative location is kept below for reference.
    images_path = "../../../../DataScienceProjects/handwrittenmathsymbols/extracted_images/"  # '../images/extracted_images/'
    dirlist = os.listdir(images_path)
    # De-duplicate each class folder independently.
    for item in dirlist:
        deletefromfolder(os.path.join(images_path, item))
        # print(item)
| [
"os.listdir",
"numpy.asarray",
"os.path.join",
"imageio.imread",
"os.walk"
] | [((270, 286), 'os.walk', 'os.walk', (['datadir'], {}), '(datadir)\n', (277, 286), False, 'import os\n'), ((1415, 1438), 'os.listdir', 'os.listdir', (['images_path'], {}), '(images_path)\n', (1425, 1438), False, 'import os\n'), ((1490, 1521), 'os.path.join', 'os.path.join', (['images_path', 'item'], {}), '(images_path, item)\n', (1502, 1521), False, 'import os\n'), ((446, 474), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (458, 474), False, 'import os\n'), ((497, 518), 'imageio.imread', 'imageio.imread', (['filei'], {}), '(filei)\n', (511, 518), False, 'import imageio\n'), ((1162, 1189), 'os.path.join', 'os.path.join', (['datadir', 'item'], {}), '(datadir, item)\n', (1174, 1189), False, 'import os\n'), ((541, 556), 'numpy.asarray', 'np.asarray', (['imi'], {}), '(imi)\n', (551, 556), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on 2017-9-5
@author: cheng.li
"""
import math
import pandas as pd
import numpy as np
from PyFin.api import *
from alphamind.api import *
# --- Back-test configuration (module-level, read by factor_analysis) ---
factor = 'ROE'  # default factor name; factor_analysis takes its own argument
universe = Universe('custom', ['zz800'])  # presumably the CSI 800 universe -- confirm
start_date = '2010-01-01'
end_date = '2018-04-26'
freq = '10b'  # rebalance every 10 business days
category = 'sw_adj'  # adjusted Shenwan industry classification
level = 1  # industry level 1 (coarsest granularity)
horizon = map_freq(freq)  # forward-return horizon matching the rebalance frequency
ref_dates = makeSchedule(start_date, end_date, freq, 'china.sse')  # rebalance dates on the SSE calendar
def factor_analysis(factor):
    """Back-test three variants of `factor` over the configured universe.

    Variants:
      f1 -- cross-sectional quantiles of the raw factor,
      f2 -- quantiles within adjusted SW level-1 industry groups,
      f3 -- raw factor, industry/country-neutralized then mapped to
            percentiles per rebalance date.

    For every rebalance date a quintile long-short portfolio is formed
    per variant (long top 20%, short bottom 20%, normalized by gross
    exposure) and its return and the factor IC are recorded.

    Returns a dict with 'ic' and 'ret' as (mean, standard-error) pairs
    over dates, plus the factor name.
    """
    engine = SqlEngine()
    factors = {
        'f1': CSQuantiles(factor),
        'f2': CSQuantiles(factor, groups='sw1_adj'),
        'f3': LAST(factor)
    }
    # Pull factor values, risk-model exposures, industry labels and
    # forward returns for all rebalance dates, then join on (date, code).
    total_factor = engine.fetch_factor_range(universe, factors, dates=ref_dates)
    _, risk_exp = engine.fetch_risk_model_range(universe, dates=ref_dates)
    industry = engine.fetch_industry_range(universe, dates=ref_dates, category=category, level=level)
    rets = engine.fetch_dx_return_range(universe, horizon=horizon, offset=1, dates=ref_dates)
    total_factor = pd.merge(total_factor, industry[['trade_date', 'code', 'industry']], on=['trade_date', 'code'])
    total_factor = pd.merge(total_factor, risk_exp, on=['trade_date', 'code'])
    total_factor = pd.merge(total_factor, rets, on=['trade_date', 'code']).dropna()
    df_ret = pd.DataFrame(columns=['f1', 'f2', 'f3'])
    df_ic = pd.DataFrame(columns=['f1', 'f2', 'f3'])
    total_factor_groups = total_factor.groupby('trade_date')
    for date, this_factors in total_factor_groups:
        # Neutralize f3 against industry + country exposures, then map to
        # percentiles within the date's cross-section.
        raw_factors = this_factors['f3'].values
        industry_exp = this_factors[industry_styles + ['COUNTRY']].values.astype(float)
        processed_values = factor_processing(raw_factors, pre_process=[], risk_factors=industry_exp,
                                             post_process=[percentile])
        # NOTE(review): assigning into a groupby slice can trigger pandas'
        # SettingWithCopyWarning; a .copy() of this_factors would be safer.
        this_factors['f3'] = processed_values
        # Quintile long-short weights: +1 above 0.8, -1 below 0.2,
        # normalized by gross exposure per variant.
        factor_values = this_factors[['f1', 'f2', 'f3']].values
        positions = (factor_values >= 0.8) * 1.
        positions[factor_values <= 0.2] = -1
        positions /= np.abs(positions).sum(axis=0)
        ret_values = this_factors.dx.values @ positions
        df_ret.loc[date] = ret_values
        # IC = correlation of forward return dx with each factor variant
        ic_values = this_factors[['dx', 'f1', 'f2', 'f3']].corr().values[0, 1:]
        df_ic.loc[date] = ic_values
    print(f"{factor} is finished")
    # (mean, standard error) across dates for IC and return
    return {'ic': (df_ic.mean(axis=0), df_ic.std(axis=0) / math.sqrt(len(df_ic))),
            'ret': (df_ret.mean(axis=0), df_ret.std(axis=0) / math.sqrt(len(df_ic))),
            'factor': factor}
if __name__ == '__main__':
    # Fan the per-factor back-tests out over a dask cluster and collect
    # the (mean, stderr) summaries into two tables.
    from dask.distributed import Client

    # BUGFIX: bind `client` before the try block; previously, if
    # Client(...) itself raised, the finally clause hit a NameError
    # instead of surfacing the real connection error.
    client = None
    try:
        client = Client("10.63.6.176:8786")

        cols = pd.MultiIndex.from_product([['mean', 'std'], ['raw', 'peer', 'neutralized']])
        factors_ret = pd.DataFrame(columns=cols)
        factors_ic = pd.DataFrame(columns=cols)

        # Candidate factors to analyse in parallel.
        factors = ['ep_q',
                   'roe_q',
                   'SGRO',
                   'GREV',
                   'IVR',
                   'ILLIQUIDITY',
                   'con_target_price',
                   'con_pe_rolling_order',
                   'DividendPaidRatio']

        l = client.map(factor_analysis, factors)
        results = client.gather(l)

        # One row per factor: mean and standard error for each variant.
        for res in results:
            factor = res['factor']
            factors_ret.loc[factor, 'mean'] = res['ret'][0].values
            factors_ret.loc[factor, 'std'] = res['ret'][1].values
            factors_ic.loc[factor, 'mean'] = res['ic'][0].values
            factors_ic.loc[factor, 'std'] = res['ic'][1].values

        print(factors_ret)
    finally:
        if client is not None:
            client.close()
| [
"pandas.MultiIndex.from_product",
"numpy.abs",
"pandas.merge",
"dask.distributed.Client",
"pandas.DataFrame"
] | [((983, 1083), 'pandas.merge', 'pd.merge', (['total_factor', "industry[['trade_date', 'code', 'industry']]"], {'on': "['trade_date', 'code']"}), "(total_factor, industry[['trade_date', 'code', 'industry']], on=[\n 'trade_date', 'code'])\n", (991, 1083), True, 'import pandas as pd\n'), ((1098, 1157), 'pandas.merge', 'pd.merge', (['total_factor', 'risk_exp'], {'on': "['trade_date', 'code']"}), "(total_factor, risk_exp, on=['trade_date', 'code'])\n", (1106, 1157), True, 'import pandas as pd\n'), ((1256, 1296), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['f1', 'f2', 'f3']"}), "(columns=['f1', 'f2', 'f3'])\n", (1268, 1296), True, 'import pandas as pd\n'), ((1309, 1349), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['f1', 'f2', 'f3']"}), "(columns=['f1', 'f2', 'f3'])\n", (1321, 1349), True, 'import pandas as pd\n'), ((2572, 2598), 'dask.distributed.Client', 'Client', (['"""10.63.6.176:8786"""'], {}), "('10.63.6.176:8786')\n", (2578, 2598), False, 'from dask.distributed import Client\n'), ((2614, 2691), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['mean', 'std'], ['raw', 'peer', 'neutralized']]"], {}), "([['mean', 'std'], ['raw', 'peer', 'neutralized']])\n", (2640, 2691), True, 'import pandas as pd\n'), ((2714, 2740), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'cols'}), '(columns=cols)\n', (2726, 2740), True, 'import pandas as pd\n'), ((2762, 2788), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'cols'}), '(columns=cols)\n', (2774, 2788), True, 'import pandas as pd\n'), ((1177, 1232), 'pandas.merge', 'pd.merge', (['total_factor', 'rets'], {'on': "['trade_date', 'code']"}), "(total_factor, rets, on=['trade_date', 'code'])\n", (1185, 1232), True, 'import pandas as pd\n'), ((1998, 2015), 'numpy.abs', 'np.abs', (['positions'], {}), '(positions)\n', (2004, 2015), True, 'import numpy as np\n')] |
from physDBD import ConvertMomentsToNMomentsLayer, DeathRxnLayer, \
BirthRxnLayer, EatRxnLayer, ConvertNMomentsTEtoMomentsTE
# Depreciation warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import copy
import os
import shutil
import numpy as np
import tensorflow as tf
class Vals:
    """Shared numeric fixtures for the layer tests.

    Holds the base mean vectors / (non-central) covariance matrices and
    exposes batched copies of them, tiled along a leading batch axis of
    size `batch_size`.
    """

    nv = 3
    nh = 2
    batch_size = 2

    # species indices used by the reaction-layer tests
    i_death = 0
    i_birth = 0
    i_predator = 1
    i_prey = 0

    _mu = np.array([19.0, 45.0, 62.0, 4.0, 8.0])
    _cov = np.array([
        [30.0, 67.0, 107.0, 10.0, 9.0],
        [67.0, 162.0, 241.0, 20.0, 27.0],
        [107.0, 241.0, 402.0, 40.0, 27.0],
        [10.0, 20.0, 40.0, 5.0, 0.0],
        [9.0, 27.0, 27.0, 0.0, 9.0]
    ])
    _ncov = np.array([
        [391.0, 922.0, 1285.0, 86.0, 161.0],
        [922.0, 2187.0, 3031.0, 200.0, 387.0],
        [1285.0, 3031.0, 4246.0, 288.0, 523.0],
        [86.0, 200.0, 288.0, 21.0, 32.0],
        [161.0, 387.0, 523.0, 32.0, 73.0]
    ])
    _mu_TE = np.array([3.0, 5.0, 2.0, 1.0, 0.8])
    _ncov_TE = np.array([
        [12.0, 6.0, 3.0, 2.0, 1.0],
        [6.0, 18.0, 4.0, 3.0, 1.0],
        [3.0, 4.0, 16.0, 2.0, 1.0],
        [2.0, 3.0, 2.0, 8.0, 0.5],
        [1.0, 1.0, 1.0, 0.5, 6.0]
    ])

    @classmethod
    def _batched(cls, base):
        # Repeat `base` along a new leading batch axis of length batch_size.
        reps = (cls.batch_size,) + (1,) * base.ndim
        return np.tile(base, reps)

    @classmethod
    def mu_TE(cls):
        return cls._batched(cls._mu_TE)

    @classmethod
    def ncov_TE(cls):
        return cls._batched(cls._ncov_TE)

    @classmethod
    def mu(cls):
        return cls._batched(cls._mu)

    @classmethod
    def cov(cls):
        return cls._batched(cls._cov)

    @classmethod
    def ncov(cls):
        return cls._batched(cls._ncov)
@tf.keras.utils.register_keras_serializable(package="physDBD")
class SingleLayerModel(tf.keras.Model):
    """Minimal Keras model wrapping a single layer.

    Used by the tests below to round-trip a layer through Keras model
    save/load; the registration decorator lets Keras restore the
    original class instead of a generic deserialized stand-in.
    """

    def __init__(self, lyr, **kwargs):
        # NOTE(review): extra **kwargs (e.g. supplied on deserialization)
        # are accepted but not forwarded to the base class.
        super(SingleLayerModel, self).__init__(name='')
        self.lyr = lyr
    def get_config(self):
        # Serialize only the wrapped layer; Keras rebuilds it on load.
        return {
            "lyr": self.lyr
        }
    @classmethod
    def from_config(cls, config):
        """Rebuild the model from the dict produced by get_config."""
        return cls(**config)
    def call(self, input_tensor, training=False):
        # Forward straight through the wrapped layer.
        return self.lyr(input_tensor)
class TestNet:
    """Numerical and save/load tests for the physDBD moment/reaction layers.

    Each test builds a layer, feeds it the batched fixtures from Vals,
    compares the outputs against hand-computed expected values, and then
    verifies the layer survives a Keras save/load round trip.
    """
    def assert_equal_dicts(self, x_out, x_out_true):
        """Compare two result dicts entry-wise, mapping an expected full
        "covh" matrix to the "covh_diag" key the layers actually emit."""
        # Convert x_out_true covh to covh_diag as needed
        y_out_true = copy.copy(x_out_true)
        for key,val in x_out_true.items():
            if key == "covh":
                y_out_true["covh_diag"] = np.diag(val)
        if "covh" in y_out_true:
            del y_out_true["covh"]
        for key, val_true in y_out_true.items():
            val = x_out[key]
            self.assert_equal_arrs(val,val_true)
    def assert_equal_arrs(self, x_out, x_out_true):
        """Assert the max absolute elementwise difference is below 1e-4."""
        tol = 1.e-4
        assert np.max(abs(x_out-x_out_true)) < tol
    def save_load_model(self, lyr, x_in):
        """Wrap `lyr` in SingleLayerModel, save, reload, and check that the
        reloaded object has exactly the original class (not a Keras
        deserialization stand-in).  Cleans up the saved files."""
        # Test save; call the model once to build it first
        model = SingleLayerModel(lyr)
        x_out = model(x_in)
        model.save("saved_models/model", save_traces=False)
        # Test load
        model_rel = tf.keras.models.load_model("saved_models/model")
        print(model_rel)
        # Check types match!
        # Otherwise we may have: tensorflow.python.keras.saving.saved_model.load.XYZ instead of XYZ
        assert type(model_rel) is type(model)
        # Remove
        if os.path.isdir("saved_models"):
            shutil.rmtree("saved_models")
    def test_moments_to_nmoments(self):
        """(mu, cov) -> (mu, ncov): ncov should be cov + outer(mu, mu)."""
        v = Vals()
        lyr = ConvertMomentsToNMomentsLayer()
        # Input
        x_in = {
            "mu": tf.constant(v.mu(), dtype="float32"),
            "cov": tf.constant(v.cov(), dtype="float32")
        }
        # Output
        x_out = lyr(x_in)
        print(x_out)
        x_out_true = {
            "mu": np.array([19., 45., 62., 4., 8.]),
            "ncov": np.array([
                [391., 922., 1285., 86., 161.],
                [922., 2187., 3031., 200., 387.],
                [1285., 3031., 4246., 288., 523.],
                [86., 200., 288., 21., 32.],
                [161., 387., 523., 32., 73.]
            ])
        }
        self.assert_equal_dicts(x_out,x_out_true)
        self.save_load_model(lyr, x_in)
    def test_death_rxn(self):
        """Death reaction on species i_death: expected time-evolution
        contributions to mu and ncov (hand-computed)."""
        v = Vals()
        lyr = DeathRxnLayer(nv=v.nv,nh=v.nh,i_sp=v.i_death)
        # Input
        x_in = {
            "mu": tf.constant(v.mu(), dtype="float32"),
            "ncov": tf.constant(v.ncov(), dtype="float32")
        }
        # Output
        x_out = lyr(x_in)
        print(x_out)
        x_out_true = {
            "muTE": np.array([-19., 0., 0., 0., 0.]),
            "ncovTE": np.array([
                [-763., -922., -1285., -86., -161.],
                [-922., 0., 0., 0., 0.],
                [-1285., 0., 0., 0., 0.],
                [-86., 0., 0., 0., 0.],
                [-161., 0., 0., 0., 0.]
            ])
        }
        self.assert_equal_dicts(x_out,x_out_true)
        self.save_load_model(lyr, x_in)
    def test_birth_rxn(self):
        """Birth reaction on species i_birth: hand-computed expectations."""
        v = Vals()
        lyr = BirthRxnLayer(nv=v.nv,nh=v.nh,i_sp=v.i_birth)
        # Input
        x_in = {
            "mu": tf.constant(v.mu(), dtype="float32"),
            "ncov": tf.constant(v.ncov(), dtype="float32")
        }
        # Output
        x_out = lyr(x_in)
        print(x_out)
        x_out_true = {
            "muTE": np.array([19., 0., 0., 0., 0.]),
            "ncovTE": np.array([
                [801., 922., 1285., 86., 161.],
                [922., 0., 0., 0., 0.],
                [1285., 0., 0., 0., 0.],
                [86., 0., 0., 0., 0.],
                [161., 0., 0., 0., 0.]
            ])
        }
        self.assert_equal_dicts(x_out,x_out_true)
        self.save_load_model(lyr, x_in)
    def test_eat_rxn(self):
        """Predator-prey (eat) reaction between i_prey and i_predator:
        hand-computed expectations."""
        v = Vals()
        lyr = EatRxnLayer(nv=v.nv,nh=v.nh,i_prey=v.i_prey,i_hunter=v.i_predator)
        # Input
        x_in = {
            "mu": tf.constant(v.mu(), dtype="float32"),
            "ncov": tf.constant(v.ncov(), dtype="float32")
        }
        # Output
        x_out = lyr(x_in)
        print(x_out)
        x_out_true = {
            "muTE": np.array([-922., 922., 0., 0., 0.]),
            "ncovTE": np.array([
                [-39360., -28364., -66558., -4518., -8294.],
                [-28364., 96088., 66558., 4518., 8294.],
                [-66558., 66558., 0., 0., 0.],
                [-4518., 4518., 0., 0., 0.],
                [-8294., 8294., 0., 0., 0.]
            ])
        }
        self.assert_equal_dicts(x_out,x_out_true)
        self.save_load_model(lyr, x_in)
    def test_convert_nmomentsTE_to_momentsTE(self):
        """(mu, muTE, ncovTE) -> (muTE, covTE): hand-computed expectations."""
        v = Vals()
        lyr = ConvertNMomentsTEtoMomentsTE()
        # Input
        x_in = {
            "mu": tf.constant(v.mu(), dtype="float32"),
            "muTE": tf.constant(v.mu_TE(), dtype="float32"),
            "ncovTE": tf.constant(v.ncov_TE(), dtype="float32")
        }
        # Output
        x_out = lyr(x_in)
        print(x_out)
        x_out_true = {
            "muTE": np.array([3., 5., 2., 1., 0.8]),
            "covTE": np.array([
                [-102., -224., -221., -29., -38.2],
                [-224., -432., -396., -62., -75.],
                [-221., -396., -232., -68., -64.6],
                [-29., -62., -68., 0., -10.7],
                [-38.2, -75., -64.6, -10.7, -6.8]
            ])
        }
        self.assert_equal_dicts(x_out,x_out_true)
self.save_load_model(lyr, x_in) | [
"numpy.tile",
"physDBD.DeathRxnLayer",
"physDBD.BirthRxnLayer",
"numpy.diag",
"tensorflow.keras.utils.register_keras_serializable",
"numpy.array",
"os.path.isdir",
"tensorflow.keras.models.load_model",
"physDBD.EatRxnLayer",
"shutil.rmtree",
"physDBD.ConvertNMomentsTEtoMomentsTE",
"copy.copy",... | [((171, 233), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (194, 233), False, 'import warnings\n'), ((1657, 1718), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""physDBD"""'}), "(package='physDBD')\n", (1699, 1718), True, 'import tensorflow as tf\n'), ((448, 486), 'numpy.array', 'np.array', (['[19.0, 45.0, 62.0, 4.0, 8.0]'], {}), '([19.0, 45.0, 62.0, 4.0, 8.0])\n', (456, 486), True, 'import numpy as np\n'), ((493, 672), 'numpy.array', 'np.array', (['[[30.0, 67.0, 107.0, 10.0, 9.0], [67.0, 162.0, 241.0, 20.0, 27.0], [107.0, \n 241.0, 402.0, 40.0, 27.0], [10.0, 20.0, 40.0, 5.0, 0.0], [9.0, 27.0, \n 27.0, 0.0, 9.0]]'], {}), '([[30.0, 67.0, 107.0, 10.0, 9.0], [67.0, 162.0, 241.0, 20.0, 27.0],\n [107.0, 241.0, 402.0, 40.0, 27.0], [10.0, 20.0, 40.0, 5.0, 0.0], [9.0, \n 27.0, 27.0, 0.0, 9.0]])\n', (501, 672), True, 'import numpy as np\n'), ((705, 910), 'numpy.array', 'np.array', (['[[391.0, 922.0, 1285.0, 86.0, 161.0], [922.0, 2187.0, 3031.0, 200.0, 387.0],\n [1285.0, 3031.0, 4246.0, 288.0, 523.0], [86.0, 200.0, 288.0, 21.0, 32.0\n ], [161.0, 387.0, 523.0, 32.0, 73.0]]'], {}), '([[391.0, 922.0, 1285.0, 86.0, 161.0], [922.0, 2187.0, 3031.0, \n 200.0, 387.0], [1285.0, 3031.0, 4246.0, 288.0, 523.0], [86.0, 200.0, \n 288.0, 21.0, 32.0], [161.0, 387.0, 523.0, 32.0, 73.0]])\n', (713, 910), True, 'import numpy as np\n'), ((942, 977), 'numpy.array', 'np.array', (['[3.0, 5.0, 2.0, 1.0, 0.8]'], {}), '([3.0, 5.0, 2.0, 1.0, 0.8])\n', (950, 977), True, 'import numpy as np\n'), ((993, 1145), 'numpy.array', 'np.array', (['[[12.0, 6.0, 3.0, 2.0, 1.0], [6.0, 18.0, 4.0, 3.0, 1.0], [3.0, 4.0, 16.0, \n 2.0, 1.0], [2.0, 3.0, 2.0, 8.0, 0.5], [1.0, 1.0, 1.0, 0.5, 6.0]]'], {}), '([[12.0, 6.0, 3.0, 2.0, 1.0], [6.0, 18.0, 4.0, 3.0, 1.0], [3.0, 4.0,\n 16.0, 2.0, 1.0], [2.0, 3.0, 2.0, 8.0, 0.5], [1.0, 
1.0, 1.0, 0.5, 6.0]])\n', (1001, 1145), True, 'import numpy as np\n'), ((1245, 1285), 'numpy.tile', 'np.tile', (['cls._mu_TE', '(cls.batch_size, 1)'], {}), '(cls._mu_TE, (cls.batch_size, 1))\n', (1252, 1285), True, 'import numpy as np\n'), ((1340, 1385), 'numpy.tile', 'np.tile', (['cls._ncov_TE', '(cls.batch_size, 1, 1)'], {}), '(cls._ncov_TE, (cls.batch_size, 1, 1))\n', (1347, 1385), True, 'import numpy as np\n'), ((1434, 1471), 'numpy.tile', 'np.tile', (['cls._mu', '(cls.batch_size, 1)'], {}), '(cls._mu, (cls.batch_size, 1))\n', (1441, 1471), True, 'import numpy as np\n'), ((1522, 1563), 'numpy.tile', 'np.tile', (['cls._cov', '(cls.batch_size, 1, 1)'], {}), '(cls._cov, (cls.batch_size, 1, 1))\n', (1529, 1563), True, 'import numpy as np\n'), ((1614, 1656), 'numpy.tile', 'np.tile', (['cls._ncov', '(cls.batch_size, 1, 1)'], {}), '(cls._ncov, (cls.batch_size, 1, 1))\n', (1621, 1656), True, 'import numpy as np\n'), ((2282, 2303), 'copy.copy', 'copy.copy', (['x_out_true'], {}), '(x_out_true)\n', (2291, 2303), False, 'import copy\n'), ((3024, 3072), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""saved_models/model"""'], {}), "('saved_models/model')\n", (3050, 3072), True, 'import tensorflow as tf\n'), ((3303, 3332), 'os.path.isdir', 'os.path.isdir', (['"""saved_models"""'], {}), "('saved_models')\n", (3316, 3332), False, 'import os\n'), ((3452, 3483), 'physDBD.ConvertMomentsToNMomentsLayer', 'ConvertMomentsToNMomentsLayer', ([], {}), '()\n', (3481, 3483), False, 'from physDBD import ConvertMomentsToNMomentsLayer, DeathRxnLayer, BirthRxnLayer, EatRxnLayer, ConvertNMomentsTEtoMomentsTE\n'), ((4258, 4305), 'physDBD.DeathRxnLayer', 'DeathRxnLayer', ([], {'nv': 'v.nv', 'nh': 'v.nh', 'i_sp': 'v.i_death'}), '(nv=v.nv, nh=v.nh, i_sp=v.i_death)\n', (4271, 4305), False, 'from physDBD import ConvertMomentsToNMomentsLayer, DeathRxnLayer, BirthRxnLayer, EatRxnLayer, ConvertNMomentsTEtoMomentsTE\n'), ((5058, 5105), 'physDBD.BirthRxnLayer', 'BirthRxnLayer', 
([], {'nv': 'v.nv', 'nh': 'v.nh', 'i_sp': 'v.i_birth'}), '(nv=v.nv, nh=v.nh, i_sp=v.i_birth)\n', (5071, 5105), False, 'from physDBD import ConvertMomentsToNMomentsLayer, DeathRxnLayer, BirthRxnLayer, EatRxnLayer, ConvertNMomentsTEtoMomentsTE\n'), ((5847, 5916), 'physDBD.EatRxnLayer', 'EatRxnLayer', ([], {'nv': 'v.nv', 'nh': 'v.nh', 'i_prey': 'v.i_prey', 'i_hunter': 'v.i_predator'}), '(nv=v.nv, nh=v.nh, i_prey=v.i_prey, i_hunter=v.i_predator)\n', (5858, 5916), False, 'from physDBD import ConvertMomentsToNMomentsLayer, DeathRxnLayer, BirthRxnLayer, EatRxnLayer, ConvertNMomentsTEtoMomentsTE\n'), ((6732, 6762), 'physDBD.ConvertNMomentsTEtoMomentsTE', 'ConvertNMomentsTEtoMomentsTE', ([], {}), '()\n', (6760, 6762), False, 'from physDBD import ConvertMomentsToNMomentsLayer, DeathRxnLayer, BirthRxnLayer, EatRxnLayer, ConvertNMomentsTEtoMomentsTE\n'), ((3346, 3375), 'shutil.rmtree', 'shutil.rmtree', (['"""saved_models"""'], {}), "('saved_models')\n", (3359, 3375), False, 'import shutil\n'), ((3768, 3806), 'numpy.array', 'np.array', (['[19.0, 45.0, 62.0, 4.0, 8.0]'], {}), '([19.0, 45.0, 62.0, 4.0, 8.0])\n', (3776, 3806), True, 'import numpy as np\n'), ((3823, 4028), 'numpy.array', 'np.array', (['[[391.0, 922.0, 1285.0, 86.0, 161.0], [922.0, 2187.0, 3031.0, 200.0, 387.0],\n [1285.0, 3031.0, 4246.0, 288.0, 523.0], [86.0, 200.0, 288.0, 21.0, 32.0\n ], [161.0, 387.0, 523.0, 32.0, 73.0]]'], {}), '([[391.0, 922.0, 1285.0, 86.0, 161.0], [922.0, 2187.0, 3031.0, \n 200.0, 387.0], [1285.0, 3031.0, 4246.0, 288.0, 523.0], [86.0, 200.0, \n 288.0, 21.0, 32.0], [161.0, 387.0, 523.0, 32.0, 73.0]])\n', (3831, 4028), True, 'import numpy as np\n'), ((4589, 4626), 'numpy.array', 'np.array', (['[-19.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([-19.0, 0.0, 0.0, 0.0, 0.0])\n', (4597, 4626), True, 'import numpy as np\n'), ((4645, 4826), 'numpy.array', 'np.array', (['[[-763.0, -922.0, -1285.0, -86.0, -161.0], [-922.0, 0.0, 0.0, 0.0, 0.0], [-\n 1285.0, 0.0, 0.0, 0.0, 0.0], [-86.0, 0.0, 0.0, 0.0, 0.0], [-161.0, 
0.0,\n 0.0, 0.0, 0.0]]'], {}), '([[-763.0, -922.0, -1285.0, -86.0, -161.0], [-922.0, 0.0, 0.0, 0.0,\n 0.0], [-1285.0, 0.0, 0.0, 0.0, 0.0], [-86.0, 0.0, 0.0, 0.0, 0.0], [-\n 161.0, 0.0, 0.0, 0.0, 0.0]])\n', (4653, 4826), True, 'import numpy as np\n'), ((5389, 5425), 'numpy.array', 'np.array', (['[19.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([19.0, 0.0, 0.0, 0.0, 0.0])\n', (5397, 5425), True, 'import numpy as np\n'), ((5444, 5616), 'numpy.array', 'np.array', (['[[801.0, 922.0, 1285.0, 86.0, 161.0], [922.0, 0.0, 0.0, 0.0, 0.0], [1285.0,\n 0.0, 0.0, 0.0, 0.0], [86.0, 0.0, 0.0, 0.0, 0.0], [161.0, 0.0, 0.0, 0.0,\n 0.0]]'], {}), '([[801.0, 922.0, 1285.0, 86.0, 161.0], [922.0, 0.0, 0.0, 0.0, 0.0],\n [1285.0, 0.0, 0.0, 0.0, 0.0], [86.0, 0.0, 0.0, 0.0, 0.0], [161.0, 0.0, \n 0.0, 0.0, 0.0]])\n', (5452, 5616), True, 'import numpy as np\n'), ((6199, 6239), 'numpy.array', 'np.array', (['[-922.0, 922.0, 0.0, 0.0, 0.0]'], {}), '([-922.0, 922.0, 0.0, 0.0, 0.0])\n', (6207, 6239), True, 'import numpy as np\n'), ((6258, 6477), 'numpy.array', 'np.array', (['[[-39360.0, -28364.0, -66558.0, -4518.0, -8294.0], [-28364.0, 96088.0, \n 66558.0, 4518.0, 8294.0], [-66558.0, 66558.0, 0.0, 0.0, 0.0], [-4518.0,\n 4518.0, 0.0, 0.0, 0.0], [-8294.0, 8294.0, 0.0, 0.0, 0.0]]'], {}), '([[-39360.0, -28364.0, -66558.0, -4518.0, -8294.0], [-28364.0, \n 96088.0, 66558.0, 4518.0, 8294.0], [-66558.0, 66558.0, 0.0, 0.0, 0.0],\n [-4518.0, 4518.0, 0.0, 0.0, 0.0], [-8294.0, 8294.0, 0.0, 0.0, 0.0]])\n', (6266, 6477), True, 'import numpy as np\n'), ((7114, 7149), 'numpy.array', 'np.array', (['[3.0, 5.0, 2.0, 1.0, 0.8]'], {}), '([3.0, 5.0, 2.0, 1.0, 0.8])\n', (7122, 7149), True, 'import numpy as np\n'), ((7168, 7379), 'numpy.array', 'np.array', (['[[-102.0, -224.0, -221.0, -29.0, -38.2], [-224.0, -432.0, -396.0, -62.0, -\n 75.0], [-221.0, -396.0, -232.0, -68.0, -64.6], [-29.0, -62.0, -68.0, \n 0.0, -10.7], [-38.2, -75.0, -64.6, -10.7, -6.8]]'], {}), '([[-102.0, -224.0, -221.0, -29.0, -38.2], [-224.0, -432.0, -396.0, \n 
-62.0, -75.0], [-221.0, -396.0, -232.0, -68.0, -64.6], [-29.0, -62.0, -\n 68.0, 0.0, -10.7], [-38.2, -75.0, -64.6, -10.7, -6.8]])\n', (7176, 7379), True, 'import numpy as np\n'), ((2419, 2431), 'numpy.diag', 'np.diag', (['val'], {}), '(val)\n', (2426, 2431), True, 'import numpy as np\n')] |
from __future__ import annotations
import numpy as np
import matplotlib.pyplot as plt
class LinearRegressor:
    """Ordinary-least-squares regressor fit via the normal equations.

    Note: ``predict`` deliberately adds uniform random noise (seeded by
    ``random_state``) on top of the deterministic linear prediction; this
    matches the original behaviour of the demo class.
    """

    def __init__(self,
                 random_state=0,
                 ):
        """Initialise the noise generator used by ``predict``.

        :param random_state: seed for the per-instance numpy Generator.
        """
        # (dropped the redundant `return None` — __init__ returns None anyway)
        self.rng = np.random.default_rng(seed=random_state)

    def fit(self, X: np.ndarray, y: np.ndarray) -> LinearRegressor:
        """Estimate beta solving the normal equations (X'X) beta = X'y.

        Fix: use ``np.linalg.solve`` instead of the explicit matrix inverse
        ``inv(X'X) @ X' @ y`` — mathematically equivalent but numerically
        better conditioned and cheaper.

        :returns: self, to allow call chaining (``fit(...).predict(...)``).
        """
        self.beta = np.linalg.solve(X.T @ X, X.T @ y)
        return self

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Return X @ beta plus one uniform [0, 1) noise draw per sample."""
        return np.matmul(X, self.beta) + self.rng.random(X.shape[0])
def test():
    """Demo: fit a LinearRegressor on noisy synthetic data and plot results."""
    n_cols, n_targets = 20, 2
    rng = np.random.default_rng(seed=0)
    # 50 rows x 20 columns of evenly spaced values in [-100, 100]
    grid = np.linspace(start=[-100] * 50, stop=[100] * 50, num=n_cols).T
    target = grid[:, -n_targets]
    noise = rng.uniform(low=-1.0, high=1.0, size=(50, n_cols - n_targets))
    features = grid[:, :-n_targets] + noise
    predictions = LinearRegressor().fit(features, target).predict(features)
    plt.plot(target, label='true')
    plt.plot(predictions, label='predict')
    plt.show()
if __name__ == '__main__':
test() | [
"numpy.random.default_rng",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.matmul",
"matplotlib.pyplot.show"
] | [((599, 628), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': '(0)'}), '(seed=0)\n', (620, 628), True, 'import numpy as np\n'), ((933, 962), 'matplotlib.pyplot.plot', 'plt.plot', (['endog'], {'label': '"""true"""'}), "(endog, label='true')\n", (941, 962), True, 'import matplotlib.pyplot as plt\n'), ((967, 1000), 'matplotlib.pyplot.plot', 'plt.plot', (['y_pred'], {'label': '"""predict"""'}), "(y_pred, label='predict')\n", (975, 1000), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1015), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1013, 1015), True, 'import matplotlib.pyplot as plt\n'), ((211, 251), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': 'random_state'}), '(seed=random_state)\n', (232, 251), True, 'import numpy as np\n'), ((640, 703), 'numpy.linspace', 'np.linspace', ([], {'start': '([-100] * 50)', 'stop': '([100] * 50)', 'num': 'total_size'}), '(start=[-100] * 50, stop=[100] * 50, num=total_size)\n', (651, 703), True, 'import numpy as np\n'), ((491, 514), 'numpy.matmul', 'np.matmul', (['X', 'self.beta'], {}), '(X, self.beta)\n', (500, 514), True, 'import numpy as np\n')] |
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2 as cv
import numpy as np
def printTime(t1):
    """Print the seconds elapsed since tick count ``t1``; return current ticks.

    Bug fix: the tick delta must be divided by ``cv.getTickFrequency()``
    (ticks per second) to yield seconds. The original divided by
    ``cv.getCPUTickCount()`` — an absolute, ever-growing CPU tick counter —
    which produces a meaningless, shrinking value rather than elapsed time.

    :param t1: tick count from an earlier ``cv.getTickCount()`` call.
    :returns: the current tick count, for chaining timing measurements.
    """
    t2 = cv.getTickCount()
    print((t2 - t1) / cv.getTickFrequency())
    return t2
# initialize the camera and grab a reference to the raw camera capture
kernel = cv.getStructuringElement(cv.MORPH_RECT, (3,3))  # 3x3 box element for morphology
camera = PiCamera()
camera.resolution = (256, 192)
camera.framerate = 30
camera.shutter_speed = 4000  # fixed exposure — presumably microseconds per picamera API; confirm
rawCapture = PiRGBArray(camera, size=(256, 192))
# allow the camera to warmup
time.sleep(0.1)
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    img = frame.array[0:112,:,:]#get ROI
    t1 = cv.getTickCount()
    # change color to gray
    img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # keep only dark-ish pixels (intensity 3..100) as the candidate region
    thresh = cv.inRange(img, 3, 100)
    # NOTE(review): the MORPH_CLOSE result below is immediately overwritten —
    # the next call re-processes `thresh`, not `mor`. Probably intended
    # `cv.morphologyEx(mor, cv.MORPH_OPEN, ...)` to chain close-then-open; confirm.
    mor = cv.morphologyEx(thresh, cv.MORPH_CLOSE, kernel, iterations= 1)
    mor = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations= 1)
    # 2-value unpacking assumes the OpenCV 4.x findContours signature
    # (OpenCV 3.x returned 3 values) — TODO confirm installed version
    cnts, hier = cv.findContours(mor, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    for cnt in cnts:
        if cv.contourArea(cnt)>40:
            # find the minimum-area (rotated) bounding rectangle of the contour
            rect = cv.minAreaRect(cnt)
            box = cv.boxPoints(rect)
            box = np.int0(box)
            # choose destination corner order by the rectangle's orientation
            if(rect[0][1] >= rect[1][0]):
                pts_o = np.float32(box)
                pts_d = np.float32([[0, 64], [0, 0], [48, 0], [48, 64]])
            if(rect[0][1] < rect[1][0]):
                pts_o = np.float32(box)
                pts_d = np.float32([[0, 0], [48, 0], [48, 64], [0, 64]])
            M = cv.getPerspectiveTransform(pts_o, pts_d)
            # apply the perspective transform to rectify the patch to 48x64
            dst = cv.warpPerspective(mor, M, (48, 64))
            cv.imshow("Frame", dst)
    # display the output image
    cv.imshow("b", img)
    rawCapture.truncate(0)
    if cv.waitKey(1) & 0xFF == ord('q'):
        # NOTE(review): `dst` only exists after at least one contour passed the
        # area test — pressing 'q' before any detection raises NameError here.
        cv.imwrite("dst.jpg",dst)
        exit()
| [
"time.sleep",
"cv2.imshow",
"cv2.warpPerspective",
"cv2.getCPUTickCount",
"cv2.contourArea",
"cv2.minAreaRect",
"picamera.array.PiRGBArray",
"cv2.waitKey",
"cv2.boxPoints",
"cv2.getPerspectiveTransform",
"numpy.int0",
"picamera.PiCamera",
"cv2.morphologyEx",
"cv2.cvtColor",
"cv2.imwrite"... | [((332, 379), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(3, 3)'], {}), '(cv.MORPH_RECT, (3, 3))\n', (356, 379), True, 'import cv2 as cv\n'), ((388, 398), 'picamera.PiCamera', 'PiCamera', ([], {}), '()\n', (396, 398), False, 'from picamera import PiCamera\n'), ((493, 528), 'picamera.array.PiRGBArray', 'PiRGBArray', (['camera'], {'size': '(256, 192)'}), '(camera, size=(256, 192))\n', (503, 528), False, 'from picamera.array import PiRGBArray\n'), ((558, 573), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (568, 573), False, 'import time\n'), ((177, 194), 'cv2.getTickCount', 'cv.getTickCount', ([], {}), '()\n', (192, 194), True, 'import cv2 as cv\n'), ((872, 889), 'cv2.getTickCount', 'cv.getTickCount', ([], {}), '()\n', (887, 889), True, 'import cv2 as cv\n'), ((927, 962), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (938, 962), True, 'import cv2 as cv\n'), ((977, 1000), 'cv2.inRange', 'cv.inRange', (['img', '(3)', '(100)'], {}), '(img, 3, 100)\n', (987, 1000), True, 'import cv2 as cv\n'), ((1011, 1072), 'cv2.morphologyEx', 'cv.morphologyEx', (['thresh', 'cv.MORPH_CLOSE', 'kernel'], {'iterations': '(1)'}), '(thresh, cv.MORPH_CLOSE, kernel, iterations=1)\n', (1026, 1072), True, 'import cv2 as cv\n'), ((1084, 1144), 'cv2.morphologyEx', 'cv.morphologyEx', (['thresh', 'cv.MORPH_OPEN', 'kernel'], {'iterations': '(1)'}), '(thresh, cv.MORPH_OPEN, kernel, iterations=1)\n', (1099, 1144), True, 'import cv2 as cv\n'), ((1168, 1230), 'cv2.findContours', 'cv.findContours', (['mor', 'cv.RETR_EXTERNAL', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(mor, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n', (1183, 1230), True, 'import cv2 as cv\n'), ((1976, 1995), 'cv2.imshow', 'cv.imshow', (['"""b"""', 'img'], {}), "('b', img)\n", (1985, 1995), True, 'import cv2 as cv\n'), ((2078, 2104), 'cv2.imwrite', 'cv.imwrite', (['"""dst.jpg"""', 'dst'], {}), "('dst.jpg', dst)\n", (2088, 2104), True, 
'import cv2 as cv\n'), ((213, 233), 'cv2.getCPUTickCount', 'cv.getCPUTickCount', ([], {}), '()\n', (231, 233), True, 'import cv2 as cv\n'), ((1268, 1287), 'cv2.contourArea', 'cv.contourArea', (['cnt'], {}), '(cnt)\n', (1282, 1287), True, 'import cv2 as cv\n'), ((1346, 1365), 'cv2.minAreaRect', 'cv.minAreaRect', (['cnt'], {}), '(cnt)\n', (1360, 1365), True, 'import cv2 as cv\n'), ((1384, 1402), 'cv2.boxPoints', 'cv.boxPoints', (['rect'], {}), '(rect)\n', (1396, 1402), True, 'import cv2 as cv\n'), ((1421, 1433), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (1428, 1433), True, 'import numpy as np\n'), ((1786, 1826), 'cv2.getPerspectiveTransform', 'cv.getPerspectiveTransform', (['pts_o', 'pts_d'], {}), '(pts_o, pts_d)\n', (1812, 1826), True, 'import cv2 as cv\n'), ((1865, 1901), 'cv2.warpPerspective', 'cv.warpPerspective', (['mor', 'M', '(48, 64)'], {}), '(mor, M, (48, 64))\n', (1883, 1901), True, 'import cv2 as cv\n'), ((1914, 1937), 'cv2.imshow', 'cv.imshow', (['"""Frame"""', 'dst'], {}), "('Frame', dst)\n", (1923, 1937), True, 'import cv2 as cv\n'), ((2036, 2049), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (2046, 2049), True, 'import cv2 as cv\n'), ((1514, 1529), 'numpy.float32', 'np.float32', (['box'], {}), '(box)\n', (1524, 1529), True, 'import numpy as np\n'), ((1554, 1602), 'numpy.float32', 'np.float32', (['[[0, 64], [0, 0], [48, 0], [48, 64]]'], {}), '([[0, 64], [0, 0], [48, 0], [48, 64]])\n', (1564, 1602), True, 'import numpy as np\n'), ((1668, 1683), 'numpy.float32', 'np.float32', (['box'], {}), '(box)\n', (1678, 1683), True, 'import numpy as np\n'), ((1708, 1756), 'numpy.float32', 'np.float32', (['[[0, 0], [48, 0], [48, 64], [0, 64]]'], {}), '([[0, 0], [48, 0], [48, 64], [0, 64]])\n', (1718, 1756), True, 'import numpy as np\n')] |
import random
from torch.utils.data import DataLoader
import albumentations as A
import numpy as np
from torchdet3d.dataloaders import Objectron
from torchdet3d.utils import ConvertColor, ToTensor, RandomRescale, RandomRotate
def worker_init_fn(worker_id):
    """Re-seed numpy and the stdlib RNG per DataLoader worker.

    Derives a distinct seed for each worker from the current generator
    states, so forked workers don't all produce identical random streams.
    """
    numpy_base = np.random.get_state()[1][0]
    np.random.seed(numpy_base + worker_id)
    stdlib_base = random.getstate()[1][0]
    random.seed(stdlib_base + worker_id + 1)
def build_loader(config, mode='train'):
    """Build the train/val/test Objectron dataloaders described by `config`.

    `mode` is accepted for API compatibility but is not used. Returns a
    (train_loader, val_loader, test_loader) tuple.
    """
    train_transform, test_transform = build_augmentations(cfg=config)
    # (split name, transform, batch size, shuffle flag) for each loader
    split_specs = [
        ('train', train_transform, config.data.train_batch_size, True),
        ('val', test_transform, config.data.val_batch_size, True),
        ('test', test_transform, config.data.val_batch_size, False),
    ]
    loaders = []
    for split, transform, batch_size, shuffle in split_specs:
        dataset = Objectron(config.data.root, mode=split, transform=transform,
                            category_list=config.data.category_list)
        loaders.append(DataLoader(dataset, batch_size=batch_size,
                                  shuffle=shuffle,
                                  num_workers=config.data.num_workers,
                                  worker_init_fn=worker_init_fn))
    return tuple(loaders)
# Maps config keys to transform constructors — albumentations classes plus
# project-local custom transforms (ConvertColor, RandomRescale, ToTensor,
# RandomRotate). Consumed by build_transforms_list to instantiate pipelines.
TRANSFORMS_REGISTRY = {
    'convert_color': ConvertColor,
    'random_rescale': RandomRescale,
    'resize': A.Resize,
    'horizontal_flip': A.HorizontalFlip,
    'hue_saturation_value': A.HueSaturationValue,
    'rgb_shift': A.RGBShift,
    'random_brightness_contrast': A.RandomBrightnessContrast,
    'color_jitter': A.ColorJitter,
    'blur': A.Blur,
    'normalize': A.augmentations.transforms.Normalize,
    'to_tensor': ToTensor,
    'one_of': A.OneOf,
    'random_rotate': RandomRotate,
    }
def build_transforms_list(transforms_config):
    """Instantiate transforms from an iterable of (name, args) config pairs.

    'one_of' entries are expanded recursively into an A.OneOf over their
    nested transform list; every other entry is constructed with **args.
    """
    built = []
    for name, params in transforms_config:
        factory = TRANSFORMS_REGISTRY[name]
        if name == 'one_of':
            nested = build_transforms_list(params.transforms)
            built.append(factory(nested, p=params.p))
        else:
            built.append(factory(**params))
    return built
def build_augmentations(cfg):
    """Compose the (train, test) augmentation pipelines from the config.

    Both pipelines carry 'xy' keypoint params with remove_invisible=False so
    keypoints outside the image are kept.
    """
    composed = []
    for pipeline_cfg in (cfg.train_data_pipeline, cfg.test_data_pipeline):
        composed.append(A.Compose(
            build_transforms_list(pipeline_cfg),
            keypoint_params=A.KeypointParams(format='xy',
                                              remove_invisible=False)))
    return tuple(composed)
| [
"numpy.random.get_state",
"albumentations.KeypointParams",
"torchdet3d.dataloaders.Objectron",
"random.getstate",
"torch.utils.data.DataLoader"
] | [((510, 623), 'torchdet3d.dataloaders.Objectron', 'Objectron', (['config.data.root'], {'mode': '"""train"""', 'transform': 'train_transform', 'category_list': 'config.data.category_list'}), "(config.data.root, mode='train', transform=train_transform,\n category_list=config.data.category_list)\n", (519, 623), False, 'from torchdet3d.dataloaders import Objectron\n'), ((671, 824), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'config.data.train_batch_size', 'shuffle': '(True)', 'num_workers': 'config.data.num_workers', 'worker_init_fn': 'worker_init_fn'}), '(train_dataset, batch_size=config.data.train_batch_size, shuffle=\n True, num_workers=config.data.num_workers, worker_init_fn=worker_init_fn)\n', (681, 824), False, 'from torch.utils.data import DataLoader\n'), ((903, 1013), 'torchdet3d.dataloaders.Objectron', 'Objectron', (['config.data.root'], {'mode': '"""val"""', 'transform': 'test_transform', 'category_list': 'config.data.category_list'}), "(config.data.root, mode='val', transform=test_transform,\n category_list=config.data.category_list)\n", (912, 1013), False, 'from torchdet3d.dataloaders import Objectron\n'), ((1059, 1207), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'config.data.val_batch_size', 'shuffle': '(True)', 'num_workers': 'config.data.num_workers', 'worker_init_fn': 'worker_init_fn'}), '(val_dataset, batch_size=config.data.val_batch_size, shuffle=True,\n num_workers=config.data.num_workers, worker_init_fn=worker_init_fn)\n', (1069, 1207), False, 'from torch.utils.data import DataLoader\n'), ((1288, 1399), 'torchdet3d.dataloaders.Objectron', 'Objectron', (['config.data.root'], {'mode': '"""test"""', 'transform': 'test_transform', 'category_list': 'config.data.category_list'}), "(config.data.root, mode='test', transform=test_transform,\n category_list=config.data.category_list)\n", (1297, 1399), False, 'from torchdet3d.dataloaders import Objectron\n'), ((1446, 1597), 
'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'config.data.val_batch_size', 'shuffle': '(False)', 'num_workers': 'config.data.num_workers', 'worker_init_fn': 'worker_init_fn'}), '(test_dataset, batch_size=config.data.val_batch_size, shuffle=\n False, num_workers=config.data.num_workers, worker_init_fn=worker_init_fn)\n', (1456, 1597), False, 'from torch.utils.data import DataLoader\n'), ((2755, 2808), 'albumentations.KeypointParams', 'A.KeypointParams', ([], {'format': '"""xy"""', 'remove_invisible': '(False)'}), "(format='xy', remove_invisible=False)\n", (2771, 2808), True, 'import albumentations as A\n'), ((2936, 2989), 'albumentations.KeypointParams', 'A.KeypointParams', ([], {'format': '"""xy"""', 'remove_invisible': '(False)'}), "(format='xy', remove_invisible=False)\n", (2952, 2989), True, 'import albumentations as A\n'), ((279, 300), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (298, 300), True, 'import numpy as np\n'), ((336, 353), 'random.getstate', 'random.getstate', ([], {}), '()\n', (351, 353), False, 'import random\n')] |
import numpy as np
import pytest
from hypothesis import assume, given, settings, strategies as st
from metod_alg import check_metod_class as prev_mt_alg
from metod_alg import objective_functions as mt_obj
from metod_alg import metod_algorithm_functions as mt_alg
def func_params(d=20, p=2, lambda_1=1, lambda_2=10):
    """Build (f, g, func_args) for the several-quadratic-forms test problem.

    d: dimension, p: number of quadratic forms, lambda_1/lambda_2: eigenvalue
    bounds passed through to the parameter generator.
    """
    store_x0, matrix_test = (mt_obj.function_parameters_several_quad
                             (p, d, lambda_1, lambda_2))
    return (mt_obj.several_quad_function,
            mt_obj.several_quad_gradient,
            (p, store_x0, matrix_test))
# --- Input-validation tests (test_1..test_22): each passes one malformed
# argument to prev_mt_alg.metod_class and expects a ValueError (or, for
# test_15/test_16, a RuntimeWarning). ---
def test_1():
    """Asserts error message when num_points is not integer."""
    d = 20
    f, g, func_args = func_params()
    num_points_t = 0.01
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d, num_points=num_points_t)
def test_2():
    """Asserts error message when d is not integer."""
    d = 0.01
    p = 10
    f = mt_obj.several_quad_function
    g = mt_obj.several_quad_gradient
    func_args = (p, np.random.uniform(0, 1, (p, )),
                 np.random.uniform(0, 1, (p, 10, 10)))
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d)
def test_3():
    """Asserts error message when beta is not integer or float."""
    d = 20
    f, g, func_args = func_params()
    beta_t = True
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d, beta=beta_t)
def test_4():
    """Asserts error message when tolerance is not float."""
    d = 20
    f, g, func_args = func_params()
    tolerance_t = True
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d, tolerance=tolerance_t)
def test_5():
    """Asserts error message when projection is not boolean."""
    d = 20
    f, g, func_args = func_params()
    projection_t = 0.01
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d, projection=projection_t)
def test_6():
    """Asserts error message when const is not integer or float."""
    d = 20
    f, g, func_args = func_params()
    const_t = 'test'
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d, const=const_t)
def test_7():
    """Asserts error message when m is not integer."""
    d = 20
    f, g, func_args = func_params()
    m_t = 0.9
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d, m=m_t)
def test_8():
    """Asserts error message when option is not a string."""
    d = 20
    f, g, func_args = func_params()
    option_t = True
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d, option=option_t)
def test_9():
    """Asserts error message when met is not a string."""
    d = 20
    f, g, func_args = func_params()
    met_t = 0.1
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d, met=met_t)
def test_10():
    """Asserts error message when initial_guess is not a integer or float."""
    d = 20
    f, g, func_args = func_params()
    initial_guess_t = '213'
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d,
                                initial_guess=initial_guess_t)
def test_11():
    """Asserts error message when d < 2."""
    d = 1
    p = 10
    f = mt_obj.several_quad_function
    g = mt_obj.several_quad_gradient
    func_args = (p, np.random.uniform(0, 1, (p, )),
                 np.random.uniform(0, 1, (p, 10, 10)))
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d)
def test_12():
    """Asserts error message when m < 1."""
    d = 20
    f, g, func_args = func_params()
    m_t = 0
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d, m=m_t)
def test_13():
    """
    Asserts error message when bounds_set_x does not contain an integer or
    float.
    """
    d = 20
    f, g, func_args = func_params()
    bounds_set_x_t = (True, 1)
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d,
                                bounds_set_x=bounds_set_x_t)
def test_14():
    """
    Asserts error message when bounds_set_x does not contain an integer or
    float.
    """
    d = 20
    f, g, func_args = func_params()
    bounds_set_x_t = (0, 'False')
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d,
                                bounds_set_x=bounds_set_x_t)
def test_15():
    """Asserts warning message when beta >= 1."""
    d = 20
    f, g, func_args = func_params()
    beta_t = 1
    with pytest.warns(RuntimeWarning):
        prev_mt_alg.metod_class(f, g, func_args, d, beta=beta_t)
def test_16():
    """Asserts warning message when tolerance > 0.1."""
    d = 20
    f, g, func_args = func_params()
    tolerance_t = 0.2
    with pytest.warns(RuntimeWarning):
        prev_mt_alg.metod_class(f, g, func_args, d, tolerance=tolerance_t)
def test_17():
    """
    Asserts error message when number of iterations is less than m.
    """
    np.random.seed(90)
    d = 2
    p = 2
    lambda_1 = 1
    lambda_2 = 3
    tolerance_t = 0.1
    m_t = 6
    f, g, func_args = func_params(d, p, lambda_1, lambda_2)
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d,
                                tolerance=tolerance_t, m=m_t)
def test_18():
    """Asserts error message when len(bounds_set_x) > 2."""
    d = 20
    f, g, func_args = func_params()
    bounds_set_x_t = (0, 1, 2)
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d,
                                bounds_set_x=bounds_set_x_t)
def test_19():
    """
    Asserts error message when relax_sd_it is not
    integer or float.
    """
    d = 20
    f, g, func_args = func_params()
    relax_sd_it_t = 'Test'
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d,
                                relax_sd_it=relax_sd_it_t)
def test_20():
    """Asserts error message when relax_sd_it is less than zero."""
    d = 20
    f, g, func_args = func_params()
    relax_sd_it_t = -0.1
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d,
                                relax_sd_it=relax_sd_it_t)
def test_21():
    """Asserts error message when set_x is not a valid choice."""
    d = 20
    set_x_t = 'random_unif'
    f, g, func_args = func_params()
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d,
                                set_x=set_x_t)
def test_22():
    """Asserts error message when set_x is not a string."""
    num_points = 1000
    d = 20
    set_x_t = np.random.uniform(0, 1, (num_points, d))
    f, g, func_args = func_params()
    with pytest.raises(ValueError):
        prev_mt_alg.metod_class(f, g, func_args, d,
                                set_x=set_x_t)
# --- Property-based / end-to-end tests (test_23..test_26), driven by
# hypothesis over dimension, number of minima, and starting-point counts. ---
@settings(max_examples=10, deadline=None)
@given(st.integers(2, 20), st.integers(0, 3), st.integers(2, 100))
def test_23(p, m, d):
    """
    Test m is being applied correctly in metod_class.py when computing
    distances.
    """
    np.random.seed(p)
    x = np.random.uniform(0, 1, (d, ))
    tolerance = 0.00001
    projection = False
    option = 'minimize_scalar'
    met = 'Brent'
    initial_guess = 0.005
    beta = 0.095
    # Build p quadratic forms; only the first two get explicit diagonal
    # matrices with eigenvalues in [1, 10] (first two fixed at 1 and 10).
    matrix_test = np.zeros((p, d, d))
    store_x0 = np.random.uniform(0, 1, (p, d))
    diag_vals = np.zeros(d)
    diag_vals[:2] = np.array([1, 10])
    diag_vals[2:] = np.random.uniform(2, 9, (d - 2))
    matrix_test[0] = np.diag(diag_vals)
    diag_vals = np.zeros(d)
    diag_vals[:2] = np.array([1, 10])
    diag_vals[2:] = np.random.uniform(2, 9, (d - 2))
    matrix_test[1] = np.diag(diag_vals)
    func_args = p, store_x0, matrix_test
    f = mt_obj.several_quad_function
    g = mt_obj.several_quad_gradient
    usage = 'metod_algorithm'
    relax_sd_it = 1
    bound_1 = 0
    bound_2 = 1
    (iterations_of_sd,
     its,
     store_grad) = (mt_alg.apply_sd_until_stopping_criteria
                    (x, d, projection, tolerance, option, met,
                     initial_guess, func_args, f, g, bound_1,
                     bound_2, usage, relax_sd_it, None))
    """METOD algorithm checks the below"""
    # hypothesis.assume: skip draws where descent stopped before m iterations
    assume(its > m)
    sd_iterations_partner_points = (mt_alg.partner_point_each_sd
                                    (iterations_of_sd, beta,
                                     store_grad))
    test_x = np.random.uniform(0, 1, (d, ))
    original_shape = iterations_of_sd.shape[0]
    """Checking correct warm up applied when checking distances"""
    # the first m "warm up" iterates must be excluded from the distance set
    set_dist = mt_alg.distances(iterations_of_sd, test_x, m, d, 'All')
    assert(set_dist.shape == (original_shape - m,))
    assert(set_dist.shape == (its + 1 - m,))
    assert(sd_iterations_partner_points.shape[0] == iterations_of_sd.shape[0])
@settings(max_examples=10, deadline=None)
@given(st.integers(2, 20), st.integers(5, 100), st.integers(50, 1000))
def test_24(p, d, num_points_t):
    """
    Check ouputs of algorithm with minimum of several Quadratic forms
    function and gradient.
    """
    np.random.seed(p)
    lambda_1 = 1
    lambda_2 = 10
    store_x0, matrix_test = (mt_obj.function_parameters_several_quad
                             (p, d, lambda_1, lambda_2))
    func_args = p, store_x0, matrix_test
    f = mt_obj.several_quad_function
    g = mt_obj.several_quad_gradient
    (discovered_minimizers,
     number_minimizers,
     func_vals_of_minimizers,
     excessive_descents,
     starting_points,
     no_grad_evals,
     classification_point,
     count_gr_2, missed_minimizers,
     total_checks) = prev_mt_alg.metod_class(f, g, func_args, d,
                                             num_points=num_points_t)
    """Check outputs are as expected"""
    assert(len(discovered_minimizers) == number_minimizers)
    assert(number_minimizers == len(func_vals_of_minimizers))
    assert(np.unique(classification_point).shape[0] == number_minimizers)
    """Ensure that each region of attraction discovered is unique"""
    mt_obj.check_unique_minimizers(discovered_minimizers, number_minimizers,
                                   mt_obj.calc_minimizer_sev_quad, func_args)
    assert(no_grad_evals[0] > 4)
    assert(count_gr_2 >= 0)
    assert(missed_minimizers >= 0)
    assert(total_checks>= 0)
    assert(np.where(no_grad_evals > 4)[0].shape[0] == excessive_descents
           + number_minimizers)
    """Ensure that starting points used are of correct form"""
    assert(np.array(starting_points).shape == (num_points_t, d))
    assert(excessive_descents == 0)
    # all starting points must be pairwise distinct (to 5 decimal places)
    for j in range(num_points_t):
        for i in range(j+1, num_points_t):
            assert(np.any(np.round(starting_points[j], 5) !=
                   np.round(starting_points[i], 5)))
@settings(max_examples=10, deadline=None)
@given(st.integers(2, 20), st.integers(5, 100), st.integers(50, 1000))
def test_25(p, d, num_points_t):
    """
    Check ouputs of algorithm with minimum of several Quadratic forms
    function and gradient with set_x = 'random'.
    """
    np.random.seed(p)
    lambda_1 = 1
    lambda_2 = 10
    set_x_t = 'random'
    store_x0, matrix_test = (mt_obj.function_parameters_several_quad
                             (p, d, lambda_1, lambda_2))
    func_args = p, store_x0, matrix_test
    f = mt_obj.several_quad_function
    g = mt_obj.several_quad_gradient
    (discovered_minimizers,
     number_minimizers,
     func_vals_of_minimizers,
     excessive_descents,
     starting_points,
     no_grad_evals,
     classification_point,
     count_gr_2, missed_minimizers,
     total_checks) = prev_mt_alg.metod_class(f, g, func_args, d,
                                             num_points=num_points_t,
                                             set_x=set_x_t)
    """Check outputs are as expected"""
    assert(len(discovered_minimizers) == number_minimizers)
    assert(number_minimizers == len(func_vals_of_minimizers))
    assert(np.unique(classification_point).shape[0] == number_minimizers)
    assert(no_grad_evals[0] > 4)
    assert(count_gr_2 >= 0)
    assert(missed_minimizers >= 0)
    assert(total_checks>= 0)
    assert(np.where(no_grad_evals > 4)[0].shape[0] == excessive_descents
           + number_minimizers)
    """Ensure that each region of attraction discovered is unique"""
    mt_obj.check_unique_minimizers(discovered_minimizers, number_minimizers,
                                   mt_obj.calc_minimizer_sev_quad, func_args)
    """Ensure that starting points used are of correct form"""
    assert(np.array(starting_points).shape == (num_points_t, d))
    assert(excessive_descents == 0)
    # all starting points must be pairwise distinct (to 5 decimal places)
    for j in range(num_points_t):
        for i in range(j+1, num_points_t):
            assert(np.any(np.round(starting_points[j], 5) !=
                   np.round(starting_points[i], 5)))
def test_26():
    """
    Checks ouputs of algorithm with Sum of Gaussians function and
    gradient
    """
    np.random.seed(15)
    d = 20
    p = 10
    sigma_sq = 0.8
    lambda_1 = 1
    lambda_2 = 10
    # matrix_test is immediately overwritten by the parameter generator below
    matrix_test = np.zeros((p, d, d))
    store_x0, matrix_test, store_c = (mt_obj.function_parameters_sog
                                      (p, d, lambda_1, lambda_2))
    func_args = p, sigma_sq, store_x0, matrix_test, store_c
    f = mt_obj.sog_function
    g = mt_obj.sog_gradient
    (discovered_minimizers,
     number_minimizers,
     func_vals_of_minimizers,
     excessive_descents,
     starting_points,
     no_grad_evals,
     classification_point,
     count_gr_2, missed_minimizers,
     total_checks) = prev_mt_alg.metod_class(f, g, func_args, d)
    """Check outputs are as expected"""
    assert(len(discovered_minimizers) == number_minimizers)
    assert(number_minimizers == len(func_vals_of_minimizers))
    assert(np.unique(classification_point).shape[0] == number_minimizers)
    assert(no_grad_evals[0] > 4)
    assert(count_gr_2 >= 0)
    assert(missed_minimizers >= 0)
    assert(total_checks>= 0)
    assert(np.where(no_grad_evals > 4)[0].shape[0] == excessive_descents
           + number_minimizers)
    """Ensure that each region of attraction discovered is unique"""
    mt_obj.check_unique_minimizers(discovered_minimizers, number_minimizers,
                                   mt_obj.calc_minimizer_sog, func_args)
    """Ensure that starting points used are of correct form"""
    assert(np.array(starting_points).shape == (1000, d))
    assert(excessive_descents >= 0)
    # all starting points must be pairwise distinct (to 5 decimal places)
    for j in range(len(starting_points)):
        for i in range(j+1, len(starting_points)):
            assert(np.any(np.round(starting_points[j], 5) !=
                   np.round(starting_points[i], 5)))
| [
"metod_alg.metod_algorithm_functions.partner_point_each_sd",
"numpy.array",
"metod_alg.objective_functions.function_parameters_several_quad",
"numpy.where",
"numpy.random.seed",
"hypothesis.settings",
"metod_alg.metod_algorithm_functions.apply_sd_until_stopping_criteria",
"numpy.round",
"hypothesis.... | [((7061, 7101), 'hypothesis.settings', 'settings', ([], {'max_examples': '(10)', 'deadline': 'None'}), '(max_examples=10, deadline=None)\n', (7069, 7101), False, 'from hypothesis import assume, given, settings, strategies as st\n'), ((9015, 9055), 'hypothesis.settings', 'settings', ([], {'max_examples': '(10)', 'deadline': 'None'}), '(max_examples=10, deadline=None)\n', (9023, 9055), False, 'from hypothesis import assume, given, settings, strategies as st\n'), ((10966, 11006), 'hypothesis.settings', 'settings', ([], {'max_examples': '(10)', 'deadline': 'None'}), '(max_examples=10, deadline=None)\n', (10974, 11006), False, 'from hypothesis import assume, given, settings, strategies as st\n'), ((471, 536), 'metod_alg.objective_functions.function_parameters_several_quad', 'mt_obj.function_parameters_several_quad', (['p', 'd', 'lambda_1', 'lambda_2'], {}), '(p, d, lambda_1, lambda_2)\n', (510, 536), True, 'from metod_alg import objective_functions as mt_obj\n'), ((5178, 5196), 'numpy.random.seed', 'np.random.seed', (['(90)'], {}), '(90)\n', (5192, 5196), True, 'import numpy as np\n'), ((6846, 6886), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(num_points, d)'], {}), '(0, 1, (num_points, d))\n', (6863, 6886), True, 'import numpy as np\n'), ((7297, 7314), 'numpy.random.seed', 'np.random.seed', (['p'], {}), '(p)\n', (7311, 7314), True, 'import numpy as np\n'), ((7323, 7352), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(d,)'], {}), '(0, 1, (d,))\n', (7340, 7352), True, 'import numpy as np\n'), ((7511, 7530), 'numpy.zeros', 'np.zeros', (['(p, d, d)'], {}), '((p, d, d))\n', (7519, 7530), True, 'import numpy as np\n'), ((7546, 7577), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(p, d)'], {}), '(0, 1, (p, d))\n', (7563, 7577), True, 'import numpy as np\n'), ((7594, 7605), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (7602, 7605), True, 'import numpy as np\n'), ((7626, 7643), 'numpy.array', 'np.array', 
(['[1, 10]'], {}), '([1, 10])\n', (7634, 7643), True, 'import numpy as np\n'), ((7664, 7694), 'numpy.random.uniform', 'np.random.uniform', (['(2)', '(9)', '(d - 2)'], {}), '(2, 9, d - 2)\n', (7681, 7694), True, 'import numpy as np\n'), ((7718, 7736), 'numpy.diag', 'np.diag', (['diag_vals'], {}), '(diag_vals)\n', (7725, 7736), True, 'import numpy as np\n'), ((7753, 7764), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (7761, 7764), True, 'import numpy as np\n'), ((7785, 7802), 'numpy.array', 'np.array', (['[1, 10]'], {}), '([1, 10])\n', (7793, 7802), True, 'import numpy as np\n'), ((7823, 7853), 'numpy.random.uniform', 'np.random.uniform', (['(2)', '(9)', '(d - 2)'], {}), '(2, 9, d - 2)\n', (7840, 7853), True, 'import numpy as np\n'), ((7877, 7895), 'numpy.diag', 'np.diag', (['diag_vals'], {}), '(diag_vals)\n', (7884, 7895), True, 'import numpy as np\n'), ((8146, 8311), 'metod_alg.metod_algorithm_functions.apply_sd_until_stopping_criteria', 'mt_alg.apply_sd_until_stopping_criteria', (['x', 'd', 'projection', 'tolerance', 'option', 'met', 'initial_guess', 'func_args', 'f', 'g', 'bound_1', 'bound_2', 'usage', 'relax_sd_it', 'None'], {}), '(x, d, projection, tolerance, option,\n met, initial_guess, func_args, f, g, bound_1, bound_2, usage,\n relax_sd_it, None)\n', (8185, 8311), True, 'from metod_alg import metod_algorithm_functions as mt_alg\n'), ((8415, 8430), 'hypothesis.assume', 'assume', (['(its > m)'], {}), '(its > m)\n', (8421, 8430), False, 'from hypothesis import assume, given, settings, strategies as st\n'), ((8467, 8531), 'metod_alg.metod_algorithm_functions.partner_point_each_sd', 'mt_alg.partner_point_each_sd', (['iterations_of_sd', 'beta', 'store_grad'], {}), '(iterations_of_sd, beta, store_grad)\n', (8495, 8531), True, 'from metod_alg import metod_algorithm_functions as mt_alg\n'), ((8620, 8649), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(d,)'], {}), '(0, 1, (d,))\n', (8637, 8649), True, 'import numpy as np\n'), ((8780, 8835), 
'metod_alg.metod_algorithm_functions.distances', 'mt_alg.distances', (['iterations_of_sd', 'test_x', 'm', 'd', '"""All"""'], {}), "(iterations_of_sd, test_x, m, d, 'All')\n", (8796, 8835), True, 'from metod_alg import metod_algorithm_functions as mt_alg\n'), ((7109, 7127), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(20)'], {}), '(2, 20)\n', (7120, 7127), True, 'from hypothesis import assume, given, settings, strategies as st\n'), ((7129, 7146), 'hypothesis.strategies.integers', 'st.integers', (['(0)', '(3)'], {}), '(0, 3)\n', (7140, 7146), True, 'from hypothesis import assume, given, settings, strategies as st\n'), ((7148, 7167), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(100)'], {}), '(2, 100)\n', (7159, 7167), True, 'from hypothesis import assume, given, settings, strategies as st\n'), ((9277, 9294), 'numpy.random.seed', 'np.random.seed', (['p'], {}), '(p)\n', (9291, 9294), True, 'import numpy as np\n'), ((9359, 9424), 'metod_alg.objective_functions.function_parameters_several_quad', 'mt_obj.function_parameters_several_quad', (['p', 'd', 'lambda_1', 'lambda_2'], {}), '(p, d, lambda_1, lambda_2)\n', (9398, 9424), True, 'from metod_alg import objective_functions as mt_obj\n'), ((9804, 9872), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'num_points': 'num_points_t'}), '(f, g, func_args, d, num_points=num_points_t)\n', (9827, 9872), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((10227, 10346), 'metod_alg.objective_functions.check_unique_minimizers', 'mt_obj.check_unique_minimizers', (['discovered_minimizers', 'number_minimizers', 'mt_obj.calc_minimizer_sev_quad', 'func_args'], {}), '(discovered_minimizers, number_minimizers,\n mt_obj.calc_minimizer_sev_quad, func_args)\n', (10257, 10346), True, 'from metod_alg import objective_functions as mt_obj\n'), ((9063, 9081), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(20)'], {}), '(2, 20)\n', (9074, 
9081), True, 'from hypothesis import assume, given, settings, strategies as st\n'), ((9083, 9102), 'hypothesis.strategies.integers', 'st.integers', (['(5)', '(100)'], {}), '(5, 100)\n', (9094, 9102), True, 'from hypothesis import assume, given, settings, strategies as st\n'), ((9104, 9125), 'hypothesis.strategies.integers', 'st.integers', (['(50)', '(1000)'], {}), '(50, 1000)\n', (9115, 9125), True, 'from hypothesis import assume, given, settings, strategies as st\n'), ((11250, 11267), 'numpy.random.seed', 'np.random.seed', (['p'], {}), '(p)\n', (11264, 11267), True, 'import numpy as np\n'), ((11355, 11420), 'metod_alg.objective_functions.function_parameters_several_quad', 'mt_obj.function_parameters_several_quad', (['p', 'd', 'lambda_1', 'lambda_2'], {}), '(p, d, lambda_1, lambda_2)\n', (11394, 11420), True, 'from metod_alg import objective_functions as mt_obj\n'), ((11800, 11888), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'num_points': 'num_points_t', 'set_x': 'set_x_t'}), '(f, g, func_args, d, num_points=num_points_t, set_x=\n set_x_t)\n', (11823, 11888), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((12509, 12628), 'metod_alg.objective_functions.check_unique_minimizers', 'mt_obj.check_unique_minimizers', (['discovered_minimizers', 'number_minimizers', 'mt_obj.calc_minimizer_sev_quad', 'func_args'], {}), '(discovered_minimizers, number_minimizers,\n mt_obj.calc_minimizer_sev_quad, func_args)\n', (12539, 12628), True, 'from metod_alg import objective_functions as mt_obj\n'), ((11014, 11032), 'hypothesis.strategies.integers', 'st.integers', (['(2)', '(20)'], {}), '(2, 20)\n', (11025, 11032), True, 'from hypothesis import assume, given, settings, strategies as st\n'), ((11034, 11053), 'hypothesis.strategies.integers', 'st.integers', (['(5)', '(100)'], {}), '(5, 100)\n', (11045, 11053), True, 'from hypothesis import assume, given, settings, strategies as st\n'), ((11055, 11076), 
'hypothesis.strategies.integers', 'st.integers', (['(50)', '(1000)'], {}), '(50, 1000)\n', (11066, 11076), True, 'from hypothesis import assume, given, settings, strategies as st\n'), ((13132, 13150), 'numpy.random.seed', 'np.random.seed', (['(15)'], {}), '(15)\n', (13146, 13150), True, 'import numpy as np\n'), ((13245, 13264), 'numpy.zeros', 'np.zeros', (['(p, d, d)'], {}), '((p, d, d))\n', (13253, 13264), True, 'import numpy as np\n'), ((13303, 13359), 'metod_alg.objective_functions.function_parameters_sog', 'mt_obj.function_parameters_sog', (['p', 'd', 'lambda_1', 'lambda_2'], {}), '(p, d, lambda_1, lambda_2)\n', (13333, 13359), True, 'from metod_alg import objective_functions as mt_obj\n'), ((13749, 13792), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {}), '(f, g, func_args, d)\n', (13772, 13792), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((14332, 14446), 'metod_alg.objective_functions.check_unique_minimizers', 'mt_obj.check_unique_minimizers', (['discovered_minimizers', 'number_minimizers', 'mt_obj.calc_minimizer_sog', 'func_args'], {}), '(discovered_minimizers, number_minimizers,\n mt_obj.calc_minimizer_sog, func_args)\n', (14362, 14446), True, 'from metod_alg import objective_functions as mt_obj\n'), ((796, 821), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (809, 821), False, 'import pytest\n'), ((831, 899), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'num_points': 'num_points_t'}), '(f, g, func_args, d, num_points=num_points_t)\n', (854, 899), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((1089, 1118), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(p,)'], {}), '(0, 1, (p,))\n', (1106, 1118), True, 'import numpy as np\n'), ((1138, 1174), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(p, 10, 10)'], {}), '(0, 1, (p, 10, 10))\n', (1155, 1174), 
True, 'import numpy as np\n'), ((1185, 1210), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1198, 1210), False, 'import pytest\n'), ((1220, 1263), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {}), '(f, g, func_args, d)\n', (1243, 1263), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((1421, 1446), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1434, 1446), False, 'import pytest\n'), ((1456, 1512), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'beta': 'beta_t'}), '(f, g, func_args, d, beta=beta_t)\n', (1479, 1512), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((1669, 1694), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1682, 1694), False, 'import pytest\n'), ((1704, 1770), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'tolerance': 'tolerance_t'}), '(f, g, func_args, d, tolerance=tolerance_t)\n', (1727, 1770), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((1931, 1956), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1944, 1956), False, 'import pytest\n'), ((1966, 2034), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'projection': 'projection_t'}), '(f, g, func_args, d, projection=projection_t)\n', (1989, 2034), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((2196, 2221), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2209, 2221), False, 'import pytest\n'), ((2231, 2289), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'const': 'const_t'}), '(f, g, func_args, d, const=const_t)\n', (2254, 2289), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((2431, 2456), 
'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2444, 2456), False, 'import pytest\n'), ((2466, 2516), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'m': 'm_t'}), '(f, g, func_args, d, m=m_t)\n', (2489, 2516), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((2670, 2695), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2683, 2695), False, 'import pytest\n'), ((2705, 2765), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'option': 'option_t'}), '(f, g, func_args, d, option=option_t)\n', (2728, 2765), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((2912, 2937), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2925, 2937), False, 'import pytest\n'), ((2947, 3001), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'met': 'met_t'}), '(f, g, func_args, d, met=met_t)\n', (2970, 3001), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((3181, 3206), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3194, 3206), False, 'import pytest\n'), ((3216, 3290), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'initial_guess': 'initial_guess_t'}), '(f, g, func_args, d, initial_guess=initial_guess_t)\n', (3239, 3290), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((3499, 3528), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(p,)'], {}), '(0, 1, (p,))\n', (3516, 3528), True, 'import numpy as np\n'), ((3548, 3584), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(p, 10, 10)'], {}), '(0, 1, (p, 10, 10))\n', (3565, 3584), True, 'import numpy as np\n'), ((3595, 3620), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3608, 3620), False, 'import 
pytest\n'), ((3630, 3673), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {}), '(f, g, func_args, d)\n', (3653, 3673), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((3803, 3828), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3816, 3828), False, 'import pytest\n'), ((3838, 3888), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'m': 'm_t'}), '(f, g, func_args, d, m=m_t)\n', (3861, 3888), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((4095, 4120), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4108, 4120), False, 'import pytest\n'), ((4130, 4202), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'bounds_set_x': 'bounds_set_x_t'}), '(f, g, func_args, d, bounds_set_x=bounds_set_x_t)\n', (4153, 4202), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((4444, 4469), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4457, 4469), False, 'import pytest\n'), ((4479, 4551), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'bounds_set_x': 'bounds_set_x_t'}), '(f, g, func_args, d, bounds_set_x=bounds_set_x_t)\n', (4502, 4551), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((4722, 4750), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {}), '(RuntimeWarning)\n', (4734, 4750), False, 'import pytest\n'), ((4760, 4816), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'beta': 'beta_t'}), '(f, g, func_args, d, beta=beta_t)\n', (4783, 4816), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((4968, 4996), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {}), '(RuntimeWarning)\n', (4980, 4996), False, 'import pytest\n'), ((5006, 5072), 
'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'tolerance': 'tolerance_t'}), '(f, g, func_args, d, tolerance=tolerance_t)\n', (5029, 5072), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((5354, 5379), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5367, 5379), False, 'import pytest\n'), ((5389, 5462), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'tolerance': 'tolerance_t', 'm': 'm_t'}), '(f, g, func_args, d, tolerance=tolerance_t, m=m_t)\n', (5412, 5462), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((5659, 5684), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5672, 5684), False, 'import pytest\n'), ((5694, 5766), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'bounds_set_x': 'bounds_set_x_t'}), '(f, g, func_args, d, bounds_set_x=bounds_set_x_t)\n', (5717, 5766), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((5987, 6012), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6000, 6012), False, 'import pytest\n'), ((6022, 6092), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'relax_sd_it': 'relax_sd_it_t'}), '(f, g, func_args, d, relax_sd_it=relax_sd_it_t)\n', (6045, 6092), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((6291, 6316), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6304, 6316), False, 'import pytest\n'), ((6326, 6396), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'relax_sd_it': 'relax_sd_it_t'}), '(f, g, func_args, d, relax_sd_it=relax_sd_it_t)\n', (6349, 6396), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((6596, 6621), 'pytest.raises', 'pytest.raises', 
(['ValueError'], {}), '(ValueError)\n', (6609, 6621), False, 'import pytest\n'), ((6631, 6689), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'set_x': 'set_x_t'}), '(f, g, func_args, d, set_x=set_x_t)\n', (6654, 6689), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((6932, 6957), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6945, 6957), False, 'import pytest\n'), ((6967, 7025), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd'], {'set_x': 'set_x_t'}), '(f, g, func_args, d, set_x=set_x_t)\n', (6990, 7025), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((10682, 10707), 'numpy.array', 'np.array', (['starting_points'], {}), '(starting_points)\n', (10690, 10707), True, 'import numpy as np\n'), ((12735, 12760), 'numpy.array', 'np.array', (['starting_points'], {}), '(starting_points)\n', (12743, 12760), True, 'import numpy as np\n'), ((14553, 14578), 'numpy.array', 'np.array', (['starting_points'], {}), '(starting_points)\n', (14561, 14578), True, 'import numpy as np\n'), ((10091, 10122), 'numpy.unique', 'np.unique', (['classification_point'], {}), '(classification_point)\n', (10100, 10122), True, 'import numpy as np\n'), ((12143, 12174), 'numpy.unique', 'np.unique', (['classification_point'], {}), '(classification_point)\n', (12152, 12174), True, 'import numpy as np\n'), ((13966, 13997), 'numpy.unique', 'np.unique', (['classification_point'], {}), '(classification_point)\n', (13975, 13997), True, 'import numpy as np\n'), ((10514, 10541), 'numpy.where', 'np.where', (['(no_grad_evals > 4)'], {}), '(no_grad_evals > 4)\n', (10522, 10541), True, 'import numpy as np\n'), ((10875, 10906), 'numpy.round', 'np.round', (['starting_points[j]', '(5)'], {}), '(starting_points[j], 5)\n', (10883, 10906), True, 'import numpy as np\n'), ((10929, 10960), 'numpy.round', 'np.round', (['starting_points[i]', '(5)'], {}), 
'(starting_points[i], 5)\n', (10937, 10960), True, 'import numpy as np\n'), ((12342, 12369), 'numpy.where', 'np.where', (['(no_grad_evals > 4)'], {}), '(no_grad_evals > 4)\n', (12350, 12369), True, 'import numpy as np\n'), ((12928, 12959), 'numpy.round', 'np.round', (['starting_points[j]', '(5)'], {}), '(starting_points[j], 5)\n', (12936, 12959), True, 'import numpy as np\n'), ((12982, 13013), 'numpy.round', 'np.round', (['starting_points[i]', '(5)'], {}), '(starting_points[i], 5)\n', (12990, 13013), True, 'import numpy as np\n'), ((14165, 14192), 'numpy.where', 'np.where', (['(no_grad_evals > 4)'], {}), '(no_grad_evals > 4)\n', (14173, 14192), True, 'import numpy as np\n'), ((14754, 14785), 'numpy.round', 'np.round', (['starting_points[j]', '(5)'], {}), '(starting_points[j], 5)\n', (14762, 14785), True, 'import numpy as np\n'), ((14808, 14839), 'numpy.round', 'np.round', (['starting_points[i]', '(5)'], {}), '(starting_points[i], 5)\n', (14816, 14839), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 25 10:54:50 2020
@author: Simon
"""
import numpy as np
from sleep import SleepSet
import config as cfg
import features
import pandas as pd
import dateparser
from datetime import datetime
from scipy.ndimage.morphology import binary_dilation
def block_length(data, seconds):
    """Grow epoch-wise flags to a minimum block length and re-epoch them.

    Each 30 s epoch flag is expanded to per-second resolution, dilated so
    that a flagged region covers at least ``seconds`` around it, then
    collapsed back to 30 s epochs (an epoch is flagged if any of its
    seconds is flagged).
    """
    per_second = np.repeat(data, 30)
    radius = seconds - 15  # subtract the window middle (half an epoch)
    if radius > 0:
        per_second = binary_dilation(per_second,
                                        structure=[True, True, True],
                                        iterations=radius)
    return per_second.reshape([-1, 30]).max(1)
if __name__=='__main__':
    # Per subject: count how many 30 s epochs / 300 s HRV windows are lost to
    # ECG artefacts and how many are recovered by allowing up to N Kubios RR
    # corrections per epoch. Results are written to one Excel row per subject.
    ss = SleepSet(cfg.folder_unisens).stratify()
    header = ['Code',
              'Epochs',
              'epochs artefact',
              'Uli gain',
              '0-corr 30 sec',
              '1-corr 30 sec',
              '2-corr 30 sec',
              '3-corr 30 sec',
              '300 sec loss',
              '0-corr 300 sec win',
              '1-corr 300 sec win',
              '2-corr 300 sec win',
              '3-corr 300 sec win',
              '% loss 300 sec',
              '% loss after 0-corr 300 sec',
              '% loss after 1-corr 300 sec',
              '% loss after 2-corr 300 sec',
              '% loss after 3-corr 300 sec']
    table = pd.DataFrame(columns = header)
    for p in ss:
        art = p.get_artefacts(only_sleeptime=True)
        kubios = p.feats.get_data()
        # seconds since midnight of recording start, to align the Kubios
        # timestamps (T_RR) with the hypnogram epochs
        starttime = datetime.strptime(p.timestampStart, '%Y-%m-%dT%H:%M:%S')
        startsec = (starttime.hour * 60 + starttime.minute) * 60 + starttime.second
        p.get_hypno()
        epoch_sonset = p.sleep_onset//30
        epoch_soffset = p.sleep_offset//30
        RRi = kubios['Data']['RR']
        T_RR = kubios['Data']['T_RR'] - startsec
        RR = np.diff(T_RR)
        # timestamps of RR intervals that Kubios altered (corrected beats)
        t_interpolated = T_RR[:-1][RR!=RRi]
        corr = features.extract_RR_windows(t_interpolated, RR[RR!=RRi], wsize=30, pad=True, expected_nwin=len(art))[epoch_sonset:epoch_soffset]
        if art.mean()==0:
            # no artefact scoring present for this subject; NOTE(review): the
            # derived `art` is never used because the subject is skipped here
            art = np.array([len(c)>0 for c in corr])
            continue
        # number of corrected beats per 30 s epoch
        corr = [len(c) for c in corr]
        # epochs "gained": artefact-free epochs that still needed >1 correction
        uli_gain = np.array([a==False if c>1 else False for c, a in zip(corr,art)])
        # keep the correction count only for artefact epochs, -1 otherwise
        corr = np.array([c if a else -1 for c, a in zip(corr,art)])
        if len(corr)!=len(art):
            print(f'Art!=Corr for {p.code}, {len(corr)}!={len(art)}')
        corr_all = corr>=0          # any artefact epoch
        corr_0 = corr==0
        corr_1 = corr==1
        corr_2 = corr==2
        corr_3 = corr==3
        # same masks expanded to 300 s windows (a window is lost if any
        # contained epoch is still flagged after allowing N corrections)
        corr_all_300 = block_length(corr_all, 300)
        corr_0_300 = block_length(corr>0, 300)
        corr_1_300 = block_length(corr>1, 300)
        corr_2_300 = block_length(corr>2, 300)
        corr_3_300 = block_length(corr>3, 300)
        row = {}
        row['Code'] = p.code
        row['Epochs'] = len(art)
        row['epochs artefact'] = np.sum(corr_all)
        row['Uli gain'] = np.sum(uli_gain)
        row['0-corr 30 sec'] = np.sum(corr_0)
        row['1-corr 30 sec'] = np.sum(corr_1)
        row['2-corr 30 sec'] = np.sum(corr_2)
        row['3-corr 30 sec'] = np.sum(corr_3)
        row['300 sec loss'] = np.sum(corr_all_300)
        # windows won by each additional allowed correction (telescoping sums)
        row['0-corr 300 sec win'] = np.sum(corr_all_300) - np.sum(corr_0_300)
        row['1-corr 300 sec win'] = np.sum(corr_0_300) - np.sum(corr_1_300)
        row['2-corr 300 sec win'] = np.sum(corr_1_300) - np.sum(corr_2_300)
        # BUGFIX: was np.sum(corr_1_300) - np.sum(corr_3_300), which
        # double-counted the windows already won by the 2-correction step
        row['3-corr 300 sec win'] = np.sum(corr_2_300) - np.sum(corr_3_300)
        row['% loss 300 sec'] = f'{np.mean(corr_all_300)*100:.1f} %'
        row['% loss after 0-corr 300 sec'] = f'{np.logical_and(corr_all_300, corr_0_300).mean()*100:.1f} %'
        row['% loss after 1-corr 300 sec'] = f'{np.logical_and(corr_all_300, corr_1_300).mean()*100:.1f} %'
        row['% loss after 2-corr 300 sec'] = f'{np.logical_and(corr_all_300, corr_2_300).mean()*100:.1f} %'
        row['% loss after 3-corr 300 sec'] = f'{np.logical_and(corr_all_300, corr_3_300).mean()*100:.1f} %'
        table = table.append(row, ignore_index=True, sort=False)
    table.to_excel(cfg.documents + '/artefact_corr_improvement.xls')
| [
"numpy.mean",
"numpy.repeat",
"numpy.logical_and",
"datetime.datetime.strptime",
"sleep.SleepSet",
"numpy.diff",
"numpy.sum",
"scipy.ndimage.morphology.binary_dilation",
"pandas.DataFrame"
] | [((333, 352), 'numpy.repeat', 'np.repeat', (['data', '(30)'], {}), '(data, 30)\n', (342, 352), True, 'import numpy as np\n'), ((1077, 1105), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'header'}), '(columns=header)\n', (1089, 1105), True, 'import pandas as pd\n'), ((430, 501), 'scipy.ndimage.morphology.binary_dilation', 'binary_dilation', (['data'], {'structure': '[True, True, True]', 'iterations': 'seconds'}), '(data, structure=[True, True, True], iterations=seconds)\n', (445, 501), False, 'from scipy.ndimage.morphology import binary_dilation\n'), ((1238, 1294), 'datetime.datetime.strptime', 'datetime.strptime', (['p.timestampStart', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(p.timestampStart, '%Y-%m-%dT%H:%M:%S')\n", (1255, 1294), False, 'from datetime import datetime\n'), ((1583, 1596), 'numpy.diff', 'np.diff', (['T_RR'], {}), '(T_RR)\n', (1590, 1596), True, 'import numpy as np\n'), ((2747, 2763), 'numpy.sum', 'np.sum', (['corr_all'], {}), '(corr_all)\n', (2753, 2763), True, 'import numpy as np\n'), ((2790, 2806), 'numpy.sum', 'np.sum', (['uli_gain'], {}), '(uli_gain)\n', (2796, 2806), True, 'import numpy as np\n'), ((2839, 2853), 'numpy.sum', 'np.sum', (['corr_0'], {}), '(corr_0)\n', (2845, 2853), True, 'import numpy as np\n'), ((2886, 2900), 'numpy.sum', 'np.sum', (['corr_1'], {}), '(corr_1)\n', (2892, 2900), True, 'import numpy as np\n'), ((2933, 2947), 'numpy.sum', 'np.sum', (['corr_2'], {}), '(corr_2)\n', (2939, 2947), True, 'import numpy as np\n'), ((2980, 2994), 'numpy.sum', 'np.sum', (['corr_3'], {}), '(corr_3)\n', (2986, 2994), True, 'import numpy as np\n'), ((3034, 3054), 'numpy.sum', 'np.sum', (['corr_all_300'], {}), '(corr_all_300)\n', (3040, 3054), True, 'import numpy as np\n'), ((631, 659), 'sleep.SleepSet', 'SleepSet', (['cfg.folder_unisens'], {}), '(cfg.folder_unisens)\n', (639, 659), False, 'from sleep import SleepSet\n'), ((3111, 3129), 'numpy.sum', 'np.sum', (['corr_0_300'], {}), '(corr_0_300)\n', (3117, 3129), True, 'import numpy as 
np\n'), ((3167, 3185), 'numpy.sum', 'np.sum', (['corr_0_300'], {}), '(corr_0_300)\n', (3173, 3185), True, 'import numpy as np\n'), ((3188, 3206), 'numpy.sum', 'np.sum', (['corr_1_300'], {}), '(corr_1_300)\n', (3194, 3206), True, 'import numpy as np\n'), ((3244, 3262), 'numpy.sum', 'np.sum', (['corr_1_300'], {}), '(corr_1_300)\n', (3250, 3262), True, 'import numpy as np\n'), ((3265, 3283), 'numpy.sum', 'np.sum', (['corr_2_300'], {}), '(corr_2_300)\n', (3271, 3283), True, 'import numpy as np\n'), ((3321, 3339), 'numpy.sum', 'np.sum', (['corr_1_300'], {}), '(corr_1_300)\n', (3327, 3339), True, 'import numpy as np\n'), ((3342, 3360), 'numpy.sum', 'np.sum', (['corr_3_300'], {}), '(corr_3_300)\n', (3348, 3360), True, 'import numpy as np\n'), ((3405, 3426), 'numpy.mean', 'np.mean', (['corr_all_300'], {}), '(corr_all_300)\n', (3412, 3426), True, 'import numpy as np\n'), ((3487, 3527), 'numpy.logical_and', 'np.logical_and', (['corr_all_300', 'corr_0_300'], {}), '(corr_all_300, corr_0_300)\n', (3501, 3527), True, 'import numpy as np\n'), ((3595, 3635), 'numpy.logical_and', 'np.logical_and', (['corr_all_300', 'corr_1_300'], {}), '(corr_all_300, corr_1_300)\n', (3609, 3635), True, 'import numpy as np\n'), ((3703, 3743), 'numpy.logical_and', 'np.logical_and', (['corr_all_300', 'corr_2_300'], {}), '(corr_all_300, corr_2_300)\n', (3717, 3743), True, 'import numpy as np\n'), ((3811, 3851), 'numpy.logical_and', 'np.logical_and', (['corr_all_300', 'corr_3_300'], {}), '(corr_all_300, corr_3_300)\n', (3825, 3851), True, 'import numpy as np\n')] |
import numpy as np
import traci
"""
Codes for light phases (actions)
0 => from TOP EAST to TOP WEST and to TOP and from BOTTOM WEST to BOTTOM EAST
1 => from BOTTOM to TOP WEST, to TOP and to BOTTOM EAST
"""
# Phase indices passed to traci.trafficlight.setPhase (see set_light_phase):
# each green phase is followed by its yellow transition phase.
PHASES_DICT = {
    'PHASE_1_GREEN' : 0,
    'PHASE_1_YELLOW': 1,
    'PHASE_2_GREEN' : 2,
    'PHASE_2_YELLOW': 3
}
#roads before reaching traffic light => Bottom Left, Top Right, Bottom Center
# NOTE(review): these edge IDs must match the SUMO network file — verify
ROADS = ['BL', 'TR', 'BC']
#lanes within each road; an entry may group several lane IDs into one
# state "lane" (see get_state, which tests `lane_id in l`)
LANES = [['BL_0', 'BL_1'], 'TR_0', ['TR_1', 'TR_2'], 'BC_0', 'BC_1', 'BC_2']
class Simulation(object):
    """Run SUMO episodes via traci, feeding traffic-light states to an agent
    and collecting reward / waiting-time statistics per epoch."""

    def __init__(self, agent, sumo, max_steps, is_training=None):
        self.agent = agent
        self.sumo = sumo                  # sumo command line used by traci.start
        self.max_steps = max_steps        # episode length in simulation steps
        self.is_training = is_training    # None => evaluation only (no memory/replay)
        # per-epoch statistics, appended by save_stats()
        self.total_rewards = []
        self.waiting_lengths = []
        self.total_waiting_times = []

    #Refer to Andrea Vidali's thesis if any question about this method
    def get_state(self):
        """Map every approaching vehicle onto a fixed-size binary state vector.

        Each lane is split into 10 distance groups of increasing size
        (7 m near the light up to 211 m); cell ``int(str(lane)+str(group))``
        is set to 1 if at least one vehicle occupies that lane/group.
        """
        state = np.zeros(self.agent.nb_states)
        for veh_id in traci.vehicle.getIDList():
            lane_id = traci.vehicle.getLaneID(veh_id)
            # distance from the traffic light = lane length - lane position
            veh_pos = traci.lane.getLength(lane_id) - traci.vehicle.getLanePosition(veh_id)
            # resolve lane index; -1 => vehicle already crossed the light
            lane = -1
            for i, l in enumerate(LANES):
                if lane_id in l:
                    lane = i
            """
            Dividing lane in 10 groups where each vehicle = 7 (size 5 + gap 2)
            1st group => 1 vehicle
            2nd group => 1 vehicle
            3rd group => 2 vehicles
            4th group => 2 vehicles
            ...     => ...
            10th group => 5 vehicles
            """
            # BUGFIX: without the break the *last* matching threshold won, so
            # every vehicle closer than 211 m was mapped to group 9 (and a
            # vehicle beyond 211 m reused a stale/undefined group). Take the
            # first group whose boundary the vehicle is within; fall back to
            # the farthest group otherwise.
            veh_lane_group = 9
            for index, boundary in enumerate([7, 14, 28, 42, 63, 84, 112, 140, 175, 211]):
                if veh_pos < boundary:
                    veh_lane_group = index
                    break
            # add only vehicles that did not cross the light
            if lane >= 0:
                # encode as a 2-digit index: lane 1 / group 5 -> state[15]
                veh_pos = str(lane) + str(veh_lane_group)
                state[int(veh_pos)] = 1
        return state

    def get_waitinglanes_length(self):
        """Total number of halted vehicles over all incoming roads."""
        waiting_lengths = 0
        for road in ROADS:
            waiting_lengths += traci.edge.getLastStepHaltingNumber(road)
        return waiting_lengths

    def get_waiting_times(self):
        """Accumulated waiting time (seconds) over all incoming roads."""
        waiting_times = 0
        for road in ROADS:
            waiting_times += traci.edge.getWaitingTime(road)
        return waiting_times

    def execute_step(self, action, step):
        """Apply *action* to the light and simulate its whole phase duration.

        Returns ``(next_state, steps_done, current_waiting_times,
        [summed waiting time, summed queue length])``.
        """
        self.set_light_phase(action)
        steps_done = 0
        sum_waiting_time = 0
        sum_waiting_length = 0
        # clamp the phase duration so the episode does not run past max_steps
        tlight_steps = int(traci.trafficlight.getPhaseDuration('center'))
        if (step + tlight_steps) >= self.max_steps:
            tlight_steps = self.max_steps - step
        # simulate one step at a time and gather info
        while tlight_steps > 0:
            traci.simulationStep()
            sum_waiting_time += self.get_waiting_times()
            sum_waiting_length += self.get_waitinglanes_length()
            steps_done += 1
            tlight_steps -= 1
        return self.get_state(), steps_done, self.get_waiting_times(), [sum_waiting_time, sum_waiting_length]

    def get_next_phase(self, action):
        """Return the phase index following *action* in PHASES_DICT order,
        wrapping around after the last phase."""
        for k, v in PHASES_DICT.items():
            if v == action:
                key = k
        phases_list = list(PHASES_DICT)
        try:
            next_key = phases_list[phases_list.index(key) + 1]
        except IndexError:  # last phase wraps to the first
            next_key = phases_list[0]
        return PHASES_DICT[next_key]

    def set_light_phase(self, action):
        """Advance the 'center' traffic light to the phase after *action*."""
        next_action = self.get_next_phase(action)
        traci.trafficlight.setPhase('center', next_action)

    def run(self, epsilon):
        """Run one full episode with epsilon-greedy action selection."""
        traci.start(self.sumo)
        total_reward = 0
        total_waiting_time = 0
        total_waiting_length = 0
        step = 0
        last_state = self.get_state()
        last_total_waiting = 0
        while step < self.max_steps:
            action = self.agent.select_action(last_state, epsilon)
            next_state, steps_done, total_waiting, observation = self.execute_step(action, step)
            # reward = reduction of the accumulated waiting time
            reward = last_total_waiting - total_waiting
            # store transitions in agent memory only while training
            if self.is_training is not None:
                self.agent.memory.add_sample((last_state, next_state, action, reward))
            step += steps_done
            total_waiting_time += observation[0]
            total_waiting_length += observation[1]
            last_state = next_state
            last_total_waiting = total_waiting
            # only penalties (increased waiting) count towards the epoch reward
            if reward < 0:
                total_reward += reward
        print('Total reward: {}'.format(total_reward))
        # save stats for later plotting
        self.save_stats(total_reward, total_waiting_time, total_waiting_length)
        traci.close()
        # train the network between epochs, only while training
        if self.is_training is not None:
            self.agent.experience_replay()

    def save_stats(self, total_reward, total_waiting_time, total_waiting_length):
        """Append one epoch's aggregates to the running statistics."""
        self.total_rewards.append(total_reward)
        self.waiting_lengths.append(total_waiting_length)
        self.total_waiting_times.append(total_waiting_time)

    def get_stats(self):
        """Return per-epoch rewards and per-step means of the waiting stats."""
        return {
            'Reward' : self.total_rewards,
            'Mean Waiting Length (m)' : np.divide(self.waiting_lengths, self.max_steps),
            'Mean Waiting Time (s)' : np.divide(self.total_waiting_times, self.max_steps)
            }
"traci.trafficlight.getPhaseDuration",
"traci.start",
"traci.edge.getWaitingTime",
"traci.lane.getLength",
"traci.close",
"traci.simulationStep",
"numpy.zeros",
"traci.vehicle.getLanePosition",
"traci.trafficlight.setPhase",
"traci.vehicle.getIDList",
"traci.vehicle.getLaneID",
"traci.edge.get... | [((1051, 1081), 'numpy.zeros', 'np.zeros', (['self.agent.nb_states'], {}), '(self.agent.nb_states)\n', (1059, 1081), True, 'import numpy as np\n'), ((1162, 1187), 'traci.vehicle.getIDList', 'traci.vehicle.getIDList', ([], {}), '()\n', (1185, 1187), False, 'import traci\n'), ((4425, 4475), 'traci.trafficlight.setPhase', 'traci.trafficlight.setPhase', (['"""center"""', 'next_action'], {}), "('center', next_action)\n", (4452, 4475), False, 'import traci\n'), ((4513, 4535), 'traci.start', 'traci.start', (['self.sumo'], {}), '(self.sumo)\n', (4524, 4535), False, 'import traci\n'), ((5650, 5663), 'traci.close', 'traci.close', ([], {}), '()\n', (5661, 5663), False, 'import traci\n'), ((1211, 1242), 'traci.vehicle.getLaneID', 'traci.vehicle.getLaneID', (['veh_id'], {}), '(veh_id)\n', (1234, 1242), False, 'import traci\n'), ((2829, 2870), 'traci.edge.getLastStepHaltingNumber', 'traci.edge.getLastStepHaltingNumber', (['road'], {}), '(road)\n', (2864, 2870), False, 'import traci\n'), ((3018, 3049), 'traci.edge.getWaitingTime', 'traci.edge.getWaitingTime', (['road'], {}), '(road)\n', (3043, 3049), False, 'import traci\n'), ((3324, 3369), 'traci.trafficlight.getPhaseDuration', 'traci.trafficlight.getPhaseDuration', (['"""center"""'], {}), "('center')\n", (3359, 3369), False, 'import traci\n'), ((3570, 3592), 'traci.simulationStep', 'traci.simulationStep', ([], {}), '()\n', (3590, 3592), False, 'import traci\n'), ((6206, 6253), 'numpy.divide', 'np.divide', (['self.waiting_lengths', 'self.max_steps'], {}), '(self.waiting_lengths, self.max_steps)\n', (6215, 6253), True, 'import numpy as np\n'), ((6297, 6348), 'numpy.divide', 'np.divide', (['self.total_waiting_times', 'self.max_steps'], {}), '(self.total_waiting_times, self.max_steps)\n', (6306, 6348), True, 'import numpy as np\n'), ((1310, 1339), 'traci.lane.getLength', 'traci.lane.getLength', (['lane_id'], {}), '(lane_id)\n', (1330, 1339), False, 'import traci\n'), ((1342, 1379), 
'traci.vehicle.getLanePosition', 'traci.vehicle.getLanePosition', (['veh_id'], {}), '(veh_id)\n', (1371, 1379), False, 'import traci\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
More or less a python port of Stewart method from R SpatialPositon package
(https://github.com/Groupe-ElementR/SpatialPosition/).
@author: mthh
"""
import numpy as np
from matplotlib.pyplot import contourf
from shapely import speedups
from shapely.ops import unary_union, transform
from shapely.geometry import Polygon, MultiPolygon
from geopandas import GeoDataFrame
try:
from jenkspy import jenks_breaks
except: jenks_breaks = None
from .helpers_classif import get_opt_nb_class, maximal_breaks, head_tail_breaks
if speedups.available and not speedups.enabled: speedups.enable()
def quick_idw(input_geojson_points, variable_name, power, nb_class,
              nb_pts=10000, resolution=None, disc_func=None,
              mask=None, user_defined_breaks=None,
              variable_name2=None, output='GeoJSON', **kwargs):
    """
    Function acting as a one-shot wrapper around the SmoothIdw object.
    Read a file of point values and optionally a mask file, and
    return the smoothed representation as GeoJSON or GeoDataFrame.

    Parameters
    ----------
    input_geojson_points : str
        Path to file to use as input (Points/Polygons) or GeoDataFrame object,
        must contain a relevant numerical field.
    variable_name : str
        The name of the variable to use (numerical field only).
    power : int or float
        The power of the IDW weighting function.
    nb_class : int, optional
        The number of class, if unset will most likely be 8.
        (default: None)
    nb_pts : int, optional
        The number of points to use for the underlying grid.
        (default: 10000)
    resolution : int, optional
        The resolution to use (in meters), if not set a default
        resolution will be used in order to make a grid containing around
        10000 pts (default: None).
    disc_func : str, optional
        The name of the classification function to be used to decide on which
        break values to use to create the contour layer.
        (default: None)
    mask : str, optional
        Path to the file (Polygons only) to use as clipping mask,
        can also be a GeoDataFrame (default: None).
    user_defined_breaks : list or tuple, optional
        A list of ordered break to use to construct the contours
        (overrides `nb_class` and `disc_func` values if any, default: None).
    variable_name2 : str, optional
        The name of the 2nd variable to use (numerical field only); values
        computed from this variable will be used to divide
        values computed from the first variable (default: None).
    output : string, optional
        The type of output expected (not case-sensitive)
        in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON").

    Returns
    -------
    smoothed_result : bytes or GeoDataFrame
        The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame.

    Examples
    --------
    Basic usage, output to raw geojson (bytes):

    >>> result = quick_idw("some_file.geojson", "some_variable", power=2)

    More options, returning a GeoDataFrame:

    >>> smooth_gdf = quick_idw("some_file.geojson", "some_variable", power=2,
                               nb_class=8, disc_func="percentiles",
                               output="GeoDataFrame")
    """
    # Build the interpolator, then delegate classification/contouring
    # to its `render` method:
    smoother = SmoothIdw(input_geojson_points,
                         variable_name,
                         power,
                         nb_pts,
                         resolution,
                         variable_name2,
                         mask,
                         **kwargs)
    return smoother.render(nb_class=nb_class,
                           disc_func=disc_func,
                           user_defined_breaks=user_defined_breaks,
                           output=output)
def quick_stewart(input_geojson_points, variable_name, span,
                  beta=2, typefct='exponential',nb_class=None,
                  nb_pts=10000, resolution=None, mask=None,
                  user_defined_breaks=None, variable_name2=None,
                  output="GeoJSON", **kwargs):
    """
    One-shot convenience wrapper around the SmoothStewart object.
    Read a file of point values and optionally a mask file, and
    return the smoothed representation as GeoJSON or GeoDataFrame.

    Parameters
    ----------
    input_geojson_points : str
        Path to file to use as input (Points/Polygons) or GeoDataFrame object,
        must contain a relevant numerical field.
    variable_name : str
        The name of the variable to use (numerical field only).
    span : int
        The span (meters).
    beta : float
        The beta parameter of the interaction function.
    typefct : str, optional
        The type of function in {"exponential", "pareto"}
        (default: "exponential").
    nb_class : int, optional
        The number of class, if unset will most likely be 8
        (default: None).
    nb_pts : int, optional
        The number of points to use for the underlying grid
        (default: 10000).
    resolution : int, optional
        The resolution to use (in meters), if not set a default
        resolution will be used in order to make a grid containing around
        10000 pts (default: None).
    mask : str, optional
        Path to the file (Polygons only) to use as clipping mask,
        can also be a GeoDataFrame (default: None).
    user_defined_breaks : list or tuple, optional
        A list of ordered break to use to construct the contours
        (override `nb_class` value if any, default: None).
    variable_name2 : str, optional
        The name of the 2nd variable to use (numerical field only); values
        computed from this variable will be used to divide
        values computed from the first variable (default: None).
    output : string, optional
        The type of output expected (not case-sensitive)
        in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON").

    Returns
    -------
    smoothed_result : bytes or GeoDataFrame
        The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame.

    Examples
    --------
    Basic usage, output to raw geojson (bytes):

    >>> result = quick_stewart("some_file.geojson", "some_variable",
                               span=12500, beta=3, typefct="exponential")

    More options, returning a GeoDataFrame:

    >>> smooth_gdf = quick_stewart("some_file.geojson", "some_variable",
                                   span=12500, beta=3, typefct="pareto",
                                   output="GeoDataFrame")
    """
    # Build the interpolator, then delegate the classification/contouring
    # step to its `render` method:
    smoother = SmoothStewart(
        input_geojson_points,
        variable_name,
        span,
        beta,
        typefct,
        nb_pts,
        resolution,
        variable_name2,
        mask,
        **kwargs)
    return smoother.render(
        nb_class=nb_class,
        user_defined_breaks=user_defined_breaks,
        output=output)
def make_regular_points_with_no_res(bounds, nb_points=10000):
    """
    Return a regular grid of points within `bounds` with the specified
    number of points (or a close approximate value).

    Parameters
    ----------
    bounds : 4-floats tuple
        The bbox of the grid, as xmin, ymin, xmax, ymax.
    nb_points : int, optional
        The desired number of points (default: 10000).

    Returns
    -------
    points : numpy.array
        An array of coordinates
    shape : 2-floats tuple
        The number of points on each dimension (width, height)
    """
    # Fix: the bounds were unpacked twice in the original (duplicated line).
    minlon, minlat, maxlon, maxlat = bounds
    # Pad the bbox by 1/8 of its extent on every side so the grid
    # extends slightly beyond the data:
    offset_lon = (maxlon - minlon) / 8
    offset_lat = (maxlat - minlat) / 8
    minlon -= offset_lon
    maxlon += offset_lon
    minlat -= offset_lat
    maxlat += offset_lat
    # Use a square grid of ~nb_points cells (truncated square root per axis):
    nb_x = nb_y = int(nb_points ** 0.5)
    return (
        np.linspace(minlon, maxlon, nb_x),
        np.linspace(minlat, maxlat, nb_y),
        (nb_y, nb_x)
    )
def make_regular_points(bounds, resolution, longlat=True):
    """
    Return a regular grid of points within `bounds` with the specified
    resolution.

    Parameters
    ----------
    bounds : 4-floats tuple
        The bbox of the grid, as xmin, ymin, xmax, ymax.
    resolution : int
        The resolution to use, in the same unit as `bounds`
    longlat : bool, optional
        Whether to measure the bbox sides with haversine-like distance
        (True) or plain euclidean distance (False).

    Returns
    -------
    points : numpy.array
        An array of coordinates
    shape : 2-floats tuple
        The number of points on each dimension (width, height)
    """
    minlon, minlat, maxlon, maxlat = bounds
    # Pad the bbox by 1/8 of its extent on every side:
    pad_x = (maxlon - minlon) / 8
    pad_y = (maxlat - minlat) / 8
    minlon, maxlon = minlon - pad_x, maxlon + pad_x
    minlat, maxlat = minlat - pad_y, maxlat + pad_y
    mid_lon = (maxlon + minlon) / 2
    mid_lat = (maxlat + minlat) / 2
    # Measure the two sides of the (padded) bbox through its midlines:
    south = np.array([mid_lon, minlat])
    north = np.array([mid_lon, maxlat])
    west = np.array([minlon, mid_lat])
    east = np.array([maxlon, mid_lat])
    if longlat:
        height = hav_dist(south, north)
        width = hav_dist(west, east)
    else:
        height = np.linalg.norm(south - north)
        width = np.linalg.norm(west - east)
    nb_x = int(round(width / resolution))
    nb_y = int(round(height / resolution))
    # Re-balance strongly anisotropic grids a little:
    if nb_y * 0.6 > nb_x:
        nb_x = int(nb_x + nb_x / 3)
    elif nb_x * 0.6 > nb_y:
        nb_y = int(nb_y + nb_y / 3)
    return (
        np.linspace(minlon, maxlon, nb_x),
        np.linspace(minlat, maxlat, nb_y),
        (nb_y, nb_x)
    )
def _compute_centroids(geometries):
res = []
for geom in geometries:
if hasattr(geom, '__len__'):
ix_biggest = np.argmax([g.area for g in geom])
res.append(geom[ix_biggest].centroid)
else:
res.append(geom.centroid)
return res
def make_dist_mat(xy1, xy2, longlat=True):
    """
    Return a distance matrix between two sets of coordinates.
    Use haversine-like distance (default) or euclidean distance
    (if longlat=False).

    Parameters
    ----------
    xy1 : numpy.array
        The first set of coordinates as [(x, y), (x, y), (x, y)].
    xy2 : numpy.array
        The second set of coordinates as [(x, y), (x, y), (x, y)].
    longlat : boolean, optional
        Whether the coordinates are in geographic (longitude/latitude)
        format or not.
        Fix: the original docstring claimed the default was False;
        the actual default is True.

    Returns
    -------
    mat_dist : numpy.array
        The distance matrix between xy1 and xy2
    """
    if not longlat:
        # Euclidean: pairwise differences per axis, then hypotenuse.
        d0 = np.subtract.outer(xy1[:, 0], xy2[:, 0])
        d1 = np.subtract.outer(xy1[:, 1], xy2[:, 1])
        return np.hypot(d0, d1)
    # Broadcast xy1 against xy2 to get the full (len(xy1), len(xy2)) matrix.
    return hav_dist(xy1[:, None], xy2)
def hav_dist(locs1, locs2):
    """
    Return the spherical (great-circle) distance, in meters, between two
    sets of coordinates, using the spherical law of cosines on an Earth
    radius of 6 367 000 m. Inputs are expected in radians (callers in
    this module multiply degrees by pi/180 before calling).

    Fix: the previous docstring was copy-pasted from `make_dist_mat` and
    described a `longlat` switch this function does not have.

    NOTE(review): the formula uses index 0 of each coordinate pair as the
    latitude-like term, while callers in this module pass (lon, lat)
    pairs — this mirrors the original port; confirm intended behavior.

    Parameters
    ----------
    locs1 : numpy.array
        The first set of coordinate pairs (radians); broadcastable
        against `locs2`.
    locs2 : numpy.array
        The second set of coordinate pairs (radians).

    Returns
    -------
    mat_dist : numpy.array
        The distances between locs1 and locs2, in meters.
    """
    phi1, lam1 = locs1[..., 0], locs1[..., 1]
    phi2, lam2 = locs2[..., 0], locs2[..., 1]
    # Spherical law of cosines, written as
    # cos(dphi) - cos(phi1)*cos(phi2)*(1 - cos(dlam))
    #   == sin(phi1)sin(phi2) + cos(phi1)cos(phi2)cos(dlam)
    central = (np.cos(phi1 - phi2)
               - np.cos(phi1) * np.cos(phi2) * (1 - np.cos(lam1 - lam2)))
    return 6367000 * np.arccos(central)
def isopoly_to_gdf(collec_poly, levels, field_name="levels"):
    """
    Convert a matplotlib.contour.QuadContourSet into a GeoDataFrame of
    (Multi)Polygons, attaching to each feature the corresponding entry of
    `levels` under the attribute `field_name`.

    Parameters
    ----------
    collec_poly : matplotlib.contour.QuadContourSet
        The result of a grid interpolation from matplotlib.
    levels : array-like
        The values to use as attributes for the constructed GeoDataFrame
        (one per contour collection).
    field_name : str
        The name of the field to fill with the values contained in
        `levels` (default: "levels").

    Returns
    -------
    gdf_contours : GeoDataFrame
        The result as a GeoDataFrame.
    """
    geoms, values = [], []
    for level_ix, collection in enumerate(collec_poly.collections):
        parts = []
        for path in collection.get_paths():
            path.should_simplify = False
            rings = path.to_polygons()
            # First ring (if long enough) is the exterior; the remaining
            # rings with more than 3 vertices are holes.
            outer, inner = [], []
            if len(rings) > 0 and len(rings[0]) > 3:
                outer = rings[0]
                if len(rings) > 1:
                    inner = [ring for ring in rings[1:] if len(ring) > 3]
            parts.append(Polygon(outer, inner))
        # One part -> plain Polygon; several -> MultiPolygon;
        # empty collections are dropped (and so is their level value).
        if len(parts) > 1:
            geoms.append(MultiPolygon(parts))
            values.append(levels[level_ix])
        elif len(parts) == 1:
            geoms.append(parts[0])
            values.append(levels[level_ix])
    return GeoDataFrame(geometry=geoms,
                        data=values,
                        columns=[field_name])
class BaseSmooth:
    """
    Base class holding the logic shared by the SmoothStewart and SmoothIdw
    interpolators: mask loading/validation, missing-value filtering,
    classification of the interpolated values and rendering of the
    contour layer.

    Subclasses are expected to set, before `render` is called: `gdf`,
    `proj_to_use`, `longlat`, `use_mask` (plus `mask` when relevant),
    `XI`, `YI`, `shape`, `zi` and the `info`/`info2`/`info3` strings.
    """
    def __repr__(self):
        return "\n".join([self.info, self.info2, self.info3])
    def __str__(self):
        return "\n".join([self.info, self.info2, self.info3])
    @property
    def properties(self):
        # Prints (does not return) the three informational summary lines.
        print("\n".join([self.info, self.info2, self.info3]))
    def open_mask(self, mask, input_layer):
        """Load the clipping mask into `self.mask` and validate it."""
        # Read the mask according to its format:
        if isinstance(mask, GeoDataFrame):
            self.mask = mask
        elif isinstance(mask, str) and isinstance(input_layer, str) \
                and mask == input_layer:
            # The mask is the same file as the input: reuse the loaded layer.
            self.mask = self.gdf.copy()
        else:
            self.mask = GeoDataFrame.from_file(mask)
        self.check_mask()
    def check_mask(self):
        """Validate `self.mask` (polygonal, right CRS) and set `use_mask`."""
        # Ensure the mask is made of Polygon/MultiPolygon:
        if len(set(self.mask.type)
               .intersection({"Polygon", "MultiPolygon"})) > 0:
            # Use the same projection for the mask as for the input layer:
            if self.mask.crs and self.mask.crs is not self.proj_to_use:
                self.use_mask = True
                self.mask.to_crs(self.proj_to_use, inplace=True)
            else:
                self.use_mask = True
                self.mask.crs = self.proj_to_use
        else:
            # Not a polygonal layer: silently drop the requested mask.
            self.mask = None
            self.use_mask = False
    def filter_missing_values(self, variable_name, variable_name2):
        """Coerce the value field(s) to float and drop features with NaN."""
        # Convert the first value field to a numeric field if not already,
        # and dont take into account features with no value / NaN value
        if not self.gdf[variable_name].dtype in (float, int):
            self.gdf.loc[:, variable_name] = \
                self.gdf[variable_name].replace('', np.NaN)
            self.gdf.loc[:, variable_name] = self.gdf[variable_name].astype(float)
        self.gdf = self.gdf[self.gdf[variable_name].notnull()]
        # Convert the second value field to a numeric field if not already,
        # and dont take into account features with no value / NaN value
        if variable_name2:
            if not self.gdf[variable_name2].dtype in (float, int):
                self.gdf.loc[:, variable_name2] = \
                    self.gdf[variable_name2].replace('', np.NaN)
                self.gdf.loc[:, variable_name2] = \
                    self.gdf[variable_name2].astype(float)
            self.gdf = self.gdf[self.gdf[variable_name2].notnull()]
        # Provide a new index if entries have been removed :
        self.gdf.index = range(len(self.gdf))
    def define_levels(self, nb_class, disc_func):
        """Compute the break values used to build the contour classes.

        The first break is always the minimum of the interpolated values;
        the remaining breaks depend on the requested classification
        method (`disc_func`). Raises ValueError for an unknown method.
        """
        zi = self.zi
        _min = np.nanmin(zi)
        if not nb_class:
            # nb_class = int(get_opt_nb_class(len(zi)) - 2)
            nb_class = 8
        if not disc_func or "prog_geom" in disc_func:
            levels = [_min] + [
                np.nanmax(zi) / i for i in range(1, nb_class + 1)][::-1]
        elif "equal_interval" in disc_func:
            _bin = np.nanmax(zi) / nb_class
            levels = [_min] + [_bin * i for i in range(1, nb_class+1)]
        elif "percentiles" in disc_func:
            # Percentiles computed on the non-zero values only (+ the min):
            levels = np.percentile(
                np.concatenate((zi[zi.nonzero()], np.array([_min]))),
                np.linspace(0.0, 100.0, nb_class+1))
        elif "jenks" in disc_func:
            levels = list(jenks_breaks(np.concatenate(
                ([_min], zi[zi.nonzero()])), nb_class))
            # Nudge the first break below the minimum to include it:
            levels[0] = levels[0] - _min * 0.01
        elif "head_tail" in disc_func:
            levels = head_tail_breaks(np.concatenate(
                ([_min], zi[zi.nonzero()])))
        elif "maximal_breaks" in disc_func:
            levels = maximal_breaks(np.concatenate(
                ([_min], zi[zi.nonzero()])), nb_class)
        else:
            raise ValueError
        return levels
    def render(self, nb_class=8, disc_func=None, user_defined_breaks=None,
               output="GeoJSON", new_mask=False):
        """
        Parameters
        ----------
        nb_class : int, optionnal
            The number of class (default: 8).
        disc_func : str, optionnal
            The kind of data classification to be used (to be choosed in
            "equal_interval", "jenks", "percentiles, "head_tail_breaks"
            and "prog_geom"), default: None.
        user_defined_breaks : list or tuple, optionnal
            A list of ordered break to use to construct the contours
            (override `nb_class` and `disc_func` values if any)
            (default: None).
        output : string, optionnal
            The type of output expected (not case-sensitive)
            in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON").
        new_mask : str, optionnal
            Use a new mask by giving the path to the file (Polygons only)
            to use as clipping mask, can also be directly a GeoDataFrame
            (default: False).

        Returns
        -------
        smoothed_result : bytes or GeoDataFrame
            The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame.
        """
        if disc_func and 'jenks' in disc_func and not jenks_breaks:
            raise ValueError(
                "Missing jenkspy package - could not use jenks breaks")
        zi = self.zi
        if isinstance(new_mask, (type(False), type(None))):
            if not self.use_mask:
                self.use_mask = False
                self.mask = None
        else:
            self.open_mask(new_mask, None)
        # We want levels with the first break value as the minimum of the
        # interpolated values and the last break value as the maximum of theses
        # values:
        if user_defined_breaks:
            levels = user_defined_breaks
            if levels[len(levels) - 1] < np.nanmax(zi):
                levels = levels + [np.nanmax(zi)]
            if levels[0] > np.nanmin(zi):
                levels = [np.nanmin(zi)] + levels
        else:
            levels = self.define_levels(nb_class, disc_func)
        # Ensure that the levels are unique/increasing
        # to avoid error from `contourf` :
        s_levels = set(levels)
        if len(s_levels) != len(levels):
            levels = list(s_levels)
        levels.sort()
        try:
            collec_poly = contourf(
                self.XI, self.YI,
                zi.reshape(tuple(reversed(self.shape))).T,
                levels,
                vmax=abs(np.nanmax(zi)), vmin=-abs(np.nanmin(zi)))
        # Retry without setting the levels :
        except ValueError:
            collec_poly = contourf(
                self.XI, self.YI,
                zi.reshape(tuple(reversed(self.shape))).T,
                vmax=abs(np.nanmax(zi)), vmin=-abs(np.nanmin(zi)))
        # Fetch the levels returned by contourf:
        levels = collec_poly.levels
        # Set the maximum value at the maximum value of the interpolated values:
        levels[-1] = np.nanmax(zi)
        # Transform contourf contours into a GeoDataFrame of (Multi)Polygons:
        res = isopoly_to_gdf(collec_poly, levels=levels[1:], field_name="max")
        if self.longlat:
            # Convert the coordinates back from radians to degrees:
            def f(x, y, z=None):
                return (x / 0.017453292519943295,
                        y / 0.017453292519943295)
            res.geometry = [transform(f, g) for g in res.geometry]
        res.crs = self.proj_to_use
        # Set the min/max/center values of each class as properties
        # if this contour layer:
        res["min"] = [np.nanmin(zi)] + res["max"][0:len(res)-1].tolist()
        res["center"] = (res["min"] + res["max"]) / 2
        # Compute the intersection between the contour layer and the mask layer:
        ix_max_ft = len(res) - 1
        if self.use_mask:
            res.loc[0:ix_max_ft, "geometry"] = res.geometry.buffer(
                0).intersection(unary_union(self.mask.geometry.buffer(0)))
            # res.loc[0:ix_max_ft, "geometry"] = res.geometry.buffer(
            #     0).intersection(self.poly_max_extend.buffer(-0.1))
        # Repair geometries if necessary :
        if not all(t in ("MultiPolygon", "Polygon") for t in res.geom_type):
            res.loc[0:ix_max_ft, "geometry"] = \
                [geom if geom.type in ("Polygon", "MultiPolygon")
                 else MultiPolygon(
                     [j for j in geom if j.type in ('Polygon', 'MultiPolygon')]
                     )
                 for geom in res.geometry]
        if "geojson" in output.lower():
            return res.to_crs({"init": "epsg:4326"}).to_json().encode()
        else:
            return res
class SmoothStewart(BaseSmooth):
    """
    Main object, allowing to create an instance with some required parameters
    (span, beta, etc.) then render the contour polygons according to various
    parameters (data classification, number of bins, output format, etc.)

    Parameters
    ----------
    input_layer : str
        Path to file to use as input (Points/Polygons) or GeoDataFrame object,
        must contain a relevant numerical field.
    variable_name : str
        The name of the variable to use (numerical field only).
    span : int
        The span of the interaction function (same unit as the distances
        computed from the layer).
    beta : float
        The beta parameter (exponent) of the interaction function.
    typefct : str, optional
        The type of function in {"exponential", "pareto"} (default: "exponential").
    nb_pts : int, optional
        The resolution to use (in number of points). Can be overridden by the
        'resolution' parameter if set.
    resolution : int, optional
        The resolution to use (in unit of the input file).
    mask : str, optional
        Path to the file (Polygons only) to use as clipping mask (default: None).
    variable_name2 : str, optional
        The name of the 2nd variable to use (numerical field only); values
        computed from this variable will be used to divide
        values computed from the first variable (default: None)

    Attributes
    ----------
    zi : numpy.ndarray
        The computed potential values for each `unknownpts`.

    Methods
    -------
    render(nb_class=8, disc_func=None, user_defined_breaks=None,
           output="GeoJSON", new_mask=False)
        Render the contour polygon according to the chosen number of class and
        the chosen classification method (or according to
        `user_defined_breaks` which will overwrite these parameters)
    """
    def __init__(self, input_layer, variable_name, span, beta,
                 typefct='exponential', nb_pts=10000,
                 resolution=None, variable_name2=None, mask=None, **kwargs):
        # Optional cap on len(input) * grid size (raises in compute_zi):
        self.sizelimit = kwargs.get('sizelimit', float('infinity'))
        # 'distGeo' takes precedence over the 'longlat' kwarg:
        self.longlat = kwargs.get("distGeo", kwargs.get("longlat", True))
        # Geographic coordinates -> WGS84; otherwise a user-given or
        # default Robinson projection:
        self.proj_to_use = {'init': 'epsg:4326'} if self.longlat \
            else kwargs.get("projDistance", None) \
            or ("""+proj=robin +lon_0=0 +x_0=0 +y_0=0 """
                """+ellps=WGS84 +datum=WGS84 +units=m +no_defs""")
        self.gdf = input_layer.copy() if isinstance(input_layer, GeoDataFrame) \
            else GeoDataFrame.from_file(input_layer)
        if self.gdf.crs and self.gdf.crs is not self.proj_to_use:
            self.gdf.to_crs(self.proj_to_use, inplace=True)
        else:
            self.gdf.crs = self.proj_to_use
        self.info = (
            'SmoothStewart - variable : {}{} ({} features)\n'
            'beta : {} - span : {} - function : {}'
            ).format(variable_name,
                     " / {}".format(variable_name2) if variable_name2 else "",
                     len(self.gdf), beta, span, typefct)
        if mask is not None:
            self.open_mask(mask, input_layer)
        else:
            self.use_mask = False
        self.info2 = ""
        self.info3 = "Clipping mask: {}".format(self.use_mask)
        # Don't use features with missing values:
        self.filter_missing_values(variable_name, variable_name2)
        # Calculate the value for each unknown points of the grid:
        self.compute_zi(variable_name, span, beta,
                        variable_name2=variable_name2,
                        nb_pts=nb_pts,
                        resolution=resolution,
                        typefct=typefct)
    @staticmethod
    def _compute_interact_density(matdist, typefun, beta, span):
        """Turn a distance matrix into an interaction-density matrix,
        using a pareto or an exponential decay function."""
        if 'pareto' in typefun:
            alpha = (2.0 ** (1.0 / beta) - 1.0) / span
            return (1 + alpha * matdist) ** (-beta)
        elif 'exponential' in typefun:
            alpha = np.log(2) / span ** beta
            return np.exp(- alpha * matdist ** beta)
        else:
            raise ValueError('Bad interaction function argument: {}'
                             .format(typefun))
    def compute_zi(self, variable_name, span, beta,
                   nb_pts, resolution=None, typefct="exponential",
                   variable_name2=None):
        """Compute the potential values on a regular grid into `self.zi`."""
        knownpts = self.gdf
        if self.use_mask:
            bounds = self.mask.total_bounds
        else:
            bounds = knownpts.total_bounds
        if self.longlat:
            # Convert bounds from degrees to radians for hav_dist:
            bounds = list(map(lambda x : x * np.pi / 180, bounds))
        # Get the x and y axis of the grid:
        self.XI, self.YI, self.shape = make_regular_points(bounds, resolution) \
            if resolution else make_regular_points_with_no_res(bounds, nb_pts)
        # Verify that the size of the matrix doesn't exceed the sizelimit value if any:
        if len(knownpts) * self.shape[0] * self.shape[1] > self.sizelimit:
            raise ValueError('Too high resolution or to many input points')
        # Compute the coordinates of each point of the grid :
        # (x-major order: all y values for the first x, then the next x, etc.)
        unknownpts = np.array([(x, y) for x in self.XI for y in self.YI])
        # Use the centroid if the feature is a Polygon
        # or use the centroid of the largest Polygon for a MultiPolygon:
        if all(i in ("Polygon", "Point") for i in knownpts.geom_type.values):
            centroids = knownpts.geometry.centroid
        else:
            centroids = _compute_centroids(knownpts.geometry)
        # Coordinates of every known point:
        knwpts_coords = np.array([
            (g.coords.xy[0][0], g.coords.xy[1][0])
            for g in centroids])
        if self.longlat:
            # Degrees to radians, to match the (already converted) grid:
            knwpts_coords *= np.pi / 180
        # Compute the interaction matrix:
        mat_dens = self._compute_interact_density(
            make_dist_mat(knwpts_coords, unknownpts, longlat=self.longlat),
            typefct, beta, span)
        if not variable_name2:
            self.zi = (
                knownpts[variable_name].values[:, np.newaxis] * mat_dens
                ).sum(axis=0).round(8)
        else:
            # Ratio of the two potentials (variable 1 / variable 2):
            self.zi1 = (
                knownpts[variable_name].values[:, np.newaxis] * mat_dens
                ).sum(axis=0)
            self.zi2 = (
                knownpts[variable_name2].values[:, np.newaxis] * mat_dens
                ).sum(axis=0)
            self.zi = (np.true_divide(self.zi1, self.zi2)).round(8)
        # Replace NaN values by -1.0 :
        self.zi[np.argwhere(np.isnan(self.zi)).reshape(-1)] = -1.0
        # Replace inf values by -1.0 :
        self.zi[np.argwhere(np.isinf(self.zi)).reshape(-1)] = -1.0
        self.info2 = ("unknown points : {} - interpolation grid shape : {}"
                      ).format(len(unknownpts), self.shape)
class SmoothIdw(BaseSmooth):
    """
    Main object, allowing to create an instance with the appropriate power
    parameter then render the contour polygons according to various parameters
    (data classification, number of bins, output format, etc.)

    Parameters
    ----------
    input_layer : str
        Path to file to use as input (Points/Polygons) or GeoDataFrame object,
        must contain a relevant numerical field.
    variable_name : str
        The name of the variable to use (numerical field only).
    power : float
        The power parameter of the IDW weighting function, as defined by Shepard.
    nb_pts : int, optional
        The resolution to use (in number of points). Can be overridden by the
        'resolution' parameter if set.
    resolution : int, optional
        The resolution to use (in unit of the input file).
    mask : str, optional
        Path to the file (Polygons only) to use as clipping mask (default: None).
    variable_name2 : str, optional
        The name of the 2nd variable to use (numerical field only); values
        computed from this variable will be used to divide
        values computed from the first variable (default: None)

    Attributes
    ----------
    zi : numpy.ndarray
        The interpolated values (for each `unknownpts`).

    Methods
    -------
    render(nb_class=8, disc_func=None, user_defined_breaks=None,
           output="GeoJSON", new_mask=False)
        Render the contour polygon according to the chosen number of class and
        the chosen classification method (or according to
        `user_defined_breaks` which will overwrite these parameters)
    """
    def __init__(self, input_layer, variable_name, power, nb_pts=10000,
                 resolution=None, variable_name2=None, mask=None, **kwargs):
        # Optional cap on len(input) * grid size (raises in compute_zi):
        self.sizelimit = kwargs.get('sizelimit', float('infinity'))
        # 'distGeo' takes precedence over the 'longlat' kwarg:
        self.longlat = kwargs.get("distGeo", kwargs.get("longlat", True))
        # Geographic coordinates -> WGS84; otherwise a user-given or
        # default Robinson projection:
        self.proj_to_use = {'init': 'epsg:4326'} if self.longlat \
            else kwargs.get("projDistance", None) \
            or ("""+proj=robin +lon_0=0 +x_0=0 +y_0=0 """
                """+ellps=WGS84 +datum=WGS84 +units=m +no_defs""")
        self.gdf = input_layer.copy() if isinstance(input_layer, GeoDataFrame) \
            else GeoDataFrame.from_file(input_layer)
        if self.gdf.crs and self.gdf.crs is not self.proj_to_use:
            self.gdf.to_crs(self.proj_to_use, inplace=True)
        else:
            self.gdf.crs = self.proj_to_use
        self.info = (
            'SmoothIdw - variable : {}{} ({} features)\n'
            ).format(variable_name,
                     " / {}".format(variable_name2) if variable_name2 else "",
                     len(self.gdf))
        if mask is not None:
            self.open_mask(mask, input_layer)
        else:
            self.use_mask = False
        self.info2 = ""
        self.info3 = "Clipping mask: {}".format(self.use_mask)
        # Don't use features with missing values:
        self.filter_missing_values(variable_name, variable_name2)
        # Calculate the value for each unknown points of the grid:
        self.compute_zi(variable_name,
                        power,
                        nb_pts=nb_pts,
                        resolution=resolution,
                        variable_name2=variable_name2)
    def compute_zi(self, variable_name, power,
                   nb_pts, resolution=None, variable_name2=None):
        """Compute the IDW-interpolated values on a regular grid into `self.zi`.

        NOTE(review): `variable_name2` is accepted here but never used,
        unlike SmoothStewart.compute_zi — the ratio (two-variable) variant
        appears unimplemented for IDW; confirm intended behavior.
        """
        knownpts = self.gdf
        if self.use_mask:
            bounds = self.mask.total_bounds
        else:
            bounds = knownpts.total_bounds
        if self.longlat:
            # Convert bounds from degrees to radians for hav_dist:
            bounds = list(map(lambda x : x * np.pi / 180, bounds))
        # Get the x and y axis of the grid:
        self.XI, self.YI, self.shape = make_regular_points(bounds, resolution) \
            if resolution else make_regular_points_with_no_res(bounds, nb_pts)
        # Verify that the size of the matrix doesn't exceed the sizelimit value if any:
        if len(knownpts) * self.shape[0] * self.shape[1] > self.sizelimit:
            raise ValueError('Too high resolution or to many input points')
        # Compute the coordinates of each point of the grid :
        unknownpts = np.array([(x, y) for x in self.XI for y in self.YI])
        # Use the centroid if the feature is a Polygon
        # or use the centroid of the largest Polygon for a MultiPolygon:
        if all(i in ("Polygon", "Point") for i in knownpts.geom_type.values):
            centroids = knownpts.geometry.centroid
        else:
            centroids = _compute_centroids(knownpts.geometry)
        # Coordinates of every known point:
        knwpts_coords = np.array([
            (g.coords.xy[0][0], g.coords.xy[1][0])
            for g in centroids])
        if self.longlat:
            # Degrees to radians, to match the (already converted) grid:
            knwpts_coords *= np.pi / 180
        # Inverse-distance weights; a grid point coinciding with an input
        # point presumably yields an infinite weight here — the resulting
        # NaN/inf values are clamped to -1.0 below (TODO confirm).
        mat_weights = 1 / np.power(
            make_dist_mat(knwpts_coords, unknownpts, longlat=self.longlat),
            power)
        # Make weights sum to one
        mat_weights /= mat_weights.sum(axis=0)
        # Multiply the weights for each interpolated point by all observed Z-values
        # NOTE(review): unlike SmoothStewart, the result keeps a trailing
        # singleton dimension ((n, 1) rather than (n,)) and is not rounded;
        # render() reshapes it anyway — confirm this is intentional.
        self.zi = np.dot(mat_weights.T, knownpts[variable_name].values[:, np.newaxis])
        # Replace NaN values by -1.0 :
        self.zi[np.argwhere(np.isnan(self.zi)).reshape(-1)] = -1.0
        # Replace inf values by -1.0 :
        self.zi[np.argwhere(np.isinf(self.zi)).reshape(-1)] = -1.0
        self.info2 = ("unknown points : {} - interpolation grid shape : {}"
                      ).format(len(unknownpts), self.shape)
| [
"numpy.arccos",
"numpy.log",
"numpy.array",
"shapely.geometry.Polygon",
"numpy.nanmin",
"geopandas.GeoDataFrame.from_file",
"numpy.exp",
"numpy.linspace",
"numpy.dot",
"numpy.nanmax",
"numpy.hypot",
"numpy.isinf",
"geopandas.GeoDataFrame",
"shapely.ops.transform",
"numpy.argmax",
"nump... | [((618, 635), 'shapely.speedups.enable', 'speedups.enable', ([], {}), '()\n', (633, 635), False, 'from shapely import speedups\n'), ((11460, 11481), 'numpy.cos', 'np.cos', (['locs1[..., 0]'], {}), '(locs1[..., 0])\n', (11466, 11481), True, 'import numpy as np\n'), ((11497, 11518), 'numpy.cos', 'np.cos', (['locs2[..., 0]'], {}), '(locs2[..., 0])\n', (11503, 11518), True, 'import numpy as np\n'), ((11535, 11572), 'numpy.cos', 'np.cos', (['(locs1[..., 0] - locs2[..., 0])'], {}), '(locs1[..., 0] - locs2[..., 0])\n', (11541, 11572), True, 'import numpy as np\n'), ((11589, 11626), 'numpy.cos', 'np.cos', (['(locs1[..., 1] - locs2[..., 1])'], {}), '(locs1[..., 1] - locs2[..., 1])\n', (11595, 11626), True, 'import numpy as np\n'), ((13266, 13330), 'geopandas.GeoDataFrame', 'GeoDataFrame', ([], {'geometry': 'polygons', 'data': 'data', 'columns': '[field_name]'}), '(geometry=polygons, data=data, columns=[field_name])\n', (13278, 13330), False, 'from geopandas import GeoDataFrame\n'), ((7848, 7881), 'numpy.linspace', 'np.linspace', (['minlon', 'maxlon', 'nb_x'], {}), '(minlon, maxlon, nb_x)\n', (7859, 7881), True, 'import numpy as np\n'), ((7891, 7924), 'numpy.linspace', 'np.linspace', (['minlat', 'maxlat', 'nb_y'], {}), '(minlat, maxlat, nb_y)\n', (7902, 7924), True, 'import numpy as np\n'), ((9619, 9652), 'numpy.linspace', 'np.linspace', (['minlon', 'maxlon', 'nb_x'], {}), '(minlon, maxlon, nb_x)\n', (9630, 9652), True, 'import numpy as np\n'), ((9662, 9695), 'numpy.linspace', 'np.linspace', (['minlat', 'maxlat', 'nb_y'], {}), '(minlat, maxlat, nb_y)\n', (9673, 9695), True, 'import numpy as np\n'), ((10753, 10792), 'numpy.subtract.outer', 'np.subtract.outer', (['xy1[:, 0]', 'xy2[:, 0]'], {}), '(xy1[:, 0], xy2[:, 0])\n', (10770, 10792), True, 'import numpy as np\n'), ((10806, 10845), 'numpy.subtract.outer', 'np.subtract.outer', (['xy1[:, 1]', 'xy2[:, 1]'], {}), '(xy1[:, 1], xy2[:, 1])\n', (10823, 10845), True, 'import numpy as np\n'), ((10861, 10877), 
'numpy.hypot', 'np.hypot', (['d0', 'd1'], {}), '(d0, d1)\n', (10869, 10877), True, 'import numpy as np\n'), ((11648, 11708), 'numpy.arccos', 'np.arccos', (['(cos_lat_d - cos_lat1 * cos_lat2 * (1 - cos_lon_d))'], {}), '(cos_lat_d - cos_lat1 * cos_lat2 * (1 - cos_lon_d))\n', (11657, 11708), True, 'import numpy as np\n'), ((15954, 15967), 'numpy.nanmin', 'np.nanmin', (['zi'], {}), '(zi)\n', (15963, 15967), True, 'import numpy as np\n'), ((20242, 20255), 'numpy.nanmax', 'np.nanmax', (['zi'], {}), '(zi)\n', (20251, 20255), True, 'import numpy as np\n'), ((26944, 26996), 'numpy.array', 'np.array', (['[(x, y) for x in self.XI for y in self.YI]'], {}), '([(x, y) for x in self.XI for y in self.YI])\n', (26952, 26996), True, 'import numpy as np\n'), ((27401, 27470), 'numpy.array', 'np.array', (['[(g.coords.xy[0][0], g.coords.xy[1][0]) for g in centroids]'], {}), '([(g.coords.xy[0][0], g.coords.xy[1][0]) for g in centroids])\n', (27409, 27470), True, 'import numpy as np\n'), ((32916, 32968), 'numpy.array', 'np.array', (['[(x, y) for x in self.XI for y in self.YI]'], {}), '([(x, y) for x in self.XI for y in self.YI])\n', (32924, 32968), True, 'import numpy as np\n'), ((33373, 33442), 'numpy.array', 'np.array', (['[(g.coords.xy[0][0], g.coords.xy[1][0]) for g in centroids]'], {}), '([(g.coords.xy[0][0], g.coords.xy[1][0]) for g in centroids])\n', (33381, 33442), True, 'import numpy as np\n'), ((33852, 33920), 'numpy.dot', 'np.dot', (['mat_weights.T', 'knownpts[variable_name].values[:, np.newaxis]'], {}), '(mat_weights.T, knownpts[variable_name].values[:, np.newaxis])\n', (33858, 33920), True, 'import numpy as np\n'), ((8809, 8850), 'numpy.array', 'np.array', (['[(maxlon + minlon) / 2, minlat]'], {}), '([(maxlon + minlon) / 2, minlat])\n', (8817, 8850), True, 'import numpy as np\n'), ((8868, 8909), 'numpy.array', 'np.array', (['[(maxlon + minlon) / 2, maxlat]'], {}), '([(maxlon + minlon) / 2, maxlat])\n', (8876, 8909), True, 'import numpy as np\n'), ((8970, 9011), 'numpy.array', 
'np.array', (['[minlon, (maxlat + minlat) / 2]'], {}), '([minlon, (maxlat + minlat) / 2])\n', (8978, 9011), True, 'import numpy as np\n'), ((9029, 9070), 'numpy.array', 'np.array', (['[maxlon, (maxlat + minlat) / 2]'], {}), '([maxlon, (maxlat + minlat) / 2])\n', (9037, 9070), True, 'import numpy as np\n'), ((9869, 9902), 'numpy.argmax', 'np.argmax', (['[g.area for g in geom]'], {}), '([g.area for g in geom])\n', (9878, 9902), True, 'import numpy as np\n'), ((13061, 13080), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (['mpoly'], {}), '(mpoly)\n', (13073, 13080), False, 'from shapely.geometry import Polygon, MultiPolygon\n'), ((24351, 24386), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['input_layer'], {}), '(input_layer)\n', (24373, 24386), False, 'from geopandas import GeoDataFrame\n'), ((30964, 30999), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['input_layer'], {}), '(input_layer)\n', (30986, 30999), False, 'from geopandas import GeoDataFrame\n'), ((9144, 9185), 'numpy.array', 'np.array', (['[(maxlon + minlon) / 2, minlat]'], {}), '([(maxlon + minlon) / 2, minlat])\n', (9152, 9185), True, 'import numpy as np\n'), ((9200, 9241), 'numpy.array', 'np.array', (['[(maxlon + minlon) / 2, maxlat]'], {}), '([(maxlon + minlon) / 2, maxlat])\n', (9208, 9241), True, 'import numpy as np\n'), ((9287, 9328), 'numpy.array', 'np.array', (['[minlon, (maxlat + minlat) / 2]'], {}), '([minlon, (maxlat + minlat) / 2])\n', (9295, 9328), True, 'import numpy as np\n'), ((9343, 9384), 'numpy.array', 'np.array', (['[maxlon, (maxlat + minlat) / 2]'], {}), '([maxlon, (maxlat + minlat) / 2])\n', (9351, 9384), True, 'import numpy as np\n'), ((12988, 13012), 'shapely.geometry.Polygon', 'Polygon', (['exterior', 'holes'], {}), '(exterior, holes)\n', (12995, 13012), False, 'from shapely.geometry import Polygon, MultiPolygon\n'), ((14028, 14056), 'geopandas.GeoDataFrame.from_file', 'GeoDataFrame.from_file', (['mask'], {}), '(mask)\n', (14050, 14056), 
False, 'from geopandas import GeoDataFrame\n'), ((19090, 19103), 'numpy.nanmax', 'np.nanmax', (['zi'], {}), '(zi)\n', (19099, 19103), True, 'import numpy as np\n'), ((19182, 19195), 'numpy.nanmin', 'np.nanmin', (['zi'], {}), '(zi)\n', (19191, 19195), True, 'import numpy as np\n'), ((20600, 20615), 'shapely.ops.transform', 'transform', (['f', 'g'], {}), '(f, g)\n', (20609, 20615), False, 'from shapely.ops import unary_union, transform\n'), ((20798, 20811), 'numpy.nanmin', 'np.nanmin', (['zi'], {}), '(zi)\n', (20807, 20811), True, 'import numpy as np\n'), ((25841, 25873), 'numpy.exp', 'np.exp', (['(-alpha * matdist ** beta)'], {}), '(-alpha * matdist ** beta)\n', (25847, 25873), True, 'import numpy as np\n'), ((16300, 16313), 'numpy.nanmax', 'np.nanmax', (['zi'], {}), '(zi)\n', (16309, 16313), True, 'import numpy as np\n'), ((21577, 21649), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (["[j for j in geom if j.type in ('Polygon', 'MultiPolygon')]"], {}), "([j for j in geom if j.type in ('Polygon', 'MultiPolygon')])\n", (21589, 21649), False, 'from shapely.geometry import Polygon, MultiPolygon\n'), ((25797, 25806), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (25803, 25806), True, 'import numpy as np\n'), ((28236, 28270), 'numpy.true_divide', 'np.true_divide', (['self.zi1', 'self.zi2'], {}), '(self.zi1, self.zi2)\n', (28250, 28270), True, 'import numpy as np\n'), ((16559, 16596), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100.0)', '(nb_class + 1)'], {}), '(0.0, 100.0, nb_class + 1)\n', (16570, 16596), True, 'import numpy as np\n'), ((19140, 19153), 'numpy.nanmax', 'np.nanmax', (['zi'], {}), '(zi)\n', (19149, 19153), True, 'import numpy as np\n'), ((19223, 19236), 'numpy.nanmin', 'np.nanmin', (['zi'], {}), '(zi)\n', (19232, 19236), True, 'import numpy as np\n'), ((19744, 19757), 'numpy.nanmax', 'np.nanmax', (['zi'], {}), '(zi)\n', (19753, 19757), True, 'import numpy as np\n'), ((33989, 34006), 'numpy.isnan', 'np.isnan', (['self.zi'], {}), '(self.zi)\n', (33997, 
34006), True, 'import numpy as np\n'), ((34095, 34112), 'numpy.isinf', 'np.isinf', (['self.zi'], {}), '(self.zi)\n', (34103, 34112), True, 'import numpy as np\n'), ((16180, 16193), 'numpy.nanmax', 'np.nanmax', (['zi'], {}), '(zi)\n', (16189, 16193), True, 'import numpy as np\n'), ((19770, 19783), 'numpy.nanmin', 'np.nanmin', (['zi'], {}), '(zi)\n', (19779, 19783), True, 'import numpy as np\n'), ((20012, 20025), 'numpy.nanmax', 'np.nanmax', (['zi'], {}), '(zi)\n', (20021, 20025), True, 'import numpy as np\n'), ((28357, 28374), 'numpy.isnan', 'np.isnan', (['self.zi'], {}), '(self.zi)\n', (28365, 28374), True, 'import numpy as np\n'), ((28471, 28488), 'numpy.isinf', 'np.isinf', (['self.zi'], {}), '(self.zi)\n', (28479, 28488), True, 'import numpy as np\n'), ((16523, 16539), 'numpy.array', 'np.array', (['[_min]'], {}), '([_min])\n', (16531, 16539), True, 'import numpy as np\n'), ((20038, 20051), 'numpy.nanmin', 'np.nanmin', (['zi'], {}), '(zi)\n', (20047, 20051), True, 'import numpy as np\n')] |
from opengl_gui.widget_frame import WidgetFrame
import numpy
class WidgetBarGraph(WidgetFrame):
    """Bar-graph widget rendered inside a WidgetFrame.

    Reads a 1-D sequence of values from ``self.variables.v[self.variable_name]``
    (``variable_name`` is presumably populated by WidgetFrame from params --
    TODO confirm) and draws one bar per value.  Values are mapped linearly so
    that the configured [min_value, max_value] range covers [0, 0.9 * height].
    """

    def __init__(self, visualisation, variables, textures, params):
        """Parse bar colour, value range and scaling coefficients.

        :param visualisation: rendering backend used for all draw calls
        :param variables: shared variable store read at render time
        :param textures: texture store forwarded to WidgetFrame
        :param params: dict with "bar_color" (RGB triple), "min_value",
            "max_value" and "enlight_max_value" entries
        """
        super().__init__(visualisation, variables, textures, params)
        # bar colour components (red, green, blue)
        self.br = float(params["bar_color"][0])
        self.bg = float(params["bar_color"][1])
        self.bb = float(params["bar_color"][2])
        # displayed value range; out-of-range values are clamped in _convert()
        self.min = float(params["min_value"])
        self.max = float(params["max_value"])
        self.enlight_max_value = bool(params["enlight_max_value"])
        # linear map value -> bar height: [min, max] -> [0, 0.9 * height]
        min_out = 0.0
        max_out = self.height*0.9
        self.k = (max_out-min_out)/(self.max-self.min)
        self.q = max_out - self.k*self.max

    def render(self):
        """Draw the frame, one bar + numeric label per value, then children."""
        if self.visible:
            self.visualisation.push()
            self.visualisation.translate(self.x, self.y, self.z)
            self.render_frame()
            values = numpy.asarray(self.variables.v[self.variable_name])
            count = values.shape[0]
            # BUGFIX: numpy.argmax raises ValueError on an empty array; guard
            # so an empty variable renders as a frame with no bars.
            max_idx = numpy.argmax(values) if count > 0 else -1
            for i in range(count):
                v_raw = values[i]
                value = self._convert(v_raw)
                w = self.width*0.9
                rw = w/count
                rh = value
                x_ = -self.width/2.0 + rw*0.9 + i*rw*1.05
                y_ = -rh/2.0 + self.height/2.0
                self.visualisation.push()
                if self.enlight_max_value and i == max_idx:
                    # highlight the tallest bar (shift towards green)
                    self.visualisation.set_color(self.br*0.5, 1.0, self.bb*0.5)
                else:
                    self.visualisation.set_color(self.br, self.bg, self.bb)
                self.visualisation.translate(x_, -y_, 0)
                self.visualisation.paint_rectangle(rw, rh)
                self.visualisation.pop()
                # numeric label just above the bar
                self.visualisation.push()
                string = self._get_rounded(v_raw, 2)
                self.visualisation.print_string(x_ - rw/4.0, -y_ + rh/2.0 + 0.01, 0, string, self.font_size*0.5)
                self.visualisation.pop()
            for i in range(len(self.child_widgets)):
                self.child_widgets[i].render()
            self.visualisation.pop()

    def _convert(self, value):
        """Clamp ``value`` to [min, max] and map it to a bar height."""
        if value > self.max:
            value = self.max
        if value < self.min:
            value = self.min
        return self.k*value + self.q

    def _get_rounded(self, value, precision):
        """Return ``value`` rounded to ``precision`` decimals as a string."""
        return str(round(value, precision))
| [
"numpy.asarray",
"numpy.argmax"
] | [((916, 967), 'numpy.asarray', 'numpy.asarray', (['self.variables.v[self.variable_name]'], {}), '(self.variables.v[self.variable_name])\n', (929, 967), False, 'import numpy\n'), ((1041, 1061), 'numpy.argmax', 'numpy.argmax', (['values'], {}), '(values)\n', (1053, 1061), False, 'import numpy\n')] |
import numpy as np
from scipy import sparse
# There are several functions in this file named after notation used in the following paper:
# http://mpc.zib.de/index.php/MPC/article/viewFile/40/20
# (Alternating direction augmented Lagrangian methods
# for semidefinite programming)
def plainA(constraints, dimension):
    '''
    Build the plain constraint matrix A from Wen et al.'s paper.
    Parameters:
        constraints : list of constraint dictionaries with "V", "I", "J"
                      entries (values, row indices, column indices)
        dimension : dimension of X (and of C)
    Returns:
        A matrix of shape (len(constraints), dimension**2) whose rows are
        the flattened constraint matrices.
    '''
    # One column per constraint; each constraint matrix is flattened
    # column-major via the index j * dimension + i.
    A = np.zeros((dimension ** 2, len(constraints)), dtype=np.float32)
    for col, constraint in enumerate(constraints):
        for v, i, j in zip(constraint["V"], constraint["I"], constraint["J"]):
            A[j * dimension + i, col] = v
    return A.T
def scriptA(constraints, X):
    '''
    Apply the linear operator script-A from Wen et al.'s paper (eq. 2).
    Parameters:
        constraints : list of constraint dictionaries with "V", "I", "J"
        X : current solution matrix of the SDP
    Returns:
        Column vector (np.matrix) of trace(A_k @ X) for each constraint k.
    '''
    traces = []
    for constraint in constraints:
        # trace(A_k X) over the sparse entries of the constraint matrix
        entries = zip(constraint["V"], constraint["I"], constraint["J"])
        traces.append(sum(X[i, j] * v for v, i, j in entries))
    return np.matrix(traces, dtype=np.float32).T
def scriptAStar(A, y):
    '''
    Apply the adjoint operator script-A* from Wen et al.'s paper (p. 206).
    Parameters:
        A : result of plainA, shape (num_constraints, dimension**2)
        y : current dual vector y (eq. 7)
    Returns:
        The vector A.T @ y reshaped to a (dimension, dimension) matrix.
    '''
    flat = A.T.dot(y)
    side = int(np.sqrt(len(flat)))
    return flat.reshape((side, side))
def nextY(S, X, C, b, mu, pinvAAt, constraints):
    '''
    Compute the next dual iterate y (Wen et al., eq. 6a).
    Parameters:
        S : matrix S (eq. 6b)
        X : current solution matrix (eq. 6c)
        C : objective function matrix (eq. 1)
        b : constraint vector b (eq. 1)
        mu : step-size parameter
        pinvAAt : pseudo-inverse of A @ A.T
        constraints : list of constraint dictionaries
    Returns:
        The updated y vector.
    '''
    # Primal-residual term, scaled by mu
    residual_term = mu * (scriptA(constraints=constraints, X=X) - b)
    # Dual-feasibility term
    dual_term = scriptA(constraints=constraints, X=S - C)
    return (-1 * pinvAAt).dot(residual_term + dual_term)
def decomposeV(V):
    '''
    Eigendecompose the symmetric matrix V and keep its positive part.
    Parameters:
        V : symmetric matrix V as described in Wen et al.'s paper
    Returns:
        sigma_plus : diagonal matrix of the non-negative eigenvalues of V
                     (sorted descending)
        Q_plus : matching eigenvector columns of V
    '''
    eigvals, eigvecs = np.linalg.eigh(V)
    # eigh returns ascending order; re-sort descending
    order = np.argsort(-eigvals)
    sigma = np.diag(eigvals[order])
    Q = eigvecs[:, order]
    n_nonneg = sum(eigvals >= 0)  # count of non-negative eigenvalues
    sigma_plus = sigma[:n_nonneg, :n_nonneg]
    Q_plus = Q[:, :n_nonneg]  # columns for the non-negative eigenvalues
    return sigma_plus, Q_plus
def nextV(C, A, mu, X, y):
    '''
    Compute the next V iterate (Wen et al.).
    Parameters:
        C : objective function matrix (eq. 1)
        A : result of plainA, consumed by scriptAStar
        mu : step-size parameter
        X : current solution matrix (eq. 6c)
        y : current dual vector y (eq. 7)
    Returns:
        V = C - A*(y) - mu * X
    '''
    adjoint = scriptAStar(A=A, y=y)
    return C - adjoint - mu * X
def nextS(V):
    '''
    Project V onto the positive semidefinite cone to obtain S.
    Parameters:
        V : matrix V as described in Wen et al.'s paper
    Returns:
        S = Q+ Sigma+ Q+.T built from the positive part of V.
    '''
    sigma_plus, Q_plus = decomposeV(V)
    return np.matmul(np.matmul(Q_plus, sigma_plus), Q_plus.T)
def nextX(mu, S, V):
    '''
    Compute the next primal iterate X (Wen et al., eq. 6c).
    Parameters:
        mu : step-size parameter
        S : matrix S (eq. 6b)
        V : matrix V as described in Wen et al.'s paper
    Returns:
        X = (1/mu) * (S - V)
    '''
    step = 1 / mu
    return step * (S - V)
def simplifyX(X, tolerance=1e-5):
    '''
    Snap near-0 and near-1 entries of X to exactly 0 and 1, in place.
    Parameters:
        X : current solution matrix of the SDP (eq. 6c)
        tolerance : absolute distance within which a value is snapped
    Returns:
        X : the same matrix with snapped entries.
    '''
    shape = X.shape
    # Snap to 0 first, then to 1 (the 0-pass cannot create near-1 entries).
    # The explicit dense subtraction keeps sparse inputs working unchanged.
    for target in (0, 1):
        mask = np.absolute(X - target * np.ones(shape, dtype=np.float32)) < tolerance
        X[mask] = target
    return X
def solveSDP(constraints, b, C, accuracy=1e-5, mu=1, min_iterations=68, max_iterations=421):
    '''
    Solve the SDP via the alternating-direction augmented Lagrangian method.
    Parameters:
        constraints : list of constraint dictionaries (see constraints.py)
        b : constraint vector b (eq. 1)
        C : objective function matrix (eq. 1)
        accuracy : stop once X[-1, -1] changes by less than this per iteration
        mu : step-size parameter
        min_iterations : iterations to run before the stop test may trigger
        max_iterations : hard cap on iterations
    Returns:
        X : the solution to the specified SDP
        iteration : number of iterations performed
    '''
    shape = C.shape
    # Initialise S = I and X = 0, stored sparse
    S = sparse.csr_matrix(np.eye(shape[0], dtype=np.float32))
    X = sparse.csr_matrix(np.zeros(shape, dtype=np.float32))
    old_z = X[-1, -1]
    A = plainA(constraints=constraints, dimension=shape[0])
    # Pseudo-inverse of A A^T is constant; compute once for every nextY call
    pinvAAt = sparse.csr_matrix(np.linalg.pinv(np.matmul(A, A.T)))
    A = sparse.csr_matrix(A)
    for iteration in range(max_iterations):
        y = nextY(S=S, X=X, C=C, b=b, mu=mu, pinvAAt=pinvAAt, constraints=constraints)
        V = nextV(C=C, A=A, mu=mu, X=X, y=y)
        S = nextS(V)
        X = nextX(mu=mu, S=S, V=V)
        X = simplifyX(X=X)
        # Stop once the objective entry X[-1, -1] has stabilised
        if np.absolute(X[-1, -1] - old_z) < accuracy and iteration > min_iterations:
            break
        old_z = X[-1, -1]
    return X, iteration
| [
"numpy.eye",
"numpy.ones",
"numpy.absolute",
"numpy.diag",
"numpy.zeros",
"numpy.matmul",
"numpy.linalg.eigh",
"scipy.sparse.csr_matrix",
"numpy.matrix"
] | [((3285, 3302), 'numpy.linalg.eigh', 'np.linalg.eigh', (['V'], {}), '(V)\n', (3299, 3302), True, 'import numpy as np\n'), ((3415, 3448), 'numpy.diag', 'np.diag', (['unordered_vals[ordering]'], {}), '(unordered_vals[ordering])\n', (3422, 3448), True, 'import numpy as np\n'), ((4471, 4500), 'numpy.matmul', 'np.matmul', (['Q_plus', 'sigma_plus'], {}), '(Q_plus, sigma_plus)\n', (4480, 4500), True, 'import numpy as np\n'), ((4512, 4546), 'numpy.matmul', 'np.matmul', (['first_product', 'Q_plus.T'], {}), '(first_product, Q_plus.T)\n', (4521, 4546), True, 'import numpy as np\n'), ((6592, 6612), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['A'], {}), '(A)\n', (6609, 6612), False, 'from scipy import sparse\n'), ((1772, 1807), 'numpy.matrix', 'np.matrix', (['traces'], {'dtype': 'np.float32'}), '(traces, dtype=np.float32)\n', (1781, 1807), True, 'import numpy as np\n'), ((6314, 6356), 'numpy.eye', 'np.eye', (['initial_shape[0]'], {'dtype': 'np.float32'}), '(initial_shape[0], dtype=np.float32)\n', (6320, 6356), True, 'import numpy as np\n'), ((6384, 6425), 'numpy.zeros', 'np.zeros', (['initial_shape'], {'dtype': 'np.float32'}), '(initial_shape, dtype=np.float32)\n', (6392, 6425), True, 'import numpy as np\n'), ((6564, 6581), 'numpy.matmul', 'np.matmul', (['A', 'A.T'], {}), '(A, A.T)\n', (6573, 6581), True, 'import numpy as np\n'), ((5199, 5240), 'numpy.zeros', 'np.zeros', (['initial_shape'], {'dtype': 'np.float32'}), '(initial_shape, dtype=np.float32)\n', (5207, 5240), True, 'import numpy as np\n'), ((5299, 5339), 'numpy.ones', 'np.ones', (['initial_shape'], {'dtype': 'np.float32'}), '(initial_shape, dtype=np.float32)\n', (5306, 5339), True, 'import numpy as np\n'), ((6997, 7027), 'numpy.absolute', 'np.absolute', (['(X[-1, -1] - old_z)'], {}), '(X[-1, -1] - old_z)\n', (7008, 7027), True, 'import numpy as np\n')] |
'''
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
SALT RSS Image Convolution
This program convolves 2 SALT RSS spectra images to the lowest resolution
of the two.
The files required are:
(1) The 2D spectrum FITS file of the first source.
(2) The 2D ARC FITS file of the first source.
(3) The 2D spectrum FITS file of the second source.
(4) The 2D ARC FITS file of the second source.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
'''
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Import Libraries
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
import os # For bash commands
from scipy.ndimage import gaussian_filter # For gaussian filtering
from astropy.io import fits as fits # For FITS file handling
import numpy as np # For array handling
from scipy.optimize import curve_fit # For curve fitting
from pyraf import iraf # For IRAF commands
from pathlib import Path # To extract filenames
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Load IRAF Libraries
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
iraf.images()
iraf.images.imutil()
iraf.images.imgeom()
iraf.images.immatch()
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Read FITS file
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
def read_FITS(file, centre):
    '''
    Opens the given FITS file and extracts the central spectrum of the astronomical source.
    :param file [String]: File name of FITS file to be opened.
    :param centre [Float]: Number of the central pixel of the astronomical source on the FITS image.
    :return: wave [Float array]: Wavelength.
    :return: flux [Float array]: Flux at current wavelength.
    '''
    # Set centre pixel
    centre = int(float(centre))
    # Get basename of file
    basename = Path(file).stem
    # Temporary single-row FITS file holding the extracted central aperture
    tmp = '{}_tmp.fits'.format(basename)
    # BUGFIX: previously tested the literal filename 'tmp_ctr.fits' instead of
    # the temp file actually written below, so a stale temp file was never
    # removed and iraf's blkavg would fail on an existing output file.
    if os.path.isfile(tmp):
        iraf.images.imutil.imdelete(images=tmp, verify='No', mode='ql')
    # Extract central aperture of the FITS file
    iraf.images.imgeom.blkavg(input='{}[*,{}]'.format(file, centre), output=tmp, option='average',
                              b1=1, b2=1, b3=1, b4=1, b5=1, b6=1, b7=1, mode='ql')
    # Open FITS file and extract wavelength & flux
    hdu = fits.open(tmp)
    hdr = hdu[0].header
    flux = hdu[0].data
    flux = np.array(flux, dtype=np.float64)
    start_wave = hdr['CRVAL1']  # Initial wavelength
    step = hdr['CDELT1']  # Increment per pixel
    w0, dw, n = start_wave, step, len(flux)
    w = start_wave + step * n
    wave = np.linspace(w0, w, n, endpoint=False)
    # Clean up the temporary file
    if os.path.isfile(tmp):
        iraf.images.imutil.imdelete(images=tmp, verify='No', mode='ql')
    return wave, flux
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
FWHM Calculation with 4333 Angstrom Line
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
def FWHM_calc(ARC, centre):
    '''
    Calculates the instrumental FWHM resolution of the telescope (in this case the Southern African Large Telescope),
    given the ARC FITS file. The FWHM is calculated based off the 4333 Angstrom line. This code will only work if the given
    ARC file includes the 4333 Angstrom emission line.
    :param ARC [String]: File name of ARC FITS file to be opened.
    :param centre [Float]: Number of the central pixel of the astronomical source on the FITS image.
    :return: FWHM [float]: Instrumental resolution of the telescope.
    :return: FWHM_err [float]: 1-sigma error of the instrumental resolution.
    '''
    # Read FITS file
    wave, flux = read_FITS(ARC, centre)
    # Restrict to a window around the 4333 Angstrom emission line
    line_4333 = [(wave >= 4320) & (wave <= 4340)]
    flux = flux[tuple(line_4333)]
    wave = wave[tuple(line_4333)]
    # Robust noise estimate from second differences of the flux
    n = len(flux)
    noise = 0.6052697 * np.median(np.abs(2.0 * flux[2:n - 2] - flux[0:n - 4] - flux[4:n]))
    # Points below 5 sigma are treated as baseline
    flux_base = flux[flux < 5 * noise]
    wave_base = wave[flux < 5 * noise]
    # Fit a straight-line baseline to the sub-5-sigma points
    def straight_line(x, m, c):
        return m * x + c
    coeff, var_matrix = curve_fit(straight_line, wave_base, flux_base)
    m = coeff[0]
    c = coeff[1]
    baseline = m * wave + c
    # Subtract the baseline
    smooth_flux = flux - baseline
    # Fit the Gaussian to the baseline-subtracted line
    def Gauss(x, a, mu, sigma):
        return a * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))
    amp = np.max(flux)
    mean = np.mean(wave)
    sigma = np.std(wave)
    coeff, var_matrix = curve_fit(Gauss, wave, smooth_flux, p0=[amp, mean, sigma])
    sigma = coeff[2]
    # BUGFIX: the covariance-matrix diagonal returned by curve_fit is the
    # *variance* of the fitted parameter; the 1-sigma uncertainty is its
    # square root (perr = sqrt(diag(pcov))).
    sigma_err = np.sqrt(var_matrix[2][2])
    FWHM = 2.355 * sigma
    FWHM_err = 2.355 * sigma_err
    return FWHM, FWHM_err
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Convolve Images
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
def convolve(work_dir, src1, ARC1, ctr1, src2, ARC2, ctr2):
    '''
    Convolve two image sources together, such that the two images match in resolution.
    The higher resolution image is set to match the lower resolution image.
    :param work_dir [String]: Directory path the convolved output files are written to.
    :param src1 [String]: Complete path and filename of the first image source (FITS file).
    :param ARC1 [String]: Complete path and filename of the first ARC image (FITS file).
    :param ctr1 [Float]: Number of the central pixel of the first astronomical source.
    :param src2 [String]: Complete path and filename of the second image source (FITS file).
    :param ARC2 [String]: Complete path and filename of the second ARC image (FITS file).
    :param ctr2 [Float]: Number of the central pixel of the second astronomical source.
    :return: None
    '''
    # Open first FITS image
    hdu_1 = fits.open('{}'.format(src1))
    # Open second FITS image
    hdu_2 = fits.open('{}'.format(src2))
    # Calculate instrumental FWHM resolutions of both images
    FWHM_1, FWHM_1_err = FWHM_calc(ARC1, ctr1)
    FWHM_2, FWHM_2_err = FWHM_calc(ARC2, ctr2)
    print('FWHM_1 = {} +\- {}'.format(FWHM_1, FWHM_1_err))
    print('FWHM_2 = {} +\- {}'.format(FWHM_2, FWHM_2_err))
    # Calculate the difference
    delta_sigma = np.abs(FWHM_1 - FWHM_2)
    print('FWHM Difference = {}'.format(delta_sigma))
    # NOTE(review): gaussian_filter expects a Gaussian *sigma* (in pixels),
    # but delta_sigma here is a difference of FWHM values (FWHM = 2.355*sigma).
    # Matching two Gaussian resolutions normally uses the quadrature kernel
    # width sqrt(FWHM_low**2 - FWHM_high**2) / 2.355 -- confirm intended
    # behaviour before relying on the output resolution.
    # Convolve the images to match the lowest resolution
    # (each image row is filtered independently along the dispersion axis)
    if FWHM_1 < FWHM_2:
        for i in range(len(hdu_1[0].data)):
            hdu_1[0].data[i] = gaussian_filter(hdu_1[0].data[i], sigma=delta_sigma)
    elif FWHM_2 < FWHM_1:
        for i in range(len(hdu_2[0].data)):
            hdu_2[0].data[i] = gaussian_filter(hdu_2[0].data[i], sigma=delta_sigma)
    # Get basenames of source files
    name1 = Path(src1).stem
    name2 = Path(src2).stem
    new1 = '{}{}_new.fits'.format(work_dir, name1)
    new2 = '{}{}_new.fits'.format(work_dir, name2)
    # Write new files
    # NOTE(review): writeto raises if the output file already exists --
    # reruns over the same work_dir will fail; confirm whether overwrite
    # semantics are wanted.
    hdu_1.writeto(new1)
    hdu_2.writeto(new2)
    return None
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Run Program
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
# Collect all user inputs in prompt order, then run the convolution.
_prompts = (
    '\nPlease enter the filepath where you would like to save the images to: ',
    '\nEnter file path & name of the first GALAXY FITS file (with full path and extension): ',
    '\nEnter file path & name of the first ARC FITS file (with full path and extension): ',
    '\nEnter the centre pixel of the first galaxy: ',
    '\nEnter file path & name of the second GALAXY FITS file (with full path and extension): ',
    '\nEnter file path & name of the second ARC FITS file (with full path and extension): ',
    '\nEnter the centre pixel of the second galaxy: ',
)
work_dir, src1, ARC1, ctr1, src2, ARC2, ctr2 = [raw_input(p) for p in _prompts]
# Convolve Files
convolve(work_dir, src1, ARC1, ctr1, src2, ARC2, ctr2)
| [
"scipy.optimize.curve_fit",
"pyraf.iraf.images.imutil",
"numpy.mean",
"numpy.abs",
"scipy.ndimage.gaussian_filter",
"pyraf.iraf.images",
"pathlib.Path",
"numpy.std",
"pyraf.iraf.images.imgeom",
"numpy.max",
"os.path.isfile",
"numpy.array",
"numpy.linspace",
"numpy.exp",
"pyraf.iraf.image... | [((1056, 1069), 'pyraf.iraf.images', 'iraf.images', ([], {}), '()\n', (1067, 1069), False, 'from pyraf import iraf\n'), ((1070, 1090), 'pyraf.iraf.images.imutil', 'iraf.images.imutil', ([], {}), '()\n', (1088, 1090), False, 'from pyraf import iraf\n'), ((1091, 1111), 'pyraf.iraf.images.imgeom', 'iraf.images.imgeom', ([], {}), '()\n', (1109, 1111), False, 'from pyraf import iraf\n'), ((1112, 1133), 'pyraf.iraf.images.immatch', 'iraf.images.immatch', ([], {}), '()\n', (1131, 1133), False, 'from pyraf import iraf\n'), ((1843, 1873), 'os.path.isfile', 'os.path.isfile', (['"""tmp_ctr.fits"""'], {}), "('tmp_ctr.fits')\n", (1857, 1873), False, 'import os\n'), ((2240, 2254), 'astropy.io.fits.open', 'fits.open', (['tmp'], {}), '(tmp)\n', (2249, 2254), True, 'from astropy.io import fits as fits\n'), ((2313, 2345), 'numpy.array', 'np.array', (['flux'], {'dtype': 'np.float64'}), '(flux, dtype=np.float64)\n', (2321, 2345), True, 'import numpy as np\n'), ((2533, 2570), 'numpy.linspace', 'np.linspace', (['w0', 'w', 'n'], {'endpoint': '(False)'}), '(w0, w, n, endpoint=False)\n', (2544, 2570), True, 'import numpy as np\n'), ((2578, 2597), 'os.path.isfile', 'os.path.isfile', (['tmp'], {}), '(tmp)\n', (2592, 2597), False, 'import os\n'), ((4223, 4269), 'scipy.optimize.curve_fit', 'curve_fit', (['straight_line', 'wave_base', 'flux_base'], {}), '(straight_line, wave_base, flux_base)\n', (4232, 4269), False, 'from scipy.optimize import curve_fit\n'), ((4522, 4534), 'numpy.max', 'np.max', (['flux'], {}), '(flux)\n', (4528, 4534), True, 'import numpy as np\n'), ((4546, 4559), 'numpy.mean', 'np.mean', (['wave'], {}), '(wave)\n', (4553, 4559), True, 'import numpy as np\n'), ((4572, 4584), 'numpy.std', 'np.std', (['wave'], {}), '(wave)\n', (4578, 4584), True, 'import numpy as np\n'), ((4609, 4667), 'scipy.optimize.curve_fit', 'curve_fit', (['Gauss', 'wave', 'smooth_flux'], {'p0': '[amp, mean, sigma]'}), '(Gauss, wave, smooth_flux, p0=[amp, mean, sigma])\n', (4618, 
4667), False, 'from scipy.optimize import curve_fit\n'), ((6180, 6203), 'numpy.abs', 'np.abs', (['(FWHM_1 - FWHM_2)'], {}), '(FWHM_1 - FWHM_2)\n', (6186, 6203), True, 'import numpy as np\n'), ((1750, 1760), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (1754, 1760), False, 'from pathlib import Path\n'), ((1883, 1946), 'pyraf.iraf.images.imutil.imdelete', 'iraf.images.imutil.imdelete', ([], {'images': 'tmp', 'verify': '"""No"""', 'mode': '"""ql"""'}), "(images=tmp, verify='No', mode='ql')\n", (1910, 1946), False, 'from pyraf import iraf\n'), ((2607, 2670), 'pyraf.iraf.images.imutil.imdelete', 'iraf.images.imutil.imdelete', ([], {'images': 'tmp', 'verify': '"""No"""', 'mode': '"""ql"""'}), "(images=tmp, verify='No', mode='ql')\n", (2634, 2670), False, 'from pyraf import iraf\n'), ((6671, 6681), 'pathlib.Path', 'Path', (['src1'], {}), '(src1)\n', (6675, 6681), False, 'from pathlib import Path\n'), ((6699, 6709), 'pathlib.Path', 'Path', (['src2'], {}), '(src2)\n', (6703, 6709), False, 'from pathlib import Path\n'), ((3898, 3953), 'numpy.abs', 'np.abs', (['(2.0 * flux[2:n - 2] - flux[0:n - 4] - flux[4:n])'], {}), '(2.0 * flux[2:n - 2] - flux[0:n - 4] - flux[4:n])\n', (3904, 3953), True, 'import numpy as np\n'), ((4470, 4511), 'numpy.exp', 'np.exp', (['(-(x - mu) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - mu) ** 2 / (2 * sigma ** 2))\n', (4476, 4511), True, 'import numpy as np\n'), ((6415, 6467), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['hdu_1[0].data[i]'], {'sigma': 'delta_sigma'}), '(hdu_1[0].data[i], sigma=delta_sigma)\n', (6430, 6467), False, 'from scipy.ndimage import gaussian_filter\n'), ((6569, 6621), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['hdu_2[0].data[i]'], {'sigma': 'delta_sigma'}), '(hdu_2[0].data[i], sigma=delta_sigma)\n', (6584, 6621), False, 'from scipy.ndimage import gaussian_filter\n')] |
import json
import os
import subprocess
import h5py
import uuid
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.DataFileUtilClient import DataFileUtil
from pprint import pprint
from shutil import copy
import subprocess
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import re
import json
class ReactiveTransportSimulatorUtil:
    """Utility namespace for the ReactiveTransportSimulator KBase module."""

    # Location of the prepde toolkit inside the module image
    PREPDE_TOOLKIT_PATH = '/kb/module/lib/ReactiveTransportSimulator/Utils'

    def _generate_html_report(self):
        """Return the minimal HTML report string shown in the KBase UI."""
        return (
            "<html> <head> ReactiveTransportSimulator-KBase report"
            " </head> <body> </body> </html>"
        )
class ReactiveTransportSimulatorRunBatchUtil:
    def __init__(self,params):
        """Set up the SDK callback client and working directories.

        :param params: dict of app parameters; must contain 'shared_folder'
            (job-scoped scratch directory provided by the KBase runtime).
        """
        self.params = params
        # KBase SDK callback endpoint used by DataFileUtil / report services
        self.callback_url = os.environ['SDK_CALLBACK_URL']
        self.dfu = DataFileUtil(self.callback_url)
        self.output_files = []  # files attached to the final report
        self.html_files = []  # HTML fragments attached to the final report
        # module-local template/data folder (read-only inputs)
        self.data_folder = os.path.abspath('./data/')
        self.shared_folder = params['shared_folder']
        # per-run scratch directory created later by run_batch_model
        self.scratch_folder = os.path.join(params['shared_folder'],"scratch")
    def run_batch_model(self):
        """Run a PFLOTRAN batch-reaction simulation driven by an FBA model.

        Pipeline (all artefacts land in ``self.scratch_folder``):
          1. pull the FBA model object via DataFileUtil and export its
             compounds (cpd_fba.csv) and a random sample of reactions that
             have both an electron acceptor and a DOC donor (rxn_fba.csv);
          2. generate and format a Fortran reaction-sandbox source file,
             copy it into the PFLOTRAN source tree and recompile PFLOTRAN;
          3. build the batch input deck and reaction database, run PFLOTRAN,
             and plot the resulting time series;
          4. attach all produced files to ``self.output_files`` and return
             the HTML report string.
        """
        print('params:',self.params)
        try:
            os.mkdir(self.scratch_folder)
        except OSError:
            # directory may already exist from a previous run
            print ("Creation of the directory %s failed" % self.scratch_folder)
        else:
            print ("Successfully created the directory %s " % self.scratch_folder)
        # move file templates from data folder to scratch folder
        pflotran_input_temp = os.path.join(self.data_folder,'batch_template.in')
        pflotran_db_temp = os.path.join(self.data_folder,'database_template.dat')
        pflotran_input = os.path.join(self.scratch_folder,'batch.in')
        pflotran_db = os.path.join(self.scratch_folder,'database.dat')
        stoi_csv_fba = os.path.join(self.scratch_folder,'rxn_fba.csv')
        cpd_csv_fba = os.path.join(self.scratch_folder,'cpd_fba.csv')
        # read inputs
        print("Input FBA model: ",self.params['input_FBA_model'])
        dfu = DataFileUtil(self.callback_url)
        fba_model = dfu.get_objects({'object_refs': [self.params['input_FBA_model']]})['data'][0]
        print("FBA model name :",fba_model['data']['name'])
        nrxn = int(self.params['number_simulated_reactions'])
        tot_time = float(self.params['simulation_time'])
        timestep = float(self.params['snapshot_period'])
        temperature = float(self.params['temperature'])
        # collect the compound info
        # cpdid2formula maps model compound ids to chemical formulas;
        # biomass compounds are renamed to the literal 'BIOMASS'
        cpdid2formula = dict()
        df_cpd = pd.DataFrame({'formula':[None]})
        for compound in fba_model['data']['modelcompounds']:
            cpdid2formula[compound['id']] = compound['formula']
            if 'biom' in compound['id']:
                df_cpd = df_cpd.append({'formula':'BIOMASS'}, ignore_index=True)
            else:
                df_cpd = df_cpd.append({'formula':compound['formula']}, ignore_index=True)
        # default every compound to 1 mol/L; empty formulas are dropped
        df_cpd.insert(len(df_cpd.columns),'initial_concentration(mol/L)',1,True)
        df_cpd['formula'].replace('', np.nan, inplace=True)
        df_cpd = df_cpd.dropna()
        df_cpd.to_csv(cpd_csv_fba,index=False)
        print("Compounds saved. \n")
        # collect donor, acceptor, biom from reactions
        """
        donor : "~/modelcompounds/id/xcpd2_c0"
        acceptor : "~/modelcompounds/id/acceptor_c0"
        biom : "~/modelcompounds/id/biom_c0"
        """
        rxn_ref = ['r'+str(i+1) for i in range(nrxn)]
        df_rxn = pd.DataFrame({'rxn_ref':rxn_ref,'rxn_id':None,'DOC_formula':None})
        # selected_reactions = random.choices(fba_model['data']['modelreactions'],k=nrxn)
        # Randomly sample nrxn reactions that contain an electron acceptor
        # and whose DOC donor has not been selected before.
        # NOTE(review): if a sampled reaction has an acceptor but no 'xcpd'
        # reagent, `doc` is unbound (NameError on first hit) or stale from a
        # previous iteration -- confirm all acceptor reactions carry a donor.
        selected_reactions = []
        selected_cpd = []
        i = 0
        while i < nrxn:
            irxn = random.choice(fba_model['data']['modelreactions'])
            acceptor_flag = False
            for reagent in irxn['modelReactionReagents']:
                cpdid = reagent['modelcompound_ref'].split('/id/')[1]
                if 'acceptor' in cpdid:
                    acceptor_flag = True
                if 'xcpd' in cpdid:
                    doc = cpdid2formula[cpdid]
                    selected_cpd.append(doc)
            if acceptor_flag and selected_cpd.count(doc) == 1:
                selected_reactions.append(irxn)
                i += 1
        # Build the stoichiometry table: one row per selected reaction,
        # one column per participating formula holding its coefficient.
        # NOTE(review): chained indexing (df['col'].iloc[idx] = ...) relies
        # on pandas writing through to the original frame -- fragile across
        # pandas versions; .loc/.iat would be the safe form.
        for reaction_idx,reaction_val in enumerate(selected_reactions):
            df_rxn['rxn_id'].iloc[reaction_idx] = reaction_val['id']
            for reagent in reaction_val['modelReactionReagents']:
                cpdid = reagent['modelcompound_ref'].split('/id/')[1]
                formula = cpdid2formula[cpdid]
                coef = reagent['coefficient']
                if "xcpd" in cpdid:
                    df_rxn['DOC_formula'].iloc[reaction_idx] = formula
                if "biom" in cpdid:
                    formula = 'BIOMASS'
                if not formula in df_rxn.columns:
                    temp = ['0']*df_rxn.shape[0]
                    df_rxn.insert(len(df_rxn.columns),formula,temp,True)
                    df_rxn[formula].iloc[reaction_idx] = coef
                else:
                    df_rxn[formula].iloc[reaction_idx] = coef
        print(df_rxn.columns)
        print(df_rxn.head())
        df_rxn.to_csv(stoi_csv_fba,index=False)
        print("Selected reactions saved. \n")
        # read initial condition from /bin/module/data
        init_cond = cpd_csv_fba
        # generate sandbox file
        sb_file = os.path.join(self.scratch_folder,'reaction_sandbox_pnnl_cyber.F90')
        var = ['mu_max','vh','k_deg','cc','activation_energy','reference_temperature']
        var_unit = ['1/sec','m^3','1/sec','M','J/mol','K']
        # NOTE(review): generate_sandbox_code / update_pflotran_database are
        # defined elsewhere in this module -- confirm they are imported here.
        generate_sandbox_code(nrxn,var,var_unit,sb_file,stoi_csv_fba)
        print("Sandbox file generated.")
        # format sandbox fortran code
        fmt_sb_cmd = 'fprettify ' + sb_file
        process = subprocess.Popen(fmt_sb_cmd.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
        print("Sandbox file formatted.")
        # copy sandbox file to src dir and recompile pflotran
        src_dir = '/bin/pflotran/src/pflotran'
        copy(sb_file,src_dir)
        print(os.getcwd())
        compile_pflotran_cmd = 'sh ./data/compile.sh'
        process = subprocess.Popen(compile_pflotran_cmd.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
        print("Compile PFLOTRAN output:",output[-300:])
        print("Complile PFLOTRAN err:",error)
        pprint(os.listdir(self.scratch_folder))
        # generate batch input deck
        self.generate_pflotran_input_batch(pflotran_input_temp,stoi_csv_fba,cpd_csv_fba,pflotran_input,tot_time,timestep,temperature)
        print("Batch input deck generated.")
        # generate database
        update_pflotran_database(stoi_csv_fba,pflotran_db_temp,pflotran_db)
        print("Database generated.")
        # running pflotran
        exepath = '/bin/pflotran/src/pflotran/pflotran'
        run_pflotran_cmd = exepath + ' -n 1 -pflotranin ' + pflotran_input
        process = subprocess.Popen(run_pflotran_cmd.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
        print("Running PFLOTRAN output:",output[-300:])
        print("Running PFLOTRAN err:",error)
        pprint(os.listdir(self.scratch_folder))
        # PFLOTRAN writes its results to batch.h5 on success
        h5_file = os.path.join(self.scratch_folder,'batch.h5')
        if os.path.isfile(h5_file):
            print ("Successfully run PFLOTRAN")
        else:
            print ("Fail to run PFLOTRAN")
        # generate plots in /kb/module/work/tmp/scratch/
        self.plot_time_series_batch(h5_file)
        # Attach output
        self.output_files.append(
            {'path': cpd_csv_fba,
             'name': os.path.basename(cpd_csv_fba),
             'label': os.path.basename(cpd_csv_fba),
             'description': 'compounds'}
        )
        self.output_files.append(
            {'path': stoi_csv_fba,
             'name': os.path.basename(stoi_csv_fba),
             'label': os.path.basename(stoi_csv_fba),
             'description': 'reactions stoichiometry table'}
        )
        self.output_files.append(
            {'path': sb_file,
             'name': os.path.basename(sb_file),
             'label': os.path.basename(sb_file),
             'description': 'Sandbox source code'}
        )
        self.output_files.append(
            {'path': pflotran_input,
             'name': os.path.basename(pflotran_input),
             'label': os.path.basename(pflotran_input),
             'description': 'Batch reaction input deck for PFLOTRAN'}
        )
        self.output_files.append(
            {'path': pflotran_db,
             'name': os.path.basename(pflotran_db),
             'label': os.path.basename(pflotran_db),
             'description': 'Batch reaction input deck for PFLOTRAN'}
        )
        self.output_files.append(
            {'path': h5_file,
             'name': os.path.basename(h5_file),
             'label': os.path.basename(h5_file),
             'description': 'H5 file generated by PFLOTRAN batch reaction'}
        )
        fig_name = 'time_series_plot.png'
        fig_file = os.path.join(self.scratch_folder,fig_name)
        self.output_files.append(
            {'path': fig_file,
             'name': os.path.basename(fig_file),
             'label': os.path.basename(fig_file),
             'description': 'Plots of breakthrough curves generated by PFLOTRAN batch reaction'}
        )
        # Return the report
        # NOTE(review): _generate_html_report is not defined on this class in
        # the visible source -- presumably inherited/added elsewhere; verify.
        return self._generate_html_report()
def generate_pflotran_input_batch(self,batch_file,stoi_file,init_file,output_file,tot_time,timestep,temp):
    """Fill in the batch PFLOTRAN input-deck template and write the deck.

    Reads the template ``batch_file`` line by line, rewriting the
    PRIMARY_SPECIES, CONSTRAINT initial, FINAL_TIME, MAXIMUM_TIMESTEP_SIZE,
    PERIODIC TIME and REFERENCE_TEMPERATURE sections from the reaction
    stoichiometry table and the initial-condition table, then writes the
    result to ``output_file``.

    Args:
        batch_file: path to the batch input-deck template.
        stoi_file: CSV whose non-bookkeeping columns are the primary species.
        init_file: CSV with 'formula' and 'initial_concentration(mol/L)' columns.
        output_file: path of the generated input deck.
        tot_time: simulation end time (hours).
        timestep: maximum/snapshot time step (hours).
        temp: reference temperature (degrees C).
    """
    # Species stored uncharged in the CSV but charged in the PFLOTRAN database.
    charge_map = {'NH4': 'NH4+', 'HCO3': 'HCO3-', 'H': 'H+',
                  'HS': 'HS-', 'HPO4': 'HPO4-'}
    # Bookkeeping columns that are not chemical species.
    skip_cols = ['rxn_id', 'DOC_formula', 'rxn_ref', 'H2O', 'BIOMASS']
    rxn_df = pd.read_csv(stoi_file)
    init_df = pd.read_csv(init_file)
    primary_species_nocharge = [spec for spec in rxn_df.columns
                                if spec not in skip_cols]
    primary_species_charge = [charge_map.get(spec, spec)
                              for spec in primary_species_nocharge]
    # Initial concentrations, in the same order as the species lists.
    init_cond = [init_df.loc[init_df['formula'] == i,
                             'initial_concentration(mol/L)'].iloc[0]
                 for i in primary_species_nocharge]
    init_biom = init_df.loc[init_df['formula'] == 'BIOMASS',
                            'initial_concentration(mol/L)'].iloc[0]
    for idx, val in enumerate(primary_species_nocharge):
        print("The initial concentration of {} is {} mol/L \n".format(val, init_cond[idx]))
    pri_spec = ""
    new_file_content = ""
    # Use a context manager so the template handle is closed (the original
    # leaked it).
    with open(batch_file, 'r') as template:
        for line in template:
            if 'PRIMARY_SPECIES' in line:
                new_file_content += line
                for i in primary_species_charge:
                    pri_spec += " " + i + "\n"
                new_file_content += " " + pri_spec + "\n"
            elif 'CONSTRAINT initial' in line:
                new_file_content += line
                new_file_content += " CONCENTRATIONS" + "\n"
                for j in range(len(primary_species_charge)):
                    new_file_content += " {} {} T".format(primary_species_charge[j], init_cond[j]) + "\n"
                new_file_content += " /" + "\n"
                new_file_content += " IMMOBILE" + "\n"
                new_file_content += " BIOMASS {} ".format(init_biom) + "\n"
                new_file_content += " /"
            elif 'FINAL_TIME' in line:
                # NOTE(review): the original repeated this elif branch; the
                # second copy was unreachable and has been removed.
                new_file_content += " FINAL_TIME {} h".format(tot_time) + "\n"
            elif 'MAXIMUM_TIMESTEP_SIZE' in line:
                new_file_content += " MAXIMUM_TIMESTEP_SIZE {} h".format(timestep) + "\n"
            elif 'PERIODIC TIME' in line:
                new_file_content += " PERIODIC TIME {} h".format(timestep) + "\n"
            elif 'REFERENCE_TEMPERATURE' in line:
                new_file_content += " REFERENCE_TEMPERATURE {} ! degrees C".format(temp) + "\n"
            else:
                # Any other template line passes through untouched.
                new_file_content += line
    with open(output_file, "w") as writing_file:
        writing_file.write(new_file_content)
    print('The batch input deck is updated.')
    return
def plot_time_series_batch(self,h5_file):
    """Plot concentration breakthrough curves from a PFLOTRAN HDF5 output.

    Reads every "Time:*" snapshot group in ``h5_file``, samples all
    'Total' variables at the grid cell nearest to ``obs_coord``, and saves
    the curves to <scratch_folder>/time_series_plot.png.

    Args:
        h5_file: path to the HDF5 file written by PFLOTRAN.
    """
    obs_coord = [0.5, 0.5, 0.5]  # observation point; presumably metres -- TODO confirm
    # Open read/write as the original did, but close deterministically
    # (the original leaked the h5py handle).
    with h5py.File(h5_file, 'r+') as h5:
        # Snapshot groups are named e.g. "Time:  1.00000E+00 h".
        snap_keys = [k for k in h5.keys() if k[0:4] == "Time"]
        time_unit = snap_keys[0][-1]
        time = sorted(float(k.split()[1]) for k in snap_keys)
        coords = h5['Coordinates']
        bound = [coords['X [m]'][0], coords['X [m]'][-1],
                 coords['Y [m]'][0], coords['Y [m]'][-1],
                 coords['Z [m]'][0], coords['Z [m]'][-1]]
        nxyz = [len(coords['X [m]']) - 1,
                len(coords['Y [m]']) - 1,
                len(coords['Z [m]']) - 1]
        # Cell centers are midpoints of the node coordinates along each axis
        # (the original recomputed each linspace twice).
        x_edges = np.linspace(bound[0], bound[1], nxyz[0] + 1)
        y_edges = np.linspace(bound[2], bound[3], nxyz[1] + 1)
        z_edges = np.linspace(bound[4], bound[5], nxyz[2] + 1)
        x_coord = (x_edges[:-1] + x_edges[1:]) / 2
        y_coord = (y_edges[:-1] + y_edges[1:]) / 2
        z_coord = (z_edges[:-1] + z_edges[1:]) / 2
        # Index of the grid cell nearest the observation point.
        x_idx = np.argmin(np.absolute(x_coord - obs_coord[0]))
        y_idx = np.argmin(np.absolute(y_coord - obs_coord[1]))
        z_idx = np.argmin(np.absolute(z_coord - obs_coord[2]))
        time_zero = "Time:" + str(" %12.5E" % 0) + str(" %s" % time_unit)
        var_name = [x for x in list(h5[time_zero].keys()) if 'Total' in x]
        var_value = np.zeros((len(var_name), len(time)))
        for i, itime in enumerate(time):
            time_slice = "Time:" + str(" %12.5E" % itime) + str(" %s" % time_unit)
            for j in range(len(var_name)):
                var_value[j, i] = h5[time_slice][var_name[j]][x_idx][y_idx][z_idx]
    fig = plt.figure(num=1, dpi=150)
    first_doc = True
    for i in range(len(var_name)):
        # Character 6 of the dataset name appears to flag DOC species
        # (names like "Total_C...") -- TODO confirm naming convention.
        if var_name[i][6] == 'C':
            if first_doc == True:
                # Single shared legend entry for all DOC curves.
                plt.plot(time, var_value[i, :], label='DOCs', color='k')
                first_doc = False
            else:
                plt.plot(time, var_value[i, :], color='k')
        else:
            plt.plot(time, var_value[i, :], label=var_name[i])
    plt.ioff()
    plt.xlabel("Time (%s)" % time_unit)
    ylabel = 'Concentration [M]'
    plt.ylabel(ylabel)
    plt.legend(frameon=False, loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=3)
    fig_name = 'time_series_plot.png'
    fig_path = os.path.join(self.scratch_folder, fig_name)
    plt.savefig(fig_path, dpi=150, bbox_inches='tight')
    if os.path.isfile(fig_path):
        print ("Successfully generated time series plot")
    else:
        print ("Fail to generate time series plot")
    return
def visualize_hdf_in_html(self):
    """Build the HTML summary report directory and upload it to Shock.

    Copies the time-series plot and the PFLOTRAN text output next to a
    generated summary.html, then pushes the zipped directory to Shock via
    DataFileUtil.

    Returns:
        dict with 'shock_id', 'name', 'label' and 'description' keys, in
        the shape KBaseReport expects for an html_links entry.
    """
    output_directory = os.path.join(self.shared_folder,'output')
    # NOTE(review): os.makedirs raises if the directory already exists;
    # presumably this method is called exactly once per run -- confirm.
    os.makedirs(output_directory)
    print("output dir:", output_directory)
    html_file = os.path.join(output_directory,'summary.html')
    fig_name = 'time_series_plot.png'
    pflotran_out_name = 'batch.out'
    fig_path = os.path.join(self.scratch_folder,fig_name)
    pflotran_out_path = os.path.join(self.scratch_folder,pflotran_out_name)
    # Existence checks are diagnostic only; the copies below run regardless.
    if os.path.isfile(fig_path):
        print ("Time series plot exists")
    else:
        print ("Time series plot does not exist")
        print("figpath:",fig_path)
    if os.path.isfile(pflotran_out_path):
        print ("PFLOTRAN output exists")
    else:
        print ("PFLOTRAN output does not exist")
        print("figpath:",pflotran_out_path)
    # NOTE(review): 'copy' is presumably shutil.copy (module-level import).
    # The destination is hard-coded; it only matches output_directory if
    # shared_folder == /kb/module/work/tmp -- confirm.
    copy(fig_path,'/kb/module/work/tmp/output')
    copy(pflotran_out_path,'/kb/module/work/tmp/output')
    # Write a minimal HTML page embedding the PFLOTRAN log and the plot.
    with open(html_file, 'w') as f:
        f.write("""
<!DOCTYPE html>
<html>
<body>
<h1>PFLOTRAN-KBbase</h1>
<p>PFLOTRAN output</p>
<embed src="batch.out" width="480" height="960">
<p>Visulize PFLOTRAN output</p>
<img src="{}" alt="Time series plot" height="360" width="480"></img>
</body>
</html>
""".format(fig_name))
    # Echo the written page for debugging.
    with open(html_file, 'r') as f:
        print("html_file:",f.readlines())
    # Zip and upload the whole report directory to Shock.
    report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
                                              'pack': 'zip'})['shock_id']
    return {'shock_id': report_shock_id,
            'name': os.path.basename(html_file),
            'label': os.path.basename(html_file),
            'description': 'HTML summary report for run_batch_model App'}
def _generate_html_report(self):
    """Assemble and create the KBaseReport for this run.

    Builds the HTML summary via visualize_hdf_in_html, then creates an
    extended report linking the collected output files and that summary.

    Returns:
        dict with 'report_name' and 'report_ref', which allow inline
        display of the report in the Narrative.
    """
    ws_name = self.params["workspace"]
    # Build the HTML summary first so it can be linked from the report.
    self.html_files.append(self.visualize_hdf_in_html())
    report_params = {
        # Free-text shown in the summary section of the result page.
        'message': "Say something...",
        # Every report is bound to a workspace (name or ID).
        'workspace_name': ws_name,
        # Flat output files -- rendered in the "Files" section.
        'file_links': self.output_files,
        # HTML pages -- rendered in the "Links" section.
        'html_links': self.html_files,
        # Show the first HTML page inline.
        'direct_html_link_index': 0,
        'html_window_height': 333,
    }
    report_client = KBaseReport(self.callback_url)
    report = report_client.create_extended_report(report_params)
    return {'report_name': report['name'],
            'report_ref': report['ref']}
class ReactiveTransportSimulatorRun1DUtil:
    """Utility driving a 1-D column PFLOTRAN reactive-transport run."""

    def __init__(self, params):
        """Cache app parameters and set up folders and service clients.

        Args:
            params: dict of app parameters; must contain 'shared_folder'.
        """
        self.params = params
        # The SDK callback URL is injected into the container environment.
        self.callback_url = os.environ['SDK_CALLBACK_URL']
        self.dfu = DataFileUtil(self.callback_url)
        self.output_files = []  # entries for the report 'file_links'
        self.html_files = []    # entries for the report 'html_links'
        self.data_folder = os.path.abspath('./data/')
        shared = params['shared_folder']
        self.shared_folder = shared
        self.scratch_folder = os.path.join(shared, "scratch")
def run_1d_model(self):
    """End-to-end driver for the 1-D column simulation.

    Fetches the FBA model, samples reactions into a stoichiometry table,
    generates the Fortran reaction sandbox, recompiles PFLOTRAN, writes
    the 1-D input deck and database, runs PFLOTRAN, attaches all outputs,
    and returns the KBase report references.

    Returns:
        dict with 'report_name' and 'report_ref' from _generate_html_report.
    """
    print('params:',self.params)
    try:
        os.mkdir(self.scratch_folder)
    except OSError:
        print ("Creation of the directory %s failed" % self.scratch_folder)
    else:
        print ("Successfully created the directory %s " % self.scratch_folder)
    # move file templates from data folder to scratch folder
    pflotran_input_temp = os.path.join(self.data_folder,'column_template.in')
    pflotran_db_temp = os.path.join(self.data_folder,'database_template.dat')
    pflotran_input = os.path.join(self.scratch_folder,'column.in')
    pflotran_db = os.path.join(self.scratch_folder,'database.dat')
    stoi_csv_fba = os.path.join(self.scratch_folder,'rxn_fba.csv')
    cpd_csv_fba = os.path.join(self.scratch_folder,'cpd_fba.csv')
    # read inputs
    print("Input FBA model: ",self.params['input_FBA_model'])
    dfu = DataFileUtil(self.callback_url)
    # Fetch the FBA model object from the workspace.
    fba_model = dfu.get_objects({'object_refs': [self.params['input_FBA_model']]})['data'][0]
    print("FBA model name :",fba_model['data']['name'])
    nrxn = int(self.params['number_simulated_reactions'])
    velocity = float(self.params['velocity'])
    length = float(self.params['length'])
    ngrid = int(self.params['number_grids'])
    tot_time = float(self.params['simulation_time'])
    timestep = float(self.params['snapshot_period'])
    temperature = float(self.params['temperature'])
    # collect the compound info
    # Map compound id -> chemical formula; biomass compounds are renamed
    # to the sentinel 'BIOMASS' in the compound table.
    cpdid2formula = dict()
    df_cpd = pd.DataFrame({'formula':[None]})
    for compound in fba_model['data']['modelcompounds']:
        cpdid2formula[compound['id']] = compound['formula']
        if 'biom' in compound['id']:
            # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
            # this code presumably pins an older pandas -- confirm.
            df_cpd = df_cpd.append({'formula':'BIOMASS'}, ignore_index=True)
        else:
            df_cpd = df_cpd.append({'formula':compound['formula']}, ignore_index=True)
    # Hard-coded default concentrations; biomass gets special values.
    df_cpd.insert(len(df_cpd.columns),'initial_concentration(mol/L)',0.01,True)
    df_cpd.loc[df_cpd.formula == 'BIOMASS', 'initial_concentration(mol/L)'] = 0.001
    df_cpd.insert(len(df_cpd.columns),'inlet_concentration(mol/L)',1,True)
    df_cpd.loc[df_cpd.formula == 'BIOMASS', 'inlet_concentration(mol/L)'] = 0
    # Drop rows with empty/missing formulas (including the seed None row).
    df_cpd['formula'].replace('', np.nan, inplace=True)
    df_cpd = df_cpd.dropna()
    df_cpd.to_csv(cpd_csv_fba,index=False)
    print("Compounds saved. \n")
    # collect donor, acceptor, biom from reactions
    """
    donor : "~/modelcompounds/id/xcpd2_c0"
    acceptor : "~/modelcompounds/id/acceptor_c0"
    biom : "~/modelcompounds/id/biom_c0"
    """
    rxn_ref = ['r'+str(i+1) for i in range(nrxn)]
    df_rxn = pd.DataFrame({'rxn_ref':rxn_ref,'rxn_id':None,'DOC_formula':None})
    # selected_reactions = random.choices(fba_model['data']['modelreactions'],k=nrxn)
    # Randomly sample nrxn distinct-DOC reactions that have an electron
    # acceptor. NOTE(review): this loop is nondeterministic and may not
    # terminate if the model lacks qualifying reactions; 'doc' is unbound
    # (NameError) if the first sampled reaction has no 'xcpd' reagent --
    # confirm against real model data.
    selected_reactions = []
    selected_cpd = []
    i = 0
    while i < nrxn:
        irxn = random.choice(fba_model['data']['modelreactions'])
        acceptor_flag = False
        for reagent in irxn['modelReactionReagents']:
            # Compound id is the path segment after '/id/'.
            cpdid = reagent['modelcompound_ref'].split('/id/')[1]
            if 'acceptor' in cpdid:
                acceptor_flag = True
            if 'xcpd' in cpdid:
                doc = cpdid2formula[cpdid]
                selected_cpd.append(doc)
        # Keep the reaction only if it has an acceptor and its DOC has not
        # been selected before (count == 1).
        if acceptor_flag and selected_cpd.count(doc) == 1:
            selected_reactions.append(irxn)
            i += 1
    # Build the stoichiometry table: one column per species, one row per
    # selected reaction.
    for reaction_idx,reaction_val in enumerate(selected_reactions):
        df_rxn['rxn_id'].iloc[reaction_idx] = reaction_val['id']
        for reagent in reaction_val['modelReactionReagents']:
            cpdid = reagent['modelcompound_ref'].split('/id/')[1]
            formula = cpdid2formula[cpdid]
            coef = reagent['coefficient']
            if "xcpd" in cpdid:
                df_rxn['DOC_formula'].iloc[reaction_idx] = formula
            if "biom" in cpdid:
                formula = 'BIOMASS'
            if not formula in df_rxn.columns:
                # First occurrence of this species: add a zero-filled column.
                temp = ['0']*df_rxn.shape[0]
                df_rxn.insert(len(df_rxn.columns),formula,temp,True)
                df_rxn[formula].iloc[reaction_idx] = coef
            else:
                df_rxn[formula].iloc[reaction_idx] = coef
    print(df_rxn.columns)
    print(df_rxn.head())
    df_rxn.to_csv(stoi_csv_fba,index=False)
    print("Selected reactions saved. \n")
    # read initial and boundary conditions from /bin/module/data
    init_cond = cpd_csv_fba
    # generate sandbox file
    sb_file = os.path.join(self.scratch_folder,'reaction_sandbox_pnnl_cyber.F90')
    var = ['mu_max','vh','k_deg','cc','activation_energy','reference_temperature']
    var_unit = ['1/sec','m^3','1/sec','M','J/mol','K']
    generate_sandbox_code(nrxn,var,var_unit,sb_file,stoi_csv_fba)
    print("Sandbox file generated.")
    # format sandbox fortran code
    fmt_sb_cmd = 'fprettify ' + sb_file
    process = subprocess.Popen(fmt_sb_cmd.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    print("Sandbox file formatted.")
    # copy sandbox file to src dir and recompile pflotran
    src_dir = '/bin/pflotran/src/pflotran'
    copy(sb_file,src_dir)
    print(os.getcwd())
    compile_pflotran_cmd = 'sh ./data/compile.sh'
    process = subprocess.Popen(compile_pflotran_cmd.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    # Only the tail of the build log is echoed.
    print("Compile PFLOTRAN output:",output[-300:])
    print("Complile PFLOTRAN err:",error)
    pprint(os.listdir(self.scratch_folder))
    # generate 1d input deck
    self.generate_pflotran_input_1d(pflotran_input_temp,stoi_csv_fba,cpd_csv_fba,
                                    pflotran_input,velocity,length,ngrid,tot_time,timestep,temperature)
    print("Batch input deck generated.")
    # generate database
    update_pflotran_database(stoi_csv_fba,pflotran_db_temp,pflotran_db)
    print("Database generated.")
    # running pflotran
    exepath = '/bin/pflotran/src/pflotran/pflotran'
    run_pflotran_cmd = exepath + ' -n 1 -pflotranin ' + pflotran_input
    process = subprocess.Popen(run_pflotran_cmd.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    print("Running PFLOTRAN output:",output[-300:])
    print("Running PFLOTRAN err:",error)
    pprint(os.listdir(self.scratch_folder))
    # Success is inferred from the presence of the HDF5 output file.
    h5_file = os.path.join(self.scratch_folder,'column.h5')
    if os.path.isfile(h5_file):
        print ("Successfully run PFLOTRAN")
    else:
        print ("Fail to run PFLOTRAN")
    # generate plots in /kb/module/work/tmp/scratch/
    # self.plot_time_series_batch(h5_file)
    # Attach output
    self.output_files.append(
        {'path': cpd_csv_fba,
         'name': os.path.basename(cpd_csv_fba),
         'label': os.path.basename(cpd_csv_fba),
         'description': 'compounds'}
    )
    self.output_files.append(
        {'path': stoi_csv_fba,
         'name': os.path.basename(stoi_csv_fba),
         'label': os.path.basename(stoi_csv_fba),
         'description': 'reactions stoichiometry table'}
    )
    self.output_files.append(
        {'path': sb_file,
         'name': os.path.basename(sb_file),
         'label': os.path.basename(sb_file),
         'description': 'Sandbox source code'}
    )
    self.output_files.append(
        {'path': pflotran_input,
         'name': os.path.basename(pflotran_input),
         'label': os.path.basename(pflotran_input),
         'description': '1d column reaction input deck for PFLOTRAN'}
    )
    # NOTE(review): same description as the input deck above; presumably
    # this one was meant to read 'database' -- confirm before changing.
    self.output_files.append(
        {'path': pflotran_db,
         'name': os.path.basename(pflotran_db),
         'label': os.path.basename(pflotran_db),
         'description': '1d column reaction input deck for PFLOTRAN'}
    )
    self.output_files.append(
        {'path': h5_file,
         'name': os.path.basename(h5_file),
         'label': os.path.basename(h5_file),
         'description': 'H5 file generated by PFLOTRAN 1d column reaction'}
    )
    # fig_name = 'time_series_plot.png'
    # fig_file = os.path.join(self.scratch_folder,fig_name)
    # self.output_files.append(
    #     {'path': fig_file,
    #      'name': os.path.basename(fig_file),
    #      'label': os.path.basename(fig_file),
    #      'description': 'Plots of breakthrough curves generated by PFLOTRAN batch reaction'}
    # )
    # Return the report
    return self._generate_html_report()
def generate_pflotran_input_1d(self,template,stoi_file,icbc_file,output_file,
                               velocity,length,ngrid,tot_time,timestep,temp):
    """Fill in the 1-D column PFLOTRAN input-deck template and write the deck.

    Reads the template line by line, rewriting the DATASET, NXYZ,
    PRIMARY_SPECIES, BOUNDS, REGION outlet, CONSTRAINT initial/inlet,
    FINAL_TIME, MAXIMUM_TIMESTEP_SIZE, PERIODIC TIME and
    REFERENCE_TEMPERATURE sections, then writes the result to
    ``output_file``.

    Args:
        template: path to the 1-D column input-deck template.
        stoi_file: CSV whose non-bookkeeping columns are the primary species.
        icbc_file: CSV with 'formula', 'initial_concentration(mol/L)' and
            'inlet_concentration(mol/L)' columns.
        output_file: path of the generated input deck.
        velocity: pore-water velocity (m/h) for the DATASET card.
        length: column length (m).
        ngrid: number of grid cells along the column.
        tot_time: simulation end time (hours).
        timestep: maximum/snapshot time step (hours).
        temp: reference temperature (degrees C).
    """
    # Species stored uncharged in the CSV but charged in the PFLOTRAN database.
    charge_map = {'NH4': 'NH4+', 'HCO3': 'HCO3-', 'H': 'H+',
                  'HS': 'HS-', 'HPO4': 'HPO4-'}
    # Bookkeeping columns that are not chemical species.
    skip_cols = ['rxn_id', 'DOC_formula', 'rxn_ref', 'H2O', 'BIOMASS']
    rxn_df = pd.read_csv(stoi_file)
    init_df = pd.read_csv(icbc_file)
    primary_species_nocharge = [spec for spec in rxn_df.columns
                                if spec not in skip_cols]
    primary_species_charge = [charge_map.get(spec, spec)
                              for spec in primary_species_nocharge]
    # Initial and inlet concentrations, in species order.
    init_cond = [init_df.loc[init_df['formula'] == i,
                             'initial_concentration(mol/L)'].iloc[0]
                 for i in primary_species_nocharge]
    init_biom = init_df.loc[init_df['formula'] == 'BIOMASS',
                            'initial_concentration(mol/L)'].iloc[0]
    inlet_cond = [init_df.loc[init_df['formula'] == i,
                              'inlet_concentration(mol/L)'].iloc[0]
                  for i in primary_species_nocharge]
    inlet_biom = init_df.loc[init_df['formula'] == 'BIOMASS',
                             'inlet_concentration(mol/L)'].iloc[0]
    for idx, val in enumerate(primary_species_nocharge):
        print("The initial concentration of {} is {} mol/L \n".format(val, init_cond[idx]))
        print("The inlet concentration of {} is {} mol/L \n".format(val, inlet_cond[idx]))
    pri_spec = ""
    new_file_content = ""
    # Use a context manager so the template handle is closed (the original
    # leaked it).
    with open(template, 'r') as template_fh:
        for line in template_fh:
            if 'DATASET' in line:
                new_file_content += ' DATASET {} 0 0 m/h'.format(velocity) + "\n"
            elif 'NXYZ' in line:
                new_file_content += ' NXYZ {} 1 1'.format(ngrid) + "\n"
            elif 'PRIMARY_SPECIES' in line:
                new_file_content += line
                for i in primary_species_charge:
                    pri_spec += " " + i + "\n"
                new_file_content += " " + pri_spec + "\n"
            elif 'BOUNDS' in line:
                new_file_content += line
                new_file_content += " 0.d0 -1.d20 -1.d20" + "\n"
                new_file_content += " {} 1.d20 1.d20".format(length) + "\n"
            elif 'REGION outlet' in line:
                new_file_content += line
                new_file_content += " COORDINATES" + "\n"
                new_file_content += " {} -1.d20 -1.d20".format(length) + "\n"
                new_file_content += " {} -1.d20 -1.d20".format(length) + "\n"
                new_file_content += " /" + "\n"
                new_file_content += " FACE EAST" + "\n"
            elif 'CONSTRAINT initial' in line:
                new_file_content += line
                new_file_content += " CONCENTRATIONS" + "\n"
                for j in range(len(primary_species_charge)):
                    new_file_content += " {} {} T".format(primary_species_charge[j], init_cond[j]) + "\n"
                new_file_content += " /" + "\n"
                new_file_content += " IMMOBILE" + "\n"
                new_file_content += " BIOMASS {} ".format(init_biom) + "\n"
                new_file_content += " /"
            elif 'CONSTRAINT inlet' in line:
                new_file_content += line
                new_file_content += " CONCENTRATIONS" + "\n"
                for j in range(len(primary_species_charge)):
                    new_file_content += " {} {} T".format(primary_species_charge[j], inlet_cond[j]) + "\n"
                new_file_content += " /" + "\n"
                new_file_content += " IMMOBILE" + "\n"
                new_file_content += " BIOMASS {} ".format(inlet_biom) + "\n"
                new_file_content += " /"
            elif 'FINAL_TIME' in line:
                # NOTE(review): the original repeated this elif branch; the
                # second copy was unreachable and has been removed.
                new_file_content += " FINAL_TIME {} h".format(tot_time) + "\n"
            elif 'MAXIMUM_TIMESTEP_SIZE' in line:
                new_file_content += " MAXIMUM_TIMESTEP_SIZE {} h".format(timestep) + "\n"
            elif 'PERIODIC TIME' in line:
                new_file_content += " PERIODIC TIME {} h".format(timestep) + "\n"
            elif 'REFERENCE_TEMPERATURE' in line:
                new_file_content += " REFERENCE_TEMPERATURE {} ! degrees C".format(temp) + "\n"
            else:
                # Any other template line passes through untouched.
                new_file_content += line
    with open(output_file, "w") as writing_file:
        writing_file.write(new_file_content)
    print('The batch input deck is updated.')
    return
def plot_time_series_batch(self,h5_file):
    """Plot concentration breakthrough curves from a PFLOTRAN HDF5 output.

    Reads every "Time:*" snapshot group in ``h5_file``, samples all
    'Total' variables at the grid cell nearest to ``obs_coord``, and saves
    the curves to <scratch_folder>/time_series_plot.png.

    Args:
        h5_file: path to the HDF5 file written by PFLOTRAN.
    """
    obs_coord = [0.5, 0.5, 0.5]  # observation point; presumably metres -- TODO confirm
    # Open read/write as the original did, but close deterministically
    # (the original leaked the h5py handle).
    with h5py.File(h5_file, 'r+') as h5:
        # Snapshot groups are named e.g. "Time:  1.00000E+00 h".
        snap_keys = [k for k in h5.keys() if k[0:4] == "Time"]
        time_unit = snap_keys[0][-1]
        time = sorted(float(k.split()[1]) for k in snap_keys)
        coords = h5['Coordinates']
        bound = [coords['X [m]'][0], coords['X [m]'][-1],
                 coords['Y [m]'][0], coords['Y [m]'][-1],
                 coords['Z [m]'][0], coords['Z [m]'][-1]]
        nxyz = [len(coords['X [m]']) - 1,
                len(coords['Y [m]']) - 1,
                len(coords['Z [m]']) - 1]
        # Cell centers are midpoints of the node coordinates along each axis
        # (the original recomputed each linspace twice).
        x_edges = np.linspace(bound[0], bound[1], nxyz[0] + 1)
        y_edges = np.linspace(bound[2], bound[3], nxyz[1] + 1)
        z_edges = np.linspace(bound[4], bound[5], nxyz[2] + 1)
        x_coord = (x_edges[:-1] + x_edges[1:]) / 2
        y_coord = (y_edges[:-1] + y_edges[1:]) / 2
        z_coord = (z_edges[:-1] + z_edges[1:]) / 2
        # Index of the grid cell nearest the observation point.
        x_idx = np.argmin(np.absolute(x_coord - obs_coord[0]))
        y_idx = np.argmin(np.absolute(y_coord - obs_coord[1]))
        z_idx = np.argmin(np.absolute(z_coord - obs_coord[2]))
        time_zero = "Time:" + str(" %12.5E" % 0) + str(" %s" % time_unit)
        var_name = [x for x in list(h5[time_zero].keys()) if 'Total' in x]
        var_value = np.zeros((len(var_name), len(time)))
        for i, itime in enumerate(time):
            time_slice = "Time:" + str(" %12.5E" % itime) + str(" %s" % time_unit)
            for j in range(len(var_name)):
                var_value[j, i] = h5[time_slice][var_name[j]][x_idx][y_idx][z_idx]
    fig = plt.figure(num=1, dpi=150)
    first_doc = True
    for i in range(len(var_name)):
        # Character 6 of the dataset name appears to flag DOC species
        # (names like "Total_C...") -- TODO confirm naming convention.
        if var_name[i][6] == 'C':
            if first_doc == True:
                # Single shared legend entry for all DOC curves.
                plt.plot(time, var_value[i, :], label='DOCs', color='k')
                first_doc = False
            else:
                plt.plot(time, var_value[i, :], color='k')
        else:
            plt.plot(time, var_value[i, :], label=var_name[i])
    plt.ioff()
    plt.xlabel("Time (%s)" % time_unit)
    ylabel = 'Concentration [M]'
    plt.ylabel(ylabel)
    plt.legend(frameon=False, loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=3)
    fig_name = 'time_series_plot.png'
    fig_path = os.path.join(self.scratch_folder, fig_name)
    plt.savefig(fig_path, dpi=150, bbox_inches='tight')
    if os.path.isfile(fig_path):
        print ("Successfully generated time series plot")
    else:
        print ("Fail to generate time series plot")
    return
def visualize_hdf_in_html(self):
    """Build the HTML summary report directory and upload it to Shock.

    Writes a minimal summary.html into a fresh output directory and
    pushes the zipped directory to Shock via DataFileUtil. Unlike the
    batch variant, the file copies and plot embedding are currently
    disabled (commented out below).

    Returns:
        dict with 'shock_id', 'name', 'label' and 'description' keys, in
        the shape KBaseReport expects for an html_links entry.
    """
    output_directory = os.path.join(self.shared_folder,'output')
    # NOTE(review): os.makedirs raises if the directory already exists;
    # presumably this method is called exactly once per run -- confirm.
    os.makedirs(output_directory)
    print("output dir:", output_directory)
    html_file = os.path.join(output_directory,'summary.html')
    fig_name = 'time_series_plot.png'
    pflotran_out_name = 'batch.out'
    fig_path = os.path.join(self.scratch_folder,fig_name)
    pflotran_out_path = os.path.join(self.scratch_folder,pflotran_out_name)
    # Existence checks are diagnostic only.
    if os.path.isfile(fig_path):
        print ("Time series plot exists")
    else:
        print ("Time series plot does not exist")
        print("figpath:",fig_path)
    if os.path.isfile(pflotran_out_path):
        print ("PFLOTRAN output exists")
    else:
        print ("PFLOTRAN output does not exist")
        print("figpath:",pflotran_out_path)
    # copy(fig_path,'/kb/module/work/tmp/output')
    # copy(pflotran_out_path,'/kb/module/work/tmp/output')
    # with open(html_file, 'w') as f:
    #     f.write("""
    # <!DOCTYPE html>
    # <html>
    # <body>
    # <h1>PFLOTRAN-KBbase</h1>
    # <p>PFLOTRAN output</p>
    # <embed src="batch.out" width="480" height="960">
    # <p>Visulize PFLOTRAN output</p>
    # <img src="{}" alt="Time series plot" height="360" width="480"></img>
    # </body>
    # </html>
    # """.format(fig_name))
    # test
    # Stripped-down page: the <img> src is empty, so no plot is shown.
    with open(html_file, 'w') as f:
        f.write("""
<!DOCTYPE html>
<html>
<body>
<h1>PFLOTRAN-KBbase</h1>
<p>PFLOTRAN output</p>
<embed src="batch.out" width="480" height="960">
<p>Visulize PFLOTRAN output</p>
<img src="" alt="Time series plot" height="360" width="480"></img>
</body>
</html>
""")
    # Echo the written page for debugging.
    with open(html_file, 'r') as f:
        print("html_file:",f.readlines())
    # Zip and upload the whole report directory to Shock.
    report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
                                              'pack': 'zip'})['shock_id']
    return {'shock_id': report_shock_id,
            'name': os.path.basename(html_file),
            'label': os.path.basename(html_file),
            'description': 'HTML summary report for run_batch_model App'}
def _generate_html_report(self):
    """Assemble and create the KBaseReport for this run.

    Builds the HTML summary via visualize_hdf_in_html, then creates an
    extended report linking the collected output files and that summary.

    Returns:
        dict with 'report_name' and 'report_ref', which allow inline
        display of the report in the Narrative.
    """
    ws_name = self.params["workspace"]
    # Build the HTML summary first so it can be linked from the report.
    self.html_files.append(self.visualize_hdf_in_html())
    report_params = {
        # Free-text shown in the summary section of the result page.
        'message': "Say something...",
        # Every report is bound to a workspace (name or ID).
        'workspace_name': ws_name,
        # Flat output files -- rendered in the "Files" section.
        'file_links': self.output_files,
        # HTML pages -- rendered in the "Links" section.
        'html_links': self.html_files,
        # Show the first HTML page inline.
        'direct_html_link_index': 0,
        'html_window_height': 333,
    }
    report_client = KBaseReport(self.callback_url)
    report = report_client.create_extended_report(report_params)
    return {'report_name': report['name'],
            'report_ref': report['ref']}
def generate_sandbox_code(nrxn,var,var_unit,sb_file,stoi_file):
rxn_name = 'cyber'
rxn_df = pd.read_csv(stoi_file)
primary_species_charge = []
primary_species_nocharge = []
for spec in list(rxn_df.columns):
if spec in ['rxn_id','DOC_formula','rxn_ref','H2O']:
continue
primary_species_nocharge.append(spec)
if spec=='NH4':
primary_species_charge.append('NH4+')
continue
if spec=='HCO3':
primary_species_charge.append('HCO3-')
continue
if spec=='H':
primary_species_charge.append('H+')
continue
if spec=='HS':
primary_species_charge.append('HS-')
continue
if spec=='HPO4':
primary_species_charge.append('HPO4-')
continue
primary_species_charge.append(spec)
sandbox_file = open(sb_file,'w+')
sb = '''
module Reaction_Sandbox_{}_class
use Reaction_Sandbox_Base_class
use Global_Aux_module
use Reactive_Transport_Aux_module
use PFLOTRAN_Constants_module
implicit none
private
#include "petsc/finclude/petscsys.h"
'''
sb = sb.format(rxn_name.capitalize())
for idx,item in enumerate(primary_species_nocharge):
sb = sb+" PetscInt, parameter :: {}_MASS_STORAGE_INDEX = {}\n".format(item,idx+1)
sb = sb+'''
type, public, &
extends(reaction_sandbox_base_type) :: reaction_sandbox_{}_type
'''.format(rxn_name)
for idx,item in enumerate(primary_species_nocharge):
sb = sb+" PetscInt :: {}_id \n".format(item.lower())
for i in var:
sb = sb+" PetscReal :: {} \n".format(i)
sb = sb+'''
PetscReal :: nrxn
PetscBool :: store_cumulative_mass
PetscInt :: offset_auxiliary
contains
procedure, public :: ReadInput => {}Read
procedure, public :: Setup => {}Setup
procedure, public :: Evaluate => {}React
procedure, public :: Destroy => {}Destroy
end type reaction_sandbox_{}_type
public :: {}Create
contains
! ************************************************************************** !
'''.format(rxn_name.capitalize(),rxn_name.capitalize(),rxn_name.capitalize(),rxn_name.capitalize(),
rxn_name,rxn_name.capitalize())
#----------------------------------------------------------------------------
#
# function create()
#
#----------------------------------------------------------------------------
sb = sb+'''
function {}Create()
#include "petsc/finclude/petscsys.h"
use petscsys
implicit none
class(reaction_sandbox_{}_type), pointer :: {}Create
allocate({}Create)
'''.format(rxn_name.capitalize(),rxn_name,rxn_name.capitalize(),rxn_name.capitalize())
for i in primary_species_nocharge:
sb = sb+" {}Create%{}_id = UNINITIALIZED_INTEGER \n".format(rxn_name.capitalize(),i.lower())
for i in var:
if i.lower() == 'reference_temperature':
sb = sb + ' CyberCreate%reference_temperature = 298.15d0 ! 25 C\n'
else:
sb = sb+" {}Create%{} = UNINITIALIZED_DOUBLE \n".format(rxn_name.capitalize(),i)
sb = sb+'''
{}Create%nrxn = UNINITIALIZED_INTEGER
{}Create%store_cumulative_mass = PETSC_FALSE
nullify({}Create%next)
print *, '{}Creat Done'
end function {}Create
! ************************************************************************** !
'''.format(rxn_name.capitalize(),rxn_name.capitalize(),rxn_name.capitalize(),
rxn_name.capitalize(),rxn_name.capitalize())
#----------------------------------------------------------------------------
#
# function read()
#
#----------------------------------------------------------------------------
sb = sb+'''
! ************************************************************************** !
subroutine {}Read(this,input,option)
use Option_module
use String_module
use Input_Aux_module
implicit none
class(reaction_sandbox_{}_type) :: this
type(input_type), pointer :: input
type(option_type) :: option
PetscInt :: i
character(len=MAXWORDLENGTH) :: word, internal_units, units
character(len=MAXSTRINGLENGTH) :: error_string
error_string = 'CHEMISTRY,REACTION_SANDBOX,{}'
call InputPushBlock(input,option)
do
call InputReadPflotranString(input,option)
if (InputError(input)) exit
if (InputCheckExit(input,option)) exit
call InputReadCard(input,option,word)
call InputErrorMsg(input,option,'keyword',error_string)
call StringToUpper(word)
select case(trim(word))
'''.format(rxn_name.capitalize(),rxn_name.lower(),rxn_name.upper())
for idx,item in enumerate(var):
if item!='reference_temperature':
sb = sb+'''
case('{}')
call InputReadDouble(input,option,this%{})
call InputErrorMsg(input,option,'{}',error_string)
call InputReadAndConvertUnits(input,this%{},'{}', &
trim(error_string)//',{}',option)
'''.format(item.upper(),item.lower(),item.lower(),item.lower(),
var_unit[idx],item.lower())
else:
sb = sb+'''
case('REFERENCE_TEMPERATURE')
call InputReadDouble(input,option,this%reference_temperature)
call InputErrorMsg(input,option,'reference temperature [C]', &
error_string)
this%reference_temperature = this%reference_temperature + 273.15d0
'''
sb = sb+'''
case default
call InputKeywordUnrecognized(input,word,error_string,option)
end select
enddo
call InputPopBlock(input,option)
end subroutine {}Read
! ************************************************************************** !
'''.format(rxn_name.capitalize())
#----------------------------------------------------------------------------
#
# function setup()
#
#----------------------------------------------------------------------------
sb = sb+'''
subroutine {}Setup(this,reaction,option)
use Reaction_Aux_module, only : reaction_rt_type, GetPrimarySpeciesIDFromName
use Reaction_Immobile_Aux_module, only : GetImmobileSpeciesIDFromName
use Reaction_Mineral_Aux_module, only : GetKineticMineralIDFromName
use Option_module
implicit none
class(reaction_sandbox_{}_type) :: this
class(reaction_rt_type) :: reaction
type(option_type) :: option
character(len=MAXWORDLENGTH) :: word
PetscInt :: irxn
PetscReal, parameter :: per_day_to_per_sec = 1.d0 / 24.d0 / 3600.d0
'''.format(rxn_name.capitalize(),rxn_name.lower())
for idx,item in enumerate(primary_species_charge):
if item.upper()!='BIOMASS':
sb = sb+'''
word = '{}'
this%{}_id = &
GetPrimarySpeciesIDFromName(word,reaction,option)
'''.format(item.upper(),primary_species_nocharge[idx].lower())
else:
sb = sb+'''
word = 'BIOMASS'
this%biomass_id = &
GetImmobileSpeciesIDFromName(word,reaction%immobile,option) + reaction%offset_immobile
'''
sb = sb+'''
if (this%store_cumulative_mass) then
this%offset_auxiliary = reaction%nauxiliary
reaction%nauxiliary = reaction%nauxiliary + {}
endif
end subroutine {}Setup
! ************************************************************************** !
'''.format(len(primary_species_charge)*2,rxn_name.capitalize())
#----------------------------------------------------------------------------
#
# function PlotVariables()
#
#----------------------------------------------------------------------------
sb = sb+'''
subroutine {}AuxiliaryPlotVariables(this,list,reaction,option)
use Option_module
use Reaction_Aux_module
use Output_Aux_module
use Variables_module, only : REACTION_AUXILIARY
implicit none
class(reaction_sandbox_{}_type) :: this
type(output_variable_list_type), pointer :: list
type(option_type) :: option
class(reaction_rt_type) :: reaction
character(len=MAXWORDLENGTH) :: names({})
character(len=MAXWORDLENGTH) :: word
character(len=MAXWORDLENGTH) :: units
PetscInt :: indices({})
PetscInt :: i
'''.format(rxn_name.capitalize(),rxn_name.lower(),len(primary_species_charge),len(primary_species_charge))
for idx,item in enumerate(primary_species_charge):
sb = sb+" names({}) = '{}'\n".format(idx+1,item.upper())
for idx,item in enumerate(primary_species_nocharge):
sb = sb+" indices({}) = {}_MASS_STORAGE_INDEX\n".format(idx+1,item.upper())
sb = sb+'''
if (this%store_cumulative_mass) then
do i = 1, {}
word = trim(names(i)) // ' Rate'
units = 'mol/m^3-sec'
call OutputVariableAddToList(list,word,OUTPUT_RATE,units, &
REACTION_AUXILIARY, &
this%offset_auxiliary+indices(i))
enddo
do i = 1, {}
word = trim(names(i)) // ' Cum. Mass'
units = 'mol/m^3'
call OutputVariableAddToList(list,word,OUTPUT_GENERIC,units, &
REACTION_AUXILIARY, &
this%offset_auxiliary+{}+indices(i))
enddo
endif
end subroutine {}AuxiliaryPlotVariables
! ************************************************************************** !
'''.format(len(primary_species_charge),len(primary_species_charge),len(primary_species_charge),rxn_name.capitalize())
#----------------------------------------------------------------------------
#
# function react()
#
#----------------------------------------------------------------------------
sb = sb+'''
subroutine {}React(this,Residual,Jacobian,compute_derivative, &
rt_auxvar,global_auxvar,material_auxvar,reaction, &
option)
use Option_module
use Reaction_Aux_module
use Material_Aux_class
implicit none
class(reaction_sandbox_{}_type) :: this
type(option_type) :: option
class(reaction_rt_type) :: reaction
! the following arrays must be declared after reaction
PetscReal :: Residual(reaction%ncomp)
PetscReal :: Jacobian(reaction%ncomp,reaction%ncomp)
type(reactive_transport_auxvar_type) :: rt_auxvar
type(global_auxvar_type) :: global_auxvar
class(material_auxvar_type) :: material_auxvar
PetscInt, parameter :: iphase = 1
PetscReal :: L_water
PetscReal :: kg_water
PetscInt :: i, j, irxn
'''.format(rxn_name.capitalize(),rxn_name.lower())
for idx, item in enumerate(primary_species_nocharge):
sb = sb+" PetscReal :: C_{} \n".format(item.lower())
for i in range(nrxn):
sb = sb+" PetscReal :: r{}doc,r{}o2 \n".format(i+1,i+1)
for i in range(nrxn):
sb = sb+" PetscReal :: r{}kin \n".format(i+1)
sb = sb+" PetscReal :: sumkin \n"
for i in range(nrxn):
sb = sb+" PetscReal :: u{} \n".format(i+1)
sb = sb+" PetscReal :: molality_to_molarity\n PetscReal :: temperature_scaling_factor\n PetscReal :: mu_max_scaled\n"
for i in range(nrxn):
sb = sb+" PetscReal :: k{}_scaled \n".format(i+1)
sb = sb+" PetscReal :: k_deg_scaled"
sb = sb+'''
PetscReal :: volume, rate_scale
PetscBool :: compute_derivative
PetscReal :: rate({})
volume = material_auxvar%volume
L_water = material_auxvar%porosity*global_auxvar%sat(iphase)* &
volume*1.d3 ! m^3 -> L
kg_water = material_auxvar%porosity*global_auxvar%sat(iphase)* &
global_auxvar%den_kg(iphase)*volume
molality_to_molarity = global_auxvar%den_kg(iphase)*1.d-3
if (reaction%act_coef_update_frequency /= ACT_COEF_FREQUENCY_OFF) then
option%io_buffer = 'Activity coefficients not currently supported in &
&{}React().'
call printErrMsg(option)
endif
temperature_scaling_factor = 1.d0
if (Initialized(this%activation_energy)) then
temperature_scaling_factor = &
exp(this%activation_energy/IDEAL_GAS_CONSTANT* &
(1.d0/this%reference_temperature-1.d0/(global_auxvar%temp+273.15d0)))
endif
'''.format(nrxn,rxn_name.capitalize())
sb = sb +" ! concentrations are molarities [M]"
for i in primary_species_nocharge:
if i.upper()!='BIOMASS':
sb = sb+'''
C_{} = rt_auxvar%pri_molal(this%{}_id)* &
rt_auxvar%pri_act_coef(this%{}_id)*molality_to_molarity
'''.format(i.lower(),i.lower(),i.lower())
else:
sb = sb+'''
C_biomass = rt_auxvar%immobile(this%biomass_id-reaction%offset_immobile)
'''
sb = sb +'''
mu_max_scaled = this%mu_max * temperature_scaling_factor
k_deg_scaled = this%k_deg * temperature_scaling_factor
'''
sb = sb+generate_rate_expression(primary_species_nocharge, stoi_file, rxn_name)
sb = sb+'''
end subroutine {}React
! ************************************************************************** !
subroutine {}Destroy(this)
use Utility_module
implicit none
class(reaction_sandbox_{}_type) :: this
print *, '{}Destroy Done'
end subroutine {}Destroy
end module Reaction_Sandbox_{}_class
'''.format(rxn_name.capitalize(),rxn_name.capitalize(),rxn_name.lower(),
rxn_name.capitalize(),rxn_name.capitalize(),rxn_name.capitalize())
sandbox_file.write(sb)
print('Sandbox code is generated at {}.'.format(sb_file))
return
def generate_rate_expression(primary_species_nocharge, stoi_file, rxn_name):
    """Generate the Fortran rate-expression fragment for the reaction sandbox.

    Builds, as one large string, the Fortran statements that compute the
    kinetic rates (r*doc, r*o2, r*kin), their normalized weights (u*),
    the per-reaction rate() values, the Residual updates for every primary
    species, and the optional cumulative-mass bookkeeping block.

    Args:
        primary_species_nocharge: species names without charge suffixes;
            each must match a column name in the stoichiometry CSV.
        stoi_file: CSV with one row per reaction (index column 'rxn_ref'
            holding 'r1', 'r2', ...), a 'DOC_formula' column, an 'O2'
            column, and one stoichiometry column per species.
        rxn_name: currently unused in this function; kept for symmetry
            with the other code-generation helpers.

    Returns:
        str: Fortran source to splice into the sandbox React() routine.
    """
    rxn_df = pd.read_csv(stoi_file)
    rxn_df = rxn_df.set_index('rxn_ref')
    # rkin maps each DOC formula to its three Fortran rate statements.
    rkin = {}
    for i in range(len(rxn_df)):
        # doc_name = rxn_df.iloc[i,0]
        # doc_name = re.sub('[-+)]','',doc_name)
        doc_name = rxn_df['DOC_formula'].iloc[i]
        doc_name = doc_name.lower()
        print(doc_name)
        # Stoichiometric coefficient of the DOC species itself: the formula
        # string from 'DOC_formula' is reused as the column name of that
        # species' stoichiometry column.
        doc_sto = rxn_df[rxn_df['DOC_formula'].loc['r'+str(i+1)]].loc['r'+str(i+1)]
        o2_sto = rxn_df['O2'].loc['r'+str(i+1)]
        # Monod-style rate terms for reaction i+1 (Fortran assignments).
        rdoc_i = ' r'+str(i+1)+'doc = '+'exp('+str(doc_sto)+'/(this%vh * C_' + doc_name+'))'
        ro2_i = ' r'+str(i+1)+'o2 = '+'exp('+str(o2_sto)+'/(this%vh * C_o2))'
        rkin_i = ' r'+str(i+1)+'kin = ' + 'mu_max_scaled * '+'r'+str(i+1)+'doc'+' * ' + 'r'+str(i+1)+'o2'
        rkin[doc_name] = [rdoc_i,ro2_i,rkin_i]
    # sumkin: Fortran sum of all rNkin terms, with '&' line continuations
    # on every line except the last.
    sumkin = ' sumkin = '
    for i in range(len(rxn_df)):
        if i == len(rxn_df)-1:
            sumkin = sumkin + ' r' + str(i+1) + 'kin '
        elif i == 0:
            sumkin = sumkin + 'r' + str(i+1) + 'kin + & \n'
        else:
            sumkin = sumkin + ' r' + str(i+1) + 'kin + & \n'
    # u: normalized weight of each reaction (rNkin/sumkin), guarded so a
    # non-positive rate contributes zero.
    u = []
    for i in range(len(rxn_df)):
        u.append(' u' + str(i+1) + ' = 0.d0')
        u.append(' if (r' + str(i+1) + 'kin > 0.d0) u' + str(i+1) + ' = r' + str(i+1) + 'kin/sumkin' )
    # rate(N): weighted rate damped by the biomass carrying-capacity term.
    rate = []
    for i in range(len(rxn_df)):
        rate.append(' rate(' + str(i+1) + ') = u' + str(i+1) + '*r' + str(i+1) + 'kin*(1-C_biomass/this%cc)')
    # res: per-species Residual update lines, keyed by 'this%<species>_id'.
    res = {}
    for i in primary_species_nocharge:
        icol = rxn_df.columns.get_loc(i)
        i = i.lower()
        i_id = 'this%'+i+'_id'
        res_i = [' Residual(' + i_id + ') = Residual(' + i_id +') &']
        space_idx = res_i[0].find('=')
        # first_rate_flag = True
        for irow in range(len(rxn_df)):
            # Skip reactions in which this species does not participate.
            if pd.isnull(rxn_df.iloc[irow,icol]):
                continue
            sto_i = str(rxn_df.iloc[irow,icol])
            if sto_i[0] == '-':
                # if first_rate_flag:
                #     res_i[0] = re.sub('[-]','+',res_i[0])
                #     first_rate_flag = False
                # Negative stoichiometry (consumed) adds to the Residual;
                # positive (produced) subtracts — PFLOTRAN's sign convention.
                sto_i = re.sub('[-]','',sto_i)
                res_i_temp = ' '*space_idx + ' + ' + str(sto_i) + ' * rate(' + str(irow+1) +') * C_biomass * L_water &'
            else:
                res_i_temp = ' '*space_idx + ' - ' + str(sto_i) + ' * rate(' + str(irow+1) +') * C_biomass * L_water &'
            res_i.append(res_i_temp)
        # Strip the trailing ' &' continuation from the final line; the
        # following self-assignment is a no-op kept for byte-compatibility.
        res_i[-1] = res_i[-1][0:-2]
        res_i[-1] = res_i[-1]
        res[i_id] = res_i
    # NOTE(review): assumes 'biomass' is among primary_species_nocharge so
    # the key exists — raises KeyError otherwise; confirm with callers.
    res['this%biomass_id'].append(' Residual(this%biomass_id) = Residual(this%biomass_id) + k_deg_scaled * C_biomass * L_water \n')
    # mass: cumulative-mass accumulation lines, keyed by upper-case species.
    mass = {}
    for i in primary_species_nocharge:
        icol = rxn_df.columns.get_loc(i)
        i = i.lower()
        i_id = i.upper()
        mass_i = [' i = this%offset_auxiliary + ' + i_id + '_MASS_STORAGE_INDEX' ]
        mass_i.append(' rt_auxvar%auxiliary_data(i) = &')
        space_idx = mass_i[0].find('_')
        for irow in range(len(rxn_df)):
            if pd.isnull(rxn_df.iloc[irow,icol]):
                continue
            sto_i = str(rxn_df.iloc[irow,icol])
            # NOTE(review): both branches emit ' + ' here (unlike the
            # Residual loop above, which flips sign) — presumably the
            # cumulative mass is tracked as a magnitude; confirm intent.
            if sto_i[0] == '-':
                sto_i = re.sub('[-]','',sto_i)
                mass_i_temp = ' '*space_idx + ' + ' + str(sto_i) + ' * rate(' + str(irow+1) +') * rate_scale &'
            else:
                mass_i_temp = ' '*space_idx + ' + ' + str(sto_i) + ' * rate(' + str(irow+1) +') * rate_scale &'
            mass_i.append(mass_i_temp)
        # Strip the trailing ' &' from the last line (no-op line follows).
        mass_i[-1] = mass_i[-1][0:-2]
        mass_i[-1] = mass_i[-1]
        mass[i_id] = mass_i
    # Assemble the final fragment: rates, sumkin, weights, rate(), the
    # Residual updates, and the guarded cumulative-mass block.
    rate_expr = '\n'
    for key, values in rkin.items():
        for i in range(len(values)):
            rate_expr = rate_expr+values[i]+'\n'
        rate_expr = rate_expr+'\n'
    rate_expr = rate_expr+sumkin+'\n'
    rate_expr = rate_expr+'\n'
    for i in u:
        rate_expr = rate_expr+i+'\n'
    rate_expr = rate_expr+'\n'
    for i in rate:
        rate_expr = rate_expr+i+'\n'
    rate_expr = rate_expr+'\n'
    for key, values in res.items():
        for i in range(len(values)):
            rate_expr = rate_expr+values[i]+'\n'
    rate_expr = rate_expr+'''
  if (this%store_cumulative_mass) then
    rate_scale = C_biomass * L_water / volume
'''
    for key, values in mass.items():
        for i in range(len(values)):
            rate_expr = rate_expr+values[i]+'\n'
    rate_expr = rate_expr+'''
  endif
'''
    return rate_expr
def update_pflotran_database(stoi_file, dbase_temp_file, dbase_out_file):
    """Write a PFLOTRAN chemistry database with DOC species entries inserted.

    Copies the template database line by line; any line containing ``'C``
    (a carbon-compound entry) is replaced by one entry per DOC formula
    found in the stoichiometry table.

    Args:
        stoi_file: CSV with a 'DOC_formula' column listing DOC species.
        dbase_temp_file: path to the template database file to read.
        dbase_out_file: path of the updated database file to write.
    """
    rxn_df = pd.read_csv(stoi_file)
    print(rxn_df['DOC_formula'].values)
    # Build the replacement block once (loop-invariant): one database
    # entry per DOC formula.
    doc_db = ""
    for i in rxn_df['DOC_formula'].values:
        doc_db += "'{}'".format(i)+" 3.0 0.0 100" + '\n'
    new_db_content = ""
    # BUG FIX: the template handle was previously opened without being
    # closed (resource leak), and doc_db was re-accumulated on every
    # matching line, duplicating entries when the template contained more
    # than one 'C line. Context managers close both files; the DOC block
    # is now emitted verbatim per matching line.
    with open(dbase_temp_file, 'r') as template_file:
        for line in template_file:
            if "'C" not in line:
                new_db_content += line
            else:
                new_db_content += doc_db
    with open(dbase_out_file, "w") as writing_file:
        writing_file.write(new_db_content)
    print('The database is updated.')
    return
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"os.listdir",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"os.mkdir",
"pandas.DataFrame",
"random.choice",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ioff",
"h5py.File",
"os.path.isfile",
"shutil.copy",
"re.s... | [((42544, 42566), 'pandas.read_csv', 'pd.read_csv', (['stoi_file'], {}), '(stoi_file)\n', (42555, 42566), True, 'import pandas as pd\n'), ((56015, 56037), 'pandas.read_csv', 'pd.read_csv', (['stoi_file'], {}), '(stoi_file)\n', (56026, 56037), True, 'import pandas as pd\n'), ((60572, 60594), 'pandas.read_csv', 'pd.read_csv', (['stoi_file'], {}), '(stoi_file)\n', (60583, 60594), True, 'import pandas as pd\n'), ((826, 857), 'installed_clients.DataFileUtilClient.DataFileUtil', 'DataFileUtil', (['self.callback_url'], {}), '(self.callback_url)\n', (838, 857), False, 'from installed_clients.DataFileUtilClient import DataFileUtil\n'), ((945, 971), 'os.path.abspath', 'os.path.abspath', (['"""./data/"""'], {}), "('./data/')\n", (960, 971), False, 'import os\n'), ((1055, 1103), 'os.path.join', 'os.path.join', (["params['shared_folder']", '"""scratch"""'], {}), "(params['shared_folder'], 'scratch')\n", (1067, 1103), False, 'import os\n'), ((1526, 1577), 'os.path.join', 'os.path.join', (['self.data_folder', '"""batch_template.in"""'], {}), "(self.data_folder, 'batch_template.in')\n", (1538, 1577), False, 'import os\n'), ((1607, 1662), 'os.path.join', 'os.path.join', (['self.data_folder', '"""database_template.dat"""'], {}), "(self.data_folder, 'database_template.dat')\n", (1619, 1662), False, 'import os\n'), ((1692, 1737), 'os.path.join', 'os.path.join', (['self.scratch_folder', '"""batch.in"""'], {}), "(self.scratch_folder, 'batch.in')\n", (1704, 1737), False, 'import os\n'), ((1767, 1816), 'os.path.join', 'os.path.join', (['self.scratch_folder', '"""database.dat"""'], {}), "(self.scratch_folder, 'database.dat')\n", (1779, 1816), False, 'import os\n'), ((1846, 1894), 'os.path.join', 'os.path.join', (['self.scratch_folder', '"""rxn_fba.csv"""'], {}), "(self.scratch_folder, 'rxn_fba.csv')\n", (1858, 1894), False, 'import os\n'), ((1924, 1972), 'os.path.join', 'os.path.join', (['self.scratch_folder', '"""cpd_fba.csv"""'], {}), "(self.scratch_folder, 'cpd_fba.csv')\n", 
(1936, 1972), False, 'import os\n'), ((2091, 2122), 'installed_clients.DataFileUtilClient.DataFileUtil', 'DataFileUtil', (['self.callback_url'], {}), '(self.callback_url)\n', (2103, 2122), False, 'from installed_clients.DataFileUtilClient import DataFileUtil\n'), ((2653, 2686), 'pandas.DataFrame', 'pd.DataFrame', (["{'formula': [None]}"], {}), "({'formula': [None]})\n", (2665, 2686), True, 'import pandas as pd\n'), ((3625, 3696), 'pandas.DataFrame', 'pd.DataFrame', (["{'rxn_ref': rxn_ref, 'rxn_id': None, 'DOC_formula': None}"], {}), "({'rxn_ref': rxn_ref, 'rxn_id': None, 'DOC_formula': None})\n", (3637, 3696), True, 'import pandas as pd\n'), ((5642, 5710), 'os.path.join', 'os.path.join', (['self.scratch_folder', '"""reaction_sandbox_pnnl_cyber.F90"""'], {}), "(self.scratch_folder, 'reaction_sandbox_pnnl_cyber.F90')\n", (5654, 5710), False, 'import os\n'), ((6334, 6356), 'shutil.copy', 'copy', (['sb_file', 'src_dir'], {}), '(sb_file, src_dir)\n', (6338, 6356), False, 'from shutil import copy\n'), ((7539, 7584), 'os.path.join', 'os.path.join', (['self.scratch_folder', '"""batch.h5"""'], {}), "(self.scratch_folder, 'batch.h5')\n", (7551, 7584), False, 'import os\n'), ((7595, 7618), 'os.path.isfile', 'os.path.isfile', (['h5_file'], {}), '(h5_file)\n', (7609, 7618), False, 'import os\n'), ((9411, 9454), 'os.path.join', 'os.path.join', (['self.scratch_folder', 'fig_name'], {}), '(self.scratch_folder, fig_name)\n', (9423, 9454), False, 'import os\n'), ((9964, 9986), 'pandas.read_csv', 'pd.read_csv', (['stoi_file'], {}), '(stoi_file)\n', (9975, 9986), True, 'import pandas as pd\n'), ((10005, 10027), 'pandas.read_csv', 'pd.read_csv', (['init_file'], {}), '(init_file)\n', (10016, 10027), True, 'import pandas as pd\n'), ((13303, 13327), 'h5py.File', 'h5py.File', (['h5_file', '"""r+"""'], {}), "(h5_file, 'r+')\n", (13312, 13327), False, 'import h5py\n'), ((15146, 15172), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(1)', 'dpi': '(150)'}), '(num=1, dpi=150)\n', 
(15156, 15172), True, 'import matplotlib.pyplot as plt\n'), ((15628, 15663), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Time (%s)' % time_unit)"], {}), "('Time (%s)' % time_unit)\n", (15638, 15663), True, 'import matplotlib.pyplot as plt\n'), ((15708, 15726), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (15718, 15726), True, 'import matplotlib.pyplot as plt\n'), ((15735, 15821), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)', 'loc': '"""upper center"""', 'bbox_to_anchor': '(0.5, -0.15)', 'ncol': '(3)'}), "(frameon=False, loc='upper center', bbox_to_anchor=(0.5, -0.15),\n ncol=3)\n", (15745, 15821), True, 'import matplotlib.pyplot as plt\n'), ((15877, 15920), 'os.path.join', 'os.path.join', (['self.scratch_folder', 'fig_name'], {}), '(self.scratch_folder, fig_name)\n', (15889, 15920), False, 'import os\n'), ((15932, 15983), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'dpi': '(150)', 'bbox_inches': '"""tight"""'}), "(fig_path, dpi=150, bbox_inches='tight')\n", (15943, 15983), True, 'import matplotlib.pyplot as plt\n'), ((15995, 16019), 'os.path.isfile', 'os.path.isfile', (['fig_path'], {}), '(fig_path)\n', (16009, 16019), False, 'import os\n'), ((16234, 16276), 'os.path.join', 'os.path.join', (['self.shared_folder', '"""output"""'], {}), "(self.shared_folder, 'output')\n", (16246, 16276), False, 'import os\n'), ((16285, 16314), 'os.makedirs', 'os.makedirs', (['output_directory'], {}), '(output_directory)\n', (16296, 16314), False, 'import os\n'), ((16382, 16428), 'os.path.join', 'os.path.join', (['output_directory', '"""summary.html"""'], {}), "(output_directory, 'summary.html')\n", (16394, 16428), False, 'import os\n'), ((16529, 16572), 'os.path.join', 'os.path.join', (['self.scratch_folder', 'fig_name'], {}), '(self.scratch_folder, fig_name)\n', (16541, 16572), False, 'import os\n'), ((16600, 16652), 'os.path.join', 'os.path.join', (['self.scratch_folder', 'pflotran_out_name'], {}), 
'(self.scratch_folder, pflotran_out_name)\n', (16612, 16652), False, 'import os\n'), ((16663, 16687), 'os.path.isfile', 'os.path.isfile', (['fig_path'], {}), '(fig_path)\n', (16677, 16687), False, 'import os\n'), ((16849, 16882), 'os.path.isfile', 'os.path.isfile', (['pflotran_out_path'], {}), '(pflotran_out_path)\n', (16863, 16882), False, 'import os\n'), ((17049, 17093), 'shutil.copy', 'copy', (['fig_path', '"""/kb/module/work/tmp/output"""'], {}), "(fig_path, '/kb/module/work/tmp/output')\n", (17053, 17093), False, 'from shutil import copy\n'), ((17101, 17154), 'shutil.copy', 'copy', (['pflotran_out_path', '"""/kb/module/work/tmp/output"""'], {}), "(pflotran_out_path, '/kb/module/work/tmp/output')\n", (17105, 17154), False, 'from shutil import copy\n'), ((19787, 19817), 'installed_clients.KBaseReportClient.KBaseReport', 'KBaseReport', (['self.callback_url'], {}), '(self.callback_url)\n', (19798, 19817), False, 'from installed_clients.KBaseReportClient import KBaseReport\n'), ((20324, 20355), 'installed_clients.DataFileUtilClient.DataFileUtil', 'DataFileUtil', (['self.callback_url'], {}), '(self.callback_url)\n', (20336, 20355), False, 'from installed_clients.DataFileUtilClient import DataFileUtil\n'), ((20443, 20469), 'os.path.abspath', 'os.path.abspath', (['"""./data/"""'], {}), "('./data/')\n", (20458, 20469), False, 'import os\n'), ((20553, 20601), 'os.path.join', 'os.path.join', (["params['shared_folder']", '"""scratch"""'], {}), "(params['shared_folder'], 'scratch')\n", (20565, 20601), False, 'import os\n'), ((21021, 21073), 'os.path.join', 'os.path.join', (['self.data_folder', '"""column_template.in"""'], {}), "(self.data_folder, 'column_template.in')\n", (21033, 21073), False, 'import os\n'), ((21103, 21158), 'os.path.join', 'os.path.join', (['self.data_folder', '"""database_template.dat"""'], {}), "(self.data_folder, 'database_template.dat')\n", (21115, 21158), False, 'import os\n'), ((21188, 21234), 'os.path.join', 'os.path.join', 
(['self.scratch_folder', '"""column.in"""'], {}), "(self.scratch_folder, 'column.in')\n", (21200, 21234), False, 'import os\n'), ((21264, 21313), 'os.path.join', 'os.path.join', (['self.scratch_folder', '"""database.dat"""'], {}), "(self.scratch_folder, 'database.dat')\n", (21276, 21313), False, 'import os\n'), ((21343, 21391), 'os.path.join', 'os.path.join', (['self.scratch_folder', '"""rxn_fba.csv"""'], {}), "(self.scratch_folder, 'rxn_fba.csv')\n", (21355, 21391), False, 'import os\n'), ((21421, 21469), 'os.path.join', 'os.path.join', (['self.scratch_folder', '"""cpd_fba.csv"""'], {}), "(self.scratch_folder, 'cpd_fba.csv')\n", (21433, 21469), False, 'import os\n'), ((21588, 21619), 'installed_clients.DataFileUtilClient.DataFileUtil', 'DataFileUtil', (['self.callback_url'], {}), '(self.callback_url)\n', (21600, 21619), False, 'from installed_clients.DataFileUtilClient import DataFileUtil\n'), ((22333, 22366), 'pandas.DataFrame', 'pd.DataFrame', (["{'formula': [None]}"], {}), "({'formula': [None]})\n", (22345, 22366), True, 'import pandas as pd\n'), ((23557, 23628), 'pandas.DataFrame', 'pd.DataFrame', (["{'rxn_ref': rxn_ref, 'rxn_id': None, 'DOC_formula': None}"], {}), "({'rxn_ref': rxn_ref, 'rxn_id': None, 'DOC_formula': None})\n", (23569, 23628), True, 'import pandas as pd\n'), ((25588, 25656), 'os.path.join', 'os.path.join', (['self.scratch_folder', '"""reaction_sandbox_pnnl_cyber.F90"""'], {}), "(self.scratch_folder, 'reaction_sandbox_pnnl_cyber.F90')\n", (25600, 25656), False, 'import os\n'), ((26280, 26302), 'shutil.copy', 'copy', (['sb_file', 'src_dir'], {}), '(sb_file, src_dir)\n', (26284, 26302), False, 'from shutil import copy\n'), ((27510, 27556), 'os.path.join', 'os.path.join', (['self.scratch_folder', '"""column.h5"""'], {}), "(self.scratch_folder, 'column.h5')\n", (27522, 27556), False, 'import os\n'), ((27567, 27590), 'os.path.isfile', 'os.path.isfile', (['h5_file'], {}), '(h5_file)\n', (27581, 27590), False, 'import os\n'), ((29986, 30008), 
'pandas.read_csv', 'pd.read_csv', (['stoi_file'], {}), '(stoi_file)\n', (29997, 30008), True, 'import pandas as pd\n'), ((30027, 30049), 'pandas.read_csv', 'pd.read_csv', (['icbc_file'], {}), '(icbc_file)\n', (30038, 30049), True, 'import pandas as pd\n'), ((35075, 35099), 'h5py.File', 'h5py.File', (['h5_file', '"""r+"""'], {}), "(h5_file, 'r+')\n", (35084, 35099), False, 'import h5py\n'), ((36918, 36944), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(1)', 'dpi': '(150)'}), '(num=1, dpi=150)\n', (36928, 36944), True, 'import matplotlib.pyplot as plt\n'), ((37400, 37435), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Time (%s)' % time_unit)"], {}), "('Time (%s)' % time_unit)\n", (37410, 37435), True, 'import matplotlib.pyplot as plt\n'), ((37480, 37498), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (37490, 37498), True, 'import matplotlib.pyplot as plt\n'), ((37507, 37593), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)', 'loc': '"""upper center"""', 'bbox_to_anchor': '(0.5, -0.15)', 'ncol': '(3)'}), "(frameon=False, loc='upper center', bbox_to_anchor=(0.5, -0.15),\n ncol=3)\n", (37517, 37593), True, 'import matplotlib.pyplot as plt\n'), ((37649, 37692), 'os.path.join', 'os.path.join', (['self.scratch_folder', 'fig_name'], {}), '(self.scratch_folder, fig_name)\n', (37661, 37692), False, 'import os\n'), ((37704, 37755), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_path'], {'dpi': '(150)', 'bbox_inches': '"""tight"""'}), "(fig_path, dpi=150, bbox_inches='tight')\n", (37715, 37755), True, 'import matplotlib.pyplot as plt\n'), ((37767, 37791), 'os.path.isfile', 'os.path.isfile', (['fig_path'], {}), '(fig_path)\n', (37781, 37791), False, 'import os\n'), ((38006, 38048), 'os.path.join', 'os.path.join', (['self.shared_folder', '"""output"""'], {}), "(self.shared_folder, 'output')\n", (38018, 38048), False, 'import os\n'), ((38057, 38086), 'os.makedirs', 'os.makedirs', (['output_directory'], {}), 
'(output_directory)\n', (38068, 38086), False, 'import os\n'), ((38154, 38200), 'os.path.join', 'os.path.join', (['output_directory', '"""summary.html"""'], {}), "(output_directory, 'summary.html')\n", (38166, 38200), False, 'import os\n'), ((38301, 38344), 'os.path.join', 'os.path.join', (['self.scratch_folder', 'fig_name'], {}), '(self.scratch_folder, fig_name)\n', (38313, 38344), False, 'import os\n'), ((38372, 38424), 'os.path.join', 'os.path.join', (['self.scratch_folder', 'pflotran_out_name'], {}), '(self.scratch_folder, pflotran_out_name)\n', (38384, 38424), False, 'import os\n'), ((38435, 38459), 'os.path.isfile', 'os.path.isfile', (['fig_path'], {}), '(fig_path)\n', (38449, 38459), False, 'import os\n'), ((38621, 38654), 'os.path.isfile', 'os.path.isfile', (['pflotran_out_path'], {}), '(pflotran_out_path)\n', (38635, 38654), False, 'import os\n'), ((42088, 42118), 'installed_clients.KBaseReportClient.KBaseReport', 'KBaseReport', (['self.callback_url'], {}), '(self.callback_url)\n', (42099, 42118), False, 'from installed_clients.KBaseReportClient import KBaseReport\n'), ((1198, 1227), 'os.mkdir', 'os.mkdir', (['self.scratch_folder'], {}), '(self.scratch_folder)\n', (1206, 1227), False, 'import os\n'), ((3904, 3954), 'random.choice', 'random.choice', (["fba_model['data']['modelreactions']"], {}), "(fba_model['data']['modelreactions'])\n", (3917, 3954), False, 'import random\n'), ((6370, 6381), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6379, 6381), False, 'import os\n'), ((6689, 6720), 'os.listdir', 'os.listdir', (['self.scratch_folder'], {}), '(self.scratch_folder)\n', (6699, 6720), False, 'import os\n'), ((7487, 7518), 'os.listdir', 'os.listdir', (['self.scratch_folder'], {}), '(self.scratch_folder)\n', (7497, 7518), False, 'import os\n'), ((14469, 14504), 'numpy.absolute', 'np.absolute', (['(x_coord - obs_coord[0])'], {}), '(x_coord - obs_coord[0])\n', (14480, 14504), True, 'import numpy as np\n'), ((14530, 14565), 'numpy.absolute', 'np.absolute', 
(['(y_coord - obs_coord[1])'], {}), '(y_coord - obs_coord[1])\n', (14541, 14565), True, 'import numpy as np\n'), ((14591, 14626), 'numpy.absolute', 'np.absolute', (['(z_coord - obs_coord[2])'], {}), '(z_coord - obs_coord[2])\n', (14602, 14626), True, 'import numpy as np\n'), ((15608, 15618), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (15616, 15618), True, 'import matplotlib.pyplot as plt\n'), ((17975, 18002), 'os.path.basename', 'os.path.basename', (['html_file'], {}), '(html_file)\n', (17991, 18002), False, 'import os\n'), ((18029, 18056), 'os.path.basename', 'os.path.basename', (['html_file'], {}), '(html_file)\n', (18045, 18056), False, 'import os\n'), ((20693, 20722), 'os.mkdir', 'os.mkdir', (['self.scratch_folder'], {}), '(self.scratch_folder)\n', (20701, 20722), False, 'import os\n'), ((23836, 23886), 'random.choice', 'random.choice', (["fba_model['data']['modelreactions']"], {}), "(fba_model['data']['modelreactions'])\n", (23849, 23886), False, 'import random\n'), ((26316, 26327), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (26325, 26327), False, 'import os\n'), ((26635, 26666), 'os.listdir', 'os.listdir', (['self.scratch_folder'], {}), '(self.scratch_folder)\n', (26645, 26666), False, 'import os\n'), ((27458, 27489), 'os.listdir', 'os.listdir', (['self.scratch_folder'], {}), '(self.scratch_folder)\n', (27468, 27489), False, 'import os\n'), ((36241, 36276), 'numpy.absolute', 'np.absolute', (['(x_coord - obs_coord[0])'], {}), '(x_coord - obs_coord[0])\n', (36252, 36276), True, 'import numpy as np\n'), ((36302, 36337), 'numpy.absolute', 'np.absolute', (['(y_coord - obs_coord[1])'], {}), '(y_coord - obs_coord[1])\n', (36313, 36337), True, 'import numpy as np\n'), ((36363, 36398), 'numpy.absolute', 'np.absolute', (['(z_coord - obs_coord[2])'], {}), '(z_coord - obs_coord[2])\n', (36374, 36398), True, 'import numpy as np\n'), ((37380, 37390), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (37388, 37390), True, 'import matplotlib.pyplot as 
plt\n'), ((40276, 40303), 'os.path.basename', 'os.path.basename', (['html_file'], {}), '(html_file)\n', (40292, 40303), False, 'import os\n'), ((40330, 40357), 'os.path.basename', 'os.path.basename', (['html_file'], {}), '(html_file)\n', (40346, 40357), False, 'import os\n'), ((57804, 57838), 'pandas.isnull', 'pd.isnull', (['rxn_df.iloc[irow, icol]'], {}), '(rxn_df.iloc[irow, icol])\n', (57813, 57838), True, 'import pandas as pd\n'), ((59052, 59086), 'pandas.isnull', 'pd.isnull', (['rxn_df.iloc[irow, icol]'], {}), '(rxn_df.iloc[irow, icol])\n', (59061, 59086), True, 'import pandas as pd\n'), ((7959, 7988), 'os.path.basename', 'os.path.basename', (['cpd_csv_fba'], {}), '(cpd_csv_fba)\n', (7975, 7988), False, 'import os\n'), ((8012, 8041), 'os.path.basename', 'os.path.basename', (['cpd_csv_fba'], {}), '(cpd_csv_fba)\n', (8028, 8041), False, 'import os\n'), ((8185, 8215), 'os.path.basename', 'os.path.basename', (['stoi_csv_fba'], {}), '(stoi_csv_fba)\n', (8201, 8215), False, 'import os\n'), ((8239, 8269), 'os.path.basename', 'os.path.basename', (['stoi_csv_fba'], {}), '(stoi_csv_fba)\n', (8255, 8269), False, 'import os\n'), ((8433, 8458), 'os.path.basename', 'os.path.basename', (['sb_file'], {}), '(sb_file)\n', (8449, 8458), False, 'import os\n'), ((8482, 8507), 'os.path.basename', 'os.path.basename', (['sb_file'], {}), '(sb_file)\n', (8498, 8507), False, 'import os\n'), ((8676, 8708), 'os.path.basename', 'os.path.basename', (['pflotran_input'], {}), '(pflotran_input)\n', (8692, 8708), False, 'import os\n'), ((8732, 8764), 'os.path.basename', 'os.path.basename', (['pflotran_input'], {}), '(pflotran_input)\n', (8748, 8764), False, 'import os\n'), ((8937, 8966), 'os.path.basename', 'os.path.basename', (['pflotran_db'], {}), '(pflotran_db)\n', (8953, 8966), False, 'import os\n'), ((8990, 9019), 'os.path.basename', 'os.path.basename', (['pflotran_db'], {}), '(pflotran_db)\n', (9006, 9019), False, 'import os\n'), ((9188, 9213), 'os.path.basename', 'os.path.basename', 
(['h5_file'], {}), '(h5_file)\n', (9204, 9213), False, 'import os\n'), ((9237, 9262), 'os.path.basename', 'os.path.basename', (['h5_file'], {}), '(h5_file)\n', (9253, 9262), False, 'import os\n'), ((9541, 9567), 'os.path.basename', 'os.path.basename', (['fig_file'], {}), '(fig_file)\n', (9557, 9567), False, 'import os\n'), ((9591, 9617), 'os.path.basename', 'os.path.basename', (['fig_file'], {}), '(fig_file)\n', (9607, 9617), False, 'import os\n'), ((27933, 27962), 'os.path.basename', 'os.path.basename', (['cpd_csv_fba'], {}), '(cpd_csv_fba)\n', (27949, 27962), False, 'import os\n'), ((27986, 28015), 'os.path.basename', 'os.path.basename', (['cpd_csv_fba'], {}), '(cpd_csv_fba)\n', (28002, 28015), False, 'import os\n'), ((28159, 28189), 'os.path.basename', 'os.path.basename', (['stoi_csv_fba'], {}), '(stoi_csv_fba)\n', (28175, 28189), False, 'import os\n'), ((28213, 28243), 'os.path.basename', 'os.path.basename', (['stoi_csv_fba'], {}), '(stoi_csv_fba)\n', (28229, 28243), False, 'import os\n'), ((28407, 28432), 'os.path.basename', 'os.path.basename', (['sb_file'], {}), '(sb_file)\n', (28423, 28432), False, 'import os\n'), ((28456, 28481), 'os.path.basename', 'os.path.basename', (['sb_file'], {}), '(sb_file)\n', (28472, 28481), False, 'import os\n'), ((28650, 28682), 'os.path.basename', 'os.path.basename', (['pflotran_input'], {}), '(pflotran_input)\n', (28666, 28682), False, 'import os\n'), ((28706, 28738), 'os.path.basename', 'os.path.basename', (['pflotran_input'], {}), '(pflotran_input)\n', (28722, 28738), False, 'import os\n'), ((28915, 28944), 'os.path.basename', 'os.path.basename', (['pflotran_db'], {}), '(pflotran_db)\n', (28931, 28944), False, 'import os\n'), ((28968, 28997), 'os.path.basename', 'os.path.basename', (['pflotran_db'], {}), '(pflotran_db)\n', (28984, 28997), False, 'import os\n'), ((29170, 29195), 'os.path.basename', 'os.path.basename', (['h5_file'], {}), '(h5_file)\n', (29186, 29195), False, 'import os\n'), ((29219, 29244), 'os.path.basename', 
'os.path.basename', (['h5_file'], {}), '(h5_file)\n', (29235, 29244), False, 'import os\n'), ((58112, 58136), 're.sub', 're.sub', (['"""[-]"""', '""""""', 'sto_i'], {}), "('[-]', '', sto_i)\n", (58118, 58136), False, 'import re\n'), ((59217, 59241), 're.sub', 're.sub', (['"""[-]"""', '""""""', 'sto_i'], {}), "('[-]', '', sto_i)\n", (59223, 59241), False, 'import re\n'), ((14123, 14167), 'numpy.linspace', 'np.linspace', (['bound[0]', 'bound[1]', '(nxyz[0] + 1)'], {}), '(bound[0], bound[1], nxyz[0] + 1)\n', (14134, 14167), True, 'import numpy as np\n'), ((14169, 14213), 'numpy.linspace', 'np.linspace', (['bound[0]', 'bound[1]', '(nxyz[0] + 1)'], {}), '(bound[0], bound[1], nxyz[0] + 1)\n', (14180, 14213), True, 'import numpy as np\n'), ((14236, 14280), 'numpy.linspace', 'np.linspace', (['bound[2]', 'bound[3]', '(nxyz[1] + 1)'], {}), '(bound[2], bound[3], nxyz[1] + 1)\n', (14247, 14280), True, 'import numpy as np\n'), ((14282, 14326), 'numpy.linspace', 'np.linspace', (['bound[2]', 'bound[3]', '(nxyz[1] + 1)'], {}), '(bound[2], bound[3], nxyz[1] + 1)\n', (14293, 14326), True, 'import numpy as np\n'), ((14349, 14393), 'numpy.linspace', 'np.linspace', (['bound[4]', 'bound[5]', '(nxyz[2] + 1)'], {}), '(bound[4], bound[5], nxyz[2] + 1)\n', (14360, 14393), True, 'import numpy as np\n'), ((14395, 14439), 'numpy.linspace', 'np.linspace', (['bound[4]', 'bound[5]', '(nxyz[2] + 1)'], {}), '(bound[4], bound[5], nxyz[2] + 1)\n', (14406, 14439), True, 'import numpy as np\n'), ((15545, 15595), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'var_value[i, :]'], {'label': 'var_name[i]'}), '(time, var_value[i, :], label=var_name[i])\n', (15553, 15595), True, 'import matplotlib.pyplot as plt\n'), ((35895, 35939), 'numpy.linspace', 'np.linspace', (['bound[0]', 'bound[1]', '(nxyz[0] + 1)'], {}), '(bound[0], bound[1], nxyz[0] + 1)\n', (35906, 35939), True, 'import numpy as np\n'), ((35941, 35985), 'numpy.linspace', 'np.linspace', (['bound[0]', 'bound[1]', '(nxyz[0] + 1)'], {}), '(bound[0], 
bound[1], nxyz[0] + 1)\n', (35952, 35985), True, 'import numpy as np\n'), ((36008, 36052), 'numpy.linspace', 'np.linspace', (['bound[2]', 'bound[3]', '(nxyz[1] + 1)'], {}), '(bound[2], bound[3], nxyz[1] + 1)\n', (36019, 36052), True, 'import numpy as np\n'), ((36054, 36098), 'numpy.linspace', 'np.linspace', (['bound[2]', 'bound[3]', '(nxyz[1] + 1)'], {}), '(bound[2], bound[3], nxyz[1] + 1)\n', (36065, 36098), True, 'import numpy as np\n'), ((36121, 36165), 'numpy.linspace', 'np.linspace', (['bound[4]', 'bound[5]', '(nxyz[2] + 1)'], {}), '(bound[4], bound[5], nxyz[2] + 1)\n', (36132, 36165), True, 'import numpy as np\n'), ((36167, 36211), 'numpy.linspace', 'np.linspace', (['bound[4]', 'bound[5]', '(nxyz[2] + 1)'], {}), '(bound[4], bound[5], nxyz[2] + 1)\n', (36178, 36211), True, 'import numpy as np\n'), ((37317, 37367), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'var_value[i, :]'], {'label': 'var_name[i]'}), '(time, var_value[i, :], label=var_name[i])\n', (37325, 37367), True, 'import matplotlib.pyplot as plt\n'), ((15332, 15388), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'var_value[i, :]'], {'label': '"""DOCs"""', 'color': '"""k"""'}), "(time, var_value[i, :], label='DOCs', color='k')\n", (15340, 15388), True, 'import matplotlib.pyplot as plt\n'), ((15468, 15510), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'var_value[i, :]'], {'color': '"""k"""'}), "(time, var_value[i, :], color='k')\n", (15476, 15510), True, 'import matplotlib.pyplot as plt\n'), ((37104, 37160), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'var_value[i, :]'], {'label': '"""DOCs"""', 'color': '"""k"""'}), "(time, var_value[i, :], label='DOCs', color='k')\n", (37112, 37160), True, 'import matplotlib.pyplot as plt\n'), ((37240, 37282), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'var_value[i, :]'], {'color': '"""k"""'}), "(time, var_value[i, :], color='k')\n", (37248, 37282), True, 'import matplotlib.pyplot as plt\n')] |
import copy
import os
import tempfile
import time
import numpy as np
from ray.rllib.agents.pg import PGTrainer, PGTorchPolicy
from ray.tune.logger import UnifiedLogger
from ray.tune.result import DEFAULT_RESULTS_DIR
from marltoolbox.examples.rllib_api.pg_ipd import get_rllib_config
from marltoolbox.envs.matrix_sequential_social_dilemma import (
IteratedPrisonersDilemma,
)
from marltoolbox.utils import log, miscellaneous
from marltoolbox.utils import rollout
CONSTANT_REWARD = 1.0
EPI_LENGTH = 33
class FakeEnvWtCstReward(IteratedPrisonersDilemma):
def step(self, actions: dict):
observations, rewards, epi_is_done, info = super().step(actions)
for k in rewards.keys():
rewards[k] = CONSTANT_REWARD
return observations, rewards, epi_is_done, info
def make_FakePolicyWtDefinedActions(list_actions_to_play):
class FakePolicyWtDefinedActions(PGTorchPolicy):
def compute_actions(self, *args, **kwargs):
action = list_actions_to_play.pop(0)
return np.array([action]), [], {}
return FakePolicyWtDefinedActions
def init_worker(actions_list=None):
train_n_replicates = 1
debug = True
stop_iters = 200
tf = False
seeds = miscellaneous.get_random_seeds(train_n_replicates)
exp_name, _ = log.log_in_current_day_dir("testing")
rllib_config, stop_config = get_rllib_config(seeds, debug, stop_iters, tf)
rllib_config["env"] = FakeEnvWtCstReward
rllib_config["env_config"]["max_steps"] = EPI_LENGTH
rllib_config["seed"] = int(time.time())
if actions_list is not None:
for policy_id in FakeEnvWtCstReward({}).players_ids:
policy_to_modify = list(
rllib_config["multiagent"]["policies"][policy_id]
)
policy_to_modify[0] = make_FakePolicyWtDefinedActions(
copy.deepcopy(actions_list)
)
rllib_config["multiagent"]["policies"][
policy_id
] = policy_to_modify
pg_trainer = PGTrainer(
rllib_config, logger_creator=_get_logger_creator(exp_name)
)
return pg_trainer.workers._local_worker
def _get_logger_creator(exp_name):
logdir_prefix = exp_name + "/"
tail, head = os.path.split(exp_name)
tail_bis, _ = os.path.split(tail)
def default_logger_creator(config):
"""Creates a Unified logger with a default logdir prefix
containing the agent name and the env id
"""
if not os.path.exists(DEFAULT_RESULTS_DIR):
os.makedirs(DEFAULT_RESULTS_DIR)
if not os.path.exists(os.path.join(DEFAULT_RESULTS_DIR, tail_bis)):
os.mkdir(os.path.join(DEFAULT_RESULTS_DIR, tail_bis))
if not os.path.exists(os.path.join(DEFAULT_RESULTS_DIR, tail)):
os.mkdir(os.path.join(DEFAULT_RESULTS_DIR, tail))
if not os.path.exists(os.path.join(DEFAULT_RESULTS_DIR, exp_name)):
os.mkdir(os.path.join(DEFAULT_RESULTS_DIR, exp_name))
logdir = tempfile.mkdtemp(
prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR
)
return UnifiedLogger(config, logdir, loggers=None)
return default_logger_creator
def test_rollout_constant_reward():
policy_agent_mapping = lambda policy_id: policy_id
def assert_(rollout_length, num_episodes):
worker = init_worker()
rollout_results = rollout.internal_rollout(
worker,
num_steps=rollout_length,
policy_agent_mapping=policy_agent_mapping,
reset_env_before=True,
num_episodes=num_episodes,
)
assert (
rollout_results._num_episodes == num_episodes
or rollout_results._total_steps == rollout_length
)
steps_in_last_epi = rollout_results._current_rollout
if rollout_results._total_steps == rollout_length:
n_steps_in_last_epi = rollout_results._total_steps % EPI_LENGTH
elif rollout_results._num_episodes == num_episodes:
n_steps_in_last_epi = EPI_LENGTH
# Verify rewards
for policy_id in worker.env.players_ids:
rewards = [step[3][policy_id] for step in steps_in_last_epi]
assert sum(rewards) == n_steps_in_last_epi * CONSTANT_REWARD
assert len(rewards) == n_steps_in_last_epi
all_steps = []
for epi_rollout in rollout_results._rollouts:
all_steps.extend(epi_rollout)
for policy_id in worker.env.players_ids:
rewards = [step[3][policy_id] for step in all_steps]
assert (
sum(rewards)
== min(rollout_length, num_episodes * EPI_LENGTH)
* CONSTANT_REWARD
)
assert len(rewards) == min(
rollout_length, num_episodes * EPI_LENGTH
)
assert_(rollout_length=20, num_episodes=1)
assert_(rollout_length=40, num_episodes=1)
assert_(rollout_length=77, num_episodes=2)
assert_(rollout_length=77, num_episodes=3)
assert_(rollout_length=6, num_episodes=3)
def test_rollout_specified_actions():
policy_agent_mapping = lambda policy_id: policy_id
def assert_(rollout_length, num_episodes, actions_list):
worker = init_worker(actions_list=actions_list)
rollout_results = rollout.internal_rollout(
worker,
num_steps=rollout_length,
policy_agent_mapping=policy_agent_mapping,
reset_env_before=True,
num_episodes=num_episodes,
)
assert (
rollout_results._num_episodes == num_episodes
or rollout_results._total_steps == rollout_length
)
steps_in_last_epi = rollout_results._current_rollout
if rollout_results._total_steps == rollout_length:
n_steps_in_last_epi = rollout_results._total_steps % EPI_LENGTH
elif rollout_results._num_episodes == num_episodes:
n_steps_in_last_epi = EPI_LENGTH
# Verify actions
all_steps = []
for epi_rollout in rollout_results._rollouts:
all_steps.extend(epi_rollout)
for policy_id in worker.env.players_ids:
actions_played = [step[1][policy_id] for step in all_steps]
assert len(actions_played) == min(
rollout_length, num_episodes * EPI_LENGTH
)
print(actions_list[1 : 1 + len(all_steps)], actions_played)
for action_required, action_played in zip(
actions_list[: len(all_steps)], actions_played
):
assert action_required == action_played
for policy_id in worker.env.players_ids:
actions_played = [step[1][policy_id] for step in steps_in_last_epi]
assert len(actions_played) == n_steps_in_last_epi
actions_required_during_last_epi = actions_list[: len(all_steps)][
-n_steps_in_last_epi:
]
for action_required, action_played in zip(
actions_required_during_last_epi, actions_played
):
assert action_required == action_played
assert_(rollout_length=20, num_episodes=1, actions_list=[0, 1] * 100)
assert_(rollout_length=40, num_episodes=1, actions_list=[1, 1] * 100)
assert_(rollout_length=77, num_episodes=2, actions_list=[0, 0] * 100)
assert_(rollout_length=77, num_episodes=3, actions_list=[0, 1] * 100)
assert_(rollout_length=6, num_episodes=3, actions_list=[1, 0] * 100)
| [
"os.path.exists",
"marltoolbox.utils.rollout.internal_rollout",
"ray.tune.logger.UnifiedLogger",
"os.makedirs",
"os.path.join",
"os.path.split",
"numpy.array",
"marltoolbox.utils.log.log_in_current_day_dir",
"tempfile.mkdtemp",
"copy.deepcopy",
"marltoolbox.examples.rllib_api.pg_ipd.get_rllib_co... | [((1231, 1281), 'marltoolbox.utils.miscellaneous.get_random_seeds', 'miscellaneous.get_random_seeds', (['train_n_replicates'], {}), '(train_n_replicates)\n', (1261, 1281), False, 'from marltoolbox.utils import log, miscellaneous\n'), ((1300, 1337), 'marltoolbox.utils.log.log_in_current_day_dir', 'log.log_in_current_day_dir', (['"""testing"""'], {}), "('testing')\n", (1326, 1337), False, 'from marltoolbox.utils import log, miscellaneous\n'), ((1371, 1417), 'marltoolbox.examples.rllib_api.pg_ipd.get_rllib_config', 'get_rllib_config', (['seeds', 'debug', 'stop_iters', 'tf'], {}), '(seeds, debug, stop_iters, tf)\n', (1387, 1417), False, 'from marltoolbox.examples.rllib_api.pg_ipd import get_rllib_config\n'), ((2246, 2269), 'os.path.split', 'os.path.split', (['exp_name'], {}), '(exp_name)\n', (2259, 2269), False, 'import os\n'), ((2288, 2307), 'os.path.split', 'os.path.split', (['tail'], {}), '(tail)\n', (2301, 2307), False, 'import os\n'), ((1551, 1562), 'time.time', 'time.time', ([], {}), '()\n', (1560, 1562), False, 'import time\n'), ((3007, 3070), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': 'logdir_prefix', 'dir': 'DEFAULT_RESULTS_DIR'}), '(prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)\n', (3023, 3070), False, 'import tempfile\n'), ((3108, 3151), 'ray.tune.logger.UnifiedLogger', 'UnifiedLogger', (['config', 'logdir'], {'loggers': 'None'}), '(config, logdir, loggers=None)\n', (3121, 3151), False, 'from ray.tune.logger import UnifiedLogger\n'), ((3385, 3544), 'marltoolbox.utils.rollout.internal_rollout', 'rollout.internal_rollout', (['worker'], {'num_steps': 'rollout_length', 'policy_agent_mapping': 'policy_agent_mapping', 'reset_env_before': '(True)', 'num_episodes': 'num_episodes'}), '(worker, num_steps=rollout_length,\n policy_agent_mapping=policy_agent_mapping, reset_env_before=True,\n num_episodes=num_episodes)\n', (3409, 3544), False, 'from marltoolbox.utils import rollout\n'), ((5316, 5475), 
'marltoolbox.utils.rollout.internal_rollout', 'rollout.internal_rollout', (['worker'], {'num_steps': 'rollout_length', 'policy_agent_mapping': 'policy_agent_mapping', 'reset_env_before': '(True)', 'num_episodes': 'num_episodes'}), '(worker, num_steps=rollout_length,\n policy_agent_mapping=policy_agent_mapping, reset_env_before=True,\n num_episodes=num_episodes)\n', (5340, 5475), False, 'from marltoolbox.utils import rollout\n'), ((2490, 2525), 'os.path.exists', 'os.path.exists', (['DEFAULT_RESULTS_DIR'], {}), '(DEFAULT_RESULTS_DIR)\n', (2504, 2525), False, 'import os\n'), ((2539, 2571), 'os.makedirs', 'os.makedirs', (['DEFAULT_RESULTS_DIR'], {}), '(DEFAULT_RESULTS_DIR)\n', (2550, 2571), False, 'import os\n'), ((1035, 1053), 'numpy.array', 'np.array', (['[action]'], {}), '([action])\n', (1043, 1053), True, 'import numpy as np\n'), ((1858, 1885), 'copy.deepcopy', 'copy.deepcopy', (['actions_list'], {}), '(actions_list)\n', (1871, 1885), False, 'import copy\n'), ((2602, 2645), 'os.path.join', 'os.path.join', (['DEFAULT_RESULTS_DIR', 'tail_bis'], {}), '(DEFAULT_RESULTS_DIR, tail_bis)\n', (2614, 2645), False, 'import os\n'), ((2669, 2712), 'os.path.join', 'os.path.join', (['DEFAULT_RESULTS_DIR', 'tail_bis'], {}), '(DEFAULT_RESULTS_DIR, tail_bis)\n', (2681, 2712), False, 'import os\n'), ((2744, 2783), 'os.path.join', 'os.path.join', (['DEFAULT_RESULTS_DIR', 'tail'], {}), '(DEFAULT_RESULTS_DIR, tail)\n', (2756, 2783), False, 'import os\n'), ((2807, 2846), 'os.path.join', 'os.path.join', (['DEFAULT_RESULTS_DIR', 'tail'], {}), '(DEFAULT_RESULTS_DIR, tail)\n', (2819, 2846), False, 'import os\n'), ((2878, 2921), 'os.path.join', 'os.path.join', (['DEFAULT_RESULTS_DIR', 'exp_name'], {}), '(DEFAULT_RESULTS_DIR, exp_name)\n', (2890, 2921), False, 'import os\n'), ((2945, 2988), 'os.path.join', 'os.path.join', (['DEFAULT_RESULTS_DIR', 'exp_name'], {}), '(DEFAULT_RESULTS_DIR, exp_name)\n', (2957, 2988), False, 'import os\n')] |
import os
import numpy as np
import pandas as pd
def get_vs(df, rb_vs_const: bool, rb_vs_step: bool, par_vs: str, par_vs_gk: str, par_vs_ns: str,
par_slp_in: str, par_l_gk: str, par_l_ns: str, par_vs_trans: str):
vf = pd.DataFrame()
vf['Winkel'] = df['Winkel']
vf = vf.reset_index(drop=True)
vf['vs'] = int(par_vs)
if rb_vs_const:
vf['vs'] = int(par_vs)
elif rb_vs_step:
v_s_gk = int(par_vs_gk)
v_s_nh = int(par_vs_ns)
slp_in = int(par_slp_in)
l_gk = slp_in + int(par_l_gk)
l_nh = int(par_l_ns)
l_trans = int(par_vs_trans)
# bestimme die Positionen, an denen nur die Kontur aufhört
pos_nh_a = l_gk + l_trans
pos_nh_e = pos_nh_a + l_nh
# Bestimme die Positionen, an denen der Übergang stattfindet
pos_trans_1 = l_gk
pos_trans_2 = pos_nh_e + l_trans
# initialisiere vs-Spalte
vf['vs'] = v_s_gk
# ändere Werte, die im Bereich der Nockenkontur liegen
idx = vf[vf['Winkel'] <= pos_nh_a].idxmax().values[0]
idx_2 = vf[vf['Winkel'] > pos_nh_e].idxmin().values[0]
vf['vs'].loc[idx:idx_2] = v_s_nh
# Wie viele Punkte liegen im Übergang?
idx_3 = vf[vf['Winkel'] <= pos_trans_1].idxmax().values[0]
idx_4 = vf[vf['Winkel'] > pos_trans_2].idxmin().values[0]
anz_trans_1 = len(vf['vs'].loc[idx_3:idx])
anz_trans_2 = len(vf['vs'].loc[idx_2:idx_4])
# Ersetze den Übergang zwischen Kontur und GK mit einer linearen Interpolation
vf['vs'].loc[idx_3:idx] = np.linspace(v_s_gk, v_s_nh, anz_trans_1)
vf['vs'].loc[idx_2:idx_4] = np.linspace(v_s_nh, v_s_gk, anz_trans_2)
return vf['vs'].values
def get_slope_pos(slp_out):
"""
:param slp_out:
:return: pos
"""
# Aufaddieren der Slopepositionen, um Startpositionen zu berechnen
# zuerst umkehren der Matrix slp_out, da cumsum positiv aufaddiert
if len(slp_out) > 1:
pos = np.flipud(slp_out[:, 0])
# Bildung von cumsum, danach umkehren der Matrix, um ursprüngliche
# Reihenfolge nicht durcheinander zu bringen
pos = np.flipud(np.cumsum(pos))
else:
pos = [slp_out[0][0]]
return pos
def save_csv(df, versuch, directory):
file_out = versuch + '_out.csv'
fname = os.path.join(directory + os.sep, file_out)
df_out = df[['Temperatur', 'Winkel', 'IB', 'IB-korr']].copy()
df_out.to_csv(path_or_buf=fname, sep=';', decimal=',',
encoding='utf-8')
| [
"numpy.flipud",
"os.path.join",
"numpy.linspace",
"pandas.DataFrame",
"numpy.cumsum"
] | [((235, 249), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (247, 249), True, 'import pandas as pd\n'), ((2338, 2380), 'os.path.join', 'os.path.join', (['(directory + os.sep)', 'file_out'], {}), '(directory + os.sep, file_out)\n', (2350, 2380), False, 'import os\n'), ((2002, 2026), 'numpy.flipud', 'np.flipud', (['slp_out[:, 0]'], {}), '(slp_out[:, 0])\n', (2011, 2026), True, 'import numpy as np\n'), ((1593, 1633), 'numpy.linspace', 'np.linspace', (['v_s_gk', 'v_s_nh', 'anz_trans_1'], {}), '(v_s_gk, v_s_nh, anz_trans_1)\n', (1604, 1633), True, 'import numpy as np\n'), ((1670, 1710), 'numpy.linspace', 'np.linspace', (['v_s_nh', 'v_s_gk', 'anz_trans_2'], {}), '(v_s_nh, v_s_gk, anz_trans_2)\n', (1681, 1710), True, 'import numpy as np\n'), ((2179, 2193), 'numpy.cumsum', 'np.cumsum', (['pos'], {}), '(pos)\n', (2188, 2193), True, 'import numpy as np\n')] |
'''
Units:
The units module handles basic unit conversion by defining a set of unit conversion anonymous
functions. The functions use numpy array-friendly operations to allow users to pass a value array in place
of a single value, should they wish.
'''
import numpy as np
''' The unit lib stores conversion functions that take a value, x, from a given unit to the base unit. Metric modifiers on base units are
handled separately (made easy by the fact that all base units are the metric base units).
Ex: unit_lib['m']['in'] is the equivalent of 0.0254 (m / in)
NOTE: The unit lib doesn't handle unit systems with different zero-reference points, e.g. temperature. The unit_offset structure holds
offset values; if a unit is not found in the unit_offset library, it is assumed that reference points are the same.
'''
unit_lib = {
"m" : { "m": 1, "in": 0.0254, "ft": 0.0254 * 12.0 } , # LINEAR DISTANCE (base: meter)
"N" : { "N": 1, "lbf": 4.4482216 }, # FORCE (base: Newton)
"g" : { "g": 1, "lb": 453.59237 , "slug": 14593.90}, # MASS (base: gram | lb is included for imperial-minded folk)
"pa" :{ "pa": 1, "psi": 6894.7572931783, "atm": 101325, "bar": 1e5 }, # PRESSURE (base: Pascal)
"K" : { "K": 1, "R": 5.0/9.0, "C": 1, "F": 5.0/9.0 }, #TEMPERATURE (base: Kelvin)
"unitless" : { "unitless": 1, "none": 1 } # UNITLESS
}
''' The unit_offset stores the offsets of a unit from the base unit's zero reference, in the base unit. If a unit is not found in the unit_offset
structure, it is assumed that no reference-shift is necessary.
Ex: to convert a temperature, T, in degC to degK
degK = unit_lib['K']['C'] * T degC + unit_offset['K']['C'] degK
'''
unit_offset = {
"K" : {"C": 273.15 , "F": 273.15 - 32*5.0/9.0 }, #TEMPERATURE OFFSETS
}
''' The metric mult stores the metric multipliers based on the shorthand prefixes. '''
metric_mult = {
"k": 1000,
"h": 100,
"da": 10,
"d": 0.1,
"c": 0.01,
"m": 1E-3,
"n": 1E-9
}
class Units():
def __init__(self):
pass
def get_available_units(self):
''' Returns a list of all available units defined in the units lib '''
unit_list = []
for key in unit_lib:
unit_list += list(unit_lib[key].keys())
return unit_list
def get_compatible_units(self, unit):
''' Returns a list of units compatible with the given unit.
The returned list will contain the given unit, unless no other compatible units are found.
'''
my_base_unit = None
# Search for the from_unit in the unit lib
for base_unit in unit_lib:
for each_unit in unit_lib[base_unit]:
if unit.casefold() == each_unit.casefold() :
my_base_unit = base_unit
break
if my_base_unit is None: # If you don't find a base unit, return empty list
return []
else:
return unit_lib[my_base_unit].keys() # return list of keys for that base unit
def validate_units(self, from_unit, to_unit):
''' Returns true if from_unit can be converted to to_unit by this module. Otherwise returns false. '''
my_base_unit = None
# Search for the from_unit in the unit lib
for base_unit in unit_lib:
for unit in base_unit:
if unit.casefold() == from_unit.casefold():
my_base_unit = base_unit
break
# If didn't find it, check if the from_unit is a metric multiplier of the base unit.
# If so, remove the metric multiplier and validate the base unit
if my_base_unit is None:
for prefix in metric_mult:
if from_unit[:len(prefix)] == prefix:
my_base_unit = from_unit[len(prefix):]
if not(my_base_unit in [key.casefold() for key in unit_lib.keys()]):
return False # if can't find the base unit
# Confirm that the to_unit is a valid conversion based on the base unit.
if to_unit in [key.casefold() for key in unit_lib[my_base_unit].keys()]:
return True
else:
return False
def convert(self, value, from_unit, to_unit):
''' Returns the conversion of a value in one unit to a compatible unit.
value: the value to be converted (can also be a np array of values)
from_unit: the units of value
to_unit: the units to which value is converted
Raises a KeyError if the units passed are incompatible or invalid.
'''
my_base_unit = None
# Search for the from_unit in the unit lib
for base_unit in unit_lib:
for unit in unit_lib[base_unit]:
if unit.casefold() == from_unit.casefold() :
my_base_unit = base_unit
from_unit = unit
break
# If didn't find it, from_unit is a metric multiplier of the base unit.
# If so, adjust the value to get it in its base unit equivalent (e.g. take the km value to m)
if my_base_unit is None:
for prefix in metric_mult:
if from_unit[:len(prefix)].casefold() == prefix.casefold():
value = np.multiply(value, metric_mult[prefix] )
return self.convert(value, from_unit[len(prefix):], to_unit) # having converted to a base unit and multiplied valued, recurse
# Do the same as above for the to_unit, defining a multiplier thats applied on the value at end if to_unit is
# a metric multiplier of a base unit
mult = 1 # by default, multiplier is 1
flag = False
for unit in unit_lib[my_base_unit]:
if unit.casefold() == to_unit.casefold():
to_unit = unit
flag = True
if flag is False: # If didn't find it, must be a metric multiplier of a base unit
for prefix in metric_mult:
if to_unit[:len(prefix)].casefold() == prefix.casefold():
mult = 1 / metric_mult[prefix]
to_unit = to_unit[len(prefix):]
for unit in unit_lib[my_base_unit]: # if found the metric multiplier, find the key that is the case-insensitive match
if unit.casefold() == to_unit.casefold():
to_unit = unit
break
break
# Convert to base unit, checking to see if an offset exists
value = np.multiply(value, unit_lib[my_base_unit][from_unit] ) # value (from_unit) * # (base_unit/from_unit) = new value (base_unit)
try:
value = np.add(value, unit_offset[my_base_unit][from_unit])
except KeyError:
pass # KeyError indicates no offset exists for this unit pair
# Convert to to_unit, checking to see if an offset exists
try:
value = np.subtract(value, unit_offset[my_base_unit][to_unit])
except KeyError:
pass # KeyError indicates no offset exists for this unit pair
value = np.divide(value, unit_lib[my_base_unit][to_unit])
return np.multiply(value, mult) # apply the multiplier and return
units = Units()
### FUNCTIONALITY TESTING ###
# print( 0 == units.convert(-273.15, 'C', 'K'))
# print( 2.54 == units.convert(1.0, 'in', 'cm'))
# print(1000/0.0254 == units.convert(1.0, 'km', 'in'))
# print(2.54e-5 == units.convert(1.0, 'in', 'km'))
# print(abs(1/2.21 - units.convert(1.0,'lb','kg')) <= 1E-2)
# print( [[1.0], [2.0], [3.0]] == np.divide( units.convert([[1],[2],[3]], 'km', 'in'), 1000/0.0254 ) )
| [
"numpy.multiply",
"numpy.subtract",
"numpy.add",
"numpy.divide"
] | [((6627, 6680), 'numpy.multiply', 'np.multiply', (['value', 'unit_lib[my_base_unit][from_unit]'], {}), '(value, unit_lib[my_base_unit][from_unit])\n', (6638, 6680), True, 'import numpy as np\n'), ((7205, 7254), 'numpy.divide', 'np.divide', (['value', 'unit_lib[my_base_unit][to_unit]'], {}), '(value, unit_lib[my_base_unit][to_unit])\n', (7214, 7254), True, 'import numpy as np\n'), ((7279, 7303), 'numpy.multiply', 'np.multiply', (['value', 'mult'], {}), '(value, mult)\n', (7290, 7303), True, 'import numpy as np\n'), ((6785, 6836), 'numpy.add', 'np.add', (['value', 'unit_offset[my_base_unit][from_unit]'], {}), '(value, unit_offset[my_base_unit][from_unit])\n', (6791, 6836), True, 'import numpy as np\n'), ((7035, 7089), 'numpy.subtract', 'np.subtract', (['value', 'unit_offset[my_base_unit][to_unit]'], {}), '(value, unit_offset[my_base_unit][to_unit])\n', (7046, 7089), True, 'import numpy as np\n'), ((5313, 5352), 'numpy.multiply', 'np.multiply', (['value', 'metric_mult[prefix]'], {}), '(value, metric_mult[prefix])\n', (5324, 5352), True, 'import numpy as np\n')] |
import math
import os
import random
import tempfile
import numpy as np
import librosa
import subprocess
from scipy.io.wavfile import write
from audio_utils.utils.file_utils import split_base_and_extension
'''
Signal to noise ratio (SNR) can be defined as
SNR = 20*log(RMS_signal/RMS_noise)
Where: RMS_signal is the RMS value of signal
RMS_noise is that of noise.
Log is the logarithm of 10
****Additive White Gaussian Noise (AWGN)****
- This kind of noise can be added (arithmetic element-wise addition) to the signal
- Mean value is zero (randomly sampled from a Gaussian distribution with mean value of zero)
- Contains all the frequency components in an equal manner
****Real World Noise****
- An audio file which can be overlapped the signal as noise
- Frequency components will depend on the sound used
'''
def get_white_noise(signal, snr):
"""Given a signal and desired SNR, this gives the required AWGN
that should be added to the signal to get the desired SNR in dB"""
RMS_s = math.sqrt(np.mean(signal ** 2))
RMS_n = math.sqrt(RMS_s ** 2 / (pow(10, snr / 10)))
STD_n = RMS_n
noise = np.random.normal(0, STD_n, signal.shape[0])
return noise
def get_noise_from_sound(signal, noise, snr):
"""Given a signal, noise (audio) and desired SNR,
this gives the noise (scaled version of noise input) that gives the desired SNR"""
RMS_s = math.sqrt(np.mean(signal ** 2))
RMS_n = math.sqrt(RMS_s ** 2 / (pow(10, snr / 10)))
RMS_n_current = math.sqrt(np.mean(noise ** 2))
noise = noise * (RMS_n / RMS_n_current)
return noise
def to_polar(complex_ar):
"""convert complex np array to polar arrays (2 apprays; abs and angle)"""
return np.abs(complex_ar), np.angle(complex_ar)
def add_awgn(signal_file, snr, output_path, length):
"""Add AWGN to a .wav file"""
split_signal_file = split_base_and_extension(signal_file)
temp_broken_file_name = os.path.join(output_path, 'temp_broken' + split_signal_file[1])
temp_file_name = tempfile.mktemp(dir=output_path, prefix=split_signal_file[0], suffix=split_signal_file[1])
signal, sr = librosa.load(signal_file)
signal = np.interp(signal, (signal.min(), signal.max()), (-1, 1))
noise = get_white_noise(signal, snr=snr)
signal_noise = signal + noise
write(temp_broken_file_name, sr, signal_noise.astype(np.float32))
subprocess.call(
['ffmpeg', '-y', '-ss', '00:00:00', '-t', str(length), '-i', temp_broken_file_name,
'-ar', '44100', '-ac', '1', '-acodec', 'pcm_s16le', temp_file_name])
return temp_file_name
if __name__ == '__main__':
with open('../../random_data/processing/data/silence.csv', 'w') as fout:
for i in range(0, 800):
random_snr = random.randint(60, 100)
length = random.randint(2, 6)
file_name = add_awgn('D:\\Audio Features\\UrbanSound8K\\UrbanSound8K\\audio\\fold11\\10_silence.wav',
random_snr,
'D:\\Audio Features\\UrbanSound8K\\UrbanSound8K\\audio\\fold11', length)
ground_truth_file_name = os.path.splitext(os.path.basename(file_name))[0]
fout.write(f"{ground_truth_file_name},['silence']\n")
| [
"numpy.random.normal",
"numpy.mean",
"numpy.abs",
"audio_utils.utils.file_utils.split_base_and_extension",
"os.path.join",
"numpy.angle",
"tempfile.mktemp",
"os.path.basename",
"random.randint",
"librosa.load"
] | [((1140, 1183), 'numpy.random.normal', 'np.random.normal', (['(0)', 'STD_n', 'signal.shape[0]'], {}), '(0, STD_n, signal.shape[0])\n', (1156, 1183), True, 'import numpy as np\n'), ((1873, 1910), 'audio_utils.utils.file_utils.split_base_and_extension', 'split_base_and_extension', (['signal_file'], {}), '(signal_file)\n', (1897, 1910), False, 'from audio_utils.utils.file_utils import split_base_and_extension\n'), ((1939, 2002), 'os.path.join', 'os.path.join', (['output_path', "('temp_broken' + split_signal_file[1])"], {}), "(output_path, 'temp_broken' + split_signal_file[1])\n", (1951, 2002), False, 'import os\n'), ((2024, 2119), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'dir': 'output_path', 'prefix': 'split_signal_file[0]', 'suffix': 'split_signal_file[1]'}), '(dir=output_path, prefix=split_signal_file[0], suffix=\n split_signal_file[1])\n', (2039, 2119), False, 'import tempfile\n'), ((2132, 2157), 'librosa.load', 'librosa.load', (['signal_file'], {}), '(signal_file)\n', (2144, 2157), False, 'import librosa\n'), ((1032, 1052), 'numpy.mean', 'np.mean', (['(signal ** 2)'], {}), '(signal ** 2)\n', (1039, 1052), True, 'import numpy as np\n'), ((1412, 1432), 'numpy.mean', 'np.mean', (['(signal ** 2)'], {}), '(signal ** 2)\n', (1419, 1432), True, 'import numpy as np\n'), ((1520, 1539), 'numpy.mean', 'np.mean', (['(noise ** 2)'], {}), '(noise ** 2)\n', (1527, 1539), True, 'import numpy as np\n'), ((1719, 1737), 'numpy.abs', 'np.abs', (['complex_ar'], {}), '(complex_ar)\n', (1725, 1737), True, 'import numpy as np\n'), ((1739, 1759), 'numpy.angle', 'np.angle', (['complex_ar'], {}), '(complex_ar)\n', (1747, 1759), True, 'import numpy as np\n'), ((2757, 2780), 'random.randint', 'random.randint', (['(60)', '(100)'], {}), '(60, 100)\n', (2771, 2780), False, 'import random\n'), ((2802, 2822), 'random.randint', 'random.randint', (['(2)', '(6)'], {}), '(2, 6)\n', (2816, 2822), False, 'import random\n'), ((3142, 3169), 'os.path.basename', 'os.path.basename', (['file_name'], 
{}), '(file_name)\n', (3158, 3169), False, 'import os\n')] |
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import os
def cal_undistort(img, objpoints, imgpoints):
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[1:], None, None)
undist = cv2.undistort(img, mtx, dist, None, mtx)
return undist
if __name__ == '__main__':
# prepare object points
nx = 9 # number of inside corners in x
ny = 6 # number of inside corners in y
objpoints = []
objp = np.zeros((nx*ny, 3), np.float32)
objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
objpoints.append(objp)
# Make a list of calibration images
images = glob.glob('camera_cal/calibration3.jpg')
for idx, image in enumerate(images):
imgpoints = []
img = cv2.imread(image)
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# # Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
# # If found, draw corners
if ret == True:
# Draw and display the corners
imgpoints.append(corners)
# cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
undistorted = cal_undistort(img, objpoints, imgpoints)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
# ax1.show()
ax2.imshow(undistorted)
ax2.set_title('Undistorted Image', fontsize=50)
# ax2.show()
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
plt.show()
print("../output_images/chess_{}".format(os.path.basename(image)))
plt.savefig("../output_images/chess_{}".format(os.path.basename(image)))
else:
print("Ret: False", image) | [
"matplotlib.pyplot.subplots_adjust",
"cv2.undistort",
"numpy.zeros",
"os.path.basename",
"cv2.cvtColor",
"cv2.calibrateCamera",
"cv2.findChessboardCorners",
"cv2.imread",
"matplotlib.pyplot.subplots",
"glob.glob",
"matplotlib.pyplot.show"
] | [((213, 281), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'img.shape[1:]', 'None', 'None'], {}), '(objpoints, imgpoints, img.shape[1:], None, None)\n', (232, 281), False, 'import cv2\n'), ((295, 335), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'mtx'], {}), '(img, mtx, dist, None, mtx)\n', (308, 335), False, 'import cv2\n'), ((527, 561), 'numpy.zeros', 'np.zeros', (['(nx * ny, 3)', 'np.float32'], {}), '((nx * ny, 3), np.float32)\n', (535, 561), True, 'import numpy as np\n'), ((695, 735), 'glob.glob', 'glob.glob', (['"""camera_cal/calibration3.jpg"""'], {}), "('camera_cal/calibration3.jpg')\n", (704, 735), False, 'import glob\n'), ((814, 831), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (824, 831), False, 'import cv2\n'), ((878, 915), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (890, 915), False, 'import cv2\n'), ((980, 1027), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(nx, ny)', 'None'], {}), '(gray, (nx, ny), None)\n', (1005, 1027), False, 'import cv2\n'), ((1333, 1368), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(24, 9)'}), '(1, 2, figsize=(24, 9))\n', (1345, 1368), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1700), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.0)', 'right': '(1)', 'top': '(0.9)', 'bottom': '(0.0)'}), '(left=0.0, right=1, top=0.9, bottom=0.0)\n', (1660, 1700), True, 'import matplotlib.pyplot as plt\n'), ((1711, 1721), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1719, 1721), True, 'import matplotlib.pyplot as plt\n'), ((1775, 1798), 'os.path.basename', 'os.path.basename', (['image'], {}), '(image)\n', (1791, 1798), False, 'import os\n'), ((1860, 1883), 'os.path.basename', 'os.path.basename', (['image'], {}), '(image)\n', (1876, 1883), False, 'import os\n')] |
from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten
from keras.models import Sequential
from mido import MidiFile, MidiTrack, Message
from keras.optimizers import Adam
from tensorflow.python.ops.init_ops import RandomNormal
from scripts.DataLoader import DataLoader
from scripts.GAN import GAN
import numpy as np
import datetime
class SimpleCnnGAN(GAN):
    """GAN pairing a dense + transposed-convolution generator with a CNN discriminator.

    The generator maps a latent vector to a 128x128x1 "piano roll" image;
    `generate_sample_to` converts such an image into a MIDI file.
    """

    def __init__(self, dataloader: DataLoader = None, g_lr=0.001, g_beta=0.999, d_lr=0.001, d_beta=0.999, latent_dim=256,
                 content_shape=(128, 128, 1)):
        """Build generator, discriminator and the combined adversarial model.

        Args:
            dataloader: training data source, forwarded to the GAN base class.
            g_lr, g_beta: Adam learning rate / beta_1 for the combined (generator) model.
            d_lr, d_beta: Adam learning rate / beta_1 for the discriminator.
            latent_dim: size of the generator's latent input vector.
            content_shape: shape of generated/discriminated images.
        """
        GAN.__init__(self=self, data_generator=dataloader, name="simple-cnn-dcnn-GAN", latent_dim=latent_dim,
                     content_shape=content_shape)
        self.generator = self.build_generator()
        self.discriminator = self.build_discriminator(lr=d_lr, beta=d_beta)
        # The combined model is built last: it freezes the discriminator internally.
        self.combined = self.combined_model(lr=g_lr, beta=g_beta)

    def build_generator(self):
        """Return an (uncompiled) generator: latent vector -> 128x128x1 sigmoid image."""
        model = Sequential()
        # Foundation: dense layer reshaped into an 8x8 feature map with 128 channels.
        n_nodes = 128 * 8 * 8
        model.add(Dense(n_nodes, input_dim=self.latent_dim, kernel_initializer=RandomNormal(stddev=0.5)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Reshape((8, 8, 128)))
        # Four identical stride-2 transposed convolutions upsample 8 -> 16 -> 32 -> 64 -> 128.
        for _ in range(4):
            model.add(Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same',
                                      kernel_initializer=RandomNormal(stddev=0.5)))
            model.add(LeakyReLU(alpha=0.2))
        # Collapse to a single channel with values in [0, 1].
        model.add(Conv2D(1, (7, 7), activation='sigmoid', padding='same',
                         kernel_initializer=RandomNormal(stddev=0.5)))
        return model

    def build_discriminator(self, lr=0.001, beta=0.999):
        """Return a compiled CNN classifying images as real (1) / fake (0)."""
        model = Sequential()
        model.add(Conv2D(128, (5, 5), strides=(2, 2), padding='same', input_shape=self.content_shape,
                         kernel_initializer=RandomNormal(stddev=0.5)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Conv2D(64, (3, 3), strides=(2, 2), padding='same', kernel_initializer=RandomNormal(stddev=0.5)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Conv2D(32, (3, 3), strides=(2, 2), padding='same', kernel_initializer=RandomNormal(stddev=0.5)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid', kernel_initializer=RandomNormal(stddev=0.5)))
        # BUG FIX: beta_1 was previously set to the learning rate (beta_1=lr),
        # silently ignoring the `beta` argument.
        opt = Adam(lr=lr, beta_1=beta)
        model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
        return model

    def combined_model(self, lr=0.001, beta=0.999):
        """Stack generator + frozen discriminator for generator training."""
        self.discriminator.trainable = False
        model = Sequential()
        model.add(self.generator)
        model.add(self.discriminator)
        opt = Adam(lr=lr, beta_1=beta)
        model.compile(loss='binary_crossentropy', optimizer=opt)
        return model

    def generate_sample(self, epoch):
        """Write a timestamped MIDI sample for the given training epoch."""
        path = "../samples/%s_%s_epoch_%d.mid" % (datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"), self.name, epoch)
        self.generate_sample_to(path=path)

    def generate_sample_to(self, path):
        """Sample the generator once and save the result as a MIDI file at `path`.

        Each of the 128 rows of the generated image becomes one note event;
        the strongest column per row is used as the note number.
        """
        generated = self.generator.predict(np.random.randn(1, self.latent_dim))
        generated = generated.reshape(128, 128)
        mid = MidiFile()
        track = MidiTrack()
        t = 0
        for note in generated:
            # BUG FIX: np.argmax returns a numpy integer; cast to plain int so
            # mido's message validation accepts it as a note number.
            max_index = int(np.argmax(note))
            msg = Message('note_on', note=max_index)
            t = t + 1
            msg.time = t
            msg.velocity = 67
            track.append(msg)
        mid.tracks.append(track)
        mid.save(path)
"keras.optimizers.Adam",
"keras.layers.Flatten",
"tensorflow.python.ops.init_ops.RandomNormal",
"scripts.GAN.GAN.__init__",
"mido.MidiTrack",
"keras.layers.LeakyReLU",
"numpy.argmax",
"keras.models.Sequential",
"mido.Message",
"datetime.datetime.now",
"numpy.random.randn",
"mido.MidiFile",
"... | [((577, 712), 'scripts.GAN.GAN.__init__', 'GAN.__init__', ([], {'self': 'self', 'data_generator': 'dataloader', 'name': '"""simple-cnn-dcnn-GAN"""', 'latent_dim': 'latent_dim', 'content_shape': 'content_shape'}), "(self=self, data_generator=dataloader, name=\n 'simple-cnn-dcnn-GAN', latent_dim=latent_dim, content_shape=content_shape)\n", (589, 712), False, 'from scripts.GAN import GAN\n'), ((967, 979), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (977, 979), False, 'from keras.models import Sequential\n'), ((2273, 2285), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2283, 2285), False, 'from keras.models import Sequential\n'), ((3071, 3093), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr', 'beta_1': 'lr'}), '(lr=lr, beta_1=lr)\n', (3075, 3093), False, 'from keras.optimizers import Adam\n'), ((3316, 3328), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3326, 3328), False, 'from keras.models import Sequential\n'), ((3415, 3439), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr', 'beta_1': 'beta'}), '(lr=lr, beta_1=beta)\n', (3419, 3439), False, 'from keras.optimizers import Adam\n'), ((3914, 3924), 'mido.MidiFile', 'MidiFile', ([], {}), '()\n', (3922, 3924), False, 'from mido import MidiFile, MidiTrack, Message\n'), ((3941, 3952), 'mido.MidiTrack', 'MidiTrack', ([], {}), '()\n', (3950, 3952), False, 'from mido import MidiFile, MidiTrack, Message\n'), ((1169, 1189), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1178, 1189), False, 'from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((1209, 1229), 'keras.layers.Reshape', 'Reshape', (['(8, 8, 128)'], {}), '((8, 8, 128))\n', (1216, 1229), False, 'from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((1415, 1435), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1424, 1435), False, 'from keras.layers import 
Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((1621, 1641), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1630, 1641), False, 'from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((1827, 1847), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1836, 1847), False, 'from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((2035, 2055), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2044, 2055), False, 'from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((2477, 2497), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2486, 2497), False, 'from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((2517, 2529), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (2524, 2529), False, 'from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((2665, 2685), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2674, 2685), False, 'from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((2705, 2717), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (2712, 2717), False, 'from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((2853, 2873), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2862, 2873), False, 'from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((2893, 2905), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (2900, 2905), False, 'from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((2926, 2935), 
'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2933, 2935), False, 'from keras.layers import Dense, LeakyReLU, Reshape, Conv2DTranspose, Conv2D, Dropout, Flatten\n'), ((3815, 3850), 'numpy.random.randn', 'np.random.randn', (['(1)', 'self.latent_dim'], {}), '(1, self.latent_dim)\n', (3830, 3850), True, 'import numpy as np\n'), ((4022, 4037), 'numpy.argmax', 'np.argmax', (['note'], {}), '(note)\n', (4031, 4037), True, 'import numpy as np\n'), ((4056, 4090), 'mido.Message', 'Message', (['"""note_on"""'], {'note': 'max_index'}), "('note_on', note=max_index)\n", (4063, 4090), False, 'from mido import MidiFile, MidiTrack, Message\n'), ((1124, 1148), 'tensorflow.python.ops.init_ops.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.5)'}), '(stddev=0.5)\n', (1136, 1148), False, 'from tensorflow.python.ops.init_ops import RandomNormal\n'), ((1370, 1394), 'tensorflow.python.ops.init_ops.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.5)'}), '(stddev=0.5)\n', (1382, 1394), False, 'from tensorflow.python.ops.init_ops import RandomNormal\n'), ((1576, 1600), 'tensorflow.python.ops.init_ops.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.5)'}), '(stddev=0.5)\n', (1588, 1600), False, 'from tensorflow.python.ops.init_ops import RandomNormal\n'), ((1782, 1806), 'tensorflow.python.ops.init_ops.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.5)'}), '(stddev=0.5)\n', (1794, 1806), False, 'from tensorflow.python.ops.init_ops import RandomNormal\n'), ((1990, 2014), 'tensorflow.python.ops.init_ops.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.5)'}), '(stddev=0.5)\n', (2002, 2014), False, 'from tensorflow.python.ops.init_ops import RandomNormal\n'), ((2151, 2175), 'tensorflow.python.ops.init_ops.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.5)'}), '(stddev=0.5)\n', (2163, 2175), False, 'from tensorflow.python.ops.init_ops import RandomNormal\n'), ((2432, 2456), 'tensorflow.python.ops.init_ops.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.5)'}), 
'(stddev=0.5)\n', (2444, 2456), False, 'from tensorflow.python.ops.init_ops import RandomNormal\n'), ((2620, 2644), 'tensorflow.python.ops.init_ops.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.5)'}), '(stddev=0.5)\n', (2632, 2644), False, 'from tensorflow.python.ops.init_ops import RandomNormal\n'), ((2808, 2832), 'tensorflow.python.ops.init_ops.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.5)'}), '(stddev=0.5)\n', (2820, 2832), False, 'from tensorflow.python.ops.init_ops import RandomNormal\n'), ((3005, 3029), 'tensorflow.python.ops.init_ops.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.5)'}), '(stddev=0.5)\n', (3017, 3029), False, 'from tensorflow.python.ops.init_ops import RandomNormal\n'), ((3615, 3638), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3636, 3638), False, 'import datetime\n')] |
from PIL import ImageGrab
import numpy as np
class GetDisplay:
    """Thin wrapper around PIL.ImageGrab for capturing and sizing the screen."""

    def __init__(self):
        # Placeholder value; replaced by a PIL Image once grabDisplay() runs.
        # NOTE(review): assigning the instance attribute `self.img` shadows the
        # `img` method defined below for every instance, so `instance.img()`
        # raises TypeError — the method is effectively dead code.
        self.img = np.zeros([1, 1, 3])
        pass

    def grabDisplay(self):
        # Capture the full screen; self.img becomes a PIL Image afterwards.
        self.img = ImageGrab.grab()
        pass

    def img(self):
        # NOTE(review): unreachable on instances (shadowed by the attribute set
        # in __init__); callers access the `img` attribute directly instead.
        return self.img

    def getSize(self):
        # Grab a fresh screenshot and return its (width, height) in pixels.
        # Relies on grabDisplay() having replaced the numpy placeholder with a
        # PIL Image, which is what provides .width/.height.
        self.grabDisplay()
        return self.img.width, self.img.height
| [
"numpy.zeros",
"PIL.ImageGrab.grab"
] | [((109, 128), 'numpy.zeros', 'np.zeros', (['[1, 1, 3]'], {}), '([1, 1, 3])\n', (117, 128), True, 'import numpy as np\n'), ((189, 205), 'PIL.ImageGrab.grab', 'ImageGrab.grab', ([], {}), '()\n', (203, 205), False, 'from PIL import ImageGrab\n')] |
# Common definitions for GAN metrics: provides iterators over the datasets,
# file caching, progress reports and printing.
import glob
import hashlib
import os
import pickle
import time

import numpy as np
import PIL.Image
import tensorflow as tf

import dnnlib
import dnnlib.tflib as tflib

from training import misc
from training import dataset
# Base class for metrics
class MetricBase:
    """Base class for GAN evaluation metrics.

    Subclasses implement `_evaluate`; this base provides dataset handling,
    run-config loading, rate-limited progress reporting, result formatting
    and shared feature-extraction helpers.
    """

    def __init__(self, name, dataset = None):
        """name: short metric identifier used in log files and summaries."""
        self.name = name
        self._dataset_obj = None
        self._progress_lo = None
        self._progress_hi = None
        self._progress_max = None
        self._progress_sec = None
        self._progress_time = None
        self._reset()

    def close(self):
        self._reset()

    # Loading data from previous training runs
    @staticmethod
    def parse_config_for_previous_run(run_dir):
        """Recover the dataset/training kwargs pickled by a previous run.

        BUG FIX: this was declared without `self` (not a valid instance
        method) and invoked as a bare name in `_reset`, which raised
        NameError; it is now a proper staticmethod. Requires the module-level
        `pickle` import.
        """
        with open(os.path.join(run_dir, "submit_config.pkl"), "rb") as f:
            data = pickle.load(f)
        data = data.get("run_func_kwargs", {})
        return dict(train = data, dataset = data.get("dataset_args", {}))

    def _reset(self, network_pkl = None, run_dir = None, data_dir = None, dataset_args = None,
        mirror_augment = None):
        """Reinitialize evaluation state, optionally reloading dataset args
        and mirror-augment setting from a previous run directory."""
        self._dataset_args = dataset_args.copy() if dataset_args is not None else None
        if self._dataset_obj is not None:
            self._dataset_obj.close()
        self._dataset_obj = None
        self._network_pkl = network_pkl
        self._data_dir = data_dir
        self._mirror_augment = mirror_augment
        self._eval_time = 0
        self._results = []

        if run_dir is not None and (dataset_args is None or mirror_augment is None):
            # BUG FIX: previously called as a bare name (NameError at runtime).
            run_config = self.parse_config_for_previous_run(run_dir)
            self._dataset_args = dict(run_config["dataset"])
            self._mirror_augment = run_config["train"].get("mirror_augment", False)

        # When training is performed over a subset of the data, we still want to evaluate it
        # over a new unrestricted sample of the full dataset.
        if self._dataset_args is not None:
            self._dataset_args["max_imgs"] = None
            # self._dataset_args["shuffle_mb"] = 0

    def configure_progress_reports(self, plo, phi, pmax, psec = 15):
        """Map this metric's progress onto [plo, phi] of pmax total units,
        reporting at most once every `psec` seconds."""
        self._progress_lo = plo
        self._progress_hi = phi
        self._progress_max = pmax
        self._progress_sec = psec

    def run(self, network_pkl, run_dir = None, data_dir = None, dataset_args = None,
        mirror_augment = None, num_gpus = 1, tf_config = None, log_results = True,
        Gs_kwargs = dict(is_validation = True), **kwargs):
        """Evaluate the metric for a network pickle; return the first result value.

        NOTE: the mutable `Gs_kwargs` default is kept for interface
        compatibility; `_evaluate` implementations should not mutate it.
        """
        self._reset(network_pkl = network_pkl, run_dir = run_dir, data_dir = data_dir,
            dataset_args = dataset_args, mirror_augment = mirror_augment)
        time_begin = time.time()
        with tf.Graph().as_default(), tflib.create_session(tf_config).as_default():
            self._report_progress(0, 1)
            _G = _D = Gs = None
            if self._network_pkl is not None:
                _G, _D, Gs = misc.load_pkl(self._network_pkl)[:3]
            self._evaluate(Gs, Gs_kwargs = Gs_kwargs, num_gpus = num_gpus, **kwargs)
            self._report_progress(1, 1)
        self._eval_time = time.time() - time_begin

        if log_results:
            if run_dir is not None:
                log_file = os.path.join(run_dir, "metric-%s.txt" % self.name)
                with dnnlib.util.Logger(log_file, "a", screen = False):
                    print(self.get_result_str().strip())
            print(self.get_result_str(screen = True).strip())
        return self._results[0].value

    def get_result_str(self, screen = False):
        """Format network name, eval time and all recorded results as one line.
        With screen=True the numbers are colorized for terminal output."""
        if self._network_pkl is None:
            network_name = "None"
        else:
            network_name = os.path.splitext(os.path.basename(self._network_pkl))[0]
        if len(network_name) > 29:
            network_name = "..." + network_name[-26:]
        result_str = "%-30s" % network_name
        result_str += " time %-12s" % dnnlib.util.format_time(self._eval_time)
        nums = ""
        for res in self._results:
            nums += " " + self.name + res.suffix + " "
            nums += res.fmt % res.value
        if screen:
            nums = misc.bcolored(nums, "blue")
        result_str += nums
        return result_str

    def update_autosummaries(self):
        """Push every recorded result to TensorBoard autosummaries."""
        for res in self._results:
            name = self.name
            tflib.autosummary.autosummary("Metrics/" + name + res.suffix, res.value)

    def _evaluate(self, Gs, Gs_kwargs, num_gpus, paths = None):
        raise NotImplementedError # to be overridden by subclasses

    def _report_result(self, value, suffix = "", fmt = "%-10.4f"):
        """Record one scalar result (value + display suffix + format)."""
        self._results += [dnnlib.EasyDict(value = value, suffix = suffix, fmt = fmt)]

    def _report_progress(self, pcur, pmax, status_str = ""):
        """Rate-limited progress callback into dnnlib's RunContext."""
        if self._progress_lo is None or self._progress_hi is None or self._progress_max is None:
            return
        t = time.time()
        if self._progress_sec is not None and self._progress_time is not None and t < self._progress_time + self._progress_sec:
            return
        self._progress_time = t
        val = self._progress_lo + (pcur / pmax) * (self._progress_hi - self._progress_lo)
        dnnlib.RunContext.get().update(status_str, int(val), self._progress_max)

    def _get_cache_file_for_reals(self, extension = "pkl", **kwargs):
        """Deterministic cache path keyed on metric name + dataset args."""
        all_args = dnnlib.EasyDict(metric_name = self.name, mirror_augment = self._mirror_augment)
        all_args.update(self._dataset_args)
        all_args.update(kwargs)
        md5 = hashlib.md5(repr(sorted(all_args.items())).encode("utf-8"))
        dataset_name = self._dataset_args.get("tfrecord_dir", None) or self._dataset_args.get("h5_file", None)
        dataset_name = os.path.splitext(os.path.basename(dataset_name))[0]
        return os.path.join(".stylegan2-cache", "%s-%s-%s.%s" % (md5.hexdigest(), self.name, dataset_name, extension))

    def _get_dataset_obj(self):
        """Lazily open (and cache) the dataset object."""
        if self._dataset_obj is None:
            self._dataset_obj = dataset.load_dataset(data_dir = self._data_dir, **self._dataset_args)
        return self._dataset_obj

    def _iterate_files(self, paths, minibatch_size):
        """Yield NCHW uint8 RGB batches loaded from a list of image files.
        Requires the module-level `PIL.Image` import (was missing)."""
        for idx in range(0, len(paths), minibatch_size):
            load_img = lambda img: np.asarray(PIL.Image.open(img).convert("RGB")).transpose(2, 0, 1)
            imgs = [load_img(img) for img in paths[idx : idx + minibatch_size]]
            imgs = np.stack(imgs, axis = 0)
            yield imgs

    def _iterate_reals(self, minibatch_size):
        """Endlessly yield real-image batches, mirror-augmented if configured."""
        dataset_obj = self._get_dataset_obj()
        while True:
            imgs, _labels = dataset_obj.get_minibatch_np(minibatch_size)
            if self._mirror_augment:
                imgs = misc.apply_mirror_augment(imgs)
            yield imgs

    def _iterate_fakes(self, Gs, minibatch_size, num_gpus):
        """Endlessly yield uint8 NHWC batches sampled from the generator."""
        while True:
            latents = np.random.randn(minibatch_size, *Gs.input_shape[1:])
            fmt = dict(func = tflib.convert_imgs_to_uint8, nchw_to_nhwc = True)
            imgs = Gs.run(latents, None, output_transform = fmt, is_validation = True, num_gpus = num_gpus, assume_frozen = True)[0]
            yield imgs

    def _get_random_labels_tf(self, minibatch_size):
        return self._get_dataset_obj().get_random_labels_tf(minibatch_size)

    def _get_random_imgs_tf(self):
        return self._get_dataset_obj().get_minibatch_tf()[0]

    def _get_feats(self, img_iter, featurizer, minibatch_size, num_imgs, num_gpus = 1):
        """Extract features for `num_imgs` images from an image iterator.

        Args:
            img_iter: an iterator over image batches.
            featurizer: feature-extractor network (e.g. inception/vgg) mapping
                an image batch to vector embeddings.
            minibatch_size: size of the batches produced by `img_iter`.
            num_imgs: number of images to featurize.
            num_gpus: GPUs to use for the featurizer. BUG FIX: `num_gpus`
                was referenced without being defined; it is now an explicit
                parameter (default 1), keeping existing 4-argument calls valid.

        Returns the features [num_imgs, featurizer.output_shape[1]].
        """
        feats = np.empty([num_imgs, featurizer.output_shape[1]], dtype = np.float32)
        for idx, imgs in enumerate(img_iter):
            begin = idx * minibatch_size
            end = min(begin + minibatch_size, num_imgs)
            feats[begin:end] = featurizer.run(imgs[:end-begin], num_gpus = num_gpus, assume_frozen = True)
            if end == num_imgs:
                break
        return feats

    def _gen_feats(self, generator, featurizer, minibatch_size, num_imgs, num_gpus, g_kwargs):
        """Generate images with `generator` and extract their features.

        NOTE(review): relies on `self.minibatch_per_gpu` and `self.num_imgs`
        being set by the concrete metric subclass.

        Returns the features [self.num_imgs, featurizer.output_shape[1]].
        """
        # Construct TensorFlow graph: one generator + featurizer clone per GPU.
        result_expr = []
        for gpu_idx in range(num_gpus):
            with tf.device("/gpu:%d" % gpu_idx):
                latents = tf.random_normal([self.minibatch_per_gpu] + generator.input_shape[1:])
                labels = self._get_random_labels_tf(self.minibatch_per_gpu)
                imgs = generator.clone().get_output_for(latents, labels, **g_kwargs)[0]
                imgs = tflib.convert_imgs_to_uint8(imgs)
                result_expr.append(featurizer.clone().get_output_for(imgs))

        # Compute features for newly generated 'num_imgs' images.
        # BUG FIX: the feature width previously read `model.output_shape`,
        # an undefined name; the featurizer determines the feature width.
        feats = np.empty([self.num_imgs, featurizer.output_shape[1]], dtype = np.float32)
        for begin in range(0, self.num_imgs, minibatch_size):
            self._report_progress(begin, self.num_imgs)
            end = min(begin + minibatch_size, self.num_imgs)
            feats[begin:end] = np.concatenate(tflib.run(result_expr), axis = 0)[:end-begin]
        return feats

    def _paths_to_feats(self, paths, featurizer, minibatch_size, num_imgs = None):
        """Extract features for image files matching the glob pattern `paths`,
        optionally capped at `num_imgs`. Requires the module-level `glob`
        import (was missing).

        Returns the features [num_imgs, featurizer.output_shape[1]].
        """
        paths = glob.glob(paths)
        if num_imgs is not None:
            paths = paths[:num_imgs]
        num_imgs = len(paths)
        print("Evaluting FID on {} imgs.".format(num_imgs))
        imgs = self._iterate_files(paths, minibatch_size)
        # BUG FIX: arguments were passed as (..., num_imgs, minibatch_size),
        # swapped relative to _get_feats's signature.
        feats = self._get_feats(imgs, featurizer, minibatch_size, num_imgs)
        return feats
# Group of multiple metrics
class MetricGroup:
    """Bundles several metrics so they can be run and reported together."""

    def __init__(self, metric_kwarg_list, dataset = None):
        """Instantiate one metric per kwargs dict in `metric_kwarg_list`."""
        self.metrics = []
        for kwargs in metric_kwarg_list:
            self.metrics.append(dnnlib.util.call_func_by_name(**kwargs, dataset = None))

    def run(self, *args, **kwargs):
        """Run every metric in order; the last metric's value is returned
        (0.0 when the group is empty)."""
        result = 0.0
        for m in self.metrics:
            result = m.run(*args, **kwargs)
        return result

    def get_result_str(self):
        """Join the per-metric result strings with single spaces."""
        parts = [m.get_result_str() for m in self.metrics]
        return " ".join(parts)

    def update_autosummaries(self):
        """Push each metric's recorded results to TensorBoard autosummaries."""
        for m in self.metrics:
            m.update_autosummaries()
# Dummy metric for debugging purposes
class DummyMetric(MetricBase):
    """No-op metric that always reports 0.0; useful for pipeline smoke tests."""

    def _evaluate(self, Gs, Gs_kwargs, num_gpus):
        # Skip all computation and record a constant result.
        self._report_result(0.0)
| [
"dnnlib.util.Logger",
"training.misc.load_pkl",
"training.misc.bcolored",
"dnnlib.tflib.create_session",
"tensorflow.Graph",
"tensorflow.random_normal",
"dnnlib.tflib.autosummary.autosummary",
"numpy.stack",
"numpy.empty",
"tensorflow.device",
"dnnlib.EasyDict",
"training.misc.apply_mirror_aug... | [((2752, 2763), 'time.time', 'time.time', ([], {}), '()\n', (2761, 2763), False, 'import time\n'), ((4944, 4955), 'time.time', 'time.time', ([], {}), '()\n', (4953, 4955), False, 'import time\n'), ((5396, 5471), 'dnnlib.EasyDict', 'dnnlib.EasyDict', ([], {'metric_name': 'self.name', 'mirror_augment': 'self._mirror_augment'}), '(metric_name=self.name, mirror_augment=self._mirror_augment)\n', (5411, 5471), False, 'import dnnlib\n'), ((7983, 8049), 'numpy.empty', 'np.empty', (['[num_imgs, featurizer.output_shape[1]]'], {'dtype': 'np.float32'}), '([num_imgs, featurizer.output_shape[1]], dtype=np.float32)\n', (7991, 8049), True, 'import numpy as np\n'), ((9697, 9763), 'numpy.empty', 'np.empty', (['[self.num_imgs, model.output_shape[1]]'], {'dtype': 'np.float32'}), '([self.num_imgs, model.output_shape[1]], dtype=np.float32)\n', (9705, 9763), True, 'import numpy as np\n'), ((3183, 3194), 'time.time', 'time.time', ([], {}), '()\n', (3192, 3194), False, 'import time\n'), ((3974, 4014), 'dnnlib.util.format_time', 'dnnlib.util.format_time', (['self._eval_time'], {}), '(self._eval_time)\n', (3997, 4014), False, 'import dnnlib\n'), ((4201, 4228), 'training.misc.bcolored', 'misc.bcolored', (['nums', '"""blue"""'], {}), "(nums, 'blue')\n", (4214, 4228), False, 'from training import misc\n'), ((4395, 4467), 'dnnlib.tflib.autosummary.autosummary', 'tflib.autosummary.autosummary', (["('Metrics/' + name + res.suffix)", 'res.value'], {}), "('Metrics/' + name + res.suffix, res.value)\n", (4424, 4467), True, 'import dnnlib.tflib as tflib\n'), ((4694, 4746), 'dnnlib.EasyDict', 'dnnlib.EasyDict', ([], {'value': 'value', 'suffix': 'suffix', 'fmt': 'fmt'}), '(value=value, suffix=suffix, fmt=fmt)\n', (4709, 4746), False, 'import dnnlib\n'), ((6034, 6101), 'training.dataset.load_dataset', 'dataset.load_dataset', ([], {'data_dir': 'self._data_dir'}), '(data_dir=self._data_dir, **self._dataset_args)\n', (6054, 6101), False, 'from training import dataset\n'), 
((6448, 6470), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (6456, 6470), True, 'import numpy as np\n'), ((6900, 6952), 'numpy.random.randn', 'np.random.randn', (['minibatch_size', '*Gs.input_shape[1:]'], {}), '(minibatch_size, *Gs.input_shape[1:])\n', (6915, 6952), True, 'import numpy as np\n'), ((11100, 11153), 'dnnlib.util.call_func_by_name', 'dnnlib.util.call_func_by_name', ([], {'dataset': 'None'}), '(**kwargs, dataset=None)\n', (11129, 11153), False, 'import dnnlib\n'), ((800, 842), 'os.path.join', 'os.path.join', (['run_dir', '"""submit_config.pkl"""'], {}), "(run_dir, 'submit_config.pkl')\n", (812, 842), False, 'import os\n'), ((3296, 3346), 'os.path.join', 'os.path.join', (['run_dir', "('metric-%s.txt' % self.name)"], {}), "(run_dir, 'metric-%s.txt' % self.name)\n", (3308, 3346), False, 'import os\n'), ((5233, 5256), 'dnnlib.RunContext.get', 'dnnlib.RunContext.get', ([], {}), '()\n', (5254, 5256), False, 'import dnnlib\n'), ((5777, 5807), 'os.path.basename', 'os.path.basename', (['dataset_name'], {}), '(dataset_name)\n', (5793, 5807), False, 'import os\n'), ((6742, 6773), 'training.misc.apply_mirror_augment', 'misc.apply_mirror_augment', (['imgs'], {}), '(imgs)\n', (6767, 6773), False, 'from training import misc\n'), ((9187, 9217), 'tensorflow.device', 'tf.device', (["('/gpu:%d' % gpu_idx)"], {}), "('/gpu:%d' % gpu_idx)\n", (9196, 9217), True, 'import tensorflow as tf\n'), ((9245, 9315), 'tensorflow.random_normal', 'tf.random_normal', (['([self.minibatch_per_gpu] + generator.input_shape[1:])'], {}), '([self.minibatch_per_gpu] + generator.input_shape[1:])\n', (9261, 9315), True, 'import tensorflow as tf\n'), ((9504, 9537), 'dnnlib.tflib.convert_imgs_to_uint8', 'tflib.convert_imgs_to_uint8', (['imgs'], {}), '(imgs)\n', (9531, 9537), True, 'import dnnlib.tflib as tflib\n'), ((2777, 2787), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2785, 2787), True, 'import tensorflow as tf\n'), ((2802, 2833), 
'dnnlib.tflib.create_session', 'tflib.create_session', (['tf_config'], {}), '(tf_config)\n', (2822, 2833), True, 'import dnnlib.tflib as tflib\n'), ((2995, 3027), 'training.misc.load_pkl', 'misc.load_pkl', (['self._network_pkl'], {}), '(self._network_pkl)\n', (3008, 3027), False, 'from training import misc\n'), ((3368, 3415), 'dnnlib.util.Logger', 'dnnlib.util.Logger', (['log_file', '"""a"""'], {'screen': '(False)'}), "(log_file, 'a', screen=False)\n", (3386, 3415), False, 'import dnnlib\n'), ((3762, 3797), 'os.path.basename', 'os.path.basename', (['self._network_pkl'], {}), '(self._network_pkl)\n', (3778, 3797), False, 'import os\n'), ((9991, 10013), 'dnnlib.tflib.run', 'tflib.run', (['result_expr'], {}), '(result_expr)\n', (10000, 10013), True, 'import dnnlib.tflib as tflib\n')] |
from typing import Iterable, List, Tuple, Set, Dict, Union
from collections import defaultdict
from hashlib import blake2b
import numpy as np
from rdkit.Chem import AllChem
from rdkit.Chem.rdchem import Mol
from rdkit import RDLogger
# Silence all RDKit log channels at import time (sanitization warnings etc.).
RDLogger.DisableLog("rdApp.*")
class NoReactionError(Exception):
    """Signals that a SMILES string passed to the encoder is not a reaction.

    Attributes:
        message: the offending (non-reaction) SMILES, kept for callers.
    """

    def __init__(self, message: str):
        super().__init__(message)
        self.message = message
class DrfpEncoder:
    """A class for encoding reaction SMILES as drfp (difference reaction) fingerprints."""

    @staticmethod
    def shingling_from_mol(
        in_mol: "Mol", radius: int = 3, rings: bool = True, min_radius: int = 0
    ) -> List[str]:
        """Creates a molecular shingling from a RDKit molecule (rdkit.Chem.rdchem.Mol).

        Arguments:
            in_mol: A RDKit molecule instance
            radius: The drfp radius (a radius of 3 corresponds to drfp6)
            rings: Whether or not to include rings in the shingling
            min_radius: The minimum radius that is used to extract n-grams

        Returns:
            The molecular shingling (UTF-8 encoded SMILES fragments).
        """
        shingling = []

        # Optionally add every SSSR ring as its own shingle.
        if rings:
            for ring in AllChem.GetSymmSSSR(in_mol):
                bonds = set()
                ring = list(ring)
                for i in ring:
                    for j in ring:
                        if i != j:
                            bond = in_mol.GetBondBetweenAtoms(i, j)
                            if bond is not None:
                                bonds.add(bond.GetIdx())
                shingling.append(
                    AllChem.MolToSmiles(
                        AllChem.PathToSubmol(in_mol, list(bonds)),
                        canonical=True,
                        allHsExplicit=True,
                    ).encode("utf-8")
                )

        # With min_radius == 0, every single atom is a shingle too.
        if min_radius == 0:
            for i, atom in enumerate(in_mol.GetAtoms()):
                shingling.append(atom.GetSmarts().encode("utf-8"))

        # Circular substructures of radius 1..radius rooted at every atom.
        for index, _ in enumerate(in_mol.GetAtoms()):
            for i in range(1, radius + 1):
                p = AllChem.FindAtomEnvironmentOfRadiusN(in_mol, i, index)
                amap = {}
                submol = AllChem.PathToSubmol(in_mol, p, atomMap=amap)

                if index not in amap:
                    continue

                smiles = AllChem.MolToSmiles(
                    submol,
                    rootedAtAtom=amap[index],
                    canonical=True,
                    allHsExplicit=True,
                )

                if smiles != "":
                    shingling.append(smiles.encode("utf-8"))

        # Set ensures that the same shingle is not hashed multiple times
        # (which would not change the hash, since there would be no new minima)
        return list(set(shingling))

    @staticmethod
    def internal_encode(
        in_smiles: str,
        radius: int = 3,
        min_radius: int = 0,
        rings: bool = True,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Creates an drfp array from a reaction SMILES string.

        Arguments:
            in_smiles: A valid reaction SMILES string
            radius: The drfp radius (a radius of 3 corresponds to drfp6)
            min_radius: The minimum radius that is used to extract n-grams
            rings: Whether or not to include rings in the shingling

        Returns:
            A tuple with two arrays, the first containing the drfp hash values, the second the substructure SMILES

        Raises:
            NoReactionError: if `in_smiles` lacks the two '>' separators.
        """
        sides = in_smiles.split(">")
        if len(sides) < 3:
            raise NoReactionError(
                f"The following is not a valid reaction SMILES: '{in_smiles}'"
            )

        # Agents (the middle part) are merged into the reactant side.
        if len(sides[1]) > 0:
            sides[0] += "." + sides[1]

        left = sides[0].split(".")
        right = sides[2].split(".")

        left_shingles = set()
        right_shingles = set()

        # FIX (cosmetic): reactant shingles previously accumulated in
        # `right_shingles` and vice versa. Only the symmetric difference
        # (and the equal-sets fallback) is used, so the output is unchanged;
        # the names are now simply accurate.
        for l in left:
            mol = AllChem.MolFromSmiles(l)
            if not mol:
                continue
            sh = DrfpEncoder.shingling_from_mol(
                mol,
                radius=radius,
                rings=rings,
                min_radius=min_radius,
            )
            for s in sh:
                left_shingles.add(s)

        for r in right:
            mol = AllChem.MolFromSmiles(r)
            if not mol:
                continue
            sh = DrfpEncoder.shingling_from_mol(
                mol,
                radius=radius,
                rings=rings,
                min_radius=min_radius,
            )
            for s in sh:
                right_shingles.add(s)

        s = right_shingles.symmetric_difference(left_shingles)

        # If reactant and product shingle sets are identical, fall back to the
        # (shared) full set rather than producing an empty fingerprint.
        if len(s) == 0:
            s = left_shingles

        return DrfpEncoder.hash(list(s)), list(s)

    @staticmethod
    def hash(shingling: List[str]) -> np.ndarray:
        """Directly hash all the SMILES in a shingling to 32-bit-wide integers.

        Arguments:
            shingling: A list of n-grams (UTF-8 encoded bytes)

        Returns:
            A numpy array of hashed n-grams
        """
        hash_values = []

        for t in shingling:
            hash_values.append(int(blake2b(t, digest_size=4).hexdigest(), 16))

        # BUG FIX: the 4-byte blake2b digest is an *unsigned* 32-bit value
        # (up to 2**32 - 1); storing it as np.int32 overflowed, which NumPy 2
        # rejects with OverflowError. int64 holds the value exactly, and the
        # folded fingerprint is unchanged for power-of-two lengths.
        return np.array(hash_values, dtype=np.int64)

    @staticmethod
    def fold(
        hash_values: np.ndarray, length: int = 2048
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Folds the hash values to a binary vector of a given length.

        Arguments:
            hash_values: An array containing the hash values
            length: The length of the folded fingerprint

        Returns:
            A tuple containing the folded fingerprint and the indices of the on bits
        """
        folded = np.zeros(length, dtype=np.uint8)
        on_bits = hash_values % length
        folded[on_bits] = 1

        return folded, on_bits

    @staticmethod
    def encode(
        X: Union[Iterable, str],
        n_folded_length: int = 2048,
        min_radius: int = 0,
        radius: int = 3,
        rings: bool = True,
        mapping: bool = False,
    ) -> Union[List[np.ndarray], Tuple[List[np.ndarray], Dict[int, Set[str]]]]:
        """Encodes a list of reaction SMILES using the drfp fingerprint.

        Args:
            X: An iterable (e.g. List) of reaction SMILES or a single reaction SMILES to be encoded
            n_folded_length: The folded length of the fingerprint (the parameter for the modulo hashing)
            min_radius: The minimum radius of a substructure (0 includes single atoms)
            radius: The maximum radius of a substructure
            rings: Whether to include full rings as substructures
            mapping: Return a feature to substructure mapping in addition to the fingerprints

        Returns:
            A list of drfp fingerprints or, if mapping is enabled, a tuple containing a list of drfp fingerprints and a mapping dict.
        """
        # Accept a single SMILES string as a convenience.
        if isinstance(X, str):
            X = [X]

        result = []
        result_map = defaultdict(set)
        for _, x in enumerate(X):
            hashed_diff, smiles_diff = DrfpEncoder.internal_encode(
                x, min_radius=min_radius, radius=radius, rings=rings
            )

            difference_folded, on_bits = DrfpEncoder.fold(
                hashed_diff,
                length=n_folded_length,
            )

            if mapping:
                for unfolded_index, folded_index in enumerate(on_bits):
                    result_map[folded_index].add(
                        smiles_diff[unfolded_index].decode("utf-8")
                    )

            result.append(difference_folded)

        if mapping:
            return result, result_map

        return result
| [
"rdkit.Chem.AllChem.MolToSmiles",
"hashlib.blake2b",
"rdkit.Chem.AllChem.MolFromSmiles",
"rdkit.Chem.AllChem.PathToSubmol",
"rdkit.Chem.AllChem.FindAtomEnvironmentOfRadiusN",
"rdkit.Chem.AllChem.GetSymmSSSR",
"numpy.array",
"numpy.zeros",
"collections.defaultdict",
"rdkit.RDLogger.DisableLog"
] | [((235, 265), 'rdkit.RDLogger.DisableLog', 'RDLogger.DisableLog', (['"""rdApp.*"""'], {}), "('rdApp.*')\n", (254, 265), False, 'from rdkit import RDLogger\n'), ((5355, 5392), 'numpy.array', 'np.array', (['hash_values'], {'dtype': 'np.int32'}), '(hash_values, dtype=np.int32)\n', (5363, 5392), True, 'import numpy as np\n'), ((5859, 5891), 'numpy.zeros', 'np.zeros', (['length'], {'dtype': 'np.uint8'}), '(length, dtype=np.uint8)\n', (5867, 5891), True, 'import numpy as np\n'), ((7143, 7159), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (7154, 7159), False, 'from collections import defaultdict\n'), ((1301, 1328), 'rdkit.Chem.AllChem.GetSymmSSSR', 'AllChem.GetSymmSSSR', (['in_mol'], {}), '(in_mol)\n', (1320, 1328), False, 'from rdkit.Chem import AllChem\n'), ((4070, 4094), 'rdkit.Chem.AllChem.MolFromSmiles', 'AllChem.MolFromSmiles', (['l'], {}), '(l)\n', (4091, 4094), False, 'from rdkit.Chem import AllChem\n'), ((4436, 4460), 'rdkit.Chem.AllChem.MolFromSmiles', 'AllChem.MolFromSmiles', (['r'], {}), '(r)\n', (4457, 4460), False, 'from rdkit.Chem import AllChem\n'), ((2222, 2276), 'rdkit.Chem.AllChem.FindAtomEnvironmentOfRadiusN', 'AllChem.FindAtomEnvironmentOfRadiusN', (['in_mol', 'i', 'index'], {}), '(in_mol, i, index)\n', (2258, 2276), False, 'from rdkit.Chem import AllChem\n'), ((2328, 2373), 'rdkit.Chem.AllChem.PathToSubmol', 'AllChem.PathToSubmol', (['in_mol', 'p'], {'atomMap': 'amap'}), '(in_mol, p, atomMap=amap)\n', (2348, 2373), False, 'from rdkit.Chem import AllChem\n'), ((2468, 2561), 'rdkit.Chem.AllChem.MolToSmiles', 'AllChem.MolToSmiles', (['submol'], {'rootedAtAtom': 'amap[index]', 'canonical': '(True)', 'allHsExplicit': '(True)'}), '(submol, rootedAtAtom=amap[index], canonical=True,\n allHsExplicit=True)\n', (2487, 2561), False, 'from rdkit.Chem import AllChem\n'), ((5295, 5320), 'hashlib.blake2b', 'blake2b', (['t'], {'digest_size': '(4)'}), '(t, digest_size=4)\n', (5302, 5320), False, 'from hashlib import blake2b\n')] |
"""
utilities to analyze results
"""
import numpy as np
import matplotlib.pyplot as plt
from chainer import cuda
from tqdm import tqdm
from GraphNNPredictor import formatDataset,myConcat
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
from Config import Config
CF=Config()
class AnalyzeUtility:
    """Helpers to evaluate a trained graph NN: inverse-scale predictions,
    plot predicted-vs-true values, and run batched inference.
    """
    def __init__(self,AutoSC,ggnn):
        # AutoSC: scaler container whose ``VSDict`` maps a target name to a
        # fitted scaler exposing ``inverse_transform`` -- presumably built by
        # the training pipeline (confirm against caller).
        # ggnn: trained graph NN callable taking (compound, adjacency) inputs.
        self.AutoSC=AutoSC
        self.ggnn=ggnn
    def invSc(self,x,target="electric conductivity"):
        # Undo the per-target scaling element-wise; an empty target means the
        # values are used as-is.
        if target!="":
            return np.array([self.AutoSC.VSDict[target].inverse_transform(i) for i in x])
        else:
            return x
    def plot(self,x,y,sigma=None,target="electric conductivity"):
        """Scatter-plot x vs y (optionally with error bars from ``sigma``),
        print R2 / MAE, and return the (possibly inverse-scaled) x, y."""
        # Only inverse-scale when the configured genre is ["V"]; otherwise the
        # inputs are plotted in scaled space.
        if CF.genre==["V"]:
            if target!="":
                if sigma is not None:
                    # Convert sigma from scaled space to original units by
                    # differencing the inverse-transformed bounds.
                    sigma=self.invSc(y+sigma,target)-self.invSc(y,target)
                x=self.invSc(x,target)
                y=self.invSc(y,target)
        plt.figure(figsize=(5,5))
        plt.axes().set_aspect('equal', 'datalim')
        if sigma is not None:
            plt.errorbar(x, y, yerr = sigma, capsize=5, fmt='o', markersize=10, ecolor='blue', markeredgecolor = "blue", color='w',alpha=0.5)
            print("Sigma ave.: ",np.average(sigma))
        else:
            plt.plot(x,y,"o")
        print("R2: ",r2_score(x,y))
        print("MAE: ",mean_absolute_error(x,y))
        return x,y
    def predictByGGNN_batch(self,dataset,batchSize=32):
        """Run the model over ``dataset`` in mini-batches of ``batchSize``;
        returns (predictions, targets) as numpy arrays."""
        splSize=batchSize
        tList=[]
        yList=[]
        # +1 so a trailing partial batch is processed; the empty-slice check
        # below terminates early when the size is an exact multiple.
        for i in tqdm(range(int(len(dataset)/splSize)+1)):
            bgnIndex=i*splSize
            finIndex=(i+1)*splSize
            if len(dataset[bgnIndex:finIndex])==0:
                break
            cmp,adj,t=myConcat(dataset[bgnIndex:finIndex])
            y = self.ggnn(cmp,adj)
            # Move results off the accelerator before accumulating on host.
            y=cuda.to_cpu(y.data)
            t=cuda.to_cpu(t)
            tList.extend(t)
            yList.extend(y)
        return np.array(yList),np.array(tList)
def currentTime():
    """Return the current local date-time as a string
    (e.g. ``'2024-01-01 12:00:00.000000'``)."""
    # Bug fix: ``datetime`` was never imported at module level, so calling
    # this function raised NameError. Import locally to keep the module's
    # import block untouched.
    import datetime
    return str(datetime.datetime.now())
| [
"Config.Config",
"GraphNNPredictor.myConcat",
"numpy.average",
"matplotlib.pyplot.plot",
"chainer.cuda.to_cpu",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.errorbar",
"sklearn.metrics.mean_absolute_error",
"sklearn.metrics.r2_score"
] | [((311, 319), 'Config.Config', 'Config', ([], {}), '()\n', (317, 319), False, 'from Config import Config\n'), ((978, 1004), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (988, 1004), True, 'import matplotlib.pyplot as plt\n'), ((1105, 1236), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'y'], {'yerr': 'sigma', 'capsize': '(5)', 'fmt': '"""o"""', 'markersize': '(10)', 'ecolor': '"""blue"""', 'markeredgecolor': '"""blue"""', 'color': '"""w"""', 'alpha': '(0.5)'}), "(x, y, yerr=sigma, capsize=5, fmt='o', markersize=10, ecolor=\n 'blue', markeredgecolor='blue', color='w', alpha=0.5)\n", (1117, 1236), True, 'import matplotlib.pyplot as plt\n'), ((1313, 1332), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {}), "(x, y, 'o')\n", (1321, 1332), True, 'import matplotlib.pyplot as plt\n'), ((1361, 1375), 'sklearn.metrics.r2_score', 'r2_score', (['x', 'y'], {}), '(x, y)\n', (1369, 1375), False, 'from sklearn.metrics import r2_score\n'), ((1398, 1423), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['x', 'y'], {}), '(x, y)\n', (1417, 1423), False, 'from sklearn.metrics import mean_absolute_error\n'), ((1813, 1849), 'GraphNNPredictor.myConcat', 'myConcat', (['dataset[bgnIndex:finIndex]'], {}), '(dataset[bgnIndex:finIndex])\n', (1821, 1849), False, 'from GraphNNPredictor import formatDataset, myConcat\n'), ((1912, 1931), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['y.data'], {}), '(y.data)\n', (1923, 1931), False, 'from chainer import cuda\n'), ((1946, 1960), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['t'], {}), '(t)\n', (1957, 1960), False, 'from chainer import cuda\n'), ((2046, 2061), 'numpy.array', 'np.array', (['yList'], {}), '(yList)\n', (2054, 2061), True, 'import numpy as np\n'), ((2062, 2077), 'numpy.array', 'np.array', (['tList'], {}), '(tList)\n', (2070, 2077), True, 'import numpy as np\n'), ((1012, 1022), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1020, 1022), True, 'import 
matplotlib.pyplot as plt\n'), ((1268, 1285), 'numpy.average', 'np.average', (['sigma'], {}), '(sigma)\n', (1278, 1285), True, 'import numpy as np\n')] |
# Demo: NumPy fancy indexing with paired index arrays, plus random sampling.
import numpy as np
a = np.array([[1, 4], [98, 7]], dtype=float)
print(a)
# The row/column index arrays are walked in lockstep, selecting one
# element per (row, col) pair.
b = np.array([0, 0, 1, 1, 0], dtype=int)
c = np.array([0, 1, 1, 0, 1], dtype=int)
print(a[b, c])
# Six uniform random samples drawn from [0, 1).
print(np.random.rand(6))
| [
"numpy.array",
"numpy.random.rand"
] | [((39, 73), 'numpy.array', 'np.array', (['[[1, 4], [98, 7]]', 'float'], {}), '([[1, 4], [98, 7]], float)\n', (47, 73), True, 'import numpy as np\n'), ((88, 118), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0]', 'int'], {}), '([0, 0, 1, 1, 0], int)\n', (96, 118), True, 'import numpy as np\n'), ((123, 153), 'numpy.array', 'np.array', (['[0, 1, 1, 0, 1]', 'int'], {}), '([0, 1, 1, 0, 1], int)\n', (131, 153), True, 'import numpy as np\n'), ((201, 218), 'numpy.random.rand', 'np.random.rand', (['(6)'], {}), '(6)\n', (215, 218), True, 'import numpy as np\n')] |
import os
import sys
import pickle
import numpy as np
import torch
from GetLotteryHistory import GetLotteryHistory
import MyParam
def predict():
    """Load (or fetch and cache) the lottery history, build a model input
    window from the most recent LOTTERY_HEIGHT draws, and print the
    predicted class index with its softmax score."""
    _args_ = MyParam.ARGS()
    # Re-download the draw history when requested or when no pickle cache
    # exists on disk; otherwise reuse the cached list.
    if _args_.ReFetchLog or not os.path.isfile(MyParam.SAVE_LIST_FILENAME_2):
        LotteryHistoryLists = GetLotteryHistory()
        LotteryHistoryLists = sorted(LotteryHistoryLists, key=lambda k: k['Index'])
        with open(MyParam.SAVE_LIST_FILENAME_2, 'wb') as wfp:
            pickle.dump(LotteryHistoryLists, wfp)
    else:
        with open(MyParam.SAVE_LIST_FILENAME_2, 'rb') as rfp:
            LotteryHistoryLists = pickle.load(rfp)
    TargetIndex = len(LotteryHistoryLists)
    # Take the last LOTTERY_HEIGHT draws as one sample of shape
    # (1, LOTTERY_HEIGHT, LOTTERY_NUM).
    Index = TargetIndex - MyParam.LOTTERY_HEIGHT
    # NOTE(review): ``np.long`` was removed in NumPy 1.24; this line fails on
    # modern NumPy (np.int64 would be the drop-in replacement) -- confirm the
    # pinned NumPy version.
    MyDecodeMap = np.zeros((1, MyParam.LOTTERY_HEIGHT, MyParam.LOTTERY_NUM), dtype=np.long)
    # MyDecodeMap = np.zeros((1, MyParam.LOTTERY_HEIGHT, MyParam.LOTTERY_HEIGHT), dtype=np.long)
    for row in range(Index, Index + MyParam.LOTTERY_HEIGHT):
        for col in range(MyParam.LOTTERY_NUM):
            MyDecodeMap[0][row - Index][col] = LotteryHistoryLists[row]['Numbers'][col]
    # print('Load', MyParam.CHECKPOINT_FILENAME)
    print('load', MyParam.CHECKPOINT_FILENAME)
    checkpoint = torch.load(MyParam.CHECKPOINT_FILENAME)
    net = checkpoint['net'].to('cuda')
    net.eval()
    with torch.no_grad():
        MyDecodeMap = torch.tensor(MyDecodeMap, dtype=torch.long).to('cuda')
        Predict_Class = net(MyDecodeMap)
        # NOTE(review): softmax without an explicit dim= is deprecated in
        # PyTorch; dim=1 is presumably intended here -- confirm.
        Predict_Class = torch.nn.functional.softmax(Predict_Class)
        values, indices = torch.max(Predict_Class, 1)
        # Report the most likely class and its probability.
        print('[{0}]: {1}'.format(indices.item(), values.item()))
print('[{0}]: {1}'.format(indices.item(), values.item()))
def main():
    """Run prediction once for each of the 7 trained models (index 0..6)."""
    for model_index in range(7):
        # Point the shared config at this model's checkpoint, then predict.
        MyParam.TRAIN_NUM_INDEX = model_index
        MyParam.CHECKPOINT_FILENAME = 'checkpoint_2_{0}.ckpt'.format(model_index)
        predict()
if __name__ == "__main__":
sys.exit(main()) | [
"pickle.dump",
"torch.load",
"MyParam.ARGS",
"torch.max",
"pickle.load",
"os.path.isfile",
"torch.tensor",
"numpy.zeros",
"GetLotteryHistory.GetLotteryHistory",
"torch.no_grad",
"torch.nn.functional.softmax"
] | [((160, 174), 'MyParam.ARGS', 'MyParam.ARGS', ([], {}), '()\n', (172, 174), False, 'import MyParam\n'), ((733, 806), 'numpy.zeros', 'np.zeros', (['(1, MyParam.LOTTERY_HEIGHT, MyParam.LOTTERY_NUM)'], {'dtype': 'np.long'}), '((1, MyParam.LOTTERY_HEIGHT, MyParam.LOTTERY_NUM), dtype=np.long)\n', (741, 806), True, 'import numpy as np\n'), ((1214, 1253), 'torch.load', 'torch.load', (['MyParam.CHECKPOINT_FILENAME'], {}), '(MyParam.CHECKPOINT_FILENAME)\n', (1224, 1253), False, 'import torch\n'), ((283, 302), 'GetLotteryHistory.GetLotteryHistory', 'GetLotteryHistory', ([], {}), '()\n', (300, 302), False, 'from GetLotteryHistory import GetLotteryHistory\n'), ((1318, 1333), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1331, 1333), False, 'import torch\n'), ((1477, 1519), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['Predict_Class'], {}), '(Predict_Class)\n', (1504, 1519), False, 'import torch\n'), ((1547, 1574), 'torch.max', 'torch.max', (['Predict_Class', '(1)'], {}), '(Predict_Class, 1)\n', (1556, 1574), False, 'import torch\n'), ((207, 251), 'os.path.isfile', 'os.path.isfile', (['MyParam.SAVE_LIST_FILENAME_2'], {}), '(MyParam.SAVE_LIST_FILENAME_2)\n', (221, 251), False, 'import os\n'), ((461, 498), 'pickle.dump', 'pickle.dump', (['LotteryHistoryLists', 'wfp'], {}), '(LotteryHistoryLists, wfp)\n', (472, 498), False, 'import pickle\n'), ((605, 621), 'pickle.load', 'pickle.load', (['rfp'], {}), '(rfp)\n', (616, 621), False, 'import pickle\n'), ((1357, 1400), 'torch.tensor', 'torch.tensor', (['MyDecodeMap'], {'dtype': 'torch.long'}), '(MyDecodeMap, dtype=torch.long)\n', (1369, 1400), False, 'import torch\n')] |
import argparse
import cv2
import numpy as np
import torch
from time import *
from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.keypoints import extract_keypoints, group_keypoints
from modules.load_state import load_state
from modules.pose import Pose, track_poses
from val import normalize, pad_width
class ImageReader(object):
    """Iterable that lazily loads one BGR image per file name via OpenCV."""

    def __init__(self, file_names):
        self.file_names = file_names
        self.max_idx = len(file_names)

    def __iter__(self):
        # Restart from the first file each time iteration begins.
        self.idx = 0
        return self

    def __next__(self):
        if self.idx >= self.max_idx:
            raise StopIteration
        path = self.file_names[self.idx]
        img = cv2.imread(path, cv2.IMREAD_COLOR)
        if img.size == 0:
            raise IOError('Image {} cannot be read'.format(path))
        self.idx += 1
        return img
class VideoReader(object):
    """Iterable that yields frames from a video file or a webcam index."""

    def __init__(self, file_name):
        self.file_name = file_name
        # A purely numeric string denotes a webcam id; OpenCV wants an int.
        try:
            self.file_name = int(file_name)
        except ValueError:
            pass

    def __iter__(self):
        self.cap = cv2.VideoCapture(self.file_name)
        if not self.cap.isOpened():
            raise IOError('Video {} cannot be opened'.format(self.file_name))
        return self

    def __next__(self):
        ok, frame = self.cap.read()
        if not ok:
            raise StopIteration
        return frame
def infer_fast(net, img, net_input_height_size, stride, upsample_ratio, cpu,
               pad_value=(0, 0, 0), img_mean=np.array([128, 128, 128], np.float32), img_scale=np.float32(1/256)):
    """Run one forward pass of the pose net on a single frame.

    The frame (assumed HWC, OpenCV-style -- see the ``img.shape`` unpack and
    the HWC->CHW permute below) is resized so its height equals
    ``net_input_height_size``, normalized, padded, fed through ``net``, and
    the last two stage outputs are upsampled back by ``upsample_ratio``.

    Returns:
        (heatmaps, pafs, scale, pad): keypoint heatmaps, part affinity
        fields, the applied resize factor, and the padding amounts needed
        to map coordinates back to the original image.
    """
    height, width, _ = img.shape
    # Fix the network input height; width scales proportionally.
    scale = net_input_height_size / height
    scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
    scaled_img = normalize(scaled_img, img_mean, img_scale)
    # Pad so both spatial dims are at least the net height and a multiple of
    # ``stride`` (required by the downsampling backbone).
    min_dims = [net_input_height_size, max(scaled_img.shape[1], net_input_height_size)]
    padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)
    # HWC uint8 -> NCHW float tensor.
    tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float()
    if not cpu:
        tensor_img = tensor_img.cuda()
    stages_output = net(tensor_img)
    # Second-to-last stage output: keypoint heatmaps.
    stage2_heatmaps = stages_output[-2]
    heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
    heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
    # Last stage output: part affinity fields.
    stage2_pafs = stages_output[-1]
    pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
    pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
    return heatmaps, pafs, scale, pad
def run_demo(net, image_provider, height_size, cpu, track, smooth):
    """Run pose estimation over every frame from ``image_provider``, draw the
    detected skeletons and boxes in an OpenCV window, and print per-frame and
    total timings.

    Keys: ESC quits; 'p' toggles pause (switches the waitKey delay 1 <-> 0).
    """
    net = net.eval()
    if not cpu:
        net = net.cuda()
    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts
    previous_poses = []
    delay = 1
    i=0
    ct=time()
    for img in image_provider:
        at=time()
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)
        # Collect candidate keypoints per type from the heatmaps.
        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # 19th for bg
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)
        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs)
        # Map keypoint coordinates back to the original image resolution,
        # undoing the network stride, upsampling, padding and resize.
        for kpt_id in range(all_keypoints.shape[0]):
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
        current_poses = []
        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            # Keypoints missing from this pose entry stay (-1, -1).
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
            # NOTE(review): debug print left inside the per-pose loop;
            # consider removing for real runs.
            print(pose_keypoints)
            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)
        if track:
            track_poses(previous_poses, current_poses, smooth=smooth)
            previous_poses = current_poses
        for pose in current_poses:
            pose.draw(img)
        # Blend the skeleton overlay with the original frame.
        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)
        bt=time()
        # "Frame {i} took {seconds}" (message text is Chinese).
        print("第{}张图耗时{}".format(i,bt-at))
        i=i+1
        for pose in current_poses:
            cv2.rectangle(img, (pose.bbox[0], pose.bbox[1]),
                          (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))
            if track:
                cv2.putText(img, 'id: {}'.format(pose.id), (pose.bbox[0], pose.bbox[1] - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
        cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
        key = cv2.waitKey(delay)
        if key == 27:  # esc
            return
        elif key == 112:  # 'p' toggles pause
            if delay == 1:
                delay = 0
            else:
                delay = 1
    dt=time()
    # "Total time" (message text is Chinese).
    print('一共耗时{}'.format(dt-ct))
if __name__ == '__main__':
    # CLI entry point: parse arguments, restore the model from the
    # checkpoint, pick a frame source (images or video), and run the demo.
    parser = argparse.ArgumentParser(
        description='''Lightweight human pose estimation python demo.
                       This is just for quick results preview.
                       Please, consider c++ demo for the best performance.''')
    parser.add_argument('--checkpoint-path', type=str, default='/media/wow/disk2/STT2/STTran-main/light_openpose/checkpoint_iter_370000.pth', help='path to the checkpoint')
    parser.add_argument('--height-size', type=int, default=256, help='network input layer height size')
    parser.add_argument('--video', type=str, default='/media/wow/disk2/warmup.mp4', help='path to video file or camera id')
    parser.add_argument('--images', nargs='+', default='', help='path to input image(s)')
    parser.add_argument('--cpu', action='store_true', help='run network inference on cpu')
    parser.add_argument('--track', type=int, default=1, help='track pose id in video')
    parser.add_argument('--smooth', type=int, default=1, help='smooth pose keypoints')
    args = parser.parse_args()
    if args.video == '' and args.images == '':
        raise ValueError('Either --video or --image has to be provided')
    # Build the model on CPU first; run_demo moves it to GPU unless --cpu.
    net = PoseEstimationWithMobileNet()
    checkpoint = torch.load(args.checkpoint_path, map_location='cpu')
    load_state(net, checkpoint)
    a=time()
    frame_provider = ImageReader(args.images)
    b=time()
    # "Loading took: {seconds}" (message text is Chinese).
    print('读取时间耗时:{}'.format(b-a))
    # A non-empty --video overrides --images; tracking is disabled for images.
    if args.video != '':
        frame_provider = VideoReader(args.video)
    else:
        args.track = 0
    run_demo(net, frame_provider, args.height_size, args.cpu, args.track, args.smooth)
| [
"cv2.rectangle",
"modules.keypoints.group_keypoints",
"models.with_mobilenet.PoseEstimationWithMobileNet",
"torch.from_numpy",
"cv2.imshow",
"numpy.array",
"modules.keypoints.extract_keypoints",
"modules.pose.Pose",
"argparse.ArgumentParser",
"cv2.addWeighted",
"val.normalize",
"cv2.waitKey",
... | [((1579, 1616), 'numpy.array', 'np.array', (['[128, 128, 128]', 'np.float32'], {}), '([128, 128, 128], np.float32)\n', (1587, 1616), True, 'import numpy as np\n'), ((1628, 1647), 'numpy.float32', 'np.float32', (['(1 / 256)'], {}), '(1 / 256)\n', (1638, 1647), True, 'import numpy as np\n'), ((1742, 1817), 'cv2.resize', 'cv2.resize', (['img', '(0, 0)'], {'fx': 'scale', 'fy': 'scale', 'interpolation': 'cv2.INTER_LINEAR'}), '(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)\n', (1752, 1817), False, 'import cv2\n'), ((1835, 1877), 'val.normalize', 'normalize', (['scaled_img', 'img_mean', 'img_scale'], {}), '(scaled_img, img_mean, img_scale)\n', (1844, 1877), False, 'from val import normalize, pad_width\n'), ((1988, 2038), 'val.pad_width', 'pad_width', (['scaled_img', 'stride', 'pad_value', 'min_dims'], {}), '(scaled_img, stride, pad_value, min_dims)\n', (1997, 2038), False, 'from val import normalize, pad_width\n'), ((2357, 2458), 'cv2.resize', 'cv2.resize', (['heatmaps', '(0, 0)'], {'fx': 'upsample_ratio', 'fy': 'upsample_ratio', 'interpolation': 'cv2.INTER_CUBIC'}), '(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio,\n interpolation=cv2.INTER_CUBIC)\n', (2367, 2458), False, 'import cv2\n'), ((2580, 2677), 'cv2.resize', 'cv2.resize', (['pafs', '(0, 0)'], {'fx': 'upsample_ratio', 'fy': 'upsample_ratio', 'interpolation': 'cv2.INTER_CUBIC'}), '(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio,\n interpolation=cv2.INTER_CUBIC)\n', (2590, 2677), False, 'import cv2\n'), ((5516, 5753), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Lightweight human pose estimation python demo.\n This is just for quick results preview.\n Please, consider c++ demo for the best performance."""'}), '(description=\n """Lightweight human pose estimation python demo.\n This is just for quick results preview.\n Please, consider c++ demo for the best performance."""\n )\n', (5539, 5753), False, 'import argparse\n'), ((6672, 6701), 
'models.with_mobilenet.PoseEstimationWithMobileNet', 'PoseEstimationWithMobileNet', ([], {}), '()\n', (6699, 6701), False, 'from models.with_mobilenet import PoseEstimationWithMobileNet\n'), ((6719, 6771), 'torch.load', 'torch.load', (['args.checkpoint_path'], {'map_location': '"""cpu"""'}), "(args.checkpoint_path, map_location='cpu')\n", (6729, 6771), False, 'import torch\n'), ((6776, 6803), 'modules.load_state.load_state', 'load_state', (['net', 'checkpoint'], {}), '(net, checkpoint)\n', (6786, 6803), False, 'from modules.load_state import load_state\n'), ((643, 698), 'cv2.imread', 'cv2.imread', (['self.file_names[self.idx]', 'cv2.IMREAD_COLOR'], {}), '(self.file_names[self.idx], cv2.IMREAD_COLOR)\n', (653, 698), False, 'import cv2\n'), ((1147, 1179), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.file_name'], {}), '(self.file_name)\n', (1163, 1179), False, 'import cv2\n'), ((3446, 3490), 'modules.keypoints.group_keypoints', 'group_keypoints', (['all_keypoints_by_type', 'pafs'], {}), '(all_keypoints_by_type, pafs)\n', (3461, 3490), False, 'from modules.keypoints import extract_keypoints, group_keypoints\n'), ((4640, 4683), 'cv2.addWeighted', 'cv2.addWeighted', (['orig_img', '(0.6)', 'img', '(0.4)', '(0)'], {}), '(orig_img, 0.6, img, 0.4, 0)\n', (4655, 4683), False, 'import cv2\n'), ((5150, 5214), 'cv2.imshow', 'cv2.imshow', (['"""Lightweight Human Pose Estimation Python Demo"""', 'img'], {}), "('Lightweight Human Pose Estimation Python Demo', img)\n", (5160, 5214), False, 'import cv2\n'), ((5229, 5247), 'cv2.waitKey', 'cv2.waitKey', (['delay'], {}), '(delay)\n', (5240, 5247), False, 'import cv2\n'), ((3320, 3410), 'modules.keypoints.extract_keypoints', 'extract_keypoints', (['heatmaps[:, :, kpt_idx]', 'all_keypoints_by_type', 'total_keypoints_num'], {}), '(heatmaps[:, :, kpt_idx], all_keypoints_by_type,\n total_keypoints_num)\n', (3337, 3410), False, 'from modules.keypoints import extract_keypoints, group_keypoints\n'), ((4352, 4393), 'modules.pose.Pose', 
'Pose', (['pose_keypoints', 'pose_entries[n][18]'], {}), '(pose_keypoints, pose_entries[n][18])\n', (4356, 4393), False, 'from modules.pose import Pose, track_poses\n'), ((4463, 4520), 'modules.pose.track_poses', 'track_poses', (['previous_poses', 'current_poses'], {'smooth': 'smooth'}), '(previous_poses, current_poses, smooth=smooth)\n', (4474, 4520), False, 'from modules.pose import Pose, track_poses\n'), ((4806, 4932), 'cv2.rectangle', 'cv2.rectangle', (['img', '(pose.bbox[0], pose.bbox[1])', '(pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3])', '(0, 255, 0)'], {}), '(img, (pose.bbox[0], pose.bbox[1]), (pose.bbox[0] + pose.bbox[\n 2], pose.bbox[1] + pose.bbox[3]), (0, 255, 0))\n', (4819, 4932), False, 'import cv2\n'), ((3928, 3971), 'numpy.ones', 'np.ones', (['(num_keypoints, 2)'], {'dtype': 'np.int32'}), '((num_keypoints, 2), dtype=np.int32)\n', (3935, 3971), True, 'import numpy as np\n'), ((2057, 2085), 'torch.from_numpy', 'torch.from_numpy', (['padded_img'], {}), '(padded_img)\n', (2073, 2085), False, 'import torch\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""State management for eager execution."""
import collections
import contextlib
import copy
import gc
import os
import random
import threading
from absl import logging
import numpy as np
import six
from tensorflow.core.framework import function_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python import tf2
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.eager import executor
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import tfrt_utils
from tensorflow.python.util import compat
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
GRAPH_MODE = 0
EAGER_MODE = 1
default_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE
# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}
_starting_device_spec = pydev.DeviceSpec.from_string("")
_MAXINT32 = 2**31 - 1
DEVICE_PLACEMENT_EXPLICIT = pywrap_tfe.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tfe.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)
SYNC = 0
ASYNC = 1
_KEEP_ALIVE_SECS = 600
_python_eager_context_create_counter = monitoring.Counter(
"/tensorflow/api/python/eager_context_create_counter",
"Counter for number of eager contexts created in Python.")
# Re-exporting through context.
is_tfrt_enabled = tfrt_utils.enabled
# This flag and the associated environment var are transient and will eventually
# be removed, once this experiment is enabled by default.
_RUN_EAGER_OP_AS_FUNCTION_ENABLED = os.getenv(
"TF_RUN_EAGER_OP_AS_FUNCTION") == "1"
# This method should only be called after the context has been initialized.
def enable_run_eager_op_as_function():
  """Execute elementary eager ops (non-function) wrapped in a call op.

  This should be functionally equivalent to running the eager op's kernel
  directly (the default) but reduces the number of codepaths for executing
  TF2 programs in the runtime, thereby improving consistency (in terms of
  optimizations and rewrites for instance) and maintainability.
  """
  global _RUN_EAGER_OP_AS_FUNCTION_ENABLED
  _RUN_EAGER_OP_AS_FUNCTION_ENABLED = True
  ctx = context_safe()
  if ctx is not None:
    ctx.run_eager_op_as_function = True
# This method should only be called after the context has been initialized.
def disable_run_eager_op_as_function():
  """Revert to executing elementary eager ops via their kernels directly."""
  global _RUN_EAGER_OP_AS_FUNCTION_ENABLED
  _RUN_EAGER_OP_AS_FUNCTION_ENABLED = False
  ctx = context_safe()
  if ctx is not None:
    ctx.run_eager_op_as_function = False
def run_eager_op_as_function_enabled():
  """Return the current setting, preferring the live context's flag."""
  ctx = context_safe()
  if ctx is not None:
    return ctx.run_eager_op_as_function
  return _RUN_EAGER_OP_AS_FUNCTION_ENABLED
# Expose it as internally public APIs for Keras use cases in b/171080602.
# (Registers `is_tfrt_enabled` under tf.__internal__; v1=[] means no v1 name.)
tf_export("__internal__.is_tfrt_enabled", v1=[])(is_tfrt_enabled)
class _EagerTensorCache(object):
"""Simple cache which evicts items based on length in a FIFO manner."""
__slots__ = ["_data", "_max_items", "_max_tensor_size"]
def __init__(self, max_items=256, max_tensor_size=10000):
self._data = collections.OrderedDict()
self._max_items = max_items
self._max_tensor_size = max_tensor_size
def put(self, key, value):
if value._num_elements() > self._max_tensor_size: # pylint: disable=protected-access
return
self._data[key] = value
if len(self._data) > self._max_items:
self._data.popitem(last=False)
def get(self, key):
return self._data.get(key, None)
def flush(self):
self._data.clear()
class FunctionCallOptions(object):
  """Options applied at call sites of eager functions.

  Eager functions are functions decorated with tf.contrib.eager.defun.
  """

  __slots__ = ["_config_proto_serialized", "_executor_type"]

  def __init__(self, executor_type=None, config_proto=None):
    """Constructor.

    Args:
      executor_type: (optional) name of the executor to be used to execute the
        eager function. If None or an empty string, the default Tensorflow
        executor will be used.
      config_proto: (optional) a `config_pb2.ConfigProto` proto or a serialized
        string of that proto. The config used by Grappler when optimizing the
        function graph. Each concrete function is optimized the first time is
        called. Changing config_proto after the first call has no effect. If
        config_proto is None, an empty RewriterConfig will be used.
    """
    # Route both values through the property setters so validation applies.
    self.config_proto_serialized = config_proto
    self.executor_type = executor_type

  @property
  def executor_type(self):
    return self._executor_type

  @executor_type.setter
  def executor_type(self, executor_type):
    self._executor_type = executor_type

  @property
  def config_proto_serialized(self):
    return self._config_proto_serialized

  @config_proto_serialized.setter
  def config_proto_serialized(self, config):
    # Normalize the accepted forms (proto message, serialized string, None)
    # to a serialized string before storing.
    if isinstance(config, config_pb2.ConfigProto):
      serialized = config.SerializeToString(deterministic=True)
    elif isinstance(config, str):
      serialized = config
    elif config is None:
      serialized = config_pb2.ConfigProto().SerializeToString()
    else:
      raise ValueError("the rewriter config must be either a "
                       "config_pb2.ConfigProto, or a serialized string of that "
                       "proto or None. got: {}".format(type(config)))
    self._config_proto_serialized = serialized
# Map from context_id (an int) to _TensorCaches.
# Dicts are thread safe in CPython.
# TODO(iga): Remove this once TensorCaches are moved to C++.
_tensor_caches_map = {}
class _TensorCaches(threading.local):
"""Thread local tensor caches."""
__slots__ = ["_ones_rank_cache", "_zeros_cache"]
def __init__(self):
super(_TensorCaches, self).__init__()
self._ones_rank_cache = None
self._zeros_cache = None
@property
def ones_rank_cache(self):
if not self._ones_rank_cache:
self._ones_rank_cache = _EagerTensorCache()
return self._ones_rank_cache
@property
def zeros_cache(self):
if not self._zeros_cache:
self._zeros_cache = _EagerTensorCache()
return self._zeros_cache
# One entry of `_ContextSwitchStack`:
#   is_building_function: whether the entered context is building a function.
#   enter_context_fn: callable that performs the switch
#     (e.g. `graph.as_default` or `eager_mode`).
#   device_stack: the graph's device function stack; None for eager contexts.
ContextSwitch = collections.namedtuple(
    "ContextSwitch",
    ["is_building_function", "enter_context_fn", "device_stack"])
# `_ContextSwitchStack` is a `threading.local` to match the semantics of
# `DefaultGraphStack`, which is also a `threading.local`.
class _ContextSwitchStack(threading.local):
"""A thread-local stack of context switches."""
def __init__(self, eager):
super(_ContextSwitchStack, self).__init__()
self.stack = []
if eager:
# Initialize the stack with a pointer to enter the eager context; this
# ensures that the fact that eager execution was enabled is propagated
# across threads, since (1) `enable_eager_execution` modifies a
# process-level flag (`default_execution_mode`) and (2) `__init__` is
# called each time a threading.local object is used in a separate thread.
self.push(
is_building_function=False,
enter_context_fn=eager_mode,
device_stack=None)
def push(self, is_building_function, enter_context_fn, device_stack):
"""Push metadata about a context switch onto the stack.
A context switch can take any one of the two forms: installing a graph as
the default graph, or entering the eager context. For each context switch,
we record whether or not the entered context is building a function.
Args:
is_building_function: (bool.) Whether the context is building a function.
enter_context_fn: (function.) A callable that executes the context switch.
For example, `graph.as_default` or `eager_mode`.
device_stack: If applicable, the device function stack for this graph.
When breaking out of graphs in init_scope, the innermost nonempty device
stack is used. Eager contexts put `None` here and the value is never
used.
"""
self.stack.append(
ContextSwitch(is_building_function, enter_context_fn, device_stack))
def pop(self):
"""Pop the stack."""
self.stack.pop()
@tf_export("config.LogicalDevice")
class LogicalDevice(
    collections.namedtuple("LogicalDevice", ["name", "device_type"])):
  """Abstraction for a logical device initialized by the runtime.

  A `tf.config.LogicalDevice` corresponds to an initialized logical device on a
  `tf.config.PhysicalDevice` or a remote device visible to the cluster. Tensors
  and operations can be placed on a specific logical device by calling
  `tf.device` with a specified `tf.config.LogicalDevice`.

  Fields:
    name: The fully qualified name of the device. Can be used for Op or function
      placement.
    device_type: String declaring the type of device such as "CPU" or "GPU".
  """
  # All behavior (fields, equality, repr) comes from the namedtuple base;
  # this subclass only carries the exported name and docstring.
  pass
@tf_export("config.LogicalDeviceConfiguration",
           "config.experimental.VirtualDeviceConfiguration")
class LogicalDeviceConfiguration(
    collections.namedtuple("LogicalDeviceConfiguration",
                           ["memory_limit", "experimental_priority"])):
  """Configuration class for a logical devices.

  The class specifies the parameters to configure a `tf.config.PhysicalDevice`
  as it is initialized to a `tf.config.LogicalDevice` during runtime
  initialization. Not all fields are valid for all device types.

  See `tf.config.get_logical_device_configuration` and
  `tf.config.set_logical_device_configuration` for usage examples.

  Fields:
    memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual
      device. Currently only supported for GPUs.
    experimental_priority: (optional) Priority to assign to a virtual device.
      Lower values have higher priorities and 0 is the default.
      Within a physical GPU, the GPU scheduler will prioritize ops on virtual
      devices with higher priority. Currently only supported for Nvidia GPUs.
  """

  def __new__(cls, memory_limit=None, experimental_priority=None):
    # Override __new__ (namedtuples are immutable) so both fields default to
    # None, allowing `LogicalDeviceConfiguration()` with no arguments.
    return super(LogicalDeviceConfiguration,
                 cls).__new__(cls, memory_limit, experimental_priority)
@tf_export("config.PhysicalDevice")
class PhysicalDevice(
    collections.namedtuple("PhysicalDevice", ["name", "device_type"])):
  """Abstraction for a locally visible physical device.

  TensorFlow can utilize various devices such as the CPU or multiple GPUs
  for computation. Before initializing a local device for use, the user can
  customize certain properties of the device such as it's visibility or memory
  configuration.

  Once a visible `tf.config.PhysicalDevice` is initialized one or more
  `tf.config.LogicalDevice` objects are created. Use
  `tf.config.set_visible_devices` to configure the visibility of a physical
  device and `tf.config.set_logical_device_configuration` to configure multiple
  `tf.config.LogicalDevice` objects for a `tf.config.PhysicalDevice`. This is
  useful when separation between models is needed or to simulate a multi-device
  environment.

  Fields:
    name: Unique identifier for device.
    device_type: String declaring the type of device such as "CPU" or "GPU".
  """
  # All behavior (fields, equality, repr) comes from the namedtuple base;
  # this subclass only carries the exported name and docstring.
  pass
class _AtomicCounter(object):
"""A simple atomic counter."""
__slots__ = ["_value", "_lock"]
def __init__(self):
self._value = 0
self._lock = threading.Lock()
def increment_and_get(self):
with self._lock:
self._value += 1
return self._value
_context_id_counter = _AtomicCounter()
class _TensorCacheDeleter(object):
"""Deletes tensor caches for a given context."""
__slots__ = ["_context_id"]
def __init__(self, context_id):
self._context_id = context_id
def __del__(self):
if _tensor_caches_map is None:
return
if self._context_id in _tensor_caches_map:
del _tensor_caches_map[self._context_id]
# TODO(agarwal): rename to EagerContext / EagerRuntime ?
# TODO(agarwal): consider keeping the corresponding Graph here.
class Context(object):
"""Environment in which eager operations execute."""
# TODO(agarwal): create and link in some documentation for `execution_mode`.
# pylint: disable=redefined-outer-name
  def __init__(self,
               config=None,
               device_policy=None,
               execution_mode=None,
               server_def=None):
    """Creates a new Context.

    Args:
      config: (Optional.) A `ConfigProto` protocol buffer with configuration
        options for the Context. Note that a lot of these options may be
        currently unimplemented or irrelevant when eager execution is enabled.
      device_policy: (Optional.) What policy to use when trying to run an
        operation on a device with inputs which are not on that device. When set
        to None, an appropriate value will be picked automatically. The value
        picked may change between TensorFlow releases. Defaults to
        DEVICE_PLACEMENT_SILENT.
        Valid values:
        - DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is not
          correct.
        - DEVICE_PLACEMENT_WARN: copies the tensors which are not on the right
          device but raises a warning.
        - DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might hide
          performance problems.
        - DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,
          raising errors on the other ones.
      execution_mode: (Optional.) Policy controlling how operations dispatched
        are actually executed. When set to None, an appropriate value will be
        picked automatically. The value picked may change between TensorFlow
        releases.
        Valid values:
        - SYNC: executes each operation synchronously.
        - ASYNC: executes each operation asynchronously. These operations may
          return "non-ready" handles.
      server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution
        on remote devices. GrpcServers need to be started by creating an
        identical server_def to this, and setting the appropriate task_indexes,
        so that the servers can communicate. It will then be possible to execute
        operations on remote devices.

    Raises:
      ValueError: If execution_mode is not valid.
    """
    # This _id is used only to index the tensor caches.
    # TODO(iga): Remove this when tensor caches are moved to C++.
    self._id = _context_id_counter.increment_and_get()
    # Holding the deleter on self ties the lifetime of this context's entry in
    # _tensor_caches_map to the lifetime of the Context object itself.
    self._tensor_cache_deleter = _TensorCacheDeleter(self._id)
    _tensor_caches_map[self._id] = _TensorCaches()
    self._config = config
    self._thread_local_data = pywrap_tfe.EagerContextThreadLocalData(
        self,
        is_eager=lambda: default_execution_mode == EAGER_MODE,
        device_spec=_starting_device_spec)
    self._context_switches = _ContextSwitchStack(self.executing_eagerly())
    # The C context handle and device list are created lazily in
    # ensure_initialized(), guarded by _initialize_lock.
    self._context_handle = None
    self._context_devices = None
    self._seed = None
    self._initialize_lock = threading.Lock()
    self._initialized = False
    if device_policy is None:
      device_policy = DEVICE_PLACEMENT_SILENT
    self._device_policy = device_policy
    self._mirroring_policy = None
    if execution_mode not in (None, SYNC, ASYNC):
      raise ValueError("execution_mode should be None/SYNC/ASYNC. Got %s" %
                       execution_mode)
    if execution_mode is None:
      execution_mode = SYNC
    self._default_is_async = execution_mode == ASYNC
    self._use_tfrt = is_tfrt_enabled()
    self._use_tfrt_distributed_runtime = None
    self._run_eager_op_as_function = run_eager_op_as_function_enabled()
    self._server_def = server_def
    self._collective_ops_server_def = None
    self._collective_leader = None
    self._collective_scoped_allocator_enabled_ops = None
    self._collective_use_nccl_communication = None
    self._collective_device_filters = None
    self._coordination_service = None
    # Physical/virtual device bookkeeping; populated lazily by
    # _initialize_physical_devices() under _device_lock.
    self._device_lock = threading.Lock()
    self._physical_devices = None
    self._physical_device_to_index = None
    self._visible_device_list = []
    self._memory_growth_map = None
    self._virtual_device_map = {}
    # Values set after construction
    self._optimizer_jit = None
    self._intra_op_parallelism_threads = None
    self._inter_op_parallelism_threads = None
    self._soft_device_placement = None
    self._log_device_placement = None
    self._enable_mlir_graph_optimization = None
    self._optimizer_experimental_options = {}
    _python_eager_context_create_counter.get_cell().increase_by(1)
# pylint: enable=redefined-outer-name
  def _set_global_seed(self, seed):
    """Set a global eager mode seed for random ops.

    Args:
      seed: the seed value. Any hashable value, or an array-like convertible
        to an int (e.g. `np.int64`, `np.ndarray`).
    """
    self._seed = seed
    # `random.Random(seed)` needs `seed` to be hashable, while values of type
    # e.g. `np.int64` or `np.ndarray` are not. We use `int(...)` to convert them
    # to int.
    try:
      hash(seed)
    except TypeError:
      seed = int(np.array(seed))
    self._rng = random.Random(seed)
    # Also clear the kernel cache, to reset any existing seeds
    if self._context_handle is not None:
      pywrap_tfe.TFE_ContextClearCaches(self._context_handle)
  def _internal_operation_seed(self):
    """Returns a fake operation seed.

    In eager mode, user shouldn't set or depend on operation seed.
    Here, we generate a random seed based on global seed to make
    operation's randomness different and depend on the global seed.

    Returns:
      A fake operation seed based on global seed.
    """
    # _MAXINT32 bounds the seed; presumably so it fits in an int32 expected by
    # op kernels — TODO confirm.
    return self._rng.randint(0, _MAXINT32)
  def _initialize_logical_devices(self):
    """Helper to initialize devices."""
    # Store list of devices
    logical_devices = []
    context_devices = []
    device_list = pywrap_tfe.TFE_ContextListDevices(self._context_handle)
    try:
      self._num_gpus = 0
      current_job, current_task = None, None
      server_def = self._server_def or self._collective_ops_server_def
      if server_def is not None:
        current_job, current_task = server_def.job_name, server_def.task_index
      for i in range(pywrap_tfe.TF_DeviceListCount(device_list)):
        dev_name = pywrap_tfe.TF_DeviceListName(device_list, i)
        context_devices.append(pydev.canonical_name(dev_name))
        spec = pydev.DeviceSpec.from_string(dev_name)
        # If the job is localhost, we assume that the cluster has not yet been
        # configured and thus clear the job, replica & task.
        if spec.job == "localhost":
          spec = spec.replace(job=None, replica=None, task=None)
        logical_devices.append(
            LogicalDevice(name=spec.to_string(), device_type=spec.device_type))
        dev_type = pywrap_tfe.TF_DeviceListType(device_list, i)
        # Only GPUs owned by this job/task count toward self._num_gpus.
        if (dev_type == "GPU" and spec.job == current_job and
            spec.task == current_task):
          self._num_gpus += 1
    finally:
      # Publish whatever was collected (even on error) and free the C list.
      self._logical_devices = logical_devices
      self._context_devices = context_devices
      pywrap_tfe.TF_DeleteDeviceList(device_list)
  def ensure_initialized(self):
    """Initialize handle and devices if not already done so."""
    # Fast path without the lock; double-checked inside the lock below.
    if self._initialized:
      return
    with self._initialize_lock:
      if self._initialized:
        return
      assert self._context_devices is None
      opts = pywrap_tfe.TFE_NewContextOptions()
      try:
        config_str = self.config.SerializeToString()
        pywrap_tfe.TFE_ContextOptionsSetConfig(opts, config_str)
        if self._device_policy is not None:
          pywrap_tfe.TFE_ContextOptionsSetDevicePlacementPolicy(
              opts, self._device_policy)
        if self._mirroring_policy is not None:
          pywrap_tfe.TFE_ContextOptionsSetMirroringPolicy(
              opts, self._mirroring_policy)
        # NOTE(review): _default_is_async is a bool compared against ASYNC (an
        # int constant); this relies on bool/int equality — confirm intended.
        if self._default_is_async == ASYNC:
          pywrap_tfe.TFE_ContextOptionsSetAsync(opts, True)
        if self._use_tfrt is not None:
          pywrap_tfe.TFE_ContextOptionsSetTfrt(opts, self._use_tfrt)
        # pylint: disable=g-backslash-continuation
        if self._use_tfrt is not None and \
            self._use_tfrt_distributed_runtime is not None:
          pywrap_tfe.TFE_ContextOptionsSetTfrtDistributedRuntime(
              opts, self._use_tfrt_distributed_runtime)
        pywrap_tfe.TFE_ContextOptionsSetRunEagerOpAsFunction(
            opts, self._run_eager_op_as_function)
        context_handle = pywrap_tfe.TFE_NewContext(opts)
      finally:
        # Options are copied into the context; always free them.
        pywrap_tfe.TFE_DeleteContextOptions(opts)
      assert not (self._server_def and self._collective_ops_server_def), (
          "Cannot enable remote execution as well as collective ops at the "
          "moment. If this is important to you, please file an issue.")
      if self._server_def is not None:
        server_def_str = self._server_def.SerializeToString()
        pywrap_tfe.TFE_ContextSetServerDef(context_handle, _KEEP_ALIVE_SECS,
                                           server_def_str)
      elif self._collective_ops_server_def is not None:
        server_def_str = self._collective_ops_server_def.SerializeToString()
        pywrap_tfe.TFE_EnableCollectiveOps(context_handle, server_def_str)
      self._context_handle = context_handle
      self._initialize_logical_devices()
      # Set last so concurrent fast-path readers only see a fully built context.
      self._initialized = True
  def _clear_caches(self):
    """Flushes this context's Python tensor caches and the C scalar cache."""
    self.ones_rank_cache().flush()
    self.zeros_cache().flush()
    pywrap_tfe.TFE_ClearScalarCache()
  def get_server_def(self):
    """Returns the server_def set on this context, or None if unset."""
    return self._server_def
  def set_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):
    """Allow setting a server_def on the context.

    When a server def is replaced, it effectively clears a bunch of caches
    within the context. If you attempt to use a tensor object that was pointing
    to a tensor on the remote device, it will raise an error.

    Args:
      server_def: A tensorflow::ServerDef proto. Enables execution on remote
        devices.
      keep_alive_secs: Num. seconds after which the remote end will hang up. As
        long as the client is still alive, the server state for the context will
        be kept alive. If the client is killed (or there is some failure), the
        server will clean up its context keep_alive_secs after the final RPC it
        receives.

    Raises:
      ValueError: if server_def is None.
    """
    if not server_def:
      raise ValueError("server_def is None.")
    self._server_def = server_def
    # Only push to the C layer if the context was already initialized;
    # otherwise ensure_initialized() will apply the stored server_def later.
    if self._context_handle:
      server_def_str = server_def.SerializeToString()
      pywrap_tfe.TFE_ContextSetServerDef(self._context_handle, keep_alive_secs,
                                         server_def_str)
      self._initialize_logical_devices()
    # Clear all the caches in case there are remote tensors in them.
    self._clear_caches()
  def update_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):
    """Update a server_def on the context.

    Args:
      server_def: A tensorflow::ServerDef proto. Enables execution on remote
        devices.
      keep_alive_secs: Num. seconds after which the remote end will hang up. As
        long as the client is still alive, the server state for the context will
        be kept alive. If the client is killed (or there is some failure), the
        server will clean up its context keep_alive_secs after the final RPC it
        receives.

    Raises:
      ValueError: if server_def is None.
    """
    if not server_def:
      raise ValueError("server_def is None.")
    self._server_def = server_def
    if self._context_handle:
      server_def_str = server_def.SerializeToString()
      pywrap_tfe.TFE_ContextUpdateServerDef(self._context_handle,
                                            keep_alive_secs, server_def_str)
      self._initialize_logical_devices()
    # Drop caches that may hold tensors from the previous cluster.
    self._clear_caches()
def check_alive(self, worker_name):
"""Checks whether a remote worker is alive or not.
Args:
worker_name: a string representing the remote worker. It must be a fully
specified name like "/job:worker/replica:0/task:0".
Returns:
a boolean indicating whether the remote worker is alive or not.
Raises:
ValueError: if context is not initialized.
"""
# TODO(yuefengz): support checking multiple workers.
if self._context_handle:
return pywrap_tfe.TFE_ContextCheckAlive(self._context_handle, worker_name)
else:
raise ValueError("Context is not initialized.")
def sync_executors(self):
"""Sync both local executors and the ones on remote workers.
In async execution mode, local function calls can return before the
corresponding remote op/function execution requests are completed. Calling
this method creates a synchronization barrier for remote executors. It only
returns when all remote pending nodes are finished, potentially with errors
if any remote executors are in error state.
Raises:
ValueError: if context is not initialized.
"""
if self._context_handle:
pywrap_tfe.TFE_ContextSyncExecutors(self._context_handle)
else:
raise ValueError("Context is not initialized.")
def clear_executor_errors(self):
"""Clear errors in both local executors and remote workers.
After receiving errors from remote workers, additional requests on the fly
could further taint the status on the remote workers due to the async nature
of remote execution. Calling this method block on waiting for all pending
nodes in remote executors to finish and clear their error statuses.
Raises:
ValueError: if context is not initialized.
"""
if self._context_handle:
pywrap_tfe.TFE_ContextClearExecutors(self._context_handle)
else:
raise ValueError("Context is not initialized.")
  def enable_coordination_service(self, service_type):
    """Records the coordination service type to use for this context.

    Takes effect only when the context is (re)initialized; a warning is logged
    if the context handle already exists.
    """
    if self._context_handle:
      logging.warning("Configuring coordination service type may not be "
                      "effective because the context is already initialized.")
    self._coordination_service = service_type
  @property
  def coordination_service(self):
    """Returns the configured coordination service type (or None)."""
    return self._coordination_service
  def set_config_key_value(self, key, value):
    """Stores a key/value pair in the coordination service config store."""
    # NOTE(review): calls the module-level ensure_initialized(), not
    # self.ensure_initialized(); presumably initializes the global context —
    # confirm against the rest of this file.
    ensure_initialized()
    pywrap_tfe.TFE_InsertConfigKeyValue(self._context_handle, key, value)
  def get_config_key_value(self, key):
    """Fetches the value stored for `key` as a UTF-8 string."""
    ensure_initialized()
    with c_api_util.tf_buffer() as buffer_:
      pywrap_tfe.TFE_GetConfigKeyValue(self._context_handle, key, buffer_)
      value = pywrap_tf_session.TF_GetBuffer(buffer_).decode("utf-8")
    return value
  def delete_config_key_value(self, key):
    """Removes `key` from the coordination service config store."""
    ensure_initialized()
    pywrap_tfe.TFE_DeleteConfigKeyValue(self._context_handle, key)
def report_error_to_cluster(self, error_code, error_message):
"""Report error to other members in a multi-client cluster.
Args:
error_code: a `tf.errors` error code.
error_message: a string. The error message.
"""
if self._context_handle:
pywrap_tfe.TFE_ReportErrorToCluster(self._context_handle, error_code,
error_message)
else:
raise ValueError("Context is not initialized.")
  def clear_kernel_cache(self):
    """Clear kernel cache and reset all stateful kernels."""
    # No-op when the context has not been initialized yet.
    if self._context_handle is not None:
      pywrap_tfe.TFE_ContextClearCaches(self._context_handle)
  def enable_collective_ops(self, server_def):
    """Enable distributed collective ops with an appropriate server_def.

    Args:
      server_def: A tensorflow::ServerDef proto. Enables execution on remote
        devices.

    Raises:
      ValueError: if server_def is None.
      RuntimeError: if this method is not called at program startup.
    """
    if not server_def:
      raise ValueError("server_def is None.")
    self._collective_ops_server_def = server_def
    # TODO(b/129298253): Allow creating datasets/tensors before enabling
    # collective ops.
    if self._context_handle is not None:
      logging.warning("Enabling collective ops after program startup may cause "
                      "error when accessing previously created tensors.")
      with self._initialize_lock:
        assert self._initialized
        server_def_str = self._collective_ops_server_def.SerializeToString()
        pywrap_tfe.TFE_EnableCollectiveOps(self._context_handle, server_def_str)
        self._initialize_logical_devices()
        self._clear_caches()
  def configure_collective_ops(
      self,
      collective_leader="",
      scoped_allocator_enabled_ops=("CollectiveReduce",),
      use_nccl_communication=False,
      device_filters=None):
    """Configure collective ops.

    Collective group leader is necessary for collective ops to run, other
    configurations are mainly for the purpose of performance.

    Args:
      collective_leader: a device string for collective leader, e.g.
        "/job:worker/replica:0/task:0"; empty string means local execution of
        collective ops.
      scoped_allocator_enabled_ops: a tuple or a list of op names for scoped
        allocator to run with.
      use_nccl_communication: whether to use nccl communication for collective
        ops.
      device_filters: a tuple or a list of device strings. If set, corresponding
        task can only see the devices filtered by these device filters.

    Raises:
      ValueError: if collective ops are already configured differently.
      RuntimeError: if this method is not called at program startup.
    """
    # Re-configuring with identical values is a no-op; anything else is an
    # error once a configuration has been recorded.
    if self._collective_leader is not None:
      if (self._collective_leader != collective_leader or
          self._collective_scoped_allocator_enabled_ops !=
          scoped_allocator_enabled_ops or
          self._collective_use_nccl_communication != use_nccl_communication or
          self._collective_device_filters != device_filters):
        raise ValueError("Collective ops are already configured.")
      else:
        return
    if self._context_handle is not None:
      raise RuntimeError("Collective ops must be configured at program startup")
    self._collective_leader = collective_leader
    self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops
    self._collective_use_nccl_communication = use_nccl_communication
    self._collective_device_filters = device_filters
  def abort_collective_ops(self, code, message):
    """Abort the collective ops.

    This is intended to be used when a peer failure is detected, which allows
    the user to handle the case instead of hanging. This aborts all on-going
    collectives. After all subsequent collectives error immediately, and you
    need to reset_context() to use collectives again.

    Args:
      code: a `tf.errors` error code.
      message: a string. The error message.
    """
    self.ensure_initialized()
    pywrap_tfe.TFE_AbortCollectiveOps(self._handle, code, message)
  def check_collective_ops_peer_health(self, task, timeout_in_ms):
    """Check collective peer health.

    This probes each task to see if they're still alive. Note that restarted
    tasks are considered a different one, and they're considered not healthy.

    This should only be used in multi client multi worker training.

    Args:
      task: a task string, must be in the format of /job:xxx/replica:0/task:N.
      timeout_in_ms: an integer, the timeout. If zero, there's no timeout.

    Raises:
      tf.errors.UnavailableError: when a peer is down.
      tf.errors.FailedPreconditionError: when a peer is a different one from the
        one this task has talked to, e.g. the peer has restarted.
      tf.errors.InvalidArgumentError: when the task string is invalid.
    """
    self.ensure_initialized()
    pywrap_tfe.TFE_CollectiveOpsCheckPeerHealth(self._handle, task,
                                                timeout_in_ms)
  @property
  def _handle(self):
    # Raw TFE context handle; valid only after ensure_initialized() has run.
    if self._context_handle is None:
      raise AssertionError("Context must be initialized first.")
    return self._context_handle
  @property
  def _devices(self):
    # Canonical device names gathered by _initialize_logical_devices().
    if self._context_devices is None:
      raise AssertionError("Context must be initialized first.")
    return self._context_devices
def __str__(self):
if self._context_handle is None:
return "Eager TensorFlow Context. Devices currently uninitialized."
else:
devices = self._devices
lines = ["Eager TensorFlow Context with %d devices" % (len(devices))]
for i, d in enumerate(devices):
lines.append(" Device %d: %s" % (i, d))
return "\n".join(lines)
  @tf_contextlib.contextmanager
  def _mode(self, mode):
    """A context manager to allow setting the mode to EAGER/GRAPH."""
    ctx = self._thread_local_data
    old_is_eager = ctx.is_eager
    ctx.is_eager = mode == EAGER_MODE
    if mode == EAGER_MODE:
      # Entering graph mode does not provide us with sufficient information to
      # record a context switch; graph-based context switches are only logged
      # when a graph is registered as the default graph.
      self.context_switches.push(False, eager_mode, None)
    try:
      yield
    finally:
      # Restore the previous mode and, symmetrically with the push above,
      # unwind the recorded switch when leaving eager mode.
      ctx.is_eager = old_is_eager
      if mode == EAGER_MODE:
        self.context_switches.pop()
  def executing_eagerly(self):
    """Returns True if current thread has eager executing enabled."""
    return self._thread_local_data.is_eager
  def ones_rank_cache(self):
    """Per-device cache for scalars."""
    return _tensor_caches_map[self._id].ones_rank_cache
  def zeros_cache(self):
    """Per-device cache for scalars."""
    return _tensor_caches_map[self._id].zeros_cache
  @property
  def scope_name(self):
    """Returns scope name for the current thread."""
    return self._thread_local_data.scope_name
  @scope_name.setter
  def scope_name(self, s):
    """Sets scope name for the current thread."""
    self._thread_local_data.scope_name = s
  @property
  def device_name(self):
    """Returns the device name for the current thread."""
    return self._thread_local_data.device_name
  @property
  def device_spec(self):
    """Returns the device spec for the current thread."""
    return self._thread_local_data.device_spec
  def _set_device(self, device_name, device_spec):
    """Sets both the device name and spec for the current thread."""
    self._thread_local_data.device_name = device_name
    self._thread_local_data.device_spec = device_spec
def device(self, name):
"""Context-manager to force placement of operations and Tensors on a device.
Args:
name: Name of the device or None to get default placement.
Returns:
Context manager that forces device placement.
Raises:
ValueError: If name is not a string or is an invalid device name.
RuntimeError: If device scopes are not properly nested.
"""
if isinstance(name, LogicalDevice):
name = name.name
elif pydev.is_device_spec(name):
name = name.to_string()
return _EagerDeviceContext(self, name)
  def devices(self):
    """List of the names of devices available to execute operations."""
    return self._devices
  def host_address_space(self):
    """Returns the host address space string reported by the C runtime."""
    self.ensure_initialized()
    with c_api_util.tf_buffer() as buffer_:
      pywrap_tfe.TFE_HostAddressSpace(self._context_handle, buffer_)
      address_space = pywrap_tf_session.TF_GetBuffer(buffer_).decode("utf-8")
    return address_space
# TODO(fishx): remove this property.
@property
def execution_mode(self):
"""Gets execution mode for current thread."""
return ASYNC if self.is_async() else SYNC
@execution_mode.setter
def execution_mode(self, mode):
"""Sets execution mode for current thread."""
if mode not in (None, SYNC, ASYNC):
raise ValueError("Execution mode should be None/SYNC/ASYNC. Got %s" %
mode)
if mode is None:
mode = SYNC
enable_async = (mode == ASYNC)
if self.is_async() != enable_async:
# Only set the execution mode if the context has already been initialized
if self._context_handle is not None:
self.executor.wait()
executor_new = executor.new_executor(enable_async)
self._thread_local_data.executor = executor_new
pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle,
executor_new.handle())
else:
self._default_is_async = enable_async
def is_async(self):
if self._context_handle is not None:
return self.executor.is_async()
else:
return self._default_is_async
  @property
  def executor(self):
    """Returns the executor bound to the current thread."""
    self.ensure_initialized()
    return executor.Executor(
        pywrap_tfe.TFE_ContextGetExecutorForThread(self._context_handle))
  @executor.setter
  def executor(self, e):
    """Binds executor `e` to the current thread."""
    self.ensure_initialized()
    pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle, e.handle())
  @property
  def config(self):
    """Return the ConfigProto with all runtime deltas applied."""
    # Ensure physical devices have been discovered and config has been imported
    self._initialize_physical_devices()
    config = config_pb2.ConfigProto()
    if self._config is not None:
      config.CopyFrom(self._config)
    if self._optimizer_jit is not None:
      config.graph_options.optimizer_options.global_jit_level = (
          config_pb2.OptimizerOptions.ON_1
          if self._optimizer_jit else config_pb2.OptimizerOptions.OFF)
    if self._intra_op_parallelism_threads is not None:
      config.intra_op_parallelism_threads = self._intra_op_parallelism_threads
    if self._inter_op_parallelism_threads is not None:
      config.inter_op_parallelism_threads = self._inter_op_parallelism_threads
    if self._soft_device_placement is not None:
      config.allow_soft_placement = self._soft_device_placement
    else:
      # Default: soft placement follows eager mode for this thread.
      config.allow_soft_placement = self.executing_eagerly()
    if self._log_device_placement is not None:
      config.log_device_placement = self._log_device_placement
    is_mlir_bridge_enabled = pywrap_tfe.TF_IsMlirBridgeEnabled()
    config.experimental.mlir_bridge_rollout = is_mlir_bridge_enabled
    if (is_mlir_bridge_enabled ==
        config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_ENABLED):
      config.experimental.enable_mlir_bridge = True
    if self._enable_mlir_graph_optimization is not None:
      config.experimental.enable_mlir_graph_optimization = (
          self._enable_mlir_graph_optimization)
    # Helper: apply a tri-state (None/True/False) grappler option as ON/OFF.
    def rewriter_toggle(option):
      toggle = self._optimizer_experimental_options.get(option, None)
      if toggle is None:
        return
      setattr(config.graph_options.rewrite_options, option,
              (rewriter_config_pb2.RewriterConfig.ON
               if toggle else rewriter_config_pb2.RewriterConfig.OFF))
    # Helper: apply a grappler option that is a plain boolean field.
    def rewriter_bool(option):
      toggle = self._optimizer_experimental_options.get(option, None)
      if toggle is None:
        return
      setattr(config.graph_options.rewrite_options, option, toggle)
    rewriter_toggle("layout_optimizer")
    rewriter_toggle("constant_folding")
    rewriter_toggle("shape_optimization")
    rewriter_toggle("remapping")
    rewriter_toggle("arithmetic_optimization")
    rewriter_toggle("dependency_optimization")
    rewriter_toggle("loop_optimization")
    rewriter_toggle("function_optimization")
    rewriter_toggle("debug_stripper")
    rewriter_bool("disable_model_pruning")
    rewriter_toggle("scoped_allocator_optimization")
    rewriter_toggle("pin_to_host_optimization")
    rewriter_toggle("implementation_selector")
    rewriter_toggle("auto_mixed_precision")
    rewriter_toggle("use_plugin_optimizers")
    rewriter_bool("disable_meta_optimizer")
    nodes = self._optimizer_experimental_options.get("min_graph_nodes", None)
    if nodes is not None:
      config.graph_options.rewrite_options.min_graph_nodes = nodes
    # Compute device counts
    config.device_count["CPU"] = 0
    config.device_count["GPU"] = 0
    for dev in self._physical_devices:
      if dev not in self._visible_device_list:
        continue
      virtual_devices = self._virtual_device_map.get(dev)
      if virtual_devices is None:
        config.device_count[dev.device_type] += 1
      else:
        config.device_count[dev.device_type] += len(virtual_devices)
    # Configure gpu_options
    gpu_options = self._compute_gpu_options()
    config.gpu_options.MergeFrom(gpu_options)
    # Configure collective ops
    if self._collective_leader:
      config.experimental.collective_group_leader = self._collective_leader
    if self._collective_scoped_allocator_enabled_ops:
      rewrite_options = config.graph_options.rewrite_options
      rewrite_options.scoped_allocator_optimization = (
          rewriter_config_pb2.RewriterConfig.ON)
      del rewrite_options.scoped_allocator_opts.enable_op[:]
      for op in self._collective_scoped_allocator_enabled_ops:
        rewrite_options.scoped_allocator_opts.enable_op.append(op)
    if self._collective_use_nccl_communication:
      config.experimental.collective_nccl = True
    if self._collective_device_filters:
      del config.device_filters[:]
      for f in self._collective_device_filters:
        config.device_filters.append(f)
    # Configure coordination service
    if self._coordination_service:
      config.experimental.coordination_service = self._coordination_service
    return config
  def _compute_gpu_options(self):
    """Build the GPUOptions proto."""
    visible_device_list = []
    virtual_devices = []
    # gpu_index tracks position in the full physical GPU list, including GPUs
    # that were made invisible (those are skipped below but still counted).
    gpu_index = -1
    memory_growths = set()
    for dev in self.list_physical_devices("GPU"):
      gpu_index += 1
      if dev not in self._visible_device_list:
        continue
      growth = self._memory_growth_map[dev]
      memory_growths.add(growth)
      visible_device_list.append(str(gpu_index))
      if self._virtual_device_map:
        vdevs = self._virtual_device_map.get(dev, [])
        device_limits = []
        priority = []
        for virt_dev in vdevs:
          device_limits.append(virt_dev.memory_limit)
          if virt_dev.experimental_priority is not None:
            priority.append(virt_dev.experimental_priority)
        # If priority is specified, it must be specified for all virtual
        # devices.
        if priority and len(device_limits) != len(priority):
          raise ValueError("priority must be specified for all virtual devices")
        virtual_devices.append(
            config_pb2.GPUOptions.Experimental.VirtualDevices(
                memory_limit_mb=device_limits, priority=priority))
    # Only compute growth if virtual devices have not been configured and we
    # have GPUs
    if not virtual_devices and memory_growths:
      # Memory growth must be uniform across all visible GPUs.
      if len(memory_growths) > 1:
        raise ValueError("Memory growth cannot differ between GPU devices")
      allow_growth = memory_growths.pop()
    else:
      allow_growth = None
    return config_pb2.GPUOptions(
        allow_growth=allow_growth,
        visible_device_list=",".join(visible_device_list),
        experimental=config_pb2.GPUOptions.Experimental(
            virtual_devices=virtual_devices))
  @property
  def function_call_options(self):
    """Returns function call options for current thread.

    Note that the returned object is still referenced by the eager context.

    Returns: the FunctionCallOptions for current thread.
    """
    # Built lazily per thread from the current config.
    if self._thread_local_data.function_call_options is None:
      config = self.config
      # Default to soft placement for functions unless specified
      if self._soft_device_placement is None:
        config.allow_soft_placement = True
      self._thread_local_data.function_call_options = FunctionCallOptions(
          config_proto=config)
    return self._thread_local_data.function_call_options
  @function_call_options.setter
  def function_call_options(self, options):
    """Sets function call options for the current thread."""
    self._thread_local_data.function_call_options = options
  def num_gpus(self):
    """The number of GPUs available to execute operations."""
    # Counted by _initialize_logical_devices() during initialization.
    self.ensure_initialized()
    return self._num_gpus
  def add_function(self, fn):
    """Add a function definition to the context.

    Once added, the function (identified by its name) can be executed like any
    other operation.

    Args:
      fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
    """
    self.ensure_initialized()
    pywrap_tfe.TFE_ContextAddFunction(self._handle, fn)
  def add_function_def(self, fdef):
    """Add a function definition to the context.

    Once added, the function (identified by its name) can be executed like any
    other operation.

    Args:
      fdef: A FunctionDef protocol buffer message.
    """
    self.ensure_initialized()
    fdef_string = fdef.SerializeToString()
    pywrap_tfe.TFE_ContextAddFunctionDef(self._handle, fdef_string,
                                         len(fdef_string))
  def get_function_def(self, name):
    """Get a function definition from the context.

    Args:
      name: function signature name.

    Returns:
      The requested FunctionDef.

    Raises:
      tf.errors.NotFoundError: if name is not the name of a registered function.
    """
    with c_api_util.tf_buffer() as buffer_:
      pywrap_tfe.TFE_ContextGetFunctionDef(self._handle, name, buffer_)
      proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)
    function_def = function_pb2.FunctionDef()
    function_def.ParseFromString(proto_data)
    return function_def
  def register_custom_device(self, device_capsule, device_name,
                             device_info_capsule):
    """Calls TFE_RegisterCustomDevice. See the non-member function."""
    self.ensure_initialized()
    pywrap_tfe.TFE_Py_RegisterCustomDevice(self._handle, device_capsule,
                                           device_name, device_info_capsule)
  def pack_eager_tensors(self, tensors):
    """Pack multiple `EagerTensor`s of the same dtype and shape.

    Args:
      tensors: a list of EagerTensors to pack.

    Returns:
      A packed EagerTensor.
    """
    self.ensure_initialized()
    return pywrap_tfe.TFE_Py_PackEagerTensors(self._handle, tensors)
  def list_function_names(self):
    """Get a list of names of registered functions.

    Returns:
      A set of names of all registered functions for the context.
    """
    self.ensure_initialized()
    return set(pywrap_tfe.TFE_ContextListFunctionNames(self._handle))
  def remove_function(self, name):
    """Remove a function from the context.

    Once removed, the function cannot be executed anymore.

    Args:
      name: function signature name.
    """
    self.ensure_initialized()
    pywrap_tfe.TFE_ContextRemoveFunction(self._handle, name)
  def has_function(self, name):
    """Check if a function `name` is registered."""
    self.ensure_initialized()
    return bool(pywrap_tfe.TFE_ContextHasFunction(self._handle, name))
def add_op_callback(self, callback):
"""Add a post-op callback to the context.
A post-op callback is invoked immediately after an eager operation or
function has finished execution or after a op has been added to a graph,
providing access to the op's type, name input and output tensors. Multiple
op callbacks can be added, in which case the callbacks will be invoked in
the order in which they are added.
Args:
callback: a callable of the signature `f(op_type, inputs, attrs, outputs,
op_name=None, graph=None)`. See doc strings in `op_callbacks.py` for
details on the function signature and its semantics.
"""
if callback not in self._thread_local_data.op_callbacks:
self._thread_local_data.op_callbacks.append(callback)
def remove_op_callback(self, callback):
"""Remove an already-registered op callback.
Args:
callback: The op callback to be removed.
Raises:
KeyError: If `callback` is not already registered.
"""
if callback not in self._thread_local_data.op_callbacks:
raise KeyError("The specified op callback has not been registered, "
"and hence cannot be removed.")
del self._thread_local_data.op_callbacks[
self._thread_local_data.op_callbacks.index(callback)]
  @property
  def op_callbacks(self):
    """Returns the list of op callbacks registered for the current thread."""
    return self._thread_local_data.op_callbacks
  @property
  def invoking_op_callbacks(self):
    """Whether op callbacks are currently being invoked on this thread."""
    return self._thread_local_data.invoking_op_callbacks
  @invoking_op_callbacks.setter
  def invoking_op_callbacks(self, value):
    self._thread_local_data.invoking_op_callbacks = value
def _initialize_physical_devices(self, reinitialize=False):
  """Gets local devices visible to the system.

  Args:
    reinitialize: If True, reinitializes self._physical_devices so that
      dynamic registered devices will also be visible to the python front-end.
  """
  # We lazy initialize self._physical_devices since we do not want to do this
  # the constructor since the backend may not be initialized yet.
  with self._device_lock:
    if not reinitialize and self._physical_devices is not None:
      return

    devs = pywrap_tfe.TF_ListPhysicalDevices()
    # Device type is taken from the second ':'-separated field of the raw
    # device string (presumably names like "/physical_device:GPU:0" — the
    # exact format comes from the C runtime).
    self._physical_devices = [
        PhysicalDevice(name=d.decode(), device_type=d.decode().split(":")[1])
        for d in devs
    ]
    self._physical_device_to_index = {
        p: i for i, p in enumerate(self._physical_devices)
    }

    # By default every physical device is visible.
    self._visible_device_list = list(self._physical_devices)
    # Memory growth is tracked per GPU only; None means "unset".
    self._memory_growth_map = {
        d: None for d in self._physical_devices if d.device_type == "GPU"
    }

  # Import device settings that may have been passed into the constructor
  self._import_config()
def reinitialize_physical_devices(self):
  """Gets local devices visible to the system."""
  # Reinitialize the physical device list after registering
  # the pluggable device.
  self._initialize_physical_devices(True)
def list_physical_devices(self, device_type=None):
  """List local devices visible to the system.

  This API allows a client to query the devices before they have been
  initialized by the eager runtime. Additionally a user can filter by device
  type, to get only CPUs or GPUs.

  Args:
    device_type: Optional device type to limit results to

  Returns:
    List of PhysicalDevice objects.
  """
  self._initialize_physical_devices()

  if device_type is None:
    # Return a copy so callers cannot mutate internal state.
    return list(self._physical_devices)

  return [d for d in self._physical_devices if d.device_type == device_type]
def get_device_details(self, device):  # pylint: disable=redefined-outer-name
  """Returns details about a physical devices.

  Args:
    device: A `tf.config.PhysicalDevice` returned by
      `tf.config.list_physical_devices` or `tf.config.get_visible_devices`.

  Returns:
    A dict with string keys.

  Raises:
    ValueError: If `device` is not a known `PhysicalDevice`.
    RuntimeError: If the device reports a malformed compute capability.
  """
  if not isinstance(device, PhysicalDevice):
    raise ValueError("device must be a tf.config.PhysicalDevice, but got: "
                     "%s" % (device,))
  if (self._physical_device_to_index is None or
      device not in self._physical_device_to_index):
    raise ValueError("The PhysicalDevice must be one obtained from "
                     "calling `tf.config.list_physical_devices`, but got: "
                     "%s" % (device,))
  index = self._physical_device_to_index[device]
  details = pywrap_tfe.TF_GetDeviceDetails(index)

  # Change compute_capability from a string to a tuple
  if "compute_capability" in details:
    try:
      major, minor = details["compute_capability"].split(".")
      details["compute_capability"] = (int(major), int(minor))
    except ValueError as e:
      # Fixed the garbled grammar of the original message ("an in invalid")
      # and chain the underlying parse error for easier debugging.
      raise RuntimeError("Device returned compute capability in an invalid "
                         "format: %s" % details["compute_capability"]) from e
  return details
def _import_config(self):
  """Import config if passed in during construction.

  If Context was created with a ConfigProto such as when calling
  tf.compat.v1.enable_eager_execution(), then we need to pull out the
  various pieces we might be replacing and import then into our internal
  class representation.
  """
  if self._config is None:
    return

  # device_count defaults CPU to 1; only act when the config overrides it.
  num_cpus = self._config.device_count.get("CPU", 1)
  if num_cpus != 1:
    cpus = [d for d in self._physical_devices if d.device_type == "CPU"]
    if num_cpus == 0:
      self.set_visible_devices([], "CPU")
    elif num_cpus > 1:
      self.set_logical_device_configuration(
          cpus[0], [LogicalDeviceConfiguration() for _ in range(num_cpus)])

  # Parse GPU options
  gpus = [d for d in self._physical_devices if d.device_type == "GPU"]

  # If there are no GPUs detected, simply ignore all the GPU options passed in
  # rather than doing any validation checks.
  if not gpus:
    return

  gpu_count = self._config.device_count.get("GPU", None)
  visible_gpus = []
  # TODO(gjn): Handle importing existing virtual GPU configuration
  visible_indices = self._config.gpu_options.visible_device_list
  if visible_indices:
    # visible_device_list is a comma-separated string of GPU indices.
    for index in visible_indices.split(","):
      if int(index) >= len(gpus):
        raise ValueError("Invalid visible device index: %s" % index)
      visible_gpus.append(gpus[int(index)])
  else:
    visible_gpus = gpus

  if gpu_count is not None:
    visible_gpus = visible_gpus[:gpu_count]

  self.set_visible_devices(visible_gpus, "GPU")
def list_logical_devices(self, device_type=None):
  """Return logical devices."""
  # Logical devices only exist once the runtime is initialized.
  self.ensure_initialized()
  if device_type is None:
    return list(self._logical_devices)

  return [d for d in self._logical_devices if d.device_type == device_type]
def get_visible_devices(self, device_type=None):
  """Get the list of visible devices."""
  self._initialize_physical_devices()

  if device_type is None:
    # Copy to protect internal state from caller mutation.
    return list(self._visible_device_list)

  return [
      d for d in self._visible_device_list if d.device_type == device_type
  ]
def set_visible_devices(self, devices, device_type=None):
  """Set the list of visible devices."""
  self._initialize_physical_devices()

  # Accept a single device as a convenience.
  if not isinstance(devices, list):
    devices = [devices]

  for d in devices:
    if d not in self._physical_devices:
      raise ValueError("Unrecognized device: %s" % repr(d))
    if device_type is not None and d.device_type != device_type:
      raise ValueError("Unrecognized device: %s" % repr(d))

  visible_device_list = []
  if device_type is not None:
    # Keep devices of other types visible; only replace the filtered type.
    visible_device_list = [
        d for d in self._visible_device_list if d.device_type != device_type
    ]

  visible_device_list += devices

  if self._visible_device_list == visible_device_list:
    return

  # Visibility is baked into the runtime at initialization time.
  if self._context_handle is not None:
    raise RuntimeError(
        "Visible devices cannot be modified after being initialized")

  self._visible_device_list = visible_device_list
def get_memory_info(self, dev):
  """Returns a dict of memory info for the device."""
  self._initialize_physical_devices()
  self.ensure_initialized()
  return pywrap_tfe.TFE_GetMemoryInfo(self._context_handle, dev)

def reset_memory_stats(self, dev):
  """Resets the tracked memory stats for the device."""
  self._initialize_physical_devices()
  self.ensure_initialized()
  pywrap_tfe.TFE_ResetMemoryStats(self._context_handle, dev)
def get_memory_growth(self, dev):
  """Get if memory growth is enabled for a PhysicalDevice."""
  self._initialize_physical_devices()

  if dev not in self._physical_devices:
    raise ValueError("Unrecognized device: %s" % repr(dev))

  # None means the setting was never configured.
  return self._memory_growth_map[dev]
def set_memory_growth(self, dev, enable):
  """Set if memory growth should be enabled for a PhysicalDevice."""
  self._initialize_physical_devices()

  if dev not in self._physical_devices:
    raise ValueError("Unrecognized device: %s" % repr(dev))

  # Memory growth and virtual devices are mutually exclusive on a device.
  if dev in self._virtual_device_map:
    raise ValueError(
        "Cannot set memory growth on device when virtual devices configured")

  if dev.device_type != "GPU":
    raise ValueError("Cannot set memory growth on non-GPU devices")

  if self._memory_growth_map.get(dev) == enable:
    return

  if self._context_handle is not None:
    raise RuntimeError(
        "Physical devices cannot be modified after being initialized")

  self._memory_growth_map[dev] = enable
def get_logical_device_configuration(self, dev):
  """Get the virtual device configuration for a PhysicalDevice."""
  self._initialize_physical_devices()

  if dev not in self._physical_devices:
    raise ValueError("Unrecognized device: %s" % repr(dev))

  # Returns None when no virtual devices were configured for `dev`.
  return self._virtual_device_map.get(dev)
def set_logical_device_configuration(self, dev, virtual_devices):
  """Set the virtual device configuration for a PhysicalDevice."""
  self._initialize_physical_devices()

  if dev not in self._physical_devices:
    raise ValueError("Unrecognized device: %s" % repr(dev))

  # Per-type validation: CPU virtual devices may not carry memory limits or
  # priorities; GPU virtual devices require a memory limit; no other types
  # are supported.
  if dev.device_type == "CPU":
    for vdev in virtual_devices:
      if vdev.memory_limit is not None:
        raise ValueError("Setting memory limit on CPU virtual devices is "
                         "currently not supported")
      if vdev.experimental_priority is not None:
        raise ValueError("Setting experimental_priority on CPU virtual "
                         " devices is currently not supported")
  elif dev.device_type == "GPU":
    for vdev in virtual_devices:
      if vdev.memory_limit is None:
        raise ValueError(
            "Setting memory limit is required for GPU virtual devices")
  else:
    raise ValueError("Virtual devices are not supported for %s" %
                     dev.device_type)

  if self._virtual_device_map.get(dev) == virtual_devices:
    return

  if self._context_handle is not None:
    raise RuntimeError(
        "Virtual devices cannot be modified after being initialized")

  self._virtual_device_map[dev] = virtual_devices
def set_logical_cpu_devices(self, num_cpus, prefix=""):
  """Set virtual CPU devices in context.

  If virtual CPU devices are already configured at context initialization
  by tf.config.set_logical_device_configuration(), this method should not be
  called.

  Args:
    num_cpus: Number of virtual CPUs.
    prefix: Device name prefix.

  Raises:
    RuntimeError: If virtual CPUs are already configured at context
      initialization.
  """
  server_def = self._server_def or self._collective_ops_server_def
  local_prefix = ["/device"]
  if server_def is not None:
    # In a cluster, local devices are additionally addressed with the full
    # job/replica/task prefix.
    local_prefix.append("/job:%s/replica:0/task:%d" % (server_def.job_name,
                                                       server_def.task_index))
  logical_local_devices = [d for d in self.list_logical_devices("CPU") if
                           d.name.startswith(tuple(local_prefix))]
  self.ensure_initialized()
  # Error out if there are already multiple logical CPU in the context.
  if len(logical_local_devices) > 1:
    raise RuntimeError("Virtual CPUs already set, cannot modify again.")

  pywrap_tfe.TFE_SetLogicalCpuDevices(self._context_handle, num_cpus, prefix)
  self._initialize_logical_devices()
def get_compiler_ir(self, device_name, function_name, args, stage="hlo"):
  # Returns the compiler IR (at the requested `stage`, default HLO) for a
  # concrete function on the given device.
  return pywrap_tfe.TF_GetCompilerIr(self._context_handle, function_name,
                                     stage, device_name, args)

@deprecated(
    None, "XLA:CPU and XLA:GPU devices are deprecated", warn_once=True)
def enable_xla_devices(self):
  """Enables XLA:CPU and XLA:GPU devices registration."""
  pywrap_tfe.TF_EnableXlaDevices()
@property
def enable_mlir_bridge(self):
  # Backed by a process-wide flag in the C runtime, not per-context state.
  return pywrap_tfe.TF_IsMlirBridgeEnabled()

@property
def enable_mlir_graph_optimization(self):
  return self._enable_mlir_graph_optimization

@enable_mlir_bridge.setter
def enable_mlir_bridge(self, enabled):
  pywrap_tfe.TF_EnableMlirBridge(enabled)
  # Invalidate cached per-thread function-call options so the new setting
  # takes effect on subsequent calls.
  self._thread_local_data.function_call_options = None

@enable_mlir_graph_optimization.setter
def enable_mlir_graph_optimization(self, enabled):
  self._enable_mlir_graph_optimization = enabled
  self._thread_local_data.function_call_options = None
@property
def optimizer_jit(self):
  # JIT is considered "on" for either of the two ON levels.
  level = self.config.graph_options.optimizer_options.global_jit_level
  return (level == config_pb2.OptimizerOptions.ON_1 or
          level == config_pb2.OptimizerOptions.ON_2)

@optimizer_jit.setter
def optimizer_jit(self, enabled):
  self._optimizer_jit = enabled

  # Invalidate cached per-thread function-call options.
  self._thread_local_data.function_call_options = None
def get_optimizer_experimental_options(self):
  """Get experimental options for the optimizer.

  Returns:
    Dictionary of current option values
  """
  rewrite_options = self.config.graph_options.rewrite_options
  options = {}

  # (field name, is_toggle) in the exact order the options are reported.
  # Toggle fields are tri-state: 0 means "unset" and the entry is omitted;
  # otherwise the entry records whether the field equals RewriterConfig.ON.
  # Non-toggle fields are copied through unconditionally.
  option_fields = (
      ("layout_optimizer", True),
      ("constant_folding", True),
      ("shape_optimization", True),
      ("remapping", True),
      ("arithmetic_optimization", True),
      ("dependency_optimization", True),
      ("loop_optimization", True),
      ("function_optimization", True),
      ("debug_stripper", True),
      ("disable_model_pruning", False),
      ("scoped_allocator_optimization", True),
      ("pin_to_host_optimization", True),
      ("implementation_selector", True),
      ("auto_mixed_precision", True),
      ("use_plugin_optimizers", True),
      ("disable_meta_optimizer", False),
  )
  for field_name, is_toggle in option_fields:
    value = getattr(rewrite_options, field_name)
    if is_toggle:
      if value != 0:
        options[field_name] = (value == rewriter_config_pb2.RewriterConfig.ON)
    else:
      options[field_name] = value

  if rewrite_options.min_graph_nodes != 0:
    options["min_graph_nodes"] = rewrite_options.min_graph_nodes

  return options
def set_optimizer_experimental_options(self, options):
  """Set experimental options for the optimizer.

  Args:
    options: Dictionary of options to modify
  """
  self._optimizer_experimental_options.update(options)

  # Invalidate cached per-thread function-call options.
  self._thread_local_data.function_call_options = None
@property
def intra_op_parallelism_threads(self):
  return self.config.intra_op_parallelism_threads

@intra_op_parallelism_threads.setter
def intra_op_parallelism_threads(self, num_threads):
  if self._intra_op_parallelism_threads == num_threads:
    return

  # Thread-pool sizes are fixed once the runtime is initialized.
  if self._context_handle is not None:
    raise RuntimeError(
        "Intra op parallelism cannot be modified after initialization.")

  self._intra_op_parallelism_threads = num_threads
@property
def inter_op_parallelism_threads(self):
  return self.config.inter_op_parallelism_threads

@inter_op_parallelism_threads.setter
def inter_op_parallelism_threads(self, num_threads):
  if self._inter_op_parallelism_threads == num_threads:
    return

  # Thread-pool sizes are fixed once the runtime is initialized.
  if self._context_handle is not None:
    raise RuntimeError(
        "Inter op parallelism cannot be modified after initialization.")

  self._inter_op_parallelism_threads = num_threads
@property
def soft_device_placement(self):
  return self.config.allow_soft_placement

@soft_device_placement.setter
def soft_device_placement(self, enable):
  # Unlike most settings, this one can be flipped on a live context.
  if self._context_handle is not None:
    pywrap_tfe.TFE_ContextSetSoftDevicePlacement(self._handle, enable)

  self._soft_device_placement = enable
  self._thread_local_data.function_call_options = None
@property
def log_device_placement(self):
  return self.config.log_device_placement

@log_device_placement.setter
def log_device_placement(self, enable):
  # Can be toggled on a live context.
  if self._context_handle is not None:
    pywrap_tfe.TFE_ContextSetLogDevicePlacement(self._handle, enable)

  self._log_device_placement = enable
  self._thread_local_data.function_call_options = None
@property
def run_eager_op_as_function(self):
  return self._run_eager_op_as_function

@run_eager_op_as_function.setter
def run_eager_op_as_function(self, enable):
  # Can be toggled on a live context.
  if self._context_handle is not None:
    pywrap_tfe.TFE_ContextSetRunEagerOpAsFunction(self._handle, enable)

  self._run_eager_op_as_function = enable
@property
def device_policy(self):
  # Only get the policy from the context if it has already been initialized
  if self._context_handle is not None:
    return pywrap_tfe.TFE_ContextGetDevicePlacementPolicy(self._handle)

  return self._device_policy

@device_policy.setter
def device_policy(self, policy):
  if policy is None:
    # SILENT is the default placement policy.
    policy = DEVICE_PLACEMENT_SILENT

  if self._device_policy != policy:
    self._device_policy = policy

    # Only set the policy if the context has already been initialized
    if self._context_handle is not None:
      pywrap_tfe.TFE_ContextSetThreadLocalDevicePlacementPolicy(
          self._handle, self._device_policy)
@property
def use_tfrt(self):
  return self._use_tfrt

@use_tfrt.setter
def use_tfrt(self, tfrt):
  """Sets whether to use TFRT."""
  if not isinstance(tfrt, bool):
    raise ValueError("Expecting a boolean but got %s" % type(tfrt))

  if self._use_tfrt != tfrt:
    # The runtime choice is fixed at initialization time.
    if self._initialized:
      raise ValueError("use_tfrt should be set before being initialized.")
    self._use_tfrt = tfrt
@property
def use_tfrt_distributed_runtime(self):
  return self._use_tfrt_distributed_runtime

@use_tfrt_distributed_runtime.setter
def use_tfrt_distributed_runtime(self, enable):
  """Sets whether to use TFRT distributed runtime.

  This is only effective when use_tfrt is also true. Note that currently TFRT
  distributed runtime is not function complete and this config is for testing
  only.

  Args:
    enable: A boolean to set whether to use TFRT distributed runtime.

  Raises:
    ValueError: If `enable` is not a bool, or if the context was already
      initialized.
  """
  if not isinstance(enable, bool):
    raise ValueError("Expecting a boolean but got %s" % type(enable))

  if self._use_tfrt_distributed_runtime != enable:
    if self._initialized:
      # The original message was copy-pasted from the `use_tfrt` setter and
      # named the wrong setting; report the setting actually being changed.
      raise ValueError("use_tfrt_distributed_runtime should be set before "
                       "being initialized.")
    self._use_tfrt_distributed_runtime = enable
def enable_run_metadata(self):
  """Enables tracing of op execution via RunMetadata.

  To retrieve the accumulated metadata call context.export_run_metadata()
  and to stop tracing call context.disable_run_metadata().
  """
  self.ensure_initialized()
  pywrap_tfe.TFE_ContextEnableRunMetadata(self._handle)

def disable_run_metadata(self):
  """Disables tracing of op execution via RunMetadata."""
  # No-op if the context was never initialized — nothing to disable.
  if not self._context_handle:
    return
  pywrap_tfe.TFE_ContextDisableRunMetadata(self._context_handle)
def enable_graph_collection(self):
  """Enables graph collection of executed functions.

  To retrieve the accumulated graphs call context.export_run_metadata()
  and to stop collecting graphs call context.disable_graph_collection().
  """
  self.ensure_initialized()
  pywrap_tfe.TFE_ContextEnableGraphCollection(self._handle)

def disable_graph_collection(self):
  """Disables graph collection of executed functions."""
  # No-op if the context was never initialized — nothing to disable.
  if not self._context_handle:
    return
  pywrap_tfe.TFE_ContextDisableGraphCollection(self._context_handle)
def export_run_metadata(self):
  """Returns a RunMetadata proto with accumulated information.

  The returned protocol buffer contains information since the most recent call
  to either enable_run_metadata or export_run_metadata.

  Returns:
    A RunMetadata protocol buffer. Or None if not enabled.
  """
  if not self._context_handle:
    return None
  # Serialize into a temporary C buffer, then parse back into a Python proto.
  with c_api_util.tf_buffer() as buffer_:
    pywrap_tfe.TFE_ContextExportRunMetadata(self._context_handle, buffer_)
    proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)
  run_metadata = config_pb2.RunMetadata()
  run_metadata.ParseFromString(compat.as_bytes(proto_data))
  return run_metadata
@property
def context_switches(self):
  """Returns a stack of context switches."""
  return self._context_switches
class _EagerDeviceContext(object):
  """Context-manager forcing placement of ops and Tensors on a device."""

  __slots__ = ["_device_name", "_ctx", "_stack"]

  def __init__(self, ctx, device_name):
    self._device_name = device_name
    self._ctx = ctx
    # Stack of (old name, old spec, new spec) tuples; supports re-entrant use
    # of the same context-manager object.
    self._stack = []

  # TODO(b/189233748): Consolidate the device string parsing logic with
  # tensorflow/core/util/device_name_utils.cc.
  def __enter__(self):
    ctx = self._ctx
    old_device_name = ctx.device_name
    old_device_spec = ctx.device_spec
    new_device_name = self._device_name
    cache_key = (old_device_name, new_device_name)
    # Parsing and merging device specs is expensive; results are memoized in
    # the module-level _device_parsing_cache keyed by (old, new) name.
    try:
      new_device_name, new_device_spec = _device_parsing_cache[cache_key]
    except TypeError:
      # Error while trying to compute the cache key.
      raise ValueError("Expecting a string device name. Got %s(%s)" %
                       (type(new_device_name), new_device_name))
    except KeyError:
      # Handle a cache miss.
      if new_device_name is not None:
        if not isinstance(new_device_name, six.string_types):
          raise ValueError("Expecting a string device name. Got %s(%s)" %
                           (type(new_device_name), new_device_name))
        device_spec = pydev.DeviceSpec.from_string(new_device_name)
        if old_device_name:
          # Merge the requested spec on top of the currently active one.
          new_device_spec = copy.copy(old_device_spec)
        else:
          # No active device yet: base the merge on the context's default
          # device (requires an initialized context).
          ctx.ensure_initialized()
          new_device_spec = pydev.DeviceSpec.from_string(
              ctx._context_devices[0])  # pylint: disable=protected-access
        new_device_spec = new_device_spec.make_merged_spec(device_spec)
      else:
        # device(None) resets to an unconstrained spec.
        new_device_spec = pydev.DeviceSpec.from_string("")
      new_device_name = new_device_spec.to_string()
      _device_parsing_cache[cache_key] = (new_device_name, new_device_spec)

    ctx._set_device(new_device_name, new_device_spec)  # pylint: disable=protected-access
    self._stack.append((old_device_name, old_device_spec, new_device_spec))

  def __exit__(self, *ex_info):
    ctx = self._ctx
    old_device_name, old_device_spec, new_device_spec = self._stack[-1]
    # Identity check (not equality) ensures scopes are exited in strict LIFO
    # order with the exact spec object this scope installed.
    if ctx.device_spec is not new_device_spec:
      raise RuntimeError("Exiting device scope without proper scope nesting")
    del self._stack[-1]
    ctx._set_device(old_device_name, old_device_spec)  # pylint: disable=protected-access
# Do not set directly. Use _set_context.
_context = None
_context_lock = threading.Lock()


def _set_context_locked(ctx):
  """Installs `ctx` as the singleton context; caller must hold _context_lock."""
  global _context
  # Keep the C layer's notion of the current eager context in sync.
  pywrap_tfe.TFE_Py_SetEagerContext(ctx)
  _context = ctx


def _set_context(ctx):
  with _context_lock:
    _set_context_locked(ctx)
def _create_context():
  """Creates and installs the singleton Context if one does not exist yet."""
  with _context_lock:
    if _context is None:
      ctx = Context()
      _set_context_locked(ctx)
def _reset_context():
  """Clears and re-initializes the singleton context.

  Should only be used for testing.
  """
  global _context
  global _device_parsing_cache

  # Garbage collect and clear scalar cache to avoid Tensor from current context
  # polluting next context.
  gc.collect()
  pywrap_tfe.TFE_ClearScalarCache()
  with _context_lock:
    if _context is not None:
      _context._clear_caches()
      _context = None
  _create_context()
  # Cached device specs reference the old context's devices; drop them.
  _device_parsing_cache = {}
def context():
  """Returns a singleton context object."""
  if _context is None:
    _create_context()
  return _context


def context_safe():
  """Returns current context (or None if one hasn't been initialized)."""
  return _context
def ensure_initialized():
  """Initialize the context."""
  context().ensure_initialized()


def initialize_logical_devices():
  """Initialize the virtual devices."""
  context()._initialize_logical_devices()  # pylint: disable=protected-access


def set_global_seed(seed):
  """Sets the eager mode seed."""
  context()._set_global_seed(seed)  # pylint: disable=protected-access


def global_seed():
  """Returns the eager mode seed."""
  return context()._seed  # pylint: disable=protected-access


def internal_operation_seed():
  """Returns the operation seed generated based on global seed."""
  return context()._internal_operation_seed()  # pylint: disable=protected-access
@tf_export("executing_eagerly", v1=[])
def executing_eagerly():
  """Checks whether the current thread has eager execution enabled.

  Eager execution is enabled by default and this API returns `True`
  in most of cases. However, this API might return `False` in the following use
  cases.

  *  Executing inside `tf.function`, unless under `tf.init_scope` or
     `tf.config.run_functions_eagerly(True)` is previously called.
  *  Executing inside a transformation function for `tf.dataset`.
  *  `tf.compat.v1.disable_eager_execution()` is called.

  General case:

  >>> print(tf.executing_eagerly())
  True

  Inside `tf.function`:

  >>> @tf.function
  ... def fn():
  ...   with tf.init_scope():
  ...     print(tf.executing_eagerly())
  ...   print(tf.executing_eagerly())
  >>> fn()
  True
  False

  Inside `tf.function` after `tf.config.run_functions_eagerly(True)` is called:

  >>> tf.config.run_functions_eagerly(True)
  >>> @tf.function
  ... def fn():
  ...   with tf.init_scope():
  ...     print(tf.executing_eagerly())
  ...   print(tf.executing_eagerly())
  >>> fn()
  True
  True
  >>> tf.config.run_functions_eagerly(False)

  Inside a transformation function for `tf.dataset`:

  >>> def data_fn(x):
  ...   print(tf.executing_eagerly())
  ...   return x
  >>> dataset = tf.data.Dataset.range(100)
  >>> dataset = dataset.map(data_fn)
  False

  Returns:
    `True` if the current thread has eager execution enabled.
  """
  ctx = context_safe()
  if ctx is None:
    # No context yet: fall back to the process-wide default execution mode.
    return default_execution_mode == EAGER_MODE

  return ctx.executing_eagerly()
@tf_export(v1=["executing_eagerly"])
def executing_eagerly_v1():
  """Checks whether the current thread has eager execution enabled.

  Eager execution is typically enabled via
  `tf.compat.v1.enable_eager_execution`, but may also be enabled within the
  context of a Python function via tf.contrib.eager.py_func.

  When eager execution is enabled, returns `True` in most cases. However,
  this API might return `False` in the following use cases.

  *  Executing inside `tf.function`, unless under `tf.init_scope` or
     `tf.config.run_functions_eagerly(True)` is previously called.
  *  Executing inside a transformation function for `tf.dataset`.
  *  `tf.compat.v1.disable_eager_execution()` is called.

  >>> tf.compat.v1.enable_eager_execution()

  General case:

  >>> print(tf.executing_eagerly())
  True

  Inside `tf.function`:

  >>> @tf.function
  ... def fn():
  ...   with tf.init_scope():
  ...     print(tf.executing_eagerly())
  ...   print(tf.executing_eagerly())
  >>> fn()
  True
  False

  Inside `tf.function`
  after  `tf.config.run_functions_eagerly(True)` is called:

  >>> tf.config.run_functions_eagerly(True)
  >>> @tf.function
  ... def fn():
  ...   with tf.init_scope():
  ...     print(tf.executing_eagerly())
  ...   print(tf.executing_eagerly())
  >>> fn()
  True
  True
  >>> tf.config.run_functions_eagerly(False)

  Inside a transformation function for `tf.dataset`:

  >>> def data_fn(x):
  ...   print(tf.executing_eagerly())
  ...   return x
  >>> dataset = tf.data.Dataset.range(100)
  >>> dataset = dataset.map(data_fn)
  False

  Returns:
    `True` if the current thread has eager execution enabled.
  """
  # The v1 symbol delegates directly to the v2 implementation.
  return executing_eagerly()
def in_eager_mode():
  """Use executing_eagerly() instead. This function will be removed."""
  return executing_eagerly()
def anonymous_name():
  """Returns the reserved anonymous shared name.

  In eager mode we create anonymous resources to avoid spurious sharing
  issues. When this reserved value is used as a shared name, the runtime
  generates a unique name on our behalf.

  Returns:
    The anonymous shared name.
  """
  # Keep in sync with `tensorflow::ResourceHandle::ANONYMOUS_NAME` in C++.
  return "cd2c89b7-88b7-44c8-ad83-06c2a9158347"
def graph_mode():
  """Context-manager to disable eager execution for the current thread."""
  return context()._mode(GRAPH_MODE)  # pylint: disable=protected-access


# Used by b/167638505 for keras backend API and Lambda layer.
@tf_export("__internal__.eager_context.eager_mode", v1=[])
def eager_mode():
  """Context-manager to enable eager execution for the current thread."""
  return context()._mode(EAGER_MODE)  # pylint: disable=protected-access


def scope_name():
  """Name of the current scope."""
  return context().scope_name
def device(name):
  """Context-manager to force placement of operations and Tensors on a device.

  Example:
  ```python
  with tf.device('gpu:0'):
    with tf.device('cpu:0'):
      shape = tf.constant([], dtype=tf.int32)
    x = tf.random.truncated_normal(shape, tf.float32)
  ```
  will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
  operation runs on GPU 0.

  Args:
    name: Name of the device (see context().devices()), or None to perform
      automatic placement.

  Returns:
    Context manager for setting the device.
  """
  # Device scopes require an initialized context.
  ensure_initialized()
  return context().device(name)
# Expose some properties of Context as internally public APIs (b/160348781).
@tf_export("__internal__.eager_context.get_config", v1=[])
def get_config():
  """Get the ConfigProto of Context.

  Returns:
    The ConfigProto of Context.
  """
  return context().config


@tf_export("__internal__.eager_context.get_device_name", v1=[])
def get_device_name():
  """Get the device name for the current thread.

  Returns:
    The device name for the current thread.
  """
  return context().device_name
@tf_export("__internal__.eager_context.set_soft_device_placement", v1=[])
def set_soft_device_placement(enabled):
  """Set if soft device placements should be allowed.

  Args:
    enabled: Whether to enable soft device placement.
  """
  context().soft_device_placement = enabled


@tf_export("__internal__.eager_context.get_executor", v1=[])
def get_executor():
  """Get the Executor of the current thread.

  Returns:
    The Executor of the current thread.
  """
  return context().executor
@tf_export("debugging.get_log_device_placement")
def get_log_device_placement():
  """Get if device placements are logged.

  Returns:
    If device placements are logged.
  """
  return context().log_device_placement


@tf_export("debugging.set_log_device_placement")
def set_log_device_placement(enabled):
  """Turns logging for device placement decisions on or off.

  Operations execute on a particular device, producing and consuming tensors on
  that device. This may change the performance of the operation or require
  TensorFlow to copy data to or from an accelerator, so knowing where operations
  execute is useful for debugging performance issues.

  For more advanced profiling, use the [TensorFlow
  profiler](https://www.tensorflow.org/guide/profiler).

  Device placement for operations is typically controlled by a `tf.device`
  scope, but there are exceptions, for example operations on a `tf.Variable`
  which follow the initial placement of the variable. Turning off soft device
  placement (with `tf.config.set_soft_device_placement`) provides more explicit
  control.

  >>> tf.debugging.set_log_device_placement(True)
  >>> tf.ones([])
  >>> # [...] op Fill in device /job:localhost/replica:0/task:0/device:GPU:0
  >>> with tf.device("CPU"):
  ...  tf.ones([])
  >>> # [...] op Fill in device /job:localhost/replica:0/task:0/device:CPU:0
  >>> tf.debugging.set_log_device_placement(False)

  Turning on `tf.debugging.set_log_device_placement` also logs the placement of
  ops inside `tf.function` when the function is called.

  Args:
    enabled: Whether to enabled device placement logging.
  """
  context().log_device_placement = enabled
@tf_contextlib.contextmanager
def device_policy(policy):
  """Context manager for setting device placement policy for current thread."""
  ctx = context()
  old_policy = ctx.device_policy
  try:
    ctx.device_policy = policy
    yield
  finally:
    # Restore the previous policy even if the body raises.
    ctx.device_policy = old_policy


def set_execution_mode(mode):
  """Sets execution mode for the current thread."""
  context().execution_mode = mode
# TODO(fishx): remove this method.
@tf_contextlib.contextmanager
def execution_mode(mode):
  """Context manager for setting execution mode for current thread."""
  if mode is None:
    yield
  else:
    ctx = context()
    # Swap in a fresh executor (async iff mode == ASYNC) for the scope.
    executor_new = executor.new_executor(mode == ASYNC)
    executor_old = ctx.executor
    try:
      # Drain pending work on the old executor before switching.
      executor_old.wait()
      ctx.executor = executor_new
      yield
    finally:
      ctx.executor = executor_old
      executor_new.wait()
@tf_contextlib.contextmanager
def executor_scope(e):
  """Context manager for changing executor for current thread.

  Args:
    e: A Executor to execute eager ops under this scope. Setting it to None will
      switch back to use the default executor for the context.

  Yields:
    Context manager for setting the executor for current thread.
  """
  ctx = context()
  executor_old = ctx.executor
  try:
    ctx.executor = e
    yield
  finally:
    # Always restore the previous executor.
    ctx.executor = executor_old
@tf_export("experimental.function_executor_type")
@tf_contextlib.contextmanager
def function_executor_type(executor_type):
  """Context manager for setting the executor of eager defined functions.

  Eager defined functions are functions decorated by tf.contrib.eager.defun.

  Args:
    executor_type: a string for the name of the executor to be used to execute
      functions defined by tf.contrib.eager.defun.

  Yields:
    Context manager for setting the executor of eager defined functions.
  """
  # Snapshot the current options so they can be restored on exit.
  current_options = context().function_call_options
  old_options = copy.copy(current_options)
  try:
    current_options.executor_type = executor_type
    yield
  finally:
    context().function_call_options = old_options
def is_async():
  """Returns true if current thread is in async mode."""
  return context().is_async()


def num_gpus():
  """Get the number of available GPU devices.

  Returns:
    The number of available GPU devices.
  """
  return context().num_gpus()
def enable_run_metadata():
  """Enables tracing of op execution via RunMetadata.

  To retrieve the accumulated metadata call context.export_run_metadata()
  and to stop tracing call context.disable_run_metadata().
  """
  context().enable_run_metadata()


def disable_run_metadata():
  """Disables tracing of op execution via RunMetadata."""
  context().disable_run_metadata()


def enable_graph_collection():
  """Enables graph collection of executed functions.

  To retrieve the accumulated graphs call context.export_run_metadata()
  and to stop collecting graphs call context.disable_graph_collection().
  """
  context().enable_graph_collection()


def disable_graph_collection():
  """Disables graph collection of executed functions."""
  context().disable_graph_collection()


def export_run_metadata():
  """Returns a RunMetadata proto with accumulated information.

  The returned protocol buffer contains information since the most recent call
  to either enable_run_metadata or export_run_metadata.

  Returns:
    A RunMetadata protocol buffer.
  """
  return context().export_run_metadata()
@contextlib.contextmanager
def collect_graphs(optimized=True):
  """Collects a flat list of pre- or post-optimization graphs.

  The collected graphs include device placements, which can be useful for
  testing.

  Usage:

  ```
  @def_function.function
  def f(x):
    return x + constant_op.constant(1.)

  with context.collect_graphs() as graphs:
    with ops.device("CPU:0"):
      f(constant_op.constant(1.))

  graph, = graphs  # `graph` contains a single GraphDef for inspection
  ```

  Args:
    optimized: whether to collect optimized graphs or non-optimized graphs

  Yields:
    A list of GraphDefs, populated when the context manager exits.
  """
  ctx = context()
  ctx.enable_graph_collection()
  try:
    graphs = []
    yield graphs
    # Only reached if the body did not raise; collection is disabled in the
    # finally block either way, and the population loop below is skipped on
    # error because the exception propagates after `finally`.
    metadata = ctx.export_run_metadata()
  finally:
    ctx.disable_graph_collection()
  for graph in metadata.function_graphs:
    if optimized:
      graphs.append(graph.post_optimization_graph)
    else:
      graphs.append(graph.pre_optimization_graph)
def get_server_def():
  """Returns the server definition held by the current context."""
  ctx = context()
  return ctx.get_server_def()
def set_server_def(server_def):
  """Configures the current context with the given `server_def`."""
  ctx = context()
  ctx.set_server_def(server_def)
def update_server_def(server_def):
  """Updates the current context with the given `server_def`."""
  ctx = context()
  ctx.update_server_def(server_def)
def check_alive(worker_name):
  """Returns the context's liveness check result for `worker_name`."""
  ctx = context()
  return ctx.check_alive(worker_name)
@tf_export("experimental.async_scope")
@tf_contextlib.contextmanager
def async_scope():
  """Context manager for grouping async operations.

  Ops/function calls inside the scope can return before finishing the actual
  execution. When exiting the async scope, a synchronization barrier will be
  automatically added to ensure the completion of all async op and function
  execution, potentially raising exceptions if async execution results in
  an error state.

  Users may write the following code to asynchronously invoke `train_step_fn`
  and log the `loss` metric for every `num_steps` steps in a training loop.
  `train_step_fn` internally consumes data using `iterator.get_next()`, and may
  throw OutOfRangeError when running out of data. In the case:

  ```
  try:
    with tf.experimental.async_scope():
      for _ in range(num_steps):
        # Step function updates the metric `loss` internally
        train_step_fn()
  except tf.errors.OutOfRangeError:
    tf.experimental.async_clear_error()
  logging.info('loss = %s', loss.numpy())
  ```

  Yields:
    Context manager for grouping async operations.
  """
  # TODO(haoyuzhang): replace env var once we have a config method to turn on
  # and off async streaming RPC
  env_var_name = "TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE"
  saved_value = os.environ.get(env_var_name)
  try:
    os.environ[env_var_name] = str(True)
    yield
    # Note: sync local and remote executors iff the async block does not raise
    # an exception. Triggering sync after an exception may lead to derived
    # runtime errors and unexpected exception types.
    context().sync_executors()
  finally:
    # Restore the environment variable to exactly its prior state.
    if saved_value is None:
      del os.environ[env_var_name]
    else:
      os.environ[env_var_name] = saved_value
def async_wait():
  """Sync all async operations and raise any errors during execution.

  In async execution mode, an op/function call can return before finishing the
  actual execution. Calling this method creates a synchronization barrier for
  all async op and function execution. It only returns when all pending nodes
  are finished, potentially raising exceptions if async execution results in
  an error state. It is a no-op if the context is not initialized.
  """
  # An escape hatch: when this env var is "True", skip synchronization.
  if os.environ.get("TF_PS_DISABLE_ASYNC_EXECUTOR_GLOBALLY") == str(True):
    return
  ctx = context()
  # Only sync when the context has actually been initialized.
  if ctx._context_handle is not None:  # pylint: disable=protected-access
    ctx.sync_executors()
@tf_export("experimental.async_clear_error")
def async_clear_error():
  """Clear pending operations and error statuses in async execution.

  In async execution mode, an error in op/function execution can lead to errors
  in subsequent ops/functions that are scheduled but not yet executed. Calling
  this method clears all pending operations and reset the async execution state.

  Example:

  ```
  while True:
    try:
      # Step function updates the metric `loss` internally
      train_step_fn()
    except tf.errors.OutOfRangeError:
      tf.experimental.async_clear_error()
      break
  logging.info('loss = %s', loss.numpy())
  ```
  """
  ctx = context()
  ctx.clear_executor_errors()
def add_function(fdef):
  """Adds a function definition to the context."""
  ctx = context()
  ctx.add_function(fdef)
def remove_function(name):
  """Removes the function named `name` from the context."""
  ctx = context()
  ctx.remove_function(name)
def get_function_def(name):
  """Returns the context's function definition for `name`."""
  ctx = context()
  return ctx.get_function_def(name)
def register_custom_device(device_capsule, device_name, device_info_capsule):
  """Calls TFE_RegisterCustomDevice to register a custom device with Python.

  Enables using C extensions specifying a custom device from Python. See the
  experimental eager C API in tensorflow/c/eager/c_api_experimental.h for
  details.

  Note that custom devices are not currently supported inside `tf.function`s.

  Args:
    device_capsule: A PyCapsule with the name set to 'TFE_CustomDevice'
      containing a pointer to a TFE_CustomDevice struct. The capsule retains
      ownership of the memory.
    device_name: A string indicating the name to register the custom device
      under, e.g. '/job:localhost/replica:0/task:0/device:CUSTOM:0'. It may
      subsequently be passed to `with tf.device(...):`.
    device_info_capsule: A PyCapsule with the name set to
      'TFE_CustomDevice_DeviceInfo' containing a pointer to a device-specific
      struct with the initial state of the custom device (the void* device_info
      argument to TFE_RegisterCustomDevice). This method takes ownership of the
      memory and clears the capsule destructor.
  """
  ctx = context()
  ctx.register_custom_device(device_capsule, device_name, device_info_capsule)
# Not every user creates a Context via context.context()
# (for example, enable_eager_execution in python/framework/ops.py),
# but they do all import this file. Note that IS_IN_GRAPH_MODE and
# in_graph_mode are both parameterless functions.
def _tmp_in_graph_mode():
  # With no context yet initialized, mirror `is_in_graph_mode`'s default and
  # report graph mode; otherwise graph mode is simply the negation of eager
  # execution. Short-circuiting keeps `executing_eagerly()` from being called
  # before a context exists.
  return context_safe() is None or not executing_eagerly()


is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode
| [
"tensorflow.python.pywrap_tfe.TFE_HostAddressSpace",
"tensorflow.python.pywrap_tfe.TFE_ContextSetSoftDevicePlacement",
"tensorflow.python.pywrap_tfe.TFE_Py_RegisterCustomDevice",
"tensorflow.python.pywrap_tfe.TF_EnableXlaDevices",
"tensorflow.core.protobuf.config_pb2.GPUOptions.Experimental",
"tensorflow.... | [((2077, 2109), 'tensorflow.python.framework.device.DeviceSpec.from_string', 'pydev.DeviceSpec.from_string', (['""""""'], {}), "('')\n", (2105, 2109), True, 'from tensorflow.python.framework import device as pydev\n'), ((2505, 2641), 'tensorflow.python.eager.monitoring.Counter', 'monitoring.Counter', (['"""/tensorflow/api/python/eager_context_create_counter"""', '"""Counter for number of eager contexts created in Python."""'], {}), "('/tensorflow/api/python/eager_context_create_counter',\n 'Counter for number of eager contexts created in Python.')\n", (2523, 2641), False, 'from tensorflow.python.eager import monitoring\n'), ((7544, 7649), 'collections.namedtuple', 'collections.namedtuple', (['"""ContextSwitch"""', "['is_building_function', 'enter_context_fn', 'device_stack']"], {}), "('ContextSwitch', ['is_building_function',\n 'enter_context_fn', 'device_stack'])\n", (7566, 7649), False, 'import collections\n'), ((9576, 9640), 'collections.namedtuple', 'collections.namedtuple', (['"""LogicalDevice"""', "['name', 'device_type']"], {}), "('LogicalDevice', ['name', 'device_type'])\n", (9598, 9640), False, 'import collections\n'), ((9517, 9550), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""config.LogicalDevice"""'], {}), "('config.LogicalDevice')\n", (9526, 9550), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((10347, 10446), 'collections.namedtuple', 'collections.namedtuple', (['"""LogicalDeviceConfiguration"""', "['memory_limit', 'experimental_priority']"], {}), "('LogicalDeviceConfiguration', ['memory_limit',\n 'experimental_priority'])\n", (10369, 10446), False, 'import collections\n'), ((10201, 10301), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""config.LogicalDeviceConfiguration"""', '"""config.experimental.VirtualDeviceConfiguration"""'], {}), "('config.LogicalDeviceConfiguration',\n 'config.experimental.VirtualDeviceConfiguration')\n", (10210, 10301), False, 'from 
tensorflow.python.util.tf_export import tf_export\n'), ((11549, 11614), 'collections.namedtuple', 'collections.namedtuple', (['"""PhysicalDevice"""', "['name', 'device_type']"], {}), "('PhysicalDevice', ['name', 'device_type'])\n", (11571, 11614), False, 'import collections\n'), ((11488, 11522), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""config.PhysicalDevice"""'], {}), "('config.PhysicalDevice')\n", (11497, 11522), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((71722, 71738), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (71736, 71738), False, 'import threading\n'), ((73453, 73490), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""executing_eagerly"""'], {'v1': '[]'}), "('executing_eagerly', v1=[])\n", (73462, 73490), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((75023, 75058), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', ([], {'v1': "['executing_eagerly']"}), "(v1=['executing_eagerly'])\n", (75032, 75058), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((77514, 77571), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""__internal__.eager_context.eager_mode"""'], {'v1': '[]'}), "('__internal__.eager_context.eager_mode', v1=[])\n", (77523, 77571), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((78517, 78574), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""__internal__.eager_context.get_config"""'], {'v1': '[]'}), "('__internal__.eager_context.get_config', v1=[])\n", (78526, 78574), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((78709, 78771), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""__internal__.eager_context.get_device_name"""'], {'v1': '[]'}), "('__internal__.eager_context.get_device_name', v1=[])\n", (78718, 78771), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((78940, 79012), 
'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""__internal__.eager_context.set_soft_device_placement"""'], {'v1': '[]'}), "('__internal__.eager_context.set_soft_device_placement', v1=[])\n", (78949, 79012), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((79223, 79282), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""__internal__.eager_context.get_executor"""'], {'v1': '[]'}), "('__internal__.eager_context.get_executor', v1=[])\n", (79232, 79282), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((79437, 79484), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""debugging.get_log_device_placement"""'], {}), "('debugging.get_log_device_placement')\n", (79446, 79484), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((79657, 79704), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""debugging.set_log_device_placement"""'], {}), "('debugging.set_log_device_placement')\n", (79666, 79704), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((82451, 82499), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""experimental.function_executor_type"""'], {}), "('experimental.function_executor_type')\n", (82460, 82499), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((85843, 85880), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""experimental.async_scope"""'], {}), "('experimental.async_scope')\n", (85852, 85880), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((88386, 88429), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""experimental.async_clear_error"""'], {}), "('experimental.async_clear_error')\n", (88395, 88429), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((1754, 1767), 'tensorflow.python.tf2.enabled', 'tf2.enabled', ([], {}), '()\n', (1765, 1767), False, 'from tensorflow.python import tf2\n'), ((2893, 2933), 'os.getenv', 
'os.getenv', (['"""TF_RUN_EAGER_OP_AS_FUNCTION"""'], {}), "('TF_RUN_EAGER_OP_AS_FUNCTION')\n", (2902, 2933), False, 'import os\n'), ((4134, 4182), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""__internal__.is_tfrt_enabled"""'], {'v1': '[]'}), "('__internal__.is_tfrt_enabled', v1=[])\n", (4143, 4182), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((60666, 60744), 'tensorflow.python.util.deprecation.deprecated', 'deprecated', (['None', '"""XLA:CPU and XLA:GPU devices are deprecated"""'], {'warn_once': '(True)'}), "(None, 'XLA:CPU and XLA:GPU devices are deprecated', warn_once=True)\n", (60676, 60744), False, 'from tensorflow.python.util.deprecation import deprecated\n'), ((71791, 71829), 'tensorflow.python.pywrap_tfe.TFE_Py_SetEagerContext', 'pywrap_tfe.TFE_Py_SetEagerContext', (['ctx'], {}), '(ctx)\n', (71824, 71829), False, 'from tensorflow.python import pywrap_tfe\n'), ((72328, 72340), 'gc.collect', 'gc.collect', ([], {}), '()\n', (72338, 72340), False, 'import gc\n'), ((72343, 72376), 'tensorflow.python.pywrap_tfe.TFE_ClearScalarCache', 'pywrap_tfe.TFE_ClearScalarCache', ([], {}), '()\n', (72374, 72376), False, 'from tensorflow.python import pywrap_tfe\n'), ((83022, 83048), 'copy.copy', 'copy.copy', (['current_options'], {}), '(current_options)\n', (83031, 83048), False, 'import copy\n'), ((87160, 87196), 'os.environ.get', 'os.environ.get', (['remote_async_env_var'], {}), '(remote_async_env_var)\n', (87174, 87196), False, 'import os\n'), ((4446, 4471), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4469, 4471), False, 'import collections\n'), ((12675, 12691), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (12689, 12691), False, 'import threading\n'), ((15947, 16087), 'tensorflow.python.pywrap_tfe.EagerContextThreadLocalData', 'pywrap_tfe.EagerContextThreadLocalData', (['self'], {'is_eager': '(lambda : default_execution_mode == EAGER_MODE)', 'device_spec': '_starting_device_spec'}), '(self, 
is_eager=lambda : \n default_execution_mode == EAGER_MODE, device_spec=_starting_device_spec)\n', (15985, 16087), False, 'from tensorflow.python import pywrap_tfe\n'), ((16297, 16313), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (16311, 16313), False, 'import threading\n'), ((17254, 17270), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (17268, 17270), False, 'import threading\n'), ((18275, 18294), 'random.Random', 'random.Random', (['seed'], {}), '(seed)\n', (18288, 18294), False, 'import random\n'), ((19038, 19093), 'tensorflow.python.pywrap_tfe.TFE_ContextListDevices', 'pywrap_tfe.TFE_ContextListDevices', (['self._context_handle'], {}), '(self._context_handle)\n', (19071, 19093), False, 'from tensorflow.python import pywrap_tfe\n'), ((22646, 22679), 'tensorflow.python.pywrap_tfe.TFE_ClearScalarCache', 'pywrap_tfe.TFE_ClearScalarCache', ([], {}), '()\n', (22677, 22679), False, 'from tensorflow.python import pywrap_tfe\n'), ((27462, 27531), 'tensorflow.python.pywrap_tfe.TFE_InsertConfigKeyValue', 'pywrap_tfe.TFE_InsertConfigKeyValue', (['self._context_handle', 'key', 'value'], {}), '(self._context_handle, key, value)\n', (27497, 27531), False, 'from tensorflow.python import pywrap_tfe\n'), ((27875, 27937), 'tensorflow.python.pywrap_tfe.TFE_DeleteConfigKeyValue', 'pywrap_tfe.TFE_DeleteConfigKeyValue', (['self._context_handle', 'key'], {}), '(self._context_handle, key)\n', (27910, 27937), False, 'from tensorflow.python import pywrap_tfe\n'), ((31982, 32044), 'tensorflow.python.pywrap_tfe.TFE_AbortCollectiveOps', 'pywrap_tfe.TFE_AbortCollectiveOps', (['self._handle', 'code', 'message'], {}), '(self._handle, code, message)\n', (32015, 32044), False, 'from tensorflow.python import pywrap_tfe\n'), ((32868, 32946), 'tensorflow.python.pywrap_tfe.TFE_CollectiveOpsCheckPeerHealth', 'pywrap_tfe.TFE_CollectiveOpsCheckPeerHealth', (['self._handle', 'task', 'timeout_in_ms'], {}), '(self._handle, task, timeout_in_ms)\n', (32911, 32946), False, 'from 
tensorflow.python import pywrap_tfe\n'), ((38182, 38206), 'tensorflow.core.protobuf.config_pb2.ConfigProto', 'config_pb2.ConfigProto', ([], {}), '()\n', (38204, 38206), False, 'from tensorflow.core.protobuf import config_pb2\n'), ((39090, 39125), 'tensorflow.python.pywrap_tfe.TF_IsMlirBridgeEnabled', 'pywrap_tfe.TF_IsMlirBridgeEnabled', ([], {}), '()\n', (39123, 39125), False, 'from tensorflow.python import pywrap_tfe\n'), ((45496, 45547), 'tensorflow.python.pywrap_tfe.TFE_ContextAddFunction', 'pywrap_tfe.TFE_ContextAddFunction', (['self._handle', 'fn'], {}), '(self._handle, fn)\n', (45529, 45547), False, 'from tensorflow.python import pywrap_tfe\n'), ((46484, 46510), 'tensorflow.core.framework.function_pb2.FunctionDef', 'function_pb2.FunctionDef', ([], {}), '()\n', (46508, 46510), False, 'from tensorflow.core.framework import function_pb2\n'), ((46802, 46908), 'tensorflow.python.pywrap_tfe.TFE_Py_RegisterCustomDevice', 'pywrap_tfe.TFE_Py_RegisterCustomDevice', (['self._handle', 'device_capsule', 'device_name', 'device_info_capsule'], {}), '(self._handle, device_capsule,\n device_name, device_info_capsule)\n', (46840, 46908), False, 'from tensorflow.python import pywrap_tfe\n'), ((47204, 47261), 'tensorflow.python.pywrap_tfe.TFE_Py_PackEagerTensors', 'pywrap_tfe.TFE_Py_PackEagerTensors', (['self._handle', 'tensors'], {}), '(self._handle, tensors)\n', (47238, 47261), False, 'from tensorflow.python import pywrap_tfe\n'), ((47765, 47821), 'tensorflow.python.pywrap_tfe.TFE_ContextRemoveFunction', 'pywrap_tfe.TFE_ContextRemoveFunction', (['self._handle', 'name'], {}), '(self._handle, name)\n', (47801, 47821), False, 'from tensorflow.python import pywrap_tfe\n'), ((52480, 52517), 'tensorflow.python.pywrap_tfe.TF_GetDeviceDetails', 'pywrap_tfe.TF_GetDeviceDetails', (['index'], {}), '(index)\n', (52510, 52517), False, 'from tensorflow.python import pywrap_tfe\n'), ((56249, 56304), 'tensorflow.python.pywrap_tfe.TFE_GetMemoryInfo', 'pywrap_tfe.TFE_GetMemoryInfo', 
(['self._context_handle', 'dev'], {}), '(self._context_handle, dev)\n', (56277, 56304), False, 'from tensorflow.python import pywrap_tfe\n'), ((56475, 56533), 'tensorflow.python.pywrap_tfe.TFE_ResetMemoryStats', 'pywrap_tfe.TFE_ResetMemoryStats', (['self._context_handle', 'dev'], {}), '(self._context_handle, dev)\n', (56506, 56533), False, 'from tensorflow.python import pywrap_tfe\n'), ((60329, 60404), 'tensorflow.python.pywrap_tfe.TFE_SetLogicalCpuDevices', 'pywrap_tfe.TFE_SetLogicalCpuDevices', (['self._context_handle', 'num_cpus', 'prefix'], {}), '(self._context_handle, num_cpus, prefix)\n', (60364, 60404), False, 'from tensorflow.python import pywrap_tfe\n'), ((60532, 60626), 'tensorflow.python.pywrap_tfe.TF_GetCompilerIr', 'pywrap_tfe.TF_GetCompilerIr', (['self._context_handle', 'function_name', 'stage', 'device_name', 'args'], {}), '(self._context_handle, function_name, stage,\n device_name, args)\n', (60559, 60626), False, 'from tensorflow.python import pywrap_tfe\n'), ((60848, 60880), 'tensorflow.python.pywrap_tfe.TF_EnableXlaDevices', 'pywrap_tfe.TF_EnableXlaDevices', ([], {}), '()\n', (60878, 60880), False, 'from tensorflow.python import pywrap_tfe\n'), ((60937, 60972), 'tensorflow.python.pywrap_tfe.TF_IsMlirBridgeEnabled', 'pywrap_tfe.TF_IsMlirBridgeEnabled', ([], {}), '()\n', (60970, 60972), False, 'from tensorflow.python import pywrap_tfe\n'), ((61153, 61192), 'tensorflow.python.pywrap_tfe.TF_EnableMlirBridge', 'pywrap_tfe.TF_EnableMlirBridge', (['enabled'], {}), '(enabled)\n', (61183, 61192), False, 'from tensorflow.python import pywrap_tfe\n'), ((67690, 67743), 'tensorflow.python.pywrap_tfe.TFE_ContextEnableRunMetadata', 'pywrap_tfe.TFE_ContextEnableRunMetadata', (['self._handle'], {}), '(self._handle)\n', (67729, 67743), False, 'from tensorflow.python import pywrap_tfe\n'), ((67889, 67951), 'tensorflow.python.pywrap_tfe.TFE_ContextDisableRunMetadata', 'pywrap_tfe.TFE_ContextDisableRunMetadata', (['self._context_handle'], {}), 
'(self._context_handle)\n', (67929, 67951), False, 'from tensorflow.python import pywrap_tfe\n'), ((68237, 68294), 'tensorflow.python.pywrap_tfe.TFE_ContextEnableGraphCollection', 'pywrap_tfe.TFE_ContextEnableGraphCollection', (['self._handle'], {}), '(self._handle)\n', (68280, 68294), False, 'from tensorflow.python import pywrap_tfe\n'), ((68443, 68509), 'tensorflow.python.pywrap_tfe.TFE_ContextDisableGraphCollection', 'pywrap_tfe.TFE_ContextDisableGraphCollection', (['self._context_handle'], {}), '(self._context_handle)\n', (68487, 68509), False, 'from tensorflow.python import pywrap_tfe\n'), ((69082, 69106), 'tensorflow.core.protobuf.config_pb2.RunMetadata', 'config_pb2.RunMetadata', ([], {}), '()\n', (69104, 69106), False, 'from tensorflow.core.protobuf import config_pb2\n'), ((81743, 81779), 'tensorflow.python.eager.executor.new_executor', 'executor.new_executor', (['(mode == ASYNC)'], {}), '(mode == ASYNC)\n', (81764, 81779), False, 'from tensorflow.python.eager import executor\n'), ((88200, 88246), 'os.environ.get', 'os.environ.get', (['disable_async_executor_env_var'], {}), '(disable_async_executor_env_var)\n', (88214, 88246), False, 'import os\n'), ((18405, 18460), 'tensorflow.python.pywrap_tfe.TFE_ContextClearCaches', 'pywrap_tfe.TFE_ContextClearCaches', (['self._context_handle'], {}), '(self._context_handle)\n', (18438, 18460), False, 'from tensorflow.python import pywrap_tfe\n'), ((20264, 20307), 'tensorflow.python.pywrap_tfe.TF_DeleteDeviceList', 'pywrap_tfe.TF_DeleteDeviceList', (['device_list'], {}), '(device_list)\n', (20294, 20307), False, 'from tensorflow.python import pywrap_tfe\n'), ((20575, 20609), 'tensorflow.python.pywrap_tfe.TFE_NewContextOptions', 'pywrap_tfe.TFE_NewContextOptions', ([], {}), '()\n', (20607, 20609), False, 'from tensorflow.python import pywrap_tfe\n'), ((23779, 23872), 'tensorflow.python.pywrap_tfe.TFE_ContextSetServerDef', 'pywrap_tfe.TFE_ContextSetServerDef', (['self._context_handle', 'keep_alive_secs', 'server_def_str'], 
{}), '(self._context_handle, keep_alive_secs,\n server_def_str)\n', (23813, 23872), False, 'from tensorflow.python import pywrap_tfe\n'), ((24866, 24962), 'tensorflow.python.pywrap_tfe.TFE_ContextUpdateServerDef', 'pywrap_tfe.TFE_ContextUpdateServerDef', (['self._context_handle', 'keep_alive_secs', 'server_def_str'], {}), '(self._context_handle, keep_alive_secs,\n server_def_str)\n', (24903, 24962), False, 'from tensorflow.python import pywrap_tfe\n'), ((25565, 25632), 'tensorflow.python.pywrap_tfe.TFE_ContextCheckAlive', 'pywrap_tfe.TFE_ContextCheckAlive', (['self._context_handle', 'worker_name'], {}), '(self._context_handle, worker_name)\n', (25597, 25632), False, 'from tensorflow.python import pywrap_tfe\n'), ((26256, 26313), 'tensorflow.python.pywrap_tfe.TFE_ContextSyncExecutors', 'pywrap_tfe.TFE_ContextSyncExecutors', (['self._context_handle'], {}), '(self._context_handle)\n', (26291, 26313), False, 'from tensorflow.python import pywrap_tfe\n'), ((26894, 26952), 'tensorflow.python.pywrap_tfe.TFE_ContextClearExecutors', 'pywrap_tfe.TFE_ContextClearExecutors', (['self._context_handle'], {}), '(self._context_handle)\n', (26930, 26952), False, 'from tensorflow.python import pywrap_tfe\n'), ((27108, 27239), 'absl.logging.warning', 'logging.warning', (['"""Configuring coordination service type may not be effective because the context is already initialized."""'], {}), "(\n 'Configuring coordination service type may not be effective because the context is already initialized.'\n )\n", (27123, 27239), False, 'from absl import logging\n'), ((27606, 27628), 'tensorflow.python.framework.c_api_util.tf_buffer', 'c_api_util.tf_buffer', ([], {}), '()\n', (27626, 27628), False, 'from tensorflow.python.framework import c_api_util\n'), ((27647, 27715), 'tensorflow.python.pywrap_tfe.TFE_GetConfigKeyValue', 'pywrap_tfe.TFE_GetConfigKeyValue', (['self._context_handle', 'key', 'buffer_'], {}), '(self._context_handle, key, buffer_)\n', (27679, 27715), False, 'from tensorflow.python 
import pywrap_tfe\n'), ((28215, 28303), 'tensorflow.python.pywrap_tfe.TFE_ReportErrorToCluster', 'pywrap_tfe.TFE_ReportErrorToCluster', (['self._context_handle', 'error_code', 'error_message'], {}), '(self._context_handle, error_code,\n error_message)\n', (28250, 28303), False, 'from tensorflow.python import pywrap_tfe\n'), ((28547, 28602), 'tensorflow.python.pywrap_tfe.TFE_ContextClearCaches', 'pywrap_tfe.TFE_ContextClearCaches', (['self._context_handle'], {}), '(self._context_handle)\n', (28580, 28602), False, 'from tensorflow.python import pywrap_tfe\n'), ((29222, 29355), 'absl.logging.warning', 'logging.warning', (['"""Enabling collective ops after program startup may cause error when accessing previously created tensors."""'], {}), "(\n 'Enabling collective ops after program startup may cause error when accessing previously created tensors.'\n )\n", (29237, 29355), False, 'from absl import logging\n'), ((35958, 35984), 'tensorflow.python.framework.device.is_device_spec', 'pydev.is_device_spec', (['name'], {}), '(name)\n', (35978, 35984), True, 'from tensorflow.python.framework import device as pydev\n'), ((36250, 36272), 'tensorflow.python.framework.c_api_util.tf_buffer', 'c_api_util.tf_buffer', ([], {}), '()\n', (36270, 36272), False, 'from tensorflow.python.framework import c_api_util\n'), ((36291, 36353), 'tensorflow.python.pywrap_tfe.TFE_HostAddressSpace', 'pywrap_tfe.TFE_HostAddressSpace', (['self._context_handle', 'buffer_'], {}), '(self._context_handle, buffer_)\n', (36322, 36353), False, 'from tensorflow.python import pywrap_tfe\n'), ((37727, 37791), 'tensorflow.python.pywrap_tfe.TFE_ContextGetExecutorForThread', 'pywrap_tfe.TFE_ContextGetExecutorForThread', (['self._context_handle'], {}), '(self._context_handle)\n', (37769, 37791), False, 'from tensorflow.python import pywrap_tfe\n'), ((46299, 46321), 'tensorflow.python.framework.c_api_util.tf_buffer', 'c_api_util.tf_buffer', ([], {}), '()\n', (46319, 46321), False, 'from tensorflow.python.framework 
import c_api_util\n'), ((46340, 46405), 'tensorflow.python.pywrap_tfe.TFE_ContextGetFunctionDef', 'pywrap_tfe.TFE_ContextGetFunctionDef', (['self._handle', 'name', 'buffer_'], {}), '(self._handle, name, buffer_)\n', (46376, 46405), False, 'from tensorflow.python import pywrap_tfe\n'), ((46425, 46464), 'tensorflow.python.client.pywrap_tf_session.TF_GetBuffer', 'pywrap_tf_session.TF_GetBuffer', (['buffer_'], {}), '(buffer_)\n', (46455, 46464), False, 'from tensorflow.python.client import pywrap_tf_session\n'), ((47481, 47534), 'tensorflow.python.pywrap_tfe.TFE_ContextListFunctionNames', 'pywrap_tfe.TFE_ContextListFunctionNames', (['self._handle'], {}), '(self._handle)\n', (47520, 47534), False, 'from tensorflow.python import pywrap_tfe\n'), ((47953, 48006), 'tensorflow.python.pywrap_tfe.TFE_ContextHasFunction', 'pywrap_tfe.TFE_ContextHasFunction', (['self._handle', 'name'], {}), '(self._handle, name)\n', (47986, 48006), False, 'from tensorflow.python import pywrap_tfe\n'), ((50208, 50243), 'tensorflow.python.pywrap_tfe.TF_ListPhysicalDevices', 'pywrap_tfe.TF_ListPhysicalDevices', ([], {}), '()\n', (50241, 50243), False, 'from tensorflow.python import pywrap_tfe\n'), ((64615, 64681), 'tensorflow.python.pywrap_tfe.TFE_ContextSetSoftDevicePlacement', 'pywrap_tfe.TFE_ContextSetSoftDevicePlacement', (['self._handle', 'enable'], {}), '(self._handle, enable)\n', (64659, 64681), False, 'from tensorflow.python import pywrap_tfe\n'), ((64993, 65058), 'tensorflow.python.pywrap_tfe.TFE_ContextSetLogDevicePlacement', 'pywrap_tfe.TFE_ContextSetLogDevicePlacement', (['self._handle', 'enable'], {}), '(self._handle, enable)\n', (65036, 65058), False, 'from tensorflow.python import pywrap_tfe\n'), ((65379, 65446), 'tensorflow.python.pywrap_tfe.TFE_ContextSetRunEagerOpAsFunction', 'pywrap_tfe.TFE_ContextSetRunEagerOpAsFunction', (['self._handle', 'enable'], {}), '(self._handle, enable)\n', (65424, 65446), False, 'from tensorflow.python import pywrap_tfe\n'), ((65663, 65723), 
'tensorflow.python.pywrap_tfe.TFE_ContextGetDevicePlacementPolicy', 'pywrap_tfe.TFE_ContextGetDevicePlacementPolicy', (['self._handle'], {}), '(self._handle)\n', (65709, 65723), False, 'from tensorflow.python import pywrap_tfe\n'), ((68892, 68914), 'tensorflow.python.framework.c_api_util.tf_buffer', 'c_api_util.tf_buffer', ([], {}), '()\n', (68912, 68914), False, 'from tensorflow.python.framework import c_api_util\n'), ((68933, 69003), 'tensorflow.python.pywrap_tfe.TFE_ContextExportRunMetadata', 'pywrap_tfe.TFE_ContextExportRunMetadata', (['self._context_handle', 'buffer_'], {}), '(self._context_handle, buffer_)\n', (68972, 69003), False, 'from tensorflow.python import pywrap_tfe\n'), ((69023, 69062), 'tensorflow.python.client.pywrap_tf_session.TF_GetBuffer', 'pywrap_tf_session.TF_GetBuffer', (['buffer_'], {}), '(buffer_)\n', (69053, 69062), False, 'from tensorflow.python.client import pywrap_tf_session\n'), ((69140, 69167), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['proto_data'], {}), '(proto_data)\n', (69155, 69167), False, 'from tensorflow.python.util import compat\n'), ((19377, 19419), 'tensorflow.python.pywrap_tfe.TF_DeviceListCount', 'pywrap_tfe.TF_DeviceListCount', (['device_list'], {}), '(device_list)\n', (19406, 19419), False, 'from tensorflow.python import pywrap_tfe\n'), ((19441, 19485), 'tensorflow.python.pywrap_tfe.TF_DeviceListName', 'pywrap_tfe.TF_DeviceListName', (['device_list', 'i'], {}), '(device_list, i)\n', (19469, 19485), False, 'from tensorflow.python import pywrap_tfe\n'), ((19564, 19602), 'tensorflow.python.framework.device.DeviceSpec.from_string', 'pydev.DeviceSpec.from_string', (['dev_name'], {}), '(dev_name)\n', (19592, 19602), True, 'from tensorflow.python.framework import device as pydev\n'), ((19975, 20019), 'tensorflow.python.pywrap_tfe.TF_DeviceListType', 'pywrap_tfe.TF_DeviceListType', (['device_list', 'i'], {}), '(device_list, i)\n', (20003, 20019), False, 'from tensorflow.python import pywrap_tfe\n'), 
((20682, 20738), 'tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetConfig', 'pywrap_tfe.TFE_ContextOptionsSetConfig', (['opts', 'config_str'], {}), '(opts, config_str)\n', (20720, 20738), False, 'from tensorflow.python import pywrap_tfe\n'), ((21536, 21631), 'tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetRunEagerOpAsFunction', 'pywrap_tfe.TFE_ContextOptionsSetRunEagerOpAsFunction', (['opts', 'self._run_eager_op_as_function'], {}), '(opts, self.\n _run_eager_op_as_function)\n', (21588, 21631), False, 'from tensorflow.python import pywrap_tfe\n'), ((21665, 21696), 'tensorflow.python.pywrap_tfe.TFE_NewContext', 'pywrap_tfe.TFE_NewContext', (['opts'], {}), '(opts)\n', (21690, 21696), False, 'from tensorflow.python import pywrap_tfe\n'), ((21720, 21761), 'tensorflow.python.pywrap_tfe.TFE_DeleteContextOptions', 'pywrap_tfe.TFE_DeleteContextOptions', (['opts'], {}), '(opts)\n', (21755, 21761), False, 'from tensorflow.python import pywrap_tfe\n'), ((22095, 22183), 'tensorflow.python.pywrap_tfe.TFE_ContextSetServerDef', 'pywrap_tfe.TFE_ContextSetServerDef', (['context_handle', '_KEEP_ALIVE_SECS', 'server_def_str'], {}), '(context_handle, _KEEP_ALIVE_SECS,\n server_def_str)\n', (22129, 22183), False, 'from tensorflow.python import pywrap_tfe\n'), ((29523, 29595), 'tensorflow.python.pywrap_tfe.TFE_EnableCollectiveOps', 'pywrap_tfe.TFE_EnableCollectiveOps', (['self._context_handle', 'server_def_str'], {}), '(self._context_handle, server_def_str)\n', (29557, 29595), False, 'from tensorflow.python import pywrap_tfe\n'), ((37179, 37214), 'tensorflow.python.eager.executor.new_executor', 'executor.new_executor', (['enable_async'], {}), '(enable_async)\n', (37200, 37214), False, 'from tensorflow.python.eager import executor\n'), ((44110, 44177), 'tensorflow.core.protobuf.config_pb2.GPUOptions.Experimental', 'config_pb2.GPUOptions.Experimental', ([], {'virtual_devices': 'virtual_devices'}), '(virtual_devices=virtual_devices)\n', (44144, 44177), False, 'from 
tensorflow.core.protobuf import config_pb2\n'), ((66076, 66172), 'tensorflow.python.pywrap_tfe.TFE_ContextSetThreadLocalDevicePlacementPolicy', 'pywrap_tfe.TFE_ContextSetThreadLocalDevicePlacementPolicy', (['self._handle', 'self._device_policy'], {}), '(self._handle,\n self._device_policy)\n', (66133, 66172), False, 'from tensorflow.python import pywrap_tfe\n'), ((18243, 18257), 'numpy.array', 'np.array', (['seed'], {}), '(seed)\n', (18251, 18257), True, 'import numpy as np\n'), ((19517, 19547), 'tensorflow.python.framework.device.canonical_name', 'pydev.canonical_name', (['dev_name'], {}), '(dev_name)\n', (19537, 19547), True, 'from tensorflow.python.framework import device as pydev\n'), ((20793, 20878), 'tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetDevicePlacementPolicy', 'pywrap_tfe.TFE_ContextOptionsSetDevicePlacementPolicy', (['opts', 'self._device_policy'], {}), '(opts, self._device_policy\n )\n', (20846, 20878), False, 'from tensorflow.python import pywrap_tfe\n'), ((20946, 21023), 'tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetMirroringPolicy', 'pywrap_tfe.TFE_ContextOptionsSetMirroringPolicy', (['opts', 'self._mirroring_policy'], {}), '(opts, self._mirroring_policy)\n', (20993, 21023), False, 'from tensorflow.python import pywrap_tfe\n'), ((21093, 21142), 'tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetAsync', 'pywrap_tfe.TFE_ContextOptionsSetAsync', (['opts', '(True)'], {}), '(opts, True)\n', (21130, 21142), False, 'from tensorflow.python import pywrap_tfe\n'), ((21192, 21250), 'tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetTfrt', 'pywrap_tfe.TFE_ContextOptionsSetTfrt', (['opts', 'self._use_tfrt'], {}), '(opts, self._use_tfrt)\n', (21228, 21250), False, 'from tensorflow.python import pywrap_tfe\n'), ((21416, 21517), 'tensorflow.python.pywrap_tfe.TFE_ContextOptionsSetTfrtDistributedRuntime', 'pywrap_tfe.TFE_ContextOptionsSetTfrtDistributedRuntime', (['opts', 'self._use_tfrt_distributed_runtime'], {}), '(opts, self.\n 
_use_tfrt_distributed_runtime)\n', (21470, 21517), False, 'from tensorflow.python import pywrap_tfe\n'), ((22364, 22430), 'tensorflow.python.pywrap_tfe.TFE_EnableCollectiveOps', 'pywrap_tfe.TFE_EnableCollectiveOps', (['context_handle', 'server_def_str'], {}), '(context_handle, server_def_str)\n', (22398, 22430), False, 'from tensorflow.python import pywrap_tfe\n'), ((27730, 27769), 'tensorflow.python.client.pywrap_tf_session.TF_GetBuffer', 'pywrap_tf_session.TF_GetBuffer', (['buffer_'], {}), '(buffer_)\n', (27760, 27769), False, 'from tensorflow.python.client import pywrap_tf_session\n'), ((36376, 36415), 'tensorflow.python.client.pywrap_tf_session.TF_GetBuffer', 'pywrap_tf_session.TF_GetBuffer', (['buffer_'], {}), '(buffer_)\n', (36406, 36415), False, 'from tensorflow.python.client import pywrap_tf_session\n'), ((43513, 43617), 'tensorflow.core.protobuf.config_pb2.GPUOptions.Experimental.VirtualDevices', 'config_pb2.GPUOptions.Experimental.VirtualDevices', ([], {'memory_limit_mb': 'device_limits', 'priority': 'priority'}), '(memory_limit_mb=\n device_limits, priority=priority)\n', (43562, 43617), False, 'from tensorflow.core.protobuf import config_pb2\n'), ((70534, 70579), 'tensorflow.python.framework.device.DeviceSpec.from_string', 'pydev.DeviceSpec.from_string', (['new_device_name'], {}), '(new_device_name)\n', (70562, 70579), True, 'from tensorflow.python.framework import device as pydev\n'), ((70955, 70987), 'tensorflow.python.framework.device.DeviceSpec.from_string', 'pydev.DeviceSpec.from_string', (['""""""'], {}), "('')\n", (70983, 70987), True, 'from tensorflow.python.framework import device as pydev\n'), ((70636, 70662), 'copy.copy', 'copy.copy', (['old_device_spec'], {}), '(old_device_spec)\n', (70645, 70662), False, 'import copy\n'), ((70740, 70793), 'tensorflow.python.framework.device.DeviceSpec.from_string', 'pydev.DeviceSpec.from_string', (['ctx._context_devices[0]'], {}), '(ctx._context_devices[0])\n', (70768, 70793), True, 'from 
tensorflow.python.framework import device as pydev\n'), ((6527, 6551), 'tensorflow.core.protobuf.config_pb2.ConfigProto', 'config_pb2.ConfigProto', ([], {}), '()\n', (6549, 6551), False, 'from tensorflow.core.protobuf import config_pb2\n')] |
# Helper script for making run-time plots.
#
# Requires a Python installation with the full numeric stack (Numpy, Matplotlib)
# including Seaborn (for prettier plots).
#
# <NAME> [http://eli.thegreenplace.net]
# This code is in the public domain.
import numpy as np
import matplotlib.pyplot as plt
import seaborn
launch_0mb = (5.5, 22.3)
launch_2mb = (5.5, 34.5)
launch_4mb = (5.5, 44.7)
launch_8mb = (5.5, 66.8)
N = len(launch_0mb)
ind = np.arange(N) # the x locations for the groups
width = 0.13 # the width of the bars
fig, ax = plt.subplots()
rects4 = ax.bar(ind, launch_0mb, width, color='#7c9acc')
rects5 = ax.bar(ind + 1 * width, launch_2mb, width, color='#5c8add')
rects6 = ax.bar(ind + 2 * width, launch_4mb, width, color='#3c7aee')
rects7 = ax.bar(ind + 3 * width, launch_8mb, width, color='#1c6aff')
# add some text for labels, title and axes ticks
ax.set_ylabel('Launch-time (usec)', fontsize=14)
ax.set_xticks(ind + 2 * width)
ax.set_xticklabels(('thread', 'fork'), fontsize=14)
ax.legend((
rects4[0],
rects5[0],
rects6[0],
rects7[0],
),
(
'0 MB',
'2 MB',
'4 MB',
'8 MB',
), fontsize=14, loc='best')
fig = plt.gcf()
fig.set_tight_layout(True)
fig.set_size_inches((8, 6))
plt.savefig('plot-launch.png', dpi=80)
plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((442, 454), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (451, 454), True, 'import numpy as np\n'), ((544, 558), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (556, 558), True, 'import matplotlib.pyplot as plt\n'), ((1260, 1269), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1267, 1269), True, 'import matplotlib.pyplot as plt\n'), ((1326, 1364), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot-launch.png"""'], {'dpi': '(80)'}), "('plot-launch.png', dpi=80)\n", (1337, 1364), True, 'import matplotlib.pyplot as plt\n'), ((1366, 1376), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1374, 1376), True, 'import matplotlib.pyplot as plt\n')] |
################################################################################
# System dependencies
################################################################################
import numpy as np
from joblib import Parallel
from joblib import delayed
from itertools import product
################################################################################
# Local dependencies
################################################################################
from org.gesis.model.DPAH import DPAH
from org.gesis.lib import graph
################################################################################
# Functions
################################################################################
def get_metadata(g, steps, njobs=1, verbose=False, seed=None):
'''
Retrieves information form the given graph.
- N number of nodes
- fm fraction of minorities (node attribute from graph.graph['label']
- d density
- plo_* power law out-degree distribution from (M) majority and (m) minority
- pli_* power law in-degree distribution from (M) majority and (m) minority
- E** fraction of types of edges, e.g., EMM between majorities
- _N number of nodes used for synthetic network
- _d degree used for synthetic network
- _mindiff minimum difference found between E** from real network and synthetic network.
'''
N = g.number_of_nodes()
fm = graph.get_minority_fraction(g)
d = graph.get_density(g)
plo_M, plo_m = graph.get_outdegree_powerlaw_exponents(g)
plo_M = plo_M.power_law.alpha
plo_m = plo_m.power_law.alpha
pli_M, pli_m = graph.get_indegree_powerlaw_exponents(g)
pli_M = pli_M.power_law.alpha
pli_m = pli_m.power_law.alpha
EMM, EMm, EmM, Emm = graph.get_edge_type_counts(g, True)
_N = 500
_d = 3/_N
hMM, hmm, _mindiff = infer_homophily_MLE(_N, fm, _d, plo_M, plo_m, EMM, EMm, EmM, Emm, steps, njobs, verbose, seed)
return N, fm, d, plo_M, plo_m, pli_M, pli_m, EMM, EMm, EmM, Emm, hMM, hmm, _N, _d, _mindiff
def infer_homophily_MLE(N, fm, d, plo_M, plo_m, EMM, EMm, EmM, Emm, steps=0.05, njobs=1, verbose=False, seed=None):
'''
Infers the homophily value of a network given the DPAH model.
'''
h = np.arange(0.0, 1.0+steps, steps)
hval = []
diff = []
if N < (1/d):
N = int(round(1/d))
if verbose:
print("N={} d={} fm={}".format(N, d, fm))
results = Parallel(n_jobs=njobs)(delayed(_infer_homophily_MLE)(N, fm, d, plo_M, plo_m,
EMM, EMm, EmM, Emm, hMM, hmm, verbose, i)
for i,(hMM,hmm) in enumerate(product(h,h)) )
hMM, hmm, diff = zip(*results)
mindiff = min(diff)
if verbose:
print("Minimum difference: {}".format(mindiff))
mi = diff.index(mindiff)
return hMM[mi], hmm[mi], mindiff
def _infer_homophily_MLE(N, fm, d, plo_M, plo_m, EMM, EMm, EmM, Emm, hMM, hmm, verbose, seed):
'''
Handler for infer_homophily_MLE to work in parallel.
'''
hMM = round(hMM,2)
hmm = round(hmm,2)
g = DPAH(N=N, fm=fm, d=d, plo_M=plo_M, plo_m=plo_m, h_MM=hMM, h_mm=hmm, verbose=False, seed=seed)
eMM, eMm, emM, emm = graph.get_edge_type_counts(g, True)
diff = abs(eMM-EMM)+abs(eMm-EMm)+abs(emM-EmM)+abs(emm-Emm)
if verbose:
print("hMM:{} hmm:{} diff:{}".format(hMM, hmm, diff))
return (hMM, hmm, diff) | [
"org.gesis.lib.graph.get_density",
"itertools.product",
"org.gesis.lib.graph.get_outdegree_powerlaw_exponents",
"org.gesis.lib.graph.get_minority_fraction",
"joblib.Parallel",
"org.gesis.model.DPAH.DPAH",
"org.gesis.lib.graph.get_indegree_powerlaw_exponents",
"joblib.delayed",
"numpy.arange",
"org... | [((1412, 1442), 'org.gesis.lib.graph.get_minority_fraction', 'graph.get_minority_fraction', (['g'], {}), '(g)\n', (1439, 1442), False, 'from org.gesis.lib import graph\n'), ((1451, 1471), 'org.gesis.lib.graph.get_density', 'graph.get_density', (['g'], {}), '(g)\n', (1468, 1471), False, 'from org.gesis.lib import graph\n'), ((1496, 1537), 'org.gesis.lib.graph.get_outdegree_powerlaw_exponents', 'graph.get_outdegree_powerlaw_exponents', (['g'], {}), '(g)\n', (1534, 1537), False, 'from org.gesis.lib import graph\n'), ((1630, 1670), 'org.gesis.lib.graph.get_indegree_powerlaw_exponents', 'graph.get_indegree_powerlaw_exponents', (['g'], {}), '(g)\n', (1667, 1670), False, 'from org.gesis.lib import graph\n'), ((1769, 1804), 'org.gesis.lib.graph.get_edge_type_counts', 'graph.get_edge_type_counts', (['g', '(True)'], {}), '(g, True)\n', (1795, 1804), False, 'from org.gesis.lib import graph\n'), ((2264, 2298), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0 + steps)', 'steps'], {}), '(0.0, 1.0 + steps, steps)\n', (2273, 2298), True, 'import numpy as np\n'), ((3200, 3298), 'org.gesis.model.DPAH.DPAH', 'DPAH', ([], {'N': 'N', 'fm': 'fm', 'd': 'd', 'plo_M': 'plo_M', 'plo_m': 'plo_m', 'h_MM': 'hMM', 'h_mm': 'hmm', 'verbose': '(False)', 'seed': 'seed'}), '(N=N, fm=fm, d=d, plo_M=plo_M, plo_m=plo_m, h_MM=hMM, h_mm=hmm, verbose\n =False, seed=seed)\n', (3204, 3298), False, 'from org.gesis.model.DPAH import DPAH\n'), ((3319, 3354), 'org.gesis.lib.graph.get_edge_type_counts', 'graph.get_edge_type_counts', (['g', '(True)'], {}), '(g, True)\n', (3345, 3354), False, 'from org.gesis.lib import graph\n'), ((2474, 2496), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'njobs'}), '(n_jobs=njobs)\n', (2482, 2496), False, 'from joblib import Parallel\n'), ((2497, 2526), 'joblib.delayed', 'delayed', (['_infer_homophily_MLE'], {}), '(_infer_homophily_MLE)\n', (2504, 2526), False, 'from joblib import delayed\n'), ((2736, 2749), 'itertools.product', 'product', (['h', 'h'], {}), '(h, h)\n', 
(2743, 2749), False, 'from itertools import product\n')] |
import os
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
class DenseCRF:
def __init__(self):
# self.gauss_sxy = 3
# self.gauss_compat = 30
# self.bilat_sxy = 10
# self.bilat_srgb = 20
# self.bilat_compat = 50
# self.n_infer = 5
self.gauss_sxy = 3
self.gauss_compat = 30
self.bilat_sxy = 10
self.bilat_srgb = 20
self.bilat_compat = 50
self.n_infer = 5
def load_config(self, path):
if os.path.exists(path):
config = np.load(path)
self.gauss_sxy, self.gauss_compat, self.bilat_sxy, self.bilat_srgb, self.bilat_compat, self.n_config = \
config[0]
else:
print('Warning: dense CRF config file ' + path + ' does not exist - using defaults')
def process(self, probs, images):
# Set up variable sizes
num_input_images = probs.shape[0]
num_classes = probs.shape[1]
size = images.shape[1:3]
crf = np.zeros((num_input_images, num_classes, size[0], size[1]))
for iter_input_image in range(num_input_images):
pass_class_inds = np.where(np.sum(np.sum(probs[iter_input_image], axis=1), axis=1) > 0)
# Set up dense CRF 2D
d = dcrf.DenseCRF2D(size[1], size[0], len(pass_class_inds[0]))
if len(pass_class_inds[0]) > 0:
cur_probs = probs[iter_input_image, pass_class_inds[0]]
# Unary energy
U = np.ascontiguousarray(unary_from_softmax(cur_probs))
d.setUnaryEnergy(U)
# Penalize small, isolated segments
# (sxy are PosXStd, PosYStd)
d.addPairwiseGaussian(sxy=self.gauss_sxy, compat=self.gauss_compat)
# Incorporate local colour-dependent features
# (sxy are Bi_X_Std and Bi_Y_Std,
# srgb are Bi_R_Std, Bi_G_Std, Bi_B_Std)
d.addPairwiseBilateral(sxy=self.bilat_sxy, srgb=self.bilat_srgb, rgbim=np.uint8(images[iter_input_image]),
compat=self.bilat_compat)
# Do inference
Q = d.inference(self.n_infer)
crf[iter_input_image, pass_class_inds] = np.array(Q).reshape((len(pass_class_inds[0]), size[0], size[1]))
maxconf_crf = np.argmax(crf, axis=1)
return maxconf_crf | [
"numpy.uint8",
"os.path.exists",
"matplotlib.use",
"numpy.argmax",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"pydensecrf.utils.unary_from_softmax",
"numpy.load"
] | [((130, 153), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (144, 153), False, 'import matplotlib\n'), ((627, 647), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (641, 647), False, 'import os\n'), ((1131, 1190), 'numpy.zeros', 'np.zeros', (['(num_input_images, num_classes, size[0], size[1])'], {}), '((num_input_images, num_classes, size[0], size[1]))\n', (1139, 1190), True, 'import numpy as np\n'), ((2472, 2494), 'numpy.argmax', 'np.argmax', (['crf'], {'axis': '(1)'}), '(crf, axis=1)\n', (2481, 2494), True, 'import numpy as np\n'), ((670, 683), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (677, 683), True, 'import numpy as np\n'), ((1645, 1674), 'pydensecrf.utils.unary_from_softmax', 'unary_from_softmax', (['cur_probs'], {}), '(cur_probs)\n', (1663, 1674), False, 'from pydensecrf.utils import unary_from_softmax\n'), ((1294, 1333), 'numpy.sum', 'np.sum', (['probs[iter_input_image]'], {'axis': '(1)'}), '(probs[iter_input_image], axis=1)\n', (1300, 1333), True, 'import numpy as np\n'), ((2150, 2184), 'numpy.uint8', 'np.uint8', (['images[iter_input_image]'], {}), '(images[iter_input_image])\n', (2158, 2184), True, 'import numpy as np\n'), ((2385, 2396), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (2393, 2396), True, 'import numpy as np\n')] |
import astropy.io.fits as fits
import numpy as np
fname_steve = '/project/projectdirs/desi/users/ameisner/CI/post_install_calibs/CI_master_dark-no_pdu_image.fits'
fname = '/project/projectdirs/desi/users/ameisner/CI/post_install_calibs/CI_master_dark.fits'
hdul_steve = fits.open(fname_steve)
assert(hdul_steve[0].data is None)
assert(len(hdul_steve) == 6)
for extname in ['CIE', 'CIN', 'CIC', 'CIS', 'CIW']:
old = fits.getdata(fname, extname=extname)
new = fits.getdata(fname_steve, extname=extname)
ndiff = np.sum(old != new)
print(len(np.ravel(old)))
print(len(np.ravel(new)))
print(np.median(old), np.median(new))
assert(ndiff == 0)
| [
"numpy.median",
"numpy.sum",
"astropy.io.fits.getdata",
"astropy.io.fits.open",
"numpy.ravel"
] | [((272, 294), 'astropy.io.fits.open', 'fits.open', (['fname_steve'], {}), '(fname_steve)\n', (281, 294), True, 'import astropy.io.fits as fits\n'), ((424, 460), 'astropy.io.fits.getdata', 'fits.getdata', (['fname'], {'extname': 'extname'}), '(fname, extname=extname)\n', (436, 460), True, 'import astropy.io.fits as fits\n'), ((471, 513), 'astropy.io.fits.getdata', 'fits.getdata', (['fname_steve'], {'extname': 'extname'}), '(fname_steve, extname=extname)\n', (483, 513), True, 'import astropy.io.fits as fits\n'), ((526, 544), 'numpy.sum', 'np.sum', (['(old != new)'], {}), '(old != new)\n', (532, 544), True, 'import numpy as np\n'), ((616, 630), 'numpy.median', 'np.median', (['old'], {}), '(old)\n', (625, 630), True, 'import numpy as np\n'), ((632, 646), 'numpy.median', 'np.median', (['new'], {}), '(new)\n', (641, 646), True, 'import numpy as np\n'), ((560, 573), 'numpy.ravel', 'np.ravel', (['old'], {}), '(old)\n', (568, 573), True, 'import numpy as np\n'), ((590, 603), 'numpy.ravel', 'np.ravel', (['new'], {}), '(new)\n', (598, 603), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 19 15:12:52 2020
@author: <NAME>
@description: Examples of the 2DES modules
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import erfc
import yaml
from matplotlib import cm
from numpy import sqrt
import sys
sys.path.append(r'C:\Users\Bing\Google Drive\lime')
sys.path.append(r'/Users/bing/Google Drive/lime')
from lime.phys import dag, lorentzian, pauli, norm, ket2dm, obs, transform
from lime.style import matplot, set_style
from lime.style import linecolors as lc
from lime.units import au2ev, au2fs, au2kev, au2as, au2nm, au2wavenumber
from lime.cavity import Cavity, Polariton
from lime.optics import Pulse
#from lime.oqs import Oqs
from lime.mol import Mol
from lime.signal.sos import linear_absorption, GSB, SE, ESA, DQC_R1, DQC_R2
import lime.signal.liouville as so
N = 3 # number of states
E = [0, 1, 1.5]
ham = np.diagflat(E)
dip = np.zeros((N,N))
dip[0,1] = dip[1,0] = 1.
dip[1,2] = dip[2,1] = 1.
print('number of molecular states = {}'.format(N))
mol = Mol(ham, dip) # number of single-polariton states
pump = np.linspace(0, 2)
probe = np.linspace(0, 2)
omega_min = pump.min()
omega_max = pump.max()
gamma = np.array([0.1, ] * N)
gamma[0] = 0. # ground state has infinity lifetime
e_idx = [1]
f_idx = [2]
R1 = DQC_R1(E, dip, omega1 = pump, omega2 = 2. * probe,\
tau3=1e-6, g_idx=[0], e_idx=e_idx, f_idx = f_idx, \
gamma=gamma)
R2 = DQC_R2(E, dip, omega1 = pump, omega2 = 2. * probe,\
tau3=1e-6, g_idx=[0], e_idx=e_idx, f_idx = f_idx, \
gamma=gamma)
#np.savez('DQC12', R1=R1, R2=R2)
R1a = DQC_R1(E, dip, omega2 = 2. * pump, omega3 = probe, tau1=1e-6, \
g_idx=[0], e_idx=e_idx, f_idx = f_idx, gamma=gamma)
R2b = DQC_R2(E, dip, omega2 = 2. * pump, omega3 = probe, tau1=1e-6, \
g_idx=[0], e_idx=e_idx, f_idx = f_idx, gamma=gamma)
#np.savez('DQC23', R1=R1a, R2=R2b)
from scipy import ndimage
fig, ax = plt.subplots()
signal = np.abs(R2b)
print(len(pump), len(probe))
signal = ndimage.zoom(signal, 3)
scale = np.amax((signal))
signal /= scale
print('Signal is scaled by {}'.format(scale))
extent = [2. * omega_min, 2. * omega_max, omega_min, omega_max]
# im = ax.imshow(signal, interpolation='bilinear', cmap=cm.RdBu_r,\
# extent=extent, origin='lower', norm=norm,
# vmax=0.2, vmin=-0.5, aspect=1) #-abs(SPE).max())
# levels = p.append(np.linspace(-1, -0.1, 15), np.linspace(0.1, 1, 15))
levels = np.linspace(0.01, 1, 20)
pump = np.linspace(0, 4, 150)
probe = np.linspace(0, 2, 150)
im = ax.contour(signal.T, levels=levels, cmap=cm.Blues,\
origin='lower', extent=extent)
ax.set_xlabel(r'$\Omega_2$/eV')
ax.set_ylabel(r'$\Omega_3$/eV')
| [
"numpy.abs",
"numpy.amax",
"scipy.ndimage.zoom",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"lime.signal.sos.DQC_R1",
"lime.signal.sos.DQC_R2",
"matplotlib.pyplot.subplots",
"numpy.diagflat",
"sys.path.append",
"lime.mol.Mol"
] | [((314, 368), 'sys.path.append', 'sys.path.append', (['"""C:\\\\Users\\\\Bing\\\\Google Drive\\\\lime"""'], {}), "('C:\\\\Users\\\\Bing\\\\Google Drive\\\\lime')\n", (329, 368), False, 'import sys\n'), ((366, 414), 'sys.path.append', 'sys.path.append', (['"""/Users/bing/Google Drive/lime"""'], {}), "('/Users/bing/Google Drive/lime')\n", (381, 414), False, 'import sys\n'), ((944, 958), 'numpy.diagflat', 'np.diagflat', (['E'], {}), '(E)\n', (955, 958), True, 'import numpy as np\n'), ((965, 981), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (973, 981), True, 'import numpy as np\n'), ((1094, 1107), 'lime.mol.Mol', 'Mol', (['ham', 'dip'], {}), '(ham, dip)\n', (1097, 1107), False, 'from lime.mol import Mol\n'), ((1161, 1178), 'numpy.linspace', 'np.linspace', (['(0)', '(2)'], {}), '(0, 2)\n', (1172, 1178), True, 'import numpy as np\n'), ((1187, 1204), 'numpy.linspace', 'np.linspace', (['(0)', '(2)'], {}), '(0, 2)\n', (1198, 1204), True, 'import numpy as np\n'), ((1263, 1282), 'numpy.array', 'np.array', (['([0.1] * N)'], {}), '([0.1] * N)\n', (1271, 1282), True, 'import numpy as np\n'), ((1372, 1485), 'lime.signal.sos.DQC_R1', 'DQC_R1', (['E', 'dip'], {'omega1': 'pump', 'omega2': '(2.0 * probe)', 'tau3': '(1e-06)', 'g_idx': '[0]', 'e_idx': 'e_idx', 'f_idx': 'f_idx', 'gamma': 'gamma'}), '(E, dip, omega1=pump, omega2=2.0 * probe, tau3=1e-06, g_idx=[0],\n e_idx=e_idx, f_idx=f_idx, gamma=gamma)\n', (1378, 1485), False, 'from lime.signal.sos import linear_absorption, GSB, SE, ESA, DQC_R1, DQC_R2\n'), ((1525, 1638), 'lime.signal.sos.DQC_R2', 'DQC_R2', (['E', 'dip'], {'omega1': 'pump', 'omega2': '(2.0 * probe)', 'tau3': '(1e-06)', 'g_idx': '[0]', 'e_idx': 'e_idx', 'f_idx': 'f_idx', 'gamma': 'gamma'}), '(E, dip, omega1=pump, omega2=2.0 * probe, tau3=1e-06, g_idx=[0],\n e_idx=e_idx, f_idx=f_idx, gamma=gamma)\n', (1531, 1638), False, 'from lime.signal.sos import linear_absorption, GSB, SE, ESA, DQC_R1, DQC_R2\n'), ((1715, 1828), 'lime.signal.sos.DQC_R1', 'DQC_R1', 
(['E', 'dip'], {'omega2': '(2.0 * pump)', 'omega3': 'probe', 'tau1': '(1e-06)', 'g_idx': '[0]', 'e_idx': 'e_idx', 'f_idx': 'f_idx', 'gamma': 'gamma'}), '(E, dip, omega2=2.0 * pump, omega3=probe, tau1=1e-06, g_idx=[0],\n e_idx=e_idx, f_idx=f_idx, gamma=gamma)\n', (1721, 1828), False, 'from lime.signal.sos import linear_absorption, GSB, SE, ESA, DQC_R1, DQC_R2\n'), ((1859, 1972), 'lime.signal.sos.DQC_R2', 'DQC_R2', (['E', 'dip'], {'omega2': '(2.0 * pump)', 'omega3': 'probe', 'tau1': '(1e-06)', 'g_idx': '[0]', 'e_idx': 'e_idx', 'f_idx': 'f_idx', 'gamma': 'gamma'}), '(E, dip, omega2=2.0 * pump, omega3=probe, tau1=1e-06, g_idx=[0],\n e_idx=e_idx, f_idx=f_idx, gamma=gamma)\n', (1865, 1972), False, 'from lime.signal.sos import linear_absorption, GSB, SE, ESA, DQC_R1, DQC_R2\n'), ((2070, 2084), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2082, 2084), True, 'import matplotlib.pyplot as plt\n'), ((2095, 2106), 'numpy.abs', 'np.abs', (['R2b'], {}), '(R2b)\n', (2101, 2106), True, 'import numpy as np\n'), ((2147, 2170), 'scipy.ndimage.zoom', 'ndimage.zoom', (['signal', '(3)'], {}), '(signal, 3)\n', (2159, 2170), False, 'from scipy import ndimage\n'), ((2180, 2195), 'numpy.amax', 'np.amax', (['signal'], {}), '(signal)\n', (2187, 2195), True, 'import numpy as np\n'), ((2596, 2620), 'numpy.linspace', 'np.linspace', (['(0.01)', '(1)', '(20)'], {}), '(0.01, 1, 20)\n', (2607, 2620), True, 'import numpy as np\n'), ((2629, 2651), 'numpy.linspace', 'np.linspace', (['(0)', '(4)', '(150)'], {}), '(0, 4, 150)\n', (2640, 2651), True, 'import numpy as np\n'), ((2660, 2682), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(150)'], {}), '(0, 2, 150)\n', (2671, 2682), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""A python script to perform watermark embedding/detection
on the basis of singular value decomposition (SVD) and cepstrum method."""
# Copyright (C) 2020 by <NAME>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from acoustics.cepstrum import inverse_complex_cepstrum
from scipy.io import wavfile
import numpy as np
import pickle
HOST_SIGNAL_FILE = "host.wav" # 透かし埋め込み先のファイル
WATERMARK_SIGNAL_FILE = "wmed_signal.wav" # 透かしを埋め込んだファイル
WATERMARK_U_FILE = 'svd_left.dat' # 特異値分解の左側の行列
WATERMARK_D_FILE = 'svd_center.dat' # 特異値分解の真ん中の行列
WATERMARK_V_FILE = 'svd_right.dat' # 特異値分解の右側の行列
WATERMARK_ORIGINAL_FILE = 'watermark_ori.dat' # オリジナルの透かし信号
REP_CODE = True # 繰り返し埋め込みを使う
FRAME_LENGTH = 2048 # フレーム長
FFT_LENGTH = FRAME_LENGTH
HOP_LENGTH = 80
CONTROL_STRENGTH = 0.001 # 埋め込み強度
NUM_REPS = 3 # 埋め込みの繰り返し数
THRESHOLD = 0.0
LOG_FLOOR = 0.00001 # 対数のフロア値
def complex_cepstrum(x, n=None):
"""
Compute the complex cepstrum of a real sequence.
borrowed from http://python-acoustics.github.io/python-acoustics
"""
def _unwrap(phase):
samples = phase.shape[-1]
unwrapped = np.unwrap(phase)
center = (samples + 1) // 2
if samples == 1:
center = 0
ndelay = np.array(np.round(unwrapped[..., center] / np.pi))
unwrapped -= np.pi * ndelay[..., None] * np.arange(samples) / center
return unwrapped, ndelay
spectrum = np.fft.fft(x, n=n)
unwrapped_phase, ndelay = _unwrap(np.angle(spectrum))
log_spectrum = np.log(np.abs(spectrum) + LOG_FLOOR) + 1j * unwrapped_phase
ceps = np.fft.ifft(log_spectrum).real
return ceps, ndelay
def fix(xs):
"""
A emuration of MATLAB 'fix' function.
borrowed from https://ideone.com/YjJwOh
"""
# res = [np.floor(e) if e >= 0 else np.ceil(e) for e in xs]
if xs >= 0:
res = np.floor(xs)
else:
res = np.ceil(xs)
return res
def embed():
    """Embed a random watermark into HOST_SIGNAL_FILE and save the result.

    Frames the host signal, extracts the complex cepstrum of every
    frame, embeds a random bit sequence into the off-diagonal entries
    of the SVD of the cepstrum matrix, and resynthesises the audio.
    The SVD factors and the original watermark bits are pickled to
    disk so that detect() can recover the watermark later.

    NOTE(review): assumes the wav file is mono (1-D samples) — confirm.
    """
    sr, host_signal = wavfile.read(HOST_SIGNAL_FILE)
    host_signal = host_signal.astype(np.float64)
    signal_len = len(host_signal)
    # Frame shift (hop size)
    frame_shift = HOP_LENGTH
    # Overlap length with the adjacent frame
    overlap_length = FRAME_LENGTH - HOP_LENGTH
    # Total number of frames
    n_frames = int(fix((signal_len - overlap_length) / frame_shift))
    # Extract the complex cepstrum of every frame
    pointer = 0
    ceps_mat = np.zeros((n_frames, FRAME_LENGTH))
    ndelay_vec = np.zeros(n_frames)
    for i in range(n_frames):
        frame = host_signal[pointer: (pointer + FRAME_LENGTH)]
        # Complex cepstrum of this frame
        real_ceps, ndelay = complex_cepstrum(frame)
        ceps_mat[i, :] = real_ceps
        ndelay_vec[i] = ndelay
        pointer = pointer + frame_shift
    # Singular value decomposition of the cepstrum matrix
    U, D, V = np.linalg.svd(ceps_mat, full_matrices=False)
    # Indices of all off-diagonal entries of diag(D)
    off_diag_index = np.where(~np.eye(np.diag(D).shape[0], dtype=bool))
    embed_nbit = len(off_diag_index[0])
    if REP_CODE:
        # Effective number of embeddable bits with repetition coding
        effective_nbit = int(np.floor(embed_nbit / NUM_REPS))
        embed_nbit = int(effective_nbit * NUM_REPS)
    else:
        effective_nbit = embed_nbit
    # Create the original watermark (a sequence of 0/1 bits)
    wmark_original = np.random.randint(2, size=effective_nbit)
    # Save the original watermark for later detection
    with open(WATERMARK_ORIGINAL_FILE, 'wb') as f:
        pickle.dump(wmark_original, f)
    wmark_original = 2 * wmark_original - 1 # map {0, 1} to {-1, +1}
    # Expand the watermark by repetition coding
    if REP_CODE:
        wmark_extended = np.repeat(wmark_original, NUM_REPS)
    else:
        wmark_extended = wmark_original
    # Embedding strength
    alpha = CONTROL_STRENGTH
    # Embed the watermark bits into the off-diagonal entries
    wmed_D = np.diag(D)
    row_index = off_diag_index[0]
    col_index = off_diag_index[1]
    for i in range(embed_nbit):
        wmed_D[row_index[i], col_index[i]] = alpha * wmark_extended[i]
    # SVD of the watermarked matrix
    Uw, Dw, Vw = np.linalg.svd(wmed_D, full_matrices=False)
    # Save the factors needed by detect()
    with open(WATERMARK_U_FILE, 'wb') as f:
        pickle.dump(Uw, f)
    with open(WATERMARK_D_FILE, 'wb') as f:
        pickle.dump(D, f)
    with open(WATERMARK_V_FILE, 'wb') as f:
        pickle.dump(Vw, f)
    # Reconstruct the (watermarked) cepstrum matrix
    wmed_ceps = U @ np.diag(Dw) @ V
    # Convert the watermarked cepstra back to audio
    wmed_signal = np.zeros((frame_shift * n_frames)) # watermarked signal
    for i in range(n_frames):
        # Inverse transform
        wmarked_frame = inverse_complex_cepstrum(wmed_ceps[i, :], ndelay_vec[i])
        wmed_signal[frame_shift * i:
                    frame_shift * (i + 1)] = wmarked_frame[0:frame_shift]
    # Append the untouched remainder of the host signal
    wmed_signal = np.concatenate(
        (wmed_signal, host_signal[len(wmed_signal): signal_len]))
    # Save the watermarked signal as a wav file
    wmed_signal = wmed_signal.astype(np.int16) # convert float into integer
    wavfile.write(WATERMARK_SIGNAL_FILE, sr, wmed_signal)
def detect():
    """Recover the watermark from WATERMARK_SIGNAL_FILE and report quality.

    Re-frames the watermarked audio, extracts the complex cepstrum,
    takes its SVD and reconstructs the embedded off-diagonal matrix
    from the factors saved by embed().  Prints the bit error rate
    (BER), the signal-to-noise ratio (SNR) of the watermarked audio
    against the host, and the embedding rate in bits per second.
    """
    # Load the host signal
    sr, host_signal = wavfile.read(HOST_SIGNAL_FILE)
    # Open the watermarked audio file
    _, eval_signal = wavfile.read(WATERMARK_SIGNAL_FILE)
    eval_signal = eval_signal.astype(np.float64)
    signal_len = len(eval_signal)
    # Frame shift (hop size)
    frame_shift = HOP_LENGTH
    # Overlap length with the adjacent frame
    overlap_length = FRAME_LENGTH - HOP_LENGTH
    # Total number of frames
    n_frames = int(fix((signal_len - overlap_length) / frame_shift))
    # Load the matrices that were used for embedding
    with open(WATERMARK_U_FILE, 'rb') as f:
        Uw = pickle.load(f)
    with open(WATERMARK_D_FILE, 'rb') as f:
        D = pickle.load(f)
    with open(WATERMARK_V_FILE, 'rb') as f:
        Vw = pickle.load(f)
    # Extract the complex cepstrum of every frame
    pointer = 0
    wmed_ceps_mat = np.zeros((n_frames, FRAME_LENGTH))
    ndelay_vec = np.zeros(n_frames)
    for i in range(n_frames):
        frame = eval_signal[pointer: (pointer + FRAME_LENGTH)]
        # Complex cepstrum of this frame
        real_ceps, ndelay = complex_cepstrum(frame)
        wmed_ceps_mat[i, :] = real_ceps
        ndelay_vec[i] = ndelay
        pointer = pointer + frame_shift
    # Load the original watermark
    with open(WATERMARK_ORIGINAL_FILE, 'rb') as f:
        wmark_original = pickle.load(f)
    # Singular value decomposition of the cepstrum matrix
    U, Dw, V = np.linalg.svd(wmed_ceps_mat, full_matrices=False)
    off_diag_index = np.where(~np.eye(np.diag(D).shape[0], dtype=bool))
    embed_nbit = len(off_diag_index[0])
    if REP_CODE:
        # Effective number of embeddable bits with repetition coding
        effective_nbit = int(np.floor(embed_nbit / NUM_REPS))
        embed_nbit = int(effective_nbit * NUM_REPS)
    else:
        effective_nbit = embed_nbit
    # Rebuild the watermarked matrix from the saved factors
    wmed_D = Uw @ np.diag(Dw) @ Vw
    # Embedding strength
    alpha = CONTROL_STRENGTH
    # Strip the diagonal part and rescale to recover the extended watermark
    W = (wmed_D - np.diag(D))
    W = (1.0 / alpha) * W
    # Threshold into bits
    detected_bit = np.zeros((embed_nbit))
    row_index = off_diag_index[0]
    col_index = off_diag_index[1]
    for i in range(embed_nbit):
        if W[row_index[i], col_index[i]] > THRESHOLD:
            detected_bit[i] = 1
    # Undo the repetition coding by majority vote
    wmark_recovered = np.zeros((effective_nbit))
    count = 0
    for i in range(effective_nbit):
        # Average the repeated copies of each bit
        ave = np.sum(detected_bit[count:count + NUM_REPS]) / NUM_REPS
        if ave >= 0.5: # majority
            wmark_recovered[i] = 1
        else:
            wmark_recovered[i] = 0
        count = count + NUM_REPS
    # Report the bit error rate.  The original used ``np.int``, which was
    # removed in NumPy 1.24; the builtin ``int`` is the drop-in replacement.
    denom = int(np.sum(np.abs(wmark_recovered - wmark_original)))
    BER = np.sum(np.abs(wmark_recovered - wmark_original)) / \
        (effective_nbit) * 100
    print(f'bit error rate = {BER:.2f}% ({denom} / {effective_nbit})')
    # Report the SNR of the watermarked signal against the host
    SNR = 10 * np.log10(
        np.sum(np.square(host_signal.astype(np.float32)))
        / np.sum(np.square(host_signal.astype(np.float32)
                            - eval_signal.astype(np.float32))))
    print(f'SNR = {SNR}dB')
    # Report the embedding rate in bits per second
    print('BPS = {:.2f} bps'.format(
        effective_nbit * 2 / (len(host_signal) / sr)))
def main():
    """Embed the watermark into the host signal, then detect it again."""
    embed()
    detect()
# Run only when executed as a script.  The original used
# ``__name__ in '__main__'`` — a substring test that would also fire when
# the module is imported under a name such as "main"; ``==`` is the
# correct comparison.
if __name__ == '__main__':
    main()
| [
"numpy.unwrap",
"numpy.arange",
"acoustics.cepstrum.inverse_complex_cepstrum",
"numpy.repeat",
"numpy.fft.fft",
"numpy.round",
"numpy.abs",
"numpy.ceil",
"numpy.floor",
"pickle.load",
"scipy.io.wavfile.read",
"numpy.linalg.svd",
"numpy.fft.ifft",
"pickle.dump",
"numpy.diag",
"numpy.ang... | [((2147, 2165), 'numpy.fft.fft', 'np.fft.fft', (['x'], {'n': 'n'}), '(x, n=n)\n', (2157, 2165), True, 'import numpy as np\n'), ((2723, 2753), 'scipy.io.wavfile.read', 'wavfile.read', (['HOST_SIGNAL_FILE'], {}), '(HOST_SIGNAL_FILE)\n', (2735, 2753), False, 'from scipy.io import wavfile\n'), ((3090, 3124), 'numpy.zeros', 'np.zeros', (['(n_frames, FRAME_LENGTH)'], {}), '((n_frames, FRAME_LENGTH))\n', (3098, 3124), True, 'import numpy as np\n'), ((3142, 3160), 'numpy.zeros', 'np.zeros', (['n_frames'], {}), '(n_frames)\n', (3150, 3160), True, 'import numpy as np\n'), ((3469, 3513), 'numpy.linalg.svd', 'np.linalg.svd', (['ceps_mat'], {'full_matrices': '(False)'}), '(ceps_mat, full_matrices=False)\n', (3482, 3513), True, 'import numpy as np\n'), ((3883, 3924), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'effective_nbit'}), '(2, size=effective_nbit)\n', (3900, 3924), True, 'import numpy as np\n'), ((4313, 4323), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (4320, 4323), True, 'import numpy as np\n'), ((4533, 4575), 'numpy.linalg.svd', 'np.linalg.svd', (['wmed_D'], {'full_matrices': '(False)'}), '(wmed_D, full_matrices=False)\n', (4546, 4575), True, 'import numpy as np\n'), ((4893, 4925), 'numpy.zeros', 'np.zeros', (['(frame_shift * n_frames)'], {}), '(frame_shift * n_frames)\n', (4901, 4925), True, 'import numpy as np\n'), ((5416, 5469), 'scipy.io.wavfile.write', 'wavfile.write', (['WATERMARK_SIGNAL_FILE', 'sr', 'wmed_signal'], {}), '(WATERMARK_SIGNAL_FILE, sr, wmed_signal)\n', (5429, 5469), False, 'from scipy.io import wavfile\n'), ((5563, 5593), 'scipy.io.wavfile.read', 'wavfile.read', (['HOST_SIGNAL_FILE'], {}), '(HOST_SIGNAL_FILE)\n', (5575, 5593), False, 'from scipy.io import wavfile\n'), ((5639, 5674), 'scipy.io.wavfile.read', 'wavfile.read', (['WATERMARK_SIGNAL_FILE'], {}), '(WATERMARK_SIGNAL_FILE)\n', (5651, 5674), False, 'from scipy.io import wavfile\n'), ((6259, 6293), 'numpy.zeros', 'np.zeros', (['(n_frames, FRAME_LENGTH)'], 
{}), '((n_frames, FRAME_LENGTH))\n', (6267, 6293), True, 'import numpy as np\n'), ((6311, 6329), 'numpy.zeros', 'np.zeros', (['n_frames'], {}), '(n_frames)\n', (6319, 6329), True, 'import numpy as np\n'), ((6758, 6807), 'numpy.linalg.svd', 'np.linalg.svd', (['wmed_ceps_mat'], {'full_matrices': '(False)'}), '(wmed_ceps_mat, full_matrices=False)\n', (6771, 6807), True, 'import numpy as np\n'), ((7338, 7358), 'numpy.zeros', 'np.zeros', (['embed_nbit'], {}), '(embed_nbit)\n', (7346, 7358), True, 'import numpy as np\n'), ((7585, 7609), 'numpy.zeros', 'np.zeros', (['effective_nbit'], {}), '(effective_nbit)\n', (7593, 7609), True, 'import numpy as np\n'), ((1852, 1868), 'numpy.unwrap', 'np.unwrap', (['phase'], {}), '(phase)\n', (1861, 1868), True, 'import numpy as np\n'), ((2204, 2222), 'numpy.angle', 'np.angle', (['spectrum'], {}), '(spectrum)\n', (2212, 2222), True, 'import numpy as np\n'), ((2314, 2339), 'numpy.fft.ifft', 'np.fft.ifft', (['log_spectrum'], {}), '(log_spectrum)\n', (2325, 2339), True, 'import numpy as np\n'), ((2582, 2594), 'numpy.floor', 'np.floor', (['xs'], {}), '(xs)\n', (2590, 2594), True, 'import numpy as np\n'), ((2619, 2630), 'numpy.ceil', 'np.ceil', (['xs'], {}), '(xs)\n', (2626, 2630), True, 'import numpy as np\n'), ((4006, 4036), 'pickle.dump', 'pickle.dump', (['wmark_original', 'f'], {}), '(wmark_original, f)\n', (4017, 4036), False, 'import pickle\n'), ((4155, 4190), 'numpy.repeat', 'np.repeat', (['wmark_original', 'NUM_REPS'], {}), '(wmark_original, NUM_REPS)\n', (4164, 4190), True, 'import numpy as np\n'), ((4643, 4661), 'pickle.dump', 'pickle.dump', (['Uw', 'f'], {}), '(Uw, f)\n', (4654, 4661), False, 'import pickle\n'), ((4714, 4731), 'pickle.dump', 'pickle.dump', (['D', 'f'], {}), '(D, f)\n', (4725, 4731), False, 'import pickle\n'), ((4784, 4802), 'pickle.dump', 'pickle.dump', (['Vw', 'f'], {}), '(Vw, f)\n', (4795, 4802), False, 'import pickle\n'), ((5018, 5074), 'acoustics.cepstrum.inverse_complex_cepstrum', 'inverse_complex_cepstrum', 
(['wmed_ceps[i, :]', 'ndelay_vec[i]'], {}), '(wmed_ceps[i, :], ndelay_vec[i])\n', (5042, 5074), False, 'from acoustics.cepstrum import inverse_complex_cepstrum\n'), ((6046, 6060), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6057, 6060), False, 'import pickle\n'), ((6117, 6131), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6128, 6131), False, 'import pickle\n'), ((6189, 6203), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6200, 6203), False, 'import pickle\n'), ((6706, 6720), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6717, 6720), False, 'import pickle\n'), ((7267, 7277), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (7274, 7277), True, 'import numpy as np\n'), ((1979, 2019), 'numpy.round', 'np.round', (['(unwrapped[..., center] / np.pi)'], {}), '(unwrapped[..., center] / np.pi)\n', (1987, 2019), True, 'import numpy as np\n'), ((3698, 3729), 'numpy.floor', 'np.floor', (['(embed_nbit / NUM_REPS)'], {}), '(embed_nbit / NUM_REPS)\n', (3706, 3729), True, 'import numpy as np\n'), ((4834, 4845), 'numpy.diag', 'np.diag', (['Dw'], {}), '(Dw)\n', (4841, 4845), True, 'import numpy as np\n'), ((6992, 7023), 'numpy.floor', 'np.floor', (['(embed_nbit / NUM_REPS)'], {}), '(embed_nbit / NUM_REPS)\n', (7000, 7023), True, 'import numpy as np\n'), ((7162, 7173), 'numpy.diag', 'np.diag', (['Dw'], {}), '(Dw)\n', (7169, 7173), True, 'import numpy as np\n'), ((7698, 7742), 'numpy.sum', 'np.sum', (['detected_bit[count:count + NUM_REPS]'], {}), '(detected_bit[count:count + NUM_REPS])\n', (7704, 7742), True, 'import numpy as np\n'), ((7950, 7990), 'numpy.abs', 'np.abs', (['(wmark_recovered - wmark_original)'], {}), '(wmark_recovered - wmark_original)\n', (7956, 7990), True, 'import numpy as np\n'), ((2070, 2088), 'numpy.arange', 'np.arange', (['samples'], {}), '(samples)\n', (2079, 2088), True, 'import numpy as np\n'), ((2250, 2266), 'numpy.abs', 'np.abs', (['spectrum'], {}), '(spectrum)\n', (2256, 2266), True, 'import numpy as np\n'), ((8010, 8050), 
'numpy.abs', 'np.abs', (['(wmark_recovered - wmark_original)'], {}), '(wmark_recovered - wmark_original)\n', (8016, 8050), True, 'import numpy as np\n'), ((3553, 3563), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (3560, 3563), True, 'import numpy as np\n'), ((6847, 6857), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (6854, 6857), True, 'import numpy as np\n')] |
### This is test App: manu-hello-world-app##
## ##
from __future__ import division, print_function
# coding=utf-8
import sys
import os
import glob
import re
import numpy as np
import time
from math import ceil
# Keras
from keras.models import load_model
from imageio import imread
from keras.preprocessing import image
from matplotlib import pyplot as plt
# Keras import from local function
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_L2Normalization import L2Normalization
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
###################
###################
## SSD300 model configuration (values must match the trained weights).
# print('Setting parameters and loading our trained Model... ')
img_height = 300 # Height of the model input images
img_width = 300 # Width of the model input images
img_channels = 3 # Number of color channels of the model input images
mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.
swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.
n_classes = 1 # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO
scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets
scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets
scales = scales_pascal
aspect_ratios = [[1.0, 2.0, 0.5],
                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                 [1.0, 2.0, 0.5],
                 [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters
two_boxes_for_ar1 = True
steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.
offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.
clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation
normalize_coords = True
# HACK(review): shadows the ``__file__`` builtin so os.path.dirname(__file__)
# in upload() resolves relative to 'uploads' — consider a dedicated constant.
__file__ = 'uploads'
def save_prediction(img_path, pred_decoded):
    """Render the image at *img_path* to static/preds/predicted_img.jpg.

    Parameters
    ----------
    img_path : str
        Path of the uploaded image to render.
    pred_decoded :
        Decoded detections; unused in this stripped-down version (box
        drawing has been removed) but kept for interface compatibility.

    The previous version also computed ``colors``, ``classes`` and
    ``current_axis`` that were never used; they are removed here.
    """
    orig_image = imread(img_path)
    plt.imshow(orig_image)
    plt.axis('off')
    # Save without the surrounding matplotlib frame/padding.
    plt.savefig('static/preds/predicted_img.jpg', bbox_inches='tight', pad_inches = 0)
    # Release figure state so repeated requests do not leak memory.
    plt.clf()
    plt.cla()
    plt.close()
#################
#################
# Flask application setup.
app = Flask(__name__)
# Disable static-file caching so a freshly generated prediction image is
# always re-fetched by the browser.
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
@app.route('/', methods=['GET'])
def index():
    """Serve the main upload page."""
    # Main page
    return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
    """Handle an image upload and generate the prediction image.

    On POST: saves the uploaded file under ./uploads (with a sanitised
    filename), regenerates static/preds/predicted_img.jpg via
    save_prediction(), deletes the upload, and returns a result string.
    On GET: returns None (no page body).
    """
    if request.method == 'POST':
        # The uploaded file arrives under the form field name 'image'.
        f = request.files['image']
        # Save the file to ./uploads; secure_filename prevents path traversal
        # via a client-supplied filename.
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(
            basepath, 'uploads', secure_filename(f.filename))
        f.save(file_path)
        result = 'No Pool detected in the image.'
        # Remove any stale prediction image first.  Catch only OSError: the
        # old bare ``except`` silently swallowed every exception (including
        # KeyboardInterrupt), hiding real bugs.
        try:
            os.remove('static/preds/predicted_img.jpg')
        except OSError:
            pass
        # Call function to save the predicted image with boundary boxes drawn
        save_prediction(file_path, None)
        # Delete the uploaded image from storage
        os.remove(file_path)
        return result
    return None
# NOTE(review): the two triple-quoted blocks below are dead code kept as
# bare string literals — an earlier "hello world" route and the gevent
# server entry point.  They have no runtime effect; consider deleting them.
'''
@app.route("/")
def hello():
    return "Hello World!"
'''
'''
if __name__ == '__main__':
    # Serve the app with gevent
    # http_server = WSGIServer(('0.0.0.0', 5000), app)
    http_server = WSGIServer(app)
    http_server.serve_forever()
'''
"matplotlib.pyplot.imshow",
"flask.render_template",
"matplotlib.pyplot.savefig",
"flask.Flask",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"os.path.dirname",
"numpy.linspace",
"werkzeug.utils.secure_filename",
"imageio.imread",
"matplotlib.pyplot.axis",
"ma... | [((3346, 3361), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (3351, 3361), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((2960, 2976), 'imageio.imread', 'imread', (['img_path'], {}), '(img_path)\n', (2966, 2976), False, 'from imageio import imread\n'), ((3087, 3109), 'matplotlib.pyplot.imshow', 'plt.imshow', (['orig_image'], {}), '(orig_image)\n', (3097, 3109), True, 'from matplotlib import pyplot as plt\n'), ((3129, 3138), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3136, 3138), True, 'from matplotlib import pyplot as plt\n'), ((3143, 3158), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3151, 3158), True, 'from matplotlib import pyplot as plt\n'), ((3163, 3248), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""static/preds/predicted_img.jpg"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "('static/preds/predicted_img.jpg', bbox_inches='tight', pad_inches=0\n )\n", (3174, 3248), True, 'from matplotlib import pyplot as plt\n'), ((3250, 3259), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3257, 3259), True, 'from matplotlib import pyplot as plt\n'), ((3264, 3273), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3271, 3273), True, 'from matplotlib import pyplot as plt\n'), ((3278, 3289), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3287, 3289), True, 'from matplotlib import pyplot as plt\n'), ((3480, 3509), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (3495, 3509), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((3743, 3768), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3758, 3768), False, 'import os\n'), ((4228, 4248), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (4237, 4248), False, 'import os\n'), ((3836, 3863), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), 
'(f.filename)\n', (3851, 3863), False, 'from werkzeug.utils import secure_filename\n'), ((4056, 4099), 'os.remove', 'os.remove', (['"""static/preds/predicted_img.jpg"""'], {}), "('static/preds/predicted_img.jpg')\n", (4065, 4099), False, 'import os\n'), ((3006, 3038), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(n_classes + 1)'], {}), '(0, 1, n_classes + 1)\n', (3017, 3038), True, 'import numpy as np\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tensor formatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class RichTextLinesTest(test_util.TensorFlowTestCase):
def setUp(self):
np.set_printoptions(
precision=8, threshold=1000, edgeitems=3, linewidth=75)
def _checkTensorMetadata(self, tensor, annotations):
self.assertEqual(
{"dtype": tensor.dtype, "shape": tensor.shape},
annotations["tensor_metadata"])
def _checkBeginIndices(self, expected_indices, annot):
self.assertEqual({tensor_format.BEGIN_INDICES_KEY: expected_indices},
annot)
def _checkOmittedIndices(self, expected_indices, annot):
self.assertEqual({tensor_format.OMITTED_INDICES_KEY: expected_indices},
annot)
def testFormatTensor1DNoEllipsis(self):
a = np.zeros(20)
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 40})
self.assertEqual([
"Tensor \"a\":",
"",
"array([ 0., 0., 0., 0., 0., 0.,",
" 0., 0., 0., 0., 0., 0.,",
" 0., 0., 0., 0., 0., 0.,",
" 0., 0.])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for beginning indices of the lines.
self._checkBeginIndices([0], out.annotations[2])
self._checkBeginIndices([6], out.annotations[3])
self._checkBeginIndices([12], out.annotations[4])
self._checkBeginIndices([18], out.annotations[5])
def testFormatTensor2DNoEllipsisNoRowBreak(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a")
self.assertEqual([
"Tensor \"a\":",
"",
"array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for the beginning indices of the lines.
for i in xrange(2, 6):
self._checkBeginIndices([i - 2, 0], out.annotations[i])
def testFormatTensorSuppressingTensorName(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, None)
self.assertEqual([
"array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for the beginning indices of the lines.
for i in xrange(4):
self._checkBeginIndices([i, 0], out.annotations[i])
def testFormatTensorWithMetadata(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a", include_metadata=True)
self.assertEqual([
"Tensor \"a\":",
" dtype: float64",
" shape: (4, 4)",
"",
"array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for the beginning indices of the lines.
for i in xrange(4, 7):
self._checkBeginIndices([i - 4, 0], out.annotations[i])
def testFormatTensor2DNoEllipsisWithRowBreak(self):
a = np.linspace(0.0, 1.0 - 1.0 / 40.0, 40).reshape([2, 20])
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 50})
self.assertEqual(
{"dtype": a.dtype, "shape": a.shape},
out.annotations["tensor_metadata"])
self.assertEqual([
"Tensor \"a\":",
"",
"array([[ 0. , 0.025, 0.05 , 0.075, 0.1 ,",
" 0.125, 0.15 , 0.175, 0.2 , 0.225,",
" 0.25 , 0.275, 0.3 , 0.325, 0.35 ,",
" 0.375, 0.4 , 0.425, 0.45 , 0.475],",
" [ 0.5 , 0.525, 0.55 , 0.575, 0.6 ,",
" 0.625, 0.65 , 0.675, 0.7 , 0.725,",
" 0.75 , 0.775, 0.8 , 0.825, 0.85 ,",
" 0.875, 0.9 , 0.925, 0.95 , 0.975]])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for the beginning indices of the lines.
self._checkBeginIndices([0, 0], out.annotations[2])
self._checkBeginIndices([0, 5], out.annotations[3])
self._checkBeginIndices([0, 10], out.annotations[4])
self._checkBeginIndices([0, 15], out.annotations[5])
self._checkBeginIndices([1, 0], out.annotations[6])
self._checkBeginIndices([1, 5], out.annotations[7])
self._checkBeginIndices([1, 10], out.annotations[8])
self._checkBeginIndices([1, 15], out.annotations[9])
def testFormatTensor3DNoEllipsis(self): # TODO(cais): Test name.
a = np.linspace(0.0, 1.0 - 1.0 / 24.0, 24).reshape([2, 3, 4])
out = tensor_format.format_tensor(a, "a")
self.assertEqual([
"Tensor \"a\":",
"",
"array([[[ 0. , 0.04166667, 0.08333333, 0.125 ],",
" [ 0.16666667, 0.20833333, 0.25 , 0.29166667],",
" [ 0.33333333, 0.375 , 0.41666667, 0.45833333]],",
"",
" [[ 0.5 , 0.54166667, 0.58333333, 0.625 ],",
" [ 0.66666667, 0.70833333, 0.75 , 0.79166667],",
" [ 0.83333333, 0.875 , 0.91666667, 0.95833333]]])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for beginning indices of the lines.
self._checkBeginIndices([0, 0, 0], out.annotations[2])
self._checkBeginIndices([0, 1, 0], out.annotations[3])
self._checkBeginIndices([0, 2, 0], out.annotations[4])
self.assertNotIn(5, out.annotations)
self._checkBeginIndices([1, 0, 0], out.annotations[6])
self._checkBeginIndices([1, 1, 0], out.annotations[7])
self._checkBeginIndices([1, 2, 0], out.annotations[8])
def testFormatTensorWithEllipses(self):
a = np.zeros([11, 11, 11])
out = tensor_format.format_tensor(
a, "a", False, np_printoptions={"threshold": 100, "edgeitems": 2})
self.assertEqual([
"Tensor \"a\":",
"",
"array([[[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.]],",
"",
" [[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.]],",
"",
" ..., ",
" [[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.]],",
"",
" [[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.]]])",
], out.lines)
self._checkTensorMetadata(a, out.annotations)
# Check annotations for beginning indices of the lines.
for i in xrange(2):
self._checkBeginIndices([i, 0, 0], out.annotations[i * 6 + 2])
self._checkBeginIndices([i, 1, 0], out.annotations[i * 6 + 3])
self._checkOmittedIndices([i, 2, 0], out.annotations[i * 6 + 4])
self._checkBeginIndices([i, 9, 0], out.annotations[i * 6 + 5])
self._checkBeginIndices([i, 10, 0], out.annotations[i * 6 + 6])
self.assertNotIn(i * 6 + 7, out.annotations)
p = 15
for i in xrange(2):
self._checkBeginIndices([9 + i, 0, 0], out.annotations[p + i * 6])
self._checkBeginIndices([9 + i, 1, 0], out.annotations[p + i * 6 + 1])
self._checkOmittedIndices(
[9 + i, 2, 0], out.annotations[p + i * 6 + 2])
self._checkBeginIndices([9 + i, 9, 0], out.annotations[p + i * 6 + 3])
self._checkBeginIndices([9 + i, 10, 0], out.annotations[p + i * 6 + 4])
if i < 1:
self.assertNotIn(p + i * 6 + 5, out.annotations)
def testFormatNone(self):
out = tensor_format.format_tensor(None, "a")
self.assertEqual(
["Tensor \"a\":", "", "None"], out.lines)
def testLocateTensorElement1DNoEllipsis(self):
a = np.zeros(20)
out = tensor_format.format_tensor(
a, "a", np_printoptions={"linewidth": 40})
self.assertEqual([
"Tensor \"a\":",
"",
"array([ 0., 0., 0., 0., 0., 0.,",
" 0., 0., 0., 0., 0., 0.,",
" 0., 0., 0., 0., 0., 0.,",
" 0., 0.])",
], out.lines)
is_omitted, row = tensor_format.locate_tensor_element(out, [0])
self.assertFalse(is_omitted)
self.assertEqual(2, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [5])
self.assertFalse(is_omitted)
self.assertEqual(2, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [6])
self.assertFalse(is_omitted)
self.assertEqual(3, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [11])
self.assertFalse(is_omitted)
self.assertEqual(3, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [12])
self.assertFalse(is_omitted)
self.assertEqual(4, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [18])
self.assertFalse(is_omitted)
self.assertEqual(5, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [19])
self.assertFalse(is_omitted)
self.assertEqual(5, row)
with self.assertRaisesRegexp(
ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [20])
with self.assertRaisesRegexp(
ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1])
with self.assertRaisesRegexp(
ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [0, 0])
def testLocateTensorElement2DNoEllipsis(self):
a = np.linspace(0.0, 1.0 - 1.0 / 16.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(a, "a")
self.assertEqual([
"Tensor \"a\":",
"",
"array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])",
], out.lines)
is_omitted, row = tensor_format.locate_tensor_element(out, [0, 0])
self.assertFalse(is_omitted)
self.assertEqual(2, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [0, 3])
self.assertFalse(is_omitted)
self.assertEqual(2, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [1, 0])
self.assertFalse(is_omitted)
self.assertEqual(3, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [1, 3])
self.assertFalse(is_omitted)
self.assertEqual(3, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [3, 3])
self.assertFalse(is_omitted)
self.assertEqual(5, row)
with self.assertRaisesRegexp(
ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [1, 4])
with self.assertRaisesRegexp(
ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1, 2])
with self.assertRaisesRegexp(
ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [0])
def testLocateTensorElement3DWithEllipses(self):
a = np.zeros([11, 11, 11])
out = tensor_format.format_tensor(
a, "a", False, np_printoptions={"threshold": 100, "edgeitems": 2})
self.assertEqual([
"Tensor \"a\":",
"",
"array([[[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.]],",
"",
" [[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.]],",
"",
" ..., ",
" [[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.]],",
"",
" [[ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.],",
" ..., ",
" [ 0., 0., ..., 0., 0.],",
" [ 0., 0., ..., 0., 0.]]])",
], out.lines)
is_omitted, row = tensor_format.locate_tensor_element(out, [0, 0, 0])
self.assertFalse(is_omitted)
self.assertEqual(2, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [0, 0, 10])
self.assertFalse(is_omitted)
self.assertEqual(2, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [0, 1, 0])
self.assertFalse(is_omitted)
self.assertEqual(3, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [0, 2, 0])
self.assertTrue(is_omitted) # In omitted line.
self.assertEqual(4, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [0, 2, 10])
self.assertTrue(is_omitted) # In omitted line.
self.assertEqual(4, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [0, 8, 10])
self.assertTrue(is_omitted) # In omitted line.
self.assertEqual(4, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [0, 10, 1])
self.assertFalse(is_omitted)
self.assertEqual(6, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [5, 1, 1])
self.assertTrue(is_omitted) # In omitted line.
self.assertEqual(14, row)
is_omitted, row = tensor_format.locate_tensor_element(out, [10, 10, 10])
self.assertFalse(is_omitted)
self.assertEqual(25, row)
with self.assertRaisesRegexp(
ValueError, "Indices exceed tensor dimensions"):
tensor_format.locate_tensor_element(out, [11, 5, 5])
with self.assertRaisesRegexp(
ValueError, "Indices contain negative"):
tensor_format.locate_tensor_element(out, [-1, 5, 5])
with self.assertRaisesRegexp(
ValueError, "Dimensions mismatch"):
tensor_format.locate_tensor_element(out, [5, 5])
def testLocateTensorElementAnnotationsUnavailable(self):
  """Element lookup must fail cleanly when no tensor metadata exists."""
  # format_tensor(None, ...) produces a "None" screen output that carries
  # no tensor_metadata annotations.
  out = tensor_format.format_tensor(None, "a")

  self.assertEqual(
      ["Tensor \"a\":", "", "None"], out.lines)

  # Without metadata, locating an element is expected to raise.
  with self.assertRaisesRegexp(
      AttributeError, "tensor_metadata is not available in annotations"):
    tensor_format.locate_tensor_element(out, [0])
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  googletest.main()
| [
"tensorflow.python.debug.cli.tensor_format.format_tensor",
"numpy.zeros",
"tensorflow.python.platform.googletest.main",
"six.moves.xrange",
"tensorflow.python.debug.cli.tensor_format.locate_tensor_element",
"numpy.linspace",
"numpy.set_printoptions"
] | [((16146, 16163), 'tensorflow.python.platform.googletest.main', 'googletest.main', ([], {}), '()\n', (16161, 16163), False, 'from tensorflow.python.platform import googletest\n'), ((1159, 1234), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(8)', 'threshold': '(1000)', 'edgeitems': '(3)', 'linewidth': '(75)'}), '(precision=8, threshold=1000, edgeitems=3, linewidth=75)\n', (1178, 1234), True, 'import numpy as np\n'), ((1793, 1805), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (1801, 1805), True, 'import numpy as np\n'), ((1817, 1887), 'tensorflow.python.debug.cli.tensor_format.format_tensor', 'tensor_format.format_tensor', (['a', '"""a"""'], {'np_printoptions': "{'linewidth': 40}"}), "(a, 'a', np_printoptions={'linewidth': 40})\n", (1844, 1887), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((2602, 2637), 'tensorflow.python.debug.cli.tensor_format.format_tensor', 'tensor_format.format_tensor', (['a', '"""a"""'], {}), "(a, 'a')\n", (2629, 2637), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((3071, 3083), 'six.moves.xrange', 'xrange', (['(2)', '(6)'], {}), '(2, 6)\n', (3077, 3083), False, 'from six.moves import xrange\n'), ((3274, 3310), 'tensorflow.python.debug.cli.tensor_format.format_tensor', 'tensor_format.format_tensor', (['a', 'None'], {}), '(a, None)\n', (3301, 3310), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((3707, 3716), 'six.moves.xrange', 'xrange', (['(4)'], {}), '(4)\n', (3713, 3716), False, 'from six.moves import xrange\n'), ((3893, 3951), 'tensorflow.python.debug.cli.tensor_format.format_tensor', 'tensor_format.format_tensor', (['a', '"""a"""'], {'include_metadata': '(True)'}), "(a, 'a', include_metadata=True)\n", (3920, 3951), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((4440, 4452), 'six.moves.xrange', 'xrange', (['(4)', '(7)'], {}), '(4, 7)\n', (4446, 4452), False, 'from six.moves import xrange\n'), ((4647, 4717), 
'tensorflow.python.debug.cli.tensor_format.format_tensor', 'tensor_format.format_tensor', (['a', '"""a"""'], {'np_printoptions': "{'linewidth': 50}"}), "(a, 'a', np_printoptions={'linewidth': 50})\n", (4674, 4717), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((6108, 6143), 'tensorflow.python.debug.cli.tensor_format.format_tensor', 'tensor_format.format_tensor', (['a', '"""a"""'], {}), "(a, 'a')\n", (6135, 6143), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((7234, 7256), 'numpy.zeros', 'np.zeros', (['[11, 11, 11]'], {}), '([11, 11, 11])\n', (7242, 7256), True, 'import numpy as np\n'), ((7268, 7367), 'tensorflow.python.debug.cli.tensor_format.format_tensor', 'tensor_format.format_tensor', (['a', '"""a"""', '(False)'], {'np_printoptions': "{'threshold': 100, 'edgeitems': 2}"}), "(a, 'a', False, np_printoptions={'threshold': \n 100, 'edgeitems': 2})\n", (7295, 7367), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((8477, 8486), 'six.moves.xrange', 'xrange', (['(2)'], {}), '(2)\n', (8483, 8486), False, 'from six.moves import xrange\n'), ((8912, 8921), 'six.moves.xrange', 'xrange', (['(2)'], {}), '(2)\n', (8918, 8921), False, 'from six.moves import xrange\n'), ((9431, 9469), 'tensorflow.python.debug.cli.tensor_format.format_tensor', 'tensor_format.format_tensor', (['None', '"""a"""'], {}), "(None, 'a')\n", (9458, 9469), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((9601, 9613), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (9609, 9613), True, 'import numpy as np\n'), ((9625, 9695), 'tensorflow.python.debug.cli.tensor_format.format_tensor', 'tensor_format.format_tensor', (['a', '"""a"""'], {'np_printoptions': "{'linewidth': 40}"}), "(a, 'a', np_printoptions={'linewidth': 40})\n", (9652, 9695), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((9980, 10025), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 
'tensor_format.locate_tensor_element', (['out', '[0]'], {}), '(out, [0])\n', (10015, 10025), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((10111, 10156), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[5]'], {}), '(out, [5])\n', (10146, 10156), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((10242, 10287), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[6]'], {}), '(out, [6])\n', (10277, 10287), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((10373, 10419), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[11]'], {}), '(out, [11])\n', (10408, 10419), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((10505, 10551), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[12]'], {}), '(out, [12])\n', (10540, 10551), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((10637, 10683), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[18]'], {}), '(out, [18])\n', (10672, 10683), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((10769, 10815), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[19]'], {}), '(out, [19])\n', (10804, 10815), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((11418, 11453), 'tensorflow.python.debug.cli.tensor_format.format_tensor', 'tensor_format.format_tensor', (['a', '"""a"""'], {}), "(a, 'a')\n", (11445, 11453), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((11781, 11829), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[0, 
0]'], {}), '(out, [0, 0])\n', (11816, 11829), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((11915, 11963), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[0, 3]'], {}), '(out, [0, 3])\n', (11950, 11963), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((12049, 12097), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[1, 0]'], {}), '(out, [1, 0])\n', (12084, 12097), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((12183, 12231), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[1, 3]'], {}), '(out, [1, 3])\n', (12218, 12231), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((12317, 12365), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[3, 3]'], {}), '(out, [3, 3])\n', (12352, 12365), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((12906, 12928), 'numpy.zeros', 'np.zeros', (['[11, 11, 11]'], {}), '([11, 11, 11])\n', (12914, 12928), True, 'import numpy as np\n'), ((12940, 13039), 'tensorflow.python.debug.cli.tensor_format.format_tensor', 'tensor_format.format_tensor', (['a', '"""a"""', '(False)'], {'np_printoptions': "{'threshold': 100, 'edgeitems': 2}"}), "(a, 'a', False, np_printoptions={'threshold': \n 100, 'edgeitems': 2})\n", (12967, 13039), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((14047, 14098), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[0, 0, 0]'], {}), '(out, [0, 0, 0])\n', (14082, 14098), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((14184, 14236), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[0, 0, 
10]'], {}), '(out, [0, 0, 10])\n', (14219, 14236), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((14322, 14373), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[0, 1, 0]'], {}), '(out, [0, 1, 0])\n', (14357, 14373), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((14459, 14510), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[0, 2, 0]'], {}), '(out, [0, 2, 0])\n', (14494, 14510), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((14615, 14667), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[0, 2, 10]'], {}), '(out, [0, 2, 10])\n', (14650, 14667), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((14772, 14824), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[0, 8, 10]'], {}), '(out, [0, 8, 10])\n', (14807, 14824), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((14929, 14981), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[0, 10, 1]'], {}), '(out, [0, 10, 1])\n', (14964, 14981), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((15067, 15118), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[5, 1, 1]'], {}), '(out, [5, 1, 1])\n', (15102, 15118), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((15224, 15278), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[10, 10, 10]'], {}), '(out, [10, 10, 10])\n', (15259, 15278), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((15840, 15878), 'tensorflow.python.debug.cli.tensor_format.format_tensor', 
'tensor_format.format_tensor', (['None', '"""a"""'], {}), "(None, 'a')\n", (15867, 15878), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((10976, 11022), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[20]'], {}), '(out, [20])\n', (11011, 11022), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((11113, 11159), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[-1]'], {}), '(out, [-1])\n', (11148, 11159), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((11245, 11293), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[0, 0]'], {}), '(out, [0, 0])\n', (11280, 11293), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((12526, 12574), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[1, 4]'], {}), '(out, [1, 4])\n', (12561, 12574), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((12665, 12714), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[-1, 2]'], {}), '(out, [-1, 2])\n', (12700, 12714), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((12800, 12845), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[0]'], {}), '(out, [0])\n', (12835, 12845), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((15440, 15492), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[11, 5, 5]'], {}), '(out, [11, 5, 5])\n', (15475, 15492), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((15583, 15635), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 
'tensor_format.locate_tensor_element', (['out', '[-1, 5, 5]'], {}), '(out, [-1, 5, 5])\n', (15618, 15635), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((15721, 15769), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[5, 5]'], {}), '(out, [5, 5])\n', (15756, 15769), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((16069, 16114), 'tensorflow.python.debug.cli.tensor_format.locate_tensor_element', 'tensor_format.locate_tensor_element', (['out', '[0]'], {}), '(out, [0])\n', (16104, 16114), False, 'from tensorflow.python.debug.cli import tensor_format\n'), ((2536, 2574), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0 - 1.0 / 16.0)', '(16)'], {}), '(0.0, 1.0 - 1.0 / 16.0, 16)\n', (2547, 2574), True, 'import numpy as np\n'), ((3208, 3246), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0 - 1.0 / 16.0)', '(16)'], {}), '(0.0, 1.0 - 1.0 / 16.0, 16)\n', (3219, 3246), True, 'import numpy as np\n'), ((3827, 3865), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0 - 1.0 / 16.0)', '(16)'], {}), '(0.0, 1.0 - 1.0 / 16.0, 16)\n', (3838, 3865), True, 'import numpy as np\n'), ((4580, 4618), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0 - 1.0 / 40.0)', '(40)'], {}), '(0.0, 1.0 - 1.0 / 40.0, 40)\n', (4591, 4618), True, 'import numpy as np\n'), ((6039, 6077), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0 - 1.0 / 24.0)', '(24)'], {}), '(0.0, 1.0 - 1.0 / 24.0, 24)\n', (6050, 6077), True, 'import numpy as np\n'), ((11352, 11390), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0 - 1.0 / 16.0)', '(16)'], {}), '(0.0, 1.0 - 1.0 / 16.0, 16)\n', (11363, 11390), True, 'import numpy as np\n')] |
"""
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import onnx
from extensions.front.onnx.image_scaler_ext import ImageScalerFrontExtractor
from mo.utils.unittest.extractors import PB
class TestImageScalerONNXExt(unittest.TestCase):
    """Checks attribute extraction for the ONNX ImageScaler operation."""

    @staticmethod
    def _create_image_scaler_node():
        """Build a PB-wrapped ONNX ImageScaler node with known attributes."""
        node_pb = onnx.helper.make_node(
            'ImageScaler',
            inputs=['a'],
            outputs=['b'],
            scale=1.0,
            bias=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
        )
        return PB({'pb': node_pb, 'graph': PB({'graph': {'layout': 'NCHW'}})})

    def test_image_scaler_ext(self):
        """Extracted scale/bias must match the attributes set on the node."""
        node = self._create_image_scaler_node()
        ImageScalerFrontExtractor.extract(node)

        expected = {
            'scale': 1.0,
            'bias': [[[1.0]], [[2.0]], [[3.0]], [[4.0]], [[5.0]], [[6.0]], [[7.0]], [[8.0]]],
        }
        for key, value in expected.items():
            actual = node[key]
            if type(actual) in [list, np.ndarray]:
                # Array-valued attributes are compared element-wise.
                self.assertTrue(np.array_equal(np.array(actual), np.array(value)))
            else:
                self.assertEqual(actual, value)
| [
"extensions.front.onnx.image_scaler_ext.ImageScalerFrontExtractor.extract",
"numpy.array",
"mo.utils.unittest.extractors.PB",
"onnx.helper.make_node"
] | [((876, 1003), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""ImageScaler"""'], {'inputs': "['a']", 'outputs': "['b']", 'scale': '(1.0)', 'bias': '[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'}), "('ImageScaler', inputs=['a'], outputs=['b'], scale=1.0,\n bias=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n", (897, 1003), False, 'import onnx\n'), ((1259, 1298), 'extensions.front.onnx.image_scaler_ext.ImageScalerFrontExtractor.extract', 'ImageScalerFrontExtractor.extract', (['node'], {}), '(node)\n', (1292, 1298), False, 'from extensions.front.onnx.image_scaler_ext import ImageScalerFrontExtractor\n'), ((1109, 1142), 'mo.utils.unittest.extractors.PB', 'PB', (["{'graph': {'layout': 'NCHW'}}"], {}), "({'graph': {'layout': 'NCHW'}})\n", (1111, 1142), False, 'from mo.utils.unittest.extractors import PB\n'), ((1587, 1606), 'numpy.array', 'np.array', (['node[key]'], {}), '(node[key])\n', (1595, 1606), True, 'import numpy as np\n'), ((1608, 1630), 'numpy.array', 'np.array', (['exp_res[key]'], {}), '(exp_res[key])\n', (1616, 1630), True, 'import numpy as np\n')] |
import numpy as np
# convert data into a [1 - 10 [ scale
def convert(data, value_1, value_9):
    """Linearly rescale *data* so that value_1 maps to 1 and value_9 maps to 9."""
    slope = 8 / (value_9 - value_1)
    intercept = 1 - slope * value_1
    return slope * data + intercept
def compute_moisture(drain_map, monthly_precip):
    """Combine a drainage map with monthly precipitation into 12 moisture maps.

    Parameters
    ----------
    drain_map : ndarray
        2-D drainage map; its dtype is reused for the result.
    monthly_precip : sequence of float
        Twelve (strictly positive) monthly precipitation values; each
        month's moisture is drain_map + log(precip).

    Returns
    -------
    ndarray
        Array of shape ``(12,) + drain_map.shape``.
    """
    # Bug fix: the original allocated (12, shape[0], shape[0]), which only
    # worked for square maps; allocate the full map shape instead.
    result = np.zeros((12,) + drain_map.shape, dtype=drain_map.dtype)
    for month, precip in enumerate(monthly_precip):
        result[month] = drain_map + np.log(precip)
    return result
def monthly_vigor(low, high, data0, data1):
    """Return the fraction of each cell's [data0, data1] range inside [low, high].

    data0 and data1 are per-cell condition maps for two consecutive months;
    the result is a map of values in [0, 1] describing how much of each
    cell's monthly range overlaps the plant's tolerated [low, high] band.
    """
    mx = np.maximum(data0, data1)
    mn = np.minimum(data0, data1)
    diff = mx - mn
    # Guard cells where the two months are identical to avoid dividing by 0;
    # those cells are patched after the division below.
    same_mask = diff == 0
    diff[same_mask] = 1
    max_in = np.minimum(high, mx)
    min_in = np.maximum(low, mn)
    size_in = np.maximum(0, max_in - min_in)
    vigor = size_in / diff
    # For constant cells the vigor is 1 when the value lies inside the band
    # and 0 otherwise.  (The original performed this assignment twice; the
    # redundant duplicate line is removed.)
    vigor[same_mask] = np.logical_and(mx >= low, mx <= high)[same_mask]
    return vigor
def yearly_vigor(low, high, year_data):
    """Average the month-to-month vigor over a full year (wrapping Dec -> Jan)."""
    total = np.zeros_like(year_data[0])
    # Twelve consecutive month pairs; (month + 1) % 12 closes the year loop.
    for month in range(12):
        total += monthly_vigor(low, high, year_data[month],
                               year_data[(month + 1) % 12])
    return total / 12
def geology_vigor(plant_geol, ground_map):
    """Map each geology index in *ground_map* to the plant's vigor for it.

    plant_geol[i] is the vigor on geology type i; cells whose index has no
    entry in plant_geol contribute 0.
    """
    vigor = np.zeros(ground_map.shape, dtype=np.float64)
    for geol_index, geol_vigor in enumerate(plant_geol):
        vigor += np.where(ground_map == geol_index, geol_vigor, 0.0)
    return vigor
def plant_density(plant_info, condition_list, geology=None):
    """Compute a [0, 1] density map for one plant as the minimum of its vigors.

    Parameters
    ----------
    plant_info : dict
        Maps a condition name to its tolerance data (indices 0 and 1 are the
        low/high bounds), and optionally 'geology' to a per-geology vigor list.
    condition_list : dict
        Maps condition names to yearly condition stacks (first axis = month).
    geology : ndarray, optional
        Geology index map; only used when plant_info has a 'geology' entry.
    """
    # Idiom fix: next(iter(...)) replaces the original chained dunder calls
    # (.values().__iter__().__next__()) used to grab a shape template.
    template = next(iter(condition_list.values()))[0]
    density = np.ones_like(template)
    for name, bounds in plant_info.items():
        if name in condition_list:
            density = np.minimum(
                density, yearly_vigor(bounds[0], bounds[1], condition_list[name]))
    if "geology" in plant_info and geology is not None:
        geo_vigor = geology_vigor(plant_info["geology"], geology)
        density = np.minimum(density, geo_vigor)
    return density
def plant_compet(plants, condition_list, geology):
    """Compute per-plant density maps when plants compete for conditions.

    At each cell, plants are ranked by vigor; the lower-ranked ones are
    evaluated first and each rank depletes the shared conditions before the
    next rank is re-evaluated.

    Parameters
    ----------
    plants : sequence of dict
        One plant_info dict per plant (see plant_density); condition entries
        appear to be 3-vectors where index 2 is a consumption rate --
        TODO confirm.
    condition_list : dict
        Maps condition names to yearly condition stacks.
    geology : ndarray
        Geology index map.

    Returns
    -------
    ndarray
        Stack of density maps, one per plant, aligned with *plants*.
    """
    # copy conditions (they are depleted in place below)
    cond = {}
    for name, t in condition_list.items():
        cond[name] = t.copy()
    # Broadcasting yields shape (n_plants, h, w): one density map per plant.
    result = np.zeros_like(condition_list.values().__iter__().__next__()[0]) + np.zeros(len(plants))[:, np.newaxis, np.newaxis]
    # compute vigor; gpi collects each plant's parameters as one column per
    # plant so they can be permuted by the per-cell ranking below.
    gpi = {}
    for i, pi in enumerate(plants):
        result[i] = plant_density(pi, cond, geology)
        for name, v in pi.items():
            if name in condition_list:
                if not name in gpi:
                    gpi[name] = np.zeros((3, len(plants)))
                gpi[name][:, i] = v
            elif name == "geology":
                if not name in gpi:
                    gpi[name] = np.zeros((len(v), len(plants)))
                gpi[name][:, i] = v
    # locally sort plants by vigor: parse[i] holds, per cell, the index of
    # the plant with the i-th lowest vigor.
    parse = np.argsort(result, axis = 0)
    result_sorted = np.zeros_like(result)
    for i, p in enumerate(parse):
        # create per-cell parameter maps for the i-th ranked plant
        # (fancy indexing with the (h, w) index map p)
        new_pi = {}
        for name, v in gpi.items():
            if name in condition_list or name == "geology":
                new_pi[name] = v[:, p]
        # recompute vigor against the (possibly depleted) conditions
        result_sorted[i] = plant_density(new_pi, cond, geology)
        # update conditions: subtract what this rank consumes
        # (row 2 looks like a per-condition consumption rate -- TODO confirm)
        for name, v in gpi.items():
            if name in cond:
                cond[name] -= v[2, p] * result_sorted[i]
    # scatter the rank-ordered densities back to their plant indices
    np.put_along_axis(result, parse, result_sorted, 0)
    return result
| [
"numpy.minimum",
"numpy.logical_and",
"numpy.log",
"numpy.argsort",
"numpy.zeros",
"numpy.maximum",
"numpy.zeros_like",
"numpy.put_along_axis"
] | [((238, 315), 'numpy.zeros', 'np.zeros', (['(12, drain_map.shape[0], drain_map.shape[0])'], {'dtype': 'drain_map.dtype'}), '((12, drain_map.shape[0], drain_map.shape[0]), dtype=drain_map.dtype)\n', (246, 315), True, 'import numpy as np\n'), ((543, 567), 'numpy.maximum', 'np.maximum', (['data0', 'data1'], {}), '(data0, data1)\n', (553, 567), True, 'import numpy as np\n'), ((577, 601), 'numpy.minimum', 'np.minimum', (['data0', 'data1'], {}), '(data0, data1)\n', (587, 601), True, 'import numpy as np\n'), ((686, 706), 'numpy.minimum', 'np.minimum', (['high', 'mx'], {}), '(high, mx)\n', (696, 706), True, 'import numpy as np\n'), ((720, 739), 'numpy.maximum', 'np.maximum', (['low', 'mn'], {}), '(low, mn)\n', (730, 739), True, 'import numpy as np\n'), ((755, 785), 'numpy.maximum', 'np.maximum', (['(0)', '(max_in - min_in)'], {}), '(0, max_in - min_in)\n', (765, 785), True, 'import numpy as np\n'), ((1032, 1059), 'numpy.zeros_like', 'np.zeros_like', (['year_data[0]'], {}), '(year_data[0])\n', (1045, 1059), True, 'import numpy as np\n'), ((1309, 1353), 'numpy.zeros', 'np.zeros', (['ground_map.shape'], {'dtype': 'np.float64'}), '(ground_map.shape, dtype=np.float64)\n', (1317, 1353), True, 'import numpy as np\n'), ((2790, 2816), 'numpy.argsort', 'np.argsort', (['result'], {'axis': '(0)'}), '(result, axis=0)\n', (2800, 2816), True, 'import numpy as np\n'), ((2839, 2860), 'numpy.zeros_like', 'np.zeros_like', (['result'], {}), '(result)\n', (2852, 2860), True, 'import numpy as np\n'), ((3324, 3374), 'numpy.put_along_axis', 'np.put_along_axis', (['result', 'parse', 'result_sorted', '(0)'], {}), '(result, parse, result_sorted, 0)\n', (3341, 3374), True, 'import numpy as np\n'), ((838, 875), 'numpy.logical_and', 'np.logical_and', (['(mx >= low)', '(mx <= high)'], {}), '(mx >= low, mx <= high)\n', (852, 875), True, 'import numpy as np\n'), ((910, 947), 'numpy.logical_and', 'np.logical_and', (['(mx >= low)', '(mx <= high)'], {}), '(mx >= low, mx <= high)\n', (924, 947), True, 
'import numpy as np\n'), ((1901, 1931), 'numpy.minimum', 'np.minimum', (['density', 'geo_vigor'], {}), '(density, geo_vigor)\n', (1911, 1931), True, 'import numpy as np\n'), ((395, 404), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (401, 404), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function
import re
import traceback
import warnings
from datetime import datetime
from functools import partial
import numpy as np
import pandas as pd
from ..core import indexing
from ..core.formatting import first_n_items, format_timestamp, last_item
from ..core.pycompat import PY3
from ..core.variable import Variable
from .variables import (
SerializationWarning, VariableCoder, lazy_elemwise_func, pop_to,
safe_setitem, unpack_for_decoding, unpack_for_encoding)
try:
from pandas.errors import OutOfBoundsDatetime
except ImportError:
# pandas < 0.20
from pandas.tslib import OutOfBoundsDatetime
# standard calendars recognized by netcdftime
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])

# Nanoseconds per numpy timedelta unit code (keys match np.timedelta64 codes).
_NS_PER_TIME_DELTA = {'us': int(1e3),
                      'ms': int(1e6),
                      's': int(1e9),
                      'm': int(1e9) * 60,
                      'h': int(1e9) * 60 * 60,
                      'D': int(1e9) * 60 * 60 * 24}

# CF time-unit spellings (plural) that mark a variable as a timedelta.
TIME_UNITS = frozenset(['days', 'hours', 'minutes', 'seconds',
                        'milliseconds', 'microseconds'])
def _import_netcdftime():
    '''
    helper function handle the transition to netcdftime as a stand-alone
    package

    Returns the module that provides num2date/date2num: the stand-alone
    ``netcdftime`` package when available, otherwise ``netCDF4`` (whose
    top-level API exposes the same functions).

    Raises ImportError when neither provider can be imported.
    '''
    try:
        # Try importing netcdftime directly
        import netcdftime as nctime
        if not hasattr(nctime, 'num2date'):
            # must have gotten an old version from netcdf4-python
            raise ImportError
    except ImportError:
        # in netCDF4 the num2date/date2num function are top-level api
        try:
            import netCDF4 as nctime
        except ImportError:
            raise ImportError("Failed to import netcdftime")

    return nctime
def _netcdf_to_numpy_timeunit(units):
units = units.lower()
if not units.endswith('s'):
units = '%ss' % units
return {'microseconds': 'us', 'milliseconds': 'ms', 'seconds': 's',
'minutes': 'm', 'hours': 'h', 'days': 'D'}[units]
def _unpack_netcdf_time_units(units):
# CF datetime units follow the format: "UNIT since DATE"
# this parses out the unit and date allowing for extraneous
# whitespace.
matches = re.match('(.+) since (.+)', units)
if not matches:
raise ValueError('invalid time units: %s' % units)
delta_units, ref_date = [s.strip() for s in matches.groups()]
return delta_units, ref_date
def _decode_datetime_with_netcdftime(num_dates, units, calendar):
    """Decode numeric dates via netcdftime (fallback for non-standard cases).

    Returns np.datetime64 values when the decoded range fits inside the
    datetime64[ns] representable years [1678, 2262); otherwise keeps the raw
    netcdftime.datetime objects and emits a SerializationWarning.
    """
    nctime = _import_netcdftime()
    dates = np.asarray(nctime.num2date(num_dates, units, calendar))
    # nanargmin/nanargmax: probe the extreme decoded dates while ignoring NaN.
    if (dates[np.nanargmin(num_dates)].year < 1678 or
            dates[np.nanargmax(num_dates)].year >= 2262):
        warnings.warn('Unable to decode time axis into full '
                      'numpy.datetime64 objects, continuing using dummy '
                      'netcdftime.datetime objects instead, reason: dates out'
                      ' of range', SerializationWarning, stacklevel=3)
    else:
        try:
            dates = nctime_to_nptime(dates)
        except ValueError as e:
            warnings.warn('Unable to decode time axis into full '
                          'numpy.datetime64 objects, continuing using '
                          'dummy netcdftime.datetime objects instead, reason:'
                          '{0}'.format(e), SerializationWarning, stacklevel=3)
    return dates
def _decode_cf_datetime_dtype(data, units, calendar):
    """Infer the dtype that decode_cf_datetime would produce for *data*.

    Decodes only the first and last elements, so failures surface here with
    a helpful error message instead of being swallowed later.
    """
    # Verify that at least the first and last date can be decoded
    # successfully. Otherwise, tracebacks end up swallowed by
    # Dataset.__repr__ when users try to view their lazily decoded array.
    values = indexing.ImplicitToExplicitIndexingAdapter(
        indexing.as_indexable(data))
    # `or [0]` substitutes a dummy value when the array is empty.
    example_value = np.concatenate([first_n_items(values, 1) or [0],
                                    last_item(values) or [0]])

    try:
        result = decode_cf_datetime(example_value, units, calendar)
    except Exception:
        calendar_msg = ('the default calendar' if calendar is None
                        else 'calendar %r' % calendar)
        msg = ('unable to decode time units %r with %s. Try '
               'opening your dataset with decode_times=False.'
               % (units, calendar_msg))
        if not PY3:
            msg += ' Full traceback:\n' + traceback.format_exc()
        raise ValueError(msg)
    else:
        # netcdftime objects decode to an object-dtype array of scalars.
        dtype = getattr(result, 'dtype', np.dtype('object'))

    return dtype
def decode_cf_datetime(num_dates, units, calendar=None):
    """Given an array of numeric dates in netCDF format, convert it into a
    numpy array of date time objects.

    For standard (Gregorian) calendars, this function uses vectorized
    operations, which makes it much faster than netcdftime.num2date. In such a
    case, the returned array will be of type np.datetime64.

    Note that time unit in `units` must not be smaller than microseconds and
    not larger than days.

    See also
    --------
    netcdftime.num2date
    """
    num_dates = np.asarray(num_dates)
    flat_num_dates = num_dates.ravel()
    if calendar is None:
        calendar = 'standard'

    delta, ref_date = _unpack_netcdf_time_units(units)

    try:
        if calendar not in _STANDARD_CALENDARS:
            # Non-standard calendars cannot be represented as datetime64[ns];
            # jump straight to the netcdftime fallback below.
            raise OutOfBoundsDatetime

        delta = _netcdf_to_numpy_timeunit(delta)
        try:
            ref_date = pd.Timestamp(ref_date)
        except ValueError:
            # ValueError is raised by pd.Timestamp for non-ISO timestamp
            # strings, in which case we fall back to using netcdftime
            raise OutOfBoundsDatetime

        # fixes: https://github.com/pydata/pandas/issues/14068
        # these lines check if the the lowest or the highest value in dates
        # cause an OutOfBoundsDatetime (Overflow) error
        pd.to_timedelta(flat_num_dates.min(), delta) + ref_date
        pd.to_timedelta(flat_num_dates.max(), delta) + ref_date

        # Cast input dates to integers of nanoseconds because `pd.to_datetime`
        # works much faster when dealing with integers
        # make _NS_PER_TIME_DELTA an array to ensure type upcasting
        flat_num_dates_ns_int = (flat_num_dates.astype(np.float64) *
                                 _NS_PER_TIME_DELTA[delta]).astype(np.int64)

        dates = (pd.to_timedelta(flat_num_dates_ns_int, 'ns') +
                 ref_date).values

    except (OutOfBoundsDatetime, OverflowError):
        # Slow path: element-wise decoding through netcdftime.
        dates = _decode_datetime_with_netcdftime(
            flat_num_dates.astype(np.float), units, calendar)

    return dates.reshape(num_dates.shape)
def decode_cf_timedelta(num_timedeltas, units):
    """Given an array of numeric timedeltas in netCDF format, convert it into a
    numpy timedelta64[ns] array.
    """
    num_timedeltas = np.asarray(num_timedeltas)
    units = _netcdf_to_numpy_timeunit(units)

    shape = num_timedeltas.shape
    num_timedeltas = num_timedeltas.ravel()

    # NOTE(review): the `box` keyword was removed in pandas 1.0; this call
    # only works with the older pandas this module targets -- verify.
    result = pd.to_timedelta(num_timedeltas, unit=units, box=False)
    # NaT is returned unboxed with wrong units; this should be fixed in pandas
    if result.dtype != 'timedelta64[ns]':
        result = result.astype('timedelta64[ns]')
    return result.reshape(shape)
def _infer_time_units_from_diff(unique_timedeltas):
    """Return the coarsest CF time unit that evenly divides every timedelta."""
    for candidate in ['days', 'hours', 'minutes', 'seconds']:
        nanoseconds = _NS_PER_TIME_DELTA[_netcdf_to_numpy_timeunit(candidate)]
        ratios = unique_timedeltas / np.timedelta64(nanoseconds, 'ns')
        if np.all(ratios == ratios.astype(int)):
            return candidate
    # Nothing divides evenly (e.g. sub-second deltas): fall back to seconds.
    return 'seconds'
def infer_datetime_units(dates):
    """Given an array of datetimes, returns a CF compatible time-unit string of
    the form "{time_unit} since {date[0]}", where `time_unit` is 'days',
    'hours', 'minutes' or 'seconds' (the first one that can evenly divide all
    unique time deltas in `dates`)
    """
    # NOTE(review): `box=False` was removed in pandas 1.0 -- legacy pandas only.
    dates = pd.to_datetime(np.asarray(dates).ravel(), box=False)
    # Drop NaT so missing values cannot poison the unit inference.
    dates = dates[pd.notnull(dates)]
    unique_timedeltas = np.unique(np.diff(dates))
    units = _infer_time_units_from_diff(unique_timedeltas)
    # Default to the Unix epoch when there are no valid dates at all.
    reference_date = dates[0] if len(dates) > 0 else '1970-01-01'
    return '%s since %s' % (units, pd.Timestamp(reference_date))
def infer_timedelta_units(deltas):
    """Given an array of timedeltas, returns a CF compatible time-unit from
    {'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly
    divide all unique time deltas in `deltas`)
    """
    # NOTE(review): `box=False` was removed in pandas 1.0 -- legacy pandas only.
    deltas = pd.to_timedelta(np.asarray(deltas).ravel(), box=False)
    # NaT entries carry no unit information; ignore them.
    unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])
    units = _infer_time_units_from_diff(unique_timedeltas)
    return units
def nctime_to_nptime(times):
    """Given an array of netcdftime.datetime objects, return an array of
    numpy.datetime64 objects of the same size.

    Each element only needs year/month/day/hour/minute/second attributes.
    """
    times = np.asarray(times)
    converted = np.empty(times.shape, dtype='M8[ns]')
    for index, value in np.ndenumerate(times):
        as_datetime = datetime(value.year, value.month, value.day,
                               value.hour, value.minute, value.second)
        converted[index] = np.datetime64(as_datetime)
    return converted
def _cleanup_netcdf_time_units(units):
    """Normalize the reference date inside CF time units, when possible."""
    delta, ref_date = _unpack_netcdf_time_units(units)
    try:
        units = '%s since %s' % (delta, format_timestamp(ref_date))
    except OutOfBoundsDatetime:
        # don't worry about reifying the units if they're out of bounds
        pass
    return units
def _encode_datetime_with_netcdftime(dates, units, calendar):
    """Fallback method for encoding dates using netcdftime.

    This method is more flexible than xarray's parsing using datetime64[ns]
    arrays but also slower because it loops over each element.
    """
    nctime = _import_netcdftime()

    if np.issubdtype(dates.dtype, np.datetime64):
        # numpy's broken datetime conversion only works for us precision
        dates = dates.astype('M8[us]').astype(datetime)

    def encode_datetime(d):
        # None encodes as NaN so missing values round-trip.
        return np.nan if d is None else nctime.date2num(d, units, calendar)

    return np.vectorize(encode_datetime)(dates)
def cast_to_int_if_safe(num):
    """Return *num* as int64 when the cast is lossless, otherwise unchanged."""
    as_int = np.array(num, dtype=np.int64)
    if (num == as_int).all():
        return as_int
    return num
def encode_cf_datetime(dates, units=None, calendar=None):
    """Given an array of datetime objects, returns the tuple `(num, units,
    calendar)` suitable for a CF compliant time variable.

    Unlike `date2num`, this function can handle datetime64 arrays.

    See also
    --------
    netcdftime.date2num
    """
    dates = np.asarray(dates)

    if units is None:
        units = infer_datetime_units(dates)
    else:
        units = _cleanup_netcdf_time_units(units)

    if calendar is None:
        calendar = 'proleptic_gregorian'

    delta, ref_date = _unpack_netcdf_time_units(units)
    try:
        if calendar not in _STANDARD_CALENDARS or dates.dtype.kind == 'O':
            # parse with netcdftime instead
            raise OutOfBoundsDatetime
        assert dates.dtype == 'datetime64[ns]'

        delta_units = _netcdf_to_numpy_timeunit(delta)
        time_delta = np.timedelta64(1, delta_units).astype('timedelta64[ns]')
        ref_date = np.datetime64(pd.Timestamp(ref_date))
        # count of time_delta steps since ref_date (float until cast below)
        num = (dates - ref_date) / time_delta
    except (OutOfBoundsDatetime, OverflowError):
        num = _encode_datetime_with_netcdftime(dates, units, calendar)

    # prefer integer output when the encoding is exact
    num = cast_to_int_if_safe(num)
    return (num, units, calendar)
def encode_cf_timedelta(timedeltas, units=None):
    """Encode an array of timedeltas as numbers plus a units string.

    Returns the tuple ``(num, units)``; null timedeltas become NaN.
    """
    if units is None:
        units = infer_timedelta_units(timedeltas)
    np_unit = _netcdf_to_numpy_timeunit(units)
    # multiply by 1.0 to force float division so fractional offsets survive
    num = 1.0 * timedeltas / np.timedelta64(1, np_unit)
    num = np.where(pd.isnull(timedeltas), np.nan, num)
    num = cast_to_int_if_safe(num)
    return (num, units)
class CFDatetimeCoder(VariableCoder):
    """Coder converting between datetime64 data and CF numeric times."""
    def encode(self, variable, name=None):
        # Encode datetime64 arrays as numbers with 'units'/'calendar' attrs.
        dims, data, attrs, encoding = unpack_for_encoding(variable)
        if np.issubdtype(data.dtype, np.datetime64):
            (data, units, calendar) = encode_cf_datetime(
                data,
                encoding.pop('units', None),
                encoding.pop('calendar', None))
            # record the chosen units/calendar on the encoded variable
            safe_setitem(attrs, 'units', units, name=name)
            safe_setitem(attrs, 'calendar', calendar, name=name)
        return Variable(dims, data, attrs, encoding)
    def decode(self, variable, name=None):
        # Decode variables whose 'units' attr looks like '<delta> since ...'.
        dims, data, attrs, encoding = unpack_for_decoding(variable)
        if 'units' in attrs and 'since' in attrs['units']:
            units = pop_to(attrs, encoding, 'units')
            calendar = pop_to(attrs, encoding, 'calendar')
            dtype = _decode_cf_datetime_dtype(data, units, calendar)
            transform = partial(
                decode_cf_datetime, units=units, calendar=calendar)
            # decoding is applied lazily, element-wise
            data = lazy_elemwise_func(data, transform, dtype)
        return Variable(dims, data, attrs, encoding)
class CFTimedeltaCoder(VariableCoder):
    """Coder converting between timedelta64 data and CF numeric offsets."""
    def encode(self, variable, name=None):
        # Encode timedelta64 arrays as numbers with a 'units' attribute.
        dims, data, attrs, encoding = unpack_for_encoding(variable)
        if np.issubdtype(data.dtype, np.timedelta64):
            data, units = encode_cf_timedelta(
                data, encoding.pop('units', None))
            safe_setitem(attrs, 'units', units, name=name)
        return Variable(dims, data, attrs, encoding)
    def decode(self, variable, name=None):
        # Decode variables whose 'units' attr is a recognized time unit.
        dims, data, attrs, encoding = unpack_for_decoding(variable)
        if 'units' in attrs and attrs['units'] in TIME_UNITS:
            units = pop_to(attrs, encoding, 'units')
            transform = partial(decode_cf_timedelta, units=units)
            dtype = np.dtype('timedelta64[ns]')
            # decoding is applied lazily; the result is timedelta64[ns]
            data = lazy_elemwise_func(data, transform, dtype=dtype)
        return Variable(dims, data, attrs, encoding)
| [
"numpy.nanargmax",
"pandas.to_timedelta",
"numpy.array",
"pandas.notnull",
"datetime.datetime",
"numpy.nanargmin",
"netCDF4.num2date",
"numpy.asarray",
"numpy.diff",
"numpy.ndenumerate",
"numpy.issubdtype",
"numpy.empty",
"numpy.datetime64",
"warnings.warn",
"numpy.dtype",
"netCDF4.dat... | [((2247, 2281), 're.match', 're.match', (['"""(.+) since (.+)"""', 'units'], {}), "('(.+) since (.+)', units)\n", (2255, 2281), False, 'import re\n'), ((5077, 5098), 'numpy.asarray', 'np.asarray', (['num_dates'], {}), '(num_dates)\n', (5087, 5098), True, 'import numpy as np\n'), ((6831, 6857), 'numpy.asarray', 'np.asarray', (['num_timedeltas'], {}), '(num_timedeltas)\n', (6841, 6857), True, 'import numpy as np\n'), ((6995, 7049), 'pandas.to_timedelta', 'pd.to_timedelta', (['num_timedeltas'], {'unit': 'units', 'box': '(False)'}), '(num_timedeltas, unit=units, box=False)\n', (7010, 7049), True, 'import pandas as pd\n'), ((8906, 8923), 'numpy.asarray', 'np.asarray', (['times'], {}), '(times)\n', (8916, 8923), True, 'import numpy as np\n'), ((8934, 8971), 'numpy.empty', 'np.empty', (['times.shape'], {'dtype': '"""M8[ns]"""'}), "(times.shape, dtype='M8[ns]')\n", (8942, 8971), True, 'import numpy as np\n'), ((8988, 9009), 'numpy.ndenumerate', 'np.ndenumerate', (['times'], {}), '(times)\n', (9002, 9009), True, 'import numpy as np\n'), ((9756, 9797), 'numpy.issubdtype', 'np.issubdtype', (['dates.dtype', 'np.datetime64'], {}), '(dates.dtype, np.datetime64)\n', (9769, 9797), True, 'import numpy as np\n'), ((10128, 10157), 'numpy.array', 'np.array', (['num'], {'dtype': 'np.int64'}), '(num, dtype=np.int64)\n', (10136, 10157), True, 'import numpy as np\n'), ((10558, 10575), 'numpy.asarray', 'np.asarray', (['dates'], {}), '(dates)\n', (10568, 10575), True, 'import numpy as np\n'), ((2586, 2629), 'netCDF4.num2date', 'nctime.num2date', (['num_dates', 'units', 'calendar'], {}), '(num_dates, units, calendar)\n', (2601, 2629), True, 'import netCDF4 as nctime\n'), ((2751, 2963), 'warnings.warn', 'warnings.warn', (['"""Unable to decode time axis into full numpy.datetime64 objects, continuing using dummy netcdftime.datetime objects instead, reason: dates out of range"""', 'SerializationWarning'], {'stacklevel': '(3)'}), "(\n 'Unable to decode time axis into full 
numpy.datetime64 objects, continuing using dummy netcdftime.datetime objects instead, reason: dates out of range'\n , SerializationWarning, stacklevel=3)\n", (2764, 2963), False, 'import warnings\n'), ((7467, 7497), 'numpy.timedelta64', 'np.timedelta64', (['delta_ns', '"""ns"""'], {}), "(delta_ns, 'ns')\n", (7481, 7497), True, 'import numpy as np\n'), ((8034, 8051), 'pandas.notnull', 'pd.notnull', (['dates'], {}), '(dates)\n', (8044, 8051), True, 'import pandas as pd\n'), ((8087, 8101), 'numpy.diff', 'np.diff', (['dates'], {}), '(dates)\n', (8094, 8101), True, 'import numpy as np\n'), ((9024, 9084), 'datetime.datetime', 'datetime', (['t.year', 't.month', 't.day', 't.hour', 't.minute', 't.second'], {}), '(t.year, t.month, t.day, t.hour, t.minute, t.second)\n', (9032, 9084), False, 'from datetime import datetime\n'), ((9102, 9119), 'numpy.datetime64', 'np.datetime64', (['dt'], {}), '(dt)\n', (9115, 9119), True, 'import numpy as np\n'), ((10045, 10074), 'numpy.vectorize', 'np.vectorize', (['encode_datetime'], {}), '(encode_datetime)\n', (10057, 10074), True, 'import numpy as np\n'), ((11667, 11693), 'numpy.timedelta64', 'np.timedelta64', (['(1)', 'np_unit'], {}), '(1, np_unit)\n', (11681, 11693), True, 'import numpy as np\n'), ((11713, 11734), 'pandas.isnull', 'pd.isnull', (['timedeltas'], {}), '(timedeltas)\n', (11722, 11734), True, 'import pandas as pd\n'), ((11972, 12012), 'numpy.issubdtype', 'np.issubdtype', (['data.dtype', 'np.datetime64'], {}), '(data.dtype, np.datetime64)\n', (11985, 12012), True, 'import numpy as np\n'), ((13100, 13141), 'numpy.issubdtype', 'np.issubdtype', (['data.dtype', 'np.timedelta64'], {}), '(data.dtype, np.timedelta64)\n', (13113, 13141), True, 'import numpy as np\n'), ((4478, 4496), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (4486, 4496), True, 'import numpy as np\n'), ((5431, 5453), 'pandas.Timestamp', 'pd.Timestamp', (['ref_date'], {}), '(ref_date)\n', (5443, 5453), True, 'import pandas as pd\n'), ((8263, 
8291), 'pandas.Timestamp', 'pd.Timestamp', (['reference_date'], {}), '(reference_date)\n', (8275, 8291), True, 'import pandas as pd\n'), ((8644, 8662), 'pandas.notnull', 'pd.notnull', (['deltas'], {}), '(deltas)\n', (8654, 8662), True, 'import pandas as pd\n'), ((9997, 10032), 'netCDF4.date2num', 'nctime.date2num', (['d', 'units', 'calendar'], {}), '(d, units, calendar)\n', (10012, 10032), True, 'import netCDF4 as nctime\n'), ((11206, 11228), 'pandas.Timestamp', 'pd.Timestamp', (['ref_date'], {}), '(ref_date)\n', (11218, 11228), True, 'import pandas as pd\n'), ((12742, 12801), 'functools.partial', 'partial', (['decode_cf_datetime'], {'units': 'units', 'calendar': 'calendar'}), '(decode_cf_datetime, units=units, calendar=calendar)\n', (12749, 12801), False, 'from functools import partial\n'), ((13606, 13647), 'functools.partial', 'partial', (['decode_cf_timedelta'], {'units': 'units'}), '(decode_cf_timedelta, units=units)\n', (13613, 13647), False, 'from functools import partial\n'), ((13668, 13695), 'numpy.dtype', 'np.dtype', (['"""timedelta64[ns]"""'], {}), "('timedelta64[ns]')\n", (13676, 13695), True, 'import numpy as np\n'), ((6353, 6397), 'pandas.to_timedelta', 'pd.to_timedelta', (['flat_num_dates_ns_int', '"""ns"""'], {}), "(flat_num_dates_ns_int, 'ns')\n", (6368, 6397), True, 'import pandas as pd\n'), ((7978, 7995), 'numpy.asarray', 'np.asarray', (['dates'], {}), '(dates)\n', (7988, 7995), True, 'import numpy as np\n'), ((8564, 8582), 'numpy.asarray', 'np.asarray', (['deltas'], {}), '(deltas)\n', (8574, 8582), True, 'import numpy as np\n'), ((11116, 11146), 'numpy.timedelta64', 'np.timedelta64', (['(1)', 'delta_units'], {}), '(1, delta_units)\n', (11130, 11146), True, 'import numpy as np\n'), ((2645, 2668), 'numpy.nanargmin', 'np.nanargmin', (['num_dates'], {}), '(num_dates)\n', (2657, 2668), True, 'import numpy as np\n'), ((2703, 2726), 'numpy.nanargmax', 'np.nanargmax', (['num_dates'], {}), '(num_dates)\n', (2715, 2726), True, 'import numpy as np\n'), 
((4374, 4396), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4394, 4396), False, 'import traceback\n')] |
import torch
import torch.nn as nn
import torchaudio
import numpy as np
import random
class RandomTimeShift(object):
    """Randomly shift a 1-D waveform left or right, zero-padding the gap.

    :param p: probability of applying the shift.
    :param max_time_shift: largest shift in samples; when None, defaults to
        5% of the current signal's length (computed per call).

    Fix: the original cached the derived default on ``self.max_time_shift``
    after the first call, which is wrong when samples have different
    lengths; the default is now computed locally. Also renamed the
    misspelled local ``direcion``.
    """
    def __init__(self, p, max_time_shift=None):
        self.p = p
        self.max_time_shift = max_time_shift
    def __call__(self, sample):
        if torch.rand(1) < self.p:
            max_shift = self.max_time_shift
            if max_shift is None:
                # per-call default: 5% of this sample's length
                max_shift = sample.shape[-1] // 20
            n_shift = random.randint(0, max_shift)
            if n_shift == 0:
                return sample
            pad = torch.zeros(n_shift, dtype=sample.dtype)
            direction = random.random()
            if direction > 0.5:
                # shift right: pad the front, drop the tail
                sample = torch.cat((pad, sample[:-n_shift]), dim=-1)
            else:
                # shift left: drop the front, pad the tail
                sample = torch.cat((sample[n_shift:], pad), dim=-1)
        return sample
class RandomAmp(object):
def __init__(self, low, high):
self.low = low
self.high = high
def __call__(self, sample):
amp = torch.FloatTensor(1).uniform_(self.low, self.high)
sample.mult_(amp)
return sample
class RandomFlip(object):
    """Time-reverse a waveform with probability ``p``.

    Fix: the original tested ``rand > p``, which applied the flip with
    probability ``1 - p``; use ``rand < p`` so ``p`` is the application
    probability, consistent with RandomTimeShift.
    """
    def __init__(self, p=0.5):
        self.p = p
    def __call__(self, sample):
        if torch.rand(1) < self.p:
            sample = sample.flip(dims=[-1]).contiguous()
        return sample
class RandomAdd180Phase(object):
def __init__(self, p):
self.p = p
def __call__(self, sample):
if torch.rand(1) > self.p:
sample.mult_(-1)
return sample
class RandomQuantNoise(object):
    """Simulate ``n_bits`` quantization by rounding, with probability ``p``.

    Fix: the original tested ``rand > p``, which quantized with probability
    ``1 - p``; use ``rand < p`` so ``p`` is the application probability.
    """
    def __init__(self, n_bits=16, p=0.5):
        self.p = p
        self.n_bits = n_bits
    def __call__(self, sample):
        if torch.rand(1) < self.p:
            # snap values to the nearest 1 / 2**n_bits step
            scale = 2 ** self.n_bits
            sample = torch.round(sample * scale) / scale
        return sample
class RandomAddAWGN(object):
    """Add white Gaussian noise at ``snr_db`` dB SNR, with probability ``p``.

    Fix: the original tested ``rand > p``, which added noise with
    probability ``1 - p``; use ``rand < p`` so ``p`` is the application
    probability.
    """
    def __init__(self, snr_db=30, p=0.5):
        self.p = p
        self.snr_db = snr_db
    def __call__(self, sample):
        if torch.rand(1) < self.p:
            # noise std derived from signal RMS and the target SNR
            rms = torch.sqrt(torch.mean(sample ** 2))
            sigma = rms * 10 ** (-self.snr_db / 20.)
            noise = torch.randn_like(sample) * sigma
            sample.add_(noise)  # in place: mutates the caller's tensor
        return sample
class RandomAddSine(object):
    """Add a mains-like sine (~50 Hz, random phase) at ``snr_db`` dB SNR,
    with probability ``p``.

    Fix: the original tested ``rand > p``, which added the tone with
    probability ``1 - p``; use ``rand < p`` so ``p`` is the application
    probability.
    """
    def __init__(self, fs, snr_db=30, p=0.5):
        self.p = p
        self.fs = fs
        self.snr_db = snr_db
    def __call__(self, sample):
        if torch.rand(1) < self.p:
            n = torch.arange(0, sample.shape[0], 1)
            f = 50 + 3 * torch.randn(1)  # jittered mains frequency
            t = n * 1. / self.fs
            rms = torch.sqrt(torch.mean(sample ** 2))
            # peak amplitude for the target SNR (peak = rms * sqrt(2))
            amp = rms * np.sqrt(2) * 10 ** (-self.snr_db / 20.)
            tone = amp * torch.sin(2 * np.pi * f * t + torch.rand(1) * np.pi)
            sample.add_(tone)  # in place: mutates the caller's tensor
        return sample
class AudioAugs(object):
    """Apply a configurable chain of waveform augmentations in order.

    :param augs: iterable of augmentation names; recognized names are
        'amp', 'flip', 'neg', 'quant', 'sine', 'awgn' and 'tshift'.
        Unrecognized names are silently ignored.
    :param fs: sampling rate, forwarded to the sine augmentation.
    """
    def __init__(self, augs, fs):
        self.random_amp = RandomAmp(low=0.3, high=1)
        self.random_flip = RandomFlip(p=0.5)
        self.random_neg = RandomAdd180Phase(p=0.5)
        self.random_quantnoise = RandomQuantNoise(n_bits=16, p=0.5)
        self.awgn = RandomAddAWGN(snr_db=30, p=0.5)
        self.sine = RandomAddSine(fs=fs, snr_db=30, p=0.5)
        self.tshift = RandomTimeShift(p=0.5, max_time_shift=None)
        self.augs = augs
    def __call__(self, sample):
        # name -> transform dispatch table (replaces the if/elif chain)
        transforms = {
            'amp': self.random_amp,
            'flip': self.random_flip,
            'neg': self.random_neg,
            'quant': self.random_quantnoise,
            'sine': self.sine,
            'awgn': self.awgn,
            'tshift': self.tshift,
        }
        for name in self.augs:
            transform = transforms.get(name)
            if transform is not None:
                sample = transform(sample)
        return sample
if __name__ == "__main__":
    # Smoke test: run each augmentation once on a random 4-sample signal.
    RA = RandomAmp(0.3, 1.)
    RF = RandomFlip(0.5)
    RN = RandomAdd180Phase(0.5)
    RQ = RandomQuantNoise(16, 0.5)
    RAW = RandomAddAWGN(30, 0.5)
    # NOTE(review): RandomAddSine's first positional argument is fs, so this
    # passes fs=30, snr_db=0.5 — presumably RandomAddSine(fs, 30, 0.5) was
    # intended; confirm before relying on this demo.
    RS = RandomAddSine(30, 0.5)
    x = torch.randn(4)
    # NOTE(review): several augmentations mutate x in place, so y1..y5 can
    # alias the same tensor as x.
    y1 = RF(x)
    y2 = RN(x)
    y3 = RQ(x)
    y4 = RA(x)
    y5 = RS(x)
print(x-y4) | [
"random.randint",
"numpy.sqrt",
"torch.rand",
"torch.mean",
"torch.FloatTensor",
"torch.cat",
"torch.randn_like",
"torch.round",
"random.random",
"torch.zeros",
"torch.arange",
"torch.randn"
] | [((4167, 4181), 'torch.randn', 'torch.randn', (['(4)'], {}), '(4)\n', (4178, 4181), False, 'import torch\n'), ((279, 292), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (289, 292), False, 'import torch\n'), ((430, 468), 'random.randint', 'random.randint', (['(0)', 'self.max_time_shift'], {}), '(0, self.max_time_shift)\n', (444, 468), False, 'import random\n'), ((1252, 1265), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1262, 1265), False, 'import torch\n'), ((1503, 1516), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1513, 1516), False, 'import torch\n'), ((1750, 1763), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1760, 1763), False, 'import torch\n'), ((2043, 2056), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2053, 2056), False, 'import torch\n'), ((2453, 2466), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2463, 2466), False, 'import torch\n'), ((2498, 2533), 'torch.arange', 'torch.arange', (['(0)', 'sample.shape[0]', '(1)'], {}), '(0, sample.shape[0], 1)\n', (2510, 2533), False, 'import torch\n'), ((568, 608), 'torch.zeros', 'torch.zeros', (['n_shift'], {'dtype': 'sample.dtype'}), '(n_shift, dtype=sample.dtype)\n', (579, 608), False, 'import torch\n'), ((636, 651), 'random.random', 'random.random', ([], {}), '()\n', (649, 651), False, 'import random\n'), ((1032, 1052), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)'], {}), '(1)\n', (1049, 1052), False, 'import torch\n'), ((1800, 1838), 'torch.round', 'torch.round', (['(sample * 2 ** self.n_bits)'], {}), '(sample * 2 ** self.n_bits)\n', (1811, 1838), False, 'import torch\n'), ((2099, 2122), 'torch.mean', 'torch.mean', (['(sample ** 2)'], {}), '(sample ** 2)\n', (2109, 2122), False, 'import torch\n'), ((2183, 2207), 'torch.randn_like', 'torch.randn_like', (['sample'], {}), '(sample)\n', (2199, 2207), False, 'import torch\n'), ((2628, 2651), 'torch.mean', 'torch.mean', (['(sample ** 2)'], {}), '(sample ** 2)\n', (2638, 2651), False, 'import torch\n'), ((716, 759), 
'torch.cat', 'torch.cat', (['(pad, sample[:-n_shift])'], {'dim': '(-1)'}), '((pad, sample[:-n_shift]), dim=-1)\n', (725, 759), False, 'import torch\n'), ((811, 853), 'torch.cat', 'torch.cat', (['(sample[n_shift:], pad)'], {'dim': '(-1)'}), '((sample[n_shift:], pad), dim=-1)\n', (820, 853), False, 'import torch\n'), ((2557, 2571), 'torch.randn', 'torch.randn', (['(1)'], {}), '(1)\n', (2568, 2571), False, 'import torch\n'), ((2673, 2683), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2680, 2683), True, 'import numpy as np\n'), ((2753, 2766), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2763, 2766), False, 'import torch\n')] |
from sklearn import preprocessing, compose, model_selection
import pandas as pd
import numpy as np
# Label threshold: imdb_score above this value is binarized to 1 ("good").
GOODMOVIETHRESHOLD = 6.95
# Number of cross-validation folds used by RaceData.splitToFiveFolds().
NUMBEROFFOLDS = 5
# Reads the csv file and save data to memory
class RaceData:
    """Loads the movie csv, preprocesses features and binarizes labels.

    Fixes:
    - ``KFold(shuffle=False, random_state=1)`` raises ``ValueError`` in
      scikit-learn >= 0.24 (the seed has no effect without shuffling);
      the seed is dropped, which preserves behavior.
    - ``Binarizer`` takes ``threshold`` as a keyword-only argument in
      modern scikit-learn; it is now passed by keyword.
    """
    def __init__(self, file_name):
        """
        Initializes the data and labels
        :param file_name: the path to file
        """
        self.file = file_name  # path to file
        self.X = pd.DataFrame  # Data without Labels
        self.y = pd.DataFrame  # Labels
    def preprocess(self):
        """
        Preprocesses the data according to specified demands and for the classifiers
        :return: None
        """
        data = pd.read_csv(self.file, delimiter=',')
        # drop excluded columns, rows with NA values and duplicate titles
        data.drop(columns=['content_rating', 'movie_imdb_link', 'plot_keywords'], inplace=True)
        data.dropna(inplace=True)
        data.drop_duplicates(subset='movie_title', keep='first', inplace=True)
        # movie title is now unique, so it carries no predictive information
        data.drop(columns=['movie_title'], inplace=True)
        # keep the original row index as a feature
        data['index1'] = data.index
        # imdb score becomes the label
        self.y = data.pop('imdb_score')
        # genres and the three actor columns become dummy variables
        genres = data.pop('genres').str.get_dummies()
        actors = (data.pop('actor_1_name') + "|" + data.pop('actor_2_name') + "|" +
                  data.pop('actor_3_name')).str.get_dummies()
        # build column lists before joining the dummy columns back in
        numerical_cols = data.select_dtypes(include='number').columns
        category_cols = data.select_dtypes(exclude='number').columns
        data[numerical_cols] = data[numerical_cols].astype('float64')
        data = data.join(actors)
        data = data.join(genres)
        # scale numeric columns, one-hot encode remaining categoricals, and
        # pass the already-dummy actor/genre columns through untouched
        preprocessor = compose.ColumnTransformer(
            transformers=[('num', preprocessing.StandardScaler(), numerical_cols),
                          ('cat', preprocessing.OneHotEncoder(), category_cols)],
            remainder="passthrough")
        self.X = preprocessor.fit_transform(data)
        # binarize labels: scores above GOODMOVIETHRESHOLD become 1
        self.y = preprocessing.Binarizer(threshold=GOODMOVIETHRESHOLD).fit_transform(
            self.y.to_numpy().reshape(-1, 1))
        self.y = np.ravel(self.y)
    @staticmethod
    def splitToFiveFolds():
        """
        Initializes the sklearn KFold object with NUMBEROFFOLDS(5) for slicing the data
        :return: model_selection.KFold
        """
        # random_state must not be set when shuffle=False (no-op, and an
        # error in recent scikit-learn releases)
        return model_selection.KFold(n_splits=NUMBEROFFOLDS, shuffle=False)
| [
"sklearn.preprocessing.Binarizer",
"pandas.read_csv",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.preprocessing.StandardScaler",
"numpy.ravel",
"sklearn.model_selection.KFold"
] | [((823, 860), 'pandas.read_csv', 'pd.read_csv', (['self.file'], {'delimiter': '""","""'}), "(self.file, delimiter=',')\n", (834, 860), True, 'import pandas as pd\n'), ((3338, 3354), 'numpy.ravel', 'np.ravel', (['self.y'], {}), '(self.y)\n', (3346, 3354), True, 'import numpy as np\n'), ((3654, 3730), 'sklearn.model_selection.KFold', 'model_selection.KFold', ([], {'n_splits': 'NUMBEROFFOLDS', 'shuffle': '(False)', 'random_state': '(1)'}), '(n_splits=NUMBEROFFOLDS, shuffle=False, random_state=1)\n', (3675, 3730), False, 'from sklearn import preprocessing, compose, model_selection\n'), ((3229, 3272), 'sklearn.preprocessing.Binarizer', 'preprocessing.Binarizer', (['GOODMOVIETHRESHOLD'], {}), '(GOODMOVIETHRESHOLD)\n', (3252, 3272), False, 'from sklearn import preprocessing, compose, model_selection\n'), ((2774, 2804), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (2802, 2804), False, 'from sklearn import preprocessing, compose, model_selection\n'), ((2894, 2923), 'sklearn.preprocessing.OneHotEncoder', 'preprocessing.OneHotEncoder', ([], {}), '()\n', (2921, 2923), False, 'from sklearn import preprocessing, compose, model_selection\n')] |
# Functions to format data
# Author: D.Kisler <<EMAIL>>
import pandas as pd
import numpy as np
def list_slit(lst, size=None):
    """
    Split a sequence into consecutive chunks of at most ``size`` elements.

    :param lst: sequence to split
    :param size: chunk size; defaults to the whole sequence as one chunk
    :return: list of chunks (an empty list for empty input)

    Fix: an empty ``lst`` previously raised ``ValueError`` because the
    derived chunk size of 0 made ``range``'s step zero.
    """
    if not lst:
        return []
    if not size:
        size = len(lst)
    return [lst[i:i + size] for i in range(0, len(lst), size)]
def dict_keys_formatter(dict):
    """
    Strip '@' and replace '.' with '_' in every top-level key.
    :param dict: input dictionary
    """
    formatted = {}
    for key, value in dict.items():
        clean_key = key.replace('.', '_').replace('@', '')
        formatted[clean_key] = value
    return formatted
def dict_subsetter(dict, keys):
    """
    Subset a dictionary down to the requested keys.
    Keys absent from the input are silently skipped.
    :param dict: input dictionary
    :param keys: keys to keep
    """
    subset = {}
    for key in keys:
        if key in dict:
            subset[key] = dict[key]
    return subset
def dict_flattener(dict_in, simplify_arrays=False):
    """
    Flatten a nested dict, joining nested keys with '.'.

    Fix: the old stop condition compared key counts before/after a pass, so
    a nesting level that did not increase the number of keys (e.g.
    {'a': {'b': {'c': 1}}}) stopped flattening early; the loop now runs
    until no dict values remain.
    ___
    Required:
      :dict_in: dict_in - input json/dictionary
      :simplify_arrays: boolean - flag: shall list values be expanded into
        indexed keys ('k_0', 'k_1', ...)
    Output:
      :dict: output flattened json
    """
    def _assign(dict_out, key, value):
        # optionally expand list values into indexed keys
        if simplify_arrays and isinstance(value, list):
            for i, item in enumerate(value):
                dict_out['{}_{}'.format(key, i)] = item
        else:
            dict_out[key] = value

    def _one_step(d):
        # flatten exactly one nesting level
        dict_out = {}
        for k, v in d.items():
            if isinstance(v, dict):
                for ik, iv in v.items():
                    _assign(dict_out, '{}.{}'.format(k, ik), iv)
            else:
                _assign(dict_out, k, v)
        return dict_out

    dict_out = _one_step(dict_in)
    # keep flattening until no dict values are left
    while any(isinstance(v, dict) for v in dict_out.values()):
        dict_out = _one_step(dict_out)
    return dict_out
def df_datatypes_downcast(df):
    """
    Downcast numeric DataFrame columns in place to the smallest dtype that
    strictly contains their value range, reducing allocated memory.
    :param df: pandas DataFrame (mutated in place)
    :return: True
    """
    int_candidates = (np.int8, np.int16, np.int32, np.int64)
    float_candidates = (np.float16, np.float32)
    for col in df.columns:
        if df[col].dtype == object:
            continue  # non-numeric columns are left untouched
        c_min, c_max = df[col].min(), df[col].max()
        if str(df[col].dtype)[:3] == 'int':
            for candidate in int_candidates:
                info = np.iinfo(candidate)
                if c_min > info.min and c_max < info.max:
                    df[col] = df[col].astype(candidate)
                    break
        else:
            for candidate in float_candidates:
                info = np.finfo(candidate)
                if c_min > info.min and c_max < info.max:
                    df[col] = df[col].astype(candidate)
                    break
            else:
                # value range too wide for float16/float32
                df[col] = df[col].astype(np.float64)
    return True
def sql_reader(path_sql=None, sql=None):
    """
    Read SQL statements from a string or a file and split them on ';'.
    :param path_sql: path to a file with queries
    :param sql: string with sql statements (takes precedence over the file)
    :return: list of statements, each ending with ';', or None when neither
        input is provided
    """
    if not sql and not path_sql:
        return None
    if not sql:
        with open(path_sql, 'r') as f:
            sql = f.read()
    # anything after the final ';' is discarded
    return ['{};'.format(stmt) for stmt in sql.split(';')[:-1]]
| [
"numpy.finfo",
"numpy.iinfo"
] | [((2817, 2834), 'numpy.iinfo', 'np.iinfo', (['np.int8'], {}), '(np.int8)\n', (2825, 2834), True, 'import numpy as np\n'), ((2851, 2868), 'numpy.iinfo', 'np.iinfo', (['np.int8'], {}), '(np.int8)\n', (2859, 2868), True, 'import numpy as np\n'), ((3402, 3422), 'numpy.finfo', 'np.finfo', (['np.float16'], {}), '(np.float16)\n', (3410, 3422), True, 'import numpy as np\n'), ((3439, 3459), 'numpy.finfo', 'np.finfo', (['np.float16'], {}), '(np.float16)\n', (3447, 3459), True, 'import numpy as np\n'), ((2957, 2975), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (2965, 2975), True, 'import numpy as np\n'), ((2992, 3010), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (3000, 3010), True, 'import numpy as np\n'), ((3551, 3571), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (3559, 3571), True, 'import numpy as np\n'), ((3588, 3608), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (3596, 3608), True, 'import numpy as np\n'), ((3100, 3118), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (3108, 3118), True, 'import numpy as np\n'), ((3135, 3153), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (3143, 3153), True, 'import numpy as np\n'), ((3243, 3261), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (3251, 3261), True, 'import numpy as np\n'), ((3278, 3296), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (3286, 3296), True, 'import numpy as np\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""extenders tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow_estimator.contrib.estimator.python.estimator import extenders
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.predictor import from_saved_model
from tensorflow.python.data.ops import dataset_ops
from tensorflow_estimator.python.estimator import estimator_lib
from tensorflow_estimator.python.estimator.canned import linear
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import training
from tensorflow.python.util import compat
def get_input_fn(x, y):
  """Return an Estimator input_fn yielding features {'x': ...} and labels y."""
  def input_fn():
    dataset = dataset_ops.Dataset.from_tensor_slices({'x': x, 'y': y})
    iterator = dataset.make_one_shot_iterator()
    features = iterator.get_next()
    # 'y' rides through the dataset and is split out here as the label
    labels = features.pop('y')
    return features, labels
  return input_fn
class AddMetricsTest(test.TestCase):
  """Tests for extenders.add_metrics."""
  def test_should_add_metrics(self):
    input_fn = get_input_fn(
        x=np.arange(4)[:, None, None], y=np.ones(4)[:, None])
    estimator = linear.LinearClassifier([fc.numeric_column('x')])
    def metric_fn(features):
      # mean of the 'x' feature over the evaluation dataset
      return {'mean_x': metrics_lib.mean(features['x'])}
    estimator = extenders.add_metrics(estimator, metric_fn)
    estimator.train(input_fn=input_fn)
    metrics = estimator.evaluate(input_fn=input_fn)
    self.assertIn('mean_x', metrics)
    # x is arange(4) -> mean is 1.5
    self.assertEqual(1.5, metrics['mean_x'])
    # assert that it keeps original estimators metrics
    self.assertIn('auc', metrics)
  def test_should_error_out_for_not_recognized_args(self):
    estimator = linear.LinearClassifier([fc.numeric_column('x')])
    def metric_fn(features, not_recognized):
      _, _ = features, not_recognized
      return {}
    # metric_fn arguments must come from the supported set; anything else
    # is rejected with a ValueError naming the offending argument
    with self.assertRaisesRegexp(ValueError, 'not_recognized'):
      estimator = extenders.add_metrics(estimator, metric_fn)
  def test_all_supported_args(self):
    input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
    estimator = linear.LinearClassifier([fc.numeric_column('x')])
    def metric_fn(features, predictions, labels, config):
      self.assertIn('x', features)
      self.assertIsNotNone(labels)
      self.assertIn('logistic', predictions)
      self.assertTrue(isinstance(config, estimator_lib.RunConfig))
      return {}
    estimator = extenders.add_metrics(estimator, metric_fn)
    estimator.train(input_fn=input_fn)
    estimator.evaluate(input_fn=input_fn)
  def test_all_supported_args_in_different_order(self):
    input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
    estimator = linear.LinearClassifier([fc.numeric_column('x')])
    # argument order must not matter; add_metrics matches by name
    def metric_fn(labels, config, features, predictions):
      self.assertIn('x', features)
      self.assertIsNotNone(labels)
      self.assertIn('logistic', predictions)
      self.assertTrue(isinstance(config, estimator_lib.RunConfig))
      return {}
    estimator = extenders.add_metrics(estimator, metric_fn)
    estimator.train(input_fn=input_fn)
    estimator.evaluate(input_fn=input_fn)
  def test_all_args_are_optional(self):
    input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
    estimator = linear.LinearClassifier([fc.numeric_column('x')])
    def metric_fn():
      return {'two': metrics_lib.mean(constant_op.constant([2.]))}
    estimator = extenders.add_metrics(estimator, metric_fn)
    estimator.train(input_fn=input_fn)
    metrics = estimator.evaluate(input_fn=input_fn)
    self.assertEqual(2., metrics['two'])
  def test_overrides_existing_metrics(self):
    input_fn = get_input_fn(x=[[[0.]]], y=[[[1]]])
    estimator = linear.LinearClassifier([fc.numeric_column('x')])
    estimator.train(input_fn=input_fn)
    metrics = estimator.evaluate(input_fn=input_fn)
    self.assertNotEqual(2., metrics['auc'])
    def metric_fn():
      return {'auc': metrics_lib.mean(constant_op.constant([2.]))}
    estimator = extenders.add_metrics(estimator, metric_fn)
    # the metric_fn's 'auc' replaces the canned estimator's 'auc'
    metrics = estimator.evaluate(input_fn=input_fn)
    self.assertEqual(2., metrics['auc'])
class ClipGradientsByNormTest(test.TestCase):
  """Tests clip_gradients_by_norm."""
  def test_applies_norm(self):
    optimizer = extenders.clip_gradients_by_norm(
        training.GradientDescentOptimizer(1.0), clip_norm=3.)
    with ops.Graph().as_default():
      w = variables.Variable(1., name='weight')
      x = constant_op.constant(5.)
      y = -x * w
      # dy/dw = -x = -5 before clipping
      grads = optimizer.compute_gradients(y, var_list=[w])[0]
      opt_op = optimizer.minimize(y, var_list=[w])
      with training.MonitoredSession() as sess:
        # compute_gradients reports the raw (unclipped) gradient
        grads_value = sess.run(grads)
        self.assertEqual(-5., grads_value[0])
        # minimize applies the clipped gradient (norm limited to 3)
        sess.run(opt_op)
        new_w = sess.run(w)
        self.assertEqual(4., new_w)  # 1 + 1*3 (w - lr * clipped_grad)
  def test_name(self):
    optimizer = extenders.clip_gradients_by_norm(
        training.GradientDescentOptimizer(1.0), clip_norm=3.)
    # wrapper name is 'ClipByNorm' + the inner optimizer's name
    self.assertEqual('ClipByNormGradientDescent', optimizer.get_name())
class ForwardFeaturesTest(test.TestCase):
"""Tests forward_features."""
def _export_estimator(self, estimator, serving_input_fn):
tmpdir = tempfile.mkdtemp()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = estimator.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir))
return export_dir, tmpdir
def make_dummy_input_fn(self):
def _input_fn():
dataset = dataset_ops.Dataset.from_tensors({
'x': [[3.], [5.]],
'id': [[101], [102]],
'sparse_id': sparse_tensor.SparseTensor(
values=[1, 2, 3],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2]),
'labels': [[1.], [2.]]
})
def _split(x):
labels = x.pop('labels')
return x, labels
dataset = dataset.map(_split)
return dataset
return _input_fn
def test_forward_keys(self):
input_fn = self.make_dummy_input_fn()
estimator = linear.LinearRegressor([fc.numeric_column('x')])
estimator.train(input_fn=input_fn, steps=1)
forwarded_keys = ['id', 'sparse_id']
for key in forwarded_keys:
self.assertNotIn(key, next(estimator.predict(input_fn=input_fn)))
estimator = extenders.forward_features(
estimator, forwarded_keys, sparse_default_values={'sparse_id': 1})
expected_results = [101, 2, 102, 5]
predictions = estimator.predict(input_fn=input_fn)
for _ in range(2):
prediction = next(predictions)
for key in forwarded_keys:
self.assertIn(key, prediction)
self.assertEqual(expected_results.pop(0), sum(prediction[key]))
def test_forward_in_exported(self):
def serving_input_fn():
features_ph = {
'x': array_ops.placeholder(dtypes.float32, [None]),
'id': array_ops.placeholder(dtypes.int32, [None])
}
features = {
key: array_ops.expand_dims(tensor, -1)
for key, tensor in features_ph.items()
}
return estimator_lib.export.ServingInputReceiver(features, features_ph)
def input_fn():
return {'x': [[3.], [5.]], 'id': [[101], [102]]}, [[1.], [2.]]
# create estimator
feature_columns = [fc.numeric_column('x')]
estimator = linear.LinearRegressor(feature_columns)
estimator.train(input_fn=input_fn, steps=1)
estimator = extenders.forward_features(estimator, 'id')
# export saved model
export_dir, tmpdir = self._export_estimator(estimator, serving_input_fn)
# restore model
predict_fn = from_saved_model(export_dir, signature_def_key='predict')
predictions = predict_fn({'x': [3], 'id': [101]})
# verify that 'id' exists in predictions
self.assertIn('id', predictions)
self.assertEqual(101, predictions['id'])
# Clean up.
gfile.DeleteRecursively(tmpdir)
def test_forward_in_exported_sparse(self):
features_columns = [fc.indicator_column(
fc.categorical_column_with_vocabulary_list('x', range(10)))]
classifier = linear.LinearClassifier(feature_columns=features_columns)
def train_input_fn():
dataset = dataset_ops.Dataset.from_tensors({
'x': sparse_tensor.SparseTensor(
values=[1, 2, 3],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2]),
'labels': [[0], [1]]
})
def _split(x):
labels = x.pop('labels')
return x, labels
dataset = dataset.map(_split)
return dataset
classifier.train(train_input_fn, max_steps=1)
classifier = extenders.forward_features(
classifier, keys=['x'], sparse_default_values={'x': 0})
def serving_input_fn():
features_ph = array_ops.placeholder(dtype=dtypes.int32, name='x',
shape=[None])
features = {'x': layers.dense_to_sparse(features_ph)}
return estimator_lib.export.ServingInputReceiver(features,
{'x': features_ph})
export_dir, tmpdir = self._export_estimator(classifier, serving_input_fn)
prediction_fn = from_saved_model(export_dir, signature_def_key='predict')
features = (0, 2)
prediction = prediction_fn({'x': features})
self.assertIn('x', prediction)
self.assertEqual(features, tuple(prediction['x']))
gfile.DeleteRecursively(tmpdir)
def test_forward_list(self):
def input_fn():
return {'x': [[3.], [5.]], 'id': [[101], [102]]}, [[1.], [2.]]
estimator = linear.LinearRegressor([fc.numeric_column('x')])
estimator.train(input_fn=input_fn, steps=1)
self.assertNotIn('id', next(estimator.predict(input_fn=input_fn)))
estimator = extenders.forward_features(estimator, ['x', 'id'])
predictions = next(estimator.predict(input_fn=input_fn))
self.assertIn('id', predictions)
self.assertIn('x', predictions)
self.assertEqual(101, predictions['id'])
self.assertEqual(3., predictions['x'])
def test_forward_all(self):
def input_fn():
return {'x': [[3.], [5.]], 'id': [[101], [102]]}, [[1.], [2.]]
estimator = linear.LinearRegressor([fc.numeric_column('x')])
estimator.train(input_fn=input_fn, steps=1)
self.assertNotIn('id', next(estimator.predict(input_fn=input_fn)))
self.assertNotIn('x', next(estimator.predict(input_fn=input_fn)))
estimator = extenders.forward_features(estimator)
predictions = next(estimator.predict(input_fn=input_fn))
self.assertIn('id', predictions)
self.assertIn('x', predictions)
self.assertEqual(101, predictions['id'])
self.assertEqual(3., predictions['x'])
def test_key_should_be_string(self):
estimator = linear.LinearRegressor([fc.numeric_column('x')])
with self.assertRaisesRegexp(TypeError, 'keys should be either a string'):
extenders.forward_features(estimator, estimator)
def test_key_should_be_list_of_string(self):
estimator = linear.LinearRegressor([fc.numeric_column('x')])
with self.assertRaisesRegexp(TypeError, 'should be a string'):
extenders.forward_features(estimator, ['x', estimator])
def test_key_should_be_in_features(self):
def input_fn():
return {'x': [[3.], [5.]], 'id': [[101], [102]]}, [[1.], [2.]]
estimator = linear.LinearRegressor([fc.numeric_column('x')])
estimator.train(input_fn=input_fn, steps=1)
estimator = extenders.forward_features(estimator, 'y')
with self.assertRaisesRegexp(ValueError,
'keys should be exist in features'):
next(estimator.predict(input_fn=input_fn))
def test_forwarded_feature_should_not_be_a_sparse_tensor(self):
def input_fn():
return {
'x': [[3.], [5.]],
'id': sparse_tensor.SparseTensor(
values=['1', '2'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1])
}, [[1.], [2.]]
estimator = linear.LinearRegressor([fc.numeric_column('x')])
estimator.train(input_fn=input_fn, steps=1)
estimator = extenders.forward_features(estimator)
with self.assertRaisesRegexp(ValueError,
'Feature .* should be a Tensor.*'):
next(estimator.predict(input_fn=input_fn))
def test_forwarded_feature_should_be_a_sparse_tensor(self):
input_fn = self.make_dummy_input_fn()
estimator = linear.LinearRegressor([fc.numeric_column('x')])
estimator.train(input_fn=input_fn, steps=1)
estimator = extenders.forward_features(
estimator, sparse_default_values={'id': 0, 'sparse_id': 0})
with self.assertRaisesRegexp(
ValueError, 'Feature .* is expected to be a `SparseTensor`.'):
next(estimator.predict(input_fn=input_fn))
def test_predictions_should_be_dict(self):
def input_fn():
return {'x': [[3.], [5.]], 'id': [[101], [102]]}
def model_fn(features, mode):
del features
global_step = training.get_global_step()
return estimator_lib.EstimatorSpec(
mode,
loss=constant_op.constant([5.]),
predictions=constant_op.constant([5.]),
train_op=global_step.assign_add(1))
estimator = estimator_lib.Estimator(model_fn=model_fn)
estimator.train(input_fn=input_fn, steps=1)
estimator = extenders.forward_features(estimator)
with self.assertRaisesRegexp(ValueError, 'Predictions should be a dict'):
next(estimator.predict(input_fn=input_fn))
def test_should_not_conflict_with_existing_predictions(self):
def input_fn():
return {'x': [[3.], [5.]], 'id': [[101], [102]]}
def model_fn(features, mode):
del features
global_step = training.get_global_step()
return estimator_lib.EstimatorSpec(
mode,
loss=constant_op.constant([5.]),
predictions={'x': constant_op.constant([5.])},
train_op=global_step.assign_add(1))
estimator = estimator_lib.Estimator(model_fn=model_fn)
estimator.train(input_fn=input_fn, steps=1)
estimator = extenders.forward_features(estimator)
with self.assertRaisesRegexp(ValueError, 'Cannot forward feature key'):
next(estimator.predict(input_fn=input_fn))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| [
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.variables.Variable",
"numpy.arange",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow_estimator.python.estimator.estimator_lib.Estimator",
"tensorflow.python.framework.ops.Graph",
"tensorflow_estimator.python.estimator.canne... | [((15369, 15380), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (15378, 15380), False, 'from tensorflow.python.platform import test\n'), ((1890, 1946), 'tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices', 'dataset_ops.Dataset.from_tensor_slices', (["{'x': x, 'y': y}"], {}), "({'x': x, 'y': y})\n", (1928, 1946), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((2446, 2489), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.add_metrics', 'extenders.add_metrics', (['estimator', 'metric_fn'], {}), '(estimator, metric_fn)\n', (2467, 2489), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((3535, 3578), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.add_metrics', 'extenders.add_metrics', (['estimator', 'metric_fn'], {}), '(estimator, metric_fn)\n', (3556, 3578), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((4109, 4152), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.add_metrics', 'extenders.add_metrics', (['estimator', 'metric_fn'], {}), '(estimator, metric_fn)\n', (4130, 4152), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((4499, 4542), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.add_metrics', 'extenders.add_metrics', (['estimator', 'metric_fn'], {}), '(estimator, metric_fn)\n', (4520, 4542), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((5080, 5123), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.add_metrics', 'extenders.add_metrics', (['estimator', 'metric_fn'], {}), '(estimator, metric_fn)\n', (5101, 5123), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((6309, 6327), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (6325, 6327), 
False, 'import tempfile\n'), ((7463, 7561), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.forward_features', 'extenders.forward_features', (['estimator', 'forwarded_keys'], {'sparse_default_values': "{'sparse_id': 1}"}), "(estimator, forwarded_keys, sparse_default_values\n ={'sparse_id': 1})\n", (7489, 7561), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((8464, 8503), 'tensorflow_estimator.python.estimator.canned.linear.LinearRegressor', 'linear.LinearRegressor', (['feature_columns'], {}), '(feature_columns)\n', (8486, 8503), False, 'from tensorflow_estimator.python.estimator.canned import linear\n'), ((8568, 8611), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.forward_features', 'extenders.forward_features', (['estimator', '"""id"""'], {}), "(estimator, 'id')\n", (8594, 8611), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((8753, 8810), 'tensorflow.contrib.predictor.from_saved_model', 'from_saved_model', (['export_dir'], {'signature_def_key': '"""predict"""'}), "(export_dir, signature_def_key='predict')\n", (8769, 8810), False, 'from tensorflow.contrib.predictor import from_saved_model\n'), ((9014, 9045), 'tensorflow.python.platform.gfile.DeleteRecursively', 'gfile.DeleteRecursively', (['tmpdir'], {}), '(tmpdir)\n', (9037, 9045), False, 'from tensorflow.python.platform import gfile\n'), ((9224, 9281), 'tensorflow_estimator.python.estimator.canned.linear.LinearClassifier', 'linear.LinearClassifier', ([], {'feature_columns': 'features_columns'}), '(feature_columns=features_columns)\n', (9247, 9281), False, 'from tensorflow_estimator.python.estimator.canned import linear\n'), ((9763, 9850), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.forward_features', 'extenders.forward_features', (['classifier'], {'keys': "['x']", 'sparse_default_values': "{'x': 0}"}), "(classifier, keys=['x'], sparse_default_values={\n 'x': 
0})\n", (9789, 9850), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((10310, 10367), 'tensorflow.contrib.predictor.from_saved_model', 'from_saved_model', (['export_dir'], {'signature_def_key': '"""predict"""'}), "(export_dir, signature_def_key='predict')\n", (10326, 10367), False, 'from tensorflow.contrib.predictor import from_saved_model\n'), ((10534, 10565), 'tensorflow.python.platform.gfile.DeleteRecursively', 'gfile.DeleteRecursively', (['tmpdir'], {}), '(tmpdir)\n', (10557, 10565), False, 'from tensorflow.python.platform import gfile\n'), ((10890, 10940), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.forward_features', 'extenders.forward_features', (['estimator', "['x', 'id']"], {}), "(estimator, ['x', 'id'])\n", (10916, 10940), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((11556, 11593), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.forward_features', 'extenders.forward_features', (['estimator'], {}), '(estimator)\n', (11582, 11593), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((12562, 12604), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.forward_features', 'extenders.forward_features', (['estimator', '"""y"""'], {}), "(estimator, 'y')\n", (12588, 12604), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((13208, 13245), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.forward_features', 'extenders.forward_features', (['estimator'], {}), '(estimator)\n', (13234, 13245), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((13645, 13735), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.forward_features', 'extenders.forward_features', (['estimator'], {'sparse_default_values': "{'id': 0, 'sparse_id': 0}"}), "(estimator, sparse_default_values={'id': 
0,\n 'sparse_id': 0})\n", (13671, 13735), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((14331, 14373), 'tensorflow_estimator.python.estimator.estimator_lib.Estimator', 'estimator_lib.Estimator', ([], {'model_fn': 'model_fn'}), '(model_fn=model_fn)\n', (14354, 14373), False, 'from tensorflow_estimator.python.estimator import estimator_lib\n'), ((14439, 14476), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.forward_features', 'extenders.forward_features', (['estimator'], {}), '(estimator)\n', (14465, 14476), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((15067, 15109), 'tensorflow_estimator.python.estimator.estimator_lib.Estimator', 'estimator_lib.Estimator', ([], {'model_fn': 'model_fn'}), '(model_fn=model_fn)\n', (15090, 15109), False, 'from tensorflow_estimator.python.estimator import estimator_lib\n'), ((15175, 15212), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.forward_features', 'extenders.forward_features', (['estimator'], {}), '(estimator)\n', (15201, 15212), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((3062, 3105), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.add_metrics', 'extenders.add_metrics', (['estimator', 'metric_fn'], {}), '(estimator, metric_fn)\n', (3083, 3105), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((5393, 5431), 'tensorflow.python.training.training.GradientDescentOptimizer', 'training.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (5426, 5431), False, 'from tensorflow.python.training import training\n'), ((5492, 5530), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(1.0)'], {'name': '"""weight"""'}), "(1.0, name='weight')\n", (5510, 5530), False, 'from tensorflow.python.ops import variables\n'), ((5540, 5565), 'tensorflow.python.framework.constant_op.constant', 
'constant_op.constant', (['(5.0)'], {}), '(5.0)\n', (5560, 5565), False, 'from tensorflow.python.framework import constant_op\n'), ((6033, 6071), 'tensorflow.python.training.training.GradientDescentOptimizer', 'training.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (6066, 6071), False, 'from tensorflow.python.training import training\n'), ((6372, 6395), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['tmpdir'], {}), '(tmpdir)\n', (6387, 6395), False, 'from tensorflow.python.util import compat\n'), ((6397, 6422), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""export"""'], {}), "('export')\n", (6412, 6422), False, 'from tensorflow.python.util import compat\n'), ((6524, 6548), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['export_dir'], {}), '(export_dir)\n', (6536, 6548), False, 'from tensorflow.python.platform import gfile\n'), ((8224, 8288), 'tensorflow_estimator.python.estimator.estimator_lib.export.ServingInputReceiver', 'estimator_lib.export.ServingInputReceiver', (['features', 'features_ph'], {}), '(features, features_ph)\n', (8265, 8288), False, 'from tensorflow_estimator.python.estimator import estimator_lib\n'), ((8424, 8446), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (8441, 8446), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((9904, 9969), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.int32', 'name': '"""x"""', 'shape': '[None]'}), "(dtype=dtypes.int32, name='x', shape=[None])\n", (9925, 9969), False, 'from tensorflow.python.ops import array_ops\n'), ((10085, 10156), 'tensorflow_estimator.python.estimator.estimator_lib.export.ServingInputReceiver', 'estimator_lib.export.ServingInputReceiver', (['features', "{'x': features_ph}"], {}), "(features, {'x': features_ph})\n", (10126, 10156), False, 'from tensorflow_estimator.python.estimator import 
estimator_lib\n'), ((12006, 12054), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.forward_features', 'extenders.forward_features', (['estimator', 'estimator'], {}), '(estimator, estimator)\n', (12032, 12054), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((12241, 12296), 'tensorflow_estimator.contrib.estimator.python.estimator.extenders.forward_features', 'extenders.forward_features', (['estimator', "['x', estimator]"], {}), "(estimator, ['x', estimator])\n", (12267, 12296), False, 'from tensorflow_estimator.contrib.estimator.python.estimator import extenders\n'), ((14090, 14116), 'tensorflow.python.training.training.get_global_step', 'training.get_global_step', ([], {}), '()\n', (14114, 14116), False, 'from tensorflow.python.training import training\n'), ((14819, 14845), 'tensorflow.python.training.training.get_global_step', 'training.get_global_step', ([], {}), '()\n', (14843, 14845), False, 'from tensorflow.python.training import training\n'), ((2317, 2339), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (2334, 2339), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((2396, 2427), 'tensorflow.python.ops.metrics.mean', 'metrics_lib.mean', (["features['x']"], {}), "(features['x'])\n", (2412, 2427), True, 'from tensorflow.python.ops import metrics as metrics_lib\n'), ((2854, 2876), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (2871, 2876), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((3236, 3258), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (3253, 3258), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((3810, 3832), 'tensorflow.python.feature_column.feature_column.numeric_column', 
'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (3827, 3832), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((4368, 4390), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (4385, 4390), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((4814, 4836), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (4831, 4836), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((5706, 5733), 'tensorflow.python.training.training.MonitoredSession', 'training.MonitoredSession', ([], {}), '()\n', (5731, 5733), False, 'from tensorflow.python.training import training\n'), ((7227, 7249), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (7244, 7249), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((7971, 8016), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32', '[None]'], {}), '(dtypes.float32, [None])\n', (7992, 8016), False, 'from tensorflow.python.ops import array_ops\n'), ((8034, 8077), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int32', '[None]'], {}), '(dtypes.int32, [None])\n', (8055, 8077), False, 'from tensorflow.python.ops import array_ops\n'), ((8120, 8153), 'tensorflow.python.ops.array_ops.expand_dims', 'array_ops.expand_dims', (['tensor', '(-1)'], {}), '(tensor, -1)\n', (8141, 8153), False, 'from tensorflow.python.ops import array_ops\n'), ((10035, 10070), 'tensorflow.contrib.layers.python.layers.layers.dense_to_sparse', 'layers.dense_to_sparse', (['features_ph'], {}), '(features_ph)\n', (10057, 10070), False, 'from tensorflow.contrib.layers.python.layers import layers\n'), ((10729, 10751), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', 
(['"""x"""'], {}), "('x')\n", (10746, 10751), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((11325, 11347), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (11342, 11347), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((11896, 11918), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (11913, 11918), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((12143, 12165), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (12160, 12165), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((12472, 12494), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (12489, 12494), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((13118, 13140), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (13135, 13140), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((13555, 13577), 'tensorflow.python.feature_column.feature_column.numeric_column', 'fc.numeric_column', (['"""x"""'], {}), "('x')\n", (13572, 13577), True, 'from tensorflow.python.feature_column import feature_column as fc\n'), ((2224, 2236), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (2233, 2236), True, 'import numpy as np\n'), ((2255, 2265), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (2262, 2265), True, 'import numpy as np\n'), ((4453, 4480), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[2.0]'], {}), '([2.0])\n', (4473, 4480), False, 'from tensorflow.python.framework import constant_op\n'), ((5034, 5061), 'tensorflow.python.framework.constant_op.constant', 
'constant_op.constant', (['[2.0]'], {}), '([2.0])\n', (5054, 5061), False, 'from tensorflow.python.framework import constant_op\n'), ((5456, 5467), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (5465, 5467), False, 'from tensorflow.python.framework import ops\n'), ((6770, 6873), 'tensorflow.python.framework.sparse_tensor.SparseTensor', 'sparse_tensor.SparseTensor', ([], {'values': '[1, 2, 3]', 'indices': '[[0, 0], [1, 0], [1, 1]]', 'dense_shape': '[2, 2]'}), '(values=[1, 2, 3], indices=[[0, 0], [1, 0], [1, 1\n ]], dense_shape=[2, 2])\n', (6796, 6873), False, 'from tensorflow.python.framework import sparse_tensor\n'), ((9375, 9478), 'tensorflow.python.framework.sparse_tensor.SparseTensor', 'sparse_tensor.SparseTensor', ([], {'values': '[1, 2, 3]', 'indices': '[[0, 0], [1, 0], [1, 1]]', 'dense_shape': '[2, 2]'}), '(values=[1, 2, 3], indices=[[0, 0], [1, 0], [1, 1\n ]], dense_shape=[2, 2])\n', (9401, 9478), False, 'from tensorflow.python.framework import sparse_tensor\n'), ((12916, 13011), 'tensorflow.python.framework.sparse_tensor.SparseTensor', 'sparse_tensor.SparseTensor', ([], {'values': "['1', '2']", 'indices': '[[0, 0], [1, 0]]', 'dense_shape': '[2, 1]'}), "(values=['1', '2'], indices=[[0, 0], [1, 0]],\n dense_shape=[2, 1])\n", (12942, 13011), False, 'from tensorflow.python.framework import sparse_tensor\n'), ((14190, 14217), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[5.0]'], {}), '([5.0])\n', (14210, 14217), False, 'from tensorflow.python.framework import constant_op\n'), ((14240, 14267), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[5.0]'], {}), '([5.0])\n', (14260, 14267), False, 'from tensorflow.python.framework import constant_op\n'), ((14919, 14946), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[5.0]'], {}), '([5.0])\n', (14939, 14946), False, 'from tensorflow.python.framework import constant_op\n'), ((14975, 15002), 
'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[5.0]'], {}), '([5.0])\n', (14995, 15002), False, 'from tensorflow.python.framework import constant_op\n')] |
from .base import Base
import numpy as np
import tensorflow as tf
import time
class MarginDistribution(Base):
    """Margin Distribution complexity measure.

    For each of the first ``till_layer`` layers, computes the classification
    margin (true-class logit minus the highest competing logit) normalized by
    the norm of the gradient of the logits w.r.t. that layer's input, makes it
    scale invariant by dividing by the total variation of the activations, and
    summarizes the per-layer distribution with quantile statistics
    (5/25/50/75/95th percentiles plus Tukey-style upper/lower fences).
    """

    def __init__(self, n_batches, batch_size, is_demogen, dist_norm=2, epsilon=1e-6, till_layer=4, to_logspace=True):
        # Display name reported through the Base class.
        name = "Margin Distribution"
        # p of the norm used for the gradient denominator:
        # 0 -> infinity norm, 1 -> L1, 2 -> L2 (other values raise later).
        self.dist_norm = dist_norm
        # Small constant added under the norm to guard against division by zero.
        self.epsilon = epsilon
        self.till_layer = till_layer # Recommended 4 as in the paper considering only first 4 layers input + 3 hidden layers set to -1 to compute on all layers
        self.batch_size = batch_size
        self.n_batches = n_batches # -1 to use all data
        self.to_logspace = to_logspace # to move features to logspace
        # One feature per (layer, statistic): five percentiles plus two fences.
        # NOTE(review): feature_names uses range(till_layer) directly, so with
        # till_layer == -1 ("all layers") this list is empty — confirm intended.
        feature_names = [
            stat_name + "_" + str(layer)
            for layer in range(till_layer)
            for stat_name in [
                "stats_5",
                "stats_25",
                "stats_50",
                "stats_75",
                "stats_95",
                "upper_fence",
                "lower_fence",
            ]
        ]
        super(MarginDistribution, self).__init__(name, feature_names, is_demogen)

    def extract_features(self, model, dataset):
        """Compute the statistical margin signature of ``model`` on ``dataset``.

        Returns a 1-D numpy array holding 7 statistics per considered layer
        (optionally mapped to log-space). The wall-clock runtime is stored in
        ``self.last_runtime`` as a side effect.
        """
        @tf.function()
        def compute_margin(inputs, labels):
            # Forward pass recording each layer's output, keyed by layer index.
            layer_activations_dict = {}
            x = inputs
            for i, l in enumerate(model.layers):
                x = l(x)
                layer_activations_dict[i] = x
            del x
            with tf.GradientTape(persistent=True) as tape:
                logits = model(inputs, tape=tape)
                num_classes = logits.get_shape().as_list()[1]
                batch_size = tf.shape(logits)[0]
                bs_lin = tf.range(0, batch_size)
                # Gather the ground-truth-class logit for every example.
                indices_true = tf.stop_gradient(tf.transpose(tf.stack([bs_lin, labels])))
                values_true = tf.gather_nd(logits, indices_true)
                values, indices = tf.nn.top_k(logits, k=2)
                # 1.0 where the highest logit matches the ground truth class.
                true_match_float = tf.cast(
                    tf.equal(indices[:, 0], labels), dtype=tf.float32
                )
                # Highest competing logit: the runner-up when the top class is
                # the true one, otherwise the top class itself.
                values_c = values[:, 1] * true_match_float + values[:, 0] * (
                    1 - true_match_float
                )
                true_match = tf.cast(true_match_float, dtype=tf.int32)
                indices_c = indices[:, 1] * true_match + indices[:, 0] * (1 - true_match)
                # d(margin)/d(logits) as the grad_ys seed: +1 on the true
                # class, -1 on the competing class.
                grad_ys = tf.one_hot(labels, num_classes)
                grad_ys -= tf.one_hot(indices_c, num_classes)
                # Numerator of the margin distance (negative for
                # misclassified points).
                # TODO use only positive value not misclassified data points
                # For margin distribution, we only consider distances with
                # positive sign (we ignore all misclassified training points). Such design choice facilitates our empirical
                # analysis when we transform our features
                numerator = values_true - values_c
            dct = {}
            layer_activations = []
            for i, l in enumerate(model.layers):
                # Stop once till_layer layers were collected (-1 means all).
                if self.till_layer != -1 and len(dct) == self.till_layer:
                    break
                try:
                    # Layers are assumed to be wrapped so that their input is
                    # cached on `_last_seen_input` — unwrapped layers fall into
                    # the AttributeError branch below.
                    layer_dims = l._last_seen_input.shape.rank
                    gradient = tape.gradient(logits, l._last_seen_input, grad_ys)
                    if self.dist_norm == 0: # l infinity
                        g_norm = self.epsilon + tf.reduce_max(
                            tf.abs(gradient), axis=np.arange(1, layer_dims)
                        )
                    elif self.dist_norm == 1:
                        g_norm = self.epsilon + tf.reduce_sum(
                            tf.abs(gradient), axis=np.arange(1, layer_dims)
                        )
                    elif self.dist_norm == 2:
                        g_norm = tf.sqrt(
                            self.epsilon
                            + tf.reduce_sum(
                                gradient * gradient, axis=np.arange(1, layer_dims)
                            )
                        )
                    else:
                        raise ValueError("only norms supported are 1, 2, and infinity")
                    # Per-example margin normalized by the gradient norm.
                    dct[i] = numerator / g_norm
                    layer_activations.append(
                        tf.reshape(layer_activations_dict[i], (batch_size, -1))
                    )
                except AttributeError: # no _last_seen_input, layer not wrapped (ex: flatten)
                    dct[i] = None
            return dct, layer_activations

        start_time = time.time()
        data_batches = dataset.batch(self.batch_size, drop_remainder=True)
        # Maps layer index -> list of per-batch normalized margin tensors.
        per_layer_dict = {}
        for i, data in enumerate(data_batches):
            x, y = self.get_input_target(data)
            margin_dist, layer_activations = compute_margin(x, y)
            # Flatten and concatenate all recorded activations per example.
            all_activations = np.concatenate(
                [np.squeeze(activation.numpy()) for activation in layer_activations],
                axis=1,
            )
            response_flat = all_activations.reshape([all_activations.shape[0], -1])
            response_std = np.std(response_flat, axis=0)
            # Total variation = sqrt(sum of per-feature variances).
            total_variation = (np.sum(response_std ** 2)) ** 0.5
            # make the margin dist scale invariant by dividing on to total variation
            layers_norm = [v / total_variation for v in margin_dist.values()]
            for layer_indx, layer in enumerate(layers_norm):
                if layer_indx not in per_layer_dict:
                    per_layer_dict[layer_indx] = []
                per_layer_dict[layer_indx].append(layer)
            # n_batches == -1 never triggers this, i.e. all batches are used.
            if i == self.n_batches:
                break
        # computing statistical signature: quantiles + fences per layer
        stats_signature = []
        for layer in per_layer_dict:
            layer_norm_margin = np.concatenate(per_layer_dict[layer], axis=0)
            quartiles = np.percentile(layer_norm_margin, [5, 25, 50, 75, 95])
            inter_quartile = quartiles[-2] - quartiles[1] # Q3 - Q1
            upper_fence = quartiles[-2] + 1.5 * inter_quartile
            lower_fence = quartiles[1] - 1.5 * inter_quartile
            signature = np.append(quartiles, [upper_fence, lower_fence])
            stats_signature.append(signature)
        stats_signature = np.concatenate(stats_signature)
        if self.to_logspace:
            # Log of absolute values; zeros would map to -inf here.
            stats_signature = np.log(np.abs(stats_signature))
        self.last_runtime = time.time() - start_time
        return stats_signature
| [
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.GradientTape",
"tensorflow.cast",
"numpy.arange",
"numpy.concatenate",
"tensorflow.stack",
"tensorflow.one_hot",
"numpy.abs",
"tensorflow.range",
"tensorflow.function",
"tensorflow.reshape",
"numpy.std",
"time.time",
"tensorflow.nn.top_... | [((1165, 1178), 'tensorflow.function', 'tf.function', ([], {}), '()\n', (1176, 1178), True, 'import tensorflow as tf\n'), ((4613, 4624), 'time.time', 'time.time', ([], {}), '()\n', (4622, 4624), False, 'import time\n'), ((6314, 6345), 'numpy.concatenate', 'np.concatenate', (['stats_signature'], {}), '(stats_signature)\n', (6328, 6345), True, 'import numpy as np\n'), ((1657, 1680), 'tensorflow.range', 'tf.range', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (1665, 1680), True, 'import tensorflow as tf\n'), ((1793, 1827), 'tensorflow.gather_nd', 'tf.gather_nd', (['logits', 'indices_true'], {}), '(logits, indices_true)\n', (1805, 1827), True, 'import tensorflow as tf\n'), ((1858, 1882), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['logits'], {'k': '(2)'}), '(logits, k=2)\n', (1869, 1882), True, 'import tensorflow as tf\n'), ((2344, 2385), 'tensorflow.cast', 'tf.cast', (['true_match_float'], {'dtype': 'tf.int32'}), '(true_match_float, dtype=tf.int32)\n', (2351, 2385), True, 'import tensorflow as tf\n'), ((2494, 2525), 'tensorflow.one_hot', 'tf.one_hot', (['labels', 'num_classes'], {}), '(labels, num_classes)\n', (2504, 2525), True, 'import tensorflow as tf\n'), ((2549, 2583), 'tensorflow.one_hot', 'tf.one_hot', (['indices_c', 'num_classes'], {}), '(indices_c, num_classes)\n', (2559, 2583), True, 'import tensorflow as tf\n'), ((5172, 5201), 'numpy.std', 'np.std', (['response_flat'], {'axis': '(0)'}), '(response_flat, axis=0)\n', (5178, 5201), True, 'import numpy as np\n'), ((5851, 5896), 'numpy.concatenate', 'np.concatenate', (['per_layer_dict[layer]'], {'axis': '(0)'}), '(per_layer_dict[layer], axis=0)\n', (5865, 5896), True, 'import numpy as np\n'), ((5921, 5974), 'numpy.percentile', 'np.percentile', (['layer_norm_margin', '[5, 25, 50, 75, 95]'], {}), '(layer_norm_margin, [5, 25, 50, 75, 95])\n', (5934, 5974), True, 'import numpy as np\n'), ((6193, 6241), 'numpy.append', 'np.append', (['quartiles', '[upper_fence, lower_fence]'], {}), 
'(quartiles, [upper_fence, lower_fence])\n', (6202, 6241), True, 'import numpy as np\n'), ((6465, 6476), 'time.time', 'time.time', ([], {}), '()\n', (6474, 6476), False, 'import time\n'), ((1441, 1473), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (1456, 1473), True, 'import tensorflow as tf\n'), ((1616, 1632), 'tensorflow.shape', 'tf.shape', (['logits'], {}), '(logits)\n', (1624, 1632), True, 'import tensorflow as tf\n'), ((2009, 2040), 'tensorflow.equal', 'tf.equal', (['indices[:, 0]', 'labels'], {}), '(indices[:, 0], labels)\n', (2017, 2040), True, 'import tensorflow as tf\n'), ((5233, 5258), 'numpy.sum', 'np.sum', (['(response_std ** 2)'], {}), '(response_std ** 2)\n', (5239, 5258), True, 'import numpy as np\n'), ((6412, 6435), 'numpy.abs', 'np.abs', (['stats_signature'], {}), '(stats_signature)\n', (6418, 6435), True, 'import numpy as np\n'), ((1738, 1764), 'tensorflow.stack', 'tf.stack', (['[bs_lin, labels]'], {}), '([bs_lin, labels])\n', (1746, 1764), True, 'import tensorflow as tf\n'), ((4342, 4397), 'tensorflow.reshape', 'tf.reshape', (['layer_activations_dict[i]', '(batch_size, -1)'], {}), '(layer_activations_dict[i], (batch_size, -1))\n', (4352, 4397), True, 'import tensorflow as tf\n'), ((3511, 3527), 'tensorflow.abs', 'tf.abs', (['gradient'], {}), '(gradient)\n', (3517, 3527), True, 'import tensorflow as tf\n'), ((3534, 3558), 'numpy.arange', 'np.arange', (['(1)', 'layer_dims'], {}), '(1, layer_dims)\n', (3543, 3558), True, 'import numpy as np\n'), ((3722, 3738), 'tensorflow.abs', 'tf.abs', (['gradient'], {}), '(gradient)\n', (3728, 3738), True, 'import tensorflow as tf\n'), ((3745, 3769), 'numpy.arange', 'np.arange', (['(1)', 'layer_dims'], {}), '(1, layer_dims)\n', (3754, 3769), True, 'import numpy as np\n'), ((4028, 4052), 'numpy.arange', 'np.arange', (['(1)', 'layer_dims'], {}), '(1, layer_dims)\n', (4037, 4052), True, 'import numpy as np\n')] |
import sys
import numpy
import PIL
from PIL import Image, ImageDraw

# ASCII ramp ordered from the darkest to the lightest glyph; a pixel's gray
# level is mapped to an index into this list.
chars = ["\"", "`", "^", "\\", ":", ";", "I", "l", "!", "i", "~", "+", "_", "-", "?", "]", "[", "}", "{", "1", ")", "(", "|", "/", "t", "f", "j", "r", "x", "n", "u", "v", "c", "z", "X", "Y", "U", "J", "C", "L", "Q", "0", "O", "Z", "m", "w", "q", "p", "d", "b", "k", "h", "a", "o", "*", "#", "M", "W", "&", "8", "%", "B", "@", "$"]
maxsize = (150, 150)  # downscale bound so the ASCII output stays manageable
success = True
print("Starting...")
try:
    img_path = sys.argv[1]  # image path from CLI argument
except IndexError:
    print("Image not given.\n")
    print("Usage: \"python main.py path/to/img\"")
    success = False
if success:
    try:
        img = Image.open(img_path).convert('LA')  # grayscale + alpha
    except PIL.UnidentifiedImageError:
        print("Error reading image. Probably given file is not an image.")
        success = False
    except FileNotFoundError:
        print("Image not found.")
        success = False
if success:
    # BUG FIX: Image.ANTIALIAS was deprecated and removed in Pillow 10;
    # LANCZOS is the same resampling filter and exists in older Pillow too.
    img.thumbnail(maxsize, Image.LANCZOS)
    img.show()
    img_array = numpy.asarray(img)
    ascii_art = ""
    print("Converting to ASCII art...")
    # go through each pixel and pick the char matching its gray level
    for row in img_array:
        for pixel in row:
            correspondent_char = chars[int(pixel[0] / (255 / len(chars))) - 1]
            ascii_art += correspondent_char + " "
        ascii_art += "\n"
    print("Saving result...")
    # create an image sized to fit the rendered text (6x14 px per glyph cell)
    img_out = Image.new('RGB', (6 * len(ascii_art.split("\n")[0]), 14 * len(ascii_art.split("\n"))), color = (0, 0, 0))
    # draw text in image and save file
    d = ImageDraw.Draw(img_out)
    d.text((0, 0), ascii_art, fill = (255, 255, 255))
    img_out.save("../data/out.png")
    img_out.show()
    # write text to txt file; the context manager guarantees the handle closes
    with open("../data/out.txt", "w") as f:
        f.write(ascii_art)
    print("Done")
| [
"PIL.ImageDraw.Draw",
"PIL.Image.open",
"numpy.asarray"
] | [((1171, 1189), 'numpy.asarray', 'numpy.asarray', (['img'], {}), '(img)\n', (1184, 1189), False, 'import numpy\n'), ((1850, 1873), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_out'], {}), '(img_out)\n', (1864, 1873), False, 'from PIL import Image, ImageDraw\n'), ((739, 759), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (749, 759), False, 'from PIL import Image, ImageDraw\n')] |
from copy import deepcopy
import time
import numpy as np
from typing import Dict, Any, Tuple, List
from flatland.core.grid.grid4_utils import get_new_position
from flatland.envs.rail_env import RailEnv, EnvAgent, Grid4TransitionsEnum, RailAgentStatus
from flatland.utils.rendertools import AgentRenderVariant, RenderTool
from configs import configurator as Configs
from utils.action import HighLevelAction
from utils.obs_node import Node
###
class RailEnvWrapper:
    """Thin adapter around flatland's ``RailEnv``.

    Flattens tree observations into normalized numpy arrays, converts them
    (together with a potential-field heuristic) into per-agent rewards,
    translates high-level actions into low-level flatland actions, and
    optionally drives the on-screen emulator.
    """

    def __init__(self, observator, rail_generator, schedule_generator, malfunction_generator):
        self._info = None  # last `info` dict returned by the underlying env
        self._done = None  # last `done` dict returned by the underlying env
        self._observator = observator
        self._rail_generator = rail_generator
        self._schedule_generator = schedule_generator
        self._malfunction_generator = malfunction_generator
        self._rail_env = RailEnv(
            width=Configs.RAIL_ENV_MAP_WIDTH,
            height=Configs.RAIL_ENV_MAP_HEIGHT,
            rail_generator=self._rail_generator,
            schedule_generator=self._schedule_generator,
            number_of_agents=Configs.N_AGENTS,
            obs_builder_object=self._observator,
            # malfunction_generator_and_process_data=None,
            malfunction_generator=self._malfunction_generator,
            remove_agents_at_target=Configs.RAIL_ENV_REMOVE_AGENTS_AT_TARGET,
            # record_steps=False,
            # close_following=True
        )
        if Configs.EMULATOR_ACTIVE is True:
            self._emulator = RenderTool(
                self._rail_env,
                show_debug=Configs.DEBUG,
                screen_width=Configs.EMULATOR_WINDOW_WIDTH,
                screen_height=Configs.EMULATOR_WINDOW_HEIGHT,
                agent_render_variant=AgentRenderVariant.AGENT_SHOWS_OPTIONS_AND_BOX,
            )

    ###

    def is_episode_finished(self) -> bool:
        """Return True once the env reported ``done['__all__'] is True``.

        BUG FIX: the original tested ``dict is not None`` (always True --
        ``dict`` is the builtin type); it now checks ``self._done``.
        """
        return self._done is not None and isinstance(self._done, dict) and self._done['__all__'] is True

    def get_info(self) -> dict:
        """Last `info` dict produced by :meth:`reset`/:meth:`step` (or None)."""
        return self._info

    def get_done(self) -> Dict[Any, bool]:
        """Last `done` dict produced by :meth:`step` (or None before any step)."""
        return self._done

    ###

    @property
    def n_agents(self) -> int:
        # configured agent count, not the live env's count
        return Configs.N_AGENTS

    def get_grid(self) -> np.ndarray:
        return self._rail_env.rail.grid

    def get_agent(self, agent_index: int) -> EnvAgent:
        return self._rail_env.agents[agent_index]

    def get_agent_position(self, agent: EnvAgent) -> Tuple[int, int]:
        """Return the agent's grid position, or None when it was removed.

        Subtleties:
        - with status READY_TO_DEPART the agent already receives observations
          and takes decisions while ``agent.position`` is still None, so the
          initial position is returned instead;
        - ``agent.position`` is also None for DONE_REMOVED; the two cases are
          distinguished here (None vs initial position).
        """
        if agent.status == RailAgentStatus.READY_TO_DEPART:
            return agent.initial_position
        elif agent.status == RailAgentStatus.DONE_REMOVED:
            return None  # TODO: reason about this ...
        else:
            return agent.position

    def get_agent_direction(self, agent: EnvAgent) -> Grid4TransitionsEnum:
        """Return the agent's heading (initial heading before departure)."""
        if agent.status == RailAgentStatus.READY_TO_DEPART:
            return agent.initial_direction
        elif agent.status == RailAgentStatus.DONE_REMOVED:
            return None  # TODO: reason about this ...
        else:
            return agent.direction

    def get_agent_transitions(self, agent: EnvAgent) -> Tuple[bool]:
        """Allowed moves (N, E, S, W) from the agent's cell and heading."""
        position = self.get_agent_position(agent)
        direction = self.get_agent_direction(agent)
        if position is None or direction is None:
            return [False, False, False, False]
        ### this considers also the agent direction
        transitions = self._rail_env.rail.get_transitions(*position, direction)
        return tuple([x == 1 for x in list(transitions)])

    ###

    def reset(self):
        """Reset env (and emulator) and return flattened per-agent observations."""
        if Configs.EMULATOR_ACTIVE is True:
            self._emulator.reset()
        observations, self._info = self._rail_env.reset()
        self._rail_env._max_episode_steps = None  # disable the step limit
        self._info['action_required2'] = {
            agent_id: self.action_required(agent_id)
            for agent_id in range(self._rail_env.get_num_agents())
        }
        obs = {
            agent_id: observations.get(agent_id).get_subtree_array()
            for agent_id in observations
        }
        return obs

    def step(self, high_actions: Dict[int, int]) -> Tuple[Dict[int, Node], Dict[int, float]]:
        """Translate high-level actions, advance the env one step, and return
        (processed observations, rewards, done, info)."""
        low_actions = self.processor_action(high_actions)
        observations, rewards, done, info = self._rail_env.step(low_actions)
        self._done = deepcopy(done)
        observations, rewards, self._info = self.processor_step(observations, info)
        if Configs.EMULATOR_ACTIVE is True:
            self._emulator.render_env(show=True, show_observations=True, show_predictions=False)
            time.sleep(Configs.EMULATOR_STEP_TIMEBREAK_SECONDS)
        return observations, rewards, self._done, self._info

    def processor_step(self, obs, info, attr_list=None):
        """Flatten each agent's observation tree into a normalized array and
        compute a potential-field reward per agent.

        BUG FIX: the mutable default ``attr_list=[]`` was replaced with a
        ``None`` sentinel (behavior unchanged for existing callers).

        Returns (obs, rewards, info): obs maps agent_id -> np.ndarray;
        rewards maps agent_id -> float in [-1, 1] (agents without an
        observation get the maximum reward); info gains 'action_required2'.
        """
        attr_list = [] if attr_list is None else attr_list
        rewards = {}
        for agent_id in range(len(obs)):
            obs_node = obs.get(agent_id)
            TARGET_MASS = 1000
            if obs_node is None:
                # no observation (e.g. agent removed at target): max reward
                rewards[agent_id] = TARGET_MASS * 2
                continue
            ########################## OBSERVATION PREPARATION ##########################
            # attr_list is supposed to be a list of str (attribute names)
            # only the first node is supposed to have only one child
            if not obs_node.left_child:
                assert obs_node.right_child is not None
                first_val = [1]
                subtree_list = [obs_node.right_child.get_attribute_dict(attr_list)]
                last = [obs_node.right_child]
            else:
                first_val = [0]
                subtree_list = [obs_node.get_attribute_dict(attr_list)]
                last = [obs_node]
            ############################ REWARD PREPARATION ############################
            reward = 0
            AGENT_MASS = 1
            MAX_REWARD = 2 * TARGET_MASS
            agent = self.get_agent(agent_id)
            if agent.status == RailAgentStatus.DONE:
                reward += TARGET_MASS * 2
            # attractive force pulls toward the target (inverse-square falloff)
            if last[0].dist_min_to_target == 0:
                attractive_force = TARGET_MASS * 2
            else:
                attractive_force = TARGET_MASS / (
                    last[0].dist_min_to_target * last[0].dist_min_to_target
                )
            repulsive_force = 0
            unusuable_stiches = [0, 0]
            ############################# NODE EXPLORATION #############################
            # Breadth-first walk over the observation tree: collect per-node
            # attribute dicts and accumulate attractive/repulsive forces.
            # if no attr_list is given, all numerical attributes are given
            visited = []
            prob = 1
            while True:
                # the loop is repeated for each depth of tree, starting from 0
                prob /= 2
                for i in range(len(last)):
                    node = last[i]
                    child_list = [
                        child for child in node.get_childs() if child
                    ]  # get_childs() returns forward and turn child even if they are None
                    # observation process
                    l = [child.get_attribute_dict(attr_list) for child in child_list]
                    subtree_list += l
                    # update attractive force
                    if node.dist_min_to_target == 0:
                        attractive_force += TARGET_MASS * 2 * prob
                    # update repulsive force
                    agent_dist = unusuable_stiches[i] + node.dist_unusable_switch
                    if agent_dist == 0:
                        repulsive_force = AGENT_MASS * 100
                    else:
                        tot_mass = node.num_agents_opposite_direction * AGENT_MASS
                        repulsive_force += tot_mass / (agent_dist * agent_dist)
                    # update agent "distances"
                    unusuable_stiches[i] += node.tot_unusable_switch
                    visited += child_list
                if not visited:
                    break
                last = visited
                visited = []
                unusuable_stiches = [
                    unusuable_stiches[i // 2] for i in range(len(unusuable_stiches) * 2)
                ]
            ########################### OBSERVATION NORMALIZATION ###########################
            node_list = []
            test_count = 0
            assert len(subtree_list) == Configs.OBS_TREE_N_NODES
            for node in subtree_list:
                normalization_dict = self.get_normalization_dict(node)
                assert len(node) == Node.get_n_of_features()
                for attr in node:
                    test_count += 1
                    if node[attr] == np.inf:
                        # inf means "not observed": clamp to the normalizer
                        node[attr] = normalization_dict[attr]
                    node_list.append(node[attr] / normalization_dict[attr])
            assert len(node_list) == test_count
            assert test_count <= (Node.get_n_of_features() * Configs.OBS_TREE_N_NODES)
            assert len(node_list) == (Node.get_n_of_features() * Configs.OBS_TREE_N_NODES)
            # node_list = first_val + node_list  # INFO: @bug
            if len(node_list) != self._observator.get_observations_len():
                print(
                    '\nnumber of node features:', Node.get_n_of_features(),
                    '\nnumber of nodes per obs:', Configs.OBS_TREE_N_NODES, '\nobs len:',
                    len(node_list), '\nexpected len:',
                    Node.get_n_of_features() * Configs.OBS_TREE_N_NODES
                )
            assert len(node_list) == self._observator.get_observations_len()
            obs[agent_id] = np.array(node_list)
            node_list = []
            ############################# REWARD NORMALIZATION #############################
            if attractive_force > MAX_REWARD:
                attractive_force = MAX_REWARD
            if repulsive_force < - MAX_REWARD:
                repulsive_force = - MAX_REWARD
            reward += attractive_force - repulsive_force
            reward /= MAX_REWARD
            rewards[agent_id] = reward
            assert reward <= 1 and reward >= -1
        info['action_required2'] = {
            agent_id: self.action_required(agent_id)
            for agent_id in range(self._rail_env.get_num_agents())
        }
        return obs, rewards, info

    def get_normalization_dict(self, node_dict):
        """Per-attribute divisors used to scale a node's features into ~[0, 1]."""
        branch_length = node_dict.get("dist_to_next_branch") or 1
        max_n_agents = node_dict.get("num_agents_same_direction"
                                     ) + node_dict.get("num_agents_opposite_direction") or 1
        normalization_dict = {
            "dist_own_target_encountered": branch_length,
            "dist_other_target_encountered": branch_length,
            "dist_other_agent_encountered": branch_length,
            "dist_potential_conflict": branch_length,
            "dist_unusable_switch": node_dict.get("tot_unusable_switch") or 1,
            "tot_unusable_switch": branch_length,
            "dist_to_next_branch": Configs.RAIL_ENV_MAP_WIDTH + Configs.RAIL_ENV_MAP_HEIGHT,
            "dist_min_to_target": Configs.RAIL_ENV_MAP_WIDTH + Configs.RAIL_ENV_MAP_HEIGHT,
            "target_reached": 1,
            "num_agents_same_direction": branch_length,
            "num_agents_opposite_direction": branch_length,
            "num_agents_malfunctioning": max_n_agents,
            "speed_min_fractional": 1,
            "num_agents_ready_to_depart": max_n_agents,
            "pos_x": Configs.RAIL_ENV_MAP_WIDTH,
            "pos_y": Configs.RAIL_ENV_MAP_HEIGHT,
        }
        return normalization_dict

    def processor_action(self, high_actions):
        """Convert {agent_idx: high-level action} into low-level flatland actions."""
        low_actions = {}
        for (agent_idx, high_action) in high_actions.items():
            high_action = HighLevelAction(high_action)
            agent = self.get_agent(agent_idx)
            direction = self.get_agent_direction(agent)
            transitions = self.get_agent_transitions(agent)
            low_action = high_action.to_low_level(direction, transitions)
            low_actions.update({agent_idx: low_action})
        return low_actions

    def action_required(self, idx_agent):
        """True when the agent sits on (or directly faces) a switch, i.e. when a
        decision actually matters this step."""
        get_transitions = self._rail_env.rail.get_transitions
        agent = self.get_agent(idx_agent)
        if agent.status == RailAgentStatus.DONE:
            return True
        elif agent.status == RailAgentStatus.DONE_REMOVED:
            return False
        pos = self.get_agent_position(agent)
        direction = self.get_agent_direction(agent)
        t = get_transitions(*pos, direction)
        # if more than one transition possible we are in switch
        if np.count_nonzero(t) > 1:
            return True
        # if here, then we are in a straight cell
        # check if next is a switch
        direction = t.index(1)
        pos = get_new_position(pos, direction)
        t = get_transitions(*pos, direction)
        # if more than one transition possible we are in switch
        if np.count_nonzero(t) > 1:
            return True
        return False
| [
"utils.action.HighLevelAction",
"utils.obs_node.Node.get_n_of_features",
"time.sleep",
"numpy.count_nonzero",
"numpy.array",
"flatland.envs.rail_env.RailEnv",
"copy.deepcopy",
"flatland.utils.rendertools.RenderTool",
"flatland.core.grid.grid4_utils.get_new_position"
] | [((843, 1215), 'flatland.envs.rail_env.RailEnv', 'RailEnv', ([], {'width': 'Configs.RAIL_ENV_MAP_WIDTH', 'height': 'Configs.RAIL_ENV_MAP_HEIGHT', 'rail_generator': 'self._rail_generator', 'schedule_generator': 'self._schedule_generator', 'number_of_agents': 'Configs.N_AGENTS', 'obs_builder_object': 'self._observator', 'malfunction_generator': 'self._malfunction_generator', 'remove_agents_at_target': 'Configs.RAIL_ENV_REMOVE_AGENTS_AT_TARGET'}), '(width=Configs.RAIL_ENV_MAP_WIDTH, height=Configs.\n RAIL_ENV_MAP_HEIGHT, rail_generator=self._rail_generator,\n schedule_generator=self._schedule_generator, number_of_agents=Configs.\n N_AGENTS, obs_builder_object=self._observator, malfunction_generator=\n self._malfunction_generator, remove_agents_at_target=Configs.\n RAIL_ENV_REMOVE_AGENTS_AT_TARGET)\n', (850, 1215), False, 'from flatland.envs.rail_env import RailEnv, EnvAgent, Grid4TransitionsEnum, RailAgentStatus\n'), ((4900, 4914), 'copy.deepcopy', 'deepcopy', (['done'], {}), '(done)\n', (4908, 4914), False, 'from copy import deepcopy\n'), ((14681, 14713), 'flatland.core.grid.grid4_utils.get_new_position', 'get_new_position', (['pos', 'direction'], {}), '(pos, direction)\n', (14697, 14713), False, 'from flatland.core.grid.grid4_utils import get_new_position\n'), ((1501, 1721), 'flatland.utils.rendertools.RenderTool', 'RenderTool', (['self._rail_env'], {'show_debug': 'Configs.DEBUG', 'screen_width': 'Configs.EMULATOR_WINDOW_WIDTH', 'screen_height': 'Configs.EMULATOR_WINDOW_HEIGHT', 'agent_render_variant': 'AgentRenderVariant.AGENT_SHOWS_OPTIONS_AND_BOX'}), '(self._rail_env, show_debug=Configs.DEBUG, screen_width=Configs.\n EMULATOR_WINDOW_WIDTH, screen_height=Configs.EMULATOR_WINDOW_HEIGHT,\n agent_render_variant=AgentRenderVariant.AGENT_SHOWS_OPTIONS_AND_BOX)\n', (1511, 1721), False, 'from flatland.utils.rendertools import AgentRenderVariant, RenderTool\n'), ((5154, 5205), 'time.sleep', 'time.sleep', (['Configs.EMULATOR_STEP_TIMEBREAK_SECONDS'], {}), 
'(Configs.EMULATOR_STEP_TIMEBREAK_SECONDS)\n', (5164, 5205), False, 'import time\n'), ((11453, 11472), 'numpy.array', 'np.array', (['node_list'], {}), '(node_list)\n', (11461, 11472), True, 'import numpy as np\n'), ((13623, 13651), 'utils.action.HighLevelAction', 'HighLevelAction', (['high_action'], {}), '(high_action)\n', (13638, 13651), False, 'from utils.action import HighLevelAction\n'), ((14499, 14518), 'numpy.count_nonzero', 'np.count_nonzero', (['t'], {}), '(t)\n', (14515, 14518), True, 'import numpy as np\n'), ((14836, 14855), 'numpy.count_nonzero', 'np.count_nonzero', (['t'], {}), '(t)\n', (14852, 14855), True, 'import numpy as np\n'), ((10182, 10206), 'utils.obs_node.Node.get_n_of_features', 'Node.get_n_of_features', ([], {}), '()\n', (10204, 10206), False, 'from utils.obs_node import Node\n'), ((10651, 10675), 'utils.obs_node.Node.get_n_of_features', 'Node.get_n_of_features', ([], {}), '()\n', (10673, 10675), False, 'from utils.obs_node import Node\n'), ((11008, 11032), 'utils.obs_node.Node.get_n_of_features', 'Node.get_n_of_features', ([], {}), '()\n', (11030, 11032), False, 'from utils.obs_node import Node\n'), ((11277, 11301), 'utils.obs_node.Node.get_n_of_features', 'Node.get_n_of_features', ([], {}), '()\n', (11299, 11301), False, 'from utils.obs_node import Node\n'), ((10559, 10583), 'utils.obs_node.Node.get_n_of_features', 'Node.get_n_of_features', ([], {}), '()\n', (10581, 10583), False, 'from utils.obs_node import Node\n')] |
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
# 80/20 train/test split of the Boston housing data (fixed seed for reproducibility).
X_train,X_test,y_train,y_test = train_test_split(load_boston().data, load_boston().target, test_size=0.2, random_state=42)
from sklearn.preprocessing import StandardScaler
# Standardize features: fit the scaler on the training split only, then apply
# the same transform to the test split (avoids leaking test statistics).
std = StandardScaler()
X_train = std.fit_transform(X_train)
X_test = std.transform(X_test)
class LinearRegression:
    """Linear regression fitted by per-sample stochastic gradient descent.

    Parameters
    -------------
    learning_rate: float
        The step length that will be used when updating weights.
    iterations: int
        The number of training epochs (full passes over the data).
        (Docstring previously called this ``n_iterations`` although the
        parameter is ``iterations``.)
    """

    def __init__(self, learning_rate=0.01, iterations=100):
        self.learning_rate = learning_rate
        self.iterations = iterations
        self.weights = None  # coefficients, set by fit()
        self.bias = None     # intercept, set by fit()

    def predict(self, X_test):
        """Return predictions y = X.w + b for the given samples."""
        return np.dot(X_test, self.weights) + self.bias  # y = mx + b (line equation)

    def fit(self, X, y):
        """Fit weights and bias with SGD; prints the full-data loss each epoch.

        BUG FIX: the original inner loop recomputed the *full-batch* gradient
        once per sample (O(N^2) work per epoch) despite its own per-sample
        comments; it now performs a true per-sample SGD update.
        """
        self.weights = np.ones(X.shape[1])  # coefficients
        self.bias = np.zeros(1)            # intercept
        N = len(X)  # total number of samples
        for epoch in range(1, self.iterations + 1):  # iterating over each epoch
            for i in range(N):  # iterating over each sample
                # gradient of the squared error of sample i only
                y_pred_i = np.dot(X[i], self.weights) + self.bias
                error = y[i] - y_pred_i
                dw = -2.0 * X[i] * error
                db = -2.0 * error
                self.weights = self.weights - self.learning_rate * dw
                self.bias = self.bias - self.learning_rate * db
            # report the mean squared error over the whole training set
            y_predicted = np.dot(X, self.weights) + self.bias
            loss = np.mean(np.square(y - y_predicted))
            print("Epoch:{} Loss:{}".format(epoch, loss))
# Train on the standardized training split and evaluate on the held-out split.
model = LinearRegression()
model.fit(X_train,y_train)
predictions = model.predict(X_test)
mse = mean_squared_error(y_test, predictions)
print("MSE:", mse) | [
"numpy.ones",
"sklearn.datasets.load_boston",
"sklearn.metrics.mean_squared_error",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"numpy.square"
] | [((348, 364), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (362, 364), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2087, 2126), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (2105, 2126), False, 'from sklearn.metrics import mean_squared_error\n'), ((215, 228), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (226, 228), False, 'from sklearn.datasets import load_boston\n'), ((235, 248), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (246, 248), False, 'from sklearn.datasets import load_boston\n'), ((1127, 1152), 'numpy.ones', 'np.ones', ([], {'shape': 'X.shape[1]'}), '(shape=X.shape[1])\n', (1134, 1152), True, 'import numpy as np\n'), ((1191, 1208), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1)'}), '(shape=1)\n', (1199, 1208), True, 'import numpy as np\n'), ((977, 1005), 'numpy.dot', 'np.dot', (['X_test', 'self.weights'], {}), '(X_test, self.weights)\n', (983, 1005), True, 'import numpy as np\n'), ((1502, 1525), 'numpy.dot', 'np.dot', (['X', 'self.weights'], {}), '(X, self.weights)\n', (1508, 1525), True, 'import numpy as np\n'), ((1586, 1614), 'numpy.dot', 'np.dot', (['X.T', '(y - y_predicted)'], {}), '(X.T, y - y_predicted)\n', (1592, 1614), True, 'import numpy as np\n'), ((1649, 1672), 'numpy.sum', 'np.sum', (['(y - y_predicted)'], {}), '(y - y_predicted)\n', (1655, 1672), True, 'import numpy as np\n'), ((1882, 1908), 'numpy.square', 'np.square', (['(y - y_predicted)'], {}), '(y - y_predicted)\n', (1891, 1908), True, 'import numpy as np\n')] |
"""
Laplacian matrix of graphs.
"""
# Copyright (C) 2004-2013 by
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import require, not_implemented_for
__author__ = "\n".join(['<NAME> <<EMAIL>>',
'<NAME> (<EMAIL>)',
'<NAME> (<EMAIL>)',
'<NAME> <<EMAIL>>'])
__all__ = ['laplacian_matrix',
'normalized_laplacian_matrix',
'directed_laplacian_matrix',
]
def laplacian_matrix(G, nodelist=None, weight='weight'):
    """Return the Laplacian matrix of G.

    The graph Laplacian is the matrix L = D - A, where A is the adjacency
    matrix and D is the diagonal matrix of node degrees.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in nodelist.
       If nodelist is None, then the ordering is produced by G.nodes().

    weight : string or None, optional (default='weight')
       The edge data key used to compute each value in the matrix.
       If None, then each edge has weight 1.

    Returns
    -------
    L : NumPy array
      Laplacian of G.

    Notes
    -----
    For MultiGraph/MultiDiGraph, the edges weights are summed.
    See to_numpy_matrix for other options.

    See Also
    --------
    to_numpy_matrix
    normalized_laplacian_matrix
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError(
            "laplacian_matrix() requires numpy: http://scipy.org/ ")
    if G.is_multigraph():
        # dense route: degree matrix minus the (summed-weight) adjacency array
        adj = np.asarray(nx.to_numpy_matrix(G, nodelist=nodelist, weight=weight))
        degrees = np.identity(adj.shape[0]) * np.sum(adj, axis=1)
        return degrees - adj
    # Graph or DiGraph: fill the matrix row by row, which is faster here
    if nodelist is None:
        nodelist = G.nodes()
    size = len(nodelist)
    position = {node: i for i, node in enumerate(nodelist)}
    lap = np.zeros((size, size))
    for row, u in enumerate(nodelist):
        degree = 0.0
        for v, data in G[u].items():
            col = position.get(v)
            if col is None:
                # neighbor not in nodelist: skip it entirely
                continue
            w = data.get(weight, 1)
            lap[row, col] = -w
            degree += w
        # the diagonal holds the (weighted) degree, overwriting any self-loop
        lap[row, row] = degree
    return lap
def normalized_laplacian_matrix(G, nodelist=None, weight='weight'):
    r"""Return the normalized Laplacian matrix of G.

    The normalized graph Laplacian is the matrix

    .. math::

        NL = D^{-1/2} L D^{-1/2}

    where `L` is the graph Laplacian and `D` is the diagonal matrix of
    node degrees.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in nodelist.
       If nodelist is None, then the ordering is produced by G.nodes().

    weight : string or None, optional (default='weight')
       The edge data key used to compute each value in the matrix.
       If None, then each edge has weight 1.

    Returns
    -------
    L : NumPy array
      Normalized Laplacian of G.

    Notes
    -----
    For MultiGraph/MultiDiGraph, the edges weights are summed.
    See to_numpy_matrix for other options.

    If the Graph contains selfloops, D is defined as diag(sum(A,1)), where A is
    the adjacency matrix [2]_.

    See Also
    --------
    laplacian_matrix

    References
    ----------
    .. [1] Fan Chung-Graham, Spectral Graph Theory,
       CBMS Regional Conference Series in Mathematics, Number 92, 1997.
    .. [2] Steve Butler, Interlacing For Weighted Graphs Using The Normalized
       Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98,
       March 2007.
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError(
            "normalized_laplacian_matrix() requires numpy: http://scipy.org/ ")
    # Multigraphs and selfloop-free graphs share the same construction: take
    # the combinatorial Laplacian and read the degrees off its diagonal.
    if G.is_multigraph() or G.number_of_selfloops() == 0:
        L = laplacian_matrix(G, nodelist=nodelist, weight=weight)
        D = np.diag(L)
    else:
        # with selfloops the degrees are the adjacency row sums instead
        A = np.array(nx.adj_matrix(G))
        D = np.sum(A, 1)
        L = np.diag(D) - A
    # D^{-1/2}; isolated nodes divide by zero -> inf, which is zeroed below
    with np.errstate(divide='ignore'):
        Disqrt = np.diag(1 / np.sqrt(D))
    Disqrt[np.isinf(Disqrt)] = 0
    return np.dot(Disqrt, np.dot(L, Disqrt))
###############################################################################
# Code based on
# https://bitbucket.org/bedwards/networkx-community/src/370bd69fc02f/networkx/algorithms/community/
@require('numpy')
@not_implemented_for('undirected')
@not_implemented_for('multigraph')
def directed_laplacian_matrix(G, nodelist=None, weight='weight', walk_type=None, alpha=0.95):
    r"""Return the directed Laplacian matrix of G.

    The graph directed Laplacian is the matrix

    .. math::

        L = I - (\Phi^{1/2} P \Phi^{-1/2} + \Phi^{-1/2} P^T \Phi^{1/2} ) / 2

    where `I` is the identity matrix, `P` is the transition matrix of the
    graph, and `\Phi` a matrix with the Perron vector of `P` in the diagonal and
    zeros elsewhere.

    Depending on the value of walk_type, `P` can be the transition matrix
    induced by a random walk, a lazy random walk, or a random walk with
    teleportation (PageRank).

    Parameters
    ----------
    G : DiGraph
       A NetworkX graph

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in nodelist.
       If nodelist is None, then the ordering is produced by G.nodes().

    weight : string or None, optional (default='weight')
       The edge data key used to compute each value in the matrix.
       If None, then each edge has weight 1.

    walk_type : string or None, optional (default=None)
       If None, `P` is selected depending on the properties of the
       graph. Otherwise is one of 'random', 'lazy', or 'pagerank'

    alpha : real
       (1 - alpha) is the teleportation probability used with pagerank

    Returns
    -------
    L : NumPy array
      Normalized Laplacian of G.

    Raises
    ------
    NetworkXError
        If NumPy cannot be imported

    NetworkXNotImplemnted
        If G is not a DiGraph

    Notes
    -----
    Only implemented for DiGraphs

    See Also
    --------
    laplacian_matrix

    References
    ----------
    .. [1] Fan Chung (2005).
       Laplacians and the Cheeger inequality for directed graphs.
       Annals of Combinatorics, 9(1), 2005
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError(
            "directed_laplacian_matrix() requires numpy: http://scipy.org/ ")
    # Auto-select the walk: a plain random walk only has a well-defined
    # stationary distribution on strongly connected aperiodic graphs;
    # otherwise fall back to the lazy walk or to PageRank.
    if walk_type is None:
        if nx.is_strongly_connected(G):
            if nx.is_aperiodic(G):
                walk_type = "random"
            else:
                walk_type = "lazy"
        else:
            walk_type = "pagerank"

    M = nx.to_numpy_matrix(G, nodelist=nodelist, weight=weight)
    n, m = M.shape
    if walk_type in ["random", "lazy"]:
        # DI is the inverse degree matrix, so DI * M is row-stochastic
        DI = np.diagflat(1.0 / np.sum(M, axis=1))
        if walk_type == "random":
            P = DI * M
        else:
            # lazy walk: with probability 1/2 stay on the current node
            I = np.identity(n)
            P = (I + DI * M) / 2.0
    elif walk_type == "pagerank":
        if not (0 < alpha < 1):
            raise nx.NetworkXError('alpha must be between 0 and 1')
        # add constant to dangling nodes' row
        dangling = np.where(M.sum(axis=1) == 0)
        for d in dangling[0]:
            M[d] = 1.0 / n
        # normalize
        M = M / M.sum(axis=1)
        # teleportation mixes the walk with the uniform distribution
        P = alpha * M + (1 - alpha) / n
    else:
        raise nx.NetworkXError("walk_type must be random, lazy, or pagerank")

    evals, evecs = np.linalg.eig(P.T)
    index = evals.argsort()[-1] # index of largest eval,evec
    # eigenvector of largest eigenvalue at ind[-1]
    v = np.array(evecs[:,index]).flatten().real
    p = v / v.sum()  # Perron vector normalized to a probability distribution
    sp = np.sqrt(p)
    Q = np.diag(sp) * P * np.diag(1.0/sp)
    I = np.identity(len(G))

    return I - (Q + Q.T) /2.0
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this test module when NumPy is unavailable."""
    try:
        import numpy  # noqa: F401
    except ImportError:
        # BUG FIX: narrowed from a bare ``except:`` so unrelated errors
        # (e.g. KeyboardInterrupt) are not swallowed; nose is only
        # imported when the skip is actually needed.
        from nose import SkipTest
        raise SkipTest("NumPy not available")
| [
"numpy.identity",
"networkx.utils.require",
"networkx.utils.not_implemented_for",
"numpy.linalg.eig",
"numpy.sqrt",
"networkx.is_aperiodic",
"networkx.NetworkXError",
"numpy.diag",
"nose.SkipTest",
"numpy.errstate",
"numpy.zeros",
"numpy.dot",
"numpy.sum",
"networkx.adj_matrix",
"numpy.a... | [((4834, 4850), 'networkx.utils.require', 'require', (['"""numpy"""'], {}), "('numpy')\n", (4841, 4850), False, 'from networkx.utils import require, not_implemented_for\n'), ((4852, 4885), 'networkx.utils.not_implemented_for', 'not_implemented_for', (['"""undirected"""'], {}), "('undirected')\n", (4871, 4885), False, 'from networkx.utils import require, not_implemented_for\n'), ((4887, 4920), 'networkx.utils.not_implemented_for', 'not_implemented_for', (['"""multigraph"""'], {}), "('multigraph')\n", (4906, 4920), False, 'from networkx.utils import require, not_implemented_for\n'), ((2080, 2096), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (2088, 2096), True, 'import numpy as np\n'), ((7162, 7217), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['G'], {'nodelist': 'nodelist', 'weight': 'weight'}), '(G, nodelist=nodelist, weight=weight)\n', (7180, 7217), True, 'import networkx as nx\n'), ((7948, 7966), 'numpy.linalg.eig', 'np.linalg.eig', (['P.T'], {}), '(P.T)\n', (7961, 7966), True, 'import numpy as np\n'), ((8157, 8167), 'numpy.sqrt', 'np.sqrt', (['p'], {}), '(p)\n', (8164, 8167), True, 'import numpy as np\n'), ((1811, 1834), 'numpy.identity', 'np.identity', (['A.shape[0]'], {}), '(A.shape[0])\n', (1822, 1834), True, 'import numpy as np\n'), ((4160, 4170), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (4167, 4170), True, 'import numpy as np\n'), ((4475, 4503), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (4486, 4503), True, 'import numpy as np\n'), ((4557, 4573), 'numpy.isinf', 'np.isinf', (['Disqrt'], {}), '(Disqrt)\n', (4565, 4573), True, 'import numpy as np\n'), ((4603, 4620), 'numpy.dot', 'np.dot', (['L', 'Disqrt'], {}), '(L, Disqrt)\n', (4609, 4620), True, 'import numpy as np\n'), ((6950, 6977), 'networkx.is_strongly_connected', 'nx.is_strongly_connected', (['G'], {}), '(G)\n', (6974, 6977), True, 'import networkx as nx\n'), ((8194, 8211), 'numpy.diag', 'np.diag', (['(1.0 / sp)'], 
{}), '(1.0 / sp)\n', (8201, 8211), True, 'import numpy as np\n'), ((1746, 1801), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['G'], {'nodelist': 'nodelist', 'weight': 'weight'}), '(G, nodelist=nodelist, weight=weight)\n', (1764, 1801), True, 'import networkx as nx\n'), ((1847, 1864), 'numpy.sum', 'np.sum', (['A'], {'axis': '(1)'}), '(A, axis=1)\n', (1853, 1864), True, 'import numpy as np\n'), ((4288, 4298), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (4295, 4298), True, 'import numpy as np\n'), ((4360, 4372), 'numpy.sum', 'np.sum', (['A', '(1)'], {}), '(A, 1)\n', (4366, 4372), True, 'import numpy as np\n'), ((6994, 7012), 'networkx.is_aperiodic', 'nx.is_aperiodic', (['G'], {}), '(G)\n', (7009, 7012), True, 'import networkx as nx\n'), ((7415, 7429), 'numpy.identity', 'np.identity', (['n'], {}), '(n)\n', (7426, 7429), True, 'import numpy as np\n'), ((7864, 7927), 'networkx.NetworkXError', 'nx.NetworkXError', (['"""walk_type must be random, lazy, or pagerank"""'], {}), "('walk_type must be random, lazy, or pagerank')\n", (7880, 7927), True, 'import networkx as nx\n'), ((8176, 8187), 'numpy.diag', 'np.diag', (['sp'], {}), '(sp)\n', (8183, 8187), True, 'import numpy as np\n'), ((8408, 8439), 'nose.SkipTest', 'SkipTest', (['"""NumPy not available"""'], {}), "('NumPy not available')\n", (8416, 8439), False, 'from nose import SkipTest\n'), ((4330, 4346), 'networkx.adj_matrix', 'nx.adj_matrix', (['G'], {}), '(G)\n', (4343, 4346), True, 'import networkx as nx\n'), ((4385, 4395), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (4392, 4395), True, 'import numpy as np\n'), ((4534, 4544), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (4541, 4544), True, 'import numpy as np\n'), ((7308, 7325), 'numpy.sum', 'np.sum', (['M'], {'axis': '(1)'}), '(M, axis=1)\n', (7314, 7325), True, 'import numpy as np\n'), ((7549, 7598), 'networkx.NetworkXError', 'nx.NetworkXError', (['"""alpha must be between 0 and 1"""'], {}), "('alpha must be between 0 and 1')\n", (7565, 7598), True, 
'import networkx as nx\n'), ((8087, 8112), 'numpy.array', 'np.array', (['evecs[:, index]'], {}), '(evecs[:, index])\n', (8095, 8112), True, 'import numpy as np\n')] |
"""
Run symbolic reasoning on multiple-choice questions
"""
import os
import json
import argparse
import copy
from tqdm import tqdm
from executor import Executor
from utils import *
import numpy as np
# Fix the global NumPy RNG seed so reasoning runs are reproducible.
np.random.seed(626)
def split_by_qtype(QA, return_dict=True):
    """Group QA entries by question type.

    The type is the prefix of each entry's 'question_id' (text before the
    first underscore); only the four known types are accepted — an unknown
    prefix raises KeyError.

    Args:
        QA: list of QA dicts, each with a 'question_id' key.
        return_dict: if True, return a dict mapping type -> entries;
            otherwise return the four lists as a tuple in the fixed order
            (Interaction, Sequence, Prediction, Feasibility).
    """
    qtypes = ('Interaction', 'Sequence', 'Prediction', 'Feasibility')
    grouped = {qtype: [] for qtype in qtypes}
    for entry in QA:
        prefix = entry['question_id'].split('_')[0]
        grouped[prefix].append(entry)
    if return_dict:
        return grouped
    return tuple(grouped[qtype] for qtype in qtypes)
def execute_program(all_qa, debug=False):
    """Execute the symbolic programs of each QA item and return accuracy.

    A question counts as correct only when the executor's verdict agrees
    with the ground truth for *every* choice: 'Correct' for the answer
    choice and 'Wrong' for all others.

    Args:
        all_qa: list of QA dicts ('situations', 'question_program',
            'choices', 'answer').
        debug: forwarded to Executor.run for verbose tracing.

    Returns:
        Accuracy in percent rounded to 2 decimals (0 for an empty list).
    """
    total = len(all_qa)
    if total == 0:
        return 0
    verdict = ['Wrong', 'Correct']
    n_correct = 0
    progress = tqdm(range(total))
    for idx in progress:
        qa = all_qa[idx]
        exe = Executor(qa['situations'])
        question_pg = qa['question_program']
        n_matched = 0
        for choice in qa['choices']:
            program = question_pg + choice['choice_program']
            pred = exe.run(program, debug)
            # expected verdict: 'Correct' iff this choice is the answer
            expected = verdict[int(choice['choice'] == qa['answer'])]
            if pred == expected:
                n_matched += 1
        if n_matched == len(qa['choices']):
            n_correct += 1
        progress.set_description('Acc {:f}'.format(round(float(n_correct) * 100 / total, 2)))
    return round(float(n_correct * 100 / total), 2)
def situation_reasoning(args):
    """Load the QA file at args.qa_dir and report accuracy per question type."""
    all_qa = json.load(open(args.qa_dir))
    per_type = split_by_qtype(all_qa)
    print('----------Start Reasoning----------')
    for qtype, entries in per_type.items():
        print('----------{}----------'.format(qtype))
        accuracy = execute_program(entries)
        print(qtype, 'Acc:', accuracy)
    print('----------End Reasoning----------')
if __name__ == '__main__':
    # CLI entry point: --qa_dir points at the STAR validation QA json file.
    parser = argparse.ArgumentParser(description="Run Situation Reasoning")
    parser.add_argument("--qa_dir", default="STAR/STAR_val.json")
    args = parser.parse_args()
situation_reasoning(args) | [
"numpy.random.seed",
"argparse.ArgumentParser",
"executor.Executor"
] | [((201, 220), 'numpy.random.seed', 'np.random.seed', (['(626)'], {}), '(626)\n', (215, 220), True, 'import numpy as np\n'), ((1789, 1851), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run Situation Reasoning"""'}), "(description='Run Situation Reasoning')\n", (1812, 1851), False, 'import argparse\n'), ((840, 860), 'executor.Executor', 'Executor', (['situations'], {}), '(situations)\n', (848, 860), False, 'from executor import Executor\n')] |
from __future__ import division
import gym
from gym import spaces
import numpy as np
import pygame
from src.temporal_playground_env.objects import generate_objects
from src.temporal_playground_env.objects import Agent
from src.temporal_playground_env.env_params import get_env_params
# TODO update this to have a more complete agent
class TemporalPlaygroundV1(gym.Env):
    """2-D 'Playground' gym environment: an agent moves on the plane, opens and
    closes a gripper to grasp objects, and can grow animals/plants by bringing
    them supplies. Observations concatenate body and object features plus their
    delta from the initial observation."""
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 30
    }

    '''
    Playground Environment:
    set reward_screen to True to visualize modular reward function predictions
    set viz_data_collection to True to visualize Social Partner interactions
    '''

    def __init__(self,
                 max_timesteps=50,
                 random_init=False,
                 human=False,
                 reward_screen=False,
                 viz_data_collection=False,
                 agent_step_size=0.15,
                 agent_initial_pos=(0, 0),
                 agent_initial_pos_range=0.6,
                 max_nb_objects=3, # number of objects in the scene
                 random_nb_obj=False,
                 admissible_actions=('Move', 'Grasp', 'Grow'), # which types of actions are admissible
                 admissible_attributes=('colors', 'categories', 'types'),
                 # , 'relative_sizes', 'shades', 'relative_shades', 'sizes', 'relative_positions'),
                 # which object attributes
                 # can be used
                 min_max_sizes=((0.2, 0.25), (0.25, 0.3)), # ranges of sizes of objects (small and large ones)
                 agent_size=0.05, # size of the agent
                 epsilon_initial_pos=0.6, # epsilon to sample initial positions
                 screen_size=800, # size of the visualization screen
                 next_to_epsilon=0.3, # define the area to qualify an object as 'next to' another.
                 attribute_combinations=False,
                 obj_size_update=0.0075,
                 render_mode=False
                 ):
        """Build env parameters, action/observation spaces, the agent, the
        pygame viewer (when render_mode is set) and reset the scene once."""
        self.params = get_env_params(max_nb_objects=max_nb_objects,
                                       admissible_actions=admissible_actions,
                                       admissible_attributes=admissible_attributes,
                                       min_max_sizes=min_max_sizes,
                                       agent_size=agent_size,
                                       epsilon_initial_pos=epsilon_initial_pos,
                                       screen_size=screen_size,
                                       next_to_epsilon=next_to_epsilon,
                                       attribute_combinations=attribute_combinations,
                                       obj_size_update=obj_size_update,
                                       render_mode=render_mode
                                       )
        self.adm_attributes = self.params['admissible_attributes']
        # absolute attributes only (relative ones need pairwise comparisons)
        self.adm_abs_attributes = [a for a in self.adm_attributes if 'relative' not in a]
        self.attributes = self.params['attributes']
        self.categories = self.params['categories']
        self.screen_size = self.params['screen_size']
        self.viz_data_collection = viz_data_collection
        self.reward_screen = reward_screen
        self.first_action = False
        self.SP_feedback = False
        self.known_goals_update = False
        self.known_goals_descr = []
        # probability that a free (non-grasped) animal moves at each step
        self.p_move_animal = 0.5
        # precomputed [x offset, unused, gray level] triplets for the reward-screen dots
        self.circles = [[x * 3, 200, x * 4] for x in range(50)]
        self.random_init = random_init
        self.max_timesteps = max_timesteps
        # Dimensions of action and observations spaces
        self.dim_act = 3
        self.max_nb_objects = self.params['max_nb_objects']
        self.random_nb_obj = random_nb_obj
        self.nb_obj = self.params['max_nb_objects']
        self.dim_obj = self.params['dim_obj_features']
        self.dim_body = self.params['dim_body_features']
        # feature index ranges of each object inside the flat observation vector
        self.inds_objs = [np.arange(self.dim_body + self.dim_obj * i_obj, self.dim_body + self.dim_obj * (i_obj + 1))
                          for i_obj in range(self.nb_obj)]
        # observation = [current features | delta from initial features]
        self.half_dim_obs = self.max_nb_objects * self.dim_obj + self.dim_body
        self.dim_obs = int(2 * self.half_dim_obs)
        # We define the spaces
        self.action_space = spaces.Box(low=-np.ones(self.dim_act),
                                       high=np.ones(self.dim_act),
                                       dtype=np.float32)
        self.observation_space = spaces.Box(low=-np.ones(self.dim_obs),
                                            high=np.ones(self.dim_obs),
                                            dtype=np.float32)
        # Agent parameters
        self.agent_step_size = agent_step_size
        self.agent_initial_pos = agent_initial_pos
        self.agent_initial_pos_range = agent_initial_pos_range
        self.agent = Agent(agent_initial_pos)
        # rendering
        self.human = human
        self.render_mode = render_mode
        # NOTE(review): this is a generator expression, not a tuple — it can be
        # iterated only once; set_logits_concat later replaces it. Confirm intent.
        self.logits_concat = (0 for _ in range(self.nb_obj))
        if self.render_mode:
            pygame.init()
            if self.reward_screen:
                # extra 300-px column on the right for reward visualization
                self.viewer = pygame.display.set_mode((self.screen_size + 300, self.screen_size))
            else:
                self.viewer = pygame.display.set_mode((self.screen_size, self.screen_size))
            self.viewer_started = False
        self.background = None
        self.reset()
        # We set to None to rush error if reset not called
        self.observation = None
        self.initial_observation = None
        self.done = None

    def regularize_type_and_attribute(self, object):
        """Make an object description consistent: fill in a missing category
        from the type (or vice versa by random sampling), and resample the type
        when it does not belong to the given category. Returns a copy."""
        if object['categories'] is None and object['types'] is not None:
            for k in self.categories.keys():
                if object['types'] in self.categories[k]:
                    object['categories'] = k
        elif object['categories'] is not None and object['types'] is None:
            object['types'] = np.random.choice(self.categories[object['categories']])
        elif object['categories'] is None and object['types'] is None:
            object['categories'] = np.random.choice(list(self.categories.keys()))
            object['types'] = np.random.choice(self.categories[object['categories']])
        elif object['categories'] is not None and object['types'] is not None:
            if object['types'] not in self.categories[object['categories']]:
                object['types'] = np.random.choice(self.categories[object['categories']])
        return object.copy()

    def complete_and_check_objs(self, objects_decr):
        """Regularize each object description and randomly fill any attribute
        still set to None. Returns a copy of the list."""
        objects_decr = [self.regularize_type_and_attribute(o) for o in objects_decr]
        for o in objects_decr:
            for k in o.keys():
                if o[k] is None:
                    o[k] = np.random.choice(self.attributes[k])
        return objects_decr.copy()

    def reset_with_goal(self, goal_str):
        """Reset the scene so the goal described by goal_str (e.g. 'Grow red
        animal') is achievable: builds matching object descriptions and, for
        'Grow' goals, also spawns a compatible supply object."""
        words = goal_str.split(' ')
        objs = []
        if words[0] == 'Grow':
            obj_to_be_grown = dict(zip(self.adm_abs_attributes, [None for _ in range(len(self.adm_abs_attributes))]))
            obj_supply = dict(zip(self.adm_abs_attributes, [None for _ in range(len(self.adm_abs_attributes))]))
            # first add the object that should be grown
            for w in words[1:]:
                for k in self.adm_abs_attributes:
                    if w in self.attributes[k]:
                        obj_to_be_grown[k] = w
            if obj_to_be_grown['categories'] is None and obj_to_be_grown['types'] is None:
                # if only attributes are proposed, sample a grownable object type
                obj_to_be_grown['categories'] = np.random.choice(['animal', 'plant'])
            objs.append(obj_to_be_grown.copy())
            # now sample the supply
            if obj_to_be_grown['categories'] in ['living_thing', 'plant'] or obj_to_be_grown['types'] in \
                    self.categories['plant']:
                obj_supply.update(dict(types='water',
                                       categories='supply'))
            else:
                obj_supply.update(dict(categories='supply'))
            objs.append(obj_supply.copy())
        else:
            obj = dict(zip(self.adm_abs_attributes, [None for _ in range(len(self.adm_abs_attributes))]))
            for w in words[1:]:
                for k in self.adm_abs_attributes:
                    if w in self.attributes[k]:
                        obj[k] = w
            objs.append(obj.copy())
        return self.reset_scene(objs)

    def reset(self):
        """Standard gym reset: clear episode flags and rebuild the scene
        (with a random object count when random_nb_obj is set)."""
        if self.random_nb_obj:
            self.nb_obj = np.random.randint(2, self.max_nb_objects)
            self.half_dim_obs = self.nb_obj * self.dim_obj + self.dim_body
            self.dim_obs = int(2 * self.half_dim_obs)
        self.first_action = False
        self.logits_concat = (0 for _ in range(self.nb_obj))
        self.SP_feedback = False
        self.known_goals_update = False
        return self.reset_scene()

    def reset_scene(self, objects=None):
        """Place the agent (optionally jittered), sample the objects (using the
        given partial descriptions when provided) and return the first
        observation."""
        self.agent.pos = self.agent_initial_pos
        self.agent.initial_pos = self.agent_initial_pos
        # self.agent_pos = self.agent_initial_pos
        if self.random_init:
            self.agent.pos += np.random.uniform(-self.agent_initial_pos_range, self.agent_initial_pos_range, 2)
            self.gripper_state = np.random.choice([-1, 1])
        else:
            self.gripper_state = -1
        self.objects = self.sample_objects(objects)
        # Print objects
        self.object_grasped = False
        # propagate the initial (zero-action) state update to every object
        for obj in self.objects:
            self.object_grasped = obj.update_state(self.agent.pos,
                                                   self.gripper_state > 0,
                                                   self.objects,
                                                   self.object_grasped,
                                                   np.zeros([self.dim_act]))
        # construct vector of observations
        self.observation = np.zeros(self.dim_obs)
        self.observation[:self.half_dim_obs] = self.observe()
        self.initial_observation = self.observation[:self.half_dim_obs].copy()
        self.env_step = 0
        self.done = False
        return self.observation.copy()

    def get_pixel_coordinates(self, xpos, ypos):
        """Map world coordinates in [-1, 1] to pixel coordinates; the playable
        area occupies the central 2/3 of the screen and y is flipped."""
        # return ((xpos + 1) / 2 * (self.params['screen_size'] * 2 / 3) + 1 / 6 * self.params['screen_size']).astype(np.int), \
        #        ((-ypos + 1) / 2 * (self.params['screen_size'] * 2 / 3) + 1 / 6 * self.params['screen_size']).astype(np.int)
        return int((xpos + 1) / 2 * (self.params['screen_size'] * 2 / 3) + 1 / 6 * self.params['screen_size']), \
               int((-ypos + 1) / 2 * (self.params['screen_size'] * 2 / 3) + 1 / 6 * self.params['screen_size'])

    def sample_objects(self, objects_to_add):
        """Complete the given object descriptions (random ones fill the scene
        up to nb_obj) and instantiate the corresponding objects."""
        object_descr = objects_to_add if objects_to_add is not None else []
        while len(object_descr) < self.nb_obj:
            object = dict()
            for k in self.adm_abs_attributes:
                object[k] = np.random.choice(self.attributes[k])
            object_descr.append(object)
        object_descr = self.complete_and_check_objs(object_descr)
        # NOTE(review): objects_ids is computed but never used — dead code?
        objects_ids = [self.get_obj_identifier(o) for o in object_descr]
        objects = generate_objects(object_descr, self.params)
        return objects

    def get_obj_identifier(self, object):
        """Build a deterministic 'key:value ' string identifier from the
        object's attributes (keys sorted for stability)."""
        id_str = ''
        for k in sorted(list(object.keys())):
            id_str += '{}:{} '.format(k, object[k])
        return id_str

    def observe(self):
        """Return the current half-observation: agent position, gripper state
        and the concatenated object features."""
        obj_features = np.array([obj.get_features() for obj in self.objects]).flatten()
        obs = np.concatenate([self.agent.pos,  # size 2
                              np.array([self.gripper_state]),
                              obj_features,
                              ])
        return obs.copy()

    def step(self, action):
        # actions
        # 0 = x
        # 1 = y
        # 2 = gripper
        """
        Run one timestep of the environment's dynamics.
        """
        action = np.array(action).clip(-1, 1)
        if np.sum(action) != 0:
            self.first_action = True
        # Update the agent position
        self.agent.pos = np.clip(self.agent.pos + action[:2] * self.agent_step_size, -1.2, 1.2)
        # Update the gripper state
        if self.human:
            # human control: action[2] > 0 toggles the gripper
            if action[2] > 0:
                self.gripper_state = 1 if self.gripper_state == -1 else -1
        else:
            if action[2] > 0.:
                new_gripper = 1
            else:
                new_gripper = -1
            # NOTE(review): gripper_change is True when the state did NOT
            # change (compared before updating) — name suggests the opposite;
            # confirm intent.
            self.gripper_change = new_gripper == self.gripper_state
            self.gripper_state = new_gripper
        for obj in self.objects:
            self.object_grasped = obj.update_state(self.agent.pos,
                                                   self.gripper_state > 0,
                                                   self.objects,
                                                   self.object_grasped,
                                                   action)
            # free animals wander randomly with probability p_move_animal
            if obj.grasp is False and 'animal' in obj.object_attributes['categories']:
                if np.random.random(1) > 1 - self.p_move_animal:
                    new_animal_pos = obj.position + 0.1 * obj.direction + (- 0.04 + 0.08 * np.random.random(2))
                    obj._update_position(new_animal_pos)
        self.observation[:self.half_dim_obs] = self.observe()
        # second half of the observation is the delta from the initial state
        self.observation[self.half_dim_obs:] = self.observation[:self.half_dim_obs] - self.initial_observation
        self.env_step += 1
        if self.env_step == self.max_timesteps:
            self.done = True
        # NOTE(review): reward is always 0 and the returned done flag is always
        # False even though self.done is tracked above — confirm this is the
        # intended (external-reward) contract.
        return self.observation.copy(), 0, False, {}

    def render(self, goal_str, mode='human', close=False):
        """Draw the scene with pygame: objects, gripper, and — depending on
        flags — the reward screen and the social-partner visualization.
        NOTE(review): signature differs from gym.Env.render (extra goal_str)."""
        background_color = [220, 220, 220]
        FONT = pygame.font.Font(None, 25)
        self.viewer.fill(background_color)
        self.shapes = {}
        self.anchors = {}
        self.patches = {}
        # OBJECTS
        for object in self.objects:
            object.update_rendering(self.viewer)
        # REWARD SCREEN
        if self.reward_screen:
            pygame.draw.rect(self.viewer, pygame.Color('darkgray'), (800, 0, 300, 800))
            goal_txt_surface = FONT.render(goal_str, True, pygame.Color('black'))
            self.viewer.blit(goal_txt_surface, (800 + 150 - goal_txt_surface.get_width() // 2, 50))
            cross_icon = pygame.image.load(self.params['img_path'] + 'cross.png')
            cross_icon = pygame.transform.scale(cross_icon, (50, 50)).convert_alpha()
            tick_icon = pygame.image.load(self.params['img_path'] + 'tick.png')
            tick_icon = pygame.transform.scale(tick_icon, (50, 50)).convert_alpha()
            # tick when any modular reward prediction exceeds 0.5
            if any(logit > 0.5 for logit in self.logits_concat):
                self.viewer.blit(tick_icon, (800 + 125, 75))
            else:
                self.viewer.blit(cross_icon, (800 + 125, 75))
            for i_obj, object in enumerate(self.objects):
                object_surface = object.surface
                object_surface = pygame.transform.scale(object_surface, (80, 80)).convert_alpha()
                self.viewer.blit(object_surface, (900, 150 + 200 * i_obj))
                # gray gradient dots acting as the per-object logit gauge
                circle_img = pygame.Surface((20, 20))
                for x in self.circles:
                    pygame.draw.circle(circle_img, (255 - x[2], 255 - x[2], 255 - x[2]), (10, 10), 8)
                    circle_img.set_colorkey(0)
                    self.viewer.blit(circle_img, (860 + x[0], 255 + 200 * i_obj))
                # pygame.draw.rect(self.viewer, pygame.Color('white'), (880, 255 + 200*i_obj, 120,20))
                x = self.logits_concat[i_obj]
                pygame.draw.rect(self.viewer, pygame.Color('darkred'), (860 + int(x * 160), 252.5 + 200 * i_obj, 3, 25))
        # GRIPPER
        x, y = self.get_pixel_coordinates(self.agent.pos[0], self.agent.pos[1])
        # TODO don't load in rendering this is stupid
        size_gripper_pixels = 55
        size_gripper_closed_pixels = 45
        gripper_icon = pygame.image.load(self.params['img_path'] + 'hand_open.png')
        gripper_icon = pygame.transform.scale(gripper_icon, (size_gripper_pixels, size_gripper_pixels)).convert_alpha()
        closed_gripper_icon = pygame.image.load(self.params['img_path'] + 'hand_closed.png')
        closed_gripper_icon = pygame.transform.scale(closed_gripper_icon,
                                                     (size_gripper_closed_pixels, size_gripper_pixels)).convert_alpha()
        if self.gripper_state == 1:
            left = int(x - size_gripper_closed_pixels // 2)
            top = int(y - size_gripper_closed_pixels // 2)
            self.viewer.blit(closed_gripper_icon, (left, top))
        else:
            left = int(x - size_gripper_pixels // 2)
            top = int(y - size_gripper_pixels // 2)
            self.viewer.blit(gripper_icon, (left, top))
        # IMAGINATION BUBBLE
        # if self.first_action == False:
        #     txt_surface = FONT.render(goal_str, True, pygame.Color('black'))
        #
        #     speech_bubble_icon = pygame.image.load(self.params['img_path'] + 'bubble.png')
        #     speech_bubble_icon = pygame.transform.scale(speech_bubble_icon,
        #                                                 (txt_surface.get_width() + 50, 120)).convert_alpha()
        #     off_set_bubble = int(1.2 * size_gripper_pixels)
        #     bubble_x = x - off_set_bubble // 2
        #     bubble_y = y - 2 * off_set_bubble
        #     self.viewer.blit(speech_bubble_icon, (bubble_x, bubble_y))
        #     self.viewer.blit(txt_surface, (bubble_x + 25, bubble_y + 20))
        if self.viz_data_collection:
            # KNOWN GOALS
            known_goals_txt = FONT.render('Known Goals', True, pygame.Color('darkblue'))
            known_goals_icon = pygame.image.load(self.params['img_path'] + 'known_goals_box.png')
            known_goals_icon = pygame.transform.scale(known_goals_icon,
                                                      (300, 35 + 25 * len(self.known_goals_descr))).convert_alpha()
            self.viewer.blit(known_goals_icon, (50, 50))
            self.viewer.blit(known_goals_txt, (75, 60))
            for i, descr in enumerate(self.known_goals_descr):
                goal_txt_surface = FONT.render(descr, True, pygame.Color('black'))
                self.viewer.blit(goal_txt_surface, (100, 85 + 25 * i))
            if self.SP_feedback == True:
                # SOCIAL PEER
                SP_head_icon = pygame.image.load(self.params['img_path'] + 'SP_head.png')
                SP_head_icon = pygame.transform.scale(SP_head_icon, (80, 80)).convert_alpha()
                SP_x = 50
                SP_y = 700
                self.viewer.blit(SP_head_icon, (SP_x, SP_y))
                SP_txt_surface = FONT.render('You ' + 'g' + self.SP_goal_descr[1:], True, pygame.Color('black'))
                SP_bubble_icon = pygame.image.load(self.params['img_path'] + 'SP_bubble.png')
                SP_bubble_icon = pygame.transform.scale(SP_bubble_icon,
                                                        (SP_txt_surface.get_width() + 50, 80)).convert_alpha()
                self.viewer.blit(SP_bubble_icon, (SP_x + 70, SP_y - 25))
                self.viewer.blit(SP_txt_surface, (SP_x + 100, SP_y))
                ## KNOWN GOALS UPDATE
                if self.known_goals_update == True:
                    if self.SP_goal_descr not in self.known_goals_descr:
                        known_goals_icon = pygame.transform.scale(known_goals_icon,
                                                                  (300, 35 + 25 * (1 + len(
                                                                      self.known_goals_descr)))).convert_alpha()
                        self.viewer.blit(known_goals_icon, (50, 50))
                        self.viewer.blit(known_goals_txt, (75, 60))
                        for i, descr in enumerate(self.known_goals_descr):
                            goal_txt_surface = FONT.render(descr, True, pygame.Color('black'))
                            self.viewer.blit(goal_txt_surface, (100, 85 + 25 * i))
                        # animate the new goal sliding from the bubble into the box
                        if len(self.known_goals_descr) > 1:
                            goal_txt_surface = FONT.render(self.SP_goal_descr, True, pygame.Color('black'))
                            self.viewer.blit(goal_txt_surface, (
                                100,
                                SP_y - int(self.progress_goal_move * (SP_y - 85 - 25 * (len(self.known_goals_descr))))))
                            print(self.progress_goal_move)
                        else:
                            goal_txt_surface = FONT.render(self.SP_goal_descr, True, pygame.Color('black'))
                            self.viewer.blit(goal_txt_surface,
                                             (100, SP_y - int(self.progress_goal_move * (SP_y - 100)) - 15))
        pygame.display.update()
        pygame.time.wait(50)

    def set_SP_feedback(self, goal_descr):
        """Record the social partner's goal description for rendering."""
        self.SP_feedback = True
        self.SP_goal_descr = goal_descr

    def update_known_goal_position(self, x):
        """Set the known-goal animation progress (x in [0, 10] -> [0, 1])."""
        self.known_goals_update = True
        self.progress_goal_move = x / 10

    def update_known_goals_list(self):
        """Append the social partner's goal to the known goals, once."""
        if self.SP_goal_descr not in self.known_goals_descr:
            self.known_goals_descr.append(self.SP_goal_descr)

    def set_logits_concat(self, logits_concats):
        """Store the per-object reward logits used by the reward screen."""
        self.logits_concat = logits_concats

    def seed(self, seed):
        """Seed the global NumPy RNG (affects all np.random users)."""
        np.random.seed(seed)

    def close(self):
        """Shut down pygame and release the viewer."""
        if self.viewer is not None:
            pygame.quit()
        self.viewer = None
| [
"numpy.clip",
"pygame.init",
"pygame.quit",
"numpy.array",
"pygame.font.Font",
"src.temporal_playground_env.objects.Agent",
"numpy.arange",
"pygame.transform.scale",
"numpy.random.random",
"pygame.display.set_mode",
"numpy.random.seed",
"pygame.image.load",
"pygame.display.update",
"numpy.... | [((2075, 2483), 'src.temporal_playground_env.env_params.get_env_params', 'get_env_params', ([], {'max_nb_objects': 'max_nb_objects', 'admissible_actions': 'admissible_actions', 'admissible_attributes': 'admissible_attributes', 'min_max_sizes': 'min_max_sizes', 'agent_size': 'agent_size', 'epsilon_initial_pos': 'epsilon_initial_pos', 'screen_size': 'screen_size', 'next_to_epsilon': 'next_to_epsilon', 'attribute_combinations': 'attribute_combinations', 'obj_size_update': 'obj_size_update', 'render_mode': 'render_mode'}), '(max_nb_objects=max_nb_objects, admissible_actions=\n admissible_actions, admissible_attributes=admissible_attributes,\n min_max_sizes=min_max_sizes, agent_size=agent_size, epsilon_initial_pos\n =epsilon_initial_pos, screen_size=screen_size, next_to_epsilon=\n next_to_epsilon, attribute_combinations=attribute_combinations,\n obj_size_update=obj_size_update, render_mode=render_mode)\n', (2089, 2483), False, 'from src.temporal_playground_env.env_params import get_env_params\n'), ((4903, 4927), 'src.temporal_playground_env.objects.Agent', 'Agent', (['agent_initial_pos'], {}), '(agent_initial_pos)\n', (4908, 4927), False, 'from src.temporal_playground_env.objects import Agent\n'), ((10070, 10092), 'numpy.zeros', 'np.zeros', (['self.dim_obs'], {}), '(self.dim_obs)\n', (10078, 10092), True, 'import numpy as np\n'), ((11361, 11404), 'src.temporal_playground_env.objects.generate_objects', 'generate_objects', (['object_descr', 'self.params'], {}), '(object_descr, self.params)\n', (11377, 11404), False, 'from src.temporal_playground_env.objects import generate_objects\n'), ((12306, 12376), 'numpy.clip', 'np.clip', (['(self.agent.pos + action[:2] * self.agent_step_size)', '(-1.2)', '(1.2)'], {}), '(self.agent.pos + action[:2] * self.agent_step_size, -1.2, 1.2)\n', (12313, 12376), True, 'import numpy as np\n'), ((13928, 13954), 'pygame.font.Font', 'pygame.font.Font', (['None', '(25)'], {}), '(None, 25)\n', (13944, 13954), False, 'import pygame\n'), 
((16169, 16229), 'pygame.image.load', 'pygame.image.load', (["(self.params['img_path'] + 'hand_open.png')"], {}), "(self.params['img_path'] + 'hand_open.png')\n", (16186, 16229), False, 'import pygame\n'), ((16380, 16442), 'pygame.image.load', 'pygame.image.load', (["(self.params['img_path'] + 'hand_closed.png')"], {}), "(self.params['img_path'] + 'hand_closed.png')\n", (16397, 16442), False, 'import pygame\n'), ((21064, 21087), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (21085, 21087), False, 'import pygame\n'), ((21096, 21116), 'pygame.time.wait', 'pygame.time.wait', (['(50)'], {}), '(50)\n', (21112, 21116), False, 'import pygame\n'), ((21651, 21671), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (21665, 21671), True, 'import numpy as np\n'), ((3983, 4079), 'numpy.arange', 'np.arange', (['(self.dim_body + self.dim_obj * i_obj)', '(self.dim_body + self.dim_obj * (i_obj + 1))'], {}), '(self.dim_body + self.dim_obj * i_obj, self.dim_body + self.\n dim_obj * (i_obj + 1))\n', (3992, 4079), True, 'import numpy as np\n'), ((5117, 5130), 'pygame.init', 'pygame.init', ([], {}), '()\n', (5128, 5130), False, 'import pygame\n'), ((8674, 8715), 'numpy.random.randint', 'np.random.randint', (['(2)', 'self.max_nb_objects'], {}), '(2, self.max_nb_objects)\n', (8691, 8715), True, 'import numpy as np\n'), ((9305, 9391), 'numpy.random.uniform', 'np.random.uniform', (['(-self.agent_initial_pos_range)', 'self.agent_initial_pos_range', '(2)'], {}), '(-self.agent_initial_pos_range, self.\n agent_initial_pos_range, 2)\n', (9322, 9391), True, 'import numpy as np\n'), ((9420, 9445), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (9436, 9445), True, 'import numpy as np\n'), ((12186, 12200), 'numpy.sum', 'np.sum', (['action'], {}), '(action)\n', (12192, 12200), True, 'import numpy as np\n'), ((14531, 14587), 'pygame.image.load', 'pygame.image.load', (["(self.params['img_path'] + 'cross.png')"], {}), 
"(self.params['img_path'] + 'cross.png')\n", (14548, 14587), False, 'import pygame\n'), ((14699, 14754), 'pygame.image.load', 'pygame.image.load', (["(self.params['img_path'] + 'tick.png')"], {}), "(self.params['img_path'] + 'tick.png')\n", (14716, 14754), False, 'import pygame\n'), ((17964, 18030), 'pygame.image.load', 'pygame.image.load', (["(self.params['img_path'] + 'known_goals_box.png')"], {}), "(self.params['img_path'] + 'known_goals_box.png')\n", (17981, 18030), False, 'import pygame\n'), ((21742, 21755), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (21753, 21755), False, 'import pygame\n'), ((4407, 4428), 'numpy.ones', 'np.ones', (['self.dim_act'], {}), '(self.dim_act)\n', (4414, 4428), True, 'import numpy as np\n'), ((4608, 4629), 'numpy.ones', 'np.ones', (['self.dim_obs'], {}), '(self.dim_obs)\n', (4615, 4629), True, 'import numpy as np\n'), ((5196, 5263), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(self.screen_size + 300, self.screen_size)'], {}), '((self.screen_size + 300, self.screen_size))\n', (5219, 5263), False, 'import pygame\n'), ((5312, 5373), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(self.screen_size, self.screen_size)'], {}), '((self.screen_size, self.screen_size))\n', (5335, 5373), False, 'import pygame\n'), ((6004, 6059), 'numpy.random.choice', 'np.random.choice', (["self.categories[object['categories']]"], {}), "(self.categories[object['categories']])\n", (6020, 6059), True, 'import numpy as np\n'), ((7721, 7758), 'numpy.random.choice', 'np.random.choice', (["['animal', 'plant']"], {}), "(['animal', 'plant'])\n", (7737, 7758), True, 'import numpy as np\n'), ((9973, 9997), 'numpy.zeros', 'np.zeros', (['[self.dim_act]'], {}), '([self.dim_act])\n', (9981, 9997), True, 'import numpy as np\n'), ((11127, 11163), 'numpy.random.choice', 'np.random.choice', (['self.attributes[k]'], {}), '(self.attributes[k])\n', (11143, 11163), True, 'import numpy as np\n'), ((11810, 11840), 'numpy.array', 'np.array', 
(['[self.gripper_state]'], {}), '([self.gripper_state])\n', (11818, 11840), True, 'import numpy as np\n'), ((12145, 12161), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (12153, 12161), True, 'import numpy as np\n'), ((14277, 14301), 'pygame.Color', 'pygame.Color', (['"""darkgray"""'], {}), "('darkgray')\n", (14289, 14301), False, 'import pygame\n'), ((14382, 14403), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (14394, 14403), False, 'import pygame\n'), ((15354, 15378), 'pygame.Surface', 'pygame.Surface', (['(20, 20)'], {}), '((20, 20))\n', (15368, 15378), False, 'import pygame\n'), ((16253, 16338), 'pygame.transform.scale', 'pygame.transform.scale', (['gripper_icon', '(size_gripper_pixels, size_gripper_pixels)'], {}), '(gripper_icon, (size_gripper_pixels, size_gripper_pixels)\n )\n', (16275, 16338), False, 'import pygame\n'), ((16473, 16571), 'pygame.transform.scale', 'pygame.transform.scale', (['closed_gripper_icon', '(size_gripper_closed_pixels, size_gripper_pixels)'], {}), '(closed_gripper_icon, (size_gripper_closed_pixels,\n size_gripper_pixels))\n', (16495, 16571), False, 'import pygame\n'), ((17907, 17931), 'pygame.Color', 'pygame.Color', (['"""darkblue"""'], {}), "('darkblue')\n", (17919, 17931), False, 'import pygame\n'), ((18652, 18710), 'pygame.image.load', 'pygame.image.load', (["(self.params['img_path'] + 'SP_head.png')"], {}), "(self.params['img_path'] + 'SP_head.png')\n", (18669, 18710), False, 'import pygame\n'), ((19065, 19125), 'pygame.image.load', 'pygame.image.load', (["(self.params['img_path'] + 'SP_bubble.png')"], {}), "(self.params['img_path'] + 'SP_bubble.png')\n", (19082, 19125), False, 'import pygame\n'), ((4340, 4361), 'numpy.ones', 'np.ones', (['self.dim_act'], {}), '(self.dim_act)\n', (4347, 4361), True, 'import numpy as np\n'), ((4536, 4557), 'numpy.ones', 'np.ones', (['self.dim_obs'], {}), '(self.dim_obs)\n', (4543, 4557), True, 'import numpy as np\n'), ((6243, 6298), 'numpy.random.choice', 
'np.random.choice', (["self.categories[object['categories']]"], {}), "(self.categories[object['categories']])\n", (6259, 6298), True, 'import numpy as np\n'), ((6835, 6871), 'numpy.random.choice', 'np.random.choice', (['self.attributes[k]'], {}), '(self.attributes[k])\n', (6851, 6871), True, 'import numpy as np\n'), ((13261, 13280), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (13277, 13280), True, 'import numpy as np\n'), ((14613, 14657), 'pygame.transform.scale', 'pygame.transform.scale', (['cross_icon', '(50, 50)'], {}), '(cross_icon, (50, 50))\n', (14635, 14657), False, 'import pygame\n'), ((14779, 14822), 'pygame.transform.scale', 'pygame.transform.scale', (['tick_icon', '(50, 50)'], {}), '(tick_icon, (50, 50))\n', (14801, 14822), False, 'import pygame\n'), ((15438, 15524), 'pygame.draw.circle', 'pygame.draw.circle', (['circle_img', '(255 - x[2], 255 - x[2], 255 - x[2])', '(10, 10)', '(8)'], {}), '(circle_img, (255 - x[2], 255 - x[2], 255 - x[2]), (10, \n 10), 8)\n', (15456, 15524), False, 'import pygame\n'), ((15845, 15868), 'pygame.Color', 'pygame.Color', (['"""darkred"""'], {}), "('darkred')\n", (15857, 15868), False, 'import pygame\n'), ((18455, 18476), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (18467, 18476), False, 'import pygame\n'), ((19009, 19030), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (19021, 19030), False, 'import pygame\n'), ((15185, 15233), 'pygame.transform.scale', 'pygame.transform.scale', (['object_surface', '(80, 80)'], {}), '(object_surface, (80, 80))\n', (15207, 15233), False, 'import pygame\n'), ((18742, 18788), 'pygame.transform.scale', 'pygame.transform.scale', (['SP_head_icon', '(80, 80)'], {}), '(SP_head_icon, (80, 80))\n', (18764, 18788), False, 'import pygame\n'), ((6489, 6544), 'numpy.random.choice', 'np.random.choice', (["self.categories[object['categories']]"], {}), "(self.categories[object['categories']])\n", (6505, 6544), True, 'import numpy as 
np\n'), ((13398, 13417), 'numpy.random.random', 'np.random.random', (['(2)'], {}), '(2)\n', (13414, 13417), True, 'import numpy as np\n'), ((20188, 20209), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (20200, 20209), False, 'import pygame\n'), ((20440, 20461), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (20452, 20461), False, 'import pygame\n'), ((20860, 20881), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (20872, 20881), False, 'import pygame\n')] |
import gmplot
from location import Satellite, River, BGate
from matplotlib.cm import ScalarMappable
import matplotlib.pyplot as plt
import numpy as np
# Bounding box (south-west / north-east corners) of the Berlin map region.
SW_LAT, SW_LON = 52.464011, 13.274099
NE_LAT, NE_LON = 52.586925, 13.521837


class Mapper(object):
    """Combine location PDFs of clue objects over a grid covering Berlin and
    render the resulting distribution as a Google-map overlay (gmplot)."""

    def __init__(self, objs, n=256):
        """
        Input
        ----
        objs: Location objects exposing get_pdf(latitudes, longitudes).
        n: number of grid points per axis (the grid has n*n cells).
        """
        self.objects = objs
        # Remember the grid side length so plotting can recover the 2-D shape
        # (new attribute; backward compatible).
        self.grid_size = n
        self.latitudes, self.longitudes = self.generate_mesh_grid(n)

    def generate_mesh_grid(self, n):
        """
        Returns X and Y 1-D arrays of latitudes and longitudes
        gridding Berlin's map.
        """
        x = np.linspace(SW_LAT, NE_LAT, n)
        y = np.linspace(SW_LON, NE_LON, n)
        X, Y = np.meshgrid(x, y)
        return X.flatten(), Y.flatten()

    def pull_heatmap_idx(self, distribution, size=10000):
        """
        Sample grid indices from a distribution to generate a heatmap.
        """
        return np.random.choice(np.arange(distribution.size), size=size, p=distribution)

    def get_distribution(self, objs):
        """
        Get the combined distribution on the map from different objects.

        Distributions are combined through Bayesian update: starting from a
        uniform prior, each object's PDF acts as an independent likelihood
        multiplying the posterior; re-normalizing after each update keeps the
        weights numerically stable.
        """
        distribution = np.ones(self.latitudes.shape)
        distribution /= np.sum(distribution)
        for obj in objs:
            probs = obj.get_pdf(self.latitudes, self.longitudes)
            distribution *= probs
            distribution /= np.sum(distribution)
        return distribution

    def find_maximum(self, distribution):
        """
        Returns the (latitude, longitude) of the maximum of a distribution.
        """
        max_idx = np.argmax(distribution)
        return self.latitudes[max_idx], self.longitudes[max_idx]

    def generate_map(self, objs=None, plot_type='lines', max_marker=False,
                     heatmap_size=20000, threshold=10):
        """
        Use gmplot module to generate map overlay of the given distributions.

        Input
        ----
        objs: Location objects with PDF as defined in location.py
        plot_type: type of overlay ('lines' or 'heatmap')
        max_marker: add a marker at the most likely location
        heatmap_size: number of draws from the PDF to plot the heatmap
        threshold: min. # of values to color an area in red on the heatmap

        Returns
        ----
        Writes an HTML file containing the Map object under maps/.
        """
        if not objs:
            objs = self.objects
        distribution = self.get_distribution(objs)
        gmap = gmplot.GoogleMapPlotter((SW_LAT + NE_LAT)/2, (SW_LON + NE_LON)/2, 11)
        # mark maximum likelihood
        if max_marker:
            x, y = self.find_maximum(distribution)
            gmap.scatter([x], [y],
                         c='r', marker=True)
        if plot_type == 'lines':
            # add probability contour lines to the map
            # BUG FIX: the grid shape was hard-coded to (256, 256), which broke
            # for any other n passed to __init__; use the actual grid size.
            shape = (self.grid_size, self.grid_size)
            X, Y = self.latitudes.reshape(shape), self.longitudes.reshape(shape)
            probs = distribution.reshape(shape)
            C = plt.contour(X, Y, probs, 5, colors='black', linewidth=.1)
            color_map = self.get_color_map(C.levels)
            for i, level in enumerate(C.collections):
                for path in level.get_paths():
                    gmap.plot(*zip(*path.vertices), color=color_map[i], edge_width=3)
        elif plot_type == 'heatmap':
            # generate heatmap from samples of the distribution
            heatmap_idx = self.pull_heatmap_idx(distribution, size=heatmap_size)
            gmap.heatmap(self.latitudes[heatmap_idx], self.longitudes[heatmap_idx],
                         radius=5, opacity=.4, threshold=threshold)
        # delimiting region of interest
        gmap.polygon([SW_LAT, SW_LAT, NE_LAT, NE_LAT], [SW_LON, NE_LON, NE_LON, SW_LON], face_alpha=0.01)
        fn = 'maps/{}.html'.format(objs[0].name if len(objs) == 1 else 'final_map')
        gmap.draw(fn)

    def get_color_map(self, levels):
        """Returns a gradient of hex colors from green to red, one per level."""
        sm = ScalarMappable(cmap='RdYlGn_r')
        normed_levels = levels / np.max(levels)
        # BUG FIX: '%02x' requires integers, but the scaled RGBA values are
        # floats, which raises TypeError on Python 3 — cast before formatting.
        colors = (255 * sm.to_rgba(normed_levels)[:, :3]).astype(int)
        return ['#%02x%02x%02x' % (r, g, b) for r, g, b in colors]
| [
"gmplot.GoogleMapPlotter",
"numpy.ones",
"numpy.argmax",
"numpy.max",
"numpy.sum",
"numpy.linspace",
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.contour",
"numpy.meshgrid",
"numpy.arange"
] | [((553, 583), 'numpy.linspace', 'np.linspace', (['SW_LAT', 'NE_LAT', 'n'], {}), '(SW_LAT, NE_LAT, n)\n', (564, 583), True, 'import numpy as np\n'), ((596, 626), 'numpy.linspace', 'np.linspace', (['SW_LON', 'NE_LON', 'n'], {}), '(SW_LON, NE_LON, n)\n', (607, 626), True, 'import numpy as np\n'), ((642, 659), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (653, 659), True, 'import numpy as np\n'), ((1378, 1407), 'numpy.ones', 'np.ones', (['self.latitudes.shape'], {}), '(self.latitudes.shape)\n', (1385, 1407), True, 'import numpy as np\n'), ((1432, 1452), 'numpy.sum', 'np.sum', (['distribution'], {}), '(distribution)\n', (1438, 1452), True, 'import numpy as np\n'), ((1601, 1621), 'numpy.sum', 'np.sum', (['distribution'], {}), '(distribution)\n', (1607, 1621), True, 'import numpy as np\n'), ((1807, 1830), 'numpy.argmax', 'np.argmax', (['distribution'], {}), '(distribution)\n', (1816, 1830), True, 'import numpy as np\n'), ((2688, 2761), 'gmplot.GoogleMapPlotter', 'gmplot.GoogleMapPlotter', (['((SW_LAT + NE_LAT) / 2)', '((SW_LON + NE_LON) / 2)', '(11)'], {}), '((SW_LAT + NE_LAT) / 2, (SW_LON + NE_LON) / 2, 11)\n', (2711, 2761), False, 'import gmplot\n'), ((4155, 4186), 'matplotlib.cm.ScalarMappable', 'ScalarMappable', ([], {'cmap': '"""RdYlGn_r"""'}), "(cmap='RdYlGn_r')\n", (4169, 4186), False, 'from matplotlib.cm import ScalarMappable\n'), ((885, 913), 'numpy.arange', 'np.arange', (['distribution.size'], {}), '(distribution.size)\n', (894, 913), True, 'import numpy as np\n'), ((3185, 3243), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'probs', '(5)'], {'colors': '"""black"""', 'linewidth': '(0.1)'}), "(X, Y, probs, 5, colors='black', linewidth=0.1)\n", (3196, 3243), True, 'import matplotlib.pyplot as plt\n'), ((4220, 4234), 'numpy.max', 'np.max', (['levels'], {}), '(levels)\n', (4226, 4234), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""DepthDependentTaylorNonLinearDiffuser Component.
@author: <NAME>
@author: <NAME>
@author: <NAME>
"""
import numpy as np
from landlab import Component, LinkStatus
from landlab.core.messages import deprecation_message
class DepthDependentTaylorDiffuser(Component):
    r"""
    This component implements a depth-dependent Taylor series diffusion rule,
    combining concepts of Ganti et al. (2012) and Johnstone and Hilley (2014).
    Hillslope sediment flux uses a Taylor Series expansion of the Andrews-
    Bucknam formulation of nonlinear hillslope flux derived following
    Ganti et al., 2012 with a depth dependent component inspired by Johnstone
    and Hilley (2014). The flux :math:`q_s` is given as:
    .. math::
        q_s = - K H_* \nabla \eta ( 1 + (S/S_c)^2 + (S/S_c)^4 + \dots + (S/S_c)^{2(n-1)} ) (1 - \exp( - H / H_*))
    where :math:`K` is a transport velocity coefficient, :math:`\eta` is land
    surface elevation, :math:`S` is the slope gradient (defined as
    positive downward), :math:`S_c` is the critical slope, :math:`n` is the
    number of terms, :math:`H` is the soil depth on links, and :math:`H_*` is
    the soil transport decay depth.
    The default behavior uses two terms to produce a slope dependence as
    described by Equation 6 of Ganti et al. (2012).
    This component will ignore soil thickness located at non-core nodes.
    Examples
    --------
    First lets make a simple example with flat topography.
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> from landlab.components import ExponentialWeatherer
    >>> from landlab.components import DepthDependentTaylorDiffuser
    >>> mg = RasterModelGrid((5, 5))
    >>> soilTh = mg.add_zeros('node', 'soil__depth')
    >>> z = mg.add_zeros('node', 'topographic__elevation')
    >>> BRz = mg.add_zeros('node', 'bedrock__elevation')
    >>> expweath = ExponentialWeatherer(mg)
    >>> DDdiff = DepthDependentTaylorDiffuser(mg)
    >>> expweath.calc_soil_prod_rate()
    >>> np.allclose(mg.at_node['soil_production__rate'][mg.core_nodes], 1.)
    True
    >>> DDdiff.run_one_step(2.)
    >>> np.allclose(mg.at_node['topographic__elevation'][mg.core_nodes], 0.)
    True
    >>> np.allclose(mg.at_node['bedrock__elevation'][mg.core_nodes], -2.)
    True
    >>> np.allclose(mg.at_node['soil__depth'][mg.core_nodes], 2.)
    True
    Now a more complicated example with a slope.
    >>> mg = RasterModelGrid((3, 5))
    >>> soilTh = mg.add_zeros('node', 'soil__depth')
    >>> z = mg.add_zeros('node', 'topographic__elevation')
    >>> BRz = mg.add_zeros('node', 'bedrock__elevation')
    >>> z += mg.node_x.copy()
    >>> BRz += mg.node_x / 2.
    >>> soilTh[:] = z - BRz
    >>> expweath = ExponentialWeatherer(mg)
    >>> DDdiff = DepthDependentTaylorDiffuser(mg)
    >>> expweath.calc_soil_prod_rate()
    >>> np.allclose(
    ...     mg.at_node['soil_production__rate'][mg.core_nodes],
    ...     np.array([ 0.60653066,  0.36787944,  0.22313016]))
    True
    >>> DDdiff.run_one_step(0.1)
    >>> np.allclose(
    ...     mg.at_node['topographic__elevation'][mg.core_nodes],
    ...     np.array([ 1.04773024,  2.02894986,  3.01755898]))
    True
    >>> np.allclose(mg.at_node['bedrock__elevation'][mg.core_nodes],
    ...     np.array([ 0.43934693,  0.96321206,  1.47768698]))
    True
    >>> np.allclose(mg.at_node['soil__depth'], z - BRz)
    True
    The DepthDependentTaylorDiffuser makes and moves soil at a rate proportional
    to slope, this means that there is a characteristic time scale for soil
    transport and an associated stability criteria for the timestep. The
    maximum characteristic time scale, :math:`De_{max}`, is given as a function of the
    hillslope diffustivity, :math:`D`, the maximum slope, :math:`S_{max}`, and the critical slope
    :math:`S_c`.
    .. math::
        De_{max} = D
        \left(
            1 +
            \left( \frac{S_{max}}{S_c} \right)^2 +
            \left( \frac{S_{max}}{S_c} \right)^4 +
            \dots +
            \left( \frac{S_{max}}{S_c} \right)^{( 2 * ( n - 1 ))}
        \right)
    The maximum stable time step is given by
    .. math::
        dtmax = courant_factor * dx * dx / Demax
    Where the courant factor is a user defined scale (default is 0.2)
    The DepthDependentTaylorDiffuser has a boolean flag that permits a user
    to be warned if timesteps are too large for the slopes in the model grid
    (if_unstable = 'warn') and a boolean flag that turns on dynamic timestepping
    (dynamic_dt = False).
    >>> DDdiff = DepthDependentTaylorDiffuser(mg, if_unstable='warn')
    >>> DDdiff.run_one_step(2.)
    Topographic slopes are high enough such that the Courant condition is
    exceeded AND you have not selected dynamic timestepping with
    dynamic_dt=True. This may lead to infinite and/or nan values for slope,
    elevation, and soil depth. Consider using a smaller time step or dynamic
    timestepping. The Courant condition recommends a timestep of
    0.0953407607307 or smaller.
    Alternatively you can specify if_unstable='raise', and a Runtime Error will
    be raised if this condition is not met.
    Next, lets do an example with dynamic timestepping.
    >>> mg = RasterModelGrid((3, 5))
    >>> soilTh = mg.add_zeros('node', 'soil__depth')
    >>> z = mg.add_zeros('node', 'topographic__elevation')
    >>> BRz = mg.add_zeros('node', 'bedrock__elevation')
    We'll use a steep slope and very little soil.
    >>> z += mg.node_x.copy()**2
    >>> BRz = z.copy() - 1.0
    >>> soilTh[:] = z - BRz
    >>> expweath = ExponentialWeatherer(mg)
    Lets try to move the soil with a large timestep. Without dynamic time
    steps, this gives a warning that we've exceeded the dynamic timestep size
    and should use a smaller timestep. We could either use the smaller timestep,
    or specify that we want to use a dynamic timestep.
    >>> DDdiff = DepthDependentTaylorDiffuser(mg, if_unstable='warn', dynamic_dt=False)
    >>> expweath.calc_soil_prod_rate()
    >>> DDdiff.run_one_step(10)
    Topographic slopes are high enough such that the Courant condition is
    exceeded AND you have not selected dynamic timestepping with
    dynamic_dt=True. This may lead to infinite and/or nan values for slope,
    elevation, and soil depth. Consider using a smaller time step or dynamic
    timestepping. The Courant condition recommends a timestep of
    0.004 or smaller.
    Now, we'll re-build the grid and do the same example with dynamic timesteps.
    >>> mg = RasterModelGrid((3, 5))
    >>> soilTh = mg.add_zeros('node', 'soil__depth')
    >>> z = mg.add_zeros('node', 'topographic__elevation')
    >>> BRz = mg.add_zeros('node', 'bedrock__elevation')
    >>> z += mg.node_x.copy()**2
    >>> BRz = z.copy() - 1.0
    >>> soilTh[:] = z - BRz
    >>> expweath = ExponentialWeatherer(mg)
    >>> DDdiff = DepthDependentTaylorDiffuser(mg, if_unstable='warn', dynamic_dt=True)
    >>> expweath.calc_soil_prod_rate()
    >>> DDdiff.run_one_step(10)
    >>> np.any(np.isnan(z))
    False
    Now, we'll test that changing the transport decay depth behaves as expected.
    >>> mg = RasterModelGrid((3, 5))
    >>> soilTh = mg.add_zeros('node', 'soil__depth')
    >>> z = mg.add_zeros('node', 'topographic__elevation')
    >>> BRz = mg.add_zeros('node', 'bedrock__elevation')
    >>> z += mg.node_x.copy()**0.5
    >>> BRz = z.copy() - 1.0
    >>> soilTh[:] = z - BRz
    >>> expweath = ExponentialWeatherer(mg)
    >>> DDdiff = DepthDependentTaylorDiffuser(mg, soil_transport_decay_depth = 0.1)
    >>> DDdiff.run_one_step(1)
    >>> soil_decay_depth_point1 = mg.at_node['topographic__elevation'][mg.core_nodes]
    >>> z[:] = 0
    >>> z += mg.node_x.copy()**0.5
    >>> BRz = z.copy() - 1.0
    >>> soilTh[:] = z - BRz
    >>> DDdiff = DepthDependentTaylorDiffuser(mg, soil_transport_decay_depth = 1.0)
    >>> DDdiff.run_one_step(1)
    >>> soil_decay_depth_1 = mg.at_node['topographic__elevation'][mg.core_nodes]
    >>> np.greater(soil_decay_depth_1[1], soil_decay_depth_point1[1])
    False
    References
    ----------
    **Required Software Citation(s) Specific to this Component**
    <NAME>., <NAME>., <NAME>., <NAME>. (2019). Terrainbento 1.0: a
    Python package for multi-model analysis in long-term drainage basin
    evolution. Geoscientific Model Development 12(4), 1267--1297.
    https://dx.doi.org/10.5194/gmd-12-1267-2019
    **Additional References**
    <NAME>., <NAME>., <NAME>. (2012). A sub-grid scale
    closure for nonlinear hillslope sediment transport models Journal of
    Geophysical Research: Earth Surface 117(F2).
    https://dx.doi.org/10.1029/2011jf002181
    <NAME>., <NAME>. (2015). Lithologic control on the form of
    soil-mantled hillslopes Geology 43(1), 83-86.
    https://doi.org/10.1130/G36052.1
    """
    _name = "DepthDependentTaylorDiffuser"
    _unit_agnostic = True
    _cite_as = """
    @article{barnhart2019terrain,
      author = {Barnhart, <NAME> and Glade, <NAME> and Shobe, <NAME> and Tucker, <NAME>},
      title = {{Terrainbento 1.0: a Python package for multi-model analysis in long-term drainage basin evolution}},
      doi = {10.5194/gmd-12-1267-2019},
      pages = {1267---1297},
      number = {4},
      volume = {12},
      journal = {Geoscientific Model Development},
      year = {2019},
    }
    """
    _info = {
        "bedrock__elevation": {
            "dtype": float,
            "intent": "out",
            "optional": False,
            "units": "m",
            "mapping": "node",
            "doc": "elevation of the bedrock surface",
        },
        "soil__depth": {
            "dtype": float,
            "intent": "inout",
            "optional": False,
            "units": "m",
            "mapping": "node",
            "doc": "Depth of soil or weathered bedrock",
        },
        "soil__flux": {
            "dtype": float,
            "intent": "out",
            "optional": False,
            "units": "m^2/yr",
            "mapping": "link",
            "doc": "flux of soil in direction of link",
        },
        "soil_production__rate": {
            "dtype": float,
            "intent": "in",
            "optional": False,
            "units": "m/yr",
            "mapping": "node",
            "doc": "rate of soil production at nodes",
        },
        "topographic__elevation": {
            "dtype": float,
            "intent": "inout",
            "optional": False,
            "units": "m",
            "mapping": "node",
            "doc": "Land surface topographic elevation",
        },
        "topographic__slope": {
            "dtype": float,
            "intent": "out",
            "optional": False,
            "units": "m/m",
            "mapping": "link",
            "doc": "gradient of the ground surface",
        },
    }
    def __init__(
        self,
        grid,
        linear_diffusivity=None,
        slope_crit=1.0,
        soil_transport_decay_depth=1.0,
        nterms=2,
        dynamic_dt=False,
        if_unstable="pass",
        courant_factor=0.2,
        soil_transport_velocity=1.0,
    ):
        """Initialize the DepthDependentTaylorDiffuser.
        Parameters
        ----------
        grid: ModelGrid
            Landlab ModelGrid object
        linear_diffusivity: float, optional, DEPRECATED
            Hillslope diffusivity / decay depth, m/yr
            Default = 1.0
        slope_crit: float, optional
            Critical gradient parameter, m/m
            Default = 1.0
        soil_transport_decay_depth: float, optional
            characteristic transport soil depth, m
            Default = 1.0
        nterms: int, optional. default = 2
            number of terms in the Taylor expansion.
            Two terms (default) gives the behavior
            described in Ganti et al. (2012).
        dynamic_dt : bool, optional, default = False
            Whether internal timestepping is used.
        if_unstable : str, optional, default = "pass"
            What to do if unstable (options are "pass",
            "raise", "warn")
        courant_factor : float, optional, default = 0.2
            Courant factor for timestep calculation.
        soil_transport_velocity : float, optional, default = 1.0
            Velocity parameter for soil transport, m/yr. Diffusivity is the
            product of this parameter and soil_transport_decay_depth.
        """
        super().__init__(grid)
        # Handle now-deprecated diffusivity argument: when supplied, it is
        # used verbatim as the velocity coefficient K (with a warning).
        if linear_diffusivity is None:
            self._K = soil_transport_velocity
        else:
            message = """Use of linear_diffusivity is deprecated, because the
                         name is misleading: it is actually a velocity;
                         diffusivity is obtained by multiplying by soil
                         transport decay depth. Use soil_transport_velocity
                         instead."""
            print(deprecation_message(message))
            self._K = linear_diffusivity
        self._soil_transport_decay_depth = soil_transport_decay_depth
        self._slope_crit = slope_crit
        self._nterms = nterms
        self._dynamic_dt = dynamic_dt
        self._if_unstable = if_unstable
        self._courant_factor = courant_factor
        # get reference to inputs
        self._elev = self._grid.at_node["topographic__elevation"]
        self._soil_prod_rate = self._grid.at_node["soil_production__rate"]
        self._depth = self._grid.at_node["soil__depth"]
        # create outputs if necessary and get reference.
        self.initialize_output_fields()
        self._slope = self._grid.at_link["topographic__slope"]
        self._flux = self._grid.at_link["soil__flux"]
        self._bedrock = self._grid.at_node["bedrock__elevation"]
    def soilflux(self, dt):
        """Calculate soil flux for a time period 'dt'.
        Parameters
        ----------
        dt: float (time)
            The imposed timestep.
        """
        # establish time left as all of dt
        time_left = dt
        # begin while loop for time left; each pass advances one
        # (possibly dynamic) sub-step
        while time_left > 0.0:
            # calculate soil__depth
            self._grid.at_node["soil__depth"][:] = (
                self._grid.at_node["topographic__elevation"]
                - self._grid.at_node["bedrock__elevation"]
            )
            # Calculate soil depth at links.
            self._H_link = self._grid.map_value_at_max_node_to_link(
                "topographic__elevation", "soil__depth"
            )
            # Calculate gradients
            # NOTE(review): this rebinds self._slope to a new array; the
            # grid's "topographic__slope" field referenced in __init__ is
            # seemingly never updated in place -- confirm intended.
            self._slope = self._grid.calc_grad_at_link(self._elev)
            self._slope[self._grid.status_at_link == LinkStatus.INACTIVE] = 0.0
            # Test for time stepping courant condition
            courant_slope_term = 0.0
            courant_s_over_scrit = self._slope.max() / self._slope_crit
            for i in range(0, 2 * self._nterms, 2):
                courant_slope_term += courant_s_over_scrit**i
                if np.any(np.isinf(courant_slope_term)):
                    message = (
                        "Soil flux term is infinite in Courant condition "
                        "calculation. This is likely due to "
                        "using too many terms in the Taylor expansion."
                    )
                    raise RuntimeError(message)
            # Calculate De Max
            De_max = self._K * (courant_slope_term)
            # Calculate longest stable timestep
            self._dt_max = self._courant_factor * (self._grid.dx**2) / De_max
            # Test for the Courant condition and print warning if user intended
            # for it to be printed.
            if (
                (self._dt_max < dt)
                and (not self._dynamic_dt)
                and (self._if_unstable != "pass")
            ):
                message = (
                    "Topographic slopes are high enough such that the "
                    "Courant condition is exceeded AND you have not "
                    "selected dynamic timestepping with dynamic_dt=True. "
                    "This may lead to infinite and/or nan values for "
                    "slope, elevation, and soil depth. Consider using a "
                    "smaller time step or dynamic timestepping. The "
                    "Courant condition recommends a timestep of "
                    "" + str(self._dt_max) + " or smaller."
                )
                if self._if_unstable == "raise":
                    raise RuntimeError(message)
                if self._if_unstable == "warn":
                    print(message)
            # if dynamic dt is selected, use it, otherwise, use the entire time
            if self._dynamic_dt:
                self._sub_dt = np.min([dt, self._dt_max])
                time_left -= self._sub_dt
            else:
                self._sub_dt = dt
                time_left = 0
            # update sed flux, topography, soil, and bedrock based on the
            # current self._sub_dt
            self._update_flux_topography_soil_and_bedrock()
    def _update_flux_topography_soil_and_bedrock(self):
        """Calculate soil flux and update topography."""
        # Calculate flux: Taylor-series slope factor (even powers of S/S_c)
        slope_term = 0.0
        s_over_scrit = self._slope / self._slope_crit
        for i in range(0, 2 * self._nterms, 2):
            slope_term += s_over_scrit**i
            if np.any(np.isinf(slope_term)):
                message = (
                    "Soil flux term is infinite. This is likely due to "
                    "using too many terms in the Taylor expansion."
                )
                raise RuntimeError(message)
        # Depth dependence enters through the (1 - exp(-H/H*)) factor.
        self._flux[:] = -(
            (self._K * self._slope * self._soil_transport_decay_depth)
            * (slope_term)
            * (1.0 - np.exp(-self._H_link / self._soil_transport_decay_depth))
        )
        # Calculate flux divergence
        dqdx = self._grid.calc_flux_div_at_node(self._flux)
        # Calculate change in soil depth
        dhdt = self._soil_prod_rate - dqdx
        # Calculate soil depth at nodes
        self._depth[self._grid.core_nodes] += dhdt[self._grid.core_nodes] * self._sub_dt
        # prevent negative soil thickness
        self._depth[self._depth < 0.0] = 0.0
        # Calculate bedrock elevation
        self._bedrock[self._grid.core_nodes] -= (
            self._soil_prod_rate[self._grid.core_nodes] * self._sub_dt
        )
        # Update topography
        self._elev[self._grid.core_nodes] = (
            self._depth[self._grid.core_nodes] + self._bedrock[self._grid.core_nodes]
        )
    def run_one_step(self, dt):
        """Advance the component by one timestep of duration *dt*.
        Parameters
        ----------
        dt: float (time)
            The imposed timestep.
        """
        self.soilflux(dt)
| [
"landlab.core.messages.deprecation_message",
"numpy.exp",
"numpy.isinf",
"numpy.min"
] | [((13002, 13030), 'landlab.core.messages.deprecation_message', 'deprecation_message', (['message'], {}), '(message)\n', (13021, 13030), False, 'from landlab.core.messages import deprecation_message\n'), ((16890, 16916), 'numpy.min', 'np.min', (['[dt, self._dt_max]'], {}), '([dt, self._dt_max])\n', (16896, 16916), True, 'import numpy as np\n'), ((17541, 17561), 'numpy.isinf', 'np.isinf', (['slope_term'], {}), '(slope_term)\n', (17549, 17561), True, 'import numpy as np\n'), ((15132, 15160), 'numpy.isinf', 'np.isinf', (['courant_slope_term'], {}), '(courant_slope_term)\n', (15140, 15160), True, 'import numpy as np\n'), ((17942, 17998), 'numpy.exp', 'np.exp', (['(-self._H_link / self._soil_transport_decay_depth)'], {}), '(-self._H_link / self._soil_transport_decay_depth)\n', (17948, 17998), True, 'import numpy as np\n')] |
import faceDet.ViolaJones.Regions as region
import numpy as np
class WeakClassifier:
    def __init__(self, positive_regions, negative_regions, threshold, polarity):
        """
        A single Haar-like feature used as a weak classifier.
        :param positive_regions: regions whose scores add to the feature value
        :param negative_regions: regions whose scores subtract from it
        :param threshold: decision threshold
        :param polarity: direction of the decision inequality, 1 or -1
        """
        self.positive_regions = positive_regions
        self.negative_regions = negative_regions
        self.threshold = threshold
        self.polarity = polarity

    def classify(self, x):
        """
        Classifies an integral image based on this feature's threshold and polarity.
        :param x: the integral image
        :return:
            1 if polarity * feature(x) < polarity * threshold
            0 otherwise
        """
        if self.polarity * self.computeVal(x) < self.polarity * self.threshold:
            return 1
        return 0

    def computeVal(self, x):
        """
        Computes the feature value from the positive/negative rectangles.
        :param x: the integral image
        :return: sum of positive-region scores minus sum of negative-region scores
        """
        positive = sum(r.computeScore(x) for r in self.positive_regions)
        negative = sum(r.computeScore(x) for r in self.negative_regions)
        return positive - negative

    def __str__(self):
        return "Feature(WeakClassifier): (threshold=%d, polarity=%d, %s, %s" % (
            self.threshold, self.polarity, str(self.positive_regions), str(self.negative_regions))
def computeFeatures(frameSize):
    """
    Builds all possible modified features in frameSize.
    Positions and feature sizes are both sampled with a stride of 2 to keep
    the feature count manageable.
    :param frameSize: a tuple of form (height, width)
    :return: an object ndarray of tuples. Each tuple's first element is a list
        of rectangle regions which positively contribute to the feature; the
        second element is a list of regions contributing negatively.
    """
    height, width = frameSize
    features = []
    for w in range(1, width + 1, 2):  # feature width
        for h in range(1, height + 1, 2):  # feature height
            i = 0
            while i + w < width:  # i, j are the feature's top-left position
                j = 0
                while j + h < height:
                    # 2 rectangle features
                    immediate = region.RectangleRegion(i, j, w, h)
                    right = region.RectangleRegion(i + w, j, w, h)
                    if i + 2 * w < width:  # Horizontally Adjacent
                        features.append(([immediate], [right]))  # positive, negative region to consider
                    bottom = region.RectangleRegion(i, j + h, w, h)
                    if j + 2 * h < height:  # Vertically Adjacent
                        features.append(([immediate], [bottom]))
                    right_2 = region.RectangleRegion(i + 2 * w, j, w, h)
                    # 3 rectangle features
                    if i + 3 * w < width:  # Horizontally Adjacent
                        features.append(([right], [right_2, immediate]))
                    bottom_2 = region.RectangleRegion(i, j + 2 * h, w, h)
                    if j + 3 * h < height:  # Vertically Adjacent
                        features.append(([bottom], [bottom_2, immediate]))
                    # 4 rectangle features
                    bottom_right = region.RectangleRegion(i + w, j + h, w, h)
                    if i + 2 * w < width and j + 2 * h < height:
                        features.append(([right, bottom], [immediate, bottom_right]))
                    j += 2
                i += 2
    print("Computed %d features" % (len(features)))
    # dtype=object is required: each entry pairs region lists of differing
    # lengths (1 vs 2), which modern numpy refuses to pack into a regular
    # ndarray (ragged nested sequences raise ValueError since numpy 1.24).
    return np.array(features, dtype=object)
def apply_features(features, training_data):
    """
    Evaluate every feature on every training example.
    :param features:
        An array of tuples [(positive), (negative)] region lists.
    :param training_data: Array of tuples [(integralImage, classificationValue)].
    :return:
        X: A numpy array of shape (len(features), len(training_data)). Each row represents the value of a single feature for each training example
        y: A numpy array of shape len(training_data). y = training_data[1]
    """
    X = np.zeros((len(features), len(training_data)))
    y = np.array(list(map(lambda data: data[1], training_data)))  # y is only the actual classification of images
    i = 0
    for positive_regions, negative_regions in features:  # apply same feature to all images, repeat for all features
        # if i%100 == 0: print("Applied %d" % i) # num of features applied
        feature = lambda ii: sum([pos.computeScore(ii) for pos in positive_regions]) - sum(
            [neg.computeScore(ii) for neg in negative_regions])
        # data[0] is training data, feature(data[0]) is where the training data is applied thorough map
        # provide training data to feature function above
        X[i] = list(map(lambda data: feature(data[0]), training_data))
        i += 1
return X, y | [
"numpy.array",
"faceDet.ViolaJones.Regions.RectangleRegion"
] | [((3893, 3911), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (3901, 3911), True, 'import numpy as np\n'), ((2530, 2564), 'faceDet.ViolaJones.Regions.RectangleRegion', 'region.RectangleRegion', (['i', 'j', 'w', 'h'], {}), '(i, j, w, h)\n', (2552, 2564), True, 'import faceDet.ViolaJones.Regions as region\n'), ((2594, 2632), 'faceDet.ViolaJones.Regions.RectangleRegion', 'region.RectangleRegion', (['(i + w)', 'j', 'w', 'h'], {}), '(i + w, j, w, h)\n', (2616, 2632), True, 'import faceDet.ViolaJones.Regions as region\n'), ((2842, 2880), 'faceDet.ViolaJones.Regions.RectangleRegion', 'region.RectangleRegion', (['i', '(j + h)', 'w', 'h'], {}), '(i, j + h, w, h)\n', (2864, 2880), True, 'import faceDet.ViolaJones.Regions as region\n'), ((3047, 3089), 'faceDet.ViolaJones.Regions.RectangleRegion', 'region.RectangleRegion', (['(i + 2 * w)', 'j', 'w', 'h'], {}), '(i + 2 * w, j, w, h)\n', (3069, 3089), True, 'import faceDet.ViolaJones.Regions as region\n'), ((3310, 3352), 'faceDet.ViolaJones.Regions.RectangleRegion', 'region.RectangleRegion', (['i', '(j + 2 * h)', 'w', 'h'], {}), '(i, j + 2 * h, w, h)\n', (3332, 3352), True, 'import faceDet.ViolaJones.Regions as region\n'), ((3578, 3620), 'faceDet.ViolaJones.Regions.RectangleRegion', 'region.RectangleRegion', (['(i + w)', '(j + h)', 'w', 'h'], {}), '(i + w, j + h, w, h)\n', (3600, 3620), True, 'import faceDet.ViolaJones.Regions as region\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import random
import unittest
import numpy
import torch
import torch.nn.functional as F
import nni
from nni.compression.pytorch.pruning import (
LevelPruner,
L1NormPruner,
L2NormPruner,
SlimPruner,
FPGMPruner,
ActivationAPoZRankPruner,
ActivationMeanRankPruner,
TaylorFOWeightPruner,
ADMMPruner,
MovementPruner
)
from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact
class TorchModel(torch.nn.Module):
    """Small CNN for 28x28 single-channel inputs producing 10 log-probabilities."""

    def __init__(self):
        super().__init__()
        # Layer creation order is kept identical so parameter initialisation
        # consumes the RNG in the same sequence as before.
        self.conv1 = torch.nn.Conv2d(1, 10, 5, 1)
        self.bn1 = torch.nn.BatchNorm2d(10)
        self.conv2 = torch.nn.Conv2d(10, 20, 5, 1)
        self.bn2 = torch.nn.BatchNorm2d(20)
        self.fc1 = torch.nn.Linear(4 * 4 * 20, 100)
        self.fc2 = torch.nn.Linear(100, 10)

    def forward(self, x):
        """Two conv/bn/relu/pool stages followed by two fully-connected layers."""
        out = F.max_pool2d(F.relu(self.bn1(self.conv1(x))), 2, 2)
        out = F.max_pool2d(F.relu(self.bn2(self.conv2(out))), 2, 2)
        flat = out.view(-1, 4 * 4 * 20)
        hidden = F.relu(self.fc1(flat))
        logits = self.fc2(hidden)
        return F.log_softmax(logits, dim=1)
def trainer(model, optimizer, criterion):
    """Run 10 optimisation steps on random 28x28 batches with labels 0..9."""
    model.train()
    for _step in range(10):
        batch = torch.rand(10, 1, 28, 28)
        targets = torch.Tensor(list(range(10))).type(torch.LongTensor)
        optimizer.zero_grad()
        predictions = model(batch)
        loss = criterion(predictions, targets)
        loss.backward()
        optimizer.step()
def get_optimizer(model):
    """Return an nni-traced SGD optimizer (lr=0.1, momentum=0.9, wd=5e-4) for *model*."""
    traced_sgd = nni.trace(torch.optim.SGD)
    return traced_sgd(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
# Shared loss used by every pruner test below.
criterion = torch.nn.CrossEntropyLoss()
class PrunerTestCase(unittest.TestCase):
    """End-to-end smoke tests: each pruner compresses TorchModel and the
    measured sparsity is checked against the configured target."""
    def test_level_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = LevelPruner(model=model, config_list=config_list)
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def test_level_pruner_bank(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.7}]
        pruner = LevelPruner(model=model, config_list=config_list, mode='balance', balance_gran=[5])
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        # balance mode rounds down per granule, yielding a lower achieved
        # sparsity (0.6) than the configured 0.7
        assert sparsity_list[0]['total_sparsity'] == 0.6
    def test_l1_norm_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = L1NormPruner(model=model, config_list=config_list, mode='dependency_aware',
                              dummy_input=torch.rand(10, 1, 28, 28))
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def test_l2_norm_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = L2NormPruner(model=model, config_list=config_list, mode='dependency_aware',
                              dummy_input=torch.rand(10, 1, 28, 28))
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def test_fpgm_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = FPGMPruner(model=model, config_list=config_list, mode='dependency_aware',
                            dummy_input=torch.rand(10, 1, 28, 28))
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def test_slim_pruner(self):
        # SlimPruner works on BatchNorm scaling factors, hence BatchNorm2d op_types
        model = TorchModel()
        config_list = [{'op_types': ['BatchNorm2d'], 'total_sparsity': 0.8}]
        pruner = SlimPruner(model=model, config_list=config_list, trainer=trainer, traced_optimizer=get_optimizer(model),
                            criterion=criterion, training_epochs=1, scale=0.001, mode='global')
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def test_activation_mean_rank_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = ActivationMeanRankPruner(model=model, config_list=config_list, trainer=trainer,
                                          traced_optimizer=get_optimizer(model), criterion=criterion, training_batches=5,
                                          activation='relu', mode='dependency_aware',
                                          dummy_input=torch.rand(10, 1, 28, 28))
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def test_taylor_fo_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = TaylorFOWeightPruner(model=model, config_list=config_list, trainer=trainer,
                                      traced_optimizer=get_optimizer(model), criterion=criterion, training_batches=5,
                                      mode='dependency_aware', dummy_input=torch.rand(10, 1, 28, 28))
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def test_admm_pruner(self):
        model = TorchModel()
        # rho is the ADMM penalty parameter
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8, 'rho': 1e-3}]
        pruner = ADMMPruner(model=model, config_list=config_list, trainer=trainer, traced_optimizer=get_optimizer(model),
                            criterion=criterion, iterations=2, training_epochs=1)
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def test_movement_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = MovementPruner(model=model, config_list=config_list, trainer=trainer, traced_optimizer=get_optimizer(model),
                                criterion=criterion, training_epochs=5, warm_up_step=0, cool_down_beginning_step=4)
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
class FixSeedPrunerTestCase(unittest.TestCase):
    """Pruner tests that need deterministic RNG state: setUp pins all three
    seeds (random, numpy, torch) and tearDown re-randomizes them so later
    tests are not affected."""
    def test_activation_apoz_rank_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = ActivationAPoZRankPruner(model=model, config_list=config_list, trainer=trainer,
                                          traced_optimizer=get_optimizer(model), criterion=criterion, training_batches=5,
                                          activation='relu', mode='dependency_aware',
                                          dummy_input=torch.rand(10, 1, 28, 28))
        pruned_model, masks = pruner.compress()
        pruner._unwrap_model()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def setUp(self) -> None:
        # fix seed in order to solve the random failure of this unit test
        random.seed(1024)
        numpy.random.seed(1024)
        torch.manual_seed(1024)
    def tearDown(self) -> None:
        # reset all RNGs to a time-derived (effectively random) seed
        import time
        now = int(time.time() * 100)
        random.seed(now)
        seed = random.randint(0, 2 ** 32 - 1)
        random.seed(seed)
        numpy.random.seed(seed)
        torch.manual_seed(seed)
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
| [
"torch.nn.BatchNorm2d",
"torch.manual_seed",
"torch.nn.CrossEntropyLoss",
"nni.algorithms.compression.v2.pytorch.utils.compute_sparsity_mask2compact",
"random.seed",
"torch.nn.Conv2d",
"nni.compression.pytorch.pruning.LevelPruner",
"nni.trace",
"numpy.random.seed",
"torch.nn.Linear",
"torch.nn.f... | [((1688, 1715), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (1713, 1715), False, 'import torch\n'), ((8762, 8777), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8775, 8777), False, 'import unittest\n'), ((624, 652), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(10)', '(5)', '(1)'], {}), '(1, 10, 5, 1)\n', (639, 652), False, 'import torch\n'), ((672, 696), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(10)'], {}), '(10)\n', (692, 696), False, 'import torch\n'), ((718, 747), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(10)', '(20)', '(5)', '(1)'], {}), '(10, 20, 5, 1)\n', (733, 747), False, 'import torch\n'), ((767, 791), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['(20)'], {}), '(20)\n', (787, 791), False, 'import torch\n'), ((811, 843), 'torch.nn.Linear', 'torch.nn.Linear', (['(4 * 4 * 20)', '(100)'], {}), '(4 * 4 * 20, 100)\n', (826, 843), False, 'import torch\n'), ((863, 887), 'torch.nn.Linear', 'torch.nn.Linear', (['(100)', '(10)'], {}), '(100, 10)\n', (878, 887), False, 'import torch\n'), ((971, 992), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (983, 992), True, 'import torch.nn.functional as F\n'), ((1049, 1070), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (1061, 1070), True, 'import torch.nn.functional as F\n'), ((1177, 1200), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (1190, 1200), True, 'import torch.nn.functional as F\n'), ((1303, 1328), 'torch.rand', 'torch.rand', (['(10)', '(1)', '(28)', '(28)'], {}), '(10, 1, 28, 28)\n', (1313, 1328), False, 'import torch\n'), ((1586, 1612), 'nni.trace', 'nni.trace', (['torch.optim.SGD'], {}), '(torch.optim.SGD)\n', (1595, 1612), False, 'import nni\n'), ((1904, 1953), 'nni.compression.pytorch.pruning.LevelPruner', 'LevelPruner', ([], {'model': 'model', 'config_list': 'config_list'}), '(model=model, 
config_list=config_list)\n', (1915, 1953), False, 'from nni.compression.pytorch.pruning import LevelPruner, L1NormPruner, L2NormPruner, SlimPruner, FPGMPruner, ActivationAPoZRankPruner, ActivationMeanRankPruner, TaylorFOWeightPruner, ADMMPruner, MovementPruner\n'), ((2057, 2120), 'nni.algorithms.compression.v2.pytorch.utils.compute_sparsity_mask2compact', 'compute_sparsity_mask2compact', (['pruned_model', 'masks', 'config_list'], {}), '(pruned_model, masks, config_list)\n', (2086, 2120), False, 'from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact\n'), ((2336, 2423), 'nni.compression.pytorch.pruning.LevelPruner', 'LevelPruner', ([], {'model': 'model', 'config_list': 'config_list', 'mode': '"""balance"""', 'balance_gran': '[5]'}), "(model=model, config_list=config_list, mode='balance',\n balance_gran=[5])\n", (2347, 2423), False, 'from nni.compression.pytorch.pruning import LevelPruner, L1NormPruner, L2NormPruner, SlimPruner, FPGMPruner, ActivationAPoZRankPruner, ActivationMeanRankPruner, TaylorFOWeightPruner, ADMMPruner, MovementPruner\n'), ((2523, 2586), 'nni.algorithms.compression.v2.pytorch.utils.compute_sparsity_mask2compact', 'compute_sparsity_mask2compact', (['pruned_model', 'masks', 'config_list'], {}), '(pruned_model, masks, config_list)\n', (2552, 2586), False, 'from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact\n'), ((3085, 3148), 'nni.algorithms.compression.v2.pytorch.utils.compute_sparsity_mask2compact', 'compute_sparsity_mask2compact', (['pruned_model', 'masks', 'config_list'], {}), '(pruned_model, masks, config_list)\n', (3114, 3148), False, 'from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact\n'), ((3609, 3672), 'nni.algorithms.compression.v2.pytorch.utils.compute_sparsity_mask2compact', 'compute_sparsity_mask2compact', (['pruned_model', 'masks', 'config_list'], {}), '(pruned_model, masks, config_list)\n', (3638, 3672), False, 'from 
nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact\n'), ((4126, 4189), 'nni.algorithms.compression.v2.pytorch.utils.compute_sparsity_mask2compact', 'compute_sparsity_mask2compact', (['pruned_model', 'masks', 'config_list'], {}), '(pruned_model, masks, config_list)\n', (4155, 4189), False, 'from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact\n'), ((4714, 4777), 'nni.algorithms.compression.v2.pytorch.utils.compute_sparsity_mask2compact', 'compute_sparsity_mask2compact', (['pruned_model', 'masks', 'config_list'], {}), '(pruned_model, masks, config_list)\n', (4743, 4777), False, 'from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact\n'), ((5475, 5538), 'nni.algorithms.compression.v2.pytorch.utils.compute_sparsity_mask2compact', 'compute_sparsity_mask2compact', (['pruned_model', 'masks', 'config_list'], {}), '(pruned_model, masks, config_list)\n', (5504, 5538), False, 'from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact\n'), ((6152, 6215), 'nni.algorithms.compression.v2.pytorch.utils.compute_sparsity_mask2compact', 'compute_sparsity_mask2compact', (['pruned_model', 'masks', 'config_list'], {}), '(pruned_model, masks, config_list)\n', (6181, 6215), False, 'from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact\n'), ((6728, 6791), 'nni.algorithms.compression.v2.pytorch.utils.compute_sparsity_mask2compact', 'compute_sparsity_mask2compact', (['pruned_model', 'masks', 'config_list'], {}), '(pruned_model, masks, config_list)\n', (6757, 6791), False, 'from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact\n'), ((7333, 7396), 'nni.algorithms.compression.v2.pytorch.utils.compute_sparsity_mask2compact', 'compute_sparsity_mask2compact', (['pruned_model', 'masks', 'config_list'], {}), '(pruned_model, masks, config_list)\n', (7362, 7396), False, 'from nni.algorithms.compression.v2.pytorch.utils 
import compute_sparsity_mask2compact\n'), ((8148, 8211), 'nni.algorithms.compression.v2.pytorch.utils.compute_sparsity_mask2compact', 'compute_sparsity_mask2compact', (['pruned_model', 'masks', 'config_list'], {}), '(pruned_model, masks, config_list)\n', (8177, 8211), False, 'from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact\n'), ((8376, 8393), 'random.seed', 'random.seed', (['(1024)'], {}), '(1024)\n', (8387, 8393), False, 'import random\n'), ((8402, 8425), 'numpy.random.seed', 'numpy.random.seed', (['(1024)'], {}), '(1024)\n', (8419, 8425), False, 'import numpy\n'), ((8434, 8457), 'torch.manual_seed', 'torch.manual_seed', (['(1024)'], {}), '(1024)\n', (8451, 8457), False, 'import torch\n'), ((8577, 8593), 'random.seed', 'random.seed', (['now'], {}), '(now)\n', (8588, 8593), False, 'import random\n'), ((8609, 8639), 'random.randint', 'random.randint', (['(0)', '(2 ** 32 - 1)'], {}), '(0, 2 ** 32 - 1)\n', (8623, 8639), False, 'import random\n'), ((8648, 8665), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (8659, 8665), False, 'import random\n'), ((8674, 8697), 'numpy.random.seed', 'numpy.random.seed', (['seed'], {}), '(seed)\n', (8691, 8697), False, 'import numpy\n'), ((8706, 8729), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (8723, 8729), False, 'import torch\n'), ((2955, 2980), 'torch.rand', 'torch.rand', (['(10)', '(1)', '(28)', '(28)'], {}), '(10, 1, 28, 28)\n', (2965, 2980), False, 'import torch\n'), ((3479, 3504), 'torch.rand', 'torch.rand', (['(10)', '(1)', '(28)', '(28)'], {}), '(10, 1, 28, 28)\n', (3489, 3504), False, 'import torch\n'), ((3996, 4021), 'torch.rand', 'torch.rand', (['(10)', '(1)', '(28)', '(28)'], {}), '(10, 1, 28, 28)\n', (4006, 4021), False, 'import torch\n'), ((5345, 5370), 'torch.rand', 'torch.rand', (['(10)', '(1)', '(28)', '(28)'], {}), '(10, 1, 28, 28)\n', (5355, 5370), False, 'import torch\n'), ((6022, 6047), 'torch.rand', 'torch.rand', (['(10)', '(1)', '(28)', 
'(28)'], {}), '(10, 1, 28, 28)\n', (6032, 6047), False, 'import torch\n'), ((8018, 8043), 'torch.rand', 'torch.rand', (['(10)', '(1)', '(28)', '(28)'], {}), '(10, 1, 28, 28)\n', (8028, 8043), False, 'import torch\n'), ((8550, 8561), 'time.time', 'time.time', ([], {}), '()\n', (8559, 8561), False, 'import time\n')] |
# The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import *
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
x = np.zeros(len(df))
y = np.zeros(len(df))
output_server("burtin")
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=[-420, 420], y_range=[-420, 420],
min_border=0, outline_line_color=None,
background_fill="#f0e1d2", border_fill="#f0e1d2")
p.line(x+1, y+1, alpha=0)
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
p.annular_wedge(x, y, inner_radius, rad(df.penicillin),
-big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
color=drug_color['Penicillin'])
p.annular_wedge(x, y, inner_radius, rad(df.streptomycin),
-big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
color=drug_color['Streptomycin'])
p.annular_wedge(x, y, inner_radius, rad(df.neomycin),
-big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
color=drug_color['Neomycin'])
# circular axes and lables
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(x, y, radius=radii, fill_color=None, line_color="white")
p.text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
p.annular_wedge(x, y, inner_radius-10, outer_radius+10,
-big_angle+angles, -big_angle+angles, color="black")
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()),
text_font_size="9pt", text_align="left", text_baseline="middle")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
show(p)
| [
"collections.OrderedDict",
"numpy.log",
"math.log",
"numpy.array",
"six.moves.cStringIO",
"numpy.arange"
] | [((1625, 1722), 'collections.OrderedDict', 'OrderedDict', (["[('Penicillin', '#0d3362'), ('Streptomycin', '#c64737'), ('Neomycin', 'black')]"], {}), "([('Penicillin', '#0d3362'), ('Streptomycin', '#c64737'), (\n 'Neomycin', 'black')])\n", (1636, 1722), False, 'from collections import OrderedDict\n'), ((3942, 3975), 'numpy.array', 'np.array', (['(-big_angle / 2 + angles)'], {}), '(-big_angle / 2 + angles)\n', (3950, 3975), True, 'import numpy as np\n'), ((1833, 1854), 'six.moves.cStringIO', 'StringIO', (['antibiotics'], {}), '(antibiotics)\n', (1841, 1854), True, 'from six.moves import cStringIO as StringIO\n'), ((2040, 2060), 'math.log', 'log', (['(0.001 * 10000.0)'], {}), '(0.001 * 10000.0)\n', (2043, 2060), False, 'from math import log, sqrt\n'), ((2069, 2088), 'math.log', 'log', (['(1000 * 10000.0)'], {}), '(1000 * 10000.0)\n', (2072, 2088), False, 'from math import log, sqrt\n'), ((3416, 3432), 'numpy.arange', 'np.arange', (['(-3)', '(4)'], {}), '(-3, 4)\n', (3425, 3432), True, 'import numpy as np\n'), ((3843, 3876), 'numpy.array', 'np.array', (['(-big_angle / 2 + angles)'], {}), '(-big_angle / 2 + angles)\n', (3851, 3876), True, 'import numpy as np\n'), ((3897, 3930), 'numpy.array', 'np.array', (['(-big_angle / 2 + angles)'], {}), '(-big_angle / 2 + angles)\n', (3905, 3930), True, 'import numpy as np\n'), ((3454, 3478), 'numpy.log', 'np.log', (['(labels * 10000.0)'], {}), '(labels * 10000.0)\n', (3460, 3478), True, 'import numpy as np\n'), ((2202, 2223), 'numpy.log', 'np.log', (['(mic * 10000.0)'], {}), '(mic * 10000.0)\n', (2208, 2223), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
n = 12
X = np.arange(n)
Y1 = np.random.uniform(0.1, 1.0, n)
Y2 = np.random.uniform(0.1, 1.0, n)
plt.bar(X, +Y1)
plt.bar(X, -Y2)
plt.xlim(-.5, n)
plt.xticks(())
plt.ylim(-1.25, 1.25)
plt.yticks(())
for x, y in zip(X, Y1):
plt.text(x, y + 0.02, '%.2f' % y, ha='center', va='bottom')
for x, y in zip(X, Y2):
plt.text(x, -y - 0.02, '%.2f' % y, ha='center', va='top')
plt.show()
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.yticks",
"numpy.random.uniform",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((63, 75), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (72, 75), True, 'import numpy as np\n'), ((81, 111), 'numpy.random.uniform', 'np.random.uniform', (['(0.1)', '(1.0)', 'n'], {}), '(0.1, 1.0, n)\n', (98, 111), True, 'import numpy as np\n'), ((117, 147), 'numpy.random.uniform', 'np.random.uniform', (['(0.1)', '(1.0)', 'n'], {}), '(0.1, 1.0, n)\n', (134, 147), True, 'import numpy as np\n'), ((149, 164), 'matplotlib.pyplot.bar', 'plt.bar', (['X', '(+Y1)'], {}), '(X, +Y1)\n', (156, 164), True, 'import matplotlib.pyplot as plt\n'), ((165, 180), 'matplotlib.pyplot.bar', 'plt.bar', (['X', '(-Y2)'], {}), '(X, -Y2)\n', (172, 180), True, 'import matplotlib.pyplot as plt\n'), ((182, 199), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.5)', 'n'], {}), '(-0.5, n)\n', (190, 199), True, 'import matplotlib.pyplot as plt\n'), ((199, 213), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (209, 213), True, 'import matplotlib.pyplot as plt\n'), ((214, 235), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.25)', '(1.25)'], {}), '(-1.25, 1.25)\n', (222, 235), True, 'import matplotlib.pyplot as plt\n'), ((236, 250), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (246, 250), True, 'import matplotlib.pyplot as plt\n'), ((428, 438), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (436, 438), True, 'import matplotlib.pyplot as plt\n'), ((280, 339), 'matplotlib.pyplot.text', 'plt.text', (['x', '(y + 0.02)', "('%.2f' % y)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(x, y + 0.02, '%.2f' % y, ha='center', va='bottom')\n", (288, 339), True, 'import matplotlib.pyplot as plt\n'), ((369, 426), 'matplotlib.pyplot.text', 'plt.text', (['x', '(-y - 0.02)', "('%.2f' % y)"], {'ha': '"""center"""', 'va': '"""top"""'}), "(x, -y - 0.02, '%.2f' % y, ha='center', va='top')\n", (377, 426), True, 'import matplotlib.pyplot as plt\n')] |
import sys
import signal
import datetime
import numpy as np
import atomize.general_modules.general_functions as general
import atomize.device_modules.PB_ESR_500_pro as pb_pro
import atomize.device_modules.Spectrum_M4I_4450_X8 as spectrum_dig
import atomize.device_modules.Mikran_X_band_MW_bridge as mwBridge
import atomize.device_modules.BH_15 as bh
import atomize.device_modules.SR_PTC_10 as sr
import atomize.general_modules.csv_opener_saver as openfile
### Experimental parameters
POINTS = 15
STEP = 100 # in NS; delta_start = str(STEP) + ' ns' -> delta_start = '100 ns'
FIELD = 3473
AVERAGES = 10
SCANS = 1
# PULSES
REP_RATE = '400 Hz'
PULSE_1_LENGTH = '16 ns'
PULSE_2_LENGTH = '16 ns'
PULSE_3_LENGTH = '32 ns'
PULSE_4_LENGTH = '16 ns'
PULSE_1_START = '0 ns'
PULSE_2_START = '300 ns'
PULSE_3_START = '500 ns'
PULSE_4_START = '700 ns'
PULSE_SIGNAL_START = '1000 ns'
# NAMES
EXP_NAME = 'HYSCORE'
CURVE_NAME = 'exp1'
# initialization of the devices
file_handler = openfile.Saver_Opener()
ptc10 = sr.SR_PTC_10()
mw = mwBridge.Mikran_X_band_MW_bridge()
pb = pb_pro.PB_ESR_500_Pro()
bh15 = bh.BH_15()
dig4450 = spectrum_dig.Spectrum_M4I_4450_X8()
def cleanup(*args):
dig4450.digitizer_stop()
dig4450.digitizer_close()
pb.pulser_stop()
file_handler.save_data(file_data, data, header = header, mode = 'w')
sys.exit(0)
signal.signal(signal.SIGTERM, cleanup)
# Setting magnetic field
bh15.magnet_setup(FIELD, 1)
bh15.magnet_field(FIELD)
dig4450.digitizer_read_settings()
dig4450.digitizer_number_of_averages(AVERAGES)
wind = dig4450.digitizer_window()
#
cycle_data_x = np.zeros( 16 )
cycle_data_y = np.zeros( 16 )
data = np.zeros( (2, POINTS, POINTS) )
###
pb.pulser_pulse(name = 'P0', channel = 'MW', start = PULSE_1_START, length = PULSE_1_LENGTH, \
phase_list = ['+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x'])
pb.pulser_pulse(name = 'P1', channel = 'MW', start = PULSE_2_START, length = PULSE_2_LENGTH, \
phase_list = ['+x', '+x', '+x', '+x', '+y', '+y', '+y', '+y', '-x', '-x', '-x', '-x', '-y', '-y', '-y', '-y'])
pb.pulser_pulse(name = 'P2', channel = 'MW', start = PULSE_3_START, length = PULSE_3_LENGTH, \
phase_list = ['+x', '+x', '-x', '-x', '+x', '+x', '-x', '-x', '+x', '+x', '-x', '-x', '+x', '+x', '-x', '-x'], delta_start = str(STEP) + ' ns')
pb.pulser_pulse(name = 'P3', channel = 'MW', start = PULSE_4_START, length = PULSE_4_LENGTH, \
phase_list = ['+x', '-x', '+x', '-x', '+x', '-x', '+x', '-x', '+x', '-x', '+x', '-x', '+x', '-x', '+x', '-x'], delta_start = str(STEP) + ' ns')
pb.pulser_pulse(name = 'P4', channel = 'TRIGGER', start = PULSE_SIGNAL_START, length = '100 ns', \
phase_list = ['+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x', '+x'], delta_start = str(STEP) + ' ns')
pb.pulser_repetition_rate( REP_RATE )
# Data saving
header = 'Date: ' + str(datetime.datetime.now().strftime("%d-%m-%Y %H-%M-%S")) + '\n' + 'HYSCORE\n' + \
'Field: ' + str(FIELD) + ' G \n' + str(mw.mw_bridge_att_prm()) + '\n' + \
str(mw.mw_bridge_synthesizer()) + '\n' + \
'Repetition Rate: ' + str(pb.pulser_repetition_rate()) + '\n' + 'Number of Scans: ' + str(SCANS) + '\n' +\
'Averages: ' + str(AVERAGES) + '\n' + 'Points: ' + str(POINTS) + '\n' + 'Window: ' + str(wind) + ' ns\n' \
+ 'Horizontal Resolution: ' + str(STEP) + ' ns\n' + 'Vertical Resolution: ' + str(STEP) + ' ns\n' + \
'Temperature: ' + str(ptc10.tc_temperature('2A')) + ' K\n' +\
'Pulse List: ' + '\n' + str(pb.pulser_pulse_list()) + '2D Data'
file_data, file_param = file_handler.create_file_parameters('.param')
file_handler.save_header(file_param, header = header, mode = 'w')
for j in general.scans(SCANS):
l = 0
for l in range(POINTS):
for i in range(POINTS):
# phase cycle
k = 0
while k < 16:
pb.pulser_next_phase()
cycle_data_x[k], cycle_data_y[k] = dig4450.digitizer_get_curve( integral = True )
k += 1
# acquisition cycle
x, y = pb.pulser_acquisition_cycle(cycle_data_x, cycle_data_y, \
acq_cycle = ['+', '-', '+', '-', '-i', '+i', '-i', '+i', '-', '+', '-', '+', '+i', '-i', '+i', '-i'])
data[0, i, l] = ( data[0, i, l] * (j - 1) + x ) / j
data[1, i, l] = ( data[0, i, l] * (j - 1) + y ) / j
general.plot_2d(EXP_NAME, data, start_step = ( (0, STEP), (0, STEP) ), xname = 'Delay_1',\
xscale = 'ns', yname = 'Delay_2', yscale = 'ns', zname = 'Intensity', zscale = 'V')
general.text_label( EXP_NAME, "Scan / Time: ", str(j) + ' / '+ str(l*STEP) + ' / '+ str(i*STEP) )
# Delay_1 scan
pb.pulser_shift('P2', 'P3', 'P4')
# Delay_2 change
pb.pulser_pulse_reset('P2', 'P3', 'P4')
##pb.pulser_redefine_start(name = 'P3', start = str( int( PULSE_3_START.split(' ')[0] ) + ( l + 1 ) * STEP ) + ' ns')
##pb.pulser_redefine_start(name = 'P4', start = str( int( PULSE_4_START.split(' ')[0] ) + ( l + 1 ) * STEP ) + ' ns')
d2 = 0
while d2 < (l + 1):
pb.pulser_shift('P3', 'P4')
d2 += 1
pb.pulser_pulse_reset()
dig4450.digitizer_stop()
dig4450.digitizer_close()
pb.pulser_stop()
file_handler.save_data(file_data, data, header = header, mode = 'w')
| [
"atomize.general_modules.general_functions.scans",
"signal.signal",
"atomize.device_modules.Spectrum_M4I_4450_X8.Spectrum_M4I_4450_X8",
"atomize.device_modules.PB_ESR_500_pro.PB_ESR_500_Pro",
"atomize.device_modules.BH_15.BH_15",
"atomize.general_modules.csv_opener_saver.Saver_Opener",
"atomize.device_m... | [((986, 1009), 'atomize.general_modules.csv_opener_saver.Saver_Opener', 'openfile.Saver_Opener', ([], {}), '()\n', (1007, 1009), True, 'import atomize.general_modules.csv_opener_saver as openfile\n'), ((1018, 1032), 'atomize.device_modules.SR_PTC_10.SR_PTC_10', 'sr.SR_PTC_10', ([], {}), '()\n', (1030, 1032), True, 'import atomize.device_modules.SR_PTC_10 as sr\n'), ((1038, 1072), 'atomize.device_modules.Mikran_X_band_MW_bridge.Mikran_X_band_MW_bridge', 'mwBridge.Mikran_X_band_MW_bridge', ([], {}), '()\n', (1070, 1072), True, 'import atomize.device_modules.Mikran_X_band_MW_bridge as mwBridge\n'), ((1078, 1101), 'atomize.device_modules.PB_ESR_500_pro.PB_ESR_500_Pro', 'pb_pro.PB_ESR_500_Pro', ([], {}), '()\n', (1099, 1101), True, 'import atomize.device_modules.PB_ESR_500_pro as pb_pro\n'), ((1109, 1119), 'atomize.device_modules.BH_15.BH_15', 'bh.BH_15', ([], {}), '()\n', (1117, 1119), True, 'import atomize.device_modules.BH_15 as bh\n'), ((1130, 1165), 'atomize.device_modules.Spectrum_M4I_4450_X8.Spectrum_M4I_4450_X8', 'spectrum_dig.Spectrum_M4I_4450_X8', ([], {}), '()\n', (1163, 1165), True, 'import atomize.device_modules.Spectrum_M4I_4450_X8 as spectrum_dig\n'), ((1357, 1395), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'cleanup'], {}), '(signal.SIGTERM, cleanup)\n', (1370, 1395), False, 'import signal\n'), ((1609, 1621), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (1617, 1621), True, 'import numpy as np\n'), ((1639, 1651), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (1647, 1651), True, 'import numpy as np\n'), ((1661, 1690), 'numpy.zeros', 'np.zeros', (['(2, POINTS, POINTS)'], {}), '((2, POINTS, POINTS))\n', (1669, 1690), True, 'import numpy as np\n'), ((3854, 3874), 'atomize.general_modules.general_functions.scans', 'general.scans', (['SCANS'], {}), '(SCANS)\n', (3867, 3874), True, 'import atomize.general_modules.general_functions as general\n'), ((1344, 1355), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', 
(1352, 1355), False, 'import sys\n'), ((4575, 4741), 'atomize.general_modules.general_functions.plot_2d', 'general.plot_2d', (['EXP_NAME', 'data'], {'start_step': '((0, STEP), (0, STEP))', 'xname': '"""Delay_1"""', 'xscale': '"""ns"""', 'yname': '"""Delay_2"""', 'yscale': '"""ns"""', 'zname': '"""Intensity"""', 'zscale': '"""V"""'}), "(EXP_NAME, data, start_step=((0, STEP), (0, STEP)), xname=\n 'Delay_1', xscale='ns', yname='Delay_2', yscale='ns', zname='Intensity',\n zscale='V')\n", (4590, 4741), True, 'import atomize.general_modules.general_functions as general\n'), ((2989, 3012), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3010, 3012), False, 'import datetime\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-"""
"""
Created on Mon Apr 24 15:59:19 2017
@author: Cling
"""
import lsystem
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import collections
def a_calculate_lines(
string, n_angle=90, n_line_len=2, t_init_pos=(0.0, 0.0), n_init_dir=0,
n_draw_step_scale=1
):
a_pen_pos = np.array(t_init_pos)
n_direction = np.deg2rad(n_init_dir)
n_rot_angle = np.deg2rad(n_angle)
n_draw_len = n_line_len
lst_lines = []
lst_stack = []
lst_line_len_stack = []
for cmd in string:
if cmd in 'FAB':
a_pen_pos_nxt = a_pen_pos + n_draw_len *\
np.array((
np.cos(n_direction),
np.sin(n_direction)
))
lst_lines.append(np.array((a_pen_pos, a_pen_pos_nxt)))
a_pen_pos = a_pen_pos_nxt
elif cmd == "+":
n_direction += n_rot_angle
elif cmd == "-":
n_direction -= n_rot_angle
elif cmd == "(":
lst_line_len_stack.append(n_draw_len)
n_draw_len *= n_draw_step_scale
elif cmd == ")":
n_draw_len = lst_line_len_stack.pop()
elif cmd == "[":
lst_stack.append((a_pen_pos, n_direction))
elif cmd == "]":
a_pen_pos, n_direction = lst_stack.pop()
a_lines = np.array(lst_lines)
return a_lines
def main():
# lsys_dragon = lsystem.L_System(
# axiom='FX',
# rules={"X": "X+YF+", "Y": "-FX-Y"}
# )
# lsys_dragon.evaluate(10)
#
# a_lines = a_calculate_lines(lsys_dragon, 90)
#
# lsys_tree = lsystem.Random_L_System(
# axiom='F',
# rules={'F': [
# [1/2, 'F[+F]F[-F]F'],
# [1/4, 'F[+F]F[-F[+F]]'],
# [1/4, 'FF[-F+F+F]+[+F-F-F]']
# ]}
# )
# lsys_tree.evaluate(6)
# print(len(lsys_tree))
# return None
lsys_tree = lsystem.L_System(
axiom='X',
rules={'X': '(F[+X][-X]FX)'}
)
lsys_tree.evaluate(3)
a_lines = a_calculate_lines(lsys_tree, 27.9, n_init_dir=90, n_draw_step_scale=0.5)
lines = collections.LineCollection(a_lines)
ax = plt.axes()
ax.add_collection(lines)
ax.axis("equal")
ax.set_xlim(ax.dataLim.xmin, ax.dataLim.xmax)
plt.show()
if __name__ == '__main__':
main()
| [
"matplotlib.collections.LineCollection",
"numpy.array",
"numpy.deg2rad",
"matplotlib.pyplot.axes",
"numpy.cos",
"numpy.sin",
"lsystem.L_System",
"matplotlib.pyplot.show"
] | [((385, 405), 'numpy.array', 'np.array', (['t_init_pos'], {}), '(t_init_pos)\n', (393, 405), True, 'import numpy as np\n'), ((425, 447), 'numpy.deg2rad', 'np.deg2rad', (['n_init_dir'], {}), '(n_init_dir)\n', (435, 447), True, 'import numpy as np\n'), ((467, 486), 'numpy.deg2rad', 'np.deg2rad', (['n_angle'], {}), '(n_angle)\n', (477, 486), True, 'import numpy as np\n'), ((1484, 1503), 'numpy.array', 'np.array', (['lst_lines'], {}), '(lst_lines)\n', (1492, 1503), True, 'import numpy as np\n'), ((2060, 2117), 'lsystem.L_System', 'lsystem.L_System', ([], {'axiom': '"""X"""', 'rules': "{'X': '(F[+X][-X]FX)'}"}), "(axiom='X', rules={'X': '(F[+X][-X]FX)'})\n", (2076, 2117), False, 'import lsystem\n'), ((2271, 2306), 'matplotlib.collections.LineCollection', 'collections.LineCollection', (['a_lines'], {}), '(a_lines)\n', (2297, 2306), False, 'from matplotlib import collections\n'), ((2317, 2327), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (2325, 2327), True, 'import matplotlib.pyplot as plt\n'), ((2436, 2446), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2444, 2446), True, 'import matplotlib.pyplot as plt\n'), ((899, 935), 'numpy.array', 'np.array', (['(a_pen_pos, a_pen_pos_nxt)'], {}), '((a_pen_pos, a_pen_pos_nxt))\n', (907, 935), True, 'import numpy as np\n'), ((763, 782), 'numpy.cos', 'np.cos', (['n_direction'], {}), '(n_direction)\n', (769, 782), True, 'import numpy as np\n'), ((817, 836), 'numpy.sin', 'np.sin', (['n_direction'], {}), '(n_direction)\n', (823, 836), True, 'import numpy as np\n')] |
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: Simplified BSD
import numpy as np
from scipy import linalg
from ..source_estimate import (SourceEstimate, VolSourceEstimate,
_BaseSourceEstimate)
from ..minimum_norm.inverse import (combine_xyz, _prepare_forward,
_check_reference)
from ..forward import is_fixed_orient
from ..io.pick import pick_channels_evoked
from ..io.proj import deactivate_proj
from ..utils import logger, verbose, _check_depth
from ..dipole import Dipole
from .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver, _Phi,
norm_l2inf, tf_mixed_norm_solver, norm_epsilon_inf)
@verbose
def _prepare_weights(forward, gain, source_weighting, weights, weights_min):
mask = None
if isinstance(weights, _BaseSourceEstimate):
weights = np.max(np.abs(weights.data), axis=1)
weights_max = np.max(weights)
if weights_min > weights_max:
raise ValueError('weights_min > weights_max (%s > %s)' %
(weights_min, weights_max))
weights_min = weights_min / weights_max
weights = weights / weights_max
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)
if len(weights) != gain.shape[1]:
raise ValueError('weights do not have the correct dimension '
' (%d != %d)' % (len(weights), gain.shape[1]))
if len(source_weighting.shape) == 1:
source_weighting *= weights
else:
source_weighting *= weights[:, None]
gain *= weights[None, :]
if weights_min is not None:
mask = (weights > weights_min)
gain = gain[:, mask]
n_sources = np.sum(mask) // n_dip_per_pos
logger.info("Reducing source space to %d sources" % n_sources)
return gain, source_weighting, mask
def _prepare_gain(forward, info, noise_cov, pca, depth, loose, rank,
weights=None, weights_min=None):
depth = _check_depth(depth, 'depth_sparse')
forward, gain_info, gain, _, _, source_weighting, _, _, whitener = \
_prepare_forward(forward, info, noise_cov, 'auto', loose, rank, pca,
use_cps=True, **depth)
if weights is None:
mask = None
else:
gain, source_weighting, mask = _prepare_weights(
forward, gain, source_weighting, weights, weights_min)
return forward, gain, gain_info, whitener, source_weighting, mask
def _reapply_source_weighting(X, source_weighting, active_set):
X *= source_weighting[active_set][:, None]
return X
def _compute_residual(forward, evoked, X, active_set, info):
# OK, picking based on row_names is safe
sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']]
residual = evoked.copy()
residual = pick_channels_evoked(residual, include=info['ch_names'])
r_tmp = residual.copy()
r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X)
# Take care of proj
active_projs = list()
non_active_projs = list()
for p in evoked.info['projs']:
if p['active']:
active_projs.append(p)
else:
non_active_projs.append(p)
if len(active_projs) > 0:
r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True)
r_tmp.apply_proj()
r_tmp.add_proj(non_active_projs, remove_existing=False)
residual.data -= r_tmp.data
return residual
@verbose
def _make_sparse_stc(X, active_set, forward, tmin, tstep,
active_is_idx=False, verbose=None):
if not is_fixed_orient(forward):
logger.info('combining the current components...')
X = combine_xyz(X)
if not active_is_idx:
active_idx = np.where(active_set)[0]
else:
active_idx = active_set
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
if n_dip_per_pos > 1:
active_idx = np.unique(active_idx // n_dip_per_pos)
src = forward['src']
if src.kind != 'surface':
vertices = src[0]['vertno'][active_idx]
stc = VolSourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
else:
vertices = []
n_points_so_far = 0
for this_src in src:
this_n_points_so_far = n_points_so_far + len(this_src['vertno'])
this_active_idx = active_idx[(n_points_so_far <= active_idx) &
(active_idx < this_n_points_so_far)]
this_active_idx -= n_points_so_far
this_vertno = this_src['vertno'][this_active_idx]
n_points_so_far = this_n_points_so_far
vertices.append(this_vertno)
stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
return stc
@verbose
def _make_dipoles_sparse(X, active_set, forward, tmin, tstep, M, M_est,
active_is_idx=False, verbose=None):
times = tmin + tstep * np.arange(X.shape[1])
if not active_is_idx:
active_idx = np.where(active_set)[0]
else:
active_idx = active_set
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
if n_dip_per_pos > 1:
active_idx = np.unique(active_idx // n_dip_per_pos)
gof = np.zeros(M_est.shape[1])
M_norm2 = np.sum(M ** 2, axis=0)
R_norm2 = np.sum((M - M_est) ** 2, axis=0)
gof[M_norm2 > 0.0] = 1. - R_norm2[M_norm2 > 0.0] / M_norm2[M_norm2 > 0.0]
gof *= 100.
dipoles = []
for k, i_dip in enumerate(active_idx):
i_pos = forward['source_rr'][i_dip][np.newaxis, :]
i_pos = i_pos.repeat(len(times), axis=0)
X_ = X[k * n_dip_per_pos: (k + 1) * n_dip_per_pos]
if n_dip_per_pos == 1:
amplitude = X_[0]
i_ori = forward['source_nn'][i_dip][np.newaxis, :]
i_ori = i_ori.repeat(len(times), axis=0)
else:
if forward['surf_ori']:
X_ = np.dot(forward['source_nn'][
i_dip * n_dip_per_pos:(i_dip + 1) * n_dip_per_pos].T, X_)
amplitude = np.sqrt(np.sum(X_ ** 2, axis=0))
i_ori = np.zeros((len(times), 3))
i_ori[amplitude > 0.] = (X_[:, amplitude > 0.] /
amplitude[amplitude > 0.]).T
dipoles.append(Dipole(times, i_pos, amplitude, i_ori, gof))
return dipoles
@verbose
def make_stc_from_dipoles(dipoles, src, verbose=None):
    """Convert a list of spatio-temporal dipoles into a SourceEstimate.
    Parameters
    ----------
    dipoles : Dipole | list of instances of Dipole
        The dipoles to convert.
    src : instance of SourceSpaces
        The source space used to generate the forward operator.
    %(verbose)s
    Returns
    -------
    stc : SourceEstimate
        The source estimate.
    """
    logger.info('Converting dipoles into a SourceEstimate.')
    if isinstance(dipoles, Dipole):
        dipoles = [dipoles]
    if not isinstance(dipoles, list):
        raise ValueError('Dipoles must be an instance of Dipole or '
                         'a list of instances of Dipole. '
                         'Got %s!' % type(dipoles))
    # All dipoles share one time grid; read it off the first one.
    dip_times = dipoles[0].times
    tmin = dip_times[0]
    tstep = dip_times[1] - tmin
    X = np.zeros((len(dipoles), len(dip_times)))
    # Stack coordinates of every source-space vertex (left hemisphere first)
    # so each dipole can be matched to its vertex by exact position.
    source_rr = np.concatenate([_src['rr'][_src['vertno'], :] for _src in src],
                               axis=0)
    n_lh_points = len(src[0]['vertno'])
    lh_vertno, rh_vertno = [], []
    for row, dip in enumerate(dipoles):
        if not np.all(dip.pos == dip.pos[0]):
            raise ValueError('Only dipoles with fixed position over time '
                             'are supported!')
        X[row] = dip.amplitude
        # Locate the source-space point whose coordinates match this dipole.
        matches = np.all(source_rr == dip.pos[0], axis=1)
        idx = np.where(matches)[0][0]
        if idx < n_lh_points:
            lh_vertno.append(src[0]['vertno'][idx])
        else:
            rh_vertno.append(src[1]['vertno'][idx - n_lh_points])
    vertices = [np.array(lh_vertno).astype(int),
                np.array(rh_vertno).astype(int)]
    stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep,
                         subject=src._subject)
    logger.info('[done]')
    return stc
@verbose
def mixed_norm(evoked, forward, noise_cov, alpha, loose='auto', depth=0.8,
               maxit=3000, tol=1e-4, active_set_size=10,
               debias=True, time_pca=True, weights=None, weights_min=0.,
               solver='auto', n_mxne_iter=1, return_residual=False,
               return_as_dipoles=False, dgap_freq=10, rank=None,
               verbose=None):
    """Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE).
    Compute L1/L2 mixed-norm solution [1]_ or L0.5/L2 [2]_ mixed-norm
    solution on evoked data.
    Parameters
    ----------
    evoked : instance of Evoked or list of instances of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    alpha : float in range [0, 100)
        Regularization parameter. 0 means no regularization, 100 would give 0
        active dipole.
    loose : float in [0, 1] | 'auto'
        Value that weights the source variances of the dipole components
        that are parallel (tangential) to the cortical surface. If loose
        is 0 then the solution is computed with fixed orientation.
        If loose is 1, it corresponds to free orientations.
        The default value ('auto') is set to 0.2 for surface-oriented source
        space and set to 1.0 for volumic or discrete source space.
    %(depth)s
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    active_set_size : int | None
        Size of active set increment. If None, no active set strategy is used.
    debias : bool
        Remove coefficient amplitude bias due to L1 penalty.
    time_pca : bool or int
        If True the rank of the concatenated epochs is reduced to
        its true dimension. If is 'int' the rank is limited to this value.
    weights : None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None, a
        1d array with shape (n_sources,), or a SourceEstimate (e.g. obtained
        with wMNE, dSPM, or fMRI).
    weights_min : float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    solver : 'prox' | 'cd' | 'bcd' | 'auto'
        The algorithm to use for the optimization. 'prox' stands for
        proximal iterations using the FISTA algorithm, 'cd' uses
        coordinate descent, and 'bcd' applies block coordinate descent.
        'cd' is only available for fixed orientation.
    n_mxne_iter : int
        The number of MxNE iterations. If > 1, iterative reweighting
        is applied.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    return_as_dipoles : bool
        If True, the sources are returned as a list of Dipole instances.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations. Ignored if
        solver is 'cd'.
    %(rank_None)s
    .. versionadded:: 0.18
    %(verbose)s
    Returns
    -------
    stc : SourceEstimate | list of SourceEstimate
        Source time courses for each evoked data passed as input.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.
    See Also
    --------
    tf_mixed_norm
    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>,
       "Mixed-norm estimates for the M/EEG inverse problem using accelerated
       gradient methods", Physics in Medicine and Biology, 2012.
       https://doi.org/10.1088/0031-9155/57/7/1937
    .. [2] <NAME>, <NAME>, <NAME>, <NAME>,
       "The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal
       MEG/EEG Source Reconstruction", IEEE Transactions of Medical Imaging,
       Volume 35 (10), pp. 2218-2228, 2016.
    """
    # Validate user parameters before any expensive work.
    if not (0. <= alpha < 100.):
        raise ValueError('alpha must be in [0, 100). '
                         'Got alpha = %s' % alpha)
    if n_mxne_iter < 1:
        raise ValueError('MxNE has to be computed at least 1 time. '
                         'Requires n_mxne_iter >= 1, got %d' % n_mxne_iter)
    if dgap_freq <= 0.:
        raise ValueError('dgap_freq must be a positive integer.'
                         ' Got dgap_freq = %s' % dgap_freq)
    pca = True
    if not isinstance(evoked, list):
        evoked = [evoked]
    _check_reference(evoked[0])
    all_ch_names = evoked[0].ch_names
    if not all(all_ch_names == evoked[i].ch_names
               for i in range(1, len(evoked))):
        raise Exception('All the datasets must have the same good channels.')
    forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked[0].info, noise_cov, pca, depth, loose, rank,
        weights, weights_min)
    sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
    # Concatenate all evoked datasets along time.
    M = np.concatenate([e.data[sel] for e in evoked], axis=1)
    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)
    if time_pca:
        # Reduce the temporal dimension via SVD (optionally truncated).
        U, s, Vh = linalg.svd(M, full_matrices=False)
        if not isinstance(time_pca, bool) and isinstance(time_pca, int):
            U = U[:, :time_pca]
            s = s[:time_pca]
            Vh = Vh[:time_pca]
        M = U * s
    # Scaling to make setting of alpha easy
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting /= alpha_max
    if n_mxne_iter == 1:
        X, active_set, E = mixed_norm_solver(
            M, gain, alpha, maxit=maxit, tol=tol,
            active_set_size=active_set_size, n_orient=n_dip_per_pos,
            debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
    else:
        X, active_set, E = iterative_mixed_norm_solver(
            M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
            n_orient=n_dip_per_pos, active_set_size=active_set_size,
            debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
    if time_pca:
        # Project the solution and the data back to the original time basis.
        X = np.dot(X, Vh)
        M = np.dot(M, Vh)
    # Compute estimated whitened sensor data
    M_estimated = np.dot(gain[:, active_set], X)
    if mask is not None:
        # Expand the active set back to the full (unmasked) source space.
        # FIX: the deprecated alias np.bool was removed in NumPy 1.24; the
        # builtin bool is the exact drop-in replacement.
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp
    if active_set.sum() == 0:
        raise Exception("No active dipoles found. alpha is too big.")
    # Reapply weights to have correct unit
    X = _reapply_source_weighting(X, source_weighting, active_set)
    outs = list()
    residual = list()
    cnt = 0
    # Split the concatenated solution back into per-evoked chunks.
    for e in evoked:
        tmin = e.times[0]
        tstep = 1.0 / e.info['sfreq']
        Xe = X[:, cnt:(cnt + len(e.times))]
        if return_as_dipoles:
            out = _make_dipoles_sparse(
                Xe, active_set, forward, tmin, tstep,
                M[:, cnt:(cnt + len(e.times))],
                M_estimated[:, cnt:(cnt + len(e.times))], verbose=None)
        else:
            out = _make_sparse_stc(Xe, active_set, forward, tmin, tstep)
        outs.append(out)
        cnt += len(e.times)
        if return_residual:
            residual.append(_compute_residual(forward, e, Xe, active_set,
                                              gain_info))
    logger.info('[done]')
    if len(outs) == 1:
        out = outs[0]
        if return_residual:
            residual = residual[0]
    else:
        out = outs
    if return_residual:
        out = out, residual
    return out
def _window_evoked(evoked, size):
"""Window evoked (size in seconds)."""
if isinstance(size, (float, int)):
lsize = rsize = float(size)
else:
lsize, rsize = size
evoked = evoked.copy()
sfreq = float(evoked.info['sfreq'])
lsize = int(lsize * sfreq)
rsize = int(rsize * sfreq)
lhann = np.hanning(lsize * 2)[:lsize]
rhann = np.hanning(rsize * 2)[-rsize:]
window = np.r_[lhann, np.ones(len(evoked.times) - lsize - rsize), rhann]
evoked.data *= window[None, :]
return evoked
@verbose
def tf_mixed_norm(evoked, forward, noise_cov,
                  loose='auto', depth=0.8, maxit=3000,
                  tol=1e-4, weights=None, weights_min=0., pca=True,
                  debias=True, wsize=64, tstep=4, window=0.02,
                  return_residual=False, return_as_dipoles=False,
                  alpha=None, l1_ratio=None, dgap_freq=10, rank=None,
                  verbose=None):
    """Time-Frequency Mixed-norm estimate (TF-MxNE).
    Compute L1/L2 + L1 mixed-norm solution on time-frequency
    dictionary. Works with evoked data [1]_ [2]_.
    Parameters
    ----------
    evoked : instance of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    loose : float in [0, 1] | 'auto'
        Value that weights the source variances of the dipole components
        that are parallel (tangential) to the cortical surface. If loose
        is 0 then the solution is computed with fixed orientation.
        If loose is 1, it corresponds to free orientations.
        The default value ('auto') is set to 0.2 for surface-oriented source
        space and set to 1.0 for volumic or discrete source space.
    %(depth)s
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    weights: None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None or
        1d array of length n_sources or a SourceEstimate e.g. obtained
        with wMNE or dSPM or fMRI.
    weights_min: float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    pca: bool
        If True the rank of the data is reduced to true dimension.
    debias: bool
        Remove coefficient amplitude bias due to L1 penalty.
    wsize: int or array-like
        Length of the STFT window in samples (must be a multiple of 4).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep) and each entry of wsize must be a multiple
        of 4. See [3]_.
    tstep: int or array-like
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
        If an array is passed, multiple TF dictionaries are used (each having
        its own wsize and tstep), and each entry of tstep must be a multiple
        of 2 and divide the corresponding entry of wsize. See [3]_.
    window : float or (float, float)
        Length of time window used to take care of edge artifacts in seconds.
        It can be one float or float if the values are different for left
        and right window length.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    return_as_dipoles : bool
        If True, the sources are returned as a list of Dipole instances.
    alpha : float in [0, 100) or None
        Overall regularization parameter.
        If alpha and l1_ratio are not None, alpha_space and alpha_time are
        overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
        * l1_ratio. 0 means no regularization, 100 would give 0 active dipole.
    l1_ratio : float in [0, 1] or None
        Proportion of temporal regularization.
        If l1_ratio and alpha are not None, alpha_space and alpha_time are
        overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
        * l1_ratio. 0 means no time regularization aka MxNE.
    dgap_freq : int or np.inf
        The duality gap is evaluated every dgap_freq iterations.
    %(rank_None)s
    .. versionadded:: 0.18
    %(verbose)s
    Returns
    -------
    stc : instance of SourceEstimate
        Source time courses.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.
    See Also
    --------
    mixed_norm
    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
       "Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
       non-stationary source activations",
       Neuroimage, Volume 70, pp. 410-422, 15 April 2013.
       DOI: 10.1016/j.neuroimage.2012.12.051
    .. [2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
       "Functional Brain Imaging with M/EEG Using Structured Sparsity in
       Time-Frequency Dictionaries",
       Proceedings Information Processing in Medical Imaging
       Lecture Notes in Computer Science, Volume 6801/2011, pp. 600-611, 2011.
       DOI: 10.1007/978-3-642-22092-0_49
    .. [3] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
       "M/EEG source localization with multiscale time-frequency dictionaries",
       6th International Workshop on Pattern Recognition in Neuroimaging
       (PRNI), 2016.
       DOI: 10.1109/PRNI.2016.7552337
    """
    _check_reference(evoked)
    all_ch_names = evoked.ch_names
    info = evoked.info
    # Validate regularization parameters before any expensive work.
    if not (0. <= alpha < 100.):
        raise ValueError('alpha must be in [0, 100). '
                         'Got alpha = %s' % alpha)
    if not (0. <= l1_ratio <= 1.):
        raise ValueError('l1_ratio must be in range [0, 1].'
                         ' Got l1_ratio = %s' % l1_ratio)
    # Split the overall penalty into spatial and temporal parts.
    alpha_space = alpha * (1. - l1_ratio)
    alpha_time = alpha * l1_ratio
    if dgap_freq <= 0.:
        raise ValueError('dgap_freq must be a positive integer.'
                         ' Got dgap_freq = %s' % dgap_freq)
    tstep = np.atleast_1d(tstep)
    wsize = np.atleast_1d(wsize)
    if len(tstep) != len(wsize):
        raise ValueError('The same number of window sizes and steps must be '
                         'passed. Got tstep = %s and wsize = %s' %
                         (tstep, wsize))
    forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
        forward, evoked.info, noise_cov, pca, depth, loose, rank,
        weights, weights_min)
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    if window is not None:
        # Taper the edges to avoid STFT edge artifacts.
        evoked = _window_evoked(evoked, window)
    sel = [all_ch_names.index(name) for name in gain_info["ch_names"]]
    M = evoked.data[sel]
    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)
    # Scaling to make setting of alpha easy
    n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs)
    alpha_max = norm_epsilon_inf(gain, M, phi, l1_ratio, n_dip_per_pos)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting /= alpha_max
    X, active_set, E = tf_mixed_norm_solver(
        M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
        maxit=maxit, tol=tol, verbose=verbose, n_orient=n_dip_per_pos,
        dgap_freq=dgap_freq, debias=debias)
    if active_set.sum() == 0:
        raise Exception("No active dipoles found. "
                        "alpha_space/alpha_time are too big.")
    # Compute estimated whitened sensor data
    M_estimated = np.dot(gain[:, active_set], X)
    if mask is not None:
        # Expand the active set back to the full (unmasked) source space.
        # FIX: the deprecated alias np.bool was removed in NumPy 1.24; the
        # builtin bool is the exact drop-in replacement.
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp
    X = _reapply_source_weighting(X, source_weighting, active_set)
    if return_residual:
        residual = _compute_residual(
            forward, evoked, X, active_set, gain_info)
    if return_as_dipoles:
        out = _make_dipoles_sparse(
            X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'],
            M, M_estimated, verbose=None)
    else:
        out = _make_sparse_stc(
            X, active_set, forward, evoked.times[0], 1.0 / info['sfreq'])
    logger.info('[done]')
    if return_residual:
        out = out, residual
    return out
| [
"numpy.hanning",
"numpy.abs",
"numpy.tile",
"numpy.unique",
"numpy.where",
"numpy.max",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.array",
"numpy.concatenate",
"scipy.linalg.svd",
"numpy.all",
"numpy.arange",
"numpy.atleast_1d"
] | [((947, 962), 'numpy.max', 'np.max', (['weights'], {}), '(weights)\n', (953, 962), True, 'import numpy as np\n'), ((2993, 3049), 'numpy.dot', 'np.dot', (["forward['sol']['data'][sel, :][:, active_set]", 'X'], {}), "(forward['sol']['data'][sel, :][:, active_set], X)\n", (2999, 3049), True, 'import numpy as np\n'), ((5289, 5313), 'numpy.zeros', 'np.zeros', (['M_est.shape[1]'], {}), '(M_est.shape[1])\n', (5297, 5313), True, 'import numpy as np\n'), ((5328, 5350), 'numpy.sum', 'np.sum', (['(M ** 2)'], {'axis': '(0)'}), '(M ** 2, axis=0)\n', (5334, 5350), True, 'import numpy as np\n'), ((5365, 5397), 'numpy.sum', 'np.sum', (['((M - M_est) ** 2)'], {'axis': '(0)'}), '((M - M_est) ** 2, axis=0)\n', (5371, 5397), True, 'import numpy as np\n'), ((7334, 7405), 'numpy.concatenate', 'np.concatenate', (["[_src['rr'][_src['vertno'], :] for _src in src]"], {'axis': '(0)'}), "([_src['rr'][_src['vertno'], :] for _src in src], axis=0)\n", (7348, 7405), True, 'import numpy as np\n'), ((13186, 13239), 'numpy.concatenate', 'np.concatenate', (['[e.data[sel] for e in evoked]'], {'axis': '(1)'}), '([e.data[sel] for e in evoked], axis=1)\n', (13200, 13239), True, 'import numpy as np\n'), ((13309, 13328), 'numpy.dot', 'np.dot', (['whitener', 'M'], {}), '(whitener, M)\n', (13315, 13328), True, 'import numpy as np\n'), ((14520, 14550), 'numpy.dot', 'np.dot', (['gain[:, active_set]', 'X'], {}), '(gain[:, active_set], X)\n', (14526, 14550), True, 'import numpy as np\n'), ((21998, 22018), 'numpy.atleast_1d', 'np.atleast_1d', (['tstep'], {}), '(tstep)\n', (22011, 22018), True, 'import numpy as np\n'), ((22031, 22051), 'numpy.atleast_1d', 'np.atleast_1d', (['wsize'], {}), '(wsize)\n', (22044, 22051), True, 'import numpy as np\n'), ((22747, 22766), 'numpy.dot', 'np.dot', (['whitener', 'M'], {}), '(whitener, M)\n', (22753, 22766), True, 'import numpy as np\n'), ((23569, 23599), 'numpy.dot', 'np.dot', (['gain[:, active_set]', 'X'], {}), '(gain[:, active_set], X)\n', (23575, 23599), True, 'import 
numpy as np\n'), ((3993, 4031), 'numpy.unique', 'np.unique', (['(active_idx // n_dip_per_pos)'], {}), '(active_idx // n_dip_per_pos)\n', (4002, 4031), True, 'import numpy as np\n'), ((5239, 5277), 'numpy.unique', 'np.unique', (['(active_idx // n_dip_per_pos)'], {}), '(active_idx // n_dip_per_pos)\n', (5248, 5277), True, 'import numpy as np\n'), ((7789, 7835), 'numpy.all', 'np.all', (['(source_rr == dipoles[i].pos[0])'], {'axis': '(1)'}), '(source_rr == dipoles[i].pos[0], axis=1)\n', (7795, 7835), True, 'import numpy as np\n'), ((13366, 13400), 'scipy.linalg.svd', 'linalg.svd', (['M'], {'full_matrices': '(False)'}), '(M, full_matrices=False)\n', (13376, 13400), False, 'from scipy import linalg\n'), ((13713, 13730), 'numpy.dot', 'np.dot', (['gain.T', 'M'], {}), '(gain.T, M)\n', (13719, 13730), True, 'import numpy as np\n'), ((14416, 14429), 'numpy.dot', 'np.dot', (['X', 'Vh'], {}), '(X, Vh)\n', (14422, 14429), True, 'import numpy as np\n'), ((14442, 14455), 'numpy.dot', 'np.dot', (['M', 'Vh'], {}), '(M, Vh)\n', (14448, 14455), True, 'import numpy as np\n'), ((16248, 16269), 'numpy.hanning', 'np.hanning', (['(lsize * 2)'], {}), '(lsize * 2)\n', (16258, 16269), True, 'import numpy as np\n'), ((16290, 16311), 'numpy.hanning', 'np.hanning', (['(rsize * 2)'], {}), '(rsize * 2)\n', (16300, 16311), True, 'import numpy as np\n'), ((899, 919), 'numpy.abs', 'np.abs', (['weights.data'], {}), '(weights.data)\n', (905, 919), True, 'import numpy as np\n'), ((1275, 1311), 'numpy.tile', 'np.tile', (['weights', '[n_dip_per_pos, 1]'], {}), '(weights, [n_dip_per_pos, 1])\n', (1282, 1311), True, 'import numpy as np\n'), ((1777, 1789), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (1783, 1789), True, 'import numpy as np\n'), ((3822, 3842), 'numpy.where', 'np.where', (['active_set'], {}), '(active_set)\n', (3830, 3842), True, 'import numpy as np\n'), ((4998, 5019), 'numpy.arange', 'np.arange', (['X.shape[1]'], {}), '(X.shape[1])\n', (5007, 5019), True, 'import numpy as np\n'), 
((5068, 5088), 'numpy.where', 'np.where', (['active_set'], {}), '(active_set)\n', (5076, 5088), True, 'import numpy as np\n'), ((7572, 7615), 'numpy.all', 'np.all', (['(dipoles[i].pos == dipoles[i].pos[0])'], {}), '(dipoles[i].pos == dipoles[i].pos[0])\n', (7578, 7615), True, 'import numpy as np\n'), ((5968, 6057), 'numpy.dot', 'np.dot', (["forward['source_nn'][i_dip * n_dip_per_pos:(i_dip + 1) * n_dip_per_pos].T", 'X_'], {}), "(forward['source_nn'][i_dip * n_dip_per_pos:(i_dip + 1) *\n n_dip_per_pos].T, X_)\n", (5974, 6057), True, 'import numpy as np\n'), ((6108, 6131), 'numpy.sum', 'np.sum', (['(X_ ** 2)'], {'axis': '(0)'}), '(X_ ** 2, axis=0)\n', (6114, 6131), True, 'import numpy as np\n'), ((7850, 7863), 'numpy.where', 'np.where', (['idx'], {}), '(idx)\n', (7858, 7863), True, 'import numpy as np\n'), ((8048, 8067), 'numpy.array', 'np.array', (['lh_vertno'], {}), '(lh_vertno)\n', (8056, 8067), True, 'import numpy as np\n'), ((8097, 8116), 'numpy.array', 'np.array', (['rh_vertno'], {}), '(rh_vertno)\n', (8105, 8116), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri May 21 10:10:10 2021
@author: © <NAME>, <NAME>, <NAME>
"""
import ltspice
import matplotlib.pyplot as plt
import matplotlib.ticker as mplt
import numpy as np
import redpitaya_scpi as scpi
from time import sleep as delay
#%% paths and addresses
filename = 'rect'  # base name shared by the data, LTspice and figure files
# Red Pitaya device IP addresses; only ELIE6 is used below, the others are
# kept for convenience when switching devices.
ELIE4 = '192.168.111.184'
ELIE5 = '192.168.111.185'
ELIE6 = '192.168.111.186'
#%% measurement via RedPitaya
# Check whether data from an earlier run was already saved to disk; if so,
# the hardware measurement below is skipped entirely.
try:
    data = np.loadtxt('../../data/int/'+filename+".csv", dtype='float',
                      comments='#', delimiter='\t', converters=None, skiprows=0,
                      unpack=False, ndmin=0)
    saved = True
except OSError:
    # File missing (or unreadable) -> fall through to the measurement path.
    print("No saved data found, begin setup and measurement!")
    saved = False
time_factor = 1e3  # scale factor for the time axis (seconds -> milliseconds)
voltage_division = 10  # voltage division factor of the probe
if not saved:
    # settings of the Red Pitaya
    waveform = 'SQUARE'  # waveform of the input signal
    Amp = 0.5  # amplitude of the input signal
    downsampling = 32  # downsampling rate (decimation factor)
    triggerdt = '8192'  # trigger delay in samples
    sampling_rate = 125e6  # 125 MS/s
    # sampling rate with regard to actual decimation factor
    effective_sampling_rate = sampling_rate/downsampling
    buffer_size = 2**14  # 14 bit
    # taking sample every sampling_interval seconds
    sampling_interval = 1/effective_sampling_rate
    # total buffer length in seconds (kept for reference, unused below)
    total_time = sampling_interval*buffer_size
    # connection to Red Pitaya via SCPI
    RP = scpi.scpi(ELIE6)  # connect to Red Pitaya
    RP.tx_txt('GEN:RST')  # signal generator reset
    RP.tx_txt('ACQ:RST')  # input reset
    RP.tx_txt('SOUR1:VOLT:OFFS 0')  # set offset
    # initial settings of the Red Pitaya
    RP.tx_txt('SOUR1:FUNC ' + waveform.upper())  # set the signal waveform
    RP.tx_txt('SOUR1:FREQ:FIX ' + str(1000))  # set frequency
    RP.tx_txt('SOUR1:VOLT ' + str(Amp))  # set amplitude
    RP.tx_txt('OUTPUT1:STATE ON')  # set the output port 1 ON
    RP.tx_txt('ACQ:DEC ' + str(downsampling))  # set downsampling rate
    RP.tx_txt('ACQ:TRIG:LEV 0')  # set trigger level
    RP.tx_txt('ACQ:TRIG:DLY ' + str(triggerdt))  # set trigger delay
    RP.tx_txt('ACQ:START')  # start the measurement
    RP.tx_txt('ACQ:TRIG NOW')  # trigger
    while True:  # wait until measurement done
        RP.tx_txt('ACQ:TRIG:STAT?')
        if RP.rx_txt() == 'TD':
            break
    # data of input 1
    RP.tx_txt('ACQ:SOUR1:DATA?')  # read the buffer of input 1
    buffstring1 = RP.rx_txt()  # save the data from buffer
    # Strip framing characters. FIX: '\E', '\R' and '\!' were invalid escape
    # sequences (SyntaxWarning since Python 3.12, a future SyntaxError);
    # '\\E', '\\R', '\\!' keep the stripped character set byte-identical.
    buffstring1 = buffstring1.strip('{}\n\r\\E\\R\\!')\
        .replace(" ", "").split(',')  # replace and split characters
    buff1 = np.array(list(map(float, buffstring1))) * voltage_division
    # data of input 2
    RP.tx_txt('ACQ:SOUR2:DATA?')  # read the buffer of input 2
    buffstring2 = RP.rx_txt()  # save the data from buffer
    buffstring2 = buffstring2.strip('{}\n\r\\E\\R\\!')\
        .replace(" ", "").split(',')  # replace and split characters
    buff2 = np.array(list(map(float, buffstring2))) * voltage_division
    t = np.arange(0, len(buff2), 1)
    # store time, input and output voltage column-wise and persist to disk
    data = np.matrix([t*sampling_interval, buff1, buff2]).transpose()
    np.savetxt('../../data/int/'+filename+".csv", data, delimiter='\t')
    RP.tx_txt('OUTPUT1:STATE OFF')  # set the output port 1 OFF
else:
    print("\n Data already saved! \n")
#%% set plotting window configurations
plt.close('all')  # dismiss any figure windows left over from earlier runs
# Global rcParams: tick/axis/title/legend font sizes and default linewidth.
for group, options in (('xtick', dict(labelsize=16)),
                       ('ytick', dict(labelsize=16)),
                       ('axes', dict(labelsize=20, titlesize=24)),
                       ('lines', dict(lw=1)),
                       ('legend', dict(fontsize=18))):
    plt.rc(group, **options)
#%% plot of LTspice simulation and measured data of an integrator circuit
s1 = ltspice.Ltspice('../../spice/int/rect/rect-py.raw')
s1.parse()
t1 = s1.getTime()*time_factor  # simulation time axis scaled to ms
Vin = s1.getData('V(input)')  # simulated input voltage trace
Vout = s1.getData('V(output)')  # simulated output voltage trace
fig = plt.figure(filename.upper().replace('_', ' '))
fig.set_size_inches(19.20, 10.80)
# upper subplot: LTspice simulation (input left axis, output right axis)
ax1 = fig.add_subplot(211, projection='rectilinear')
ax1.set_title('Simulation')
ax1.set_xlabel(r'Zeit t in Millisekunden [ms]')
ax1.set_ylabel(r'Spannung in Volt [V]')
ax1.plot(t1, Vin, 'b-', label=r'LTspice $V_{In}$')
for label in ax1.get_yticklabels():
    label.set_color('blue')  # left tick labels colored to match V_In trace
ax1.grid(linewidth=0.5)
ax1.legend(loc='upper left')
# twin axis so the simulated output voltage gets its own scale on the right
ax2 = ax1.twinx()
ax2.plot(t1, Vout, 'r-', label=r'LTspice $V_{Out}$')
ax2.set_xlim(min(t1), max(t1))
for label in ax2.get_yticklabels():
    label.set_color('red')
ax2.legend(loc='upper right')
# lower subplot: Red Pitaya measurement (same axis layout as above)
ax3 = fig.add_subplot(212, projection='rectilinear')
ax3.set_title('Messung')
ax3.set_xlabel('Zeit t in Millisekunden [ms]')
ax3.set_ylabel('Spannung in Volt [V]')
ax3.plot(data[:,0]*time_factor, data[:,1], 'b.-', markersize=2,
         label=r'Messung $V_{In}$')
for label in ax3.get_yticklabels():
    label.set_color('blue')
ax3.legend(loc='upper left')
# twin axis for the measured output voltage
ax4 = ax3.twinx()
ax4.plot(data[:,0]*time_factor, data[:,2], 'r.-', markersize=2,
         label=r'Messung $V_{Out}$')
ax4.set_xlim(min(data[:,0])*time_factor, max(data[:,0])*time_factor)
for label in ax4.get_yticklabels():
    label.set_color('red')
ax4.grid(linewidth=0.5)
ax4.legend(loc='upper right')
plt.tight_layout()
plt.show()
# save plot (only on the first run, alongside the freshly measured data)
if not saved:
    plt.savefig('../../fig/int/'+filename+".png")  # save as .png-file
    plt.savefig('../../fig/int/'+filename+".svg")  # save as .svg-file
#%% EOF | [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.tight_layout",
"numpy.savetxt",
"numpy.loadtxt",
"numpy.matrix",
"ltspice.Ltspice",
"redpitaya_scpi.scpi",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show"
] | [((3668, 3684), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3677, 3684), True, 'import matplotlib.pyplot as plt\n'), ((3726, 3755), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(16)'}), "('xtick', labelsize=16)\n", (3732, 3755), True, 'import matplotlib.pyplot as plt\n'), ((3793, 3822), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(16)'}), "('ytick', labelsize=16)\n", (3799, 3822), True, 'import matplotlib.pyplot as plt\n'), ((3860, 3888), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': '(20)'}), "('axes', labelsize=20)\n", (3866, 3888), True, 'import matplotlib.pyplot as plt\n'), ((3930, 3958), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': '(24)'}), "('axes', titlesize=24)\n", (3936, 3958), True, 'import matplotlib.pyplot as plt\n'), ((3990, 4011), 'matplotlib.pyplot.rc', 'plt.rc', (['"""lines"""'], {'lw': '(1)'}), "('lines', lw=1)\n", (3996, 4011), True, 'import matplotlib.pyplot as plt\n'), ((4041, 4070), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': '(18)'}), "('legend', fontsize=18)\n", (4047, 4070), True, 'import matplotlib.pyplot as plt\n'), ((4179, 4230), 'ltspice.Ltspice', 'ltspice.Ltspice', (['"""../../spice/int/rect/rect-py.raw"""'], {}), "('../../spice/int/rect/rect-py.raw')\n", (4194, 4230), False, 'import ltspice\n'), ((5622, 5640), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5638, 5640), True, 'import matplotlib.pyplot as plt\n'), ((5641, 5651), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5649, 5651), True, 'import matplotlib.pyplot as plt\n'), ((476, 627), 'numpy.loadtxt', 'np.loadtxt', (["('../../data/int/' + filename + '.csv')"], {'dtype': '"""float"""', 'comments': '"""#"""', 'delimiter': '"""\t"""', 'converters': 'None', 'skiprows': '(0)', 'unpack': '(False)', 'ndmin': '(0)'}), "('../../data/int/' + filename + '.csv', dtype='float', comments=\n '#', delimiter='\\t', 
converters=None, skiprows=0, unpack=False, ndmin=0)\n", (486, 627), True, 'import numpy as np\n'), ((1532, 1548), 'redpitaya_scpi.scpi', 'scpi.scpi', (['ELIE6'], {}), '(ELIE6)\n', (1541, 1548), True, 'import redpitaya_scpi as scpi\n'), ((3439, 3510), 'numpy.savetxt', 'np.savetxt', (["('../../data/int/' + filename + '.csv')", 'data'], {'delimiter': '"""\t"""'}), "('../../data/int/' + filename + '.csv', data, delimiter='\\t')\n", (3449, 3510), True, 'import numpy as np\n'), ((5681, 5730), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../../fig/int/' + filename + '.png')"], {}), "('../../fig/int/' + filename + '.png')\n", (5692, 5730), True, 'import matplotlib.pyplot as plt\n'), ((5750, 5799), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../../fig/int/' + filename + '.svg')"], {}), "('../../fig/int/' + filename + '.svg')\n", (5761, 5799), True, 'import matplotlib.pyplot as plt\n'), ((3378, 3426), 'numpy.matrix', 'np.matrix', (['[t * sampling_interval, buff1, buff2]'], {}), '([t * sampling_interval, buff1, buff2])\n', (3387, 3426), True, 'import numpy as np\n')] |
from functools import partial
from typing import Callable, Type, Union
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import assume, given
from numpy.testing import assert_array_equal
import mygrad as mg
from mygrad import Tensor
from mygrad.math.misc.ops import MatMul
from mygrad.math.arithmetic.ops import (
Add,
Divide,
Multiply,
Positive,
Power,
Square,
Subtract,
)
from mygrad.operation_base import Operation
def plus(x, y):
    """Binary ``+`` wrapper used by the operator-parametrized tests."""
    result = x + y
    return result
def minus(x, y):
    """Binary ``-`` wrapper used by the operator-parametrized tests."""
    result = x - y
    return result
def multiply(x, y):
    """Binary ``*`` wrapper used by the operator-parametrized tests."""
    result = x * y
    return result
def divide(x, y):
    """Binary true-division wrapper used by the operator-parametrized tests."""
    result = x / y
    return result
def power(x, y):
    """Binary ``**`` wrapper used by the operator-parametrized tests."""
    result = x ** y
    return result
def matmul(x, y):
    """Binary ``@`` wrapper (against ``y.T``) for the parametrized tests."""
    # hypothesis: discard draws where matmul is undefined (0-d or >2-d).
    assume(0 < x.ndim < 3)
    product = x @ y.T
    return product
@pytest.mark.parametrize(
    "func, op",
    [
        (plus, Add),
        (minus, Subtract),
        (multiply, Multiply),
        (divide, Divide),
        (power, (Power, Positive, Square)),  # can specialize
        (matmul, MatMul),
    ],
)
@given(
    arr=hnp.arrays(
        shape=hnp.array_shapes(min_dims=0, min_side=0),
        dtype=hnp.floating_dtypes(),
        elements=dict(min_value=1.0, max_value=2.0),
    )
)
def test_arithmetic_operators_between_array_and_tensor_cast_to_tensor(
    arr: np.ndarray,
    func: Callable[[Union[np.ndarray, Tensor], Union[np.ndarray, Tensor]], Tensor],
    op: Type[Operation],
):
    """Operators produce a Tensor via the expected Op for every operand mix."""
    tensor = Tensor(arr)
    # Whichever operand is the Tensor (or both), the result must be a
    # Tensor whose creator is the expected Operation.
    for left, right in ((tensor, arr), (arr, tensor), (tensor, tensor)):
        out = func(left, right)
        assert isinstance(out, Tensor)
        assert isinstance(out.creator, op)
constant_tensor: Callable[..., Tensor] = partial(mg.tensor, constant=True)
@given(
    arr1=hnp.arrays(
        shape=st.just(tuple()) | st.just((3,)),
        dtype=st.sampled_from([float, int]),
        elements=dict(min_value=1, max_value=2),
    ),
    arr2=hnp.arrays(
        shape=st.just(tuple()) | st.just((3,)),
        dtype=st.sampled_from([float, int]),
        elements=dict(min_value=1, max_value=2),
    ),
)
@pytest.mark.parametrize(
    "f1, f2",
    [
        (constant_tensor, lambda x: x),
        (
            lambda x: x.tolist(),
            constant_tensor,
        ),  # `list/tensor` ensures __rfloordiv__ gets called
        (constant_tensor, constant_tensor),
    ],
)
def test_floor_div(arr1, arr2, f1, f2):
    """Floor division of constant tensors matches NumPy in value and dtype."""
    expected = arr1 // arr2
    result = f1(arr1) // f2(arr2)
    assert result.dtype == expected.dtype
    assert_array_equal(expected, result)
def test_floor_div_is_raises_for_variable_tensors():
    """Floor division involving a variable (non-constant) tensor must raise."""
    # Both operand orders must reject the non-differentiable operation.
    for make_expr in (
        lambda: mg.tensor(1.0, constant=False) // 1,
        lambda: 1 // mg.tensor(1.0, constant=False),
    ):
        with pytest.raises(ValueError):
            make_expr()
| [
"hypothesis.strategies.sampled_from",
"hypothesis.assume",
"mygrad.tensor",
"pytest.mark.parametrize",
"hypothesis.strategies.just",
"mygrad.Tensor",
"functools.partial",
"pytest.raises",
"hypothesis.extra.numpy.floating_dtypes",
"numpy.testing.assert_array_equal",
"hypothesis.extra.numpy.array_... | [((772, 945), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func, op"""', '[(plus, Add), (minus, Subtract), (multiply, Multiply), (divide, Divide), (\n power, (Power, Positive, Square)), (matmul, MatMul)]'], {}), "('func, op', [(plus, Add), (minus, Subtract), (\n multiply, Multiply), (divide, Divide), (power, (Power, Positive, Square\n )), (matmul, MatMul)])\n", (795, 945), False, 'import pytest\n'), ((1785, 1818), 'functools.partial', 'partial', (['mg.tensor'], {'constant': '(True)'}), '(mg.tensor, constant=True)\n', (1792, 1818), False, 'from functools import partial\n'), ((727, 749), 'hypothesis.assume', 'assume', (['(0 < x.ndim < 3)'], {}), '(0 < x.ndim < 3)\n', (733, 749), False, 'from hypothesis import assume, given\n'), ((1419, 1430), 'mygrad.Tensor', 'Tensor', (['arr'], {}), '(arr)\n', (1425, 1430), False, 'from mygrad import Tensor\n'), ((2591, 2626), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['desired', 'actual'], {}), '(desired, actual)\n', (2609, 2626), False, 'from numpy.testing import assert_array_equal\n'), ((2691, 2716), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2704, 2716), False, 'import pytest\n'), ((2772, 2797), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2785, 2797), False, 'import pytest\n'), ((2726, 2756), 'mygrad.tensor', 'mg.tensor', (['(1.0)'], {'constant': '(False)'}), '(1.0, constant=False)\n', (2735, 2756), True, 'import mygrad as mg\n'), ((2812, 2842), 'mygrad.tensor', 'mg.tensor', (['(1.0)'], {'constant': '(False)'}), '(1.0, constant=False)\n', (2821, 2842), True, 'import mygrad as mg\n'), ((1062, 1102), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(0)', 'min_side': '(0)'}), '(min_dims=0, min_side=0)\n', (1078, 1102), True, 'import hypothesis.extra.numpy as hnp\n'), ((1118, 1139), 'hypothesis.extra.numpy.floating_dtypes', 'hnp.floating_dtypes', ([], {}), '()\n', (1137, 
1139), True, 'import hypothesis.extra.numpy as hnp\n'), ((1912, 1941), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[float, int]'], {}), '([float, int])\n', (1927, 1941), True, 'import hypothesis.strategies as st\n'), ((2082, 2111), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[float, int]'], {}), '([float, int])\n', (2097, 2111), True, 'import hypothesis.strategies as st\n'), ((1883, 1896), 'hypothesis.strategies.just', 'st.just', (['(3,)'], {}), '((3,))\n', (1890, 1896), True, 'import hypothesis.strategies as st\n'), ((2053, 2066), 'hypothesis.strategies.just', 'st.just', (['(3,)'], {}), '((3,))\n', (2060, 2066), True, 'import hypothesis.strategies as st\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import onnx
import onnx.numpy_helper
import struct
from pathlib import Path
import numpy as np
from onnx import onnx_pb as onnx_proto
from onnxruntime import SessionOptions, InferenceSession, GraphOptimizationLevel
from onnxruntime.quantization.quant_utils import QuantizationMode, QuantizedValueType
from onnxruntime.quantization.quant_utils import find_by_name, get_elem_index, get_mul_node, \
generate_identified_filename, attribute_to_kwarg, type_to_name
from onnxruntime.quantization.quant_utils import onnx_domain, __producer__, __version__
from lpot.adaptor.ox_utils.registry import CreateOpQuantizer, CreateDefaultOpQuantizer
from lpot.adaptor.ox_utils.util import quantize_data_with_scale_zo, quantize_data, \
QuantizedValue, QuantizedInitializer
from lpot.model.onnx_model import ONNXModel
def _get_qrange_for_qType(qType, reduce_range=False):
    """Return the quantization range for a quantization data type.

    parameter qType: quantization type (UINT8 or INT8 TensorProto type).
    parameter reduce_range: use the narrower range when True.
    return: quantization range as an int.
    raises ValueError: for any other data type.
    """
    if qType == onnx_proto.TensorProto.UINT8:
        full, reduced = 255, 127
    elif qType == onnx_proto.TensorProto.INT8:
        # [-64, 64] for reduce_range, and [-127, 127] full_range.
        full, reduced = 254, 128
    else:
        raise ValueError('unsupported quantization data type')
    return reduced if reduce_range else full
class ONNXQuantizer:
    def __init__(self, model, q_config, mode, static, quantization_params,
                 op_types_to_quantize):
        """Build a quantizer over *model*.

        model: ModelProto to quantize (wrapped in ONNXModel).
        q_config: per-node config mapping node name -> dtype settings.
        mode: QuantizationMode value.
        static: use pre-computed quantization params for inputs when True.
        quantization_params: map of tensor name -> [zero_point, scale].
        op_types_to_quantize: op types eligible for quantization.
        """
        self.model = ONNXModel(model)
        self.config = q_config
        self.reduce_range = False
        self.mode = mode  # QuantizationMode.Value
        self.static = static  # use static quantization for inputs.
        self.fuse_dynamic_quant = False
        self.quantization_params = quantization_params
        self.op_types_to_quantize = op_types_to_quantize
        self.new_nodes = []
        self.opset_version = self.check_opset_version()
        # QuantizeRange tensor name and zero tensor name for scale and zero point calculation.
        # Used when static is False
        self.fixed_qrange_uint8_name = "fixed_quantization_range_uint8"
        self.fixed_qrange_int8_name = "fixed_quantization_range_int8"
        # For uint8 data-type, to compute zero point,
        # we subtract rmin from 0 (represented by fixed_zero_name tensor)
        self.fixed_zero_name = "fixed_zero"
        # For int8 data-type, zero point is always zero
        # (represented by fixed_zero_point_name tensor)
        self.fixed_zero_zp_name = "fixed_zero_zp"
        # List of quantized weights
        self._quantized_weights = []
        # Map of all original value names to quantized value names
        self.quantized_value_map = {}
    def check_opset_version(self):
        """Validate the model's ai.onnx opset and upgrade opset < 10 to 11.

        Side effects: for opset < 10 the model's opset_import is rewritten to 11;
        for opset != 10 `fuse_dynamic_quant` is enabled.
        Returns the (possibly updated) opset version.
        """
        ai_onnx_domain = [
            opset for opset in self.model.model.opset_import if not opset.domain \
            or opset.domain == "ai.onnx"
        ]
        # Exactly one ai.onnx (default-domain) opset entry is required.
        if 1 != len(ai_onnx_domain):
            raise ValueError('Failed to find proper ai.onnx domain')
        opset_version = ai_onnx_domain[0].version
        if opset_version == 10:
            # opset 10 is usable but fused DynamicQuantizeLinear is not enabled.
            print(
                "Warning: The original model opset version is {}, which does not support node \
fusions. Please update the model to opset >= 11 for better performance."
                .format(opset_version))
            return 10
        if opset_version < 10:
            print(
                "Warning: The original model opset version is {}, which does not support \
quantization. Please update the model to opset >= 11. Updating the model \
automatically to opset 11. Please verify the quantized model."
                .format(opset_version))
            self.model.model.opset_import.remove(ai_onnx_domain[0])
            self.model.model.opset_import.extend([onnx.helper.make_opsetid("", 11)])
            opset_version = 11
        self.fuse_dynamic_quant = True
        return opset_version
    def remove_fake_quantized_nodes(self): # pragma: no cover
        '''
        Detect and remove the quantize/dequantizelinear node pairs(fake quantized nodes
        in Quantization-Aware training) and reconnect and update the nodes.
        The zero point and scale of each removed pair are recorded in
        self.quantization_params keyed by the QuantizeLinear's input name.
        !!! not supported now !!!
        '''
        nodes_to_remove = []
        initializers_to_remove = []
        for curr_node in self.model.nodes():
            if curr_node.op_type == 'QuantizeLinear':
                next_node, prev_node, succ_node = None, None, None
                # The paired DequantizeLinear must be a direct child.
                for child_node in self.model.get_children(curr_node):
                    if child_node.op_type == 'DequantizeLinear':
                        next_node = child_node
                if next_node is None:
                    raise ValueError(
                        "Remove fake-quantized node pair Error: DequantizeLinear node is \
not found for {}.".format(curr_node.name))
                prev_node = self.model.get_parent(curr_node, 0)
                if prev_node is None:
                    raise ValueError("Remove fake-quantized node pair Error: Parent node is \
not found for {}.".format(curr_node.name))
                succ_nodes = self.model.get_children(next_node)
                if len(succ_nodes) == 0:
                    raise ValueError("Remove fake-quantized node pair Error: No successive \
nodes found for {}.".format(next_node.name))
                # TODO: convert it to the specified input_type
                scale_tensor_name = curr_node.input[1]
                zp_tensor_name = curr_node.input[2]
                initializer_scale = find_by_name(scale_tensor_name, self.model.initializer())
                initializer_zp = find_by_name(zp_tensor_name, self.model.initializer())
                # Recorded as [zero_point, scale] -- the order expected by
                # _get_quantization_params (params[0]=zp, params[1]=scale).
                zp_and_scale = [
                    onnx.numpy_helper.to_array(initializer_zp),
                    onnx.numpy_helper.to_array(initializer_scale)
                ]
                # connect the previous and successive node input and output
                for succ_node in succ_nodes:
                    succ_idx = get_elem_index(next_node.output[0], succ_node.input)
                    if succ_idx != -1:
                        succ_node.input[succ_idx] = curr_node.input[0]
                    else:
                        raise ValueError(
                            "Remove fake-quantized node pair Error: Connection failed. \
No matched successive node input found for {}.".format(next_node.name))
                param_name = curr_node.input[0]
                if self.quantization_params is None:
                    self.quantization_params = {}
                self.quantization_params[param_name] = zp_and_scale
                # remove fake-quantized nodes
                nodes_to_remove.extend([curr_node])
                nodes_to_remove.extend([next_node])
                # remove unused initializers in graph
                initializers_to_remove.extend([initializer_scale])
                initializers_to_remove.extend([initializer_zp])
        self.model.remove_nodes(nodes_to_remove)
        self.model.remove_initializers(initializers_to_remove)
        return self.model.model
def should_quantize(self, node):
if node.name in self.config:
return self.config[node.name] != 'fp32'
else:
return False
    def quantize_model(self):
        """Quantize every eligible node and return the modified ModelProto.

        Nodes selected by the config get a registered op quantizer; all other
        nodes get the pass-through default quantizer. The graph's node list is
        rebuilt from self.new_nodes, quantized weights are stripped from the
        initializer list, and producer metadata is stamped.
        """
        self.remove_fake_quantized_nodes()
        for node in self.model.nodes():
            if self.should_quantize(node):
                op_quantizer = CreateOpQuantizer(self, node)
            else:
                op_quantizer = CreateDefaultOpQuantizer(self, node)
            op_quantizer.quantize()
        # Graph outputs that ended up quantized must be dequantized back to float.
        self._dequantize_outputs()
        # extend is used to append to the list for a protobuf fields
        # https://developers.google.com/protocol-buffers/docs/reference
        # /python-generated?csw=1#fields
        self.model.graph().ClearField('node')
        self.model.graph().node.extend(self.new_nodes)
        # Remove weights which are already quantized from graph.
        self._remove_quantized_weights()
        self.model.model.producer_name = __producer__
        self.model.model.producer_version = __version__
        return self.model.model
    @staticmethod
    def tensor_proto_to_array(initializer):
        """Convert a FLOAT TensorProto initializer to a numpy array.

        Raises ValueError for any non-float tensor, since only float-typed
        weights can be quantized by this tool.
        """
        if initializer.data_type == onnx_proto.TensorProto.FLOAT:
            weights = onnx.numpy_helper.to_array(initializer)
        else:
            raise ValueError('Only float type quantization is supported. \
Weights {} is {}. '.format(initializer.name, type_to_name[initializer.data_type]))
        return weights
def is_input_a_weight(self, input_name):
initializer = find_by_name(input_name, self.model.initializer())
return initializer is not None
def is_valid_quantize_weight(self, weight_name):
weight = find_by_name(weight_name, self.model.initializer())
return weight is not None and weight.data_type == onnx_proto.TensorProto.FLOAT
    def _remove_quantized_weights(self):
        ''' Remove the weights which are already quantized from graph initializer list.
            This function assumes that after quantization, all nodes that previously use a weight:
                - use output from DequantizeLinear as input if they do not support quantization.
                - use quantized weight if they support quantization.
        '''
        for weight in self._quantized_weights:
            # Remove existing weight initializer
            self.model.initializer().remove(weight.initializer)
            # Removing input weight to a convolution
            try:
                weight_input = next(val for val in self.model.graph().input \
                                    if val.name == weight.name)
                self.model.graph().input.remove(weight_input)
            except StopIteration:
                # Not a graph input; only warn on pre-IR-4 models, where
                # initializers were required to also appear as graph inputs.
                if self.model.ir_version() < 4:
                    print("Warning: invalid weight name {} found in the graph \
(not a graph input)".format(weight.name))
    def _update_weight(self, weight):
        '''
        Given a weight object, update the graph by doing the following:
            - remove old initializer, update new initializers for
              quantized weight, zero point, and scale
            - remove old weight input, update with new inputs for
              quantized weight, zero point, and scale
        This function does NOT update the nodes in the graph, just initializers and inputs
        '''
        # The weight must already have been registered in the quantized value map.
        quantized_value = self.quantized_value_map[weight.name]
        assert (quantized_value is not None)
        packed_weight_name = quantized_value.q_name
        scale_name = quantized_value.scale_name
        zero_point_name = quantized_value.zp_name
        # Update packed weight, zero point, and scale initializers
        packed_weight_np_data = np.asarray(weight.quantized_data,
                                           dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[weight.qType]
                                           ).reshape(weight.initializer.dims)
        packed_weight_initializer = onnx.numpy_helper.from_array(packed_weight_np_data,\
                                                                 packed_weight_name)
        # Per-channel weights carry an axis; scale/zp then get one entry per channel.
        if weight.axis is not None:
            zero_scale_shape = [weight.initializer.dims[weight.axis]]
        else:  # scale and zero point must be scalar
            zero_scale_shape = []
        zero_point_type = weight.qType
        scale_initializer = onnx.helper.make_tensor(scale_name, onnx_proto.TensorProto.FLOAT,
                                                    zero_scale_shape, weight.scales)
        zero_initializer = onnx.helper.make_tensor(zero_point_name, zero_point_type,
                                                   zero_scale_shape, weight.zero_points)
        self.model.initializer().extend([packed_weight_initializer, scale_initializer,
                                         zero_initializer])
        # Remember the weight so _remove_quantized_weights can strip the original.
        self._quantized_weights.append(weight)
    def _get_quantized_weight(self, initializer, qType):
        '''
        Quantize a weight initializer per-tensor and register it.
            :param initializer: TensorProto initializer
            :param qType: type to quantize to
            :return: Weight class with quantization information
        '''
        weights_data = self.tensor_proto_to_array(initializer)
        rmin, rmax, zero_point, scale, quantized_weights_data = quantize_data(
            weights_data.flatten().tolist(), _get_qrange_for_qType(qType, \
            self.reduce_range), qType)
        # axis=None marks this as per-tensor (scalar scale / zero point).
        weight = QuantizedInitializer(initializer.name,
                                     initializer, [rmin], [rmax], [zero_point], [scale],
                                     weights_data,
                                     quantized_weights_data,
                                     axis=None,
                                     qType=qType)
        # Log entry for this quantized weight
        assert (weight.name not in self.quantized_value_map)
        quantized_value = QuantizedValue(weight.name, weight.name + "_quantized",
                                         weight.name + "_scale",
                                         weight.name + "_zero_point",
                                         QuantizedValueType.Initializer, None, qType)
        self.quantized_value_map[weight.name] = quantized_value
        return weight
    def _get_dynamic_input_quantization_params(self, input_name, nodes_list, qType):
        '''
        Create nodes for dynamic quantization of input and add them to nodes_list.
            parameter input_name: Name of the input.
            parameter nodes_list: new nodes are appended to this list.
            parameter qType: type to quantize to.
            return: scale_name, zero_point_name, scale_shape, zero_point_shape.
        '''
        # Dispatch on target type; only INT8 and UINT8 variants exist.
        if qType == onnx_proto.TensorProto.INT8:
            return self._get_dynamic_input_quantization_params_int8(input_name, nodes_list)
        return self._get_dynamic_input_quantization_params_uint8(input_name, nodes_list)
    def _get_dynamic_input_quantization_params_int8(self, input_name, nodes_list):
        '''
        Create nodes for dynamic quantization of input to int8 and add them to nodes_list
            parameter input_name: Name of the input.
            parameter nodes_list: new nodes are appended to this list.
            return: scale_name, zero_point_name, scale_shape, zero_point_shape.

        For symmetric int8, scale = max(|rmin|, |rmax|) / (qrange / 2) and the
        zero point is the fixed constant 0.
        '''
        qType = onnx_proto.TensorProto.INT8
        # Reduce min and Reduce max
        input_scale_name = input_name + "_scale"
        reduce_min_name = input_name + "_ReduceMin"
        reduce_min_node = onnx.helper.make_node("ReduceMin", [input_name],
                                                [reduce_min_name + ":0"],
                                                reduce_min_name,
                                                keepdims=0)
        nodes_list.append(reduce_min_node)
        reduce_max_name = input_name + "_ReduceMax"
        reduce_max_node = onnx.helper.make_node("ReduceMax", [input_name],
                                                [reduce_max_name + ":0"],
                                                reduce_max_name,
                                                keepdims=0)
        nodes_list.append(reduce_max_node)
        # Compute scale
        #   Find abs(rmin)
        reduce_min_abs_name = reduce_min_name + "_Abs"
        reduce_min_abs_node = onnx.helper.make_node("Abs", [reduce_min_node.output[0]],
                                                    [reduce_min_abs_name + ":0"],
                                                    reduce_min_abs_name)
        nodes_list.append(reduce_min_abs_node)
        #   Find abs(rmax)
        reduce_max_abs_name = reduce_max_name + "_Abs"
        reduce_max_abs_node = onnx.helper.make_node("Abs", [reduce_max_node.output[0]],
                                                    [reduce_max_abs_name + ":0"],
                                                    reduce_max_abs_name)
        nodes_list.append(reduce_max_abs_node)
        #   Compute max of abs(rmin) and abs(rmax)
        abs_max_name = input_name + "_Abs_Max"
        abs_max_node = onnx.helper.make_node("Max", [reduce_min_abs_node.output[0],
                                                     reduce_max_abs_node.output[0]],
                                             [abs_max_name + ":0"], abs_max_name)
        nodes_list.append(abs_max_node)
        #   and divide by (quantize_range/2.0) which will be equal to max(...)*2.0/quantize_range
        initializer_div = onnx.helper.make_tensor(self.fixed_qrange_int8_name,
                                                  onnx_proto.TensorProto.FLOAT, [],
                                                  [_get_qrange_for_qType(qType) / 2.0])
        self.model.add_initializer(initializer_div)
        # NOTE(review): name lacks the "_" separator used elsewhere ("scale_Div"
        # vs "_scale_Div" in the uint8 variant); kept as-is for compatibility.
        scale_div_name = input_name + "scale_Div"
        scale_div_node = onnx.helper.make_node("Div",
                                               [abs_max_node.output[0],
                                                self.fixed_qrange_int8_name],
                                               [input_scale_name], scale_div_name)
        nodes_list.append(scale_div_node)
        # Zero point: constant 0 for symmetric int8.
        initializer_zp = onnx.helper.make_tensor(self.fixed_zero_zp_name, qType, [], [0])
        self.model.add_initializer(initializer_zp)
        return input_scale_name, self.fixed_zero_zp_name, [], []
    def _get_dynamic_input_quantization_params_uint8(self,input_name,nodes_list):# pragma: no cover
        '''
        Create nodes for dynamic quantization of input to uint8 and add them to nodes_list
            parameter input_name: Name of the input.
            parameter nodes_list: new nodes are appended to this list.
            return: scale_name, zero_point_name, scale_shape, zero_point_shape.

        For asymmetric uint8: scale = (rmax - rmin) / qrange and
        zero_point = floor((0 - rmin) / scale), cast to uint8.
        '''
        qType = onnx_proto.TensorProto.UINT8
        # Reduce min and Reduce max
        input_scale_name = input_name + "_scale"
        input_zp_name = input_name + "_zero_point"
        reduce_min_name = input_name + "_ReduceMin"
        reduce_min_node = onnx.helper.make_node("ReduceMin", [input_name],
                                                [reduce_min_name + ":0"],
                                                reduce_min_name,
                                                keepdims=0)
        nodes_list.append(reduce_min_node)
        reduce_max_name = input_name + "_ReduceMax"
        reduce_max_node = onnx.helper.make_node("ReduceMax", [input_name],
                                                [reduce_max_name + ":0"],
                                                reduce_max_name,
                                                keepdims=0)
        nodes_list.append(reduce_max_node)
        # Add tensors for quantize range and zero value.
        initializer_qrange = onnx.helper.make_tensor(self.fixed_qrange_uint8_name,
                                                     onnx_proto.TensorProto.FLOAT, [],
                                                     [_get_qrange_for_qType(qType)])
        self.model.add_initializer(initializer_qrange)
        initializer_qvalue = onnx.helper.make_tensor(self.fixed_zero_name,
                                                     onnx_proto.TensorProto.FLOAT, [], [0.0])
        self.model.add_initializer(initializer_qvalue)
        # Compute Scale
        #   Subtract rmax and rmin
        scale_sub_name = input_name + "_scale_Sub"
        scale_sub_node = onnx.helper.make_node("Sub", [reduce_max_node.output[0],
                                                       reduce_min_node.output[0]],
                                               [scale_sub_name + ":0"], scale_sub_name)
        nodes_list.append(scale_sub_node)
        #   and divide by quantize range
        scale_div_name = input_name + "_scale_Div"
        scale_div_node = onnx.helper.make_node("Div", [scale_sub_node.output[0],
                                                       self.fixed_qrange_uint8_name],
                                               [input_scale_name], scale_div_name)
        nodes_list.append(scale_div_node)
        # Compute zero point
        #   Subtract zero and rmin
        zp_sub_name = input_name + "_zero_point_Sub"
        zp_sub_node = onnx.helper.make_node("Sub", [self.fixed_zero_name,
                                                    reduce_min_node.output[0]],
                                            [zp_sub_name + ":0"], zp_sub_name)
        nodes_list.append(zp_sub_node)
        #   Divide by scale
        zp_div_name = input_name + "_zero_point_Div"
        zp_div_node = onnx.helper.make_node("Div", [zp_sub_node.output[0], input_scale_name]
                                            , [zp_div_name + ":0"],
                                            zp_div_name)
        nodes_list.append(zp_div_node)
        #   Compute floor
        zp_floor_name = input_name + "_zero_point_Floor"
        zp_floor_node = onnx.helper.make_node("Floor", zp_div_node.output, [zp_floor_name + ":0"]
                                              , zp_floor_name)
        nodes_list.append(zp_floor_node)
        #   Cast to integer
        zp_cast_name = input_name + "_zero_point_Cast"
        zp_cast_node = onnx.helper.make_node("Cast", zp_floor_node.output, [input_zp_name],
                                             zp_cast_name, to=qType)
        nodes_list.append(zp_cast_node)
        return input_scale_name, input_zp_name, [], []
    def _get_quantization_params(self, param_name):
        '''
        Create initializers and inputs in the graph for zero point and scale of output.
        Zero point and scale values are obtained from self.quantization_params if specified.
            parameter param_name: Name of the quantization parameter.
            return: result, scale_name, zero_point_name, scale_shape, zero_point_shape.
                    `result` is False (with empty strings) when no params exist
                    for *param_name*.
        '''
        if self.quantization_params is None or param_name not in self.quantization_params:
            return False, "", "", "", ""
        # Expected layout: params[0] = zero point, params[1] = scale
        # (the order recorded by remove_fake_quantized_nodes).
        params = self.quantization_params[param_name]
        if params is None or len(params) != 2:
            raise ValueError("Quantization parameters should contain zero point and scale. "
                             "Specified values for output {}: {}".format(param_name, params))
        zero_point_values = [params[0].item()]
        zero_point_shape = []
        zero_point_name = param_name + "_zero_point"
        # Tensor type is derived from the numpy dtype of the stored zero point.
        zero_point_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[params[0].dtype]
        scale_values = [params[1].item()]
        scale_shape = []
        scale_name = param_name + "_scale"
        # Add initializers
        init_zp = onnx.helper.make_tensor(zero_point_name, zero_point_type,
                                          zero_point_shape, zero_point_values)
        self.model.add_initializer(init_zp)
        init_scale = onnx.helper.make_tensor(scale_name, onnx_proto.TensorProto.FLOAT,
                                             scale_shape, scale_values)
        self.model.add_initializer(init_scale)
        return True, scale_name, zero_point_name, scale_shape, zero_point_shape
    def _get_quantize_input_nodes(self, node, input_index, qType):
        '''
        Given an input for a node (which is not a initializer), this function
            - add nodes to compute zero point and scale for this input if they don't exist.
            - add new QuantizeLinear node to quantize the input.
            parameter node: node being quantized in NodeProto format.
            parameter input_index: index of input in node.input.
            parameter qType: type to quantize to.
            return: List of newly created nodes in NodeProto format.
        '''
        input_name = node.input[input_index]
        output_name = input_name + "_quantized"
        data_found, scale_name, zp_name, _, _ = \
            self._get_quantization_params(input_name)
        if self.static:
            # Static mode: scale/zero-point must be pre-computed.
            if data_found == False:
                raise ValueError(
                    "Quantization parameters are not specified for param {}." \
                    "In static mode quantization params for inputs and outputs \
of nodes to be quantized are required.".format(input_name))
            qlinear_node = onnx.helper.make_node("QuantizeLinear",
                                                 [input_name, scale_name, zp_name],
                                                 [output_name],
                                                 input_name + "_QuantizeLinear")
            return [qlinear_node]
        else:
            # Dynamic mode: use pre-computed params when available, otherwise
            # emit nodes that compute them at runtime.
            if data_found == True:
                qlinear_node = onnx.helper.make_node("QuantizeLinear",
                                                     [input_name, scale_name, zp_name],
                                                     [output_name],
                                                     input_name + "_QuantizeLinear")
                return [qlinear_node]
            else:
                # Scale and Zero Points not available for this input.
                # add nodes to dynamically compute it
                if self.fuse_dynamic_quant and qType == onnx_proto.TensorProto.UINT8:
                    # opset >= 11 fused op: computes scale/zp and quantizes in one node.
                    scale_name = input_name + "_scale"
                    zeropoint_name = input_name + "_zero_point"
                    qlinear_node = onnx.helper.make_node("DynamicQuantizeLinear", [input_name],
                                                         [output_name, scale_name,
                                                          zeropoint_name],
                                                         input_name + "_QuantizeLinear")
                    return [qlinear_node]
                else:
                    nodes = []
                    scale_name, zp_name, scale_shape, zp_shape = \
                        self._get_dynamic_input_quantization_params(
                            input_name, nodes, qType)
                    qlinear_node = onnx.helper.make_node("QuantizeLinear",
                                                         [input_name, scale_name, zp_name],
                                                         [output_name],
                                                         input_name + "_QuantizeLinear")
                    return nodes + [qlinear_node]
    def get_bias_add_nodes(self, nodes, node, last_output, quantized_bias_name):
        '''
        Given a node, this function handles bias add by
        adding a "reshape" node on bias and an "add" node
            parameter nodes: new nodes would be appended into nodes
            parameter node: current node (Conv)
            parameter last_output: output of previous node (input to bias add)
            parameter quantized_bias_name: name of the quantized bias tensor
            return: the name of output
        '''
        # Add tensors for the shape to be reshaped to
        weight = find_by_name(node.input[1], self.model.initializer())
        if weight is None:
            raise ValueError("Expected {} to be an initializer".format(node.input[1]))
        # Add reshape for correct broadcase
        reshape_input_data = quantized_bias_name
        reshape_input_shape = quantized_bias_name + "_reshape_shape"
        reshape_input = [reshape_input_data, reshape_input_shape]
        # Target shape is all-ones except the channel dim (axis 1), which is -1,
        # so the 1-D bias broadcasts across the conv output.
        reshape_shape = np.ones((len(weight.dims)), dtype=np.int64)
        reshape_shape[1] = -1
        init_shape = onnx.helper.make_tensor(reshape_input_shape, onnx_proto.TensorProto.INT64,
                                             [len(weight.dims)], reshape_shape)
        self.model.add_initializer(init_shape)
        reshape_op_output = node.output[0] + "_reshape"
        reshape_node = onnx.helper.make_node("Reshape", reshape_input, [reshape_op_output],
                                              quantized_bias_name + "reshape")
        nodes.append(reshape_node)
        # Add an Add operation for bias
        bias_add_input = [last_output]
        bias_add_input.append(reshape_op_output)
        add_node_output = node.output[0] + "_bias_add"
        add_node = onnx.helper.make_node("Add", bias_add_input, [add_node_output],
                                          quantized_bias_name + "bias_add")
        nodes.append(add_node)
        return add_node_output
    def _dynamic_quantize_bias(self, input_name, weight_scale_name, \
                               bias_name, quantized_bias_name, new_node_list):
        '''
        Adds series of nodes required to quantize the bias dynamically.
        bias_scale = input_scale * weight_scale; quantized bias =
        cast(floor(bias / bias_scale)) to int32.
            parameter input_name: Input name
            parameter weight_scale_name: Weight scale.
            parameter bias_name: Bias to quantize.
            parameter quantized_bias_name: Output name to use for quantized bias.
        '''
        qType = onnx_proto.TensorProto.INT32
        # Relies on the dynamically-computed input scale tensor named by convention.
        input_scale_name = input_name + "_scale"
        bias_scale_node = onnx.helper.make_node("Mul",
                                                [input_scale_name, weight_scale_name],
                                                [bias_name + "_scale"],
                                                bias_name + "_scale_node")
        new_node_list.append(bias_scale_node)
        # NOTE(review): "_tmp_qaunt" looks like a typo of "_tmp_quant" but is a
        # runtime node name -- left unchanged for graph compatibility.
        quantize_bias_node = onnx.helper.make_node("Div", [bias_name, bias_scale_node.output[0]],
                                                   [bias_name + "_tmp_quant:0"],
                                                   bias_name + "_tmp_qaunt")
        new_node_list.append(quantize_bias_node)
        bias_rounded_node = onnx.helper.make_node("Floor", quantize_bias_node.output,
                                                  [bias_name + "_quant_rounded:0"],
                                                  bias_name + "_quant_rounded")
        new_node_list.append(bias_rounded_node)
        bias_cast_node = onnx.helper.make_node("Cast",
                                               bias_rounded_node.output, [quantized_bias_name],
                                               quantized_bias_name + "_node",
                                               to=qType)
        new_node_list.append(bias_cast_node)
        return
    def quantize_bias(self, node, new_node_list):
        '''
        Quantized the bias. Zero Point == 0 and Scale == Input_Scale * Weight_Scale
            parameter node: node whose input[2] is the bias.
            parameter new_node_list: dynamic-quantization nodes may be appended here.
            return: name of the quantized bias tensor.
        '''
        # get scale for weight
        weight_scale_name = self.quantized_value_map[node.input[1]].scale_name
        weight_initializer = find_by_name(weight_scale_name, self.model.initializer())
        weight_scale = self.tensor_proto_to_array(weight_initializer)
        # get bias
        bias_name = node.input[2]
        bias_initializer = find_by_name(bias_name, self.model.initializer())
        bias_data = self.tensor_proto_to_array(bias_initializer)
        quantized_bias_name = bias_name + "_quantized"
        # input scale is not provided and this input is dynamically quantized
        # so it is not pre-computed at this point
        # so resort to dynamic quantization for bias
        # NOTE(review): `and` binds tighter than `or`, so this condition reads as
        # `params is None OR (input not in params AND input not in value map)` --
        # confirm this grouping is intended.
        if self.quantization_params is None or node.input[0] not in self.quantization_params and \
            node.input[0] not in self.quantized_value_map:
            self._dynamic_quantize_bias(node.input[0], weight_scale_name, bias_name,
                                        quantized_bias_name, new_node_list)
        else:
            # get scale for input
            if node.input[0] in self.quantized_value_map:
                input_scale_name = self.quantized_value_map[node.input[0]].scale_name
            elif node.input[0] in self.quantization_params:
                _, input_scale_name, _, _, _ = self._get_quantization_params(node.input[0])
            else:
                raise ValueError("Expected {} to be in quantized value map \
for static quantization".format(node.input[0]))
            inputscale_initializer = find_by_name(input_scale_name, self.model.initializer())
            input_scale = self.tensor_proto_to_array(inputscale_initializer)
            # calcuate scale for bias
            bias_scale = input_scale * weight_scale
            # quantize bias
            quantized_data = (np.asarray(bias_data) / bias_scale).round().astype(np.int32)
            # update bias initializer
            bias_np_data = np.asarray(quantized_data, dtype=np.int32).reshape(\
                                                            bias_initializer.dims)
            packed_bias_initializer = onnx.numpy_helper.from_array(bias_np_data,
                                                                   quantized_bias_name)
            self.model.initializer().extend([packed_bias_initializer])
            # log entries for this quantized bias value
            quantized_bias_entry = QuantizedInitializer(bias_name,
                                                        bias_initializer, [0], [0], [0],
                                                        [bias_scale],
                                                        bias_data,
                                                        quantized_data,
                                                        qType=onnx_proto.TensorProto.INT32)
            self._quantized_weights.append(quantized_bias_entry)
            assert (bias_name not in self.quantized_value_map)
            quantized_value = QuantizedValue(bias_name, quantized_bias_name, "", "",
                                            QuantizedValueType.Initializer,
                                            None, onnx_proto.TensorProto.INT32)
            self.quantized_value_map[bias_name] = quantized_value
        return quantized_bias_name
    def quantize_inputs(self, node, indices, initializer_use_weight_qType=True):
        '''
        Given a node, this function quantizes the inputs as follows:
            - If input is an initializer, quantize the initializer data, replace old initializer
              with new initializer
            - Else, add QuantizeLinear nodes to perform quantization
            parameter node: node being quantized in NodeProto format.
            parameter indices: input indices to quantize.
            parameter initializer_use_weight_qType: use the 'weight' dtype from the
              node's config for initializers (otherwise the 'activation' dtype).
            return: (List of quantized input names,
                     List of zero point names used for input quantization,
                     List of scale names used for input quantization,
                     List of new QuantizeLinear nodes created)
        '''
        scale_names = []
        zero_point_names = []
        quantized_input_names = []
        nodes = []
        for input_index in indices:
            node_input = node.input[input_index]
            # Find if this input is already quantized
            if node_input in self.quantized_value_map:
                quantized_value = self.quantized_value_map[node_input]
                scale_names.append(quantized_value.scale_name)
                zero_point_names.append(quantized_value.zp_name)
                quantized_input_names.append(quantized_value.q_name)
                continue
            # Quantize the input
            initializer = find_by_name(node_input, self.model.initializer())
            if initializer is not None:
                # Weight path: quantize the initializer data directly.
                weight = self._get_quantized_weight(initializer,
                                   self.config[node.name]['weight']['dtype'] if \
                                   initializer_use_weight_qType else \
                                   self.config[node.name]['activation']['dtype'])
                # Update graph
                self._update_weight(weight)
                quantized_input_names.append(weight.name + "_quantized")
                zero_point_names.append(weight.name + "_zero_point")
                scale_names.append(weight.name + "_scale")
            else:
                # Add QuantizeLinear node.
                qlinear_node = self.model.find_node_by_name(node_input + "_QuantizeLinear",
                                                            self.new_nodes,
                                                            self.model.graph())
                if qlinear_node is None:
                    quantize_input_nodes = self._get_quantize_input_nodes(node, input_index,
                                           self.config[node.name]['activation']['dtype'])
                    nodes.extend(quantize_input_nodes)
                    qlinear_node = quantize_input_nodes[-1]
                # QuantizeLinear takes scale/zp as inputs; DynamicQuantizeLinear
                # produces them as extra outputs.
                if qlinear_node.op_type == "QuantizeLinear":
                    quantized_input_names.extend(qlinear_node.output)
                    scale_names.append(qlinear_node.input[1])
                    zero_point_names.append(qlinear_node.input[2])
                else:
                    quantized_input_names.append(qlinear_node.output[0])
                    scale_names.append(qlinear_node.output[1])
                    zero_point_names.append(qlinear_node.output[2])
        return (quantized_input_names, zero_point_names, scale_names, nodes)
    def quantize_weight_per_channel(self, weight_name, weight_qType, channel_axis):
        '''
        Quantize an initializer per-channel along *channel_axis*.
            parameter weight_name: name of the weight initializer.
            parameter weight_qType: type to quantize to.
            parameter channel_axis: axis holding the channels.
            return: (quantized name, zero point name, scale name).
        '''
        # Find if this input is already quantized
        if weight_name in self.quantized_value_map:
            quantized_value = self.quantized_value_map[weight_name]
            return (quantized_value.q_name, quantized_value.zp_name, quantized_value.scale_name)
        initializer = find_by_name(weight_name, self.model.initializer())
        if initializer is None:
            raise ValueError("{} is not an initializer", weight_name)
        weights = self.tensor_proto_to_array(initializer)
        channel_count = weights.shape[channel_axis]
        rmin_list = []
        rmax_list = []
        zero_point_list = []
        scale_list = []
        quantized_per_channel_data_list = []
        # Quantize each channel slice independently: one scale/zp per channel.
        for i in range(channel_count):
            per_channel_data = weights.take(i, channel_axis)
            rmin, rmax, zero_point, scale, quantized_per_channel_data = quantize_data(
                per_channel_data.flatten().tolist(), _get_qrange_for_qType(weight_qType,
                self.reduce_range), weight_qType)
            rmin_list.append(rmin)
            rmax_list.append(rmax)
            zero_point_list.append(zero_point)
            scale_list.append(scale)
            quantized_per_channel_data_list.append(quantized_per_channel_data)
        # combine per_channel_data into one
        reshape_dims = list(weights.shape)  # deep copy
        reshape_dims[channel_axis] = 1  # only one per channel for reshape
        quantized_weights = np.asarray(quantized_per_channel_data_list[0]).reshape(reshape_dims)
        for i in range(1, len(quantized_per_channel_data_list)):
            channel_weights = np.asarray(quantized_per_channel_data_list[i]).reshape(reshape_dims)
            quantized_weights = np.concatenate((quantized_weights, channel_weights), channel_axis)
        weight = QuantizedInitializer(initializer.name, initializer, rmin_list, rmax_list,
                                     zero_point_list, scale_list,
                                     weights,
                                     quantized_weights.flatten().tolist(),
                                     channel_axis, weight_qType)
        # Make entry for this quantized weight
        assert (weight.name not in self.quantized_value_map)
        quantized_value = QuantizedValue(weight.name, weight.name + "_quantized",
                                         weight.name + "_scale",
                                         weight.name + "_zero_point",
                                         QuantizedValueType.Initializer,
                                         None, weight_qType)
        self.quantized_value_map[weight.name] = quantized_value
        self._update_weight(weight)
        return (weight.name + "_quantized", weight.name + "_zero_point", weight.name + "_scale")
def _dequantize_value(self, value_name):
    """Build a DequantizeLinear node converting a quantized value back to float32.

    Parameters
    ----------
    value_name : str
        Name of the (possibly quantized) input/output value.

    Returns
    -------
    NodeProto or None
        A new DequantizeLinear node feeding ``value_name``, or None when the
        value is not quantized or a DequantizeLinear node already exists.
    """
    # Values that were never quantized need no dequantization.
    if value_name not in self.quantized_value_map:
        return None
    qv = self.quantized_value_map[value_name]
    dqlinear_name = value_name + "_DequantizeLinear"
    # Reuse an existing DequantizeLinear node when one is already wired in.
    existing = self.model.find_node_by_name(
        dqlinear_name, self.new_nodes, self.model.graph()
    )
    if existing is not None:
        # The pre-existing DQ op must already produce this exact value.
        assert value_name == existing.output[0]
        return None
    return onnx.helper.make_node(
        "DequantizeLinear",
        [qv.q_name, qv.scale_name, qv.zp_name],
        [value_name],
        dqlinear_name,
    )
def _dequantize_outputs(self):
    """Append DequantizeLinear nodes for every quantized graph output.

    Iterates the model's graph outputs and, for each one that was quantized,
    schedules a DequantizeLinear node so the final outputs are float32 again.
    """
    for graph_output in self.model.graph().output:
        dq_node = self._dequantize_value(graph_output.name)
        # None means the output was never quantized or is already dequantized.
        if dq_node is not None:
            self.new_nodes.append(dq_node)
| [
"onnx.helper.make_node",
"lpot.model.onnx_model.ONNXModel",
"lpot.adaptor.ox_utils.registry.CreateDefaultOpQuantizer",
"lpot.adaptor.ox_utils.util.QuantizedInitializer",
"onnx.numpy_helper.from_array",
"numpy.asarray",
"onnxruntime.quantization.quant_utils.get_elem_index",
"onnx.helper.make_tensor",
... | [((2522, 2538), 'lpot.model.onnx_model.ONNXModel', 'ONNXModel', (['model'], {}), '(model)\n', (2531, 2538), False, 'from lpot.model.onnx_model import ONNXModel\n'), ((12341, 12412), 'onnx.numpy_helper.from_array', 'onnx.numpy_helper.from_array', (['packed_weight_np_data', 'packed_weight_name'], {}), '(packed_weight_np_data, packed_weight_name)\n', (12369, 12412), False, 'import onnx\n'), ((12723, 12825), 'onnx.helper.make_tensor', 'onnx.helper.make_tensor', (['scale_name', 'onnx_proto.TensorProto.FLOAT', 'zero_scale_shape', 'weight.scales'], {}), '(scale_name, onnx_proto.TensorProto.FLOAT,\n zero_scale_shape, weight.scales)\n', (12746, 12825), False, 'import onnx\n'), ((12902, 13001), 'onnx.helper.make_tensor', 'onnx.helper.make_tensor', (['zero_point_name', 'zero_point_type', 'zero_scale_shape', 'weight.zero_points'], {}), '(zero_point_name, zero_point_type, zero_scale_shape,\n weight.zero_points)\n', (12925, 13001), False, 'import onnx\n'), ((13781, 13942), 'lpot.adaptor.ox_utils.util.QuantizedInitializer', 'QuantizedInitializer', (['initializer.name', 'initializer', '[rmin]', '[rmax]', '[zero_point]', '[scale]', 'weights_data', 'quantized_weights_data'], {'axis': 'None', 'qType': 'qType'}), '(initializer.name, initializer, [rmin], [rmax], [\n zero_point], [scale], weights_data, quantized_weights_data, axis=None,\n qType=qType)\n', (13801, 13942), False, 'from lpot.adaptor.ox_utils.util import quantize_data_with_scale_zo, quantize_data, QuantizedValue, QuantizedInitializer\n'), ((14258, 14419), 'lpot.adaptor.ox_utils.util.QuantizedValue', 'QuantizedValue', (['weight.name', "(weight.name + '_quantized')", "(weight.name + '_scale')", "(weight.name + '_zero_point')", 'QuantizedValueType.Initializer', 'None', 'qType'], {}), "(weight.name, weight.name + '_quantized', weight.name +\n '_scale', weight.name + '_zero_point', QuantizedValueType.Initializer,\n None, qType)\n", (14272, 14419), False, 'from lpot.adaptor.ox_utils.util import quantize_data_with_scale_zo, 
quantize_data, QuantizedValue, QuantizedInitializer\n'), ((15913, 16020), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""ReduceMin"""', '[input_name]', "[reduce_min_name + ':0']", 'reduce_min_name'], {'keepdims': '(0)'}), "('ReduceMin', [input_name], [reduce_min_name + ':0'],\n reduce_min_name, keepdims=0)\n", (15934, 16020), False, 'import onnx\n'), ((16284, 16391), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""ReduceMax"""', '[input_name]', "[reduce_max_name + ':0']", 'reduce_max_name'], {'keepdims': '(0)'}), "('ReduceMax', [input_name], [reduce_max_name + ':0'],\n reduce_max_name, keepdims=0)\n", (16305, 16391), False, 'import onnx\n'), ((16713, 16826), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Abs"""', '[reduce_min_node.output[0]]', "[reduce_min_abs_name + ':0']", 'reduce_min_abs_name'], {}), "('Abs', [reduce_min_node.output[0]], [\n reduce_min_abs_name + ':0'], reduce_min_abs_name)\n", (16734, 16826), False, 'import onnx\n'), ((17086, 17199), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Abs"""', '[reduce_max_node.output[0]]', "[reduce_max_abs_name + ':0']", 'reduce_max_abs_name'], {}), "('Abs', [reduce_max_node.output[0]], [\n reduce_max_abs_name + ':0'], reduce_max_abs_name)\n", (17107, 17199), False, 'import onnx\n'), ((17468, 17601), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Max"""', '[reduce_min_abs_node.output[0], reduce_max_abs_node.output[0]]', "[abs_max_name + ':0']", 'abs_max_name'], {}), "('Max', [reduce_min_abs_node.output[0],\n reduce_max_abs_node.output[0]], [abs_max_name + ':0'], abs_max_name)\n", (17489, 17601), False, 'import onnx\n'), ((18206, 18330), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Div"""', '[abs_max_node.output[0], self.fixed_qrange_int8_name]', '[input_scale_name]', 'scale_div_name'], {}), "('Div', [abs_max_node.output[0], self.\n fixed_qrange_int8_name], [input_scale_name], scale_div_name)\n", (18227, 18330), False, 'import onnx\n'), ((18558, 18622), 
'onnx.helper.make_tensor', 'onnx.helper.make_tensor', (['self.fixed_zero_zp_name', 'qType', '[]', '[0]'], {}), '(self.fixed_zero_zp_name, qType, [], [0])\n', (18581, 18622), False, 'import onnx\n'), ((19420, 19527), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""ReduceMin"""', '[input_name]', "[reduce_min_name + ':0']", 'reduce_min_name'], {'keepdims': '(0)'}), "('ReduceMin', [input_name], [reduce_min_name + ':0'],\n reduce_min_name, keepdims=0)\n", (19441, 19527), False, 'import onnx\n'), ((19790, 19897), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""ReduceMax"""', '[input_name]', "[reduce_max_name + ':0']", 'reduce_max_name'], {'keepdims': '(0)'}), "('ReduceMax', [input_name], [reduce_max_name + ':0'],\n reduce_max_name, keepdims=0)\n", (19811, 19897), False, 'import onnx\n'), ((20479, 20569), 'onnx.helper.make_tensor', 'onnx.helper.make_tensor', (['self.fixed_zero_name', 'onnx_proto.TensorProto.FLOAT', '[]', '[0.0]'], {}), '(self.fixed_zero_name, onnx_proto.TensorProto.FLOAT,\n [], [0.0])\n', (20502, 20569), False, 'import onnx\n'), ((20811, 20941), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Sub"""', '[reduce_max_node.output[0], reduce_min_node.output[0]]', "[scale_sub_name + ':0']", 'scale_sub_name'], {}), "('Sub', [reduce_max_node.output[0], reduce_min_node.\n output[0]], [scale_sub_name + ':0'], scale_sub_name)\n", (20832, 20941), False, 'import onnx\n'), ((21191, 21318), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Div"""', '[scale_sub_node.output[0], self.fixed_qrange_uint8_name]', '[input_scale_name]', 'scale_div_name'], {}), "('Div', [scale_sub_node.output[0], self.\n fixed_qrange_uint8_name], [input_scale_name], scale_div_name)\n", (21212, 21318), False, 'import onnx\n'), ((21591, 21710), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Sub"""', '[self.fixed_zero_name, reduce_min_node.output[0]]', "[zp_sub_name + ':0']", 'zp_sub_name'], {}), "('Sub', [self.fixed_zero_name, reduce_min_node.output[\n 0]], 
[zp_sub_name + ':0'], zp_sub_name)\n", (21612, 21710), False, 'import onnx\n'), ((21938, 22049), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Div"""', '[zp_sub_node.output[0], input_scale_name]', "[zp_div_name + ':0']", 'zp_div_name'], {}), "('Div', [zp_sub_node.output[0], input_scale_name], [\n zp_div_name + ':0'], zp_div_name)\n", (21959, 22049), False, 'import onnx\n'), ((22280, 22373), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Floor"""', 'zp_div_node.output', "[zp_floor_name + ':0']", 'zp_floor_name'], {}), "('Floor', zp_div_node.output, [zp_floor_name + ':0'],\n zp_floor_name)\n", (22301, 22373), False, 'import onnx\n'), ((22564, 22660), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Cast"""', 'zp_floor_node.output', '[input_zp_name]', 'zp_cast_name'], {'to': 'qType'}), "('Cast', zp_floor_node.output, [input_zp_name],\n zp_cast_name, to=qType)\n", (22585, 22660), False, 'import onnx\n'), ((24002, 24100), 'onnx.helper.make_tensor', 'onnx.helper.make_tensor', (['zero_point_name', 'zero_point_type', 'zero_point_shape', 'zero_point_values'], {}), '(zero_point_name, zero_point_type, zero_point_shape,\n zero_point_values)\n', (24025, 24100), False, 'import onnx\n'), ((24205, 24301), 'onnx.helper.make_tensor', 'onnx.helper.make_tensor', (['scale_name', 'onnx_proto.TensorProto.FLOAT', 'scale_shape', 'scale_values'], {}), '(scale_name, onnx_proto.TensorProto.FLOAT,\n scale_shape, scale_values)\n', (24228, 24301), False, 'import onnx\n'), ((28950, 29056), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Reshape"""', 'reshape_input', '[reshape_op_output]', "(quantized_bias_name + 'reshape')"], {}), "('Reshape', reshape_input, [reshape_op_output], \n quantized_bias_name + 'reshape')\n", (28971, 29056), False, 'import onnx\n'), ((29335, 29437), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Add"""', 'bias_add_input', '[add_node_output]', "(quantized_bias_name + 'bias_add')"], {}), "('Add', bias_add_input, [add_node_output], 
\n quantized_bias_name + 'bias_add')\n", (29356, 29437), False, 'import onnx\n'), ((30142, 30265), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Mul"""', '[input_scale_name, weight_scale_name]', "[bias_name + '_scale']", "(bias_name + '_scale_node')"], {}), "('Mul', [input_scale_name, weight_scale_name], [\n bias_name + '_scale'], bias_name + '_scale_node')\n", (30163, 30265), False, 'import onnx\n'), ((30483, 30612), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Div"""', '[bias_name, bias_scale_node.output[0]]', "[bias_name + '_tmp_quant:0']", "(bias_name + '_tmp_qaunt')"], {}), "('Div', [bias_name, bias_scale_node.output[0]], [\n bias_name + '_tmp_quant:0'], bias_name + '_tmp_qaunt')\n", (30504, 30612), False, 'import onnx\n'), ((30789, 30914), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Floor"""', 'quantize_bias_node.output', "[bias_name + '_quant_rounded:0']", "(bias_name + '_quant_rounded')"], {}), "('Floor', quantize_bias_node.output, [bias_name +\n '_quant_rounded:0'], bias_name + '_quant_rounded')\n", (30810, 30914), False, 'import onnx\n'), ((31086, 31210), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Cast"""', 'bias_rounded_node.output', '[quantized_bias_name]', "(quantized_bias_name + '_node')"], {'to': 'qType'}), "('Cast', bias_rounded_node.output, [\n quantized_bias_name], quantized_bias_name + '_node', to=qType)\n", (31107, 31210), False, 'import onnx\n'), ((40633, 40801), 'lpot.adaptor.ox_utils.util.QuantizedValue', 'QuantizedValue', (['weight.name', "(weight.name + '_quantized')", "(weight.name + '_scale')", "(weight.name + '_zero_point')", 'QuantizedValueType.Initializer', 'None', 'weight_qType'], {}), "(weight.name, weight.name + '_quantized', weight.name +\n '_scale', weight.name + '_zero_point', QuantizedValueType.Initializer,\n None, weight_qType)\n", (40647, 40801), False, 'from lpot.adaptor.ox_utils.util import quantize_data_with_scale_zo, quantize_data, QuantizedValue, QuantizedInitializer\n'), 
((9560, 9599), 'onnx.numpy_helper.to_array', 'onnx.numpy_helper.to_array', (['initializer'], {}), '(initializer)\n', (9586, 9599), False, 'import onnx\n'), ((25614, 25739), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""QuantizeLinear"""', '[input_name, scale_name, zp_name]', '[output_name]', "(input_name + '_QuantizeLinear')"], {}), "('QuantizeLinear', [input_name, scale_name, zp_name],\n [output_name], input_name + '_QuantizeLinear')\n", (25635, 25739), False, 'import onnx\n'), ((33700, 33763), 'onnx.numpy_helper.from_array', 'onnx.numpy_helper.from_array', (['bias_np_data', 'quantized_bias_name'], {}), '(bias_np_data, quantized_bias_name)\n', (33728, 33763), False, 'import onnx\n'), ((33995, 34141), 'lpot.adaptor.ox_utils.util.QuantizedInitializer', 'QuantizedInitializer', (['bias_name', 'bias_initializer', '[0]', '[0]', '[0]', '[bias_scale]', 'bias_data', 'quantized_data'], {'qType': 'onnx_proto.TensorProto.INT32'}), '(bias_name, bias_initializer, [0], [0], [0], [\n bias_scale], bias_data, quantized_data, qType=onnx_proto.TensorProto.INT32)\n', (34015, 34141), False, 'from lpot.adaptor.ox_utils.util import quantize_data_with_scale_zo, quantize_data, QuantizedValue, QuantizedInitializer\n'), ((34577, 34704), 'lpot.adaptor.ox_utils.util.QuantizedValue', 'QuantizedValue', (['bias_name', 'quantized_bias_name', '""""""', '""""""', 'QuantizedValueType.Initializer', 'None', 'onnx_proto.TensorProto.INT32'], {}), "(bias_name, quantized_bias_name, '', '', QuantizedValueType.\n Initializer, None, onnx_proto.TensorProto.INT32)\n", (34591, 34704), False, 'from lpot.adaptor.ox_utils.util import quantize_data_with_scale_zo, quantize_data, QuantizedValue, QuantizedInitializer\n'), ((40081, 40147), 'numpy.concatenate', 'np.concatenate', (['(quantized_weights, channel_weights)', 'channel_axis'], {}), '((quantized_weights, channel_weights), channel_axis)\n', (40095, 40147), True, 'import numpy as np\n'), ((8685, 8714), 'lpot.adaptor.ox_utils.registry.CreateOpQuantizer', 
'CreateOpQuantizer', (['self', 'node'], {}), '(self, node)\n', (8702, 8714), False, 'from lpot.adaptor.ox_utils.registry import CreateOpQuantizer, CreateDefaultOpQuantizer\n'), ((8764, 8800), 'lpot.adaptor.ox_utils.registry.CreateDefaultOpQuantizer', 'CreateDefaultOpQuantizer', (['self', 'node'], {}), '(self, node)\n', (8788, 8800), False, 'from lpot.adaptor.ox_utils.registry import CreateOpQuantizer, CreateDefaultOpQuantizer\n'), ((12090, 12185), 'numpy.asarray', 'np.asarray', (['weight.quantized_data'], {'dtype': 'onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[weight.qType]'}), '(weight.quantized_data, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE\n [weight.qType])\n', (12100, 12185), True, 'import numpy as np\n'), ((26000, 26125), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""QuantizeLinear"""', '[input_name, scale_name, zp_name]', '[output_name]', "(input_name + '_QuantizeLinear')"], {}), "('QuantizeLinear', [input_name, scale_name, zp_name],\n [output_name], input_name + '_QuantizeLinear')\n", (26021, 26125), False, 'import onnx\n'), ((39816, 39862), 'numpy.asarray', 'np.asarray', (['quantized_per_channel_data_list[0]'], {}), '(quantized_per_channel_data_list[0])\n', (39826, 39862), True, 'import numpy as np\n'), ((42303, 42394), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""DequantizeLinear"""', 'dqlinear_inputs', '[value_name]', 'dqlinear_name'], {}), "('DequantizeLinear', dqlinear_inputs, [value_name],\n dqlinear_name)\n", (42324, 42394), False, 'import onnx\n'), ((4908, 4940), 'onnx.helper.make_opsetid', 'onnx.helper.make_opsetid', (['""""""', '(11)'], {}), "('', 11)\n", (4932, 4940), False, 'import onnx\n'), ((6927, 6969), 'onnx.numpy_helper.to_array', 'onnx.numpy_helper.to_array', (['initializer_zp'], {}), '(initializer_zp)\n', (6953, 6969), False, 'import onnx\n'), ((6991, 7036), 'onnx.numpy_helper.to_array', 'onnx.numpy_helper.to_array', (['initializer_scale'], {}), '(initializer_scale)\n', (7017, 7036), False, 'import onnx\n'), ((7208, 7260), 
'onnxruntime.quantization.quant_utils.get_elem_index', 'get_elem_index', (['next_node.output[0]', 'succ_node.input'], {}), '(next_node.output[0], succ_node.input)\n', (7222, 7260), False, 'from onnxruntime.quantization.quant_utils import find_by_name, get_elem_index, get_mul_node, generate_identified_filename, attribute_to_kwarg, type_to_name\n'), ((26704, 26843), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""DynamicQuantizeLinear"""', '[input_name]', '[output_name, scale_name, zeropoint_name]', "(input_name + '_QuantizeLinear')"], {}), "('DynamicQuantizeLinear', [input_name], [output_name,\n scale_name, zeropoint_name], input_name + '_QuantizeLinear')\n", (26725, 26843), False, 'import onnx\n'), ((27274, 27399), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""QuantizeLinear"""', '[input_name, scale_name, zp_name]', '[output_name]', "(input_name + '_QuantizeLinear')"], {}), "('QuantizeLinear', [input_name, scale_name, zp_name],\n [output_name], input_name + '_QuantizeLinear')\n", (27295, 27399), False, 'import onnx\n'), ((33559, 33601), 'numpy.asarray', 'np.asarray', (['quantized_data'], {'dtype': 'np.int32'}), '(quantized_data, dtype=np.int32)\n', (33569, 33601), True, 'import numpy as np\n'), ((39980, 40026), 'numpy.asarray', 'np.asarray', (['quantized_per_channel_data_list[i]'], {}), '(quantized_per_channel_data_list[i])\n', (39990, 40026), True, 'import numpy as np\n'), ((33432, 33453), 'numpy.asarray', 'np.asarray', (['bias_data'], {}), '(bias_data)\n', (33442, 33453), True, 'import numpy as np\n')] |
import itertools
import string
import numpy as np
from numpy import random
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.plotting as plotting
""" Test cases for .boxplot method """
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
    """Tests for ``DataFrame.boxplot`` and ``df.plot(kind="box")``."""

    @pytest.mark.slow
    def test_boxplot_legacy1(self):
        """Smoke-test boxplot across column/by combinations (legacy API)."""
        df = DataFrame(
            np.random.randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        df["indic"] = ["foo", "bar"] * 3
        df["indic2"] = ["foo", "bar", "foo"] * 2
        _check_plot_works(df.boxplot, return_type="dict")
        _check_plot_works(df.boxplot, column=["one", "two"], return_type="dict")
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, column=["one", "two"], by="indic")
        _check_plot_works(df.boxplot, column="one", by=["indic", "indic2"])
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by="indic")
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by=["indic", "indic2"])
        _check_plot_works(plotting._core.boxplot, data=df["one"], return_type="dict")
        _check_plot_works(df.boxplot, notch=1, return_type="dict")
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by="indic", notch=1)

    @pytest.mark.slow
    def test_boxplot_legacy2(self):
        """Check ax reuse and returned-lines bookkeeping for boxplot."""
        df = DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
        df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
        df["Y"] = Series(["A"] * 10)
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by="X")
        # When ax is supplied and required number of axes is 1,
        # passed ax should be used:
        fig, ax = self.plt.subplots()
        axes = df.boxplot("Col1", by="X", ax=ax)
        ax_axes = ax.axes
        assert ax_axes is axes
        fig, ax = self.plt.subplots()
        axes = df.groupby("Y").boxplot(ax=ax, return_type="axes")
        ax_axes = ax.axes
        assert ax_axes is axes["A"]
        # Multiple columns with an ax argument should use same figure
        fig, ax = self.plt.subplots()
        with tm.assert_produces_warning(UserWarning):
            axes = df.boxplot(
                column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
            )
        assert axes["Col1"].get_figure() is fig
        # When by is None, check that all relevant lines are present in the
        # dict
        fig, ax = self.plt.subplots()
        d = df.boxplot(ax=ax, return_type="dict")
        lines = list(itertools.chain.from_iterable(d.values()))
        assert len(ax.get_lines()) == len(lines)

    @pytest.mark.slow
    def test_boxplot_return_type_none(self):
        """return_type=None with by=None should yield a single Axes."""
        # GH 12216; return_type=None & by=None -> axes
        result = self.hist_df.boxplot()
        assert isinstance(result, self.plt.Axes)

    @pytest.mark.slow
    def test_boxplot_return_type_legacy(self):
        """Each documented return_type ('dict'/'axes'/'both') is honored."""
        # API change in https://github.com/pandas-dev/pandas/pull/7096
        import matplotlib as mpl  # noqa

        df = DataFrame(
            np.random.randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        with pytest.raises(ValueError):
            df.boxplot(return_type="NOTATYPE")
        result = df.boxplot()
        self._check_box_return_type(result, "axes")
        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type="dict")
        self._check_box_return_type(result, "dict")
        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type="axes")
        self._check_box_return_type(result, "axes")
        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type="both")
        self._check_box_return_type(result, "both")

    @pytest.mark.slow
    def test_boxplot_axis_limits(self):
        """y-limits must cover each column's data; axes share y via _sharey."""
        def _check_ax_limits(col, ax):
            y_min, y_max = ax.get_ylim()
            assert y_min <= col.min()
            assert y_max >= col.max()

        df = self.hist_df.copy()
        df["age"] = np.random.randint(1, 20, df.shape[0])
        # One full row
        height_ax, weight_ax = df.boxplot(["height", "weight"], by="category")
        _check_ax_limits(df["height"], height_ax)
        _check_ax_limits(df["weight"], weight_ax)
        assert weight_ax._sharey == height_ax
        # Two rows, one partial
        p = df.boxplot(["height", "weight", "age"], by="category")
        height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
        dummy_ax = p[1, 1]
        _check_ax_limits(df["height"], height_ax)
        _check_ax_limits(df["weight"], weight_ax)
        _check_ax_limits(df["age"], age_ax)
        assert weight_ax._sharey == height_ax
        assert age_ax._sharey == height_ax
        assert dummy_ax._sharey is None

    @pytest.mark.slow
    def test_boxplot_empty_column(self):
        """An all-NaN column must not break boxplot."""
        df = DataFrame(np.random.randn(20, 4))
        df.loc[:, 0] = np.nan
        _check_plot_works(df.boxplot, return_type="axes")

    @pytest.mark.slow
    def test_figsize(self):
        """figsize keyword propagates to the created figure."""
        df = DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
        result = df.boxplot(return_type="axes", figsize=(12, 8))
        assert result.figure.bbox_inches.width == 12
        assert result.figure.bbox_inches.height == 8

    def test_fontsize(self):
        """fontsize keyword is applied to both tick-label axes."""
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
        self._check_ticks_props(
            df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16
        )

    def test_boxplot_numeric_data(self):
        """Only numeric columns are plotted; datetimes/strings are dropped."""
        # GH 22799
        df = DataFrame(
            {
                "a": date_range("2012-01-01", periods=100),
                "b": np.random.randn(100),
                "c": np.random.randn(100) + 2,
                "d": date_range("2012-01-01", periods=100).astype(str),
                "e": date_range("2012-01-01", periods=100, tz="UTC"),
                "f": timedelta_range("1 days", periods=100),
            }
        )
        ax = df.plot(kind="box")
        assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]

    @pytest.mark.parametrize(
        "colors_kwd, expected",
        [
            (
                dict(boxes="r", whiskers="b", medians="g", caps="c"),
                dict(boxes="r", whiskers="b", medians="g", caps="c"),
            ),
            (dict(boxes="r"), dict(boxes="r")),
            ("r", dict(boxes="r", whiskers="r", medians="r", caps="r")),
        ],
    )
    def test_color_kwd(self, colors_kwd, expected):
        """color keyword (dict or scalar) colors the matching artists."""
        # GH: 26214
        df = DataFrame(random.rand(10, 2))
        result = df.boxplot(color=colors_kwd, return_type="dict")
        for k, v in expected.items():
            assert result[k][0].get_color() == v

    @pytest.mark.parametrize(
        "dict_colors, msg",
        [(dict(boxes="r", invalid_key="r"), "invalid key 'invalid_key'")],
    )
    def test_color_kwd_errors(self, dict_colors, msg):
        """Unknown keys in the color dict raise ValueError."""
        # GH: 26214
        df = DataFrame(random.rand(10, 2))
        with pytest.raises(ValueError, match=msg):
            df.boxplot(color=dict_colors, return_type="dict")

    @pytest.mark.parametrize(
        "props, expected",
        [
            ("boxprops", "boxes"),
            ("whiskerprops", "whiskers"),
            ("capprops", "caps"),
            ("medianprops", "medians"),
        ],
    )
    def test_specified_props_kwd(self, props, expected):
        """*props keywords (boxprops, ...) style the corresponding artists."""
        # GH 30346
        df = DataFrame({k: np.random.random(100) for k in "ABC"})
        kwd = {props: dict(color="C1")}
        result = df.boxplot(return_type="dict", **kwd)
        assert result[expected][0].get_color() == "C1"
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
    """Tests for ``DataFrameGroupBy.boxplot`` and grouped ``DataFrame.boxplot``."""

    @pytest.mark.slow
    def test_boxplot_legacy1(self):
        """Grouped boxplot: one subplot per group, or a single combined plot."""
        grouped = self.hist_df.groupby(by="gender")
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    @pytest.mark.slow
    def test_boxplot_legacy2(self):
        """Grouping on a MultiIndex level produces one subplot per group."""
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        grouped = df.groupby(level=1)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    @pytest.mark.slow
    def test_boxplot_legacy3(self):
        """Grouping an unstacked frame along axis=1 also plots per group."""
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        grouped = df.unstack(level=1).groupby(level=0, axis=1)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    @pytest.mark.slow
    def test_grouped_plot_fignums(self):
        """Grouped plot/boxplot/hist open the expected number of figures."""
        n = 10
        weight = Series(np.random.normal(166, 20, size=n))
        height = Series(np.random.normal(60, 10, size=n))
        with tm.RNGContext(42):
            gender = np.random.choice(["male", "female"], size=n)
        df = DataFrame({"height": height, "weight": weight, "gender": gender})
        gb = df.groupby("gender")
        res = gb.plot()
        assert len(self.plt.get_fignums()) == 2
        assert len(res) == 2
        tm.close()
        res = gb.boxplot(return_type="axes")
        assert len(self.plt.get_fignums()) == 1
        assert len(res) == 2
        tm.close()
        # now works with GH 5610 as gender is excluded
        res = df.groupby("gender").hist()
        tm.close()

    @pytest.mark.slow
    def test_grouped_box_return_type(self):
        """return_type semantics hold for both by= and groupby paths."""
        df = self.hist_df
        # old style: return_type=None
        result = df.boxplot(by="gender")
        assert isinstance(result, np.ndarray)
        self._check_box_return_type(
            result, None, expected_keys=["height", "weight", "category"]
        )
        # now for groupby
        result = df.groupby("gender").boxplot(return_type="dict")
        self._check_box_return_type(result, "dict", expected_keys=["Male", "Female"])
        columns2 = "X B C D A G Y N Q O".split()
        df2 = DataFrame(random.randn(50, 10), columns=columns2)
        categories2 = "A B C D E F G H I J".split()
        df2["category"] = categories2 * 5
        for t in ["dict", "axes", "both"]:
            returned = df.groupby("classroom").boxplot(return_type=t)
            self._check_box_return_type(returned, t, expected_keys=["A", "B", "C"])
            returned = df.boxplot(by="classroom", return_type=t)
            self._check_box_return_type(
                returned, t, expected_keys=["height", "weight", "category"]
            )
            returned = df2.groupby("category").boxplot(return_type=t)
            self._check_box_return_type(returned, t, expected_keys=categories2)
            returned = df2.boxplot(by="category", return_type=t)
            self._check_box_return_type(returned, t, expected_keys=columns2)

    @pytest.mark.slow
    def test_grouped_box_layout(self):
        """layout keyword: validation errors and resulting subplot grids."""
        df = self.hist_df
        msg = "Layout of 1x1 must be larger than required size 2"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1))
        msg = "The 'layout' keyword is not supported when 'by' is None"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(
                column=["height", "weight", "category"],
                layout=(2, 1),
                return_type="dict",
            )
        msg = "At least one dimension of layout must be positive"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1))
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("gender").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
        # GH 6769
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("classroom").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        # GH 5897
        axes = df.boxplot(
            column=["height", "weight", "category"], by="gender", return_type="axes"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        for ax in [axes["height"]]:
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible([ax.xaxis.get_label()], visible=False)
        for ax in [axes["weight"], axes["category"]]:
            self._check_visible(ax.get_xticklabels())
            self._check_visible([ax.xaxis.get_label()])
        box = df.groupby("classroom").boxplot(
            column=["height", "weight", "category"], return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot,
                column="height",
                layout=(3, 2),
                return_type="dict",
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot,
                column="height",
                layout=(3, -1),
                return_type="dict",
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
        box = df.boxplot(
            column=["height", "weight", "category"], by="gender", layout=(4, 1)
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))
        box = df.boxplot(
            column=["height", "weight", "category"], by="gender", layout=(-1, 1)
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))
        box = df.groupby("classroom").boxplot(
            column=["height", "weight", "category"], layout=(1, 4), return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))
        box = df.groupby("classroom").boxplot(  # noqa
            column=["height", "weight", "category"], layout=(1, -1), return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))

    @pytest.mark.slow
    def test_grouped_box_multiple_axes(self):
        """Passing pre-created axes (ax=...) places plots on those axes."""
        # GH 6970, GH 7069
        df = self.hist_df
        # check warning to ignore sharex / sharey
        # this check should be done in the first function which
        # passes multiple axes to plot, hist or boxplot
        # location should be changed if other test is added
        # which has earlier alphabetical order
        with tm.assert_produces_warning(UserWarning):
            fig, axes = self.plt.subplots(2, 2)
            df.groupby("category").boxplot(column="height", return_type="axes", ax=axes)
            self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
        fig, axes = self.plt.subplots(2, 3)
        with tm.assert_produces_warning(UserWarning):
            returned = df.boxplot(
                column=["height", "weight", "category"],
                by="gender",
                return_type="axes",
                ax=axes[0],
            )
        returned = np.array(list(returned.values))
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        tm.assert_numpy_array_equal(returned, axes[0])
        assert returned[0].figure is fig
        # draw on second row
        with tm.assert_produces_warning(UserWarning):
            returned = df.groupby("classroom").boxplot(
                column=["height", "weight", "category"], return_type="axes", ax=axes[1]
            )
        returned = np.array(list(returned.values))
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        tm.assert_numpy_array_equal(returned, axes[1])
        assert returned[0].figure is fig
        with pytest.raises(ValueError):
            fig, axes = self.plt.subplots(2, 3)
            # pass different number of axes from required
            with tm.assert_produces_warning(UserWarning):
                axes = df.groupby("classroom").boxplot(ax=axes)

    def test_fontsize(self):
        """fontsize keyword is applied when by= grouping is used."""
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]})
        self._check_ticks_props(
            df.boxplot("a", by="b", fontsize=16), xlabelsize=16, ylabelsize=16
        )
| [
"pandas.Series",
"numpy.random.normal",
"numpy.random.rand",
"pandas._testing.RNGContext",
"numpy.random.choice",
"numpy.random.random",
"pandas.tests.plotting.common._check_plot_works",
"pandas.timedelta_range",
"pytest.mark.parametrize",
"numpy.random.randint",
"pandas._testing.close",
"pand... | [((7597, 7753), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""props, expected"""', "[('boxprops', 'boxes'), ('whiskerprops', 'whiskers'), ('capprops', 'caps'),\n ('medianprops', 'medians')]"], {}), "('props, expected', [('boxprops', 'boxes'), (\n 'whiskerprops', 'whiskers'), ('capprops', 'caps'), ('medianprops',\n 'medians')])\n", (7620, 7753), False, 'import pytest\n'), ((780, 829), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['df.boxplot'], {'return_type': '"""dict"""'}), "(df.boxplot, return_type='dict')\n", (797, 829), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((838, 910), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['df.boxplot'], {'column': "['one', 'two']", 'return_type': '"""dict"""'}), "(df.boxplot, column=['one', 'two'], return_type='dict')\n", (855, 910), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((1121, 1188), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['df.boxplot'], {'column': '"""one"""', 'by': "['indic', 'indic2']"}), "(df.boxplot, column='one', by=['indic', 'indic2'])\n", (1138, 1188), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((1425, 1502), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['plotting._core.boxplot'], {'data': "df['one']", 'return_type': '"""dict"""'}), "(plotting._core.boxplot, data=df['one'], return_type='dict')\n", (1442, 1502), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((1511, 1569), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['df.boxplot'], {'notch': '(1)', 'return_type': '"""dict"""'}), "(df.boxplot, notch=1, return_type='dict')\n", (1528, 1569), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((1836, 1894), 'pandas.Series', 'Series', (["['A', 'A', 'A', 'A', 
'A', 'B', 'B', 'B', 'B', 'B']"], {}), "(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])\n", (1842, 1894), False, 'from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range\n'), ((1913, 1931), 'pandas.Series', 'Series', (["(['A'] * 10)"], {}), "(['A'] * 10)\n", (1919, 1931), False, 'from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range\n'), ((4541, 4578), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)', 'df.shape[0]'], {}), '(1, 20, df.shape[0])\n', (4558, 4578), True, 'import numpy as np\n'), ((5442, 5491), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['df.boxplot'], {'return_type': '"""axes"""'}), "(df.boxplot, return_type='axes')\n", (5459, 5491), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((5838, 5874), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 2, 3, 4, 5, 6]}"], {}), "({'a': [1, 2, 3, 4, 5, 6]})\n", (5847, 5874), False, 'from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range\n'), ((8518, 8588), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['grouped.boxplot'], {'subplots': '(False)', 'return_type': '"""axes"""'}), "(grouped.boxplot, subplots=False, return_type='axes')\n", (8535, 8588), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((9115, 9185), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['grouped.boxplot'], {'subplots': '(False)', 'return_type': '"""axes"""'}), "(grouped.boxplot, subplots=False, return_type='axes')\n", (9132, 9185), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((9735, 9805), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['grouped.boxplot'], {'subplots': '(False)', 'return_type': '"""axes"""'}), "(grouped.boxplot, subplots=False, return_type='axes')\n", (9752, 9805), False, 'from pandas.tests.plotting.common import TestPlotBase, 
_check_plot_works\n'), ((10177, 10242), 'pandas.DataFrame', 'DataFrame', (["{'height': height, 'weight': weight, 'gender': gender}"], {}), "({'height': height, 'weight': weight, 'gender': gender})\n", (10186, 10242), False, 'from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range\n'), ((10387, 10397), 'pandas._testing.close', 'tm.close', ([], {}), '()\n', (10395, 10397), True, 'import pandas._testing as tm\n'), ((10529, 10539), 'pandas._testing.close', 'tm.close', ([], {}), '()\n', (10537, 10539), True, 'import pandas._testing as tm\n'), ((10646, 10656), 'pandas._testing.close', 'tm.close', ([], {}), '()\n', (10654, 10656), True, 'import pandas._testing as tm\n'), ((17125, 17171), 'pandas._testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['returned', 'axes[0]'], {}), '(returned, axes[0])\n', (17152, 17171), True, 'import pandas._testing as tm\n'), ((17582, 17628), 'pandas._testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['returned', 'axes[1]'], {}), '(returned, axes[1])\n', (17609, 17628), True, 'import pandas._testing as tm\n'), ((17982, 18043), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 2, 3, 4, 5, 6], 'b': [0, 0, 0, 1, 1, 1]}"], {}), "({'a': [1, 2, 3, 4, 5, 6], 'b': [0, 0, 0, 1, 1, 1]})\n", (17991, 18043), False, 'from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range\n'), ((545, 566), 'numpy.random.randn', 'np.random.randn', (['(6)', '(4)'], {}), '(6, 4)\n', (560, 566), True, 'import numpy as np\n'), ((995, 1034), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (1021, 1034), True, 'import pandas._testing as tm\n'), ((1048, 1112), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['df.boxplot'], {'column': "['one', 'two']", 'by': '"""indic"""'}), "(df.boxplot, column=['one', 'two'], by='indic')\n", (1065, 1112), False, 'from pandas.tests.plotting.common import TestPlotBase, 
_check_plot_works\n'), ((1202, 1241), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (1228, 1241), True, 'import pandas._testing as tm\n'), ((1255, 1296), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['df.boxplot'], {'by': '"""indic"""'}), "(df.boxplot, by='indic')\n", (1272, 1296), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((1310, 1349), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (1336, 1349), True, 'import pandas._testing as tm\n'), ((1363, 1416), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['df.boxplot'], {'by': "['indic', 'indic2']"}), "(df.boxplot, by=['indic', 'indic2'])\n", (1380, 1416), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((1583, 1622), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (1609, 1622), True, 'import pandas._testing as tm\n'), ((1636, 1686), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['df.boxplot'], {'by': '"""indic"""', 'notch': '(1)'}), "(df.boxplot, by='indic', notch=1)\n", (1653, 1686), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((1769, 1790), 'numpy.random.rand', 'np.random.rand', (['(10)', '(2)'], {}), '(10, 2)\n', (1783, 1790), True, 'import numpy as np\n'), ((1945, 1984), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (1971, 1984), True, 'import pandas._testing as tm\n'), ((1998, 2035), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['df.boxplot'], {'by': '"""X"""'}), "(df.boxplot, by='X')\n", (2015, 2035), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((2570, 2609), 
'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (2596, 2609), True, 'import pandas._testing as tm\n'), ((3503, 3524), 'numpy.random.randn', 'np.random.randn', (['(6)', '(4)'], {}), '(6, 4)\n', (3518, 3524), True, 'import numpy as np\n'), ((3652, 3677), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3665, 3677), False, 'import pytest\n'), ((3823, 3856), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['(False)'], {}), '(False)\n', (3849, 3856), True, 'import pandas._testing as tm\n'), ((3976, 4009), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['(False)'], {}), '(False)\n', (4002, 4009), True, 'import pandas._testing as tm\n'), ((4129, 4162), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['(False)'], {}), '(False)\n', (4155, 4162), True, 'import pandas._testing as tm\n'), ((5380, 5402), 'numpy.random.randn', 'np.random.randn', (['(20)', '(4)'], {}), '(20, 4)\n', (5395, 5402), True, 'import numpy as np\n'), ((5566, 5587), 'numpy.random.rand', 'np.random.rand', (['(10)', '(5)'], {}), '(10, 5)\n', (5580, 5587), True, 'import numpy as np\n'), ((7047, 7065), 'numpy.random.rand', 'random.rand', (['(10)', '(2)'], {}), '(10, 2)\n', (7058, 7065), False, 'from numpy import random\n'), ((7458, 7476), 'numpy.random.rand', 'random.rand', (['(10)', '(2)'], {}), '(10, 2)\n', (7469, 7476), False, 'from numpy import random\n'), ((7491, 7527), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (7504, 7527), False, 'import pytest\n'), ((8311, 8350), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (8337, 8350), True, 'import pandas._testing as tm\n'), ((8371, 8425), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['grouped.boxplot'], {'return_type': '"""axes"""'}), 
"(grouped.boxplot, return_type='axes')\n", (8388, 8425), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((8794, 8815), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (8808, 8815), True, 'import numpy as np\n'), ((8906, 8945), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (8932, 8945), True, 'import pandas._testing as tm\n'), ((8966, 9020), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['grouped.boxplot'], {'return_type': '"""axes"""'}), "(grouped.boxplot, return_type='axes')\n", (8983, 9020), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((9391, 9412), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (9405, 9412), True, 'import numpy as np\n'), ((9528, 9567), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (9554, 9567), True, 'import pandas._testing as tm\n'), ((9588, 9642), 'pandas.tests.plotting.common._check_plot_works', '_check_plot_works', (['grouped.boxplot'], {'return_type': '"""axes"""'}), "(grouped.boxplot, return_type='axes')\n", (9605, 9642), False, 'from pandas.tests.plotting.common import TestPlotBase, _check_plot_works\n'), ((9973, 10006), 'numpy.random.normal', 'np.random.normal', (['(166)', '(20)'], {'size': 'n'}), '(166, 20, size=n)\n', (9989, 10006), True, 'import numpy as np\n'), ((10032, 10064), 'numpy.random.normal', 'np.random.normal', (['(60)', '(10)'], {'size': 'n'}), '(60, 10, size=n)\n', (10048, 10064), True, 'import numpy as np\n'), ((10079, 10096), 'pandas._testing.RNGContext', 'tm.RNGContext', (['(42)'], {}), '(42)\n', (10092, 10096), True, 'import pandas._testing as tm\n'), ((10119, 10163), 'numpy.random.choice', 'np.random.choice', (["['male', 'female']"], {'size': 'n'}), "(['male', 'female'], size=n)\n", (10135, 10163), True, 'import numpy as 
np\n'), ((11249, 11269), 'numpy.random.randn', 'random.randn', (['(50)', '(10)'], {}), '(50, 10)\n', (11261, 11269), False, 'from numpy import random\n'), ((12240, 12276), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (12253, 12276), False, 'import pytest\n'), ((12445, 12481), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (12458, 12481), False, 'import pytest\n'), ((12725, 12761), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (12738, 12761), False, 'import pytest\n'), ((12931, 12970), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (12957, 12970), True, 'import pandas._testing as tm\n'), ((13198, 13237), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (13224, 13237), True, 'import pandas._testing as tm\n'), ((13485, 13524), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (13511, 13524), True, 'import pandas._testing as tm\n'), ((14524, 14563), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (14550, 14563), True, 'import pandas._testing as tm\n'), ((14856, 14895), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (14882, 14895), True, 'import pandas._testing as tm\n'), ((16439, 16478), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (16465, 16478), True, 'import pandas._testing as tm\n'), ((16758, 16797), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (16784, 16797), True, 'import pandas._testing as tm\n'), ((17256, 17295), 
'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (17282, 17295), True, 'import pandas._testing as tm\n'), ((17684, 17709), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17697, 17709), False, 'import pytest\n'), ((6109, 6146), 'pandas.date_range', 'date_range', (['"""2012-01-01"""'], {'periods': '(100)'}), "('2012-01-01', periods=100)\n", (6119, 6146), False, 'from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range\n'), ((6169, 6189), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (6184, 6189), True, 'import numpy as np\n'), ((6331, 6378), 'pandas.date_range', 'date_range', (['"""2012-01-01"""'], {'periods': '(100)', 'tz': '"""UTC"""'}), "('2012-01-01', periods=100, tz='UTC')\n", (6341, 6378), False, 'from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range\n'), ((6401, 6439), 'pandas.timedelta_range', 'timedelta_range', (['"""1 days"""'], {'periods': '(100)'}), "('1 days', periods=100)\n", (6416, 6439), False, 'from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range\n'), ((7930, 7951), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (7946, 7951), True, 'import numpy as np\n'), ((8823, 8853), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (['tuples'], {}), '(tuples)\n', (8845, 8853), False, 'from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range\n'), ((9420, 9450), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (['tuples'], {}), '(tuples)\n', (9442, 9450), False, 'from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range\n'), ((17834, 17873), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (17860, 17873), True, 'import pandas._testing as tm\n'), ((6212, 6232), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', 
(6227, 6232), True, 'import numpy as np\n'), ((6259, 6296), 'pandas.date_range', 'date_range', (['"""2012-01-01"""'], {'periods': '(100)'}), "('2012-01-01', periods=100)\n", (6269, 6296), False, 'from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range\n')] |
# Import packages.
import cvxpy as cp
import numpy as np

# Problem dimensions.
m = 20
n = 15

# Generate data: a Gaussian design matrix with roughly half of its entries
# zeroed out by an independent random mask, and a Gaussian target vector.
# NOTE: the RNG draw order (matrix values, then mask, then b) is significant
# for reproducibility with the fixed seed.
np.random.seed(1)
values = np.random.randn(m, n)
mask = np.random.randn(m, n) > 0.
A = values * mask
b = np.random.randn(m)
print(repr(A.reshape(1, -1)))
print(repr(b))

# Define and solve the CVXPY problem: minimise ||Ax - b||^2 with OSQP.
x = cp.Variable(n)
objective = cp.Minimize(cp.sum_squares(A @ x - b))
prob = cp.Problem(objective)
prob.solve(solver=cp.OSQP, verbose=True)

# Print result.
print("\nThe optimal value is", prob.value)
print("The optimal x is")
print(repr(x.value))
print("The norm of the residual is ", cp.norm(A @ x - b, p=2).value)
print("The norm of the residual is ", cp.norm(A @ x - b, p=2).value) | [
"cvxpy.Minimize",
"cvxpy.Variable",
"cvxpy.sum_squares",
"numpy.random.seed",
"cvxpy.norm",
"numpy.random.randn"
] | [((89, 106), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (103, 106), True, 'import numpy as np\n'), ((168, 186), 'numpy.random.randn', 'np.random.randn', (['m'], {}), '(m)\n', (183, 186), True, 'import numpy as np\n'), ((276, 290), 'cvxpy.Variable', 'cp.Variable', (['n'], {}), '(n)\n', (287, 290), True, 'import cvxpy as cp\n'), ((298, 323), 'cvxpy.sum_squares', 'cp.sum_squares', (['(A @ x - b)'], {}), '(A @ x - b)\n', (312, 323), True, 'import cvxpy as cp\n'), ((111, 132), 'numpy.random.randn', 'np.random.randn', (['m', 'n'], {}), '(m, n)\n', (126, 132), True, 'import numpy as np\n'), ((342, 359), 'cvxpy.Minimize', 'cp.Minimize', (['cost'], {}), '(cost)\n', (353, 359), True, 'import cvxpy as cp\n'), ((136, 157), 'numpy.random.randn', 'np.random.randn', (['m', 'n'], {}), '(m, n)\n', (151, 157), True, 'import numpy as np\n'), ((548, 571), 'cvxpy.norm', 'cp.norm', (['(A @ x - b)'], {'p': '(2)'}), '(A @ x - b, p=2)\n', (555, 571), True, 'import cvxpy as cp\n')] |
"""Delay space spectrum estimation and filtering.
"""
import numpy as np
import scipy.linalg as la
from caput import mpiarray, config
from cora.util import units
from ..core import containers, task, io
from ..util import random
class DelayFilter(task.SingleTask):
    """Remove delays less than a given threshold.

    This is performed by projecting the data onto the null space that is orthogonal
    to any mode at low delays.

    Attributes
    ----------
    delay_cut : float
        Delay value to filter at in seconds.
    za_cut : float
        Sine of the maximum zenith angle included in baseline-dependent delay
        filtering. Default is 1 which corresponds to the horizon (ie: filters out all
        zenith angles). Setting to zero turns off baseline dependent cut.
    extra_cut : float
        Increase the delay threshold beyond the baseline dependent term.
    weight_tol : float
        Maximum weight kept in the masked data, as a fraction of the largest weight
        in the original dataset.
    telescope_orientation : one of ('NS', 'EW', 'none')
        Determines if the baseline-dependent delay cut is based on the north-south
        component, the east-west component or the full baseline length. For
        cylindrical telescopes oriented in the NS direction (like CHIME) use 'NS'.
        The default is 'NS'.
    window : bool
        Apply the window function to the data when applying the filter.

    Notes
    -----
    The delay cut applied is `max(za_cut * baseline / c + extra_cut, delay_cut)`.
    """

    delay_cut = config.Property(proptype=float, default=0.1)
    za_cut = config.Property(proptype=float, default=1.0)
    extra_cut = config.Property(proptype=float, default=0.0)
    weight_tol = config.Property(proptype=float, default=1e-4)
    telescope_orientation = config.enum(["NS", "EW", "none"], default="NS")
    window = config.Property(proptype=bool, default=False)
    def setup(self, telescope):
        """Set the telescope needed to obtain baselines.

        Parameters
        ----------
        telescope : TransitTelescope
            Telescope/manager object from which the baseline vectors are read.
        """
        self.telescope = io.get_telescope(telescope)
    def process(self, ss):
        """Filter out delays from a SiderealStream or TimeStream.

        Parameters
        ----------
        ss : containers.SiderealStream
            Data to filter. Modified in place and returned.

        Returns
        -------
        ss_filt : containers.SiderealStream
            Filtered dataset (the same container instance as `ss`).
        """
        tel = self.telescope
        # Distribute over product-like axes so each rank holds complete
        # frequency spectra for its local subset of baselines.
        ss.redistribute(["input", "prod", "stack"])
        freq = ss.freq[:]
        # Total bandwidth (max - min frequency); sets the delay resolution.
        bandwidth = np.ptp(freq)
        ssv = ss.vis[:].view(np.ndarray)
        ssw = ss.weight[:].view(np.ndarray)
        # Pack 2D baseline vectors into complex numbers so np.unique can
        # identify distinct separations; uinv maps product -> unique baseline.
        ubase, uinv = np.unique(
            tel.baselines[:, 0] + 1.0j * tel.baselines[:, 1], return_inverse=True
        )
        ubase = ubase.view(np.float64).reshape(-1, 2)
        # Loop over the locally held baselines (lbi local, bi global index)
        for lbi, bi in ss.vis[:].enumerate(axis=1):
            # Select the baseline length to use
            baseline = ubase[uinv[bi]]
            if self.telescope_orientation == "NS":
                baseline = abs(baseline[1])  # Y baseline
            elif self.telescope_orientation == "EW":
                baseline = abs(baseline[0])  # X baseline
            else:
                baseline = np.linalg.norm(baseline)  # Norm
            # In micro seconds
            baseline_delay_cut = self.za_cut * baseline / units.c * 1e6 + self.extra_cut
            delay_cut = np.amax([baseline_delay_cut, self.delay_cut])
            # Calculate the number of samples needed to construct the delay null space.
            # `4 * tau_max * bandwidth` is the amount recommended in the DAYENU paper
            # and seems to work well here
            number_cut = int(4.0 * bandwidth * delay_cut + 0.5)
            # Mask out frequencies whose (time-median) weight is negligible
            # compared to the best channel, so they don't enter the filter.
            weight_mask = np.median(ssw[:, lbi], axis=1)
            weight_mask = (weight_mask > (self.weight_tol * weight_mask.max())).astype(
                np.float64
            )
            # Build the null-space projection for this baseline's delay cut.
            # `null_delay_filter` is defined elsewhere in this module.
            NF = null_delay_filter(
                freq, delay_cut, weight_mask, num_delay=number_cut, window=self.window
            )
            # Apply the projection in place and zero the masked channels' weights
            ssv[:, lbi] = np.dot(NF, ssv[:, lbi])
            ssw[:, lbi] *= weight_mask[:, np.newaxis]
        return ss
class DelaySpectrumEstimator(task.SingleTask, random.RandomTask):
    """Calculate the delay spectrum of a Sidereal/TimeStream for instrumental Stokes I.

    The spectrum is calculated by Gibbs sampling. However, at the moment only
    the final samples are used to calculate the spectrum.

    Attributes
    ----------
    nsamp : int, optional
        The number of Gibbs samples to draw.
    freq_zero : float, optional
        The physical frequency (in MHz) of the *zero* channel. That is the DC
        channel coming out of the F-engine. If not specified, use the first
        frequency channel of the stream.
    freq_spacing : float, optional
        The spacing between the underlying channels (in MHz). This is conjugate
        to the length of a frame of time samples that is transformed. If not
        set, then use the smallest gap found between channels in the dataset.
    nfreq : int, optional
        The number of frequency channels in the full set produced by the
        F-engine. If not set, assume the last included frequency is the last of
        the full set (or is the penultimate if `skip_nyquist` is set).
    skip_nyquist : bool, optional
        Whether the Nyquist frequency is included in the data. This is `True` by
        default to align with the output of CASPER PFBs.
    """

    nsamp = config.Property(proptype=int, default=20)
    freq_zero = config.Property(proptype=float, default=None)
    freq_spacing = config.Property(proptype=float, default=None)
    nfreq = config.Property(proptype=int, default=None)
    skip_nyquist = config.Property(proptype=bool, default=True)

    def setup(self, telescope):
        """Set the telescope needed to generate Stokes I.

        Parameters
        ----------
        telescope : TransitTelescope
            Telescope/manager object describing feeds and baselines.
        """
        self.telescope = io.get_telescope(telescope)

    def process(self, ss):
        """Estimate the delay spectrum.

        Parameters
        ----------
        ss : SiderealStream or TimeStream
            Stream of correlation data to transform.

        Returns
        -------
        dspec : DelaySpectrum
            Estimated delay spectrum, distributed over baselines.
        """
        tel = self.telescope

        ss.redistribute("freq")

        # Construct the Stokes I vis
        vis_I, vis_weight, baselines = stokes_I(ss, tel)

        # ==== Figure out the frequency structure and delay values ====
        if self.freq_zero is None:
            self.freq_zero = ss.freq[0]
        if self.freq_spacing is None:
            self.freq_spacing = np.abs(np.diff(ss.freq[:])).min()

        # Map each included frequency onto its channel index in the full band.
        # FIX: use the builtin `int` here — the `np.int` alias was deprecated
        # in NumPy 1.20 and removed in 1.24, so `.astype(np.int)` now raises.
        channel_ind = (np.abs(ss.freq[:] - self.freq_zero) / self.freq_spacing).astype(
            int
        )

        if self.nfreq is None:
            self.nfreq = channel_ind[-1] + 1
            if self.skip_nyquist:
                self.nfreq += 1

        # Assume each transformed frame was an even number of samples long
        ndelay = 2 * (self.nfreq - 1)
        delays = np.fft.fftshift(np.fft.fftfreq(ndelay, d=self.freq_spacing))  # in us

        # Initialise the spectrum container
        delay_spec = containers.DelaySpectrum(baseline=baselines, delay=delays)
        delay_spec.redistribute("baseline")
        delay_spec.spectrum[:] = 0.0

        # Flat initial guess for the delay power spectrum
        initial_S = np.ones_like(delays) * 1e1

        # Get the random Generator that we will use
        rng = self.rng

        # Iterate over all baselines and use the Gibbs sampler to estimate the spectrum
        for lbi, bi in delay_spec.spectrum[:].enumerate(axis=0):

            self.log.debug("Delay transforming baseline %i/%i", bi, len(baselines))

            # Get the local selections
            data = vis_I[lbi].view(np.ndarray).T
            weight = vis_weight[lbi].view(np.ndarray)

            # Mask out data with completely zero'd weights and generate time
            # averaged weights
            weight_cut = (
                1e-4 * weight.mean()
            )  # Use approx threshold to ignore small weights
            data = data * (weight.T > weight_cut)
            weight = np.mean(weight, axis=1)

            # Skip baselines with no unmasked data at all
            if (data == 0.0).all():
                continue

            # If there are no non-zero weighted entries skip
            non_zero = weight > 0
            if not non_zero.any():
                continue

            # Remove any frequency channel which is entirely zero, this is just to
            # reduce the computational cost, it should make no difference to the result
            data = data[:, non_zero]
            weight = weight[non_zero]
            non_zero_channel = channel_ind[non_zero]

            spec = delay_spectrum_gibbs(
                data,
                ndelay,
                weight,
                initial_S,
                fsel=non_zero_channel,
                niter=self.nsamp,
                rng=rng,
            )

            # Take an average over the last half of the delay spectrum samples
            # (presuming that removes the burn-in)
            spec_av = np.median(spec[-(self.nsamp // 2) :], axis=0)
            delay_spec.spectrum[bi] = np.fft.fftshift(spec_av)

        return delay_spec
def stokes_I(sstream, tel):
    """Extract instrumental Stokes I from a time/sidereal stream.

    Parameters
    ----------
    sstream : containers.SiderealStream, container.TimeStream
        Stream of correlation data.
    tel : TransitTelescope
        Instance describing the telescope.

    Returns
    -------
    vis_I : mpiarray.MPIArray[nbase, nfreq, ntime]
        The instrumental Stokes I visibilities, distributed over baselines.
    vis_weight : mpiarray.MPIArray[nbase, nfreq, ntime]
        The weights for each visibility, distributed over baselines.
    baselines : np.ndarray[nbase, 2]
    """
    # Encode each 2D baseline as a complex number so np.unique can match them.
    # NOTE: due to floating point precision, some baselines don't get matched as
    # having the same lengths, so round all separations to 0.1 mm precision.
    bl_complex = np.around(tel.baselines[:, 0] + 1.0j * tel.baselines[:, 1], 4)

    # ==== Unpack into Stokes I
    unique_bl, inverse_map, counts = np.unique(
        bl_complex, return_inverse=True, return_counts=True
    )
    unique_bl = unique_bl.view(np.float64).reshape(-1, 2)
    nbase = unique_bl.shape[0]

    out_shape = (nbase, sstream.vis.local_shape[0], sstream.vis.local_shape[2])
    vis_I = np.zeros(out_shape, dtype=sstream.vis.dtype)
    vis_weight = np.zeros(out_shape, dtype=sstream.weight.dtype)

    ssv = sstream.vis[:]
    ssw = sstream.weight[:]

    # Cache beamclass as it's regenerated every call
    beamclass = tel.beamclass[:]

    # Accumulate the co-polar products for each unique baseline.
    # TODO: this should be updated when driftscan gains a concept of polarisation
    for pp, bl in enumerate(inverse_map):

        # Skip if not all polarisations were included
        if counts[bl] < 4:
            continue

        feed_i, feed_j = tel.uniquepairs[pp]

        # Skip products the telescope has excluded from the feed map
        if tel.feedmap[feed_i, feed_j] == -1:
            continue

        # Only co-polar products contribute to instrumental Stokes I
        if beamclass[feed_i] == beamclass[feed_j]:
            vis_I[bl] += ssv[:, pp]
            vis_weight[bl] += ssw[:, pp]

    # Wrap as MPIArrays (still distributed in frequency on axis 1), then move
    # the distribution onto the baseline axis.
    vis_I = mpiarray.MPIArray.wrap(vis_I, axis=1, comm=sstream.comm)
    vis_I = vis_I.redistribute(axis=0)
    vis_weight = mpiarray.MPIArray.wrap(
        vis_weight, axis=1, comm=sstream.comm
    ).redistribute(axis=0)

    return vis_I, vis_weight, unique_bl
def window_generalised(x, window="nuttall"):
    """A generalised high-order window evaluated at arbitrary locations.

    Parameters
    ----------
    x : np.ndarray[n]
        Location to evaluate at. Must be in the range 0 to 1.
    window : one of {'nuttall', 'blackman_nuttall', 'blackman_harris'}
        Type of window function to return.

    Returns
    -------
    w : np.ndarray[n]
        Window function evaluated at `x`.
    """
    # Four-term cosine series coefficients for each supported window.
    coefficients = {
        "nuttall": [0.355768, -0.487396, 0.144232, -0.012604],
        "blackman_nuttall": [0.3635819, -0.4891775, 0.1365995, -0.0106411],
        "blackman_harris": [0.35875, -0.48829, 0.14128, -0.01168],
    }[window]

    # Accumulate the series term by term: w(x) = sum_k a_k cos(2 pi k x)
    w = np.zeros(np.shape(x), dtype=np.float64)
    for k, a_k in enumerate(coefficients):
        w += a_k * np.cos(2 * np.pi * k * x)

    return w
def fourier_matrix_r2c(N, fsel=None):
    """Generate a Fourier matrix to represent a real to complex FFT.

    Parameters
    ----------
    N : integer
        Length of timestream that we are transforming to. Must be even.
    fsel : array_like, optional
        Indexes of the frequency channels to include in the transformation
        matrix. By default, assume all channels.

    Returns
    -------
    F : np.ndarray
        An array performing the Fourier transform from a real time series to
        frequencies packed as alternating real and imaginary elements.
    """
    # Frequencies to evaluate: all non-negative frequencies by default.
    freqs = np.arange(N // 2 + 1) if fsel is None else np.array(fsel)

    # DFT phase for every (frequency, time) pair.
    phases = 2 * np.pi * np.outer(freqs, np.arange(N)) / N

    # Interleave real (cos) and imaginary (-sin) rows, matching the forward
    # FFT sign convention exp(-2 pi i f t / N).
    F = np.empty((2 * len(freqs), N), dtype=np.float64)
    F[0::2] = np.cos(phases)
    F[1::2] = -np.sin(phases)

    return F
def fourier_matrix_c2r(N, fsel=None):
    """Generate a Fourier matrix to represent a complex to real FFT.

    Parameters
    ----------
    N : integer
        Length of timestream that we are transforming to. Must be even.
    fsel : array_like, optional
        Indexes of the frequency channels to include in the transformation
        matrix. By default, assume all channels.

    Returns
    -------
    F : np.ndarray
        An array performing the Fourier transform from frequencies packed as
        alternating real and imaginary elements, to the real time series.
    """
    # Frequencies represented in the packed input: all by default.
    freqs = np.arange(N // 2 + 1) if fsel is None else np.array(fsel)

    # DC and Nyquist channels appear only once in the positive-frequency
    # packing; every other channel implicitly carries its conjugate, hence
    # the factor of two before the overall 1/N normalisation.
    norm = np.where((freqs == 0) | (freqs == N // 2), 1.0, 2.0) / N

    # DFT phase for every (time, frequency) pair.
    phases = 2 * np.pi * np.outer(np.arange(N), freqs) / N

    F = np.empty((N, 2 * len(freqs)), dtype=np.float64)
    F[:, 0::2] = np.cos(phases) * norm
    F[:, 1::2] = -np.sin(phases) * norm

    return F
def delay_spectrum_gibbs(
data, N, Ni, initial_S, window=True, fsel=None, niter=20, rng=None
):
"""Estimate the delay spectrum by Gibbs sampling.
This routine estimates the spectrum at the `N` delay samples conjugate to
the frequency spectrum of ``N/2 + 1`` channels. A subset of these channels
can be specified using the `fsel` argument.
Parameters
----------
data : np.ndarray[:, freq]
Data to estimate the delay spectrum of.
N : int
The length of the output delay spectrum. There are assumed to `N/2 + 1`
total frequency channels.
Ni : np.ndarray[freq]
Inverse noise variance.
initial_S : np.ndarray[delay]
The initial delay spectrum guess.
window : bool, optional
Apply a Nuttall apodisation function. Default is True.
fsel : np.ndarray[freq], optional
Indices of channels that we have data at. By default assume all channels.
niter : int, optional
Number of Gibbs samples to generate.
rng : np.random.Generator, optional
A generator to use to produce the random samples.
Returns
-------
spec : list
List of spectrum samples.
"""
# Get reference to RNG
if rng is None:
rng = random.default_rng()
spec = []
total_freq = N // 2 + 1
if fsel is None:
fsel = np.arange(total_freq)
# Construct the Fourier matrix
F = fourier_matrix_r2c(N, fsel)
# Construct a view of the data with alternating real and imaginary parts
data = data.astype(np.complex128, order="C").view(np.float64).T.copy()
# Window the frequency data
if window:
# Construct the window function
x = fsel * 1.0 / total_freq
w = window_generalised(x, window="nuttall")
w = np.repeat(w, 2)
# Apply to the projection matrix and the data
F *= w[:, np.newaxis]
data *= w[:, np.newaxis]
is_real_freq = (fsel == 0) | (fsel == N // 2)
# Construct the Noise inverse array for the real and imaginary parts (taking
# into account that the zero and Nyquist frequencies are strictly real)
Ni_r = np.zeros(2 * Ni.shape[0])
Ni_r[0::2] = np.where(is_real_freq, Ni, Ni / 2 ** 0.5)
Ni_r[1::2] = np.where(is_real_freq, 0.0, Ni / 2 ** 0.5)
# Create the Hermitian conjugate weighted by the noise (this is used multiple times)
FTNih = F.T * Ni_r[np.newaxis, :] ** 0.5
FTNiF = np.dot(FTNih, FTNih.T)
# Pre-whiten the data to save doing it repeatedly
data = data * Ni_r[:, np.newaxis] ** 0.5
# Set the initial starting points
S_samp = initial_S
def _draw_signal_sample_f(S):
    """Draw a signal sample via the perturbed Wiener filter (frequency form).

    Assumes a Gaussian signal model with delay power spectrum `S`.  This
    formulation solves an `N x N` system, so it is fastest when the number
    of frequencies exceeds the number of delays; typically that is not the
    case, so `_draw_signal_sample_t` is usually preferred.

    Uses `rng`, `N`, `data`, `FTNih` and `FTNiF` from the enclosing scope.
    """
    # Construct the Wiener covariance inverse: C^-1 = S^-1 + F^H N^-1 F
    Si = 1.0 / S
    Ci = np.diag(Si) + FTNiF
    # Draw the random vectors that form the perturbations
    w1 = rng.standard_normal((N, data.shape[1]))
    w2 = rng.standard_normal(data.shape)
    # Form the perturbed right-hand side and solve for the sample
    y = np.dot(FTNih, data + w2) + Si[:, np.newaxis] ** 0.5 * w1
    # NOTE: `sym_pos=True` was removed from scipy.linalg.solve in SciPy 1.11;
    # `assume_a="pos"` requests the same Cholesky-based solver.
    return la.solve(Ci, y, assume_a="pos")
def _draw_signal_sample_t(S):
    """Draw a signal sample via the perturbed Wiener filter (delay form).

    This formulation solves a system of the size of the frequency axis, so
    it is fastest when the number of delays exceeds the number of
    frequencies — the regime we are usually in.

    Uses `rng`, `N`, `data`, `FTNih` and `Ni_r` from the enclosing scope.
    """
    # Construct the dependent matrices: R^H = S^1/2 F^H N^-1/2
    Sh = S ** 0.5
    Rt = Sh[:, np.newaxis] * FTNih
    R = Rt.T
    # Draw the random vectors that form the perturbations
    w1 = rng.standard_normal((N, data.shape[1]))
    w2 = rng.standard_normal(data.shape)
    # Perform the solve step (rather than explicitly forming the inverse)
    y = data + w2 - np.dot(R, w1)
    Ci = np.identity(len(Ni_r)) + np.dot(R, Rt)
    # NOTE: `sym_pos=True` was removed from scipy.linalg.solve in SciPy 1.11;
    # `assume_a="pos"` requests the same Cholesky-based solver.
    x = la.solve(Ci, y, assume_a="pos")
    s = Sh[:, np.newaxis] * (np.dot(Rt, x) + w1)
    return s
def _draw_ps_sample(d):
    """Draw a power spectrum sample conditioned on the signal draw `d`.

    Assumes the signal is Gaussian with a flat prior on the power
    spectrum, which reduces to an inverse chi-squared draw per delay bin.
    Uses `rng` from the enclosing scope.
    """
    variance = d.var(axis=1)
    ndof = d.shape[1]
    scale = rng.chisquare(ndof, size=d.shape[0])
    return variance * ndof / scale
# Select the method to use for the signal sample based on how many frequencies
# versus delays there are
_draw_signal_sample = (
_draw_signal_sample_f if len(fsel) > 0.25 * N else _draw_signal_sample_t
)
# Perform the Gibbs sampling iteration for a given number of loops and
# return the power spectrum output of them.
for ii in range(niter):
d_samp = _draw_signal_sample(S_samp)
S_samp = _draw_ps_sample(d_samp)
spec.append(S_samp)
return spec
def null_delay_filter(freq, max_delay, mask, num_delay=200, tol=1e-8, window=True):
    """Take frequency data and null out any delays below some value.

    Parameters
    ----------
    freq : np.ndarray[freq]
        Frequencies we have data at.
    max_delay : float
        Maximum delay to keep.
    mask : np.ndarray[freq]
        Frequencies to mask out.
    num_delay : int, optional
        Number of delay values to use.
    tol : float, optional
        Cut off value for singular values.
    window : bool, optional
        Apply a window function to the data while filtering.

    Returns
    -------
    filter : np.ndarray[freq, freq]
        The filter as a 2D matrix.
    """
    # Construct the window function over the band.
    # NOTE: `ndarray.ptp` was removed in NumPy 2.0; use the `np.ptp` function.
    x = (freq - freq.min()) / np.ptp(freq)
    w = window_generalised(x, window="nuttall")
    delay = np.linspace(-max_delay, max_delay, num_delay)
    # Construct the (masked) Fourier matrix mapping the target delay range
    # into frequency space
    F = mask[:, np.newaxis] * np.exp(
        2.0j * np.pi * delay[np.newaxis, :] * freq[:, np.newaxis]
    )
    if window:
        F *= w[:, np.newaxis]
    # Use an SVD to figure out the set of significant modes spanning the delays
    # we are wanting to get rid of.
    u, sig, vh = la.svd(F)
    nmodes = np.sum(sig > tol * sig.max())
    p = u[:, :nmodes]
    # Construct a projection matrix orthogonal to those modes
    proj = np.identity(len(freq)) - np.dot(p, p.T.conj())
    proj *= mask[np.newaxis, :]
    if window:
        proj *= w[np.newaxis, :]
    return proj
| [
"numpy.ptp",
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"numpy.arange",
"numpy.mean",
"numpy.repeat",
"numpy.where",
"caput.config.Property",
"numpy.diff",
"numpy.exp",
"numpy.dot",
"numpy.linspace",
"caput.config.enum",
"numpy.abs",
"numpy.around",
"numpy.cos",
"scipy.linalg... | [((1570, 1614), 'caput.config.Property', 'config.Property', ([], {'proptype': 'float', 'default': '(0.1)'}), '(proptype=float, default=0.1)\n', (1585, 1614), False, 'from caput import mpiarray, config\n'), ((1628, 1672), 'caput.config.Property', 'config.Property', ([], {'proptype': 'float', 'default': '(1.0)'}), '(proptype=float, default=1.0)\n', (1643, 1672), False, 'from caput import mpiarray, config\n'), ((1689, 1733), 'caput.config.Property', 'config.Property', ([], {'proptype': 'float', 'default': '(0.0)'}), '(proptype=float, default=0.0)\n', (1704, 1733), False, 'from caput import mpiarray, config\n'), ((1751, 1798), 'caput.config.Property', 'config.Property', ([], {'proptype': 'float', 'default': '(0.0001)'}), '(proptype=float, default=0.0001)\n', (1766, 1798), False, 'from caput import mpiarray, config\n'), ((1825, 1872), 'caput.config.enum', 'config.enum', (["['NS', 'EW', 'none']"], {'default': '"""NS"""'}), "(['NS', 'EW', 'none'], default='NS')\n", (1836, 1872), False, 'from caput import mpiarray, config\n'), ((1886, 1931), 'caput.config.Property', 'config.Property', ([], {'proptype': 'bool', 'default': '(False)'}), '(proptype=bool, default=False)\n', (1901, 1931), False, 'from caput import mpiarray, config\n'), ((5581, 5622), 'caput.config.Property', 'config.Property', ([], {'proptype': 'int', 'default': '(20)'}), '(proptype=int, default=20)\n', (5596, 5622), False, 'from caput import mpiarray, config\n'), ((5639, 5684), 'caput.config.Property', 'config.Property', ([], {'proptype': 'float', 'default': 'None'}), '(proptype=float, default=None)\n', (5654, 5684), False, 'from caput import mpiarray, config\n'), ((5704, 5749), 'caput.config.Property', 'config.Property', ([], {'proptype': 'float', 'default': 'None'}), '(proptype=float, default=None)\n', (5719, 5749), False, 'from caput import mpiarray, config\n'), ((5762, 5805), 'caput.config.Property', 'config.Property', ([], {'proptype': 'int', 'default': 'None'}), '(proptype=int, 
default=None)\n', (5777, 5805), False, 'from caput import mpiarray, config\n'), ((5825, 5869), 'caput.config.Property', 'config.Property', ([], {'proptype': 'bool', 'default': '(True)'}), '(proptype=bool, default=True)\n', (5840, 5869), False, 'from caput import mpiarray, config\n'), ((10214, 10276), 'numpy.around', 'np.around', (['(tel.baselines[:, 0] + 1.0j * tel.baselines[:, 1])', '(4)'], {}), '(tel.baselines[:, 0] + 1.0j * tel.baselines[:, 1], 4)\n', (10223, 10276), True, 'import numpy as np\n'), ((10336, 10396), 'numpy.unique', 'np.unique', (['bl_round'], {'return_inverse': '(True)', 'return_counts': '(True)'}), '(bl_round, return_inverse=True, return_counts=True)\n', (10345, 10396), True, 'import numpy as np\n'), ((10567, 10611), 'numpy.zeros', 'np.zeros', (['vis_shape'], {'dtype': 'sstream.vis.dtype'}), '(vis_shape, dtype=sstream.vis.dtype)\n', (10575, 10611), True, 'import numpy as np\n'), ((10629, 10676), 'numpy.zeros', 'np.zeros', (['vis_shape'], {'dtype': 'sstream.weight.dtype'}), '(vis_shape, dtype=sstream.weight.dtype)\n', (10637, 10676), True, 'import numpy as np\n'), ((11371, 11427), 'caput.mpiarray.MPIArray.wrap', 'mpiarray.MPIArray.wrap', (['vis_I'], {'axis': '(1)', 'comm': 'sstream.comm'}), '(vis_I, axis=1, comm=sstream.comm)\n', (11393, 11427), False, 'from caput import mpiarray, config\n'), ((13209, 13257), 'numpy.zeros', 'np.zeros', (['(2 * fa.shape[0], N)'], {'dtype': 'np.float64'}), '((2 * fa.shape[0], N), dtype=np.float64)\n', (13217, 13257), True, 'import numpy as np\n'), ((13274, 13305), 'numpy.cos', 'np.cos', (['(2 * np.pi * ta * fa / N)'], {}), '(2 * np.pi * ta * fa / N)\n', (13280, 13305), True, 'import numpy as np\n'), ((14189, 14237), 'numpy.zeros', 'np.zeros', (['(N, 2 * fa.shape[1])'], {'dtype': 'np.float64'}), '((N, 2 * fa.shape[1]), dtype=np.float64)\n', (14197, 14237), True, 'import numpy as np\n'), ((16516, 16541), 'numpy.zeros', 'np.zeros', (['(2 * Ni.shape[0])'], {}), '(2 * Ni.shape[0])\n', (16524, 16541), True, 'import numpy 
as np\n'), ((16559, 16600), 'numpy.where', 'np.where', (['is_real_freq', 'Ni', '(Ni / 2 ** 0.5)'], {}), '(is_real_freq, Ni, Ni / 2 ** 0.5)\n', (16567, 16600), True, 'import numpy as np\n'), ((16618, 16660), 'numpy.where', 'np.where', (['is_real_freq', '(0.0)', '(Ni / 2 ** 0.5)'], {}), '(is_real_freq, 0.0, Ni / 2 ** 0.5)\n', (16626, 16660), True, 'import numpy as np\n'), ((16808, 16830), 'numpy.dot', 'np.dot', (['FTNih', 'FTNih.T'], {}), '(FTNih, FTNih.T)\n', (16814, 16830), True, 'import numpy as np\n'), ((20399, 20444), 'numpy.linspace', 'np.linspace', (['(-max_delay)', 'max_delay', 'num_delay'], {}), '(-max_delay, max_delay, num_delay)\n', (20410, 20444), True, 'import numpy as np\n'), ((20771, 20780), 'scipy.linalg.svd', 'la.svd', (['F'], {}), '(F)\n', (20777, 20780), True, 'import scipy.linalg as la\n'), ((2611, 2623), 'numpy.ptp', 'np.ptp', (['freq'], {}), '(freq)\n', (2617, 2623), True, 'import numpy as np\n'), ((2733, 2818), 'numpy.unique', 'np.unique', (['(tel.baselines[:, 0] + 1.0j * tel.baselines[:, 1])'], {'return_inverse': '(True)'}), '(tel.baselines[:, 0] + 1.0j * tel.baselines[:, 1], return_inverse=True\n )\n', (2742, 2818), True, 'import numpy as np\n'), ((12073, 12125), 'numpy.array', 'np.array', (['[0.355768, -0.487396, 0.144232, -0.012604]'], {}), '([0.355768, -0.487396, 0.144232, -0.012604])\n', (12081, 12125), True, 'import numpy as np\n'), ((12155, 12211), 'numpy.array', 'np.array', (['[0.3635819, -0.4891775, 0.1365995, -0.0106411]'], {}), '([0.3635819, -0.4891775, 0.1365995, -0.0106411])\n', (12163, 12211), True, 'import numpy as np\n'), ((12240, 12288), 'numpy.array', 'np.array', (['[0.35875, -0.48829, 0.14128, -0.01168]'], {}), '([0.35875, -0.48829, 0.14128, -0.01168])\n', (12248, 12288), True, 'import numpy as np\n'), ((13074, 13095), 'numpy.arange', 'np.arange', (['(N // 2 + 1)'], {}), '(N // 2 + 1)\n', (13083, 13095), True, 'import numpy as np\n'), ((13119, 13133), 'numpy.array', 'np.array', (['fsel'], {}), '(fsel)\n', (13127, 13133), 
True, 'import numpy as np\n'), ((13171, 13183), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (13180, 13183), True, 'import numpy as np\n'), ((13322, 13353), 'numpy.sin', 'np.sin', (['(2 * np.pi * ta * fa / N)'], {}), '(2 * np.pi * ta * fa / N)\n', (13328, 13353), True, 'import numpy as np\n'), ((13991, 14012), 'numpy.arange', 'np.arange', (['(N // 2 + 1)'], {}), '(N // 2 + 1)\n', (14000, 14012), True, 'import numpy as np\n'), ((14036, 14050), 'numpy.array', 'np.array', (['fsel'], {}), '(fsel)\n', (14044, 14050), True, 'import numpy as np\n'), ((14090, 14136), 'numpy.where', 'np.where', (['((fa == 0) | (fa == N // 2))', '(1.0)', '(2.0)'], {}), '((fa == 0) | (fa == N // 2), 1.0, 2.0)\n', (14098, 14136), True, 'import numpy as np\n'), ((14151, 14163), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (14160, 14163), True, 'import numpy as np\n'), ((14257, 14288), 'numpy.cos', 'np.cos', (['(2 * np.pi * ta * fa / N)'], {}), '(2 * np.pi * ta * fa / N)\n', (14263, 14288), True, 'import numpy as np\n'), ((15726, 15747), 'numpy.arange', 'np.arange', (['total_freq'], {}), '(total_freq)\n', (15735, 15747), True, 'import numpy as np\n'), ((16162, 16177), 'numpy.repeat', 'np.repeat', (['w', '(2)'], {}), '(w, 2)\n', (16171, 16177), True, 'import numpy as np\n'), ((17869, 17898), 'scipy.linalg.solve', 'la.solve', (['Ci', 'y'], {'sym_pos': '(True)'}), '(Ci, y, sym_pos=True)\n', (17877, 17898), True, 'import scipy.linalg as la\n'), ((18543, 18572), 'scipy.linalg.solve', 'la.solve', (['Ci', 'y'], {'sym_pos': '(True)'}), '(Ci, y, sym_pos=True)\n', (18551, 18572), True, 'import scipy.linalg as la\n'), ((20511, 20576), 'numpy.exp', 'np.exp', (['(2.0j * np.pi * delay[np.newaxis, :] * freq[:, np.newaxis])'], {}), '(2.0j * np.pi * delay[np.newaxis, :] * freq[:, np.newaxis])\n', (20517, 20576), True, 'import numpy as np\n'), ((3473, 3518), 'numpy.amax', 'np.amax', (['[baseline_delay_cut, self.delay_cut]'], {}), '([baseline_delay_cut, self.delay_cut])\n', (3480, 3518), True, 'import 
numpy as np\n'), ((3827, 3857), 'numpy.median', 'np.median', (['ssw[:, lbi]'], {'axis': '(1)'}), '(ssw[:, lbi], axis=1)\n', (3836, 3857), True, 'import numpy as np\n'), ((4151, 4174), 'numpy.dot', 'np.dot', (['NF', 'ssv[:, lbi]'], {}), '(NF, ssv[:, lbi])\n', (4157, 4174), True, 'import numpy as np\n'), ((7146, 7189), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['ndelay'], {'d': 'self.freq_spacing'}), '(ndelay, d=self.freq_spacing)\n', (7160, 7189), True, 'import numpy as np\n'), ((7427, 7447), 'numpy.ones_like', 'np.ones_like', (['delays'], {}), '(delays)\n', (7439, 7447), True, 'import numpy as np\n'), ((8218, 8241), 'numpy.mean', 'np.mean', (['weight'], {'axis': '(1)'}), '(weight, axis=1)\n', (8225, 8241), True, 'import numpy as np\n'), ((9164, 9208), 'numpy.median', 'np.median', (['spec[-(self.nsamp // 2):]'], {'axis': '(0)'}), '(spec[-(self.nsamp // 2):], axis=0)\n', (9173, 9208), True, 'import numpy as np\n'), ((9248, 9272), 'numpy.fft.fftshift', 'np.fft.fftshift', (['spec_av'], {}), '(spec_av)\n', (9263, 9272), True, 'import numpy as np\n'), ((11484, 11545), 'caput.mpiarray.MPIArray.wrap', 'mpiarray.MPIArray.wrap', (['vis_weight'], {'axis': '(1)', 'comm': 'sstream.comm'}), '(vis_weight, axis=1, comm=sstream.comm)\n', (11506, 11545), False, 'from caput import mpiarray, config\n'), ((14314, 14345), 'numpy.sin', 'np.sin', (['(2 * np.pi * ta * fa / N)'], {}), '(2 * np.pi * ta * fa / N)\n', (14320, 14345), True, 'import numpy as np\n'), ((17491, 17502), 'numpy.diag', 'np.diag', (['Si'], {}), '(Si)\n', (17498, 17502), True, 'import numpy as np\n'), ((17796, 17820), 'numpy.dot', 'np.dot', (['FTNih', '(data + w2)'], {}), '(FTNih, data + w2)\n', (17802, 17820), True, 'import numpy as np\n'), ((18465, 18478), 'numpy.dot', 'np.dot', (['R', 'w1'], {}), '(R, w1)\n', (18471, 18478), True, 'import numpy as np\n'), ((18517, 18530), 'numpy.dot', 'np.dot', (['R', 'Rt'], {}), '(R, Rt)\n', (18523, 18530), True, 'import numpy as np\n'), ((12342, 12354), 'numpy.arange', 'np.arange', 
(['(4)'], {}), '(4)\n', (12351, 12354), True, 'import numpy as np\n'), ((12418, 12427), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (12424, 12427), True, 'import numpy as np\n'), ((18607, 18620), 'numpy.dot', 'np.dot', (['Rt', 'x'], {}), '(Rt, x)\n', (18613, 18620), True, 'import numpy as np\n'), ((3296, 3320), 'numpy.linalg.norm', 'np.linalg.norm', (['baseline'], {}), '(baseline)\n', (3310, 3320), True, 'import numpy as np\n'), ((6761, 6796), 'numpy.abs', 'np.abs', (['(ss.freq[:] - self.freq_zero)'], {}), '(ss.freq[:] - self.freq_zero)\n', (6767, 6796), True, 'import numpy as np\n'), ((6710, 6729), 'numpy.diff', 'np.diff', (['ss.freq[:]'], {}), '(ss.freq[:])\n', (6717, 6729), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# The source code in this module was created by <NAME>
# from University Clermont Auvergne, CNRS, SIGMA Clermont, Institut Pascal.
#
# Publication:
# - <NAME>; <NAME>; <NAME>.; <NAME>; <NAME>;
# <NAME>; <NAME>; <NAME>; <NAME> (2018)
# Evolutionary algorithms converge towards evolved biological photonic structures,
# https://arxiv.org/abs/1808.04689
# - <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
# <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016.
# Moosh: A Numerical Swiss Army Knife for the Optics of Multilayers in Octave/Matlab. Journal of Open Research Software, 4(1), p.e13.
import typing as tp
import numpy as np
from scipy.linalg import toeplitz
# pylint: disable=blacklisted-name,too-many-locals,too-many-arguments
def bragg(X: np.ndarray) -> float:
    """Cost of a Bragg-mirror structure: ``1 - reflectance`` at 600 nm.

    The first half of ``X`` holds the layer permittivities, the second
    half the layer thicknesses.  Reflectance is evaluated by the standard
    admittance recursion for a stack between air (index 1) and a
    substrate of index sqrt(3).
    """
    wavelength = 600
    half = int(np.size(X) / 2)
    # Stack of refractive indices: air | layers | substrate.
    index = np.concatenate(([1], np.sqrt(X[0:half]), [1.7320508075688772]))
    thickness = np.concatenate(([0], X[half : 2 * half], [0]))
    phase = np.tan(2 * np.pi * index * thickness / wavelength)
    # Admittance recursion, starting from the substrate.
    Z = index[-1]
    for k in range(np.size(index) - 1, 0, -1):
        Z = (Z - 1j * index[k] * phase[k]) / (1 - 1j * phase[k] * Z / index[k])
    # Reflection coefficient at the air interface.
    r = (1 - Z) / (1 + Z)
    return float(np.real(1 - r * np.conj(r)))
def chirped(X: np.ndarray) -> float:
    """Cost of a chirped mirror: ``1 - mean reflectance`` over 500-800 nm.

    ``X`` holds the thicknesses of an alternating stack of indices
    sqrt(3) / sqrt(2) sandwiched between air and a sqrt(3) substrate.
    """
    wavelengths = np.linspace(500, 800, 50)
    index = np.array([1, 1.4142135623730951, 1.7320508075688772])
    layer_type = np.concatenate(([0], np.tile([2, 1], int(np.size(X) / 2)), [2]))
    thickness = np.concatenate(([0], X, [0]))
    refl = np.zeros(np.size(wavelengths)) + 0j
    for m in range(0, np.size(wavelengths)):
        phase = np.tan(
            2 * np.pi * index[layer_type] * thickness[layer_type] / wavelengths[m]
        )
        # Admittance recursion, starting from the substrate.
        Z = 1.7320508075688772
        for k in range(np.size(layer_type) - 1, 0, -1):
            Z = (Z - 1j * index[layer_type[k]] * phase[k]) / (
                1 - 1j * phase[k] * Z / index[layer_type[k]]
            )
        # Reflection coefficient at the air interface.
        refl[m] = (1 - Z) / (1 + Z)
    return float(1 - np.real(np.sum(refl * np.conj(refl)) / np.size(wavelengths)))
def cascade(T: np.ndarray, U: np.ndarray) -> np.ndarray:
n = int(T.shape[1] / 2)
J = np.linalg.inv(np.eye(n) - np.matmul(U[0:n, 0:n], T[n : 2 * n, n : 2 * n]))
K = np.linalg.inv(np.eye(n) - np.matmul(T[n : 2 * n, n : 2 * n], U[0:n, 0:n]))
S = np.block(
[
[
T[0:n, 0:n]
+ np.matmul(
np.matmul(np.matmul(T[0:n, n : 2 * n], J), U[0:n, 0:n]),
T[n : 2 * n, 0:n],
),
np.matmul(np.matmul(T[0:n, n : 2 * n], J), U[0:n, n : 2 * n]),
],
[
np.matmul(np.matmul(U[n : 2 * n, 0:n], K), T[n : 2 * n, 0:n]),
U[n : 2 * n, n : 2 * n]
+ np.matmul(
np.matmul(np.matmul(U[n : 2 * n, 0:n], K), T[n : 2 * n, n : 2 * n]),
U[0:n, n : 2 * n],
),
],
]
)
return S # type: ignore
def c_bas(A: np.ndarray, V: np.ndarray, h: float) -> np.ndarray:
    """Propagate scattering matrix ``A`` across a layer of height ``h``.

    ``V`` holds the modal propagation constants; the phase factors
    ``exp(i V h)`` are applied on the side of the layer being added.
    """
    n = int(A.shape[1] / 2)
    # Diagonal matrix of modal phase factors across the layer.
    D = np.diag(np.exp(1j * V * h))
    upper_left = A[0:n, 0:n]
    upper_right = np.matmul(A[0:n, n : 2 * n], D)
    lower_left = np.matmul(D, A[n : 2 * n, 0:n])
    lower_right = np.matmul(np.matmul(D, A[n : 2 * n, n : 2 * n]), D)
    S = np.block([[upper_left, upper_right], [lower_left, lower_right]])
    return S  # type: ignore
def marche(a: float, b: float, p: float, n: int, x: float) -> np.ndarray:
    """Toeplitz matrix of the Fourier coefficients of a binary profile.

    The profile takes value ``a`` over a fraction ``p`` of the period and
    ``b`` elsewhere, offset by ``x``; ``n`` is the number of retained
    Fourier orders.  The result is Hermitian since the profile is real.
    """
    # NOTE: the `np.complex` alias was removed in NumPy 1.24; the builtin
    # `complex` (complex128) is the drop-in replacement.
    first_col = np.zeros(n, dtype=complex)
    first_row = np.zeros(n, dtype=complex)
    # Fourier coefficients of the indicator of the step, shifted by x.
    tmp = (
        1
        / (2 * np.pi * np.arange(1, n))
        * (np.exp(-2 * 1j * np.pi * p * np.arange(1, n)) - 1)
        * np.exp(-2 * 1j * np.pi * np.arange(1, n) * x)
    )
    first_col[1:n] = 1j * (a - b) * tmp
    # Zeroth order is the mean value of the profile.
    first_col[0] = p * a + (1 - p) * b
    first_row[0] = first_col[0]
    first_row[1:n] = 1j * (b - a) * np.conj(tmp)
    T = toeplitz(first_col, first_row)
    return T  # type: ignore
def creneau(k0: float, a0: float, pol: float, e1: float, e2: float, a: float, n: int, x0: float) -> tp.Tuple[np.ndarray, np.ndarray]:
    """Eigenmodes of a lamellar (binary) grating layer.

    Returns the modal field matrix ``P`` (stacked field / derivative
    blocks) and the modal propagation constants ``L`` for either TE
    (``pol == 0``) or TM polarisation.
    """
    nmod = int(n / 2)
    alpha = np.diag(a0 + 2 * np.pi * np.arange(-nmod, nmod + 1))
    if pol == 0:
        # TE: eigenproblem on the permittivity Toeplitz matrix.
        M = alpha * alpha - k0 * k0 * marche(e1, e2, a, n, x0)
        B = None
    else:
        # TM: inverse-permittivity (Li factorisation) formulation.
        U = marche(1 / e1, 1 / e2, a, n, x0)
        T = np.linalg.inv(U)
        M = (
            np.matmul(
                np.matmul(np.matmul(T, alpha), np.linalg.inv(marche(e1, e2, a, n, x0))),
                alpha,
            )
            - k0 * k0 * T
        )
        B = U
    L, E = np.linalg.eig(M)
    L = np.sqrt(-L + 0j)
    # Flip the branch so that decaying/outgoing modes are selected.
    L = (1 - 2 * (np.imag(L) < -1e-15)) * L
    if B is None:
        P = np.block([[E], [np.matmul(E, np.diag(L))]])
    else:
        P = np.block([[E], [np.matmul(np.matmul(B, E), np.diag(L))]])
    return P, L
def homogene(k0: float, a0: float, pol: float, epsilon: float, n: int) -> tp.Tuple[np.ndarray, np.ndarray]:
    """Eigenmodes of a homogeneous layer of permittivity ``epsilon``.

    In a uniform medium the Fourier orders decouple, so the modal matrix
    is the identity and the propagation constants follow directly from
    the dispersion relation.
    """
    nmod = int(n / 2)
    orders = a0 + 2 * np.pi * np.arange(-nmod, nmod + 1)
    valp = np.sqrt(epsilon * k0 * k0 - orders ** 2 + 0j)
    # Fix the branch sign and apply the TM (1/epsilon) weighting when pol != 0.
    valp = valp * (1 - 2 * (valp < 0)) * (pol / epsilon + (1 - pol))
    P = np.block([[np.eye(n)], [np.diag(valp)]])
    return P, valp
def interface(P: np.ndarray, Q: np.ndarray) -> np.ndarray:
    """Scattering matrix of the interface between two modal bases.

    ``P`` and ``Q`` are the stacked field/derivative matrices of the two
    adjacent layers; the interface matrix enforces continuity of both
    blocks across the boundary.
    """
    n = int(P.shape[1])
    lhs = np.block(
        [[P[0:n, 0:n], -Q[0:n, 0:n]], [P[n : 2 * n, 0:n], Q[n : 2 * n, 0:n]]]
    )
    rhs = np.block(
        [[-P[0:n, 0:n], Q[0:n, 0:n]], [P[n : 2 * n, 0:n], Q[n : 2 * n, 0:n]]]
    )
    S = np.matmul(np.linalg.inv(lhs), rhs)
    return S  # type: ignore
def morpho(X: np.ndarray) -> float:
    """Cost function for a Morpho-butterfly-like multilayer grating.

    ``X`` packs, in quarters: ridge heights, ridge offsets, ridge widths
    and spacer thicknesses (all later normalised by the period ``d``).
    The cost rewards diffraction into orders +/-1 at the design
    wavelength and penalises specular reflection averaged over the
    visible band.
    """
    lam = 449.5897
    pol = 1.0
    d = 600.521475
    nmod = 25
    # nmod=1
    e2 = 2.4336
    n = 2 * nmod + 1
    n_motifs = int(X.size / 4)
    X = X / d
    h = X[0:n_motifs]
    x0 = X[n_motifs : 2 * n_motifs]
    a = X[2 * n_motifs : 3 * n_motifs]
    spacers = X[3 * n_motifs : 4 * n_motifs]
    l = lam / d  # noqa
    k0 = 2 * np.pi / l
    P, V = homogene(k0, 0, pol, 1, n)
    # NOTE: the `np.complex`/`np.float` aliases were removed in NumPy 1.24;
    # the builtins `complex`/`float` are the drop-in replacements.
    S = np.block(
        [[np.zeros([n, n]), np.eye(n, dtype=complex)], [np.eye(n), np.zeros([n, n])]]
    )
    # Stack the grating motifs and their spacer layers.
    for j in range(0, n_motifs):
        Pc, Vc = creneau(k0, 0, pol, e2, 1, a[j], n, x0[j])
        S = cascade(S, interface(P, Pc))
        S = c_bas(S, Vc, h[j])
        S = cascade(S, interface(Pc, P))
        S = c_bas(S, V, spacers[j])
    # Close the stack with the substrate interface.
    Pc, Vc = homogene(k0, 0, pol, e2, n)
    S = cascade(S, interface(P, Pc))
    R = np.zeros(3, dtype=float)
    for j in range(-1, 2):
        # Diffraction efficiency of order j at the design wavelength.
        R[j] = abs(S[j + nmod, nmod]) ** 2 * np.real(V[j + nmod]) / k0
    cost: float = 1 - (R[-1] + R[1]) / 2 + R[0] / 2
    # Average specular reflectance over five visible wavelengths.
    lams = (np.array([400, 500, 600, 700, 800]) + 0.24587) / d
    bar = 0
    for lo in lams:
        k0 = 2 * np.pi / lo
        P, V = homogene(k0, 0, pol, 1, n)
        S = np.block(
            [
                [np.zeros([n, n], dtype=complex), np.eye(n)],
                [np.eye(n), np.zeros([n, n])],
            ]
        )
        for j in range(0, n_motifs):
            Pc, Vc = creneau(k0, 0, pol, e2, 1, a[j], n, x0[j])
            S = cascade(S, interface(P, Pc))
            S = c_bas(S, Vc, h[j])
            S = cascade(S, interface(Pc, P))
            S = c_bas(S, V, spacers[j])
        Pc, Vc = homogene(k0, 0, pol, e2, n)
        S = cascade(S, interface(P, Pc))
        bar += abs(S[nmod, nmod]) ** 2 * np.real(V[nmod]) / k0
    cost += bar / lams.size
    return cost
| [
"numpy.block",
"numpy.sqrt",
"numpy.array",
"numpy.imag",
"numpy.arange",
"numpy.exp",
"numpy.real",
"numpy.linspace",
"numpy.matmul",
"numpy.concatenate",
"numpy.eye",
"numpy.linalg.eig",
"numpy.conj",
"numpy.size",
"numpy.tan",
"numpy.diag",
"numpy.zeros",
"scipy.linalg.toeplitz"... | [((1124, 1145), 'numpy.arange', 'np.arange', (['(0)', '(bar + 2)'], {}), '(0, bar + 2)\n', (1133, 1145), True, 'import numpy as np\n'), ((1160, 1202), 'numpy.concatenate', 'np.concatenate', (['([0], X[bar:2 * bar], [0])'], {}), '(([0], X[bar:2 * bar], [0]))\n', (1174, 1202), True, 'import numpy as np\n'), ((1215, 1258), 'numpy.tan', 'np.tan', (['(2 * np.pi * n[Type] * hauteur / lam)'], {}), '(2 * np.pi * n[Type] * hauteur / lam)\n', (1221, 1258), True, 'import numpy as np\n'), ((1587, 1612), 'numpy.linspace', 'np.linspace', (['(500)', '(800)', '(50)'], {}), '(500, 800, 50)\n', (1598, 1612), True, 'import numpy as np\n'), ((1621, 1674), 'numpy.array', 'np.array', (['[1, 1.4142135623730951, 1.7320508075688772]'], {}), '([1, 1.4142135623730951, 1.7320508075688772])\n', (1629, 1674), True, 'import numpy as np\n'), ((1765, 1794), 'numpy.concatenate', 'np.concatenate', (['([0], X, [0])'], {}), '(([0], X, [0]))\n', (1779, 1794), True, 'import numpy as np\n'), ((3751, 3780), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.complex'}), '(n, dtype=np.complex)\n', (3759, 3780), True, 'import numpy as np\n'), ((3797, 3826), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.complex'}), '(n, dtype=np.complex)\n', (3805, 3826), True, 'import numpy as np\n'), ((4141, 4155), 'scipy.linalg.toeplitz', 'toeplitz', (['l', 'm'], {}), '(l, m)\n', (4149, 4155), False, 'from scipy.linalg import toeplitz\n'), ((6761, 6788), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (6769, 6788), True, 'import numpy as np\n'), ((1853, 1865), 'numpy.size', 'np.size', (['lam'], {}), '(lam)\n', (1860, 1865), True, 'import numpy as np\n'), ((1920, 1972), 'numpy.tan', 'np.tan', (['(2 * np.pi * n[Type] * hauteur[Type] / lam[m])'], {}), '(2 * np.pi * n[Type] * hauteur[Type] / lam[m])\n', (1926, 1972), True, 'import numpy as np\n'), ((3367, 3387), 'numpy.exp', 'np.exp', (['(1.0j * V * h)'], {}), '(1.0j * V * h)\n', (3373, 3387), True, 'import 
numpy as np\n'), ((4120, 4132), 'numpy.conj', 'np.conj', (['tmp'], {}), '(tmp)\n', (4127, 4132), True, 'import numpy as np\n'), ((4503, 4519), 'numpy.linalg.eig', 'np.linalg.eig', (['M'], {}), '(M)\n', (4516, 4519), True, 'import numpy as np\n'), ((4532, 4550), 'numpy.sqrt', 'np.sqrt', (['(-L + 0.0j)'], {}), '(-L + 0.0j)\n', (4539, 4550), True, 'import numpy as np\n'), ((4720, 4736), 'numpy.linalg.inv', 'np.linalg.inv', (['U'], {}), '(U)\n', (4733, 4736), True, 'import numpy as np\n'), ((4951, 4967), 'numpy.linalg.eig', 'np.linalg.eig', (['M'], {}), '(M)\n', (4964, 4967), True, 'import numpy as np\n'), ((4980, 4998), 'numpy.sqrt', 'np.sqrt', (['(-L + 0.0j)'], {}), '(-L + 0.0j)\n', (4987, 4998), True, 'import numpy as np\n'), ((5778, 5853), 'numpy.block', 'np.block', (['[[-P[0:n, 0:n], Q[0:n, 0:n]], [P[n:2 * n, 0:n], Q[n:2 * n, 0:n]]]'], {}), '([[-P[0:n, 0:n], Q[0:n, 0:n]], [P[n:2 * n, 0:n], Q[n:2 * n, 0:n]]])\n', (5786, 5853), True, 'import numpy as np\n'), ((1026, 1036), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (1033, 1036), True, 'import numpy as np\n'), ((1071, 1088), 'numpy.sqrt', 'np.sqrt', (['X[0:bar]'], {}), '(X[0:bar])\n', (1078, 1088), True, 'import numpy as np\n'), ((1326, 1339), 'numpy.size', 'np.size', (['Type'], {}), '(Type)\n', (1333, 1339), True, 'import numpy as np\n'), ((1812, 1824), 'numpy.size', 'np.size', (['lam'], {}), '(lam)\n', (1819, 1824), True, 'import numpy as np\n'), ((2415, 2424), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2421, 2424), True, 'import numpy as np\n'), ((2427, 2470), 'numpy.matmul', 'np.matmul', (['U[0:n, 0:n]', 'T[n:2 * n, n:2 * n]'], {}), '(U[0:n, 0:n], T[n:2 * n, n:2 * n])\n', (2436, 2470), True, 'import numpy as np\n'), ((2498, 2507), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2504, 2507), True, 'import numpy as np\n'), ((2510, 2553), 'numpy.matmul', 'np.matmul', (['T[n:2 * n, n:2 * n]', 'U[0:n, 0:n]'], {}), '(T[n:2 * n, n:2 * n], U[0:n, 0:n])\n', (2519, 2553), True, 'import numpy as np\n'), ((5649, 5724), 
'numpy.block', 'np.block', (['[[P[0:n, 0:n], -Q[0:n, 0:n]], [P[n:2 * n, 0:n], Q[n:2 * n, 0:n]]]'], {}), '([[P[0:n, 0:n], -Q[0:n, 0:n]], [P[n:2 * n, 0:n], Q[n:2 * n, 0:n]]])\n', (5657, 5724), True, 'import numpy as np\n'), ((6952, 6987), 'numpy.array', 'np.array', (['[400, 500, 600, 700, 800]'], {}), '([400, 500, 600, 700, 800])\n', (6960, 6987), True, 'import numpy as np\n'), ((1506, 1516), 'numpy.conj', 'np.conj', (['r'], {}), '(r)\n', (1513, 1516), True, 'import numpy as np\n'), ((2027, 2040), 'numpy.size', 'np.size', (['Type'], {}), '(Type)\n', (2034, 2040), True, 'import numpy as np\n'), ((2272, 2284), 'numpy.size', 'np.size', (['lam'], {}), '(lam)\n', (2279, 2284), True, 'import numpy as np\n'), ((3441, 3470), 'numpy.matmul', 'np.matmul', (['A[0:n, n:2 * n]', 'D'], {}), '(A[0:n, n:2 * n], D)\n', (3450, 3470), True, 'import numpy as np\n'), ((3505, 3534), 'numpy.matmul', 'np.matmul', (['D', 'A[n:2 * n, 0:n]'], {}), '(D, A[n:2 * n, 0:n])\n', (3514, 3534), True, 'import numpy as np\n'), ((4380, 4406), 'numpy.arange', 'np.arange', (['(-nmod)', '(nmod + 1)'], {}), '(-nmod, nmod + 1)\n', (4389, 4406), True, 'import numpy as np\n'), ((5461, 5470), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (5467, 5470), True, 'import numpy as np\n'), ((5474, 5487), 'numpy.diag', 'np.diag', (['valp'], {}), '(valp)\n', (5481, 5487), True, 'import numpy as np\n'), ((6348, 6364), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (6356, 6364), True, 'import numpy as np\n'), ((6366, 6393), 'numpy.eye', 'np.eye', (['n'], {'dtype': 'np.complex'}), '(n, dtype=np.complex)\n', (6372, 6393), True, 'import numpy as np\n'), ((6397, 6406), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (6403, 6406), True, 'import numpy as np\n'), ((6408, 6424), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (6416, 6424), True, 'import numpy as np\n'), ((6861, 6881), 'numpy.real', 'np.real', (['V[j + nmod]'], {}), '(V[j + nmod])\n', (6868, 6881), True, 'import numpy as np\n'), ((7670, 7686), 
'numpy.real', 'np.real', (['V[nmod]'], {}), '(V[nmod])\n', (7677, 7686), True, 'import numpy as np\n'), ((2819, 2848), 'numpy.matmul', 'np.matmul', (['T[0:n, n:2 * n]', 'J'], {}), '(T[0:n, n:2 * n], J)\n', (2828, 2848), True, 'import numpy as np\n'), ((2927, 2956), 'numpy.matmul', 'np.matmul', (['U[n:2 * n, 0:n]', 'K'], {}), '(U[n:2 * n, 0:n], K)\n', (2936, 2956), True, 'import numpy as np\n'), ((3564, 3597), 'numpy.matmul', 'np.matmul', (['D', 'A[n:2 * n, n:2 * n]'], {}), '(D, A[n:2 * n, n:2 * n])\n', (3573, 3597), True, 'import numpy as np\n'), ((3872, 3887), 'numpy.arange', 'np.arange', (['(1)', 'n'], {}), '(1, n)\n', (3881, 3887), True, 'import numpy as np\n'), ((3986, 4001), 'numpy.arange', 'np.arange', (['(1)', 'n'], {}), '(1, n)\n', (3995, 4001), True, 'import numpy as np\n'), ((4800, 4819), 'numpy.matmul', 'np.matmul', (['T', 'alpha'], {}), '(T, alpha)\n', (4809, 4819), True, 'import numpy as np\n'), ((7158, 7192), 'numpy.zeros', 'np.zeros', (['[n, n]'], {'dtype': 'np.complex'}), '([n, n], dtype=np.complex)\n', (7166, 7192), True, 'import numpy as np\n'), ((7194, 7203), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (7200, 7203), True, 'import numpy as np\n'), ((7223, 7232), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (7229, 7232), True, 'import numpy as np\n'), ((7234, 7250), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (7242, 7250), True, 'import numpy as np\n'), ((1727, 1737), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (1734, 1737), True, 'import numpy as np\n'), ((2258, 2268), 'numpy.conj', 'np.conj', (['r'], {}), '(r)\n', (2265, 2268), True, 'import numpy as np\n'), ((3929, 3944), 'numpy.arange', 'np.arange', (['(1)', 'n'], {}), '(1, n)\n', (3938, 3944), True, 'import numpy as np\n'), ((4571, 4581), 'numpy.imag', 'np.imag', (['L'], {}), '(L)\n', (4578, 4581), True, 'import numpy as np\n'), ((4638, 4648), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (4645, 4648), True, 'import numpy as np\n'), ((5019, 5029), 'numpy.imag', 'np.imag', 
(['L'], {}), '(L)\n', (5026, 5029), True, 'import numpy as np\n'), ((5083, 5098), 'numpy.matmul', 'np.matmul', (['U', 'E'], {}), '(U, E)\n', (5092, 5098), True, 'import numpy as np\n'), ((5100, 5110), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (5107, 5110), True, 'import numpy as np\n'), ((2688, 2717), 'numpy.matmul', 'np.matmul', (['T[0:n, n:2 * n]', 'J'], {}), '(T[0:n, n:2 * n], J)\n', (2697, 2717), True, 'import numpy as np\n'), ((3079, 3108), 'numpy.matmul', 'np.matmul', (['U[n:2 * n, 0:n]', 'K'], {}), '(U[n:2 * n, 0:n], K)\n', (3088, 3108), True, 'import numpy as np\n'), ((5329, 5355), 'numpy.arange', 'np.arange', (['(-nmod)', '(nmod + 1)'], {}), '(-nmod, nmod + 1)\n', (5338, 5355), True, 'import numpy as np\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset.
This example uses APIs in Tensorflow 1.4 or above.
"""
# Converted by <NAME> to the CK format (http://cKnowledge.org)
# from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/learn/iris.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import json
import numpy as np
import tensorflow as tf
ck_params='ck-params.json'
def main(i):
    """Train or query a TensorFlow linear classifier (CK wrapper).

    Args:
      i: dict with keys
         'mode'       - 'train' or 'prediction'
         'input_file' - path to a JSON run description; it must contain
                        'ftable' (feature rows), 'output_file' and
                        'model_dir', and may contain 'ctable' (labels),
                        'model_params', 'ftable_test', 'ctable_test'.

    Side effects: reads/writes JSON files and trains or queries a
    TensorFlow estimator stored in the model directory.
    """
    mode = i['mode']
    fi = i['input_file']

    # Load the run description (features, labels, parameters, paths).
    with open(fi) as f:
        d = json.loads(f.read())

    ftable = d['ftable']
    ctable = d.get('ctable', [])
    model_params = d.get('model_params', {})
    fo = d['output_file']
    fod = d['model_dir']

    # Prepare model parameters.
    if mode == 'train':
        # Derive the number of classes from the highest label seen
        # (labels are assumed to be 0-based integers).
        max_label = 0
        for row in ctable:
            q = row[0]
            if q > max_label:
                max_label = q
        xn_classes = max_label + 1
        xhidden_units = model_params.get('hidden_units', [])
        if len(xhidden_units) == 0:
            xhidden_units = [10, 20, 10]
        feature_length = len(ftable[0])
    else:
        # Reuse the configuration recorded next to the trained model.
        x = os.path.join(fod, ck_params)
        with open(x) as f:
            dx = json.loads(f.read())
        xn_classes = dx['n_classes']
        feature_length = dx['feature_length']
        xhidden_units = dx['hidden_units']

    # Specify that all features have real-value data.
    feature_columns = [tf.feature_column.numeric_column("x", shape=[feature_length])]
    classifier = tf.estimator.LinearClassifier(
        feature_columns=feature_columns,
        n_classes=xn_classes,
        model_dir=fod)

    if mode == 'train':
        print('')
        print('Training ...')
        print('')
        xsteps = model_params.get('training_steps', '')
        if xsteps == '' or xsteps is None:
            xsteps = "2000"
        xsteps = int(xsteps)
        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={"x": np.array(ftable)},
            y=np.array(ctable),
            num_epochs=None,
            shuffle=True)
        classifier.train(input_fn=train_input_fn, steps=xsteps)

        # Fall back to the training data when no test split is given.
        ftable_test = d.get('ftable_test', [])
        if len(ftable_test) == 0:
            ftable_test = ftable
        ctable_test = d.get('ctable_test', [])
        if len(ctable_test) == 0:
            ctable_test = ctable

        print('')
        print('Testing ...')
        print('')
        test_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={"x": np.array(ftable_test)},
            y=np.array(ctable_test),
            num_epochs=1,
            shuffle=False)
        # Evaluate accuracy.
        accuracy_score = classifier.evaluate(input_fn=test_input_fn)["accuracy"]
        print('')
        print('Test Accuracy: {0:f}'.format(accuracy_score))
        print('')

        dd = {'output_dir': fod,
              'accuracy': float(accuracy_score),
              'hidden_units': xhidden_units,
              'n_classes': xn_classes,
              'feature_length': feature_length}
        s = json.dumps(dd, indent=2, sort_keys=True)
        # Record model info both to the caller's output file and to the
        # model dir, so that 'prediction' mode can recover the config.
        with open(fo, 'w') as f:
            f.write(s)
        x = os.path.join(fod, ck_params)
        with open(x, 'w') as f:
            f.write(s)

    elif mode == 'prediction':
        # Classify samples.
        predict_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={"x": np.array(ftable, dtype=np.float32)},
            num_epochs=1,
            shuffle=False)
        ctable = []
        print('')
        print('Predictions:')
        predictions = list(classifier.predict(input_fn=predict_input_fn))
        for q in range(0, len(predictions)):
            # np.asscalar() was removed in NumPy 1.23; .item() is the
            # documented equivalent.
            cls = predictions[q]['class_ids'][0].item()
            print(str(q) + ') ' + str(cls))
            ctable.append(int(cls))
        # Record prediction.
        dd = {'ftable': ftable,
              'ctable': ctable}
        print('')
        print('Recording results to ' + fo + ' ...')
        print('')
        s = json.dumps(dd, indent=2, sort_keys=True)
        with open(fo, 'w') as f:
            f.write(s)

    else:
        print('Error in CK-TF wrapper: mode "' + mode + '" is not supported ...')
        sys.exit(1)
    return
if __name__ == "__main__":
argv=sys.argv[1:]
if len(argv)<2:
print ('Not enough command line arguments ...')
exit(1)
mode=argv[0]
input_file=argv[1]
main({'mode':mode, 'input_file':input_file})
| [
"json.loads",
"tensorflow.estimator.LinearClassifier",
"json.dumps",
"os.path.join",
"numpy.asscalar",
"tensorflow.feature_column.numeric_column",
"numpy.array"
] | [((2274, 2378), 'tensorflow.estimator.LinearClassifier', 'tf.estimator.LinearClassifier', ([], {'feature_columns': 'feature_columns', 'n_classes': 'xn_classes', 'model_dir': 'fod'}), '(feature_columns=feature_columns, n_classes=\n xn_classes, model_dir=fod)\n', (2303, 2378), True, 'import tensorflow as tf\n'), ((1217, 1230), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (1227, 1230), False, 'import json\n'), ((1877, 1905), 'os.path.join', 'os.path.join', (['fod', 'ck_params'], {}), '(fod, ck_params)\n', (1889, 1905), False, 'import os\n'), ((2195, 2256), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""x"""'], {'shape': '[feature_length]'}), "('x', shape=[feature_length])\n", (2227, 2256), True, 'import tensorflow as tf\n'), ((3841, 3881), 'json.dumps', 'json.dumps', (['dd'], {'indent': '(2)', 'sort_keys': '(True)'}), '(dd, indent=2, sort_keys=True)\n', (3851, 3881), False, 'import json\n'), ((3995, 4023), 'os.path.join', 'os.path.join', (['fod', 'ck_params'], {}), '(fod, ck_params)\n', (4007, 4023), False, 'import os\n'), ((1961, 1974), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (1971, 1974), False, 'import json\n'), ((4971, 5011), 'json.dumps', 'json.dumps', (['dd'], {'indent': '(2)', 'sort_keys': '(True)'}), '(dd, indent=2, sort_keys=True)\n', (4981, 5011), False, 'import json\n'), ((2847, 2863), 'numpy.array', 'np.array', (['ctable'], {}), '(ctable)\n', (2855, 2863), True, 'import numpy as np\n'), ((3356, 3377), 'numpy.array', 'np.array', (['ctable_test'], {}), '(ctable_test)\n', (3364, 3377), True, 'import numpy as np\n'), ((2817, 2833), 'numpy.array', 'np.array', (['ftable'], {}), '(ftable)\n', (2825, 2833), True, 'import numpy as np\n'), ((3321, 3342), 'numpy.array', 'np.array', (['ftable_test'], {}), '(ftable_test)\n', (3329, 3342), True, 'import numpy as np\n'), ((4299, 4333), 'numpy.array', 'np.array', (['ftable'], {'dtype': 'np.float32'}), '(ftable, dtype=np.float32)\n', (4307, 4333), True, 'import 
numpy as np\n'), ((4759, 4802), 'numpy.asscalar', 'np.asscalar', (["predictions[q]['class_ids'][0]"], {}), "(predictions[q]['class_ids'][0])\n", (4770, 4802), True, 'import numpy as np\n'), ((4686, 4729), 'numpy.asscalar', 'np.asscalar', (["predictions[q]['class_ids'][0]"], {}), "(predictions[q]['class_ids'][0])\n", (4697, 4729), True, 'import numpy as np\n')] |
"""
From <NAME>'s odysseus project.
"""
"""Functions that approach several polylogarithms by polynomials.
Precision is on the order of 1e-7 or better. For working with fermions, the
polylog functions Li(x) are usually used in the form -Li(-exp(x)). We therefore
define functions fermi_poly as:
fermi_poly_s(x) :math:`=-Li_s(-e^x)`,
with :math:`Li_s(z)=\sum_{k=1}^{\infty}\frac{z^k}{k^s}`.
This is useful if you are only dealing with Fermi statistics. For working with
bose statistics we define g-functions in a similar way.
There is a more accurate and general algorithm in lerch.py for Li_s(x),
that works for all s>0, the polynomial approximations in this file are much
faster however.
"""
import numpy as np
def fermi_poly3(x):
    """fermi_poly3(x), equal to -Li_3(-e^x).

    Piecewise approximation (series expansions in the tails, a polynomial
    in the middle); accepts scalars or ndarrays, always returns an ndarray.
    """
    def far_negative(t):
        # x <= -20: a single Boltzmann term suffices.
        return np.exp(t)

    def negative(t):
        # -20 < x <= -2: short series in powers of e^x.
        ex = np.exp(t)
        return (1 + (-0.125 + (0.037037037037037035 + (-0.015625 + (0.008 - 0.004629629629629629*ex)*ex)*ex)*ex)*ex)*ex

    def middle(t):
        # -2 < x <= 2: polynomial expansion about 0.
        t2 = t**2
        return 0.9015426773696955 + (0.8224670334241131 + (0.34657359027997264 + (0.08333333333333333 + (0.010416666666666666 +(-0.00017361111111111112 + (6.200396825396825e-6 +(-2.927965167548501e-7 + (1.6179486665597777e-8 + (-9.90785651003905e-10 + (6.525181428041877e-11 +(-4.5372283133067906e-12 + 3.290608283068484e-13*t2)*t2)*t2)*t2)*t2)*t2)*t2)*t2)*t)*t)*t)*t

    def positive(t):
        # 2 < x <= 20: large-x form plus a correction series in e^-x.
        emx = np.exp(-t)
        return (((((0.008*emx - 0.015625)*emx + 0.037037037037037035)*emx) - 0.125)*emx + 1)*emx + 1.6449340668482262*t + 0.16666666666666666*t**3

    def far_positive(t):
        # x > 20: large-x form only.
        return 1.6449340668482262*t + 0.16666666666666666*t**3

    # np.piecewise in older numpy releases mishandles scalar input,
    # so promote scalars to one-element float arrays first.
    if np.isscalar(x):
        x = np.asarray([x], dtype=float)
    regions = [x <= -20,
               np.logical_and(x > -20, x <= -2),
               np.logical_and(x > -2, x <= 2),
               np.logical_and(x > 2, x <= 20)]
    return np.piecewise(x, regions,
                        [far_negative, negative, middle, positive, far_positive])
def fermi_poly5half(x):
    """fermi_poly5half(x), equal to -Li_{5/2}(-e^x)
    FAILS TESTS (COMPARING TO LERCH), DO NOT USE WITHOUT INVESTIGATING MORE
    """
    def far_low(x):
        # x <= -20: single Boltzmann term.
        return np.exp(x)

    def low(x):
        # -20 < x <= -2: series in powers of e^x.
        ex = np.exp(x)
        return (1 + (-0.17677669529663687 + (0.06415002990995843 - (0.03125 + (0.01788854381999832 - (0.011340230290662863 + (0.007713560673657698 - (0.005524271728019902 + (0.00411522633744856 - 0.0031622776601683794*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex

    def middle(x):
        # -2 < x <= 2: polynomial expansion about 0.
        res = (7.999472242952045e-8 + (2.015789875039643e-8 + (-5.182488893752819e-9 + (-1.3550552937770878e-9 + (3.5944104666022113e-10 + (9.653703483078106e-11 + (-2.6209625544677692e-11 + (-7.185930974961928e-12 + (1.9812061650792594e-12 + 5.447084984800099e-13*x)*x)*x)*x)*x)*x)*x)*x)*x)*x
        return 0.8671998890121841+(0.7651470246254081+(0.30244932171081546+(0.06335080210161399+(0.0049450362799933825+(-0.0007320093393446121+(-0.00013339945006254949 + (0.000027147085179903566+(5.930588304137955e-6+(-1.3626304577484817e-6 + (-3.252451788607287e-7 + res*x)*x)*x)*x)*x)*x)*x)*x)*x)*x)*x

    def upper(x):
        # 2 < x <= 12: polynomial expansion for moderate x.
        res = 5.992860912139351e-7 + (-6.083668666935579e-8 + (5.041252634789406e-9 + (-3.386896134140133e-10 + (1.8196669171414837e-11 + (-7.642990316874879e-13 + (2.4202106712129105e-14 + (-5.437364923509245e-16 + (7.72925401611516e-18 -5.228771407811986e-20*x)*x)*x)*x)*x)*x)*x)*x)*x
        return 0.869416215427492 + (0.7603408345815055 + (0.30606614629176887 + (0.06361411550944529 + (0.002145410757189772 + (0.002020072416997651 + (-0.0017045762862650151 + (0.0006382881546811445 + (- 0.00016246851298525836 + (0.00003140383144730955 + (-4.819813947314412e-6+res*x)*x)*x)*x)*x)*x)*x)*x)*x)*x)*x

    def high(x):
        # 12 < x <= 20: large-x form with 1/x corrections.
        rx = np.sqrt(x)
        return (-2.0851412241155116/x/x - 0.5343060576801043)/x/rx + 1.8561093322772355*rx + 0.30090111122547003*x**2*rx

    def far_high(x):
        # x > 20: large-x form only.
        rx = np.sqrt(x)
        return 1.8561093322772355*rx + 0.30090111122547003*x**2*rx

    # np.piecewise in older numpy releases mishandles scalar input.
    if np.isscalar(x):
        x = np.asarray([x], dtype=float)
    regions = [x <= -20,
               np.logical_and(x > -20, x <= -2),
               np.logical_and(x > -2, x <= 2),
               np.logical_and(x > 2, x <= 12),
               np.logical_and(x > 12, x <= 20)]
    return np.piecewise(x, regions,
                        [far_low, low, middle, upper, high, far_high])
def fermi_poly2(x):
    """fermi_poly2(x), equal to -Li_2(-e^x).

    Piecewise approximation; accepts scalars or ndarrays and always
    returns an ndarray.
    """
    def far_low(t):
        # x <= -20: single Boltzmann term.
        return np.exp(t)

    def low(t):
        # -20 < x <= -1: series in powers of e^x.
        ex = np.exp(t)
        return (1.+( -0.25+( 0.111111+( -0.0625+( 0.04+( -0.0277778+( 0.0204082+( -0.015625+( 0.0123457+( -0.01+( 0.00826446+( -0.00694444+( 0.00591716+( -0.00510204+( 0.00444444+( -0.00390625+( 0.00346021+( -0.00308642+( 0.00277008+ -0.0025*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex

    def middle(t):
        # -1 < x <= 1: polynomial expansion about 0.
        t2 = t**2
        return 0.822467+(0.6931471805599453+( 0.25+( 0.04166666666666666+( -0.0010416666666666534+( 0.00004960317460316857+( -2.927965167558005e-6+(1.9415383998507108e-7+( -1.3870999148454729e-8+(1.0440288911003276e-9+(-8.167040926799743e-11+6.5806618711692295e-12*t2)*t2)*t2)*t2)*t2)*t2)*t2)*t2)*t)*t)*t

    def high(t):
        # 1 < x <= 20: large-x form minus a correction series in e^-x.
        ex = np.exp(-t)
        return 1.6449340668482262 + 0.5*t**2 - (1.+( -0.25+( 0.111111+( -0.0625+( 0.04+( -0.0277778+( 0.0204082+( -0.015625+( 0.0123457+( -0.01+( 0.00826446+( -0.00694444+( 0.00591716+( -0.00510204+( 0.00444444+( -0.00390625+( 0.00346021+( -0.00308642+( 0.00277008 -0.0025*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex

    def far_high(t):
        # x > 20: large-x form only.
        return 1.6449340668482262 + 0.5*t**2

    # np.piecewise in older numpy releases mishandles scalar input.
    if np.isscalar(x):
        x = np.asarray([x], dtype=float)
    regions = [x <= -20,
               np.logical_and(x > -20, x <= -1),
               np.logical_and(x > -1, x <= 1),
               np.logical_and(x > 1, x <= 20)]
    return np.piecewise(x, regions, [far_low, low, middle, high, far_high])
def dilog(z):
    """Dilog(z), equal to Li_2(z).

    d = dilog(z) = Li_2(z)
      = -Int From t=0 To t=z log(1-t) dt/t for all z.
      = Sum From n=1 To n=Inf z**n/n**2 for |z|<=1.

    INPUT  z: real or complex, scalar, vector or matrix.  For real input
              with z > 1 the true result is complex; pass a complex array
              (e.g. ``z.astype(complex)``) to obtain it, otherwise the
              log of a negative number produces NaNs.
    OUTPUT d: component-wise dilogarithm of z.  The result is always a
              new array; the input is never modified.

    References:
    [1] Lewin 1958. Dilogarithms and associated functions. Macdonald.
    [2] Technical Report 15-92. University of Kent computing laboratory.
    [3] http://en.wikipedia.org/wiki/Polylog
    """
    # Work on a floating-point *copy*: the algorithm below overwrites z
    # in place, which previously mutated the caller's array, and with
    # integer input the assignments silently truncated (e.g. dilog(1)
    # returned 1 instead of pi**2/6).
    z = np.array(z, ndmin=1)
    if np.issubdtype(z.dtype, np.integer):
        z = z.astype(float)
    # Initialization.
    d = np.zeros_like(z)
    s = np.ones_like(z)
    # For large moduli: mapping onto the unit circle |z| <= 1.
    j = np.where(np.abs(z) > 1)
    d[j] = -1.64493406684822643 - 0.5*np.log(-z[j])**2
    s[j] = -s[j]
    z[j] = 1./z[j]
    # For large positive real parts: mapping onto |z| <= 1 with Re(z) <= 1/2.
    j = np.where(np.real(z) > 0.5)
    d[j] = d[j] + s[j]*(1.64493406684822643 - np.log((1 - z[j])**(np.log(z[j]))))
    s[j] = -s[j]
    z[j] = 1 - z[j]
    # Transformation to Debye function and rational approximation.
    z = -np.log(1 - z)
    s = s*z
    d = d - 0.25*s*z
    zsquared = z**2
    s = s*(1 + zsquared*(6.3710458848408100e-2 + zsquared*(1.04089578261587314e-3 + zsquared*4.0481119635180974e-6)))
    s = s/(1 + zsquared*(3.5932681070630322e-2 + zsquared*(3.20543530653919745e-4 + zsquared*4.0131343133751755e-7)))
    d = d + s
    return d
# g_5/2 function approximated by an 18th degree polynomial. note, it is not the first 18 terms in the series expansion for g_5/2(x).
# using the first 18 terms would systematically put all the error near x=1 and none of the error near x=0.
def g5halves(x):
    """g5halves(x), equal to -g_{5/2}(-e^x); scalar input."""
    if x < 1e-4:
        # leading-order behaviour for small arguments
        return x
    # 18th-degree polynomial fit -- not the first 18 series terms: the
    # fit spreads the error evenly instead of piling it up near x = 1.
    return (0.999856*x + 0.179586*x**2 + 0.0296957*x**3 + 0.328735*x**4
            -1.90262*x**5 + 9.61982*x**6 - 38.0899*x**7 + 121.384*x**8
            - 313.11*x**9 +655.703*x**10 - 1112.32*x**11 + 1517.67*x**12
            - 1643.55*x**13 + 1382.29*x**14 -871.741 *x**15 + 388.521 *x**16
            - 109.329 *x**17+ 14.6478*x**18)
# This is the g2-function, approximated by a piecewise defined function, each piece being a series expansion of 20th degrees around x = 0.35 and x = 0.9.
# Largest error is at "1": -0.005. At "0.98" it is already down to -8.2E-6.
# Standard Deviation from the "real" g2 is 2.5E-8. This should be good enough ;-)
def g_two(x):
    """g_two(x), equal to -g_2(-e^x); scalar input.

    Piecewise 20th-degree series expansions about x = 0.35 and x = 0.9.
    Largest error is at x = 1 (about -0.005); at x = 0.98 it is already
    down to about -8.2e-6.
    """
    if x < 1e-4:
        # leading-order behaviour for small arguments
        return x
    if x <= 0.82:
        # expansion about x = 0.35
        xp = -0.35+x
        inner = (0.49915291366412107+ (0.6328253123804949 + (0.81609574001162 + (1.0675943847818847 + (1.413674598740013 + (1.89162489083520825 + (2.554295058361196 + (3.4767564222154945 + (4.765898838137037 + (6.574283048568486 + 9.120156359601541*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp
        return (0.38660594116058644+ (1.2308083316927263 + (0.43950458109830315 + (0.2899264671105867+ (0.24571211455256878 + (0.23866441621304754 + (0.2525638070139697 + (0.28346805806862585 + (0.33208947069236144 + (0.4019515561735721 + inner)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)
    # expansion about x = 0.9
    xp = -0.9+x
    inner = (1.0976006258704519e7 + (8.992322600130886e7 + (7.501623708235847e8 + (6.35310981909834e9 + (5.449526520495981e10 + (4.725869207398318e11 + (4.137351587192319e12 + (3.6523031019929805e13 + (3.247815687554126e14 + (2.9069875624537055e15 + 2.6171279210392548e16*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp
    return 1.2997147230049588 + (2.5584278811044956 + (4.134206732719726 + (15.456143160948358 + (79.71247329180234 + (484.7000237406208 + (3254.9073854253556 + (23355.114659383304 + (175706.33693829825 + (1.36967275364119e6 + inner)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp
# This is the g3-function, approximated by a piecewise defined function, each piece being a series expansion of 20th degrees around x = 0.35 and x = 0.9.
# Largest error is at "1": -0.000013. At "0.98" it is already down to -3.8E-8.
def g_three(x):
    """g_three(x), equal to -g_3(-e^x); scalar input.

    Piecewise 20th-degree series expansions about x = 0.35 and x = 0.9.
    Largest error is at x = 1 (about -0.000013); at x = 0.98 it is
    already down to about -3.8e-8.
    """
    if x < 1e-4:
        # leading-order behaviour for small arguments
        return x
    if x <= 0.82:
        # expansion about x = 0.35
        xp = -0.35+x
        inner = (0.0396957603891595 + (0.04670760669864216 + (0.05617680434859797 + (0.0688359037828904 + (0.08570894194968269 + (0.10821263549305878 + (0.13830133228577052 + (0.17867204142073595 + (0.2330529526732962 + 0.30661105212057754*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp
        return 0.367187924868168 + (1.1045884033159612 + (0.18031418339537875 + (0.07512020410242393 + (0.04611846771665166 + (0.03499328210626231 + (0.030332383657974384 + (0.02880387961885722 + (0.02922889312039987 + (0.031193119279524506 + (0.03463242361601278 + inner)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp
    # expansion about x = 0.9
    xp = -0.9+x
    inner = (974070.3442266576 + (7.3341159458162645e6 + (5.6594271749896556e7 + (4.458241497102925e8 + (3.574350156223386e9 + (2.909525474975562e10 + (2.399887262219758e11 + (2.002668066305938e12 + (1.6885002547479885e13 + 1.4367625078064384e14*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp
    return 1.04965895018644 + (1.4441274700055098 + (0.6190557839438808 + ( 1.0726278388266532 + (3.399516567907888 + (14.692090448926841 + (76.15547620296046 + (444.122940985332 + (2811.9797322897193 + (18914.876429627224 + ( 133270.98508606057 + inner)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp)*xp
| [
"numpy.ones_like",
"numpy.abs",
"numpy.sqrt",
"numpy.isscalar",
"numpy.logical_and",
"numpy.log",
"numpy.exp",
"numpy.array",
"numpy.real",
"numpy.zeros_like"
] | [((1728, 1742), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (1739, 1742), True, 'import numpy as np\n'), ((4156, 4170), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (4167, 4170), True, 'import numpy as np\n'), ((5823, 5837), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (5834, 5837), True, 'import numpy as np\n'), ((6818, 6834), 'numpy.zeros_like', 'np.zeros_like', (['z'], {}), '(z)\n', (6831, 6834), True, 'import numpy as np\n'), ((6844, 6859), 'numpy.ones_like', 'np.ones_like', (['z'], {}), '(z)\n', (6856, 6859), True, 'import numpy as np\n'), ((815, 824), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (821, 824), True, 'import numpy as np\n'), ((853, 862), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (859, 862), True, 'import numpy as np\n'), ((1415, 1425), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1421, 1425), True, 'import numpy as np\n'), ((1756, 1782), 'numpy.array', 'np.array', (['[x]'], {'dtype': 'float'}), '([x], dtype=float)\n', (1764, 1782), True, 'import numpy as np\n'), ((2233, 2242), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2239, 2242), True, 'import numpy as np\n'), ((2271, 2280), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2277, 2280), True, 'import numpy as np\n'), ((3819, 3829), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (3826, 3829), True, 'import numpy as np\n'), ((4007, 4017), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (4014, 4017), True, 'import numpy as np\n'), ((4184, 4210), 'numpy.array', 'np.array', (['[x]'], {'dtype': 'float'}), '([x], dtype=float)\n', (4192, 4210), True, 'import numpy as np\n'), ((4598, 4607), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (4604, 4607), True, 'import numpy as np\n'), ((4636, 4645), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (4642, 4645), True, 'import numpy as np\n'), ((5333, 5343), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (5339, 5343), True, 'import numpy as np\n'), ((5851, 5877), 'numpy.array', 'np.array', (['[x]'], {'dtype': 'float'}), 
'([x], dtype=float)\n', (5859, 5877), True, 'import numpy as np\n'), ((6772, 6785), 'numpy.array', 'np.array', (['[z]'], {}), '([z])\n', (6780, 6785), True, 'import numpy as np\n'), ((7355, 7368), 'numpy.log', 'np.log', (['(1 - z)'], {}), '(1 - z)\n', (7361, 7368), True, 'import numpy as np\n'), ((1863, 1895), 'numpy.logical_and', 'np.logical_and', (['(x > -20)', '(x <= -2)'], {}), '(x > -20, x <= -2)\n', (1877, 1895), True, 'import numpy as np\n'), ((1918, 1948), 'numpy.logical_and', 'np.logical_and', (['(x > -2)', '(x <= 2)'], {}), '(x > -2, x <= 2)\n', (1932, 1948), True, 'import numpy as np\n'), ((1946, 1976), 'numpy.logical_and', 'np.logical_and', (['(x > 2)', '(x <= 20)'], {}), '(x > 2, x <= 20)\n', (1960, 1976), True, 'import numpy as np\n'), ((4291, 4323), 'numpy.logical_and', 'np.logical_and', (['(x > -20)', '(x <= -2)'], {}), '(x > -20, x <= -2)\n', (4305, 4323), True, 'import numpy as np\n'), ((4346, 4376), 'numpy.logical_and', 'np.logical_and', (['(x > -2)', '(x <= 2)'], {}), '(x > -2, x <= 2)\n', (4360, 4376), True, 'import numpy as np\n'), ((4374, 4404), 'numpy.logical_and', 'np.logical_and', (['(x > 2)', '(x <= 12)'], {}), '(x > 2, x <= 12)\n', (4388, 4404), True, 'import numpy as np\n'), ((4427, 4458), 'numpy.logical_and', 'np.logical_and', (['(x > 12)', '(x <= 20)'], {}), '(x > 12, x <= 20)\n', (4441, 4458), True, 'import numpy as np\n'), ((5959, 5991), 'numpy.logical_and', 'np.logical_and', (['(x > -20)', '(x <= -1)'], {}), '(x > -20, x <= -1)\n', (5973, 5991), True, 'import numpy as np\n'), ((6014, 6044), 'numpy.logical_and', 'np.logical_and', (['(x > -1)', '(x <= 1)'], {}), '(x > -1, x <= 1)\n', (6028, 6044), True, 'import numpy as np\n'), ((6042, 6072), 'numpy.logical_and', 'np.logical_and', (['(x > 1)', '(x <= 20)'], {}), '(x > 1, x <= 20)\n', (6056, 6072), True, 'import numpy as np\n'), ((6939, 6948), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (6945, 6948), True, 'import numpy as np\n'), ((7144, 7154), 'numpy.real', 'np.real', (['z'], {}), 
'(z)\n', (7151, 7154), True, 'import numpy as np\n'), ((6990, 7003), 'numpy.log', 'np.log', (['(-z[j])'], {}), '(-z[j])\n', (6996, 7003), True, 'import numpy as np\n'), ((7225, 7237), 'numpy.log', 'np.log', (['z[j]'], {}), '(z[j])\n', (7231, 7237), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.