code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from sklearn import neighbors, datasets
from sklearn import tree
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# Training set: 22 points on [0, 10.5], labelled by a noisy quadratic rule.
X = np.zeros((22, 1))
X[:, 0] = np.arange(0, 11, .5)
noisesigma = 2.5
# Binary labels: True where 2 - (x - 5)^2 + Gaussian noise > 0
Y = np.ravel((2 - (X - 5)**2 + noisesigma * np.random.randn(22, 1)) > 0)

# Test set: a finer grid over the same interval.
Xp = np.zeros((110, 1))
Xp[:, 0] = np.arange(0, 11, .1)

# k-nearest-neighbours classifier with k=2
# (the dead `weights = 'distance'` assignment that was immediately
# overwritten has been removed; 'distance' remains a valid alternative)
n_neighbors = 2
weights = 'uniform'
clfKNNC = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clfKNNC.fit(X, Y)

# k-nearest-neighbours classifier with k=3
n_neighbors = 3
weights = 'uniform'
clfKNNC2 = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clfKNNC2.fit(X, Y)

# Decision tree: require at least 8 samples to split an internal node.
min_samples_split = 8
clftree = tree.DecisionTreeClassifier(min_samples_split=min_samples_split)
clftree = clftree.fit(X, Y)

# Predict class labels on the fine test grid.
YpKNNC = clfKNNC.predict(Xp)
YpKNNC2 = clfKNNC2.predict(Xp)
Yptree = clftree.predict(Xp)

# Plot the data and all three decision curves.
plt.plot(X, Y, 'x', label='data')
plt.plot(Xp, YpKNNC, 'c', label='kNN2')
plt.plot(Xp, YpKNNC2, 'b', label='kNN3')
plt.plot(Xp, Yptree, 'r', label='DecisionTree')
plt.legend(loc=1)
plt.show() | [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"matplotlib.pyplot.legend",
"numpy.zeros",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.arange"
] | [((182, 199), 'numpy.zeros', 'np.zeros', (['(22, 1)'], {}), '((22, 1))\n', (190, 199), True, 'import numpy as np\n'), ((208, 229), 'numpy.arange', 'np.arange', (['(0)', '(11)', '(0.5)'], {}), '(0, 11, 0.5)\n', (217, 229), True, 'import numpy as np\n'), ((330, 348), 'numpy.zeros', 'np.zeros', (['(110, 1)'], {}), '((110, 1))\n', (338, 348), True, 'import numpy as np\n'), ((358, 379), 'numpy.arange', 'np.arange', (['(0)', '(11)', '(0.1)'], {}), '(0, 11, 0.1)\n', (367, 379), True, 'import numpy as np\n'), ((469, 529), 'sklearn.neighbors.KNeighborsClassifier', 'neighbors.KNeighborsClassifier', (['n_neighbors'], {'weights': 'weights'}), '(n_neighbors, weights=weights)\n', (499, 529), False, 'from sklearn import neighbors, datasets\n'), ((639, 699), 'sklearn.neighbors.KNeighborsClassifier', 'neighbors.KNeighborsClassifier', (['n_neighbors'], {'weights': 'weights'}), '(n_neighbors, weights=weights)\n', (669, 699), False, 'from sklearn import neighbors, datasets\n'), ((753, 817), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'min_samples_split': 'min_samples_split'}), '(min_samples_split=min_samples_split)\n', (780, 817), False, 'from sklearn import tree\n'), ((959, 992), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y', '"""x"""'], {'label': '"""data"""'}), "(X, Y, 'x', label='data')\n", (967, 992), True, 'import matplotlib.pyplot as plt\n'), ((990, 1029), 'matplotlib.pyplot.plot', 'plt.plot', (['Xp', 'YpKNNC', '"""c"""'], {'label': '"""kNN2"""'}), "(Xp, YpKNNC, 'c', label='kNN2')\n", (998, 1029), True, 'import matplotlib.pyplot as plt\n'), ((1027, 1067), 'matplotlib.pyplot.plot', 'plt.plot', (['Xp', 'YpKNNC2', '"""b"""'], {'label': '"""kNN3"""'}), "(Xp, YpKNNC2, 'b', label='kNN3')\n", (1035, 1067), True, 'import matplotlib.pyplot as plt\n'), ((1065, 1112), 'matplotlib.pyplot.plot', 'plt.plot', (['Xp', 'Yptree', '"""r"""'], {'label': '"""DecisionTree"""'}), "(Xp, Yptree, 'r', label='DecisionTree')\n", (1073, 1112), True, 'import 
matplotlib.pyplot as plt\n'), ((1110, 1127), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (1120, 1127), True, 'import matplotlib.pyplot as plt\n'), ((1133, 1143), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1141, 1143), True, 'import matplotlib.pyplot as plt\n'), ((284, 306), 'numpy.random.randn', 'np.random.randn', (['(22)', '(1)'], {}), '(22, 1)\n', (299, 306), True, 'import numpy as np\n')] |
import numpy as np
# Advent of Code 2020, day 10 (part 1): chain all joltage adapters and
# count the gaps between consecutive adapters.
# Read one integer per line; `with` ensures the file handle is closed
# (the original bare open() call leaked the handle).
with open('input.txt') as f:
    jolts = sorted(np.array(f.read().strip().split('\n'), dtype=int))
# The device's built-in adapter is rated 3 jolts above the highest adapter.
jolts.append(max(jolts)+3)
last = 0
deltas = []
# Joltage difference between each consecutive pair (wall outlet is 0 jolts).
for n in jolts:
    delta = n - last
    deltas.append(delta)
    last = n
print(np.product(np.unique(deltas, return_counts=True)[1])) | [
"numpy.unique"
] | [((245, 282), 'numpy.unique', 'np.unique', (['deltas'], {'return_counts': '(True)'}), '(deltas, return_counts=True)\n', (254, 282), True, 'import numpy as np\n')] |
from unittest import TestCase
from copy import deepcopy
from numpy import array as np_array
from rptools.rpthermo.rpThermo import (
build_stoichio_matrix,
get_target_rxn_idx,
minimize,
remove_compounds,
# eQuilibrator,
# initThermo,
# get_compounds_from_cache
)
from brs_utils import (
create_logger,
Cache
)
from chemlite import Reaction
from rptools.rplibs import(
rpCompound,
rpReaction,
rpPathway
)
# Fixture data: rpCompound instances keyed by compound id, covering the
# species referenced by the rpThermo tests (target product, pathway
# intermediates and common cofactors).
species = {
    "TARGET_0000000001": rpCompound(
        id="TARGET_0000000001",
        smiles="[H]OC(=O)C([H])=C([H])C([H])=C([H])C(=O)O[H]",
        inchi="InChI=1S/C6H6O4/c7-5(8)3-1-2-4-6(9)10/h1-4H,(H,7,8)(H,9,10)",
        inchikey="TXXHDPDFNKHHGW-UHFFFAOYSA-N"
    ),
    "CMPD_0000000010": rpCompound(
        id="CMPD_0000000010",
        smiles="[H]OC(=O)c1c([H])c([H])c(O[H])c(O[H])c1[H]",
        inchi="InChI=1S/C7H6O4/c8-5-2-1-4(7(10)11)3-6(5)9/h1-3,8-9H,(H,10,11)",
        inchikey="YQUVCSBJEUQKSH-UHFFFAOYSA-N"
    ),
    "MNXM23": rpCompound(
        id="MNXM23",
        formula="C3H3O3",
        # NOTE(review): trailing ']' in this SMILES looks like a typo — verify
        smiles="CC(=O)C(=O)O]",
        inchi="InChI=1S/C3H4O3/c1-2(4)3(5)6/h1H3,(H,5,6)",
        inchikey="LCTONWCANYUPML-UHFFFAOYSA-N",
        name="pyruvate"
    ),
    "CMPD_0000000025": rpCompound(
        id="CMPD_0000000025",
        smiles="[H]OC(=O)c1c([H])c([H])c([H])c(O[H])c1[H]",
        inchi="InChI=1S/C7H6O3/c8-6-3-1-2-5(4-6)7(9)10/h1-4,8H,(H,9,10)",
        inchikey="IJFXRHURBJZNAO-UHFFFAOYSA-N"
    ),
    "CMPD_0000000003": rpCompound(
        id="CMPD_0000000003",
        smiles="[H]Oc1c([H])c([H])c([H])c([H])c1O[H]",
        inchi="InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H",
        inchikey="YCIMNLLNPGFGHC-UHFFFAOYSA-N"
    ),
    # Variants of CMPD_0000000003 with a missing / None SMILES, used to
    # exercise cache lookups that fall back to InChI.
    "CMPD_0000000003_wo_smiles": rpCompound(
        id="CMPD_0000000003_wo_smiles",
        inchi="InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H",
        inchikey="YCIMNLLNPGFGHC-UHFFFAOYSA-N"
    ),
    # NOTE(review): key says ...0004... but id says ...0003... — confirm
    # whether the mismatch is intentional.
    "CMPD_0000000004_wo_smiles": rpCompound(
        id="CMPD_0000000003_wo_smiles",
        inchi="InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H",
        inchikey="YCIMNLLNPGFGHC-UHFFFAOYSA-N"
    ),
    "CMPD_0000000003_w_smiles_None": rpCompound(
        id="CMPD_0000000003_wo_smiles",
        inchi="InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H",
        inchikey="YCIMNLLNPGFGHC-UHFFFAOYSA-N",
        smiles=None
    ),
    "MNXM337": rpCompound(
        id="MNXM337",
        smiles="[H]OC(=O)C(OC1([H])C([H])=C(C(=O)O[H])C([H])=C([H])C1([H])O[H])=C([H])[H]",
        inchi="InChI=1S/C10H10O6/c1-5(9(12)13)16-8-4-6(10(14)15)2-3-7(8)11/h2-4,7-8,11H,1H2,(H,12,13)(H,14,15)",
        inchikey="WTFXTQVDAKGDEY-UHFFFAOYSA-N"
    ),
    "MNXM2": rpCompound(
        id="MNXM2",
        smiles="[H]O[H]",
        inchi="InChI=1S/H2O/h1H2",
        inchikey="XLYOFNOQVPJJNP-UHFFFAOYSA-N"
    ),
    "MNXM13": rpCompound(
        id="MNXM13",
        smiles="O=C=O",
        inchi="InChI=1S/CO2/c2-1-3",
        inchikey="CURLTUGMZLYLDI-UHFFFAOYSA-N",
        formula="CO2",
        name="CO2"
    ),
    "MNXM5": rpCompound(
        id="MNXM5",
        smiles="N=C(O)c1ccc[n+](C2OC(COP(=O)(O)OP(=O)(O)OCC3OC(n4cnc5c(N)ncnc54)C(OP(=O)(O)O)C3O)C(O)C2O)c1",
        inchi="InChI=1S/C21H28N7O17P3/c22-17-12-19(25-7-24-17)28(8-26-12)21-16(44-46(33,34)35)14(30)11(43-21)6-41-48(38,39)45-47(36,37)40-5-10-13(29)15(31)20(42-10)27-3-1-2-9(4-27)18(23)32/h1-4,7-8,10-11,13-16,20-21,29-31H,5-6H2,(H7-,22,23,24,25,32,33,34,35,36,37,38,39)/p+1",
        inchikey="XJLXINKUBYWONI-UHFFFAOYSA-O",
        formula="C21H25N7O17P3",
        name="NADP(+)"
    ),
    "MNXM4": rpCompound(
        id="MNXM4",
        smiles="O=O",
        inchi="InChI=1S/O2/c1-2",
        inchikey="MYMOFIZGZYHOMD-UHFFFAOYSA-N"
    ),
    "MNXM1": rpCompound(
        id="MNXM1",
        smiles="[H+]",
        inchi="InChI=1S/p+1",
        inchikey="GPRLSGONYQIRFK-UHFFFAOYSA-N"
    ),
    "MNXM6": rpCompound(
        id="MNXM6",
        smiles="[H]N=C(O[H])C1=C([H])N(C2([H])OC([H])(C([H])([H])OP(=O)(O[H])OP(=O)(O[H])OC([H])([H])C3([H])OC([H])(n4c([H])nc5c(N([H])[H])nc([H])nc54)C([H])(OP(=O)(O[H])O[H])C3([H])O[H])C([H])(O[H])C2([H])O[H])C([H])=C([H])C1([H])[H]",
        inchi="InChI=1S/C21H30N7O17P3/c22-17-12-19(25-7-24-17)28(8-26-12)21-16(44-46(33,34)35)14(30)11(43-21)6-41-48(38,39)45-47(36,37)40-5-10-13(29)15(31)20(42-10)27-3-1-2-9(4-27)18(23)32/h1,3-4,7-8,10-11,13-16,20-21,29-31H,2,5-6H2,(H2,23,32)(H,36,37)(H,38,39)(H2,22,24,25)(H2,33,34,35)",
        # NOTE(review): "<KEY>" is an anonymisation placeholder — restore
        # the real InChIKey for NADPH.
        inchikey="<KEY>"
    )
}
class Test_rpThermo(TestCase):
    """Unit tests for rpThermo's stoichiometry helpers.

    Exercises minimize(), build_stoichio_matrix(), get_target_rxn_idx()
    and remove_compounds() on small, hand-built reaction sets.
    """

    def setUp(self):
        """Create three reactions sharing intermediates, plus the expected
        stoichiometric matrix used by several tests."""
        self.logger = create_logger(__name__, 'ERROR')
        self.rxn_1 = rpReaction(
            id='rxn_1',
            reactants={'MNXM188': 1, 'MNXM4': 1, 'MNXM6': 1, 'MNXM1': 3},
            products={'CMPD_0000000004': 1, 'CMPD_0000000003': 1, 'MNXM13': 1, 'MNXM15': 3, 'MNXM5': 1},
        )
        self.rxn_2 = rpReaction(
            id='rxn_2',
            reactants={'MNXM4': 1, 'CMPD_0000000003': 2},
            products={'MNXM1': 1, 'TARGET_0000000001': 1},
        )
        self.rxn_3 = rpReaction(
            id='rxn_3',
            reactants={'CMPD_0000000004': 3, 'MNXM4': 1, 'MNXM6': 1},
            products={'MNXM13': 1, 'MNXM5': 1},
        )
        self.reactions = [self.rxn_1, self.rxn_2, self.rxn_3]
        # Expected matrix: one row per compound, one column per reaction
        # (rxn_1..rxn_3); reactants negative, products positive.
        self.sto_mat_1 = [
            [-3.0, 1.0, 0.0],
            [-1.0, -1.0, -1.0],
            [1.0, 0.0, 1.0],
            [3.0, 0.0, 0.0],
            [1.0, 0.0, -3.0],
            [1.0, 0.0, 1.0],
            [0.0, 1.0, 0.0],
            [1.0, -2.0, 0.0],
            [-1.0, 0.0, 0.0],
            [-1.0, 0.0, -1.0]
        ]

    # Reactions
    #   |- rxn_1: 1.0 MNXM188 + 1.0 MNXM4 + 1.0 MNXM6 + 3.0 MNXM1 --> 1.0 CMPD_0000000004 + 1.0 CMPD_0000000003 + 1.0 MNXM13 + 1.0 MNXM15 + 1.0 MNXM5
    #   |- rxn_2: 1.0 MNXM4 + 2.0 CMPD_0000000003 --> 2.0 MNXM1 + 1.0 TARGET_0000000001
    #   |- rxn_3: 1.0 MNXM4 + 1.0 MNXM6 + 3.0 CMPD_0000000004 --> 1.0 MNXM13 + 1.0 MNXM5
    # Compounds ordered: CMPD_0000000003, CMPD_0000000004
    # Reactions ordered: rxn_1, rxn_2*, rxn_3   (* reaction to be optimised)
    def test_minimize_1(self):
        """Pathway with two intermediates; target reaction at index 1."""
        sto_mat = np_array(
            [
                [1, -2, 0],
                [1, 0, -3]
            ]
        )
        rxn_tgt_idx = 1
        coeffs = minimize(
            sto_mat,
            rxn_tgt_idx,
            self.logger
        )
        self.assertSequenceEqual(
            coeffs.tolist(),
            [3.0, 1.5, 1.0]
        )

    # Reactions
    #   |- rxn_1: 1.0 MNXM4 + 1.0 MNXM421 + 1.0 MNXM6 + 1.0 MNXM1 --> 1.0 CMPD_0000000015 + 1.0 MNXM2 + 1.0 MNXM5
    #   |- rxn_2: 1.0 MNXM1 + 1.0 CMPD_0000000015 + 1.0 MNXM2 --> 1.0 CMPD_0000000010 + 1.0 MNXM15
    #   |- rxn_3: 1.0 MNXM1 + 1.0 CMPD_0000000010 --> 1.0 CMPD_0000000003 + 1.0 MNXM13
    #   |- rxn_4: 1.0 MNXM4 + 1.0 CMPD_0000000003 --> 2.0 MNXM1 + 1.0 TARGET_0000000001
    # Compounds ordered: CMPD_0000000003, CMPD_0000000010, CMPD_0000000015
    # Reactions ordered: rxn_1, rxn_2, rxn_3, rxn_4*   (* reaction to be optimised)
    def test_minimize_2(self):
        """Linear four-step pathway; all coefficients stay at 1."""
        sto_mat = np_array(
            [
                [0, 0, 1, -1],
                [0, 1, -1, 0],
                [1, -1, 0, 0]
            ]
        )
        rxn_tgt_idx = 3
        coeffs = minimize(
            sto_mat,
            rxn_tgt_idx,
            self.logger
        )
        self.assertSequenceEqual(
            coeffs.tolist(),
            [1, 1, 1, 1]
        )

    def test_minimize_1cmpd(self):
        """Under-determined system with one compound row: degenerate
        coefficients (0 or +/-inf) are normalised to 1 before comparing."""
        sto_mat = np_array(
            [
                [1, -1, 0, 0]
            ]
        )
        rxn_tgt_idx = 2
        coeffs = minimize(
            sto_mat,
            rxn_tgt_idx,
            self.logger
        )
        _coeffs = deepcopy(coeffs)
        for coeff_idx in range(len(_coeffs)):
            # BUGFIX: the original test compared against abs(float("inf")),
            # i.e. +inf only, so a -inf coefficient slipped through; take
            # the absolute value of the coefficient instead.
            if (
                _coeffs[coeff_idx] == 0
                or abs(_coeffs[coeff_idx]) == float("inf")
            ):
                _coeffs[coeff_idx] = 1.
        self.assertSequenceEqual(
            list(_coeffs),
            [1, 1, 1, 1]
        )

    def test_build_stoichio_matrix(self):
        """The full matrix matches sto_mat_1 up to row order."""
        # Ignore the order of matrix lines because
        # it is not relevant for our resolution system
        self.assertCountEqual(
            build_stoichio_matrix(self.reactions).tolist(),
            self.sto_mat_1
        )

    def test_build_stoichio_matrix_w_sel_cmpds(self):
        """Restricting to one compound yields just that compound's row."""
        # Ignore the order of matrix lines because
        # it is not relevant for our resolution system
        self.assertCountEqual(
            build_stoichio_matrix(
                reactions=self.reactions,
                compounds=['CMPD_0000000003']
            ).tolist(),
            [self.sto_mat_1[7]]
        )

    def test_get_target_rxn_idx(self):
        """The target reaction's index matches its position in the list."""
        self.assertEqual(
            get_target_rxn_idx(
                reactions=self.reactions,
                rxn_target_id=self.rxn_2.get_id(),
            ),
            self.reactions.index(self.rxn_2)
        )

    def test_remove_compounds(self):
        """Removing two unknown compounds rebalances the net reaction."""
        pathway = rpPathway(id='thermo')
        for rxn in self.reactions:
            pathway.add_reaction(rxn)
        compd_id1 = 'UNK_CMPD_FOOBAR'
        compd_id2 = 'UNK_CMPD_FOOBAR_2'
        # Inject the unknown compounds on both sides of the pathway so
        # remove_compounds() has something to balance out.
        self.rxn_1.add_product(stoichio=2, compound_id=compd_id1)
        self.rxn_1.add_product(stoichio=3, compound_id=compd_id2)
        self.rxn_2.add_reactant(stoichio=2, compound_id=compd_id2)
        self.rxn_3.add_reactant(stoichio=1, compound_id=compd_id1)
        reactions = remove_compounds(
            compounds=[compd_id1, compd_id2],
            reactions=pathway.get_list_of_reactions(),
            rxn_target_id=self.rxn_2.get_id(),
        )
        self.assertDictEqual(
            Reaction.sum_stoichio(reactions),
            {'MNXM1': -1.5, 'MNXM188': -1.0, 'MNXM4': -4.5, 'MNXM6': -3.0, 'CMPD_0000000003': -2.0, 'CMPD_0000000004': -5.0, 'MNXM13': 3.0, 'MNXM15': 3.0, 'MNXM5': 3.0, 'TARGET_0000000001': 1.5}
        )
| [
"rptools.rpthermo.rpThermo.minimize",
"copy.deepcopy",
"rptools.rpthermo.rpThermo.build_stoichio_matrix",
"rptools.rplibs.rpPathway",
"chemlite.Reaction.sum_stoichio",
"rptools.rplibs.rpReaction",
"numpy.array",
"rptools.rplibs.rpCompound",
"brs_utils.create_logger"
] | [((490, 703), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""TARGET_0000000001"""', 'smiles': '"""[H]OC(=O)C([H])=C([H])C([H])=C([H])C(=O)O[H]"""', 'inchi': '"""InChI=1S/C6H6O4/c7-5(8)3-1-2-4-6(9)10/h1-4H,(H,7,8)(H,9,10)"""', 'inchikey': '"""TXXHDPDFNKHHGW-UHFFFAOYSA-N"""'}), "(id='TARGET_0000000001', smiles=\n '[H]OC(=O)C([H])=C([H])C([H])=C([H])C(=O)O[H]', inchi=\n 'InChI=1S/C6H6O4/c7-5(8)3-1-2-4-6(9)10/h1-4H,(H,7,8)(H,9,10)', inchikey\n ='TXXHDPDFNKHHGW-UHFFFAOYSA-N')\n", (500, 703), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((751, 962), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""CMPD_0000000010"""', 'smiles': '"""[H]OC(=O)c1c([H])c([H])c(O[H])c(O[H])c1[H]"""', 'inchi': '"""InChI=1S/C7H6O4/c8-5-2-1-4(7(10)11)3-6(5)9/h1-3,8-9H,(H,10,11)"""', 'inchikey': '"""YQUVCSBJEUQKSH-UHFFFAOYSA-N"""'}), "(id='CMPD_0000000010', smiles=\n '[H]OC(=O)c1c([H])c([H])c(O[H])c(O[H])c1[H]', inchi=\n 'InChI=1S/C7H6O4/c8-5-2-1-4(7(10)11)3-6(5)9/h1-3,8-9H,(H,10,11)',\n inchikey='YQUVCSBJEUQKSH-UHFFFAOYSA-N')\n", (761, 962), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((1002, 1185), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""MNXM23"""', 'formula': '"""C3H3O3"""', 'smiles': '"""CC(=O)C(=O)O]"""', 'inchi': '"""InChI=1S/C3H4O3/c1-2(4)3(5)6/h1H3,(H,5,6)"""', 'inchikey': '"""LCTONWCANYUPML-UHFFFAOYSA-N"""', 'name': '"""pyruvate"""'}), "(id='MNXM23', formula='C3H3O3', smiles='CC(=O)C(=O)O]', inchi=\n 'InChI=1S/C3H4O3/c1-2(4)3(5)6/h1H3,(H,5,6)', inchikey=\n 'LCTONWCANYUPML-UHFFFAOYSA-N', name='pyruvate')\n", (1012, 1185), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((1254, 1459), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""CMPD_0000000025"""', 'smiles': '"""[H]OC(=O)c1c([H])c([H])c([H])c(O[H])c1[H]"""', 'inchi': '"""InChI=1S/C7H6O3/c8-6-3-1-2-5(4-6)7(9)10/h1-4,8H,(H,9,10)"""', 'inchikey': '"""IJFXRHURBJZNAO-UHFFFAOYSA-N"""'}), 
"(id='CMPD_0000000025', smiles=\n '[H]OC(=O)c1c([H])c([H])c([H])c(O[H])c1[H]', inchi=\n 'InChI=1S/C7H6O3/c8-6-3-1-2-5(4-6)7(9)10/h1-4,8H,(H,9,10)', inchikey=\n 'IJFXRHURBJZNAO-UHFFFAOYSA-N')\n", (1264, 1459), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((1507, 1695), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""CMPD_0000000003"""', 'smiles': '"""[H]Oc1c([H])c([H])c([H])c([H])c1O[H]"""', 'inchi': '"""InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H"""', 'inchikey': '"""YCIMNLLNPGFGHC-UHFFFAOYSA-N"""'}), "(id='CMPD_0000000003', smiles=\n '[H]Oc1c([H])c([H])c([H])c([H])c1O[H]', inchi=\n 'InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H', inchikey=\n 'YCIMNLLNPGFGHC-UHFFFAOYSA-N')\n", (1517, 1695), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((1753, 1899), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""CMPD_0000000003_wo_smiles"""', 'inchi': '"""InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H"""', 'inchikey': '"""YCIMNLLNPGFGHC-UHFFFAOYSA-N"""'}), "(id='CMPD_0000000003_wo_smiles', inchi=\n 'InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H', inchikey=\n 'YCIMNLLNPGFGHC-UHFFFAOYSA-N')\n", (1763, 1899), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((1954, 2100), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""CMPD_0000000003_wo_smiles"""', 'inchi': '"""InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H"""', 'inchikey': '"""YCIMNLLNPGFGHC-UHFFFAOYSA-N"""'}), "(id='CMPD_0000000003_wo_smiles', inchi=\n 'InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H', inchikey=\n 'YCIMNLLNPGFGHC-UHFFFAOYSA-N')\n", (1964, 2100), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((2159, 2318), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""CMPD_0000000003_wo_smiles"""', 'inchi': '"""InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H"""', 'inchikey': '"""YCIMNLLNPGFGHC-UHFFFAOYSA-N"""', 'smiles': 'None'}), "(id='CMPD_0000000003_wo_smiles', inchi=\n 
'InChI=1S/C6H6O2/c7-5-3-1-2-4-6(5)8/h1-4,7-8H', inchikey=\n 'YCIMNLLNPGFGHC-UHFFFAOYSA-N', smiles=None)\n", (2169, 2318), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((2363, 2636), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""MNXM337"""', 'smiles': '"""[H]OC(=O)C(OC1([H])C([H])=C(C(=O)O[H])C([H])=C([H])C1([H])O[H])=C([H])[H]"""', 'inchi': '"""InChI=1S/C10H10O6/c1-5(9(12)13)16-8-4-6(10(14)15)2-3-7(8)11/h2-4,7-8,11H,1H2,(H,12,13)(H,14,15)"""', 'inchikey': '"""WTFXTQVDAKGDEY-UHFFFAOYSA-N"""'}), "(id='MNXM337', smiles=\n '[H]OC(=O)C(OC1([H])C([H])=C(C(=O)O[H])C([H])=C([H])C1([H])O[H])=C([H])[H]'\n , inchi=\n 'InChI=1S/C10H10O6/c1-5(9(12)13)16-8-4-6(10(14)15)2-3-7(8)11/h2-4,7-8,11H,1H2,(H,12,13)(H,14,15)'\n , inchikey='WTFXTQVDAKGDEY-UHFFFAOYSA-N')\n", (2373, 2636), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((2669, 2780), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""MNXM2"""', 'smiles': '"""[H]O[H]"""', 'inchi': '"""InChI=1S/H2O/h1H2"""', 'inchikey': '"""XLYOFNOQVPJJNP-UHFFFAOYSA-N"""'}), "(id='MNXM2', smiles='[H]O[H]', inchi='InChI=1S/H2O/h1H2',\n inchikey='XLYOFNOQVPJJNP-UHFFFAOYSA-N')\n", (2679, 2780), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((2830, 2969), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""MNXM13"""', 'smiles': '"""O=C=O"""', 'inchi': '"""InChI=1S/CO2/c2-1-3"""', 'inchikey': '"""CURLTUGMZLYLDI-UHFFFAOYSA-N"""', 'formula': '"""CO2"""', 'name': '"""CO2"""'}), "(id='MNXM13', smiles='O=C=O', inchi='InChI=1S/CO2/c2-1-3',\n inchikey='CURLTUGMZLYLDI-UHFFFAOYSA-N', formula='CO2', name='CO2')\n", (2840, 2969), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((3034, 3533), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""MNXM5"""', 'smiles': '"""N=C(O)c1ccc[n+](C2OC(COP(=O)(O)OP(=O)(O)OCC3OC(n4cnc5c(N)ncnc54)C(OP(=O)(O)O)C3O)C(O)C2O)c1"""', 'inchi': 
'"""InChI=1S/C21H28N7O17P3/c22-17-12-19(25-7-24-17)28(8-26-12)21-16(44-46(33,34)35)14(30)11(43-21)6-41-48(38,39)45-47(36,37)40-5-10-13(29)15(31)20(42-10)27-3-1-2-9(4-27)18(23)32/h1-4,7-8,10-11,13-16,20-21,29-31H,5-6H2,(H7-,22,23,24,25,32,33,34,35,36,37,38,39)/p+1"""', 'inchikey': '"""XJLXINKUBYWONI-UHFFFAOYSA-O"""', 'formula': '"""C21H25N7O17P3"""', 'name': '"""NADP(+)"""'}), "(id='MNXM5', smiles=\n 'N=C(O)c1ccc[n+](C2OC(COP(=O)(O)OP(=O)(O)OCC3OC(n4cnc5c(N)ncnc54)C(OP(=O)(O)O)C3O)C(O)C2O)c1'\n , inchi=\n 'InChI=1S/C21H28N7O17P3/c22-17-12-19(25-7-24-17)28(8-26-12)21-16(44-46(33,34)35)14(30)11(43-21)6-41-48(38,39)45-47(36,37)40-5-10-13(29)15(31)20(42-10)27-3-1-2-9(4-27)18(23)32/h1-4,7-8,10-11,13-16,20-21,29-31H,5-6H2,(H7-,22,23,24,25,32,33,34,35,36,37,38,39)/p+1'\n , inchikey='XJLXINKUBYWONI-UHFFFAOYSA-O', formula='C21H25N7O17P3', name\n ='NADP(+)')\n", (3044, 3533), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((3577, 3684), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""MNXM4"""', 'smiles': '"""O=O"""', 'inchi': '"""InChI=1S/O2/c1-2"""', 'inchikey': '"""MYMOFIZGZYHOMD-UHFFFAOYSA-N"""'}), "(id='MNXM4', smiles='O=O', inchi='InChI=1S/O2/c1-2', inchikey=\n 'MYMOFIZGZYHOMD-UHFFFAOYSA-N')\n", (3587, 3684), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((3732, 3836), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""MNXM1"""', 'smiles': '"""[H+]"""', 'inchi': '"""InChI=1S/p+1"""', 'inchikey': '"""GPRLSGONYQIRFK-UHFFFAOYSA-N"""'}), "(id='MNXM1', smiles='[H+]', inchi='InChI=1S/p+1', inchikey=\n 'GPRLSGONYQIRFK-UHFFFAOYSA-N')\n", (3742, 3836), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((3884, 4455), 'rptools.rplibs.rpCompound', 'rpCompound', ([], {'id': '"""MNXM6"""', 'smiles': 
'"""[H]N=C(O[H])C1=C([H])N(C2([H])OC([H])(C([H])([H])OP(=O)(O[H])OP(=O)(O[H])OC([H])([H])C3([H])OC([H])(n4c([H])nc5c(N([H])[H])nc([H])nc54)C([H])(OP(=O)(O[H])O[H])C3([H])O[H])C([H])(O[H])C2([H])O[H])C([H])=C([H])C1([H])[H]"""', 'inchi': '"""InChI=1S/C21H30N7O17P3/c22-17-12-19(25-7-24-17)28(8-26-12)21-16(44-46(33,34)35)14(30)11(43-21)6-41-48(38,39)45-47(36,37)40-5-10-13(29)15(31)20(42-10)27-3-1-2-9(4-27)18(23)32/h1,3-4,7-8,10-11,13-16,20-21,29-31H,2,5-6H2,(H2,23,32)(H,36,37)(H,38,39)(H2,22,24,25)(H2,33,34,35)"""', 'inchikey': '"""<KEY>"""'}), "(id='MNXM6', smiles=\n '[H]N=C(O[H])C1=C([H])N(C2([H])OC([H])(C([H])([H])OP(=O)(O[H])OP(=O)(O[H])OC([H])([H])C3([H])OC([H])(n4c([H])nc5c(N([H])[H])nc([H])nc54)C([H])(OP(=O)(O[H])O[H])C3([H])O[H])C([H])(O[H])C2([H])O[H])C([H])=C([H])C1([H])[H]'\n , inchi=\n 'InChI=1S/C21H30N7O17P3/c22-17-12-19(25-7-24-17)28(8-26-12)21-16(44-46(33,34)35)14(30)11(43-21)6-41-48(38,39)45-47(36,37)40-5-10-13(29)15(31)20(42-10)27-3-1-2-9(4-27)18(23)32/h1,3-4,7-8,10-11,13-16,20-21,29-31H,2,5-6H2,(H2,23,32)(H,36,37)(H,38,39)(H2,22,24,25)(H2,33,34,35)'\n , inchikey='<KEY>')\n", (3894, 4455), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((4552, 4584), 'brs_utils.create_logger', 'create_logger', (['__name__', '"""ERROR"""'], {}), "(__name__, 'ERROR')\n", (4565, 4584), False, 'from brs_utils import create_logger, Cache\n'), ((4606, 4791), 'rptools.rplibs.rpReaction', 'rpReaction', ([], {'id': '"""rxn_1"""', 'reactants': "{'MNXM188': 1, 'MNXM4': 1, 'MNXM6': 1, 'MNXM1': 3}", 'products': "{'CMPD_0000000004': 1, 'CMPD_0000000003': 1, 'MNXM13': 1, 'MNXM15': 3,\n 'MNXM5': 1}"}), "(id='rxn_1', reactants={'MNXM188': 1, 'MNXM4': 1, 'MNXM6': 1,\n 'MNXM1': 3}, products={'CMPD_0000000004': 1, 'CMPD_0000000003': 1,\n 'MNXM13': 1, 'MNXM15': 3, 'MNXM5': 1})\n", (4616, 4791), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((4852, 4971), 'rptools.rplibs.rpReaction', 'rpReaction', ([], {'id': '"""rxn_2"""', 
'reactants': "{'MNXM4': 1, 'CMPD_0000000003': 2}", 'products': "{'MNXM1': 1, 'TARGET_0000000001': 1}"}), "(id='rxn_2', reactants={'MNXM4': 1, 'CMPD_0000000003': 2},\n products={'MNXM1': 1, 'TARGET_0000000001': 1})\n", (4862, 4971), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((5036, 5156), 'rptools.rplibs.rpReaction', 'rpReaction', ([], {'id': '"""rxn_3"""', 'reactants': "{'CMPD_0000000004': 3, 'MNXM4': 1, 'MNXM6': 1}", 'products': "{'MNXM13': 1, 'MNXM5': 1}"}), "(id='rxn_3', reactants={'CMPD_0000000004': 3, 'MNXM4': 1, 'MNXM6':\n 1}, products={'MNXM13': 1, 'MNXM5': 1})\n", (5046, 5156), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((6112, 6146), 'numpy.array', 'np_array', (['[[1, -2, 0], [1, 0, -3]]'], {}), '([[1, -2, 0], [1, 0, -3]])\n', (6120, 6146), True, 'from numpy import array as np_array\n'), ((6256, 6299), 'rptools.rpthermo.rpThermo.minimize', 'minimize', (['sto_mat', 'rxn_tgt_idx', 'self.logger'], {}), '(sto_mat, rxn_tgt_idx, self.logger)\n', (6264, 6299), False, 'from rptools.rpthermo.rpThermo import build_stoichio_matrix, get_target_rxn_idx, minimize, remove_compounds\n'), ((7045, 7100), 'numpy.array', 'np_array', (['[[0, 0, 1, -1], [0, 1, -1, 0], [1, -1, 0, 0]]'], {}), '([[0, 0, 1, -1], [0, 1, -1, 0], [1, -1, 0, 0]])\n', (7053, 7100), True, 'from numpy import array as np_array\n'), ((7228, 7271), 'rptools.rpthermo.rpThermo.minimize', 'minimize', (['sto_mat', 'rxn_tgt_idx', 'self.logger'], {}), '(sto_mat, rxn_tgt_idx, self.logger)\n', (7236, 7271), False, 'from rptools.rpthermo.rpThermo import build_stoichio_matrix, get_target_rxn_idx, minimize, remove_compounds\n'), ((7472, 7497), 'numpy.array', 'np_array', (['[[1, -1, 0, 0]]'], {}), '([[1, -1, 0, 0]])\n', (7480, 7497), True, 'from numpy import array as np_array\n'), ((7594, 7637), 'rptools.rpthermo.rpThermo.minimize', 'minimize', (['sto_mat', 'rxn_tgt_idx', 'self.logger'], {}), '(sto_mat, rxn_tgt_idx, self.logger)\n', (7602, 7637), False, 
'from rptools.rpthermo.rpThermo import build_stoichio_matrix, get_target_rxn_idx, minimize, remove_compounds\n'), ((7702, 7718), 'copy.deepcopy', 'deepcopy', (['coeffs'], {}), '(coeffs)\n', (7710, 7718), False, 'from copy import deepcopy\n'), ((9007, 9029), 'rptools.rplibs.rpPathway', 'rpPathway', ([], {'id': '"""thermo"""'}), "(id='thermo')\n", (9016, 9029), False, 'from rptools.rplibs import rpCompound, rpReaction, rpPathway\n'), ((9685, 9717), 'chemlite.Reaction.sum_stoichio', 'Reaction.sum_stoichio', (['reactions'], {}), '(reactions)\n', (9706, 9717), False, 'from chemlite import Reaction\n'), ((8224, 8261), 'rptools.rpthermo.rpThermo.build_stoichio_matrix', 'build_stoichio_matrix', (['self.reactions'], {}), '(self.reactions)\n', (8245, 8261), False, 'from rptools.rpthermo.rpThermo import build_stoichio_matrix, get_target_rxn_idx, minimize, remove_compounds\n'), ((8513, 8591), 'rptools.rpthermo.rpThermo.build_stoichio_matrix', 'build_stoichio_matrix', ([], {'reactions': 'self.reactions', 'compounds': "['CMPD_0000000003']"}), "(reactions=self.reactions, compounds=['CMPD_0000000003'])\n", (8534, 8591), False, 'from rptools.rpthermo.rpThermo import build_stoichio_matrix, get_target_rxn_idx, minimize, remove_compounds\n')] |
import pymbs.symbolics as symbolics
from pymbs.symbolics import zeros, eye
from .frame import Frame
from pymbs.common.sidfilereader import SID, SIDFormatException
from pymbs.common.abstractbody import AbstractBody
import numpy as np
class Body(AbstractBody, Frame):
    """A rigid body that is simultaneously a coordinate frame.

    Carries mass, centre of gravity and inertia (via AbstractBody) and can
    be used wherever a Frame is expected.
    """
    def __init__(self, name, mass, cg, inertia):
        # Initialise both bases explicitly: body data first, then the
        # frame at the origin with identity orientation.
        AbstractBody.__init__(self, name, mass, cg, inertia)
        Frame.__init__(self, name, parent=None, p=zeros((3,)), R=eye((3, 3)))
        # Records whether a joint ends on this body, so that attaching a
        # second joint to the same body can be rejected.
        self.endOfJoint = None
        # Convenience frame used when the body itself is passed somewhere
        # a coordinate system is expected.
        self.addFrame('_CS_0')

    def _insertCS(self, cs):
        """Adopt an existing Frame/Body instance as a child coordinate
        system (unlike addFrame, which creates a fresh Frame).

        Needed to make all coordinate systems top-level during the
        transformation step.
        """
        assert type(cs) in (Frame, Body, FlexibleBody)
        self.children.append(cs)  # not strictly needed (may lead to confusion)
        self.coordList.append(cs)
        # Deliberately not added to the object namespace: that was only a
        # writing convenience for the input file.

    def getParentBody(self):
        """A body is its own parent body."""
        return self
class FlexibleBody(AbstractBody, Frame):
    """A flexible body whose modal description is read from a SID file.

    One child Frame is created per node of the SID modal model; use
    node(i) to retrieve the frame of node number i (1-based).
    """
    def __init__(self,name,filepath):
        # read a SID-File from the given path
        f = open(filepath, "r")
        try:
            # create a sid object which includes the informations for
            # generating a flexible body
            self.sid = SID(f)
        except SIDFormatException as fe:
            # user-facing message: "file could not be read"
            print("Datei konnte nicht eingelesen werden: " + fe.message)
        except:
            # NOTE(review): bare except swallows every error ("unknown
            # error") and __init__ then continues with self.sid undefined —
            # consider re-raising instead.
            print("Unbekannter Fehler!")
        finally:
            # close SID-file
            f.close()
        # flexible bodies carry no lumped mass/cg/inertia of their own
        cg = [0,0,0]
        inertia=symbolics.zeros((3,3))
        mass = 0
        # super constructor (AbstractBody)
        AbstractBody.__init__(self, name, mass, cg, inertia)
        # super constructor (CoordinateSystem): origin position, identity rotation
        Frame.__init__(self, name, parent=None, p=zeros((3,)), R=eye((3,3)))
        # save whether a joint ends on this body
        # and prevent that more than one joint is connected to it
        self.endOfJoint = None
        # create one Frame per SID node so the flexible body can be
        # referenced at each of its nodes
        self.sid.node_List = list()
        for node in self.sid.modal.frame.Knoten:
            '''
            creating one frame per node
            '''
            # position of node i of undeflected body / zero order of originmatrix
            pos_node_numpy = node.origin.originmatrix.M0
            pos_node = np.array(pos_node_numpy).reshape(-1,).tolist()
            node_number = node.node_number
            node.frame = Frame.addFrame(self, name = 'node_%i'%node_number, p = pos_node,R = eye((3,3)))
            self.sid.node_List.append(node)
        self.node_list = self.sid.node_List
    def node(self, i):
        '''
        returns frame for node i (1-based index in [1, nNodes])
        '''
        if ((i < 1) or (i > self.sid.nNodes)):
            # NOTE(review): the message is never %-formatted — the tuple is
            # passed as a second exception argument; probably meant
            # "..." % (i, self.sid.nNodes)
            raise NotImplementedError("Node %i doesn´t exist! Index must be in range [1,%i]",(i,self.sid.nNodes))
        node_i = self.sid.node_List[i-1]
        return node_i.frame
##    self.node_List = list()
##    for nodes in xrange(self.sid.nNodes):
##        '''
##        creating one frame per node
##        '''
##        node_number = nodes+1
##        frame = Frame.addFrame(self, name = 'node_%i'%node_number, p = zeros((3,)),R = eye((3,3)))
##        self.node_List.append(frame)
##
##
##    def node(self, i):
##        '''
##        returns frame for node i
##        '''
##        if ((i < 1) or (i > self.sid.nNodes)):
##            raise NotImplementedError("Node %i doesn´t exist! Index must be in range [1,%i]",(i,self.sid.nNodes))
##        return self.node_List[i-1]
    def _insertCS(self, cs):
        """
        similar to addFrame but instead of creating a new one
        here we take an existing instance
        """
        # this is needed to make all CS top-Level during transformation
        assert ((cs.__class__ is Frame) or (cs.__class__ is Body) or (cs.__class__ is FlexibleBody))
        self.children.append(cs) #!! not needed (may lead to confusion)
        self.coordList.append(cs)
        #?? not added to the objectnamespace because
        # this was just for writing convinience in the input file
    def getParentBody(self):
        # a flexible body is its own parent body
        return self
    def addFrame(self, *args, **kwargs):
        '''
        Calling addFrame is not permitted for a flexible body
        '''
raise NotImplementedError("Calling addFrame is not permitted for a flexible body") | [
"pymbs.common.abstractbody.AbstractBody.__init__",
"pymbs.common.sidfilereader.SID",
"pymbs.symbolics.zeros",
"numpy.array",
"pymbs.symbolics.eye"
] | [((387, 439), 'pymbs.common.abstractbody.AbstractBody.__init__', 'AbstractBody.__init__', (['self', 'name', 'mass', 'cg', 'inertia'], {}), '(self, name, mass, cg, inertia)\n', (408, 439), False, 'from pymbs.common.abstractbody import AbstractBody\n'), ((2087, 2110), 'pymbs.symbolics.zeros', 'symbolics.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2102, 2110), True, 'import pymbs.symbolics as symbolics\n'), ((2179, 2231), 'pymbs.common.abstractbody.AbstractBody.__init__', 'AbstractBody.__init__', (['self', 'name', 'mass', 'cg', 'inertia'], {}), '(self, name, mass, cg, inertia)\n', (2200, 2231), False, 'from pymbs.common.abstractbody import AbstractBody\n'), ((1802, 1808), 'pymbs.common.sidfilereader.SID', 'SID', (['f'], {}), '(f)\n', (1805, 1808), False, 'from pymbs.common.sidfilereader import SID, SIDFormatException\n'), ((538, 549), 'pymbs.symbolics.zeros', 'zeros', (['(3,)'], {}), '((3,))\n', (543, 549), False, 'from pymbs.symbolics import zeros, eye\n'), ((553, 564), 'pymbs.symbolics.eye', 'eye', (['(3, 3)'], {}), '((3, 3))\n', (556, 564), False, 'from pymbs.symbolics import zeros, eye\n'), ((2330, 2341), 'pymbs.symbolics.zeros', 'zeros', (['(3,)'], {}), '((3,))\n', (2335, 2341), False, 'from pymbs.symbolics import zeros, eye\n'), ((2345, 2356), 'pymbs.symbolics.eye', 'eye', (['(3, 3)'], {}), '((3, 3))\n', (2348, 2356), False, 'from pymbs.symbolics import zeros, eye\n'), ((3132, 3143), 'pymbs.symbolics.eye', 'eye', (['(3, 3)'], {}), '((3, 3))\n', (3135, 3143), False, 'from pymbs.symbolics import zeros, eye\n'), ((2948, 2972), 'numpy.array', 'np.array', (['pos_node_numpy'], {}), '(pos_node_numpy)\n', (2956, 2972), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import itertools
import time
import numpy as np
import pytest
import requests
import astrodata
from astrodata.testing import download_from_archive
from geminidr.core import primitives_visualize
from geminidr.gmos.primitives_gmos_image import GMOSImage
single_aperture_data = [
# (Input Files, Associated Bias, Associated Flats, Associated Arc)
(["N20180112S0209.fits"], [], [], ["N20180112S0353.fits"]),
([f"S20190103S{i:04d}.fits" for i in range(138, 141)], [], [],
["S20190103S0136.fits"]),
(["N20180521S0101.fits"],
[f"N20180521S{i:04d}.fits" for i in range(217, 222)],
["N20180521S0100.fits", "N20180521S0102.fits"], ["N20180521S0185.fits"]),
]
HEMI = 'NS'
CCD = ('EEV', 'e2v', 'Ham')
@pytest.mark.parametrize('hemi, ccd', list(itertools.product(HEMI, CCD)))
def test_mosaic_detectors_gmos_binning(astrofaker, hemi, ccd):
"""
Tests that the spacing between amplifier centres for NxN binned data
is precisely N times smaller than for unbinned data when run through
mosaicDetectors()
"""
for binning in (1, 2, 4):
try:
ad = astrofaker.create('GMOS-{}'.format(hemi), ['IMAGE', ccd])
except ValueError: # No e2v for GMOS-S
pytest.skip()
ad.init_default_extensions(binning=binning, overscan=False)
for ext in ad:
shape = ext.data.shape
ext.add_star(amplitude=10000, x=0.5 * (shape[1] - 1),
y=0.5 * (shape[0] - 1), fwhm=0.5 * binning)
p = GMOSImage([ad])
ad = p.mosaicDetectors([ad])[0]
ad = p.detectSources([ad])[0]
x = np.array(sorted(ad[0].OBJCAT['X_IMAGE']))
if binning == 1:
unbinned_positions = x
else:
diffs = np.diff(unbinned_positions) - binning * np.diff(x)
assert np.max(abs(diffs)) < 0.01
def test_mosaic_detectors_raises_warning_with_different_gains(astrofaker, caplog):
ad = astrofaker.create('GMOS-N', ['IMAGE'])
ad.init_default_extensions(overscan=False)
p = GMOSImage([ad])
p.mosaicDetectors()
assert sum(["have different gains" in rec.msg for rec in caplog.records]) == 1
def test_tile_arrays_raises_warning_with_different_gains(astrofaker, caplog):
ad = astrofaker.create('GMOS-N', ['IMAGE'])
ad.init_default_extensions(overscan=False)
p = GMOSImage([ad])
p.tileArrays(tile_all=True)
assert sum(["have different gains" in rec.msg for rec in caplog.records]) == 1
caplog.clear()
p = GMOSImage([ad])
p.tileArrays(tile_all=False)
assert sum(["have different gains" in rec.msg for rec in caplog.records]) == 3
caplog.clear()
p = GMOSImage([ad])
p.prepare() # should set gain=1
p.ADUToElectrons()
p.tileArrays(tile_all=False)
assert sum(["have different gains" in rec.msg for rec in caplog.records]) == 0
def test_tile_arrays_does_not_raise_different_gain_warning_from_display(astrofaker, caplog):
ad = astrofaker.create('GMOS-N', ['IMAGE'])
ad.init_default_extensions(overscan=False)
p = GMOSImage([ad])
p.display()
assert sum(["have different gains" in rec.msg for rec in caplog.records]) == 0
def test_tile_arrays_creates_average_read_noise(astrofaker):
ad = astrofaker.create('GMOS-N', ['IMAGE'])
ad.init_default_extensions(overscan=False)
p = GMOSImage([ad])
p.prepare()
rn = ad.read_noise()
ad = p.tileArrays(tile_all=True).pop()
assert ad.read_noise()[0] == np.mean(rn)
@pytest.mark.preprocessed_data
@pytest.mark.parametrize("input_ads", single_aperture_data, indirect=True)
@pytest.mark.usefixtures("check_adcc")
def test_plot_spectra_for_qa(input_ads):
for i, ad in enumerate(input_ads):
# Plot single frame
p = primitives_visualize.Visualize([])
p.plotSpectraForQA(adinputs=[ad])
# Gives some time to page refresh
time.sleep(10)
# Plot Stack
if i >= 1:
print('Reducing stack')
stack_ad = GMOSImage([]).stackFrames(adinputs=input_ads[:i + 1])[0]
p.plotSpectraForQA(adinputs=[stack_ad])
# Gives some time to page refresh
time.sleep(10)
# -- Fixtures -----------------------------------------------------------------
@pytest.fixture(scope='module')
def check_adcc():
try:
_ = requests.get(url="http://localhost:8777/rqsite.json")
print("ADCC is up and running!")
except requests.exceptions.ConnectionError:
pytest.skip("ADCC is not running.")
@pytest.fixture(scope='module')
def input_ads(path_to_inputs, request):
basenames = request.param[0]
input_fnames = [b.replace('.fits', '_linearized.fits') for b in basenames]
input_paths = [os.path.join(path_to_inputs, f) for f in input_fnames]
input_data_list = []
for p in input_paths:
if os.path.exists(p):
input_data_list.append(astrodata.open(p))
else:
raise FileNotFoundError(p)
return input_data_list
# -- Input creation functions -------------------------------------------------
def create_inputs():
"""
Create inputs for `test_plot_spectra_for_qa_single_frame`.
The raw files will be downloaded and saved inside the path stored in the
`$DRAGONS_TEST/raw_inputs` directory. Processed files will be stored inside
a new folder called "dragons_test_inputs". The sub-directory structure
should reflect the one returned by the `path_to_inputs` fixture.
"""
import glob
import os
from geminidr.gmos.primitives_gmos_longslit import GMOSLongslit
from gempy.utils import logutils
from recipe_system.reduction.coreReduce import Reduce
from recipe_system.utils.reduce_utils import normalize_ucals
cwd = os.getcwd()
path = f"./dragons_test_inputs/geminidr/core/{__file__.split('.')[0]}/"
os.makedirs(path, exist_ok=True)
os.chdir(path)
os.makedirs("inputs/", exist_ok=True)
for raw_list, bias_list, quartz_list, arc_list in single_aperture_data:
if all([os.path.exists(f"inputs/{s.split('.')[0]}_extracted.fits")
for s in raw_list]):
print("Skipping already created input.")
continue
raw_paths = [download_from_archive(f) for f in raw_list]
bias_paths = [download_from_archive(f) for f in bias_list]
quartz_paths = [download_from_archive(f) for f in quartz_list]
arc_paths = [download_from_archive(f) for f in arc_list]
cals = []
raw_ads = [astrodata.open(p) for p in raw_paths]
data_label = raw_ads[0].data_label()
print('Current working directory:\n {:s}'.format(os.getcwd()))
if len(bias_paths):
logutils.config(file_name='log_bias_{}.txt'.format(data_label))
r = Reduce()
r.files.extend(bias_paths)
r.runr()
master_bias = r.output_filenames.pop()
cals.append(f"processed_bias:{master_bias}")
del r
else:
master_bias = None
if len(quartz_paths):
logutils.config(file_name='log_quartz_{}.txt'.format(data_label))
r = Reduce()
r.files.extend(quartz_paths)
r.ucals = normalize_ucals(cals)
r.runr()
master_quartz = r.output_filenames.pop()
cals.append(f"processed_flat:{master_quartz}")
del r
else:
master_quartz = None
logutils.config(file_name='log_arc_{}.txt'.format(data_label))
r = Reduce()
r.files.extend(arc_paths)
r.ucals = normalize_ucals(cals)
r.runr()
master_arc = r.output_filenames.pop()
do_cal_bias = 'skip' if master_bias is None else 'procmode'
do_cal_flat = 'skip' if master_quartz is None else 'procmode'
logutils.config(file_name='log_{}.txt'.format(data_label))
p = GMOSLongslit(raw_ads)
p.prepare()
p.addDQ(static_bpm=None)
p.addVAR(read_noise=True)
p.overscanCorrect()
p.biasCorrect(do_cal=do_cal_bias, bias=master_bias)
p.ADUToElectrons()
p.addVAR(poisson_noise=True)
p.flatCorrect(do_cal=do_cal_flat, flat=master_quartz)
p.QECorrect(arc=master_arc)
p.distortionCorrect(arc=master_arc)
p.findApertures(max_apertures=3)
p.skyCorrectFromSlit()
p.traceApertures()
p.extractSpectra()
p.linearizeSpectra()
[os.remove(s) for s in glob.glob("*_arc.fits")]
[os.remove(s) for s in glob.glob("*_bias.fits")]
[os.remove(s) for s in glob.glob("*_flat.fits")]
[os.remove(s) for s in glob.glob("*_mosaic.fits")]
os.chdir("inputs/")
print("\n\n Writing processed files for tests into:\n"
" {:s}\n\n".format(os.getcwd()))
_ = p.writeOutputs()
os.chdir("../")
os.chdir(cwd)
if __name__ == "__main__":
import sys
if '--create-inputs' in sys.argv[1:]:
create_inputs()
else:
pytest.main()
| [
"os.remove",
"pytest.main",
"numpy.mean",
"glob.glob",
"pytest.mark.parametrize",
"geminidr.core.primitives_visualize.Visualize",
"os.path.join",
"os.chdir",
"astrodata.testing.download_from_archive",
"astrodata.open",
"os.path.exists",
"requests.get",
"itertools.product",
"recipe_system.r... | [((3545, 3618), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_ads"""', 'single_aperture_data'], {'indirect': '(True)'}), "('input_ads', single_aperture_data, indirect=True)\n", (3568, 3618), False, 'import pytest\n'), ((3620, 3657), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""check_adcc"""'], {}), "('check_adcc')\n", (3643, 3657), False, 'import pytest\n'), ((4280, 4310), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (4294, 4310), False, 'import pytest\n'), ((4540, 4570), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (4554, 4570), False, 'import pytest\n'), ((2072, 2087), 'geminidr.gmos.primitives_gmos_image.GMOSImage', 'GMOSImage', (['[ad]'], {}), '([ad])\n', (2081, 2087), False, 'from geminidr.gmos.primitives_gmos_image import GMOSImage\n'), ((2378, 2393), 'geminidr.gmos.primitives_gmos_image.GMOSImage', 'GMOSImage', (['[ad]'], {}), '([ad])\n', (2387, 2393), False, 'from geminidr.gmos.primitives_gmos_image import GMOSImage\n'), ((2536, 2551), 'geminidr.gmos.primitives_gmos_image.GMOSImage', 'GMOSImage', (['[ad]'], {}), '([ad])\n', (2545, 2551), False, 'from geminidr.gmos.primitives_gmos_image import GMOSImage\n'), ((2695, 2710), 'geminidr.gmos.primitives_gmos_image.GMOSImage', 'GMOSImage', (['[ad]'], {}), '([ad])\n', (2704, 2710), False, 'from geminidr.gmos.primitives_gmos_image import GMOSImage\n'), ((3085, 3100), 'geminidr.gmos.primitives_gmos_image.GMOSImage', 'GMOSImage', (['[ad]'], {}), '([ad])\n', (3094, 3100), False, 'from geminidr.gmos.primitives_gmos_image import GMOSImage\n'), ((3366, 3381), 'geminidr.gmos.primitives_gmos_image.GMOSImage', 'GMOSImage', (['[ad]'], {}), '([ad])\n', (3375, 3381), False, 'from geminidr.gmos.primitives_gmos_image import GMOSImage\n'), ((5767, 5778), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5776, 5778), False, 'import os\n'), ((5859, 5891), 'os.makedirs', 'os.makedirs', 
(['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (5870, 5891), False, 'import os\n'), ((5896, 5910), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (5904, 5910), False, 'import os\n'), ((5916, 5953), 'os.makedirs', 'os.makedirs', (['"""inputs/"""'], {'exist_ok': '(True)'}), "('inputs/', exist_ok=True)\n", (5927, 5953), False, 'import os\n'), ((8901, 8914), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (8909, 8914), False, 'import os\n'), ((1546, 1561), 'geminidr.gmos.primitives_gmos_image.GMOSImage', 'GMOSImage', (['[ad]'], {}), '([ad])\n', (1555, 1561), False, 'from geminidr.gmos.primitives_gmos_image import GMOSImage\n'), ((802, 830), 'itertools.product', 'itertools.product', (['HEMI', 'CCD'], {}), '(HEMI, CCD)\n', (819, 830), False, 'import itertools\n'), ((3499, 3510), 'numpy.mean', 'np.mean', (['rn'], {}), '(rn)\n', (3506, 3510), True, 'import numpy as np\n'), ((3779, 3813), 'geminidr.core.primitives_visualize.Visualize', 'primitives_visualize.Visualize', (['[]'], {}), '([])\n', (3809, 3813), False, 'from geminidr.core import primitives_visualize\n'), ((3907, 3921), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3917, 3921), False, 'import time\n'), ((4182, 4196), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (4192, 4196), False, 'import time\n'), ((4350, 4403), 'requests.get', 'requests.get', ([], {'url': '"""http://localhost:8777/rqsite.json"""'}), "(url='http://localhost:8777/rqsite.json')\n", (4362, 4403), False, 'import requests\n'), ((4742, 4773), 'os.path.join', 'os.path.join', (['path_to_inputs', 'f'], {}), '(path_to_inputs, f)\n', (4754, 4773), False, 'import os\n'), ((4860, 4877), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (4874, 4877), False, 'import os\n'), ((7544, 7552), 'recipe_system.reduction.coreReduce.Reduce', 'Reduce', ([], {}), '()\n', (7550, 7552), False, 'from recipe_system.reduction.coreReduce import Reduce\n'), ((7605, 7626), 'recipe_system.utils.reduce_utils.normalize_ucals', 
'normalize_ucals', (['cals'], {}), '(cals)\n', (7620, 7626), False, 'from recipe_system.utils.reduce_utils import normalize_ucals\n'), ((7910, 7931), 'geminidr.gmos.primitives_gmos_longslit.GMOSLongslit', 'GMOSLongslit', (['raw_ads'], {}), '(raw_ads)\n', (7922, 7931), False, 'from geminidr.gmos.primitives_gmos_longslit import GMOSLongslit\n'), ((8707, 8726), 'os.chdir', 'os.chdir', (['"""inputs/"""'], {}), "('inputs/')\n", (8715, 8726), False, 'import os\n'), ((8880, 8895), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (8888, 8895), False, 'import os\n'), ((9044, 9057), 'pytest.main', 'pytest.main', ([], {}), '()\n', (9055, 9057), False, 'import pytest\n'), ((4501, 4536), 'pytest.skip', 'pytest.skip', (['"""ADCC is not running."""'], {}), "('ADCC is not running.')\n", (4512, 4536), False, 'import pytest\n'), ((6240, 6264), 'astrodata.testing.download_from_archive', 'download_from_archive', (['f'], {}), '(f)\n', (6261, 6264), False, 'from astrodata.testing import download_from_archive\n'), ((6306, 6330), 'astrodata.testing.download_from_archive', 'download_from_archive', (['f'], {}), '(f)\n', (6327, 6330), False, 'from astrodata.testing import download_from_archive\n'), ((6375, 6399), 'astrodata.testing.download_from_archive', 'download_from_archive', (['f'], {}), '(f)\n', (6396, 6399), False, 'from astrodata.testing import download_from_archive\n'), ((6443, 6467), 'astrodata.testing.download_from_archive', 'download_from_archive', (['f'], {}), '(f)\n', (6464, 6467), False, 'from astrodata.testing import download_from_archive\n'), ((6525, 6542), 'astrodata.open', 'astrodata.open', (['p'], {}), '(p)\n', (6539, 6542), False, 'import astrodata\n'), ((6803, 6811), 'recipe_system.reduction.coreReduce.Reduce', 'Reduce', ([], {}), '()\n', (6809, 6811), False, 'from recipe_system.reduction.coreReduce import Reduce\n'), ((7168, 7176), 'recipe_system.reduction.coreReduce.Reduce', 'Reduce', ([], {}), '()\n', (7174, 7176), False, 'from 
recipe_system.reduction.coreReduce import Reduce\n'), ((7240, 7261), 'recipe_system.utils.reduce_utils.normalize_ucals', 'normalize_ucals', (['cals'], {}), '(cals)\n', (7255, 7261), False, 'from recipe_system.utils.reduce_utils import normalize_ucals\n'), ((8478, 8490), 'os.remove', 'os.remove', (['s'], {}), '(s)\n', (8487, 8490), False, 'import os\n'), ((8534, 8546), 'os.remove', 'os.remove', (['s'], {}), '(s)\n', (8543, 8546), False, 'import os\n'), ((8591, 8603), 'os.remove', 'os.remove', (['s'], {}), '(s)\n', (8600, 8603), False, 'import os\n'), ((8648, 8660), 'os.remove', 'os.remove', (['s'], {}), '(s)\n', (8657, 8660), False, 'import os\n'), ((1258, 1271), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (1269, 1271), False, 'import pytest\n'), ((1788, 1815), 'numpy.diff', 'np.diff', (['unbinned_positions'], {}), '(unbinned_positions)\n', (1795, 1815), True, 'import numpy as np\n'), ((4914, 4931), 'astrodata.open', 'astrodata.open', (['p'], {}), '(p)\n', (4928, 4931), False, 'import astrodata\n'), ((6668, 6679), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6677, 6679), False, 'import os\n'), ((8500, 8523), 'glob.glob', 'glob.glob', (['"""*_arc.fits"""'], {}), "('*_arc.fits')\n", (8509, 8523), False, 'import glob\n'), ((8556, 8580), 'glob.glob', 'glob.glob', (['"""*_bias.fits"""'], {}), "('*_bias.fits')\n", (8565, 8580), False, 'import glob\n'), ((8613, 8637), 'glob.glob', 'glob.glob', (['"""*_flat.fits"""'], {}), "('*_flat.fits')\n", (8622, 8637), False, 'import glob\n'), ((8670, 8696), 'glob.glob', 'glob.glob', (['"""*_mosaic.fits"""'], {}), "('*_mosaic.fits')\n", (8679, 8696), False, 'import glob\n'), ((8829, 8840), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8838, 8840), False, 'import os\n'), ((1828, 1838), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (1835, 1838), True, 'import numpy as np\n'), ((4022, 4035), 'geminidr.gmos.primitives_gmos_image.GMOSImage', 'GMOSImage', (['[]'], {}), '([])\n', (4031, 4035), False, 'from 
geminidr.gmos.primitives_gmos_image import GMOSImage\n')] |
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import omni.kit.test
import omni.kit
import asyncio
import carb
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
from omni.isaac.samples.scripts.rmp_sample.sample import RMPSample
from .common import simulate
from pxr import Gf
import omni.physx as _physx
class TestRMPSample(omni.kit.test.AsyncTestCaseFailOnLogError):
# Before running each test
async def setUp(self):
self._sample = RMPSample()
self._timeline = omni.timeline.get_timeline_interface()
self._physx_subs = _physx.get_physx_interface().subscribe_physics_step_events(self._sample.step)
self._physics_rate = 60
carb.settings.get_settings().set_int("/app/runLoops/main/rateLimitFrequency", int(self._physics_rate))
carb.settings.get_settings().set_bool("/app/runLoops/main/rateLimitEnabled", True)
carb.settings.get_settings().set_int("/persistent/simulation/minFrameRate", int(self._physics_rate))
await omni.usd.get_context().new_stage_async()
await omni.kit.app.get_app().next_update_async()
pass
# After running each test
async def tearDown(self):
# In some cases the test will end before the asset is loaded, in this case wait for assets to load
while omni.usd.get_context().get_stage_loading_status()[2] > 0:
print("tearDown, assets still loading, waiting to finish...")
await asyncio.sleep(1.0)
await omni.kit.app.get_app().next_update_async()
self._sample = None
self._physx_subs = None
await omni.kit.app.get_app().next_update_async()
pass
# basic test, should not crash or error if we call all functions
async def test_no_simulation(self):
self._sample.create_robot()
self._sample.follow_target()
self._sample.has_arrived()
self._sample.gripper_state()
self._sample.add_obstacle()
self._sample.toggle_obstacle()
self._sample.toggle_gripper()
self._sample.get_states()
self._sample.reset()
pass
# enable following target, check that we reached it
async def test_follow(self):
self._sample.create_robot()
await omni.kit.app.get_app().next_update_async()
self._timeline.play()
await simulate(1)
self._sample.follow_target()
await simulate(0.1)
self.assertEqual(self._sample.has_arrived(), False) # not enough time passed for it to reach target
await simulate(2)
self.assertEqual(self._sample.has_arrived(), True)
self._timeline.stop()
await omni.kit.app.get_app().next_update_async()
pass
# enable following target, check that we reached it
async def test_gripper(self):
self._sample.create_robot()
await omni.kit.app.get_app().next_update_async()
self._timeline.play()
await simulate(1)
left, right = self._sample.gripper_state()
self.assertAlmostEqual(left, 0.0, delta=0.1)
self.assertAlmostEqual(right, 0.0, delta=0.1)
self._sample.toggle_gripper()
await simulate(2)
left, right = self._sample.gripper_state()
self.assertAlmostEqual(left, 4.0, delta=0.1)
self.assertAlmostEqual(right, 4.0, delta=0.1)
self._sample.toggle_gripper()
await simulate(2)
left, right = self._sample.gripper_state()
self.assertAlmostEqual(left, 0.0, delta=0.1)
self.assertAlmostEqual(right, 0.0, delta=0.1)
self._timeline.stop()
await omni.kit.app.get_app().next_update_async()
pass
async def test_obstacle(self):
self._sample.create_robot()
await omni.kit.app.get_app().next_update_async()
self._timeline.play()
self._sample.follow_target()
await simulate(1)
self._sample.add_obstacle()
# move target to location just above cube, we should not be able to reach
self._sample.move_target(Gf.Vec3d(30.0, -20.0, 12))
await simulate(3)
self.assertEqual(self._sample.has_arrived(), False)
# toggle, we should be able to reach
self._sample.toggle_obstacle()
await simulate(3)
self.assertEqual(self._sample.has_arrived(), True)
# toggle, we should not be able to reach
self._sample.toggle_obstacle()
await simulate(3)
self.assertEqual(self._sample.has_arrived(), False)
# toggle, we should be able to reach
self._sample.toggle_obstacle()
await simulate(3)
self.assertEqual(self._sample.has_arrived(), True)
# move target to above clear spot, we should be able to reach
self._sample.move_target(Gf.Vec3d(30.0, 30.0, 20))
await simulate(4)
self.assertEqual(self._sample.has_arrived(), True)
# move target to inside ground, we should not reach
self._sample.move_target(Gf.Vec3d(30.0, 30.0, 0))
await simulate(4)
self.assertEqual(self._sample.has_arrived(), False)
self._timeline.stop()
await omni.kit.app.get_app().next_update_async()
async def test_data_collection(self):
self._sample.create_robot()
await omni.kit.app.get_app().next_update_async()
self._timeline.play()
self._sample.follow_target()
await simulate(4)
self._sample.reset_action_state_dict()
print("Collect data")
self._sample.collect_action_state()
state_action_dict = self._sample.get_action_state_dict()
import numpy as np
print("Checking collected data")
np.testing.assert_almost_equal(
state_action_dict["joint command"][0],
np.array([-0.00882683, -0.78860676, 0.00875621, -2.84749961, 0.00704176, 2.05903769, 0.77942944, 0.0, 0.0]),
decimal=3,
)
np.testing.assert_almost_equal(
state_action_dict["joint state"][0],
np.array([-8.8267e-03, -7.8861e-01, 8.75626e-03, -2.8475, 7.04182e-03, 2.0590, 7.7940e-01, 0.0, 0.0]),
decimal=3,
)
self._timeline.stop()
await omni.kit.app.get_app().next_update_async()
# Run all functions with simulation enabled
async def test_simulation(self):
self._sample.create_robot()
await omni.kit.app.get_app().next_update_async()
self._timeline.play()
await simulate(1)
self._sample.follow_target()
await simulate(1)
self._sample.add_obstacle()
await simulate(1)
self._sample.toggle_obstacle()
await simulate(1)
self._sample.toggle_gripper()
await simulate(1)
self._sample.get_states()
self._sample.gripper_state()
self._sample.has_arrived()
await simulate(1)
self._sample.reset()
await simulate(1)
self._sample.stop_tasks()
await simulate(1)
self._timeline.stop()
await omni.kit.app.get_app().next_update_async()
pass
| [
"omni.isaac.samples.scripts.rmp_sample.sample.RMPSample",
"pxr.Gf.Vec3d",
"asyncio.sleep",
"carb.settings.get_settings",
"omni.physx.get_physx_interface",
"numpy.array"
] | [((1121, 1132), 'omni.isaac.samples.scripts.rmp_sample.sample.RMPSample', 'RMPSample', ([], {}), '()\n', (1130, 1132), False, 'from omni.isaac.samples.scripts.rmp_sample.sample import RMPSample\n'), ((4666, 4691), 'pxr.Gf.Vec3d', 'Gf.Vec3d', (['(30.0)', '(-20.0)', '(12)'], {}), '(30.0, -20.0, 12)\n', (4674, 4691), False, 'from pxr import Gf\n'), ((5394, 5418), 'pxr.Gf.Vec3d', 'Gf.Vec3d', (['(30.0)', '(30.0)', '(20)'], {}), '(30.0, 30.0, 20)\n', (5402, 5418), False, 'from pxr import Gf\n'), ((5598, 5621), 'pxr.Gf.Vec3d', 'Gf.Vec3d', (['(30.0)', '(30.0)', '(0)'], {}), '(30.0, 30.0, 0)\n', (5606, 5621), False, 'from pxr import Gf\n'), ((6384, 6496), 'numpy.array', 'np.array', (['[-0.00882683, -0.78860676, 0.00875621, -2.84749961, 0.00704176, 2.05903769,\n 0.77942944, 0.0, 0.0]'], {}), '([-0.00882683, -0.78860676, 0.00875621, -2.84749961, 0.00704176, \n 2.05903769, 0.77942944, 0.0, 0.0])\n', (6392, 6496), True, 'import numpy as np\n'), ((6627, 6722), 'numpy.array', 'np.array', (['[-0.0088267, -0.78861, 0.00875626, -2.8475, 0.00704182, 2.059, 0.7794, 0.0, 0.0\n ]'], {}), '([-0.0088267, -0.78861, 0.00875626, -2.8475, 0.00704182, 2.059, \n 0.7794, 0.0, 0.0])\n', (6635, 6722), True, 'import numpy as np\n'), ((1224, 1252), 'omni.physx.get_physx_interface', '_physx.get_physx_interface', ([], {}), '()\n', (1250, 1252), True, 'import omni.physx as _physx\n'), ((1343, 1371), 'carb.settings.get_settings', 'carb.settings.get_settings', ([], {}), '()\n', (1369, 1371), False, 'import carb\n'), ((1454, 1482), 'carb.settings.get_settings', 'carb.settings.get_settings', ([], {}), '()\n', (1480, 1482), False, 'import carb\n'), ((1545, 1573), 'carb.settings.get_settings', 'carb.settings.get_settings', ([], {}), '()\n', (1571, 1573), False, 'import carb\n'), ((2103, 2121), 'asyncio.sleep', 'asyncio.sleep', (['(1.0)'], {}), '(1.0)\n', (2116, 2121), False, 'import asyncio\n')] |
import inspect
from typing import Any
import numpy as np
import pandas as pd
from aistac.handlers.abstract_handlers import HandlerFactory
from ds_discovery.intent.abstract_common_intent import AbstractCommonsIntentModel
from ds_discovery.managers.feature_catalog_property_manager import FeatureCatalogPropertyManager
from ds_discovery.components.commons import Commons
from aistac.components.aistac_commons import DataAnalytics
from ds_discovery.components.discovery import DataDiscovery
__author__ = '<NAME>'
class FeatureCatalogIntentModel(AbstractCommonsIntentModel):
"""A set of methods to help build features as pandas.Dataframe"""
def __init__(self, property_manager: FeatureCatalogPropertyManager, default_save_intent: bool=None,
default_intent_level: [str, int, float]=None, order_next_available: bool=None,
default_replace_intent: bool=None):
"""initialisation of the Intent class.
:param property_manager: the property manager class that references the intent contract.
:param default_save_intent: (optional) The default action for saving intent in the property manager
:param default_intent_level: (optional) the default level intent should be saved at
:param order_next_available: (optional) if the default behaviour for the order should be next available order
:param default_replace_intent: (optional) the default replace existing intent behaviour
"""
default_save_intent = default_save_intent if isinstance(default_save_intent, bool) else True
default_replace_intent = default_replace_intent if isinstance(default_replace_intent, bool) else True
default_intent_level = default_intent_level if isinstance(default_intent_level, (str, int, float)) else 'base'
default_intent_order = -1 if isinstance(order_next_available, bool) and order_next_available else 0
intent_param_exclude = []
intent_type_additions = [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64,
pd.Timestamp]
super().__init__(property_manager=property_manager, default_save_intent=default_save_intent,
intent_param_exclude=intent_param_exclude, default_intent_level=default_intent_level,
default_intent_order=default_intent_order, default_replace_intent=default_replace_intent,
intent_type_additions=intent_type_additions)
def run_intent_pipeline(self, canonical: Any, feature_name: [int, str], train_size: [float, int]=None,
seed: int=None, shuffle: bool=None, **kwargs) -> [pd.DataFrame, pd.Series]:
""" Collectively runs all parameterised intent taken from the property manager against the code base as
defined by the intent_contract.
It is expected that all intent methods have the 'canonical' as the first parameter of the method signature
and will contain 'save_intent' as parameters. It is also assumed that all features have a feature contract to
save the feature outcome to
:param canonical: this is the iterative value all intent are applied to and returned.
:param feature_name: feature to run
:param train_size: (optional) If float, should be between 0.0 and 1.0 and represent the proportion of the
dataset to include in the train split. If int, represents the absolute number of train
samples. If None, then not used
:param seed: (optional) if shuffle is True a seed value for the choice
:param shuffle: (optional) Whether or not to shuffle the data before splitting or just split on train size.
:param kwargs: additional kwargs to add to the parameterised intent, these will replace any that already exist
:return
"""
# test if there is any intent to run
if self._pm.has_intent(level=feature_name):
canonical = self._get_canonical(canonical)
if isinstance(train_size, (float, int)):
canonical = self.canonical_sampler(canonical, sample_size=train_size, shuffle=shuffle, seed=seed)
# run the feature
level_key = self._pm.join(self._pm.KEY.intent_key, feature_name)
df_feature = None
for order in sorted(self._pm.get(level_key, {})):
for method, params in self._pm.get(self._pm.join(level_key, order), {}).items():
if method in self.__dir__():
if 'canonical' in params.keys():
df_feature = params.pop('canonical')
elif df_feature is None:
df_feature = canonical
# fail safe in case kwargs was sored as the reference
params.update(params.pop('kwargs', {}))
# add method kwargs to the params
if isinstance(kwargs, dict):
params.update(kwargs)
# remove the creator param
_ = params.pop('intent_creator', 'Unknown')
# add excluded params and set to False
params.update({'save_intent': False})
df_feature = eval(f"self.{method}(df_feature, **{params})", globals(), locals())
if df_feature is None:
raise ValueError(f"The feature '{feature_name}' pipeline did not run. ")
return df_feature
raise ValueError(f"The feature '{feature_name}, can't be found in the feature catalog")
def apply_date_diff(self, canonical: Any, key: [str, list], first_date: str, second_date: str,
aggregator: str=None, units: str=None, precision: int=None, rtn_columns: list=None,
regex: bool=None, rename: str=None, unindex: bool=None, save_intent: bool=None,
feature_name: [int, str]=None, intent_order: int=None, replace_intent: bool=None,
remove_duplicates: bool=None) -> pd.DataFrame:
""" adds a column for the difference between a primary and secondary date where the primary is an early date
than the secondary.
:param canonical: the DataFrame containing the column headers
:param key: the key label to group by and index on
:param first_date: the primary or older date field
:param second_date: the secondary or newer date field
:param aggregator: (optional) the aggregator as a function of Pandas DataFrame 'groupby'
:param units: (optional) The Timedelta units e.g. 'D', 'W', 'M', 'Y'. default is 'D'
:param precision: the precision of the result
:param rtn_columns: (optional) return columns, the header must be listed to be included.
If None then header
if 'all' then all original headers
:param regex: if True then treat the rtn_columns as a regular expression
:param rename: a new name for the column, else primary and secondary name used
:param unindex: (optional) if the passed canonical should be un-index before processing
:param save_intent: (optional) if the intent contract should be saved to the property manager
:param feature_name: (optional) the level name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:return: the DataFrame with the extra column
"""
# resolve intent persist options
self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
remove_duplicates=remove_duplicates, save_intent=save_intent)
# Code block for intent
if second_date not in canonical.columns:
raise ValueError(f"The column header '{second_date}' is not in the canonical DataFrame")
if first_date not in canonical.columns:
raise ValueError(f"The column header '{first_date}' is not in the canonical DataFrame")
canonical = self._get_canonical(canonical)
if isinstance(unindex, bool) and unindex:
canonical.reset_index(inplace=True)
key = Commons.list_formatter(key)
rename = rename if isinstance(rename, str) else f'{second_date}-{first_date}'
if rtn_columns == 'all':
rtn_columns = Commons.filter_headers(canonical, headers=key + [rename], drop=True)
if isinstance(regex, bool) and regex:
rtn_columns = Commons.filter_headers(canonical, regex=rtn_columns)
rtn_columns = Commons.list_formatter(rtn_columns) if isinstance(rtn_columns, list) else [rename]
precision = precision if isinstance(precision, int) else 0
units = units if isinstance(units, str) else 'D'
selected = canonical[[first_date, second_date]].dropna(axis='index', how='any')
canonical[rename] = (selected[second_date].sub(selected[first_date], axis=0) / np.timedelta64(1, units))
canonical[rename] = [np.round(v, precision) for v in canonical[rename]]
return Commons.filter_columns(canonical, headers=list(set(key + [rename] + rtn_columns))).set_index(key)
def select_feature(self, canonical: Any, key: [str, list], headers: [str, list]=None,
drop: bool=None, dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None,
re_ignore_case: bool=None, drop_dup_index: str=None, rename: dict=None, unindex: bool=None,
save_intent: bool=None, feature_name: [int, str]=None, intent_order: int=None,
replace_intent: bool=None, remove_duplicates: bool=None) -> pd.DataFrame:
""" used for feature attribution allowing columns to be selected directly from the canonical attributes
:param canonical: the Pandas.DataFrame to get the selection from
:param key: the key column to index on
:param headers: a list of headers to drop or filter on type
:param drop: to drop or not drop the headers
:param dtype: the column types to include or excluse. Default None else int, float, bool, object, 'number'
:param exclude: to exclude or include the dtypes
:param regex: a regular expression to search the headers. example '^((?!_amt).)*$)' excludes '_amt' columns
:param re_ignore_case: true if the regex should ignore case. Default is False
:param drop_dup_index: if any duplicate index should be removed passing either 'First' or 'last'
:param rename: a dictionary of headers to rename
:param unindex: if the passed canonical should be un-index before processing
:param save_intent (optional) if the intent contract should be saved to the property manager
:param feature_name: (optional) the level name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:return: selected list of headers indexed on key
"""
# resolve intent persist options
self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
remove_duplicates=remove_duplicates, save_intent=save_intent)
# Code block for intent
canonical = self._get_canonical(canonical)
drop = drop if isinstance(drop, bool) else False
exclude = exclude if isinstance(exclude, bool) else False
re_ignore_case = re_ignore_case if isinstance(re_ignore_case, bool) else False
if isinstance(unindex, bool) and unindex:
canonical.reset_index(inplace=True)
key = Commons.list_formatter(key)
filter_headers = Commons.filter_headers(df=canonical, headers=headers, drop=drop, dtype=dtype, exclude=exclude,
regex=regex, re_ignore_case=re_ignore_case)
filter_headers += self._pm.list_formatter(key)
df_rtn = Commons.filter_columns(canonical, headers=filter_headers)
if isinstance(drop_dup_index, str) and drop_dup_index.lower() in ['first', 'last']:
df_rtn = df_rtn.loc[~df_rtn.index.duplicated(keep=drop_dup_index)]
if isinstance(rename, dict):
df_rtn.rename(columns=rename, inplace=True)
return df_rtn.set_index(key)
    def apply_merge(self, canonical: Any, merge_connector: str, key: [str, list], how: str=None,
                    on: str=None, left_on: str=None, right_on: str=None, left_index: bool=None, right_index: bool=None,
                    sort: bool=None, suffixes: tuple=None, indicator: bool=None, validate: str=None,
                    rtn_columns: list=None, regex: bool=None, unindex: bool=None, save_intent: bool=None,
                    feature_name: [int, str]=None, intent_order: int=None, replace_intent: bool=None,
                    remove_duplicates: bool=None) -> pd.DataFrame:
        """ merges the canonical with another canonical obtained from a connector contract

        :param canonical: the canonical to merge on the left
        :param merge_connector: the name of the Connector Contract to load to merge on the right
        :param key: the key column to index on
        :param how: (optional) One of 'left', 'right', 'outer', 'inner'. Defaults to inner. See below for more detailed
                        description of each method.
        :param on: (optional) Column or index level names to join on. Must be found in both the left and right
                        DataFrame and/or Series objects. If not passed and left_index and right_index are False, the
                        intersection of the columns in the DataFrames and/or Series will be inferred to be the join keys
        :param left_on: (optional) Columns or index levels from the left DataFrame or Series to use as keys. Can either
                        be column names, index level names, or arrays with length equal to the length of the DataFrame
                        or Series.
        :param right_on: (optional) Columns or index levels from the right DataFrame or Series to use as keys. Can
                        either be column names, index level names, or arrays with length equal to the length of the
                        DataFrame or Series.
        :param left_index: (optional) If True, use the index (row labels) from the left DataFrame or Series as its join
                        key(s). In the case of a DataFrame or Series with a MultiIndex (hierarchical), the number of levels
                        must match the number of join keys from the right DataFrame or Series.
        :param right_index: (optional) Same usage as left_index for the right DataFrame or Series
        :param sort: (optional) Sort the result DataFrame by the join keys in lexicographical order. Defaults to True,
                        setting to False will improve performance substantially in many cases.
        :param suffixes: (optional) A tuple of string suffixes to apply to overlapping columns. Defaults ('', '_dup').
        :param indicator: (optional) Add a column to the output DataFrame called _merge with information on the source
                        of each row. _merge is Categorical-type and takes on a value of left_only for observations whose
                        merge key only appears in 'left' DataFrame or Series, right_only for observations whose merge key
                        only appears in 'right' DataFrame or Series, and both if the observation’s merge key is found
                        in both.
        :param validate: (optional) validate : string, default None. If specified, checks if merge is of specified type.
                        “one_to_one” or “1:1”: checks if merge keys are unique in both left and right datasets.
                        “one_to_many” or “1:m”: checks if merge keys are unique in left dataset.
                        “many_to_one” or “m:1”: checks if merge keys are unique in right dataset.
                        “many_to_many” or “m:m”: allowed, but does not result in checks.
        :param rtn_columns: (optional) return columns, the header must be listed to be included. If None then header
        :param regex: a regular expression to search the headers. example '^((?!_amt).)*$)' excludes '_amt' columns
        :param unindex: (optional) if the passed canonical should be un-index before processing
        :param save_intent: (optional) if the intent contract should be saved to the property manager
        :param feature_name: (optional) the level name that groups intent by a reference name
        :param intent_order: (optional) the order in which each intent should run.
                        If None: default's to -1
                        if -1: added to a level above any current instance of the intent section, level 0 if not found
                        if int: added to the level specified, overwriting any that already exist
        :param replace_intent: (optional) if the intent method exists at the level, or default level
                        True - replaces the current intent method with the new
                        False - leaves it untouched, disregarding the new intent
        :param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
        :return: the merged left and right canonicals, filtered to key + rtn_columns and indexed on key
        """
        # resolve intent persist options
        self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
                                   feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
                                   remove_duplicates=remove_duplicates, save_intent=save_intent)
        # intend code block on the canonical
        canonical = self._get_canonical(canonical)
        # normalise the optional merge parameters: any invalid 'how' falls back to an inner join
        how = how if isinstance(how, str) and how in ['left', 'right', 'outer', 'inner'] else 'inner'
        left_index = left_index if isinstance(left_index, bool) else False
        right_index = right_index if isinstance(right_index, bool) else False
        sort = sort if isinstance(sort, bool) else True
        indicator = indicator if isinstance(indicator, bool) else False
        suffixes = suffixes if isinstance(suffixes, tuple) and len(suffixes) == 2 else ('', '_dup')
        if isinstance(unindex, bool) and unindex:
            canonical.reset_index(inplace=True)
        key = Commons.list_formatter(key)
        if not self._pm.has_connector(connector_name=merge_connector):
            raise ValueError(f"The connector name '{merge_connector}' is not in the connectors catalog")
        # load the right-hand side of the merge from the named connector contract
        handler = self._pm.get_connector_handler(merge_connector)
        other = handler.load_canonical()
        if isinstance(other, dict):
            # a dict payload from the connector is coerced to a DataFrame before merging
            other = pd.DataFrame.from_dict(data=other)
        df = pd.merge(left=canonical, right=other, how=how, on=on, left_on=left_on, right_on=right_on,
                      left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, indicator=indicator,
                      validate=validate)
        if isinstance(regex, bool) and regex:
            # NOTE(review): when regex is True, rtn_columns is treated as the regex pattern itself
            rtn_columns = Commons.filter_headers(df, regex=rtn_columns)
        rtn_columns = rtn_columns if isinstance(rtn_columns, list) else df.columns.to_list()
        return Commons.filter_columns(df, headers=list(set(key + rtn_columns))).set_index(key)
def apply_map(self, canonical: Any, key: [str, list], header: str, value_map: dict,
default_to: Any=None, replace_na: bool=None, rtn_columns: list=None, regex: bool=None,
rename: str=None, unindex: bool=None, save_intent: bool=None, feature_name: [int, str]=None,
intent_order: int=None, replace_intent: bool=None, remove_duplicates: bool=None) -> pd.DataFrame:
""" Apply mapping and filtering based on a key value pair of find and replace values
:param canonical: the value to apply the substitution to
:param key: the key column to index on
:param header: the column header name to apply the value map too
:param value_map: a dictionary of keys and their replace value
:param default_to: (optional) a default value if no map if found. If None then NaN
:param replace_na: (optional) if existing NaN values should be replaced with default_value. if None then True
:param rtn_columns: (optional) return columns, the header must be listed to be included.
If None then header
if 'all' then all original headers
:param regex: if True then treat the rtn_columns as a regular expression
:param rename: a new name for the column, else current column header
:param unindex: (optional) if the passed canonical should be un-index before processing
:param save_intent (optional) if the intent contract should be saved to the property manager
:param feature_name: (optional) the level name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:return: the amended value
"""
# resolve intent persist options
self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
remove_duplicates=remove_duplicates, save_intent=save_intent)
# intend code block on the canonical
if header not in canonical.columns:
raise ValueError(f"The column header '{header}' is not in the canonical DataFrame")
canonical = self._get_canonical(canonical)
if isinstance(unindex, bool) and unindex:
canonical.reset_index(inplace=True)
key = Commons.list_formatter(key)
rename = rename if isinstance(rename, str) else header
if rtn_columns == 'all':
rtn_columns = Commons.filter_headers(canonical, headers=key, drop=True)
if isinstance(regex, bool) and regex:
rtn_columns = Commons.filter_headers(canonical, regex=rtn_columns)
rtn_columns = Commons.list_formatter(rtn_columns) if isinstance(rtn_columns, list) else [rename]
replace_na = replace_na if isinstance(replace_na, bool) else True
if default_to is not None:
value_map = Commons.dict_with_missing(value_map, default=default_to)
na_action = 'ignore' if replace_na else None
canonical[rename] = canonical[header].map(value_map, na_action=na_action)
canonical.dropna(subset=[rename], inplace=True)
return Commons.filter_columns(canonical, headers=list(set(key + rtn_columns))).set_index(key)
def apply_numeric_typing(self, canonical: Any, key: [str, list], header: str, normalise: bool=None,
precision: int=None, fillna: [int, float]=None, errors: str=None, rtn_columns: list=None,
rtn_regex: bool=None, unindex: bool=None, rename: str=None, save_intent: bool=None,
feature_name: [int, str]=None, intent_order: int=None, replace_intent: bool=None,
remove_duplicates: bool=None) -> pd.DataFrame:
""" converts columns to categories
:param canonical: the Pandas.DataFrame to get the column headers from
:param key: the key column to index on
:param header: the header to apply typing to
:param normalise: if the resulting column should be normalised
:param precision: how many decimal places to set the return values.
if None then precision is based on the most decimal places of all data points
if 0 (zero) the int is assumed
:param fillna: { num_value, 'mean', 'mode', 'median' }. Default to np.nan
- If num_value, then replaces NaN with this number value. Must be a value not a string
- If 'mean', then replaces NaN with the mean of the column
- If 'mode', then replaces NaN with a mode of the column. random sample if more than 1
- If 'median', then replaces NaN with the median of the column
:param errors : {'ignore', 'raise', 'coerce'}, default 'coerce'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaN
- If 'ignore', then invalid parsing will return the input
:param rtn_columns: (optional) return columns, the header must be listed to be included.
If None then header
if 'all' then all original headers
:param rtn_regex: if True then treat the rtn_columns as a regular expression
:param rename: a dictionary of headers to rename
:param unindex: if the passed canonical should be un-index before processing
:param save_intent (optional) if the intent contract should be saved to the property manager
:param feature_name: (optional) the level name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:return: selected list of headers indexed on key
"""
# resolve intent persist options
self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
remove_duplicates=remove_duplicates, save_intent=save_intent)
# Code block for intent
canonical = self._get_canonical(canonical)
if header not in canonical.columns:
raise ValueError(f"The column header '{header}' is not in the canonical DataFrame")
if isinstance(unindex, bool) and unindex:
canonical.reset_index(inplace=True)
key = Commons.list_formatter(key)
rename = rename if isinstance(rename, str) else header
if rtn_columns == 'all':
rtn_columns = Commons.filter_headers(canonical, headers=key, drop=True)
if isinstance(rtn_regex, bool) and rtn_regex:
rtn_columns = Commons.filter_headers(canonical, regex=rtn_columns)
rtn_columns = Commons.list_formatter(rtn_columns) if isinstance(rtn_columns, list) else [rename]
if canonical[header].dtype.name.startswith('int') and not isinstance(precision, int):
precision = 0
if not isinstance(fillna, (int, float)) and isinstance(precision, int) and precision == 0:
fillna = 0
module = HandlerFactory.get_module(module_name='ds_discovery')
canonical = module.Transition.scratch_pad().to_numeric_type(df=canonical, headers=header, precision=precision,
fillna=fillna, errors=errors, inplace=False)
if isinstance(normalise, bool) and normalise:
s_column = canonical[rename]
s_column /= np.linalg.norm(s_column)
if isinstance(precision, int):
s_column = np.round(s_column, precision)
canonical[rename] = s_column
return Commons.filter_columns(canonical, headers=list(set(key + rtn_columns))).set_index(key)
def apply_category_typing(self, canonical: Any, key: [str, list], header: str, as_num: bool=None,
rtn_columns: list=None, rtn_regex: bool=None, unindex: bool=None, rename: str=None,
save_intent: bool=None, feature_name: [int, str]=None, intent_order: int=None,
replace_intent: bool=None, remove_duplicates: bool=None) -> pd.DataFrame:
""" converts columns to categories
:param canonical: the Pandas.DataFrame to get the column headers from
:param key: the key column to index on
:param header: the header to apply typing to
:param as_num: if true returns the category as a category code
:param rtn_columns: (optional) return columns, the header must be listed to be included.
If None then header
if 'all' then all original headers
:param rtn_regex: if True then treat the rtn_columns as a regular expression
:param rename: a dictionary of headers to rename
:param unindex: if the passed canonical should be un-index before processing
:param save_intent (optional) if the intent contract should be saved to the property manager
:param feature_name: (optional) the level name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:return: selected list of headers indexed on key
"""
# resolve intent persist options
self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
remove_duplicates=remove_duplicates, save_intent=save_intent)
# Code block for intent
canonical = self._get_canonical(canonical)
if header not in canonical.columns:
raise ValueError(f"The column header '{header}' is not in the canonical DataFrame")
if isinstance(unindex, bool) and unindex:
canonical.reset_index(inplace=True)
key = Commons.list_formatter(key)
rename = rename if isinstance(rename, str) else header
if rtn_columns == 'all':
rtn_columns = Commons.filter_headers(canonical, headers=key, drop=True)
if isinstance(rtn_regex, bool) and rtn_regex:
rtn_columns = Commons.filter_headers(canonical, regex=rtn_columns)
rtn_columns = Commons.list_formatter(rtn_columns) if isinstance(rtn_columns, list) else [rename]
module = HandlerFactory.get_module(module_name='ds_discovery')
canonical = module.Transition.scratch_pad().to_category_type(df=canonical, headers=header, as_num=as_num,
inplace=False)
return Commons.filter_columns(canonical, headers=list(set(key + rtn_columns))).set_index(key)
def apply_replace(self, canonical: Any, key: [str, list], header: str, to_replace: dict,
regex: bool=None, rtn_columns: list=None, rtn_regex: bool=None, unindex: bool=None,
rename: str=None, save_intent: bool=None, feature_name: [int, str]=None, intent_order: int=None,
replace_intent: bool=None, remove_duplicates: bool=None) -> pd.DataFrame:
""" Apply replacement based on a key value pair of find and replace values. if you wish to replace null values
or put in null values use the tag '$null' to represent None or np.nan
:param canonical: the value to apply the substitution to
:param key: the key column to index on
:param header: the column header name to apply the value map too
:param to_replace: a dictionary of keys and their replace value
:param regex: if the to_replace is regular expression
:param rtn_columns: (optional) return columns, the header must be listed to be included.
If None then header
if 'all' then all original headers
:param rtn_regex: if True then treat the rtn_columns as a regular expression
:param rename: a dictionary of headers to rename
:param unindex: if the passed canonical should be un-index before processing
:param save_intent (optional) if the intent contract should be saved to the property manager
:param feature_name: (optional) the level name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:return: the amended value
"""
# resolve intent persist options
self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
remove_duplicates=remove_duplicates, save_intent=save_intent)
# intend code block on the canonical
canonical = self._get_canonical(canonical)
if header not in canonical.columns:
raise ValueError(f"The column header '{header}' is not in the canonical DataFrame")
if isinstance(unindex, bool) and unindex:
canonical.reset_index(inplace=True)
key = Commons.list_formatter(key)
rename = rename if isinstance(rename, str) else header
if rtn_columns == 'all':
rtn_columns = Commons.filter_headers(canonical, headers=key, drop=True)
if isinstance(rtn_regex, bool) and rtn_regex:
rtn_columns = Commons.filter_headers(canonical, regex=rtn_columns)
rtn_columns = Commons.list_formatter(rtn_columns) if isinstance(rtn_columns, list) else [rename]
# replace null tag with np.nan
for _ref, _value in to_replace.copy().items():
if _ref == '$null':
to_replace.pop(_ref)
to_replace[np.nan] = _value
if _value == '$null':
to_replace[_ref] = np.nan
regex = regex if isinstance(regex, bool) else False
canonical[rename] = canonical[header].replace(to_replace=to_replace, inplace=False, regex=regex)
return Commons.filter_columns(canonical, headers=list(set(key + rtn_columns))).set_index(key)
def apply_condition(self, canonical: Any, key: [str, list], header: str, conditions: [tuple, list],
default: [int, float, str]=None, inc_columns: list=None, rename: str=None, unindex: bool=None,
save_intent: bool=None, feature_name: [int, str]=None, intent_order: int=None,
replace_intent: bool=None, remove_duplicates: bool=None) -> pd.DataFrame:
""" applies a selections choice based on a set of conditions to a condition to a named column
Example: conditions = tuple('< 5', 'red')
or: conditions = [('< 5', 'green'), ('> 5 & < 10', 'red')]
:param canonical: the Pandas.DataFrame to get the column headers from
:param key: the key column to index on
:param header: a list of headers to apply the condition on,
:param unindex: if the passed canonical should be un-index before processing
:param conditions: a tuple or list of tuple conditions
:param default: (optional) a value if no condition is met. 0 if not set
:param inc_columns: additional columns to include in the returning DataFrame
:param rename: (optional) if the column should have an alternative name
:param save_intent: (optional) if the intent contract should be saved to the property manager
:param feature_name: (optional) the level name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:return:
"""
# resolve intent persist options
self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
remove_duplicates=remove_duplicates, save_intent=save_intent)
# Code block for intent
canonical = self._get_canonical(canonical)
if header not in canonical.columns:
raise ValueError(f"The column header '{header}' is not in the canonical DataFrame")
if isinstance(unindex, bool) and unindex:
canonical.reset_index(inplace=True)
key = Commons.list_formatter(key)
rename = rename if isinstance(rename, str) else header
inc_columns = self._pm.list_formatter(inc_columns)
if not inc_columns:
inc_columns = Commons.filter_headers(canonical, headers=key, drop=True)
str_code = ''
if isinstance(conditions, tuple):
conditions = [conditions]
choices = []
str_code = []
for item, choice in conditions:
choices.append(choice)
or_list = []
for _or in item.split('|'):
and_list = []
for _and in _or.split('&'):
and_list.append(f"(canonical[header]{_and})")
and_list.append('&')
_ = and_list.pop(-1)
_or = "".join(and_list)
or_list.append(f"({_or})")
or_list.append('|')
_ = or_list.pop(-1)
str_code.append("".join(or_list))
selection = []
for item in str_code:
selection.append(eval(item, globals(), locals()))
if isinstance(default, (str, int, float)):
canonical[rename] = np.select(selection, choices, default=default)
else:
canonical[rename] = np.select(selection, choices)
return Commons.filter_columns(canonical, headers=list(set(key + inc_columns))).set_index(key)
def select_where(self, canonical: Any, key: [str, list], selection: list, inc_columns: list=None,
unindex: bool=None, save_intent: bool=None, feature_name: [int, str]=None, intent_order: int=None,
replace_intent: bool=None, remove_duplicates: bool=None) -> pd.DataFrame:
""" returns a selected result based upon a set of conditions.
:param canonical: the Pandas.DataFrame to get the column headers from
:param key: the key column to index on
:param selection: a list of dictionaries of selection where conditions to filter on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param inc_columns: additional columns to include in the returning DataFrame
:param unindex: if the passed canonical should be un-index before processing
:param save_intent: (optional) if the intent contract should be saved to the property manager
:param feature_name: (optional) the level name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:return: apandas DataFrame of the resulting select
Conditions are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'conditions2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [self.select2dict(column='gender', condition="=='M'"),
self.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensure the correct keys are used and the dictionary is properly formed
"""
# resolve intent persist options
self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
remove_duplicates=remove_duplicates, save_intent=save_intent)
# Code block for intent
if not isinstance(selection, list) or not all(isinstance(x, dict) for x in selection):
raise ValueError("The 'selection' parameter must be a 'list' of 'dict' types")
for _where in selection:
if 'column' not in _where or 'condition' not in _where:
raise ValueError("all 'dict' in the 'selection' list must have a 'column' and 'condition' key "
"as a minimum")
canonical = self._get_canonical(canonical)
if isinstance(unindex, bool) and unindex:
canonical.reset_index(inplace=True)
key = Commons.list_formatter(key)
inc_columns = self._pm.list_formatter(inc_columns)
if not inc_columns:
inc_columns = Commons.filter_headers(canonical, headers=key, drop=True)
select_idx = None
for _where in selection:
select_idx = self._condition_index(canonical=canonical, condition=_where, select_idx=select_idx)
canonical = canonical.iloc[select_idx]
return Commons.filter_columns(canonical, headers=list(set(key + inc_columns))).set_index(key)
    def remove_outliers(self, canonical: Any, key: [str, list], column: str, lower_quantile: float=None,
                        upper_quantile: float=None, unindex: bool=None, save_intent: bool=None,
                        feature_name: [int, str]=None, intent_order: int=None, replace_intent: bool=None,
                        remove_duplicates: bool=None) -> [None, pd.DataFrame]:
        """ removes outliers by removing the boundary quantiles

        :param canonical: the DataFrame to apply
        :param key: the key column to index on
        :param column: the column name to remove outliers
        :param lower_quantile: (optional) the lower quantile in the range 0 < lower_quantile < 1, default to 0.00001
        :param upper_quantile: (optional) the upper quantile in the range 0 < upper_quantile < 1, default to 0.99999
        :param unindex: if the passed canonical should be un-index before processing
        :param save_intent: (optional) if the intent contract should be saved to the property manager
        :param feature_name: (optional) the level name that groups intent by a reference name
        :param intent_order: (optional) the order in which each intent should run.
                        If None: default's to -1
                        if -1: added to a level above any current instance of the intent section, level 0 if not found
                        if int: added to the level specified, overwriting any that already exist
        :param replace_intent: (optional) if the intent method exists at the level, or default level
                        True - replaces the current intent method with the new
                        False - leaves it untouched, disregarding the new intent
        :param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
        :return: the revised values
        """
        # resolve intent persist options
        self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
                                   feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
                                   remove_duplicates=remove_duplicates, save_intent=save_intent)
        # intend code block on the canonical
        canonical = self._get_canonical(canonical)
        if isinstance(unindex, bool) and unindex:
            canonical.reset_index(inplace=True)
        key = Commons.list_formatter(key)
        df_rtn = Commons.filter_columns(canonical, headers=key + [column])
        # out-of-range or missing quantiles fall back to near-total coverage (0.00001 / 0.99999)
        lower_quantile = lower_quantile if isinstance(lower_quantile, float) and 0 < lower_quantile < 1 else 0.00001
        upper_quantile = upper_quantile if isinstance(upper_quantile, float) and 0 < upper_quantile < 1 else 0.99999
        # analyse the column to find the interval boundaries at the given quantile granularity
        result = DataDiscovery.analyse_number(df_rtn[column], granularity=[lower_quantile, upper_quantile],
                                              detail_stats=False)
        analysis = DataAnalytics(result)
        # keep only rows strictly inside the lower and upper boundary intervals
        df_rtn = df_rtn[(df_rtn[column] > analysis.intent.intervals[0][1]) & (df_rtn[column] <
                                                                              analysis.intent.intervals[2][0])]
        return df_rtn.set_index(key)
def group_features(self, canonical: Any, headers: [str, list], group_by: [str, list],
aggregator: str=None, drop_group_by: bool=False, include_weighting: bool=False,
freq_precision: int=None, remove_weighting_zeros: bool=False, remove_aggregated: bool=False,
drop_dup_index: str=None, unindex: bool=None, save_intent: bool=None,
feature_name: [int, str]=None, intent_order: int=None, replace_intent: bool=None,
remove_duplicates: bool=None) -> pd.DataFrame:
""" groups features according to the aggregator passed. The list of aggregators are mean, sum, size, count,
nunique, first, last, min, max, std var, describe.
:param canonical: the pd.DataFrame to group
:param headers: the column headers to apply the aggregation too
:param group_by: the column headers to group by
:param aggregator: (optional) the aggregator as a function of Pandas DataFrame 'groupby'
:param drop_group_by: drops the group by headers
:param include_weighting: include a percentage weighting column for each
:param freq_precision: a precision for the weighting values
:param remove_aggregated: if used in conjunction with the weighting then drops the aggrigator column
:param remove_weighting_zeros: removes zero values
:param drop_dup_index: if any duplicate index should be removed passing either 'First' or 'last'
:param unindex: if the passed canonical should be un-index before processing
:param save_intent (optional) if the intent contract should be saved to the property manager
:param feature_name: (optional) the level name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:return: pd.DataFrame
"""
# resolve intent persist options
self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
remove_duplicates=remove_duplicates, save_intent=save_intent)
# intend code block on the canonical
if isinstance(unindex, bool) and unindex:
canonical.reset_index(inplace=True)
canonical = self._get_canonical(canonical)
freq_precision = freq_precision if isinstance(freq_precision, int) else 3
aggregator = aggregator if isinstance(aggregator, str) else 'sum'
headers = self._pm.list_formatter(headers)
group_by = self._pm.list_formatter(group_by)
df_sub = Commons.filter_columns(canonical, headers=headers + group_by).dropna()
df_sub = df_sub.groupby(group_by).agg(aggregator)
if include_weighting:
df_sub['sum'] = df_sub.sum(axis=1, numeric_only=True)
total = df_sub['sum'].sum()
df_sub['weighting'] = df_sub['sum'].\
apply(lambda x: round((x / total), freq_precision) if isinstance(x, (int, float)) else 0)
df_sub = df_sub.drop(columns='sum')
if remove_weighting_zeros:
df_sub = df_sub[df_sub['weighting'] > 0]
df_sub = df_sub.sort_values(by='weighting', ascending=False)
if isinstance(drop_dup_index, str) and drop_dup_index.lower() in ['first', 'last']:
df_sub = df_sub.loc[~df_sub.index.duplicated(keep=drop_dup_index)]
if remove_aggregated:
df_sub = df_sub.drop(headers, axis=1)
if drop_group_by:
df_sub = df_sub.drop(columns=group_by, errors='ignore')
return df_sub
def interval_categorical(self, canonical: Any, key: [str, list], column: str,
inc_columns: list=None, granularity: [int, float, list]=None, lower: [int, float]=None,
upper: [int, float]=None, rename: str=None, categories: list=None, precision: int=None,
unindex: bool=None, save_intent: bool=None, feature_name: [int, str]=None,
intent_order: int=None, replace_intent: bool=None,
remove_duplicates: bool=None) -> [None, pd.DataFrame]:
""" converts continuous representation into discrete representation through interval categorisation
:param canonical: the dataset where the column and target can be found
:param key: the key column to index one
:param column: the column name to be converted
:param inc_columns: additional columns to include in the returning DataFrame
:param granularity: (optional) the granularity of the analysis across the range. Default is 3
int passed - represents the number of periods
float passed - the length of each interval
list[tuple] - specific interval periods e.g []
list[float] - the percentile or quantities, All should fall between 0 and 1
:param lower: (optional) the lower limit of the number value. Default min()
:param upper: (optional) the upper limit of the number value. Default max()
:param precision: (optional) The precision of the range and boundary values. by default set to 5.
:param rename: (optional) if the column should have an alternative name
:param categories:(optional) a set of labels the same length as the intervals to name the categories
:param unindex: if the passed canonical should be un-index before processing
:param save_intent (optional) if the intent contract should be saved to the property manager
:param feature_name: (optional) the level name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:return: the converted fields
"""
# exceptions check
canonical = self._get_canonical(canonical)
if isinstance(unindex, bool) and unindex:
canonical.reset_index(inplace=True)
key = Commons.list_formatter(key)
if column not in canonical.columns:
raise ValueError(f"The column value '{column}' is not a column name in the canonical passed")
# resolve intent persist options
self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
remove_duplicates=remove_duplicates, save_intent=save_intent)
# intend code block on the canonical
inc_columns = self._pm.list_formatter(inc_columns)
if not inc_columns:
inc_columns = Commons.filter_headers(canonical, headers=key, drop=True)
granularity = 3 if not isinstance(granularity, (int, float, list)) or granularity == 0 else granularity
precision = precision if isinstance(precision, int) else 5
rename = rename if isinstance(rename, str) else f"{column}_cat"
# firstly get the granularity
lower = canonical[column].min() if not isinstance(lower, (int, float)) else lower
upper = canonical[column].max() if not isinstance(upper, (int, float)) else upper
if lower >= upper:
upper = lower
granularity = [(lower, upper, 'both')]
if isinstance(granularity, (int, float)):
# if granularity float then convert frequency to intervals
if isinstance(granularity, float):
# make sure frequency goes beyond the upper
_end = upper + granularity - (upper % granularity)
periods = pd.interval_range(start=lower, end=_end, freq=granularity).drop_duplicates()
periods = periods.to_tuples().to_list()
granularity = []
while len(periods) > 0:
period = periods.pop(0)
if len(periods) == 0:
granularity += [(period[0], period[1], 'both')]
else:
granularity += [(period[0], period[1], 'left')]
# if granularity int then convert periods to intervals
else:
periods = pd.interval_range(start=lower, end=upper, periods=granularity).drop_duplicates()
granularity = periods.to_tuples().to_list()
if isinstance(granularity, list):
if all(isinstance(value, tuple) for value in granularity):
if len(granularity[0]) == 2:
granularity[0] = (granularity[0][0], granularity[0][1], 'both')
granularity = [(t[0], t[1], 'right') if len(t) == 2 else t for t in granularity]
elif all(isinstance(value, float) and 0 < value < 1 for value in granularity):
quantiles = list(set(granularity + [0, 1.0]))
boundaries = canonical[column].quantile(quantiles).values
boundaries.sort()
granularity = [(boundaries[0], boundaries[1], 'both')]
granularity += [(boundaries[i - 1], boundaries[i], 'right') for i in range(2, boundaries.size)]
else:
granularity = (lower, upper, 'both')
granularity = [(np.round(p[0], precision), np.round(p[1], precision), p[2]) for p in granularity]
df_rtn = Commons.filter_columns(canonical, headers=key + [column])
# now create the categories
conditions = []
for interval in granularity:
lower, upper, closed = interval
if str.lower(closed) == 'neither':
conditions.append((df_rtn[column] > lower) & (df_rtn[column] < upper))
elif str.lower(closed) == 'right':
conditions.append((df_rtn[column] > lower) & (df_rtn[column] <= upper))
elif str.lower(closed) == 'both':
conditions.append((df_rtn[column] >= lower) & (df_rtn[column] <= upper))
else:
conditions.append((df_rtn[column] >= lower) & (df_rtn[column] < upper))
if isinstance(categories, list) and len(categories) == len(conditions):
choices = categories
else:
if df_rtn[column].dtype.name.startswith('int'):
choices = [f"{int(i[0])}->{int(i[1])}" for i in granularity]
else:
choices = [f"{i[0]}->{i[1]}" for i in granularity]
# noinspection PyTypeChecker
df_rtn[rename] = np.select(conditions, choices, default="<NA>")
df_rtn[rename] = df_rtn[rename].astype('category', copy=False)
df_rtn = df_rtn.drop(column, axis=1).set_index(key)
return df_rtn
def group_flatten_multihot(self, canonical: Any, key: [str, list], header: str, prefix=None,
prefix_sep: str=None, dummy_na: bool=False, drop_first: bool=False, dtype: Any=None,
aggregator: str=None, dups=True, title_rename_map: dict=None, title_case=None,
title_replace_spaces: str=None, inc_columns: list=None, unindex: bool=None,
save_intent: bool=None, feature_name: [int, str]=None, intent_order: int=None,
replace_intent: bool=None, remove_duplicates: bool=None) -> [None, pd.DataFrame]:
""" groups flattens a one-hot or multi-hot encoding of a categorical
:param canonical: the Dataframe to reference
:param key: the key column to sum on
:param header: the category type column break into the category columns
:param aggregator: (optional) the aggregator as a function of Pandas DataFrame 'groupby'
:param title_rename_map: dictionary map of title header mapping
:param title_case: changes the column header title to lower, upper, title, snake.
:param title_replace_spaces: character to replace spaces in title headers. Default is '_' (underscore)
:param prefix : str, list of str, or dict of str, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
:param prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
:param dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
:param drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
:param dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
:param inc_columns: (optional) additional columns to include in the returning canonical.
If extra columsn are included the group aggriation key will be on all these columns not just the key.
:param dups: id duplicates should be removed from the original canonical
:param unindex: if the passed canonical should be un-index before processing
:param save_intent (optional) if the intent contract should be saved to the property manager
:param feature_name: (optional) the level name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:return: a pd.Dataframe of the flattened categorical
"""
# resolve intent persist options
self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
remove_duplicates=remove_duplicates, save_intent=save_intent)
# intend code block on the canonical
if header not in canonical:
raise NameError(f"The column {header} can't be found in the DataFrame")
canonical = self._get_canonical(canonical)
aggregator = aggregator if isinstance(aggregator, str) else 'sum'
prefix = prefix if isinstance(prefix, str) else header
prefix_sep = prefix_sep if isinstance(prefix_sep, str) else "_"
dummy_na = dummy_na if isinstance(dummy_na, bool) else False
drop_first = drop_first if isinstance(drop_first, bool) else False
dtype = dtype if dtype else np.uint8
if isinstance(unindex, bool) and unindex:
canonical.reset_index(inplace=True)
key = Commons.list_formatter(key)
if canonical[header].dtype.name != 'category':
canonical[header] = canonical[header].astype('category')
inc_columns = self._pm.list_formatter(inc_columns)
df = Commons.filter_columns(canonical, headers=list(set(key + [header] + inc_columns)))
if not dups:
df.drop_duplicates(inplace=True)
dummy_df = pd.get_dummies(canonical, columns=[header], prefix=prefix, prefix_sep=prefix_sep, dummy_na=dummy_na,
drop_first=drop_first, dtype=dtype)
dummy_cols = Commons.filter_headers(dummy_df, regex=f'{prefix}{prefix_sep}')
group_cols = Commons.filter_headers(dummy_df, headers=dummy_cols, drop=True)
dummy_df = self.group_features(dummy_df, headers=dummy_cols, group_by=group_cols, aggregator=aggregator,
save_intent=False).reset_index()
module = HandlerFactory.get_module(module_name='ds_discovery')
module.Transition.scratch_pad().auto_clean_header(dummy_df, case=title_case, rename_map=title_rename_map,
replace_spaces=title_replace_spaces, inplace=True)
return dummy_df.set_index(key)
    def custom_builder(self, canonical: Any, code_str: str, use_exec: bool=False,
                       save_intent: bool=None, feature_name: [int, str]=None, intent_order: int=None,
                       replace_intent: bool=None, remove_duplicates: bool=None, **kwargs) -> [None, pd.DataFrame]:
        """ enacts a code_str on a dataFrame, returning the output of the code_str or the DataFrame if using exec or
        the evaluation returns None. Note that if using the input dataframe in your code_str, it is internally
        referenced as it's parameter name 'canonical'.

        SECURITY NOTE: code_str is executed with eval()/exec(); it must only come from trusted
        intent contracts, never from untrusted user input.

        :param canonical: a pd.DataFrame used in the action
        :param code_str: an action on those column values
        :param use_exec: (optional) By default the code runs as eval if set to true exec would be used
        :param save_intent: (optional) if the intent contract should be saved to the property manager
        :param feature_name: (optional) the level name that groups intent by a reference name
        :param intent_order: (optional) the order in which each intent should run.
                        If None: default's to -1
                        if -1: added to a level above any current instance of the intent section, level 0 if not found
                        if int: added to the level specified, overwriting any that already exist
        :param replace_intent: (optional) if the intent method exists at the level, or default level
                        True - replaces the current intent method with the new
                        False - leaves it untouched, disregarding the new intent
        :param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
        :param kwargs: a set of kwargs to include in any executable function
        :return: a list or pandas.DataFrame
        """
        # resolve intent persist options
        self._set_intend_signature(self._intent_builder(method=inspect.currentframe().f_code.co_name, params=locals()),
                                   feature_name=feature_name, intent_order=intent_order, replace_intent=replace_intent,
                                   remove_duplicates=remove_duplicates, save_intent=save_intent)
        # intend code block on the canonical
        canonical = self._get_canonical(canonical)
        # expose caller kwargs as the local namespace for the code_str
        local_kwargs = locals().get('kwargs') if 'kwargs' in locals() else dict()
        if 'canonical' not in local_kwargs:
            local_kwargs['canonical'] = canonical
        # exec always returns None, so in that mode the (possibly mutated) canonical is returned
        result = exec(code_str, globals(), local_kwargs) if use_exec else eval(code_str, globals(), local_kwargs)
        if result is None:
            return canonical
        return result
    @staticmethod
    def select2dict(column: str, condition: str, operator: str=None, logic: str=None, date_format: str=None,
                    offset: int=None):
        """ a utility method to help build feature conditions by aligning method parameters with dictionary format.

        :param column: the column name to apply the condition to
        :param condition: the condition string (special conditions are 'date.now' for current date)
        :param operator: (optional) an operator to place before the condition if not included in the condition
        :param logic: (optional) the logic to provide, options are 'and', 'or', 'not', 'xor'
        :param date_format: (optional) a format of the date if only a specific part of the date and time is required
        :param offset: (optional) a time delta in days (+/-) from the current date and time (minutes not supported)
        :return: dictionary of the parameters

        logic:
            and: the intersect of the left and the right (common to both)
            or: the union of the left and the right (everything in both)
            diff: the left minus the intersect of the right (only things in the left excluding common to both)
        """
        # locals() here contains exactly the method parameters; do not introduce any local
        # variable before this call or it would leak into the returned dictionary
        return Commons.param2dict(**locals())
@staticmethod
def canonical_sampler(canonical: [pd.DataFrame, pd.Series], sample_size: [int, float], shuffle: bool=None,
train_only: bool=True, seed: int=None) -> [tuple, pd.DataFrame, pd.Series]:
""" returns a tuple of the canonical split of sample size and the remaining
:param canonical: a canonical to take the sampler from
:param sample_size: If float, should be between 0.0 and 1.0 and represent the proportion of the
data set to return as a sample. If int, represents the absolute number of samples.
:param shuffle: (optional) if the canonical should be shuffled
:param train_only: (optional) if only the train data-set should be returned rather than the train, test tuple
:param seed: (optional) if shuffle is not None a seed value for the sample_size
:return: a (sample, remaining) tuple
"""
if not isinstance(canonical, (pd.DataFrame, pd.Series)):
raise ValueError(f"The canonical must be a pandas DataFrame or Series")
shuffle = shuffle if isinstance(shuffle, bool) else False
if isinstance(sample_size, float):
if not 0 < sample_size < 1:
raise ValueError(f"if passing a test_size as a float the number must be tween 0 and 1")
if shuffle:
train = canonical.sample(frac=sample_size, random_state=seed)
else:
train = canonical.iloc[:int(canonical.shape[0] * sample_size)]
elif isinstance(sample_size, int):
if sample_size > canonical.shape[0]:
raise ValueError(f"The sample size '{sample_size}' can't be greater than the canonical "
f"number the rows '{canonical.shape[0]}'")
if shuffle:
train = canonical.sample(n=sample_size, random_state=seed)
else:
train = canonical.iloc[:sample_size]
else:
raise ValueError(f"sample_size must be an int less than the number of rows or a float between 0 and 1")
test = canonical.loc[~canonical.index.isin(train.index), :]
if isinstance(train_only, bool) and train_only:
return train
return train, test
"""
PRIVATE METHODS SECTION
"""
def _intent_builder(self, method: str, params: dict, exclude: list=None) -> dict:
"""builds the intent_params. Pass the method name and local() parameters
Example:
self._intent_builder(inspect.currentframe().f_code.co_name, **locals())
:param method: the name of the method (intent). can use 'inspect.currentframe().f_code.co_name'
:param params: the parameters passed to the method. use `locals()` in the caller method
:param exclude: (optional) convenience parameter identifying param keys to exclude.
:return: dict of the intent
"""
if not isinstance(params.get('canonical', None), (str, dict)):
exclude = ['canonical']
return super()._intent_builder(method=method, params=params, exclude=exclude)
def _set_intend_signature(self, intent_params: dict, feature_name: [int, str]=None, intent_order: int=None,
replace_intent: bool=None, remove_duplicates: bool=None, save_intent: bool=None):
""" sets the intent section in the configuration file. Note: by default any identical intent, e.g.
intent with the same intent (name) and the same parameter values, are removed from any level.
:param intent_params: a dictionary type set of configuration representing a intent section contract
:param feature_name: (optional) the feature name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:param save_intent (optional) if the intent contract should be saved to the property manager
"""
if save_intent or (not isinstance(save_intent, bool) and self._default_save_intent):
if not isinstance(feature_name, (str, int)) or not feature_name:
raise ValueError(f"if the intent is to be saved then a feature name must be provided")
super()._set_intend_signature(intent_params=intent_params, intent_level=feature_name, intent_order=intent_order,
replace_intent=replace_intent, remove_duplicates=remove_duplicates,
save_intent=save_intent)
return
@staticmethod
def _condition_index(canonical: pd.DataFrame, condition: dict, select_idx: pd.Int64Index):
""" private method to select index from the selection conditions
:param canonical: a pandas DataFrame to select from
:param condition: the dict conditions
:param select_idx: the current selection index of the canonical
:return: returns the current select_idx of the condition
"""
_column = condition.get('column')
_condition = condition.get('condition')
_operator = condition.get('operator', '')
_logic = condition.get('logic', 'and')
if _condition == 'date.now':
_date_format = condition.get('date_format', "%Y-%m-%dT%H:%M:%S")
_offset = condition.get('offset', 0)
_condition = f"'{(pd.Timestamp.now() + pd.Timedelta(days=_offset)).strftime(_date_format)}'"
s_values = canonical[_column]
idx = eval(f"s_values.where(s_values{_operator}{_condition}).dropna().index", globals(), locals())
if select_idx is None:
select_idx = idx
else:
if str(_logic).lower() == 'and':
select_idx = select_idx.intersection(idx)
elif str(_logic).lower() == 'or':
select_idx = select_idx.union(idx)
elif str(_logic).lower() == 'not':
select_idx = select_idx.difference(idx)
elif str(_logic).lower() == 'xor':
select_idx = select_idx.union(idx).difference(select_idx.intersection(idx))
else:
raise ValueError(f"The logic '{_logic}' for column '{_column}' is not recognised logic. "
f"Use 'AND', 'OR', 'NOT', 'XOR'")
return select_idx
| [
"pandas.interval_range",
"pandas.DataFrame.from_dict",
"aistac.handlers.abstract_handlers.HandlerFactory.get_module",
"ds_discovery.components.commons.Commons.list_formatter",
"pandas.get_dummies",
"pandas.merge",
"pandas.Timedelta",
"aistac.components.aistac_commons.DataAnalytics",
"ds_discovery.co... | [((9118, 9145), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['key'], {}), '(key)\n', (9140, 9145), False, 'from ds_discovery.components.commons import Commons\n'), ((13359, 13386), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['key'], {}), '(key)\n', (13381, 13386), False, 'from ds_discovery.components.commons import Commons\n'), ((13412, 13555), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', ([], {'df': 'canonical', 'headers': 'headers', 'drop': 'drop', 'dtype': 'dtype', 'exclude': 'exclude', 'regex': 'regex', 're_ignore_case': 're_ignore_case'}), '(df=canonical, headers=headers, drop=drop, dtype=\n dtype, exclude=exclude, regex=regex, re_ignore_case=re_ignore_case)\n', (13434, 13555), False, 'from ds_discovery.components.commons import Commons\n'), ((13671, 13728), 'ds_discovery.components.commons.Commons.filter_columns', 'Commons.filter_columns', (['canonical'], {'headers': 'filter_headers'}), '(canonical, headers=filter_headers)\n', (13693, 13728), False, 'from ds_discovery.components.commons import Commons\n'), ((20073, 20100), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['key'], {}), '(key)\n', (20095, 20100), False, 'from ds_discovery.components.commons import Commons\n'), ((20488, 20704), 'pandas.merge', 'pd.merge', ([], {'left': 'canonical', 'right': 'other', 'how': 'how', 'on': 'on', 'left_on': 'left_on', 'right_on': 'right_on', 'left_index': 'left_index', 'right_index': 'right_index', 'sort': 'sort', 'suffixes': 'suffixes', 'indicator': 'indicator', 'validate': 'validate'}), '(left=canonical, right=other, how=how, on=on, left_on=left_on,\n right_on=right_on, left_index=left_index, right_index=right_index, sort\n =sort, suffixes=suffixes, indicator=indicator, validate=validate)\n', (20496, 20704), True, 'import pandas as pd\n'), ((24154, 24181), 
'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['key'], {}), '(key)\n', (24176, 24181), False, 'from ds_discovery.components.commons import Commons\n'), ((28997, 29024), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['key'], {}), '(key)\n', (29019, 29024), False, 'from ds_discovery.components.commons import Commons\n'), ((29702, 29755), 'aistac.handlers.abstract_handlers.HandlerFactory.get_module', 'HandlerFactory.get_module', ([], {'module_name': '"""ds_discovery"""'}), "(module_name='ds_discovery')\n", (29727, 29755), False, 'from aistac.handlers.abstract_handlers import HandlerFactory\n'), ((33208, 33235), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['key'], {}), '(key)\n', (33230, 33235), False, 'from ds_discovery.components.commons import Commons\n'), ((33671, 33724), 'aistac.handlers.abstract_handlers.HandlerFactory.get_module', 'HandlerFactory.get_module', ([], {'module_name': '"""ds_discovery"""'}), "(module_name='ds_discovery')\n", (33696, 33724), False, 'from aistac.handlers.abstract_handlers import HandlerFactory\n'), ((37058, 37085), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['key'], {}), '(key)\n', (37080, 37085), False, 'from ds_discovery.components.commons import Commons\n'), ((40945, 40972), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['key'], {}), '(key)\n', (40967, 40972), False, 'from ds_discovery.components.commons import Commons\n'), ((46031, 46058), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['key'], {}), '(key)\n', (46053, 46058), False, 'from ds_discovery.components.commons import Commons\n'), ((49013, 49040), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['key'], {}), '(key)\n', (49035, 49040), False, 'from ds_discovery.components.commons import Commons\n'), 
((49058, 49115), 'ds_discovery.components.commons.Commons.filter_columns', 'Commons.filter_columns', (['canonical'], {'headers': '(key + [column])'}), '(canonical, headers=key + [column])\n', (49080, 49115), False, 'from ds_discovery.components.commons import Commons\n'), ((49368, 49482), 'ds_discovery.components.discovery.DataDiscovery.analyse_number', 'DataDiscovery.analyse_number', (['df_rtn[column]'], {'granularity': '[lower_quantile, upper_quantile]', 'detail_stats': '(False)'}), '(df_rtn[column], granularity=[lower_quantile,\n upper_quantile], detail_stats=False)\n', (49396, 49482), False, 'from ds_discovery.components.discovery import DataDiscovery\n'), ((49544, 49565), 'aistac.components.aistac_commons.DataAnalytics', 'DataAnalytics', (['result'], {}), '(result)\n', (49557, 49565), False, 'from aistac.components.aistac_commons import DataAnalytics\n'), ((57247, 57274), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['key'], {}), '(key)\n', (57269, 57274), False, 'from ds_discovery.components.commons import Commons\n'), ((60605, 60662), 'ds_discovery.components.commons.Commons.filter_columns', 'Commons.filter_columns', (['canonical'], {'headers': '(key + [column])'}), '(canonical, headers=key + [column])\n', (60627, 60662), False, 'from ds_discovery.components.commons import Commons\n'), ((61725, 61771), 'numpy.select', 'np.select', (['conditions', 'choices'], {'default': '"""<NA>"""'}), "(conditions, choices, default='<NA>')\n", (61734, 61771), True, 'import numpy as np\n'), ((66602, 66629), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['key'], {}), '(key)\n', (66624, 66629), False, 'from ds_discovery.components.commons import Commons\n'), ((66994, 67135), 'pandas.get_dummies', 'pd.get_dummies', (['canonical'], {'columns': '[header]', 'prefix': 'prefix', 'prefix_sep': 'prefix_sep', 'dummy_na': 'dummy_na', 'drop_first': 'drop_first', 'dtype': 'dtype'}), '(canonical, 
columns=[header], prefix=prefix, prefix_sep=\n prefix_sep, dummy_na=dummy_na, drop_first=drop_first, dtype=dtype)\n', (67008, 67135), True, 'import pandas as pd\n'), ((67186, 67249), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['dummy_df'], {'regex': 'f"""{prefix}{prefix_sep}"""'}), "(dummy_df, regex=f'{prefix}{prefix_sep}')\n", (67208, 67249), False, 'from ds_discovery.components.commons import Commons\n'), ((67271, 67334), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['dummy_df'], {'headers': 'dummy_cols', 'drop': '(True)'}), '(dummy_df, headers=dummy_cols, drop=True)\n', (67293, 67334), False, 'from ds_discovery.components.commons import Commons\n'), ((67537, 67590), 'aistac.handlers.abstract_handlers.HandlerFactory.get_module', 'HandlerFactory.get_module', ([], {'module_name': '"""ds_discovery"""'}), "(module_name='ds_discovery')\n", (67562, 67590), False, 'from aistac.handlers.abstract_handlers import HandlerFactory\n'), ((9291, 9359), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'headers': '(key + [rename])', 'drop': '(True)'}), '(canonical, headers=key + [rename], drop=True)\n', (9313, 9359), False, 'from ds_discovery.components.commons import Commons\n'), ((9432, 9484), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'regex': 'rtn_columns'}), '(canonical, regex=rtn_columns)\n', (9454, 9484), False, 'from ds_discovery.components.commons import Commons\n'), ((9507, 9542), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['rtn_columns'], {}), '(rtn_columns)\n', (9529, 9542), False, 'from ds_discovery.components.commons import Commons\n'), ((9889, 9913), 'numpy.timedelta64', 'np.timedelta64', (['(1)', 'units'], {}), '(1, units)\n', (9903, 9913), True, 'import numpy as np\n'), ((9944, 9966), 'numpy.round', 'np.round', (['v', 'precision'], 
{}), '(v, precision)\n', (9952, 9966), True, 'import numpy as np\n'), ((20440, 20474), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', ([], {'data': 'other'}), '(data=other)\n', (20462, 20474), True, 'import pandas as pd\n'), ((20812, 20857), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['df'], {'regex': 'rtn_columns'}), '(df, regex=rtn_columns)\n', (20834, 20857), False, 'from ds_discovery.components.commons import Commons\n'), ((24304, 24361), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'headers': 'key', 'drop': '(True)'}), '(canonical, headers=key, drop=True)\n', (24326, 24361), False, 'from ds_discovery.components.commons import Commons\n'), ((24434, 24486), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'regex': 'rtn_columns'}), '(canonical, regex=rtn_columns)\n', (24456, 24486), False, 'from ds_discovery.components.commons import Commons\n'), ((24509, 24544), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['rtn_columns'], {}), '(rtn_columns)\n', (24531, 24544), False, 'from ds_discovery.components.commons import Commons\n'), ((24725, 24781), 'ds_discovery.components.commons.Commons.dict_with_missing', 'Commons.dict_with_missing', (['value_map'], {'default': 'default_to'}), '(value_map, default=default_to)\n', (24750, 24781), False, 'from ds_discovery.components.commons import Commons\n'), ((29147, 29204), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'headers': 'key', 'drop': '(True)'}), '(canonical, headers=key, drop=True)\n', (29169, 29204), False, 'from ds_discovery.components.commons import Commons\n'), ((29285, 29337), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'regex': 'rtn_columns'}), '(canonical, regex=rtn_columns)\n', (29307, 29337), False, 'from 
ds_discovery.components.commons import Commons\n'), ((29360, 29395), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['rtn_columns'], {}), '(rtn_columns)\n', (29382, 29395), False, 'from ds_discovery.components.commons import Commons\n'), ((30107, 30131), 'numpy.linalg.norm', 'np.linalg.norm', (['s_column'], {}), '(s_column)\n', (30121, 30131), True, 'import numpy as np\n'), ((33358, 33415), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'headers': 'key', 'drop': '(True)'}), '(canonical, headers=key, drop=True)\n', (33380, 33415), False, 'from ds_discovery.components.commons import Commons\n'), ((33496, 33548), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'regex': 'rtn_columns'}), '(canonical, regex=rtn_columns)\n', (33518, 33548), False, 'from ds_discovery.components.commons import Commons\n'), ((33571, 33606), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['rtn_columns'], {}), '(rtn_columns)\n', (33593, 33606), False, 'from ds_discovery.components.commons import Commons\n'), ((37208, 37265), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'headers': 'key', 'drop': '(True)'}), '(canonical, headers=key, drop=True)\n', (37230, 37265), False, 'from ds_discovery.components.commons import Commons\n'), ((37346, 37398), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'regex': 'rtn_columns'}), '(canonical, regex=rtn_columns)\n', (37368, 37398), False, 'from ds_discovery.components.commons import Commons\n'), ((37421, 37456), 'ds_discovery.components.commons.Commons.list_formatter', 'Commons.list_formatter', (['rtn_columns'], {}), '(rtn_columns)\n', (37443, 37456), False, 'from ds_discovery.components.commons import Commons\n'), ((41149, 41206), 
'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'headers': 'key', 'drop': '(True)'}), '(canonical, headers=key, drop=True)\n', (41171, 41206), False, 'from ds_discovery.components.commons import Commons\n'), ((42105, 42151), 'numpy.select', 'np.select', (['selection', 'choices'], {'default': 'default'}), '(selection, choices, default=default)\n', (42114, 42151), True, 'import numpy as np\n'), ((42198, 42227), 'numpy.select', 'np.select', (['selection', 'choices'], {}), '(selection, choices)\n', (42207, 42227), True, 'import numpy as np\n'), ((46172, 46229), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'headers': 'key', 'drop': '(True)'}), '(canonical, headers=key, drop=True)\n', (46194, 46229), False, 'from ds_discovery.components.commons import Commons\n'), ((57961, 58018), 'ds_discovery.components.commons.Commons.filter_headers', 'Commons.filter_headers', (['canonical'], {'headers': 'key', 'drop': '(True)'}), '(canonical, headers=key, drop=True)\n', (57983, 58018), False, 'from ds_discovery.components.commons import Commons\n'), ((30202, 30231), 'numpy.round', 'np.round', (['s_column', 'precision'], {}), '(s_column, precision)\n', (30210, 30231), True, 'import numpy as np\n'), ((53205, 53266), 'ds_discovery.components.commons.Commons.filter_columns', 'Commons.filter_columns', (['canonical'], {'headers': '(headers + group_by)'}), '(canonical, headers=headers + group_by)\n', (53227, 53266), False, 'from ds_discovery.components.commons import Commons\n'), ((60506, 60531), 'numpy.round', 'np.round', (['p[0]', 'precision'], {}), '(p[0], precision)\n', (60514, 60531), True, 'import numpy as np\n'), ((60533, 60558), 'numpy.round', 'np.round', (['p[1]', 'precision'], {}), '(p[1], precision)\n', (60541, 60558), True, 'import numpy as np\n'), ((58913, 58971), 'pandas.interval_range', 'pd.interval_range', ([], {'start': 'lower', 'end': '_end', 'freq': 
'granularity'}), '(start=lower, end=_end, freq=granularity)\n', (58930, 58971), True, 'import pandas as pd\n'), ((59486, 59548), 'pandas.interval_range', 'pd.interval_range', ([], {'start': 'lower', 'end': 'upper', 'periods': 'granularity'}), '(start=lower, end=upper, periods=granularity)\n', (59503, 59548), True, 'import pandas as pd\n'), ((8351, 8373), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (8371, 8373), False, 'import inspect\n'), ((12680, 12702), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (12700, 12702), False, 'import inspect\n'), ((19108, 19130), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (19128, 19130), False, 'import inspect\n'), ((23532, 23554), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (23552, 23554), False, 'import inspect\n'), ((28388, 28410), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (28408, 28410), False, 'import inspect\n'), ((32599, 32621), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (32619, 32621), False, 'import inspect\n'), ((36436, 36458), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (36456, 36458), False, 'import inspect\n'), ((40336, 40358), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (40356, 40358), False, 'import inspect\n'), ((45114, 45136), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (45134, 45136), False, 'import inspect\n'), ((48531, 48553), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (48551, 48553), False, 'import inspect\n'), ((52460, 52482), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (52480, 52482), False, 'import inspect\n'), ((57529, 57551), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (57549, 57551), False, 'import inspect\n'), ((65602, 65624), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (65622, 65624), False, 'import inspect\n'), ((69804, 
69826), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (69824, 69826), False, 'import inspect\n'), ((77813, 77831), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (77829, 77831), True, 'import pandas as pd\n'), ((77834, 77860), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '_offset'}), '(days=_offset)\n', (77846, 77860), True, 'import pandas as pd\n')] |
from typing import List, Union
import numpy as np
import tensorflow as tf
import nn_utils.math_utils as math_utils
from dataflow.illumination_integration.helper import (
getBilinearFromUv,
)
from nn_utils.math_utils import shape_to_uv, uv_to_direction
@tf.function
def map_levels_to_samples(
    num_roughness_0: int,
    num_random_roughness: int,
    data_levels: List[tf.Tensor],
):
    """Draw random (direction, roughness) sample pairs from a mip-level stack.

    Picks ``num_roughness_0 + num_random_roughness`` random uv positions on the
    level-0 map. The first group is evaluated at roughness 0, the second at a
    random roughness drawn per sample.

    Args:
        num_roughness_0: Number of samples evaluated at roughness 0.
        num_random_roughness: Number of samples with a random roughness.
        data_levels: Environment map roughness levels — presumably [H, W, 3]
            each with level 0 first; confirm against callers.

    Returns:
        Tuple of (data_levels, r0 directions, r0 samples, random directions,
        random roughness values, random samples).
    """
    # Setup uvs
    env_shape = data_levels[0].shape
    uvs = shape_to_uv(*env_shape[:-1])
    uvs_flat = tf.reshape(uvs, [-1, 2])
    total_directions_required = num_roughness_0 + num_random_roughness
    # If the map has fewer texels than requested samples, tile the uv list so
    # the slices below always yield enough entries.
    if uvs_flat.shape[0] < total_directions_required:
        repeats_required = tf.cast(
            tf.math.ceil(total_directions_required / uvs_flat.shape[0]), tf.int32
        )
        uvs_flat = math_utils.repeat(uvs_flat, repeats_required, 0)
    uvs_shuffle = tf.random.shuffle(uvs_flat)
    uvs_random = uvs_shuffle[:total_directions_required]
    # Jitter the picked texel centers so samples are not grid-aligned.
    # NOTE(review): jitter can push uvs outside [0, 1] — presumably
    # getBilinearFromUv wraps or clamps; confirm.
    jitter = tf.random.normal(uvs_random.shape, mean=0.0, stddev=0.3)
    uvs_random = uvs_random + jitter
    # Setup roughness: uniform over (1/255, 1 + 1/255], clipped back to [0, 1]
    # so exact 0 is essentially excluded while 1 stays reachable.
    roughness_random = tf.clip_by_value(
        tf.random.uniform(
            (num_random_roughness, 1), minval=1 / 255, maxval=1 + 1 / 255
        ),
        0,
        1,
    )
    r0_uvs = uvs_random[:num_roughness_0]
    rnd_uvs = uvs_random[num_roughness_0 : num_roughness_0 + num_random_roughness]
    # Get samples
    samples_random = random_uv_roughness_access(data_levels, rnd_uvs, roughness_random)
    # Always get r0 samples
    samples_r0 = random_uv_roughness_access(
        data_levels, r0_uvs, tf.zeros_like(r0_uvs[:, :1])
    )
    ret = (
        uv_to_direction(r0_uvs),
        samples_r0,
        uv_to_direction(rnd_uvs),
        roughness_random,
        samples_random,
    )
    return (
        data_levels,
        *ret,
    )
@tf.function
def full_map_samples(num_roughness_steps: int, data_levels: List[tf.Tensor]):
    """Evaluate every texel of the level-0 map at evenly spaced roughnesses.

    Args:
        num_roughness_steps: How many roughness values in [0, 1] to evaluate.
        data_levels: Environment map roughness levels.

    Returns:
        Tuple of (data_levels, all directions, roughness steps, stacked samples).
    """
    uv_grid = tf.reshape(shape_to_uv(*data_levels[0].shape[:-1]), (-1, 2))
    # Evenly spaced roughness values, shaped (num_roughness_steps, 1).
    rough_levels = np.linspace(0.0, 1.0, num_roughness_steps, dtype=np.float32)[:, None]
    # Collect one sample tensor per roughness step.
    sample_buffer = tf.TensorArray(
        tf.float32, size=num_roughness_steps, clear_after_read=True
    )
    for step_idx, rough_val in enumerate(rough_levels):
        # Broadcast this roughness value to every uv position.
        per_uv_roughness = math_utils.repeat(rough_val[:, None], uv_grid.shape[0], 0)
        step_samples = random_uv_roughness_access(data_levels, uv_grid, per_uv_roughness)
        sample_buffer = sample_buffer.write(step_idx, step_samples)
    return (
        data_levels,
        uv_to_direction(uv_grid),
        tf.convert_to_tensor(rough_levels),
        sample_buffer.stack(),
    )
@tf.function
def random_uv_roughness_access(data_levels, uvs, roughness):
    """Bilinearly sample each level at the given uvs, then blend across levels.

    Args:
        data_levels: List of [H, W, 3] roughness level maps.
        uvs: [S, 2] uv coordinates.
        roughness: [S, 1] per-sample roughness in [0, 1].

    Returns:
        [S, 3] interpolated samples.
    """
    tf.debugging.assert_shapes(
        [
            (uvs, ("S", 2)),
            (roughness, ("S", 1)),
        ]
        + [(d, ("H%d" % i, "W%d" % i, 3)) for i, d in enumerate(data_levels)]
    )
    # One bilinear lookup per roughness level; batch dim added then stripped.
    per_level = [
        getBilinearFromUv(level[None, ...], uvs[None, ...])[0] for level in data_levels
    ]
    stacked_levels = tf.stack(per_level, 0)  # M, S, 3
    return interpolate_roughness_levels(stacked_levels, roughness)
@tf.function
def interpolate_roughness_levels(samples, roughness):
    """Linearly interpolate per-sample values between discrete roughness levels.

    Args:
        samples: [M, S, 3] tensor — one row per roughness (mip) level.
        roughness: [S, 1] per-sample roughness in [0, 1].

    Returns:
        [S, 3] tensor of samples lerped between the two enclosing levels.
    """
    tf.debugging.assert_shapes(
        [
            (samples, ("M", "S", 3)),
            (roughness, ("S", 1)),
        ]
    )
    # Fractional level index each roughness maps to.
    roughness_mip_index = roughness[:, 0] * (samples.shape[0] - 1)
    # S
    lower_mip_index = tf.cast(tf.math.floor(roughness_mip_index), tf.int32)
    upper_mip_index = tf.cast(tf.math.ceil(roughness_mip_index), tf.int32)
    # Fetch the lower and upper roughness levels (transpose hoisted: used twice).
    samples_per_direction = tf.transpose(samples, [1, 0, 2])  # S, M, 3
    rgh_low = tf.gather(
        samples_per_direction, lower_mip_index[..., None], batch_dims=1
    )[:, 0]
    rgh_hgh = tf.gather(
        samples_per_direction, upper_mip_index[..., None], batch_dims=1
    )[:, 0]
    tf.debugging.assert_shapes(
        [
            (samples, ("M", "S", 3)),
            (roughness, ("S", 1)),
            (rgh_low, ("S", 3)),
            (rgh_hgh, ("S", 3)),
        ]
    )
    # Start interpolation
    fraction_index = roughness_mip_index - tf.cast(lower_mip_index, tf.float32)
    fraction_index = tf.reshape(fraction_index, roughness.shape)
    # BUGFIX: weights were swapped (low * f + high * (1 - f)), which returned
    # the *lower* level as the fraction approached 1. Standard lerp:
    samples_random = rgh_low * (1 - fraction_index) + rgh_hgh * fraction_index
    return samples_random
@tf.function
def blend_two_maps(*batch_2_data):
    """Alpha-blend the two entries of every batched tensor pair.

    NOTE(review): a fresh alpha is drawn for each tensor in the tuple, so
    different mip levels of the same pair are blended with different weights —
    confirm this is intended.
    """
    blended = []
    for pair in batch_2_data:
        first = pair[0]
        second = pair[1]
        mix = tf.random.uniform((1,))
        blended.append(mix * first + (1 - mix) * second)
    return blended
@tf.function
def specify_mip_levels_to_fetch(
    dataset: List[Union[List[np.ndarray], np.ndarray]], idxs: List[int]
):
    """Keep only the requested mip levels of dataset[0]; pass the rest through.

    Args:
        dataset: Tuple-like sequence whose first element holds the mip levels.
        idxs: Indices of the mip levels to forward.

    Returns:
        Tuple of the selected levels followed by the remaining elements.
    """
    selected_levels = [dataset[0][idx] for idx in idxs]
    return (*selected_levels, *dataset[1:])
def random_sample_dataflow(
    dataset: List[np.ndarray],
    samples_roughness_0: int,
    samples_random_roughness: int,
    batch_size: int,
    with_blend: bool = False,
    full_l0: bool = False,
    shuffle: bool = True,
):
    """Build the tf.data pipeline that turns map stacks into training samples.

    Args:
        dataset: Tuple-like list of arrays; element 0 holds the map levels.
        samples_roughness_0: Number of roughness-0 samples per map.
        samples_random_roughness: Number of random-roughness samples per map.
        batch_size: Batch size; values <= 0 disable batching.
        with_blend: If True, randomly alpha-blend pairs of examples.
        full_l0: If True, densely sample every texel at 5 roughness steps
            instead of drawing random samples.
        shuffle: If True, reshuffle the examples every epoch.

    Returns:
        The fully configured, prefetched tf.data.Dataset.
    """
    num_examples = len(dataset[0])
    pipeline = tf.data.Dataset.from_tensor_slices((*dataset,))
    if shuffle:
        pipeline = pipeline.shuffle(num_examples, reshuffle_each_iteration=True)
    if with_blend:
        # Pair up examples, blend each pair, and repeat to restore epoch size.
        pipeline = pipeline.batch(2, drop_remainder=True)
        pipeline = pipeline.map(blend_two_maps)
        pipeline = pipeline.repeat(2)
    if full_l0:
        sample_fn = lambda *tensors: full_map_samples(5, tensors)
    else:
        sample_fn = lambda *tensors: map_levels_to_samples(
            samples_roughness_0, samples_random_roughness, tensors
        )
    pipeline = pipeline.map(sample_fn, num_parallel_calls=tf.data.AUTOTUNE)
    # Only mip level 0 is forwarded; the sampled targets pass through untouched.
    pipeline = pipeline.map(lambda *tensors: specify_mip_levels_to_fetch(tensors, [0]))
    if batch_size > 0:
        pipeline = pipeline.batch(batch_size)
    return pipeline.prefetch(5)
| [
"tensorflow.math.floor",
"tensorflow.reshape",
"tensorflow.zeros_like",
"dataflow.illumination_integration.helper.getBilinearFromUv",
"tensorflow.math.ceil",
"tensorflow.random.uniform",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.debugging.assert_shapes",
"numpy.linspace",
"nn_utils.math... | [((457, 485), 'nn_utils.math_utils.shape_to_uv', 'shape_to_uv', (['*env_shape[:-1]'], {}), '(*env_shape[:-1])\n', (468, 485), False, 'from nn_utils.math_utils import shape_to_uv, uv_to_direction\n'), ((501, 525), 'tensorflow.reshape', 'tf.reshape', (['uvs', '[-1, 2]'], {}), '(uvs, [-1, 2])\n', (511, 525), True, 'import tensorflow as tf\n'), ((868, 895), 'tensorflow.random.shuffle', 'tf.random.shuffle', (['uvs_flat'], {}), '(uvs_flat)\n', (885, 895), True, 'import tensorflow as tf\n'), ((967, 1023), 'tensorflow.random.normal', 'tf.random.normal', (['uvs_random.shape'], {'mean': '(0.0)', 'stddev': '(0.3)'}), '(uvs_random.shape, mean=0.0, stddev=0.3)\n', (983, 1023), True, 'import tensorflow as tf\n'), ((2243, 2318), 'tensorflow.TensorArray', 'tf.TensorArray', (['tf.float32'], {'size': 'num_roughness_steps', 'clear_after_read': '(True)'}), '(tf.float32, size=num_roughness_steps, clear_after_read=True)\n', (2257, 2318), True, 'import tensorflow as tf\n'), ((3430, 3452), 'tensorflow.stack', 'tf.stack', (['smpl_list', '(0)'], {}), '(smpl_list, 0)\n', (3438, 3452), True, 'import tensorflow as tf\n'), ((3612, 3689), 'tensorflow.debugging.assert_shapes', 'tf.debugging.assert_shapes', (["[(samples, ('M', 'S', 3)), (roughness, ('S', 1))]"], {}), "([(samples, ('M', 'S', 3)), (roughness, ('S', 1))])\n", (3638, 3689), True, 'import tensorflow as tf\n'), ((4301, 4424), 'tensorflow.debugging.assert_shapes', 'tf.debugging.assert_shapes', (["[(samples, ('M', 'S', 3)), (roughness, ('S', 1)), (rgh_low, ('S', 3)), (\n rgh_hgh, ('S', 3))]"], {}), "([(samples, ('M', 'S', 3)), (roughness, ('S', 1)),\n (rgh_low, ('S', 3)), (rgh_hgh, ('S', 3))])\n", (4327, 4424), True, 'import tensorflow as tf\n'), ((4622, 4665), 'tensorflow.reshape', 'tf.reshape', (['fraction_index', 'roughness.shape'], {}), '(fraction_index, roughness.shape)\n', (4632, 4665), True, 'import tensorflow as tf\n'), ((5573, 5620), 'tensorflow.data.Dataset.from_tensor_slices', 
'tf.data.Dataset.from_tensor_slices', (['(*dataset,)'], {}), '((*dataset,))\n', (5607, 5620), True, 'import tensorflow as tf\n'), ((800, 848), 'nn_utils.math_utils.repeat', 'math_utils.repeat', (['uvs_flat', 'repeats_required', '(0)'], {}), '(uvs_flat, repeats_required, 0)\n', (817, 848), True, 'import nn_utils.math_utils as math_utils\n'), ((1133, 1218), 'tensorflow.random.uniform', 'tf.random.uniform', (['(num_random_roughness, 1)'], {'minval': '(1 / 255)', 'maxval': '(1 + 1 / 255)'}), '((num_random_roughness, 1), minval=1 / 255, maxval=1 + 1 / 255\n )\n', (1150, 1218), True, 'import tensorflow as tf\n'), ((1601, 1629), 'tensorflow.zeros_like', 'tf.zeros_like', (['r0_uvs[:, :1]'], {}), '(r0_uvs[:, :1])\n', (1614, 1629), True, 'import tensorflow as tf\n'), ((1657, 1680), 'nn_utils.math_utils.uv_to_direction', 'uv_to_direction', (['r0_uvs'], {}), '(r0_uvs)\n', (1672, 1680), False, 'from nn_utils.math_utils import shape_to_uv, uv_to_direction\n'), ((1710, 1734), 'nn_utils.math_utils.uv_to_direction', 'uv_to_direction', (['rnd_uvs'], {}), '(rnd_uvs)\n', (1725, 1734), False, 'from nn_utils.math_utils import shape_to_uv, uv_to_direction\n'), ((2016, 2055), 'nn_utils.math_utils.shape_to_uv', 'shape_to_uv', (['*data_levels[0].shape[:-1]'], {}), '(*data_levels[0].shape[:-1])\n', (2027, 2055), False, 'from nn_utils.math_utils import shape_to_uv, uv_to_direction\n'), ((2089, 2149), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'num_roughness_steps'], {'dtype': 'np.float32'}), '(0.0, 1.0, num_roughness_steps, dtype=np.float32)\n', (2100, 2149), True, 'import numpy as np\n'), ((2434, 2485), 'nn_utils.math_utils.repeat', 'math_utils.repeat', (['r[:, None]', 'full_uvs.shape[0]', '(0)'], {}), '(r[:, None], full_uvs.shape[0], 0)\n', (2451, 2485), True, 'import nn_utils.math_utils as math_utils\n'), ((2703, 2728), 'nn_utils.math_utils.uv_to_direction', 'uv_to_direction', (['full_uvs'], {}), '(full_uvs)\n', (2718, 2728), False, 'from nn_utils.math_utils import shape_to_uv, 
uv_to_direction\n'), ((2738, 2775), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['roughness_steps'], {}), '(roughness_steps)\n', (2758, 2775), True, 'import tensorflow as tf\n'), ((3885, 3919), 'tensorflow.math.floor', 'tf.math.floor', (['roughness_mip_index'], {}), '(roughness_mip_index)\n', (3898, 3919), True, 'import tensorflow as tf\n'), ((3961, 3994), 'tensorflow.math.ceil', 'tf.math.ceil', (['roughness_mip_index'], {}), '(roughness_mip_index)\n', (3973, 3994), True, 'import tensorflow as tf\n'), ((4564, 4600), 'tensorflow.cast', 'tf.cast', (['lower_mip_index', 'tf.float32'], {}), '(lower_mip_index, tf.float32)\n', (4571, 4600), True, 'import tensorflow as tf\n'), ((4914, 4937), 'tensorflow.random.uniform', 'tf.random.uniform', (['(1,)'], {}), '((1,))\n', (4931, 4937), True, 'import tensorflow as tf\n'), ((701, 760), 'tensorflow.math.ceil', 'tf.math.ceil', (['(total_directions_required / uvs_flat.shape[0])'], {}), '(total_directions_required / uvs_flat.shape[0])\n', (713, 760), True, 'import tensorflow as tf\n'), ((3310, 3357), 'dataflow.illumination_integration.helper.getBilinearFromUv', 'getBilinearFromUv', (['d[None, ...]', 'uvs[None, ...]'], {}), '(d[None, ...], uvs[None, ...])\n', (3327, 3357), False, 'from dataflow.illumination_integration.helper import getBilinearFromUv\n'), ((4089, 4121), 'tensorflow.transpose', 'tf.transpose', (['samples', '[1, 0, 2]'], {}), '(samples, [1, 0, 2])\n', (4101, 4121), True, 'import tensorflow as tf\n'), ((4209, 4241), 'tensorflow.transpose', 'tf.transpose', (['samples', '[1, 0, 2]'], {}), '(samples, [1, 0, 2])\n', (4221, 4241), True, 'import tensorflow as tf\n')] |
"""Modulator Module that implements various wireless modulators
These modulators are meant to turn arbitrary bit patterns to analog waveforms for wireless
transmission.
"""
import numpy as np
class OFDM:
    """Class that creates OFDM signals.

    This class will set up an OFDM modulator to create random OFDM signals.

    Attributes:
        n_subcarriers: Number of subcarriers per OFDM symbol
        subcarrier_spacing: Spacing between subcarriers in Hz
        cp_length: Number of samples in the cyclic prefix
        fft_size: Size of the IFFT/FFT used.
        sampling_rate: The native sampling rate based on the FFT size and subcarrier spacing
        symbol_alphabet: The constellation points
    Todo:
        - Add an arbitrary bit input
    """

    def __init__(self, n_subcarriers: int = 1200, subcarrier_spacing: int = 15000,
                 cp_length: int = 144, constellation: str = 'QPSK', seed: int = 0):
        """OFDM Modulator Constructor.

        Args:
            n_subcarriers: Number of subcarriers per OFDM symbol
            subcarrier_spacing: Spacing of the subcarriers in the frequency domain in Hertz
            cp_length: Number of samples in cyclic prefix
            constellation: Type of constellation used on each subcarrier. QPSK, 16QAM or 64QAM
            seed: Seed for the random number generator
        """
        self.n_subcarriers = n_subcarriers
        self.subcarrier_spacing = subcarrier_spacing
        self.cp_length = cp_length
        # Next power of two that holds all subcarriers.
        # (BUGFIX: np.int was deprecated and removed in NumPy >= 1.24; use int.)
        self.fft_size = 2 ** int(np.ceil(np.log2(n_subcarriers)))
        self.sampling_rate = self.subcarrier_spacing * self.fft_size
        self.symbol_alphabet = self.qam_alphabet(constellation)
        self.seed = seed
        self.fd_symbols = None  # Last TX symbols, kept for EVM computation later

    def use(self, n_symbols: int = 10):
        """Use the OFDM modulator to generate a random signal.

        Args:
            n_symbols: Number of OFDM symbols to generate

        Returns:
            A 1-D time-domain OFDM signal of length (fft_size + cp_length) * n_symbols

        TODO:
            - Allow to pass in an arbitrary bit pattern for modulation.
        """
        np.random.seed(self.seed)
        self.fd_symbols = self.symbol_alphabet[
            np.random.randint(self.symbol_alphabet.size, size=(self.n_subcarriers, n_symbols))]
        out = np.zeros((self.fft_size + self.cp_length, n_symbols), dtype='complex64')
        for index, symbol in enumerate(self.fd_symbols.T):
            td_waveform = self.frequency_to_time_domain(symbol)
            out[:, index] = self.add_cyclic_prefix(td_waveform)
        # Column-major ('F') flattening concatenates the symbols in time.
        # (BUGFIX: flatten(1) relied on a long-removed NumPy integer-order
        # convention where 1 meant Fortran order; modern NumPy rejects it.)
        return out.flatten(order='F')

    def frequency_to_time_domain(self, fd_symbol):
        """Convert one frequency domain symbol to time domain via IFFT.

        Args:
            fd_symbol: One frequency domain symbol of length n_subcarriers

        Returns:
            Time domain signal of length fft_size
        """
        ifft_input = np.zeros(self.fft_size, dtype='complex64')
        half = self.n_subcarriers // 2
        # Bin 0 is DC and stays empty. The second half of the subcarriers maps
        # to the positive frequencies (bins 1..half); the first half maps to
        # the negative frequencies (the last `half` bins).
        ifft_input[1:half + 1] = fd_symbol[half:]
        ifft_input[-half:] = fd_symbol[:half]
        return np.fft.ifft(ifft_input)

    def time_to_frequency_domain(self, td_symbol):
        """Convert a grid of time-domain symbols back to subcarrier values.

        Requires `use` to have been called, since the output shape is taken
        from the stored TX symbols.

        Args:
            td_symbol: [fft_size, n_symbols] time-domain grid without CP

        Returns:
            [n_subcarriers, n_symbols] frequency-domain symbols
        """
        full_fft_output = np.fft.fft(td_symbol, axis=0)
        fd_symbols = np.zeros(shape=self.fd_symbols.shape, dtype='complex64')
        half = self.n_subcarriers // 2
        # Inverse of the mapping in frequency_to_time_domain.
        fd_symbols[half:, :] = full_fft_output[1:half + 1, :]
        fd_symbols[:half, :] = full_fft_output[-half:, :]
        return fd_symbols

    def add_cyclic_prefix(self, td_waveform):
        """Adds a cyclic prefix.

        The last cp_length samples are copied to the beginning of the signal.

        Args:
            td_waveform: IFFT output signal.

        Returns:
            Time domain signal with a cyclic prefix prepended
        """
        out = np.zeros(td_waveform.size + self.cp_length, dtype='complex64')
        out[self.cp_length:] = td_waveform
        out[:self.cp_length] = td_waveform[-self.cp_length:]
        return out

    def remove_cyclic_prefix(self, td_grid):
        """Strip the cyclic prefix from each column of a time-domain grid."""
        w_out_cp = td_grid[-self.fft_size:, :]
        return w_out_cp

    @staticmethod
    def qam_alphabet(constellation):
        """Returns constellation points for QPSK, 16QAM, or 64QAM.

        Args:
            constellation: String naming the desired constellation

        Returns:
            Symbol alphabet on the complex plane

        Raises:
            KeyError: If the constellation name is unknown.
        """
        constellation_dict = {
            "QPSK": 4,
            "16QAM": 16,
            "64QAM": 64
        }
        n_points = constellation_dict[constellation]
        # (BUGFIX: np.int removed in NumPy >= 1.24.)
        x = int(np.sqrt(n_points)) - 1
        alpha_n_points = np.arange(-x, x + 1, 2, dtype=int)
        A = np.kron(np.ones((x + 1, 1)), alpha_n_points)
        B = np.flipud(A.transpose())
        const_qam = A + 1j * B
        # (BUGFIX: flatten(1) -> flatten(order='F'), see `use`.)
        alphabet = const_qam.flatten(order='F')
        return alphabet

    def demodulate(self, time_domain_rx_signal):
        """Demodulate a time domain signal back into the FD symbols.

        Requires `use` to have been called first (the stored TX symbols define
        the expected grid shape and serve as the EVM reference).

        Args:
            time_domain_rx_signal: 1-D received signal as produced by `use`

        Returns:
            Tuple of ([n_subcarriers, n_symbols] symbols, EVM in percent)
        """
        # Reorganize the serial stream back into a [samples, symbols] grid.
        _, n_symbols = self.fd_symbols.shape
        td_grid = np.reshape(time_domain_rx_signal, (self.fft_size + self.cp_length, n_symbols), order='F')
        td_grid = self.remove_cyclic_prefix(td_grid)
        fd_symbols = self.time_to_frequency_domain(td_grid)
        evm = self.calculate_evm(fd_symbols)
        return fd_symbols, evm

    def calculate_evm(self, fd_rx_signal):
        """Error vector magnitude (percent) of RX symbols vs the stored TX symbols."""
        e = fd_rx_signal - self.fd_symbols
        evm = 100 * np.linalg.norm(e) / np.linalg.norm(self.fd_symbols)
        return evm
if __name__ == "__main__":
    # Smoke test: modulate a random signal and demodulate it back.
    # (Removed the dead `1 + 1` statement; report the round-trip EVM instead.)
    ofdm = OFDM()
    tx_signal = ofdm.use()
    _, evm_percent = ofdm.demodulate(tx_signal)
    print(f"Round-trip EVM: {evm_percent:.6f}%")
| [
"numpy.fft.ifft",
"numpy.random.seed",
"numpy.fft.fft",
"numpy.log2",
"numpy.zeros",
"numpy.ones",
"numpy.random.randint",
"numpy.arange",
"numpy.reshape",
"numpy.linalg.norm",
"numpy.int",
"numpy.sqrt"
] | [((2355, 2380), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (2369, 2380), True, 'import numpy as np\n'), ((2539, 2611), 'numpy.zeros', 'np.zeros', (['(self.fft_size + self.cp_length, n_symbols)'], {'dtype': '"""complex64"""'}), "((self.fft_size + self.cp_length, n_symbols), dtype='complex64')\n", (2547, 2611), True, 'import numpy as np\n'), ((3176, 3218), 'numpy.zeros', 'np.zeros', (['self.fft_size'], {'dtype': '"""complex64"""'}), "(self.fft_size, dtype='complex64')\n", (3184, 3218), True, 'import numpy as np\n'), ((3596, 3619), 'numpy.fft.ifft', 'np.fft.ifft', (['ifft_input'], {}), '(ifft_input)\n', (3607, 3619), True, 'import numpy as np\n'), ((3698, 3727), 'numpy.fft.fft', 'np.fft.fft', (['td_symbol'], {'axis': '(0)'}), '(td_symbol, axis=0)\n', (3708, 3727), True, 'import numpy as np\n'), ((3749, 3805), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.fd_symbols.shape', 'dtype': '"""complex64"""'}), "(shape=self.fd_symbols.shape, dtype='complex64')\n", (3757, 3805), True, 'import numpy as np\n'), ((4416, 4478), 'numpy.zeros', 'np.zeros', (['(td_waveform.size + self.cp_length)'], {'dtype': '"""complex64"""'}), "(td_waveform.size + self.cp_length, dtype='complex64')\n", (4424, 4478), True, 'import numpy as np\n'), ((5237, 5271), 'numpy.arange', 'np.arange', (['(-x)', '(x + 1)', '(2)'], {'dtype': 'int'}), '(-x, x + 1, 2, dtype=int)\n', (5246, 5271), True, 'import numpy as np\n'), ((5676, 5769), 'numpy.reshape', 'np.reshape', (['time_domain_rx_signal', '(self.fft_size + self.cp_length, n_symbols)'], {'order': '"""F"""'}), "(time_domain_rx_signal, (self.fft_size + self.cp_length,\n n_symbols), order='F')\n", (5686, 5769), True, 'import numpy as np\n'), ((2441, 2527), 'numpy.random.randint', 'np.random.randint', (['self.symbol_alphabet.size'], {'size': '(self.n_subcarriers, n_symbols)'}), '(self.symbol_alphabet.size, size=(self.n_subcarriers,\n n_symbols))\n', (2458, 2527), True, 'import numpy as np\n'), ((5292, 5311), 
'numpy.ones', 'np.ones', (['(x + 1, 1)'], {}), '((x + 1, 1))\n', (5299, 5311), True, 'import numpy as np\n'), ((6108, 6139), 'numpy.linalg.norm', 'np.linalg.norm', (['self.fd_symbols'], {}), '(self.fd_symbols)\n', (6122, 6139), True, 'import numpy as np\n'), ((3436, 3466), 'numpy.int', 'np.int', (['(self.n_subcarriers / 2)'], {}), '(self.n_subcarriers / 2)\n', (3442, 3466), True, 'import numpy as np\n'), ((3549, 3579), 'numpy.int', 'np.int', (['(self.n_subcarriers / 2)'], {}), '(self.n_subcarriers / 2)\n', (3555, 3579), True, 'import numpy as np\n'), ((5188, 5205), 'numpy.sqrt', 'np.sqrt', (['n_points'], {}), '(n_points)\n', (5195, 5205), True, 'import numpy as np\n'), ((6088, 6105), 'numpy.linalg.norm', 'np.linalg.norm', (['e'], {}), '(e)\n', (6102, 6105), True, 'import numpy as np\n'), ((1738, 1760), 'numpy.log2', 'np.log2', (['n_subcarriers'], {}), '(n_subcarriers)\n', (1745, 1760), True, 'import numpy as np\n'), ((3374, 3404), 'numpy.int', 'np.int', (['(self.n_subcarriers / 2)'], {}), '(self.n_subcarriers / 2)\n', (3380, 3404), True, 'import numpy as np\n'), ((3489, 3519), 'numpy.int', 'np.int', (['(self.n_subcarriers / 2)'], {}), '(self.n_subcarriers / 2)\n', (3495, 3519), True, 'import numpy as np\n'), ((3825, 3855), 'numpy.int', 'np.int', (['(self.n_subcarriers / 2)'], {}), '(self.n_subcarriers / 2)\n', (3831, 3855), True, 'import numpy as np\n'), ((3938, 3968), 'numpy.int', 'np.int', (['(self.n_subcarriers / 2)'], {}), '(self.n_subcarriers / 2)\n', (3944, 3968), True, 'import numpy as np\n'), ((3881, 3911), 'numpy.int', 'np.int', (['(self.n_subcarriers / 2)'], {}), '(self.n_subcarriers / 2)\n', (3887, 3911), True, 'import numpy as np\n'), ((3992, 4022), 'numpy.int', 'np.int', (['(self.n_subcarriers / 2)'], {}), '(self.n_subcarriers / 2)\n', (3998, 4022), True, 'import numpy as np\n')] |
import json
import numpy as np
from pdb import set_trace
# Per-dataset statistics files to reduce, and the output file stem.
names = ['DUT', 'ECSSD', 'HKU_IS', 'MSRA1000', 'SOD']
ext = 'json'
reduced_name = 'reduced'
# Column labels describing each entry of the per-image statistics rows.
dump = {}
dump['cols'] = ['max.abs', 'min.abs', 'median.abs', 'mean.abs', 'var.abs']
for name in names:
    with open(f'{name}.{ext}', 'r') as f:
        stat = json.load(f)
    # Average the per-image statistics over all images of the dataset.
    nstat = np.array(stat)
    dump[name] = nstat.mean(axis=0).tolist()
# 'w' (was 'w+'): the output file is only written, never read back.
with open(f'{reduced_name}.{ext}', 'w') as f:
    json.dump(dump, f)
| [
"json.dump",
"json.load",
"numpy.array"
] | [((343, 357), 'numpy.array', 'np.array', (['stat'], {}), '(stat)\n', (351, 357), True, 'import numpy as np\n'), ((482, 500), 'json.dump', 'json.dump', (['dump', 'f'], {}), '(dump, f)\n', (491, 500), False, 'import json\n'), ((318, 330), 'json.load', 'json.load', (['f'], {}), '(f)\n', (327, 330), False, 'import json\n')] |
from pathlib import Path
import os
import sys
import shutil
import _init_paths
from utils import BBFormat, CoordinatesType
import numpy as np
# Directory containing this script; default folders are resolved relative to it.
currentPath = os.path.dirname(os.path.abspath(__file__))
# Default input/output folders - Change as per your requirement
gtFolder = Path(currentPath) / 'groundtruths'
detFolder = Path(currentPath) / 'detections_1'
savePath = Path(currentPath) / 'results'
# Confidence thresholds swept from 0.1 up to (but excluding) 1.0 in 0.05 steps
conf_threshold = np.arange(0.1, 1.0, 0.05)
# IOU threshold values - Change as per your requirement
iouThreshold = [0.5, 0.6]
# Validate formats
def ValidateFormats(argFormat, argName):
    """Map a bounding-box format string to the corresponding BBFormat value.

    Args:
        argFormat: 'xywh', 'xyrb', or None (None defaults to 'xywh').
        argName: Name of the argument, used in the error message.

    Returns:
        BBFormat.XYWH or BBFormat.XYX2Y2.

    Raises:
        ValueError: If argFormat is not a recognised format string.
    """
    if argFormat == 'xywh':
        return BBFormat.XYWH
    elif argFormat == 'xyrb':
        return BBFormat.XYX2Y2
    elif argFormat is None:
        return BBFormat.XYWH  # default when nothing is passed
    # BUGFIX: the original *returned* the error-message string, so callers would
    # silently receive a str instead of a BBFormat value; raise instead.
    raise ValueError('argument %s: invalid value. It must be either \'xywh\' or \'xyrb\'' %
                     argName)
# Validate coordinate types
def ValidateCoordinatesTypes(arg, argName):
    """Map a coordinates-type string to the corresponding CoordinatesType value.

    Args:
        arg: 'abs', 'rel', or None (None defaults to 'abs').
        argName: Name of the argument, used in the error message.

    Returns:
        CoordinatesType.Absolute or CoordinatesType.Relative.

    Raises:
        ValueError: If arg is not a recognised coordinates type.
    """
    if arg == 'abs':
        return CoordinatesType.Absolute
    elif arg == 'rel':
        return CoordinatesType.Relative
    elif arg is None:
        return CoordinatesType.Absolute  # default when nothing is passed
    # BUGFIX: the original *returned* the error-message string instead of
    # raising, so callers would silently receive a str; raise instead.
    raise ValueError('argument %s: invalid value. It must be either \'rel\' or \'abs\'' % argName)
# Check if path to save results already exists and is not empty
if os.path.isdir(savePath) and os.listdir(savePath):
    key_pressed = ''
    # Keep prompting until the user answers 'Y' or 'N' (case-insensitive).
    while key_pressed.upper() not in ['Y', 'N']:
        print(f'Folder {savePath} already exists and may contain important results.\n')
        print(f'Enter \'Y\' to continue. WARNING: THIS WILL REMOVE ALL THE CONTENTS OF THE FOLDER!')
        print(f'Or enter \'N\' to abort and choose another folder to save the results.')
        key_pressed = input('')
    if key_pressed.upper() == 'N':
        print('Process canceled')
        sys.exit()
# Clear folder and save results
# Reached when the folder is missing/empty or the user confirmed with 'Y':
# wipe any previous contents and recreate an empty results folder.
shutil.rmtree(savePath, ignore_errors=True)
os.makedirs(savePath)
os.makedirs(savePath)
# Get the optional formats
## Default format is 'xyrb' you can also use 'xywh'
gtFormat = ValidateFormats('xyrb', '-gtformat')
detFormat = ValidateFormats('xyrb', '-detformat')
# Coordinates types
gtCoordType = ValidateCoordinatesTypes('abs', '-gtCoordinates')
detCoordType = ValidateCoordinatesTypes('abs', '-detCoordinates')
# Default for coordinate type 'abs' if coordinate type is 'rel' change the image size according to your requirement
imgSize = (0, 0)
| [
"os.path.abspath",
"os.makedirs",
"os.path.isdir",
"numpy.arange",
"shutil.rmtree",
"os.path.join",
"os.listdir",
"sys.exit"
] | [((578, 603), 'numpy.arange', 'np.arange', (['(0.1)', '(1.0)', '(0.05)'], {}), '(0.1, 1.0, 0.05)\n', (587, 603), True, 'import numpy as np\n'), ((2148, 2191), 'shutil.rmtree', 'shutil.rmtree', (['savePath'], {'ignore_errors': '(True)'}), '(savePath, ignore_errors=True)\n', (2161, 2191), False, 'import shutil\n'), ((2193, 2214), 'os.makedirs', 'os.makedirs', (['savePath'], {}), '(savePath)\n', (2204, 2214), False, 'import os\n'), ((226, 251), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (241, 251), False, 'import os\n'), ((294, 335), 'os.path.join', 'os.path.join', (['currentPath', '"""groundtruths"""'], {}), "(currentPath, 'groundtruths')\n", (306, 335), False, 'import os\n'), ((355, 396), 'os.path.join', 'os.path.join', (['currentPath', '"""detections_1"""'], {}), "(currentPath, 'detections_1')\n", (367, 396), False, 'import os\n'), ((415, 451), 'os.path.join', 'os.path.join', (['currentPath', '"""results"""'], {}), "(currentPath, 'results')\n", (427, 451), False, 'import os\n'), ((1583, 1606), 'os.path.isdir', 'os.path.isdir', (['savePath'], {}), '(savePath)\n', (1596, 1606), False, 'import os\n'), ((1611, 1631), 'os.listdir', 'os.listdir', (['savePath'], {}), '(savePath)\n', (1621, 1631), False, 'import os\n'), ((2101, 2111), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2109, 2111), False, 'import sys\n')] |
# --------------------------------------------------------------------
# jokettt project, v. 0.1
# by <NAME> <<EMAIL>>
# ---
# Tic Tac Toe Board class definition
# --------------------------------------------------------------------
"""Implementation of the Board class: a board to play Tic Tac Toe game."""
__all__ = ['Board']
import sys
import random
import numpy as np
class Board:
"""A board to play Tic Tac Toe game."""
# ------------------------------------------------------
def __init__(self, first_piece, second_piece, init_zhash=None, init_board=None):
"""Board class constructor"""
if init_board is not None:
self.__board = init_board
else:
self.__board = [['_', '_', '_'],
['_', '_', '_'],
['_', '_', '_']]
self.__first_piece = first_piece
self.__second_piece = second_piece
self.__zobrist_hash = 0
self.__init_zhash(init_zhash)
# ------------------------------------------------------
def reset(self, init_board=None):
"""Reset the board to the given schema (default = empty)."""
if init_board is not None:
for _x in range(0, 3):
for _y in range(0, 3):
self.__board[_x][_y] = init_board[_x][_y]
else:
self.__board = [['_', '_', '_'],
['_', '_', '_'],
['_', '_', '_']]
# initialize Zobrist hash value
self.__evaluate_zhash()
# ------------------------------------------------------
def is_empty(self):
"""Returns True if the board is empty"""
if self.__board == [['_', '_', '_'],
['_', '_', '_'],
['_', '_', '_']]:
return True
return False
# ------------------------------------------------------
def only_one_piece_present(self):
"""Returns True if only one piece present on board."""
num_pieces = 0
for _x in range(0, 3):
for _y in range(0, 3):
if self.__board[_x][_y] != "_":
num_pieces += 1
if num_pieces == 1:
return True
return False
# ------------------------------------------------------
def at_least_a_corner_busy(self):
"""Returns True if at least a corner of the board is busy"""
return self.__board[0][0] != '_' or \
self.__board[0][2] != '_' or \
self.__board[2][0] != '_' or \
self.__board[2][2] != '_'
# ------------------------------------------------------
def center_is_busy(self):
"""Returns True if the center cell of the board is busy"""
return self.__board[1][1] != '_'
# ------------------------------------------------------
def is_not_full(self):
"""Returns True if the board is not full."""
for _x in range(0, 3):
for _y in range(0, 3):
if self.__board[_x][_y] == "_":
return True
return False
# ------------------------------------------------------
def is_full(self):
"""Returns True if the board is full."""
return not self.is_not_full()
# ------------------------------------------------------
def pos_is_empty(self, _x, _y):
"""Returns True if the given board position does not contains a pawn."""
return bool(self.__board[_x][_y] == "_")
# ------------------------------------------------------
def pos_is_busy(self, _x, _y):
"""Returns True if the given board position contains a pawn."""
return not self.pos_is_empty(_x, _y)
# ------------------------------------------------------
def valid_moves(self):
"""Returns the list of the valid moves in the current board state."""
move_list = []
for _x in range(0, 3):
for _y in range(0, 3):
if self.__board[_x][_y] == "_":
move_list.append([_x, _y])
return move_list
# ------------------------------------------------------
def is_valid_move(self, move):
"""Returns True if the move is valid in the current board state."""
# check the format of the move
if len(move) != 2:
return False
_x, _y = self.convert_movestring_to_indexes(move)
if _x == -1 or _y == -1:
return False
# check if the position if free in the board
if self.pos_is_busy(_x, _y):
return False
return True
# ------------------------------------------------------
def analyze_move(self, move, piece):
"""analize a move, returning the new board hash,
and the score of the position"""
zhash, score = self.place_pawn(move[0], move[1], piece)
# return the board in the previous status
_ = self.__remove_pawn(move[0], move[1])
return zhash, score
# ------------------------------------------------------
def place_pawn(self, _x, _y, piece):
"""Places a pawn in the given board position."""
if self.pos_is_empty(_x, _y):
self.__board[_x][_y] = piece
self.__update_zhash(_x, _y, piece)
return self.evaluate(piece)
# ------------------------------------------------------
def evaluate(self, piece):
"""Evaluates the board value."""
neg_piece = self.__get_other_piece(piece)
score = self.__evaluate_rows(piece, neg_piece)
if score != 0:
return self.__zobrist_hash, score
score = self.__evaluate_cols(piece, neg_piece)
if score != 0:
return self.__zobrist_hash, score
return self.__zobrist_hash, self.__evaluate_diags(piece, neg_piece)
# ------------------------------------------------------
def convert_movestring_to_indexes(self, move):
"""Convert the move from the <row><col> format (e.g. "A1")
format to the board x,y indexes.
"""
row = move[0].upper()
col = move[1]
return self.__convert_move_coords_to_indexes(row, col)
# ------------------------------------------------------
def convert_move_to_movestring(self, move):
"""Convert the move from the [x,y] move format
to the <row><col> string format (e.g. "A1").
"""
return self.__convert_indexes_to_movestring(move[0], move[1])
# ------------------------------------------------------
def __remove_pawn(self, _x, _y):
"""Removes a pawn from the given board position."""
piece = self.__board[_x][_y]
if piece != "_":
self.__update_zhash(_x, _y, piece)
self.__board[_x][_y] = "_"
return piece
# experimental code that try to explore the concept
# of "equivalent boards"... could be used by learner
# player to speed up learning. Temporarly disabled...
# ------------------------------------------------------
#def get_zhash_equivalent_boards(self):
# """Return the zhash of the current board and of all
# the equivant simmetrical boards"""
# zhash2 = self.__rotate_board_clockwise()
# zhash3 = self.__rotate_board_clockwise()
# zhash4 = self.__rotate_board_clockwise()
# zhash1 = self.__rotate_board_clockwise()
# return zhash1, zhash2, zhash3, zhash4
# ------------------------------------------------------
#def __replace_pawn(self, _x, _y, piece):
# """Replace a pawn in the given board position
# with the given piece."""
# old_piece = self.__remove_pawn(_x, _y)
# self.place_pawn(_x, _y, piece)
# return old_piece
# ------------------------------------------------------
#def __move_pawn(self, x0, y0, x1, y1):
# """Move the pawn in the [x0, y0] position to the
# [x1, y1] position. Returns the piece that was
# in the [x1, y1] position"""
# return self.__replace_pawn(x1, y1, self.__remove_pawn(x0, y0))
#def __rotate_board_clockwise(self):
# """Build the board equivalent to the current one
# rotating it by 90 degrees clockwise"""
# piece = self.__board[0][0]
# piece = self.__replace_pawn(0, 2, piece)
# piece = self.__replace_pawn(2, 2, piece)
# piece = self.__replace_pawn(2, 0, piece)
# _ = self.__replace_pawn(0, 0, piece)
# piece = self.__board[0][1]
# piece = self.__replace_pawn(1, 2, piece)
# piece = self.__replace_pawn(2, 1, piece)
# piece = self.__replace_pawn(1, 0, piece)
# _ = self.__replace_pawn(0, 1, piece)
# return self.__zobrist_hash
# ------------------------------------------------------
@staticmethod
def __convert_move_coords_to_indexes(row, col):
"""Convert move coordinates (e.g. "A","1") to board x,y indexes."""
row_to_x = {
"A": 0,
"B": 1,
"C": 2
}
col_to_y = {
"1": 0,
"2": 1,
"3": 2
}
return row_to_x.get(row, -1), col_to_y.get(col, -1)
# ------------------------------------------------------
@staticmethod
def __convert_indexes_to_movestring(_x, _y):
"""Convert the move from board x,y indexes to <row><col> format (e.g. "A1")."""
x_to_row = {
0: "A",
1: "B",
2: "C"
}
y_to_col = {
0: "1",
1: "2",
2: "3"
}
mstring = ""
mstring += x_to_row[_x]
mstring += y_to_col[_y]
return mstring
# ------------------------------------------------------
def __evaluate_rows(self, pos_piece, neg_piece):
"""Evaluates the board value checking only rows."""
val = 0
row = 0
while val == 0 and row < 3:
if self.__board[row][0] == self.__board[row][1] and \
self.__board[row][1] == self.__board[row][2]:
if self.__board[row][0] == pos_piece:
val = 10
elif self.__board[row][0] == neg_piece:
val = -10
row += 1
return val
# ------------------------------------------------------
def __evaluate_cols(self, pos_piece, neg_piece):
"""Evaluates the board value checking only columns."""
val = 0
col = 0
while val == 0 and col < 3:
if self.__board[0][col] == self.__board[1][col] and \
self.__board[1][col] == self.__board[2][col]:
if self.__board[0][col] == pos_piece:
val = 10
elif self.__board[0][col] == neg_piece:
val = -10
col += 1
return val
# ------------------------------------------------------
def __evaluate_diags(self, pos_piece, neg_piece):
"""Evaluates the board value checking only diagonals."""
val = 0
if self.__board[0][0] == self.__board[1][1] and \
self.__board[1][1] == self.__board[2][2]:
if self.__board[1][1] == pos_piece:
val = 10
elif self.__board[1][1] == neg_piece:
val = -10
if val != 0:
return val
if self.__board[0][2] == self.__board[1][1] and \
self.__board[1][1] == self.__board[2][0]:
if self.__board[1][1] == pos_piece:
val = 10
elif self.__board[1][1] == neg_piece:
val = -10
return val
# ------------------------------------------------------
def __init_zhash(self, init_zhash):
"""Initialize Zobrist hash table with values provided
or generating random values."""
self.zhash_table = np.empty([3, 3, 2], dtype=int)
if init_zhash is not None:
for _x in range(0, 3):
for _y in range(0, 3):
for _e in range(0, 2):
self.zhash_table[_x][_y][_e] = init_zhash[_x][_y][_e]
else:
random.seed()
for _x in range(0, 3):
for _y in range(0, 3):
for _e in range(0, 2):
self.zhash_table[_x][_y][_e] = random.randint(0, sys.maxsize)
# compute current board Zobrist hash value
self.__evaluate_zhash()
# ------------------------------------------------------
def __evaluate_zhash(self):
"""Completely evaluates Zobrist hash value of the current board."""
self.__zobrist_hash = 0
for _x in range(0, 3):
for _y in range(0, 3):
piece = self.__board[_x][_y]
if piece != "_":
piece_ndx = self.__convert_piece_in_index(piece)
self.__zobrist_hash ^= self.zhash_table[_x][_y][piece_ndx]
# ------------------------------------------------------
def __update_zhash(self, _x, _y, piece):
"""Update Zobrist hash value after a board status change
due to a single place or remove of a pawn.
"""
piece_ndx = self.__convert_piece_in_index(piece)
self.__zobrist_hash ^= self.zhash_table[_x][_y][piece_ndx]
# ------------------------------------------------------
def __convert_piece_in_index(self, piece):
"""Convert a piece in internal index."""
if piece == self.__first_piece:
return 0
return 1
# ------------------------------------------------------
def __get_other_piece(self, piece):
if piece == self.__first_piece:
return self.__second_piece
return self.__first_piece
# ------------------------------------------------------
def __str__(self):
"""__str__ display of the board."""
###return ' 1 2 3\nA %r\nB %r\nC %r\n--- hash = %r' % \
### (self.__board[0], self.__board[1], self.__board[2], self.__zobrist_hash
return ' 1 2 3\nA %r\nB %r\nC %r\n' % \
(self.__board[0], self.__board[1], self.__board[2])
# ------------------------------------------------------
def __repr__(self):
"""__repr__ representation of the board."""
return 'Board(%s)' % self.__board
| [
"numpy.empty",
"random.seed",
"random.randint"
] | [((11916, 11946), 'numpy.empty', 'np.empty', (['[3, 3, 2]'], {'dtype': 'int'}), '([3, 3, 2], dtype=int)\n', (11924, 11946), True, 'import numpy as np\n'), ((12203, 12216), 'random.seed', 'random.seed', ([], {}), '()\n', (12214, 12216), False, 'import random\n'), ((12389, 12419), 'random.randint', 'random.randint', (['(0)', 'sys.maxsize'], {}), '(0, sys.maxsize)\n', (12403, 12419), False, 'import random\n')] |
import numpy as np
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank, size = comm.rank, comm.size
import dedalus.public as de
import matplotlib.pyplot as plt
import os
from scipy.special import erf
import time
import logging
root = logging.root
for h in root.handlers: h.setLevel("INFO")
logger = logging.getLogger(__name__)
# Simulation parameters
Re = 100
η = 1e-2
ε = np.sqrt(η/Re)
δ = 3.8017192844*ε
l = 0
N = [256,64,64,128]
boundaries = sorted(set([.1,1-l-δ,1-l+δ,4]))
iterations, wall_time = 1000+1, 1*60*60
dt = 5e-3
print_freq = 10
sim_name = 'cylinder-penalized'
data_dir = os.path.join('runs',sim_name)
if rank==0 and not os.path.isdir(data_dir): os.makedirs(data_dir)
# Create problem bases and domain
θbasis = de.Fourier('θ', N[0], interval=(-np.pi,np.pi), dealias=3/2)
rbases = [de.Chebyshev(f'r{i}',N[i+1],interval=boundaries[i:i+2], dealias=3/2) for i in range(len(boundaries)-1)]
rbasis = de.Compound('r',rbases)
domain = de.Domain([θbasis,rbasis], grid_dtype=np.float64)
θ, r = domain.grids(domain.dealias)
θθ,rr = np.meshgrid(θ,r,indexing='ij')
# Boundary condition functions
from dedalus.core.operators import GeneralFunction
# Define GeneralFunction subclass for time dependent boundary conditions
class ConstantFunction(GeneralFunction):
def __init__(self, domain, layout, func, args=[], kw={}, out=None,):
super().__init__(domain, layout, func, args=[], kw={}, out=None,)
def meta_constant(self, axis):
return True
def normalized_mask(x): return np.piecewise(x, [x<=-1,(x>-1)&(x<1),x>=1],
[lambda x:1,
lambda x:(1-erf(np.sqrt(np.pi)*x/np.sqrt(1-x**2)))/2,
lambda x:0])
def bc_func(solver): return normalized_mask(1-solver.sim_time/2)
def oscillation_func(solver): return np.sin(2*solver.sim_time/np.pi)
bc = ConstantFunction(domain, layout='g', func=bc_func)
oscillation = ConstantFunction(domain, layout='g', func=oscillation_func)
# Volume penalty mask
Γ = domain.new_field(scales=domain.dealias)
Γ['g'] = normalized_mask((rr-1)/δ)
disk = de.IVP(domain, variables=['u','v','p','q'], ncc_cutoff=1e-10)
disk.meta[:]['r']['dirichlet'] = True
# Parameters
params = [boundaries[0],boundaries[-1],Re,bc,np.pi] + N + [η, ε, δ, l, Γ]
param_names = ['R0','R1','Re','bc','π','Nθ']+[f'Nr{i}' for i in range(len(N)-1)] +['η', 'ε', 'δ', 'l', 'Γ']
if len(params)==len(param_names):
for param, param_name in zip(params, param_names):
disk.parameters[param_name] = param
disk.substitutions['pr'] = "p - 0.5*(u*u+v*v)"
disk.substitutions['qr'] = "q/r"
disk.substitutions['c'] = "cos(θ)"
disk.substitutions['s'] = "sin(θ)"
disk.substitutions['fpr'] = "-pr"
disk.substitutions['fpθ'] = "0"
disk.substitutions['fvr'] = "0"
disk.substitutions['fvθ'] = "(dθ(u) + r*dr(v) - v)/(Re*r)"
disk.substitutions['φ'] = "(π/2)*(1-cos(2*t/π))"
disk.substitutions['ω'] = "sin(2*t/π)"
disk.substitutions['α'] = "(2/π)*cos(2*t/π)"
disk.add_equation("dr(r*u) + dθ(v) = 0")
disk.add_equation("r*r*dt(u) + (1/Re)*dθ(q) + r*r*dr(p) = r*v*q - (r*r*Γ/η)*u")
disk.add_equation("r*r*dt(v) - (1/Re)*(r*dr(q) - q) + r*dθ(p) = -r*u*q - (r*r*Γ/η)*(v-r*ω)")
disk.add_equation("q - dr(r*v) + dθ(u) = 0")
# Boundary conditions
disk.add_bc("left(u) = 0")
disk.add_bc("left(v) = ω*R0")
disk.add_bc("right(u) = bc*cos(θ)", condition="(nθ != 0)")
disk.add_bc("right(v) =-bc*sin(θ)")
disk.add_bc("right(p) = 0", condition="(nθ == 0)")
# Build timestepper and solver
ts = de.timesteppers.SBDF3
solver = disk.build_solver(ts)
solver.stop_sim_time, solver.stop_wall_time, solver.stop_iteration = np.inf, wall_time, iterations
# Initialize variables
bc.original_args = bc.args = [solver]
oscillation.original_args = oscillation.args = [solver]
u, v, p, q = (solver.state[name] for name in disk.variables)
for field in [u,v,p,q]:
field.set_scales(domain.dealias)
field['g'] = 0
# Save state variables
analysis = solver.evaluator.add_file_handler('{}/data-{}'.format(data_dir,sim_name), iter=10, max_writes=200,mode='overwrite')
for task in disk.variables: analysis.add_task(task)
analysis.add_task("ω")
analysis.add_task("φ")
analysis.add_task("α")
analysis.add_task("bc")
# Save force calcs
forces = solver.evaluator.add_file_handler('{}/force-{}'.format(data_dir,sim_name),
iter=1, max_writes=iterations,mode='overwrite')
forces.add_task("integ(interp((c*fpr-s*fpθ)*r,r='left'),'θ')",name='Fpx')
forces.add_task("integ(interp((c*fvr-s*fvθ)*r,r='left'),'θ')",name='Fvx')
forces.add_task("integ(interp((s*fpr+c*fpθ)*r,r='left'),'θ')",name='Fpy')
forces.add_task("integ(interp((s*fvr+c*fvθ)*r,r='left'),'θ')",name='Fvy')
forces.add_task("integ(interp(fvθ*r*r,r='left'),'θ')",name='Tv')
forces.add_task("ω")
forces.add_task("φ")
forces.add_task("α")
# Save simulation parameters
parameters = solver.evaluator.add_file_handler('{}/parameters-{}'.format(data_dir,sim_name), iter=np.inf, max_writes=np.inf,mode='overwrite')
for param_name in param_names: parameters.add_task(param_name)
# Run the simulation
start_time = time.time()
while solver.ok:
solver.step(dt)
if solver.iteration % print_freq == 0:
logger.info('It:{:0>5d}, Time:{:.2f}, Max u:{:.2f}'.format(solver.iteration, (time.time()-start_time)/60,u['g'][N[0]//4,-1]))
| [
"numpy.meshgrid",
"os.makedirs",
"os.path.isdir",
"dedalus.public.IVP",
"dedalus.public.Fourier",
"dedalus.public.Domain",
"time.time",
"dedalus.public.Compound",
"dedalus.public.Chebyshev",
"numpy.sin",
"os.path.join",
"logging.getLogger",
"numpy.sqrt"
] | [((297, 324), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (314, 324), False, 'import logging\n'), ((373, 388), 'numpy.sqrt', 'np.sqrt', (['(η / Re)'], {}), '(η / Re)\n', (380, 388), True, 'import numpy as np\n'), ((585, 615), 'os.path.join', 'os.path.join', (['"""runs"""', 'sim_name'], {}), "('runs', sim_name)\n", (597, 615), False, 'import os\n'), ((726, 788), 'dedalus.public.Fourier', 'de.Fourier', (['"""θ"""', 'N[0]'], {'interval': '(-np.pi, np.pi)', 'dealias': '(3 / 2)'}), "('θ', N[0], interval=(-np.pi, np.pi), dealias=3 / 2)\n", (736, 788), True, 'import dedalus.public as de\n'), ((908, 932), 'dedalus.public.Compound', 'de.Compound', (['"""r"""', 'rbases'], {}), "('r', rbases)\n", (919, 932), True, 'import dedalus.public as de\n'), ((941, 991), 'dedalus.public.Domain', 'de.Domain', (['[θbasis, rbasis]'], {'grid_dtype': 'np.float64'}), '([θbasis, rbasis], grid_dtype=np.float64)\n', (950, 991), True, 'import dedalus.public as de\n'), ((1037, 1069), 'numpy.meshgrid', 'np.meshgrid', (['θ', 'r'], {'indexing': '"""ij"""'}), "(θ, r, indexing='ij')\n", (1048, 1069), True, 'import numpy as np\n'), ((2127, 2191), 'dedalus.public.IVP', 'de.IVP', (['domain'], {'variables': "['u', 'v', 'p', 'q']", 'ncc_cutoff': '(1e-10)'}), "(domain, variables=['u', 'v', 'p', 'q'], ncc_cutoff=1e-10)\n", (2133, 2191), True, 'import dedalus.public as de\n'), ((5186, 5197), 'time.time', 'time.time', ([], {}), '()\n', (5195, 5197), False, 'import time\n'), ((659, 680), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (670, 680), False, 'import os\n'), ((795, 871), 'dedalus.public.Chebyshev', 'de.Chebyshev', (['f"""r{i}"""', 'N[i + 1]'], {'interval': 'boundaries[i:i + 2]', 'dealias': '(3 / 2)'}), "(f'r{i}', N[i + 1], interval=boundaries[i:i + 2], dealias=3 / 2)\n", (807, 871), True, 'import dedalus.public as de\n'), ((1854, 1889), 'numpy.sin', 'np.sin', (['(2 * solver.sim_time / np.pi)'], {}), '(2 * solver.sim_time / np.pi)\n', (1860, 
1889), True, 'import numpy as np\n'), ((634, 657), 'os.path.isdir', 'os.path.isdir', (['data_dir'], {}), '(data_dir)\n', (647, 657), False, 'import os\n'), ((5365, 5376), 'time.time', 'time.time', ([], {}), '()\n', (5374, 5376), False, 'import time\n'), ((1674, 1693), 'numpy.sqrt', 'np.sqrt', (['(1 - x ** 2)'], {}), '(1 - x ** 2)\n', (1681, 1693), True, 'import numpy as np\n'), ((1657, 1671), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (1664, 1671), True, 'import numpy as np\n')] |
import math
import numpy as np
import matplotlib.pyplot as plt
def create_plot():
"Funcție ajutătoare pentru crearea graficului."
# Creez un subplot
fig, ax = plt.subplots(1, dpi=200)
# Setez axele de coordonate
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
return ax
###
print("Exercițiul 1")
# Pentru a aproxima valoarea lui radical din 8,
# căutăm soluția pozitivă a ecuației x^2 - 8 = 0
f = lambda x: x ** 2 - 8
# Metoda bisecției, dezvoltată la primul laborator
def bisectie(f, a, b, epsilon=1e-8):
"""Găsește rădăcina funcției `f` pe intervalul `[a, b]`
cu precizia `epsilon`.
"""
# Calculăm valorile în capătul stâng
f_a = f(a)
# Prima estimare, mijlocul intervalului inițial
x_num = (a + b) / 2
# Numărul necesar de iterații
num_iterations = math.floor(math.log2((b - a) / epsilon) - 1) + 1
# Aplicăm algoritmul
for step in range(num_iterations):
value = f(x_num)
# Am găsit fix valoarea căutată, ieșim
if value == 0:
break
elif f_a * value < 0:
b = x_num
else:
a = x_num
# Luăm mijlocul noului interval
x_num = (a + b) / 2
return x_num
# Căutăm rădăcina pozitivă
root = bisectie(f, 0, 8)
print("Radical din 8 este", root)
print()
# Afișez funcția folosită pentru calcularea radicalului
xs = np.linspace(0, 8, int(1e5))
ax = create_plot()
ax.set_title("Găsirea rădăcinii pătrate")
ax.plot(xs, f(xs), label="x^2 - 8")
ax.scatter(root, 0, c='red')
ax.legend()
plt.show()
###
print("Exercițiul 2")
# Definesc cele două părți ale egalității
f = lambda x: np.exp(x - 2)
g = lambda x: np.cos(np.exp(x - 2)) + 1
h = lambda x: f(x) - g(x)
# Găsim soluția ecuației h(x) = 0, care va fi și soluția ecuației inițiale
# Intervalul [1, 3] a fost ales analizând graficul
root = bisectie(h, 1, 3)
print("Funcțiile se intersectează în", root)
# Afișăm grafic rezultatul
xs = np.linspace(0, 5, int(1e5))
ax = create_plot()
ax.set_title("Rezolvarea unei ecuații")
ax.plot(xs, f(xs), label="e^(x - 2)")
ax.plot(xs, g(xs), label="cos(e^(x - 2)) + 1")
ax.scatter(root, f(root), c='red')
ax.legend()
plt.show()
print()
###
print("Exercițiul 3")
def pozitie_falsa(f, a, b, epsilon=1e-5):
"""Găsește soluția ecuației `f(x) = 0` folosind metoda poziției false
în intervalul [a, b], cu precizie `epsilon`.
"""
f_a = f(a)
f_b = f(b)
prev = (a * f_b - b * f_a)/(f_b - f_a)
f_prev = f(prev)
# Setez o valoare lui new, pentru cazul când f_prev == 0
# și ies imediat din buclă.
new = prev
num_iterations = 0
while True:
# Dacă am nimerit soluția exactă, mă opresc
if f_prev == 0:
break
elif f_a * f_prev < 0:
# Intervalul bun e cel din stânga
b = prev
f_b = f(b)
else:
# Intervalul bun e cel din dreapta
a = prev
f_a = f(a)
# Calculez noul punct de intersecție
new = (a * f_b - b * f_a)/(f_b - f_a)
f_new = f(new)
if np.abs(new - prev) / np.abs(prev) < epsilon:
break
prev, f_prev = new, f_new
num_iterations += 1
return new, num_iterations
f = lambda x: (x ** 3) - 19 * x + 30
# Calculez rădăcinile
# Intervalele au fost alese pe baza graficului
r1, n1 = pozitie_falsa(f, -6, -4)
print("Am găsit soluția", r1, "în", n1, "iterații")
r2, n2 = pozitie_falsa(f, 1, 2.5)
print("Am găsit soluția", r2, "în", n2, "iterații")
r3, n3 = pozitie_falsa(f, 2.5, 4)
print("Am găsit soluția", r3, "în", n3, "iterații")
xs = np.linspace(-5, 5, int(1e5))
ax = create_plot()
ax.set_title("Metoda poziției false")
# Afișez funcția
ax.plot(xs, f(xs), label='x^3 - 19x + 30')
# Afișez rădăcinile
ax.scatter([r1, r2, r3], [0, 0, 0], c='red')
ax.legend()
plt.show()
print()
###
print("Exercițiul 4")
def secanta(f, a, b, x0, x1, epsilon=1e-5):
"""Găsește rădăcina lui f pe intervalul [a, b],
plecând de la punctele x0 și x1.
"""
num_iterations = 0
# Ne oprim când eroarea relativă scade sub epsilon
while np.abs(x1 - x0) / np.abs(x0) >= epsilon:
# Calculăm următorul punct folosind secanta
x_new = (x0 * f(x1) - x1 * f(x0)) / (f(x1) - f(x0))
if x_new < a or x_new > b:
raise Exception("Valorile alese pentru x0 și x1 nu converg")
x0, x1 = x1, x_new
num_iterations += 1
return x1, num_iterations
f = lambda x: (x ** 3) - 7 * x + 6
a, b = -3, 3
r1, n1 = secanta(f, a, b, -3, -2.5)
print("Am găsit soluția", r1, "în", n1, "iterații")
r2, n2 = secanta(f, a, b, 0.5, 1.5)
print("Am găsit soluția", r2, "în", n2, "iterații")
r3, n3 = secanta(f, a, b, 1.5, 2.5)
print("Am găsit soluția", r3, "în", n3, "iterații")
xs = np.linspace(a, b, int(1e5))
ax = create_plot()
ax.set_title("Metoda secantei")
# Afișez funcția
ax.plot(xs, f(xs), label='x^3 - 7x + 6')
# Afișez rădăcinile
ax.scatter([r1, r2, r3], [0, 0, 0], c='red')
ax.legend()
plt.show()
| [
"numpy.abs",
"matplotlib.pyplot.show",
"numpy.exp",
"math.log2",
"matplotlib.pyplot.subplots"
] | [((1627, 1637), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1635, 1637), True, 'import matplotlib.pyplot as plt\n'), ((2256, 2266), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2264, 2266), True, 'import matplotlib.pyplot as plt\n'), ((3927, 3937), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3935, 3937), True, 'import matplotlib.pyplot as plt\n'), ((5094, 5104), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5102, 5104), True, 'import matplotlib.pyplot as plt\n'), ((174, 198), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'dpi': '(200)'}), '(1, dpi=200)\n', (186, 198), True, 'import matplotlib.pyplot as plt\n'), ((1723, 1736), 'numpy.exp', 'np.exp', (['(x - 2)'], {}), '(x - 2)\n', (1729, 1736), True, 'import numpy as np\n'), ((1758, 1771), 'numpy.exp', 'np.exp', (['(x - 2)'], {}), '(x - 2)\n', (1764, 1771), True, 'import numpy as np\n'), ((4205, 4220), 'numpy.abs', 'np.abs', (['(x1 - x0)'], {}), '(x1 - x0)\n', (4211, 4220), True, 'import numpy as np\n'), ((4223, 4233), 'numpy.abs', 'np.abs', (['x0'], {}), '(x0)\n', (4229, 4233), True, 'import numpy as np\n'), ((908, 936), 'math.log2', 'math.log2', (['((b - a) / epsilon)'], {}), '((b - a) / epsilon)\n', (917, 936), False, 'import math\n'), ((3170, 3188), 'numpy.abs', 'np.abs', (['(new - prev)'], {}), '(new - prev)\n', (3176, 3188), True, 'import numpy as np\n'), ((3191, 3203), 'numpy.abs', 'np.abs', (['prev'], {}), '(prev)\n', (3197, 3203), True, 'import numpy as np\n')] |
from dreaml.dataframe.transform import BatchTransform
from dreaml.dataframe.dataframe import DataFrame
from pcabasis import PCABasis
from dot import Dot
import numpy as np
import numpy.linalg as la
_auto_dir = "auto/"
class PCA(BatchTransform):
def func(self,target_df,X_pca_df,X_full_df=None,num_bases=50):
""" Project onto the PCA basis """
if X_full_df == None:
X_full_df = X_pca_df
X_mean = np.mean(X_pca_df.r_matrix,axis=0)
# the PCA basis is exposed for the user
self.v = self.pca_basis(X_pca_df.r_matrix,num_bases)
# Use numbers as the label for the basis dimension
col_labels = [str(i) for i in range(num_bases)]
# Set the matrix with the corresponding labels
target_df.set_matrix((X_full_df.r_matrix-X_mean).dot(self.v),
row_labels=X_full_df.rows(),
col_labels=col_labels)
def pca_basis(self,X, num_bases=50):
X_m = np.mean(X,axis=0) # mean
X_zm = X - X_m # X with 0 mean
u,s,v_T = la.svd(X_zm)
return np.real(v_T.T[:,:num_bases]) | [
"numpy.linalg.svd",
"numpy.mean",
"numpy.real"
] | [((448, 482), 'numpy.mean', 'np.mean', (['X_pca_df.r_matrix'], {'axis': '(0)'}), '(X_pca_df.r_matrix, axis=0)\n', (455, 482), True, 'import numpy as np\n'), ((1000, 1018), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1007, 1018), True, 'import numpy as np\n'), ((1082, 1094), 'numpy.linalg.svd', 'la.svd', (['X_zm'], {}), '(X_zm)\n', (1088, 1094), True, 'import numpy.linalg as la\n'), ((1110, 1139), 'numpy.real', 'np.real', (['v_T.T[:, :num_bases]'], {}), '(v_T.T[:, :num_bases])\n', (1117, 1139), True, 'import numpy as np\n')] |
# module containing the External Compton radiative process
import numpy as np
from astropy.constants import c, sigma_T, G
from ..utils.math import (
trapz_loglog,
log,
axes_reshaper,
gamma_to_integrate,
mu_to_integrate,
phi_to_integrate,
)
from ..utils.conversion import nu_to_epsilon_prime, to_R_g_units
from ..utils.geometry import x_re_shell, mu_star_shell, x_re_ring
from ..targets import (
CMB,
PointSourceBehindJet,
SSDisk,
SphericalShellBLR,
RingDustTorus,
)
from .kernels import isotropic_kernel, compton_kernel
__all__ = ["ExternalCompton"]
class ExternalCompton:
"""class for External Compton radiation computation
Parameters
----------
blob : :class:`~agnpy.emission_regions.Blob`
emission region and electron distribution hitting the photon target
target : :class:`~agnpy.targets`
class describing the target photon field
r : :class:`~astropy.units.Quantity`
distance of the blob from the Black Hole (i.e. from the target photons)
"""
def __init__(self, blob, target, r=None, integrator=np.trapz):
self.blob = blob
# we integrate on a larger grid to account for the transformation
# of the electron density in the reference frame of the BH
self.gamma = self.blob.gamma_to_integrate
self.target = target
self.r = r
self.integrator = integrator
self.set_mu()
self.set_phi()
def set_mu(self, mu_size=100):
self.mu_size = mu_size
if isinstance(self.target, SSDisk):
# in case of hte disk the mu interval does not go from -1 to 1
r_tilde = (self.r / self.target.R_g).to_value("")
self.mu = self.target.evaluate_mu_from_r_tilde(
self.target.R_in_tilde, self.target.R_out_tilde, r_tilde
)
else:
self.mu = np.linspace(-1, 1, self.mu_size)
def set_phi(self, phi_size=50):
self.phi_size = phi_size
self.phi = np.linspace(0, 2 * np.pi, self.phi_size)
    @staticmethod
    def evaluate_sed_flux_iso_mono(
        nu,
        z,
        d_L,
        delta_D,
        mu_s,
        R_b,
        epsilon_0,
        u_0,
        n_e,
        *args,
        integrator=np.trapz,
        gamma=gamma_to_integrate,
        mu=mu_to_integrate,
        phi=phi_to_integrate
    ):
        r"""Evaluates the flux SED,
        :math:`\nu F_{\nu} \, [\mathrm{erg}\,\mathrm{cm}^{-2}\,\mathrm{s}^{-1}]`,
        for External Compton on a monochromatic isotropic target photon field
        for a general set of model parameters
        Parameters
        ----------
        nu : :class:`~astropy.units.Quantity`
            array of frequencies, in Hz, to compute the sed
            **note** these are observed frequencies (observer frame)
        z : float
            redshift of the source
        d_L : :class:`~astropy.units.Quantity`
            luminosity distance of the source
        delta_D: float
            Doppler factor of the relativistic outflow
        mu_s : float
            cosine of the angle between the blob motion and the jet axis
        R_b : :class:`~astropy.units.Quantity`
            size of the emitting region (spherical blob assumed)
        epsilon_0 : float
            dimensionless energy (in electron rest mass energy units) of the
            target photon field
        u_0 : :class:`~astropy.units.Quantity`
            energy density [erg cm-3] of the target photon field
        n_e : :class:`~agnpy.spectra.ElectronDistribution`
            electron energy distribution
        *args
            parameters of the electron energy distribution (k_e, p, ...)
        integrator : func
            which function to use for integration, default `numpy.trapz`
        gamma : :class:`~numpy.ndarray`
            array of Lorentz factor over which to integrate the electron
            distribution
        mu, phi : :class:`~numpy.ndarray`
            arrays of cosine of zenith and azimuth angles to integrate over
        **Note** arguments after *args are keyword-only arguments
        Returns
        -------
        :class:`~astropy.units.Quantity`
            array of the SED values corresponding to each frequency
        """
        # conversion: observed frequencies -> dimensionless energies in the
        # blob frame (accounting for the redshift of the source)
        epsilon_s = nu_to_epsilon_prime(nu, z)
        # multi-dimensional integration: reshape the integration grids so
        # that they broadcast against each other on separate axes
        # (order: gamma -> axis 0, mu -> axis 1, phi -> axis 2, epsilon_s last)
        _gamma, _mu, _phi, _epsilon_s = axes_reshaper(gamma, mu, phi, epsilon_s)
        # volume of the spherical emitting region
        V_b = 4 / 3 * np.pi * np.power(R_b, 3)
        # total electron number; the distribution is evaluated at the
        # Doppler de-boosted Lorentz factor (frame transformation)
        N_e = V_b * n_e.evaluate(_gamma / delta_D, *args)
        kernel = compton_kernel(_gamma, _epsilon_s, epsilon_0, mu_s, _mu, _phi)
        integrand = N_e / np.power(_gamma, 2) * kernel
        # integrate over gamma (with the user-selected integrator), then
        # over the zenith and azimuth angles
        integral_gamma = integrator(integrand, gamma, axis=0)
        integral_mu = np.trapz(integral_gamma, mu, axis=0)
        integral_phi = np.trapz(integral_mu, phi, axis=0)
        prefactor_num = (
            3 * c * sigma_T * u_0 * np.power(epsilon_s, 2) * np.power(delta_D, 3)
        )
        prefactor_denom = (
            np.power(2, 7)
            * np.power(np.pi, 2)
            * np.power(d_L, 2)
            * np.power(epsilon_0, 2)
        )
        return (prefactor_num / prefactor_denom * integral_phi).to("erg cm-2 s-1")
def sed_flux_cmb(self, nu):
"""evaluates the flux SED for External Compton on the CMB"""
return self.evaluate_sed_flux_iso_mono(
nu,
self.blob.z,
self.blob.d_L,
self.blob.delta_D,
self.blob.mu_s,
self.blob.R_b,
self.target.epsilon_0,
self.target.u_0,
self.blob.n_e,
*self.blob.n_e.parameters,
integrator=self.integrator,
gamma=self.gamma,
mu=self.mu,
phi=self.phi
)
    @staticmethod
    def evaluate_sed_flux_ps_behind_jet(
        nu,
        z,
        d_L,
        delta_D,
        mu_s,
        R_b,
        epsilon_0,
        L_0,
        r,
        n_e,
        *args,
        integrator=np.trapz,
        gamma=gamma_to_integrate
    ):
        r"""Evaluates the flux SED,
        :math:`\nu F_{\nu} \, [\mathrm{erg}\,\mathrm{cm}^{-2}\,\mathrm{s}^{-1}]`,
        for External Compton on a point source of photons behind the jet
        for a general set of model parameters

        Parameters
        ----------
        nu : :class:`~astropy.units.Quantity`
            array of frequencies, in Hz, to compute the sed
            **note** these are observed frequencies (observer frame)
        z : float
            redshift of the source
        d_L : :class:`~astropy.units.Quantity`
            luminosity distance of the source
        delta_D : float
            Doppler factor of the relativistic outflow
        mu_s : float
            cosine of the angle between the blob motion and the jet axis
        R_b : :class:`~astropy.units.Quantity`
            size of the emitting region (spherical blob assumed)
        epsilon_0 : float
            dimensionless energy (in electron rest mass energy units) of the
            target photon field
        L_0 : :class:`~astropy.units.Quantity`
            luminosity [erg s-1] of the point source behind the jet
        r : :class:`~astropy.units.Quantity`
            distance between the point source and the blob
        n_e : :class:`~agnpy.spectra.ElectronDistribution`
            electron energy distribution
        *args
            parameters of the electron energy distribution (k_e, p, ...)
        integrator : func
            which function to use for integration, default `numpy.trapz`
        gamma : :class:`~numpy.ndarray`
            array of Lorentz factor over which to integrate the electron
            distribution

        **Note** arguments after *args are keyword-only arguments

        Returns
        -------
        :class:`~astropy.units.Quantity`
            array of the SED values corresponding to each frequency
        """
        # conversion: observed frequencies -> dimensionless photon energies
        epsilon_s = nu_to_epsilon_prime(nu, z)
        # multi-dimensional integration: reshape the integration variables
        # so they broadcast against each other
        _gamma, _epsilon_s = axes_reshaper(gamma, epsilon_s)
        # volume of the spherical emitting blob
        V_b = 4 / 3 * np.pi * np.power(R_b, 3)
        # electron distribution evaluated at gamma / delta_D
        N_e = V_b * n_e.evaluate(_gamma / delta_D, *args)
        # photons arrive from directly behind the jet: mu = 1, phi = 0
        kernel = compton_kernel(_gamma, _epsilon_s, epsilon_0, mu_s, 1, 0)
        integrand = N_e / np.power(_gamma, 2) * kernel
        integral = integrator(integrand, gamma, axis=0)
        prefactor_num = (
            3 * sigma_T * L_0 * np.power(epsilon_s, 2) * np.power(delta_D, 3)
        )
        prefactor_denom = (
            np.power(2, 7)
            * np.power(np.pi, 2)
            * np.power(d_L, 2)
            * np.power(r, 2)
            * np.power(epsilon_0, 2)
        )
        return (prefactor_num / prefactor_denom * integral).to("erg cm-2 s-1")
def sed_flux_ps_behind_jet(self, nu):
"""evaluates the flux SED for External Compton on a point source behind
the jet"""
return self.evaluate_sed_flux_ps_behind_jet(
nu,
self.blob.z,
self.blob.d_L,
self.blob.delta_D,
self.blob.mu_s,
self.blob.R_b,
self.target.epsilon_0,
self.target.L_0,
self.r,
self.blob.n_e,
*self.blob.n_e.parameters,
integrator=self.integrator,
gamma=self.gamma
)
    @staticmethod
    def evaluate_sed_flux_ss_disk(
        nu,
        z,
        d_L,
        delta_D,
        mu_s,
        R_b,
        M_BH,
        L_disk,
        eta,
        R_in,
        R_out,
        r,
        n_e,
        *args,
        integrator=np.trapz,
        gamma=gamma_to_integrate,
        mu_size=100,
        phi=phi_to_integrate
    ):
        r"""Evaluates the flux SED,
        :math:`\nu F_{\nu} \, [\mathrm{erg}\,\mathrm{cm}^{-2}\,\mathrm{s}^{-1}]`,
        for External Compton on the radiation of a Shakura-Sunyaev
        accretion disk, for a general set of model parameters

        Parameters
        ----------
        nu : :class:`~astropy.units.Quantity`
            array of frequencies, in Hz, to compute the sed
            **note** these are observed frequencies (observer frame)
        z : float
            redshift of the source
        d_L : :class:`~astropy.units.Quantity`
            luminosity distance of the source
        delta_D : float
            Doppler factor of the relativistic outflow
        mu_s : float
            cosine of the angle between the blob motion and the jet axis
        R_b : :class:`~astropy.units.Quantity`
            size of the emitting region (spherical blob assumed)
        M_BH : :class:`~astropy.units.Quantity`
            Black Hole mass
        L_disk : :class:`~astropy.units.Quantity`
            luminosity of the disk
        eta : float
            accretion efficiency
        R_in : :class:`~astropy.units.Quantity`
            inner disk radius
        R_out : :class:`~astropy.units.Quantity`
            outer disk radius
        r : :class:`~astropy.units.Quantity`
            distance between the disk and the blob
        n_e : :class:`~agnpy.spectra.ElectronDistribution`
            electron energy distribution
        *args
            parameters of the electron energy distribution (k_e, p, ...)
        integrator : func
            which function to use for integration, default `numpy.trapz`
        gamma : :class:`~numpy.ndarray`
            array of Lorentz factor over which to integrate the electron
            distribution
        mu_size : int
            NOTE(review): accepted for interface compatibility but not used
            in the body; the zenith range is computed by
            ``SSDisk.evaluate_mu_from_r_tilde`` - confirm intended
        phi : :class:`~numpy.ndarray`
            array of azimuth angles to integrate over

        **Note** arguments after *args are keyword-only arguments

        Returns
        -------
        :class:`~astropy.units.Quantity`
            array of the SED values corresponding to each frequency
        """
        # conversions: photon energies and gravitational-radius units
        epsilon_s = nu_to_epsilon_prime(nu, z)
        r_tilde = to_R_g_units(r, M_BH)
        R_in_tilde = to_R_g_units(R_in, M_BH)
        R_out_tilde = to_R_g_units(R_out, M_BH)
        # mass accretion rate from the disk luminosity and efficiency
        m_dot = (L_disk / (eta * np.power(c, 2))).to("g / s")
        # multidimensional integration
        # for the disk we do not integrate mu from -1 to 1 but choose the range
        # of zenith angles subtended from a given distance
        mu = SSDisk.evaluate_mu_from_r_tilde(R_in_tilde, R_out_tilde, r_tilde)
        _gamma, _mu, _phi, _epsilon_s = axes_reshaper(gamma, mu, phi, epsilon_s)
        # volume of the spherical emitting blob
        V_b = 4 / 3 * np.pi * np.power(R_b, 3)
        # electron distribution evaluated at gamma / delta_D
        N_e = V_b * n_e.evaluate(_gamma / delta_D, *args)
        # disk photon energy and emissivity profile at each zenith angle
        epsilon = SSDisk.evaluate_epsilon_mu(L_disk, M_BH, eta, _mu, r_tilde)
        phi_disk = SSDisk.evaluate_phi_disk_mu(_mu, R_in_tilde, r_tilde)
        kernel = compton_kernel(_gamma, _epsilon_s, epsilon, mu_s, _mu, _phi)
        integrand = (
            phi_disk
            / np.power(epsilon, 2)
            / _mu
            / np.power(np.power(_mu, -2) - 1, 3 / 2)
            * N_e
            / np.power(_gamma, 2)
            * kernel
        )
        # integrate over gamma, then zenith, then azimuth
        integral_gamma = integrator(integrand, gamma, axis=0)
        integral_mu = np.trapz(integral_gamma, mu, axis=0)
        integral_phi = np.trapz(integral_mu, phi, axis=0)
        prefactor_num = (
            9
            * sigma_T
            * G
            * M_BH
            * m_dot
            * np.power(epsilon_s, 2)
            * np.power(delta_D, 3)
        )
        prefactor_denom = (
            np.power(2, 9) * np.power(np.pi, 3) * np.power(d_L, 2) * np.power(r, 3)
        )
        return (prefactor_num / prefactor_denom * integral_phi).to("erg cm-2 s-1")
def sed_flux_ss_disk(self, nu):
"""evaluates the flux SED for External Compton on a [Shakura1973]_ disk"""
return self.evaluate_sed_flux_ss_disk(
nu,
self.blob.z,
self.blob.d_L,
self.blob.delta_D,
self.blob.mu_s,
self.blob.R_b,
self.target.M_BH,
self.target.L_disk,
self.target.eta,
self.target.R_in,
self.target.R_out,
self.r,
self.blob.n_e,
*self.blob.n_e.parameters,
integrator=self.integrator,
gamma=self.gamma,
mu_size=self.mu_size,
phi=self.phi
)
    @staticmethod
    def evaluate_sed_flux_blr(
        nu,
        z,
        d_L,
        delta_D,
        mu_s,
        R_b,
        L_disk,
        xi_line,
        epsilon_line,
        R_line,
        r,
        n_e,
        *args,
        integrator=np.trapz,
        gamma=gamma_to_integrate,
        mu=mu_to_integrate,
        phi=phi_to_integrate
    ):
        r"""Evaluates the flux SED,
        :math:`\nu F_{\nu} \, [\mathrm{erg}\,\mathrm{cm}^{-2}\,\mathrm{s}^{-1}]`,
        for External Compton on the photon field of a spherical shell
        Broad Line Region, for a general set of model parameters

        Parameters
        ----------
        nu : :class:`~astropy.units.Quantity`
            array of frequencies, in Hz, to compute the sed
            **note** these are observed frequencies (observer frame)
        z : float
            redshift of the source
        d_L : :class:`~astropy.units.Quantity`
            luminosity distance of the source
        delta_D : float
            Doppler factor of the relativistic outflow
        mu_s : float
            cosine of the angle between the blob motion and the jet axis
        R_b : :class:`~astropy.units.Quantity`
            size of the emitting region (spherical blob assumed)
        L_disk : :class:`~astropy.units.Quantity`
            Luminosity of the disk whose radiation is being reprocessed by the BLR
        xi_line : float
            fraction of the disk radiation reprocessed by the BLR
        epsilon_line : float
            dimensionless energy of the emitted line
        R_line : :class:`~astropy.units.Quantity`
            radius of the BLR spherical shell
        r : :class:`~astropy.units.Quantity`
            distance between the Broad Line Region and the blob
        n_e : :class:`~agnpy.spectra.ElectronDistribution`
            electron energy distribution
        *args
            parameters of the electron energy distribution (k_e, p, ...)
        integrator : func
            which function to use for integration, default `numpy.trapz`
        gamma : :class:`~numpy.ndarray`
            array of Lorentz factor over which to integrate the electron
            distribution
        mu, phi : :class:`~numpy.ndarray`
            arrays of cosine of zenith and azimuth angles to integrate over

        **Note** arguments after *args are keyword-only arguments

        Returns
        -------
        :class:`~astropy.units.Quantity`
            array of the SED values corresponding to each frequency
        """
        # conversions: observed frequencies -> dimensionless photon energies
        epsilon_s = nu_to_epsilon_prime(nu, z)
        # multidimensional integration: broadcastable integration grids
        _gamma, _mu, _phi, _epsilon_s = axes_reshaper(gamma, mu, phi, epsilon_s)
        # volume of the spherical emitting blob
        V_b = 4 / 3 * np.pi * np.power(R_b, 3)
        # electron distribution evaluated at gamma / delta_D
        N_e = V_b * n_e.evaluate(_gamma / delta_D, *args)
        # geometry of the shell: distance to and viewing cosine of each
        # reprocessing point on the BLR shell
        x = x_re_shell(_mu, R_line, r)
        mu_star = mu_star_shell(_mu, R_line, r)
        kernel = compton_kernel(_gamma, _epsilon_s, epsilon_line, mu_s, mu_star, _phi)
        integrand = 1 / np.power(x, 2) * N_e / np.power(_gamma, 2) * kernel
        # integrate over gamma, then zenith, then azimuth
        integral_gamma = integrator(integrand, gamma, axis=0)
        integral_mu = np.trapz(integral_gamma, mu, axis=0)
        integral_phi = np.trapz(integral_mu, phi, axis=0)
        prefactor_num = (
            3
            * sigma_T
            * xi_line
            * L_disk
            * np.power(epsilon_s, 2)
            * np.power(delta_D, 3)
        )
        prefactor_denom = (
            np.power(2, 9)
            * np.power(np.pi, 3)
            * np.power(d_L, 2)
            * np.power(epsilon_line, 2)
        )
        return (prefactor_num / prefactor_denom * integral_phi).to("erg cm-2 s-1")
def sed_flux_blr(self, nu):
"""evaluates the flux SED for External Compton on a spherical BLR"""
return self.evaluate_sed_flux_blr(
nu,
self.blob.z,
self.blob.d_L,
self.blob.delta_D,
self.blob.mu_s,
self.blob.R_b,
self.target.L_disk,
self.target.xi_line,
self.target.epsilon_line,
self.target.R_line,
self.r,
self.blob.n_e,
*self.blob.n_e.parameters,
integrator=self.integrator,
gamma=self.gamma,
mu=self.mu,
phi=self.phi
)
    @staticmethod
    def evaluate_sed_flux_dt(
        nu,
        z,
        d_L,
        delta_D,
        mu_s,
        R_b,
        L_disk,
        xi_dt,
        epsilon_dt,
        R_dt,
        r,
        n_e,
        *args,
        integrator=np.trapz,
        gamma=gamma_to_integrate,
        phi=phi_to_integrate
    ):
        r"""Evaluates the flux SED,
        :math:`\nu F_{\nu} \, [\mathrm{erg}\,\mathrm{cm}^{-2}\,\mathrm{s}^{-1}]`,
        for External Compton on the photon field of a ring dust torus,
        for a general set of model parameters

        Parameters
        ----------
        nu : :class:`~astropy.units.Quantity`
            array of frequencies, in Hz, to compute the sed
            **note** these are observed frequencies (observer frame)
        z : float
            redshift of the source
        d_L : :class:`~astropy.units.Quantity`
            luminosity distance of the source
        delta_D : float
            Doppler factor of the relativistic outflow
        mu_s : float
            cosine of the angle between the blob motion and the jet axis
        R_b : :class:`~astropy.units.Quantity`
            size of the emitting region (spherical blob assumed)
        L_disk : :class:`~astropy.units.Quantity`
            Luminosity of the disk whose radiation is being reprocessed by the torus
        xi_dt : float
            fraction of the disk radiation reprocessed by the torus
        epsilon_dt : float
            peak (dimensionless) energy of the black body radiated by the torus
        R_dt : :class:`~astropy.units.Quantity`
            radius of the ring-like torus
        r : :class:`~astropy.units.Quantity`
            distance between the torus and the blob
        n_e : :class:`~agnpy.spectra.ElectronDistribution`
            electron energy distribution
        *args
            parameters of the electron energy distribution (k_e, p, ...)
        integrator : func
            which function to use for integration, default `numpy.trapz`
        gamma : :class:`~numpy.ndarray`
            array of Lorentz factor over which to integrate the electron
            distribution
        phi : :class:`~numpy.ndarray`
            array of azimuth angles to integrate over

        **Note** arguments after *args are keyword-only arguments

        Returns
        -------
        :class:`~astropy.units.Quantity`
            array of the SED values corresponding to each frequency
        """
        # conversions: observed frequencies -> dimensionless photon energies
        epsilon_s = nu_to_epsilon_prime(nu, z)
        # multidimensional integration: broadcastable integration grids
        _gamma, _phi, _epsilon_s = axes_reshaper(gamma, phi, epsilon_s)
        # volume of the spherical emitting blob
        V_b = 4 / 3 * np.pi * np.power(R_b, 3)
        # electron distribution evaluated at gamma / delta_D
        N_e = V_b * n_e.evaluate(_gamma / delta_D, *args)
        # distance from the blob to a point on the ring, and the (single)
        # cosine of the zenith angle under which the ring is seen
        x_re = x_re_ring(R_dt, r)
        mu = (r / x_re).to_value("")
        kernel = compton_kernel(_gamma, _epsilon_s, epsilon_dt, mu_s, mu, _phi)
        integrand = N_e / np.power(_gamma, 2) * kernel
        # integrate over gamma, then azimuth (no zenith integral: the ring
        # subtends a single zenith angle)
        integral_gamma = integrator(integrand, gamma, axis=0)
        integral_phi = np.trapz(integral_gamma, phi, axis=0)
        prefactor_num = (
            3 * sigma_T * xi_dt * L_disk * np.power(epsilon_s, 2) * np.power(delta_D, 3)
        )
        prefactor_denom = (
            np.power(2, 8)
            * np.power(np.pi, 3)
            * np.power(d_L, 2)
            * np.power(x_re, 2)
            * np.power(epsilon_dt, 2)
        )
        return (prefactor_num / prefactor_denom * integral_phi).to("erg cm-2 s-1")
def sed_flux_dt(self, nu):
"""evaluates the flux SED for External Compton on a ring dust torus"""
return self.evaluate_sed_flux_dt(
nu,
self.blob.z,
self.blob.d_L,
self.blob.delta_D,
self.blob.mu_s,
self.blob.R_b,
self.target.L_disk,
self.target.xi_dt,
self.target.epsilon_dt,
self.target.R_dt,
self.r,
self.blob.n_e,
*self.blob.n_e.parameters,
integrator=self.integrator,
gamma=self.gamma,
phi=self.phi
)
def sed_flux(self, nu):
"""SEDs for external Compton"""
if isinstance(self.target, CMB):
return self.sed_flux_cmb(nu)
if isinstance(self.target, PointSourceBehindJet):
return self.sed_flux_ps_behind_jet(nu)
if isinstance(self.target, SSDisk):
return self.sed_flux_ss_disk(nu)
if isinstance(self.target, SphericalShellBLR):
return self.sed_flux_blr(nu)
if isinstance(self.target, RingDustTorus):
return self.sed_flux_dt(nu)
def sed_luminosity(self, nu):
r"""Evaluates the external Compton luminosity SED
:math:`\nu L_{\nu} \, [\mathrm{erg}\,\mathrm{s}^{-1}]`"""
sphere = 4 * np.pi * np.power(self.blob.d_L, 2)
return (sphere * self.sed_flux(nu)).to("erg s-1")
| [
"numpy.trapz",
"numpy.linspace",
"numpy.power"
] | [((2018, 2058), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'self.phi_size'], {}), '(0, 2 * np.pi, self.phi_size)\n', (2029, 2058), True, 'import numpy as np\n'), ((4886, 4922), 'numpy.trapz', 'np.trapz', (['integral_gamma', 'mu'], {'axis': '(0)'}), '(integral_gamma, mu, axis=0)\n', (4894, 4922), True, 'import numpy as np\n'), ((4946, 4980), 'numpy.trapz', 'np.trapz', (['integral_mu', 'phi'], {'axis': '(0)'}), '(integral_mu, phi, axis=0)\n', (4954, 4980), True, 'import numpy as np\n'), ((13433, 13469), 'numpy.trapz', 'np.trapz', (['integral_gamma', 'mu'], {'axis': '(0)'}), '(integral_gamma, mu, axis=0)\n', (13441, 13469), True, 'import numpy as np\n'), ((13493, 13527), 'numpy.trapz', 'np.trapz', (['integral_mu', 'phi'], {'axis': '(0)'}), '(integral_mu, phi, axis=0)\n', (13501, 13527), True, 'import numpy as np\n'), ((17769, 17805), 'numpy.trapz', 'np.trapz', (['integral_gamma', 'mu'], {'axis': '(0)'}), '(integral_gamma, mu, axis=0)\n', (17777, 17805), True, 'import numpy as np\n'), ((17829, 17863), 'numpy.trapz', 'np.trapz', (['integral_mu', 'phi'], {'axis': '(0)'}), '(integral_mu, phi, axis=0)\n', (17837, 17863), True, 'import numpy as np\n'), ((22031, 22068), 'numpy.trapz', 'np.trapz', (['integral_gamma', 'phi'], {'axis': '(0)'}), '(integral_gamma, phi, axis=0)\n', (22039, 22068), True, 'import numpy as np\n'), ((1896, 1928), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'self.mu_size'], {}), '(-1, 1, self.mu_size)\n', (1907, 1928), True, 'import numpy as np\n'), ((4592, 4608), 'numpy.power', 'np.power', (['R_b', '(3)'], {}), '(R_b, 3)\n', (4600, 4608), True, 'import numpy as np\n'), ((5068, 5088), 'numpy.power', 'np.power', (['delta_D', '(3)'], {}), '(delta_D, 3)\n', (5076, 5088), True, 'import numpy as np\n'), ((5232, 5254), 'numpy.power', 'np.power', (['epsilon_0', '(2)'], {}), '(epsilon_0, 2)\n', (5240, 5254), True, 'import numpy as np\n'), ((8368, 8384), 'numpy.power', 'np.power', (['R_b', '(3)'], {}), '(R_b, 3)\n', (8376, 8384), True, 
'import numpy as np\n'), ((8712, 8732), 'numpy.power', 'np.power', (['delta_D', '(3)'], {}), '(delta_D, 3)\n', (8720, 8732), True, 'import numpy as np\n'), ((8905, 8927), 'numpy.power', 'np.power', (['epsilon_0', '(2)'], {}), '(epsilon_0, 2)\n', (8913, 8927), True, 'import numpy as np\n'), ((12813, 12829), 'numpy.power', 'np.power', (['R_b', '(3)'], {}), '(R_b, 3)\n', (12821, 12829), True, 'import numpy as np\n'), ((13696, 13716), 'numpy.power', 'np.power', (['delta_D', '(3)'], {}), '(delta_D, 3)\n', (13704, 13716), True, 'import numpy as np\n'), ((13824, 13838), 'numpy.power', 'np.power', (['r', '(3)'], {}), '(r, 3)\n', (13832, 13838), True, 'import numpy as np\n'), ((17360, 17376), 'numpy.power', 'np.power', (['R_b', '(3)'], {}), '(R_b, 3)\n', (17368, 17376), True, 'import numpy as np\n'), ((18020, 18040), 'numpy.power', 'np.power', (['delta_D', '(3)'], {}), '(delta_D, 3)\n', (18028, 18040), True, 'import numpy as np\n'), ((18184, 18209), 'numpy.power', 'np.power', (['epsilon_line', '(2)'], {}), '(epsilon_line, 2)\n', (18192, 18209), True, 'import numpy as np\n'), ((21665, 21681), 'numpy.power', 'np.power', (['R_b', '(3)'], {}), '(R_b, 3)\n', (21673, 21681), True, 'import numpy as np\n'), ((22163, 22183), 'numpy.power', 'np.power', (['delta_D', '(3)'], {}), '(delta_D, 3)\n', (22171, 22183), True, 'import numpy as np\n'), ((22359, 22382), 'numpy.power', 'np.power', (['epsilon_dt', '(2)'], {}), '(epsilon_dt, 2)\n', (22367, 22382), True, 'import numpy as np\n'), ((23827, 23853), 'numpy.power', 'np.power', (['self.blob.d_L', '(2)'], {}), '(self.blob.d_L, 2)\n', (23835, 23853), True, 'import numpy as np\n'), ((4773, 4792), 'numpy.power', 'np.power', (['_gamma', '(2)'], {}), '(_gamma, 2)\n', (4781, 4792), True, 'import numpy as np\n'), ((5043, 5065), 'numpy.power', 'np.power', (['epsilon_s', '(2)'], {}), '(epsilon_s, 2)\n', (5051, 5065), True, 'import numpy as np\n'), ((5201, 5217), 'numpy.power', 'np.power', (['d_L', '(2)'], {}), '(d_L, 2)\n', (5209, 5217), True, 
'import numpy as np\n'), ((8544, 8563), 'numpy.power', 'np.power', (['_gamma', '(2)'], {}), '(_gamma, 2)\n', (8552, 8563), True, 'import numpy as np\n'), ((8687, 8709), 'numpy.power', 'np.power', (['epsilon_s', '(2)'], {}), '(epsilon_s, 2)\n', (8695, 8709), True, 'import numpy as np\n'), ((8876, 8890), 'numpy.power', 'np.power', (['r', '(2)'], {}), '(r, 2)\n', (8884, 8890), True, 'import numpy as np\n'), ((13298, 13317), 'numpy.power', 'np.power', (['_gamma', '(2)'], {}), '(_gamma, 2)\n', (13306, 13317), True, 'import numpy as np\n'), ((13659, 13681), 'numpy.power', 'np.power', (['epsilon_s', '(2)'], {}), '(epsilon_s, 2)\n', (13667, 13681), True, 'import numpy as np\n'), ((13805, 13821), 'numpy.power', 'np.power', (['d_L', '(2)'], {}), '(d_L, 2)\n', (13813, 13821), True, 'import numpy as np\n'), ((17656, 17675), 'numpy.power', 'np.power', (['_gamma', '(2)'], {}), '(_gamma, 2)\n', (17664, 17675), True, 'import numpy as np\n'), ((17983, 18005), 'numpy.power', 'np.power', (['epsilon_s', '(2)'], {}), '(epsilon_s, 2)\n', (17991, 18005), True, 'import numpy as np\n'), ((18153, 18169), 'numpy.power', 'np.power', (['d_L', '(2)'], {}), '(d_L, 2)\n', (18161, 18169), True, 'import numpy as np\n'), ((21917, 21936), 'numpy.power', 'np.power', (['_gamma', '(2)'], {}), '(_gamma, 2)\n', (21925, 21936), True, 'import numpy as np\n'), ((22138, 22160), 'numpy.power', 'np.power', (['epsilon_s', '(2)'], {}), '(epsilon_s, 2)\n', (22146, 22160), True, 'import numpy as np\n'), ((22327, 22344), 'numpy.power', 'np.power', (['x_re', '(2)'], {}), '(x_re, 2)\n', (22335, 22344), True, 'import numpy as np\n'), ((5139, 5153), 'numpy.power', 'np.power', (['(2)', '(7)'], {}), '(2, 7)\n', (5147, 5153), True, 'import numpy as np\n'), ((5168, 5186), 'numpy.power', 'np.power', (['np.pi', '(2)'], {}), '(np.pi, 2)\n', (5176, 5186), True, 'import numpy as np\n'), ((8845, 8861), 'numpy.power', 'np.power', (['d_L', '(2)'], {}), '(d_L, 2)\n', (8853, 8861), True, 'import numpy as np\n'), ((13767, 13781), 
'numpy.power', 'np.power', (['(2)', '(9)'], {}), '(2, 9)\n', (13775, 13781), True, 'import numpy as np\n'), ((13784, 13802), 'numpy.power', 'np.power', (['np.pi', '(3)'], {}), '(np.pi, 3)\n', (13792, 13802), True, 'import numpy as np\n'), ((18091, 18105), 'numpy.power', 'np.power', (['(2)', '(9)'], {}), '(2, 9)\n', (18099, 18105), True, 'import numpy as np\n'), ((18120, 18138), 'numpy.power', 'np.power', (['np.pi', '(3)'], {}), '(np.pi, 3)\n', (18128, 18138), True, 'import numpy as np\n'), ((22296, 22312), 'numpy.power', 'np.power', (['d_L', '(2)'], {}), '(d_L, 2)\n', (22304, 22312), True, 'import numpy as np\n'), ((8783, 8797), 'numpy.power', 'np.power', (['(2)', '(7)'], {}), '(2, 7)\n', (8791, 8797), True, 'import numpy as np\n'), ((8812, 8830), 'numpy.power', 'np.power', (['np.pi', '(2)'], {}), '(np.pi, 2)\n', (8820, 8830), True, 'import numpy as np\n'), ((12416, 12430), 'numpy.power', 'np.power', (['c', '(2)'], {}), '(c, 2)\n', (12424, 12430), True, 'import numpy as np\n'), ((17633, 17647), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (17641, 17647), True, 'import numpy as np\n'), ((22234, 22248), 'numpy.power', 'np.power', (['(2)', '(8)'], {}), '(2, 8)\n', (22242, 22248), True, 'import numpy as np\n'), ((22263, 22281), 'numpy.power', 'np.power', (['np.pi', '(3)'], {}), '(np.pi, 3)\n', (22271, 22281), True, 'import numpy as np\n'), ((13174, 13194), 'numpy.power', 'np.power', (['epsilon', '(2)'], {}), '(epsilon, 2)\n', (13182, 13194), True, 'import numpy as np\n'), ((13236, 13253), 'numpy.power', 'np.power', (['_mu', '(-2)'], {}), '(_mu, -2)\n', (13244, 13253), True, 'import numpy as np\n')] |
# pylint: disable=no-self-use,invalid-name
import numpy
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.fields import MultiLabelField
from allennlp.data.vocabulary import Vocabulary
class TestMultiLabelField(AllenNlpTestCase):
    """Unit tests for ``MultiLabelField``: tensor conversion, vocabulary
    indexing, constructor validation, empty fields, and the once-per-namespace
    warning mechanism."""
    def test_as_tensor_returns_integer_tensor(self):
        # With skip_indexing=True the given integers are used directly as
        # label ids; num_labels fixes the length of the multi-hot vector.
        f = MultiLabelField([2, 3], skip_indexing=True, label_namespace="test1", num_labels=5)
        tensor = f.as_tensor(f.get_padding_lengths()).detach().cpu().tolist()
        # Multi-hot encoding: positions 2 and 3 are set, and entries are ints.
        assert tensor == [0, 0, 1, 1, 0]
        assert {type(item) for item in tensor} == {int}
    def test_multilabel_field_can_index_with_vocab(self):
        vocab = Vocabulary()
        vocab.add_token_to_namespace("rel0", namespace="rel_labels")
        vocab.add_token_to_namespace("rel1", namespace="rel_labels")
        vocab.add_token_to_namespace("rel2", namespace="rel_labels")
        f = MultiLabelField(["rel1", "rel0"], label_namespace="rel_labels")
        f.index(vocab)
        tensor = f.as_tensor(f.get_padding_lengths()).detach().cpu().numpy()
        # "rel0" and "rel1" map to vocabulary ids 0 and 1; "rel2" (id 2) is
        # not in the field, so its slot stays 0.
        numpy.testing.assert_array_almost_equal(tensor, numpy.array([1, 1, 0]))
    def test_multilabel_field_raises_with_non_integer_labels_and_no_indexing(self):
        # skip_indexing=True requires integer labels.
        with pytest.raises(ConfigurationError):
            _ = MultiLabelField(["non integer field"], skip_indexing=True)
    def test_multilabel_field_raises_with_no_indexing_and_missing_num_labels(self):
        # skip_indexing=True requires num_labels to size the output vector.
        with pytest.raises(ConfigurationError):
            _ = MultiLabelField([0, 2], skip_indexing=True, num_labels=None)
    def test_multilabel_field_raises_with_no_indexing_and_wrong_num_labels(self):
        # Label id 4 is out of range for num_labels=3.
        with pytest.raises(ConfigurationError):
            _ = MultiLabelField([0, 2, 4], skip_indexing=True, num_labels=3)
    def test_multilabel_field_raises_with_incorrect_label_type(self):
        # Without skip_indexing, labels must be strings (to be indexed later).
        with pytest.raises(ConfigurationError):
            _ = MultiLabelField([1, 2], skip_indexing=False)
    def test_multilabel_field_raises_with_given_num_labels(self):
        # num_labels may only be given together with skip_indexing=True.
        with pytest.raises(ConfigurationError):
            _ = MultiLabelField([1, 2], skip_indexing=False, num_labels=4)
    def test_multilabel_field_empty_field_works(self):
        vocab = Vocabulary()
        vocab.add_token_to_namespace("label1", namespace="test_empty_labels")
        vocab.add_token_to_namespace("label2", namespace="test_empty_labels")
        f = MultiLabelField([], label_namespace="test_empty_labels")
        f.index(vocab)
        tensor = f.as_tensor(f.get_padding_lengths()).detach().cpu().numpy()
        # An empty field still yields a vector sized by the namespace.
        numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 0]))
        # empty_field() must round-trip to the same all-zero representation.
        g = f.empty_field()
        g.index(vocab)
        tensor = g.as_tensor(g.get_padding_lengths()).detach().cpu().numpy()
        numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 0]))
    def test_class_variables_for_namespace_warnings_work_correctly(self):
        # pylint: disable=protected-access
        # _already_warned_namespaces is class-level shared state: each
        # suspicious namespace should trigger a warning only once.
        assert "text" not in MultiLabelField._already_warned_namespaces
        with self.assertLogs(logger="allennlp.data.fields.multilabel_field", level="WARNING"):
            _ = MultiLabelField(["test"], label_namespace="text")
        # We've warned once, so the namespace is now recorded in the class variable.
        assert "text" in MultiLabelField._already_warned_namespaces
        with pytest.raises(AssertionError):
            # assertLogs raises AssertionError when nothing is logged, which
            # proves the second use of "text" does NOT warn again.
            with self.assertLogs(logger="allennlp.data.fields.multilabel_field", level="WARNING"):
                _ = MultiLabelField(["test2"], label_namespace="text")
        # ... but a new namespace should still log a warning.
        assert "text2" not in MultiLabelField._already_warned_namespaces
        with self.assertLogs(logger="allennlp.data.fields.multilabel_field", level="WARNING"):
            _ = MultiLabelField(["test"], label_namespace="text2")
    def test_printing_doesnt_crash(self):
        # __str__ should not raise even before indexing.
        field = MultiLabelField(["label"], label_namespace="namespace")
        print(field)
| [
"allennlp.data.vocabulary.Vocabulary",
"allennlp.data.fields.MultiLabelField",
"pytest.raises",
"numpy.array"
] | [((387, 473), 'allennlp.data.fields.MultiLabelField', 'MultiLabelField', (['[2, 3]'], {'skip_indexing': '(True)', 'label_namespace': '"""test1"""', 'num_labels': '(5)'}), "([2, 3], skip_indexing=True, label_namespace='test1',\n num_labels=5)\n", (402, 473), False, 'from allennlp.data.fields import MultiLabelField\n'), ((720, 732), 'allennlp.data.vocabulary.Vocabulary', 'Vocabulary', ([], {}), '()\n', (730, 732), False, 'from allennlp.data.vocabulary import Vocabulary\n'), ((953, 1016), 'allennlp.data.fields.MultiLabelField', 'MultiLabelField', (["['rel1', 'rel0']"], {'label_namespace': '"""rel_labels"""'}), "(['rel1', 'rel0'], label_namespace='rel_labels')\n", (968, 1016), False, 'from allennlp.data.fields import MultiLabelField\n'), ((2265, 2277), 'allennlp.data.vocabulary.Vocabulary', 'Vocabulary', ([], {}), '()\n', (2275, 2277), False, 'from allennlp.data.vocabulary import Vocabulary\n'), ((2447, 2503), 'allennlp.data.fields.MultiLabelField', 'MultiLabelField', (['[]'], {'label_namespace': '"""test_empty_labels"""'}), "([], label_namespace='test_empty_labels')\n", (2462, 2503), False, 'from allennlp.data.fields import MultiLabelField\n'), ((3957, 4012), 'allennlp.data.fields.MultiLabelField', 'MultiLabelField', (["['label']"], {'label_namespace': '"""namespace"""'}), "(['label'], label_namespace='namespace')\n", (3972, 4012), False, 'from allennlp.data.fields import MultiLabelField\n'), ((1173, 1195), 'numpy.array', 'numpy.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (1184, 1195), False, 'import numpy\n'), ((1295, 1328), 'pytest.raises', 'pytest.raises', (['ConfigurationError'], {}), '(ConfigurationError)\n', (1308, 1328), False, 'import pytest\n'), ((1346, 1404), 'allennlp.data.fields.MultiLabelField', 'MultiLabelField', (["['non integer field']"], {'skip_indexing': '(True)'}), "(['non integer field'], skip_indexing=True)\n", (1361, 1404), False, 'from allennlp.data.fields import MultiLabelField\n'), ((1503, 1536), 'pytest.raises', 'pytest.raises', 
(['ConfigurationError'], {}), '(ConfigurationError)\n', (1516, 1536), False, 'import pytest\n'), ((1554, 1614), 'allennlp.data.fields.MultiLabelField', 'MultiLabelField', (['[0, 2]'], {'skip_indexing': '(True)', 'num_labels': 'None'}), '([0, 2], skip_indexing=True, num_labels=None)\n', (1569, 1614), False, 'from allennlp.data.fields import MultiLabelField\n'), ((1711, 1744), 'pytest.raises', 'pytest.raises', (['ConfigurationError'], {}), '(ConfigurationError)\n', (1724, 1744), False, 'import pytest\n'), ((1762, 1822), 'allennlp.data.fields.MultiLabelField', 'MultiLabelField', (['[0, 2, 4]'], {'skip_indexing': '(True)', 'num_labels': '(3)'}), '([0, 2, 4], skip_indexing=True, num_labels=3)\n', (1777, 1822), False, 'from allennlp.data.fields import MultiLabelField\n'), ((1907, 1940), 'pytest.raises', 'pytest.raises', (['ConfigurationError'], {}), '(ConfigurationError)\n', (1920, 1940), False, 'import pytest\n'), ((1958, 2002), 'allennlp.data.fields.MultiLabelField', 'MultiLabelField', (['[1, 2]'], {'skip_indexing': '(False)'}), '([1, 2], skip_indexing=False)\n', (1973, 2002), False, 'from allennlp.data.fields import MultiLabelField\n'), ((2083, 2116), 'pytest.raises', 'pytest.raises', (['ConfigurationError'], {}), '(ConfigurationError)\n', (2096, 2116), False, 'import pytest\n'), ((2134, 2192), 'allennlp.data.fields.MultiLabelField', 'MultiLabelField', (['[1, 2]'], {'skip_indexing': '(False)', 'num_labels': '(4)'}), '([1, 2], skip_indexing=False, num_labels=4)\n', (2149, 2192), False, 'from allennlp.data.fields import MultiLabelField\n'), ((2660, 2679), 'numpy.array', 'numpy.array', (['[0, 0]'], {}), '([0, 0])\n', (2671, 2679), False, 'import numpy\n'), ((2865, 2884), 'numpy.array', 'numpy.array', (['[0, 0]'], {}), '([0, 0])\n', (2876, 2884), False, 'import numpy\n'), ((3187, 3236), 'allennlp.data.fields.MultiLabelField', 'MultiLabelField', (["['test']"], {'label_namespace': '"""text"""'}), "(['test'], label_namespace='text')\n", (3202, 3236), False, 'from 
allennlp.data.fields import MultiLabelField\n'), ((3399, 3428), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3412, 3428), False, 'import pytest\n'), ((3847, 3897), 'allennlp.data.fields.MultiLabelField', 'MultiLabelField', (["['test']"], {'label_namespace': '"""text2"""'}), "(['test'], label_namespace='text2')\n", (3862, 3897), False, 'from allennlp.data.fields import MultiLabelField\n'), ((3549, 3599), 'allennlp.data.fields.MultiLabelField', 'MultiLabelField', (["['test2']"], {'label_namespace': '"""text"""'}), "(['test2'], label_namespace='text')\n", (3564, 3599), False, 'from allennlp.data.fields import MultiLabelField\n')] |
#encoding: utf-8
import copy
import csv
import numpy as np
# number of motif
Nm = 8
def countMotifs(A,nodN):
    """Count the occurrences of the ``Nm`` motif types in a graph.

    Parameters
    ----------
    A : array_like
        (nodN, nodN) adjacency matrix of the graph.
    nodN : int
        Number of nodes in the graph.

    Returns
    -------
    list of int
        The 8 motif counts in order: 2-chain, 3-chain, triangle, 4-chain,
        3-star, quadrilateral, 5-chain, 4-star.
    """
    A = np.asarray(A)
    # reorder nodes by ascending degree (row sums); the chain/star counters
    # scan nodes in this order
    rd = np.argsort(np.sum(A, axis=1))
    rdA = A[rd][:, rd]
    # matrix powers: (A**k)[i][i] counts closed length-k walks at node i.
    # np.linalg.matrix_power replaces the deprecated np.matrix usage.
    A2 = np.linalg.matrix_power(A, 2)
    A3 = np.linalg.matrix_power(A, 3)
    A4 = np.linalg.matrix_power(A, 4)
    num_triangle = count_triangle(A3, nodN)
    num_quads = count_quads(A2, A4, nodN)
    Nm_1 = count_chain(rdA, nodN, 2)
    Nm_2 = count_chain(rdA, nodN, 3)
    Nm_3 = count_polygon0(num_triangle, 3)
    Nm_4 = count_chain(rdA, nodN, 4)
    Nm_5 = count_star(rdA, nodN, 3)
    Nm_6 = count_polygon0(num_quads, 4)
    Nm_7 = count_chain(rdA, nodN, 5)
    Nm_8 = count_star(rdA, nodN, 4)
    num = [Nm_1, Nm_2, Nm_3, Nm_4, Nm_5, Nm_6, Nm_7, Nm_8]
    #print ('count_motifs: '+str(num))
    return num
def count_star(A,N,neiN):
    """Count disjoint star motifs with ``neiN`` leaves.

    A node qualifies as a star center when its row sum exceeds ``neiN - 1``.
    Once a star is found, its center and the first ``neiN`` neighbours are
    removed from the working copy so they cannot be reused.
    """
    found = 0
    work = copy.copy(A)
    for center in range(N):
        if not (np.sum(work[center]) > neiN - 1):
            continue
        found += 1
        for j in range(center):
            work[N - j - 1][center] = 0
        leaves = np.nonzero(work[center])[0][:neiN]
        work[center].fill(0)
        for leaf in leaves:
            work[leaf].fill(0)
            for row in range(N):
                work[row][leaf] = 0
    return found
def find_next(a,N,i,rest):
    """Walk a chain of ``rest`` further hops starting at node ``i``.

    Edges are consumed as the walk proceeds (rows/columns of ``a`` are
    zeroed in place).  Returns the index of the final node, or -1 when the
    walk cannot be extended.
    """
    node = i
    remaining = rest
    while remaining > 0:
        if not (np.sum(a[node]) > 0):
            return -1
        # remove incoming edges of the current node, then hop to its first
        # remaining neighbour and remove its outgoing edges
        for j in range(N):
            a[j][node] = 0
        neighbours = np.nonzero(a[node])
        a[node].fill(0)
        node = neighbours[0][0]
        remaining -= 1
    # chain complete: consume the terminal node entirely
    a[node].fill(0)
    for j in range(N):
        a[j][node] = 0
    return node
def count_chain(A,N,len):
    """Count disjoint chains of ``len`` nodes in the adjacency matrix ``A``.

    The search consumes edges as it walks (see :func:`find_next`), so the
    same edge never contributes to two counted chains.  Note the parameter
    named ``len`` shadows the builtin inside this body.
    """
    chains = 0
    work = copy.copy(A)
    for start in range(N):
        if find_next(work, N, start, len - 1) >= 0:
            chains += 1
    return chains
"""
def circle_find_next(a,N,i,rest):
if rest==0:
return i
else:
if np.sum(a[i])>0:
for j in range(N):
a[j][i]=0
x = np.nonzero(a[i])
a[i].fill(0)
next_Index=x[0]
for k in next_Index:
return circle_find_next(a,N,k,rest-1)
else:
return -1
def count_polygon(A,N,edges):
n=0
a=copy.copy(A)
for i in range(N):
if circle_find_next(a,N,i,edges)==i:
n+=1
return n
"""
def count_quads(A2,A4,N):
    """Count nodes lying on a 4-cycle, from the 2nd and 4th matrix powers.

    For each node the trivial closed 4-walks (back-and-forth walks counted
    by A2) are subtracted from ``A4[i][i]``; a remainder of at least 2
    marks the node as part of a quadrilateral.
    """
    n = 0
    for i in range(N):
        trivial_walks = 0
        for j in range(N):
            if j == i:
                trivial_walks += A2[i][j] ** 2
            else:
                trivial_walks += A2[i][j]
        if A4[i][i] - trivial_walks >= 2:
            n += 1
    return n
def count_triangle(A3,N):
    """Count nodes on at least one triangle, from the diagonal of A**3."""
    return sum(1 for i in range(N) if A3[i][i] >= 2)
def count_polygon0(num,edges):
    """Convert a per-node count into a polygon count: each polygon with
    ``edges`` edges contributes ``edges`` nodes."""
    return num // edges
def writeMotifNumber(graphfile):
    """Write the 8 motif counts of a graph to ``CountMotif.csv``.

    Parameters
    ----------
    graphfile : array_like
        Square adjacency matrix of the graph.  The parameter name is kept
        for backward compatibility; presumably this is the matrix itself,
        not a file path - TODO confirm against callers.
    """
    # newline="" is required by the csv module so that rows are not
    # separated by blank lines on Windows
    with open("CountMotif.csv", "w", newline="") as fc:
        csvWriter = csv.writer(fc)
        # bug fix: countMotifs requires the node count as second argument;
        # derive it from the matrix itself
        csvWriter.writerow(countMotifs(graphfile, len(graphfile)))
    # the with-statement closes the file; the old no-op `fc.close` is gone
    return
| [
"numpy.matrix",
"numpy.sum",
"csv.writer",
"numpy.transpose",
"copy.copy",
"numpy.nonzero"
] | [((785, 797), 'copy.copy', 'copy.copy', (['A'], {}), '(A)\n', (794, 797), False, 'import copy\n'), ((1607, 1619), 'copy.copy', 'copy.copy', (['A'], {}), '(A)\n', (1616, 1619), False, 'import copy\n'), ((2739, 2753), 'csv.writer', 'csv.writer', (['fc'], {}), '(fc)\n', (2749, 2753), False, 'import csv\n'), ((135, 150), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (147, 150), True, 'import numpy as np\n'), ((205, 217), 'numpy.matrix', 'np.matrix', (['A'], {}), '(A)\n', (214, 217), True, 'import numpy as np\n'), ((238, 250), 'numpy.matrix', 'np.matrix', (['A'], {}), '(A)\n', (247, 250), True, 'import numpy as np\n'), ((271, 283), 'numpy.matrix', 'np.matrix', (['A'], {}), '(A)\n', (280, 283), True, 'import numpy as np\n'), ((833, 845), 'numpy.sum', 'np.sum', (['a[i]'], {}), '(a[i])\n', (839, 845), True, 'import numpy as np\n'), ((947, 963), 'numpy.nonzero', 'np.nonzero', (['a[i]'], {}), '(a[i])\n', (957, 963), True, 'import numpy as np\n'), ((1315, 1327), 'numpy.sum', 'np.sum', (['a[i]'], {}), '(a[i])\n', (1321, 1327), True, 'import numpy as np\n'), ((1404, 1420), 'numpy.nonzero', 'np.nonzero', (['a[i]'], {}), '(a[i])\n', (1414, 1420), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Collection of tools for writing formatted text to files.
"""
import numpy as np
from pyyeti import ytools
def getith(i, args, fncs):
    """
    Collect the i'th value from each input, typically called by
    :func:`vecwrite`.

    Parameters
    ----------
    i : integer
        Specifies which value to extract from each input; starts at 0.
    args : list of variables
        Variables to extract the i'th value from. Must be compatibly
        sized (scalars or vectors of equal length). Strings are
        considered scalars.
    fncs : list of functions
        Same length as `args`; ``fnc(arg, i)`` extracts the i'th item
        and must return an iterable of items (eg, a one-element list).

    Returns
    -------
    lst : list
        List of the i'th items extracted from each variable in `args`.

    Examples
    --------
    >>> from pyyeti import writer
    >>> import numpy as np
    >>> r = np.array([1.2, 45.])
    >>> s = 'test string'
    >>> i = 5
    >>> v = ['One', 'Two']
    >>> def f(a, i): return [a]
    >>> def f2(a, i): return [a[i]]
    >>> args = [r, s, i, v]
    >>> fncs = [f2, f, f, f2]
    >>> writer.getith(0, args, fncs)
    [1.2, 'test string', 5, 'One']
    >>> writer.getith(1, args, fncs)
    [45.0, 'test string', 5, 'Two']
    """
    items = []
    for value, extractor in zip(args, fncs):
        items += extractor(value, i)
    return items
@ytools.write_text_file
def _vecwrite(fout, string, length, args, fncs, postfunc, pfargs, so):
    """Utility routine for :func:`vecwrite`; writes one formatted line
    per (possibly sliced) index."""
    # apply the optional slice object to the full index range
    indices = range(length) if so is None else range(length)[so]
    if postfunc:
        extra = [] if pfargs is None else pfargs
        for idx in indices:
            line = string.format(*getith(idx, args, fncs))
            fout.write(postfunc(line, *extra))
    else:
        for idx in indices:
            fout.write(string.format(*getith(idx, args, fncs)))
def vecwrite(f, string, *args, postfunc=None, pfargs=None, so=None):
    """
    Vectorized write.

    Parameters
    ----------
    f : string or file_like or 1 or None
        Either a name of a file, or is a file_like object as returned
        by :func:`open` or :class:`io.StringIO`. Input as integer 1 to
        write to stdout. Can also be the name of a directory or None;
        in these cases, a GUI is opened for file selection.
    string : string
        The formatting string for the write, Python 3 format as in:
        `string`.format(a,b)
    *args : list of variables
        Variables to write. Must be compatibly sized (scalars or
        vectors or numpy arrays of compatible sizes). numpy arrays of
        length 1 are considered scalars. For 2-d numpy arrays, each
        row is written on one line and each element of the row must
        have a conversion specifier. 1-d numpy arrays are treated
        like a column 2-d numpy array. Strings are considered
        scalars.
    postfunc : function or None
        If a function, it is called with the final string (for each
        line) as the argument and it must return a string. The return
        string is what gets output. This can be handy for final string
        substitutions, for example. This input must be named and must
        be after the arguments to be printed; see example.
    pfargs : iterable or None
        If an iterable, contains extra arguments to pass to `postfunc`
        after the string argument. Must be named and after the
        arguments to be printed.
    so : slice object or None
        Allows selection of limited range and custom increment; eg:
        ``slice(0, 10, 2)``. Scalars are not sliced. Must be named and
        after the arguments to be printed.

    Returns
    -------
    None.

    Notes
    -----
    The expected vector length is determined from the first non-scalar
    input. Note that scalar values are repeated automatically as
    necessary.

    Raises
    ------
    ValueError
        When the lengths of print arguments do not match (for
        lengths > 1). Note that the slice object `so` can make
        otherwise incompatible arguments compatible; for example,
        arguments of length 10 and length 100 would be compatible if
        ``so = slice(10)`` (or similar).

    Examples
    --------
    >>> from pyyeti import writer
    >>> import sys
    >>> import numpy as np
    >>> r = np.array([1.2, 45.8])
    >>> s = 'test string'
    >>> i = 5
    >>> v = ['short string', 'a bit longer string']
    >>> frm = '{:3}, {:5.1f}, {:<25}, {}' + chr(10)
    >>> writer.vecwrite(sys.stdout, frm, i, r, v, s)
      5,   1.2, short string             , test string
      5,  45.8, a bit longer string      , test string

    >>> r = np.array([[1.1, 1.2, 1.3], [10.1, 10.2, 10.3]])
    >>> frm = '{:2}, {:=^25} : ' + ' {:6.2f}'*3 + chr(10)
    >>> writer.vecwrite(sys.stdout, frm, i, v, r)
     5, ======short string======= :   1.10   1.20   1.30
     5, ===a bit longer string=== :  10.10  10.20  10.30

    >>> def pf(s):
    ...     return s.replace('0 ', '  ')
    >>> writer.vecwrite(sys.stdout, frm, i, v, r, postfunc=pf)
     5, ======short string======= :   1.1    1.2    1.30
     5, ===a bit longer string=== :  10.1   10.2   10.30

    >>> def pf(s, s_old, s_new):
    ...     return s.replace(s_old, s_new)
    >>> writer.vecwrite(1, frm, i, v, r, postfunc=pf,
    ...                 pfargs=['0 ', '  '])
     5, ======short string======= :   1.1    1.2    1.30
     5, ===a bit longer string=== :  10.1   10.2   10.30
    """
    # Per-argument extractor helpers; one of these is chosen for each
    # input so the main loop can treat every argument uniformly.
    def _get_scalar(a, i):
        # plain scalar (or string): repeated on every line
        return [a]
    def _get_scalar1(a, i):
        # length-1 sequence: treated as a scalar
        return [a[0]]
    def _get_itemi(a, i):
        # 1-d sequence: one element per line
        return [a[i]]
    def _get_matrow(a, i):
        # 2-d array: one row per line (row elements are splatted)
        return a[i]
    length = 1
    fncs = []
    for i, arg in enumerate(args):
        if not isinstance(arg, str) and hasattr(arg, "__len__"):
            # non-string sequence: pick the extractor and note its length
            if np.ndim(arg) == 2:
                fncs.append(_get_matrow)
                curlen = np.size(arg, 0)
            elif len(arg) == 1:
                fncs.append(_get_scalar1)
                curlen = 1
            else:
                fncs.append(_get_itemi)
                curlen = len(arg)
            if curlen > 1:
                if length > 1:
                    # check compatibility with the previously-seen length;
                    # a slice object may reconcile unequal raw lengths
                    if so is not None:
                        if range(curlen)[so] != range(length)[so]:
                            msg = (
                                "length mismatch with slice object:"
                                f" arg # {i + 1} is incompatible with "
                                "previous args"
                            )
                            raise ValueError(msg)
                    elif curlen != length:
                        msg = (
                            f"length mismatch: arg # {i + 1} has "
                            f"length {curlen}; expected {length} or 1."
                        )
                        raise ValueError(msg)
                length = curlen
        else:
            # scalars (including strings) repeat on every line
            fncs.append(_get_scalar)
    _vecwrite(f, string, length, args, fncs, postfunc, pfargs, so)
def formheader(headers, widths, formats, sep=(0, 2), just=-1, ulchar="-"):
    """
    Form a nice table header for formatted output via f.write().

    Parameters
    ----------
    headers : list or tuple
        List or tuple of column header strings, eg:
        ['Desc', 'Maximum', 'Time']. Can also be a list of lists (or
        tuples) to support multiple header lines, eg:
        [['Maximum', 'Minimum', 'Time'], ['(lbs)', '(lbs)', '(sec)']]
    widths : iterable
        Iterable of field widths, eg: (25, 10, 8) or [25, 10, 8]. If
        an element in `widths` is < length of corresponding word in a
        header-line, the length of the word is used for that field.
        Note that if this doesn't match with `formats`, the columns
        will not line up nicely.
    formats : list or tuple
        List or tuple of format specifiers for the values in the table,
        eg: ['{:25s}', '{:10f}', '{:8.3f}']
    sep : string, list, tuple, or integer
        Defines 'spacer' in front of each word:

        - if a string, that string is used in front of all headers
        - use a list or tuple of strings for complete control
        - if an integer, that many spaces are used in front of all
          headers
        - use a vector of integers to specify a variable number of
          spaces
        - if len(sep) < len(headers), the last element is used for
          all remaining elements
    just : string or integer or list
        Justification flag or flags for each header string:

        - 'l', 'c', 'r' (or -1, 0, 1) to left, center, or right
          justify headers in their fields
        - can be a list or tuple of len(headers) for complete
          control
    ulchar : string
        Character to use for underlining of headers.

    Returns
    -------
    hu : string
        Contains formatted header string(s) and the underline string.
    f : string
        Final formatting string.

    Examples
    --------
    >>> import numpy as np
    >>> import sys
    >>> from pyyeti import writer
    >>> descs = ['Item 1', 'A different item']
    >>> mx = np.array([[1.2, 2.3], [3.4, 4.5]]) * 1000
    >>> time = np.array([[1.234], [2.345]])
    >>> headers = [['The']*3, ['Descriptions', 'Maximum', 'Time']]
    >>> formats = ['{:<25s}', '{:10.2f}', '{:8.3f}']
    >>> widths = [25, 10, 8]
    >>> hu, f = writer.formheader(headers, widths, formats,
    ...                           sep=[4, 5, 2], just=0)
    >>> fout = sys.stdout
    >>> if 1: # just so all output is together
    ...     b = fout.write(hu)
    ...     writer.vecwrite(fout, f, descs, mx, time)
                   The                 The       The
               Descriptions          Maximum     Time
        -------------------------   ----------  --------
        Item 1                         1200.00  2300.000
        A different item               3400.00  4500.000
    """
    # validate inputs and measure the widest word in each column
    if not isinstance(headers, (list, tuple)):
        raise ValueError("input 'headers' must be a list or tuple")
    if isinstance(headers[0], (list, tuple)):
        # multiple header rows: every row must have the same column count
        length = len(headers[0])
        nheaders = len(headers)
        mxlengths = np.array([len(s) for s in headers[0]])
        for j in range(1, nheaders):
            if len(headers[j]) != length:
                raise ValueError(
                    f"headers[{len(headers[j])}] != length of previous headers"
                )
            for k in range(length):
                mxlengths[k] = max(mxlengths[k], len(headers[j][k]))
    else:
        # single header row; nheaders == 0 flags this mode below
        nheaders = 0
        mxlengths = np.array([len(s) for s in headers])
        length = len(headers)
    if not length == len(formats) == len(widths):
        s = ""
        if isinstance(headers[0], (list, tuple)):
            s = "[*]"
        raise ValueError(
            f"this check failed: ``len(headers{s}) == len(formats) == len(widths)``"
        )
    def strexp(string, width, just):
        # justify `string` in a field of `width` per the `just` flag
        if just == -1 or just == "l":
            return string.ljust(width)
        if just == 0 or just == "c":
            return string.center(width)
        return string.rjust(width)
    # normalize `just` and `sep` to lists (last element repeats)
    if isinstance(just, (str, int)):
        just = [just]
    if isinstance(sep, int):
        sep = " " * sep
    if isinstance(sep, str):
        sep = [sep]
    if nheaders > 0:
        h = [""] * nheaders
    else:
        h = ""
    u, f = "", ""
    # build header line(s) `h`, underline `u` and format string `f`
    # column by column
    for j in range(length):
        if j >= len(just):
            cj = just[-1]
        else:
            cj = just[j]
        if j >= len(sep):
            csep = sep[-1]
        else:
            csep = sep[j]
        if isinstance(csep, int):
            csep = " " * csep
        # field is widened if a header word is longer than widths[j]
        w = max(widths[j], mxlengths[j])
        if nheaders > 0:
            for k in range(nheaders):
                h[k] += csep + strexp(headers[k][j], w, just=cj)
        else:
            h += csep + strexp(headers[j], w, just=cj)
        u += csep + ulchar * w
        f += csep + formats[j]
    # strip trailing spaces and terminate each line
    if nheaders > 0:
        h = [hj.rstrip() + "\n" for hj in h]
    else:
        h = h.rstrip() + "\n"
    u = u.rstrip() + "\n"
    f = f.rstrip() + "\n"
    return "".join(h) + u, f
| [
"numpy.size",
"numpy.ndim"
] | [((5983, 5995), 'numpy.ndim', 'np.ndim', (['arg'], {}), '(arg)\n', (5990, 5995), True, 'import numpy as np\n'), ((6068, 6083), 'numpy.size', 'np.size', (['arg', '(0)'], {}), '(arg, 0)\n', (6075, 6083), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
def sigmoid(x: np.ndarray) -> np.ndarray:
    """Numerically-safe logistic function 1 / (1 + exp(-x)).

    Inputs are clipped to +/-34.538776394910684 so that exp never
    overflows in float precision.
    """
    limit = 34.538776394910684
    clipped = np.clip(x, -limit, limit)
    return 1.0 / (1.0 + np.exp(-clipped))
# Intersection of Union
# bboxesX[:4] is numpy array of xyxy (xmin, ymin, xmax, ymax)
# bboxes1: the bounding box which has the highest confidence score
# bboxes2: the bounding boxes of same category expect above
def bboxes_iou(
    bboxes1: np.ndarray,
    bboxes2: np.ndarray,
    disable_iou_subset: bool = False
) -> np.ndarray:
    """Row-wise IoU between two arrays of xyxy boxes.

    ``bboxes1`` holds the highest-confidence box (broadcast against the
    rows of ``bboxes2``); columns 0-3 are (xmin, ymin, xmax, ymax).
    Unless ``disable_iou_subset`` is set, a box of ``bboxes2`` that is
    fully contained in its ``bboxes1`` partner is given IoU 1.0 so it is
    always suppressed downstream.
    """
    area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
    area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
    # intersection rectangle (clamped to zero extent when disjoint)
    overlap_min = np.maximum(bboxes1[:, :2], bboxes2[:, :2])
    overlap_max = np.minimum(bboxes1[:, 2:4], bboxes2[:, 2:4])
    extent = np.maximum(overlap_max - overlap_min, 0.0)
    inter_areas = extent[:, 0] * extent[:, 1]
    union_areas = area1 + area2 - inter_areas
    # floor at float32 eps to avoid exact zeros
    ious = np.maximum(1.0 * inter_areas / union_areas, np.finfo(np.float32).eps)
    if not disable_iou_subset:
        contained = (
            (bboxes1[:, 0] <= bboxes2[:, 0])
            & (bboxes1[:, 1] <= bboxes2[:, 1])
            & (bboxes1[:, 2] >= bboxes2[:, 2])
            & (bboxes1[:, 3] >= bboxes2[:, 3])
        )
        ious = np.maximum(ious, contained)
    return ious
# filter bounding boxes using (soft) Non-Maximum Suppression
# paper of soft NMS: https://arxiv.org/abs/1704.04503
# bboxes is numpy array of
# offset 0-3: xyxy (xmin, ymin, xmax, ymax)
# offset 4: category id (int)
# offset 5: confidence score
def filter_bboxes(
    bboxes: np.ndarray,
    conf_threshold: float = 0.3,
    iou_threshold: float = 0.45,
    disable_soft_nms: bool = False,
    disable_iou_subset: bool = False
) -> np.ndarray:
    """Filter bounding boxes with (soft) Non-Maximum Suppression.

    Soft NMS paper: https://arxiv.org/abs/1704.04503

    ``bboxes`` is a (N, 6) array whose columns are:
    0-3 xyxy (xmin, ymin, xmax, ymax), 4 category id, 5 confidence score.
    Returns the surviving boxes in the same (N, 6) layout.

    Bug fix: ``.astype(np.float)`` used the deprecated ``np.float``
    alias, which was removed in NumPy 1.24 and now raises
    ``AttributeError``; ``np.float64`` (the alias's meaning) is used
    instead.
    """
    if bboxes.shape[0] == 0:
        return bboxes
    # drop detections below the confidence threshold
    bboxes = bboxes[bboxes[:, 5] > conf_threshold]
    if bboxes.shape[0] == 0:
        return bboxes
    # column 6 holds the (decaying) confidence used by soft NMS
    bboxes = np.insert(bboxes, 6, bboxes[:, 5], axis=1)
    # (soft) NMS independently for each class
    unique_category_ids = list(set(bboxes[:, 4]))
    best_bboxes = list()
    for cat in unique_category_ids:
        cat_bboxes = bboxes[bboxes[:, 4] == cat]
        while cat_bboxes.shape[0] > 0:
            if cat_bboxes.shape[0] == 1:
                best_bboxes.append(cat_bboxes)
                break
            # keep the current best box, then suppress/decay its overlaps
            max_conf = np.argmax(cat_bboxes[:, 6])
            best_bbox = cat_bboxes[max_conf:max_conf + 1]
            best_bboxes.append(best_bbox)
            cat_bboxes = np.delete(cat_bboxes, max_conf, axis=0)
            ious = bboxes_iou(
                bboxes1=best_bbox,
                bboxes2=cat_bboxes,
                disable_iou_subset=disable_iou_subset
            )
            if disable_soft_nms:
                # classic NMS: hard-drop everything over the IoU threshold
                cat_bboxes = cat_bboxes[ious < iou_threshold]
            else:
                # soft NMS: linearly decay the confidence of overlaps
                iou_mask = (ious >= iou_threshold).astype(np.float64)
                cat_bboxes[:, 6] = cat_bboxes[:, 6] * (
                    1.0 - (ious * iou_mask)
                )
                cat_bboxes = cat_bboxes[cat_bboxes[:, 6] > conf_threshold]
    return np.concatenate(best_bboxes, axis=0)[:, :6]
| [
"numpy.minimum",
"numpy.maximum",
"numpy.argmax",
"numpy.clip",
"numpy.insert",
"numpy.finfo",
"numpy.exp",
"numpy.delete",
"numpy.concatenate"
] | [((155, 196), 'numpy.clip', 'np.clip', (['x', '(-sigmoid_range)', 'sigmoid_range'], {}), '(x, -sigmoid_range, sigmoid_range)\n', (162, 196), True, 'import numpy as np\n'), ((809, 851), 'numpy.maximum', 'np.maximum', (['bboxes1[:, :2]', 'bboxes2[:, :2]'], {}), '(bboxes1[:, :2], bboxes2[:, :2])\n', (819, 851), True, 'import numpy as np\n'), ((870, 914), 'numpy.minimum', 'np.minimum', (['bboxes1[:, 2:4]', 'bboxes2[:, 2:4]'], {}), '(bboxes1[:, 2:4], bboxes2[:, 2:4])\n', (880, 914), True, 'import numpy as np\n'), ((935, 974), 'numpy.maximum', 'np.maximum', (['(right_downs - left_ups)', '(0.0)'], {}), '(right_downs - left_ups, 0.0)\n', (945, 974), True, 'import numpy as np\n'), ((2322, 2364), 'numpy.insert', 'np.insert', (['bboxes', '(6)', 'bboxes[:, 5]'], {'axis': '(1)'}), '(bboxes, 6, bboxes[:, 5], axis=1)\n', (2331, 2364), True, 'import numpy as np\n'), ((1599, 1626), 'numpy.maximum', 'np.maximum', (['ious', 'is_subset'], {}), '(ious, is_subset)\n', (1609, 1626), True, 'import numpy as np\n'), ((3477, 3512), 'numpy.concatenate', 'np.concatenate', (['best_bboxes'], {'axis': '(0)'}), '(best_bboxes, axis=0)\n', (3491, 3512), True, 'import numpy as np\n'), ((221, 231), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (227, 231), True, 'import numpy as np\n'), ((1167, 1187), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1175, 1187), True, 'import numpy as np\n'), ((2729, 2756), 'numpy.argmax', 'np.argmax', (['cat_bboxes[:, 6]'], {}), '(cat_bboxes[:, 6])\n', (2738, 2756), True, 'import numpy as np\n'), ((2882, 2921), 'numpy.delete', 'np.delete', (['cat_bboxes', 'max_conf'], {'axis': '(0)'}), '(cat_bboxes, max_conf, axis=0)\n', (2891, 2921), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Least-squares example: minimize ||Ax - b||_2^2 with CVXPY.

Created on Tue Mar  2 23:39:52 2021

@author: Jonas
"""
# Import packages.
import cvxpy as cp
import numpy as np
# Generate data.
# NOTE: the seed must be set before the randn calls so A and b are
# reproducible; do not reorder these statements.
m = 20
n = 15
np.random.seed(1)
A = np.random.randn(m, n)
b = np.random.randn(m)
# Define and solve the CVXPY problem.
x = cp.Variable(n)
cost = cp.sum_squares(A @ x - b)
prob = cp.Problem(cp.Minimize(cost))
prob.solve()
# Print result.
print("\nThe optimal value is", prob.value)
print("The optimal x is")
print(x.value)
print("The norm of the residual is ", cp.norm(A @ x - b, p=2).value)
"numpy.random.seed",
"numpy.random.randn",
"cvxpy.norm",
"cvxpy.Variable",
"cvxpy.sum_squares",
"cvxpy.Minimize"
] | [((173, 190), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (187, 190), True, 'import numpy as np\n'), ((195, 216), 'numpy.random.randn', 'np.random.randn', (['m', 'n'], {}), '(m, n)\n', (210, 216), True, 'import numpy as np\n'), ((221, 239), 'numpy.random.randn', 'np.random.randn', (['m'], {}), '(m)\n', (236, 239), True, 'import numpy as np\n'), ((283, 297), 'cvxpy.Variable', 'cp.Variable', (['n'], {}), '(n)\n', (294, 297), True, 'import cvxpy as cp\n'), ((305, 330), 'cvxpy.sum_squares', 'cp.sum_squares', (['(A @ x - b)'], {}), '(A @ x - b)\n', (319, 330), True, 'import cvxpy as cp\n'), ((349, 366), 'cvxpy.Minimize', 'cp.Minimize', (['cost'], {}), '(cost)\n', (360, 366), True, 'import cvxpy as cp\n'), ((521, 544), 'cvxpy.norm', 'cp.norm', (['(A @ x - b)'], {'p': '(2)'}), '(A @ x - b, p=2)\n', (528, 544), True, 'import cvxpy as cp\n')] |
#%%
import os
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers, losses, optimizers, Sequential
# Reproducibility and TF logging setup
tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')
batchsz = 128 # batch size
total_words = 10000 # vocabulary size N_vocab
max_review_len = 80 # maximum sentence length s; longer reviews are truncated, shorter ones padded
embedding_len = 100 # word-vector feature length f
# Load the IMDB dataset; reviews come integer-encoded, one number per word
(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words)
print(x_train.shape, len(x_train[0]), y_train.shape)
print(x_test.shape, len(x_test[0]), y_test.shape)
#%%
x_train[0]
#%%
# Word-to-integer encoding table
word_index = keras.datasets.imdb.get_word_index()
# for k,v in word_index.items():
#     print(k,v)
#%%
# Shift all indices by 3 to make room for the special tokens below
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
# Inverted table: integer -> word
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
    """Decode an integer-encoded review back into a readable string."""
    return ' '.join([reverse_word_index.get(i, '?') for i in text])
decode_review(x_train[8])
#%%
# x_train:[b, 80]
# x_test: [b, 80]
# Truncate/pad reviews to a common length; long reviews keep their tail,
# short ones are padded at the front (pad_sequences defaults)
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len)
# Build datasets: shuffle, batch, and drop the last incomplete batch
db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.batch(batchsz, drop_remainder=True)
print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train))
print('x_test shape:', x_test.shape)
class MyRNN(keras.Model):
    # Two stacked GRU layers followed by a small dense classifier
    def __init__(self, units):
        """Build the network; ``units`` is the RNN state-vector length f."""
        super(MyRNN, self).__init__()
        # Word embedding: [b, 80] => [b, 80, 100]
        self.embedding = layers.Embedding(total_words, embedding_len,
                                          input_length=max_review_len)
        # Build the RNN: two GRU layers with dropout; only the second one
        # collapses the sequence into a single state vector
        self.rnn = keras.Sequential([
            layers.GRU(units, dropout=0.5, return_sequences=True),
            layers.GRU(units, dropout=0.5)
        ])
        # Classification head mapping the RNN output features to one
        # logit (binary classification): [b, units] => [b, 1]
        self.outlayer = Sequential([
            layers.Dense(32),
            layers.Dropout(rate=0.5),
            layers.ReLU(),
            layers.Dense(1)])
    def call(self, inputs, training=None):
        """Forward pass: integer word ids -> sentiment probability."""
        x = inputs # [b, 80]
        # embedding: [b, 80] => [b, 80, 100]
        x = self.embedding(x)
        # rnn compute: [b, 80, 100] => [b, units]
        x = self.rnn(x)
        # classifier on the final RNN state: [b, units] => [b, 1]
        x = self.outlayer(x,training)
        # p(y is pos|x)
        prob = tf.sigmoid(x)
        return prob
def main():
    """Train and evaluate the GRU sentiment model on IMDB."""
    units = 32 # RNN state-vector length f
    epochs = 50 # number of training epochs
    model = MyRNN(units)
    # Compile: Adam optimizer, binary cross-entropy loss
    model.compile(optimizer = optimizers.Adam(0.001),
                  loss = losses.BinaryCrossentropy(),
                  metrics=['accuracy'])
    # Train with per-epoch validation on the test split
    model.fit(db_train, epochs=epochs, validation_data=db_test)
    # Final evaluation on the test set
    model.evaluate(db_test)
if __name__ == '__main__':
    main()
| [
"tensorflow.random.set_seed",
"numpy.random.seed",
"tensorflow.keras.datasets.imdb.get_word_index",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.GRU",
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.datasets.imdb.load_data",
"tensorflow.data.Dataset.... | [((165, 187), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(22)'], {}), '(22)\n', (183, 187), True, 'import tensorflow as tf\n'), ((188, 206), 'numpy.random.seed', 'np.random.seed', (['(22)'], {}), '(22)\n', (202, 206), True, 'import numpy as np\n'), ((255, 286), 'tensorflow.__version__.startswith', 'tf.__version__.startswith', (['"""2."""'], {}), "('2.')\n", (280, 286), True, 'import tensorflow as tf\n'), ((497, 549), 'tensorflow.keras.datasets.imdb.load_data', 'keras.datasets.imdb.load_data', ([], {'num_words': 'total_words'}), '(num_words=total_words)\n', (526, 549), False, 'from tensorflow import keras\n'), ((693, 729), 'tensorflow.keras.datasets.imdb.get_word_index', 'keras.datasets.imdb.get_word_index', ([], {}), '()\n', (727, 729), False, 'from tensorflow import keras\n'), ((1250, 1324), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'keras.preprocessing.sequence.pad_sequences', (['x_train'], {'maxlen': 'max_review_len'}), '(x_train, maxlen=max_review_len)\n', (1292, 1324), False, 'from tensorflow import keras\n'), ((1334, 1407), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'keras.preprocessing.sequence.pad_sequences', (['x_test'], {'maxlen': 'max_review_len'}), '(x_test, maxlen=max_review_len)\n', (1376, 1407), False, 'from tensorflow import keras\n'), ((1456, 1510), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train, y_train)'], {}), '((x_train, y_train))\n', (1490, 1510), True, 'import tensorflow as tf\n'), ((1591, 1643), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_test, y_test)'], {}), '((x_test, y_test))\n', (1625, 1643), True, 'import tensorflow as tf\n'), ((1737, 1759), 'tensorflow.reduce_max', 'tf.reduce_max', (['y_train'], {}), '(y_train)\n', (1750, 1759), True, 'import tensorflow as tf\n'), ((1761, 1783), 'tensorflow.reduce_min', 'tf.reduce_min', (['y_train'], {}), '(y_train)\n', (1774, 1783), 
True, 'import tensorflow as tf\n'), ((2008, 2081), 'tensorflow.keras.layers.Embedding', 'layers.Embedding', (['total_words', 'embedding_len'], {'input_length': 'max_review_len'}), '(total_words, embedding_len, input_length=max_review_len)\n', (2024, 2081), False, 'from tensorflow.keras import layers, losses, optimizers, Sequential\n'), ((2878, 2891), 'tensorflow.sigmoid', 'tf.sigmoid', (['x'], {}), '(x)\n', (2888, 2891), True, 'import tensorflow as tf\n'), ((3046, 3068), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (3061, 3068), False, 'from tensorflow.keras import layers, losses, optimizers, Sequential\n'), ((3095, 3122), 'tensorflow.keras.losses.BinaryCrossentropy', 'losses.BinaryCrossentropy', ([], {}), '()\n', (3120, 3122), False, 'from tensorflow.keras import layers, losses, optimizers, Sequential\n'), ((2190, 2243), 'tensorflow.keras.layers.GRU', 'layers.GRU', (['units'], {'dropout': '(0.5)', 'return_sequences': '(True)'}), '(units, dropout=0.5, return_sequences=True)\n', (2200, 2243), False, 'from tensorflow.keras import layers, losses, optimizers, Sequential\n'), ((2257, 2287), 'tensorflow.keras.layers.GRU', 'layers.GRU', (['units'], {'dropout': '(0.5)'}), '(units, dropout=0.5)\n', (2267, 2287), False, 'from tensorflow.keras import layers, losses, optimizers, Sequential\n'), ((2427, 2443), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(32)'], {}), '(32)\n', (2439, 2443), False, 'from tensorflow.keras import layers, losses, optimizers, Sequential\n'), ((2454, 2478), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (2468, 2478), False, 'from tensorflow.keras import layers, losses, optimizers, Sequential\n'), ((2489, 2502), 'tensorflow.keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (2500, 2502), False, 'from tensorflow.keras import layers, losses, optimizers, Sequential\n'), ((2513, 2528), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', 
(2525, 2528), False, 'from tensorflow.keras import layers, losses, optimizers, Sequential\n')] |
"""
Coordination number
===================
This filter calculates the coordination number of the atoms in the system.
It uses the values specified in the bonding table to determine whether two
atoms are bonded. If no minimum/maximum bond lengths are specified for a
given pair of elements then bonds between them will not be counted.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
from . import base
from . import _filtering
from ...system.atoms import elements
from six.moves import range
class CoordinationNumberFilterSettings(base.BaseSettings):
    """
    Settings for the coordination number filter.

    Registered settings:

    * ``filteringEnabled`` -- whether atoms outside the coordination
      range are filtered out (default ``False``)
    * ``minCoordNum`` -- minimum coordination number kept when
      filtering (default 0)
    * ``maxCoordNum`` -- maximum coordination number kept when
      filtering (default 100)
    """
    def __init__(self):
        super(CoordinationNumberFilterSettings, self).__init__()
        self.registerSetting("filteringEnabled", default=False)
        self.registerSetting("minCoordNum", default=0)
        self.registerSetting("maxCoordNum", default=100)
class CoordinationNumberFilter(base.BaseFilter):
    """
    The coordination number filter.
    """
    def apply(self, filterInput, settings):
        """Apply the coordination number filter.

        Builds min/max squared bond-length tables from the element
        bonding dictionary, then delegates to the C extension
        ``_filtering.coordNumFilter`` to compute per-atom coordination
        numbers and (optionally) filter atoms outside
        [minCoordNum, maxCoordNum]. Returns a
        :class:`base.FilterResult` carrying the "Coordination number"
        scalars for the remaining visible atoms.
        """
        # unpack inputs
        inputState = filterInput.inputState
        NScalars = filterInput.NScalars
        fullScalars = filterInput.fullScalars
        NVectors = filterInput.NVectors
        fullVectors = filterInput.fullVectors
        visibleAtoms = filterInput.visibleAtoms
        specieList = inputState.specieList
        NSpecies = len(specieList)
        bondDict = elements.bondDict
        # settings
        filteringEnabled = int(settings.getSetting("filteringEnabled"))
        minCoordNum = settings.getSetting("minCoordNum")
        maxCoordNum = settings.getSetting("maxCoordNum")
        # arrays to store min/max bond lengths (squared, see below)
        bondMinArray = np.zeros((NSpecies, NSpecies), dtype=np.float64)
        bondMaxArray = np.zeros((NSpecies, NSpecies), dtype=np.float64)
        # construct bonds array (bond distances squared); pairs missing
        # from the bonding table keep 0 and are never counted as bonded
        calcBonds = False
        maxBond = -1
        for i in range(NSpecies):
            symi = specieList[i]
            if symi in bondDict:
                d = bondDict[symi]
                for j in range(NSpecies):
                    symj = specieList[j]
                    if symj in d:
                        bondMin, bondMax = d[symj]
                        # store squared distances, symmetrically
                        bondMinArray[i][j] = bondMin * bondMin
                        bondMinArray[j][i] = bondMinArray[i][j]
                        bondMaxArray[i][j] = bondMax * bondMax
                        bondMaxArray[j][i] = bondMaxArray[i][j]
                        if bondMax > maxBond:
                            maxBond = bondMax
                        if bondMax > 0:
                            calcBonds = True
                        self.logger.info("  %s - %s; bond range: %f -> %f", symi, symj, bondMin, bondMax)
        if not calcBonds:
            self.logger.warning("No bonds defined: all coordination numbers will be zero")
        # new scalars array (one coordination number per visible atom)
        scalars = np.zeros(len(visibleAtoms), dtype=np.float64)
        # run filter (C extension; fills `scalars` and compacts `visibleAtoms`)
        NVisible = _filtering.coordNumFilter(visibleAtoms, inputState.pos, inputState.specie, NSpecies, bondMinArray, bondMaxArray,
                                             maxBond, inputState.cellDims, inputState.PBC, scalars, minCoordNum, maxCoordNum,
                                             NScalars, fullScalars, filteringEnabled, NVectors, fullVectors)
        # resize visible atoms and scalars to the surviving count
        visibleAtoms.resize(NVisible, refcheck=False)
        scalars.resize(NVisible, refcheck=False)
        # create result and add scalars
        result = base.FilterResult()
        result.addScalars("Coordination number", scalars)
        return result
| [
"numpy.zeros",
"six.moves.range"
] | [((1854, 1902), 'numpy.zeros', 'np.zeros', (['(NSpecies, NSpecies)'], {'dtype': 'np.float64'}), '((NSpecies, NSpecies), dtype=np.float64)\n', (1862, 1902), True, 'import numpy as np\n'), ((1926, 1974), 'numpy.zeros', 'np.zeros', (['(NSpecies, NSpecies)'], {'dtype': 'np.float64'}), '((NSpecies, NSpecies), dtype=np.float64)\n', (1934, 1974), True, 'import numpy as np\n'), ((2105, 2120), 'six.moves.range', 'range', (['NSpecies'], {}), '(NSpecies)\n', (2110, 2120), False, 'from six.moves import range\n'), ((2278, 2293), 'six.moves.range', 'range', (['NSpecies'], {}), '(NSpecies)\n', (2283, 2293), False, 'from six.moves import range\n')] |
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.base import BaseEstimator
from sklearn.pipeline import Pipeline
def bhattacharyya(x, y):
    """Squared (1 - Bhattacharyya coefficient) between histogram vectors."""
    coeff = np.dot(np.sqrt(x), np.sqrt(y.T))
    return (1 - coeff) ** 2
class SVMClassifier(BaseEstimator):
    """SVM classifier preceded by feature standardization.

    Supports the built-in SVC kernels plus 'chi2' and 'additive_chi2'
    (mapped to the sklearn pairwise kernel functions). For non-rbf
    kernels the scaler does not center the data (chi2-style kernels
    require non-negative features).

    Bug fix: the ``tol`` constructor argument was previously ignored —
    ``SVC`` was hard-coded to ``tol=1e-5`` — it is now passed through.
    """

    def __init__(self, C=1.0, kernel='rbf', gamma=0.0, class_weight='auto',
                 tol=1e-3):
        pipeline_steps = []

        # Feature normalization: only center for the rbf kernel
        if kernel == 'rbf':
            with_mean = True
        else:
            with_mean = False
        self.scaler = StandardScaler(with_mean=with_mean)
        pipeline_steps.append(('scaler', self.scaler))

        # Feature classification: resolve custom kernel names to callables
        if kernel == 'chi2':
            kernel = chi2_kernel
        elif kernel == 'additive_chi2':
            kernel = additive_chi2_kernel
        svm = SVC(C=C, kernel=kernel, gamma=gamma, class_weight=class_weight,
                  tol=tol)
        pipeline_steps.append(('svm', svm))

        self.pipeline = Pipeline(pipeline_steps)

    def fit(self, X, y=None):
        """Fit scaler and SVM; returns self."""
        self.pipeline.fit(X, y)
        return self

    def transform(self, X, y=None, copy=None):
        """Apply the pipeline's transform (scaler step)."""
        return self.pipeline.transform(X, y, copy)

    def predict(self, X):
        """Predict class labels for X."""
        return self.pipeline.predict(X)
| [
"sklearn.pipeline.Pipeline",
"numpy.sqrt",
"sklearn.preprocessing.StandardScaler",
"sklearn.svm.SVC"
] | [((652, 687), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': 'with_mean'}), '(with_mean=with_mean)\n', (666, 687), False, 'from sklearn.preprocessing import StandardScaler\n'), ((935, 1009), 'sklearn.svm.SVC', 'SVC', ([], {'C': 'C', 'kernel': 'kernel', 'gamma': 'gamma', 'class_weight': 'class_weight', 'tol': '(1e-05)'}), '(C=C, kernel=kernel, gamma=gamma, class_weight=class_weight, tol=1e-05)\n', (938, 1009), False, 'from sklearn.svm import SVC\n'), ((1097, 1121), 'sklearn.pipeline.Pipeline', 'Pipeline', (['pipeline_steps'], {}), '(pipeline_steps)\n', (1105, 1121), False, 'from sklearn.pipeline import Pipeline\n'), ((296, 306), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (303, 306), True, 'import numpy as np\n'), ((308, 320), 'numpy.sqrt', 'np.sqrt', (['y.T'], {}), '(y.T)\n', (315, 320), True, 'import numpy as np\n')] |
import numpy as np
def train_test_split(data, test_size, random_state=None):
    """Randomly split a DataFrame into train and test subsets.

    Parameters
    ----------
    data : pandas.DataFrame (anything supporting ``.iloc`` and ``len``)
    test_size : float, fraction of rows to place in the test set
    random_state : int or None, seed fed to ``np.random.seed``

    Returns
    -------
    (train, test) : the two positional subsets of ``data``
    """
    np.random.seed(random_state)
    order = np.random.permutation(len(data))
    n_test = int(len(data) * test_size)
    test_idx, train_idx = order[:n_test], order[n_test:]
    return data.iloc[train_idx], data.iloc[test_idx]
| [
"numpy.random.seed"
] | [((83, 111), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (97, 111), True, 'import numpy as np\n')] |
import numpy as np
class Model(object):
    def __init__(self):
        """Model init.

        Layers are kept in a plain python list, in the order they are
        added (forward propagation runs through them front to back).
        """
        self.layers = []
    def add(self, layer):
        """Append a layer to the network.

        Parameters
        ----------
        layer : {Layer-like, scalar}
            Object providing ``forward_propagation(x)`` and
            ``back_propagation(error, learn_rate)`` (as used by
            :meth:`train`).
        """
        self.layers.append(layer)
def __mse(self, predict_y, y, is_forward):
"""mean square error of single sample
Parameters
----------
predict_y : {array-like, vector} of shape (sample_label_number)
y : {array-like, vector} of shape (sample_label_number)
is_forward : {bool-like, scalar} whether is forward propagation
Returns
-------
loss_result : {array-like, vector} of shape (sample_label_number)
"""
if is_forward:
return 0.5 * ((predict_y - y) ** 2)
else: # the delta
return predict_y - y
def __cross_entropy(self, predict_y, y, is_forward):
"""cross entropy error of single sample
Parameters
----------
predict_y : {array-like, vector} of shape (sample_label_number)
y : {array-like, vector} of shape (sample_label_number)
is_forward : {bool-like, scalar} whether is forward propagation
Returns
-------
loss_result : {array-like, vector} of shape (sample_label_number)
"""
predict_y[predict_y == 0] =1e-12
if is_forward:
return - y * np.log(predict_y)
else: # backward delta
return - y / predict_y
def __final_loss(self, loss):
"""compute final loss
Parameters
----------
loss : {array-like, vector} of shape (sample_label_number)
Returns
-------
result : {float-like, scalar}
"""
return np.squeeze(np.mean(loss))
def set_loss_function(self, loss_function):
"""set final loss function
Parameters
----------
loss_function : {string-like, scalar} value of {'mse', 'cross_entropy'}
"""
if loss_function == 'mse':
self.__loss_function = self.__mse
elif loss_function == 'cross_entropy':
self.__loss_function = self.__cross_entropy
else:
ValueError("loss function name is wrong")
    def train(self, X, y, learn_rate, epochs):
        """train network

        Parameters
        ----------
        X : {array-like, tensor(4-dim)} of shape (sample_number, in_data_col, in_data_row, in_data_channel)
        y : {array-like, matrix} of shape (sample_number, sample_label_number)
        learn_rate : {float-like, scalar}
        epochs : {int-like, scalar} dataset learning times

        Raises
        ------
        Exception
            If the stored loss function is ``None``.
        """
        if self.__loss_function is None:
            raise Exception("set loss function first")
        for epoch_index in range(epochs):
            loss = 0
            # plain SGD: one forward/backward pass per sample
            for sample_index in range(len(X)):
                single_train_sample_out = X[sample_index]
                # forward: push the sample through every layer in order
                for layer in self.layers:
                    single_train_sample_out = layer.forward_propagation(single_train_sample_out)
                # accumulate forward loss, then get the output delta
                loss += self.__loss_function(predict_y = single_train_sample_out, y = y[sample_index], is_forward = True)
                error = self.__loss_function(predict_y = single_train_sample_out, y = y[sample_index], is_forward = False)
                # backward: propagate the delta through the layers in reverse
                for j in range(len(self.layers)):
                    layer_index = len(self.layers) - j - 1
                    error = self.layers[layer_index].back_propagation(error, learn_rate)
            print("epochs {} / {} loss : {}".format(epoch_index, epochs, self.__final_loss(loss / len(X))))
def train_eval(self, X, y, learn_rate, epochs, X_test, y_test):
"""train network with acc of test
Parameters
----------
X : {array-like, tensor(4-dim)} of shape (sample_number, in_data_col, in_data_row, in_data_channel)
y : {array-like, matrix} of shape (sample_number, sample_label_number)
learn_rate : {float-like, scalar}
epochs : {int-like, scalar} dataset learning times
X_test : {array-like, tensor(4-dim)} of shape (test_sample_number, in_data_col, in_data_row, in_data_channel)
y_test : {array-like, matrix} of shape (test_sample_number, sample_label_number)
"""
if self.__loss_function is None:
raise Exception("set loss function first")
for epoch_index in range(epochs):
loss = 0
print(loss)
for sample_index in range(len(X)):
single_sample_train_out = X[sample_index]
# forward
for layer in self.layers:
single_sample_train_out = layer.forward_propagation(single_sample_train_out)
loss += self.__loss_function(predict_y = single_sample_train_out, y = y[sample_index], is_forward = True)
error = self.__loss_function(predict_y = single_sample_train_out, y = y[sample_index], is_forward = False)
# backward
for j in range(len(self.layers)):
layer_index = len(self.layers) - j - 1
error = self.layers[layer_index].back_propagation(error, learn_rate)
result = self.predict(X_test)
acc_sample_number = 0
for sample_index, y_single_sample in enumerate(y_test):
predict_label_index = np.argmax(result[sample_index])
single_sample_label_index = np.argmax(y_single_sample)
if predict_label_index == single_sample_label_index:
acc_sample_number += 1
print("epochs {} / {} loss : {} acc : {}".format(epoch_index, epochs, self.__final_loss(loss / len(X)), acc_sample_number / len(y_test)))
def predict(self, X):
"""predict result
Parameters
----------
X : {array-like, tensor(4-dim)} of shape (sample_number, in_data_col, in_data_row, in_data_channel)
Returns
-------
result : {array-like, matrix} of shape (sample_number, sample_label_predict_number)
"""
result = []
for sample_index in range(len(X)):
single_sample_train_out = X[sample_index]
for layer in self.layers:
single_sample_train_out = layer.forward_propagation(single_sample_train_out)
result.append(single_sample_train_out)
return np.squeeze(np.array(result)) | [
"numpy.mean",
"numpy.array",
"numpy.log",
"numpy.argmax"
] | [((1950, 1963), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (1957, 1963), True, 'import numpy as np\n'), ((6836, 6852), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (6844, 6852), True, 'import numpy as np\n'), ((1556, 1573), 'numpy.log', 'np.log', (['predict_y'], {}), '(predict_y)\n', (1562, 1573), True, 'import numpy as np\n'), ((5752, 5783), 'numpy.argmax', 'np.argmax', (['result[sample_index]'], {}), '(result[sample_index])\n', (5761, 5783), True, 'import numpy as np\n'), ((5828, 5854), 'numpy.argmax', 'np.argmax', (['y_single_sample'], {}), '(y_single_sample)\n', (5837, 5854), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
from astropy.table import Table
import dateutil
import dateutil.parser
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.cm import viridis_r
import argparse
import sys
import os
import multiprocessing as mp
import datetime
__author__ = ["<NAME>"]
__date__ = '2022/03/25'
def plot_summary_table(filename, plotfile):
"""
Create a summary plot for all sources, identifying which are likely to be variable.
parameters
----------
filename : str
Input table filename
plotfile : str
Filename for the output plot file
"""
tab = Table.read(filename)
pval_peak_flux = tab['pval_peak_flux_ks']
md = tab['md']
mean_peak_flux = tab['mean_peak_flux']
kwargs = {'fontsize':14}
fig = plt.figure(figsize=(5,8))
ax = fig.add_subplot(1,1,1)
cax = ax.scatter(md, np.log10(pval_peak_flux), c = np.log10(mean_peak_flux), cmap=viridis_r)
cb = fig.colorbar(cax,ax=ax)
cb.set_label("log10(Peak flux in epoch 1) (Jy)", **kwargs)
ax.set_ylim((-11,1.001))
ax.set_xlim((-0.3,0.3))
ax.set_ylabel("log(p_val_ks)", **kwargs)
ax.set_xlabel("Debiased modulation index ($m_d$)", **kwargs)
ax.axhline(-3, c='k')
ax.axvline(0.05, c='k')
ax.text(0.1, -5, "variable", **kwargs)
ax.fill_between([-0.3,0.05],-25, y2=2, color='k', alpha=0.2)
ax.fill_betweenx([-3,2],0.05, x2=0.3, color='k', alpha=0.2)
ax.text(-0.25, -5, "not variable", **kwargs)
plt.savefig(plotfile)
return
def plot_lc_table(flux_table, stats_table, start=0, stride=1, plot_dir="plots", dates=False):
"""
Create individual light curve plots.
Each plot is saved to plots/uuid.png
parameters
----------
flux_table : str
Filename of the flux table
stats_table : str
Filename of the stats table
start : int
Starting row (default=0)
stride : int
Process every Nth row of the table. Default =1
dates : bool = False
If true then use dates for the x-axis value/format
"""
flux_tab = Table.read(flux_table).filled(0) # replace numerical blanks with zeros
stats_tab = Table.read(stats_table)
epochs = [a for a in flux_tab.colnames if a.startswith('epoch')]
fluxes = [a for a in flux_tab.colnames if a.startswith('peak_flux')]
err_fluxes = [a for a in flux_tab.colnames if a.startswith('err_peak_flux')]
for row in flux_tab[start::stride]:
fname = '{0}/{1}.png'.format(plot_dir, row['uuid'])
print(fname, end='')
if os.path.exists(fname):
print(" ... skip")
continue
srow = stats_tab[stats_tab['uuid'] == row['uuid']]
# Sort date by date
mask = np.where(['None' not in row[a] for a in epochs])[0]
if len(mask) == 0 :
print(" ... no data")
continue
epoch_mask = list(np.choose(mask, epochs))
flux_mask = list(np.choose(mask, fluxes))
err_flux_mask = list(np.choose(mask, err_fluxes))
if dates:
epoch_times = [datetime.datetime.strptime(a, "%Y-%m-%dT%H:%M:%S") for a in list(row[epochs][epoch_mask])]
else:
epoch_times = list(range(len(epoch_mask)))
# Annotate with stats
s = f"m={srow['m'][0]:5.3f}\nmd={srow['md'][0]:4.2f}\nchisq={srow['chisq_peak_flux'][0]:4.1f}"
yerrs = list(row[err_fluxes][err_flux_mask])
# convert epochs to datetime objects
fig, ax = plt.subplots()
ax.errorbar(epoch_times,
list(row[fluxes][flux_mask]),
yerr=yerrs,
label=s)
ax.set_ylabel('Flux Density (Jy/Beam)')
ax.set_xlabel('Epoch')
ax.set_title('{0}'.format(row['uuid']))
ax.legend()
if dates:
fig.autofmt_xdate()
ax.fmt_xdata = mdates.DateFormatter("%Y-%m-%dT%H:%M:%S")
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
print(" ... done")
return
def plot_lc_table_parallel(flux_table, stats_table, light_curve_dir, dates, nprocs=1, debug=False):
"""
parameters
----------
flux_table : str
Filename of the flux table
stats_table : str
Filename of the stats table
light_curve_dir : str
Location to store the plots
dates : bool
Whether to use dates for the plot horizontal axis (True) or epochs (False)
nprocs : int
Number of processes to use simultaneously
"""
pool = mp.Pool(nprocs)
results = []
for i in range(nprocs):
r=pool.apply_async(
plot_lc_table,
args=[
flux_table,
stats_table
],
kwds={
'start':i,
'stride':nprocs,
'plot_dir':light_curve_dir,
'dates':dates
}
)
if debug:
r.get()
else:
results.append(r)
pool.close()
pool.join()
if not debug:
# This forces any raised exceptions within the apply_async to be re-raised here
for r in results:
r.get()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
group1 = parser.add_argument_group("Create a variability plot")
group1.add_argument("--ftable", dest='ftable', type=str, default=None,
help="flux table")
group1.add_argument("--stable", dest='stable', type=str, default=None,
help="stats table")
group1.add_argument("--plot", dest='plotfile', type=str, default=None,
help="output plot")
group1.add_argument("--all", dest='all', action='store_true', default=False,
help="Also plot individual light curves. Default:False")
group1.add_argument("--lc_dir", dest='light_curve_dir', type=str, default="plots",
help="The light curve plots output directory")
group1.add_argument("--dates", dest='dates', action='store_true', default=False,
help="Individual plots have date on the horizontal axis.")
group1.add_argument("--cores", dest='cores', type=int, default=None,
help="Number of cores to use: Default all")
group1.add_argument("--debug", dest='debug', action='store_true', default=False,
help="Use debug mode")
results = parser.parse_args()
if results.cores is None:
results.cores = mp.cpu_count()
if results.ftable or results.stable:
if not (results.ftable and results.stable):
print("ERROR: --stable and --ftable are both required, only one supplied.")
plot_summary_table(results.stable, results.plotfile)
if results.all:
plot_lc_table_parallel(results.ftable,
results.stable,
results.light_curve_dir,
results.dates,
nprocs=results.cores,
debug=results.debug)
else:
parser.print_help()
sys.exit()
| [
"astropy.table.Table.read",
"argparse.ArgumentParser",
"matplotlib.pyplot.close",
"os.path.exists",
"matplotlib.pyplot.figure",
"numpy.where",
"matplotlib.dates.DateFormatter",
"numpy.choose",
"datetime.datetime.strptime",
"multiprocessing.Pool",
"sys.exit",
"numpy.log10",
"matplotlib.pyplot... | [((648, 668), 'astropy.table.Table.read', 'Table.read', (['filename'], {}), '(filename)\n', (658, 668), False, 'from astropy.table import Table\n'), ((817, 843), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 8)'}), '(figsize=(5, 8))\n', (827, 843), True, 'import matplotlib.pyplot as plt\n'), ((1517, 1538), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plotfile'], {}), '(plotfile)\n', (1528, 1538), True, 'import matplotlib.pyplot as plt\n'), ((2204, 2227), 'astropy.table.Table.read', 'Table.read', (['stats_table'], {}), '(stats_table)\n', (2214, 2227), False, 'from astropy.table import Table\n'), ((4565, 4580), 'multiprocessing.Pool', 'mp.Pool', (['nprocs'], {}), '(nprocs)\n', (4572, 4580), True, 'import multiprocessing as mp\n'), ((5448, 5473), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5471, 5473), False, 'import argparse\n'), ((902, 926), 'numpy.log10', 'np.log10', (['pval_peak_flux'], {}), '(pval_peak_flux)\n', (910, 926), True, 'import numpy as np\n'), ((2591, 2612), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (2605, 2612), False, 'import os\n'), ((3519, 3533), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3531, 3533), True, 'import matplotlib.pyplot as plt\n'), ((3954, 3993), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""'}), "(fname, bbox_inches='tight')\n", (3965, 3993), True, 'import matplotlib.pyplot as plt\n'), ((4002, 4016), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4011, 4016), True, 'import matplotlib.pyplot as plt\n'), ((6749, 6763), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (6761, 6763), True, 'import multiprocessing as mp\n'), ((7405, 7415), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7413, 7415), False, 'import sys\n'), ((932, 956), 'numpy.log10', 'np.log10', (['mean_peak_flux'], {}), '(mean_peak_flux)\n', (940, 956), True, 'import numpy as np\n'), 
((2117, 2139), 'astropy.table.Table.read', 'Table.read', (['flux_table'], {}), '(flux_table)\n', (2127, 2139), False, 'from astropy.table import Table\n'), ((2769, 2819), 'numpy.where', 'np.where', (["[('None' not in row[a]) for a in epochs]"], {}), "([('None' not in row[a]) for a in epochs])\n", (2777, 2819), True, 'import numpy as np\n'), ((2930, 2953), 'numpy.choose', 'np.choose', (['mask', 'epochs'], {}), '(mask, epochs)\n', (2939, 2953), True, 'import numpy as np\n'), ((2980, 3003), 'numpy.choose', 'np.choose', (['mask', 'fluxes'], {}), '(mask, fluxes)\n', (2989, 3003), True, 'import numpy as np\n'), ((3034, 3061), 'numpy.choose', 'np.choose', (['mask', 'err_fluxes'], {}), '(mask, err_fluxes)\n', (3043, 3061), True, 'import numpy as np\n'), ((3904, 3945), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y-%m-%dT%H:%M:%S"""'], {}), "('%Y-%m-%dT%H:%M:%S')\n", (3924, 3945), True, 'import matplotlib.dates as mdates\n'), ((3108, 3158), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['a', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(a, '%Y-%m-%dT%H:%M:%S')\n", (3134, 3158), False, 'import datetime\n')] |
import numpy as np
from math import sin, exp, pi, ceil
def f(x):
return exp(3*x)*sin(2*x)
def trapezoid( start, end, h, f) :
points= ceil((end - start) / h)
x = np.linspace(start, end, points+1) # N+1 points make N subintervals
y = f(x)
trapezoidIntegral = (h / 2) * (y[0]+y[points]+np.sum(2*y[1:-1])) #approximation result
return trapezoidIntegral
| [
"math.exp",
"numpy.sum",
"math.ceil",
"math.sin",
"numpy.linspace"
] | [((145, 168), 'math.ceil', 'ceil', (['((end - start) / h)'], {}), '((end - start) / h)\n', (149, 168), False, 'from math import sin, exp, pi, ceil\n'), ((177, 212), 'numpy.linspace', 'np.linspace', (['start', 'end', '(points + 1)'], {}), '(start, end, points + 1)\n', (188, 212), True, 'import numpy as np\n'), ((79, 89), 'math.exp', 'exp', (['(3 * x)'], {}), '(3 * x)\n', (82, 89), False, 'from math import sin, exp, pi, ceil\n'), ((88, 98), 'math.sin', 'sin', (['(2 * x)'], {}), '(2 * x)\n', (91, 98), False, 'from math import sin, exp, pi, ceil\n'), ((308, 327), 'numpy.sum', 'np.sum', (['(2 * y[1:-1])'], {}), '(2 * y[1:-1])\n', (314, 327), True, 'import numpy as np\n')] |
import sys
import nltk
import sklearn
import pandas
import numpy
# for checking the versions
print('Python: {}'.format(sys.version))
print('NLTK: {}'.format(nltk.__version__))
print('Scikit-learn: {}'.format(sklearn.__version__))
print('pandas: {}'.format(pandas.__version__))
print('numpy: {}'.format(numpy.__version__))
#1 load the dataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df=pd.read_table('SMSSpamCollection', header = None, encoding='utf-8')
print(df.info())
print(df.head())
classes = df[0]
print(classes.value_counts())
# 2 preprocess the data 0 ham and 1 spam (Binary Classification)
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
Y = encoder.fit_transform(classes)
print(classes[:10])
print(Y[:10])
text_messages = df[1]
print(text_messages[:10])
# Use regular expression to replace email addresses , urls, phonenumber, other phone number, symbols
# email
processed = text_messages.str.replace(r'^.+@[^\.].*\.[a-z]{2,}$', 'emailaddr')
# web address
processed = processed.str.replace(r'^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)7$', 'webaddress')
# moneysymb
processed = processed.str.replace(r'£|\$', 'moneysymb')
# phonenumbr
processed = processed.str.replace(r'^\(?[\d]{3}\)?[\s-]?[\d]{3}[\s-]?[\d]{4}$', 'phonenumbr')
# number
processed = processed.str.replace(r'\d+(\.\d+)?', 'numbr')
#remove punctuation
processed = processed.str.replace(r'[^\w\d\s]', ' ')
#remove white space
processed = processed.str.replace(r'\s+', ' ')
#leading and trailing white space
processed = processed.str.replace(r'^\s+|\s+?$', '')
#chenging the words to lower case
processed = processed.str.lower()
print(processed)
#remove stop words from text
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
processed = processed.apply(lambda x: ' '.join(term for term in x.split() if term not in stop_words))
#remove stem from text
ps = nltk.PorterStemmer()
processed = processed.apply(lambda x: ' '.join(ps.stem(term) for term in x.split()))
print(processed)
#number of words and most common words and how many times they have appeared in the text
from nltk.tokenize import word_tokenize
all_words = []
for message in processed:
words = word_tokenize(message)
for w in words:
all_words.append(w)
all_words = nltk.FreqDist(all_words)
print('number of words: {}'.format(len(all_words)))
print('Most common words: {}'.format(all_words.most_common(15)))
#use 1500 most comman words as features
word_features = list(all_words.keys())[:1500]
def find_features(message):
words = word_tokenize(message)
features = {}
for word in word_features:
features[word] = (word in words)
return features
features = find_features(processed[0])
for key, value in features.items():
if value == True:
print(key)
#find features for all messages
messages = list(zip(processed, Y))
#define a seed for reproductivity
seed = 1
np.random.seed = seed
np.random.shuffle(messages)
#call find functions for each messages
featuresets = [(find_features(text), label) for (text, label) in messages]
from sklearn import model_selection
training, testing = model_selection.train_test_split(featuresets, test_size = 0.25, random_state = seed)
print('training: {}'.format(len(training)))
print('testing: {}'.format(len(testing)))
#scikit-learn classifier with nltk
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
#Define models to train
names = ['K Nearest neighbors', 'Decision Tree', 'Random Forest', 'Logistic Regression', 'SGD classifier', 'Naive Bayes', 'SVM Linear']
classifiers = [
KNeighborsClassifier(),
DecisionTreeClassifier(),
RandomForestClassifier(),
LogisticRegression(),
SGDClassifier(max_iter = 100),
MultinomialNB(),
SVC(kernel = 'linear')
]
models = list(zip(names, classifiers))
from nltk.classify.scikitlearn import SklearnClassifier
for name, model in models:
nltk_model = SklearnClassifier(model)
nltk_model.train(training)
accuracy = nltk.classify.accuracy(nltk_model, testing) * 100
print('{}: Accuracy: {}'.format(name, accuracy))
from sklearn.ensemble import VotingClassifier
names = ['K Nearest neighbors', 'Decision Tree', 'Random Forest', 'Logistic Regression', 'SGD classifier', 'Naive Bayes', 'SVM Linear']
classifiers = [
KNeighborsClassifier(),
DecisionTreeClassifier(),
RandomForestClassifier(),
LogisticRegression(),
SGDClassifier(max_iter = 100),
MultinomialNB(),
SVC(kernel = 'linear')
]
models = list(zip(names, classifiers))
nltk_ensemble = SklearnClassifier(VotingClassifier(estimators = models, voting = 'hard', n_jobs = -1))
nltk_ensemble.train(training)
accuracy = nltk.classify.accuracy(nltk_ensemble, testing) * 100
print('Ensemble Method Accuracy: {}'.format(accuracy))
#wrap models in NLTK
txt_features, labels = zip(*testing)
prediction = nltk_ensemble.classify_many(txt_features)
# print a confusion matrix and a classification report
print(classification_report(labels, prediction))
pd.DataFrame(
confusion_matrix(labels, prediction),
index = [['actual', 'actual'], ['ham', 'spam']],
columns = [['predicted', 'predicted'], ['ham', 'spam']])
names = ['KNN', 'DT','RF','LR','SGD','NB','SVM']
acc = [94.40057430007178,97.34386216798278,98.56424982053123,98.56424982053123,98.27709978463747,98.49246231155779,98.49246231155779]
plt.figure(figsize=(8,6))
plt.subplot()
plt.bar(names, acc, width=0.8)
plt.xlabel('Classifiers')
plt.ylabel('Accuracy')
plt.suptitle('Accuracy of Models')
plt.show()
| [
"matplotlib.pyplot.suptitle",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.bar",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"sklearn.ensemble.VotingClassifier",
"sklearn.svm.SVC",
"pandas.read_table",
"sklearn.linea... | [((420, 485), 'pandas.read_table', 'pd.read_table', (['"""SMSSpamCollection"""'], {'header': 'None', 'encoding': '"""utf-8"""'}), "('SMSSpamCollection', header=None, encoding='utf-8')\n", (433, 485), True, 'import pandas as pd\n'), ((694, 708), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (706, 708), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1943, 1963), 'nltk.PorterStemmer', 'nltk.PorterStemmer', ([], {}), '()\n', (1961, 1963), False, 'import nltk\n'), ((2326, 2350), 'nltk.FreqDist', 'nltk.FreqDist', (['all_words'], {}), '(all_words)\n', (2339, 2350), False, 'import nltk\n'), ((2968, 2995), 'numpy.random.shuffle', 'np.random.shuffle', (['messages'], {}), '(messages)\n', (2985, 2995), True, 'import numpy as np\n'), ((3168, 3253), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['featuresets'], {'test_size': '(0.25)', 'random_state': 'seed'}), '(featuresets, test_size=0.25, random_state=seed\n )\n', (3200, 3253), False, 'from sklearn import model_selection\n'), ((5713, 5739), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (5723, 5739), True, 'import matplotlib.pyplot as plt\n'), ((5739, 5752), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (5750, 5752), True, 'import matplotlib.pyplot as plt\n'), ((5753, 5783), 'matplotlib.pyplot.bar', 'plt.bar', (['names', 'acc'], {'width': '(0.8)'}), '(names, acc, width=0.8)\n', (5760, 5783), True, 'import matplotlib.pyplot as plt\n'), ((5784, 5809), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Classifiers"""'], {}), "('Classifiers')\n", (5794, 5809), True, 'import matplotlib.pyplot as plt\n'), ((5810, 5832), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (5820, 5832), True, 'import matplotlib.pyplot as plt\n'), ((5833, 5867), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Accuracy of Models"""'], {}), "('Accuracy of 
Models')\n", (5845, 5867), True, 'import matplotlib.pyplot as plt\n'), ((5868, 5878), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5876, 5878), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1809), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1798, 1809), False, 'from nltk.corpus import stopwords\n'), ((2251, 2273), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['message'], {}), '(message)\n', (2264, 2273), False, 'from nltk.tokenize import word_tokenize\n'), ((2595, 2617), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['message'], {}), '(message)\n', (2608, 2617), False, 'from nltk.tokenize import word_tokenize\n'), ((3936, 3958), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (3956, 3958), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3964, 3988), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (3986, 3988), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3994, 4018), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (4016, 4018), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4024, 4044), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (4042, 4044), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((4050, 4077), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'max_iter': '(100)'}), '(max_iter=100)\n', (4063, 4077), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((4085, 4100), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (4098, 4100), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((4106, 4126), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (4109, 4126), False, 'from sklearn.svm import SVC\n'), ((4270, 4294), 
'nltk.classify.scikitlearn.SklearnClassifier', 'SklearnClassifier', (['model'], {}), '(model)\n', (4287, 4294), False, 'from nltk.classify.scikitlearn import SklearnClassifier\n'), ((4647, 4669), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (4667, 4669), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4675, 4699), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (4697, 4699), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((4705, 4729), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (4727, 4729), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4735, 4755), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (4753, 4755), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((4761, 4788), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'max_iter': '(100)'}), '(max_iter=100)\n', (4774, 4788), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((4796, 4811), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (4809, 4811), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((4817, 4837), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (4820, 4837), False, 'from sklearn.svm import SVC\n'), ((4917, 4978), 'sklearn.ensemble.VotingClassifier', 'VotingClassifier', ([], {'estimators': 'models', 'voting': '"""hard"""', 'n_jobs': '(-1)'}), "(estimators=models, voting='hard', n_jobs=-1)\n", (4933, 4978), False, 'from sklearn.ensemble import VotingClassifier\n'), ((5027, 5073), 'nltk.classify.accuracy', 'nltk.classify.accuracy', (['nltk_ensemble', 'testing'], {}), '(nltk_ensemble, testing)\n', (5049, 5073), False, 'import nltk\n'), ((5313, 5354), 'sklearn.metrics.classification_report', 'classification_report', (['labels', 'prediction'], {}), '(labels, 
prediction)\n', (5334, 5354), False, 'from sklearn.metrics import classification_report, accuracy_score, confusion_matrix\n'), ((5375, 5411), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels', 'prediction'], {}), '(labels, prediction)\n', (5391, 5411), False, 'from sklearn.metrics import classification_report, accuracy_score, confusion_matrix\n'), ((4335, 4378), 'nltk.classify.accuracy', 'nltk.classify.accuracy', (['nltk_model', 'testing'], {}), '(nltk_model, testing)\n', (4357, 4378), False, 'import nltk\n')] |
from collections import OrderedDict
import logging
logger = logging.getLogger('tensorprob')
import numpy as np
import tensorflow as tf
from . import config
from .utilities import (
classproperty,
Description,
generate_name,
ModelSubComponet,
Region,
set_logp_to_neg_inf
)
class ModelError(RuntimeError):
pass
class Model(object):
'''The model class is the primary interface of TensorProb. It allows you to
declare random variables, describe the (directed) probabalistic
relationships between them, provide observations for some of them, and
perform inference on the unobserved (latent) variables.
Models are agnostic as to whether you want to follow frequentist or
bayesian paradigms of inference. They allow you to find the maximum
likelihood or maximum a posteriori estimate for you model given the data
using the `.fit` method, or to sample from the likelihood/posterior using
MCMC techniques (See the `.mcmc` method).
Random variables can only be instantiated inside the `with` context of a model,
and each model can only have a single `with` block.
Inside the `with` context of the model, you can define variables and their
relationships by telling a "generative story".
For example, defining a new variable `X` with `X ~ Normal(0, 1)` is written as
`X = Normal(0, 1)`.
Random variables can then be plugged in as the conditional parameters of
other distributions.
After the `.initialize` method is called, the model has a *state* for each latent
variable, which is used for the initial parameters in the `.fit` and `.mcmc` methods,
as well as when using the `.pdf` method.
Parameters
----------
name : string, default None
An optional name for this model. This is currently not used, but
should be useful when working with multiple models simultaneously in
the future.
Examples
--------
>>> with Model() as model:
... n = Parameter(lower=0)
... N = Poisson(n)
... model.observed(N)
... model.initialize({ n: 10 })
... model.fit([20])
'''
_current_model = None
def __init__(self, name=None):
# The description of the model. This is a dictionary from all
# `tensorflow.placeholder`s representing the random variables of the
# model (defined by the user in the model block) to their `Description`s
self._description = dict()
self._full_description = dict()
# A dictionary mapping the `tensorflow.placeholder`s representing the
# observed variables of the model to `tensorflow.Variables`
# These are set in the `model.observed()` method
# If this is none, `model.observed()` has not been called yet
self._observed = None
# A dictionary from `tensorflow.placeholder`s representing the hidden (latent)
# variables of the model to `tensorflow.Variables` carrying the current state
# of the model
self._hidden = None
self._setters = None
# A dictionary mapping `tensorflow.placeholder`s of variables to new
# `tensorflow.placeholder`s which have been substituted using combinators
self._silently_replace = dict()
# The graph that the user's model is originally constructed in
self._model_graph = tf.Graph()
# The session that we will eventually run with
self.session = tf.Session(graph=tf.Graph())
# Whether `model.initialize()` has been called
self.initialized = False
self.name = name or generate_name(self.__class__)
@classproperty
def current_model(self):
'''Returns the currently active `Model` when inside its `with` block.'''
if Model._current_model is None:
raise ModelError("This can only be used inside a model environment")
return Model._current_model
def __enter__(self):
if Model._current_model is not None:
raise ModelError("Can't nest models within each other")
Model._current_model = self
self.graph_ctx = self._model_graph.as_default()
self.graph_ctx.__enter__()
return self
def __exit__(self, e_type, e, tb):
Model._current_model = None
# Normalise all log probabilities contained in _description
for var, (logp, integral, bounds, frac, _) in self._full_description.items():
logp -= tf.log(tf.add_n([integral(l, u) for l, u in bounds]))
# Force logp to negative infinity when outside the allowed bounds
logp = set_logp_to_neg_inf(var, logp, bounds)
# Add the changed logp to the model description
self._full_description[var] = Description(logp, integral, bounds, frac, _)
if var in self._description:
self._description[var] = Description(logp, integral, bounds, frac, _)
# Exit the tensorflow graph
self.graph_ctx.__exit__(e_type, e, tb)
self.graph_ctx = None
# We shouldn't be allowed to edit this one anymore
self._model_graph.finalize()
# Re-raise underlying exceptions
if e_type is not None:
raise
def __getitem__(self, key):
if key not in self._full_description:
raise KeyError
logp, integral, bounds, frac, _ = self._full_description[key]
def pdf(*args):
self._set_data(args)
return self.session.run(
tf.exp(self._get_rewritten(logp)) *
self._get_rewritten(frac)
)
return ModelSubComponet(pdf)
def observed(self, *args):
'''Declares the random variables in `args` as observed, which means
that data is available for them.
The order in which variables are used here defines the order in which
they will have to be passed in later when using methods like `.fit` or
`.mcmc`. All variables in the model that are not declared as observed
are automatically declared as *latent* and become the subject of
inference.
`.observed` can only be called once per `Model` and is a requirement
for calling `.initialize`.
Parameters
----------
*args : random variables
The random variables for which data is available.
'''
if Model._current_model == self:
raise ModelError("Can't call `model.observed()` inside the model block")
for arg in args:
if arg not in self._description:
raise ValueError("Argument {} is not known to the model".format(arg))
self._observed = OrderedDict()
self._setters = dict()
with self.session.graph.as_default():
for arg in args:
dummy = tf.Variable(arg.dtype.as_numpy_dtype())
self._observed[arg] = dummy
setter_var = tf.Variable(arg.dtype.as_numpy_dtype(), name=arg.name.split(':')[0])
setter = tf.assign(dummy, setter_var, validate_shape=False)
self._setters[dummy] = (setter, setter_var)
def _rewrite_graph(self, transform):
    """Import a copy of the model graph into the session graph, replacing
    the variables named in `transform` with their mapped tensors.

    The copy is created under the name scope ``'added'``; the rewritten
    version of any tensor can then be fetched with `_get_rewritten`.

    Parameters
    ----------
    transform : dict
        Mapping from original variables (keyed by name) to the tensors
        that should replace them in the imported copy.
    """
    input_map = {k.name: v for k, v in transform.items()}
    # Modify the input dictionary to replace variables which have been
    # superseded with the use of combinators
    for k, v in self._silently_replace.items():
        input_map[k.name] = self._observed[v]
    with self.session.graph.as_default():
        try:
            tf.import_graph_def(
                self._model_graph.as_graph_def(),
                input_map=input_map,
                name='added',
            )
        except ValueError:
            # Ignore errors that occur when the input_map tries to
            # rewrite a variable that isn't present in the graph
            pass
def _get_rewritten(self, tensor):
    """Return the copy of `tensor` imported under the 'added/' name scope.

    Counterpart of `_rewrite_graph`, which imports the model graph into
    the session graph under that scope.
    """
    rewritten_name = 'added/' + tensor.name
    return self.session.graph.get_tensor_by_name(rewritten_name)
def initialize(self, assign_dict):
    '''Allows you to specify the initial state of the unobserved (latent)
    variables.

    Can only be called after observed variables have been declared with
    `.observed`.

    Parameters
    ----------
    assign_dict : dict
        A dictionary from random variables to values.
        This has to specify a value for all unobserved (latent) variables
        of the model.
    '''
    # This is where the `self._hidden` map is created.
    # The `tensorflow.Variable`s of the map are initialized
    # to the values given by the user in `assign_dict`.
    if Model._current_model == self:
        raise ModelError("Can't call `model.initialize()` inside the model block")
    if self._observed is None:
        raise ModelError("Can't initialize latent variables before "
                         "`model.observed()` has been called.")
    if self._hidden is not None:
        raise ModelError("Can't call `model.initialize()` twice. Use "
                         "`model.assign()` to change the state.")
    if not isinstance(assign_dict, dict) or not assign_dict:
        raise ValueError("Argument to `model.initialize()` must be a "
                         "dictionary with more than one element")
    for key in assign_dict.keys():
        if not isinstance(key, tf.Tensor):
            raise ValueError("Key in the initialization dict is not a "
                             "tf.Tensor: {}".format(repr(key)))
    # Latent variables = declared variables minus the observed ones;
    # the user must supply an initial value for exactly this set.
    hidden = set(self._description.keys()).difference(set(self._observed))
    if hidden != set(assign_dict.keys()):
        raise ModelError("Not all latent variables have been passed in a "
                         "call to `model.initialize().\nMissing variables: {}"
                         .format(hidden.difference(assign_dict.keys())))
    # Add variables to the execution graph
    with self.session.graph.as_default():
        self._hidden = dict()
        for var in hidden:
            self._hidden[var] = tf.Variable(var.dtype.as_numpy_dtype(assign_dict[var]),
                                         name=var.name.split(':')[0])
        self.session.run(tf.initialize_variables(list(self._hidden.values())))
    # Sort the hidden variables so we can access them in a consistant order
    self._hidden_sorted = sorted(self._hidden.keys(), key=lambda v: v.name)
    # Build a (setter op, placeholder) pair for each hidden variable so its
    # value can be changed later (see `assign`) without growing the graph.
    for h in self._hidden.values():
        with self.session.graph.as_default():
            var = tf.Variable(h.dtype.as_numpy_dtype(),
                          name=h.name.split(':')[0] + '_placeholder')
            setter = h.assign(var)
            self._setters[h] = (setter, var)
    # Rewrite the model graph so every declared variable is backed by its
    # runtime tf.Variable (observed dummies + hidden variables).
    all_vars = self._hidden.copy()
    all_vars.update(self._observed)
    self._rewrite_graph(all_vars)
    with self.session.graph.as_default():
        # observed_logps contains one element per data point
        observed_logps = OrderedDict()
        # TODO Remove, see Model.pdf
        observed_logp_setters = []
        for v in self._observed:
            # Flag variable per observed variable: -42 means "term enabled";
            # any other value is the dataset size, used to emit zeros instead
            # (see Model.pdf, which uses this to integrate out dimensions).
            logp_flag = tf.Variable(
                np.int32(-42),
                name=v.name.split(':')[0] + '_logp'
            )
            var = tf.Variable(
                np.int32(-42),
                name=logp_flag.name.split(':')[0] + '_placeholder'
            )
            setter = logp_flag.assign(var)
            observed_logp_setters.append((setter, var, logp_flag))
            # NOTE(review): the lambdas close over the loop variable `v`;
            # presumably safe because `tf.cond` traces both branches during
            # this iteration of the loop — verify if porting this code.
            observed_logps[v] = tf.cond(
                tf.equal(logp_flag, -42),
                lambda: self._get_rewritten(self._description[v].logp),
                lambda: tf.fill(tf.reshape(tf.to_int32(logp_flag), [1]), config.dtype(0))
            )
        # hidden_logps contains a single value
        hidden_logps = [self._get_rewritten(self._description[v].logp) for v in self._hidden]
        # Handle the case where we don't have observed variables.
        # We define the probability to not observe anything as 1.
        if observed_logps:
            observed_logps = list(observed_logps.values())
        else:
            observed_logps = [tf.constant(0, dtype=config.dtype)]
        self._logp_flag_setters = observed_logp_setters
        self._pdf = tf.exp(tf.add_n(observed_logps))
        self._nll = -tf.add_n(
            [tf.reduce_sum(logp) for logp in observed_logps] +
            hidden_logps
        )
        # Gradient of the NLL w.r.t. each hidden variable, in sorted order.
        variables = [self._hidden[k] for k in self._hidden_sorted]
        self._nll_grad = tf.gradients(self._nll, variables)
        for i, (v, g) in enumerate(zip(variables, self._nll_grad)):
            if g is None:
                # tf.gradients returns None for unconnected variables;
                # substitute a zero so optimizers always get a number.
                self._nll_grad[i] = tf.constant(0, dtype=config.dtype)
                # NOTE: `logger.warn` is a deprecated alias of `warning`.
                logger.warn('Model is independent of variable {}'.format(
                    v.name.split(':')[0]
                ))
    if observed_logp_setters:
        self.session.run(tf.initialize_variables([x[2] for x in observed_logp_setters]))
    self.initialized = True
def assign(self, assign_dict):
    '''Set the state of specific unobserved (latent) variables to the specified
    values.

    Parameters
    ----------
    assign_dict : dict
        A dictionary from random variables to values.
        This has to specify a value for a subset of the unobserved (latent)
        variables of the model.

    Raises
    ------
    ModelError
        If called inside the model block, or before `model.observed()` /
        `model.initialize()` have been called.
    ValueError
        If `assign_dict` is not a non-empty dictionary.
    '''
    if Model._current_model == self:
        raise ModelError("Can't call `model.assign()` inside the model block")
    if not isinstance(assign_dict, dict) or not assign_dict:
        # The check rejects *empty* dicts, so the message says "at least
        # one element" (the previous wording said "more than one").
        raise ValueError("Argument to assign must be a dictionary with "
                         "at least one element")
    if self._observed is None:
        raise ModelError("Can't assign state to the model before "
                         "`model.observed()` has been called.")
    if self._hidden is None:
        raise ModelError("Can't assign state to the model before "
                         "`model.initialize()` has been called.")
    # Assign values without adding new ops to the graph: reuse the setter
    # ops built in `initialize` and feed values via their placeholders.
    setters = [self._setters[self._hidden[k]][0] for k in assign_dict]
    feed_dict = {self._setters[self._hidden[k]][1]: v for k, v in assign_dict.items()}
    self.session.run(setters, feed_dict=feed_dict)
@property
def state(self):
    '''Mapping from each unobserved (latent) variable of the model to its
    current value.

    Example
    -------
    >>> # Assume we have a random variable X with value 42
    >>> model.state[X]
    42
    '''
    hidden_vars = self._hidden
    current_values = self.session.run(list(hidden_vars.values()))
    return dict(zip(hidden_vars.keys(), current_values))
def _check_data(self, data):
    """Validate that the model is initialized and that `data` supplies one
    entry per observed variable."""
    if self._hidden is None:
        raise ModelError(
            "Can't use the model before it has been "
            "initialized with `model.initialize(...)`"
        )
    # TODO(ibab) make sure that args all have the correct shape
    if len(data) != len(self._observed):
        raise ValueError(
            "Different number of arguments passed to model "
            "method than declared in `model.observed()`"
        )
def _set_data(self, data):
    """Assign `data` to the observed-variable dummies via their setter ops.

    Entries of `data` that are ``None`` are skipped, leaving the
    corresponding variable's current value in place.
    """
    self._check_data(data)
    assign_ops = []
    feed_values = {}
    for dummy, value in zip(self._observed.values(), data):
        if value is None:
            continue
        setter_op, setter_var = self._setters[dummy]
        assign_ops.append(setter_op)
        feed_values[setter_var] = value
    for op in assign_ops:
        self.session.run(op, feed_dict=feed_values)
def _run_with_data(self, expr, data):
    """Evaluate `expr` in the session, feeding `data` directly to the
    observed-variable dummies (``None`` entries are not fed)."""
    self._check_data(data)
    feed = {}
    for placeholder, value in zip(self._observed.values(), data):
        if value is not None:
            feed[placeholder] = value
    return self.session.run(expr, feed_dict=feed)
def pdf(self, *args_in):
    '''The probability density function for observing a single entry
    of each random variable that has been declared as observed.

    This allows you to easily plot the probability density function.

    Parameters
    ----------
    args : lists or ndarrays
        The entries for which we want to know the values of the probability
        density function. All arguments must have the same shape.
        Pass ``None`` for a variable to crudely integrate it out.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> xs = np.linspace(-1, 1, 200)
    >>> plt.plot(xs, model.pdf(xs))
    '''
    # If there is a None included in args we can use
    # `self._logp_flag_setters` to disable parts of the likelihood
    # and crudely integrate out dimensions.
    #
    # A flag value of -42 enables the corresponding likelihood term;
    # any other value disables it, replacing the term with a tensor of
    # zeros whose size is equal to the flag (see the `tf.cond` built in
    # `initialize`). When disabling, the size is taken from the length
    # of the first non-`None` element of `args`, or defaults to 1 if
    # all elements are `None`.
    #
    # TODO Remove this horrible hack and have a better way of integrating
    # out dimensions
    setters = []
    feed_dict = {}
    default_size = ([len(a) for a in args_in if a is not None] or [1])[0]
    for arg, (setter, var, lop_setter) in zip(args_in, self._logp_flag_setters):
        setters.append(setter)
        # `None` argument -> disable the term (flag = dataset size);
        # real argument -> enable the term (flag = -42 sentinel).
        feed_dict[var] = default_size if arg is None else -42
    self.session.run(setters, feed_dict=feed_dict)
    # A value is still needed for unused datasets so replace `None` with
    # -1 in args
    result = self._run_with_data(self._pdf, [-1 if a is None else a for a in args_in])
    # Set all the flags back to -42 to re-enable all parts of the likelihood
    self.session.run(setters, feed_dict={k: -42 for k in feed_dict})
    return result
def nll(self, *args):
    '''Evaluate the negative log-likelihood on the given datasets.

    Parameters
    ----------
    args : lists or ndarrays
        One dataset per observed variable, in the order declared with
        `model.observed()`. The datasets don't need to have the same
        shape.
    '''
    datasets = args
    return self._run_with_data(self._nll, datasets)
def fit(self, *args, **kwargs):
    '''Perform a maximum likelihood or maximum a posteriori estimate
    using one of the available function optimization backends.

    Parameters
    ----------
    args : lists or ndarrays
        The datasets from which we want to infer the values of unobserved
        (latent) variables. The arguments don't need to have the same
        shape.
    use_gradient : bool
        Whether the optimizer should use gradients derived using
        TensorFlow. Some optimizers may not be able to use gradient
        information, in which case this argument is ignored.
    optimizer : subclass of BaseOptimizer
        The optimization backend to use.
        See the `optimizers` module for which optimizers are available.

    Returns
    -------
    The optimizer's result object; its `x` attribute holds the fitted
    values, ordered like `self._hidden_sorted`.
    '''
    optimizer = kwargs.get('optimizer')
    use_gradient = kwargs.get('use_gradient', True)
    self._set_data(args)
    # `_hidden_sorted` fixes one deterministic variable order that is
    # shared by the variables, the gradient, the bounds, and the result
    # unpacking below.
    variables = [self._hidden[k] for k in self._hidden_sorted]
    gradient = self._nll_grad if use_gradient else None
    # Some optimizers need bounds
    bounds = []
    for h in self._hidden_sorted:
        # Take the outer bounds into account.
        # We can't do better than that here
        lower = self._description[h].bounds[0].lower
        upper = self._description[h].bounds[-1].upper
        bounds.append((lower, upper))
    if optimizer is None:
        from .optimizers import ScipyLBFGSBOptimizer
        optimizer = ScipyLBFGSBOptimizer()
    optimizer.session = self.session
    out = optimizer.minimize(variables, self._nll, gradient=gradient, bounds=bounds)
    # `out.x` is ordered like `variables`, i.e. like `_hidden_sorted`, so
    # reuse that order instead of re-sorting the keys a second time.
    self.assign(dict(zip(self._hidden_sorted, out.x)))
    return out
def mcmc(self, *args, **kwargs):
    '''Perform MCMC sampling of the possible values of unobserved (latent)
    variables using one of the available sampling backends.

    Parameters
    ----------
    args : lists or ndarrays
        The datasets from which we want to infer the values of unobserved
        (latent) variables. The arguments don't need to have the same
        shape.
    sampler : subclass of BaseSampler
        The sampling backend to use.
        See the `samplers` module for which samplers are available.
    '''
    sampler = kwargs.get('sampler')
    samples = kwargs.get('samples')
    self._set_data(args)
    if sampler is None:
        # Default backend: emcee's affine-invariant ensemble sampler.
        from .samplers import EmceeSampler
        sampler = EmceeSampler(walkers=40, session=self.session)
    hidden_variables = list(self._hidden.values())
    return sampler.sample(hidden_variables, self._nll, samples=samples)
__all__ = [
Model,
]
| [
"tensorflow.add_n",
"tensorflow.reduce_sum",
"tensorflow.constant",
"tensorflow.initialize_variables",
"tensorflow.assign",
"tensorflow.to_int32",
"tensorflow.equal",
"tensorflow.Graph",
"numpy.int32",
"collections.OrderedDict",
"tensorflow.gradients",
"logging.getLogger"
] | [((60, 91), 'logging.getLogger', 'logging.getLogger', (['"""tensorprob"""'], {}), "('tensorprob')\n", (77, 91), False, 'import logging\n'), ((3363, 3373), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3371, 3373), True, 'import tensorflow as tf\n'), ((6680, 6693), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6691, 6693), False, 'from collections import OrderedDict\n'), ((11134, 11147), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11145, 11147), False, 'from collections import OrderedDict\n'), ((12809, 12843), 'tensorflow.gradients', 'tf.gradients', (['self._nll', 'variables'], {}), '(self._nll, variables)\n', (12821, 12843), True, 'import tensorflow as tf\n'), ((3469, 3479), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3477, 3479), True, 'import tensorflow as tf\n'), ((7031, 7081), 'tensorflow.assign', 'tf.assign', (['dummy', 'setter_var'], {'validate_shape': '(False)'}), '(dummy, setter_var, validate_shape=False)\n', (7040, 7081), True, 'import tensorflow as tf\n'), ((12536, 12560), 'tensorflow.add_n', 'tf.add_n', (['observed_logps'], {}), '(observed_logps)\n', (12544, 12560), True, 'import tensorflow as tf\n'), ((13231, 13293), 'tensorflow.initialize_variables', 'tf.initialize_variables', (['[x[2] for x in observed_logp_setters]'], {}), '([x[2] for x in observed_logp_setters])\n', (13254, 13293), True, 'import tensorflow as tf\n'), ((11326, 11339), 'numpy.int32', 'np.int32', (['(-42)'], {}), '(-42)\n', (11334, 11339), True, 'import numpy as np\n'), ((11470, 11483), 'numpy.int32', 'np.int32', (['(-42)'], {}), '(-42)\n', (11478, 11483), True, 'import numpy as np\n'), ((11758, 11782), 'tensorflow.equal', 'tf.equal', (['logp_flag', '(-42)'], {}), '(logp_flag, -42)\n', (11766, 11782), True, 'import tensorflow as tf\n'), ((12408, 12442), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'config.dtype'}), '(0, dtype=config.dtype)\n', (12419, 12442), True, 'import tensorflow as tf\n'), ((12986, 13020), 
'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'config.dtype'}), '(0, dtype=config.dtype)\n', (12997, 13020), True, 'import tensorflow as tf\n'), ((12615, 12634), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['logp'], {}), '(logp)\n', (12628, 12634), True, 'import tensorflow as tf\n'), ((11907, 11929), 'tensorflow.to_int32', 'tf.to_int32', (['logp_flag'], {}), '(logp_flag)\n', (11918, 11929), True, 'import tensorflow as tf\n')] |
import gym
import numpy as np
from abc import ABC
from io import BytesIO
from PIL import Image
from eventobjects.action import Action, RESET_ACTION
class AndroidDeviceEnv(gym.Env, ABC):
    """
    The AndroidDeviceEnv implements the gym Env interface in order to interface
    with an Android device through the abstraction layers of the Action and Observation buffers.
    Each action is defined in a continuous 2D space, specifically a down and up action on some 2D coordinate.
    Each observation is a continuous RGB image.
    """
    metadata = {'render.modes': ['rgb_array']}
    # Predefined (RGB image)
    NUM_CHANNELS = 3

    def __init__(self, action_buffer, observation_buffer, image_height, image_width):
        """
        Initialize the environment.
        :param action_buffer: ActionBuffer object that the environment should populate
        :param observation_buffer: ObservationBuffer object that the environment uses to gather image observations.
        :param image_height: height of the image observation in terms of num pixels
        :param image_width: width of the image observation in terms of num pixels
        """
        super(AndroidDeviceEnv, self).__init__()
        self.action_buffer = action_buffer
        self.observation_buffer = observation_buffer
        self.num_steps = 0
        # This is set to None initially. It is set in either the step() or reset() fn and is used during rendering.
        self.most_recent_observation = None
        # 2D continuous grid. Each action corresponds to a (x, y) touch.
        self.action_space = gym.spaces.Box(
            low=np.array([0.0, 0.0]),
            high=np.array([image_width, image_height]),
            dtype=np.float32
        )
        # H x W x C where C is number of channels.
        self.observation_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=[image_height, image_width, self.NUM_CHANNELS],
            dtype=np.uint8
        )

    def step(self, action):
        """
        Apply `action` (an (x, y) touch) to the device and return the
        resulting observation, reward, and step info.

        NOTE(review): `gym.Env.step` conventionally returns a 4-tuple
        (observation, reward, done, info); this implementation returns a
        3-tuple with no `done` flag -- confirm downstream callers (and any
        gym wrappers) expect this shape.
        """
        # Wrap the action from the action space into an Action object
        action_buffer_elem = Action(tuple(action))
        # Add the action into the action buffer
        self.action_buffer.put_elem(action_buffer_elem)
        # Get new observation once action has been taken.
        # This observation corresponds to the image once the action has been taken.
        new_observation = self.get_new_observation()
        # Increment the num_steps counter
        self.num_steps += 1
        # Compute reward (subclass-defined; see compute_reward below)
        reward_val = self.compute_reward(new_observation)
        # Reset most_recent_observation tracker
        self.most_recent_observation = new_observation
        log_info = {
            'num_steps': self.num_steps
        }
        return new_observation, reward_val, log_info

    def reset(self):
        """
        Reset the episode: clear both buffers, ask the device for a fresh
        initial observation, and return it.
        """
        self.num_steps = 0
        # First clear all elements from the buffers
        self.action_buffer.clearall()
        self.observation_buffer.clearall()
        # Send a 'reset' action to the ActionBuffer so that the initial observation can be sent.
        self.action_buffer.put_elem(RESET_ACTION)
        # Read initial response
        self.most_recent_observation = self.get_new_observation()
        return self.most_recent_observation

    def render(self, mode='human'):
        """
        Render the most recent observation. Only 'rgb_array' mode is
        supported (see `metadata`); other modes fall through to the base
        class, which raises.
        """
        if self.most_recent_observation is None:
            raise Exception("most_recent_observation has not been set to a valid image. " +
                            "Most likely cause is neither step() nor reset() has been called in advance.")
        if mode == 'rgb_array':
            return self.most_recent_observation
        else:
            super(AndroidDeviceEnv, self).render(mode=mode)  # just raise an exception for invalid mode

    def get_new_observation(self):
        """
        Gather the observation object from the observation buffer and decode the image into a numpy array
        that aligns with the observation space.
        :return: np array containing the image (H x W x 3).
        """
        # Blocking read from the observation buffer.
        observation = self.observation_buffer.blocking_read_elem()
        return AndroidDeviceEnv.process_image_from_observation(observation)

    def compute_reward(self, new_observation):
        """
        Compute reward value based on the new observation image. It utilizes current state information like
        current observation, number of steps so far, and this new observation to compute the reward.
        Can be overridden to include more information from other state variables if necessary.
        :param new_observation: numpy array containing the new screen image (H x W x C) where C is the number of
        channels (C = 3 for RGB).
        :return: float reward value.
        """
        # Abstract: concrete environments must supply a reward function.
        raise NotImplementedError

    @staticmethod
    def process_image_from_observation(observation):
        """
        Helper static method to process an image from an observation to a numpy array
        :param observation: Observation object
        :return: numpy array of an RGB image contained in the observation
        """
        # Decode image bytes into a numpy array.
        pil_image = Image.open(BytesIO(observation.image_bytes)).convert('RGB')
        image_arr = np.array(pil_image)
        return image_arr
| [
"io.BytesIO",
"numpy.array",
"gym.spaces.Box"
] | [((1828, 1934), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '[image_height, image_width, self.NUM_CHANNELS]', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=[image_height, image_width, self.\n NUM_CHANNELS], dtype=np.uint8)\n', (1842, 1934), False, 'import gym\n'), ((5305, 5324), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (5313, 5324), True, 'import numpy as np\n'), ((1626, 1646), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1634, 1646), True, 'import numpy as np\n'), ((1665, 1702), 'numpy.array', 'np.array', (['[image_width, image_height]'], {}), '([image_width, image_height])\n', (1673, 1702), True, 'import numpy as np\n'), ((5235, 5267), 'io.BytesIO', 'BytesIO', (['observation.image_bytes'], {}), '(observation.image_bytes)\n', (5242, 5267), False, 'from io import BytesIO\n')] |
"""module with functions that handle .phn annotation files from the TIMIT dataset
"""
import pathlib
from typing import ClassVar, Optional
import warnings
import attr
import numpy as np
import pandas as pd
import pandera
from pandera.typing import Series
import soundfile
import crowsetta
from crowsetta.typing import PathLike
class TimitTranscriptSchema(pandera.SchemaModel):
    """A ``pandera.SchemaModel`` that validates ``pandas`` dataframes
    loaded from a .phn or .wrd file in the TIMIT transcription format.
    """
    # Sample index where each segment begins (first column of the file).
    begin_sample: Optional[Series[int]] = pandera.Field()
    # Sample index where each segment ends (second column of the file).
    end_sample: Optional[Series[int]] = pandera.Field()
    # Segment label: a word or a phonetic code (third column);
    # coerced to pandas' string dtype on validation.
    text: Series[pd.StringDtype] = pandera.Field(coerce=True)

    class Config:
        # Columns must appear in the declared order, with no extras.
        ordered = True
        strict = True
@crowsetta.interface.SeqLike.register
@attr.define
class Timit:
    """Class that represents annotations from transcription files in the
    DARPA TIMIT Acoustic-Phonetic Continuous Speech Corpus (TIMIT)

    Attributes
    ----------
    name: str
        Shorthand name for annotation format: ``'timit'``.
    ext: str
        Extension of files in annotation format:
        ``('.phn', '.PHN', '.wrd', '.WRD')``
    begin_samples : numpy.ndarray
        Vector of integer sample numbers corresponding
        to beginning of segments, i.e. onsets
    end_samples : numpy.ndarray
        Vector of integer sample numbers corresponding
        to ends of segments, i.e. offsets
    text : numpy.ndarray
        Vector of string labels for segments;
        each element is either a single word,
        or a single phonetic transcription code.
    annot_path : str, pathlib.Path
        Path to TIMIT transcription file from which annotations were loaded.
    audio_path : str. pathlib.Path
        Path to audio file that the TIMIT transcription file annotates.
    """
    name: ClassVar[str] = 'timit'
    ext: ClassVar[str] = ('.phn', '.PHN', '.wrd', '.WRD')

    # numpy arrays need element-wise comparison for attrs equality
    begin_samples: np.ndarray = attr.field(eq=attr.cmp_using(eq=np.array_equal))
    end_samples: np.ndarray = attr.field(eq=attr.cmp_using(eq=np.array_equal))
    text: np.ndarray = attr.field(eq=attr.cmp_using(eq=np.array_equal))
    annot_path: pathlib.Path
    audio_path: Optional[pathlib.Path] = attr.field(default=None,
                                                     converter=attr.converters.optional(pathlib.Path))

    @classmethod
    def from_file(cls,
                  annot_path: PathLike,
                  audio_path: Optional[PathLike] = None) -> 'Self':
        """Load annotations from a TIMIT transcription file

        Parameters
        ----------
        annot_path : str, pathlib.Path
            Path to a TIMIT transcription file,
            with one of the extensions {'.phn', '.PHN', '.wrd', '.WRD'}.
        audio_path : str, pathlib.Path
            Optional, defaults to ``annot_path`` with the extension
            changed to '.wav' or '.WAV'. Both extensions are checked
            and if either file exists, that one is used. Otherwise,
            defaults to '.wav' in lowercase.

        Examples
        --------
        >>> example = crowsetta.data.get('timit')
        >>> timit = crowsetta.formats.seq.Timit.from_file(example.annot_path)

        Notes
        -----
        Versions of the dataset exist with the extensions
        in capital letters. Some platforms may not have case-sensitive paths.
        """
        annot_path = pathlib.Path(annot_path)
        # note multiple extensions, both all-uppercase and all-lowercase `.phn` exist,
        # depending on which version of TIMIT dataset you have
        crowsetta.validation.validate_ext(annot_path, extension=cls.ext)
        # assume file is space-separated with no header
        df = pd.read_csv(annot_path, sep=' ', header=None)
        df.columns = ['begin_sample', 'end_sample', 'text']
        df = TimitTranscriptSchema.validate(df)
        if audio_path is None:
            # Look for a sibling audio file with either extension casing.
            for ext in ('.wav', '.WAV'):
                audio_path = annot_path.parent / (annot_path.stem + ext)
                if audio_path.exists():
                    break
            if not audio_path.exists():
                # just default to lower-case .wav
                audio_path = annot_path.parent / (annot_path.stem + '.wav')
        return cls(
            annot_path=annot_path,
            begin_samples=df['begin_sample'].values,
            end_samples=df['end_sample'].values,
            text=df['text'].values,
            audio_path=audio_path,
        )

    def to_seq(self,
               round_times: bool = True,
               decimals: int = 3,
               samplerate: Optional[int] = None) -> crowsetta.Sequence:
        """Convert this TIMIT annotation to a ``crowsetta.Sequence``.

        Parameters
        ----------
        round_times : bool
            if True, round onsets_s and offsets_s.
            Default is True.
        decimals : int
            number of decimals places to round floating point numbers to.
            Only meaningful if round_times is True.
            Default is 3, so that times are rounded to milliseconds.
        samplerate : int
            Sampling rate for wave files. Used to convert
            ``begin_samples`` and ``end_samples``
            from sample number to seconds.
            Default is None, in which ths function
            tries to open ``audio_path`` and determine
            the actual sampling rate. If this does not work,
            then the ``onsets_s`` and ``offsets_s`` attributes
            of the ``crowsetta.Sequence`` are left as None.

        Examples
        --------
        >>> example = crowsetta.data.get('timit')
        >>> timit = crowsetta.formats.seq.Timit.from_file(example.annot_path)
        >>> seq = timit.to_seq()

        Returns
        -------
        phn_seq : crowsetta.Sequence

        Notes
        -----
        The ``round_times`` and ``decimals`` arguments are provided
        to reduce differences across platforms
        due to floating point error, e.g. when loading annotation files
        and then sending them to a csv file,
        the result should be the same on Windows and Linux.
        """
        onset_samples = self.begin_samples
        offset_samples = self.end_samples
        labels = self.text
        if samplerate is None:
            try:
                samplerate = soundfile.info(self.audio_path).samplerate
            except RuntimeError:
                warnings.warn(
                    f'wav file not found: {self.audio_path}.'
                    f'Could not determine sampling rate to convert onsets and offsets to seconds. '
                    f'To use a fixed sampling rate for all files, pass in a value for the `samplerate` '
                    f'argument, but be aware that this may not be the correct sampling rate for some files.',
                    UserWarning
                )
                samplerate = None
        if samplerate is None:
            # Without a sampling rate we cannot convert sample numbers to
            # seconds; leave the second-based times as None, as documented.
            # (Previously this fell through to a division by None and
            # raised TypeError.)
            onsets_s = None
            offsets_s = None
        else:
            onsets_s = onset_samples / samplerate
            offsets_s = offset_samples / samplerate
            if round_times:
                onsets_s = np.around(onsets_s, decimals=decimals)
                offsets_s = np.around(offsets_s, decimals=decimals)
        phn_seq = crowsetta.Sequence.from_keyword(labels=labels,
                                                  onset_samples=onset_samples,
                                                  offset_samples=offset_samples,
                                                  onsets_s=onsets_s,
                                                  offsets_s=offsets_s)
        return phn_seq

    def to_annot(self,
                 round_times: bool = True,
                 decimals: int = 3,
                 samplerate: Optional[int] = None) -> crowsetta.Annotation:
        """Convert this TIMIT annotation to a ``crowsetta.Annotation``.

        Parameters
        ----------
        round_times : bool
            if True, round onsets_s and offsets_s.
            Default is True.
        decimals : int
            number of decimals places to round floating point numbers to.
            Only meaningful if round_times is True.
            Default is 3, so that times are rounded to milliseconds.
        samplerate : int
            Sampling rate for wave files. Used to convert
            ``begin_samples`` and ``end_samples``
            from sample number to seconds.
            Default is None, in which ths function
            tries to open ``audio_path`` and determine
            the actual sampling rate. If this does not work,
            then the ``onsets_s`` and ``offsets_s`` attributes
            of the ``crowsetta.Sequence`` are left as None.

        Examples
        --------
        >>> example = crowsetta.data.get('timit')
        >>> timit = crowsetta.formats.seq.Timit.from_file(example.annot_path)
        >>> annot = timit.to_annot()

        Returns
        -------
        annot : crowsetta.Annotation

        Notes
        -----
        The ``round_times`` and ``decimals`` arguments are provided
        to reduce differences across platforms
        due to floating point error, e.g. when loading annotation files
        and then sending them to a csv file,
        the result should be the same on Windows and Linux.
        """
        phn_seq = self.to_seq(round_times, decimals, samplerate)
        return crowsetta.Annotation(annot_path=self.annot_path, notated_path=self.audio_path, seq=phn_seq)

    def to_file(self,
                annot_path: PathLike) -> None:
        """make a .phn file from an annotation

        Parameters
        ----------
        annot_path : str, pathlib.Path
            path including filename where file should be saved.
            Must have a valid extension for TIMIT transcription files,
            one of {'.phn', '.PHN', '.wrd', '.WRD'}.
        """
        # Coerce to Path first (the docstring allows str, and `.open`
        # below requires a Path); mirrors the order used in `from_file`.
        annot_path = pathlib.Path(annot_path)
        crowsetta.validation.validate_ext(annot_path, extension=self.ext)
        lines = []
        for begin_sample, end_sample, text in zip(self.begin_samples.tolist(),
                                                  self.end_samples.tolist(),
                                                  list(self.text)):
            lines.append(
                f'{begin_sample} {end_sample} {text}\n'
            )
        with annot_path.open('w') as fp:
            fp.writelines(lines)
| [
"crowsetta.validation.validate_ext",
"soundfile.info",
"crowsetta.Sequence.from_keyword",
"pandas.read_csv",
"crowsetta.Annotation",
"attr.cmp_using",
"numpy.around",
"pathlib.Path",
"pandera.Field",
"attr.converters.optional",
"warnings.warn"
] | [((572, 587), 'pandera.Field', 'pandera.Field', ([], {}), '()\n', (585, 587), False, 'import pandera\n'), ((628, 643), 'pandera.Field', 'pandera.Field', ([], {}), '()\n', (641, 643), False, 'import pandera\n'), ((679, 705), 'pandera.Field', 'pandera.Field', ([], {'coerce': '(True)'}), '(coerce=True)\n', (692, 705), False, 'import pandera\n'), ((3424, 3448), 'pathlib.Path', 'pathlib.Path', (['annot_path'], {}), '(annot_path)\n', (3436, 3448), False, 'import pathlib\n'), ((3607, 3671), 'crowsetta.validation.validate_ext', 'crowsetta.validation.validate_ext', (['annot_path'], {'extension': 'cls.ext'}), '(annot_path, extension=cls.ext)\n', (3640, 3671), False, 'import crowsetta\n'), ((3743, 3788), 'pandas.read_csv', 'pd.read_csv', (['annot_path'], {'sep': '""" """', 'header': 'None'}), "(annot_path, sep=' ', header=None)\n", (3754, 3788), True, 'import pandas as pd\n'), ((7206, 7356), 'crowsetta.Sequence.from_keyword', 'crowsetta.Sequence.from_keyword', ([], {'labels': 'labels', 'onset_samples': 'onset_samples', 'offset_samples': 'offset_samples', 'onsets_s': 'onsets_s', 'offsets_s': 'offsets_s'}), '(labels=labels, onset_samples=onset_samples,\n offset_samples=offset_samples, onsets_s=onsets_s, offsets_s=offsets_s)\n', (7237, 7356), False, 'import crowsetta\n'), ((9340, 9436), 'crowsetta.Annotation', 'crowsetta.Annotation', ([], {'annot_path': 'self.annot_path', 'notated_path': 'self.audio_path', 'seq': 'phn_seq'}), '(annot_path=self.annot_path, notated_path=self.\n audio_path, seq=phn_seq)\n', (9360, 9436), False, 'import crowsetta\n'), ((9838, 9903), 'crowsetta.validation.validate_ext', 'crowsetta.validation.validate_ext', (['annot_path'], {'extension': 'self.ext'}), '(annot_path, extension=self.ext)\n', (9871, 9903), False, 'import crowsetta\n'), ((1983, 2016), 'attr.cmp_using', 'attr.cmp_using', ([], {'eq': 'np.array_equal'}), '(eq=np.array_equal)\n', (1997, 2016), False, 'import attr\n'), ((2062, 2095), 'attr.cmp_using', 'attr.cmp_using', ([], {'eq': 
'np.array_equal'}), '(eq=np.array_equal)\n', (2076, 2095), False, 'import attr\n'), ((2134, 2167), 'attr.cmp_using', 'attr.cmp_using', ([], {'eq': 'np.array_equal'}), '(eq=np.array_equal)\n', (2148, 2167), False, 'import attr\n'), ((2326, 2364), 'attr.converters.optional', 'attr.converters.optional', (['pathlib.Path'], {}), '(pathlib.Path)\n', (2350, 2364), False, 'import attr\n'), ((7084, 7122), 'numpy.around', 'np.around', (['onsets_s'], {'decimals': 'decimals'}), '(onsets_s, decimals=decimals)\n', (7093, 7122), True, 'import numpy as np\n'), ((7147, 7186), 'numpy.around', 'np.around', (['offsets_s'], {'decimals': 'decimals'}), '(offsets_s, decimals=decimals)\n', (7156, 7186), True, 'import numpy as np\n'), ((6373, 6404), 'soundfile.info', 'soundfile.info', (['self.audio_path'], {}), '(self.audio_path)\n', (6387, 6404), False, 'import soundfile\n'), ((6465, 6786), 'warnings.warn', 'warnings.warn', (['f"""wav file not found: {self.audio_path}.Could not determine sampling rate to convert onsets and offsets to seconds. To use a fixed sampling rate for all files, pass in a value for the `samplerate` argument, but be aware that this may not be the correct sampling rate for some files."""', 'UserWarning'], {}), "(\n f'wav file not found: {self.audio_path}.Could not determine sampling rate to convert onsets and offsets to seconds. To use a fixed sampling rate for all files, pass in a value for the `samplerate` argument, but be aware that this may not be the correct sampling rate for some files.'\n , UserWarning)\n", (6478, 6786), False, 'import warnings\n')] |
import numpy as np
from tqdm import trange, tqdm
from utils import frokf
import scipy.io as sio
## 配置
con_terms_linear5 = ['x1(t-1)', 'x1(t-2)', 'x1(t-2)', 'x1(t-3)', 'x1(t-2)', 'x4(t-1)', 'x5(t-1)', 'x4(t-1)', 'x5(t-1)'] # 9
con_terms_nonlinear5 = ['x1(t-1)', 'x1(t-2)', 'x1(t-2)*x1(t-2)', 'x1(t-3)', 'x1(t-2)*x1(t-2)', 'x4(t-1)', 'x5(t-1)', 'x4(t-1)', 'x5(t-1)'] # 9
true_coefs5 = [0.95*np.sqrt(2), -0.9025, 0.5, -0.4, -0.5, 0.25*np.sqrt(2), 0.25*np.sqrt(2), -0.25*np.sqrt(2), 0.25*np.sqrt(2)] # 9
con_terms_linear10 = ['x1(t-1)', 'x1(t-2)', 'x1(t-2)', 'x2(t-3)', 'x1(t-2)', 'x4(t-4)', 'x9(t-2)', 'x4(t-4)', 'x1(t-1)', 'x1(t-2)', 'x7(t-2)',
'x8(t-3)', 'x9(t-3)', 'x8(t-3)', 'x9(t-3)', 'x7(t-4)'] # 16
con_terms_nonlinear10 = ['x1(t-1)', 'x1(t-2)', 'x1(t-2)*x1(t-10)', 'x2(t-3)', 'x1(t-2)', 'x4(t-4)', 'x9(t-2)', 'x4(t-4)', 'x1(t-1)*x1(t-10)', 'x1(t-2)', 'x7(t-2)',
'x8(t-3)', 'x9(t-3)', 'x8(t-3)', 'x9(t-3)', 'x7(t-4)'] # 16
true_coefs10 = [0.95*np.sqrt(2), -0.9025, 0.5, 0.9, -0.5, 0.8, -0.4, -0.8, 0.4, -0.4, -0.9, 0.4, 0.3, -0.3, 0.4, -0.75] # 16
noises = np.linspace(0.5, 4, 8)
con_terms5 = [2, 1, 1, 3, 2]
con_terms10 = [2, 1, 1, 1, 2, 1, 2, 3, 2, 1]
root = '../data/'
## 批量计算保存
ret = []
for dtype in tqdm(['linear', 'nonlinear']):
for ndim in tqdm([5, 10]):
for noise_id in trange(8):
noise_var = noises[noise_id]
ret = []
for trial in trange(1, 101):
ret.append(frokf(noise_var, trial, ndim, dtype, eval(f"con_terms_{dtype}{ndim}"), eval(f"con_terms{ndim}")))
sio.savemat(f"{root}FROKF_{dtype}{ndim}D_{noise_var:2.2f}", {'frokf_coef': np.stack(ret)})
| [
"numpy.stack",
"tqdm.tqdm",
"tqdm.trange",
"numpy.linspace",
"numpy.sqrt"
] | [((1115, 1137), 'numpy.linspace', 'np.linspace', (['(0.5)', '(4)', '(8)'], {}), '(0.5, 4, 8)\n', (1126, 1137), True, 'import numpy as np\n'), ((1263, 1292), 'tqdm.tqdm', 'tqdm', (["['linear', 'nonlinear']"], {}), "(['linear', 'nonlinear'])\n", (1267, 1292), False, 'from tqdm import trange, tqdm\n'), ((1310, 1323), 'tqdm.tqdm', 'tqdm', (['[5, 10]'], {}), '([5, 10])\n', (1314, 1323), False, 'from tqdm import trange, tqdm\n'), ((393, 403), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (400, 403), True, 'import numpy as np\n'), ((436, 446), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (443, 446), True, 'import numpy as np\n'), ((453, 463), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (460, 463), True, 'import numpy as np\n'), ((471, 481), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (478, 481), True, 'import numpy as np\n'), ((488, 498), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (495, 498), True, 'import numpy as np\n'), ((1001, 1011), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1008, 1011), True, 'import numpy as np\n'), ((1349, 1358), 'tqdm.trange', 'trange', (['(8)'], {}), '(8)\n', (1355, 1358), False, 'from tqdm import trange, tqdm\n'), ((1447, 1461), 'tqdm.trange', 'trange', (['(1)', '(101)'], {}), '(1, 101)\n', (1453, 1461), False, 'from tqdm import trange, tqdm\n'), ((1675, 1688), 'numpy.stack', 'np.stack', (['ret'], {}), '(ret)\n', (1683, 1688), True, 'import numpy as np\n')] |
import time
from os import environ
import numpy as np
from smartredis import Client, Dataset
from ..error import SmartSimError
from ..log import get_logger
logger = get_logger(__name__)
def form_name(*args):
return "_".join(str(arg) for arg in args if arg is not None)
class TrainingDataUploader:
"""A class to simplify uploading batches of samples to train a model.
This class can be used to upload samples following a simple convention
for naming. Once created, the function `publish_info` can be used
to put all details about the data set on the Orchestrator. A training
process can thus access them and get all relevant information to download
the batches which are uploaded.
Each time a new batch is available, it is sufficient to call `put_batch`,
and the data will be stored following the naming convention specified
by the attributes of this class.
:param name: Name of the dataset as stored on the Orchestrator
:type name: str
:param sample_prefix: Prefix of samples batches
:type sample_prefix: str
:param target_prefix: Prefix of target batches (if needed)
:type target_prefix: str
:param num_classes: Number of classes of targets, if categorical
:type num_classes: int
:param producer_prefixes: Prefixes of processes which will be producing batches.
This can be useful in case the consumer processes also
have other incoming entities.
:type producer_prefixes: str or list[str]
:param cluster: Whether the SmartSim Orchestrator is being run as a cluster
:type cluster: bool
:param address: Address of Redis DB as <ip_address>:<port>
:type address: str
:param num_ranks: Number of processes (e.g. MPI ranks) of application using
DataUploader.
:type num_ranks: int
:param rank: Rank of DataUploader in multi-process application (e.g. MPI rank).
:type rank: int
:param verbose: If output should be logged to screen.
:type verbose: bool
"""
def __init__(
self,
name="training_data",
sample_prefix="samples",
target_prefix="targets",
num_classes=None,
producer_prefixes=None,
cluster=True,
address=None,
num_ranks=None,
rank=None,
verbose=False,
):
if not name:
raise ValueError("Name can not be empty")
if not sample_prefix:
raise ValueError("Sample prefix can not be empty")
self.name = name
self.sample_prefix = sample_prefix
self.target_prefix = target_prefix
if isinstance(producer_prefixes, str):
producer_prefixes = [producer_prefixes]
self.producer_prefixes = producer_prefixes
self.num_classes = num_classes
if num_ranks is None:
self.num_ranks = None
else:
self.num_ranks = int(num_ranks)
self.rank = rank
self.client = Client(address=address, cluster=cluster)
self.batch_idx = 0
self.verbose = verbose
def publish_info(self):
info_ds = Dataset(form_name(self.name, "info"))
info_ds.add_meta_string("sample_prefix", self.sample_prefix)
if self.target_prefix:
info_ds.add_meta_string("target_prefix", self.target_prefix)
if self.producer_prefixes:
for producer_prefix in self.producer_prefixes:
info_ds.add_meta_string("producer_prefixes", producer_prefix)
if self.num_classes:
info_ds.add_meta_scalar("num_classes", self.num_classes)
if self.num_ranks:
info_ds.add_meta_scalar("num_ranks", self.num_ranks)
self.client.put_dataset(info_ds)
def put_batch(self, samples, targets=None):
batch_key = form_name(self.sample_prefix, self.batch_idx, self.rank)
self.client.put_tensor(batch_key, samples)
if self.verbose:
logger.info(f"Put batch {batch_key}")
if (
targets is not None
and self.target_prefix
and (self.target_prefix != self.sample_prefix)
):
labels_key = form_name(self.target_prefix, self.batch_idx, self.rank)
self.client.put_tensor(labels_key, targets)
self.batch_idx += 1
class StaticDataDownloader:
"""A class to download a dataset from the DB.
By default, the StaticDataDownloader has to be created in a process
launched through SmartSim, with sample producers listed as incoming
entities.
All details about the batches must be defined in
the constructor; two mechanisms are available, `manual` and
`auto`.
- When specifying `auto`, the user must also specify
`uploader_name`. StaticDataDownloader will get all needed information
from the database (this expects a Dataset like the one created
by TrainingDataUploader to be available and stored as `uploader_name`
on the DB).
- When specifying `manual`, the user must also specify details
of batch naming. Specifically, for each incoming entity with
a name starting with an element of `producer_prefixes`,
StaticDataDownloader will query the DB
for all batches named <sample_prefix>_<sub_index> for all indices
in `sub_indexes` if supplied, and, if
`target_prefix` is supplied, it will also query for all targets
named <target_prefix>.<sub_index>. If `producer_prefixes` is
None, then all incoming entities will be treated as producers,
and for each one, the corresponding batches will be downloaded.
The flag `init_samples` defines whether sources (the list of batches
to be fetched) and samples (the actual data) should automatically
be set up in the costructor.
If the user needs to modify the list of sources, then `init_samples=False`
has to be set. In that case, to set up a `BatchDownlaoder`, the user has to call
`init_sources()` (which initializes the list of sources and the SmartRedis client)
and `init_samples()`. After `init_sources()` is called,
a list of data sources is populated, representing the batches which
will be downloaded.
Each source is represented as a tuple `(producer_name, sub_index)`.
Before `init_samples()` is called, the user can modify the list.
Once `init_samples()` is called, all data is downloaded and batches
can be obtained with iter().
After initialization, samples and targets will not be updated. The data can
be shuffled by calling `update_data()`, if `shuffle` is set to ``True`` at
initialization.
:param batch_size: Size of batches obtained with __iter__
:type batch_size: int
:param shuffle: whether order of samples has to be shuffled when calling `update_data`
:type shuffle: bool
:param uploader_info: Set to `auto` uploader information has to be downloaded from DB,
or to `manual` if it is provided by the user
:type uploader_info: str
:param uploader_name: Name of uploader info dataset, only used if `uploader_info` is `auto`
:type uploader_name: str
:param sample_prefix: prefix of keys representing batches
:type sample_prefix: str
:param target_prefix: prefix of keys representing targets
:type target_prefix: str
:param uploader_ranks: Number of processes every uploader runs on (e.g, if each
rank in an MPI simulation is uploading its own batches,
this will be the MPI comm world size of the simulation).
:type uploader_ranks: int
:param num_classes: Number of classes of targets, if categorical
:type num_classes: int
:param producer_prefixes: Prefixes of names of which will be producing batches.
These can be e.g. prefixes of SmartSim entity names in
an ensemble.
:type producer_prefixes: str
:param cluster: Whether the Orchestrator will be run as a cluster
:type cluster: bool
:param address: Address of Redis client as <ip_address>:<port>
:type address: str
:param replica_rank: When StaticDataDownloader is used distributedly, indicates
the rank of this object
:type replica_rank: int
:param num_replicas: When BatchDownlaoder is used distributedly, indicates
the total number of ranks
:type num_replicas: int
:param verbose: Whether log messages should be printed
:type verbose: bool
:param init_samples: whether samples should be initialized in the constructor
:type init_samples: bool
"""
def __init__(
self,
batch_size=32,
shuffle=True,
uploader_info="auto",
uploader_name="training_data",
sample_prefix="samples",
target_prefix="targets",
uploader_ranks=None,
num_classes=None,
producer_prefixes=None,
cluster=True,
address=None,
replica_rank=0,
num_replicas=1,
verbose=False,
init_samples=True,
**kwargs,
):
self.replica_rank = replica_rank
self.num_replicas = num_replicas
self.address = address
self.cluster = cluster
self.uploader_info = uploader_info
self.uploader_name = uploader_name
self.verbose = verbose
self.samples = None
self.targets = None
self.num_samples = 0
self.indices = np.arange(0)
self.shuffle = shuffle
self.batch_size = batch_size
if uploader_info == "manual":
self.sample_prefix = sample_prefix
self.target_prefix = target_prefix
if uploader_ranks is not None:
self.sub_indices = [str(rank) for rank in range(uploader_ranks)]
else:
self.sub_indices = None
if producer_prefixes:
self.producer_prefixes = list(producer_prefixes)
else:
producer_prefixes = [""]
self.num_classes = num_classes
elif self.uploader_info == "auto":
pass
else:
raise ValueError(
f"uploader_info must be one of 'auto' or 'manual', but was {self.uploader_info}"
)
if init_samples:
self.init_sources()
self.init_samples()
else:
self.client = Client(self.address, self.cluster)
if self.uploader_info == "auto":
if not self.uploader_name:
raise ValueError(
"uploader_name can not be empty if uploader_info is 'auto'"
)
self._get_uploader_info(self.uploader_name)
# This avoids problems with Pytorch
self.client = None
def log(self, message):
if self.verbose:
logger.info(message)
def _list_all_sources(self):
uploaders = environ["SSKEYIN"].split(",")
sources = []
for uploader in uploaders:
if any(
[
uploader.startswith(producer_prefix)
for producer_prefix in self.producer_prefixes
]
):
if self.sub_indices:
sources.extend(
[[uploader, sub_index] for sub_index in self.sub_indices]
)
else:
sources.append([uploader, None])
per_replica = len(sources) // self.num_replicas
if per_replica > 0:
if self.replica_rank < self.num_replicas - 1:
sources = sources[
self.replica_rank
* per_replica : (self.replica_rank + 1)
* per_replica
]
else:
sources = sources[self.replica_rank * per_replica :]
else:
self.log(
"Number of loader replicas is higher than number of sources, automatic split cannot be performed, "
"all replicas will have the same dataset. If this is not intended, then implement a distribution strategy "
"and modify `sources`."
)
return sources
def init_sources(self):
"""Initalize list of data sources based on incoming entitites and self.sub_indices.
Each source is represented as a tuple `(producer_name, sub_index)`.
Before `init_samples()` is called, the user can modify the list.
Once `init_samples()` is called, all data is downloaded and batches
can be obtained with iter(). The list of all sources is stored as `self.sources`.
:raises ValueError: If self.uploader_info is set to `auto` but no `uploader_name` is specified.
:raises ValueError: If self.uploader_info is not set to `auto` or `manual`.
"""
self.client = Client(self.address, self.cluster)
if self.uploader_info == "auto":
if not self.uploader_name:
raise ValueError(
"uploader_name can not be empty if uploader_info is 'auto'"
)
self._get_uploader_info(self.uploader_name)
elif self.uploader_info == "manual":
pass
else:
raise ValueError(
f"uploader_info must be one of 'auto' or 'manual', but was {self.uploader_info}"
)
self.sources = self._list_all_sources()
@property
def need_targets(self):
"""Compute if targets have to be downloaded.
:return: Whether targets (or labels) should be downloaded
:rtype: bool
"""
return self.target_prefix and not self.autoencoding
def __len__(self):
length = int(np.floor(self.num_samples / self.batch_size))
return length
def __iter__(self):
if self.sources:
self.update_data()
# Generate data
if len(self) < 1:
msg = "Not enough samples in generator for one batch. "
msg += "Please run init_samples() or initialize generator with init_samples=True"
raise ValueError(msg)
for index in range(len(self)):
indices = self.indices[
index * self.batch_size : (index + 1) * self.batch_size
]
x, y = self.__data_generation(indices)
if y is not None:
yield x, y
else:
yield x
def init_samples(self, sources=None):
"""Initialize samples (and targets, if needed).
This function will not return until samples have been downloaded
from all sources.
:param sources: List of sources as defined in `init_sources`, defaults to None,
in which case sources will be initialized, unless `self.sources`
is already set
:type sources: list[tuple], optional
"""
self.autoencoding = self.sample_prefix == self.target_prefix
if sources is not None:
self.sources = sources
if self.sources is None:
self.sources = self._list_all_sources()
self.log("Generator initialization complete")
self._update_samples_and_targets()
if self.shuffle:
np.random.shuffle(self.indices)
def _data_exists(self, batch_name, target_name):
if self.need_targets:
return self.client.tensor_exists(batch_name) and self.client.tensor_exists(
target_name
)
else:
return self.client.tensor_exists(batch_name)
def _add_samples(self, batch_name, target_name):
if self.samples is None:
self.samples = self.client.get_tensor(batch_name)
if self.need_targets:
self.targets = self.client.get_tensor(target_name)
else:
self.samples = np.concatenate(
(self.samples, self.client.get_tensor(batch_name))
)
if self.need_targets:
self.targets = np.concatenate(
(self.targets, self.client.get_tensor(target_name))
)
self.num_samples = self.samples.shape[0]
self.indices = np.arange(self.num_samples)
self.log("Success!")
self.log(f"New dataset size: {self.num_samples}, batches: {len(self)}")
def _get_uploader_info(self, uploader_name):
dataset_name = form_name(uploader_name, "info")
self.log(f"Uploader dataset name: {dataset_name}")
ds_exists = False
try:
ds_exists = self.client.dataset_exists(dataset_name)
# As long as required SmartRedis version is not 0.3 we
# need a workaround for the missing function
except AttributeError:
try:
uploaders = environ["SSKEYIN"].split(",")
for uploader in uploaders:
if self.client.key_exists(uploader + "." + dataset_name):
ds_exists = True
except KeyError:
msg = "Uploader must be launched with SmartSim and added to incoming entity, "
msg += "when setting uploader_info to 'auto'"
raise SmartSimError(msg)
trials = 6
while not ds_exists:
trials -= 1
if trials == 0:
raise SmartSimError("Could not find uploader dataset")
time.sleep(5)
try:
ds_exists = self.client.dataset_exists(dataset_name)
except AttributeError:
try:
uploaders = environ["SSKEYIN"].split(",")
for uploader in uploaders:
if self.client.key_exists(uploader + "." + dataset_name):
ds_exists = True
except KeyError:
msg = "Uploader must be launched with SmartSim and added to incoming entity, "
msg += "when setting uploader_info to 'auto'"
raise SmartSimError(msg)
uploader_info = self.client.get_dataset(dataset_name)
self.sample_prefix = uploader_info.get_meta_strings("sample_prefix")[0]
self.log(f"Uploader sample prefix: {self.sample_prefix}")
try:
self.target_prefix = uploader_info.get_meta_strings("target_prefix")[0]
except:
self.target_prefix = None
self.log(f"Uploader target prefix: {self.target_prefix}")
try:
self.producer_prefixes = uploader_info.get_meta_strings("producer_prefixes")
except:
self.producer_prefixes = [""]
self.log(f"Uploader producer prefixes: {self.producer_prefixes}")
try:
self.num_classes = uploader_info.get_meta_scalars("num_classes")[0]
except:
self.num_classes = None
self.log(f"Uploader num classes: {self.num_classes}")
try:
num_ranks = uploader_info.get_meta_scalars("num_ranks")[0]
self.sub_indices = [str(rank) for rank in range(num_ranks)]
except:
self.sub_indices = None
self.log(f"Uploader sub-indices: {self.sub_indices}")
def _update_samples_and_targets(self):
for source in self.sources:
entity = source[0]
sub_index = source[1]
self.client.set_data_source(entity)
batch_name = form_name(self.sample_prefix, sub_index)
if self.need_targets:
target_name = form_name(self.target_prefix, sub_index)
else:
target_name = None
self.log(f"Retrieving {batch_name} from {entity}")
trials = 6
while not self._data_exists(batch_name, target_name):
trials -= 1
if trials == 0:
raise SmartSimError(
f"Could not retrieve batch {batch_name} from entity {entity}"
)
time.sleep(5)
self._add_samples(batch_name, target_name)
def update_data(self):
self._update_samples_and_targets()
def __data_generation(self, indices):
# Initialization
x = self.samples[indices]
if self.need_targets:
y = self.targets[indices]
elif self.autoencoding:
y = x
else:
y = None
return x, y
def __len__(self):
length = int(np.floor(self.num_samples / self.batch_size))
return length
class DynamicDataDownloader(StaticDataDownloader):
"""A class to download batches from the DB as they are produced.
By default, the DynamicDataDownloader has to be created in a process
launched through SmartSim, with sample producers listed as incoming
entities.
All details about the batches must be defined in
the constructor; two mechanisms are available, `manual` and
`auto`.
- When specifying `auto`, the user must also specify
`uploader_name`. DynamicDataDownloader will get all needed information
from the database (this expects a Dataset like the one created
by TrainingDataUploader to be available and stored as `uploader_name`
on the DB).
- When specifying `manual`, the user must also specify details
of batch naming. Specifically, for each incoming entity with
a name starting with an element of `producer_prefixes`,
DynamicDataDownloader will query the DB
for all batches named <sample_prefix>_<sub_index>_<iteration> for all indices
in `sub_indices` if supplied, and, if
`target_prefix` is supplied, it will also query for all targets
named <target_prefix>.<sub_index>.<iteration>. If `producer_prefixes` is
None, then all incoming entities will be treated as producers,
and for each one, the corresponding batches will be downloaded.
The flag `init_samples` defines whether sources (the list of batches
to be fetched) and samples (the actual data) should automatically
be set up in the costructor.
If the user needs to modify the list of sources, then `init_samples=False`
has to be set. In that case, to set up a `DynamicDataDownloader`, the user has to call
`init_sources()` (which initializes the list of sources and the SmartRedis client)
and `init_samples()`. After `init_sources()` is called,
a list of data sources is populated, representing the batches which
will be downloaded. See `init_sources()`
Each source is represented as a tuple `(producer_name, sub_index, iteration)`.
Before `init_samples()` is called, the user can modify the list.
Once `init_samples()` is called, all data is downloaded and batches
can be obtained with iter().
After initialization, samples and targets can be updated calling `update_data()`,
which shuffles the available samples, if `shuffle` is set to ``True`` at initialization.
:param batch_size: Size of batches obtained with __iter__
:type batch_size: int
:param shuffle: whether order of samples has to be shuffled when calling `update_data`
:type shuffle: bool
:param uploader_info: Set to `auto` uploader information has to be downloaded from DB,
or to `manual` if it is provided by the user
:type uploader_info: str
:param uploader_name: Name of uploader info dataset, only used if `uploader_info` is `auto`
:type uploader_name: str
:param sample_prefix: prefix of keys representing batches
:type sample_prefix: str
:param target_prefix: prefix of keys representing targets
:type target_prefix: str
:param uploader_ranks: Number of processes every uploader runs on (e.g, if each
rank in an MPI simulation is uploading its own batches,
this will be the MPI comm world size of the simulation).
:type uploader_ranks: int
:param num_classes: Number of classes of targets, if categorical
:type num_classes: int
:param producer_prefixes: Prefixes of processes which will be producing batches.
This can be useful in case the consumer processes also
have other incoming entities.
:type producer_prefixes: str
:param cluster: Whether the Orchestrator is being run as a cluster
:type cluster: bool
:param address: Address of Redis DB as <ip_address>:<port>
:type address: str
:param replica_rank: When StaticDataDownloader is used in a distributed setting, indicates
the rank of this replica
:type replica_rank: int
:param num_replicas: When ContinuousBatchDownlaoder is used in a distributed setting,
indicates the total number of replicas (ranks)
:type num_replicas: int
:param verbose: Whether log messages should be printed
:type verbose: bool
:param init_samples: whether samples should be initialized in the constructor
:type init_samples: bool
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _list_all_sources(self):
sources = super()._list_all_sources()
# Append the batch index to each source
for source in sources:
source.append(0)
return sources
def _update_samples_and_targets(self):
for source in self.sources:
entity = source[0]
sub_index = source[1]
index = source[2]
self.client.set_data_source(entity)
batch_name = form_name(self.sample_prefix, index, sub_index)
if self.need_targets:
target_name = form_name(self.target_prefix, index, sub_index)
else:
target_name = None
self.log(f"Retrieving {batch_name} from {entity}")
# Poll next batch based on index, if available: retrieve it, update index and loop
while self._data_exists(batch_name, target_name):
self._add_samples(batch_name, target_name)
source[2] += 1
index = source[2]
batch_name = form_name(self.sample_prefix, index, sub_index)
if self.need_targets:
target_name = form_name(self.target_prefix, index, sub_index)
self.log(f"Retrieving {batch_name}...")
def update_data(self):
"""Update data.
Fetch new batches (if available) from the DB. Also shuffle
list of samples if `self.shuffle` is set to ``True``.
"""
self._update_samples_and_targets()
if self.shuffle:
np.random.shuffle(self.indices)
def init_samples(self, sources=None):
"""Initialize samples (and targets, if needed).
This function will not return until at least one batch worth of data
has been downloaded.
:param sources: List of sources as defined in `init_sources`, defaults to None,
in which case sources will be initialized, unless `self.sources`
is already set
:type sources: list[tuple], optional
"""
self.autoencoding = self.sample_prefix == self.target_prefix
if sources is not None:
self.sources = sources
if self.sources is None:
self.sources = self._list_all_sources()
if self.sources:
while len(self) < 1:
self._update_samples_and_targets()
trials = 6
if len(self) < 1:
trials -= 1
if trials == 0:
raise SmartSimError("Could not find samples")
time.sleep(5)
self.log("Generator initialization complete")
else:
self.log(
"Generator has no associated sources, this can happen if the number of "
"loader workers is larger than the number of available sources."
)
| [
"smartredis.Client",
"numpy.floor",
"time.sleep",
"numpy.arange",
"numpy.random.shuffle"
] | [((3008, 3048), 'smartredis.Client', 'Client', ([], {'address': 'address', 'cluster': 'cluster'}), '(address=address, cluster=cluster)\n', (3014, 3048), False, 'from smartredis import Client, Dataset\n'), ((9527, 9539), 'numpy.arange', 'np.arange', (['(0)'], {}), '(0)\n', (9536, 9539), True, 'import numpy as np\n'), ((12977, 13011), 'smartredis.Client', 'Client', (['self.address', 'self.cluster'], {}), '(self.address, self.cluster)\n', (12983, 13011), False, 'from smartredis import Client, Dataset\n'), ((16395, 16422), 'numpy.arange', 'np.arange', (['self.num_samples'], {}), '(self.num_samples)\n', (16404, 16422), True, 'import numpy as np\n'), ((10468, 10502), 'smartredis.Client', 'Client', (['self.address', 'self.cluster'], {}), '(self.address, self.cluster)\n', (10474, 10502), False, 'from smartredis import Client, Dataset\n'), ((13847, 13891), 'numpy.floor', 'np.floor', (['(self.num_samples / self.batch_size)'], {}), '(self.num_samples / self.batch_size)\n', (13855, 13891), True, 'import numpy as np\n'), ((15445, 15476), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indices'], {}), '(self.indices)\n', (15462, 15476), True, 'import numpy as np\n'), ((17597, 17610), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (17607, 17610), False, 'import time\n'), ((20632, 20676), 'numpy.floor', 'np.floor', (['(self.num_samples / self.batch_size)'], {}), '(self.num_samples / self.batch_size)\n', (20640, 20676), True, 'import numpy as np\n'), ((26834, 26865), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indices'], {}), '(self.indices)\n', (26851, 26865), True, 'import numpy as np\n'), ((20168, 20181), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (20178, 20181), False, 'import time\n'), ((27899, 27912), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (27909, 27912), False, 'import time\n')] |
'''
Orthogonality-constrained 2-D convolution layers.

The ``Orth_Plane_*`` variants keep the flattened filter matrix
W (out_channels x in_channels*kh*kw) row-orthonormal via a QR retraction;
the ``Orth_UV_*`` variants factor the weight as W = U * diag(D) * V.
'''
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules import Module
from torch.nn import functional as F
from torch.autograd import Variable
import math
__all__ = ['Orth_Plane_Conv2d','Orth_Plane_Mani_Conv2d','Orth_UV_Conv2d','Orth_UV_Mani_Conv2d','GroupOrthConv']
class ManiGrad(torch.autograd.Function):
    """Identity in the forward pass; rewrites the gradient in the backward pass.

    For a weight tensor whose first dimension indexes output filters, the
    incoming gradient G is replaced by ``W W^T G - W G^T W`` (with W and G the
    tensors flattened to 2-D) — the "Version1" rule noted in the original code.
    """

    @staticmethod
    def forward(ctx, input):
        # Keep the tensor for the backward computation and return it untouched.
        ctx.save_for_backward(input)
        return input

    @staticmethod
    def backward(ctx, grad_out):
        (saved,) = ctx.saved_tensors
        shape = saved.size()
        rows = shape[0]
        W = saved.view(rows, -1)
        G = grad_out.view(rows, -1)
        # W W^T G - W G^T W  (the commented-out "Version2" in the original
        # dropped the leading W W^T factor; it is not used here).
        projected = (W @ W.t()) @ G - (W @ G.t()) @ W
        return projected.view(shape)


mani_grad = ManiGrad.apply
class Orth_Plane_Conv2d(_ConvNd):
    """Conv2d whose flattened filters are kept row-orthonormal.

    The weight is viewed as a ``(out_channels, in_channels*kh*kw)`` matrix W
    and ``project()`` retracts W onto the set of matrices with orthonormal
    rows (``W @ W.t() == I``) via QR.  This requires
    ``out_channels <= in_channels*kh*kw``.

    :param norm: if True, divide the conv output by the L2 norm of each input
        patch (computed by convolving the squared input with the all-ones
        kernel registered as ``input_norm_wei``).
    :param w_norm: if True, ``project`` falls back to per-row L2
        normalization when no QR/SVD projection is performed.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 norm=False, w_norm=False):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(Orth_Plane_Conv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _pair(0), groups, bias)
        self.total_in_dim = in_channels * kernel_size[0] * kernel_size[1]
        # Orthonormal rows can only exist when W is square or wide.
        if out_channels > self.total_in_dim:
            raise ValueError('out_channels must not be greater than input dimension (in_channels*kernel_size[0]*kernel_size[1])')
        self.eps = 1e-8
        self.norm = norm
        self.w_norm = w_norm
        if norm:
            self.register_buffer('input_norm_wei', torch.ones(1, in_channels // groups, *kernel_size))
        # He-style init, then an immediate retraction to orthonormal rows.
        n = self.kernel_size[0] * self.kernel_size[1] * self.in_channels
        self.weight.data.normal_(0, math.sqrt(2. / n))
        if bias:
            self.bias.data.fill_(0)
        self.projectiter = 0
        self.project(style='qr', interval=1)

    def forward(self, input):
        """Standard conv2d; optionally divided by the per-patch input norm."""
        _output = F.conv2d(input, self.weight, self.bias, self.stride,
                           self.padding, self.dilation, self.groups)
        if self.norm:
            # ||patch||_2 = sqrt(conv(input^2, ones)), clamped for stability.
            input_norm = torch.sqrt(
                F.conv2d(input ** 2, self.input_norm_wei, None,
                         self.stride, self.padding, self.dilation,
                         self.groups).clamp(min=self.eps))
            _output = _output / input_norm
        return _output

    def orth_penalty(self):
        """Return sum((W W^T - I)^2); zero iff the rows of W are orthonormal."""
        outputSize = self.weight.size(0)
        W = self.weight.view(outputSize, -1)
        WWt = W.mm(W.t())
        # Build the identity on the weight's device/dtype so the penalty also
        # works on CPU (the original hard-coded `.cuda()`).
        I = torch.eye(outputSize, device=W.device, dtype=W.dtype)
        return ((WWt - I) ** 2).sum()

    def project(self, style='qr', interval=1):
        """Retract the weight onto orthonormal rows.

        The projection only runs every ``interval`` calls.  'qr' uses a QR
        factorization with the sign fix of
        https://arxiv.org/pdf/math-ph/0609050.pdf (positive diag of R makes Q
        unique); 'svd' replaces W with U V^T.  Otherwise, if ``w_norm`` is
        set, rows are merely renormalized to unit length.
        """
        self.projectiter = self.projectiter + 1
        originSize = self.weight.data.size()
        outputSize = originSize[0]
        if style == 'qr' and self.projectiter % interval == 0:
            q, r = torch.qr(self.weight.data.view(outputSize, -1).t())
            ph = torch.diag(r, 0).sign()
            q *= ph
            self.weight.data = q.t().view(originSize)
        elif style == 'svd' and self.projectiter % interval == 0:
            # NOTE(review): marked "Problematic" by the original author --
            # the SVD path may be numerically unstable.
            u, s, v = torch.svd(self.weight.data.view(outputSize, -1))
            self.weight.data = u.mm(v.t()).view(originSize)
        elif self.w_norm:
            self.weight.data = self.weight.data / torch.norm(
                self.weight.data.view(outputSize, -1), 2, 1).clamp(min=1e-8).view(-1, 1, 1, 1)

    def showOrthInfo(self):
        """Print singular-value statistics of W^T and return the singular values."""
        outputSize = self.weight.data.size(0)
        W = self.weight.data.view(outputSize, -1)
        _, s, _ = torch.svd(W.t())
        print('Singular Value Summary: ')
        print('max :', s.max().item())
        print('mean:', s.mean().item())
        print('min :', s.min().item())
        print('var :', s.var().item())
        # Device-agnostic identity, same fix as in orth_penalty().
        I = torch.eye(outputSize, device=W.device, dtype=W.dtype)
        print('penalty :', ((W.mm(W.t()) - I) ** 2).sum().item())
        return s
class Orth_Plane_Mani_Conv2d(Orth_Plane_Conv2d):
    """Orth_Plane_Conv2d variant that routes the weight through ``mani_grad``
    so the backward pass receives the manifold-projected gradient."""

    def forward(self, input):
        # mani_grad is an identity in the forward pass; it only rewrites grads.
        kernel = mani_grad(self.weight)
        out = F.conv2d(input, kernel, self.bias, self.stride,
                       self.padding, self.dilation, self.groups)
        if not self.norm:
            return out
        # Divide by the L2 norm of each input patch, clamped for stability.
        patch_sq = F.conv2d(input ** 2, Variable(self.input_norm_wei), None,
                            self.stride, self.padding, self.dilation, self.groups)
        return out / torch.sqrt(patch_sq.clamp(min=self.eps))
class Orth_UV_Conv2d(Module):
    '''
    Conv2d layer whose kernel is stored in factored form W = U d V, with U
    and V kept (approximately) orthogonal via project() and the spectrum d
    controlled per `mode` (set with setmode()):

    W = UdV
    Mode 1: ! divided by max
    Mode 2: ! truncate by 1
    Mode 3: ! penalize sum of all log max spectral
    Mode 4: divided by max and then clip to [0.5,1]
    Mode 5: penalize E(-log(q(x))) q(x)~|N(0,0.2)| & sum of all log max spectral (fail to directly apply)
    Mode 6: ! penalize E(-log(q(x))) q(x)~|N(0,0.2)| & divided by max
    Mode 7: ! penalize E(-log(q(x))) q(x)~|N(0,0.2)| & truncate by 1 (worked)
    Mode 8: ! penalize dlogd & divided by max
    Mode 9: penalize expd & divided by max
    Mode 10: penalize logd & divided by max

    NOTE(review): for mode 4 the code clamps to (0.4, 1) although the list
    above says [0.5, 1] -- confirm which bound is intended.
    '''
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, norm = False):
        # `norm` additionally registers the input_norm_wei buffer used by
        # subclasses for per-position input normalization.
        self.eps = 1e-8
        self.norm = norm
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(Orth_UV_Conv2d, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # Flattened fan-in of one kernel: C_in * kh * kw.
        self.total_in_dim = in_channels*kernel_size[0]*kernel_size[1]
        self.weiSize = (self.out_channels,in_channels,kernel_size[0],kernel_size[1])
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.output_padding = _pair(0)
        self.groups = groups
        # Factor shapes depend on whether the layer is under- or
        # over-complete; both branches draw U, V ~ N(0, 2/fan) and set d = 1.
        if self.out_channels <= self.total_in_dim:
            self.Uweight = Parameter(torch.Tensor(self.out_channels, self.out_channels))
            self.Dweight = Parameter(torch.Tensor(self.out_channels))
            self.Vweight = Parameter(torch.Tensor(self.out_channels, self.total_in_dim))
            self.Uweight.data.normal_(0, math.sqrt(2. / self.out_channels))
            self.Vweight.data.normal_(0, math.sqrt(2. / self.total_in_dim))
            self.Dweight.data.fill_(1)
        else:
            self.Uweight = Parameter(torch.Tensor(self.out_channels, self.total_in_dim))
            self.Dweight = Parameter(torch.Tensor(self.total_in_dim))
            self.Vweight = Parameter(torch.Tensor(self.total_in_dim, self.total_in_dim))
            self.Uweight.data.normal_(0, math.sqrt(2. / self.out_channels))
            self.Vweight.data.normal_(0, math.sqrt(2. / self.total_in_dim))
            self.Dweight.data.fill_(1)
        # Orthogonalize the freshly initialized factors right away.
        self.projectiter = 0
        self.project(style='qr', interval = 1)
        if bias:
            self.bias = Parameter(torch.Tensor(self.out_channels))
            self.bias.data.fill_(0)
        else:
            self.register_parameter('bias', None)
        if norm:
            self.register_buffer('input_norm_wei',torch.ones(1, in_channels // groups, *kernel_size))
    def setmode(self, mode):
        # Select the spectrum-control strategy (see class docstring).
        self.mode = mode
    def update_sigma(self):
        """Rescale/clamp the spectrum d in place according to self.mode."""
        if self.mode in (1,6,8,9,10):
            self.Dweight.data = self.Dweight.data/self.Dweight.data.abs().max()
        elif self.mode in (2,7):
            self.Dweight.data.clamp_(-1, 1)
        elif self.mode == 4:
            self.Dweight.data = self.Dweight.data/self.Dweight.data.abs().max()
            self.Dweight.data.clamp_(0.4, 1)
    def log_spectral(self):
        """Log of the largest absolute singular value."""
        return torch.log(self.Dweight.abs().max())
    def spectral_penalty(self):
        """Mode-dependent penalty on the spectrum d.

        Raises RuntimeError for modes that define no penalty here.
        """
        if self.mode in (5,6,7):
            if(len(self.Dweight)==1):
                return 0
            sd2 = 0.1**2
            _d, _ = self.Dweight.sort()
            # Penalize deviation from 1 plus the negative log-spacing of
            # adjacent sorted singular values (eps avoids log(0)).
            return ( (1 - _d[:-1])**2/sd2-torch.log((_d[1:] - _d[:-1])+1e-8) ).mean()
        elif self.mode == 8:
            return (self.Dweight*torch.log(self.Dweight)).mean()
        elif self.mode == 9:
            return (torch.exp(self.Dweight)).mean()
        elif self.mode == 10:
            return -(torch.log(self.Dweight)).mean()
        else:
            raise RuntimeError("error mode")
    @property
    def W_(self):
        """Composed kernel U diag(d) V reshaped for conv2d (runs update_sigma first)."""
        self.update_sigma()
        return self.Uweight.mm(self.Dweight.diag()).mm(self.Vweight).view(self.weiSize)
    def forward(self, input):
        _output = F.conv2d(input, self.W_, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
        return _output
    def orth_penalty(self):
        """Return ||UU^T - I||^2 + ||VV^T - I||^2 (U transposed when over-complete).

        NOTE(review): builds the identity with .cuda(), so this requires a GPU.
        """
        penalty = 0
        if self.out_channels <= self.total_in_dim:
            W = self.Uweight
        else:
            W = self.Uweight.t()
        Wt = torch.t(W)
        WWt = W.mm(Wt)
        I = Variable(torch.eye(WWt.size()[0]).cuda())
        penalty = penalty+((WWt.sub(I))**2).sum()
        W = self.Vweight
        Wt = torch.t(W)
        WWt = W.mm(Wt)
        I = Variable(torch.eye(WWt.size()[0]).cuda())
        penalty = penalty+((WWt.sub(I))**2).sum()
        return penalty
    def project(self, style='none', interval = 1):
        '''
        Re-orthogonalize U and V every `interval` calls.

        style='qr' uses a QR decomposition with the sign fix from
        https://arxiv.org/pdf/math-ph/0609050.pdf to make Q unique;
        style='svd' replaces each factor by the nearest orthogonal matrix
        u v^T from its SVD (may be numerically unstable).
        '''
        self.projectiter = self.projectiter+1
        if style=='qr' and self.projectiter%interval == 0:
            # Compute the qr factorization for U
            if self.out_channels <= self.total_in_dim:
                q, r = torch.qr(self.Uweight.data.t())
            else:
                q, r = torch.qr(self.Uweight.data)
            # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
            d = torch.diag(r, 0)
            ph = d.sign()
            q *= ph
            if self.out_channels <= self.total_in_dim:
                self.Uweight.data = q.t()
            else:
                self.Uweight.data = q
            # Compute the qr factorization for V
            q, r = torch.qr(self.Vweight.data.t())
            # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
            d = torch.diag(r, 0)
            ph = d.sign()
            q *= ph
            self.Vweight.data = q.t()
        elif style=='svd' and self.projectiter%interval == 0:
            # Compute the svd factorization (may be not stable) for U
            u, s, v = torch.svd(self.Uweight.data)
            self.Uweight.data = u.mm(v.t())
            # Compute the svd factorization (may be not stable) for V
            u, s, v = torch.svd(self.Vweight.data)
            self.Vweight.data = u.mm(v.t())
    def showOrthInfo(self):
        """Print spectrum stats of d and of the composed W, plus U/V
        orthogonality residuals.

        NOTE(review): uses torch.eye(...).cuda(); requires a CUDA device.
        """
        s= self.Dweight.data
        _D = self.Dweight.data.diag()
        W = self.Uweight.data.mm(_D).mm(self.Vweight.data)
        _, ss, _ = torch.svd(W.t())
        print('Singular Value Summary: ')
        print('max :',s.max().item(),'max* :',ss.max().item())
        print('mean:',s.mean().item(),'mean*:',ss.mean().item())
        print('min :',s.min().item(),'min* :',ss.min().item())
        print('var :',s.var().item(),'var* :',ss.var().item())
        print('s RMSE: ', ((s-ss)**2).mean().item()**0.5)
        if self.out_channels <= self.total_in_dim:
            pu = (self.Uweight.data.mm(self.Uweight.data.t())-torch.eye(self.Uweight.size()[0]).cuda()).norm().item()**2
        else:
            pu = (self.Uweight.data.t().mm(self.Uweight.data)-torch.eye(self.Uweight.size()[1]).cuda()).norm().item()**2
        pv = (self.Vweight.data.mm(self.Vweight.data.t())-torch.eye(self.Vweight.size()[0]).cuda()).norm().item()**2
        print('penalty :', pu, ' (U) + ', pv, ' (V)' )
        return ss
class Orth_UV_Mani_Conv2d(Orth_UV_Conv2d):
    """Orth_UV_Conv2d variant whose forward composes W = U diag(d) V
    directly, bypassing the parent's ``W_`` property (and hence its
    ``update_sigma`` rescaling)."""

    def forward(self, input):
        # Rebuild the kernel from its factors.
        kernel = self.Uweight.mm(self.Dweight.diag()).mm(self.Vweight).view(self.weiSize)
        response = F.conv2d(input, kernel, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        if not self.norm:
            return response
        # Optional per-position input normalization.
        energy = F.conv2d(input ** 2, Variable(self.input_norm_wei), None,
                          self.stride, self.padding, self.dilation, self.groups)
        return response / torch.sqrt(energy.clamp(min=self.eps))
class GroupOrthConv(nn.Module):
    '''
    Divide output channels into `groups` independent sub-convolutions.

    The requested ``out_channels`` are split across several instances of the
    given ``Orth_Conv2d`` class (one per group); their outputs are
    concatenated along the channel dimension in forward().
    '''
    def __init__(self, Orth_Conv2d, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, bias=False, groups=None):
        """Build the grouped convolution.

        Parameters
        ----------
        Orth_Conv2d : callable
            Conv-module class/factory used for each group's sub-convolution.
        in_channels, out_channels, kernel_size, stride, padding, bias
            Standard conv parameters; ``kernel_size`` must be an int here
            (it is squared when auto-sizing the groups).
        groups : int or None
            Number of groups; when None it is chosen so that each sub-conv
            has at most in_channels * kernel_size**2 output channels.
        """
        super(GroupOrthConv, self).__init__()
        if groups is None:  # fix: identity comparison with None, not `== None`
            groups = (out_channels-1)//(in_channels*kernel_size*kernel_size)+1
        self.groups = groups
        # NOTE: attribute keeps the historical name ("gourp") for backward
        # compatibility with existing callers.
        self.gourp_out_channels = np.ones(groups) * (out_channels//groups)
        # Distribute the remainder, one extra channel per leading group.
        if out_channels % groups > 0:
            self.gourp_out_channels[:out_channels % groups] += 1
        self.sconvs = []
        for i in range(groups):
            newsconv = Orth_Conv2d(in_channels, int(self.gourp_out_channels[i]),
                                    kernel_size=kernel_size, stride=stride,
                                    padding=padding, bias=bias)
            # add_module registers each sub-conv so its parameters are tracked.
            self.add_module('sconv{0}'.format(i), newsconv)
            self.sconvs.append(newsconv)

    def forward(self, x):
        """Apply every group's sub-conv and concatenate along channels."""
        outs = [sconv(x) for sconv in self.sconvs]
        return torch.cat(outs, 1)
| [
"torch.t",
"torch.ones",
"torch.eye",
"math.sqrt",
"torch.log",
"torch.svd",
"torch.autograd.Variable",
"torch.nn.functional.conv2d",
"torch.cat",
"torch.diag",
"numpy.ones",
"torch.exp",
"torch.Tensor",
"torch.qr",
"torch.nn.modules.utils._pair"
] | [((695, 710), 'torch.autograd.Variable', 'Variable', (['input'], {}), '(input)\n', (703, 710), False, 'from torch.autograd import Variable\n'), ((833, 843), 'torch.t', 'torch.t', (['W'], {}), '(W)\n', (840, 843), False, 'import torch\n'), ((1389, 1407), 'torch.nn.modules.utils._pair', '_pair', (['kernel_size'], {}), '(kernel_size)\n', (1394, 1407), False, 'from torch.nn.modules.utils import _single, _pair, _triple\n'), ((1425, 1438), 'torch.nn.modules.utils._pair', '_pair', (['stride'], {}), '(stride)\n', (1430, 1438), False, 'from torch.nn.modules.utils import _single, _pair, _triple\n'), ((1457, 1471), 'torch.nn.modules.utils._pair', '_pair', (['padding'], {}), '(padding)\n', (1462, 1471), False, 'from torch.nn.modules.utils import _single, _pair, _triple\n'), ((1491, 1506), 'torch.nn.modules.utils._pair', '_pair', (['dilation'], {}), '(dilation)\n', (1496, 1506), False, 'from torch.nn.modules.utils import _single, _pair, _triple\n'), ((2630, 2725), 'torch.nn.functional.conv2d', 'F.conv2d', (['input', '_weight', 'self.bias', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(input, _weight, self.bias, self.stride, self.padding, self.\n dilation, self.groups)\n', (2638, 2725), True, 'from torch.nn import functional as F\n'), ((3197, 3207), 'torch.t', 'torch.t', (['W'], {}), '(W)\n', (3204, 3207), False, 'import torch\n'), ((5126, 5221), 'torch.nn.functional.conv2d', 'F.conv2d', (['input', '_weight', 'self.bias', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(input, _weight, self.bias, self.stride, self.padding, self.\n dilation, self.groups)\n', (5134, 5221), True, 'from torch.nn import functional as F\n'), ((6357, 6375), 'torch.nn.modules.utils._pair', '_pair', (['kernel_size'], {}), '(kernel_size)\n', (6362, 6375), False, 'from torch.nn.modules.utils import _single, _pair, _triple\n'), ((6393, 6406), 'torch.nn.modules.utils._pair', '_pair', (['stride'], {}), '(stride)\n', (6398, 6406), False, 'from 
torch.nn.modules.utils import _single, _pair, _triple\n'), ((6425, 6439), 'torch.nn.modules.utils._pair', '_pair', (['padding'], {}), '(padding)\n', (6430, 6439), False, 'from torch.nn.modules.utils import _single, _pair, _triple\n'), ((6459, 6474), 'torch.nn.modules.utils._pair', '_pair', (['dilation'], {}), '(dilation)\n', (6464, 6474), False, 'from torch.nn.modules.utils import _single, _pair, _triple\n'), ((7144, 7152), 'torch.nn.modules.utils._pair', '_pair', (['(0)'], {}), '(0)\n', (7149, 7152), False, 'from torch.nn.modules.utils import _single, _pair, _triple\n'), ((9816, 9911), 'torch.nn.functional.conv2d', 'F.conv2d', (['input', 'self.W_', 'self.bias', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(input, self.W_, self.bias, self.stride, self.padding, self.\n dilation, self.groups)\n', (9824, 9911), True, 'from torch.nn import functional as F\n'), ((10145, 10155), 'torch.t', 'torch.t', (['W'], {}), '(W)\n', (10152, 10155), False, 'import torch\n'), ((10323, 10333), 'torch.t', 'torch.t', (['W'], {}), '(W)\n', (10330, 10333), False, 'import torch\n'), ((13453, 13548), 'torch.nn.functional.conv2d', 'F.conv2d', (['input', '_weight', 'self.bias', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(input, _weight, self.bias, self.stride, self.padding, self.\n dilation, self.groups)\n', (13461, 13548), True, 'from torch.nn import functional as F\n'), ((14942, 14959), 'torch.cat', 'torch.cat', (['out', '(1)'], {}), '(out, 1)\n', (14951, 14959), False, 'import torch\n'), ((1654, 1662), 'torch.nn.modules.utils._pair', '_pair', (['(0)'], {}), '(0)\n', (1659, 1662), False, 'from torch.nn.modules.utils import _single, _pair, _triple\n'), ((2232, 2250), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (2241, 2250), False, 'import math\n'), ((3864, 3880), 'torch.diag', 'torch.diag', (['r', '(0)'], {}), '(r, 0)\n', (3874, 3880), False, 'import torch\n'), ((11028, 11044), 'torch.diag', 'torch.diag', (['r', '(0)'], 
{}), '(r, 0)\n', (11038, 11044), False, 'import torch\n'), ((11446, 11462), 'torch.diag', 'torch.diag', (['r', '(0)'], {}), '(r, 0)\n', (11456, 11462), False, 'import torch\n'), ((14302, 14317), 'numpy.ones', 'np.ones', (['groups'], {}), '(groups)\n', (14309, 14317), True, 'import numpy as np\n'), ((2070, 2120), 'torch.ones', 'torch.ones', (['(1)', '(in_channels // groups)', '*kernel_size'], {}), '(1, in_channels // groups, *kernel_size)\n', (2080, 2120), False, 'import torch\n'), ((7272, 7322), 'torch.Tensor', 'torch.Tensor', (['self.out_channels', 'self.out_channels'], {}), '(self.out_channels, self.out_channels)\n', (7284, 7322), False, 'import torch\n'), ((7361, 7392), 'torch.Tensor', 'torch.Tensor', (['self.out_channels'], {}), '(self.out_channels)\n', (7373, 7392), False, 'import torch\n'), ((7431, 7481), 'torch.Tensor', 'torch.Tensor', (['self.out_channels', 'self.total_in_dim'], {}), '(self.out_channels, self.total_in_dim)\n', (7443, 7481), False, 'import torch\n'), ((7524, 7558), 'math.sqrt', 'math.sqrt', (['(2.0 / self.out_channels)'], {}), '(2.0 / self.out_channels)\n', (7533, 7558), False, 'import math\n'), ((7600, 7634), 'math.sqrt', 'math.sqrt', (['(2.0 / self.total_in_dim)'], {}), '(2.0 / self.total_in_dim)\n', (7609, 7634), False, 'import math\n'), ((7725, 7775), 'torch.Tensor', 'torch.Tensor', (['self.out_channels', 'self.total_in_dim'], {}), '(self.out_channels, self.total_in_dim)\n', (7737, 7775), False, 'import torch\n'), ((7814, 7845), 'torch.Tensor', 'torch.Tensor', (['self.total_in_dim'], {}), '(self.total_in_dim)\n', (7826, 7845), False, 'import torch\n'), ((7884, 7934), 'torch.Tensor', 'torch.Tensor', (['self.total_in_dim', 'self.total_in_dim'], {}), '(self.total_in_dim, self.total_in_dim)\n', (7896, 7934), False, 'import torch\n'), ((7977, 8011), 'math.sqrt', 'math.sqrt', (['(2.0 / self.out_channels)'], {}), '(2.0 / self.out_channels)\n', (7986, 8011), False, 'import math\n'), ((8053, 8087), 'math.sqrt', 'math.sqrt', (['(2.0 / 
self.total_in_dim)'], {}), '(2.0 / self.total_in_dim)\n', (8062, 8087), False, 'import math\n'), ((8255, 8286), 'torch.Tensor', 'torch.Tensor', (['self.out_channels'], {}), '(self.out_channels)\n', (8267, 8286), False, 'import torch\n'), ((8456, 8506), 'torch.ones', 'torch.ones', (['(1)', '(in_channels // groups)', '*kernel_size'], {}), '(1, in_channels // groups, *kernel_size)\n', (8466, 8506), False, 'import torch\n'), ((10900, 10927), 'torch.qr', 'torch.qr', (['self.Uweight.data'], {}), '(self.Uweight.data)\n', (10908, 10927), False, 'import torch\n'), ((11701, 11729), 'torch.svd', 'torch.svd', (['self.Uweight.data'], {}), '(self.Uweight.data)\n', (11710, 11729), False, 'import torch\n'), ((11867, 11895), 'torch.svd', 'torch.svd', (['self.Vweight.data'], {}), '(self.Vweight.data)\n', (11876, 11895), False, 'import torch\n'), ((9257, 9292), 'torch.log', 'torch.log', (['(_d[1:] - _d[:-1] + 1e-08)'], {}), '(_d[1:] - _d[:-1] + 1e-08)\n', (9266, 9292), False, 'import torch\n'), ((2823, 2852), 'torch.autograd.Variable', 'Variable', (['self.input_norm_wei'], {}), '(self.input_norm_wei)\n', (2831, 2852), False, 'from torch.autograd import Variable\n'), ((5319, 5348), 'torch.autograd.Variable', 'Variable', (['self.input_norm_wei'], {}), '(self.input_norm_wei)\n', (5327, 5348), False, 'from torch.autograd import Variable\n'), ((9363, 9386), 'torch.log', 'torch.log', (['self.Dweight'], {}), '(self.Dweight)\n', (9372, 9386), False, 'import torch\n'), ((9444, 9467), 'torch.exp', 'torch.exp', (['self.Dweight'], {}), '(self.Dweight)\n', (9453, 9467), False, 'import torch\n'), ((13646, 13675), 'torch.autograd.Variable', 'Variable', (['self.input_norm_wei'], {}), '(self.input_norm_wei)\n', (13654, 13675), False, 'from torch.autograd import Variable\n'), ((9527, 9550), 'torch.log', 'torch.log', (['self.Dweight'], {}), '(self.Dweight)\n', (9536, 9550), False, 'import torch\n'), ((4897, 4918), 'torch.eye', 'torch.eye', (['outputSize'], {}), '(outputSize)\n', (4906, 4918), False, 
'import torch\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2017 - for information on the respective copyright owner
# see the NOTICE file and/or the repository https://github.com/boschresearch/statestream
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import skimage.measure
from statestream.ccpp.cgraphics import cgraphics_tensor_dist
def array_property(a, prop):
    """Compute a scalar summary property of an array.

    Parameter
    ---------
    a : np.ndarray
        The array for which to compute the property.
    prop : str
        String specifying the property:
        * "mean", "var", "std", "max", "min", "median" -- numpy reductions;
        * "mv-<axes>" / "vm-<axes>" -- mean of the per-axis variance
          (resp. variance of the per-axis mean), where <axes> is a string
          of axis digits, e.g. "mv-12" means axis=(1, 2). Generalized to
          any number of axes (the original crashed with UnboundLocalError
          for more than three digits);
        * "L0", "L1", "L2"/"norm", "Linf" -- vector norms;
        * "13-mean" -- normalized ratio based on the L1 and L3 norms.

    Return
    ------
    Scalar property value, or None for an unrecognized `prop` (preserves
    the original behavior).
    """
    simple = {
        "mean": np.mean,
        "var": np.var,
        "std": np.std,
        "max": np.max,
        "min": np.min,
        "median": np.median,
    }
    if prop in simple:
        return simple[prop](a)
    if prop[0:3] in ("mv-", "vm-"):
        # Parse trailing digits as axis indices; a single digit stays a
        # scalar so the numpy call matches the original exactly.
        digits = tuple(int(c) for c in prop[3:])
        axis = digits[0] if len(digits) == 1 else digits
        if prop[0] == "m":
            return np.mean(np.var(a, axis=axis))
        return np.var(np.mean(a, axis=axis))
    if prop == "L0":
        return np.linalg.norm(a, ord=0)
    if prop == "L1":
        return np.linalg.norm(a, ord=1)
    if prop in ("L2", "norm"):
        return np.linalg.norm(a)
    if prop == "Linf":
        return np.linalg.norm(a, ord=np.inf)
    if prop == "13-mean":
        arr = np.abs(a)
        # 1/n^(2/3) * ||a||_1 * ||a||_3^(-1), written as in the original.
        value = 1.0 / (float(np.prod(arr.shape)) ** (2.0 / 3.0))
        value *= np.sum(arr)
        value *= np.sum(arr * arr * arr) ** (-1.0 / 3.0)
        return value
    return None
def np_feature_metric(x, y, metric, samples):
    """Compute a pairwise feature metric between two 4D tensors.

    Both inputs are laid out as [agents, features, dim_x, dim_y]. The
    spatially larger tensor is max-pooled down to the smaller one's spatial
    size, then `cgraphics_tensor_dist` fills a [feature_x, feature_y]
    matrix with the chosen metric over the first `samples` agents.

    Parameter
    ---------
    x, y : np.ndarray
        4D arrays. NOTE(review): only x.shape[2] vs y.shape[2] is compared,
        so callers are assumed to keep both spatial dims consistently
        ordered -- confirm.
    metric : str
        One of 'inf'/'L-inf'/'Linf', 'L0', 'L1', 'L2', 'dot', 'cos'/'cosine'.
    samples : int
        Number of leading agents to include.

    Return
    ------
    z : np.ndarray
        A 2D array of dimension [feature_x, feature_y].

    Raises
    ------
    ValueError
        For an unrecognized `metric` (previously this crashed with an
        UnboundLocalError on the undefined modus variable).
    """
    metric_val = np.zeros([x.shape[1], y.shape[1]], dtype=np.float32).flatten()
    # Determine the larger (in the sense of space) tensor and pool it down.
    if x.shape[2] == y.shape[2] and x.shape[3] == y.shape[3]:
        X = x[0:samples,:,:,:]
        Y = y[0:samples,:,:,:]
    elif x.shape[2] > y.shape[2]:
        X = np.zeros([samples, x.shape[1]] + list(y.shape[2:]), dtype=np.float32)
        factor = (int(x.shape[2] / y.shape[2]),
                  int(x.shape[3] / y.shape[3]))
        for a in range(samples):
            for f in range(x.shape[1]):
                X[a,f,:,:] = skimage.measure.block_reduce(x[a,f,:,:],
                                                          factor,
                                                          np.max)
        Y = y[0:samples,:,:,:]
    else:
        X = x[0:samples,:,:,:]
        Y = np.zeros([samples, y.shape[1]] + list(x.shape[2:]), dtype=np.float32)
        factor = (int(y.shape[2] / x.shape[2]),
                  int(y.shape[3] / x.shape[3]))
        for a in range(samples):
            for f in range(y.shape[1]):
                Y[a,f,:,:] = skimage.measure.block_reduce(y[a,f,:,:],
                                                          factor,
                                                          np.max)
    # Translate the metric name into the integer modus used by the C kernel.
    metric_codes = {
        'inf': -1, 'L-inf': -1, 'Linf': -1,
        'L0': 0,
        'L1': 1,
        'L2': 2,
        'dot': 3,
        'cos': 4, 'cosine': 4,
    }
    try:
        m = metric_codes[metric]
    except KeyError:
        raise ValueError("unknown metric: " + str(metric))
    # The C routine fills metric_val in place.
    cgraphics_tensor_dist(X.flatten(),
                          Y.flatten(),
                          metric_val,
                          samples,
                          X.shape[1],
                          Y.shape[1],
                          X.shape[2],
                          X.shape[3],
                          m)
    return np.reshape(metric_val, [x.shape[1], y.shape[1]])
| [
"numpy.abs",
"numpy.sum",
"numpy.std",
"numpy.median",
"numpy.zeros",
"numpy.max",
"numpy.mean",
"numpy.min",
"numpy.reshape",
"numpy.linalg.norm",
"numpy.var",
"numpy.prod"
] | [((5014, 5062), 'numpy.reshape', 'np.reshape', (['metric_val', '[x.shape[1], y.shape[1]]'], {}), '(metric_val, [x.shape[1], y.shape[1]])\n', (5024, 5062), True, 'import numpy as np\n'), ((1143, 1153), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (1150, 1153), True, 'import numpy as np\n'), ((1194, 1203), 'numpy.var', 'np.var', (['a'], {}), '(a)\n', (1200, 1203), True, 'import numpy as np\n'), ((3073, 3125), 'numpy.zeros', 'np.zeros', (['[x.shape[1], y.shape[1]]'], {'dtype': 'np.float32'}), '([x.shape[1], y.shape[1]], dtype=np.float32)\n', (3081, 3125), True, 'import numpy as np\n'), ((1244, 1253), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (1250, 1253), True, 'import numpy as np\n'), ((1294, 1303), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (1300, 1303), True, 'import numpy as np\n'), ((1344, 1353), 'numpy.min', 'np.min', (['a'], {}), '(a)\n', (1350, 1353), True, 'import numpy as np\n'), ((1397, 1409), 'numpy.median', 'np.median', (['a'], {}), '(a)\n', (1406, 1409), True, 'import numpy as np\n'), ((1912, 1936), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {'ord': '(0)'}), '(a, ord=0)\n', (1926, 1936), True, 'import numpy as np\n'), ((1787, 1807), 'numpy.var', 'np.var', (['a'], {'axis': 'axis'}), '(a, axis=axis)\n', (1793, 1807), True, 'import numpy as np\n'), ((1850, 1871), 'numpy.mean', 'np.mean', (['a'], {'axis': 'axis'}), '(a, axis=axis)\n', (1857, 1871), True, 'import numpy as np\n'), ((1976, 2000), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {'ord': '(1)'}), '(a, ord=1)\n', (1990, 2000), True, 'import numpy as np\n'), ((2050, 2067), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (2064, 2067), True, 'import numpy as np\n'), ((2109, 2138), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {'ord': 'np.inf'}), '(a, ord=np.inf)\n', (2123, 2138), True, 'import numpy as np\n'), ((2181, 2190), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (2187, 2190), True, 'import numpy as np\n'), ((2273, 2284), 'numpy.sum', 'np.sum', (['arr'], {}), 
'(arr)\n', (2279, 2284), True, 'import numpy as np\n'), ((2302, 2325), 'numpy.sum', 'np.sum', (['(arr * arr * arr)'], {}), '(arr * arr * arr)\n', (2308, 2325), True, 'import numpy as np\n'), ((2220, 2238), 'numpy.prod', 'np.prod', (['arr.shape'], {}), '(arr.shape)\n', (2227, 2238), True, 'import numpy as np\n')] |
from pandas import DataFrame
import seaborn
from sgld_test.gradients_of_likelihood import manual_grad, grad_log_prior
from sgld_test.mcmc_convergance.cosnt import CHAIN_SIZE, NUMBER_OF_TESTS, NO_OF_SAMPELS_IN_TEST, SEED, SAMPLE_SIZE
from sgld_test.likelihoods import gen_X, log_probability
from stat_test.linear_time import GaussianSteinTest
from stat_test.quadratic_time import GaussianQuadraticTest, QuadraticMultiple
__author__ = 'kcx'
import numpy as np
# Load the pre-computed MCMC chains: samples[i, t] is chain i at step t.
samples = np.load('./samples.npy')

np.random.seed(SEED)
X = gen_X(SAMPLE_SIZE)


def grad_log_pob(theta):
    # Gradient of the log likelihood summed over the data X, evaluated for
    # each 2-D parameter vector in `theta` (one row per parameter point).
    s=[]
    for t in theta:
        s.append( np.sum(manual_grad(t[0],t[1],X),axis=0))
    return np.array(s)


me = GaussianSteinTest(grad_log_pob,1)

times_we_look_at = range(0,CHAIN_SIZE,1)

# arr = np.empty((0,2))
#
#
# for time in times_we_look_at:
#     chain_at_time = samples[:,time]
#     print(time)
#     list_of_chain_slices = np.split(chain_at_time,NUMBER_OF_TESTS)
#     for chains_slice in list_of_chain_slices:
#         assert chains_slice.shape == (NO_OF_SAMPELS_IN_TEST,2)
#         pval = me.compute_pvalue(chains_slice)
#         arr = np.vstack((arr, np.array([time,pval])))
#
#
#
# df = DataFrame(arr)
#
# pr = seaborn.boxplot(x=0,y=1,data=df)
# seaborn.plt.show()
# fig = pr.get_figure()
# fig.savefig('../../write_up/img/mcmc_mixing.pdf')

arr = []
# NOTE(review): this GaussianSteinTest is never used -- the loop below
# builds a GaussianQuadraticTest per time step instead.
me = GaussianSteinTest(grad_log_pob,1)
for time in times_we_look_at:
    chain_at_time = samples[:,time]
    # print(time)
    # pval = me.compute_pvalue(chain_at_time)
    # arr.append(pval)

    def grad_log_pob(t):
        # NOTE(review): shadows the module-level grad_log_pob above; this
        # version works on a single point and adds the prior gradient.
        a = np.sum(manual_grad(t[0],t[1],X),axis=0) + grad_log_prior(t)
        return a

    P_CHANGE =0.1  # NOTE(review): unused -- the 0.1 below is passed literally
    me = GaussianQuadraticTest(grad_log_pob)
    qm = QuadraticMultiple(me)
    reject, p = qm.is_from_null(0.05, chain_at_time, 0.1)

    print(reject)
# import matplotlib.pyplot as plt
#
# print(arr)
#
# plt.plot(arr)
#
# plt.show() | [
"numpy.load",
"numpy.random.seed",
"sgld_test.gradients_of_likelihood.manual_grad",
"sgld_test.gradients_of_likelihood.grad_log_prior",
"sgld_test.likelihoods.gen_X",
"stat_test.quadratic_time.QuadraticMultiple",
"numpy.array",
"stat_test.quadratic_time.GaussianQuadraticTest",
"stat_test.linear_time... | [((471, 495), 'numpy.load', 'np.load', (['"""./samples.npy"""'], {}), "('./samples.npy')\n", (478, 495), True, 'import numpy as np\n'), ((497, 517), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (511, 517), True, 'import numpy as np\n'), ((522, 540), 'sgld_test.likelihoods.gen_X', 'gen_X', (['SAMPLE_SIZE'], {}), '(SAMPLE_SIZE)\n', (527, 540), False, 'from sgld_test.likelihoods import gen_X, log_probability\n'), ((684, 718), 'stat_test.linear_time.GaussianSteinTest', 'GaussianSteinTest', (['grad_log_pob', '(1)'], {}), '(grad_log_pob, 1)\n', (701, 718), False, 'from stat_test.linear_time import GaussianSteinTest\n'), ((1347, 1381), 'stat_test.linear_time.GaussianSteinTest', 'GaussianSteinTest', (['grad_log_pob', '(1)'], {}), '(grad_log_pob, 1)\n', (1364, 1381), False, 'from stat_test.linear_time import GaussianSteinTest\n'), ((666, 677), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (674, 677), True, 'import numpy as np\n'), ((1679, 1714), 'stat_test.quadratic_time.GaussianQuadraticTest', 'GaussianQuadraticTest', (['grad_log_pob'], {}), '(grad_log_pob)\n', (1700, 1714), False, 'from stat_test.quadratic_time import GaussianQuadraticTest, QuadraticMultiple\n'), ((1724, 1745), 'stat_test.quadratic_time.QuadraticMultiple', 'QuadraticMultiple', (['me'], {}), '(me)\n', (1741, 1745), False, 'from stat_test.quadratic_time import GaussianQuadraticTest, QuadraticMultiple\n'), ((1614, 1631), 'sgld_test.gradients_of_likelihood.grad_log_prior', 'grad_log_prior', (['t'], {}), '(t)\n', (1628, 1631), False, 'from sgld_test.gradients_of_likelihood import manual_grad, grad_log_prior\n'), ((621, 647), 'sgld_test.gradients_of_likelihood.manual_grad', 'manual_grad', (['t[0]', 't[1]', 'X'], {}), '(t[0], t[1], X)\n', (632, 647), False, 'from sgld_test.gradients_of_likelihood import manual_grad, grad_log_prior\n'), ((1579, 1605), 'sgld_test.gradients_of_likelihood.manual_grad', 'manual_grad', (['t[0]', 't[1]', 'X'], {}), '(t[0], t[1], 
X)\n', (1590, 1605), False, 'from sgld_test.gradients_of_likelihood import manual_grad, grad_log_prior\n')] |
import numpy as np
from scipy.spatial.transform import Rotation
def no_termination(observation):
    """Termination predicate that never fires: always returns False."""
    return False
def position_close_to_goal(observation):
    """True when the object's position is within 0.05 of the goal position."""
    delta = observation["goal_object_position"] - observation["object_position"]
    return np.linalg.norm(delta) < 0.05
def pos_and_rot_close_to_goal(observation):
    """True when both the position (within 0.05) and the orientation
    (within 15 degrees) are close to the goal."""
    angle_deg = _orientation_error(observation) * 180
    threshold_deg = 15.0
    return position_close_to_goal(observation) and angle_deg < threshold_deg
def _orientation_error(observation):
'''copied from reward_fns.py'''
goal_rot = Rotation.from_quat(observation['goal_object_orientation'])
actual_rot = Rotation.from_quat(observation['object_orientation'])
error_rot = goal_rot.inv() * actual_rot
return error_rot.magnitude() / np.pi
class StayCloseToGoal(object):
    """Stateful termination check: fires after the goal condition has held
    for `success_steps` consecutive observations."""

    def __init__(self, success_steps=80, is_level_4=False):
        self.counter = 0
        self.success_steps = success_steps
        # Level 4 additionally requires the orientation to match.
        if is_level_4:
            self.goal_check = pos_and_rot_close_to_goal
        else:
            self.goal_check = position_close_to_goal

    def __call__(self, observation):
        if not self.goal_check(observation):
            # Streak broken: start counting again.
            self.counter = 0
            return False
        self.counter += 1
        if self.counter < self.success_steps:
            return False
        # Held long enough; reset so the instance can be reused.
        self.counter = 0
        return True
# Ready-made instances: position-only check, and position+orientation
# check (level 4).
stay_close_to_goal = StayCloseToGoal(is_level_4=False)
stay_close_to_goal_level_4 = StayCloseToGoal(is_level_4=True)
| [
"scipy.spatial.transform.Rotation.from_quat",
"numpy.linalg.norm"
] | [((178, 267), 'numpy.linalg.norm', 'np.linalg.norm', (["(observation['goal_object_position'] - observation['object_position'])"], {}), "(observation['goal_object_position'] - observation[\n 'object_position'])\n", (192, 267), True, 'import numpy as np\n'), ((663, 721), 'scipy.spatial.transform.Rotation.from_quat', 'Rotation.from_quat', (["observation['goal_object_orientation']"], {}), "(observation['goal_object_orientation'])\n", (681, 721), False, 'from scipy.spatial.transform import Rotation\n'), ((739, 792), 'scipy.spatial.transform.Rotation.from_quat', 'Rotation.from_quat', (["observation['object_orientation']"], {}), "(observation['object_orientation'])\n", (757, 792), False, 'from scipy.spatial.transform import Rotation\n')] |
#!/usr/bin/python3
'''Advent of Code 2018 Day 17 solution'''
# pylint: disable=too-many-arguments
import re
import sys
from typing import TextIO, Tuple, List, Set
import numpy
# The flood fill below recurses roughly once per grid square, which easily
# exceeds CPython's default recursion limit.
sys.setrecursionlimit(10000)
# ((miny, maxy, minx, maxx), grid) -- bounding box plus the clay map.
# NOTE(review): grid is actually a 2D numpy bool array, not List[List[int]].
Input = Tuple[Tuple[int, int, int, int], List[List[int]]]
def printmap(inputs: Input, visited: Set[Tuple[int, int]], water: Set[Tuple[int, int]]) -> None:
    '''Print the scan for debugging: `~` settled water, `|` flowing water,
    `#` clay, `.` sand -- one row per y inside the bounding box.'''
    miny, maxy, minx, maxx = inputs[0]
    for y in range(miny, maxy + 1):
        # BUG FIX: the inner loop previously ran range(minx, minx + 1) and
        # printed only the first column; it must span minx..maxx.
        for x in range(minx, maxx + 1):
            if (y, x) in water:
                print('~', end='')
            elif (y, x) in visited:
                print('|', end='')
            elif inputs[1][y][x]:
                print('#', end='')
            else:
                print('.', end='')
        print('')
def readinputdata(f: TextIO) -> Input:
    '''Read the clay-vein scan from a file handle.

    Each input line is either "x=A, y=B..C" (a vertical vein) or
    "y=A, x=B..C" (a horizontal one). Returns ((miny, maxy, minx, maxx),
    grid) where grid[y][x] is True on clay squares.
    '''
    pattern = re.compile(r'([xy])=(\d+), [xy]=(\d+)\.\.(\d+)')
    # Parse every vein once, as (y1, y2, x1, x2) rectangles.
    veins = []
    for raw in f.read().splitlines():
        match = pattern.match(raw)
        if not match:
            continue
        fixed = int(match.group(2))
        lo, hi = int(match.group(3)), int(match.group(4))
        if match.group(1) == 'x':
            veins.append((lo, hi, fixed, fixed))
        else:
            veins.append((fixed, fixed, lo, hi))
    # Track the bounding box (same sentinel start values as before).
    miny = minx = 9999
    maxy = maxx = 0
    for y1, y2, x1, x2 in veins:
        miny, maxy = min(miny, y1), max(maxy, y2)
        minx, maxx = min(minx, x1), max(maxx, x2)
    # Fill the grid with the parsed veins.
    grid = numpy.full((maxy + 1, maxx + 1), False, dtype=bool)
    for y1, y2, x1, x2 in veins:
        grid[y1:y2 + 1, x1:x2 + 1] = True
    return ((miny, maxy, minx, maxx), grid)
def flow(y: int, x: int, inputs: Input, visited: Set[Tuple[int, int]],
         water: Set[Tuple[int, int]]) -> bool:
    '''Follow water flow downward from (y, x).

    Returns True when (y, x) is blocked (clay or settled water), so the
    caller's water cannot pass through it; False when the water flows
    freely past the bottom of the scan. Side effects: adds every square
    the water passes to `visited`, and squares where it settles to `water`.
    '''
    # Operator precedence: `or` binds looser than `and`, so this reads as:
    # past the bottom, OR (already visited and not settled water).
    if y > inputs[0][1] or (y, x) in visited and (y, x) not in water:
        # Free flow from here to the end of the world
        return False
    if inputs[1][y][x] or (y, x) in water:
        # This square is blocked, you cannot go here.
        return True
    visited.add((y, x))
    # Flow down if we can
    if flow(y+1, x, inputs, visited, water):
        # Can't flow down, blocked. Go left/right instead.
        left = flowleft(y, x-1, inputs, visited, water)
        right = flowright(y, x+1, inputs, visited, water)
        if left and right:
            # Walled in on both sides: this row fills up, so re-walk it to
            # mark every square as settled water.
            flowleft(y, x-1, inputs, visited, water, blocked=True)
            flowright(y, x+1, inputs, visited, water, blocked=True)
            water.add((y, x))
            return True
        return False
    return False
def flowleft(y: int, x: int, inputs: Input, visited: Set[Tuple[int, int]],
             water: Set[Tuple[int, int]], blocked: bool = False) -> bool:
    '''Spread water leftward along a row, once a floor has been hit.

    Returns True when the spread reaches a wall (clay or settled water),
    False when it spills over an edge. With blocked=True the row is known
    to be walled on both sides, so each square is also marked as settled
    water.
    '''
    if y > inputs[0][1] and (y, x) not in water:
        # Free flow from here to the end of the world
        return False
    if inputs[1][y][x] or (y, x) in water:
        # This square is blocked, you cannot go here.
        return True
    visited.add((y, x))
    if blocked:
        water.add((y, x))
    # Flow down if we can
    if flow(y+1, x, inputs, visited, water):
        # Can't flow down, blocked. Keep spreading left.
        return flowleft(y, x-1, inputs, visited, water, blocked=blocked)
    return False
def flowright(y: int, x: int, inputs: Input, visited: Set[Tuple[int, int]],
              water: Set[Tuple[int, int]], blocked: bool = False) -> bool:
    '''Spread water rightward along a row, once a floor has been hit
    (mirror image of flowleft).

    Returns True when the spread reaches a wall, False when it spills over
    an edge; with blocked=True each square is also marked as settled water.
    '''
    if y > inputs[0][1] and (y, x) not in water:
        # Free flow from here to the end of the world
        return False
    if inputs[1][y][x] or (y, x) in water:
        # This square is blocked, you cannot go here.
        return True
    visited.add((y, x))
    if blocked:
        water.add((y, x))
    # Flow down if we can
    if flow(y+1, x, inputs, visited, water):
        # Can't flow down, blocked. Keep spreading right.
        return flowright(y, x+1, inputs, visited, water, blocked=blocked)
    return False
def runsolution(inputs: Input) -> Tuple[int, int]:
    '''Run the flood fill from the spring at (0, 500) and return
    (squares reached by water at or below miny, squares of settled water).'''
    visited: Set[Tuple[int, int]] = set()
    water: Set[Tuple[int, int]] = set()
    flow(0, 500, inputs, visited, water)
    miny = inputs[0][0]
    reached = sum(1 for cell in visited if cell[0] >= miny)
    return (reached, len(water))
def run() -> Tuple[int, int]:
    '''Main: parse the puzzle input file and solve both parts.'''
    with open('inputs/day17.txt', 'r') as handle:
        puzzle = readinputdata(handle)
    return runsolution(puzzle)
# Script entry point: solve the puzzle and print the resulting tuple.
if __name__ == '__main__':
    print(run())
| [
"numpy.full",
"sys.setrecursionlimit",
"re.compile"
] | [((179, 207), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000)'], {}), '(10000)\n', (200, 207), False, 'import sys\n'), ((879, 931), 're.compile', 're.compile', (['"""([xy])=(\\\\d+), [xy]=(\\\\d+)\\\\.\\\\.(\\\\d+)"""'], {}), "('([xy])=(\\\\d+), [xy]=(\\\\d+)\\\\.\\\\.(\\\\d+)')\n", (889, 931), False, 'import re\n'), ((1542, 1593), 'numpy.full', 'numpy.full', (['(maxy + 1, maxx + 1)', '(False)'], {'dtype': 'bool'}), '((maxy + 1, maxx + 1), False, dtype=bool)\n', (1552, 1593), False, 'import numpy\n')] |
import numpy as np
class PolicyIteration:
    """Tabular policy iteration (iterative policy evaluation + greedy policy
    improvement) on a deterministic grid world.

    The environment is deterministic: each action leads to exactly one
    successor state with probability 1, which is why no sums over transition
    probabilities appear below.
    """

    def __init__(self, grid, policy, actions, small_change: float, gamma: float) -> None:
        # grid: environment wrapper exposing `grid_board` (2-D array) plus
        #       print_values()/print_policy() helpers.
        # policy: provides get_random_policy(rows, cols) for the initial policy.
        # actions: provides per-state legal actions, rewards, and successor lookup.
        # small_change: convergence threshold for the evaluation sweeps.
        # gamma: discount factor.
        self.grid = grid
        self.policy = policy
        self.actions = actions
        self.rows, self.cols = self.grid.grid_board.shape
        # NOTE(review): initialized as a 2-D (rows, cols) array here, but
        # run_policy_iteration() re-creates it as a flat 1-D array indexed by
        # state number before any evaluation runs — confirm which shape the
        # print helpers expect.
        self.value_function = np.zeros((self.rows,self.cols))
        self.SMALL_CHANGE = small_change
        self.gamma = gamma
        self.actions.set_actions()
        self.initial_policy = self.policy.get_random_policy(self.rows, self.cols)

    def value_evaluation(self, states):
        """Run one in-place evaluation sweep over all states under the current
        policy and return the largest absolute change to any state's value."""
        max_expected_value_change = 0
        for state in states:
            old_value_function = self.value_function[state]
            action = self.initial_policy[state]
            if action in self.actions.grid_actions[state]:
                # Not taking a sum because there is only one action.
                # Multiplying by 1 because the single action has probability 1:
                # if we try to go up, the agent ends up going up — deterministic.
                self.value_function[state] = (1 * (self.actions.get_action_reward(state,action)) + (self.gamma * self.value_function[self.actions.get_next_state_number(state,action)]))
                max_expected_value_change = max(max_expected_value_change, np.abs(self.value_function[state] - old_value_function))
        return max_expected_value_change

    def get_best_action(self,state, actions):
        """Greedy one-step lookahead: return the action maximizing
        r + gamma * V(next_state) among the given actions."""
        best_action_value = float('-inf')
        best_action = None
        for action in actions:
            probability = 1  # deterministic transitions
            reward = self.actions.get_action_reward(state,action)
            next_state_value = self.value_function[self.actions.get_next_state_number(state,action)]
            value = (probability * reward) + (self.gamma * next_state_value )
            if value > best_action_value:
                best_action_value = value
                best_action = action
        return best_action

    def run_policy_iteration(self):
        """Alternate policy evaluation and greedy policy improvement until the
        policy stops changing, then print the final values and policy."""
        print("Random Policy")
        self.grid.print_policy(self.initial_policy)
        # Policy evaluation for a deterministic policy: the probability of the
        # single chosen action in each state is 1.
        states = list(range(self.rows * self.cols))
        # We stop after 10 iterations in which the policy does not improve.
        count = 0
        # Re-create the value function as a flat array indexed by state number
        # (value_evaluation indexes it with a scalar state id).
        self.value_function = np.zeros((self.rows * self.cols))
        # Continue alternating evaluation and improvement until the policy
        # stops improving.
        while True:
            # Policy evaluation phase: the random policy has one action per
            # state; evaluate it repeatedly until the value function is stable.
            while True:
                # Holds the maximum change across the sweep.
                max_expected_value_change = self.value_evaluation(states)
                if max_expected_value_change < self.SMALL_CHANGE:
                    break
            # Policy improvement phase: for each state, look for an action with
            # a better one-step value than the current one and switch to it.
            for state in states:
                # NOTE(review): old_policy is re-copied on every state, so the
                # convergence check below only sees the copy taken before the
                # final state's update — confirm this is intended.
                old_policy = self.initial_policy.copy()
                actions = self.actions.grid_actions[state]
                self.initial_policy[state] = self.get_best_action(state, actions)
            # Number of times the policy has remained the same; once it
            # reaches 10 we assume the policy has converged.
            if np.all(old_policy == self.initial_policy):
                count += 1
                if count == 10:
                    break
        self.grid.print_values(self.value_function)
        self.grid.print_policy(self.initial_policy)
| [
"numpy.abs",
"numpy.zeros",
"numpy.all"
] | [((306, 338), 'numpy.zeros', 'np.zeros', (['(self.rows, self.cols)'], {}), '((self.rows, self.cols))\n', (314, 338), True, 'import numpy as np\n'), ((2449, 2480), 'numpy.zeros', 'np.zeros', (['(self.rows * self.cols)'], {}), '(self.rows * self.cols)\n', (2457, 2480), True, 'import numpy as np\n'), ((3716, 3757), 'numpy.all', 'np.all', (['(old_policy == self.initial_policy)'], {}), '(old_policy == self.initial_policy)\n', (3722, 3757), True, 'import numpy as np\n'), ((1346, 1401), 'numpy.abs', 'np.abs', (['(self.value_function[state] - old_value_function)'], {}), '(self.value_function[state] - old_value_function)\n', (1352, 1401), True, 'import numpy as np\n')] |
from copy import deepcopy
from typing import Any, Dict, List
import numpy as np
import torch
import torch.optim as opt
from genrl.agents import OffPolicyAgent
from genrl.utils import get_env_properties, get_model, safe_mean
class DQN(OffPolicyAgent):
    """Base DQN Class
    Paper: https://arxiv.org/abs/1312.5602
    Attributes:
        network (str): The network type of the Q-value function.
            Supported types: ["cnn", "mlp"]
        env (Environment): The environment that the agent is supposed to act on
        create_model (bool): Whether the model of the algo should be created when initialised
        batch_size (int): Mini batch size for loading experiences
        gamma (float): The discount factor for rewards
        value_layers (:obj:`tuple` of :obj:`int`): Layers in the Neural Network
            of the Q-value function
        lr_value (float): Learning rate for the Q-value function
        replay_size (int): Capacity of the Replay Buffer
        buffer_type (str): Choose the type of Buffer: ["push", "prioritized"]
        max_epsilon (str): Maximum epsilon for exploration
        min_epsilon (str): Minimum epsilon for exploration
        epsilon_decay (str): Rate of decay of epsilon (in order to decrease
            exploration with time)
        seed (int): Seed for randomness
        render (bool): Should the env be rendered during training?
        device (str): Hardware being used for training. Options:
            ["cuda" -> GPU, "cpu" -> CPU]
    """

    def __init__(
        self,
        *args,
        max_epsilon: float = 1.0,
        min_epsilon: float = 0.01,
        epsilon_decay: int = 1000,
        **kwargs
    ):
        super(DQN, self).__init__(*args, **kwargs)
        self.max_epsilon = max_epsilon
        self.min_epsilon = min_epsilon
        self.epsilon_decay = epsilon_decay
        # Subclasses (e.g. Double/Dueling DQN) set a non-empty suffix that is
        # appended to the network name when the model is created.
        self.dqn_type = ""
        # Subclasses using Noisy layers flip this so update_params() resets noise.
        self.noisy = False

        self.empty_logs()
        if self.create_model:
            self._create_model()

    def _create_model(self, *args, **kwargs) -> None:
        """Function to initialize Q-value model
        This will create the Q-value function of the agent.
        """
        state_dim, action_dim, discrete, _ = get_env_properties(self.env, self.network)
        if not discrete:
            raise Exception("Only Discrete Environments are supported for DQN")

        if isinstance(self.network, str):
            # Build a value network ("v") named by network type + DQN variant.
            self.model = get_model("v", self.network + self.dqn_type)(
                state_dim, action_dim, "Qs", self.value_layers, **kwargs
            )
        else:
            self.model = self.network

        # Target network starts as an exact copy of the online network.
        self.target_model = deepcopy(self.model)
        self.optimizer = opt.Adam(self.model.parameters(), lr=self.lr_value)

    def update_target_model(self) -> None:
        """Function to update the target Q model
        Updates the target model with the training model's weights when called
        """
        self.target_model.load_state_dict(self.model.state_dict())

    def update_params_before_select_action(self, timestep: int) -> None:
        """Update necessary parameters before selecting an action
        This updates the epsilon (exploration rate) of the agent every timestep
        Args:
            timestep (int): Timestep of training
        """
        self.timestep = timestep
        self.epsilon = self.calculate_epsilon_by_frame()
        self.logs["epsilon"].append(self.epsilon)

    def get_greedy_action(self, state: torch.Tensor) -> np.ndarray:
        """Greedy action selection
        Args:
            state (:obj:`np.ndarray`): Current state of the environment
        Returns:
            action (:obj:`np.ndarray`): Action taken by the agent
        """
        # NOTE(review): .numpy() without .cpu() assumes the model output lives
        # on the CPU — confirm device handling when training on CUDA.
        q_values = self.model(state.unsqueeze(0)).detach().numpy()
        action = np.argmax(q_values, axis=-1).squeeze(0)
        return action

    def select_action(
        self, state: np.ndarray, deterministic: bool = False
    ) -> np.ndarray:
        """Select action given state
        Epsilon-greedy action-selection
        Args:
            state (:obj:`np.ndarray`): Current state of the environment
            deterministic (bool): Should the policy be deterministic or stochastic
        Returns:
            action (:obj:`np.ndarray`): Action taken by the agent
        """
        state = torch.as_tensor(state).float()
        action = self.get_greedy_action(state)
        if not deterministic:
            # Explore with probability epsilon by sampling a random action.
            if np.random.rand() < self.epsilon:
                action = np.asarray(self.env.sample())
        return action

    def _reshape_batch(self, batch: List):
        """Function to reshape experiences for DQN
        Most of the DQN experiences need to be reshaped before sending to the
        Neural Networks
        """
        states = batch[0]
        # Actions must be integer indices with a trailing dim for gather().
        actions = batch[1].unsqueeze(-1).long()
        rewards = batch[2]
        next_states = batch[3]
        dones = batch[4]
        return states, actions, rewards, next_states, dones

    def get_q_values(self, states: torch.Tensor, actions: torch.Tensor) -> torch.Tensor:
        """Get Q values corresponding to specific states and actions
        Args:
            states (:obj:`torch.Tensor`): States for which Q-values need to be found
            actions (:obj:`torch.Tensor`): Actions taken at respective states
        Returns:
            q_values (:obj:`torch.Tensor`): Q values for the given states and actions
        """
        q_values = self.model(states)
        # Pick out the Q value of the action actually taken (dim 2 is the
        # action dimension).
        q_values = q_values.gather(2, actions)
        return q_values

    def get_target_q_values(
        self, next_states: torch.Tensor, rewards: List[float], dones: List[bool]
    ) -> torch.Tensor:
        """Get target Q values for the DQN
        Args:
            next_states (:obj:`torch.Tensor`): Next states for which target Q-values
                need to be found
            rewards (:obj:`list`): Rewards at each timestep for each environment
            dones (:obj:`list`): Game over status for each environment
        Returns:
            target_q_values (:obj:`torch.Tensor`): Target Q values for the DQN
        """
        # Next Q-values according to target model
        next_q_target_values = self.target_model(next_states)
        # Maximum of next q_target values
        max_next_q_target_values = next_q_target_values.max(2)[0]
        # (1 - dones) zeroes the bootstrap term on terminal transitions.
        target_q_values = rewards + self.gamma * torch.mul(  # Expected Target Q values
            max_next_q_target_values, (1 - dones)
        )
        # Needs to be unsqueezed to match dimension of q_values
        return target_q_values.unsqueeze(-1)

    def update_params(self, update_interval: int) -> None:
        """Update parameters of the model
        Args:
            update_interval (int): Interval between successive updates of the target model
        """
        # Sync the target network once per call, then take update_interval
        # gradient steps against it.
        self.update_target_model()

        for timestep in range(update_interval):
            batch = self.sample_from_buffer()
            loss = self.get_q_loss(batch)
            self.logs["value_loss"].append(loss.item())

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # In case the model uses Noisy layers, we must reset the noise every timestep
            if self.noisy:
                self.model.reset_noise()
                self.target_model.reset_noise()

    def calculate_epsilon_by_frame(self) -> float:
        """Helper function to calculate epsilon after every timestep
        Exponentially decays exploration rate from max epsilon to min epsilon
        The greater the value of epsilon_decay, the slower the decrease in epsilon
        """
        return self.min_epsilon + (self.max_epsilon - self.min_epsilon) * np.exp(
            -1.0 * self.timestep / self.epsilon_decay
        )

    def get_hyperparams(self) -> Dict[str, Any]:
        """Get relevant hyperparameters to save
        Returns:
            hyperparams (:obj:`dict`): Hyperparameters to be saved
        """
        hyperparams = {
            "gamma": self.gamma,
            "batch_size": self.batch_size,
            "lr": self.lr_value,
            "replay_size": self.replay_size,
            "weights": self.model.state_dict(),
            "timestep": self.timestep,
        }
        return hyperparams

    def load_weights(self, weights) -> None:
        """Load weights for the agent from pretrained model
        Args:
            weights (:obj:`Dict`): Dictionary of different neural net weights
        """
        self.model.load_state_dict(weights["weights"])

    def get_logging_params(self) -> Dict[str, Any]:
        """Gets relevant parameters for logging
        Returns:
            logs (:obj:`dict`): Logging parameters for monitoring training
        """
        logs = {
            "value_loss": safe_mean(self.logs["value_loss"]),
            "epsilon": safe_mean(self.logs["epsilon"]),
        }

        self.empty_logs()
        return logs

    def empty_logs(self) -> None:
        """Empties logs
        """
        self.logs = {}
        self.logs["value_loss"] = []
        self.logs["epsilon"] = []
| [
"copy.deepcopy",
"numpy.argmax",
"numpy.random.rand",
"torch.mul",
"genrl.utils.safe_mean",
"numpy.exp",
"genrl.utils.get_model",
"torch.as_tensor",
"genrl.utils.get_env_properties"
] | [((2222, 2264), 'genrl.utils.get_env_properties', 'get_env_properties', (['self.env', 'self.network'], {}), '(self.env, self.network)\n', (2240, 2264), False, 'from genrl.utils import get_env_properties, get_model, safe_mean\n'), ((2652, 2672), 'copy.deepcopy', 'deepcopy', (['self.model'], {}), '(self.model)\n', (2660, 2672), False, 'from copy import deepcopy\n'), ((8820, 8854), 'genrl.utils.safe_mean', 'safe_mean', (["self.logs['value_loss']"], {}), "(self.logs['value_loss'])\n", (8829, 8854), False, 'from genrl.utils import get_env_properties, get_model, safe_mean\n'), ((8879, 8910), 'genrl.utils.safe_mean', 'safe_mean', (["self.logs['epsilon']"], {}), "(self.logs['epsilon'])\n", (8888, 8910), False, 'from genrl.utils import get_env_properties, get_model, safe_mean\n'), ((2438, 2482), 'genrl.utils.get_model', 'get_model', (['"""v"""', '(self.network + self.dqn_type)'], {}), "('v', self.network + self.dqn_type)\n", (2447, 2482), False, 'from genrl.utils import get_env_properties, get_model, safe_mean\n'), ((3811, 3839), 'numpy.argmax', 'np.argmax', (['q_values'], {'axis': '(-1)'}), '(q_values, axis=-1)\n', (3820, 3839), True, 'import numpy as np\n'), ((4339, 4361), 'torch.as_tensor', 'torch.as_tensor', (['state'], {}), '(state)\n', (4354, 4361), False, 'import torch\n'), ((4462, 4478), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4476, 4478), True, 'import numpy as np\n'), ((6402, 6448), 'torch.mul', 'torch.mul', (['max_next_q_target_values', '(1 - dones)'], {}), '(max_next_q_target_values, 1 - dones)\n', (6411, 6448), False, 'import torch\n'), ((7736, 7785), 'numpy.exp', 'np.exp', (['(-1.0 * self.timestep / self.epsilon_decay)'], {}), '(-1.0 * self.timestep / self.epsilon_decay)\n', (7742, 7785), True, 'import numpy as np\n')] |
import os
import pickle
import shlex
import tempfile
import time
from threading import Thread, Event
from typing import Optional, Dict
import numpy as np
import pytest
from numpy.random import RandomState
from rlai.agents.mdp import StochasticMdpAgent
from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor
from rlai.environments.mdp import TrajectorySamplingMdpPlanningEnvironment
from rlai.gpi.temporal_difference.evaluation import Mode
from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi
from rlai.gpi.utils import update_policy_iteration_plot, plot_policy_iteration
from rlai.planning.environment_models import StochasticEnvironmentModel
from rlai.q_S_A.function_approximation.estimators import ApproximateStateActionValueEstimator
from rlai.q_S_A.function_approximation.models.feature_extraction import (
StateActionIdentityFeatureExtractor
)
from rlai.q_S_A.function_approximation.models.sklearn import SKLearnSGD
from rlai.q_S_A.tabular import TabularStateActionValueEstimator
from rlai.runners.trainer import run
from rlai.utils import RunThreadManager
from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq
def test_sarsa_iterate_value_q_pi():
    """SARSA TD iteration on Gridworld 4.1 matches the pickled fixture."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)

    q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.05, None)

    mdp_agent = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A.get_initial_policy(),
        1
    )

    iterate_value_q_pi(
        agent=mdp_agent,
        environment=mdp_environment,
        num_improvements=10,
        num_episodes_per_improvement=100,
        num_updates_per_improvement=None,
        alpha=0.1,
        mode=Mode.SARSA,
        n_steps=1,
        planning_environment=None,
        make_final_policy_greedy=False,
        q_S_A=q_S_A
    )

    # uncomment the following line and run test to update fixture
    # with open(f'{os.path.dirname(__file__)}/fixtures/test_td_iteration_of_value_q_pi.pickle', 'wb') as file:
    #     pickle.dump((mdp_agent.pi, q_S_A), file)

    with open(f'{os.path.dirname(__file__)}/fixtures/test_td_iteration_of_value_q_pi.pickle', 'rb') as file:
        pi_fixture, q_S_A_fixture = pickle.load(file)

    assert tabular_pi_legacy_eq(mdp_agent.pi, pi_fixture) and tabular_estimator_legacy_eq(q_S_A, q_S_A_fixture)
def test_sarsa_iterate_value_q_pi_make_greedy():
    """SARSA iteration with a final greedy policy matches the pickled fixture."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)

    q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.05, None)

    mdp_agent = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A.get_initial_policy(),
        1
    )

    iterate_value_q_pi(
        agent=mdp_agent,
        environment=mdp_environment,
        num_improvements=10,
        num_episodes_per_improvement=100,
        num_updates_per_improvement=None,
        alpha=0.1,
        mode=Mode.SARSA,
        n_steps=1,
        planning_environment=None,
        make_final_policy_greedy=True,
        q_S_A=q_S_A
    )

    # uncomment the following line and run test to update fixture
    # with open(f'{os.path.dirname(__file__)}/fixtures/test_td_iteration_of_value_q_pi_make_greedy.pickle', 'wb') as file:
    #     pickle.dump((mdp_agent.pi, q_S_A), file)

    with open(f'{os.path.dirname(__file__)}/fixtures/test_td_iteration_of_value_q_pi_make_greedy.pickle', 'rb') as file:
        pi_fixture, q_S_A_fixture = pickle.load(file)

    assert tabular_pi_legacy_eq(mdp_agent.pi, pi_fixture) and tabular_estimator_legacy_eq(q_S_A, q_S_A_fixture)
def test_sarsa_iterate_value_q_pi_with_trajectory_planning():
    """SARSA iteration with a trajectory-sampling planning environment matches the fixture."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)

    q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.05, None)

    mdp_agent = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A.get_initial_policy(),
        1
    )

    planning_environment = TrajectorySamplingMdpPlanningEnvironment(
        'test planning',
        random_state,
        StochasticEnvironmentModel(),
        10,
        None
    )

    iterate_value_q_pi(
        agent=mdp_agent,
        environment=mdp_environment,
        num_improvements=100,
        num_episodes_per_improvement=1,
        num_updates_per_improvement=None,
        alpha=0.1,
        mode=Mode.SARSA,
        n_steps=1,
        planning_environment=planning_environment,
        make_final_policy_greedy=True,
        q_S_A=q_S_A
    )

    # uncomment the following line and run test to update fixture
    # with open(f'{os.path.dirname(__file__)}/fixtures/test_td_iteration_of_value_q_pi_planning.pickle', 'wb') as file:
    #     pickle.dump((mdp_agent.pi, q_S_A), file)

    with open(f'{os.path.dirname(__file__)}/fixtures/test_td_iteration_of_value_q_pi_planning.pickle', 'rb') as file:
        pi_fixture, q_S_A_fixture = pickle.load(file)

    assert tabular_pi_legacy_eq(mdp_agent.pi, pi_fixture) and tabular_estimator_legacy_eq(q_S_A, q_S_A_fixture)
def test_q_learning_iterate_value_q_pi():
    """Q-learning TD iteration on Gridworld 4.1 matches the pickled fixture."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)

    q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.05, None)

    mdp_agent = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A.get_initial_policy(),
        1
    )

    iterate_value_q_pi(
        agent=mdp_agent,
        environment=mdp_environment,
        num_improvements=10,
        num_episodes_per_improvement=100,
        num_updates_per_improvement=None,
        alpha=0.1,
        mode=Mode.Q_LEARNING,
        n_steps=1,
        planning_environment=None,
        make_final_policy_greedy=False,
        q_S_A=q_S_A
    )

    # uncomment the following line and run test to update fixture
    # with open(f'{os.path.dirname(__file__)}/fixtures/test_td_q_learning_iteration_of_value_q_pi.pickle', 'wb') as file:
    #     pickle.dump((mdp_agent.pi, q_S_A), file)

    with open(f'{os.path.dirname(__file__)}/fixtures/test_td_q_learning_iteration_of_value_q_pi.pickle', 'rb') as file:
        pi_fixture, q_S_A_fixture = pickle.load(file)

    assert tabular_pi_legacy_eq(mdp_agent.pi, pi_fixture) and tabular_estimator_legacy_eq(q_S_A, q_S_A_fixture)
def test_q_learning_iterate_value_q_pi_function_approximation_with_formula():
    """Q-learning with a function approximator and an explicit patsy-style
    interaction formula matches the fixture's fitted coefficients."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, 20)

    q_S_A = ApproximateStateActionValueEstimator(
        mdp_environment,
        0.05,
        SKLearnSGD(random_state=random_state, scale_eta0_for_y=False),
        StateActionIdentityFeatureExtractor(mdp_environment),
        f'C(s, levels={[s.i for s in mdp_environment.SS]}):C(a, levels={[a.i for a in mdp_environment.SS[0].AA]})',
        False,
        None,
        None
    )

    mdp_agent = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A.get_initial_policy(),
        1
    )

    iterate_value_q_pi(
        agent=mdp_agent,
        environment=mdp_environment,
        num_improvements=5,
        num_episodes_per_improvement=5,
        num_updates_per_improvement=None,
        alpha=None,
        mode=Mode.Q_LEARNING,
        n_steps=None,
        planning_environment=None,
        make_final_policy_greedy=False,
        q_S_A=q_S_A
    )

    # uncomment the following line and run test to update fixture
    # with open(f'{os.path.dirname(__file__)}/fixtures/test_q_learning_iterate_value_q_pi_function_approximation.pickle', 'wb') as file:
    #     pickle.dump((mdp_agent.pi, q_S_A), file)

    with open(f'{os.path.dirname(__file__)}/fixtures/test_q_learning_iterate_value_q_pi_function_approximation.pickle', 'rb') as file:
        pi_fixture, q_S_A_fixture = pickle.load(file)

    assert np.allclose(mdp_agent.pi.estimator.model.model.coef_, pi_fixture.estimator.model.model.coef_)
def test_q_learning_iterate_value_q_pi_function_approximation_no_formula():
    """Q-learning with a function approximator and no formula (raw feature
    extractor) matches the fixture's coefficients and formatted policy output."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, 20)

    q_S_A = ApproximateStateActionValueEstimator(
        mdp_environment,
        0.05,
        SKLearnSGD(random_state=random_state, scale_eta0_for_y=False),
        GridworldFeatureExtractor(mdp_environment),
        None,
        False,
        None,
        None
    )

    mdp_agent = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A.get_initial_policy(),
        1
    )

    iterate_value_q_pi(
        agent=mdp_agent,
        environment=mdp_environment,
        num_improvements=10,
        num_episodes_per_improvement=20,
        num_updates_per_improvement=None,
        alpha=None,
        mode=Mode.Q_LEARNING,
        n_steps=None,
        planning_environment=None,
        make_final_policy_greedy=True,
        q_S_A=q_S_A
    )

    # uncomment the following line and run test to update fixture
    # with open(f'{os.path.dirname(__file__)}/fixtures/test_q_learning_iterate_value_q_pi_function_approximation_no_formula.pickle', 'wb') as file:
    #     pickle.dump((mdp_agent.pi, q_S_A), file)

    with open(f'{os.path.dirname(__file__)}/fixtures/test_q_learning_iterate_value_q_pi_function_approximation_no_formula.pickle', 'rb') as file:
        pi_fixture, q_S_A_fixture = pickle.load(file)

    assert np.allclose(mdp_agent.pi.estimator.model.model.coef_, pi_fixture.estimator.model.model.coef_)
    assert mdp_agent.pi.format_state_action_probs(mdp_environment.SS) == pi_fixture.format_state_action_probs(mdp_environment.SS)
    assert mdp_agent.pi.format_state_action_values(mdp_environment.SS) == pi_fixture.format_state_action_values(mdp_environment.SS)
def test_q_learning_iterate_value_q_pi_function_approximation_invalid_formula():
    """A formula combined with a non-identity feature extractor is rejected."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, 20)

    q_S_A = ApproximateStateActionValueEstimator(
        mdp_environment,
        0.05,
        SKLearnSGD(random_state=random_state, scale_eta0_for_y=False),
        GridworldFeatureExtractor(mdp_environment),
        f'C(s, levels={[s.i for s in mdp_environment.SS]}):C(a, levels={[a.i for a in mdp_environment.SS[0].AA]})',
        False,
        None,
        None
    )

    mdp_agent = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A.get_initial_policy(),
        1
    )

    with pytest.raises(ValueError, match='Invalid combination of formula'):
        iterate_value_q_pi(
            agent=mdp_agent,
            environment=mdp_environment,
            num_improvements=5,
            num_episodes_per_improvement=5,
            num_updates_per_improvement=None,
            alpha=None,
            mode=Mode.Q_LEARNING,
            n_steps=None,
            planning_environment=None,
            make_final_policy_greedy=False,
            q_S_A=q_S_A
        )
def test_expected_sarsa_iterate_value_q_pi():
    """Expected SARSA TD iteration matches the pickled fixture."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)

    q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.05, None)

    mdp_agent = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A.get_initial_policy(),
        1
    )

    iterate_value_q_pi(
        agent=mdp_agent,
        environment=mdp_environment,
        num_improvements=10,
        num_episodes_per_improvement=100,
        num_updates_per_improvement=None,
        alpha=0.1,
        mode=Mode.EXPECTED_SARSA,
        n_steps=1,
        planning_environment=None,
        make_final_policy_greedy=False,
        q_S_A=q_S_A
    )

    # uncomment the following line and run test to update fixture
    # with open(f'{os.path.dirname(__file__)}/fixtures/test_td_expected_sarsa_iteration_of_value_q_pi.pickle', 'wb') as file:
    #     pickle.dump((mdp_agent.pi, q_S_A), file)

    with open(f'{os.path.dirname(__file__)}/fixtures/test_td_expected_sarsa_iteration_of_value_q_pi.pickle', 'rb') as file:
        pi_fixture, q_S_A_fixture = pickle.load(file)

    assert tabular_pi_legacy_eq(mdp_agent.pi, pi_fixture) and tabular_estimator_legacy_eq(q_S_A, q_S_A_fixture)
def test_n_step_q_learning_iterate_value_q_pi():
    """3-step Q-learning TD iteration matches the pickled fixture."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)

    q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.05, None)

    mdp_agent = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A.get_initial_policy(),
        1
    )

    iterate_value_q_pi(
        agent=mdp_agent,
        environment=mdp_environment,
        num_improvements=10,
        num_episodes_per_improvement=100,
        num_updates_per_improvement=None,
        alpha=0.1,
        mode=Mode.Q_LEARNING,
        n_steps=3,
        planning_environment=None,
        make_final_policy_greedy=False,
        q_S_A=q_S_A
    )

    # uncomment the following line and run test to update fixture
    # with open(f'{os.path.dirname(__file__)}/fixtures/test_td_n_step_q_learning_iteration_of_value_q_pi.pickle', 'wb') as file:
    #     pickle.dump((mdp_agent.pi, q_S_A), file)

    with open(f'{os.path.dirname(__file__)}/fixtures/test_td_n_step_q_learning_iteration_of_value_q_pi.pickle', 'rb') as file:
        fixture_pi, fixture_q_S_A = pickle.load(file)

    assert tabular_pi_legacy_eq(mdp_agent.pi, fixture_pi) and tabular_estimator_legacy_eq(q_S_A, fixture_q_S_A)
def test_invalid_epsilon_iterate_value_q_pi():
    """An epsilon of 0.0 is rejected for TD-learning."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)

    q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.0, None)

    mdp_agent = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A.get_initial_policy(),
        1
    )

    with pytest.raises(ValueError, match='epsilon must be strictly > 0 for TD-learning'):
        iterate_value_q_pi(
            agent=mdp_agent,
            environment=mdp_environment,
            num_improvements=10,
            num_episodes_per_improvement=100,
            num_updates_per_improvement=None,
            alpha=0.1,
            mode=Mode.Q_LEARNING,
            n_steps=3,
            planning_environment=None,
            make_final_policy_greedy=False,
            q_S_A=q_S_A
        )
def test_iterate_value_q_pi_with_pdf():
    """Iteration with periodic plotting to a PDF file completes without error."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)

    q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.05, None)

    mdp_agent = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A.get_initial_policy(),
        1
    )

    iterate_value_q_pi(
        agent=mdp_agent,
        environment=mdp_environment,
        num_improvements=10,
        num_episodes_per_improvement=100,
        num_updates_per_improvement=None,
        alpha=0.1,
        mode=Mode.Q_LEARNING,
        n_steps=1,
        planning_environment=None,
        make_final_policy_greedy=False,
        q_S_A=q_S_A,
        num_improvements_per_plot=5,
        # delete=False so the plotting code can reopen the path by name.
        pdf_save_path=tempfile.NamedTemporaryFile(delete=False).name
    )
def test_iterate_value_q_pi_multi_threaded():
    """Iteration in a background thread can be plotted from the main thread,
    plotting from a non-main thread raises, and the run can be aborted."""

    thread_manager = RunThreadManager(True)

    def train_thread_target():
        # Long-running training loop; aborted via thread_manager at the end.
        random_state = RandomState(12345)
        mdp_environment: Gridworld = Gridworld.example_4_1(random_state, None)
        q_S_A = TabularStateActionValueEstimator(mdp_environment, 0.1, None)
        mdp_agent = StochasticMdpAgent(
            'test',
            random_state,
            q_S_A.get_initial_policy(),
            1
        )
        iterate_value_q_pi(
            agent=mdp_agent,
            environment=mdp_environment,
            num_improvements=1000000,
            num_episodes_per_improvement=10,
            num_updates_per_improvement=None,
            alpha=0.1,
            mode=Mode.SARSA,
            n_steps=None,
            planning_environment=None,
            make_final_policy_greedy=False,
            q_S_A=q_S_A,
            thread_manager=thread_manager,
            num_improvements_per_plot=10
        )

    # premature update should have no effect
    assert update_policy_iteration_plot() is None

    # initialize plot from main thread
    plot_policy_iteration(
        iteration_average_reward=[],
        iteration_total_states=[],
        iteration_num_states_improved=[],
        elapsed_seconds_average_rewards={},
        pdf=None
    )

    # run training thread
    run_thread = Thread(target=train_thread_target)
    run_thread.start()
    time.sleep(1)

    # update plot asynchronously
    update_policy_iteration_plot()
    time.sleep(1)

    # should NOT be allowed to update the plot from a non-main thread
    def bad_update():
        with pytest.raises(ValueError, match='Can only update plot on main thread.'):
            update_policy_iteration_plot()

    bad_thread = Thread(target=bad_update)
    bad_thread.start()
    bad_thread.join()

    # signal the training thread to stop and wait for it
    thread_manager.abort = True
    run_thread.join()
def test_iterate_value_q_pi_func_approx_multi_threaded():
    """CLI-driven function-approximation training in a background thread: the
    estimator's plot can be updated from the main thread only."""

    thread_manager = RunThreadManager(True)

    train_args_wait_event = Event()

    q_S_A: Optional[ApproximateStateActionValueEstimator] = None

    def train_args_callback(
            train_args: Dict
    ):
        # Capture the estimator created by the trainer and unblock the test.
        nonlocal q_S_A
        q_S_A = train_args['q_S_A']
        train_args_wait_event.set()

    cmd = '--random-seed 12345 --agent rlai.agents.mdp.StochasticMdpAgent --gamma 1 --environment rlai.environments.gridworld.Gridworld --id example_4_1 --T 25 --train-function rlai.gpi.temporal_difference.iteration.iterate_value_q_pi --mode SARSA --num-improvements 10 --num-episodes-per-improvement 10 --epsilon 0.05 --q-S-A rlai.q_S_A.function_approximation.estimators.ApproximateStateActionValueEstimator --function-approximation-model rlai.q_S_A.function_approximation.models.sklearn.SKLearnSGD --plot-model --feature-extractor rlai.environments.gridworld.GridworldFeatureExtractor --make-final-policy-greedy True'
    args = shlex.split(cmd)

    def train_thread_target():
        run(
            args=args,
            thread_manager=thread_manager,
            train_function_args_callback=train_args_callback
        )

    train_thread = Thread(target=train_thread_target)
    train_thread.start()
    train_args_wait_event.wait()

    # premature update should do nothing
    assert q_S_A.update_plot(-1) is None

    time.sleep(1)
    assert q_S_A.plot(True, None) is not None

    # should NOT be allowed to update the plot from a non-main thread
    def bad_update():
        with pytest.raises(ValueError, match='Can only update plot on main thread.'):
            q_S_A.update_plot(-1)

    bad_thread = Thread(target=bad_update)
    bad_thread.start()
    bad_thread.join()

    q_S_A.update_plot(-1)
def test_q_learning_iterate_value_q_pi_function_approximation_policy_ne():
    """Two function-approximation estimators trained for different numbers of
    episodes compare unequal (estimator and model inequality)."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, 20)

    epsilon = 0.05

    q_S_A_1 = ApproximateStateActionValueEstimator(
        mdp_environment,
        epsilon,
        SKLearnSGD(random_state=random_state, scale_eta0_for_y=False),
        GridworldFeatureExtractor(mdp_environment),
        None,
        False,
        None,
        None
    )

    mdp_agent_1 = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A_1.get_initial_policy(),
        1
    )

    iterate_value_q_pi(
        agent=mdp_agent_1,
        environment=mdp_environment,
        num_improvements=5,
        num_episodes_per_improvement=10,
        num_updates_per_improvement=None,
        alpha=None,
        mode=Mode.Q_LEARNING,
        n_steps=None,
        planning_environment=None,
        make_final_policy_greedy=True,
        q_S_A=q_S_A_1
    )

    q_S_A_2 = ApproximateStateActionValueEstimator(
        mdp_environment,
        epsilon,
        SKLearnSGD(random_state=random_state, scale_eta0_for_y=False),
        GridworldFeatureExtractor(mdp_environment),
        None,
        False,
        None,
        None
    )

    mdp_agent_2 = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A_2.get_initial_policy(),
        1
    )

    iterate_value_q_pi(
        agent=mdp_agent_2,
        environment=mdp_environment,
        num_improvements=5,
        num_episodes_per_improvement=5,
        num_updates_per_improvement=None,
        alpha=None,
        mode=Mode.Q_LEARNING,
        n_steps=None,
        planning_environment=None,
        make_final_policy_greedy=True,
        q_S_A=q_S_A_2
    )

    assert mdp_agent_1.pi.estimator != mdp_agent_2.pi.estimator
    assert mdp_agent_1.pi.estimator.model != mdp_agent_2.pi.estimator.model
def test_q_learning_iterate_value_q_pi_tabular_policy_ne():
    """Two tabular estimators trained for different numbers of episodes compare
    unequal at the estimator, state, and state-action levels."""

    random_state = RandomState(12345)

    mdp_environment: Gridworld = Gridworld.example_4_1(random_state, 20)

    epsilon = 0.05

    q_S_A_1 = TabularStateActionValueEstimator(
        mdp_environment,
        epsilon,
        None
    )

    mdp_agent_1 = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A_1.get_initial_policy(),
        1
    )

    iterate_value_q_pi(
        agent=mdp_agent_1,
        environment=mdp_environment,
        num_improvements=5,
        num_episodes_per_improvement=10,
        num_updates_per_improvement=None,
        alpha=None,
        mode=Mode.Q_LEARNING,
        n_steps=None,
        planning_environment=None,
        make_final_policy_greedy=True,
        q_S_A=q_S_A_1
    )

    q_S_A_2 = TabularStateActionValueEstimator(
        mdp_environment,
        epsilon,
        None
    )

    mdp_agent_2 = StochasticMdpAgent(
        'test',
        random_state,
        q_S_A_2.get_initial_policy(),
        1
    )

    iterate_value_q_pi(
        agent=mdp_agent_2,
        environment=mdp_environment,
        num_improvements=5,
        num_episodes_per_improvement=5,
        num_updates_per_improvement=None,
        alpha=None,
        mode=Mode.Q_LEARNING,
        n_steps=None,
        planning_environment=None,
        make_final_policy_greedy=True,
        q_S_A=q_S_A_2
    )

    # Spot-check inequality at every level of the estimator hierarchy.
    test_state = mdp_environment.SS[5]
    test_action = test_state.AA[0]
    assert q_S_A_1 != q_S_A_2
    assert q_S_A_1[test_state] != q_S_A_2[test_state]
    assert q_S_A_1[test_state][test_action] != q_S_A_2[test_state][test_action]
| [
"numpy.allclose",
"rlai.q_S_A.tabular.TabularStateActionValueEstimator",
"pickle.load",
"rlai.gpi.utils.update_policy_iteration_plot",
"rlai.gpi.temporal_difference.iteration.iterate_value_q_pi",
"os.path.dirname",
"shlex.split",
"numpy.random.RandomState",
"rlai.q_S_A.function_approximation.models.... | [((1244, 1262), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (1255, 1262), False, 'from numpy.random import RandomState\n'), ((1297, 1338), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', 'None'], {}), '(random_state, None)\n', (1318, 1338), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((1352, 1413), 'rlai.q_S_A.tabular.TabularStateActionValueEstimator', 'TabularStateActionValueEstimator', (['mdp_environment', '(0.05)', 'None'], {}), '(mdp_environment, 0.05, None)\n', (1384, 1413), False, 'from rlai.q_S_A.tabular import TabularStateActionValueEstimator\n'), ((1546, 1822), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent', 'environment': 'mdp_environment', 'num_improvements': '(10)', 'num_episodes_per_improvement': '(100)', 'num_updates_per_improvement': 'None', 'alpha': '(0.1)', 'mode': 'Mode.SARSA', 'n_steps': '(1)', 'planning_environment': 'None', 'make_final_policy_greedy': '(False)', 'q_S_A': 'q_S_A'}), '(agent=mdp_agent, environment=mdp_environment,\n num_improvements=10, num_episodes_per_improvement=100,\n num_updates_per_improvement=None, alpha=0.1, mode=Mode.SARSA, n_steps=1,\n planning_environment=None, make_final_policy_greedy=False, q_S_A=q_S_A)\n', (1564, 1822), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((2482, 2500), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (2493, 2500), False, 'from numpy.random import RandomState\n'), ((2535, 2576), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', 'None'], {}), '(random_state, None)\n', (2556, 2576), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((2590, 2651), 'rlai.q_S_A.tabular.TabularStateActionValueEstimator', 
'TabularStateActionValueEstimator', (['mdp_environment', '(0.05)', 'None'], {}), '(mdp_environment, 0.05, None)\n', (2622, 2651), False, 'from rlai.q_S_A.tabular import TabularStateActionValueEstimator\n'), ((2784, 3059), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent', 'environment': 'mdp_environment', 'num_improvements': '(10)', 'num_episodes_per_improvement': '(100)', 'num_updates_per_improvement': 'None', 'alpha': '(0.1)', 'mode': 'Mode.SARSA', 'n_steps': '(1)', 'planning_environment': 'None', 'make_final_policy_greedy': '(True)', 'q_S_A': 'q_S_A'}), '(agent=mdp_agent, environment=mdp_environment,\n num_improvements=10, num_episodes_per_improvement=100,\n num_updates_per_improvement=None, alpha=0.1, mode=Mode.SARSA, n_steps=1,\n planning_environment=None, make_final_policy_greedy=True, q_S_A=q_S_A)\n', (2802, 3059), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((3756, 3774), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (3767, 3774), False, 'from numpy.random import RandomState\n'), ((3809, 3850), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', 'None'], {}), '(random_state, None)\n', (3830, 3850), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((3864, 3925), 'rlai.q_S_A.tabular.TabularStateActionValueEstimator', 'TabularStateActionValueEstimator', (['mdp_environment', '(0.05)', 'None'], {}), '(mdp_environment, 0.05, None)\n', (3896, 3925), False, 'from rlai.q_S_A.tabular import TabularStateActionValueEstimator\n'), ((4244, 4539), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent', 'environment': 'mdp_environment', 'num_improvements': '(100)', 'num_episodes_per_improvement': '(1)', 'num_updates_per_improvement': 'None', 'alpha': '(0.1)', 'mode': 'Mode.SARSA', 'n_steps': '(1)', 
'planning_environment': 'planning_environment', 'make_final_policy_greedy': '(True)', 'q_S_A': 'q_S_A'}), '(agent=mdp_agent, environment=mdp_environment,\n num_improvements=100, num_episodes_per_improvement=1,\n num_updates_per_improvement=None, alpha=0.1, mode=Mode.SARSA, n_steps=1,\n planning_environment=planning_environment, make_final_policy_greedy=\n True, q_S_A=q_S_A)\n', (4262, 4539), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((5205, 5223), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (5216, 5223), False, 'from numpy.random import RandomState\n'), ((5258, 5299), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', 'None'], {}), '(random_state, None)\n', (5279, 5299), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((5313, 5374), 'rlai.q_S_A.tabular.TabularStateActionValueEstimator', 'TabularStateActionValueEstimator', (['mdp_environment', '(0.05)', 'None'], {}), '(mdp_environment, 0.05, None)\n', (5345, 5374), False, 'from rlai.q_S_A.tabular import TabularStateActionValueEstimator\n'), ((5507, 5792), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent', 'environment': 'mdp_environment', 'num_improvements': '(10)', 'num_episodes_per_improvement': '(100)', 'num_updates_per_improvement': 'None', 'alpha': '(0.1)', 'mode': 'Mode.Q_LEARNING', 'n_steps': '(1)', 'planning_environment': 'None', 'make_final_policy_greedy': '(False)', 'q_S_A': 'q_S_A'}), '(agent=mdp_agent, environment=mdp_environment,\n num_improvements=10, num_episodes_per_improvement=100,\n num_updates_per_improvement=None, alpha=0.1, mode=Mode.Q_LEARNING,\n n_steps=1, planning_environment=None, make_final_policy_greedy=False,\n q_S_A=q_S_A)\n', (5525, 5792), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((6499, 6517), 'numpy.random.RandomState', 
'RandomState', (['(12345)'], {}), '(12345)\n', (6510, 6517), False, 'from numpy.random import RandomState\n'), ((6552, 6591), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', '(20)'], {}), '(random_state, 20)\n', (6573, 6591), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((7111, 7397), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent', 'environment': 'mdp_environment', 'num_improvements': '(5)', 'num_episodes_per_improvement': '(5)', 'num_updates_per_improvement': 'None', 'alpha': 'None', 'mode': 'Mode.Q_LEARNING', 'n_steps': 'None', 'planning_environment': 'None', 'make_final_policy_greedy': '(False)', 'q_S_A': 'q_S_A'}), '(agent=mdp_agent, environment=mdp_environment,\n num_improvements=5, num_episodes_per_improvement=5,\n num_updates_per_improvement=None, alpha=None, mode=Mode.Q_LEARNING,\n n_steps=None, planning_environment=None, make_final_policy_greedy=False,\n q_S_A=q_S_A)\n', (7129, 7397), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((7933, 8031), 'numpy.allclose', 'np.allclose', (['mdp_agent.pi.estimator.model.model.coef_', 'pi_fixture.estimator.model.model.coef_'], {}), '(mdp_agent.pi.estimator.model.model.coef_, pi_fixture.estimator.\n model.model.coef_)\n', (7944, 8031), True, 'import numpy as np\n'), ((8125, 8143), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (8136, 8143), False, 'from numpy.random import RandomState\n'), ((8178, 8217), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', '(20)'], {}), '(random_state, 20)\n', (8199, 8217), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((8625, 8912), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent', 'environment': 'mdp_environment', 
'num_improvements': '(10)', 'num_episodes_per_improvement': '(20)', 'num_updates_per_improvement': 'None', 'alpha': 'None', 'mode': 'Mode.Q_LEARNING', 'n_steps': 'None', 'planning_environment': 'None', 'make_final_policy_greedy': '(True)', 'q_S_A': 'q_S_A'}), '(agent=mdp_agent, environment=mdp_environment,\n num_improvements=10, num_episodes_per_improvement=20,\n num_updates_per_improvement=None, alpha=None, mode=Mode.Q_LEARNING,\n n_steps=None, planning_environment=None, make_final_policy_greedy=True,\n q_S_A=q_S_A)\n', (8643, 8912), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((9470, 9568), 'numpy.allclose', 'np.allclose', (['mdp_agent.pi.estimator.model.model.coef_', 'pi_fixture.estimator.model.model.coef_'], {}), '(mdp_agent.pi.estimator.model.model.coef_, pi_fixture.estimator.\n model.model.coef_)\n', (9481, 9568), True, 'import numpy as np\n'), ((9929, 9947), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (9940, 9947), False, 'from numpy.random import RandomState\n'), ((9982, 10021), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', '(20)'], {}), '(random_state, 20)\n', (10003, 10021), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((11092, 11110), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (11103, 11110), False, 'from numpy.random import RandomState\n'), ((11145, 11186), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', 'None'], {}), '(random_state, None)\n', (11166, 11186), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((11200, 11261), 'rlai.q_S_A.tabular.TabularStateActionValueEstimator', 'TabularStateActionValueEstimator', (['mdp_environment', '(0.05)', 'None'], {}), '(mdp_environment, 0.05, None)\n', (11232, 11261), False, 'from rlai.q_S_A.tabular import 
TabularStateActionValueEstimator\n'), ((11394, 11683), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent', 'environment': 'mdp_environment', 'num_improvements': '(10)', 'num_episodes_per_improvement': '(100)', 'num_updates_per_improvement': 'None', 'alpha': '(0.1)', 'mode': 'Mode.EXPECTED_SARSA', 'n_steps': '(1)', 'planning_environment': 'None', 'make_final_policy_greedy': '(False)', 'q_S_A': 'q_S_A'}), '(agent=mdp_agent, environment=mdp_environment,\n num_improvements=10, num_episodes_per_improvement=100,\n num_updates_per_improvement=None, alpha=0.1, mode=Mode.EXPECTED_SARSA,\n n_steps=1, planning_environment=None, make_final_policy_greedy=False,\n q_S_A=q_S_A)\n', (11412, 11683), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((12369, 12387), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (12380, 12387), False, 'from numpy.random import RandomState\n'), ((12422, 12463), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', 'None'], {}), '(random_state, None)\n', (12443, 12463), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((12477, 12538), 'rlai.q_S_A.tabular.TabularStateActionValueEstimator', 'TabularStateActionValueEstimator', (['mdp_environment', '(0.05)', 'None'], {}), '(mdp_environment, 0.05, None)\n', (12509, 12538), False, 'from rlai.q_S_A.tabular import TabularStateActionValueEstimator\n'), ((12671, 12956), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent', 'environment': 'mdp_environment', 'num_improvements': '(10)', 'num_episodes_per_improvement': '(100)', 'num_updates_per_improvement': 'None', 'alpha': '(0.1)', 'mode': 'Mode.Q_LEARNING', 'n_steps': '(3)', 'planning_environment': 'None', 'make_final_policy_greedy': '(False)', 'q_S_A': 'q_S_A'}), '(agent=mdp_agent, 
environment=mdp_environment,\n num_improvements=10, num_episodes_per_improvement=100,\n num_updates_per_improvement=None, alpha=0.1, mode=Mode.Q_LEARNING,\n n_steps=3, planning_environment=None, make_final_policy_greedy=False,\n q_S_A=q_S_A)\n', (12689, 12956), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((13646, 13664), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (13657, 13664), False, 'from numpy.random import RandomState\n'), ((13699, 13740), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', 'None'], {}), '(random_state, None)\n', (13720, 13740), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((13754, 13814), 'rlai.q_S_A.tabular.TabularStateActionValueEstimator', 'TabularStateActionValueEstimator', (['mdp_environment', '(0.0)', 'None'], {}), '(mdp_environment, 0.0, None)\n', (13786, 13814), False, 'from rlai.q_S_A.tabular import TabularStateActionValueEstimator\n'), ((14515, 14533), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (14526, 14533), False, 'from numpy.random import RandomState\n'), ((14568, 14609), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', 'None'], {}), '(random_state, None)\n', (14589, 14609), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((14623, 14684), 'rlai.q_S_A.tabular.TabularStateActionValueEstimator', 'TabularStateActionValueEstimator', (['mdp_environment', '(0.05)', 'None'], {}), '(mdp_environment, 0.05, None)\n', (14655, 14684), False, 'from rlai.q_S_A.tabular import TabularStateActionValueEstimator\n'), ((15358, 15380), 'rlai.utils.RunThreadManager', 'RunThreadManager', (['(True)'], {}), '(True)\n', (15374, 15380), False, 'from rlai.utils import RunThreadManager\n'), ((16413, 16579), 'rlai.gpi.utils.plot_policy_iteration', 'plot_policy_iteration', ([], 
{'iteration_average_reward': '[]', 'iteration_total_states': '[]', 'iteration_num_states_improved': '[]', 'elapsed_seconds_average_rewards': '{}', 'pdf': 'None'}), '(iteration_average_reward=[], iteration_total_states=[\n ], iteration_num_states_improved=[], elapsed_seconds_average_rewards={},\n pdf=None)\n', (16434, 16579), False, 'from rlai.gpi.utils import update_policy_iteration_plot, plot_policy_iteration\n'), ((16661, 16695), 'threading.Thread', 'Thread', ([], {'target': 'train_thread_target'}), '(target=train_thread_target)\n', (16667, 16695), False, 'from threading import Thread, Event\n'), ((16723, 16736), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (16733, 16736), False, 'import time\n'), ((16775, 16805), 'rlai.gpi.utils.update_policy_iteration_plot', 'update_policy_iteration_plot', ([], {}), '()\n', (16803, 16805), False, 'from rlai.gpi.utils import update_policy_iteration_plot, plot_policy_iteration\n'), ((16810, 16823), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (16820, 16823), False, 'import time\n'), ((17054, 17079), 'threading.Thread', 'Thread', ([], {'target': 'bad_update'}), '(target=bad_update)\n', (17060, 17079), False, 'from threading import Thread, Event\n'), ((17262, 17284), 'rlai.utils.RunThreadManager', 'RunThreadManager', (['(True)'], {}), '(True)\n', (17278, 17284), False, 'from rlai.utils import RunThreadManager\n'), ((17314, 17321), 'threading.Event', 'Event', ([], {}), '()\n', (17319, 17321), False, 'from threading import Thread, Event\n'), ((18181, 18197), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (18192, 18197), False, 'import shlex\n'), ((18400, 18434), 'threading.Thread', 'Thread', ([], {'target': 'train_thread_target'}), '(target=train_thread_target)\n', (18406, 18434), False, 'from threading import Thread, Event\n'), ((18582, 18595), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (18592, 18595), False, 'import time\n'), ((18863, 18888), 'threading.Thread', 'Thread', ([], {'target': 
'bad_update'}), '(target=bad_update)\n', (18869, 18888), False, 'from threading import Thread, Event\n'), ((19058, 19076), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (19069, 19076), False, 'from numpy.random import RandomState\n'), ((19111, 19150), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', '(20)'], {}), '(random_state, 20)\n', (19132, 19150), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((19587, 19877), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent_1', 'environment': 'mdp_environment', 'num_improvements': '(5)', 'num_episodes_per_improvement': '(10)', 'num_updates_per_improvement': 'None', 'alpha': 'None', 'mode': 'Mode.Q_LEARNING', 'n_steps': 'None', 'planning_environment': 'None', 'make_final_policy_greedy': '(True)', 'q_S_A': 'q_S_A_1'}), '(agent=mdp_agent_1, environment=mdp_environment,\n num_improvements=5, num_episodes_per_improvement=10,\n num_updates_per_improvement=None, alpha=None, mode=Mode.Q_LEARNING,\n n_steps=None, planning_environment=None, make_final_policy_greedy=True,\n q_S_A=q_S_A_1)\n', (19605, 19877), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((20372, 20661), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent_2', 'environment': 'mdp_environment', 'num_improvements': '(5)', 'num_episodes_per_improvement': '(5)', 'num_updates_per_improvement': 'None', 'alpha': 'None', 'mode': 'Mode.Q_LEARNING', 'n_steps': 'None', 'planning_environment': 'None', 'make_final_policy_greedy': '(True)', 'q_S_A': 'q_S_A_2'}), '(agent=mdp_agent_2, environment=mdp_environment,\n num_improvements=5, num_episodes_per_improvement=5,\n num_updates_per_improvement=None, alpha=None, mode=Mode.Q_LEARNING,\n n_steps=None, planning_environment=None, make_final_policy_greedy=True,\n 
q_S_A=q_S_A_2)\n', (20390, 20661), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((20963, 20981), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (20974, 20981), False, 'from numpy.random import RandomState\n'), ((21016, 21055), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', '(20)'], {}), '(random_state, 20)\n', (21037, 21055), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((21091, 21155), 'rlai.q_S_A.tabular.TabularStateActionValueEstimator', 'TabularStateActionValueEstimator', (['mdp_environment', 'epsilon', 'None'], {}), '(mdp_environment, epsilon, None)\n', (21123, 21155), False, 'from rlai.q_S_A.tabular import TabularStateActionValueEstimator\n'), ((21322, 21612), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent_1', 'environment': 'mdp_environment', 'num_improvements': '(5)', 'num_episodes_per_improvement': '(10)', 'num_updates_per_improvement': 'None', 'alpha': 'None', 'mode': 'Mode.Q_LEARNING', 'n_steps': 'None', 'planning_environment': 'None', 'make_final_policy_greedy': '(True)', 'q_S_A': 'q_S_A_1'}), '(agent=mdp_agent_1, environment=mdp_environment,\n num_improvements=5, num_episodes_per_improvement=10,\n num_updates_per_improvement=None, alpha=None, mode=Mode.Q_LEARNING,\n n_steps=None, planning_environment=None, make_final_policy_greedy=True,\n q_S_A=q_S_A_1)\n', (21340, 21612), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((21706, 21770), 'rlai.q_S_A.tabular.TabularStateActionValueEstimator', 'TabularStateActionValueEstimator', (['mdp_environment', 'epsilon', 'None'], {}), '(mdp_environment, epsilon, None)\n', (21738, 21770), False, 'from rlai.q_S_A.tabular import TabularStateActionValueEstimator\n'), ((21937, 22226), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', 
([], {'agent': 'mdp_agent_2', 'environment': 'mdp_environment', 'num_improvements': '(5)', 'num_episodes_per_improvement': '(5)', 'num_updates_per_improvement': 'None', 'alpha': 'None', 'mode': 'Mode.Q_LEARNING', 'n_steps': 'None', 'planning_environment': 'None', 'make_final_policy_greedy': '(True)', 'q_S_A': 'q_S_A_2'}), '(agent=mdp_agent_2, environment=mdp_environment,\n num_improvements=5, num_episodes_per_improvement=5,\n num_updates_per_improvement=None, alpha=None, mode=Mode.Q_LEARNING,\n n_steps=None, planning_environment=None, make_final_policy_greedy=True,\n q_S_A=q_S_A_2)\n', (21955, 22226), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((2280, 2297), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2291, 2297), False, 'import pickle\n'), ((2310, 2356), 'test.rlai.utils.tabular_pi_legacy_eq', 'tabular_pi_legacy_eq', (['mdp_agent.pi', 'pi_fixture'], {}), '(mdp_agent.pi, pi_fixture)\n', (2330, 2356), False, 'from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq\n'), ((2361, 2410), 'test.rlai.utils.tabular_estimator_legacy_eq', 'tabular_estimator_legacy_eq', (['q_S_A', 'q_S_A_fixture'], {}), '(q_S_A, q_S_A_fixture)\n', (2388, 2410), False, 'from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq\n'), ((3541, 3558), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (3552, 3558), False, 'import pickle\n'), ((3571, 3617), 'test.rlai.utils.tabular_pi_legacy_eq', 'tabular_pi_legacy_eq', (['mdp_agent.pi', 'pi_fixture'], {}), '(mdp_agent.pi, pi_fixture)\n', (3591, 3617), False, 'from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq\n'), ((3622, 3671), 'test.rlai.utils.tabular_estimator_legacy_eq', 'tabular_estimator_legacy_eq', (['q_S_A', 'q_S_A_fixture'], {}), '(q_S_A, q_S_A_fixture)\n', (3649, 3671), False, 'from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq\n'), ((4178, 4206), 
'rlai.planning.environment_models.StochasticEnvironmentModel', 'StochasticEnvironmentModel', ([], {}), '()\n', (4204, 4206), False, 'from rlai.planning.environment_models import StochasticEnvironmentModel\n'), ((5010, 5027), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5021, 5027), False, 'import pickle\n'), ((5040, 5086), 'test.rlai.utils.tabular_pi_legacy_eq', 'tabular_pi_legacy_eq', (['mdp_agent.pi', 'pi_fixture'], {}), '(mdp_agent.pi, pi_fixture)\n', (5060, 5086), False, 'from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq\n'), ((5091, 5140), 'test.rlai.utils.tabular_estimator_legacy_eq', 'tabular_estimator_legacy_eq', (['q_S_A', 'q_S_A_fixture'], {}), '(q_S_A, q_S_A_fixture)\n', (5118, 5140), False, 'from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq\n'), ((6268, 6285), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (6279, 6285), False, 'import pickle\n'), ((6298, 6344), 'test.rlai.utils.tabular_pi_legacy_eq', 'tabular_pi_legacy_eq', (['mdp_agent.pi', 'pi_fixture'], {}), '(mdp_agent.pi, pi_fixture)\n', (6318, 6344), False, 'from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq\n'), ((6349, 6398), 'test.rlai.utils.tabular_estimator_legacy_eq', 'tabular_estimator_legacy_eq', (['q_S_A', 'q_S_A_fixture'], {}), '(q_S_A, q_S_A_fixture)\n', (6376, 6398), False, 'from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq\n'), ((6690, 6751), 'rlai.q_S_A.function_approximation.models.sklearn.SKLearnSGD', 'SKLearnSGD', ([], {'random_state': 'random_state', 'scale_eta0_for_y': '(False)'}), '(random_state=random_state, scale_eta0_for_y=False)\n', (6700, 6751), False, 'from rlai.q_S_A.function_approximation.models.sklearn import SKLearnSGD\n'), ((6761, 6813), 'rlai.q_S_A.function_approximation.models.feature_extraction.StateActionIdentityFeatureExtractor', 'StateActionIdentityFeatureExtractor', (['mdp_environment'], {}), '(mdp_environment)\n', (6796, 
6813), False, 'from rlai.q_S_A.function_approximation.models.feature_extraction import StateActionIdentityFeatureExtractor\n'), ((7903, 7920), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (7914, 7920), False, 'import pickle\n'), ((8316, 8377), 'rlai.q_S_A.function_approximation.models.sklearn.SKLearnSGD', 'SKLearnSGD', ([], {'random_state': 'random_state', 'scale_eta0_for_y': '(False)'}), '(random_state=random_state, scale_eta0_for_y=False)\n', (8326, 8377), False, 'from rlai.q_S_A.function_approximation.models.sklearn import SKLearnSGD\n'), ((8387, 8429), 'rlai.environments.gridworld.GridworldFeatureExtractor', 'GridworldFeatureExtractor', (['mdp_environment'], {}), '(mdp_environment)\n', (8412, 8429), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((9440, 9457), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (9451, 9457), False, 'import pickle\n'), ((10120, 10181), 'rlai.q_S_A.function_approximation.models.sklearn.SKLearnSGD', 'SKLearnSGD', ([], {'random_state': 'random_state', 'scale_eta0_for_y': '(False)'}), '(random_state=random_state, scale_eta0_for_y=False)\n', (10130, 10181), False, 'from rlai.q_S_A.function_approximation.models.sklearn import SKLearnSGD\n'), ((10191, 10233), 'rlai.environments.gridworld.GridworldFeatureExtractor', 'GridworldFeatureExtractor', (['mdp_environment'], {}), '(mdp_environment)\n', (10216, 10233), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((10536, 10601), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Invalid combination of formula"""'}), "(ValueError, match='Invalid combination of formula')\n", (10549, 10601), False, 'import pytest\n'), ((10611, 10897), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent', 'environment': 'mdp_environment', 'num_improvements': '(5)', 'num_episodes_per_improvement': '(5)', 'num_updates_per_improvement': 'None', 
'alpha': 'None', 'mode': 'Mode.Q_LEARNING', 'n_steps': 'None', 'planning_environment': 'None', 'make_final_policy_greedy': '(False)', 'q_S_A': 'q_S_A'}), '(agent=mdp_agent, environment=mdp_environment,\n num_improvements=5, num_episodes_per_improvement=5,\n num_updates_per_improvement=None, alpha=None, mode=Mode.Q_LEARNING,\n n_steps=None, planning_environment=None, make_final_policy_greedy=False,\n q_S_A=q_S_A)\n', (10629, 10897), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((12167, 12184), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (12178, 12184), False, 'import pickle\n'), ((12197, 12243), 'test.rlai.utils.tabular_pi_legacy_eq', 'tabular_pi_legacy_eq', (['mdp_agent.pi', 'pi_fixture'], {}), '(mdp_agent.pi, pi_fixture)\n', (12217, 12243), False, 'from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq\n'), ((12248, 12297), 'test.rlai.utils.tabular_estimator_legacy_eq', 'tabular_estimator_legacy_eq', (['q_S_A', 'q_S_A_fixture'], {}), '(q_S_A, q_S_A_fixture)\n', (12275, 12297), False, 'from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq\n'), ((13446, 13463), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (13457, 13463), False, 'import pickle\n'), ((13476, 13522), 'test.rlai.utils.tabular_pi_legacy_eq', 'tabular_pi_legacy_eq', (['mdp_agent.pi', 'fixture_pi'], {}), '(mdp_agent.pi, fixture_pi)\n', (13496, 13522), False, 'from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq\n'), ((13527, 13576), 'test.rlai.utils.tabular_estimator_legacy_eq', 'tabular_estimator_legacy_eq', (['q_S_A', 'fixture_q_S_A'], {}), '(q_S_A, fixture_q_S_A)\n', (13554, 13576), False, 'from test.rlai.utils import tabular_estimator_legacy_eq, tabular_pi_legacy_eq\n'), ((13952, 14031), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""epsilon must be strictly > 0 for TD-learning"""'}), "(ValueError, match='epsilon must be strictly > 0 for 
TD-learning')\n", (13965, 14031), False, 'import pytest\n'), ((14041, 14326), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent', 'environment': 'mdp_environment', 'num_improvements': '(10)', 'num_episodes_per_improvement': '(100)', 'num_updates_per_improvement': 'None', 'alpha': '(0.1)', 'mode': 'Mode.Q_LEARNING', 'n_steps': '(3)', 'planning_environment': 'None', 'make_final_policy_greedy': '(False)', 'q_S_A': 'q_S_A'}), '(agent=mdp_agent, environment=mdp_environment,\n num_improvements=10, num_episodes_per_improvement=100,\n num_updates_per_improvement=None, alpha=0.1, mode=Mode.Q_LEARNING,\n n_steps=3, planning_environment=None, make_final_policy_greedy=False,\n q_S_A=q_S_A)\n', (14059, 14326), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((15437, 15455), 'numpy.random.RandomState', 'RandomState', (['(12345)'], {}), '(12345)\n', (15448, 15455), False, 'from numpy.random import RandomState\n'), ((15494, 15535), 'rlai.environments.gridworld.Gridworld.example_4_1', 'Gridworld.example_4_1', (['random_state', 'None'], {}), '(random_state, None)\n', (15515, 15535), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((15553, 15613), 'rlai.q_S_A.tabular.TabularStateActionValueEstimator', 'TabularStateActionValueEstimator', (['mdp_environment', '(0.1)', 'None'], {}), '(mdp_environment, 0.1, None)\n', (15585, 15613), False, 'from rlai.q_S_A.tabular import TabularStateActionValueEstimator\n'), ((15774, 16124), 'rlai.gpi.temporal_difference.iteration.iterate_value_q_pi', 'iterate_value_q_pi', ([], {'agent': 'mdp_agent', 'environment': 'mdp_environment', 'num_improvements': '(1000000)', 'num_episodes_per_improvement': '(10)', 'num_updates_per_improvement': 'None', 'alpha': '(0.1)', 'mode': 'Mode.SARSA', 'n_steps': 'None', 'planning_environment': 'None', 'make_final_policy_greedy': '(False)', 'q_S_A': 'q_S_A', 'thread_manager': 
'thread_manager', 'num_improvements_per_plot': '(10)'}), '(agent=mdp_agent, environment=mdp_environment,\n num_improvements=1000000, num_episodes_per_improvement=10,\n num_updates_per_improvement=None, alpha=0.1, mode=Mode.SARSA, n_steps=\n None, planning_environment=None, make_final_policy_greedy=False, q_S_A=\n q_S_A, thread_manager=thread_manager, num_improvements_per_plot=10)\n', (15792, 16124), False, 'from rlai.gpi.temporal_difference.iteration import iterate_value_q_pi\n'), ((16330, 16360), 'rlai.gpi.utils.update_policy_iteration_plot', 'update_policy_iteration_plot', ([], {}), '()\n', (16358, 16360), False, 'from rlai.gpi.utils import update_policy_iteration_plot, plot_policy_iteration\n'), ((18238, 18338), 'rlai.runners.trainer.run', 'run', ([], {'args': 'args', 'thread_manager': 'thread_manager', 'train_function_args_callback': 'train_args_callback'}), '(args=args, thread_manager=thread_manager, train_function_args_callback=\n train_args_callback)\n', (18241, 18338), False, 'from rlai.runners.trainer import run\n'), ((19274, 19335), 'rlai.q_S_A.function_approximation.models.sklearn.SKLearnSGD', 'SKLearnSGD', ([], {'random_state': 'random_state', 'scale_eta0_for_y': '(False)'}), '(random_state=random_state, scale_eta0_for_y=False)\n', (19284, 19335), False, 'from rlai.q_S_A.function_approximation.models.sklearn import SKLearnSGD\n'), ((19345, 19387), 'rlai.environments.gridworld.GridworldFeatureExtractor', 'GridworldFeatureExtractor', (['mdp_environment'], {}), '(mdp_environment)\n', (19370, 19387), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((20059, 20120), 'rlai.q_S_A.function_approximation.models.sklearn.SKLearnSGD', 'SKLearnSGD', ([], {'random_state': 'random_state', 'scale_eta0_for_y': '(False)'}), '(random_state=random_state, scale_eta0_for_y=False)\n', (20069, 20120), False, 'from rlai.q_S_A.function_approximation.models.sklearn import SKLearnSGD\n'), ((20130, 20172), 
'rlai.environments.gridworld.GridworldFeatureExtractor', 'GridworldFeatureExtractor', (['mdp_environment'], {}), '(mdp_environment)\n', (20155, 20172), False, 'from rlai.environments.gridworld import Gridworld, GridworldFeatureExtractor\n'), ((16920, 16991), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Can only update plot on main thread."""'}), "(ValueError, match='Can only update plot on main thread.')\n", (16933, 16991), False, 'import pytest\n'), ((17005, 17035), 'rlai.gpi.utils.update_policy_iteration_plot', 'update_policy_iteration_plot', ([], {}), '()\n', (17033, 17035), False, 'from rlai.gpi.utils import update_policy_iteration_plot, plot_policy_iteration\n'), ((18738, 18809), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Can only update plot on main thread."""'}), "(ValueError, match='Can only update plot on main thread.')\n", (18751, 18809), False, 'import pytest\n'), ((15235, 15276), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (15262, 15276), False, 'import tempfile\n'), ((2152, 2177), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2167, 2177), False, 'import os\n'), ((3401, 3426), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3416, 3426), False, 'import os\n'), ((4873, 4898), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4888, 4898), False, 'import os\n'), ((6129, 6154), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (6144, 6154), False, 'import os\n'), ((7749, 7774), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7764, 7774), False, 'import os\n'), ((9275, 9300), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9290, 9300), False, 'import os\n'), ((12024, 12049), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (12039, 12049), False, 'import os\n'), ((13300, 
13325), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (13315, 13325), False, 'import os\n')] |
import json
import random
import numpy as np
import MDAnalysis as mda
class ZLayer():
    """One horizontal slice of the simulation box at a fixed Z depth."""
    def __init__(self):
        # 2-D integer array of molecule IDs painted onto this layer;
        # allocated by Project.new_layer() / resized by edit_lattice_params().
        self.lattice = None
        # Display name, e.g. "Layer 3" (assigned by Project.new_layer()).
        self.name = None
        # Z coordinate of the layer; compared directly against atom z
        # positions in Project.overlay_solute().
        self.zdepth = 0
class Molecule():
    """A single molecule species that can be painted onto the lattice.

    Instances start as an unnamed placeholder; Project.new_molecule() /
    new_solvent_molecule() fill in the index, name and color.
    """

    def __init__(self):
        # Identity and appearance.
        self.index = 0
        self.name = "Empty"
        self.color = "#FFFFFF"
        # Structure file backing this molecule (set later by the UI).
        self.path = None
        # Placement options, stored as 0/1 integer flags.
        self.flipbool = 0
        self.rotatebool = 0

    def get_paint_props(self):
        """Return the (ID, color) pair used when painting this molecule."""
        return (self.index, self.color)
class Blender():
    """A weighted mix of molecules; each paint action draws one at random.

    ``molecules`` maps molecule -> weight; the weights need not sum to 1.
    """

    def __init__(self):
        self.name = "Blender"
        # Blender IDs are negative by default so they cannot clash with
        # molecule IDs until Project assigns an offset-based index.
        self.index = -1
        self.molecules = {}

    def get_paint_props(self):
        """
        Return the (ID, color) pair for a single molecule drawn from this
        blend according to the blend's weight distribution.
        """
        chosen = self.distribute_next()
        return (chosen.index, chosen.color)

    def distribute_next(self):
        """Draw one molecule from the blend, weighted by its share."""
        candidates = list(self.molecules)
        weights = [self.molecules[mol] for mol in candidates]
        (pick,) = random.choices(candidates, weights=weights)
        return pick
class SolventLayer():
    """A slab of solvent molecules occupying a Z range of the box."""
    def __init__(self):
        self.name = "Solvent layer"
        # Spacing of the solvent placement grid; note this is separate from
        # the project's main lattice_spacing (default 4.0 vs 8).
        self.lattice_spacing = 4.0
        # Z extent of the slab. end_offset defaults to a negative value,
        # presumably measured from the opposite face of the box --
        # TODO confirm against the code that consumes these offsets.
        self.start_offset = 0
        self.end_offset = -10
        # molecule -> weight mapping, presumably used like Blender.molecules
        # for weighted placement -- verify against the builder.
        self.molecules = {}
class Project():
    """Top-level model for one editing session: lattice geometry plus all
    layers, molecules, blenders, solvent definitions and the optional
    imported solute.
    """
    def __init__(self):
        # Attributes are populated by init_defaults() for a new project, or
        # restored externally when a saved project is loaded (see
        # project_loaded()).
        pass
    def init_defaults(self):
        """
        Set everything to default values for a new project.
        """
        self.lattice_width = 25
        self.lattice_height = 25
        self.lattice_spacing = 8
        self.lattice_major_gridlines = 5
        self.layer_count = 1
        self.molecule_count = 0
        self.solvent_molecule_count = 0
        self.solvent_layer_count = 0
        self.blender_count = 0
        # Blender indices start at this offset so they never collide with
        # molecule indices (see new_blender()).
        self.blender_offset = 1000
        # Solute import configuration (see edit_solute_settings()).
        self.import_solute = None
        self.solute_buffer_space = 3
        self.solute_z = None
        self.layers = []
        self.molecules = []
        self.blenders = []
        self.solvent_molecules = []
        self.solvent_layers = []
    def edit_lattice_params(self, width, height, spacing, lines):
        """
        Change the lattice size of this project. The layers must resize to match.
        """
        self.lattice_spacing = spacing
        self.lattice_width = width
        self.lattice_height = height
        self.lattice_major_gridlines = lines
        # Copy the overlapping region of each old layer lattice into a
        # freshly sized one; cells outside the overlap are reset to 0.
        # NOTE(review): this allocates a float lattice (no dtype) while
        # new_layer() uses dtype='int' -- looks unintentional; confirm
        # whether downstream code depends on integer dtype.
        for layer in self.layers:
            new_lattice = np.zeros((self.lattice_height, self.lattice_width))
            for i in range(np.amin([layer.lattice.shape[0], new_lattice.shape[0]])):
                for j in range(np.amin([layer.lattice.shape[1], new_lattice.shape[1]])):
                    new_lattice[i,j] = layer.lattice[i,j]
            layer.lattice = new_lattice
    def new_layer(self):
        """
        Come up with starting parameters for a potential new layer.
        The caller may overwrite the default name and Z depth before
        passing the layer to add_layer().
        """
        layer = ZLayer()
        layer.lattice = np.zeros((self.lattice_height, self.lattice_width), dtype='int')
        layer.name = "Layer {}".format(self.layer_count)
        layer.zdepth = 0
        return layer
    def add_layer(self, layer):
        """
        Add this Z layer to the project.
        """
        self.layer_count += 1
        self.layers.append(layer)
    def delete_layer(self, layer):
        """
        Delete this layer from the project.
        """
        self.layers.remove(layer)
        # Rebinding the local name has no effect outside this method.
        layer = None
    def add_molecule(self, molecule):
        """
        Add this molecule to the project.
        """
        self.molecules.append(molecule)
        self.molecule_count += 1
    def add_solvent_molecule(self, molecule):
        """
        Add this as a solvent molecule to the project.
        """
        self.solvent_molecules.append(molecule)
        self.solvent_molecule_count += 1
    def delete_molecule(self, molecule):
        """
        Delete this molecule from the project.
        """
        self.molecules.remove(molecule)
        molecule = None
    def delete_solvent_molecule(self, molecule):
        """
        Delete this solvent molecule from the project.
        """
        self.solvent_molecules.remove(molecule)
        molecule = None
    def new_molecule(self):
        """
        Come up with starting parameters for a potential new molecule.
        """
        default_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
        molecule = Molecule()
        molecule.index = self.molecule_count
        molecule.name = "Mol {}".format(self.molecule_count + self.solvent_molecule_count)
        # NOTE(review): uses (count - 1) % 10 while new_solvent_molecule()
        # uses count % 10 -- the off-by-one looks inconsistent; confirm
        # which color cycle is intended.
        molecule.color = default_colors[(self.molecule_count - 1) % 10]
        return molecule
    def new_solvent_molecule(self):
        """
        Come up with starting parameters for a potential new solvent molecule.
        """
        default_colors = ['#8175aa', '#6fb899', '#31a1b3', '#ccb22b', '#a39fc9', '#94d0c0', '#959c9e', '#027b8e', '#9f8f12', '#767676']
        molecule = Molecule()
        molecule.index = self.solvent_molecule_count
        molecule.name = "Mol {}".format(self.molecule_count + self.solvent_molecule_count)
        molecule.color = default_colors[(self.solvent_molecule_count) % 10]
        return molecule
    def project_loaded(self):
        """
        Makes sure the counters match what was in the loaded project, so
        newly created molecules get indices above any existing ones.
        """
        for mol in self.molecules:
            if mol.index >= self.molecule_count:
                self.molecule_count = mol.index + 1
        for mol in self.solvent_molecules:
            if mol.index >= self.solvent_molecule_count:
                self.solvent_molecule_count = mol.index + 1
    def new_blender(self):
        """
        Create an empty blender.
        To make it 1-indexed, the default name is blender_count + 1, since
        there is no "default" or "empty" blender on a new project.
        """
        blender = Blender()
        blender.name = "Blend {}".format(self.blender_count + 1)
        # Index lives above blender_offset so it never collides with
        # molecule indices.
        blender.index = self.blender_count + self.blender_offset
        return blender
    def add_blender(self, blender):
        """
        Add this blender to the project.
        """
        self.blenders.append(blender)
        self.blender_count += 1
    def delete_blender(self, blender):
        """
        Delete this blender from the project.
        """
        self.blenders.remove(blender)
        blender = None
    def new_solvent_layer(self):
        """
        Come up with starting parameters for a new solvent layer.
        """
        layer = SolventLayer()
        layer.name = "Solvent layer {}".format(self.solvent_layer_count + 1)
        return layer
    def add_solvent_layer(self, layer):
        """
        Add this solvent layer to the project.
        """
        self.solvent_layers.append(layer)
        self.solvent_layer_count += 1
    def delete_solvent_layer(self, layer):
        """
        Remove this solvent layer from the project.
        """
        self.solvent_layers.remove(layer)
        layer = None
    def edit_solute_settings(self, file, bufspace, center):
        """
        Apply solute configuration so it can be loaded by load_solute().
        `file` is a structure file path, `bufspace` a buffer distance, and
        `center` the target Z coordinate (or None to skip recentering).
        """
        self.import_solute = file
        self.solute_buffer_space = float(bufspace)
        if center is not None:
            self.solute_z = float(center)
        else:
            self.solute_z = None
    def load_solute(self, should_expand):
        """
        Import a solute into the project. Expands the lattice if needed,
        then marks obstructed sites on every layer.
        """
        if self.import_solute is not None:
            self.solute = mda.Universe(self.import_solute)
        else:
            return
        if self.solute_z is not None:
            # Recenter the solute so its centroid sits at the middle of the
            # lattice in X/Y and at solute_z in Z.
            positions = self.solute.atoms.positions
            # NOTE(review): solute_indices is computed here but never used
            # in this method (overlay_solute recomputes its own) -- looks
            # like leftover code.
            solute_indices = list(set(list(np.where(positions[:,2] > self.solute_z-self.lattice_spacing)[0])) & set(list(np.where(positions[:,2] < self.solute_z+self.lattice_spacing)[0])))
            solute_center = np.array([np.mean(positions[:,0]), np.mean(positions[:,1]), np.mean(positions[:,2])])
            lattice_center = np.array([(self.lattice_spacing*self.lattice_width)/2., (self.lattice_spacing*self.lattice_height)/2., self.solute_z])
            self.solute.atoms.positions = positions + (lattice_center - solute_center)
            pass
        if should_expand:
            # Grow the lattice so the solute (plus buffer) fits inside it.
            xmax = np.amax(self.solute.atoms.positions[:,0])
            ymax = np.amax(self.solute.atoms.positions[:,1])
            newwidth = self.lattice_width
            newheight = self.lattice_height
            if xmax > (self.lattice_width*self.lattice_spacing):
                newwidth = int((xmax + self.solute_buffer_space + self.lattice_spacing) // self.lattice_spacing)
            if ymax > (self.lattice_height*self.lattice_spacing):
                newheight = int((ymax + self.solute_buffer_space + self.lattice_spacing) // self.lattice_spacing)
            self.edit_lattice_params(newwidth, newheight, self.lattice_spacing, self.lattice_major_gridlines)
        for layer in self.layers:
            self.overlay_solute(layer)
    def overlay_solute(self, layer):
        """
        Overlay the imported solute onto one layer so that the lattice regions
        occupied by the solute become obstructed (site ID -1).
        """
        positions = self.solute.atoms.positions
        # Atoms within one lattice spacing of this layer's Z depth.
        solute_indices = list(set(list(np.where(positions[:,2] > layer.zdepth-self.lattice_spacing)[0])) & set(list(np.where(positions[:,2] < layer.zdepth+self.lattice_spacing)[0])))
        self.remove_overlay(layer)
        for ind in solute_indices:
            x = positions[ind][0]
            y = positions[ind][1]
            # Range of lattice cells covered by this atom plus buffer.
            n_pos_x = int((x+self.solute_buffer_space) // self.lattice_spacing)
            n_neg_x = int((x-self.solute_buffer_space) // self.lattice_spacing)
            n_pos_y = int((y+self.solute_buffer_space) // self.lattice_spacing)
            n_neg_y = int((y-self.solute_buffer_space) // self.lattice_spacing)
            n_range_x = list(range(n_neg_x, n_pos_x+1))
            n_range_y = list(range(n_neg_y, n_pos_y+1))
            for row in n_range_y:
                for col in n_range_x:
                    if (row >= 0) and (row < layer.lattice.shape[0]) and (col >= 0) and (col < layer.lattice.shape[1]):
                        # Writes through a transposed+flipped *view* of the
                        # lattice, so the assignment does land in
                        # layer.lattice -- the (col, row) pair is expressed
                        # in display orientation. TODO confirm orientation
                        # against the painting/rendering code.
                        np.flip(layer.lattice.swapaxes(0,1), axis=1)[col][row] = -1
    def remove_overlay(self, layer):
        """
        Set obstructed lattice site IDs from -1 back to 0.
        """
        for row in range(layer.lattice.shape[0]):
            for col in range(layer.lattice.shape[1]):
                if layer.lattice[row][col] == -1:
                    layer.lattice[row][col] = 0
| [
"numpy.amin",
"numpy.zeros",
"MDAnalysis.Universe",
"numpy.amax",
"numpy.mean",
"numpy.array",
"numpy.where"
] | [((3143, 3207), 'numpy.zeros', 'np.zeros', (['(self.lattice_height, self.lattice_width)'], {'dtype': '"""int"""'}), "((self.lattice_height, self.lattice_width), dtype='int')\n", (3151, 3207), True, 'import numpy as np\n'), ((2601, 2652), 'numpy.zeros', 'np.zeros', (['(self.lattice_height, self.lattice_width)'], {}), '((self.lattice_height, self.lattice_width))\n', (2609, 2652), True, 'import numpy as np\n'), ((7817, 7849), 'MDAnalysis.Universe', 'mda.Universe', (['self.import_solute'], {}), '(self.import_solute)\n', (7829, 7849), True, 'import MDAnalysis as mda\n'), ((8306, 8435), 'numpy.array', 'np.array', (['[self.lattice_spacing * self.lattice_width / 2.0, self.lattice_spacing *\n self.lattice_height / 2.0, self.solute_z]'], {}), '([self.lattice_spacing * self.lattice_width / 2.0, self.\n lattice_spacing * self.lattice_height / 2.0, self.solute_z])\n', (8314, 8435), True, 'import numpy as np\n'), ((8575, 8617), 'numpy.amax', 'np.amax', (['self.solute.atoms.positions[:, 0]'], {}), '(self.solute.atoms.positions[:, 0])\n', (8582, 8617), True, 'import numpy as np\n'), ((8636, 8678), 'numpy.amax', 'np.amax', (['self.solute.atoms.positions[:, 1]'], {}), '(self.solute.atoms.positions[:, 1])\n', (8643, 8678), True, 'import numpy as np\n'), ((2680, 2735), 'numpy.amin', 'np.amin', (['[layer.lattice.shape[0], new_lattice.shape[0]]'], {}), '([layer.lattice.shape[0], new_lattice.shape[0]])\n', (2687, 2735), True, 'import numpy as np\n'), ((2770, 2825), 'numpy.amin', 'np.amin', (['[layer.lattice.shape[1], new_lattice.shape[1]]'], {}), '([layer.lattice.shape[1], new_lattice.shape[1]])\n', (2777, 2825), True, 'import numpy as np\n'), ((8201, 8225), 'numpy.mean', 'np.mean', (['positions[:, 0]'], {}), '(positions[:, 0])\n', (8208, 8225), True, 'import numpy as np\n'), ((8226, 8250), 'numpy.mean', 'np.mean', (['positions[:, 1]'], {}), '(positions[:, 1])\n', (8233, 8250), True, 'import numpy as np\n'), ((8251, 8275), 'numpy.mean', 'np.mean', (['positions[:, 2]'], {}), 
'(positions[:, 2])\n', (8258, 8275), True, 'import numpy as np\n'), ((9584, 9647), 'numpy.where', 'np.where', (['(positions[:, 2] > layer.zdepth - self.lattice_spacing)'], {}), '(positions[:, 2] > layer.zdepth - self.lattice_spacing)\n', (9592, 9647), True, 'import numpy as np\n'), ((9661, 9724), 'numpy.where', 'np.where', (['(positions[:, 2] < layer.zdepth + self.lattice_spacing)'], {}), '(positions[:, 2] < layer.zdepth + self.lattice_spacing)\n', (9669, 9724), True, 'import numpy as np\n'), ((8017, 8081), 'numpy.where', 'np.where', (['(positions[:, 2] > self.solute_z - self.lattice_spacing)'], {}), '(positions[:, 2] > self.solute_z - self.lattice_spacing)\n', (8025, 8081), True, 'import numpy as np\n'), ((8095, 8159), 'numpy.where', 'np.where', (['(positions[:, 2] < self.solute_z + self.lattice_spacing)'], {}), '(positions[:, 2] < self.solute_z + self.lattice_spacing)\n', (8103, 8159), True, 'import numpy as np\n')] |
"""Grouped bar chart of a block-size sweep.

Reads ``blocksize_new.csv`` (column 0: number of blocks read, column 1:
latency per read) and renders a two-panel bar chart, one bar per tested
block size, saved to ``Lambda_blocks.pdf``.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Plot styling knobs.
bar_width = 0.012
opacity = 0.7
num_layouts = 5

df = pd.read_csv('blocksize_new.csv', delimiter=',')
buck_arr = df.values[:, 0]  # blocks read per configuration
lat_arr = df.values[:, 1]   # latency per read

block_widths = [4, 8, 16, 32, 64]  # legend labels: nodes per block
index = np.arange(0, 0.1, 0.1 / 5)  # x positions for the 5 bars
ticks = range(1, num_layouts + 1)
color_arr = ['yellow', 'tomato', 'skyblue', 'fuchsia', 'greenyellow']
hatch_arr = ['||', '-', '++', '//', 'oo']

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 5), sharex=True)


def _style_bars(bars):
    """Apply the per-block-size color/hatch scheme to one bar container."""
    for i in range(num_layouts):
        bars[i].set_color(color_arr[i])
        bars[i].set_hatch(hatch_arr[i])
        bars[i].set_edgecolor('k')


bucket_bar = ax1.bar(index,
                     buck_arr,
                     bar_width,
                     alpha=opacity,
                     ecolor='k',
                     edgecolor='k',
                     )
_style_bars(bucket_bar)

lat_bar = ax2.bar(index,
                  lat_arr,
                  bar_width,
                  alpha=opacity,
                  ecolor='k',
                  edgecolor='k',
                  )
_style_bars(lat_bar)

ax1.set_ylabel('Number of Blocks Read', fontsize='x-large')
ax2.set_ylabel('Latency per read', fontsize='x-large')
plt.tick_params(axis='both', which='major', labelsize='x-large')
plt.xticks(index, ticks, rotation=0)
ax1.set_xlabel('Block size (Number of nodes per block)', fontsize='x-large')
ax2.set_xlabel('Block size (Number of nodes per block)', fontsize='x-large')
plt.figlegend(bucket_bar, block_widths, bbox_to_anchor=(0.485, 0.9), fontsize='medium', ncol=5, columnspacing=0.2)
plt.tight_layout()
# BUG FIX: the original also passed a misspelled ``fbbox_inches='tight'``
# keyword alongside ``bbox_inches``; recent Matplotlib rejects unknown
# savefig kwargs, so the typo has been dropped.
plt.savefig('Lambda_blocks.pdf', bbox_inches='tight', format='pdf')
plt.show()
| [
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.arange",
"matplotlib.pyplot.figlegend",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((142, 189), 'pandas.read_csv', 'pd.read_csv', (['"""blocksize_new.csv"""'], {'delimiter': '""","""'}), "('blocksize_new.csv', delimiter=',')\n", (153, 189), True, 'import pandas as pd\n'), ((285, 311), 'numpy.arange', 'np.arange', (['(0)', '(0.1)', '(0.1 / 5)'], {}), '(0, 0.1, 0.1 / 5)\n', (294, 311), True, 'import numpy as np\n'), ((529, 576), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(9, 5)', 'sharex': '(True)'}), '(1, 2, figsize=(9, 5), sharex=True)\n', (541, 576), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1514), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '"""x-large"""'}), "(axis='both', which='major', labelsize='x-large')\n", (1465, 1514), True, 'import matplotlib.pyplot as plt\n'), ((1515, 1551), 'matplotlib.pyplot.xticks', 'plt.xticks', (['index', 'ticks'], {'rotation': '(0)'}), '(index, ticks, rotation=0)\n', (1525, 1551), True, 'import matplotlib.pyplot as plt\n'), ((1736, 1854), 'matplotlib.pyplot.figlegend', 'plt.figlegend', (['bucket_bar', 'block_widths'], {'bbox_to_anchor': '(0.485, 0.9)', 'fontsize': '"""medium"""', 'ncol': '(5)', 'columnspacing': '(0.2)'}), "(bucket_bar, block_widths, bbox_to_anchor=(0.485, 0.9),\n fontsize='medium', ncol=5, columnspacing=0.2)\n", (1749, 1854), True, 'import matplotlib.pyplot as plt\n'), ((1959, 1977), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1975, 1977), True, 'import matplotlib.pyplot as plt\n'), ((1978, 2071), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Lambda_blocks.pdf"""'], {'fbbox_inches': '"""tight"""', 'bbox_inches': '"""tight"""', 'format': '"""pdf"""'}), "('Lambda_blocks.pdf', fbbox_inches='tight', bbox_inches='tight',\n format='pdf')\n", (1989, 2071), True, 'import matplotlib.pyplot as plt\n'), ((2068, 2078), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2076, 2078), True, 'import matplotlib.pyplot as plt\n')] |
import math
import numpy as np
def Rotation_euler_vector(euler_vector, theta):
    """Rotation matrix for a rotation of ``theta`` radians about an axis.

    Implements the axis-angle (Rodrigues) rotation formula. The axis is
    normalized first if it is not already a unit vector.

    Parameters
    ----------
    euler_vector : sequence of 3 floats
        Rotation axis (need not be normalized, but must be nonzero).
    theta : float
        Rotation angle in radians.

    Returns
    -------
    numpy.ndarray
        The 3x3 rotation matrix.
    """
    ux = euler_vector[0]
    uy = euler_vector[1]
    uz = euler_vector[2]
    # Normalize the axis if it is not already unit length.
    # BUG FIX: the norm previously used ``uz*2`` instead of ``uz**2``, so
    # even genuine unit axes with a z component were wrongly rescaled.
    norm = math.sqrt(ux**2 + uy**2 + uz**2)
    if norm != 1:
        ux = ux/norm
        uy = uy/norm
        uz = uz/norm
    # Hoist the trig terms: c = cos(theta), s = sin(theta), t = 1 - c.
    c = np.cos(theta)
    s = np.sin(theta)
    t = 1 - c
    R = np.array([[c + ux**2*t,    ux*uy*t - uz*s, ux*uz*t + uy*s],
                  [ux*uy*t + uz*s, c + uy**2*t,    uy*uz*t - ux*s],
                  [ux*uz*t - uy*s, uy*uz*t + ux*s, c + uz**2*t]])
    return R
def Rotation_euler_angles(theta):
    """Rotation matrix from three Euler angles.

    theta : sequence of 3 floats -- rotation angles in radians about the
    x, y and z axes respectively. Returns the composition
    R = R_z @ R_y @ R_x (extrinsic x-y-z convention).
    """
    # Elementary rotation about the x axis by theta[0].
    R_x = np.array([[1, 0, 0 ],
                    [0, math.cos(theta[0]), -math.sin(theta[0]) ],
                    [0, math.sin(theta[0]), math.cos(theta[0]) ]
                    ])
    # Elementary rotation about the y axis by theta[1].
    R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1]) ],
                    [0, 1, 0 ],
                    [-math.sin(theta[1]), 0, math.cos(theta[1]) ]
                    ])
    # Elementary rotation about the z axis by theta[2].
    R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
                    [math.sin(theta[2]), math.cos(theta[2]), 0],
                    [0, 0, 1]
                    ])
    # Compose: apply the x rotation first, then y, then z.
    R = np.dot(R_z, np.dot( R_y, R_x ))
    return R | [
"math.sqrt",
"math.sin",
"numpy.sin",
"numpy.array",
"math.cos",
"numpy.cos",
"numpy.dot"
] | [((204, 241), 'math.sqrt', 'math.sqrt', (['(ux ** 2 + uy ** 2 + uz * 2)'], {}), '(ux ** 2 + uy ** 2 + uz * 2)\n', (213, 241), False, 'import math\n'), ((790, 851), 'numpy.array', 'np.array', (['[[R11, R12, R13], [R21, R22, R23], [R31, R32, R33]]'], {}), '([[R11, R12, R13], [R21, R22, R23], [R31, R32, R33]])\n', (798, 851), True, 'import numpy as np\n'), ((324, 337), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (330, 337), True, 'import numpy as np\n'), ((533, 546), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (539, 546), True, 'import numpy as np\n'), ((742, 755), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (748, 755), True, 'import numpy as np\n'), ((1755, 1771), 'numpy.dot', 'np.dot', (['R_y', 'R_x'], {}), '(R_y, R_x)\n', (1761, 1771), True, 'import numpy as np\n'), ((403, 416), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (409, 416), True, 'import numpy as np\n'), ((456, 469), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (462, 469), True, 'import numpy as np\n'), ((509, 522), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (515, 522), True, 'import numpy as np\n'), ((612, 625), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (618, 625), True, 'import numpy as np\n'), ((665, 678), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (671, 678), True, 'import numpy as np\n'), ((718, 731), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (724, 731), True, 'import numpy as np\n'), ((349, 362), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (355, 362), True, 'import numpy as np\n'), ((383, 396), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (389, 396), True, 'import numpy as np\n'), ((436, 449), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (442, 449), True, 'import numpy as np\n'), ((489, 502), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (495, 502), True, 'import numpy as np\n'), ((558, 571), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (564, 571), True, 'import numpy as 
np\n'), ((592, 605), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (598, 605), True, 'import numpy as np\n'), ((645, 658), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (651, 658), True, 'import numpy as np\n'), ((698, 711), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (704, 711), True, 'import numpy as np\n'), ((767, 780), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (773, 780), True, 'import numpy as np\n'), ((1045, 1063), 'math.cos', 'math.cos', (['theta[0]'], {}), '(theta[0])\n', (1053, 1063), False, 'import math\n'), ((1120, 1138), 'math.sin', 'math.sin', (['theta[0]'], {}), '(theta[0])\n', (1128, 1138), False, 'import math\n'), ((1140, 1158), 'math.cos', 'math.cos', (['theta[0]'], {}), '(theta[0])\n', (1148, 1158), False, 'import math\n'), ((1228, 1246), 'math.cos', 'math.cos', (['theta[1]'], {}), '(theta[1])\n', (1236, 1246), False, 'import math\n'), ((1259, 1277), 'math.sin', 'math.sin', (['theta[1]'], {}), '(theta[1])\n', (1267, 1277), False, 'import math\n'), ((1409, 1427), 'math.cos', 'math.cos', (['theta[1]'], {}), '(theta[1])\n', (1417, 1427), False, 'import math\n'), ((1493, 1511), 'math.cos', 'math.cos', (['theta[2]'], {}), '(theta[2])\n', (1501, 1511), False, 'import math\n'), ((1565, 1583), 'math.sin', 'math.sin', (['theta[2]'], {}), '(theta[2])\n', (1573, 1583), False, 'import math\n'), ((1588, 1606), 'math.cos', 'math.cos', (['theta[2]'], {}), '(theta[2])\n', (1596, 1606), False, 'import math\n'), ((1066, 1084), 'math.sin', 'math.sin', (['theta[0]'], {}), '(theta[0])\n', (1074, 1084), False, 'import math\n'), ((1379, 1397), 'math.sin', 'math.sin', (['theta[1]'], {}), '(theta[1])\n', (1387, 1397), False, 'import math\n'), ((1517, 1535), 'math.sin', 'math.sin', (['theta[2]'], {}), '(theta[2])\n', (1525, 1535), False, 'import math\n')] |
# SVR baseline: predict Expedia hotel clusters from a 1% training sample.
# NOTE(review): hotel_cluster is a categorical label; fitting a *regressor*
# to it treats the cluster IDs as ordinal -- confirm this is intentional.
import pandas as pd
# Used to convert DataFrames to plain arrays below.
import numpy as np
# NOTE(review): csv is imported but unused in this script.
import csv
# NOTE(review): sqrt is imported but unused (np.sqrt is used instead).
from math import sqrt
# Enables the train/test split of the data.
from sklearn.model_selection import train_test_split
# Support vector machine models.
from sklearn import svm
# scikit-learn metrics module for error calculation.
from sklearn import metrics
# Read the data for SVR (hard-coded absolute Windows path).
df = pd.read_csv('D:/TIF/TIF SEM.7/Frontier Technology/Hotel Recommendations/GitHub Documentation/clean_train_one_percent.csv', sep=',')
# Labels are the values we want to predict.
labels = np.array(df['hotel_cluster'])
# Remove the labels from the features;
# axis 1 refers to the columns.
df = df.drop('hotel_cluster', axis=1)
# Saving feature names for later use.
df_list = list(df.columns)
# Convert to numpy array.
df = np.array(df)
# Split the data into training (75%) and testing (25%) sets.
train_df, test_df, train_labels, test_labels = train_test_split(
    df, labels, test_size=0.25, random_state=50)
# Create an SVR regression with a
# Radial Basis Function kernel.
clf = svm.SVR(kernel='rbf', gamma='auto')
# Train the model using the training sets.
clf.fit(train_df, train_labels)
# Predict the response for the test dataset.
pred = clf.predict(test_df)
# Calculate the absolute errors.
errors = abs(pred - test_labels)
# Print out the mean absolute error (MAE).
print('MAE:', round(np.mean(errors), 2))
# Print out the mean squared error (MSE).
print('MSE: ', metrics.mean_squared_error(test_labels, pred))
# Print out the root mean squared error (RMSE).
print('RMSE: ', np.sqrt(metrics.mean_squared_error(test_labels, pred))) | [
"sklearn.svm.SVR",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.mean",
"numpy.array",
"sklearn.metrics.mean_squared_error"
] | [((435, 576), 'pandas.read_csv', 'pd.read_csv', (['"""D:/TIF/TIF SEM.7/Frontier Technology/Hotel Recommendations/GitHub Documentation/clean_train_one_percent.csv"""'], {'sep': '""","""'}), "(\n 'D:/TIF/TIF SEM.7/Frontier Technology/Hotel Recommendations/GitHub Documentation/clean_train_one_percent.csv'\n , sep=',')\n", (446, 576), True, 'import pandas as pd\n'), ((623, 652), 'numpy.array', 'np.array', (["df['hotel_cluster']"], {}), "(df['hotel_cluster'])\n", (631, 652), True, 'import numpy as np\n'), ((863, 875), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (871, 875), True, 'import numpy as np\n'), ((975, 1036), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df', 'labels'], {'test_size': '(0.25)', 'random_state': '(50)'}), '(df, labels, test_size=0.25, random_state=50)\n', (991, 1036), False, 'from sklearn.model_selection import train_test_split\n'), ((1111, 1146), 'sklearn.svm.SVR', 'svm.SVR', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""'}), "(kernel='rbf', gamma='auto')\n", (1118, 1146), False, 'from sklearn import svm\n'), ((1511, 1556), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['test_labels', 'pred'], {}), '(test_labels, pred)\n', (1537, 1556), False, 'from sklearn import metrics\n'), ((1432, 1447), 'numpy.mean', 'np.mean', (['errors'], {}), '(errors)\n', (1439, 1447), True, 'import numpy as np\n'), ((1631, 1676), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['test_labels', 'pred'], {}), '(test_labels, pred)\n', (1657, 1676), False, 'from sklearn import metrics\n')] |
"""Pair plot between variables of latent space"""
from configparser import ConfigParser, ExtendedInterpolation
import glob
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sdss.utils.managefiles import FileDirectory
from sdss.utils.configfile import ConfigurationFile
###############################################################################
start_time = time.time()
###############################################################################
parser = ConfigParser(interpolation=ExtendedInterpolation())
config_file_name = "pair_plots.ini"
parser.read(f"{config_file_name}")
config = ConfigurationFile()
manage_files = FileDirectory()
###############################################################################
print(f"Load metadata", end="\n")
data_directory = parser.get("directory", "data")
science_df = parser.get("file", "science")
science_df = pd.read_csv(
f"{data_directory}/{science_df}", index_col="specobjid"
)
bin_directory = parser.get("directory", "bin_data")
specobjid_name = parser.get("file", "specobjid")
specobjid = np.load(f"{bin_directory}/{specobjid_name}")
bin_df = science_df.loc[specobjid[:, 1]]
del science_df
###############################################################################
print(f"Load embedding and latent representations", end="\n")
latent_directory = parser.get("directory", "latent")
latent_directories = glob.glob(f"{latent_directory}/*/")
latent_name = parser.get("file", "latent")
_ = [
manage_files.file_exists(
f"{latent_location}/{latent_name}", exit_program=True
)
for latent_location in latent_directories
]
bin_id = parser.get("common", "bin")
# set plot parameters
parameters_of_plot = config.section_to_dictionary(
parser.items("plot"), value_separators=[","]
)
size = config.entry_to_list(parser.get("plot", "size"), float, ",")
size = tuple(size)
parameters_of_plot["size"] = size
fig, ax = plt.subplots(figsize=size, tight_layout=True)
# flags
number_latent_variables = None
bin_df_of_plot = None
models_ids = [model_id.split("/")[-2] for model_id in latent_directories]
for model_idx, latent_directory in enumerate(latent_directories):
latent = np.load(f"{latent_directory}/{latent_name}")
number_latent_variables = latent.shape[1]
# load latent representation to data frame
for idx in range(number_latent_variables):
bin_df[f"{idx:02d}Latent"] = latent[:, idx]
print(f"model {models_ids[model_idx]}: pair plots", end="\n")
for hue in parameters_of_plot["hues"]:
bin_df_of_plot = bin_df[bin_df[hue] != "undefined"]
for latent_x in range(number_latent_variables):
for latent_y in range(latent_x, number_latent_variables):
if latent_x == latent_y:
continue
print(
f"Pair plot: {latent_x:02d} vs {latent_y:02d}"
f"Hue: {hue}",
end="\r"
)
# pair_plot = sns.scatterplot(
sns.scatterplot(
x=f"{latent_x:02d}Latent",
y=f"{latent_y:02d}Latent",
ax=ax,
data=bin_df_of_plot,
hue=hue,
alpha=parameters_of_plot["alpha"],
s = parameters_of_plot["marker_size"],
edgecolors = parameters_of_plot["edgecolors"],
)
save_to = f"{latent_directory}/pair_plots"
manage_files.check_directory(save_to, exit_program=False)
fig.savefig(
f"{save_to}/"
f"pair_{latent_x:02d}_{latent_y:02d}_"
f"{hue}.{parameters_of_plot['format']}"
)
ax.clear()
###########################################################################
print(f"Save configuration file", end="\n")
with open(f"{latent_directory}/{config_file_name}", "w") as config_file:
parser.write(config_file)
###############################################################################
finish_time = time.time()
print(f"\nRun time: {finish_time - start_time:.2f}")
| [
"numpy.load",
"sdss.utils.managefiles.FileDirectory",
"seaborn.scatterplot",
"pandas.read_csv",
"sdss.utils.configfile.ConfigurationFile",
"time.time",
"configparser.ExtendedInterpolation",
"glob.glob",
"matplotlib.pyplot.subplots"
] | [((425, 436), 'time.time', 'time.time', ([], {}), '()\n', (434, 436), False, 'import time\n'), ((659, 678), 'sdss.utils.configfile.ConfigurationFile', 'ConfigurationFile', ([], {}), '()\n', (676, 678), False, 'from sdss.utils.configfile import ConfigurationFile\n'), ((694, 709), 'sdss.utils.managefiles.FileDirectory', 'FileDirectory', ([], {}), '()\n', (707, 709), False, 'from sdss.utils.managefiles import FileDirectory\n'), ((931, 999), 'pandas.read_csv', 'pd.read_csv', (['f"""{data_directory}/{science_df}"""'], {'index_col': '"""specobjid"""'}), "(f'{data_directory}/{science_df}', index_col='specobjid')\n", (942, 999), True, 'import pandas as pd\n'), ((1120, 1164), 'numpy.load', 'np.load', (['f"""{bin_directory}/{specobjid_name}"""'], {}), "(f'{bin_directory}/{specobjid_name}')\n", (1127, 1164), True, 'import numpy as np\n'), ((1439, 1474), 'glob.glob', 'glob.glob', (['f"""{latent_directory}/*/"""'], {}), "(f'{latent_directory}/*/')\n", (1448, 1474), False, 'import glob\n'), ((1969, 2014), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'size', 'tight_layout': '(True)'}), '(figsize=size, tight_layout=True)\n', (1981, 2014), True, 'import matplotlib.pyplot as plt\n'), ((4184, 4195), 'time.time', 'time.time', ([], {}), '()\n', (4193, 4195), False, 'import time\n'), ((2234, 2278), 'numpy.load', 'np.load', (['f"""{latent_directory}/{latent_name}"""'], {}), "(f'{latent_directory}/{latent_name}')\n", (2241, 2278), True, 'import numpy as np\n'), ((553, 576), 'configparser.ExtendedInterpolation', 'ExtendedInterpolation', ([], {}), '()\n', (574, 576), False, 'from configparser import ConfigParser, ExtendedInterpolation\n'), ((3080, 3317), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'f"""{latent_x:02d}Latent"""', 'y': 'f"""{latent_y:02d}Latent"""', 'ax': 'ax', 'data': 'bin_df_of_plot', 'hue': 'hue', 'alpha': "parameters_of_plot['alpha']", 's': "parameters_of_plot['marker_size']", 'edgecolors': "parameters_of_plot['edgecolors']"}), 
"(x=f'{latent_x:02d}Latent', y=f'{latent_y:02d}Latent', ax=ax,\n data=bin_df_of_plot, hue=hue, alpha=parameters_of_plot['alpha'], s=\n parameters_of_plot['marker_size'], edgecolors=parameters_of_plot[\n 'edgecolors'])\n", (3095, 3317), True, 'import seaborn as sns\n')] |
from __future__ import print_function
import numpy as np
from copy import deepcopy
import os
def dense_to_one_hot(labels_dense, num_classes):
    """Expand integer class labels into a (n_samples, num_classes) one-hot
    integer matrix: row i has a 1 in column labels_dense[i], zeros elsewhere."""
    indices = np.asarray(labels_dense).astype(int)
    # Row-select from an integer identity matrix: row k is the one-hot
    # encoding of class k.
    return np.eye(num_classes, dtype=int)[indices]
def save_h5weights(model,filename='network.h5'):
    """Dump the model's weight matrices and bias vectors to an HDF5 file.

    `model` must provide get_weights_bias() returning (W_list, b_list).
    Weights are stored under datasets "W1".."Wn" and biases under
    "b0".."b(n-1)".
    """
    import h5py
    W_list, b_list = model.get_weights_bias()
    h5f = h5py.File(filename,'w')
    # NOTE(review): weight keys are 1-based ("W1"...) while bias keys are
    # 0-based ("b0"...); this looks unintentional, but any loader of these
    # files may depend on the naming -- confirm before changing.
    for i in range(0,len(W_list)):
        h5f.create_dataset("W"+str(1+i), data=W_list[i])
    for i in range(0,len(b_list)):
        h5f.create_dataset("b"+str(i), data=b_list[i])
    h5f.close()
    return
def plot_matrices(
    matrix_list,
    shape = None,
    images_per_row = 10,
    scale_limit = None,
    figsize = (20, 8),
    x_axis_list = None,
    filename = None,
    title = None,
    highlight_bad_values = True,
    plt = None,
    pdf = None,
    ):
    """Plot the images for each matrix in the matrix_list.

    matrix_list : sequence of 2-D (or 1-D, expanded to a column) arrays.
    shape : optional (rows, cols) to reshape every matrix to.
    scale_limit : None, "auto" (global min/max over all matrices), or a
        (vmin, vmax) 2-tuple applied to every image.
    highlight_bad_values : mask NaN/Inf cells and render them red.
    filename : if given, save the figure to this path.
    pdf : if given, append the figure as a page via pdf.savefig();
        otherwise the figure is shown interactively.
    NOTE(review): the ``plt`` parameter is ignored -- it is immediately
    shadowed by the local ``from matplotlib import pyplot as plt`` below.
    """
    import matplotlib
    from matplotlib import pyplot as plt
    fig = plt.figure(figsize = figsize)
    fig.set_canvas(plt.gcf().canvas)
    if title is not None:
        fig.suptitle(title, fontsize = 18, horizontalalignment = 'left', x=0.1)
    num_matrixs = len(matrix_list)
    # NOTE(review): rows is a float; recent Matplotlib requires integer
    # subplot geometry -- confirm against the pinned matplotlib version.
    rows = np.ceil(num_matrixs / float(images_per_row))
    # Fall back to the raw list if reshaping fails (e.g. ragged matrices).
    try:
        matrix_list_reshaped = np.reshape(np.array(matrix_list), (-1, shape[0],shape[1])) \
            if shape is not None else np.array(matrix_list)
    except:
        matrix_list_reshaped = matrix_list
    if scale_limit == "auto":
        # Shared color scale: global min/max across all matrices.
        scale_min = np.Inf
        scale_max = -np.Inf
        for matrix in matrix_list:
            scale_min = min(scale_min, np.min(matrix))
            scale_max = max(scale_max, np.max(matrix))
        scale_limit = (scale_min, scale_max)
    for i in range(len(matrix_list)):
        ax = fig.add_subplot(rows, images_per_row, i + 1)
        image = matrix_list_reshaped[i].astype(float)
        if len(image.shape) == 1:
            # Render 1-D vectors as a single-column image.
            image = np.expand_dims(image, 1)
        if highlight_bad_values:
            # Mask NaN/Inf cells so they show up in translucent red.
            cmap = matplotlib.cm.binary
            cmap.set_bad('red', alpha = 0.2)
            mask_key = []
            mask_key.append(np.isnan(image))
            mask_key.append(np.isinf(image))
            mask_key = np.any(np.array(mask_key), axis = 0)
            image = np.ma.array(image, mask = mask_key)
        else:
            cmap = matplotlib.cm.binary
        if scale_limit is None:
            ax.matshow(image, cmap = cmap)
        else:
            assert len(scale_limit) == 2, "scale_limit should be a 2-tuple!"
            ax.matshow(image, cmap = cmap, vmin = scale_limit[0], vmax = scale_limit[1])
        # Best-effort axis label: per-image (min,max) and shape, plus the
        # caller-supplied annotation if any. Failures are ignored.
        try:
            xlabel = "({0:.4f},{1:.4f})\nshape: ({2}, {3})".format(np.min(image), np.max(image), image.shape[0], image.shape[1])
            if x_axis_list is not None:
                xlabel += "\n{0}".format(x_axis_list[i])
            plt.xlabel(xlabel)
        except:
            pass
        plt.xticks(np.array([]))
        plt.yticks(np.array([]))
    if filename is not None:
        plt.tight_layout()
        plt.savefig(filename)
    if pdf is not None:
        pdf.savefig() # saves the current figure into a pdf page
        plt.close()
    else:
        plt.show()
    if scale_limit is not None:
        print("scale_limit: ({0:.6f}, {1:.6f})".format(scale_limit[0], scale_limit[1]))
    print()
class Gradient_Noise_Scale_Gen(object):
    """Produce annealed gradient-noise schedules of the form
    sqrt(a * (t + b) ** (-gamma)), for adding decaying Gaussian noise
    to gradients during training.

    gamma: decay exponent of the annealing schedule.
    eta: scale coefficient for the simple schedule.
    noise_scale_start / noise_scale_end: endpoints used by the
        fixed-ends schedule.
    gradient_noise_interval_batch: batches between noise-scale updates.
    fun_pointer: method name bound to ``self.generate_scale``.
    batch_size: minibatch size, used to derive the schedule length.
    """
    def __init__(
        self,
        gamma = 0.55,
        eta = 0.01,
        noise_scale_start = 1e-2,
        noise_scale_end = 1e-6,
        gradient_noise_interval_batch = 1,
        fun_pointer = "generate_scale_simple",
        batch_size = 50,
        ):
        self.gamma = gamma
        self.eta = eta
        self.noise_scale_start = noise_scale_start
        self.noise_scale_end = noise_scale_end
        self.gradient_noise_interval_batch = gradient_noise_interval_batch
        self.batch_size = batch_size
        # Bind the chosen schedule generator by name.
        self.generate_scale = getattr(self, fun_pointer)
    def get_max_iter(self, epochs, num_examples):
        """Store the run size and derive how many noise-scale updates occur."""
        self.epochs = epochs
        self.num_examples = num_examples
        self.max_iter = int(self.epochs * self.num_examples / self.batch_size / self.gradient_noise_interval_batch) + 1
    def generate_scale_simple(
        self,
        epochs,
        num_examples,
        verbose = True
        ):
        """Return the schedule sqrt(eta * t ** (-gamma)) for t = 1..max_iter."""
        self.get_max_iter(epochs, num_examples)
        steps = np.arange(self.max_iter) + 1
        gradient_noise_scale = np.sqrt(self.eta * steps ** (- self.gamma))
        if verbose:
            print("gradient_noise_scale: start = {0}, end = {1:.6f}, gamma = {2}, length = {3}".format(gradient_noise_scale[0], gradient_noise_scale[-1], self.gamma, self.max_iter))
        return gradient_noise_scale
    def generate_scale_fix_ends(
        self,
        epochs,
        num_examples,
        verbose = True,
        ):
        """Return sqrt(aa * (t + bb) ** (-gamma)) with aa, bb solved so the
        curve runs between the configured start and end noise scales."""
        self.get_max_iter(epochs, num_examples)
        ratio = (self.noise_scale_start / float(self.noise_scale_end)) ** (1 / self.gamma) - 1
        self.bb = self.max_iter / ratio
        self.aa = self.noise_scale_start * self.bb ** self.gamma
        steps = np.arange(self.max_iter) + self.bb
        gradient_noise_scale = np.sqrt(self.aa * steps ** (- self.gamma))
        if verbose:
            print("gradient_noise_scale: start = {0}, end = {1:.6f}, gamma = {2}, length = {3}".format(gradient_noise_scale[0], gradient_noise_scale[-1], self.gamma, self.max_iter))
        return gradient_noise_scale
def plot_pdf(input_, sigma_value, plot_threshold = 0.001):
    """Plot the density function of the weights.

    The input_ can either be a tuple of (x_axis, density) or a single weight
    tensor.  ``sigma_value`` is the Gaussian width used both to evaluate the
    density tensor and to build the mixed-Gaussian density from a raw weight
    tensor.  ``plot_threshold`` trims the plotted range to where the density
    exceeds the threshold.
    """
    from matplotlib import pyplot as plt
    import tensorflow as tf
    if isinstance(input_, tuple) and len(input_) == 2:
        x_axis, density_tensor = input_
        # NOTE(review): ``sigma`` is not defined anywhere in this function —
        # it presumably refers to a module-level TF placeholder; as written
        # this line raises NameError unless such a global exists. Verify.
        density = density_tensor.eval({sigma: sigma_value})
        if plot_threshold is not None and plot_threshold > 0:
            # Trim leading/trailing regions where the density is negligible.
            for i in range(len(x_axis)):
                if density[i] > plot_threshold:
                    start = i
                    break
            for i in range(len(x_axis) - 1, 0, -1):
                if density[i] > plot_threshold:
                    end = i
                    break
            x_axis = x_axis[start: end]
            density = density[start: end]
        plt.plot(x_axis, density)
    else:
        weight = input_
        def get_mixed_Gaussian(weight, x, sigma_value):
            """helper function to calculate the integrand value at specific x"""
            weight_flatten = tf.reshape(weight, [tf.size(weight).eval()]).eval()
            out = (np.sum(np.exp( - (x - weight_flatten) ** 2 / (2 * sigma_value ** 2)))) ** 2
            return out
        value_min = np.min(weight.eval())
        value_max = np.max(weight.eval())
        # Evaluate the density on a grid padded by 2 sigma on each side.
        x_axis = np.linspace(value_min - 2 * sigma_value, value_max + 2 * sigma_value, 100)
        y_axis = []
        for x in x_axis:
            y_axis.append(get_mixed_Gaussian(weight, x, sigma_value))
        plt.plot(x_axis, y_axis)
    plt.show()
def plot_density(input_, sigma = None, x_label = None, y_label = None, xlim = None):
    """Plot a Gaussian kernel-density estimate of the 1-D sample ``input_``.

    input_:  sequence of scalars whose density is estimated.
    sigma:   KDE bandwidth (covariance factor); defaults to 1% of the data range.
    x_label, y_label: optional axis labels.
    xlim:    optional (xmin, xmax) limits for the plot.
    """
    from scipy.stats import gaussian_kde
    from matplotlib import pyplot as plt
    density = gaussian_kde(input_)
    xs = np.linspace(np.min(input_), np.max(input_), 400)
    if sigma is None:
        # Bug fix: this was (max - max) / 100, which is always 0 and makes
        # the KDE covariance singular; 1% of the data range was intended.
        sigma = (np.max(input_) - np.min(input_)) / 100
    density.covariance_factor = lambda : sigma
    density._compute_covariance()
    plt.plot(xs, density(xs))
    if xlim is not None:
        plt.xlim(xlim)
    if x_label is not None:
        plt.xlabel(x_label)
    if y_label is not None:
        plt.ylabel(y_label)
    plt.show()
def record_weights(weight_record_list, weights_to_reg, chosen_index = None):
    """transform the weight tensor into a numpy array and save as the same list structure as weights_to_reg

    weight_record_list: accumulator with one sublist per entry of
        ``weights_to_reg``; initialized on first call when empty.
    weights_to_reg: list whose entries are either single tensors or lists of
        tensors (each must support ``.eval()``, e.g. within a TF session).
    chosen_index: if given, only the values at these flat indices are kept
        instead of the full flattened weight.
    """
    if len(weight_record_list) == 0:
        # First call: mirror the (possibly nested) structure of weights_to_reg
        # with empty sublists, one per tensor.
        for weights in weights_to_reg:
            if isinstance(weights, list):
                length = len(weights)
            else:
                length = 1
            weight_record_list.append([[] for i in range(length)])
    for i in range(len(weights_to_reg)):
        weights = weights_to_reg[i]
        if isinstance(weights, list):
            for j in range(len(weights)):
                weight = weights[j]
                if chosen_index is None:
                    weight_record_list[i][j].append(np.ndarray.flatten(weight.eval()))
                else:
                    weight_record_list[i][j].append(np.ndarray.flatten(weight.eval())[chosen_index])
        else:
            # Single tensor: stored under sub-slot 0.
            if chosen_index is None:
                weight_record_list[i][0].append(np.ndarray.flatten(weights.eval()))
            else:
                weight_record_list[i][0].append(np.ndarray.flatten(weights.eval())[chosen_index])
def record_info(info_record_list, info_list, feed_dict = {}, tf_Tensor = None):
    """Record the information into info_record_list.

    Each element of ``info_list`` (or of its one-level-nested sublists) that
    is an instance of ``tf_Tensor`` is evaluated with ``feed_dict``; any
    other value is stored as-is. The resolved snapshot is appended to
    ``info_record_list``. ``tf_Tensor`` defaults to ``tf.Tensor`` (imported
    lazily) but can be injected for testing.
    """
    if tf_Tensor is None:
        import tensorflow as tf
        tf_Tensor = tf.Tensor
    def resolve(item):
        # Evaluate tensors; pass plain values through unchanged.
        return item.eval(feed_dict = feed_dict) if isinstance(item, tf_Tensor) else item
    snapshot = []
    for entry in info_list:
        if isinstance(entry, list):
            snapshot.append([resolve(item) for item in entry])
        else:
            snapshot.append(resolve(entry))
    info_record_list.append(snapshot)
def decompose_list(input_):
    """Recursively decompose any list structure into a flat list.

    Only objects whose exact type is ``list`` are expanded; tuples and
    other containers are kept as single items. Order is preserved.
    """
    flat = []
    pending = [input_]
    while pending:
        item = pending.pop(0)
        if type(item) is list:
            # Expand in place so depth-first order is preserved.
            pending = item + pending
        else:
            flat.append(item)
    return flat
def get_dir(filename):
    """Resolve ``filename`` relative to this module's directory.

    Creates the parent directory of the file if it does not exist, then
    returns the full path. ``filename`` is expected to contain a "/"
    separating the directory part from the file name (note: with no "/",
    ``rfind`` returns -1 and the last character is treated as the split
    point — unchanged from the original behavior).
    """
    base = os.path.dirname(os.path.realpath(__file__))
    sep_index = filename.rfind("/")
    target_dir = os.path.join(base, filename[:sep_index])
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    return os.path.join(base, filename)
def make_dir(filename):
    """Create the parent directory of ``filename`` if it is missing.

    A race-safe creation: if another process creates the directory between
    the existence check and ``makedirs``, the EEXIST error is swallowed;
    any other OSError is printed and re-raised.
    """
    import os
    import errno
    parent = os.path.dirname(filename)
    if not os.path.exists(parent):
        print("directory {0} does not exist, created.".format(parent))
        try:
            os.makedirs(parent)
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                print(exc)
                raise
def print_struct_param(struct_param, transition_mode = "layertype-only", print_mode = "long"):
    """Summarize ``struct_param`` in a concise, printable form.

    print_mode "long" (or "short" with an unknown transition_mode):
        [[num_neurons, weight_type, bias_type], ...]
    print_mode "short" + "num-neurons-only": [num_neurons, ...]
    print_mode "short" + "layertype-only": [[weight_type, bias_type], ...]
    Any other print_mode raises an Exception.
    """
    def full_summary(params):
        # One [num_neurons, weight type, bias type] triple per layer.
        return [[num_neurons, hyper["weight"]["type"], hyper["bias"]["type"]]
                for num_neurons, _layer_mode, hyper in params]
    if print_mode == "long":
        return full_summary(struct_param)
    elif print_mode == "short":
        if transition_mode == "num-neurons-only":
            return [layer[0] for layer in struct_param]
        elif transition_mode == "layertype-only":
            return [[layer[2]["weight"]["type"], layer[2]["bias"]["type"]]
                    for layer in struct_param]
        else:
            return full_summary(struct_param)
    else:
        raise Exception("print_mode must be either 'long' or 'short'!")
def rotate_matrix_cw(matrix, angle = 90):
    """Rotate the matrix clockwise by certain angle (multiples of 90 deg).

    matrix: a 2-D numpy array.
    angle: integer multiple of 90; reduced modulo 360, so e.g. 450 behaves
        like 90 and 360 returns the matrix unchanged.
    Returns a new array (a float array whenever a rotation occurs, because
    the helper builds it with np.zeros).
    """
    def rotate_matrix_90(matrix):
        # One clockwise quarter-turn: element (i, j) moves to (j, rows-1-i).
        rows, columns = matrix.shape
        matrix_new = np.zeros((columns, rows))
        for i in range(rows):
            for j in range(columns):
                matrix_new[j, rows - 1 - i] = matrix[i, j]
        return matrix_new
    assert isinstance(angle, int) and angle % 90 == 0, "The rotation angle must be multiples of 90 deg!"
    # Bug fix: use floor division — "/" yields a float in Python 3, and
    # range(float) raises TypeError.
    times = (angle % 360) // 90
    for k in range(times):
        matrix = rotate_matrix_90(matrix)
    return matrix
def record_data(data_record_dict, data_list, key_list):
    """Record data to the dictionary data_record_dict. It records each key: value pair in the corresponding location of
    key_list and data_list into the dictionary, appending to per-key lists."""
    assert len(data_list) == len(key_list), "the data_list and key_list should have the same length!"
    for key, value in zip(key_list, data_list):
        # setdefault creates the per-key list on first sight, then appends.
        data_record_dict.setdefault(key, []).append(value)
def sort_two_lists(list1, list2, reverse = False):
    """Sort two parallel lists by the values of the first list.

    Returns a pair of new lists (deep copies — the inputs are untouched);
    ``reverse=True`` sorts descending. Empty inputs yield ([], []).
    """
    from operator import itemgetter
    pairs = sorted(zip(deepcopy(list1), deepcopy(list2)),
                   key=itemgetter(0), reverse=reverse)
    if not pairs:
        return [], []
    columns = [list(col) for col in zip(*pairs)]
    return columns[0], columns[1]
def get_new_name(name_prev):
    """Return ``name_prev`` with its trailing numeric suffix incremented,
    or with "_0" appended when there is no numeric suffix."""
    parts = name_prev.split("_")
    try:
        # NOTE: eval of the last segment — only safe on trusted names.
        bumped = str(eval(parts[-1]) + 1)
        pieces = parts[:-1] + [bumped]
    except:
        pieces = parts + ["0"]
    return "_".join(pieces)
def truncated_normal(shape, init_mean, init_std):
    """Truncated normal function, where the examples that are outside of 2 init_std are thrown out.

    Draws standard truncated-normal samples on [-2, 2] and rescales them to
    mean ``init_mean`` and standard deviation ``init_std``.
    """
    from scipy.stats import truncnorm
    draws = truncnorm.rvs(-2, 2, size = shape)
    return init_mean + init_std * draws
def add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
    """Adds scaled noise from a 0-mean normal distribution to gradients.

    grads_and_vars: iterable of (gradient, variable) pairs as produced by an
        optimizer's ``compute_gradients``; gradients may be None.
    gradient_noise_scale: multiplier applied to the unit-variance truncated
        normal noise.
    Returns a list of (noisy_gradient, variable) pairs; None gradients are
    passed through unchanged.
    """
    import tensorflow as tf
    from tensorflow.python.framework import ops
    gradients, variables = zip(*grads_and_vars)
    noisy_gradients = []
    for gradient in gradients:
        if gradient is None:
            # Keep the pairing intact for variables with no gradient.
            noisy_gradients.append(None)
            continue
        if isinstance(gradient, ops.IndexedSlices):
            # Sparse gradients carry their shape in dense_shape.
            gradient_shape = gradient.dense_shape
        else:
            gradient_shape = gradient.get_shape()
        noise = tf.truncated_normal(gradient_shape) * gradient_noise_scale
        noisy_gradients.append(gradient + noise)
    return list(zip(noisy_gradients, variables))
def plot_record(model_param, key_list = ["loss_train", "loss_valid", "reg_S_entropy", "reg_L1", "reg_L1_selector"], log_scale = False):
    """Plot the recorded training curves named in ``key_list`` against epoch.

    model_param: either a dict with a "data_record" entry or an object with a
        ``data_record`` attribute; the record maps metric names to per-epoch
        value lists and must contain an "epoch" entry.
    key_list: metric names to plot; missing keys are skipped silently.
        (The mutable default is only read, never mutated, so it is safe.)
    log_scale: if True, the y-axis is switched to log scale.
    """
    import matplotlib.pyplot as plt
    if isinstance(model_param, dict):
        data_record = model_param["data_record"]
    else:
        data_record = model_param.data_record
    record_list = {}
    for key in key_list:
        if key not in data_record:
            continue
        record_list[key] = data_record[key]
        plt.plot(data_record["epoch"], record_list[key], label = key)
        if log_scale:
            plt.yscale('log')
    plt.legend()
    plt.show()
def softmax(X, axis = -1):
    """Numerically stable softmax along ``axis``.

    Subtracts the per-slice maximum before exponentiating so large inputs
    cannot overflow; each slice along ``axis`` sums to 1.
    """
    shifted = X - np.amax(X, axis, keepdims = True)
    exponentials = np.exp(shifted)
    return exponentials / np.sum(exponentials, axis = axis, keepdims = True)
def manifold_embedding(X, color = None, all_methods = None):
    """Visualize 2-D manifold embeddings of ``X`` side by side.

    Plots the first three coordinates of X in 3-D, then up to eight
    embeddings (four LLE variants, Isomap, MDS, SpectralEmbedding, t-SNE)
    in a 2x5 subplot grid, timing each method.

    X: data array; rows are points (the 3-D panel reads columns 0-2, so at
        least 3 columns are assumed — confirm with callers).
    color: per-point scalar colors; None means uniform, "linspace" means a
        0..1 gradient over the points.
    all_methods: subset of methods to run; None runs all of them.
    """
    from matplotlib import pylab as plt
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib.ticker import NullFormatter
    from sklearn import manifold
    from time import time
    if color is None:
        color = np.ones(X.shape[0])
    elif color == "linspace":
        color = np.linspace(0, 1, X.shape[0])
    if all_methods is None:
        all_methods = ['standard', 'ltsa', 'hessian', 'modified', "Isomap", "MDS", "SpectralEmbedding", "t-SNE"]
    # Next line to silence pyflakes. This import is needed.
    Axes3D
    n_points = len(X)
    n_neighbors = 10
    n_components = 2
    marker_size = 1
    # Shared color-bar limits for every panel.
    cmap_scale = (np.min(color), np.max(color))
    fig = plt.figure(figsize=(20, 15))
    plt.suptitle("Manifold Learning with %i points, %i neighbors"
                 % (n_points, n_neighbors), fontsize=14)
    # Panel 1: raw data in 3-D (first three coordinates).
    ax = fig.add_subplot(251, projection='3d')
    cax = ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral, s = marker_size, vmin= cmap_scale[0], vmax=cmap_scale[1])
    ax.view_init(4, -72)
    cbar = fig.colorbar(cax, ticks=[cmap_scale[0], np.mean(cmap_scale), cmap_scale[1]]) # color bar
    cbar.ax.set_yticklabels(['{0:.3f}'.format(cmap_scale[0]), '{0:.3f}'.format(np.mean(cmap_scale)), '{0:.3f}'.format(cmap_scale[1])])
    # Panels 2-5: the four LocallyLinearEmbedding variants.
    methods = ['standard', 'ltsa', 'hessian', 'modified']
    labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
    for i, method in enumerate(methods):
        if method in all_methods:
            try:
                t0 = time()
                Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
                                                    eigen_solver='auto',
                                                    method=method).fit_transform(X)
                t1 = time()
                print("%s: %.2g sec" % (methods[i], t1 - t0))
                ax = fig.add_subplot(252 + i)
                plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s = marker_size, vmin= cmap_scale[0], vmax=cmap_scale[1])
                plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
                ax.xaxis.set_major_formatter(NullFormatter())
                ax.yaxis.set_major_formatter(NullFormatter())
                plt.axis('tight')
            except:
                # Some LLE variants fail for certain data (e.g. too few
                # neighbors); skip the panel rather than abort the figure.
                print("method {0} failed!".format(method))
    if "Isomap" in all_methods:
        t0 = time()
        Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
        t1 = time()
        print("Isomap: %.2g sec" % (t1 - t0))
        ax = fig.add_subplot(257)
        plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s = marker_size, vmin= cmap_scale[0], vmax=cmap_scale[1])
        plt.title("Isomap (%.2g sec)" % (t1 - t0))
        ax.xaxis.set_major_formatter(NullFormatter())
        ax.yaxis.set_major_formatter(NullFormatter())
        plt.axis('tight')
    if "MDS" in all_methods:
        t0 = time()
        mds = manifold.MDS(n_components, max_iter=100, n_init=1)
        Y = mds.fit_transform(X)
        t1 = time()
        print("MDS: %.2g sec" % (t1 - t0))
        ax = fig.add_subplot(258)
        plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s = marker_size, vmin= cmap_scale[0], vmax=cmap_scale[1])
        plt.title("MDS (%.2g sec)" % (t1 - t0))
        ax.xaxis.set_major_formatter(NullFormatter())
        ax.yaxis.set_major_formatter(NullFormatter())
        plt.axis('tight')
    if "SpectralEmbedding" in all_methods:
        t0 = time()
        se = manifold.SpectralEmbedding(n_components=n_components,
                                        n_neighbors=n_neighbors)
        Y = se.fit_transform(X)
        t1 = time()
        print("SpectralEmbedding: %.2g sec" % (t1 - t0))
        ax = fig.add_subplot(259)
        plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s = marker_size, vmin= cmap_scale[0], vmax=cmap_scale[1])
        plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
        ax.xaxis.set_major_formatter(NullFormatter())
        ax.yaxis.set_major_formatter(NullFormatter())
        plt.axis('tight')
    if "t-SNE" in all_methods:
        t0 = time()
        tsne = manifold.TSNE(n_components=n_components, perplexity = 30, init='pca', random_state=0)
        Y = tsne.fit_transform(X)
        t1 = time()
        print("t-SNE: %.2g sec" % (t1 - t0))
        ax = fig.add_subplot(2, 5, 10)
        plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s = marker_size, vmin= cmap_scale[0], vmax=cmap_scale[1])
        plt.title("t-SNE (%.2g sec)" % (t1 - t0))
        ax.xaxis.set_major_formatter(NullFormatter())
        ax.yaxis.set_major_formatter(NullFormatter())
        plt.axis('tight')
    plt.show()
def get_struct_str(struct_param):
    """Encode a struct_param as a compact string.

    Each layer becomes "<size><first two chars of layer type>", layers are
    joined with "-"; a tuple of struct_params is joined with ",".
    """
    def encode_one(params):
        return "-".join("{0}{1}".format(layer[0], layer[1][:2]) for layer in params)
    if isinstance(struct_param, tuple):
        return ",".join(encode_one(part) for part in struct_param)
    return encode_one(struct_param)
def get_args(arg, arg_id = 1, type = "str"):
    """Return ``arg`` when running inside IPython/Jupyter, otherwise parse
    ``sys.argv[arg_id]`` according to ``type``.

    type: one of "str", "int", "float", "bool", "eval", "tuple" — controls
        how the command-line string is converted.
    NOTE(review): the outer try detects a notebook via ``get_ipython``; in a
    plain script that raises NameError and the argv path runs instead.
    """
    try:
        get_ipython().run_line_magic('matplotlib', 'inline')
        arg_return = arg
    except:
        import sys
        try:
            arg_return = sys.argv[arg_id]
            if type == "int":
                arg_return = int(arg_return)
            elif type == "float":
                arg_return = float(arg_return)
            elif type == "bool":
                arg_return = eval(arg_return)
            elif type == "eval":
                # NOTE: eval on a command-line string — trusted input only.
                arg_return = eval(arg_return)
            elif type == "tuple":
                # Parse "(a,b,c)" item by item, eval-ing what it can.
                splitted = arg_return[1:-1].split(",")
                List = []
                for item in splitted:
                    try:
                        item = eval(item)
                    except:
                        pass
                    List.append(item)
                arg_return = tuple(List)
            elif type == "str":
                pass
            else:
                raise Exception("type {0} not recognized!".format(type))
        except:
            raise
            # NOTE(review): unreachable — ``raise`` above exits first;
            # presumably ``arg_return = arg`` was meant as a fallback
            # instead of re-raising. Confirm intent before changing.
            arg_return = arg
    return arg_return
class Early_Stopping(object):
    """Early-stopping monitor: tracks a metric and reports when it has
    failed to improve for more than ``patience`` consecutive checks.

    patience: number of non-improving checks tolerated before stopping;
        None disables early stopping entirely (monitor always returns False).
    epsilon: minimum change that counts as an improvement.
    mode: "min" if smaller metric values are better, "max" otherwise.
    """
    def __init__(self, patience = 100, epsilon = 0, mode = "min"):
        self.patience = patience
        self.epsilon = epsilon
        # Bug fix: ``mode`` was hard-coded to "min", silently ignoring the
        # caller's argument, so mode="max" behaved as "min".
        self.mode = mode
        self.best_value = None
        self.wait = 0
    def monitor(self, value):
        """Feed one metric value; return True when training should stop."""
        to_stop = False
        if self.patience is not None:
            if self.best_value is None:
                # First observation: establish the baseline.
                self.best_value = value
                self.wait = 0
            else:
                if (self.mode == "min" and value < self.best_value - self.epsilon) or \
                   (self.mode == "max" and value > self.best_value + self.epsilon):
                    self.best_value = value
                    self.wait = 0
                else:
                    if self.wait >= self.patience:
                        to_stop = True
                    else:
                        self.wait += 1
        return to_stop
    def reset(self):
        """Forget all history so the monitor can be reused."""
        self.best_value = None
        self.wait = 0
def get_highlight_fun(highlight_columns = None, mode = "min"):
    """For pandas dataframe, highlighting the min/max values in a column.

    Returns a function suitable for ``DataFrame.style.apply``: cells equal
    to the column extreme get an orange background; with
    ``highlight_columns`` given, only columns whose name is in that list
    are highlighted.
    """
    def highlight(s):
        if mode == "min":
            matches = s == s.min()
        elif mode == "max":
            matches = s == s.max()
        if highlight_columns is not None:
            matches = matches & (s.name in highlight_columns)
        return ['background-color: darkorange' if flag else '' for flag in matches]
    return highlight
def get_int_str(start, end):
    """Return the integers start..end (inclusive) as a string, each followed
    by a single space (e.g. get_int_str(1, 3) -> "1 2 3 ")."""
    return "".join("{0} ".format(i) for i in range(start, end + 1))
def new_dict(Dict, new_content_dict):
    """Return a deep copy of ``Dict`` updated with ``new_content_dict``;
    the original dictionary is left untouched."""
    merged = deepcopy(Dict)
    for key, value in new_content_dict.items():
        merged[key] = value
    return merged
def base_repr(n, base, length):
    """Return the base-``base`` digits of ``n`` as a list of ints,
    zero-padded on the left to exactly ``length`` digits."""
    assert n < base ** length, "n should be smaller than b ** length"
    digit_str = np.base_repr(n, base, padding = length)[-length:]
    return [int(digit) for digit in digit_str]
def base_repr_2_int(List, base):
    """Inverse of ``base_repr``: collapse a digit list back into the integer
    it encodes, via Horner's rule (recursing on the higher-order digits).
    An empty list maps to 0."""
    if len(List) == 1:
        return List[0]
    elif len(List) == 0:
        return 0
    else:
        return base * base_repr_2_int(List[:-1], base) + List[-1] | [
"numpy.ones",
"numpy.isnan",
"matplotlib.pylab.axis",
"numpy.base_repr",
"matplotlib.pylab.gcf",
"sklearn.manifold.LocallyLinearEmbedding",
"numpy.mean",
"matplotlib.pylab.suptitle",
"numpy.exp",
"numpy.arange",
"matplotlib.pylab.close",
"matplotlib.pylab.title",
"sklearn.manifold.MDS",
"o... | [((265, 301), 'numpy.zeros', 'np.zeros', (['(sample_size, num_classes)'], {}), '((sample_size, num_classes))\n', (273, 301), True, 'import numpy as np\n'), ((546, 570), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (555, 570), False, 'import h5py\n'), ((1181, 1208), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1191, 1208), True, 'from matplotlib import pylab as plt\n'), ((7326, 7336), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (7334, 7336), True, 'from matplotlib import pylab as plt\n'), ((7520, 7540), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['input_'], {}), '(input_)\n', (7532, 7540), False, 'from scipy.stats import gaussian_kde\n'), ((7952, 7962), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (7960, 7962), True, 'from matplotlib import pylab as plt\n'), ((10593, 10642), 'os.path.join', 'os.path.join', (['current_directory', 'filename[:index]'], {}), '(current_directory, filename[:index])\n', (10605, 10642), False, 'import os\n'), ((10657, 10680), 'os.path.isdir', 'os.path.isdir', (['dir_name'], {}), '(dir_name)\n', (10670, 10680), False, 'import os\n'), ((10742, 10783), 'os.path.join', 'os.path.join', (['current_directory', 'filename'], {}), '(current_directory, filename)\n', (10754, 10783), False, 'import os\n'), ((14535, 14567), 'scipy.stats.truncnorm.rvs', 'truncnorm.rvs', (['(-2)', '(2)'], {'size': 'shape'}), '(-2, 2, size=shape)\n', (14548, 14567), False, 'from scipy.stats import truncnorm\n'), ((15953, 15965), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (15963, 15965), True, 'from matplotlib import pylab as plt\n'), ((15970, 15980), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (15978, 15980), True, 'from matplotlib import pylab as plt\n'), ((16022, 16053), 'numpy.amax', 'np.amax', (['X', 'axis'], {'keepdims': '(True)'}), '(X, axis, keepdims=True)\n', (16029, 16053), True, 'import numpy as np\n'), ((16064, 16081), 
'numpy.exp', 'np.exp', (['(X - X_max)'], {}), '(X - X_max)\n', (16070, 16081), True, 'import numpy as np\n'), ((16878, 16906), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (16888, 16906), True, 'from matplotlib import pylab as plt\n'), ((16911, 17016), 'matplotlib.pylab.suptitle', 'plt.suptitle', (["('Manifold Learning with %i points, %i neighbors' % (n_points, n_neighbors))"], {'fontsize': '(14)'}), "('Manifold Learning with %i points, %i neighbors' % (n_points,\n n_neighbors), fontsize=14)\n", (16923, 17016), True, 'from matplotlib import pylab as plt\n'), ((20886, 20896), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (20894, 20896), True, 'from matplotlib import pylab as plt\n'), ((24269, 24283), 'copy.deepcopy', 'deepcopy', (['Dict'], {}), '(Dict)\n', (24277, 24283), False, 'from copy import deepcopy\n'), ((3240, 3258), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3256, 3258), True, 'from matplotlib import pylab as plt\n'), ((3267, 3288), 'matplotlib.pylab.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (3278, 3288), True, 'from matplotlib import pylab as plt\n'), ((3387, 3398), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (3396, 3398), True, 'from matplotlib import pylab as plt\n'), ((3417, 3427), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (3425, 3427), True, 'from matplotlib import pylab as plt\n'), ((6597, 6622), 'matplotlib.pylab.plot', 'plt.plot', (['x_axis', 'density'], {}), '(x_axis, density)\n', (6605, 6622), True, 'from matplotlib import pylab as plt\n'), ((7099, 7173), 'numpy.linspace', 'np.linspace', (['(value_min - 2 * sigma_value)', '(value_max + 2 * sigma_value)', '(100)'], {}), '(value_min - 2 * sigma_value, value_max + 2 * sigma_value, 100)\n', (7110, 7173), True, 'import numpy as np\n'), ((7297, 7321), 'matplotlib.pylab.plot', 'plt.plot', (['x_axis', 'y_axis'], {}), '(x_axis, y_axis)\n', (7305, 7321), True, 'from 
matplotlib import pylab as plt\n'), ((7562, 7576), 'numpy.min', 'np.min', (['input_'], {}), '(input_)\n', (7568, 7576), True, 'import numpy as np\n'), ((7578, 7592), 'numpy.max', 'np.max', (['input_'], {}), '(input_)\n', (7584, 7592), True, 'import numpy as np\n'), ((7821, 7835), 'matplotlib.pylab.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (7829, 7835), True, 'from matplotlib import pylab as plt\n'), ((7872, 7891), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (7882, 7891), True, 'from matplotlib import pylab as plt\n'), ((7928, 7947), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (7938, 7947), True, 'from matplotlib import pylab as plt\n'), ((10469, 10495), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (10485, 10495), False, 'import os\n'), ((10709, 10730), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (10720, 10730), False, 'import os\n'), ((12702, 12727), 'numpy.zeros', 'np.zeros', (['(columns, rows)'], {}), '((columns, rows))\n', (12710, 12727), True, 'import numpy as np\n'), ((15843, 15902), 'matplotlib.pylab.plot', 'plt.plot', (["data_record['epoch']", 'record_list[key]'], {'label': 'key'}), "(data_record['epoch'], record_list[key], label=key)\n", (15851, 15902), True, 'from matplotlib import pylab as plt\n'), ((15931, 15948), 'matplotlib.pylab.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (15941, 15948), True, 'from matplotlib import pylab as plt\n'), ((16425, 16444), 'numpy.ones', 'np.ones', (['X.shape[0]'], {}), '(X.shape[0])\n', (16432, 16444), True, 'import numpy as np\n'), ((16837, 16850), 'numpy.min', 'np.min', (['color'], {}), '(color)\n', (16843, 16850), True, 'import numpy as np\n'), ((16852, 16865), 'numpy.max', 'np.max', (['color'], {}), '(color)\n', (16858, 16865), True, 'import numpy as np\n'), ((18572, 18578), 'time.time', 'time', ([], {}), '()\n', (18576, 18578), False, 'from time import time\n'), ((18664, 18670), 
'time.time', 'time', ([], {}), '()\n', (18668, 18670), False, 'from time import time\n'), ((18759, 18878), 'matplotlib.pylab.scatter', 'plt.scatter', (['Y[:, 0]', 'Y[:, 1]'], {'c': 'color', 'cmap': 'plt.cm.Spectral', 's': 'marker_size', 'vmin': 'cmap_scale[0]', 'vmax': 'cmap_scale[1]'}), '(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s=marker_size,\n vmin=cmap_scale[0], vmax=cmap_scale[1])\n', (18770, 18878), True, 'from matplotlib import pylab as plt\n'), ((18886, 18928), 'matplotlib.pylab.title', 'plt.title', (["('Isomap (%.2g sec)' % (t1 - t0))"], {}), "('Isomap (%.2g sec)' % (t1 - t0))\n", (18895, 18928), True, 'from matplotlib import pylab as plt\n'), ((19045, 19062), 'matplotlib.pylab.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (19053, 19062), True, 'from matplotlib import pylab as plt\n'), ((19106, 19112), 'time.time', 'time', ([], {}), '()\n', (19110, 19112), False, 'from time import time\n'), ((19127, 19177), 'sklearn.manifold.MDS', 'manifold.MDS', (['n_components'], {'max_iter': '(100)', 'n_init': '(1)'}), '(n_components, max_iter=100, n_init=1)\n', (19139, 19177), False, 'from sklearn import manifold\n'), ((19224, 19230), 'time.time', 'time', ([], {}), '()\n', (19228, 19230), False, 'from time import time\n'), ((19316, 19435), 'matplotlib.pylab.scatter', 'plt.scatter', (['Y[:, 0]', 'Y[:, 1]'], {'c': 'color', 'cmap': 'plt.cm.Spectral', 's': 'marker_size', 'vmin': 'cmap_scale[0]', 'vmax': 'cmap_scale[1]'}), '(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s=marker_size,\n vmin=cmap_scale[0], vmax=cmap_scale[1])\n', (19327, 19435), True, 'from matplotlib import pylab as plt\n'), ((19443, 19482), 'matplotlib.pylab.title', 'plt.title', (["('MDS (%.2g sec)' % (t1 - t0))"], {}), "('MDS (%.2g sec)' % (t1 - t0))\n", (19452, 19482), True, 'from matplotlib import pylab as plt\n'), ((19599, 19616), 'matplotlib.pylab.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (19607, 19616), True, 'from matplotlib import pylab as plt\n'), ((19674, 
19680), 'time.time', 'time', ([], {}), '()\n', (19678, 19680), False, 'from time import time\n'), ((19694, 19772), 'sklearn.manifold.SpectralEmbedding', 'manifold.SpectralEmbedding', ([], {'n_components': 'n_components', 'n_neighbors': 'n_neighbors'}), '(n_components=n_components, n_neighbors=n_neighbors)\n', (19720, 19772), False, 'from sklearn import manifold\n'), ((19858, 19864), 'time.time', 'time', ([], {}), '()\n', (19862, 19864), False, 'from time import time\n'), ((19964, 20083), 'matplotlib.pylab.scatter', 'plt.scatter', (['Y[:, 0]', 'Y[:, 1]'], {'c': 'color', 'cmap': 'plt.cm.Spectral', 's': 'marker_size', 'vmin': 'cmap_scale[0]', 'vmax': 'cmap_scale[1]'}), '(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s=marker_size,\n vmin=cmap_scale[0], vmax=cmap_scale[1])\n', (19975, 20083), True, 'from matplotlib import pylab as plt\n'), ((20091, 20144), 'matplotlib.pylab.title', 'plt.title', (["('SpectralEmbedding (%.2g sec)' % (t1 - t0))"], {}), "('SpectralEmbedding (%.2g sec)' % (t1 - t0))\n", (20100, 20144), True, 'from matplotlib import pylab as plt\n'), ((20261, 20278), 'matplotlib.pylab.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (20269, 20278), True, 'from matplotlib import pylab as plt\n'), ((20324, 20330), 'time.time', 'time', ([], {}), '()\n', (20328, 20330), False, 'from time import time\n'), ((20346, 20433), 'sklearn.manifold.TSNE', 'manifold.TSNE', ([], {'n_components': 'n_components', 'perplexity': '(30)', 'init': '"""pca"""', 'random_state': '(0)'}), "(n_components=n_components, perplexity=30, init='pca',\n random_state=0)\n", (20359, 20433), False, 'from sklearn import manifold\n'), ((20479, 20485), 'time.time', 'time', ([], {}), '()\n', (20483, 20485), False, 'from time import time\n'), ((20578, 20697), 'matplotlib.pylab.scatter', 'plt.scatter', (['Y[:, 0]', 'Y[:, 1]'], {'c': 'color', 'cmap': 'plt.cm.Spectral', 's': 'marker_size', 'vmin': 'cmap_scale[0]', 'vmax': 'cmap_scale[1]'}), '(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, 
s=marker_size,\n vmin=cmap_scale[0], vmax=cmap_scale[1])\n', (20589, 20697), True, 'from matplotlib import pylab as plt\n'), ((20705, 20746), 'matplotlib.pylab.title', 'plt.title', (["('t-SNE (%.2g sec)' % (t1 - t0))"], {}), "('t-SNE (%.2g sec)' % (t1 - t0))\n", (20714, 20746), True, 'from matplotlib import pylab as plt\n'), ((20863, 20880), 'matplotlib.pylab.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (20871, 20880), True, 'from matplotlib import pylab as plt\n'), ((24466, 24503), 'numpy.base_repr', 'np.base_repr', (['n', 'base'], {'padding': 'length'}), '(n, base, padding=length)\n', (24478, 24503), True, 'import numpy as np\n'), ((321, 343), 'numpy.arange', 'np.arange', (['sample_size'], {}), '(sample_size)\n', (330, 343), True, 'import numpy as np\n'), ((1230, 1239), 'matplotlib.pylab.gcf', 'plt.gcf', ([], {}), '()\n', (1237, 1239), True, 'from matplotlib import pylab as plt\n'), ((1589, 1610), 'numpy.array', 'np.array', (['matrix_list'], {}), '(matrix_list)\n', (1597, 1610), True, 'import numpy as np\n'), ((2145, 2169), 'numpy.expand_dims', 'np.expand_dims', (['image', '(1)'], {}), '(image, 1)\n', (2159, 2169), True, 'import numpy as np\n'), ((2484, 2517), 'numpy.ma.array', 'np.ma.array', (['image'], {'mask': 'mask_key'}), '(image, mask=mask_key)\n', (2495, 2517), True, 'import numpy as np\n'), ((3080, 3098), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (3090, 3098), True, 'from matplotlib import pylab as plt\n'), ((3151, 3163), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3159, 3163), True, 'import numpy as np\n'), ((3184, 3196), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3192, 3196), True, 'import numpy as np\n'), ((10867, 10892), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (10882, 10892), False, 'import os\n'), ((15215, 15250), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['gradient_shape'], {}), '(gradient_shape)\n', (15234, 15250), True, 'import tensorflow as 
tf\n'), ((16491, 16520), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'X.shape[0]'], {}), '(0, 1, X.shape[0])\n', (16502, 16520), True, 'import numpy as np\n'), ((18966, 18981), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (18979, 18981), False, 'from matplotlib.ticker import NullFormatter\n'), ((19020, 19035), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (19033, 19035), False, 'from matplotlib.ticker import NullFormatter\n'), ((19520, 19535), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (19533, 19535), False, 'from matplotlib.ticker import NullFormatter\n'), ((19574, 19589), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (19587, 19589), False, 'from matplotlib.ticker import NullFormatter\n'), ((20182, 20197), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (20195, 20197), False, 'from matplotlib.ticker import NullFormatter\n'), ((20236, 20251), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (20249, 20251), False, 'from matplotlib.ticker import NullFormatter\n'), ((20784, 20799), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (20797, 20799), False, 'from matplotlib.ticker import NullFormatter\n'), ((20838, 20853), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (20851, 20853), False, 'from matplotlib.ticker import NullFormatter\n'), ((1501, 1522), 'numpy.array', 'np.array', (['matrix_list'], {}), '(matrix_list)\n', (1509, 1522), True, 'import numpy as np\n'), ((1825, 1839), 'numpy.min', 'np.min', (['matrix'], {}), '(matrix)\n', (1831, 1839), True, 'import numpy as np\n'), ((1880, 1894), 'numpy.max', 'np.max', (['matrix'], {}), '(matrix)\n', (1886, 1894), True, 'import numpy as np\n'), ((2342, 2357), 'numpy.isnan', 'np.isnan', (['image'], {}), '(image)\n', (2350, 2357), True, 'import numpy as np\n'), ((2387, 2402), 'numpy.isinf', 'np.isinf', (['image'], {}), '(image)\n', (2395, 
2402), True, 'import numpy as np\n'), ((2434, 2452), 'numpy.array', 'np.array', (['mask_key'], {}), '(mask_key)\n', (2442, 2452), True, 'import numpy as np\n'), ((2909, 2922), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (2915, 2922), True, 'import numpy as np\n'), ((2924, 2937), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (2930, 2937), True, 'import numpy as np\n'), ((7638, 7652), 'numpy.max', 'np.max', (['input_'], {}), '(input_)\n', (7644, 7652), True, 'import numpy as np\n'), ((7655, 7669), 'numpy.max', 'np.max', (['input_'], {}), '(input_)\n', (7661, 7669), True, 'import numpy as np\n'), ((10957, 10982), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (10972, 10982), False, 'import os\n'), ((11022, 11047), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (11037, 11047), False, 'import os\n'), ((17291, 17310), 'numpy.mean', 'np.mean', (['cmap_scale'], {}), '(cmap_scale)\n', (17298, 17310), True, 'import numpy as np\n'), ((17419, 17438), 'numpy.mean', 'np.mean', (['cmap_scale'], {}), '(cmap_scale)\n', (17426, 17438), True, 'import numpy as np\n'), ((17708, 17714), 'time.time', 'time', ([], {}), '()\n', (17712, 17714), False, 'from time import time\n'), ((17972, 17978), 'time.time', 'time', ([], {}), '()\n', (17976, 17978), False, 'from time import time\n'), ((18104, 18223), 'matplotlib.pylab.scatter', 'plt.scatter', (['Y[:, 0]', 'Y[:, 1]'], {'c': 'color', 'cmap': 'plt.cm.Spectral', 's': 'marker_size', 'vmin': 'cmap_scale[0]', 'vmax': 'cmap_scale[1]'}), '(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral, s=marker_size,\n vmin=cmap_scale[0], vmax=cmap_scale[1])\n', (18115, 18223), True, 'from matplotlib import pylab as plt\n'), ((18239, 18288), 'matplotlib.pylab.title', 'plt.title', (["('%s (%.2g sec)' % (labels[i], t1 - t0))"], {}), "('%s (%.2g sec)' % (labels[i], t1 - t0))\n", (18248, 18288), True, 'from matplotlib import pylab as plt\n'), ((18429, 18446), 'matplotlib.pylab.axis', 'plt.axis', 
(['"""tight"""'], {}), "('tight')\n", (18437, 18446), True, 'from matplotlib import pylab as plt\n'), ((18591, 18633), 'sklearn.manifold.Isomap', 'manifold.Isomap', (['n_neighbors', 'n_components'], {}), '(n_neighbors, n_components)\n', (18606, 18633), False, 'from sklearn import manifold\n'), ((345, 367), 'numpy.array', 'np.array', (['labels_dense'], {}), '(labels_dense)\n', (353, 367), True, 'import numpy as np\n'), ((6901, 6960), 'numpy.exp', 'np.exp', (['(-(x - weight_flatten) ** 2 / (2 * sigma_value ** 2))'], {}), '(-(x - weight_flatten) ** 2 / (2 * sigma_value ** 2))\n', (6907, 6960), True, 'import numpy as np\n'), ((18334, 18349), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (18347, 18349), False, 'from matplotlib.ticker import NullFormatter\n'), ((18396, 18411), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (18409, 18411), False, 'from matplotlib.ticker import NullFormatter\n'), ((17735, 17834), 'sklearn.manifold.LocallyLinearEmbedding', 'manifold.LocallyLinearEmbedding', (['n_neighbors', 'n_components'], {'eigen_solver': '"""auto"""', 'method': 'method'}), "(n_neighbors, n_components, eigen_solver=\n 'auto', method=method)\n", (17766, 17834), False, 'from sklearn import manifold\n'), ((6843, 6858), 'tensorflow.size', 'tf.size', (['weight'], {}), '(weight)\n', (6850, 6858), True, 'import tensorflow as tf\n'), ((13791, 13806), 'copy.deepcopy', 'deepcopy', (['list1'], {}), '(list1)\n', (13799, 13806), False, 'from copy import deepcopy\n'), ((13808, 13823), 'copy.deepcopy', 'deepcopy', (['list2'], {}), '(list2)\n', (13816, 13823), False, 'from copy import deepcopy\n'), ((13830, 13843), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (13840, 13843), False, 'from operator import itemgetter\n'), ((13930, 13945), 'copy.deepcopy', 'deepcopy', (['list1'], {}), '(list1)\n', (13938, 13945), False, 'from copy import deepcopy\n'), ((13947, 13962), 'copy.deepcopy', 'deepcopy', (['list2'], {}), '(list2)\n', 
(13955, 13962), False, 'from copy import deepcopy\n'), ((13969, 13982), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (13979, 13982), False, 'from operator import itemgetter\n')] |
#!/usr/bin/python
import os
import sys
import time
import math
import config.config_read as rsd
import config.config_init as cfg
import config.ColorPrompt as CP
import services.tools as tls
import io_data.data_acces_file as daf
import interface.inline_print as iprint
import services.process as prc
import plot.plot_data as plot_data
import plot.plot_3D_ROI as plot_3D_data
from models.HippModel import HippModel
from PIL import Image
from sys import getsizeof
import scipy.misc
import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt
#------------------------------------------------------------------------------------------
# Function: generate_lists() generates and saves list of data of MRI with augmentation
# (tain, Valid and Test)
#------------------------------------------------------------------------------------------
def generate_lists(data_params):
    """Build the augmented train/valid/test lists for ADNI and persist them as a pickle."""
    destination = data_params['adni_data_des'] + tls.get_convention_name(data_params) + '/List_data.pkl'
    # One entry per dataset; currently only ADNI-1 is generated.
    all_lists = [generate_lists_from_adni_dataset(data_params)]
    daf.save_lists_to_file(path_file=destination, data_list=all_lists)
#------------------------------------------------------------------------------------------
# Function: split data by using class name (from txt file)
#------------------------------------------------------------------------------------------
def split_classses_data(liste):
    """Split "subject:label" lines into per-class subject lists.

    Each entry of *liste* is expected to look like "subject_id:CLASS".  The
    subject part (before the first ':') is appended to every output list whose
    class token occurs as a substring of the label part, so input order is
    preserved inside each output list.  (The original implementation made
    three separate passes and split each item twice per pass.)

    Args:
        liste: iterable of "subject:label" strings.

    Returns:
        tuple(list, list, list): subjects labelled AD, MCI and NC.
    """
    liste_AD = []
    liste_MCI = []
    liste_NC = []
    for item in liste:
        parts = item.split(':')
        subject, label = parts[0], parts[1]
        # Independent substring checks, exactly as before: an item whose label
        # contains several class tokens would land in several lists.
        if 'AD' in label:
            liste_AD.append(subject)
        if 'MCI' in label:
            liste_MCI.append(subject)
        if 'NC' in label:
            liste_NC.append(subject)
    return liste_AD, liste_MCI, liste_NC
#------------------------------------------------------------------------------------------
# Function: generates lists from ADNI folder dataset
#------------------------------------------------------------------------------------------
def get_subjects_with_classes(data_params):
    """Read the ADNI-1 class file and return the per-class subject lists as [AD, MCI, NC]."""
    class_file = str(data_params['adni_1_classes'])
    AD, MCI, NC = split_classses_data(daf.read_data_file(class_file))
    time.sleep(1)
    return [AD, MCI, NC]
#------------------------------------------------------------------------------------------
# Function: generates lists from ADNI folder dataset
#------------------------------------------------------------------------------------------
def generate_lists_from_adni_dataset(data_params, shuffle_data=False, debug=False):
    """Split ADNI-1 subjects into train/valid/test and build augmented sample lists.

    Per class, subject directories are sliced into test / valid / train sets
    (sizes from config when 'static_split', otherwise test from config and
    valid ~20% of the class).  Each split is then expanded with augmentation
    parameters (blur sigma, shift) so that all classes are balanced to
    max(class size) * factor.

    Args:
        data_params: configuration dictionary (paths, sigma, shift, factor,
            split sizes, flip/augm_test flags).
        shuffle_data: shuffle the train and valid output lists in place.
        debug: print every generated list, pausing between sections.

    Returns:
        [train_lists, valid_lists, test_lists], each a list of augmented
        sample descriptors produced by tls.generate_augm_lists_v2.
    """
    stage_classes = ['AD', 'MCI', 'NC']
    max_blur = float(data_params['sigma'])
    max_shift = int(data_params['shift'])
    # Identity augmentation (no shift, no blur) used for non-augmented entries.
    default_augm = (0, 0, 0, 0.0)
    adni1_list = get_subjects_with_classes(data_params)
    adni1_size = {'AD': len(adni1_list[0]), 'MCI': len(adni1_list[1]), 'NC': len(adni1_list[2])}
    adni_1_labels = {'AD': adni1_list[0], 'MCI': adni1_list[1], 'NC': adni1_list[2]}
    # Absolute subject directories per class.
    adni_1_dirs_root = {k: [data_params['adni_1_brain_data'] + '/' + i for i in adni_1_labels[k]] for k in stage_classes}
    # Split sizes: static from config ("SPLIT_SET_PARAMS"), or valid ~20% of class.
    if(data_params['static_split']):
        test_selected_size = {k: int(data_params['select_test'][k]) for k in stage_classes}
        valid_selected_size = {k: int(data_params['select_valid'][k]) for k in stage_classes}
    else:
        test_selected_size = {k: int(data_params['select_test'][k]) for k in stage_classes}
        # NOTE(review): ceil() applies before the /100.0, so this truncates to
        # floor(size * 0.2) — possibly ceil(size * 20 / 100) was intended; confirm.
        valid_selected_size = {k: int(math.ceil(int(adni1_size[k]) * 20) / 100.0) for k in stage_classes}
    train_selected_size = {k: adni1_size[k] - valid_selected_size[k] - test_selected_size[k] for k in stage_classes}
    # Slice the directory lists: [test | valid | train], in that order.
    adni_1_test = {k: adni_1_dirs_root[k][:int(test_selected_size[k])] for k in stage_classes}
    adni_1_valid = {k: adni_1_dirs_root[k][int(test_selected_size[k]):int(test_selected_size[k]) + int(valid_selected_size[k])] for k in stage_classes}
    adni_1_train = {k: adni_1_dirs_root[k][int(test_selected_size[k]) + int(valid_selected_size[k]): int(test_selected_size[k]) + int(valid_selected_size[k]) + int(train_selected_size[k])] for k in stage_classes}
    # Balance every class up to the largest class times the augmentation factor.
    adni_1_train_size_balanced = int(max(train_selected_size.values()) * int(data_params['factor']))
    adni_1_valid_size_balanced = int(max(valid_selected_size.values()) * int(data_params['factor']))
    if data_params['augm_test']:
        adni_1_test_size = int(max(test_selected_size.values()) * int(data_params['factor']))
    else:
        adni_1_test_size = int(min(test_selected_size.values()))
    # Sizes reported below; flipping doubles every sample.
    adni_1_train_size_print = adni_1_train_size_balanced
    adni_1_valid_size_print = adni_1_valid_size_balanced
    adni_1_test_size_print = adni_1_test_size
    if data_params['flip']:
        adni_1_train_size_print = adni_1_train_size_balanced * 2
        adni_1_valid_size_print = adni_1_valid_size_balanced * 2
        adni_1_test_size_print = adni_1_test_size * 2
    print('\n--------------------------------------------------------------------------')
    print('* [' + CP.fg.YELLOW + 'train'+ CP.fg.WHITE + '] data will be augmented to {} samples by each class'.format(adni_1_train_size_print))
    print('* [' + CP.fg.YELLOW + 'valid'+ CP.fg.WHITE + '] data will be augmented to {} samples by each class'.format(adni_1_valid_size_print))
    print('* [' + CP.fg.YELLOW + 'test' + CP.fg.WHITE + '] data will be augmented to {} samples by each class'.format(adni_1_test_size_print))
    print('--------------------------------------------------------------------------\n')
    # Print the per-split / per-class augmentation summary table.
    iprint.print_augmentation_table([
        [int(train_selected_size['AD']), int(train_selected_size['MCI']), int(train_selected_size['NC']), adni_1_train_size_print],
        [int(valid_selected_size['AD']), int(valid_selected_size['MCI']), int(valid_selected_size['NC']), adni_1_valid_size_print],
        [int(test_selected_size['AD']), int(test_selected_size['MCI']), int(test_selected_size['NC']), adni_1_test_size_print]])
    adni_1_train_lists_out = []
    adni_1_valid_lists_out = []
    adni_1_test_lists_out = []
    for k in stage_classes:
        # Each entry becomes [class_name, subject_dir + '/MRI/'].
        adni_1_test_lists = [[k, i + '/MRI/'] for i in adni_1_test[k]]
        adni_1_valid_lists = [[k, i + '/MRI/'] for i in adni_1_valid[k]]
        adni_1_train_lists = [[k, i + '/MRI/'] for i in adni_1_train[k]]
        # Test set: no augmentation targets (None) unless augm_test was applied above.
        adni_1_test_lists_out += tls.generate_augm_lists_v2(adni_1_test_lists, None, None, None, default_augm_params=default_augm)
        adni_1_valid_lists_out += tls.generate_augm_lists_v2(adni_1_valid_lists, adni_1_valid_size_balanced, max_blur, max_shift, default_augm_params=default_augm)
        adni_1_train_lists_out += tls.generate_augm_lists_v2(adni_1_train_lists, adni_1_train_size_balanced, max_blur, max_shift, default_augm_params=default_augm)
    if shuffle_data:
        rnd.shuffle(adni_1_train_lists_out)
        rnd.shuffle(adni_1_valid_lists_out)
    if debug:
        print ('########################### MRI ##########################')
        print('### train lists (%d instances):' % len(adni_1_train_lists_out))
        for i in adni_1_train_lists_out:
            print(i)
        # ########################
        time.sleep(3)
        # ########################
        print('### valid lists (%d instances):' % len(adni_1_valid_lists_out))
        for i in adni_1_valid_lists_out:
            print(i)
        # ########################
        time.sleep(3)
        # ########################
        print('### test lists (%d instances):' % len(adni_1_test_lists_out))
        for i in adni_1_test_lists_out:
            print(i)
        # ########################
        time.sleep(3)
        print(len(adni_1_train_lists_out))
        print(len(adni_1_valid_lists_out))
        print(len(adni_1_test_lists_out))
        # ########################
        time.sleep(3)
    # Order matters: consumers index [train, valid, test].
    return [adni_1_train_lists_out, adni_1_valid_lists_out, adni_1_test_lists_out]
#------------------------------------------------------------------------------------------
# Function: generate Data from lists
# -> data_params: dict of parameters
# -> selected_label: if you want only generate a specific binary classification
# example: selected_label="AD_NC" or =None
#------------------------------------------------------------------------------------------
def generate_data_from_lists(data_params, selected_label=None):
    """Load the previously saved ADNI lists and launch data generation.

    selected_label optionally restricts generation to a single binary task
    (e.g. "AD_NC"); None generates every pairing.
    """
    file_path = data_params['adni_data_des'] + tls.get_convention_name(data_params) + '/List_data.pkl'
    adni_1_in = daf.read_lists_from_file(file_path)[0]
    split_names = ['alz_ADNI_1_train', 'alz_ADNI_1_valid', 'alz_ADNI_1_test']
    lists_with_names = zip(adni_1_in[:3], split_names)
    time.sleep(1)
    generate_data_from_selected_dataset(data_params, lists_with_names, selected_label)
#------------------------------------------------------------------------------------------
# Function: generate Data from lists
#------------------------------------------------------------------------------------------
def generate_data_from_selected_dataset(data_params, lists_with_names, selected_label=None, create_binary_data=True):
    """Generate binary-classification data for every (list, name) pair.

    Args:
        data_params: configuration dictionary.
        lists_with_names: iterable of (sample_list, dataset_name) pairs.
        selected_label: optional task filter such as "AD_NC"; when None every
            binary pairing is generated.
        create_binary_data: when False only a placeholder message is printed
            (3-way classification is not implemented yet).
    """
    print(CP.style.BRIGHT + CP.fg.GREEN + "\n--------------------------------------------------------------------------")
    print(" $ [{} - {} ROI(s)] is selected ... ".format(data_params['3D_or_2D'], data_params['ROI_list'][data_params['ROI_selection']]))
    print("--------------------------------------------------------------------------\n" + CP.fg.WHITE + CP.style.RESET_ALL)
    if not create_binary_data:
        print("create 3 way Data")  # extensible for future
        return
    if selected_label is not None:
        print("Create lmdbs for : {} ".format(selected_label))
    # Build the (samples, name, label_code) work queue once; the original
    # duplicated this loop in both branches.
    queue = []
    for (lst, name) in lists_with_names:
        bin_groups = tls.split_lists_to_binary_groups(lst)
        for k in bin_groups:
            label_code = rsd.get_label_binary_codes()[k]
            queue.append((bin_groups[k], name + '_' + k, label_code))
    if selected_label is None:
        for (l, n, c) in queue:
            generate_data(data_params, l, n, c)
    else:
        # Hoisted out of the loop: the selected-name list does not change per item.
        targets = tls.generate_selected_label_list(selected_label)
        for (l, n, c) in queue:
            for slt in targets:
                if n == slt:
                    generate_data(data_params, l, n, c)
#------------------------------------------------------------------------------------------
# generate Data (2D slices patches or 3D Volumes)
#------------------------------------------------------------------------------------------
def generate_data(data_params, lst, data_name, label_code):
    """Dispatch to the 2D or 3D extraction pipeline according to data_params['3D_or_2D']."""
    extractor = generate_2D_data if data_params['3D_or_2D'] == '2D' else generate_3D_data
    extractor(data_params, lst, data_name, label_code)
#######################################################################################################
# 2D extracting process
#######################################################################################################
def generate_2D_data(data_params, lst, data_name, label_code):
    """Print a banner describing the dataset, then run the 2D slice extraction."""
    name_parts = str(data_name).split('_')
    roi = data_params['ROI_list'][data_params['ROI_selection']]
    print(CP.style.BRIGHT + CP.fg.MAGENTA + "--------------------------------------------------------------------------")
    print("> Selected Data: {} for {} - Data size : {}".format(name_parts[3].capitalize(), name_parts[4], len(lst)))
    print("--------------------------------------------------------------------------\n" + CP.fg.WHITE + CP.style.RESET_ALL)
    process_extracting_2D_data(data_params, lst, data_name, label_code, indice_ROI=roi)
#######################################################################################################
# 3D extracting process
#######################################################################################################
def generate_3D_data(data_params, lst, data_name, label_code):
    """Print a banner describing the dataset, then run the 3D cube extraction."""
    name_parts = str(data_name).split('_')
    roi = data_params['ROI_list'][data_params['ROI_selection']]
    print(CP.style.BRIGHT + CP.fg.MAGENTA + "--------------------------------------------------------------------------")
    print("> {} Data for [{}] & Data length: [{}]".format(name_parts[3].capitalize(), name_parts[4], len(lst)))
    print("--------------------------------------------------------------------------\n" + CP.fg.WHITE + CP.style.RESET_ALL)
    process_extracting_3D_data(data_params, lst, data_name, label_code, indice_ROI=roi)
#------------------------------------------------------------------------------------------
# 2D extracting
#------------------------------------------------------------------------------------------
def process_extracting_2D_data(data_params, lst, data_name, label_code, indice_ROI):
    """Placeholder for the 2D slice-extraction pipeline (not implemented yet)."""
    # TODO: implement 2D patch extraction analogous to process_extracting_3D_data.
    pass
#------------------------------------------------------------------------------------------
# 3D extracting
#------------------------------------------------------------------------------------------
def process_extracting_3D_data(data_params, lst, data_name, label_code, indice_ROI):
    """Extract left/right 3-D ROI cubes for every entry of *lst* and pickle them as HippModel objects.

    Args:
        data_params: configuration dictionary (paths, ROI bounds, 'flip',
            'neighbors' options).
        lst: list of [class_name, mri_dir] entries produced by the list generators.
        data_name: dataset identifier such as 'alz_ADNI_1_train_AD_NC';
            fields 3 and 4 (split name, binary label) are parsed from it.
        label_code: mapping class-name -> integer label.
        indice_ROI: ROI selector string containing "HIPP" or "PPC".
    """
    if("HIPP" in indice_ROI):
        l, r = tls.get_dimensions_cubes_HIPP(data_params) # extract only Hippocampus ROI
    elif ("PPC" in indice_ROI):
        l, r = tls.get_dimensions_cubes_PPC(data_params) # extract only Posterior PC ROI
    else:
        # NOTE(review): compute both ROIs (in future) — for any other indice_ROI,
        # l and r stay undefined and the lines below raise NameError.
        pass
    # Per-axis extents of the left/right ROI boxes (max - min per coordinate pair).
    names = ['sag', 'cor', 'axi']
    list_cord_l = [int(l[i+1] - l[i]) for i in range(0, 6, 2)]
    list_cord_r = [int(r[i+1] - r[i]) for i in range(0, 6, 2)]
    # Slice index windows centred on each axis midpoint, +/- `neighbors` slices.
    neighbors = int(data_params['neighbors']) # how many neighbouring slices to select
    # NOTE(review): sag_*/cor_*/axi_* are computed but never used below (the 3D
    # path saves whole cubes); presumably kept for the 2D pipeline — confirm.
    sag_l, cor_l, axi_l = [[(int(i/2) - neighbors), (int(i/2)+ neighbors + 1)] for i in { "l_" + str(names[j]) : list_cord_l[j] for j in range(len(list_cord_l))}.values()]
    sag_r, cor_r, axi_r = [[(int(i/2) - neighbors), (int(i/2)+ neighbors + 1)] for i in { "r_" + str(names[j]) : list_cord_r[j] for j in range(len(list_cord_r))}.values()]
    # data_selection is unused below; kept as-is.
    data_selection = str(str(data_name).split('_')[1]).upper() + '_' + str(str(data_name).split('_')[2]).upper()
    data_set = str(data_name).split('_')[3]
    binary_label = str(data_name).split('_')[4]
    target_path = data_params['adni_data_des'] + tls.get_convention_name(data_params) + '/' +indice_ROI + "/3D/"
    data_size = 0
    key = 0
    for input_line in lst:
        #-----------------------------------------------------------------------------------------------------------------------
        # Mean ROI (L & R)
        # data_roi_mean = prc.process_mean_hippocampus(input_line, data_params) # mean cube
        # cross mean between cubes (in future)
        # return computed cubes ROIs Left and Right
        #-----------------------------------------------------------------------------------------------------------------------
        data_roi_left, data_roi_right = prc.process_cube_HIPP(input_line, data_params) # left, right cube
        # Subject meta-data: [ID, Date, Class, Age, Sex, MMSE, GDS, CDR]
        # (input_line[1] path component 7 is assumed to be the subject ID — confirm layout).
        subject_ID = str(input_line[1]).split('/')[7]
        meta_data = tls.get_meta_data_xml(data_params, subject_ID)
        # print(meta_data, binary_label, data_set, label_code[input_line[0]])
        model_object_normal = HippModel(data_roi_left, data_roi_right, meta_data, int(label_code[input_line[0]]))
        # NOTE: getsizeof is shallow; data_size is only a rough accounting figure.
        data_size += getsizeof(model_object_normal)
        model_abs_normal_path = target_path + binary_label + '/' + str(data_set) + '/' + str(input_line[0]) + '/' + str(key) + str('_' + indice_ROI + '_').upper() + data_name +'_'+ subject_ID + '_['+ str(input_line[0]) + ']' + str('_normal') + '.pkl'
        # Persist the non-augmented model.
        daf.save_model(model_object_normal, model_abs_normal_path)
        if data_params['flip']:
            # Flipped left & right ROI cubes.
            data_roi_left_flip = prc.flip_3d(data_roi_left)
            data_roi_right_flip = prc.flip_3d(data_roi_right)
            # Cross-flipped model: flipped right becomes left and vice versa.
            model_object_fliped = HippModel(data_roi_right_flip, data_roi_left_flip, meta_data, int(label_code[input_line[0]]))
            data_size += getsizeof(model_object_fliped)
            model_abs_fliped_path = target_path + binary_label + '/' + str(data_set) + '/' + str(input_line[0]) + '/' + str(key) + str('_' + indice_ROI + '_').upper() + data_name +'_'+ subject_ID + '_['+ str(input_line[0]) + ']' + str('_fliped') + '.pkl'
            # Persist the flipped model.
            daf.save_model(model_object_fliped, model_abs_fliped_path)
        key += 1
        # Progress indicator (carriage return keeps it on one line).
        print(CP.bg.RED + CP.style.BRIGHT + " {} % percent complete of 100% ".format(round(key/len(lst)*100, 2)) + " " + CP.style.RESET_ALL + CP.bg.RESET, end='\r')
    #==========================================================================================================================
    print("\n", end='\r')
    # 6.43 is an empirical on-disk size factor — TODO confirm where it comes from.
    print(CP.style.BRIGHT + "\n>> Data Size is: {} Mb -> {} Gb\n".format(round((data_size/1024) * 6.43, 2), round(((data_size/1024) * 6.43)/1024, 2)) + CP.style.RESET_ALL)
print(CP.style.BRIGHT + "\n>> Data Size is: {} Mb -> {} Gb\n".format(round((data_size/1024) * 6.43, 2), round(((data_size/1024) * 6.43)/1024, 2)) + CP.style.RESET_ALL)
| [
"io_data.data_acces_file.save_lists_to_file",
"services.process.flip_3d",
"services.process.process_cube_HIPP",
"config.config_read.get_label_binary_codes",
"services.tools.get_convention_name",
"services.tools.split_lists_to_binary_groups",
"services.tools.get_dimensions_cubes_PPC",
"io_data.data_acc... | [((1111, 1175), 'io_data.data_acces_file.save_lists_to_file', 'daf.save_lists_to_file', ([], {'path_file': 'file_path', 'data_list': 'list_data'}), '(path_file=file_path, data_list=list_data)\n', (1133, 1175), True, 'import io_data.data_acces_file as daf\n'), ((2264, 2277), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2274, 2277), False, 'import time\n'), ((8822, 8857), 'io_data.data_acces_file.read_lists_from_file', 'daf.read_lists_from_file', (['file_path'], {}), '(file_path)\n', (8846, 8857), True, 'import io_data.data_acces_file as daf\n'), ((9023, 9036), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (9033, 9036), False, 'import time\n'), ((6535, 6636), 'services.tools.generate_augm_lists_v2', 'tls.generate_augm_lists_v2', (['adni_1_test_lists', 'None', 'None', 'None'], {'default_augm_params': 'default_augm'}), '(adni_1_test_lists, None, None, None,\n default_augm_params=default_augm)\n', (6561, 6636), True, 'import services.tools as tls\n'), ((6667, 6800), 'services.tools.generate_augm_lists_v2', 'tls.generate_augm_lists_v2', (['adni_1_valid_lists', 'adni_1_valid_size_balanced', 'max_blur', 'max_shift'], {'default_augm_params': 'default_augm'}), '(adni_1_valid_lists, adni_1_valid_size_balanced,\n max_blur, max_shift, default_augm_params=default_augm)\n', (6693, 6800), True, 'import services.tools as tls\n'), ((6831, 6964), 'services.tools.generate_augm_lists_v2', 'tls.generate_augm_lists_v2', (['adni_1_train_lists', 'adni_1_train_size_balanced', 'max_blur', 'max_shift'], {'default_augm_params': 'default_augm'}), '(adni_1_train_lists, adni_1_train_size_balanced,\n max_blur, max_shift, default_augm_params=default_augm)\n', (6857, 6964), True, 'import services.tools as tls\n'), ((13817, 13859), 'services.tools.get_dimensions_cubes_HIPP', 'tls.get_dimensions_cubes_HIPP', (['data_params'], {}), '(data_params)\n', (13846, 13859), True, 'import services.tools as tls\n'), ((15705, 15751), 'services.process.process_cube_HIPP', 
'prc.process_cube_HIPP', (['input_line', 'data_params'], {}), '(input_line, data_params)\n', (15726, 15751), True, 'import services.process as prc\n'), ((15946, 15992), 'services.tools.get_meta_data_xml', 'tls.get_meta_data_xml', (['data_params', 'subject_ID'], {}), '(data_params, subject_ID)\n', (15967, 15992), True, 'import services.tools as tls\n'), ((16207, 16237), 'sys.getsizeof', 'getsizeof', (['model_object_normal'], {}), '(model_object_normal)\n', (16216, 16237), False, 'from sys import getsizeof\n'), ((16524, 16582), 'io_data.data_acces_file.save_model', 'daf.save_model', (['model_object_normal', 'model_abs_normal_path'], {}), '(model_object_normal, model_abs_normal_path)\n', (16538, 16582), True, 'import io_data.data_acces_file as daf\n'), ((959, 995), 'services.tools.get_convention_name', 'tls.get_convention_name', (['data_params'], {}), '(data_params)\n', (982, 995), True, 'import services.tools as tls\n'), ((7009, 7044), 'numpy.random.shuffle', 'rnd.shuffle', (['adni_1_train_lists_out'], {}), '(adni_1_train_lists_out)\n', (7020, 7044), True, 'import numpy.random as rnd\n'), ((7057, 7092), 'numpy.random.shuffle', 'rnd.shuffle', (['adni_1_valid_lists_out'], {}), '(adni_1_valid_lists_out)\n', (7068, 7092), True, 'import numpy.random as rnd\n'), ((7397, 7410), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (7407, 7410), False, 'import time\n'), ((7654, 7667), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (7664, 7667), False, 'import time\n'), ((7908, 7921), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (7918, 7921), False, 'import time\n'), ((8113, 8126), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (8123, 8126), False, 'import time\n'), ((8750, 8786), 'services.tools.get_convention_name', 'tls.get_convention_name', (['data_params'], {}), '(data_params)\n', (8773, 8786), True, 'import services.tools as tls\n'), ((13939, 13980), 'services.tools.get_dimensions_cubes_PPC', 'tls.get_dimensions_cubes_PPC', (['data_params'], {}), 
'(data_params)\n', (13967, 13980), True, 'import services.tools as tls\n'), ((16695, 16721), 'services.process.flip_3d', 'prc.flip_3d', (['data_roi_left'], {}), '(data_roi_left)\n', (16706, 16721), True, 'import services.process as prc\n'), ((16756, 16783), 'services.process.flip_3d', 'prc.flip_3d', (['data_roi_right'], {}), '(data_roi_right)\n', (16767, 16783), True, 'import services.process as prc\n'), ((17007, 17037), 'sys.getsizeof', 'getsizeof', (['model_object_fliped'], {}), '(model_object_fliped)\n', (17016, 17037), False, 'from sys import getsizeof\n'), ((17356, 17414), 'io_data.data_acces_file.save_model', 'daf.save_model', (['model_object_fliped', 'model_abs_fliped_path'], {}), '(model_object_fliped, model_abs_fliped_path)\n', (17370, 17414), True, 'import io_data.data_acces_file as daf\n'), ((10046, 10083), 'services.tools.split_lists_to_binary_groups', 'tls.split_lists_to_binary_groups', (['lst'], {}), '(lst)\n', (10078, 10083), True, 'import services.tools as tls\n'), ((10531, 10568), 'services.tools.split_lists_to_binary_groups', 'tls.split_lists_to_binary_groups', (['lst'], {}), '(lst)\n', (10563, 10568), True, 'import services.tools as tls\n'), ((10812, 10860), 'services.tools.generate_selected_label_list', 'tls.generate_selected_label_list', (['selected_label'], {}), '(selected_label)\n', (10844, 10860), True, 'import services.tools as tls\n'), ((15035, 15071), 'services.tools.get_convention_name', 'tls.get_convention_name', (['data_params'], {}), '(data_params)\n', (15058, 15071), True, 'import services.tools as tls\n'), ((10174, 10202), 'config.config_read.get_label_binary_codes', 'rsd.get_label_binary_codes', ([], {}), '()\n', (10200, 10202), True, 'import config.config_read as rsd\n'), ((10639, 10667), 'config.config_read.get_label_binary_codes', 'rsd.get_label_binary_codes', ([], {}), '()\n', (10665, 10667), True, 'import config.config_read as rsd\n')] |
import sys, os
import numpy as np
import math

# Make the DPE emulator's instruction-encoding helpers importable.
sys.path.insert(0, '/home/tensor/aa_dpe_emulate/include/')
sys.path.insert(0, '/home/tensor/aa_dpe_emulate/src/')
from data_convert import *
from instrn_proto import *
from tile_instrn_proto import *

dict_temp = {}
dict_list = []

# Eight identical receive instructions over addresses 768..1664 (stride 128);
# the original spelled every call out by hand.
for mem_addr in range(768, 1665, 128):
    i_temp = i_receive(mem_addr=mem_addr, vtile_id=0, receive_width=16, counter=2, vec=8)
    dict_list.append(i_temp.copy())

# Two send instructions back to target address 1 (note the 1920-before-1792 order).
for mem_addr in (1920, 1792):
    i_temp = i_send(mem_addr=mem_addr, vtile_id=2, send_width=16, target_addr=1, vec=8)
    dict_list.append(i_temp.copy())

# Terminate the tile's instruction stream.
i_temp = i_halt()
dict_list.append(i_temp.copy())

filename = 'large/tile2/tile_imem.npy'
np.save(filename, dict_list)
| [
"numpy.save",
"sys.path.insert"
] | [((47, 105), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/tensor/aa_dpe_emulate/include/"""'], {}), "(0, '/home/tensor/aa_dpe_emulate/include/')\n", (62, 105), False, 'import sys, os\n'), ((107, 161), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/tensor/aa_dpe_emulate/src/"""'], {}), "(0, '/home/tensor/aa_dpe_emulate/src/')\n", (122, 161), False, 'import sys, os\n'), ((1502, 1530), 'numpy.save', 'np.save', (['filename', 'dict_list'], {}), '(filename, dict_list)\n', (1509, 1530), True, 'import numpy as np\n')] |
import math
import os
import random
import albumentations as A
import cv2
import dlib
import numpy as np
import skimage
from albumentations import DualTransform
from albumentations.pytorch import ToTensorV2
from scipy.ndimage import binary_dilation
from skimage import measure, draw
from config import BASE_DIR
# Shared dlib face detector and 68-point landmark predictor, loaded once at
# module import time.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(os.path.join(BASE_DIR, 'libs', 'shape_predictor_68_face_landmarks.dat'))
def prepare_bit_masks(mask):
    """Build six binary keep-masks (half-plane and diagonal-quadrant occlusions).

    Each returned array has the same shape as *mask* and is 1 where the image
    is kept and 0 where it should be blacked out: top half, bottom half, left
    half, right half, then the two opposite diagonal quadrant pairs.

    Args:
        mask: 2-D array whose shape defines the output size.

    Returns:
        list[np.ndarray]: six masks of shape ``mask.shape``.
    """
    h, w = mask.shape
    mid_w = w // 2
    mid_h = h // 2  # BUG FIX: was `w // 2`, which is wrong for non-square masks
    masks = []
    # Top / bottom halves hidden.
    ones = np.ones_like(mask)
    ones[:mid_h] = 0
    masks.append(ones)
    ones = np.ones_like(mask)
    ones[mid_h:] = 0
    masks.append(ones)
    # Left / right halves hidden.
    ones = np.ones_like(mask)
    ones[:, :mid_w] = 0
    masks.append(ones)
    ones = np.ones_like(mask)
    ones[:, mid_w:] = 0
    masks.append(ones)
    # Opposite diagonal quadrant pairs hidden.
    ones = np.ones_like(mask)
    ones[:mid_h, :mid_w] = 0
    ones[mid_h:, mid_w:] = 0
    masks.append(ones)
    ones = np.ones_like(mask)
    ones[:mid_h, mid_w:] = 0
    ones[mid_h:, :mid_w] = 0
    masks.append(ones)
    return masks
def blackout_convex_hull(img, mask):
    """Black out a random half of the detected face region in img and mask, in place.

    The face outline (jaw + brow landmarks) is rasterised into a filled region,
    its centroid is found, and one half of that region — split at the centroid
    either vertically or horizontally, each side chosen by a coin flip — is
    used as the blackout area.  Best effort: any failure (e.g. no face
    detected) leaves the inputs untouched.
    """
    try:
        rect = detector(img)[0]
        sp = predictor(img, rect)
        landmarks = np.array([[p.x, p.y] for p in sp.parts()])
        # Jawline points 0-16 plus the brow line traversed right-to-left (26..17).
        outline = landmarks[[*range(17), *range(26, 16, -1)]]
        Y, X = skimage.draw.polygon(outline[:, 1], outline[:, 0])
        cropped_img = np.zeros(img.shape[:2], dtype=np.uint8)
        cropped_img[Y, X] = 1
        y, x = measure.centroid(cropped_img)
        y = int(y)
        x = int(x)
        # Two coin flips: orientation (horizontal vs vertical split) and side.
        first = random.random() > 0.5
        if random.random() > 0.5:
            if first:
                cropped_img[:y, :] = 0
            else:
                cropped_img[y:, :] = 0
        else:
            if first:
                cropped_img[:, :x] = 0
            else:
                cropped_img[:, x:] = 0
        img[cropped_img > 0] = 0
        mask[cropped_img > 0] = 0
    except Exception as e:
        # Best-effort augmentation: silently skip frames where detection fails.
        pass
def drop_background(img, mask):
    """Zero everything outside the detected face outline in img and mask, in place.

    Best effort: if dlib finds no face, both inputs are left untouched.
    """
    try:
        face_rect = detector(img)[0]
        shape = predictor(img, face_rect)
        points = np.array([[p.x, p.y] for p in shape.parts()])
        # Jawline (0-16) plus the brow line traversed right-to-left (26..17).
        outline = points[[*range(17), *range(26, 16, -1)]]
        rows, cols = skimage.draw.polygon(outline[:, 1], outline[:, 0])
        face_region = np.zeros(img.shape[:2], dtype=np.uint8)
        face_region[rows, cols] = 1
        img[face_region == 0] = 0
        mask[face_region == 0] = 0
    except Exception:
        # Best-effort: skip frames where face detection fails.
        pass
def blend_back(img_ori, img, mask_ori, mask):
    """Copy the bright pixels of img (> 50) and the positive pixels of mask
    back onto the originals, in place, and return the originals."""
    bright = img > 50
    img_ori[bright] = img[bright]
    positive = mask > 0
    mask_ori[positive] = mask[positive]
    return img_ori, mask_ori
def dist(p1, p2):
    """Euclidean distance between two (x, y) points."""
    return math.hypot(p1[0] - p2[0], p1[1] - p2[1])
def remove_eyes(image, mask, landmarks):
    """Return a copy of image with a dilated band across the first two landmarks
    (the eyes) blacked out; the same band is zeroed in mask, in place."""
    result = image.copy()
    (x1, y1), (x2, y2) = landmarks[:2]
    canvas = np.zeros_like(result[..., 0])
    stroke = cv2.line(canvas, (x1, y1), (x2, y2), color=1, thickness=2)
    # Thicken the stroke proportionally to the inter-eye distance.
    eye_span = dist((x1, y1), (x2, y2))
    band = binary_dilation(stroke, iterations=int(eye_span // 4))
    result[band, :] = 0
    mask[band] = 0
    return result, mask
def remove_nose(image, mask, landmarks):
    """Return a copy of image with a dilated band from the nose tip (landmark 2)
    to the mid-point between the eyes blacked out; mask is zeroed in place."""
    result = image.copy()
    (x1, y1), (x2, y2) = landmarks[:2]
    x3, y3 = landmarks[2]
    canvas = np.zeros_like(result[..., 0])
    # Mid-point between the two eye landmarks.
    mid_x = int((x1 + x2) / 2)
    mid_y = int((y1 + y2) / 2)
    stroke = cv2.line(canvas, (x3, y3), (mid_x, mid_y), color=1, thickness=2)
    # Band thickness scales with the inter-eye distance.
    band = binary_dilation(stroke, iterations=int(dist((x1, y1), (x2, y2)) // 4))
    result[band, :] = 0
    mask[band] = 0
    return result, mask
def remove_mouth(image, mask, landmarks):
    """Return a copy of image with a dilated band across the last two landmarks
    (the mouth corners) blacked out; mask is zeroed in place."""
    result = image.copy()
    (x1, y1), (x2, y2) = landmarks[-2:]
    canvas = np.zeros_like(result[..., 0])
    stroke = cv2.line(canvas, (x1, y1), (x2, y2), color=1, thickness=2)
    mouth_span = dist((x1, y1), (x2, y2))
    band = binary_dilation(stroke, iterations=int(mouth_span // 3))
    result[band, :] = 0
    mask[band] = 0
    return result, mask
def remove_background(image, mask, landmarks):
image = image.copy()
(x1, y1), (x2, y2) = landmarks[-2:]
shadow = np.zeros_like(image[..., 0])
line = cv2.line(shadow, (x1, y1), (x2, y2), color=(1), thickness=2)
w = dist((x1, y1), (x2, y2))
dilation = int(w // 3)
line = binary_dilation(line, iterations=dilation)
image[line, :] = 0
mask[line] = 0
return image, mask
def remove_landmark(image, mask, landmarks):
if random.random() > 0.5:
image, mask = remove_eyes(image, mask, landmarks)
elif random.random() > 0.5:
image, mask = remove_mouth(image, mask, landmarks)
elif random.random() > 0.5:
image, mask = remove_nose(image, mask, landmarks)
return image, mask
def isotropically_resize_image(img, size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC):
h, w = img.shape[:2]
if max(w, h) == size:
return img
if w > h:
scale = size / w
h = h * scale
w = size
else:
scale = size / h
w = w * scale
h = size
interpolation = interpolation_up if scale > 1 else interpolation_down
resized = cv2.resize(img, (int(w), int(h)), interpolation=interpolation)
return resized
class IsotropicResize(DualTransform):
def __init__(self, max_side, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC,
always_apply=False, p=1):
super(IsotropicResize, self).__init__(always_apply, p)
self.max_side = max_side
self.interpolation_down = interpolation_down
self.interpolation_up = interpolation_up
def apply(self, img, **params):
return isotropically_resize_image(img, size=self.max_side, interpolation_down=self.interpolation_down,
interpolation_up=self.interpolation_up)
def apply_to_mask(self, img, **params):
return self.apply(img, interpolation_down=cv2.INTER_NEAREST, interpolation_up=cv2.INTER_NEAREST, **params)
def get_transform_init_args_names(self):
return "max_side", "interpolation_down", "interpolation_up"
def apply_to_bbox(self, bbox, **params):
pass
def apply_to_keypoint(self, keypoint, **params):
pass
def get_params_dependent_on_targets(self, params):
pass
def generalization_preprocessing(landmark_path, image, label, mask, generalization_transform=None):
# if os.path.exists(landmark_path) and random.random() < 0.3:
# landmarks = np.load(landmark_path)
# image, mask = remove_landmark(image, mask, landmarks)
# elif random.random() < 0.3:
# blackout_convex_hull(image, mask)
if random.random() < 0.3:
bitmap_masks = prepare_bit_masks(mask)
bitmap_mask = random.choice(bitmap_masks)
image = np.multiply(image, np.expand_dims(bitmap_mask, axis=2))
mask = np.multiply(mask, bitmap_mask)
# elif generalization_transform is not None and label == 1 and random.random() < 0.5:
# image_tmp, mask_tmp = np.copy(image), np.copy(mask)
# drop_background(image, mask)
# g_transformed = generalization_transform(image=image, mask=mask)
# image = g_transformed["image"]
# mask = g_transformed["mask"]
# if random.random() < 0.5:
# image, mask = blend_back(image_tmp, image, mask_tmp, mask)
return image, mask
def create_generalization_transform():
return A.Compose([
A.Blur(blur_limit=(5, 10), p=0.7),
A.OneOf([A.RandomBrightnessContrast(), A.FancyPCA(), A.HueSaturationValue()], p=0.7),
A.OpticalDistortion(distort_limit=(1., 2.), border_mode=cv2.BORDER_CONSTANT, p=0.5)
])
def create_train_transform(model_cfg):
size = model_cfg['input_size'][1]
mean = model_cfg['mean']
std = model_cfg['std']
return A.Compose([
A.OneOf([
IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC),
IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_LINEAR),
IsotropicResize(max_side=size, interpolation_down=cv2.INTER_LINEAR, interpolation_up=cv2.INTER_LINEAR),
], p=1),
A.PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT, value=0),
A.Normalize(mean=mean, std=std),
ToTensorV2(),
])
def create_val_test_transform(model_cfg):
size = model_cfg['input_size'][1]
mean = model_cfg['mean']
std = model_cfg['std']
return A.Compose([
IsotropicResize(max_side=size, interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC),
A.PadIfNeeded(min_height=size, min_width=size, border_mode=cv2.BORDER_CONSTANT, value=0),
A.Normalize(mean=mean, std=std),
ToTensorV2(),
])
| [
"albumentations.Normalize",
"os.path.join",
"albumentations.Blur",
"cv2.line",
"numpy.zeros_like",
"numpy.multiply",
"albumentations.OpticalDistortion",
"scipy.ndimage.binary_dilation",
"numpy.ones_like",
"albumentations.FancyPCA",
"math.sqrt",
"skimage.draw.polygon",
"random.random",
"dli... | [((325, 357), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (355, 357), False, 'import dlib\n'), ((391, 462), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""libs"""', '"""shape_predictor_68_face_landmarks.dat"""'], {}), "(BASE_DIR, 'libs', 'shape_predictor_68_face_landmarks.dat')\n", (403, 462), False, 'import os\n'), ((581, 599), 'numpy.ones_like', 'np.ones_like', (['mask'], {}), '(mask)\n', (593, 599), True, 'import numpy as np\n'), ((655, 673), 'numpy.ones_like', 'np.ones_like', (['mask'], {}), '(mask)\n', (667, 673), True, 'import numpy as np\n'), ((729, 747), 'numpy.ones_like', 'np.ones_like', (['mask'], {}), '(mask)\n', (741, 747), True, 'import numpy as np\n'), ((806, 824), 'numpy.ones_like', 'np.ones_like', (['mask'], {}), '(mask)\n', (818, 824), True, 'import numpy as np\n'), ((883, 901), 'numpy.ones_like', 'np.ones_like', (['mask'], {}), '(mask)\n', (895, 901), True, 'import numpy as np\n'), ((994, 1012), 'numpy.ones_like', 'np.ones_like', (['mask'], {}), '(mask)\n', (1006, 1012), True, 'import numpy as np\n'), ((2709, 2763), 'math.sqrt', 'math.sqrt', (['((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)'], {}), '((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n', (2718, 2763), False, 'import math\n'), ((2884, 2912), 'numpy.zeros_like', 'np.zeros_like', (['image[..., 0]'], {}), '(image[..., 0])\n', (2897, 2912), True, 'import numpy as np\n'), ((2924, 2982), 'cv2.line', 'cv2.line', (['shadow', '(x1, y1)', '(x2, y2)'], {'color': '(1)', 'thickness': '(2)'}), '(shadow, (x1, y1), (x2, y2), color=1, thickness=2)\n', (2932, 2982), False, 'import cv2\n'), ((3054, 3096), 'scipy.ndimage.binary_dilation', 'binary_dilation', (['line'], {'iterations': 'dilation'}), '(line, iterations=dilation)\n', (3069, 3096), False, 'from scipy.ndimage import binary_dilation\n'), ((3308, 3336), 'numpy.zeros_like', 'np.zeros_like', (['image[..., 0]'], {}), '(image[..., 0])\n', (3321, 3336), True, 'import numpy as np\n'), ((3404, 3462), 
'cv2.line', 'cv2.line', (['shadow', '(x3, y3)', '(x4, y4)'], {'color': '(1)', 'thickness': '(2)'}), '(shadow, (x3, y3), (x4, y4), color=1, thickness=2)\n', (3412, 3462), False, 'import cv2\n'), ((3534, 3576), 'scipy.ndimage.binary_dilation', 'binary_dilation', (['line'], {'iterations': 'dilation'}), '(line, iterations=dilation)\n', (3549, 3576), False, 'from scipy.ndimage import binary_dilation\n'), ((3764, 3792), 'numpy.zeros_like', 'np.zeros_like', (['image[..., 0]'], {}), '(image[..., 0])\n', (3777, 3792), True, 'import numpy as np\n'), ((3804, 3862), 'cv2.line', 'cv2.line', (['shadow', '(x1, y1)', '(x2, y2)'], {'color': '(1)', 'thickness': '(2)'}), '(shadow, (x1, y1), (x2, y2), color=1, thickness=2)\n', (3812, 3862), False, 'import cv2\n'), ((3936, 3978), 'scipy.ndimage.binary_dilation', 'binary_dilation', (['line'], {'iterations': 'dilation'}), '(line, iterations=dilation)\n', (3951, 3978), False, 'from scipy.ndimage import binary_dilation\n'), ((4171, 4199), 'numpy.zeros_like', 'np.zeros_like', (['image[..., 0]'], {}), '(image[..., 0])\n', (4184, 4199), True, 'import numpy as np\n'), ((4211, 4269), 'cv2.line', 'cv2.line', (['shadow', '(x1, y1)', '(x2, y2)'], {'color': '(1)', 'thickness': '(2)'}), '(shadow, (x1, y1), (x2, y2), color=1, thickness=2)\n', (4219, 4269), False, 'import cv2\n'), ((4343, 4385), 'scipy.ndimage.binary_dilation', 'binary_dilation', (['line'], {'iterations': 'dilation'}), '(line, iterations=dilation)\n', (4358, 4385), False, 'from scipy.ndimage import binary_dilation\n'), ((1365, 1415), 'skimage.draw.polygon', 'skimage.draw.polygon', (['outline[:, 1]', 'outline[:, 0]'], {}), '(outline[:, 1], outline[:, 0])\n', (1385, 1415), False, 'import skimage\n'), ((1438, 1477), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': 'np.uint8'}), '(img.shape[:2], dtype=np.uint8)\n', (1446, 1477), True, 'import numpy as np\n'), ((1524, 1553), 'skimage.measure.centroid', 'measure.centroid', (['cropped_img'], {}), '(cropped_img)\n', (1540, 1553), 
False, 'from skimage import measure, draw\n'), ((2271, 2321), 'skimage.draw.polygon', 'skimage.draw.polygon', (['outline[:, 1]', 'outline[:, 0]'], {}), '(outline[:, 1], outline[:, 0])\n', (2291, 2321), False, 'import skimage\n'), ((2344, 2383), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': 'np.uint8'}), '(img.shape[:2], dtype=np.uint8)\n', (2352, 2383), True, 'import numpy as np\n'), ((4505, 4520), 'random.random', 'random.random', ([], {}), '()\n', (4518, 4520), False, 'import random\n'), ((6741, 6756), 'random.random', 'random.random', ([], {}), '()\n', (6754, 6756), False, 'import random\n'), ((6833, 6860), 'random.choice', 'random.choice', (['bitmap_masks'], {}), '(bitmap_masks)\n', (6846, 6860), False, 'import random\n'), ((6948, 6978), 'numpy.multiply', 'np.multiply', (['mask', 'bitmap_mask'], {}), '(mask, bitmap_mask)\n', (6959, 6978), True, 'import numpy as np\n'), ((1608, 1623), 'random.random', 'random.random', ([], {}), '()\n', (1621, 1623), False, 'import random\n'), ((1641, 1656), 'random.random', 'random.random', ([], {}), '()\n', (1654, 1656), False, 'import random\n'), ((4595, 4610), 'random.random', 'random.random', ([], {}), '()\n', (4608, 4610), False, 'import random\n'), ((6896, 6931), 'numpy.expand_dims', 'np.expand_dims', (['bitmap_mask'], {'axis': '(2)'}), '(bitmap_mask, axis=2)\n', (6910, 6931), True, 'import numpy as np\n'), ((7530, 7563), 'albumentations.Blur', 'A.Blur', ([], {'blur_limit': '(5, 10)', 'p': '(0.7)'}), '(blur_limit=(5, 10), p=0.7)\n', (7536, 7563), True, 'import albumentations as A\n'), ((7667, 7757), 'albumentations.OpticalDistortion', 'A.OpticalDistortion', ([], {'distort_limit': '(1.0, 2.0)', 'border_mode': 'cv2.BORDER_CONSTANT', 'p': '(0.5)'}), '(distort_limit=(1.0, 2.0), border_mode=cv2.\n BORDER_CONSTANT, p=0.5)\n', (7686, 7757), True, 'import albumentations as A\n'), ((8302, 8395), 'albumentations.PadIfNeeded', 'A.PadIfNeeded', ([], {'min_height': 'size', 'min_width': 'size', 'border_mode': 
'cv2.BORDER_CONSTANT', 'value': '(0)'}), '(min_height=size, min_width=size, border_mode=cv2.\n BORDER_CONSTANT, value=0)\n', (8315, 8395), True, 'import albumentations as A\n'), ((8400, 8431), 'albumentations.Normalize', 'A.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (8411, 8431), True, 'import albumentations as A\n'), ((8441, 8453), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (8451, 8453), False, 'from albumentations.pytorch import ToTensorV2\n'), ((8740, 8833), 'albumentations.PadIfNeeded', 'A.PadIfNeeded', ([], {'min_height': 'size', 'min_width': 'size', 'border_mode': 'cv2.BORDER_CONSTANT', 'value': '(0)'}), '(min_height=size, min_width=size, border_mode=cv2.\n BORDER_CONSTANT, value=0)\n', (8753, 8833), True, 'import albumentations as A\n'), ((8838, 8869), 'albumentations.Normalize', 'A.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (8849, 8869), True, 'import albumentations as A\n'), ((8879, 8891), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (8889, 8891), False, 'from albumentations.pytorch import ToTensorV2\n'), ((4686, 4701), 'random.random', 'random.random', ([], {}), '()\n', (4699, 4701), False, 'import random\n'), ((7582, 7610), 'albumentations.RandomBrightnessContrast', 'A.RandomBrightnessContrast', ([], {}), '()\n', (7608, 7610), True, 'import albumentations as A\n'), ((7612, 7624), 'albumentations.FancyPCA', 'A.FancyPCA', ([], {}), '()\n', (7622, 7624), True, 'import albumentations as A\n'), ((7626, 7648), 'albumentations.HueSaturationValue', 'A.HueSaturationValue', ([], {}), '()\n', (7646, 7648), True, 'import albumentations as A\n')] |
import cobra
import re
import numpy as np
import pandas as pd
from cobra.flux_analysis.sampling import OptGPSampler
from cobra.core.reaction import Reaction as cobraReaction
from cobra.util.solver import set_objective
import rpy2.robjects as ro
from rpy2.robjects import numpy2ri
import warnings
import copy
from six import iteritems
from Order_module import FluxOrder
from Data_module import DataParser
from Helper_methods import isCandidatePair
ro.r['source']('Rfunctions.R')
FVA = ro.r['FVA']
sampleFluxCone = ro.r['sampleFluxCone']
numpy2ri.activate()
class Model:
"""
Methods to load the GEM, convert it to the required form and update the
flux bounds to match the carbon source.
"""
def __init__(self, fileName, workDir=None, v_eps=1e-9, verbose=True):
self.workDir = workDir
self.name = ''
self.carbonSource = 'all'
self.__loadModel(fileName, workDir)
self.__enableUptakeOfCarbonSources()
self.__removeBlockedReactions(v_eps)
self.__splitReversibleReactions()
self.__removeReactionsWithZeroUpperBound()
self. __addMetabolicMacrosystems()
self.geneIDs = [gene.id for gene in self.GEM.genes]
self.numberOfReactions = len(self.GEM.reactions)
self.numberOfMetabolites = len(self.GEM.metabolites)
self.original_lb = self.getLowerBounds()
self.original_ub = self.getUpperBounds()
self.verbose = verbose
print('Split GEM generated with ' + str(self.numberOfReactions)
+ ' non-blocked reactions and ' + str(self.numberOfMetabolites)
+ ' metabolites')
def getLowerBounds(self):
return np.array([rxn.lower_bound for rxn in self.GEM.reactions])
def getUpperBounds(self):
return np.array([rxn.upper_bound for rxn in self.GEM.reactions])
def setLowerBounds(self, lb):
for i, rxn in enumerate(self.GEM.reactions):
rxn.lower_bound = lb[i]
def setUpperBounds(self, ub):
for i, rxn in enumerate(self.GEM.reactions):
rxn.upper_bound = ub[i]
def getStoichiometricMatrix(self):
return np.array(
cobra.util.array.create_stoichiometric_matrix(self.GEM, array_type='dense'))
def getSubsystems(self):
return np.array([rxn.subsystem for rxn in self.GEM.reactions])
def getMacrosystems(self):
return np.array([rxn.macrosystem for rxn in self.GEM.reactions])
def getReactionNames(self):
return [rxn.name for rxn in self.GEM.reactions]
def getReactionIDs(self):
return [rxn.id for rxn in self.GEM.reactions]
def setCarbonSource(self, carbonSource, uptakeRate=20, fractionOfBiomassOptimum=0.95):
"""
Sets current carbon source: opens importer for carbon source, closes all order
organic imports and maximizes biomass. Wrapper to the two methods that follow.
"""
self.setLowerBounds(self.original_lb)
self.setUpperBounds(self.original_ub)
if carbonSource.lower() not in 'all':
self.updateExchangeReactionBounds(carbonSource, carbonUptakeRate=uptakeRate)
self.setMinimumBiomassProduction(fractionOfOptimum=fractionOfBiomassOptimum)
def __loadModel(self, fileName, workDir=None):
"""
Reads the SBML file containing the GEM. Removes blocked
reactions i.e., reactions that cannot carry flux in Sv = 0, and splits
reversible reactions into two irreversible reactions.
Parameters
----------
fileName: string
The path to the file containing the SBML model
Returns
-------
GEM: cobrapy class model
The transformed genome-scale model
"""
if workDir is not None:
path2File = workDir + '/' + fileName
else:
path2File = fileName
modelName, fileExtension = fileName.split('.')
self.name = modelName
warnings.filterwarnings('ignore')
if fileExtension in ['xml', 'sbml']:
self.GEM = cobra.io.read_sbml_model(path2File)
elif fileExtension == 'json':
self.GEM = cobra.io.load_json_model(path2File)
elif fileExtension == 'mat':
self.GEM = cobra.io.load_matlab_model(path2File)
warnings.resetwarnings()
def __addMetabolicMacrosystems(self):
df = pd.read_excel(self.workDir + '/' + self.name + '_subsystems.xlsx')
met_systems = pd.Series(df.Systems.values, index=df.Subsystems).to_dict()
for idx, rxn in enumerate(self.GEM.reactions):
try:
self.GEM.reactions[idx].macrosystem = met_systems[rxn.subsystem]
except Exception:
self.GEM.reactions[idx].macrosystem = 'Unassigned'
def __enableUptakeOfCarbonSources(self):
self.GEM.reactions.get_by_id('EX_glyc__R_e').lower_bound = -1000
self.GEM.reactions.get_by_id('EX_ac_e').lower_bound = -1000
def __removeBlockedReactions(self, v_eps):
blockedRxns = cobra.flux_analysis.find_blocked_reactions(
self.GEM, zero_cutoff=v_eps, open_exchanges=False)
self.blockedReactions = blockedRxns
for rxn in blockedRxns:
self.GEM.reactions.get_by_id(rxn).remove_from_model(remove_orphans=True)
def __splitReversibleReactions(self):
convert_to_irreversible(self.GEM)
def __removeReactionsWithZeroUpperBound(self):
FakeRev = [rxn for rxn in range(len(self.GEM.reactions))
if self.GEM.reactions[rxn].upper_bound == 0]
for rxn in FakeRev:
self.GEM.reactions[rxn].remove_from_model(remove_orphans=True)
def saveMatlabModel(self, workDir=None):
if workDir is None:
workDir = self.workDir
cobra.io.save_matlab_model(self.GEM, workDir + '/' + self.name + '.mat')
def updateExchangeReactionBounds(self, carbonSource=None, carbonUptakeRate=20):
"""
Update exchange reaction bounds to simulate appropriate growth medium
conditions.
"""
ExchangeRxnIDs = [rxn.id for rxn in self.GEM.exchanges if len(rxn.reactants) == 0]
for ID in ExchangeRxnIDs:
try:
if self.__isOrganicExchange(ID):
self.GEM.reactions.get_by_id(ID).upper_bound = 0
except Exception:
pass
self.__setEcoliCarbonSourceUptake(carbonSource, carbonUptakeRate)
self.carbonSource = carbonSource
def __isOrganicExchange(self, ID):
compoundAtoms = list(self.GEM.reactions.get_by_id(ID).products[0].formula)
cond = (('C' in compoundAtoms)
& ('H' in compoundAtoms)
& ('o' not in compoundAtoms)) # discards cobalamine
return cond
def __setEcoliCarbonSourceUptake(self, carbonSource, carbonUptakeRate):
"""
Set uptake rate for the selected carbon source for the E. coli model (iJO1366)
"""
carbonSource = carbonSource.lower()
if carbonSource in 'glucose':
uptakeRxns = ['EX_glc__D_e_reverse']
elif carbonSource in 'acetate':
uptakeRxns = ['EX_ac_e_reverse']
elif carbonSource in 'glycerate':
uptakeRxns = ['EX_glyc__R_e_reverse']
elif carbonSource in 'all':
uptakeRxns = ['EX_glc__D_e_reverse', 'EX_ac_e_reverse', 'EX_glyc__R_e_reverse']
for rxn in uptakeRxns:
self.GEM.reactions.get_by_id(rxn).upper_bound = carbonUptakeRate
def setMinimumBiomassProduction(self, ReactionID='biomass', fractionOfOptimum=0.95):
"""
Constraints the reaction indicated in ReactionID to produce a fraction of the
optimal value, specified in fractionOfOptimum. If ReactionID is left as None or
'biomass', the function tries to find the biomass reaction and constraints biomass
production. Either an ID or a reaction index can be given for the reaction.
"""
if ReactionID is None or ReactionID.lower() in 'biomass':
BiomassID = self.__getBiomassReactionID()
if len(BiomassID) > 1:
ReactionID = BiomassID[0]
self.ObjectiveReactionID = ReactionID
# Block alternative biomass reaction(s)
for ID in BiomassID[1:]:
self.GEM.reactions.get_by_id(ID).upper_bound = 0
if isinstance(ReactionID, list):
ReactionID = ReactionID[0]
# Optimize model
ReactionName = self.GEM.reactions.get_by_id(ReactionID).name
self.GEM.objective = self.GEM.reactions.get_by_any(ReactionID)
vbioMax = self.GEM.optimize().objective_value
self.GEM.reactions.get_by_id(ReactionID).lower_bound = fractionOfOptimum*vbioMax
if self.verbose:
print('Maximizing: ' + ReactionID + ', ' + ReactionName)
print('Maximum growth rate under ' + self.carbonSource + ': ' + str(vbioMax) + ' h^{-1}')
def __getBiomassReactionID(self):
"""
Tries to find the biomass reaction in the GEM
"""
reactionIDs = np.array(self.getReactionIDs())
def getBiomassReactionByName():
reactionNames = [rxn.name for rxn in self.GEM.reactions]
biomassReactionName = [name for name in reactionNames
if re.search('(?i)(biomass|growth)', name)]
if biomassReactionName:
return [reactionIDs[reactionNames.index(name)]
for name in biomassReactionName]
else:
return []
biomassReactionID = [ID for ID in reactionIDs
if re.search('(?i)(biomass|growth)', ID)]
if not biomassReactionID:
biomassReactionID = getBiomassReactionByName()
if not biomassReactionID:
raise ValueError('Biomass reaction not found, provide biomass reaction ID')
return biomassReactionID
def getExtremeFluxes(self):
"""
Conducts a Flux Variabilty Analysis in R and returns the 2d arrays FVAmin, FVAmax,
containing the flux distributions which are solutions to each minimization and
maximizing of each reaction in the model, as well as FVArange, a 2d array with the
classic fva flux ranges per reaction in the model. The native cobra version of this
function is not employed here because it does not return FVAmin and FVAmax, only
the flux ranges. Returns a dictionary with fields: FVArange, FVAmin and FVAmax.
"""
S = self.getStoichiometricMatrix()
FVArange, FVAmin, FVAmax = FVA(S, v_lb=self.getLowerBounds(), v_ub=self.getUpperBounds())
return {'FVArange': np.asarray(FVArange),
'FVAmin': np.asarray(FVAmin),
'FVAmax': np.asarray(FVAmax)}
def getFluxSample(self, nsamples=5000):
"""
Generates a sample of the flux cone. It uses the default sampler in cobrapy
"""
optGPS = OptGPSampler(self.GEM, thinning=100, processes=3)
samplerSample = optGPS.sample(nsamples)
sample = samplerSample[optGPS.validate(samplerSample) == "v"]
return sample
def getFluxSampleInRprogram(self, nsamples=5000, lb=None, ub=None):
"""
Generates a sample of the flux cone. It uses a custom R program ("sampleFluxCone")
to obtain the sample, the lower and upper flux bounds can be specified as numpy
arrays, otherwise they are taken from the model object.
m
"""
if lb is None:
lb = self.getLowerBounds()
if ub is None:
ub = self.getUpperBounds()
S = self.getStoichiometricMatrix()
sample = np.array(sampleFluxCone(S, n_samples=nsamples, v_lb=lb, v_ub=ub))
df = pd.DataFrame(sample.transpose(), columns=self.getReactionIDs())
return df
def findFullyCoupledWithSameFlux(self, fctable=None):
"""
Find fully coupled reaction pairs with the same flux value across the flux cone
"""
if fctable is None:
raise AttributeError('fctable missing!')
fcRatios = self.__computeFullyCoupledRatios(fctable)
fcRatios[np.where(fcRatios != 1)] = 0
equalFlux = []
for column in range(np.size(fcRatios, 1)):
rxns = np.where(fcRatios[:, column] == 1)[0].tolist()
if rxns:
rxns.append(column)
rxns.sort()
equalFlux.append(rxns)
equalFlux = np.unique(np.array(equalFlux)).tolist()
return equalFlux
def __computeFullyCoupledRatios(self, fctable):
"""
Finds the flux ratio of all fully coupled pairs in a GEM. Returns a 2D array
where entries equal to 1 indicate that these rection pairs have equal flux values.
"""
temp = copy.deepcopy(fctable)
temp[np.diag_indices_from(temp)] = 0
# temp[np.tril_indices_from(temp)] = 0
fcPairs = np.where(temp == 1)
npairs = len(fcPairs[0])
nrxns = self.numberOfReactions
tempGEM = self.GEM.copy()
fcRatios = np.zeros((nrxns, nrxns))
for pairIdx in range(npairs):
rxn_i, rxn_j = fcPairs[0][pairIdx], fcPairs[1][pairIdx]
tempGEM.objective = tempGEM.reactions[rxn_i]
fluxes = np.round(tempGEM.optimize().fluxes, 6)
# try:
fcRatios[rxn_i, rxn_j] = fluxes[rxn_i] / fluxes[rxn_j]
# fcRatios[rxn_j, rxn_i] = 1 / fcRatios[rxn_i, rxn_j]
return fcRatios
def findCandidatePairs(self, nsamples=5000, fva_filter=True):
"""
Discard pairs with v_j > v_i in the sample and where vjmax_j <= vjmax_i in
the vjmax vector and vimin_i >= vimin_j.
"""
if self.verbose:
print('Finding candidate ordered reaction pairs')
if fva_filter:
FVAout = self.getExtremeFluxes()
FVAmin, FVAmax = FVAout['FVAmin'], FVAout['FVAmax']
else:
FVAmin, FVAmax = None, None
# fluxSample = np.round(self.getFluxSample(nsamples).values.transpose(), 5)
fluxSample = np.round(self.getFluxSampleInRprogram(nsamples).values.transpose(), 5)
candidatePairs = []
for rxn_i in range(self.numberOfReactions):
for rxn_j in range(self.numberOfReactions):
if isCandidatePair(fluxSample, rxn_i, rxn_j, FVAmin=FVAmin, FVAmax=FVAmax):
candidatePairs.append([rxn_i, rxn_j])
candidatePairs = np.asarray(candidatePairs)
if self.verbose:
print('There are: ' + str(len(candidatePairs)) + ' candidate pairs out of ' +
str(0.5*self.numberOfReactions*(self.numberOfReactions - 1)) + ' total pairs')
self.candidatePairs = candidatePairs
def exportToCSV(self, directory, attributes=['S', 'lb', 'ub', 'candidatePairs'],
nametag=None):
"""
Export attributes to csv in the specified directory. Default directory is the
working directory defined for the class Model
"""
if directory is None:
raise ValueError('Missing directory to save files in!')
if nametag is None:
tag = ''
else:
tag = '_' + nametag
self.lb = self.getLowerBounds()
self.ub = self.getUpperBounds()
self.S = self.getStoichiometricMatrix()
for attribute in attributes:
item = getattr(self, attribute)
np.savetxt(f'{directory}/{self.name}_{attribute}{tag}.csv',
item, delimiter=',')
def getFluxOrders(self, AdjacencyMatrix=None, fctable=None):
"""
Instantiates class FluxOrder
Arguments
---------
A: numpy 2D array,
The adjacency matrix of the Hasse diagram containing the flux order
relations.
"""
FluxOrders = FluxOrder(Model=self, AdjacencyMatrix=AdjacencyMatrix, fctable=fctable)
return FluxOrders
def parseData(self):
"""
Instantiates class DataParser
"""
Parser = DataParser(self, workDir=self.workDir + '/Data')
return Parser
def getReactionsWithGeneData(self, geneWithDataIDs):
"""
Returns a list with all reactions in the model that have associated gene data
Arguments:
---------
geneWithDataIDs: a list containing the IDs of genes with available data
"""
rxnsWithData = []
for rxn in self.GEM.reactions:
genes = [gene.id for gene in rxn.genes if gene.id in geneWithDataIDs]
if len(genes) > 0:
rxnsWithData.append(rxn.id)
return rxnsWithData
# Other functions
def getFluxSampleInRprogram(S, nsamples=5000, lb=None, ub=None):
"""
Generates a sample of the flux cone. It uses a custom R program ("sampleFluxCone") to
obtain the sample, the lower and upper flux bounds and the stoichiometric
matrix have to be specified as numpy arrays.
"""
sample = sampleFluxCone(S, n_samples=nsamples, v_lb=lb, v_ub=ub)
return sample
def convert_to_irreversible(cobra_model):
"""
Split reversible reactions into two irreversible reactions: one going in
the forward direction, the other in the backward direction. In this manner,
all reactions in the model carry non-negative flux values. Forward reactions
are tagged as "forward" while backward reactions as "backward".
cobra_model: A Model object which will be modified in place.
Modified from the deprecated cobrapy version by <NAME>,
February 2019.
"""
reactions_to_add = []
coefficients = {}
def onlyBackward(reaction):
return reaction.lower_bound < 0 and reaction.upper_bound <= 0
def backwardAndForward(reaction):
return reaction.lower_bound < 0 and reaction.upper_bound > 0
def changeDirection(reaction):
def swapSign(number):
return -number
lb = swapSign(reaction.upper_bound)
ub = swapSign(reaction.lower_bound)
reaction.lower_bound = lb
reaction.upper_bound = ub
reaction.objective_coefficient * -1
reaction.notes["reflection"] = 'only reverse'
reaction.id += '_reverse'
reaction.name += '_reverse'
for met in reaction._metabolites.keys():
reaction._metabolites[met] *= -1
def createBackwardReaction(reaction):
backward_reaction = cobraReaction(reaction.id + '_reverse')
backward_reaction.lower_bound = 0
backward_reaction.upper_bound = -reaction.lower_bound
reaction_dict = {k: v * -1
for k, v in iteritems(reaction._metabolites)}
backward_reaction.add_metabolites(reaction_dict)
backward_reaction._model = reaction._model
backward_reaction._genes = reaction._genes
for gene in reaction._genes:
gene._reaction.add(backward_reaction)
backward_reaction.subsystem = reaction.subsystem
backward_reaction.name = reaction.name + '_reverse'
backward_reaction._gene_reaction_rule = reaction._gene_reaction_rule
coefficients[backward_reaction] = reaction.objective_coefficient * -1
return backward_reaction
for reaction in cobra_model.reactions:
if onlyBackward(reaction):
changeDirection(reaction)
elif backwardAndForward(reaction):
backward_reaction = createBackwardReaction(reaction)
reactions_to_add.append(backward_reaction)
reaction.lower_bound = 0
cobra_model.add_reactions(reactions_to_add)
set_objective(cobra_model, coefficients, additive=True)
| [
"numpy.diag_indices_from",
"cobra.util.solver.set_objective",
"cobra.io.load_matlab_model",
"Data_module.DataParser",
"Order_module.FluxOrder",
"cobra.io.save_matlab_model",
"six.iteritems",
"cobra.core.reaction.Reaction",
"cobra.flux_analysis.find_blocked_reactions",
"numpy.savetxt",
"Helper_me... | [((537, 556), 'rpy2.robjects.numpy2ri.activate', 'numpy2ri.activate', ([], {}), '()\n', (554, 556), False, 'from rpy2.robjects import numpy2ri\n'), ((19753, 19808), 'cobra.util.solver.set_objective', 'set_objective', (['cobra_model', 'coefficients'], {'additive': '(True)'}), '(cobra_model, coefficients, additive=True)\n', (19766, 19808), False, 'from cobra.util.solver import set_objective\n'), ((1675, 1732), 'numpy.array', 'np.array', (['[rxn.lower_bound for rxn in self.GEM.reactions]'], {}), '([rxn.lower_bound for rxn in self.GEM.reactions])\n', (1683, 1732), True, 'import numpy as np\n'), ((1779, 1836), 'numpy.array', 'np.array', (['[rxn.upper_bound for rxn in self.GEM.reactions]'], {}), '([rxn.upper_bound for rxn in self.GEM.reactions])\n', (1787, 1836), True, 'import numpy as np\n'), ((2284, 2339), 'numpy.array', 'np.array', (['[rxn.subsystem for rxn in self.GEM.reactions]'], {}), '([rxn.subsystem for rxn in self.GEM.reactions])\n', (2292, 2339), True, 'import numpy as np\n'), ((2387, 2444), 'numpy.array', 'np.array', (['[rxn.macrosystem for rxn in self.GEM.reactions]'], {}), '([rxn.macrosystem for rxn in self.GEM.reactions])\n', (2395, 2444), True, 'import numpy as np\n'), ((3968, 4001), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (3991, 4001), False, 'import warnings\n'), ((4309, 4333), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (4331, 4333), False, 'import warnings\n'), ((4390, 4456), 'pandas.read_excel', 'pd.read_excel', (["(self.workDir + '/' + self.name + '_subsystems.xlsx')"], {}), "(self.workDir + '/' + self.name + '_subsystems.xlsx')\n", (4403, 4456), True, 'import pandas as pd\n'), ((5046, 5143), 'cobra.flux_analysis.find_blocked_reactions', 'cobra.flux_analysis.find_blocked_reactions', (['self.GEM'], {'zero_cutoff': 'v_eps', 'open_exchanges': '(False)'}), '(self.GEM, zero_cutoff=v_eps,\n open_exchanges=False)\n', (5088, 5143), False, 'import cobra\n'), 
((5800, 5872), 'cobra.io.save_matlab_model', 'cobra.io.save_matlab_model', (['self.GEM', "(workDir + '/' + self.name + '.mat')"], {}), "(self.GEM, workDir + '/' + self.name + '.mat')\n", (5826, 5872), False, 'import cobra\n'), ((11055, 11104), 'cobra.flux_analysis.sampling.OptGPSampler', 'OptGPSampler', (['self.GEM'], {'thinning': '(100)', 'processes': '(3)'}), '(self.GEM, thinning=100, processes=3)\n', (11067, 11104), False, 'from cobra.flux_analysis.sampling import OptGPSampler\n'), ((12917, 12939), 'copy.deepcopy', 'copy.deepcopy', (['fctable'], {}), '(fctable)\n', (12930, 12939), False, 'import copy\n'), ((13050, 13069), 'numpy.where', 'np.where', (['(temp == 1)'], {}), '(temp == 1)\n', (13058, 13069), True, 'import numpy as np\n'), ((13195, 13219), 'numpy.zeros', 'np.zeros', (['(nrxns, nrxns)'], {}), '((nrxns, nrxns))\n', (13203, 13219), True, 'import numpy as np\n'), ((14603, 14629), 'numpy.asarray', 'np.asarray', (['candidatePairs'], {}), '(candidatePairs)\n', (14613, 14629), True, 'import numpy as np\n'), ((16003, 16074), 'Order_module.FluxOrder', 'FluxOrder', ([], {'Model': 'self', 'AdjacencyMatrix': 'AdjacencyMatrix', 'fctable': 'fctable'}), '(Model=self, AdjacencyMatrix=AdjacencyMatrix, fctable=fctable)\n', (16012, 16074), False, 'from Order_module import FluxOrder\n'), ((16206, 16254), 'Data_module.DataParser', 'DataParser', (['self'], {'workDir': "(self.workDir + '/Data')"}), "(self, workDir=self.workDir + '/Data')\n", (16216, 16254), False, 'from Data_module import DataParser\n'), ((18582, 18621), 'cobra.core.reaction.Reaction', 'cobraReaction', (["(reaction.id + '_reverse')"], {}), "(reaction.id + '_reverse')\n", (18595, 18621), True, 'from cobra.core.reaction import Reaction as cobraReaction\n'), ((2162, 2237), 'cobra.util.array.create_stoichiometric_matrix', 'cobra.util.array.create_stoichiometric_matrix', (['self.GEM'], {'array_type': '"""dense"""'}), "(self.GEM, array_type='dense')\n", (2207, 2237), False, 'import cobra\n'), ((4070, 4105), 
'cobra.io.read_sbml_model', 'cobra.io.read_sbml_model', (['path2File'], {}), '(path2File)\n', (4094, 4105), False, 'import cobra\n'), ((10771, 10791), 'numpy.asarray', 'np.asarray', (['FVArange'], {}), '(FVArange)\n', (10781, 10791), True, 'import numpy as np\n'), ((10819, 10837), 'numpy.asarray', 'np.asarray', (['FVAmin'], {}), '(FVAmin)\n', (10829, 10837), True, 'import numpy as np\n'), ((10865, 10883), 'numpy.asarray', 'np.asarray', (['FVAmax'], {}), '(FVAmax)\n', (10875, 10883), True, 'import numpy as np\n'), ((12270, 12293), 'numpy.where', 'np.where', (['(fcRatios != 1)'], {}), '(fcRatios != 1)\n', (12278, 12293), True, 'import numpy as np\n'), ((12350, 12370), 'numpy.size', 'np.size', (['fcRatios', '(1)'], {}), '(fcRatios, 1)\n', (12357, 12370), True, 'import numpy as np\n'), ((12953, 12979), 'numpy.diag_indices_from', 'np.diag_indices_from', (['temp'], {}), '(temp)\n', (12973, 12979), True, 'import numpy as np\n'), ((15587, 15672), 'numpy.savetxt', 'np.savetxt', (['f"""{directory}/{self.name}_{attribute}{tag}.csv"""', 'item'], {'delimiter': '""","""'}), "(f'{directory}/{self.name}_{attribute}{tag}.csv', item, delimiter=','\n )\n", (15597, 15672), True, 'import numpy as np\n'), ((4167, 4202), 'cobra.io.load_json_model', 'cobra.io.load_json_model', (['path2File'], {}), '(path2File)\n', (4191, 4202), False, 'import cobra\n'), ((4479, 4528), 'pandas.Series', 'pd.Series', (['df.Systems.values'], {'index': 'df.Subsystems'}), '(df.Systems.values, index=df.Subsystems)\n', (4488, 4528), True, 'import pandas as pd\n'), ((9717, 9754), 're.search', 're.search', (['"""(?i)(biomass|growth)"""', 'ID'], {}), "('(?i)(biomass|growth)', ID)\n", (9726, 9754), False, 'import re\n'), ((14446, 14517), 'Helper_methods.isCandidatePair', 'isCandidatePair', (['fluxSample', 'rxn_i', 'rxn_j'], {'FVAmin': 'FVAmin', 'FVAmax': 'FVAmax'}), '(fluxSample, rxn_i, rxn_j, FVAmin=FVAmin, FVAmax=FVAmax)\n', (14461, 14517), False, 'from Helper_methods import isCandidatePair\n'), ((18798, 18830), 
'six.iteritems', 'iteritems', (['reaction._metabolites'], {}), '(reaction._metabolites)\n', (18807, 18830), False, 'from six import iteritems\n'), ((4263, 4300), 'cobra.io.load_matlab_model', 'cobra.io.load_matlab_model', (['path2File'], {}), '(path2File)\n', (4289, 4300), False, 'import cobra\n'), ((9389, 9428), 're.search', 're.search', (['"""(?i)(biomass|growth)"""', 'name'], {}), "('(?i)(biomass|growth)', name)\n", (9398, 9428), False, 'import re\n'), ((12593, 12612), 'numpy.array', 'np.array', (['equalFlux'], {}), '(equalFlux)\n', (12601, 12612), True, 'import numpy as np\n'), ((12392, 12426), 'numpy.where', 'np.where', (['(fcRatios[:, column] == 1)'], {}), '(fcRatios[:, column] == 1)\n', (12400, 12426), True, 'import numpy as np\n')] |
import numpy as np
def body_hash(body):
    """
    Map each body id to the indices of the atoms that belong to it.

    :param body: 1-D np array with n atoms, for bodies (-1 for not a body)
    :return: dict mapping body id -> np.ndarray of atom indices in that body
    """
    ret = {}
    print('Build body hash...')
    natoms = len(body)
    bodies = set(body)
    # BUG FIX: the original used list.remove(-1), which raises ValueError
    # when every atom belongs to a body (no -1 present). discard() is a
    # no-op in that case.
    bodies.discard(-1)
    idxes = np.arange(natoms)
    for b in bodies:
        # Boolean mask selects all atom indices with this body id.
        ret[b] = idxes[body == b]
    print('Done.')
    return ret
| [
"numpy.arange"
] | [((283, 300), 'numpy.arange', 'np.arange', (['natoms'], {}), '(natoms)\n', (292, 300), True, 'import numpy as np\n')] |
# !/usr/bin/python
# -*- coding: UTF-8 -*-
##########################
# Creator: Javy
# Creat Time: 20170416
# Email: <EMAIL>
# Description: Machine Learning - Chapter tree
##########################
import sys
sys.path.append('/home/javy/Documents/python')
import imp
# BUG FIX: the original `imp.reload(module)` referenced an undefined name
# `module` and raised NameError before anything else could run; removed.
# Sample zero
from sklearn import datasets
import matplotlib
# BUG FIX: the backend selector is `matplotlib.use`, not `matplotlib.Use`
# (AttributeError). It must be called before importing pyplot, as done here.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
# Iris petal length/width (columns 2 and 3) as the 2-D feature matrix.
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
# NOTE(review): sklearn.cross_validation was removed in modern scikit-learn
# (newer versions: sklearn.model_selection); this only runs on old releases.
from sklearn.cross_validation import train_test_split
# Hold out 30% of the iris samples for testing.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Fit the scaler on the training split only, then standardize both splits.
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
# Re-stack train + test for plotting combined decision regions later.
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
from sklearn.linear_model import Perceptron
# NOTE(review): `n_iter` was renamed `max_iter` in newer scikit-learn.
ppn = Perceptron(n_iter=40, eta0=0.1, random_state=0)
ppn.fit(X_train_std, y_train)
y_pred = ppn.predict(X_test_std)
#print('Misclassified Sample: %d' % (y_test != y_pred).sum())
print('Misclassified Sample: {0}'.format((y_test != y_pred).sum()))
from sklearn.metrics import accuracy_score
#print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))
print('Accuracy:{0}'.format(accuracy_score(y_test, y_pred)))
# Sample one
# import plot_decision_regions
# NOTE(review): `plot_decision_regions` is never imported (the import above
# is commented out), so this call raises NameError as written.
plot_decision_regions.plot_decision_regions(X=X_combined_std, y=y_combined,
                        classifier=ppn, test_idx=range(105,150))
plt.xlabel('Petal length [standardlized]')
plt.ylabel('Petal width [standardlized]')
plt.legend(loc='upper left')
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_1.png')
#Sample two
import matplotlib.pyplot as plt
import numpy as np
def sigmoid(z):
    """Logistic function 1 / (1 + e^(-z)); works elementwise on arrays."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
# Plot the logistic sigmoid over [-7, 7).
z = np.arange(-7, 7, 0.1)
phi_z = sigmoid(z)
plt.plot(z, phi_z)
plt.axvline(0.0, color='k')
plt.axhspan(0.0, 1.0, facecolor='1.0', alpha=1.0, ls='dotted')
plt.axhline(y=0.5, ls='dotted', color='k')
plt.yticks([0.0, 0.5, 1.0])
plt.ylim(-0.1, 1.1)
plt.xlabel('z')
plt.ylabel('$\phi (z)$')
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_2.png')
#Sample Three
from sklearn.linear_model import LogisticRegression
# Large C = weak L2 regularization.
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
# lr.predict_proba(X_test_std[0:1])
# NOTE(review): `plot_decision_regions` is never imported in this script.
plot_decision_regions.plot_decision_regions(X_combined_std, y_combined, classifier=lr,
                test_idx=range(105, 150))
plt.xlabel('Petal length [standardlized]')
plt.ylabel('Petal width [standardlized]')
plt.legend(loc='upper left')
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_3.png')
#Sample Four
# Track how the learned weights shrink as regularization strengthens (C = 10**c).
weights, params = [], []
#for c in np.arange(-5, 5): #Integers to negative integer powers are not allowed
for c in np.arange(0, 5):
    lr = LogisticRegression(C=10**c, random_state=0)
    lr.fit(X_train_std, y_train)
    weights.append(lr.coef_[1])
    params.append(10**c)
weights = np.array(weights)
plt.plot(params, weights[:, 0], label='Petal length')
plt.plot(params, weights[:, 1], linestyle='--', label='Petal width')
plt.xlabel('C')
plt.ylabel('weight cofficient')
plt.legend(loc='upper left')
plt.xscale('log')
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_4.png')
#Sample nine
from sklearn.svm import SVC
# Linear-kernel SVM on the standardized iris features.
svm = SVC(kernel='linear', C=1.0, random_state=0)
svm.fit(X_train_std, y_train)
plot_decision_regions.plot_decision_regions(X_combined_std, y_combined, classifier=svm,
                test_idx=range(105, 150))
plt.xlabel('Petal length [standardlized]')
plt.ylabel('Petal width [standardlized]')
plt.legend(loc='upper left')
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_5.png')
#Sample Five
from sklearn.linear_model import SGDClassifier
# SGD-based equivalents of the three classifiers used above.
ppn = SGDClassifier(loss='perceptron')
lr = SGDClassifier(loss='log')
svm = SGDClassifier(loss='hinge')
# Build a 2-D XOR toy dataset (not linearly separable).
np.random.seed(0)
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)
plt.scatter(X_xor[y_xor==1, 0], X_xor[y_xor==1, 1],
            c='b', marker='x', label='1')
plt.scatter(X_xor[y_xor==-1, 0], X_xor[y_xor==-1, 1],
            c='r', marker='s', label='-1')
plt.ylim(-3.0)
plt.legend()
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_6.png')
#Sample six
# An RBF-kernel SVM can separate the XOR data.
svm = SVC(kernel='rbf', random_state=0, gamma=0.10, C=10.0)
svm.fit(X_xor, y_xor)
plot_decision_regions.plot_decision_regions(X_xor, y_xor, classifier=svm)
plt.legend(loc='upper left')
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_7.png')
#Sample seven
# Moderate gamma: smooth decision boundary on iris.
svm = SVC(kernel='rbf', random_state=0, gamma=0.20, C=1.0)
svm.fit(X_train_std, y_train)
plot_decision_regions.plot_decision_regions(X_combined_std, y_combined, classifier=svm, test_idx=range(105, 150))
plt.xlabel('Petal length [standardlized]')
plt.ylabel('Petal width [standardlized]')
plt.legend(loc='upper left')
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_8.png')
#Sample eight
# Very large gamma: overfit, island-like decision regions.
svm = SVC(kernel='rbf', random_state=0, gamma=100.0, C=1.0)
svm.fit(X_train_std, y_train)
plot_decision_regions.plot_decision_regions(X_combined_std, y_combined, classifier=svm, test_idx=range(105, 150))
plt.xlabel('Petal length [standardlized]')
plt.ylabel('Petal width [standardlized]')
plt.legend(loc='upper left')
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_9.png')
#Sample nine
import matplotlib.pyplot as plt
import numpy
def gini(p):
    """Gini impurity of a binary split with class-1 probability p."""
    q = 1 - p
    return p * q + q * (1 - q)
def entropy(p):
    """Binary Shannon entropy (in bits) for class-1 probability p."""
    q = 1 - p
    return -(p * np.log2(p) + q * np.log2(q))
def error(p):
    """Misclassification error: one minus the majority-class probability."""
    majority = np.max([p, 1 - p])
    return 1 - majority
# Compare the three impurity measures over p in [0, 1).
x = np.arange(0.0, 1.0, 0.01)
ent = [entropy(p) if p != 0 else None for p in x]
sc_ent = [e*0.5 if e else None for e in ent]
err = [error(i) for i in x]
fig = plt.figure()
ax = plt.subplot(111)
for i, lab, ls, c, in zip([ent, sc_ent, gini(x), err],
                          ['Entropy', 'Entropy (scaled)',
                           'Gini Impurity', 'misclassification Error'],
                          ['-', '-', '--', '-.'],
                          ['black', 'lightgreen', 'red', 'green', 'cyan']):
    line = ax.plot(x, i, label=lab, linestyle=ls, lw=2, color=c)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15),
          ncol=3, fancybox=True, shadow=False)
ax.axhline(y=0.5, linewidth=1, color='k', linestyle='--')
ax.axhline(y=1.0, linewidth=1, color='k', linestyle='--')
plt.ylim(0, 1.1)
plt.xlabel('p(i=1)')
plt.ylabel('Impurity Index')
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_10.png')
#Sample ten
from sklearn.tree import DecisionTreeClassifier
# NOTE(review): this rebinds the name `tree` to a classifier instance.
tree = DecisionTreeClassifier(criterion='entropy',
               max_depth=3, random_state=0)
tree.fit(X_train, y_train)
# Trees do not need standardized features, so the raw splits are stacked here.
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions.plot_decision_regions(X_combined, y_combined,
                classifier=tree, test_idx=range(105,150))
plt.xlabel('petal length [cm]')
plt.ylabel('petal width [cm]')
plt.legend(loc='upper left')
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_11.png')
#Sample eleven
from sklearn.tree import export_graphviz
# Dump the fitted tree for rendering with Graphviz (dot -Tpng tree.dot).
export_graphviz(tree, out_file='tree.dot',
                feature_names=['petal length', 'petal width'])
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(criterion='entropy', n_estimators=10,
                random_state=1, n_jobs=2)
forest.fit(X_train, y_train)
plot_decision_regions.plot_decision_regions(X_combined, y_combined,
                classifier=forest, test_idx=range(105, 150))
plt.xlabel('petal length')
plt.ylabel('petal width')
plt.legend(loc='upper left')
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_12.png')
#Sample twelve
from sklearn.neighbors import KNeighborsClassifier
# k-NN with Euclidean distance (minkowski metric, p=2).
knn = KNeighborsClassifier(n_neighbors=5, p=2,
                metric='minkowski')
knn.fit(X_train_std, y_train)
plot_decision_regions.plot_decision_regions(X_combined_std, y_combined,
                classifier=knn, test_idx=range(105,150))
plt.xlabel('petal length [standardlized]')
plt.ylabel('petal width [standardlized]')
plt.show()
#plt.savefig('/home/javy/Documents/DeepLearningResult/test02_13.png')
| [
"imp.reload",
"sklearn.datasets.load_iris",
"sklearn.preprocessing.StandardScaler",
"numpy.random.seed",
"sklearn.metrics.accuracy_score",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.exp",
"sklearn.svm.SVC",
"sys.path.append",
"matplotlib.pyplot.ax... | [((213, 259), 'sys.path.append', 'sys.path.append', (['"""/home/javy/Documents/python"""'], {}), "('/home/javy/Documents/python')\n", (228, 259), False, 'import sys\n'), ((272, 290), 'imp.reload', 'imp.reload', (['module'], {}), '(module)\n', (282, 290), False, 'import imp\n'), ((353, 374), 'matplotlib.Use', 'matplotlib.Use', (['"""Agg"""'], {}), "('Agg')\n", (367, 374), False, 'import matplotlib\n'), ((434, 454), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (452, 454), False, 'from sklearn import datasets\n'), ((586, 639), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (602, 639), False, 'from sklearn.cross_validation import train_test_split\n'), ((695, 711), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (709, 711), False, 'from sklearn.preprocessing import StandardScaler\n'), ((816, 852), 'numpy.vstack', 'np.vstack', (['(X_train_std, X_test_std)'], {}), '((X_train_std, X_test_std))\n', (825, 852), True, 'import numpy as np\n'), ((866, 894), 'numpy.hstack', 'np.hstack', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (875, 894), True, 'import numpy as np\n'), ((946, 993), 'sklearn.linear_model.Perceptron', 'Perceptron', ([], {'n_iter': '(40)', 'eta0': '(0.1)', 'random_state': '(0)'}), '(n_iter=40, eta0=0.1, random_state=0)\n', (956, 993), False, 'from sklearn.linear_model import Perceptron\n'), ((1556, 1598), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Petal length [standardlized]"""'], {}), "('Petal length [standardlized]')\n", (1566, 1598), True, 'import matplotlib.pyplot as plt\n'), ((1599, 1640), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Petal width [standardlized]"""'], {}), "('Petal width [standardlized]')\n", (1609, 1640), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1669), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 
'"""upper left"""'}), "(loc='upper left')\n", (1651, 1669), True, 'import matplotlib.pyplot as plt\n'), ((1670, 1680), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1678, 1680), True, 'import matplotlib.pyplot as plt\n'), ((1871, 1892), 'numpy.arange', 'np.arange', (['(-7)', '(7)', '(0.1)'], {}), '(-7, 7, 0.1)\n', (1880, 1892), True, 'import numpy as np\n'), ((1912, 1930), 'matplotlib.pyplot.plot', 'plt.plot', (['z', 'phi_z'], {}), '(z, phi_z)\n', (1920, 1930), True, 'import matplotlib.pyplot as plt\n'), ((1931, 1958), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0.0)'], {'color': '"""k"""'}), "(0.0, color='k')\n", (1942, 1958), True, 'import matplotlib.pyplot as plt\n'), ((1959, 2021), 'matplotlib.pyplot.axhspan', 'plt.axhspan', (['(0.0)', '(1.0)'], {'facecolor': '"""1.0"""', 'alpha': '(1.0)', 'ls': '"""dotted"""'}), "(0.0, 1.0, facecolor='1.0', alpha=1.0, ls='dotted')\n", (1970, 2021), True, 'import matplotlib.pyplot as plt\n'), ((2022, 2064), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0.5)', 'ls': '"""dotted"""', 'color': '"""k"""'}), "(y=0.5, ls='dotted', color='k')\n", (2033, 2064), True, 'import matplotlib.pyplot as plt\n'), ((2065, 2092), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0.0, 0.5, 1.0]'], {}), '([0.0, 0.5, 1.0])\n', (2075, 2092), True, 'import matplotlib.pyplot as plt\n'), ((2093, 2112), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (2101, 2112), True, 'import matplotlib.pyplot as plt\n'), ((2113, 2128), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""z"""'], {}), "('z')\n", (2123, 2128), True, 'import matplotlib.pyplot as plt\n'), ((2129, 2154), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\phi (z)$"""'], {}), "('$\\\\phi (z)$')\n", (2139, 2154), True, 'import matplotlib.pyplot as plt\n'), ((2154, 2164), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2162, 2164), True, 'import matplotlib.pyplot as plt\n'), ((2306, 2350), 'sklearn.linear_model.LogisticRegression', 
'LogisticRegression', ([], {'C': '(1000.0)', 'random_state': '(0)'}), '(C=1000.0, random_state=0)\n', (2324, 2350), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2553, 2595), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Petal length [standardlized]"""'], {}), "('Petal length [standardlized]')\n", (2563, 2595), True, 'import matplotlib.pyplot as plt\n'), ((2596, 2637), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Petal width [standardlized]"""'], {}), "('Petal width [standardlized]')\n", (2606, 2637), True, 'import matplotlib.pyplot as plt\n'), ((2638, 2666), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (2648, 2666), True, 'import matplotlib.pyplot as plt\n'), ((2667, 2677), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2675, 2677), True, 'import matplotlib.pyplot as plt\n'), ((2877, 2892), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (2886, 2892), True, 'import numpy as np\n'), ((3047, 3064), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (3055, 3064), True, 'import numpy as np\n'), ((3065, 3118), 'matplotlib.pyplot.plot', 'plt.plot', (['params', 'weights[:, 0]'], {'label': '"""Petal length"""'}), "(params, weights[:, 0], label='Petal length')\n", (3073, 3118), True, 'import matplotlib.pyplot as plt\n'), ((3119, 3187), 'matplotlib.pyplot.plot', 'plt.plot', (['params', 'weights[:, 1]'], {'linestyle': '"""--"""', 'label': '"""Petal width"""'}), "(params, weights[:, 1], linestyle='--', label='Petal width')\n", (3127, 3187), True, 'import matplotlib.pyplot as plt\n'), ((3188, 3203), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""C"""'], {}), "('C')\n", (3198, 3203), True, 'import matplotlib.pyplot as plt\n'), ((3204, 3235), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""weight cofficient"""'], {}), "('weight cofficient')\n", (3214, 3235), True, 'import matplotlib.pyplot as plt\n'), ((3236, 3264), 'matplotlib.pyplot.legend', 'plt.legend', ([], 
{'loc': '"""upper left"""'}), "(loc='upper left')\n", (3246, 3264), True, 'import matplotlib.pyplot as plt\n'), ((3265, 3282), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (3275, 3282), True, 'import matplotlib.pyplot as plt\n'), ((3283, 3293), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3291, 3293), True, 'import matplotlib.pyplot as plt\n'), ((3411, 3454), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""', 'C': '(1.0)', 'random_state': '(0)'}), "(kernel='linear', C=1.0, random_state=0)\n", (3414, 3454), False, 'from sklearn.svm import SVC\n'), ((3623, 3665), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Petal length [standardlized]"""'], {}), "('Petal length [standardlized]')\n", (3633, 3665), True, 'import matplotlib.pyplot as plt\n'), ((3666, 3707), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Petal width [standardlized]"""'], {}), "('Petal width [standardlized]')\n", (3676, 3707), True, 'import matplotlib.pyplot as plt\n'), ((3708, 3736), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (3718, 3736), True, 'import matplotlib.pyplot as plt\n'), ((3737, 3747), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3745, 3747), True, 'import matplotlib.pyplot as plt\n'), ((3884, 3916), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""perceptron"""'}), "(loss='perceptron')\n", (3897, 3916), False, 'from sklearn.linear_model import SGDClassifier\n'), ((3922, 3947), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""'}), "(loss='log')\n", (3935, 3947), False, 'from sklearn.linear_model import SGDClassifier\n'), ((3954, 3981), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""hinge"""'}), "(loss='hinge')\n", (3967, 3981), False, 'from sklearn.linear_model import SGDClassifier\n'), ((3982, 3999), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3996, 3999), True, 'import 
numpy as np\n'), ((4008, 4031), 'numpy.random.randn', 'np.random.randn', (['(200)', '(2)'], {}), '(200, 2)\n', (4023, 4031), True, 'import numpy as np\n'), ((4040, 4088), 'numpy.logical_xor', 'np.logical_xor', (['(X_xor[:, 0] > 0)', '(X_xor[:, 1] > 0)'], {}), '(X_xor[:, 0] > 0, X_xor[:, 1] > 0)\n', (4054, 4088), True, 'import numpy as np\n'), ((4097, 4119), 'numpy.where', 'np.where', (['y_xor', '(1)', '(-1)'], {}), '(y_xor, 1, -1)\n', (4105, 4119), True, 'import numpy as np\n'), ((4120, 4209), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_xor[y_xor == 1, 0]', 'X_xor[y_xor == 1, 1]'], {'c': '"""b"""', 'marker': '"""x"""', 'label': '"""1"""'}), "(X_xor[y_xor == 1, 0], X_xor[y_xor == 1, 1], c='b', marker='x',\n label='1')\n", (4131, 4209), True, 'import matplotlib.pyplot as plt\n'), ((4214, 4306), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_xor[y_xor == -1, 0]', 'X_xor[y_xor == -1, 1]'], {'c': '"""r"""', 'marker': '"""s"""', 'label': '"""-1"""'}), "(X_xor[y_xor == -1, 0], X_xor[y_xor == -1, 1], c='r', marker='s',\n label='-1')\n", (4225, 4306), True, 'import matplotlib.pyplot as plt\n'), ((4311, 4325), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3.0)'], {}), '(-3.0)\n', (4319, 4325), True, 'import matplotlib.pyplot as plt\n'), ((4326, 4338), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4336, 4338), True, 'import matplotlib.pyplot as plt\n'), ((4339, 4349), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4347, 4349), True, 'import matplotlib.pyplot as plt\n'), ((4438, 4490), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'random_state': '(0)', 'gamma': '(0.1)', 'C': '(10.0)'}), "(kernel='rbf', random_state=0, gamma=0.1, C=10.0)\n", (4441, 4490), False, 'from sklearn.svm import SVC\n'), ((4588, 4616), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (4598, 4616), True, 'import matplotlib.pyplot as plt\n'), ((4617, 4627), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(4625, 4627), True, 'import matplotlib.pyplot as plt\n'), ((4718, 4769), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'random_state': '(0)', 'gamma': '(0.2)', 'C': '(1.0)'}), "(kernel='rbf', random_state=0, gamma=0.2, C=1.0)\n", (4721, 4769), False, 'from sklearn.svm import SVC\n'), ((4915, 4957), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Petal length [standardlized]"""'], {}), "('Petal length [standardlized]')\n", (4925, 4957), True, 'import matplotlib.pyplot as plt\n'), ((4958, 4999), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Petal width [standardlized]"""'], {}), "('Petal width [standardlized]')\n", (4968, 4999), True, 'import matplotlib.pyplot as plt\n'), ((5000, 5028), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (5010, 5028), True, 'import matplotlib.pyplot as plt\n'), ((5029, 5039), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5037, 5039), True, 'import matplotlib.pyplot as plt\n'), ((5130, 5183), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'random_state': '(0)', 'gamma': '(100.0)', 'C': '(1.0)'}), "(kernel='rbf', random_state=0, gamma=100.0, C=1.0)\n", (5133, 5183), False, 'from sklearn.svm import SVC\n'), ((5328, 5370), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Petal length [standardlized]"""'], {}), "('Petal length [standardlized]')\n", (5338, 5370), True, 'import matplotlib.pyplot as plt\n'), ((5371, 5412), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Petal width [standardlized]"""'], {}), "('Petal width [standardlized]')\n", (5381, 5412), True, 'import matplotlib.pyplot as plt\n'), ((5413, 5441), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (5423, 5441), True, 'import matplotlib.pyplot as plt\n'), ((5442, 5452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5450, 5452), True, 'import matplotlib.pyplot as plt\n'), ((5760, 5785), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', 
'(0.01)'], {}), '(0.0, 1.0, 0.01)\n', (5769, 5785), True, 'import numpy as np\n'), ((5915, 5927), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5925, 5927), True, 'import matplotlib.pyplot as plt\n'), ((5933, 5949), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (5944, 5949), True, 'import matplotlib.pyplot as plt\n'), ((6548, 6564), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.1)'], {}), '(0, 1.1)\n', (6556, 6564), True, 'import matplotlib.pyplot as plt\n'), ((6565, 6585), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""p(i=1)"""'], {}), "('p(i=1)')\n", (6575, 6585), True, 'import matplotlib.pyplot as plt\n'), ((6586, 6614), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Impurity Index"""'], {}), "('Impurity Index')\n", (6596, 6614), True, 'import matplotlib.pyplot as plt\n'), ((6615, 6625), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6623, 6625), True, 'import matplotlib.pyplot as plt\n'), ((6764, 6836), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': '"""entropy"""', 'max_depth': '(3)', 'random_state': '(0)'}), "(criterion='entropy', max_depth=3, random_state=0)\n", (6786, 6836), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((6907, 6935), 'numpy.vstack', 'np.vstack', (['(X_train, X_test)'], {}), '((X_train, X_test))\n', (6916, 6935), True, 'import numpy as np\n'), ((6949, 6977), 'numpy.hstack', 'np.hstack', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (6958, 6977), True, 'import numpy as np\n'), ((7110, 7141), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""petal length [cm]"""'], {}), "('petal length [cm]')\n", (7120, 7141), True, 'import matplotlib.pyplot as plt\n'), ((7142, 7172), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""petal width [cm]"""'], {}), "('petal width [cm]')\n", (7152, 7172), True, 'import matplotlib.pyplot as plt\n'), ((7173, 7201), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper 
left')\n", (7183, 7201), True, 'import matplotlib.pyplot as plt\n'), ((7202, 7212), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7210, 7212), True, 'import matplotlib.pyplot as plt\n'), ((7340, 7433), 'sklearn.tree.export_graphviz', 'export_graphviz', (['tree'], {'out_file': '"""tree.dot"""', 'feature_names': "['petal length', 'petal width']"}), "(tree, out_file='tree.dot', feature_names=['petal length',\n 'petal width'])\n", (7355, 7433), False, 'from sklearn.tree import export_graphviz\n'), ((7508, 7598), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'criterion': '"""entropy"""', 'n_estimators': '(10)', 'random_state': '(1)', 'n_jobs': '(2)'}), "(criterion='entropy', n_estimators=10, random_state=1,\n n_jobs=2)\n", (7530, 7598), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7789, 7815), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""petal length"""'], {}), "('petal length')\n", (7799, 7815), True, 'import matplotlib.pyplot as plt\n'), ((7816, 7841), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""petal width"""'], {}), "('petal width')\n", (7826, 7841), True, 'import matplotlib.pyplot as plt\n'), ((7842, 7870), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (7852, 7870), True, 'import matplotlib.pyplot as plt\n'), ((7871, 7881), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7879, 7881), True, 'import matplotlib.pyplot as plt\n'), ((8025, 8085), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(5)', 'p': '(2)', 'metric': '"""minkowski"""'}), "(n_neighbors=5, p=2, metric='minkowski')\n", (8045, 8085), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((8281, 8323), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""petal length [standardlized]"""'], {}), "('petal length [standardlized]')\n", (8291, 8323), True, 'import matplotlib.pyplot as plt\n'), ((8324, 8365), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""petal width [standardlized]"""'], {}), "('petal width [standardlized]')\n", (8334, 8365), True, 'import matplotlib.pyplot as plt\n'), ((8366, 8376), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8374, 8376), True, 'import matplotlib.pyplot as plt\n'), ((2903, 2948), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(10 ** c)', 'random_state': '(0)'}), '(C=10 ** c, random_state=0)\n', (2921, 2948), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1319, 1349), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1333, 1349), False, 'from sklearn.metrics import accuracy_score\n'), ((5737, 5755), 'numpy.max', 'np.max', (['[p, 1 - p]'], {}), '([p, 1 - p])\n', (5743, 5755), True, 'import numpy as np\n'), ((1854, 1864), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (1860, 1864), True, 'import numpy as np\n'), ((5672, 5682), 'numpy.log2', 'np.log2', (['p'], {}), '(p)\n', (5679, 5682), True, 'import numpy as np\n'), ((5693, 5707), 'numpy.log2', 'np.log2', (['(1 - p)'], {}), '(1 - p)\n', (5700, 5707), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
2019/12/18
READ GDSC DRUG RESPONSE DATA
"""
import os
import pandas as pd
import numpy as np
from functools import reduce
def read_GDSC_response(GDSC_drug_response_frames, GDSC_drug_name, X_source, GDSC_drug_id=None):
    """Pair GDSC drug-response AUC values with expression features.

    Parameters
    ----------
    GDSC_drug_response_frames : dict of DataFrame
        Must contain 'GDSC1' and 'GDSC2' tables with the columns
        DRUG_NAME, DRUG_ID, CELL_LINE_NAME and AUC.
    GDSC_drug_name : str
        Drug to extract. A fixed set of drugs is looked up in the GDSC1
        table; every other name is looked up in GDSC2.
    X_source : DataFrame
        Features with genes in columns and cell lines in the index.
        NOTE: a MultiIndex on X_source is flattened *in place*.
    GDSC_drug_id : optional
        When given, response rows are selected by DRUG_ID instead of
        DRUG_NAME.

    Returns
    -------
    (X_source_response, y_source), both restricted to the cell lines
    present in both inputs (order given by np.intersect1d).
    """
    GDSC1_ONLY = {'Cetuximab',
                  'Doxorubicin',
                  'Etoposide',
                  'Bleomycin',
                  'Bicalutamide',
                  'Bleomycin (50 uM)',
                  'Pemetrexed',
                  'AICA Ribonucleotide'}
    screen = 'GDSC1' if GDSC_drug_name in GDSC1_ONLY else 'GDSC2'
    response_df = GDSC_drug_response_frames[screen].copy()
    if GDSC_drug_id is not None:
        y_source = response_df[response_df['DRUG_ID'] == GDSC_drug_id]
    else:
        y_source = response_df[response_df['DRUG_NAME'] == GDSC_drug_name]
        # Debug output kept from the original implementation.
        print(np.unique(response_df['DRUG_ID']).shape)
    y_source = y_source.set_index('CELL_LINE_NAME')[['AUC']]
    # Flatten a (possibly nested) MultiIndex on the feature matrix in place.
    while type(X_source.index) == pd.core.indexes.multi.MultiIndex:
        X_source.index = X_source.index.droplevel(1)
    # Average duplicated cell-line rows of the feature matrix.
    X_source_response = X_source.groupby(X_source.index).mean()
    common_samples = np.intersect1d(y_source.index,
                                   X_source_response.index)
    return X_source_response.loc[common_samples], y_source.loc[common_samples]
"numpy.intersect1d",
"numpy.unique"
] | [((1429, 1484), 'numpy.intersect1d', 'np.intersect1d', (['y_source.index', 'X_source_response.index'], {}), '(y_source.index, X_source_response.index)\n', (1443, 1484), True, 'import numpy as np\n'), ((1067, 1110), 'numpy.unique', 'np.unique', (["GDSC_drug_response_df['DRUG_ID']"], {}), "(GDSC_drug_response_df['DRUG_ID'])\n", (1076, 1110), True, 'import numpy as np\n')] |
# Third Party
import numpy as np
import tensorflow as tf
# First Party
import smdebug.tensorflow as smd
from smdebug.core.collection import CollectionKeys
from smdebug.trials import create_trial
def get_data():
    """Return dummy (inputs, outputs) dicts for one 64-sample batch."""
    batch_inputs = {"Image_input": np.zeros((64, 224))}
    batch_outputs = {"output-softmax": np.zeros((64, 5))}
    return batch_inputs, batch_outputs
def create_hook(trial_dir):
    """Build a smdebug Keras hook that saves all tensors under trial_dir."""
    return smd.KerasHook(trial_dir, save_all=True)
def create_model():
    """Tiny dense classifier: 224-dim float input -> 5-way softmax output."""
    inp = tf.keras.layers.Input(name="Image_input", shape=(224), dtype="float32")
    x = tf.keras.layers.Dense(5)(inp)
    x = tf.keras.layers.Activation("softmax", name="output-softmax")(x)
    return tf.keras.models.Model(inputs=inp, outputs=[x])
def test_support_dicts(out_dir):
    """End-to-end check that dict-shaped inputs/labels work with the hook."""
    net = create_model()
    adadelta = tf.keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    net.compile(loss="categorical_crossentropy", optimizer=adadelta)
    batch_inputs, batch_labels = get_data()
    hook = create_hook(out_dir)
    net.fit(batch_inputs, batch_labels, batch_size=16, epochs=10, callbacks=[hook])
    net.save(out_dir, save_format="tf")
    # Reload what smdebug recorded and verify the collected tensor names.
    trial = create_trial(out_dir)
    assert trial.tensor_names(collection=CollectionKeys.INPUTS) == ["model_input"]
    assert trial.tensor_names(collection=CollectionKeys.OUTPUTS) == ["labels", "predictions"]
| [
"tensorflow.keras.optimizers.Adadelta",
"smdebug.tensorflow.KerasHook",
"tensorflow.keras.layers.Dense",
"numpy.zeros",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Input",
"smdebug.trials.create_trial"
] | [((227, 246), 'numpy.zeros', 'np.zeros', (['(64, 224)'], {}), '((64, 224))\n', (235, 246), True, 'import numpy as np\n'), ((260, 277), 'numpy.zeros', 'np.zeros', (['(64, 5)'], {}), '((64, 5))\n', (268, 277), True, 'import numpy as np\n'), ((424, 463), 'smdebug.tensorflow.KerasHook', 'smd.KerasHook', (['trial_dir'], {'save_all': '(True)'}), '(trial_dir, save_all=True)\n', (437, 463), True, 'import smdebug.tensorflow as smd\n'), ((520, 589), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'name': '"""Image_input"""', 'shape': '(224)', 'dtype': '"""float32"""'}), "(name='Image_input', shape=224, dtype='float32')\n", (541, 589), True, 'import tensorflow as tf\n'), ((734, 792), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'input_layer', 'outputs': '[model]'}), '(inputs=input_layer, outputs=[model])\n', (755, 792), True, 'import tensorflow as tf\n'), ((888, 959), 'tensorflow.keras.optimizers.Adadelta', 'tf.keras.optimizers.Adadelta', ([], {'lr': '(1.0)', 'rho': '(0.95)', 'epsilon': 'None', 'decay': '(0.0)'}), '(lr=1.0, rho=0.95, epsilon=None, decay=0.0)\n', (916, 959), True, 'import tensorflow as tf\n'), ((1240, 1261), 'smdebug.trials.create_trial', 'create_trial', (['out_dir'], {}), '(out_dir)\n', (1252, 1261), False, 'from smdebug.trials import create_trial\n'), ((604, 628), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(5)'], {}), '(5)\n', (625, 628), True, 'import tensorflow as tf\n'), ((654, 714), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""softmax"""'], {'name': '"""output-softmax"""'}), "('softmax', name='output-softmax')\n", (680, 714), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 10:14:53 2020
@author: <NAME>
"""
# to remove the warning in my code
# Globally silence FutureWarnings (e.g. scikit-learn deprecation notices).
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import GaussianNB
# Reading the train and testing data
# NOTE(review): absolute Windows paths make this script machine-specific.
train_data = pd.read_csv("D:\\Studies\\Machine Learning\\Titanic Prediction\\data\\train.csv")
test_data = pd.read_csv("D:\\Studies\\Machine Learning\\Titanic Prediction\\data\\test.csv")
check=pd.read_csv("D:\\Studies\\Machine Learning\\Titanic Prediction\\data\\gender_submission.csv")
# calculating the null values
def print_null():
    """Print the per-column null counts of the global train and test frames."""
    for label, frame in (("TRAIN", train_data), ("TEST", test_data)):
        print("\n" + label)
        print(frame.isnull().sum())
def print_shape():
    """Print the (rows, columns) shape of the global train and test frames."""
    for label, frame in (("Train:", train_data), ("\nTest:", test_data)):
        print(label, frame.shape)
def replacenull_train_embarked():
    """Overwrite 'Embarked' with 'C' for every first-class passenger.

    NOTE(review): despite the name, this does not target nulls only -- it
    rewrites 'Embarked' for ALL rows where Pclass == 1, including rows that
    already have a non-null value. Confirm this blanket overwrite is intended.
    """
    train_data['Embarked']=np.where((train_data.Pclass==1),'C',train_data.Embarked)
def fare_test_null():
    """Fill missing 'Fare' values in the global test set with the column mean.

    Fixed: the original used ``test_data['Fare'].fillna(..., inplace=True)``,
    i.e. an in-place mutation through a column selection (chained assignment).
    Under pandas Copy-on-Write (default in pandas 3, opt-in in 2.x) that write
    never reaches ``test_data``; assigning the filled Series back to the
    column works under all pandas versions and is the documented idiom.
    """
    mean_fare = test_data['Fare'].mean()
    test_data['Fare'] = test_data['Fare'].fillna(mean_fare)
def process_age(df,cut_points,label_names):
    """Bucket the 'Age' column into labelled bins.

    Missing ages are filled with the sentinel -0.5 (so they land in the
    first, "missing" bucket). Mutates *df* in place and returns it with an
    added 'Age_categories' column.
    """
    ages = df["Age"].fillna(-0.5)
    df["Age"] = ages
    df["Age_categories"] = pd.cut(ages, cut_points, labels=label_names)
    return df
# we now drop the cabin which is of no use
def drop_Cabin():
    """Intended to remove the unused 'Cabin' column from both datasets.

    NOTE(review): this is currently a no-op -- DataFrame.drop returns a new
    frame (``inplace`` defaults to False) and both results are discarded, so
    'Cabin' is never actually removed. Beware before "fixing" it: the
    positional ``iloc`` column indices used later in this script assume
    'Cabin' is still present, so adding ``inplace=True`` here would silently
    shift those indices.
    """
    test_data.drop(['Cabin'],axis=1)
    train_data.drop(['Cabin'],axis=1)
def replace_malefemale(): # 1 is male and 0 is female
    """Encode the 'Sex' column numerically in both frames: male -> 1, female -> 0."""
    for frame in (train_data, test_data):
        frame['Sex'] = np.where(frame.Sex == 'male', 1, frame.Sex)
        frame['Sex'] = np.where(frame.Sex == 'female', 0, frame.Sex)
# Age bin edges and their numeric labels; the commented-out list below gives
# the human-readable meaning of each bucket (-1..0 catches the -0.5 sentinel
# used for missing ages).
cut_points = [-1,0,5,12,18,35,60,100]
#label_names = ["Missing","Infant","Child","Teenager","Young Adult","Adult","Senior"]
label_names = [0,1,2,3,4,5,6]
# process_age mutates the frames in place and returns them, so `train` and
# `test` alias train_data/test_data.
train = process_age(train_data,cut_points,label_names)
test = process_age(test_data,cut_points,label_names)
def plot_agecategory():
    """Bar-plot the mean survival rate per age category for the train set."""
    survival_by_age = train.pivot_table(index="Age_categories", values='Survived')
    survival_by_age.plot.bar()
    plt.show()
def model_run():
    """Run the full preprocessing pipeline over the global train/test frames."""
    preprocessing_steps = (
        fare_test_null,
        drop_Cabin,
        replacenull_train_embarked,
        replace_malefemale,
    )
    for step in preprocessing_steps:
        step()
#    print_null()
#    print_shape()
'''
Now we have our dataset free from the null values now we are going to
use various classifier by taking into an account of AGE, PClass ,Sex
'''
X=[]
model_run()
# Selecting the Age, pclass and sex from train and test as below
# NOTE(review): these positional indices assume the original Kaggle column
# order AND that 'Cabin' was not actually dropped (see drop_Cabin above);
# verify them if either input file ever changes shape.
xtrain = train_data.iloc[:,[2,4,5,12]] # [2,4,5]
ytrain = train_data["Survived"]
xtest = test_data.iloc[:,[1,3,4,11]] # [1,3,5]
# 'check' is gender_submission.csv (the sample submission), so the "accuracy"
# below measures agreement with that baseline, not with real test labels.
ytest = check["Survived"]
print(xtest.shape)
# Logistic Regression model
classifier = LogisticRegression(random_state = 0)
classifier.fit(xtrain, ytrain)
y_pred = classifier.predict(xtest)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(ytest, y_pred)
print ("Confusion Matrix : \n", cm)
from sklearn.metrics import accuracy_score
print ("Accuracy : ", accuracy_score(ytest, y_pred))
# NOTE(review): DataFrame.to_csv(path) returns None, so y_pred is rebound to
# None here -- keep a separate variable if the predictions are needed later.
y_pred = pd.DataFrame(y_pred, columns=['predictions']).to_csv('D:\\Studies\\Machine Learning\\Titanic Prediction\\data\\prediction.csv')
'''
# ploting the graph
sex_pivot = train_data.pivot_table(index="Sex",values="Survived")
sex_pivot.plot.bar()
plt.show()
pclass_pivot = train_data.pivot_table(index = 'Pclass', values = 'Survived')
pclass_pivot.plot.bar()
plt.show()
emb_pivot = train_data.pivot_table(index = 'Embarked', values = 'Pclass')
emb_pivot.plot.bar()
plt.show()
'''
"pandas.DataFrame",
"matplotlib.pyplot.show",
"warnings.simplefilter",
"pandas.read_csv",
"sklearn.metrics.accuracy_score",
"pandas.cut",
"sklearn.linear_model.LogisticRegression",
"numpy.where",
"sklearn.metrics.confusion_matrix"
] | [((162, 215), 'warnings.simplefilter', 'simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (174, 215), False, 'from warnings import simplefilter\n'), ((768, 854), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\Studies\\\\Machine Learning\\\\Titanic Prediction\\\\data\\\\train.csv"""'], {}), "(\n 'D:\\\\Studies\\\\Machine Learning\\\\Titanic Prediction\\\\data\\\\train.csv')\n", (779, 854), True, 'import pandas as pd\n'), ((865, 950), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\Studies\\\\Machine Learning\\\\Titanic Prediction\\\\data\\\\test.csv"""'], {}), "('D:\\\\Studies\\\\Machine Learning\\\\Titanic Prediction\\\\data\\\\test.csv'\n )\n", (876, 950), True, 'import pandas as pd\n'), ((955, 1058), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\Studies\\\\Machine Learning\\\\Titanic Prediction\\\\data\\\\gender_submission.csv"""'], {}), "(\n 'D:\\\\Studies\\\\Machine Learning\\\\Titanic Prediction\\\\data\\\\gender_submission.csv'\n )\n", (966, 1058), True, 'import pandas as pd\n'), ((3195, 3229), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)'}), '(random_state=0)\n', (3213, 3229), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3360, 3391), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['ytest', 'y_pred'], {}), '(ytest, y_pred)\n', (3376, 3391), False, 'from sklearn.metrics import confusion_matrix\n'), ((1368, 1426), 'numpy.where', 'np.where', (['(train_data.Pclass == 1)', '"""C"""', 'train_data.Embarked'], {}), "(train_data.Pclass == 1, 'C', train_data.Embarked)\n", (1376, 1426), True, 'import numpy as np\n'), ((1633, 1682), 'pandas.cut', 'pd.cut', (["df['Age']", 'cut_points'], {'labels': 'label_names'}), "(df['Age'], cut_points, labels=label_names)\n", (1639, 1682), True, 'import pandas as pd\n'), ((1910, 1963), 'numpy.where', 'np.where', (["(train_data.Sex == 'male')", '(1)', 'train_data.Sex'], {}), 
"(train_data.Sex == 'male', 1, train_data.Sex)\n", (1918, 1963), True, 'import numpy as np\n'), ((1981, 2032), 'numpy.where', 'np.where', (["(test_data.Sex == 'male')", '(1)', 'test_data.Sex'], {}), "(test_data.Sex == 'male', 1, test_data.Sex)\n", (1989, 2032), True, 'import numpy as np\n'), ((2051, 2106), 'numpy.where', 'np.where', (["(train_data.Sex == 'female')", '(0)', 'train_data.Sex'], {}), "(train_data.Sex == 'female', 0, train_data.Sex)\n", (2059, 2106), True, 'import numpy as np\n'), ((2124, 2177), 'numpy.where', 'np.where', (["(test_data.Sex == 'female')", '(0)', 'test_data.Sex'], {}), "(test_data.Sex == 'female', 0, test_data.Sex)\n", (2132, 2177), True, 'import numpy as np\n'), ((2574, 2584), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2582, 2584), True, 'import matplotlib.pyplot as plt\n'), ((3501, 3530), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['ytest', 'y_pred'], {}), '(ytest, y_pred)\n', (3515, 3530), False, 'from sklearn.metrics import accuracy_score\n'), ((3545, 3590), 'pandas.DataFrame', 'pd.DataFrame', (['y_pred'], {'columns': "['predictions']"}), "(y_pred, columns=['predictions'])\n", (3557, 3590), True, 'import pandas as pd\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import pytest
import fairlearn.metrics as metrics
from test.unit.input_convertors import conversions_for_1d
# ===========================================================
def mock_func(y_true, y_pred):
    """Toy metric: the sum of the true labels (predictions are ignored)."""
    return np.asarray(y_true).sum()
def mock_func_weight(y_true, y_pred, sample_weight):
    """Toy metric: weighted sum of the true labels (predictions are ignored)."""
    # Inner product of two 1-d vectors == sum of elementwise products.
    return np.dot(y_true, sample_weight)
def mock_func_matrix_return(y_true, y_pred):
    """Toy metric returning an all-ones matrix of shape (len(y_true), sum(y_pred))."""
    rows, cols = len(y_true), sum(y_pred)
    return np.ones([rows, cols])
class TestMetricByGroup:
    """Tests for metrics.metric_by_group.

    Each parametrized test runs once per container conversion in
    conversions_for_1d, so the same assertions are exercised for every
    supported 1-d input type.
    """
    @pytest.mark.parametrize("transform_gid", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_p", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_a", conversions_for_1d)
    def test_smoke(self, transform_y_a, transform_y_p, transform_gid):
        """Overall/by-group/min/max bookkeeping with two integer groups."""
        y_a = transform_y_a([0, 0, 1, 1, 0, 1, 1, 1])
        y_p = transform_y_p([0, 1, 1, 1, 1, 0, 0, 1])
        gid = transform_gid([0, 0, 0, 0, 1, 1, 1, 1])
        result = metrics.metric_by_group(mock_func, y_a, y_p, gid)
        assert result.overall == 5
        assert len(result.by_group) == 2
        assert result.by_group[0] == 2
        assert result.by_group[1] == 3
        assert result.minimum == 2
        assert result.argmin_set == {0}
        assert result.maximum == 3
        assert result.argmax_set == {1}
        assert result.range == 1
        assert result.range_ratio == pytest.approx(0.6666666667)
    @pytest.mark.parametrize("transform_gid", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_p", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_a", conversions_for_1d)
    def test_string_groups(self, transform_y_a, transform_y_p, transform_gid):
        """Group ids may be strings; argmin_set can contain ties."""
        a = "ABC"
        b = "DEF"
        c = "GHI"
        y_a = transform_y_a([0, 0, 1, 1, 0, 1, 1, 1])
        y_p = transform_y_p([0, 1, 1, 1, 1, 0, 0, 1])
        gid = transform_gid([a, a, a, b, b, c, c, c])
        result = metrics.metric_by_group(mock_func, y_a, y_p, gid)
        assert result.overall == 5
        assert len(result.by_group) == 3
        assert result.by_group[a] == 1
        assert result.by_group[b] == 1
        assert result.by_group[c] == 3
        assert result.minimum == 1
        assert result.argmin_set == {a, b}
        assert result.maximum == 3
        assert result.argmax_set == {c}
        assert result.range == 2
        assert result.range_ratio == pytest.approx(0.33333333333333)
    @pytest.mark.parametrize("transform_gid", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_p", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_a", conversions_for_1d)
    def test_matrix_metric(self, transform_y_a, transform_y_p, transform_gid):
        """Matrix-valued metrics are passed through; min/max/range become None."""
        a = "ABC"
        b = "DEF"
        c = "GHI"
        y_a = transform_y_a([0, 0, 1, 1, 0, 1, 1, 1])
        y_p = transform_y_p([0, 1, 1, 1, 1, 0, 0, 1])
        gid = transform_gid([a, a, a, b, b, c, c, c])
        result = metrics.metric_by_group(mock_func_matrix_return, y_a, y_p, gid)
        assert np.array_equal(result.overall, np.ones([8, 5]))
        assert np.array_equal(result.by_group[a], np.ones([3, 2]))
        assert np.array_equal(result.by_group[b], np.ones([2, 2]))
        assert np.array_equal(result.by_group[c], np.ones([3, 1]))
        assert result.minimum is None
        assert result.argmin_set is None
        assert result.maximum is None
        assert result.argmax_set is None
        assert result.range is None
        assert result.range_ratio is None
    @pytest.mark.parametrize("transform_s_w", conversions_for_1d)
    @pytest.mark.parametrize("transform_gid", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_p", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_a", conversions_for_1d)
    def test_with_weights(self, transform_y_a, transform_y_p, transform_gid, transform_s_w):
        """sample_weight is forwarded to the underlying metric."""
        y_a = transform_y_a([0, 0, 1, 1, 0, 1, 1, 1])
        y_p = transform_y_p([0, 1, 1, 1, 1, 0, 0, 1])
        gid = transform_gid([0, 0, 0, 0, 1, 1, 2, 2])
        s_w = transform_s_w([1, 1, 1, 1, 2, 2, 3, 3])
        result = metrics.metric_by_group(mock_func_weight, y_a, y_p, gid, sample_weight=s_w)
        assert result.overall == 10
        assert len(result.by_group) == 3
        assert result.by_group[0] == 2
        assert result.by_group[1] == 2
        assert result.by_group[2] == 6
        assert result.minimum == 2
        assert result.argmin_set == {0, 1}
        assert result.maximum == 6
        assert result.argmax_set == {2}
        assert result.range == 4
        assert result.range_ratio == pytest.approx(0.33333333333333)
    @pytest.mark.parametrize("transform_y_p", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_a", conversions_for_1d)
    def test_true_predict_length_mismatch(self, transform_y_a, transform_y_p):
        """A shorter y_pred must raise ValueError with a specific message."""
        y_a = transform_y_a([0, 0, 1, 1, 0, 1, 1, 1])
        y_p = transform_y_p([0, 1, 1, 1, 1, 0, 0])
        gid = [0, 0, 0, 0, 1, 1, 2, 2]
        s_w = [1, 1, 1, 1, 2, 2, 3, 3]
        with pytest.raises(ValueError) as exception_context:
            _ = metrics.metric_by_group(mock_func_weight, y_a, y_p, gid, s_w)
        expected = "Array y_pred is not the same size as y_true"
        assert exception_context.value.args[0] == expected
    @pytest.mark.parametrize("transform_gid", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_a", conversions_for_1d)
    def test_true_group_length_mismatch(self, transform_y_a, transform_gid):
        """A shorter group_membership must raise ValueError with a specific message."""
        y_a = transform_y_a([0, 0, 1, 1, 0, 1, 1, 1])
        y_p = [0, 1, 1, 1, 1, 0, 0, 0]
        gid = transform_gid([0, 0, 0, 0, 1, 1, 2])
        s_w = [1, 1, 1, 1, 2, 2, 3, 3]
        with pytest.raises(ValueError) as exception_context:
            _ = metrics.metric_by_group(mock_func_weight, y_a, y_p, gid, s_w)
        expected = "Array group_membership is not the same size as y_true"
        assert exception_context.value.args[0] == expected
    @pytest.mark.parametrize("transform_s_w", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_a", conversions_for_1d)
    def test_true_weight_length_mismatch(self, transform_y_a, transform_s_w):
        """A shorter sample_weight must raise ValueError with a specific message."""
        y_a = transform_y_a([0, 0, 1, 1, 0, 1, 1, 1])
        y_p = [0, 1, 1, 1, 1, 0, 0, 0]
        gid = [0, 0, 0, 0, 1, 1, 2, 3]
        s_w = transform_s_w([1, 1, 1, 1, 2, 2, 3])
        with pytest.raises(ValueError) as exception_context:
            _ = metrics.metric_by_group(mock_func_weight, y_a, y_p, gid, s_w)
        expected = "Array sample_weight is not the same size as y_true"
        assert exception_context.value.args[0] == expected
    def test_negative_results(self):
        """All-negative metric values: range_ratio is NaN (min/max straddle sign)."""
        y_a = [0, 0, 1, 1, 0, 1, 1, 1]
        y_p = [0, 1, 1, 1, 1, 0, 0, 1]
        gid = [0, 0, 0, 0, 0, 1, 1, 1]
        def negative_results(y_true, y_pred):
            return -(len(y_true) + len(y_pred))
        result = metrics.metric_by_group(negative_results, y_a, y_p, gid)
        assert result.overall == -16
        assert result.by_group[0] == -10
        assert result.by_group[1] == -6
        assert result.minimum == -10
        assert result.maximum == -6
        assert result.range == 4
        assert np.isnan(result.range_ratio)
    def test_metric_results_zero(self):
        """All-zero metric values: range_ratio is defined as 1 in this special case."""
        y_a = [0, 0, 1, 1, 0, 1, 1, 1]
        y_p = [0, 1, 1, 1, 1, 0, 0, 1]
        gid = [0, 0, 0, 0, 0, 1, 1, 1]
        def zero_results(y_true, y_pred):
            # Arrays will always be same length
            return len(y_true)-len(y_pred)
        result = metrics.metric_by_group(zero_results, y_a, y_p, gid)
        assert result.overall == 0
        assert result.by_group[0] == 0
        assert result.by_group[1] == 0
        assert result.minimum == 0
        assert result.maximum == 0
        assert result.range == 0
        # Following is special case
        assert result.range_ratio == 1
    def test_single_element_input(self):
        """Degenerate single-sample, single-group input."""
        y_t = [0]
        y_p = [0]
        gid = [0]
        s_w = [0]
        def sum_lengths(y_true, y_pred, sample_weight):
            return len(y_true) + len(y_pred) + len(sample_weight)
        result = metrics.metric_by_group(sum_lengths, y_t, y_p, gid, sample_weight=s_w)
        assert result.overall == 3
        assert result.by_group[0] == 3
        assert result.minimum == 3
        assert result.maximum == 3
        assert result.range == 0
        assert result.range_ratio == 1
    def test_groups_only_one_element(self):
        """Every group contains exactly one sample."""
        y_t = [1, 2]
        y_p = [1, 2]
        gid = [0, 1]
        def sum_lengths(y_true, y_pred):
            return len(y_true) + len(y_pred)
        result = metrics.metric_by_group(sum_lengths, y_t, y_p, gid)
        assert result.overall == 4
        assert result.by_group[0] == 2
        assert result.by_group[1] == 2
        assert result.minimum == 2
        assert result.maximum == 2
        assert result.range == 0
        assert result.range_ratio == 1
class TestMakeGroupMetric:
    """Tests for metrics.make_group_metric, the metric-to-grouped-metric factory."""
    def test_smoke(self):
        """The wrapped metric matches metric_by_group's bookkeeping."""
        y_a = [0, 0, 1, 1, 0, 1, 1, 1]
        y_p = [0, 1, 1, 1, 1, 0, 0, 1]
        gid = [0, 0, 0, 0, 1, 1, 1, 1]
        grouped_metric_func = metrics.make_group_metric(mock_func)
        result = grouped_metric_func(y_a, y_p, gid)
        assert result.overall == 5
        assert len(result.by_group) == 2
        assert result.by_group[0] == 2
        assert result.by_group[1] == 3
        assert result.minimum == 2
        assert result.maximum == 3
        assert result.argmin_set == {0}
        assert result.argmax_set == {1}
        assert result.range == 1
        assert result.range_ratio == pytest.approx(0.66666666667)
    @pytest.mark.parametrize("transform_s_w", conversions_for_1d)
    @pytest.mark.parametrize("transform_gid", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_p", conversions_for_1d)
    @pytest.mark.parametrize("transform_y_a", conversions_for_1d)
    def test_keys_and_weights(self, transform_y_a, transform_y_p, transform_gid, transform_s_w):
        """String group keys of varying length plus sample weights."""
        a = "ABC"
        b = "DEF"
        c = "GHI"
        z = "something_longer"
        y_a = transform_y_a([0, 1, 1, 1, 0, 1, 1, 1])
        y_p = transform_y_p([0, 1, 1, 1, 1, 0, 0, 1])
        gid = transform_gid([a, z, a, b, b, c, c, c])
        s_w = transform_s_w([1, 1, 1, 5, 5, 7, 7, 7])
        grouped_metric_func = metrics.make_group_metric(mock_func_weight)
        result = grouped_metric_func(y_a, y_p, gid, s_w)
        assert result.overall == 28
        assert len(result.by_group) == 4
        assert result.by_group[a] == 1
        assert result.by_group[b] == 5
        assert result.by_group[c] == 21
        assert result.by_group[z] == 1
        assert result.minimum == 1
        assert result.maximum == 21
        assert result.argmin_set == {a, z}
        assert result.argmax_set == {c}
        assert result.range == 20
        assert result.range_ratio == pytest.approx(1.0/21.0)
| [
"numpy.multiply",
"numpy.sum",
"numpy.ones",
"numpy.isnan",
"fairlearn.metrics.make_group_metric",
"pytest.raises",
"fairlearn.metrics.metric_by_group",
"pytest.mark.parametrize",
"pytest.approx"
] | [((331, 345), 'numpy.sum', 'np.sum', (['y_true'], {}), '(y_true)\n', (337, 345), True, 'import numpy as np\n'), ((581, 641), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_gid"""', 'conversions_for_1d'], {}), "('transform_gid', conversions_for_1d)\n", (604, 641), False, 'import pytest\n'), ((647, 707), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_p"""', 'conversions_for_1d'], {}), "('transform_y_p', conversions_for_1d)\n", (670, 707), False, 'import pytest\n'), ((713, 773), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_a"""', 'conversions_for_1d'], {}), "('transform_y_a', conversions_for_1d)\n", (736, 773), False, 'import pytest\n'), ((1484, 1544), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_gid"""', 'conversions_for_1d'], {}), "('transform_gid', conversions_for_1d)\n", (1507, 1544), False, 'import pytest\n'), ((1550, 1610), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_p"""', 'conversions_for_1d'], {}), "('transform_y_p', conversions_for_1d)\n", (1573, 1610), False, 'import pytest\n'), ((1616, 1676), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_a"""', 'conversions_for_1d'], {}), "('transform_y_a', conversions_for_1d)\n", (1639, 1676), False, 'import pytest\n'), ((2495, 2555), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_gid"""', 'conversions_for_1d'], {}), "('transform_gid', conversions_for_1d)\n", (2518, 2555), False, 'import pytest\n'), ((2561, 2621), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_p"""', 'conversions_for_1d'], {}), "('transform_y_p', conversions_for_1d)\n", (2584, 2621), False, 'import pytest\n'), ((2627, 2687), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_a"""', 'conversions_for_1d'], {}), "('transform_y_a', conversions_for_1d)\n", (2650, 2687), False, 'import pytest\n'), ((3572, 3632), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""transform_s_w"""', 'conversions_for_1d'], {}), "('transform_s_w', conversions_for_1d)\n", (3595, 3632), False, 'import pytest\n'), ((3638, 3698), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_gid"""', 'conversions_for_1d'], {}), "('transform_gid', conversions_for_1d)\n", (3661, 3698), False, 'import pytest\n'), ((3704, 3764), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_p"""', 'conversions_for_1d'], {}), "('transform_y_p', conversions_for_1d)\n", (3727, 3764), False, 'import pytest\n'), ((3770, 3830), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_a"""', 'conversions_for_1d'], {}), "('transform_y_a', conversions_for_1d)\n", (3793, 3830), False, 'import pytest\n'), ((4690, 4750), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_p"""', 'conversions_for_1d'], {}), "('transform_y_p', conversions_for_1d)\n", (4713, 4750), False, 'import pytest\n'), ((4756, 4816), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_a"""', 'conversions_for_1d'], {}), "('transform_y_a', conversions_for_1d)\n", (4779, 4816), False, 'import pytest\n'), ((5350, 5410), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_gid"""', 'conversions_for_1d'], {}), "('transform_gid', conversions_for_1d)\n", (5373, 5410), False, 'import pytest\n'), ((5416, 5476), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_a"""', 'conversions_for_1d'], {}), "('transform_y_a', conversions_for_1d)\n", (5439, 5476), False, 'import pytest\n'), ((6018, 6078), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_s_w"""', 'conversions_for_1d'], {}), "('transform_s_w', conversions_for_1d)\n", (6041, 6078), False, 'import pytest\n'), ((6084, 6144), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_a"""', 'conversions_for_1d'], {}), "('transform_y_a', conversions_for_1d)\n", (6107, 6144), False, 
'import pytest\n'), ((9690, 9750), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_s_w"""', 'conversions_for_1d'], {}), "('transform_s_w', conversions_for_1d)\n", (9713, 9750), False, 'import pytest\n'), ((9756, 9816), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_gid"""', 'conversions_for_1d'], {}), "('transform_gid', conversions_for_1d)\n", (9779, 9816), False, 'import pytest\n'), ((9822, 9882), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_p"""', 'conversions_for_1d'], {}), "('transform_y_p', conversions_for_1d)\n", (9845, 9882), False, 'import pytest\n'), ((9888, 9948), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transform_y_a"""', 'conversions_for_1d'], {}), "('transform_y_a', conversions_for_1d)\n", (9911, 9948), False, 'import pytest\n'), ((419, 453), 'numpy.multiply', 'np.multiply', (['y_true', 'sample_weight'], {}), '(y_true, sample_weight)\n', (430, 453), True, 'import numpy as np\n'), ((1025, 1074), 'fairlearn.metrics.metric_by_group', 'metrics.metric_by_group', (['mock_func', 'y_a', 'y_p', 'gid'], {}), '(mock_func, y_a, y_p, gid)\n', (1048, 1074), True, 'import fairlearn.metrics as metrics\n'), ((1990, 2039), 'fairlearn.metrics.metric_by_group', 'metrics.metric_by_group', (['mock_func', 'y_a', 'y_p', 'gid'], {}), '(mock_func, y_a, y_p, gid)\n', (2013, 2039), True, 'import fairlearn.metrics as metrics\n'), ((3001, 3064), 'fairlearn.metrics.metric_by_group', 'metrics.metric_by_group', (['mock_func_matrix_return', 'y_a', 'y_p', 'gid'], {}), '(mock_func_matrix_return, y_a, y_p, gid)\n', (3024, 3064), True, 'import fairlearn.metrics as metrics\n'), ((4158, 4233), 'fairlearn.metrics.metric_by_group', 'metrics.metric_by_group', (['mock_func_weight', 'y_a', 'y_p', 'gid'], {'sample_weight': 's_w'}), '(mock_func_weight, y_a, y_p, gid, sample_weight=s_w)\n', (4181, 4233), True, 'import fairlearn.metrics as metrics\n'), ((6946, 7002), 'fairlearn.metrics.metric_by_group', 
'metrics.metric_by_group', (['negative_results', 'y_a', 'y_p', 'gid'], {}), '(negative_results, y_a, y_p, gid)\n', (6969, 7002), True, 'import fairlearn.metrics as metrics\n'), ((7243, 7271), 'numpy.isnan', 'np.isnan', (['result.range_ratio'], {}), '(result.range_ratio)\n', (7251, 7271), True, 'import numpy as np\n'), ((7582, 7634), 'fairlearn.metrics.metric_by_group', 'metrics.metric_by_group', (['zero_results', 'y_a', 'y_p', 'gid'], {}), '(zero_results, y_a, y_p, gid)\n', (7605, 7634), True, 'import fairlearn.metrics as metrics\n'), ((8182, 8252), 'fairlearn.metrics.metric_by_group', 'metrics.metric_by_group', (['sum_lengths', 'y_t', 'y_p', 'gid'], {'sample_weight': 's_w'}), '(sum_lengths, y_t, y_p, gid, sample_weight=s_w)\n', (8205, 8252), True, 'import fairlearn.metrics as metrics\n'), ((8682, 8733), 'fairlearn.metrics.metric_by_group', 'metrics.metric_by_group', (['sum_lengths', 'y_t', 'y_p', 'gid'], {}), '(sum_lengths, y_t, y_p, gid)\n', (8705, 8733), True, 'import fairlearn.metrics as metrics\n'), ((9192, 9228), 'fairlearn.metrics.make_group_metric', 'metrics.make_group_metric', (['mock_func'], {}), '(mock_func)\n', (9217, 9228), True, 'import fairlearn.metrics as metrics\n'), ((10378, 10421), 'fairlearn.metrics.make_group_metric', 'metrics.make_group_metric', (['mock_func_weight'], {}), '(mock_func_weight)\n', (10403, 10421), True, 'import fairlearn.metrics as metrics\n'), ((1450, 1477), 'pytest.approx', 'pytest.approx', (['(0.6666666667)'], {}), '(0.6666666667)\n', (1463, 1477), False, 'import pytest\n'), ((2457, 2488), 'pytest.approx', 'pytest.approx', (['(0.33333333333333)'], {}), '(0.33333333333333)\n', (2470, 2488), False, 'import pytest\n'), ((3112, 3127), 'numpy.ones', 'np.ones', (['[8, 5]'], {}), '([8, 5])\n', (3119, 3127), True, 'import numpy as np\n'), ((3179, 3194), 'numpy.ones', 'np.ones', (['[3, 2]'], {}), '([3, 2])\n', (3186, 3194), True, 'import numpy as np\n'), ((3246, 3261), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (3253, 
3261), True, 'import numpy as np\n'), ((3313, 3328), 'numpy.ones', 'np.ones', (['[3, 1]'], {}), '([3, 1])\n', (3320, 3328), True, 'import numpy as np\n'), ((4652, 4683), 'pytest.approx', 'pytest.approx', (['(0.33333333333333)'], {}), '(0.33333333333333)\n', (4665, 4683), False, 'import pytest\n'), ((5093, 5118), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5106, 5118), False, 'import pytest\n'), ((5157, 5218), 'fairlearn.metrics.metric_by_group', 'metrics.metric_by_group', (['mock_func_weight', 'y_a', 'y_p', 'gid', 's_w'], {}), '(mock_func_weight, y_a, y_p, gid, s_w)\n', (5180, 5218), True, 'import fairlearn.metrics as metrics\n'), ((5751, 5776), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5764, 5776), False, 'import pytest\n'), ((5815, 5876), 'fairlearn.metrics.metric_by_group', 'metrics.metric_by_group', (['mock_func_weight', 'y_a', 'y_p', 'gid', 's_w'], {}), '(mock_func_weight, y_a, y_p, gid, s_w)\n', (5838, 5876), True, 'import fairlearn.metrics as metrics\n'), ((6420, 6445), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6433, 6445), False, 'import pytest\n'), ((6484, 6545), 'fairlearn.metrics.metric_by_group', 'metrics.metric_by_group', (['mock_func_weight', 'y_a', 'y_p', 'gid', 's_w'], {}), '(mock_func_weight, y_a, y_p, gid, s_w)\n', (6507, 6545), True, 'import fairlearn.metrics as metrics\n'), ((9655, 9683), 'pytest.approx', 'pytest.approx', (['(0.66666666667)'], {}), '(0.66666666667)\n', (9668, 9683), False, 'import pytest\n'), ((10938, 10963), 'pytest.approx', 'pytest.approx', (['(1.0 / 21.0)'], {}), '(1.0 / 21.0)\n', (10951, 10963), False, 'import pytest\n')] |
import os
import numpy as np
import pickle
import argparse
import glob
import scipy
from tqdm import tqdm
from youtube8m.video_level_nn_models.defaults import YOUTUBE8M_LABELS_N
from scipy.sparse import lil_matrix
def build_index(video_ids, features, scores):
    """Build a video_id -> row mapping and a sparse (videos x labels) score matrix.

    Each (video_id, label_index, score) triple writes `score` into the row of
    its video and the column of its label.
    """
    unique_ids = np.unique(video_ids)
    index = {vid: row for row, vid in enumerate(unique_ids)}
    sparse_matrix = lil_matrix((len(unique_ids), YOUTUBE8M_LABELS_N))
    triples = zip(video_ids, features, scores)
    for vid, label_idx, score in tqdm(triples, total=len(video_ids)):
        sparse_matrix[index[vid], label_idx] = score
    return index, sparse_matrix
def main(args):
    """Gather per-fold level-2 predictions into a single on-disk id -> score index."""
    os.makedirs(os.path.dirname(args.output_filename), exist_ok=True)
    fold_dirs = sorted(glob.glob(os.path.join(args.path_with_folds, 'fold_*')))
    feature_dirs = sorted(glob.glob(os.path.join(args.path_with_features, 'folds', 'fold_*')))
    features, scores, video_ids = _load_train(fold_dirs, feature_dirs)
    # Flatten the per-fold lists of arrays into single flat arrays.
    features, scores, video_ids = (
        np.concatenate(arrays, axis=0) for arrays in (features, scores, video_ids)
    )
    index, sparse_matrix = build_index(video_ids, features, scores)
    # Persist the id -> row index as a pickle and the score matrix as CSR .npz.
    with open(args.output_filename, 'wb') as f:
        pickle.dump(index, f)
    scipy.sparse.save_npz(args.output_filename + '.matr', sparse_matrix.tocsr())
def _load_train(folds_paths, features_paths):
    """Load per-fold label indices, prediction scores and video ids from disk.

    Returns three parallel lists with one array per fold.
    """
    features, scores, video_ids = [], [], []
    for fold_dir, feature_dir in zip(folds_paths, features_paths):
        # Column 0 of the 'features' file holds the (float-encoded) label index.
        label_idx = np.load(os.path.join(feature_dir, 'features'))[:, 0].astype(int)
        features.append(label_idx)
        scores.append(np.load(os.path.join(fold_dir, 'predictions.npy')))
        video_ids.append(np.load(os.path.join(fold_dir, 'video_ids.npy')))
    return features, scores, video_ids
if __name__ == '__main__':
    # CLI entry point: three positional paths, then run the gathering pipeline.
    arg_parser = argparse.ArgumentParser("This script gathers all level 2 data to single video_id -> label score index")
    for positional in ("path_with_folds", "path_with_features", "output_filename"):
        arg_parser.add_argument(positional)
    main(arg_parser.parse_args())
| [
"pickle.dump",
"argparse.ArgumentParser",
"os.path.dirname",
"numpy.unique",
"os.path.join",
"numpy.concatenate"
] | [((279, 299), 'numpy.unique', 'np.unique', (['video_ids'], {}), '(video_ids)\n', (288, 299), True, 'import numpy as np\n'), ((960, 992), 'numpy.concatenate', 'np.concatenate', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (974, 992), True, 'import numpy as np\n'), ((1006, 1036), 'numpy.concatenate', 'np.concatenate', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (1020, 1036), True, 'import numpy as np\n'), ((1053, 1086), 'numpy.concatenate', 'np.concatenate', (['video_ids'], {'axis': '(0)'}), '(video_ids, axis=0)\n', (1067, 1086), True, 'import numpy as np\n'), ((1809, 1922), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""This script gathers all level 2 data to single video_id -> label score index"""'], {}), "(\n 'This script gathers all level 2 data to single video_id -> label score index'\n )\n", (1832, 1922), False, 'import argparse\n'), ((637, 674), 'os.path.dirname', 'os.path.dirname', (['args.output_filename'], {}), '(args.output_filename)\n', (652, 674), False, 'import os\n'), ((1211, 1232), 'pickle.dump', 'pickle.dump', (['index', 'f'], {}), '(index, f)\n', (1222, 1232), False, 'import pickle\n'), ((726, 770), 'os.path.join', 'os.path.join', (['args.path_with_folds', '"""fold_*"""'], {}), "(args.path_with_folds, 'fold_*')\n", (738, 770), False, 'import os\n'), ((811, 867), 'os.path.join', 'os.path.join', (['args.path_with_features', '"""folds"""', '"""fold_*"""'], {}), "(args.path_with_features, 'folds', 'fold_*')\n", (823, 867), False, 'import os\n'), ((1607, 1649), 'os.path.join', 'os.path.join', (['fold_path', '"""predictions.npy"""'], {}), "(fold_path, 'predictions.npy')\n", (1619, 1649), False, 'import os\n'), ((1685, 1725), 'os.path.join', 'os.path.join', (['fold_path', '"""video_ids.npy"""'], {}), "(fold_path, 'video_ids.npy')\n", (1697, 1725), False, 'import os\n'), ((1517, 1556), 'os.path.join', 'os.path.join', (['features_path', '"""features"""'], {}), "(features_path, 'features')\n", (1529, 1556), False, 'import 
os\n')] |
from __future__ import print_function
import unittest
import numpy as np
from keras import Input, Model
from keras.layers import BatchNormalization, Dense, Flatten
from keras.optimizers import Adam
from keras.utils.np_utils import to_categorical
from mode_normalization import ModeNormalization
class ExecutionTest(unittest.TestCase):
def test_1(self):
# ModeNormalization with only one mode is equivalent to BatchNormalization
a = np.random.uniform(size=(50, 10, 10, 3))
i1 = Input(shape=(10, 10, 3))
x1 = ModeNormalization(k=1)(i1)
m1 = Model(inputs=[i1], outputs=[x1])
p1 = m1.predict(a)
print(p1.shape)
i2 = Input(shape=(10, 10, 3))
x2 = BatchNormalization()(i2)
m2 = Model(inputs=[i2], outputs=[x2])
p2 = m2.predict(a)
print(p2.shape)
np.testing.assert_almost_equal(p1, p2)
def test_2(self):
num_modes = 6
input_shape = (50, 10, 10, 3)
a = np.random.uniform(size=input_shape)
i1 = Input(shape=(10, 10, 3))
x1 = ModeNormalization(k=num_modes)(i1)
m1 = Model(inputs=[i1], outputs=[x1])
p1 = m1.predict(a)
assert input_shape == p1.shape
def test_3(self):
num_modes = 6
h, w, num_channels = 10, 10, 3
input_shape = (50, h, w, num_channels)
a = np.random.uniform(size=input_shape)
i1 = Input(shape=(h, w, num_channels))
x1 = ModeNormalization(k=num_modes)(i1)
m1 = Model(inputs=[i1], outputs=[x1])
weight_shapes = [a.shape for a in m1.get_weights()]
assert weight_shapes == [(num_channels, num_modes), # gates_kernel
(num_modes,), # gates_bias
(num_channels,), # gates_gamma
(num_channels,), # gates_beta
(num_modes, num_channels), # moving_mean
(num_modes, num_channels)] # moving_variance
def test_4(self):
num_modes = 3
h, w, num_channels = 10, 10, 3
input_shape = (50, h, w, num_channels)
a = np.random.uniform(size=input_shape)
b = np.random.uniform(size=input_shape)
i1 = Input(shape=(h, w, num_channels))
x1 = ModeNormalization(k=num_modes)(i1)
m1 = Model(inputs=[i1], outputs=[x1])
m1.compile(optimizer='adam', loss='mse')
p1 = m1.predict(b)
m1.fit(a, a, epochs=2)
p2 = m1.predict(b)
np.testing.assert_equal(np.any(np.not_equal(p1, p2)), True)
def test_5(self):
h, w, num_channels = 10, 10, 3
input_shape = (50, h, w, num_channels)
a = np.random.uniform(size=input_shape)
b = np.random.uniform(size=input_shape)
i1 = Input(shape=(h, w, num_channels))
x1 = ModeNormalization(k=1)(i1)
m1 = Model(inputs=[i1], outputs=[x1])
m1.compile(optimizer='adam', loss='mse')
m1.fit(a, a, epochs=1)
p1 = m1.predict(b)
i2 = Input(shape=(h, w, num_channels))
x2 = BatchNormalization()(i2)
m2 = Model(inputs=[i2], outputs=[x2])
m2.compile(optimizer='adam', loss='mse')
m2.fit(a, a, epochs=1)
p2 = m2.predict(b)
np.testing.assert_almost_equal(p1, p2, decimal=1)
def test_6(self):
    """The gating network learns to separate two well-separated input modes."""
    h, w, num_channels = 10, 10, 3
    input_shape = (50, h, w, num_channels)
    # Two disjoint modes: positive-valued vs negative-valued inputs.
    mode1 = np.random.uniform(size=input_shape, low=0, high=1)
    mode2 = np.random.uniform(size=input_shape, low=-1, high=0)
    x_data = np.vstack([mode1, mode2])
    y_data = to_categorical(np.vstack([0] * 50 + [1] * 50), num_classes=2)
    i1 = Input(shape=(h, w, num_channels))
    x1 = ModeNormalization(k=2)(i1)
    x1 = Flatten()(x1)
    x1 = Dense(2, activation='softmax')(x1)
    m1 = Model(inputs=[i1], outputs=[x1])
    m1.compile(optimizer=Adam(lr=0.01), loss='categorical_crossentropy')
    m1.fit(x_data, y_data, epochs=10, shuffle=True)

    def gate_inference(x):
        # Replays the gate by hand: argmax of the mean-pooled input
        # projected through the first two weights (presumably
        # gates_kernel and gates_bias, per their shapes in test_3).
        return (np.dot(np.mean(x, axis=(1, 2)), m1.get_weights()[0]) + m1.get_weights()[1]).argmax(axis=-1)
    mode1_val = np.mean(gate_inference(mode1))
    mode2_val = np.mean(gate_inference(mode2))
    # It's possible that in some cases, the network cannot really separate the two modes.
    # I would say it fails ~5% of the time.
    if mode1_val < mode2_val:
        assert mode1_val < 0.3 and mode2_val >= 0.7
    else:
        assert mode2_val < 0.3 and mode1_val >= 0.7
if __name__ == '__main__':
    # Run only the mode-separation test when executed as a script.
    ExecutionTest().test_6()
| [
"numpy.random.uniform",
"keras.Input",
"keras.Model",
"numpy.testing.assert_almost_equal",
"keras.layers.Flatten",
"keras.optimizers.Adam",
"numpy.not_equal",
"mode_normalization.ModeNormalization",
"keras.layers.BatchNormalization",
"keras.layers.Dense",
"numpy.mean",
"numpy.vstack"
] | [((458, 497), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(50, 10, 10, 3)'}), '(size=(50, 10, 10, 3))\n', (475, 497), True, 'import numpy as np\n'), ((512, 536), 'keras.Input', 'Input', ([], {'shape': '(10, 10, 3)'}), '(shape=(10, 10, 3))\n', (517, 536), False, 'from keras import Input, Model\n'), ((590, 622), 'keras.Model', 'Model', ([], {'inputs': '[i1]', 'outputs': '[x1]'}), '(inputs=[i1], outputs=[x1])\n', (595, 622), False, 'from keras import Input, Model\n'), ((688, 712), 'keras.Input', 'Input', ([], {'shape': '(10, 10, 3)'}), '(shape=(10, 10, 3))\n', (693, 712), False, 'from keras import Input, Model\n'), ((764, 796), 'keras.Model', 'Model', ([], {'inputs': '[i2]', 'outputs': '[x2]'}), '(inputs=[i2], outputs=[x2])\n', (769, 796), False, 'from keras import Input, Model\n'), ((857, 895), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['p1', 'p2'], {}), '(p1, p2)\n', (887, 895), True, 'import numpy as np\n'), ((991, 1026), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (1008, 1026), True, 'import numpy as np\n'), ((1041, 1065), 'keras.Input', 'Input', ([], {'shape': '(10, 10, 3)'}), '(shape=(10, 10, 3))\n', (1046, 1065), False, 'from keras import Input, Model\n'), ((1127, 1159), 'keras.Model', 'Model', ([], {'inputs': '[i1]', 'outputs': '[x1]'}), '(inputs=[i1], outputs=[x1])\n', (1132, 1159), False, 'from keras import Input, Model\n'), ((1369, 1404), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (1386, 1404), True, 'import numpy as np\n'), ((1419, 1452), 'keras.Input', 'Input', ([], {'shape': '(h, w, num_channels)'}), '(shape=(h, w, num_channels))\n', (1424, 1452), False, 'from keras import Input, Model\n'), ((1514, 1546), 'keras.Model', 'Model', ([], {'inputs': '[i1]', 'outputs': '[x1]'}), '(inputs=[i1], outputs=[x1])\n', (1519, 1546), False, 'from keras import Input, Model\n'), ((2171, 2206), 
'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (2188, 2206), True, 'import numpy as np\n'), ((2219, 2254), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (2236, 2254), True, 'import numpy as np\n'), ((2269, 2302), 'keras.Input', 'Input', ([], {'shape': '(h, w, num_channels)'}), '(shape=(h, w, num_channels))\n', (2274, 2302), False, 'from keras import Input, Model\n'), ((2364, 2396), 'keras.Model', 'Model', ([], {'inputs': '[i1]', 'outputs': '[x1]'}), '(inputs=[i1], outputs=[x1])\n', (2369, 2396), False, 'from keras import Input, Model\n'), ((2720, 2755), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (2737, 2755), True, 'import numpy as np\n'), ((2768, 2803), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (2785, 2803), True, 'import numpy as np\n'), ((2818, 2851), 'keras.Input', 'Input', ([], {'shape': '(h, w, num_channels)'}), '(shape=(h, w, num_channels))\n', (2823, 2851), False, 'from keras import Input, Model\n'), ((2905, 2937), 'keras.Model', 'Model', ([], {'inputs': '[i1]', 'outputs': '[x1]'}), '(inputs=[i1], outputs=[x1])\n', (2910, 2937), False, 'from keras import Input, Model\n'), ((3059, 3092), 'keras.Input', 'Input', ([], {'shape': '(h, w, num_channels)'}), '(shape=(h, w, num_channels))\n', (3064, 3092), False, 'from keras import Input, Model\n'), ((3144, 3176), 'keras.Model', 'Model', ([], {'inputs': '[i2]', 'outputs': '[x2]'}), '(inputs=[i2], outputs=[x2])\n', (3149, 3176), False, 'from keras import Input, Model\n'), ((3293, 3342), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['p1', 'p2'], {'decimal': '(1)'}), '(p1, p2, decimal=1)\n', (3323, 3342), True, 'import numpy as np\n'), ((3468, 3518), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'input_shape', 'low': '(0)', 'high': '(1)'}), '(size=input_shape, 
low=0, high=1)\n', (3485, 3518), True, 'import numpy as np\n'), ((3535, 3586), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'input_shape', 'low': '(-1)', 'high': '(0)'}), '(size=input_shape, low=-1, high=0)\n', (3552, 3586), True, 'import numpy as np\n'), ((3604, 3629), 'numpy.vstack', 'np.vstack', (['[mode1, mode2]'], {}), '([mode1, mode2])\n', (3613, 3629), True, 'import numpy as np\n'), ((3723, 3756), 'keras.Input', 'Input', ([], {'shape': '(h, w, num_channels)'}), '(shape=(h, w, num_channels))\n', (3728, 3756), False, 'from keras import Input, Model\n'), ((3885, 3917), 'keras.Model', 'Model', ([], {'inputs': '[i1]', 'outputs': '[x1]'}), '(inputs=[i1], outputs=[x1])\n', (3890, 3917), False, 'from keras import Input, Model\n'), ((550, 572), 'mode_normalization.ModeNormalization', 'ModeNormalization', ([], {'k': '(1)'}), '(k=1)\n', (567, 572), False, 'from mode_normalization import ModeNormalization\n'), ((726, 746), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (744, 746), False, 'from keras.layers import BatchNormalization, Dense, Flatten\n'), ((1079, 1109), 'mode_normalization.ModeNormalization', 'ModeNormalization', ([], {'k': 'num_modes'}), '(k=num_modes)\n', (1096, 1109), False, 'from mode_normalization import ModeNormalization\n'), ((1466, 1496), 'mode_normalization.ModeNormalization', 'ModeNormalization', ([], {'k': 'num_modes'}), '(k=num_modes)\n', (1483, 1496), False, 'from mode_normalization import ModeNormalization\n'), ((2316, 2346), 'mode_normalization.ModeNormalization', 'ModeNormalization', ([], {'k': 'num_modes'}), '(k=num_modes)\n', (2333, 2346), False, 'from mode_normalization import ModeNormalization\n'), ((2865, 2887), 'mode_normalization.ModeNormalization', 'ModeNormalization', ([], {'k': '(1)'}), '(k=1)\n', (2882, 2887), False, 'from mode_normalization import ModeNormalization\n'), ((3106, 3126), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3124, 3126), False, 'from 
keras.layers import BatchNormalization, Dense, Flatten\n'), ((3662, 3692), 'numpy.vstack', 'np.vstack', (['([0] * 50 + [1] * 50)'], {}), '([0] * 50 + [1] * 50)\n', (3671, 3692), True, 'import numpy as np\n'), ((3770, 3792), 'mode_normalization.ModeNormalization', 'ModeNormalization', ([], {'k': '(2)'}), '(k=2)\n', (3787, 3792), False, 'from mode_normalization import ModeNormalization\n'), ((3810, 3819), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3817, 3819), False, 'from keras.layers import BatchNormalization, Dense, Flatten\n'), ((3837, 3867), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (3842, 3867), False, 'from keras.layers import BatchNormalization, Dense, Flatten\n'), ((2570, 2590), 'numpy.not_equal', 'np.not_equal', (['p1', 'p2'], {}), '(p1, p2)\n', (2582, 2590), True, 'import numpy as np\n'), ((3947, 3960), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (3951, 3960), False, 'from keras.optimizers import Adam\n'), ((4110, 4133), 'numpy.mean', 'np.mean', (['x'], {'axis': '(1, 2)'}), '(x, axis=(1, 2))\n', (4117, 4133), True, 'import numpy as np\n')] |
# This file is distributed under MIT license as part of the project flocksims.
# See LICENSE file for details.
# (c) <NAME>, 2019
# Quick script to sweep some parameters for the flocking simulations
import os, sys
import numpy as np
import mkl
mkl.set_num_threads(1)
from multiprocessing import Pool
def flocking(args):
    """Run one flocking simulation for a single parameter combination.

    Args:
        args: 3-tuple ``(T, J, rep)`` -- temperature, coupling constant,
            and replicate index (used to make the output filename unique).

    Side effects: invokes the compiled ``./c++/flocking`` binary via the
    shell, which writes its results to an HDF5 file under ``base_folder``.
    The single-tuple signature exists so the function can be used with
    ``multiprocessing.Pool.map``.
    """
    # Unpack the tuple idiomatically instead of indexing args[0..2].
    T, J, rep = args
    base_folder = '/net/levsha/share/simongh/sims/flocking/sweep_Jordan'
    # base_folder = '/home/simongh/simulations/flocking/dense_sweep_dt0.00001'
    filename = os.path.join(base_folder, '{2}_T={0}_J={1}.h5'.format(T, J, rep))
    cmd = './c++/flocking -N 128 -o {2} -dt 1e-4 -v0 0.088 -doTheory -ae 1000 -runtime 500 -T {0} -J {1}'.format(T, J, filename)
    os.system(cmd)
if __name__ == '__main__':
    # Temperatures swept log-uniformly over five decades; the coupling J
    # is fixed at 1 for this run (the commented sweep was used previously).
    Ts = np.logspace(-3, 2, 50);
    Js = np.array([1]);#np.logspace(-3, 3, 30);
    rep = 0;
    paramlist = []
    # 15 independent replicates of the full (T, J) grid; `rep` is a running
    # counter so every output file gets a unique name.
    for i in range(15):
        for T in Ts:
            for J in Js:
                paramlist.append((T, J, rep))
                rep += 1
    # Fan the simulations out over 32 worker processes.
    with Pool(32) as mypool:
        mypool.map(flocking, paramlist)
| [
"numpy.logspace",
"mkl.set_num_threads",
"os.system",
"numpy.array",
"multiprocessing.Pool"
] | [((246, 268), 'mkl.set_num_threads', 'mkl.set_num_threads', (['(1)'], {}), '(1)\n', (265, 268), False, 'import mkl\n'), ((739, 753), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (748, 753), False, 'import os, sys\n'), ((791, 813), 'numpy.logspace', 'np.logspace', (['(-3)', '(2)', '(50)'], {}), '(-3, 2, 50)\n', (802, 813), True, 'import numpy as np\n'), ((824, 837), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (832, 837), True, 'import numpy as np\n'), ((1046, 1054), 'multiprocessing.Pool', 'Pool', (['(32)'], {}), '(32)\n', (1050, 1054), False, 'from multiprocessing import Pool\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# RUN: python3 %s | FileCheck %s
from typing import Tuple
import unittest
import numpy as np
from numpy.core.fromnumeric import shape
import oneflow.compatible.single_client as flow
import oneflow.compatible.single_client.typing as oft
import oneflow.framework.dtype as dtype_util
from test_util import GenArgDict
from collections import OrderedDict
def _get_regularizer(model_name):
    """Return the L2 weight-decay regularizer.

    ``model_name`` is currently ignored: every model gets the same decay.
    """
    # all decay
    return flow.regularizers.l2(0.00004)
def _batch_norm(inputs, last=False, data_format="NHWC"):
    """Batch-normalize ``inputs`` (training mode, L2-regularized gamma/beta).

    Args:
        inputs: the tensor to normalize.
        last: if True, both gamma and the moving variance are initialized
            to zeros instead of ones.
        data_format: "NHWC" (channel-last, axis 3) or anything else
            (channel axis 1). Defaults to "NHWC", which preserves the
            previous hard-coded behavior.

    Returns:
        The batch-normalized tensor.
    """
    initializer = flow.zeros_initializer() if last else flow.ones_initializer()
    weight_regularizer = flow.regularizers.l2(0.5)
    trainable = True
    training = True
    # Normalize over the channel axis: 3 for NHWC, otherwise 1.
    # (Replaces the old assign-then-overwrite pattern.)
    axis = 3 if data_format == "NHWC" else 1
    return flow.layers.batch_normalization(
        inputs=inputs,
        axis=axis,
        momentum=0.9,  # 97,
        epsilon=1e-5,
        center=True,
        scale=True,
        trainable=trainable,
        training=training,
        gamma_initializer=initializer,
        moving_variance_initializer=initializer,
        gamma_regularizer=weight_regularizer,
        beta_regularizer=weight_regularizer,
    )
@flow.unittest.skip_unless_1n1d()
class TestMLIROptimizations(flow.unittest.TestCase):
    """Builds a BN + add + ReLU train job so MLIR fusion can be inspected.

    The actual verification is done by FileCheck against the lowered IR
    (see the ``# CHECK:`` line at the bottom of this file), not by
    numeric assertions here.
    """

    @unittest.skip("")
    def test_cpu(self):
        # CPU variant is currently disabled via unittest.skip above.
        d = OrderedDict(
            {"shape": [(2, 96, 96, 3)], "in_type": [flow.float32], "device": ["cpu"],}
        )
        for arg in GenArgDict(d):
            self.run_job(**arg)

    def test_gpu(self):
        d = OrderedDict(
            {"shape": [(2, 96, 96, 3)], "in_type": [flow.float32], "device": ["gpu"],}
        )
        for arg in GenArgDict(d):
            self.run_job(**arg)

    def run_job(test_case, device=None, in_type=None, shape=None):
        """Define and execute one training job with a BN->add->ReLU chain."""
        assert shape is not None
        flow.clear_default_session()
        func_config = flow.FunctionConfig()

        @flow.global_function(type="train", function_config=func_config)
        def FuseBnAddReluJob(
            x: oft.Numpy.Placeholder(shape, dtype=in_type)
        ) -> oft.Numpy:
            addend = flow.constant_like(x, 2)
            with flow.scope.placement(device, "0:0-0"):
                x = (
                    flow.get_variable(
                        "x1",
                        shape=shape,
                        dtype=in_type,
                        initializer=flow.random_uniform_initializer(
                            minval=-10, maxval=10
                        ),
                        trainable=True,
                    )
                    + x
                )
                # This BN -> add -> ReLU chain is what the MLIR pass
                # should fuse into one normalization_add_relu op.
                loss = flow.nn.relu(_batch_norm(x, last=False) + addend) + 1
                flow.optimizer.SGD(
                    flow.optimizer.PiecewiseConstantScheduler([], [0.0001]), momentum=0
                ).minimize(loss)
                return loss

        np_in_type = dtype_util.convert_oneflow_dtype_to_numpy_dtype(in_type)
        x = (np.random.rand(*shape) * 10).astype(np_in_type)
        FuseBnAddReluJob(x)
# CHECK: %y, %reserve_space, %mean, %inv_variance = "oneflow.normalization_add_relu"
if __name__ == "__main__":
    # Standard unittest entry point (stdout is piped to FileCheck).
    unittest.main()
| [
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.zeros_initializer",
"unittest.main",
"oneflow.compatible.single_client.FunctionConfig",
"test_util.GenArgDict",
"numpy.random.rand",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_clie... | [((1751, 1783), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1781, 1783), True, 'import oneflow.compatible.single_client as flow\n'), ((1003, 1030), 'oneflow.compatible.single_client.regularizers.l2', 'flow.regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (1023, 1030), True, 'import oneflow.compatible.single_client as flow\n'), ((1190, 1215), 'oneflow.compatible.single_client.regularizers.l2', 'flow.regularizers.l2', (['(0.5)'], {}), '(0.5)\n', (1210, 1215), True, 'import oneflow.compatible.single_client as flow\n'), ((1340, 1657), 'oneflow.compatible.single_client.layers.batch_normalization', 'flow.layers.batch_normalization', ([], {'inputs': 'inputs', 'axis': 'axis', 'momentum': '(0.9)', 'epsilon': '(1e-05)', 'center': '(True)', 'scale': '(True)', 'trainable': 'trainable', 'training': 'training', 'gamma_initializer': 'initializer', 'moving_variance_initializer': 'initializer', 'gamma_regularizer': 'weight_regularizer', 'beta_regularizer': 'weight_regularizer'}), '(inputs=inputs, axis=axis, momentum=0.9,\n epsilon=1e-05, center=True, scale=True, trainable=trainable, training=\n training, gamma_initializer=initializer, moving_variance_initializer=\n initializer, gamma_regularizer=weight_regularizer, beta_regularizer=\n weight_regularizer)\n', (1371, 1657), True, 'import oneflow.compatible.single_client as flow\n'), ((1842, 1859), 'unittest.skip', 'unittest.skip', (['""""""'], {}), "('')\n", (1855, 1859), False, 'import unittest\n'), ((3722, 3737), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3735, 3737), False, 'import unittest\n'), ((1090, 1114), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (1112, 1114), True, 'import oneflow.compatible.single_client as flow\n'), ((1128, 1151), 'oneflow.compatible.single_client.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (1149, 1151), True, 
'import oneflow.compatible.single_client as flow\n'), ((1896, 1986), 'collections.OrderedDict', 'OrderedDict', (["{'shape': [(2, 96, 96, 3)], 'in_type': [flow.float32], 'device': ['cpu']}"], {}), "({'shape': [(2, 96, 96, 3)], 'in_type': [flow.float32], 'device':\n ['cpu']})\n", (1907, 1986), False, 'from collections import OrderedDict\n'), ((2025, 2038), 'test_util.GenArgDict', 'GenArgDict', (['d'], {}), '(d)\n', (2035, 2038), False, 'from test_util import GenArgDict\n'), ((2109, 2199), 'collections.OrderedDict', 'OrderedDict', (["{'shape': [(2, 96, 96, 3)], 'in_type': [flow.float32], 'device': ['gpu']}"], {}), "({'shape': [(2, 96, 96, 3)], 'in_type': [flow.float32], 'device':\n ['gpu']})\n", (2120, 2199), False, 'from collections import OrderedDict\n'), ((2238, 2251), 'test_util.GenArgDict', 'GenArgDict', (['d'], {}), '(d)\n', (2248, 2251), False, 'from test_util import GenArgDict\n'), ((2394, 2422), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2420, 2422), True, 'import oneflow.compatible.single_client as flow\n'), ((2445, 2466), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2464, 2466), True, 'import oneflow.compatible.single_client as flow\n'), ((2477, 2540), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2497, 2540), True, 'import oneflow.compatible.single_client as flow\n'), ((3457, 3513), 'oneflow.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['in_type'], {}), '(in_type)\n', (3504, 3513), True, 'import oneflow.framework.dtype as dtype_util\n'), ((2675, 2699), 'oneflow.compatible.single_client.constant_like', 'flow.constant_like', (['x', '(2)'], {}), '(x, 2)\n', (2693, 2699), True, 'import oneflow.compatible.single_client as flow\n'), ((2586, 2629), 
'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['shape'], {'dtype': 'in_type'}), '(shape, dtype=in_type)\n', (2607, 2629), True, 'import oneflow.compatible.single_client.typing as oft\n'), ((2717, 2754), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device', '"""0:0-0"""'], {}), "(device, '0:0-0')\n", (2737, 2754), True, 'import oneflow.compatible.single_client as flow\n'), ((3527, 3549), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (3541, 3549), True, 'import numpy as np\n'), ((2959, 3013), 'oneflow.compatible.single_client.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(-10)', 'maxval': '(10)'}), '(minval=-10, maxval=10)\n', (2990, 3013), True, 'import oneflow.compatible.single_client as flow\n'), ((3306, 3361), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (3347, 3361), True, 'import oneflow.compatible.single_client as flow\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2018 MIT Probabilistic Computing Project.
# Released under Apache 2.0; refer to LICENSE.txt.
import numpy as np
import pytest
from cgpm.utils.general import get_prng
from cgpm.utils.test import gen_data_table
from cgpm2.categorical import Categorical
from cgpm2.crp import CRP
from cgpm2.normal import Normal
from cgpm2.poisson import Poisson
from cgpm2.flexible_rowmix import FlexibleRowMixture
from cgpm2.product import Product
from cgpm2.walks import add_cgpm
from cgpm2.walks import remove_cgpm
from cgpm2.transition_views import get_cgpm_view_proposals_existing
from cgpm2.transition_views import get_cgpm_view_proposals_singleton
from cgpm2.transition_views import get_dataset
def get_crosscat(prng):
    """Construct a three-view CrossCat product over six output columns.

    View 0: two Normal columns (0, 1); view 1: a Poisson (2) and two
    Normals (3, 4); view 2: one 4-ary Categorical (5). Row-partition CRPs
    use outputs -1, -2, -3.
    """
    def build_view(crp_output, component_factories):
        # The CRP is instantiated before the per-component base CGPMs,
        # matching the original construction order against the shared RNG.
        row_divide = CRP([crp_output], [], rng=prng)
        components = Product([make() for make in component_factories], rng=prng)
        return FlexibleRowMixture(
            cgpm_row_divide=row_divide,
            cgpm_components_base=components,
            rng=prng)
    views = [
        build_view(-1, [
            lambda: Normal([0], [], rng=prng),
            lambda: Normal([1], [], rng=prng),
        ]),
        build_view(-2, [
            lambda: Poisson([2], [], rng=prng),
            lambda: Normal([3], [], rng=prng),
            lambda: Normal([4], [], rng=prng),
        ]),
        build_view(-3, [
            lambda: Categorical([5], [], distargs={'k':4}, rng=prng),
        ]),
    ]
    return Product(views, rng=prng)
def populate_crosscat(crosscat, prng):
    """Observe a 10-row synthetic table into ``crosscat`` and return it.

    Two cells of column 1 (rows 0 and 3) are set to NaN to exercise
    missing-data handling downstream.
    """
    X, Zv, Zrv = gen_data_table(
        n_rows=10,
        view_weights=[.4, .6],
        cluster_weights=[[.3,.4,.3],[.5,.5]],
        cctypes=['normal','normal','poisson','normal','normal','categorical'],
        distargs=[None, None, None, None, None, {'k':4}],
        separation=[0.99]*6,
        rng=prng)
    X[0,1] = X[3,1] = float('nan')
    # gen_data_table returns columns-major data; transpose to rows.
    for rowid, row in enumerate(np.transpose(X)):
        crosscat.observe(rowid, dict(enumerate(row)))
    return crosscat
def test_crosscat_add_remove():
    """Adding and removing CGPMs keeps the flat outputs list consistent."""
    rng = get_prng(2)
    state = get_crosscat(rng)
    # A fourth view with one categorical column (output 6, CRP output -4).
    extra_view = FlexibleRowMixture(
        cgpm_row_divide=CRP([-4], [], rng=rng),
        cgpm_components_base=Product(
            [Categorical([6], [], distargs={'k':4}, rng=rng)], rng=rng),
        rng=rng)
    state = add_cgpm(state, extra_view)
    assert state.outputs == [-1, 0, 1, -2, 2, 3, 4, -3, 5, -4, 6]
    # Removing by a CRP output (-1) drops that whole view.
    state = remove_cgpm(state, -1)
    assert state.outputs == [-2, 2, 3, 4, -3, 5, -4, 6]
    # Removing by a column output (5) drops just that column's view entry.
    state = remove_cgpm(state, 5)
    assert state.outputs == [-2, 2, 3, 4, -4, 6]
def test_get_view_proposals():
    """View proposals place a column block into existing or singleton views."""
    prng = get_prng(2)
    crosscat = get_crosscat(prng)
    # Get block proposals of outputs [0,1] into all three views.
    proposals = get_cgpm_view_proposals_existing(crosscat, [0,1])
    assert len(proposals) == 3
    # Proposal into the home view is unchanged; into the other two views
    # the outputs [0, 1] are appended.
    assert proposals[0].outputs == crosscat.cgpms[0].outputs
    assert proposals[1].outputs == crosscat.cgpms[1].outputs + [0, 1]
    assert proposals[2].outputs == crosscat.cgpms[2].outputs + [0, 1]
    # Get proposals of outputs [0,1] into 2 singleton views.
    proposals = get_cgpm_view_proposals_singleton(crosscat, [0,1], 2)
    assert len(proposals) == 2
    # Each singleton view's outputs are its fresh CRP output plus [0, 1].
    assert proposals[0].outputs[1:] == [0,1]
    assert proposals[1].outputs[1:] == [0,1]
    # Fail to get proposals for outputs in different views.
    with pytest.raises(Exception):
        proposals = get_cgpm_view_proposals_existing(crosscat, [0,2])
def test_logpdf_basic():
    """logpdf of each observed row in view 0 is negative (or ~0 for NaN).

    Fix: ``row.values()[0]`` only works on Python 2 (where ``values()``
    is a list); on Python 3 dict views are not subscriptable. Use an
    iterator to take the first value, which works on both.
    """
    prng = get_prng(2)
    crosscat = get_crosscat(prng)
    crosscat = populate_crosscat(crosscat, prng)
    for _rowid, row in get_dataset(crosscat, 0):
        logp = crosscat.logpdf(None, row)
        if np.isnan(next(iter(row.values()))):
            # A missing value contributes nothing, so the density is 1.
            assert np.allclose(logp, 0)
        else:
            assert logp < 0
| [
"cgpm2.normal.Normal",
"cgpm2.walks.remove_cgpm",
"cgpm2.transition_views.get_cgpm_view_proposals_existing",
"numpy.allclose",
"cgpm2.categorical.Categorical",
"cgpm2.walks.add_cgpm",
"numpy.transpose",
"cgpm2.product.Product",
"cgpm.utils.general.get_prng",
"cgpm2.transition_views.get_cgpm_view_p... | [((1504, 1544), 'cgpm2.product.Product', 'Product', (['[view0, view1, view2]'], {'rng': 'prng'}), '([view0, view1, view2], rng=prng)\n', (1511, 1544), False, 'from cgpm2.product import Product\n'), ((1602, 1872), 'cgpm.utils.test.gen_data_table', 'gen_data_table', ([], {'n_rows': '(10)', 'view_weights': '[0.4, 0.6]', 'cluster_weights': '[[0.3, 0.4, 0.3], [0.5, 0.5]]', 'cctypes': "['normal', 'normal', 'poisson', 'normal', 'normal', 'categorical']", 'distargs': "[None, None, None, None, None, {'k': 4}]", 'separation': '([0.99] * 6)', 'rng': 'prng'}), "(n_rows=10, view_weights=[0.4, 0.6], cluster_weights=[[0.3, \n 0.4, 0.3], [0.5, 0.5]], cctypes=['normal', 'normal', 'poisson',\n 'normal', 'normal', 'categorical'], distargs=[None, None, None, None,\n None, {'k': 4}], separation=[0.99] * 6, rng=prng)\n", (1616, 1872), False, 'from cgpm.utils.test import gen_data_table\n'), ((1947, 1962), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (1959, 1962), True, 'import numpy as np\n'), ((2168, 2179), 'cgpm.utils.general.get_prng', 'get_prng', (['(2)'], {}), '(2)\n', (2176, 2179), False, 'from cgpm.utils.general import get_prng\n'), ((2464, 2501), 'cgpm2.walks.add_cgpm', 'add_cgpm', (['crosscat', 'infinite_mixture4'], {}), '(crosscat, infinite_mixture4)\n', (2472, 2501), False, 'from cgpm2.walks import add_cgpm\n'), ((2586, 2611), 'cgpm2.walks.remove_cgpm', 'remove_cgpm', (['crosscat', '(-1)'], {}), '(crosscat, -1)\n', (2597, 2611), False, 'from cgpm2.walks import remove_cgpm\n'), ((2686, 2710), 'cgpm2.walks.remove_cgpm', 'remove_cgpm', (['crosscat', '(5)'], {}), '(crosscat, 5)\n', (2697, 2710), False, 'from cgpm2.walks import remove_cgpm\n'), ((2806, 2817), 'cgpm.utils.general.get_prng', 'get_prng', (['(2)'], {}), '(2)\n', (2814, 2817), False, 'from cgpm.utils.general import get_prng\n'), ((2933, 2983), 'cgpm2.transition_views.get_cgpm_view_proposals_existing', 'get_cgpm_view_proposals_existing', (['crosscat', '[0, 1]'], 
{}), '(crosscat, [0, 1])\n', (2965, 2983), False, 'from cgpm2.transition_views import get_cgpm_view_proposals_existing\n'), ((3292, 3346), 'cgpm2.transition_views.get_cgpm_view_proposals_singleton', 'get_cgpm_view_proposals_singleton', (['crosscat', '[0, 1]', '(2)'], {}), '(crosscat, [0, 1], 2)\n', (3325, 3346), False, 'from cgpm2.transition_views import get_cgpm_view_proposals_singleton\n'), ((3669, 3680), 'cgpm.utils.general.get_prng', 'get_prng', (['(2)'], {}), '(2)\n', (3677, 3680), False, 'from cgpm.utils.general import get_prng\n'), ((3787, 3811), 'cgpm2.transition_views.get_dataset', 'get_dataset', (['crosscat', '(0)'], {}), '(crosscat, 0)\n', (3798, 3811), False, 'from cgpm2.transition_views import get_dataset\n'), ((3536, 3560), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3549, 3560), False, 'import pytest\n'), ((3582, 3632), 'cgpm2.transition_views.get_cgpm_view_proposals_existing', 'get_cgpm_view_proposals_existing', (['crosscat', '[0, 2]'], {}), '(crosscat, [0, 2])\n', (3614, 3632), False, 'from cgpm2.transition_views import get_cgpm_view_proposals_existing\n'), ((811, 834), 'cgpm2.crp.CRP', 'CRP', (['[-1]', '[]'], {'rng': 'prng'}), '([-1], [], rng=prng)\n', (814, 834), False, 'from cgpm2.crp import CRP\n'), ((1049, 1072), 'cgpm2.crp.CRP', 'CRP', (['[-2]', '[]'], {'rng': 'prng'}), '([-2], [], rng=prng)\n', (1052, 1072), False, 'from cgpm2.crp import CRP\n'), ((1327, 1350), 'cgpm2.crp.CRP', 'CRP', (['[-3]', '[]'], {'rng': 'prng'}), '([-3], [], rng=prng)\n', (1330, 1350), False, 'from cgpm2.crp import CRP\n'), ((2283, 2306), 'cgpm2.crp.CRP', 'CRP', (['[-4]', '[]'], {'rng': 'prng'}), '([-4], [], rng=prng)\n', (2286, 2306), False, 'from cgpm2.crp import CRP\n'), ((3912, 3932), 'numpy.allclose', 'np.allclose', (['logp', '(0)'], {}), '(logp, 0)\n', (3923, 3932), True, 'import numpy as np\n'), ((887, 912), 'cgpm2.normal.Normal', 'Normal', (['[0]', '[]'], {'rng': 'prng'}), '([0], [], rng=prng)\n', (893, 912), False, 'from 
cgpm2.normal import Normal\n'), ((926, 951), 'cgpm2.normal.Normal', 'Normal', (['[1]', '[]'], {'rng': 'prng'}), '([1], [], rng=prng)\n', (932, 951), False, 'from cgpm2.normal import Normal\n'), ((1125, 1151), 'cgpm2.poisson.Poisson', 'Poisson', (['[2]', '[]'], {'rng': 'prng'}), '([2], [], rng=prng)\n', (1132, 1151), False, 'from cgpm2.poisson import Poisson\n'), ((1165, 1190), 'cgpm2.normal.Normal', 'Normal', (['[3]', '[]'], {'rng': 'prng'}), '([3], [], rng=prng)\n', (1171, 1190), False, 'from cgpm2.normal import Normal\n'), ((1204, 1229), 'cgpm2.normal.Normal', 'Normal', (['[4]', '[]'], {'rng': 'prng'}), '([4], [], rng=prng)\n', (1210, 1229), False, 'from cgpm2.normal import Normal\n'), ((1403, 1452), 'cgpm2.categorical.Categorical', 'Categorical', (['[5]', '[]'], {'distargs': "{'k': 4}", 'rng': 'prng'}), "([5], [], distargs={'k': 4}, rng=prng)\n", (1414, 1452), False, 'from cgpm2.categorical import Categorical\n'), ((2359, 2408), 'cgpm2.categorical.Categorical', 'Categorical', (['[6]', '[]'], {'distargs': "{'k': 4}", 'rng': 'prng'}), "([6], [], distargs={'k': 4}, rng=prng)\n", (2370, 2408), False, 'from cgpm2.categorical import Categorical\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _opt-conv-tensorcore:
"""
################################################################
# TensorCore Introduction
# -----------------------
import tvm
from tvm import te
import numpy as np
from tvm.contrib import nvcc
from tvm import auto_tensorize as at
from functools import reduce
# The sizes of inputs and filters
batch_size = 256
height = 14
width = 14
in_channels = 256
out_channels = 512
kernel_h = 3
kernel_w = 3
pad_h = 1
pad_w = 1
stride_h = 1
stride_w = 1
# TensorCore shape
block_size = 16
# Batch and channel dimensions must tile evenly into 16x16 WMMA fragments.
assert batch_size % block_size == 0
assert in_channels % block_size == 0
assert out_channels % block_size == 0
# Input feature map: (N, H, W, IC, n, ic)
data_shape = (
    batch_size // block_size,
    height,
    width,
    in_channels // block_size,
    block_size,
    block_size,
)
# Kernel: (H, W, IC, OC, ic, oc)
kernel_shape = (
    kernel_h,
    kernel_w,
    in_channels // block_size,
    out_channels // block_size,
    block_size,
    block_size,
)
# Output feature map: (N, H, W, OC, n, oc)
output_shape = (
    batch_size // block_size,
    height,
    width,
    out_channels // block_size,
    block_size,
    block_size,
)
# Reduction axes
kh = te.reduce_axis((0, kernel_h), name="kh")
kw = te.reduce_axis((0, kernel_w), name="kw")
ic = te.reduce_axis((0, in_channels // block_size), name="ic")
ii = te.reduce_axis((0, block_size), name="ii")
# Algorithm: fp16 inputs, fp32 accumulation.
A = te.placeholder(data_shape, name="A", dtype="float16")
W = te.placeholder(kernel_shape, name="W", dtype="float16")
bias = te.placeholder(output_shape, name="bias", dtype="float16")
# Apad: zero-padded input (pad_h/pad_w on the spatial dims).
Apad = te.compute(
    (
        batch_size // block_size,
        height + 2 * pad_h,
        width + 2 * pad_w,
        in_channels // block_size,
        block_size,
        block_size,
    ),
    lambda n, h, w, i, nn, ii: tvm.tir.if_then_else(
        tvm.tir.all(h >= pad_h, h - pad_h < height,
                    w >= pad_w, w - pad_w < width),
        A[n, h - pad_h, w - pad_w, i, nn, ii],
        tvm.tir.const(0.0, "float16"),
    ),
    name="Apad",
)
# Conv: blocked conv2d; the fp16 product is cast to fp32 before the sum.
Conv = te.compute(
    output_shape,
    lambda n, h, w, o, nn, oo: te.sum(
        (Apad[n, h * stride_h + kh, w * stride_w +
              kw, ic, nn, ii]
         * W[kh, kw, ic, o, ii, oo]).astype("float32"),
        axis=[ic, kh, kw, ii],
    ),
    name="Conv",
)
# Output: convolution plus the bias term (bias cast up to fp32).
Output = te.compute(
    output_shape,
    lambda n, h, w, o, nn, oo:
    Conv[n, h, w, o, nn, oo] + bias[n, h, w, o, nn, oo].astype("float32"),
    name="Output"
)
###############################################################################
# Memory Scope
# ------------
#
# Build the WMMAFp16Fp32Bias abstraction DAG and a schedule over its output
# tensors; intermediate abstraction tensors go to "local" scope and the two
# matrix inputs are staged through shared memory.
hw_abs_dag = at.WMMAFp16Fp32Bias()
compute_key = "nnn"
shape_key = "<KEY>"
input_names, output_names, nodes, read_graph, feed_graph = \
    at.construct_dag(
        hw_abs_dag, compute_key, shape_key, [Apad, W], [Conv], [bias], [Output])
output_tensors = reduce(
    lambda x, y: x + y, [nodes[x] for x in output_names], [])
s = tvm.te.create_schedule([x.op for x in output_tensors])
# Every non-output tensor produced by the abstraction lives in local scope.
for cap in hw_abs_dag.hw_abs_dict.keys():
    if cap not in output_names:
        tensors = nodes[cap]
        for t in tensors:
            s[t].set_scope("local")
shared_tensors = []
# Cache the first two inputs (Apad and W) in shared memory for all readers.
for inp_name in input_names[:2]:
    inps = nodes[inp_name]
    assert len(inps) == 1
    readers = reduce(
        lambda x, y: x + y, [nodes[x] for x in feed_graph[inp_name]], [])
    SS = s.cache_read(inps[0], "shared", readers)
    shared_tensors.append(SS)
print(shared_tensors)
AS = shared_tensors[0]
WS = shared_tensors[1]
AF = nodes["load_a"][0]
WF = nodes["load_b"][0]
ConvF = nodes["mma"][0]
ConvFF = output_tensors[0].op.input_tensors[0]
biasF = ConvFF.op.input_tensors[1]
Conv = output_tensors[0]
s[Apad].compute_inline()
# Define tiling sizes
block_row_warps = 4
block_col_warps = 2
warp_row_tiles = 2
warp_col_tiles = 4
warp_size = 32
chunk = 2
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
block_z = te.thread_axis("blockIdx.z")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
# Bind the fused spatial axes to blockIdx.z; tile batch and out-channel
# blocks over blockIdx.x/y and warps over threadIdx.y/z.
nc, hc, wc, oc, nnc, ooc = Conv.op.axis
block_k = s[Conv].fuse(hc, wc)
s[Conv].bind(block_k, block_z)
nc, nci = s[Conv].split(nc, factor=warp_row_tiles)
block_i, nc = s[Conv].split(nc, factor=block_row_warps)
oc, oci = s[Conv].split(oc, factor=warp_col_tiles)
block_j, oc = s[Conv].split(oc, factor=block_col_warps)
s[Conv].reorder(block_k, block_i, block_j, nc, oc, nci, oci, nnc, ooc)
s[Conv].bind(block_i, block_x)
s[Conv].bind(block_j, block_y)
s[Conv].bind(nc, thread_y)
s[Conv].bind(oc, thread_z)
# Schedule local computation
s[ConvF].compute_at(s[Conv], oc)
n, h, w, o, nnf, oof = ConvF.op.axis
ko, ki = s[ConvF].split(ic, factor=chunk)
s[ConvF].reorder(ko, kh, ki, kw, n, o, nnf, oof, ii)
s[ConvFF].compute_at(s[Conv], oc)
s[biasF].compute_at(s[Conv], oc)
# Move intermediate computation into each output compute tile
s[AF].compute_at(s[ConvF], kw)
s[WF].compute_at(s[ConvF], kw)
# Schedule for A's share memory
s[AS].compute_at(s[ConvF], kh)
n, h, w, i, nn, ii = AS.op.axis
tx, xo = s[AS].split(n, nparts=block_row_warps)
ty, yo = s[AS].split(xo, nparts=block_col_warps)
t = s[AS].fuse(nn, ii)
to, ti = s[AS].split(t, factor=warp_size)
s[AS].bind(tx, thread_y)
s[AS].bind(ty, thread_z)
s[AS].bind(ti, thread_x)
# Schedule for W's share memory
s[WS].compute_at(s[ConvF], kh)
kh, kw, ic, o, ii, oo = WS.op.axis
tx, xo = s[WS].split(o, nparts=block_row_warps)
ty, yo = s[WS].split(xo, nparts=block_col_warps)
t = s[WS].fuse(ii, oo)
to, ti = s[WS].split(t, nparts=warp_size)
s[WS].bind(tx, thread_y)
s[WS].bind(ty, thread_z)
s[WS].bind(to, thread_x)
s[WS].vectorize(ti)
# print(tvm.lower(s, [A, W, Conv], simple_mode=True))
# Fetch the WMMA intrinsics and tensorize the fragment-sized inner axes.
load_a = hw_abs_dag.get_intrinsic(compute_key, shape_key, "load_a")
load_b = hw_abs_dag.get_intrinsic(compute_key, shape_key, "load_b")
load_bias = hw_abs_dag.get_intrinsic(compute_key, shape_key, "load_bias")
store = hw_abs_dag.get_intrinsic(compute_key, shape_key, "store")
mma = hw_abs_dag.get_intrinsic(compute_key, shape_key, "mma")
add_bias = hw_abs_dag.get_intrinsic(compute_key, shape_key, "bias")
print(load_a)
print(load_b)
print(load_bias)
print(store)
print(mma)
# NOTE(review): this prints the `bias` placeholder, not the `add_bias`
# intrinsic fetched above -- possibly a typo; confirm intent.
print(bias)
s[AF].tensorize(AF.op.axis[-2], load_a)
s[WF].tensorize(WF.op.axis[-2], load_b)
s[biasF].tensorize(biasF.op.axis[-2], load_bias)
s[Conv].tensorize(nnc, store)
s[ConvF].tensorize(nnf, mma)
s[ConvFF].tensorize(ConvFF.op.axis[-2], add_bias)
# Lower, build for CUDA, and dump the generated source for inspection.
ir_module = tvm.lower(s, [A, W, bias, Conv], simple_mode=True)
print("Lowered IRModule")
print(ir_module)
func = tvm.build(ir_module, target="cuda")
print("Source Code")
print(func.imported_modules[0].get_source())
ctx = tvm.gpu(0)
# On TensorCore-capable GPUs, rebuild with a capped unroll step.
if nvcc.have_tensorcore(ctx.compute_version):
    with tvm.transform.PassContext(config={"tir.UnrollLoop":
                                       {"auto_max_step": 16}}):
        func = tvm.build(s, [A, W, bias, Conv], "cuda")
# Random fp16 inputs; fp32 output buffer starts zeroed.
a_np = np.random.uniform(size=data_shape).astype(A.dtype)
w_np = np.random.uniform(size=kernel_shape).astype(W.dtype)
bias_np = np.random.uniform(size=output_shape).astype(bias.dtype)
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
bias = tvm.nd.array(bias_np, ctx)
c = tvm.nd.array(np.zeros(output_shape, dtype=Conv.dtype), ctx)
# Time 10 runs and report the mean latency in milliseconds.
evaluator = func.time_evaluator(func.entry_name, ctx, number=10)
print("conv2d with tensor core: %f ms" % (
    evaluator(a, w, bias, c).mean * 1e3))
| [
"tvm.tir.const",
"tvm.te.placeholder",
"tvm.te.reduce_axis",
"numpy.random.uniform",
"tvm.nd.array",
"tvm.transform.PassContext",
"numpy.zeros",
"tvm.build",
"tvm.contrib.nvcc.have_tensorcore",
"tvm.auto_tensorize.WMMAFp16Fp32Bias",
"tvm.te.thread_axis",
"tvm.te.create_schedule",
"tvm.auto_t... | [((1965, 2005), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, kernel_h)'], {'name': '"""kh"""'}), "((0, kernel_h), name='kh')\n", (1979, 2005), False, 'from tvm import te\n'), ((2011, 2051), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, kernel_w)'], {'name': '"""kw"""'}), "((0, kernel_w), name='kw')\n", (2025, 2051), False, 'from tvm import te\n'), ((2057, 2114), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, in_channels // block_size)'], {'name': '"""ic"""'}), "((0, in_channels // block_size), name='ic')\n", (2071, 2114), False, 'from tvm import te\n'), ((2120, 2162), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, block_size)'], {'name': '"""ii"""'}), "((0, block_size), name='ii')\n", (2134, 2162), False, 'from tvm import te\n'), ((2180, 2233), 'tvm.te.placeholder', 'te.placeholder', (['data_shape'], {'name': '"""A"""', 'dtype': '"""float16"""'}), "(data_shape, name='A', dtype='float16')\n", (2194, 2233), False, 'from tvm import te\n'), ((2238, 2293), 'tvm.te.placeholder', 'te.placeholder', (['kernel_shape'], {'name': '"""W"""', 'dtype': '"""float16"""'}), "(kernel_shape, name='W', dtype='float16')\n", (2252, 2293), False, 'from tvm import te\n'), ((2301, 2359), 'tvm.te.placeholder', 'te.placeholder', (['output_shape'], {'name': '"""bias"""', 'dtype': '"""float16"""'}), "(output_shape, name='bias', dtype='float16')\n", (2315, 2359), False, 'from tvm import te\n'), ((3390, 3411), 'tvm.auto_tensorize.WMMAFp16Fp32Bias', 'at.WMMAFp16Fp32Bias', ([], {}), '()\n', (3409, 3411), True, 'from tvm import auto_tensorize as at\n'), ((3517, 3611), 'tvm.auto_tensorize.construct_dag', 'at.construct_dag', (['hw_abs_dag', 'compute_key', 'shape_key', '[Apad, W]', '[Conv]', '[bias]', '[Output]'], {}), '(hw_abs_dag, compute_key, shape_key, [Apad, W], [Conv], [\n bias], [Output])\n', (3533, 3611), True, 'from tvm import auto_tensorize as at\n'), ((3634, 3698), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', '[nodes[x] for x in output_names]', '[]'], {}), 
'(lambda x, y: x + y, [nodes[x] for x in output_names], [])\n', (3640, 3698), False, 'from functools import reduce\n'), ((3709, 3763), 'tvm.te.create_schedule', 'tvm.te.create_schedule', (['[x.op for x in output_tensors]'], {}), '([x.op for x in output_tensors])\n', (3731, 3763), False, 'import tvm\n'), ((4624, 4652), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (4638, 4652), False, 'from tvm import te\n'), ((4663, 4691), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.y"""'], {}), "('blockIdx.y')\n", (4677, 4691), False, 'from tvm import te\n'), ((4702, 4730), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.z"""'], {}), "('blockIdx.z')\n", (4716, 4730), False, 'from tvm import te\n'), ((4742, 4771), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (4756, 4771), False, 'from tvm import te\n'), ((4783, 4812), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.y"""'], {}), "('threadIdx.y')\n", (4797, 4812), False, 'from tvm import te\n'), ((4824, 4853), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.z"""'], {}), "('threadIdx.z')\n", (4838, 4853), False, 'from tvm import te\n'), ((7228, 7278), 'tvm.lower', 'tvm.lower', (['s', '[A, W, bias, Conv]'], {'simple_mode': '(True)'}), '(s, [A, W, bias, Conv], simple_mode=True)\n', (7237, 7278), False, 'import tvm\n'), ((7329, 7364), 'tvm.build', 'tvm.build', (['ir_module'], {'target': '"""cuda"""'}), "(ir_module, target='cuda')\n", (7338, 7364), False, 'import tvm\n'), ((7439, 7449), 'tvm.gpu', 'tvm.gpu', (['(0)'], {}), '(0)\n', (7446, 7449), False, 'import tvm\n'), ((7453, 7494), 'tvm.contrib.nvcc.have_tensorcore', 'nvcc.have_tensorcore', (['ctx.compute_version'], {}), '(ctx.compute_version)\n', (7473, 7494), False, 'from tvm.contrib import nvcc\n'), ((4050, 4122), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', '[nodes[x] for x in feed_graph[inp_name]]', '[]'], {}), '(lambda x, y: x + y, [nodes[x] for x in 
feed_graph[inp_name]], [])\n', (4056, 4122), False, 'from functools import reduce\n'), ((7888, 7911), 'tvm.nd.array', 'tvm.nd.array', (['a_np', 'ctx'], {}), '(a_np, ctx)\n', (7900, 7911), False, 'import tvm\n'), ((7920, 7943), 'tvm.nd.array', 'tvm.nd.array', (['w_np', 'ctx'], {}), '(w_np, ctx)\n', (7932, 7943), False, 'import tvm\n'), ((7955, 7981), 'tvm.nd.array', 'tvm.nd.array', (['bias_np', 'ctx'], {}), '(bias_np, ctx)\n', (7967, 7981), False, 'import tvm\n'), ((7505, 7580), 'tvm.transform.PassContext', 'tvm.transform.PassContext', ([], {'config': "{'tir.UnrollLoop': {'auto_max_step': 16}}"}), "(config={'tir.UnrollLoop': {'auto_max_step': 16}})\n", (7530, 7580), False, 'import tvm\n'), ((7643, 7683), 'tvm.build', 'tvm.build', (['s', '[A, W, bias, Conv]', '"""cuda"""'], {}), "(s, [A, W, bias, Conv], 'cuda')\n", (7652, 7683), False, 'import tvm\n'), ((8003, 8043), 'numpy.zeros', 'np.zeros', (['output_shape'], {'dtype': 'Conv.dtype'}), '(output_shape, dtype=Conv.dtype)\n', (8011, 8043), True, 'import numpy as np\n'), ((2617, 2691), 'tvm.tir.all', 'tvm.tir.all', (['(h >= pad_h)', '(h - pad_h < height)', '(w >= pad_w)', '(w - pad_w < width)'], {}), '(h >= pad_h, h - pad_h < height, w >= pad_w, w - pad_w < width)\n', (2628, 2691), False, 'import tvm\n'), ((2768, 2797), 'tvm.tir.const', 'tvm.tir.const', (['(0.0)', '"""float16"""'], {}), "(0.0, 'float16')\n", (2781, 2797), False, 'import tvm\n'), ((7695, 7729), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'data_shape'}), '(size=data_shape)\n', (7712, 7729), True, 'import numpy as np\n'), ((7757, 7793), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'kernel_shape'}), '(size=kernel_shape)\n', (7774, 7793), True, 'import numpy as np\n'), ((7824, 7860), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'output_shape'}), '(size=output_shape)\n', (7841, 7860), True, 'import numpy as np\n')] |
import time
import torch
import torch.nn.functional as F
import random
import numpy as np
from modules.optimizer import Optimizer
from model.biaffine_ner import NERParser
from config.conf import args_config, data_config
from utils.dataset import DataLoader
from utils.datautil import load_data, create_vocab, batch_variable
import torch.nn.utils as nn_utils
from logger.logger import logger
class Trainer(object):
def __init__(self, args, data_config):
self.args = args
self.data_config = data_config
genre = args.genre
self.train_set, self.val_set, self.test_set = self.build_dataset(data_config, genre)
# self.vocabs = self.build_vocabs(data_config[genre]['train'],
# data_config['pretrain']['word_embedding'],
# data_config['pretrain']['bert_vocab'])
self.vocabs = self.build_vocabs(self.train_set,
data_config['pretrain']['word_embedding'],
data_config['pretrain']['bert_vocab'])
self.model = NERParser(num_wds=len(self.vocabs['word']),
num_chars=len(self.vocabs['char']),
num_tags=len(self.vocabs['tag']),
wd_embed_dim=args.wd_embed_dim,
char_embed_dim=args.char_embed_dim,
tag_embed_dim=args.tag_embed_dim,
bert_embed_dim=args.bert_embed_dim,
hidden_size=args.hidden_size,
num_rnn_layer=args.rnn_depth,
ffnn_size=args.ffnn_size,
num_lbl=len(self.vocabs['ner']),
bert_path=data_path['pretrain']['bert_model'],
num_bert_layer=args.bert_layers,
ffnn_drop=args.ffnn_drop,
dropout=args.dropout,
embed_weight=self.vocabs['word'].embeddings).to(args.device)
print(self.model)
total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
print("Training %d trainable parameters..." % total_params)
def build_dataset(self, data_config, genre='conll_2003'):
train_set = load_data(data_config[genre]['train'])
val_set = load_data(data_config[genre]['dev'])
test_set = load_data(data_config[genre]['test'])
print('train data size:', len(train_set))
print('validate data size:', len(val_set))
print('test data size:', len(test_set))
return train_set, val_set, test_set
# def build_vocabs(self, train_data_path, embed_file=None, bert_vocab_path=None):
# vocabs = create_vocab(train_data_path, embed_file, bert_vocab_path)
# # save_to(self.args.vocab_chkp, vocabs)
# return vocabs
def build_vocabs(self, datasets, embed_file=None, bert_vocab_path=None):
vocabs = create_vocab(datasets, embed_file, bert_vocab_path)
# save_to(self.args.vocab_chkp, vocabs)
return vocabs
def calc_loss(self, span_score, ner_ids):
'''
:param span_score: (b, t, t, c)
:param ner_ids: (b, t, t)
:return:
'''
num_ner = ner_ids.gt(0).sum()
num_cls = span_score.size(-1)
loss = F.cross_entropy(span_score.reshape(-1, num_cls), ner_ids.reshape(-1), ignore_index=0, reduction='sum')
return loss / num_ner
def ner_gold(self, ner_ids, sent_lens, ner_vocab=None):
'''
:param ner_ids: (b, t, t)
:param sent_lens: (b, )
:param ner_vocab:
:return:
'''
gold_res = []
for ner_id, l in zip(ner_ids, sent_lens):
res = []
for s in range(l):
for e in range(s, l):
type_id = ner_id[s, e].item()
if type_id not in [ner_vocab.pad_idx, ner_vocab.unk_idx]:
res.append((s, e, type_id))
gold_res.append(res)
return gold_res
def ner_pred(self, pred_score, sent_lens, ner_vocab=None):
'''
:param pred_score: (b, t, t, c)
:param sent_lens: (b, )
# :param mask: (b, t) 1对应有效部分,0对应pad填充
:return:
'''
# (b, t, t)
type_idxs = pred_score.detach().argmax(dim=-1)
# (b, t, t)
span_max_score = pred_score.detach().gather(dim=-1, index=type_idxs.unsqueeze(-1)).squeeze(-1)
final = []
for span_score, tids, l in zip(span_max_score, type_idxs, sent_lens):
cands = []
for s in range(l):
for e in range(s, l):
type_id = tids[s, e].item()
if type_id not in [ner_vocab.pad_idx, ner_vocab.unk_idx]:
cands.append((s, e, type_id, span_score[s, e].item()))
pre_res = []
for s, e, cls, _ in sorted(cands, key=lambda x: x[3], reverse=True):
for s_, e_, _ in pre_res:
if s_ < s <= e_ < e or s < s_ <= e < e_: # flat ner
break
if s <= s_ <= e_ <= e or s_ <= s <= e <= e_: # nested ner
break
else:
pre_res.append((s, e, cls))
final.append(pre_res)
return final
def calc_acc(self, preds, golds, return_prf=False):
'''
:param preds: [(s, e, cls_id) ...]
:param golds: [(s, e, cls_id) ...]
:param return_prf: if True, return prf value, otherwise return number value
:return:
'''
assert len(preds) == len(golds)
nb_pred, nb_gold, nb_right = 0, 0, 0
for pred_spans, gold_spans in zip(preds, golds):
pred_span_set = set(pred_spans)
gold_span_set = set(gold_spans)
nb_pred += len(pred_span_set)
nb_gold += len(gold_span_set)
nb_right += len(pred_span_set & gold_span_set)
if return_prf:
return self.calc_prf(nb_right, nb_pred, nb_gold)
else:
return nb_right, nb_pred, nb_gold
def calc_prf(self, nb_right, nb_pred, nb_gold):
p = nb_right / (nb_pred + 1e-30)
r = nb_right / (nb_gold + 1e-30)
f = (2 * nb_right) / (nb_gold + nb_pred + 1e-30)
return p, r, f
def train_eval(self):
train_loader = DataLoader(self.train_set, batch_size=self.args.batch_size, shuffle=True)
self.args.max_step = self.args.epoch * (len(train_loader) // self.args.update_step)
print('max step:', self.args.max_step)
optimizer = Optimizer(filter(lambda p: p.requires_grad, self.model.parameters()), args)
best_dev_metric, best_test_metric = dict(), dict()
patient = 0
for ep in range(1, 1+self.args.epoch):
train_loss = 0.
self.model.train()
t1 = time.time()
train_right, train_pred, train_gold = 0, 0, 0
for i, batcher in enumerate(train_loader):
batch = batch_variable(batcher, self.vocabs)
batch.to_device(self.args.device)
pred_score = self.model(batch.wd_ids, batch.ch_ids, batch.tag_ids, batch.bert_inps, batch.mask)
loss = self.calc_loss(pred_score, batch.ner_ids)
loss_val = loss.data.item()
train_loss += loss_val
sent_lens = batch.wd_ids.gt(0).sum(dim=1)
gold_res = self.ner_gold(batch.ner_ids, sent_lens, self.vocabs['ner'])
pred_res = self.ner_pred(pred_score, sent_lens, self.vocabs['ner'])
nb_right, nb_pred, nb_gold = self.calc_acc(pred_res, gold_res, return_prf=False)
train_right += nb_right
train_pred += nb_pred
train_gold += nb_gold
train_p, train_r, train_f = self.calc_prf(train_right, train_pred, train_gold)
if self.args.update_step > 1:
loss = loss / self.args.update_step
loss.backward()
if (i + 1) % self.args.update_step == 0 or (i == self.args.max_step - 1):
nn_utils.clip_grad_norm_(filter(lambda p: p.requires_grad, self.model.parameters()),
max_norm=self.args.grad_clip)
optimizer.step()
self.model.zero_grad()
logger.info('[Epoch %d] Iter%d time cost: %.2fs, lr: %.6f, train loss: %.3f, P: %.3f, R: %.3f, F: %.3f' % (
ep, i + 1, (time.time() - t1), optimizer.get_lr(), loss_val, train_p, train_r, train_f))
dev_metric = self.evaluate('dev')
if dev_metric['f'] > best_dev_metric.get('f', 0):
best_dev_metric = dev_metric
test_metric = self.evaluate('test')
if test_metric['f'] > best_test_metric.get('f', 0):
# check_point = {'model': self.model.state_dict(), 'settings': args}
# torch.save(check_point, self.args.model_chkp)
best_test_metric = test_metric
patient = 0
else:
patient += 1
logger.info('[Epoch %d] train loss: %.4f, lr: %f, patient: %d, dev_metric: %s, test_metric: %s' % (
ep, train_loss, optimizer.get_lr(), patient, best_dev_metric, best_test_metric))
# if patient >= (self.args.patient // 2 + 1): # 训练一定epoch, dev性能不上升, decay lr
# optimizer.lr_decay(0.95)
if patient >= self.args.patient: # early stopping
break
logger.info('Final Metric: %s' % best_test_metric)
def evaluate(self, mode='test'):
if mode == 'dev':
test_loader = DataLoader(self.val_set, batch_size=self.args.test_batch_size)
elif mode == 'test':
test_loader = DataLoader(self.test_set, batch_size=self.args.test_batch_size)
else:
raise ValueError('Invalid Mode!!!')
self.model.eval()
nb_right_all, nb_pred_all, nb_gold_all = 0, 0, 0
with torch.no_grad():
for i, batcher in enumerate(test_loader):
batch = batch_variable(batcher, self.vocabs)
batch.to_device(self.args.device)
pred_score = self.model(batch.wd_ids, batch.ch_ids, batch.tag_ids, batch.bert_inps, batch.mask)
sent_lens = batch.wd_ids.gt(0).sum(dim=1)
gold_res = self.ner_gold(batch.ner_ids, sent_lens, self.vocabs['ner'])
pred_res = self.ner_pred(pred_score, sent_lens, self.vocabs['ner'])
nb_right, nb_pred, nb_gold = self.calc_acc(pred_res, gold_res, return_prf=False)
nb_right_all += nb_right
nb_pred_all += nb_pred
nb_gold_all += nb_gold
p, r, f = self.calc_prf(nb_right_all, nb_pred_all, nb_gold_all)
return dict(p=p, r=r, f=f)
if __name__ == '__main__':
random.seed(1347)
np.random.seed(2343)
torch.manual_seed(1453)
torch.cuda.manual_seed(1347)
torch.cuda.manual_seed_all(1453)
print('cuda available:', torch.cuda.is_available())
print('cuDNN available:', torch.backends.cudnn.enabled)
print('gpu numbers:', torch.cuda.device_count())
args = args_config()
if torch.cuda.is_available() and args.cuda >= 0:
args.device = torch.device('cuda', args.cuda)
torch.cuda.empty_cache()
else:
args.device = torch.device('cpu')
data_path = data_config('./config/data_path.json')
trainer = Trainer(args, data_path)
trainer.train_eval()
| [
"numpy.random.seed",
"torch.manual_seed",
"config.conf.data_config",
"torch.cuda.manual_seed",
"torch.cuda.device_count",
"time.time",
"utils.datautil.batch_variable",
"torch.cuda.manual_seed_all",
"logger.logger.logger.info",
"random.seed",
"torch.cuda.is_available",
"torch.device",
"utils.... | [((11161, 11178), 'random.seed', 'random.seed', (['(1347)'], {}), '(1347)\n', (11172, 11178), False, 'import random\n'), ((11183, 11203), 'numpy.random.seed', 'np.random.seed', (['(2343)'], {}), '(2343)\n', (11197, 11203), True, 'import numpy as np\n'), ((11208, 11231), 'torch.manual_seed', 'torch.manual_seed', (['(1453)'], {}), '(1453)\n', (11225, 11231), False, 'import torch\n'), ((11236, 11264), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(1347)'], {}), '(1347)\n', (11258, 11264), False, 'import torch\n'), ((11269, 11301), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(1453)'], {}), '(1453)\n', (11295, 11301), False, 'import torch\n'), ((11484, 11497), 'config.conf.args_config', 'args_config', ([], {}), '()\n', (11495, 11497), False, 'from config.conf import args_config, data_config\n'), ((11707, 11745), 'config.conf.data_config', 'data_config', (['"""./config/data_path.json"""'], {}), "('./config/data_path.json')\n", (11718, 11745), False, 'from config.conf import args_config, data_config\n'), ((2399, 2437), 'utils.datautil.load_data', 'load_data', (["data_config[genre]['train']"], {}), "(data_config[genre]['train'])\n", (2408, 2437), False, 'from utils.datautil import load_data, create_vocab, batch_variable\n'), ((2456, 2492), 'utils.datautil.load_data', 'load_data', (["data_config[genre]['dev']"], {}), "(data_config[genre]['dev'])\n", (2465, 2492), False, 'from utils.datautil import load_data, create_vocab, batch_variable\n'), ((2512, 2549), 'utils.datautil.load_data', 'load_data', (["data_config[genre]['test']"], {}), "(data_config[genre]['test'])\n", (2521, 2549), False, 'from utils.datautil import load_data, create_vocab, batch_variable\n'), ((3077, 3128), 'utils.datautil.create_vocab', 'create_vocab', (['datasets', 'embed_file', 'bert_vocab_path'], {}), '(datasets, embed_file, bert_vocab_path)\n', (3089, 3128), False, 'from utils.datautil import load_data, create_vocab, batch_variable\n'), ((6535, 6608), 
'utils.dataset.DataLoader', 'DataLoader', (['self.train_set'], {'batch_size': 'self.args.batch_size', 'shuffle': '(True)'}), '(self.train_set, batch_size=self.args.batch_size, shuffle=True)\n', (6545, 6608), False, 'from utils.dataset import DataLoader\n'), ((9799, 9849), 'logger.logger.logger.info', 'logger.info', (["('Final Metric: %s' % best_test_metric)"], {}), "('Final Metric: %s' % best_test_metric)\n", (9810, 9849), False, 'from logger.logger import logger\n'), ((11332, 11357), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11355, 11357), False, 'import torch\n'), ((11445, 11470), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (11468, 11470), False, 'import torch\n'), ((11505, 11530), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11528, 11530), False, 'import torch\n'), ((11573, 11604), 'torch.device', 'torch.device', (['"""cuda"""', 'args.cuda'], {}), "('cuda', args.cuda)\n", (11585, 11604), False, 'import torch\n'), ((11613, 11637), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (11635, 11637), False, 'import torch\n'), ((11670, 11689), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (11682, 11689), False, 'import torch\n'), ((7046, 7057), 'time.time', 'time.time', ([], {}), '()\n', (7055, 7057), False, 'import time\n'), ((9940, 10002), 'utils.dataset.DataLoader', 'DataLoader', (['self.val_set'], {'batch_size': 'self.args.test_batch_size'}), '(self.val_set, batch_size=self.args.test_batch_size)\n', (9950, 10002), False, 'from utils.dataset import DataLoader\n'), ((10281, 10296), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10294, 10296), False, 'import torch\n'), ((7195, 7231), 'utils.datautil.batch_variable', 'batch_variable', (['batcher', 'self.vocabs'], {}), '(batcher, self.vocabs)\n', (7209, 7231), False, 'from utils.datautil import load_data, create_vocab, batch_variable\n'), ((10058, 10121), 'utils.dataset.DataLoader', 
'DataLoader', (['self.test_set'], {'batch_size': 'self.args.test_batch_size'}), '(self.test_set, batch_size=self.args.test_batch_size)\n', (10068, 10121), False, 'from utils.dataset import DataLoader\n'), ((10376, 10412), 'utils.datautil.batch_variable', 'batch_variable', (['batcher', 'self.vocabs'], {}), '(batcher, self.vocabs)\n', (10390, 10412), False, 'from utils.datautil import load_data, create_vocab, batch_variable\n'), ((8725, 8736), 'time.time', 'time.time', ([], {}), '()\n', (8734, 8736), False, 'import time\n')] |
import pdb
import sys
import operator
from collections import OrderedDict
import subprocess
import numpy as np
import json
import math
from transformers import *
import sys
import random
import time
# Module-wide configuration constants.
SINGLETONS_TAG = "_singletons_ "  # marker line written before the singleton-term list in the debug pivots file
EMPTY_TAG = "_empty_ "  # marker line written before the empty-cluster term list in the debug pivots file
OTHER_TAG = "OTHER"  # default label for vocab terms with no entity evidence (not referenced in this chunk)
AMBIGUOUS = "AMB"  # label for ambiguous terms (not referenced in this chunk)
MAX_VAL = 20  # generic cap (not referenced in this chunk)
TAIL_THRESH = 10  # tail-length threshold (not referenced in this chunk)
SUBWORD_COS_THRESHOLD = .1  # minimum similarity for neighbors collected during subword clustering
MAX_SUBWORD_PICKS = 20  # cap on labeled neighbors collected per subword cluster
UNK_ID = 1  # presumably the id of the unknown token -- TODO confirm against the vocab file
IGNORE_CONTINUATIONS=True  # when True, is_filtered_term() also skips '#'-prefixed subword continuations
USE_PRESERVE=True  # when True, filter_2g() spares short terms listed in the preserve file
try:
    from subprocess import DEVNULL  # Python 3.
except ImportError:
    # Python 2 fallback: subprocess.DEVNULL does not exist, so open the
    # platform null device instead. The original referenced `os` without
    # importing it, which raised NameError on this path; import it locally.
    import os
    DEVNULL = open(os.devnull, 'wb')
def read_embeddings(embeds_file):
    """Load a JSON-serialized embedding matrix from *embeds_file* into a numpy array."""
    with open(embeds_file) as handle:
        raw_rows = json.loads(handle.read())
    return np.array(raw_rows)
def consolidate_labels(existing_node,new_labels,new_counts):
    """Consolidates all the labels and counts for terms ignoring casing.

    For instance, egfr may not have an entity label associated with it
    but eGFR and EGFR may have. So if input is egfr, then this function
    ensures the combined entities set of eGFR and EGFR is made so as to
    return that union for egfr.

    Both the existing node and the new values encode parallel '/'-separated
    label and count strings; the result is re-sorted by descending count.
    """
    old_labels = existing_node["label"].split('/')
    old_counts = existing_node["counts"].split('/')
    add_labels = new_labels.split('/')
    add_counts = new_counts.split('/')
    assert len(old_labels) == len(old_counts)
    assert len(add_labels) == len(add_counts)
    merged = {}
    for label, count in zip(old_labels, old_counts):
        merged[label] = int(count)
    for label, count in zip(add_labels, add_counts):
        merged[label] = merged.get(label, 0) + int(count)
    # Stable sort by descending count; ties keep existing-node order first.
    ranked = OrderedDict(sorted(merged.items(), key=lambda kv: kv[1], reverse=True))
    labels_out = '/'.join(ranked.keys())
    counts_out = '/'.join(str(v) for v in ranked.values())
    return {"label": labels_out, "counts": counts_out}
def read_labels(labels_file):
    """Read a per-term label file with lines of the form 'LABELS COUNTS TERM'.

    Returns two dicts: a case-sensitive term -> {label, counts} map, and a
    lowercased variant where colliding casings are merged via
    consolidate_labels(). Malformed lines abort with an assertion.
    """
    terms_dict = OrderedDict()
    lc_terms_dict = OrderedDict()
    with open(labels_file,encoding="utf-8") as fin:
        count = 1
        for line in fin:
            fields = line.strip("\n").split()
            if (len(fields) != 3):
                print("Invalid line:",fields)
                assert(0)
            label, counts, term = fields
            terms_dict[term] = {"label": label, "counts": counts}
            lc_term = term.lower()
            if (lc_term in lc_terms_dict):
                # Same term with different casing seen before: merge label counts.
                lc_terms_dict[lc_term] = consolidate_labels(lc_terms_dict[lc_term], label, counts)
            else:
                lc_terms_dict[lc_term] = {"label": label, "counts": counts}
            count += 1
    print("count of labels in " + labels_file + ":", len(terms_dict))
    return terms_dict,lc_terms_dict
def read_entities(terms_file):
    ''' Read bootstrap entities file.

    Each non-empty line has the form 'LABEL1/LABEL2/... term'. Returns an
    OrderedDict mapping the lowercased term to its list of labels.
    Raises AssertionError on a malformed line or a duplicate
    (case-insensitive) term.
    '''
    terms_dict = OrderedDict()
    with open(terms_file,encoding="utf-8") as fin:
        count = 1
        for term in fin:
            term = term.strip("\n")
            if (len(term) >= 1):
                nodes = term.split()
                assert(len(nodes) == 2)
                lc_node = nodes[1].lower()
                # A duplicate indicates a corrupt bootstrap file. The original
                # dropped into pdb.set_trace() here (debug leftover) followed by
                # unreachable code; fail loudly with a message instead.
                assert lc_node not in terms_dict, "duplicate bootstrap entity term: " + lc_node
                terms_dict[lc_node] = nodes[0].split('/')
                count += 1
    print("count of entities in ",terms_file,":", len(terms_dict))
    return terms_dict
def read_terms(terms_file):
    """Read one term per line from *terms_file*.

    Empty lines are skipped. Returns an OrderedDict mapping each term to its
    1-based position among the non-empty lines.
    """
    terms_dict = OrderedDict()
    with open(terms_file,encoding="utf-8") as fin:
        index = 1
        for line in fin:
            token = line.strip("\n")
            if len(token) >= 1:
                terms_dict[token] = index
                index += 1
    print("count of tokens in ",terms_file,":", len(terms_dict))
    return terms_dict
def is_subword(key):
    """Treat a vocab entry as a subword continuation if it begins with '#'."""
    return str(key).startswith('#')
def is_filtered_term(key):
    """Words selector: skip special tokens ('[...]'), and also skip subword
    continuations when IGNORE_CONTINUATIONS is enabled."""
    text = str(key)
    if IGNORE_CONTINUATIONS:
        return is_subword(key) or text.startswith('[')
    return text.startswith('[')
def filter_2g(term,preserve_dict):
    """Filter out terms of length <= 2; when USE_PRESERVE is enabled, terms
    listed in *preserve_dict* are spared."""
    is_short = len(term) <= 2
    if USE_PRESERVE:
        return is_short and term not in preserve_dict
    return is_short
class BertEmbeds:
    def __init__(self, model_path,do_lower, terms_file,embeds_file,cache_embeds,normalize,labels_file,stats_file,preserve_2g_file,glue_words_file,bootstrap_entities_file):
        """Load tokenizer, vocab/label/entity files and embeddings, then
        precompute the full term-by-term similarity matrix.

        Args:
            model_path: model directory/name passed to BertTokenizer.from_pretrained.
            do_lower: 1 to lowercase during tokenization; any other value means cased.
            terms_file: vocab terms file, one term per line.
            embeds_file: JSON-serialized embedding matrix for the vocab terms.
            cache_embeds: not referenced in this constructor -- TODO confirm against callers.
            normalize: stored on self; note cache_matrix() below is invoked with
                True regardless of this flag.
            labels_file: per-term entity label/count file.
            stats_file: term stats file (loaded but noted below as unused).
            preserve_2g_file: short (2-char) terms to preserve during filtering.
            glue_words_file: glue-words list file.
            bootstrap_entities_file: manually harvested entity seed list.
        """
        do_lower = True if do_lower == 1 else False  # callers pass a 0/1 flag
        self.tokenizer = BertTokenizer.from_pretrained(model_path,do_lower_case=do_lower)
        self.terms_dict = read_terms(terms_file)
        self.labels_dict,self.lc_labels_dict = read_labels(labels_file)
        self.stats_dict = read_terms(stats_file) #Not used anymore
        self.preserve_dict = read_terms(preserve_2g_file)
        self.gw_dict = read_terms(glue_words_file)
        self.bootstrap_entities = read_entities(bootstrap_entities_file)
        # Embeddings must be loaded before cache_matrix() below reads them.
        self.embeddings = read_embeddings(embeds_file)
        self.dist_threshold_cache = {}
        self.dist_zero_cache = {}
        self.normalize = normalize
        self.similarity_matrix = self.cache_matrix(True)
def cache_matrix(self,normalize):
b_embeds = self
print("Computing similarity matrix (takes approx 5 minutes for ~100,000x100,000 matrix ...)")
start = time.time()
#pdb.set_trace()
vec_a = b_embeds.embeddings.T #vec_a shape (1024,)
if (normalize):
vec_a = vec_a/np.linalg.norm(vec_a,axis=0) #Norm is along axis 0 - rows
vec_a = vec_a.T #vec_a shape becomes (,1024)
similarity_matrix = np.inner(vec_a,vec_a)
end = time.time()
time_val = (end-start)*1000
print("Similarity matrix computation complete.Elapsed:",time_val/(1000*60)," minutes")
return similarity_matrix
def dump_vocab(self):
#pdb.set_trace()
size = self.tokenizer.vocab_size
for i in range(size):
names = self.tokenizer.convert_ids_to_tokens([i])
print(names[0])
def labeled_term(self,k):
if (k not in self.bootstrap_entities):
return False
labels = self.bootstrap_entities[k]
if (len(labels) > 1):
return True
assert(len(labels) == 1)
if (labels[0] == "UNTAGGED_ENTITY"):
return False
return True
    def subword_clustering(self):
        '''
        Generate clusters for terms in vocab.
        This is used for unsupervised NER (with subword usage).

        For each vocab term (skipping special '[...]' tokens and 1-char terms)
        the labeled, non-subword neighbors above SUBWORD_COS_THRESHOLD are
        gathered (capped at MAX_SUBWORD_PICKS). Sufficiently large clusters are
        anchored at a pivot term and their entity evidence is aggregated.
        Side effects: writes adaptive_debug_pivots.txt, entity_support.txt,
        pivots.json, pivots.txt, plus the files produced by
        create_entity_labels_file / create_inferred_entities_file.
        '''
        tokenize = False
        count = 1
        total = len(self.terms_dict)
        pivots_dict = OrderedDict()
        singletons_arr = []  # terms whose neighbor set was non-empty but too sparse
        full_entities_dict = OrderedDict()
        untagged_items_dict = OrderedDict()
        empty_arr = []  # terms with no qualifying neighbors at all
        total = len(self.terms_dict)
        dfp = open("adaptive_debug_pivots.txt","w")
        esupfp = open("entity_support.txt","w")
        for key in self.terms_dict:
            # Skip special tokens and single-character terms.
            if (key.startswith('[') or len(key) < 2):
                count += 1
                continue
            count += 1
            #print(":",key)
            print("Processing: ",key,"count:",count," of ",total)
            temp_sorted_d,dummy = self.get_distribution_for_term(key,False)
            sorted_d = self.get_terms_above_threshold(key,SUBWORD_COS_THRESHOLD,tokenize)
            arr = []
            # Keep only full-word neighbors that carry a real bootstrap label.
            for k in sorted_d:
                if (is_subword(k)):
                    continue
                if (not self.labeled_term(k.lower())):
                    continue
                arr.append(k)
                if (len(arr) > MAX_SUBWORD_PICKS):
                    break
            if (len(arr) > MAX_SUBWORD_PICKS/2):
                # Dense enough cluster: anchor it at its pivot term.
                max_mean_term,max_mean, std_dev,s_dict = self.find_pivot_subgraph(arr,tokenize)
                if (max_mean_term not in pivots_dict):
                    new_key = max_mean_term
                else:
                    # Pivot collision: disambiguate the key with the source term.
                    print("****Term already a pivot node:",max_mean_term, "key is :",key)
                    new_key = max_mean_term + "++" + key
                pivots_dict[new_key] = {"key":new_key,"orig":key,"mean":max_mean,"terms":arr}
                entity_type,entity_counts,curr_entities_dict = self.get_entity_type(arr,new_key,esupfp)
                self.aggregate_entities_for_terms(arr,curr_entities_dict,full_entities_dict,untagged_items_dict)
                print(entity_type,entity_counts,new_key,max_mean,std_dev,arr)
                dfp.write(entity_type + " " + entity_counts + " " + new_key + " " + new_key + " " + new_key+" "+key+" "+str(max_mean)+" "+ str(std_dev) + " " +str(arr)+"\n")
            else:
                if (len(arr) != 0):
                    print("***Sparse arr for term:",key)
                    singletons_arr.append(key)
                else:
                    print("***Empty arr for term:",key)
                    empty_arr.append(key)
            #if (count >= 500):
            #    break
        dfp.write(SINGLETONS_TAG + str(singletons_arr) + "\n")
        dfp.write(EMPTY_TAG + str(empty_arr) + "\n")
        with open("pivots.json","w") as fp:
            fp.write(json.dumps(pivots_dict))
        with open("pivots.txt","w") as fp:
            for k in pivots_dict:
                fp.write(k + '\n')
        dfp.close()
        esupfp.close()
        self.create_entity_labels_file(full_entities_dict)
        self.create_inferred_entities_file(untagged_items_dict)
    def adaptive_gen_pivot_graphs(self):
        '''
        Generate clusters for terms in vocab.
        This is used for unsupervised NER.

        Unlike subword_clustering, each term is visited at most once
        (picked_dict marks consumed terms) and the similarity threshold is
        chosen adaptively per term via get_tail_length. Side effects: writes
        adaptive_debug_pivots.txt, entity_support.txt, pivots.json,
        pivots.txt, plus the files produced by create_entity_labels_file /
        create_inferred_entities_file.
        '''
        tokenize = False
        count = 1
        total = len(self.terms_dict)
        picked_dict = OrderedDict()  # terms already absorbed into some cluster
        pivots_dict = OrderedDict()
        singletons_arr = []  # terms whose cluster had exactly one member
        full_entities_dict = OrderedDict()
        untagged_items_dict = OrderedDict()
        empty_arr = []  # terms whose cluster came out empty
        total = len(self.terms_dict)
        dfp = open("adaptive_debug_pivots.txt","w")
        esupfp = open("entity_support.txt","w")
        for key in self.terms_dict:
            if (is_filtered_term(key)):
                count += 1
                continue
            count += 1
            #print(":",key)
            # Skip terms already clustered and very short terms.
            if (key in picked_dict or len(key) <= 2):
                continue
            print("Processing ",count," of ",total)
            picked_dict[key] = 1
            # Adaptive threshold: derived from the tail of this term's
            # similarity distribution rather than a fixed constant.
            temp_sorted_d,dummy = self.get_distribution_for_term(key,False)
            dummy,threshold = self.get_tail_length(key,temp_sorted_d)
            sorted_d = self.get_terms_above_threshold(key,threshold,tokenize)
            arr = []
            for k in sorted_d:
                # Filtered/short neighbors are still marked as picked so they
                # are not revisited as cluster seeds later.
                if (is_filtered_term(k) or filter_2g(k,self.preserve_dict)):
                    picked_dict[k] = 1
                    continue
                picked_dict[k] = 1
                arr.append(k)
            if (len(arr) > 1):
                max_mean_term,max_mean, std_dev,s_dict = self.find_pivot_subgraph(arr,tokenize)
                if (max_mean_term not in pivots_dict):
                    new_key = max_mean_term
                else:
                    # Pivot collision: disambiguate the key with the source term.
                    print("****Term already a pivot node:",max_mean_term, "key is :",key)
                    new_key = max_mean_term + "++" + key
                pivots_dict[new_key] = {"key":new_key,"orig":key,"mean":max_mean,"terms":arr}
                entity_type,entity_counts,curr_entities_dict = self.get_entity_type(arr,new_key,esupfp)
                self.aggregate_entities_for_terms(arr,curr_entities_dict,full_entities_dict,untagged_items_dict)
                print(entity_type,entity_counts,new_key,max_mean,std_dev,arr)
                dfp.write(entity_type + " " + entity_counts + " " + new_key + " " + new_key + " " + new_key+" "+key+" "+str(max_mean)+" "+ str(std_dev) + " " +str(arr)+"\n")
            else:
                if (len(arr) == 1):
                    print("***Singleton arr for term:",key)
                    singletons_arr.append(key)
                else:
                    print("***Empty arr for term:",key)
                    empty_arr.append(key)
            #if (count >= 500):
            #    break
        dfp.write(SINGLETONS_TAG + str(singletons_arr) + "\n")
        dfp.write(EMPTY_TAG + str(empty_arr) + "\n")
        with open("pivots.json","w") as fp:
            fp.write(json.dumps(pivots_dict))
        with open("pivots.txt","w") as fp:
            for k in pivots_dict:
                fp.write(k + '\n')
        dfp.close()
        esupfp.close()
        self.create_entity_labels_file(full_entities_dict)
        self.create_inferred_entities_file(untagged_items_dict)
    def aggregate_entities_for_terms(self,arr,curr_entities_dict,full_entities_dict,untagged_items_dict):
        """Fold one cluster's entity vote counts into the global tallies.

        For each term of the cluster ``arr``:
          - terms absent from the bootstrap list accumulate *all* of the
            cluster's entity counts into ``untagged_items_dict`` (candidates
            for later label inference);
          - bootstrapped terms accumulate counts into ``full_entities_dict``,
            but only for entities already attached to that term in the
            bootstrap list.
        No-op when the cluster produced no entity counts.
        """
        if (len(curr_entities_dict) == 0):
            return
        for term in arr:
            if (term.lower() in self.bootstrap_entities): #Note this is a case insensitive check
                term_entities = self.bootstrap_entities[term.lower()]
            else:
                # Unlabeled term: remember every entity this cluster suggested.
                if (term not in untagged_items_dict):
                    untagged_items_dict[term] = OrderedDict()
                for entity in curr_entities_dict:
                    if (entity not in untagged_items_dict[term]):
                        untagged_items_dict[term][entity] = curr_entities_dict[entity]
                    else:
                        untagged_items_dict[term][entity] += curr_entities_dict[entity]
                continue
            #We come here only for terms that were present in the bootstrap list
            if term not in full_entities_dict: #This is case sensitive. We want vocab entries eGFR and EGFR to pick up separate weights for their entities
                full_entities_dict[term] = OrderedDict()
            for entity in curr_entities_dict:
                if (entity not in term_entities): #aggregate counts only for entities present for this term in original manual harvesting list(bootstrap list)
                    continue
                if (entity not in full_entities_dict[term]):
                    full_entities_dict[term][entity] = curr_entities_dict[entity]
                else:
                    full_entities_dict[term][entity] += curr_entities_dict[entity]
    def create_entity_labels_file(self,full_entities_dict):
        """Write labels.txt: one '<labels> <counts> <term>' line per vocab term.

        Three cases per term:
          - not clustered and not bootstrapped -> "OTHER 0 <term>";
          - bootstrapped but never clustered   -> bootstrap labels with zero counts;
          - clustered -> labels sorted by descending aggregated count.
        Labels and counts are '/'-joined in matching order.
        """
        with open("labels.txt","w") as fp:
            for term in self.terms_dict:
                if (term not in full_entities_dict and term.lower() not in self.bootstrap_entities):
                    fp.write("OTHER 0 " + term + "\n")
                    continue
                if (term not in full_entities_dict): #These are vocab terms that did not show up in a cluster but are present in bootstrap list
                    lc_term = term.lower()
                    # One "0/" per bootstrap label; trailing slash stripped below.
                    counts_str = len(self.bootstrap_entities[lc_term])*"0/"
                    fp.write('/'.join(self.bootstrap_entities[lc_term]) + ' ' + counts_str.rstrip('/') + ' ' + term + '\n') #Note the term output is case sensitive. Just the indexed version is case insenstive
                    continue
                out_entity_dict = {}
                for entity in full_entities_dict[term]:
                    assert(entity not in out_entity_dict)
                    out_entity_dict[entity] = full_entities_dict[term][entity]
                sorted_d = OrderedDict(sorted(out_entity_dict.items(), key=lambda kv: kv[1], reverse=True))
                entity_str = ""
                count_str = ""
                for entity in sorted_d:
                    if (len(entity_str) == 0):
                        entity_str = entity
                        count_str = str(sorted_d[entity])
                    else:
                        entity_str += '/' + entity
                        count_str += '/' + str(sorted_d[entity])
                if (len(entity_str) > 0):
                    fp.write(entity_str + ' ' + count_str + ' ' + term + "\n")
def sort_and_consolidate_inferred_entities_file(self,untagged_items_dict):
for term in untagged_items_dict:
out_entity_dict = {}
for entity in untagged_items_dict[term]:
assert(entity not in out_entity_dict)
out_entity_dict[entity] = untagged_items_dict[term][entity]
sorted_d = OrderedDict(sorted(out_entity_dict.items(), key=lambda kv: kv[1], reverse=True))
first = next(iter(sorted_d))
#untagged_items_dict[term] = {first:sorted_d[first]} #Just pick the first entity
untagged_items_dict[term] = sorted_d
ci_untagged_items_dict = OrderedDict()
for term in untagged_items_dict:
lc_term = term.lower()
if (lc_term not in ci_untagged_items_dict):
ci_untagged_items_dict[lc_term] = OrderedDict()
for entity in untagged_items_dict[term]:
if (entity not in ci_untagged_items_dict[lc_term]):
ci_untagged_items_dict[lc_term][entity] = untagged_items_dict[term][entity]
else:
ci_untagged_items_dict[lc_term][entity] += untagged_items_dict[term][entity]
return ci_untagged_items_dict
    def create_inferred_entities_file(self,untagged_items_dict):
        """Write inferred.txt for terms that were clustered but not bootstrapped.

        Each line is '<labels> <counts> <total> <term>' with labels/counts
        '/'-joined in descending-count order, after case-insensitive
        consolidation via sort_and_consolidate_inferred_entities_file.
        """
        with open("inferred.txt","w") as fp:
            untagged_items_dict = self.sort_and_consolidate_inferred_entities_file(untagged_items_dict)
            for term in untagged_items_dict:
                out_entity_dict = {}
                for entity in untagged_items_dict[term]:
                    assert(entity not in out_entity_dict)
                    out_entity_dict[entity] = untagged_items_dict[term][entity]
                sorted_d = OrderedDict(sorted(out_entity_dict.items(), key=lambda kv: kv[1], reverse=True))
                entity_str = ""
                count_str = ""
                count_val = 0
                for entity in sorted_d:
                    if (len(entity_str) == 0):
                        entity_str = entity
                        count_str = str(sorted_d[entity])
                    else:
                        entity_str += '/' + entity
                        count_str += '/' + str(sorted_d[entity])
                    count_val += int(sorted_d[entity])
                if (len(entity_str) > 0):
                    fp.write(entity_str + ' ' + count_str + ' ' + str(count_val) + ' ' + term + "\n")
    def get_entity_type(self,arr,new_key,esupfp):
        """Vote an entity label for a cluster of terms.

        Counts, across every term in ``arr`` found in the bootstrap list
        (case-insensitive), how often each entity label occurs. Returns
        (ret_str, count_str, entities_dict):
          ret_str       - '/'-joined labels, most frequent first, or "OTHER"
                          when no term was bootstrapped;
          count_str     - matching '/'-joined vote counts, with the cluster
                          size appended as the final component;
          entities_dict - label -> vote count, in the same order.
        The full sorted vote table is also logged to ``esupfp``.
        """
        e_dict = {}
        #print("GET:",arr)
        for term in arr:
            term = term.lower() #bootstrap entities is all lowercase.
            if (term in self.bootstrap_entities):
                entities = self.bootstrap_entities[term]
                for entity in entities:
                    if (entity in e_dict):
                        #print(term,entity)
                        e_dict[entity] += 1
                    else:
                        #print(term,entity)
                        e_dict[entity] = 1
        ret_str = ""
        count_str = ""
        entities_dict = OrderedDict()
        if (len(e_dict) >= 1):
            sorted_d = OrderedDict(sorted(e_dict.items(), key=lambda kv: kv[1], reverse=True))
            #print(new_key + ":" + str(sorted_d))
            esupfp.write(new_key + ' ' + str(sorted_d) + '\n')
            count = 0
            for k in sorted_d:
                if (len(ret_str) > 0):
                    ret_str += '/' + k
                    count_str += '/' + str(sorted_d[k])
                else:
                    ret_str = k
                    count_str = str(sorted_d[k])
                entities_dict[k] = int(sorted_d[k])
                count += 1
        if (len(ret_str) <= 0):
            # No bootstrapped term voted: fall back to OTHER with cluster size.
            ret_str = "OTHER"
            count_str = str(len(arr))
        #print(ret_str)
        count_str += '/' + str(len(arr))
        return ret_str,count_str,entities_dict
def fixed_gen_pivot_graphs(self,threshold,count_limit):
tokenize = False
count = 1
total = len(self.terms_dict)
picked_dict = OrderedDict()
pivots_dict = OrderedDict()
singletons_arr = []
empty_arr = []
total = len(self.terms_dict)
dfp = open("debug_pivots.txt","w")
for key in self.terms_dict:
if (is_filtered_term(key) ):
count += 1
continue
count += 1
#print(":",key)
if (key in picked_dict or len(key) <= 2):
continue
print("Processing ",count," of ",total)
picked_dict[key] = 1
sorted_d = self.get_terms_above_threshold(key,threshold,tokenize)
arr = []
for k in sorted_d:
if (is_filtered_term(k) or filter_2g(k,self.preserve_dict)):
picked_dict[k] = 1
continue
if (sorted_d[k] < count_limit):
picked_dict[k] = 1
arr.append(k)
else:
break
if (len(arr) > 1):
max_mean_term,max_mean, std_dev,s_dict = self.find_pivot_subgraph(arr,tokenize)
if (max_mean_term not in pivots_dict):
new_key = max_mean_term
else:
print("****Term already a pivot node:",max_mean_term, "key is :",key)
new_key = max_mean_term + "++" + key
pivots_dict[new_key] = {"key":new_key,"orig":key,"mean":max_mean,"terms":arr}
print(new_key,max_mean,std_dev,arr)
dfp.write(new_key + " " + new_key + " " + new_key+" "+key+" "+str(max_mean)+" "+ str(std_dev) + " " +str(arr)+"\n")
else:
if (len(arr) == 1):
print("***Singleton arr for term:",key)
singletons_arr.append(key)
else:
print("***Empty arr for term:",key)
empty_arr.append(key)
dfp.write(SINGLETONS_TAG + str(singletons_arr) + "\n")
dfp.write(EMPTY_TAG + str(empty_arr) + "\n")
with open("pivots.json","w") as fp:
fp.write(json.dumps(pivots_dict))
dfp.close()
    def get_tail_length(self,key,sorted_d):
        """Estimate the tail mass of a term's similarity histogram.

        Walks the histogram buckets from highest similarity downward, summing
        counts; stops at the first bucket whose count reaches MAX_VAL, or when
        two consecutive buckets reach TAIL_THRESH (backing out the previous
        bucket's contribution). Returns (count, cosine_val) where cosine_val
        is the similarity at which the tail was cut.

        NOTE(review): MAX_VAL and TAIL_THRESH are module constants defined
        outside this view - confirm their values when tuning.
        """
        rev_sorted_d = OrderedDict(sorted(sorted_d.items(), key=lambda kv: kv[0], reverse=True))
        prev_val = 0
        prev_cosine_val = 0
        count = 0
        cosine_val = 0
        for k in rev_sorted_d:
            if (rev_sorted_d[k] >= MAX_VAL):
                if (prev_val >= TAIL_THRESH):
                    count -= prev_val
                    cosine_val = prev_cosine_val
                else:
                    cosine_val = k
                break
            if (rev_sorted_d[k] >= TAIL_THRESH and prev_val >= TAIL_THRESH):
                count -= prev_val
                cosine_val = prev_cosine_val
                break
            prev_val = rev_sorted_d[k]
            prev_cosine_val = k
            count += rev_sorted_d[k]
        return count,cosine_val
def gen_dist_for_vocabs(self):
print("Random pick? (Full run will take approximately 3 hours) Y/n:")
resp = input()
is_rand = (resp == "Y")
if (is_rand):
print("Sampling run:")
count = 1
picked_count = 0
skip_count = 0
cum_dict = OrderedDict()
cum_dict_count = OrderedDict()
zero_dict = OrderedDict()
tail_lengths = OrderedDict()
total_tail_length = 0
for key in self.terms_dict:
if (is_filtered_term(key) ):
count += 1
continue
if (is_rand):
val = random.randint(0,100)
if (val < 97): # this is a biased skip to do a fast cum dist check (3% sample ~ 1000)
skip_count+= 1
print("Processed:",picked_count,"Skipped:",skip_count,end='\r')
continue
#print(":",key)
picked_count += 1
sorted_d,dummy = self.get_distribution_for_term(key,False)
tail_len,dummy = self.get_tail_length(key,sorted_d)
tail_lengths[key] = tail_len
total_tail_length += tail_len
for k in sorted_d:
val = round(float(k),1)
#print(str(val)+","+str(sorted_d[k]))
if (val == 0):
zero_dict[key] = sorted_d[k]
if (val in cum_dict):
cum_dict[val] += sorted_d[k]
cum_dict_count[val] += 1
else:
cum_dict[val] = sorted_d[k]
cum_dict_count[val] = 1
for k in cum_dict:
cum_dict[k] = round(float(cum_dict[k])/cum_dict_count[k],0)
final_sorted_d = OrderedDict(sorted(cum_dict.items(), key=lambda kv: kv[0], reverse=False))
print("\nTotal picked:",picked_count)
with open("cum_dist.txt","w") as fp:
fp.write("Total picked:" + str(picked_count) + "\n")
for k in final_sorted_d:
print(k,final_sorted_d[k])
p_str = str(k) + " " + str(final_sorted_d[k]) + "\n"
fp.write(p_str)
with open("zero_vec_counts.txt","w",encoding="utf-8") as fp:
fp.write("Total picked:" + str(picked_count) + "\n")
final_sorted_d = OrderedDict(sorted(zero_dict.items(), key=lambda kv: kv[1], reverse=True))
try:
for k in final_sorted_d:
#print(k,final_sorted_d[k])
p_str = str(k) + " " + str(final_sorted_d[k]) + "\n"
fp.write(p_str)
except:
print("Exception 1")
with open("tail_counts.txt","w",encoding="utf-8") as fp:
fp.write("Total picked:" + str(picked_count) + " Average tail len: " + str(round(float(total_tail_length)/picked_count,1)) + "\n")
final_sorted_d = OrderedDict(sorted(tail_lengths.items(), key=lambda kv: kv[1], reverse=True))
try:
for k in final_sorted_d:
#print(k,final_sorted_d[k])
p_str = str(k) + " " + str(final_sorted_d[k]) + "\n"
fp.write(p_str)
except:
print("Exception 2")
def get_embedding_index(self,text,tokenize=False):
if (tokenize):
assert(0)
tokenized_text = self.tokenizer.tokenize(text)
else:
if (not text.startswith('[')):
tokenized_text = text.split()
else:
tokenized_text = [text]
indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
assert(len(indexed_tokens) == 1)
return indexed_tokens[0]
def calc_inner_prod(self,text1,text2,tokenize):
assert(tokenize == False)
index1 = self.get_embedding_index(text1)
index2 = self.get_embedding_index(text2)
return self.similarity_matrix[index1][index2]
def get_distribution_for_term(self,term1,tokenize):
debug_fp = None
hack_check = False
if (term1 in self.dist_threshold_cache):
return self.dist_threshold_cache[term1],self.dist_zero_cache
terms_count = self.terms_dict
dist_dict = {}
val_dict = {}
zero_dict = {}
if (hack_check and debug_fp is None):
debug_fp = open("debug.txt","w")
for k in self.terms_dict:
term2 = k.strip("\n")
val = self.calc_inner_prod(term1,term2,tokenize)
#if (hack_check and val >= .8 and term1 != term2):
if (hack_check and val >= .6 and val < .8 and term1 != term2):
print(term1,term2)
str_val = term1 + " " + term2 + "\n"
debug_fp.write(str_val)
debug_fp.flush()
val = round(val,2)
if (val in dist_dict):
dist_dict[val] += 1
else:
dist_dict[val] = 1
val = round(val,1)
if (val >= -.05 and val <= .05):
zero_dict[term2] = 0
sorted_d = OrderedDict(sorted(dist_dict.items(), key=lambda kv: kv[0], reverse=False))
self.dist_threshold_cache[term1] = sorted_d
self.dist_zero_cache = zero_dict
return sorted_d,zero_dict
def get_terms_above_threshold(self,term1,threshold,tokenize):
final_dict = {}
for k in self.terms_dict:
term2 = k.strip("\n")
val = self.calc_inner_prod(term1,term2,tokenize)
val = round(val,2)
if (val > threshold):
final_dict[term2] = val
sorted_d = OrderedDict(sorted(final_dict.items(), key=lambda kv: kv[1], reverse=True))
return sorted_d
def print_terms_above_threshold(self,term1,threshold,tokenize):
fp = open("above_t.txt","w")
sorted_d = self.get_terms_above_threshold(term1,threshold,tokenize)
for k in sorted_d:
print(k," ",sorted_d[k])
fp.write(str(k) + " " + str(sorted_d[k]) + "\n")
fp.close()
#given n terms, find the mean of the connection strengths of subgraphs considering each term as pivot.
#return the mean of max strength term subgraph
    def find_pivot_subgraph(self,terms,tokenize):
        """Pick the pivot of a term cluster: the term whose mean similarity to
        all other terms is highest.

        Returns (pivot_term, mean, std_dev, means_dict) with mean/std_dev
        rounded to 2 decimals and means_dict ordered by descending mean.
        A single-term input short-circuits to (term, 1, 0, {term: 1}).
        """
        max_mean = 0
        std_dev = 0
        max_mean_term = None
        means_dict = {}
        if (len(terms) == 1):
            return terms[0],1,0,{terms[0]:1}
        for i in terms:
            full_score = 0
            count = 0
            full_dict = {}
            for j in terms:
                if (i != j):
                    val = self.calc_inner_prod(i,j,tokenize)
                    #print(i+"-"+j,val)
                    full_score += val
                    full_dict[count] = val
                    count += 1
            if (len(full_dict) > 0):
                mean = float(full_score)/len(full_dict)
                means_dict[i] = mean
                #print(i,mean)
                if (mean > max_mean):
                    #print("MAX MEAN:",i)
                    max_mean_term = i
                    max_mean = mean
                    # std_dev is only recomputed for the current best pivot.
                    std_dev = 0
                    for k in full_dict:
                        std_dev += (full_dict[k] - mean)*(full_dict[k] - mean)
                    std_dev = math.sqrt(std_dev/len(full_dict))
                    #print("MEAN:",i,mean,std_dev)
        #print("MAX MEAN TERM:",max_mean_term)
        sorted_d = OrderedDict(sorted(means_dict.items(), key=lambda kv: kv[1], reverse=True))
        return max_mean_term,round(max_mean,2),round(std_dev,2),sorted_d
def calc_bipartite_graph_strength_score(self,terms1,terms2,tokenize,normalize):
full_score = 0
max_val = 0
for i in terms1:
for j in terms2:
val = self.calc_inner_prod(i,j,tokenize)
print(i,j,val)
if (val > max_val):
max_val = val
full_score += val
val = float(full_score)/(len(terms1)*len(terms2)) if normalize else float(full_score)
return round(val,2),round(max_val,2)
def filter_glue_words(self,words):
ret_words = []
for dummy,i in enumerate(words):
if (i not in self.gw_dict):
ret_words.append(i)
if (len(ret_words) == 0):
ret_words.append(words[0])
return ret_words
    def find_entities(self,words):
        """Label each word with its entity type and vote counts.

        Lookup order per word: pure digits -> MEASURE; exact-case labels;
        lowercased labels; lowercased fallback label table. Returns a flat
        list alternating [label, counts, label, counts, ...] - two entries
        per input word. Words that would resolve to "OTHER" are reported as
        "UNTAGGED_ENTITY" instead.
        """
        entities = self.labels_dict
        lc_entities = self.lc_labels_dict
        #words = self.filter_glue_words(words) #do not filter glue words anymore. Let them pass through
        ret_arr = []
        for word in words:
            l_word = word.lower()
            if l_word.isdigit():
                ret_label = "MEASURE"
                ret_counts = str(1)
            elif (word in entities):
                ret_label = entities[word]["label"]
                ret_counts = entities[word]["counts"]
            elif (l_word in entities):
                ret_label = entities[l_word]["label"]
                ret_counts = entities[l_word]["counts"]
            elif (l_word in lc_entities):
                ret_label = lc_entities[l_word]["label"]
                ret_counts = lc_entities[l_word]["counts"]
            else:
                ret_label = "OTHER"
                ret_counts = "1"
            if (ret_label == "OTHER"):
                # Rename the catch-all so downstream output distinguishes
                # never-seen words from explicitly-labeled OTHER.
                ret_label = "UNTAGGED_ENTITY"
                ret_counts = "1"
            print(word,ret_label,ret_counts)
            ret_arr.append(ret_label)
            ret_arr.append(ret_counts)
        return ret_arr
def get_word():
    """Prompt until a non-empty word is entered; 'q' exits the program."""
    while True:
        print("Enter a word : q to quit")
        sent = input()
        #print(sent)
        if sent == "q":
            print("Exitting")
            sys.exit(1)
        if sent:
            return sent
def get_words():
    """Prompt until a non-empty line is entered; return it split on
    whitespace. 'q' exits the program."""
    while True:
        print("Enter words separated by spaces : q to quit")
        sent = input()
        #print(sent)
        if sent == "q":
            print("Exitting")
            sys.exit(1)
        if sent:
            return sent.split()
def pick_threshold():
    """Prompt for a float threshold; 'q' exits the program.

    Re-prompts on non-numeric input. Fix: the bare ``except:`` also swallowed
    KeyboardInterrupt/SystemExit; only ValueError (a failed float conversion)
    should trigger a retry.
    """
    while (True):
        print("Enter threshold to see words above threshold: q to quit")
        sent = input()
        if (sent == "q"):
            print("Exitting")
            sys.exit(1)
        try:
            thres = float(sent)
            return thres
        except ValueError:
            print("Invalid input. Retry")
def neigh_test(b_embeds,tokenize):
    """Interactive loop: print a word's similarity distribution, dump its
    near-zero neighbors to <word>_zero.txt, then list terms above a
    user-chosen threshold."""
    while (True):
        word = get_word()
        if (tokenize):
            tokenized_text = b_embeds.tokenizer.tokenize(word)
            print("Tokenized text:", tokenized_text)
        sorted_d,zero_dict = b_embeds.get_distribution_for_term(word,tokenize)
        for k in sorted_d:
            print(str(k)+","+str(sorted_d[k]))
        if (tokenize):
            print("Tokenized text:", tokenized_text)
        else:
            # Warn when the raw word maps to the UNK id (not in vocab).
            indexed_tokens = b_embeds.tokenizer.convert_tokens_to_ids(word)
            if (indexed_tokens == UNK_ID):
                print("Warning! This is not a token in vocab. Distribution is for UNK token")
        fp = open(word +"_zero.txt","w")
        for term in zero_dict:
            fp.write(term + '\n')
        fp.close()
        threshold = pick_threshold()
        b_embeds.print_terms_above_threshold(word,threshold,tokenize)
def graph_test(b_embeds, tokenize):
    """Interactive loop: read a word set and print its pivot-subgraph score."""
    while True:
        words = get_words()
        pivot, mean, dev, scores = b_embeds.find_pivot_subgraph(words, True)
        desc = ""
        for term in scores:
            desc += term + " "
        print("PSG score:", pivot, mean, dev, scores)
        print(desc)
def bipartite_test(b_embeds, tokenize):
    """Interactive loop: read two word sets and print their bipartite
    connection-strength score."""
    while True:
        print("First set")
        first = get_words()
        print("Second set")
        second = get_words()
        print("BF score:", b_embeds.calc_bipartite_graph_strength_score(first, second, True, False))
def softmax(x):
    """Compute softmax values for each sets of scores in x.

    Fix: shifts by the max along axis 0 before exponentiating so large
    scores cannot overflow ``np.exp``; the shift cancels in the ratio,
    so results are mathematically unchanged.
    """
    z = np.exp(x - np.max(x, axis=0))
    return z / np.sum(z, axis=0)
def impl_entities(b_embeds, tokenize, pick_threshold):
    """Interactive loop: read words and print their entity labels/counts."""
    while True:
        tokens = get_words()
        labels = b_embeds.find_entities(tokens)
        print(' '.join(labels))
def main():
    """Interactive driver: build a BertEmbeds instance from CLI args and run
    the selected analysis (distributions, clustering, neighbor/graph tests).

    Fixes: the two bare ``except:`` clauses now catch specific exceptions so
    Ctrl-C is no longer swallowed.
    NOTE(review): the code requires 10 positional arguments
    (len(sys.argv) == 11) and passes sys.argv[1..10] to BertEmbeds, but the
    usage string lists only 9 parameters - confirm and document the missing
    one.
    """
    if (len(sys.argv) != 11):
        print("Usage: <Bert model path - to load tokenizer> do_lower_case[1/0] <vocab file> <vector file> <tokenize text>1/0 <labels_file> <preserve_1_2_grams_file> < glue words file> <bootstrap entities file>")
    else:
        tokenize = True if int(sys.argv[5]) == 1 else False
        if (tokenize == True):
            print("Forcing tokenize to false. Ignoring input value")
            tokenize = False #Adding this override to avoid inadvertant subword token generation error for pivot cluster generation
        print("Tokenize is set to :",tokenize)
        b_embeds =BertEmbeds(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4],True,True,sys.argv[6],sys.argv[7],sys.argv[8],sys.argv[9],sys.argv[10]) #True - for cache embeds; normalize - True
        display_threshold = .4
        while (True):
            print("Enter test type (0-gen cum dist for vocabs; 1-generate clusters (will take approx 2 hours); 2-neigh/3-pivot graph/4-bipartite/5-Entity test/6-Subword neighbor cluster: q to quit")
            val = input()
            if (val == "0"):
                try:
                    b_embeds.gen_dist_for_vocabs()
                except Exception:  # was a bare except; let KeyboardInterrupt propagate
                    print("Trapped exception")
                    sys.exit(-1)
            elif (val == "1"):
                print("Enter Input threshold .5 works well for both pretraining and fine tuned. Enter 0 for adaptive thresholding(0 is recommended)")
                val = .5
                tail = 10
                try:
                    val = float(input())
                except (ValueError, EOFError):  # bad/missing input falls back to the default
                    val = .5
                if (val != 0):
                    print("Using value for fixed thresholding: ",val)
                    b_embeds.fixed_gen_pivot_graphs(val,tail)
                else:
                    print("Performing adaptive thresholding")
                    b_embeds.adaptive_gen_pivot_graphs()
                sys.exit(-1)
            elif (val == "2"):
                neigh_test(b_embeds,tokenize)
            elif (val == "3"):
                graph_test(b_embeds,tokenize)
            elif (val == "4"):
                bipartite_test(b_embeds,tokenize)
            elif (val == "5"):
                impl_entities(b_embeds,tokenize,display_threshold)
            elif (val == 'q'):
                sys.exit(-1)
            elif (val == "6"):
                b_embeds.subword_clustering()
            else:
                print("invalid option")
# Script entry point: interactive CLI for exploring BERT vocab embeddings.
if __name__ == '__main__':
    main()
| [
"random.randint",
"json.dumps",
"time.time",
"numpy.array",
"numpy.exp",
"numpy.inner",
"numpy.linalg.norm",
"collections.OrderedDict",
"pdb.set_trace",
"sys.exit"
] | [((669, 690), 'numpy.array', 'np.array', (['embeds_list'], {}), '(embeds_list)\n', (677, 690), True, 'import numpy as np\n'), ((2311, 2324), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2322, 2324), False, 'from collections import OrderedDict\n'), ((2345, 2358), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2356, 2358), False, 'from collections import OrderedDict\n'), ((3242, 3255), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3253, 3255), False, 'from collections import OrderedDict\n'), ((3941, 3954), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3952, 3954), False, 'from collections import OrderedDict\n'), ((5928, 5939), 'time.time', 'time.time', ([], {}), '()\n', (5937, 5939), False, 'import time\n'), ((6257, 6268), 'time.time', 'time.time', ([], {}), '()\n', (6266, 6268), False, 'import time\n'), ((7254, 7267), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7265, 7267), False, 'from collections import OrderedDict\n'), ((7325, 7338), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7336, 7338), False, 'from collections import OrderedDict\n'), ((7369, 7382), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7380, 7382), False, 'from collections import OrderedDict\n'), ((10343, 10356), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10354, 10356), False, 'from collections import OrderedDict\n'), ((10379, 10392), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10390, 10392), False, 'from collections import OrderedDict\n'), ((10450, 10463), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10461, 10463), False, 'from collections import OrderedDict\n'), ((10494, 10507), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10505, 10507), False, 'from collections import OrderedDict\n'), ((17281, 17294), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17292, 17294), False, 'from collections import 
OrderedDict\n'), ((19810, 19823), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (19821, 19823), False, 'from collections import OrderedDict\n'), ((20847, 20860), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (20858, 20860), False, 'from collections import OrderedDict\n'), ((20883, 20896), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (20894, 20896), False, 'from collections import OrderedDict\n'), ((24183, 24196), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24194, 24196), False, 'from collections import OrderedDict\n'), ((24222, 24235), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24233, 24235), False, 'from collections import OrderedDict\n'), ((24256, 24269), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24267, 24269), False, 'from collections import OrderedDict\n'), ((24293, 24306), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24304, 24306), False, 'from collections import OrderedDict\n'), ((36065, 36074), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (36071, 36074), True, 'import numpy as np\n'), ((6221, 6243), 'numpy.inner', 'np.inner', (['vec_a', 'vec_a'], {}), '(vec_a, vec_a)\n', (6229, 6243), True, 'import numpy as np\n'), ((33753, 33764), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (33761, 33764), False, 'import sys\n'), ((34037, 34048), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (34045, 34048), False, 'import sys\n'), ((34326, 34337), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (34334, 34337), False, 'import sys\n'), ((36084, 36093), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (36090, 36093), True, 'import numpy as np\n'), ((6074, 6103), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_a'], {'axis': '(0)'}), '(vec_a, axis=0)\n', (6088, 6103), True, 'import numpy as np\n'), ((9770, 9793), 'json.dumps', 'json.dumps', (['pivots_dict'], {}), '(pivots_dict)\n', (9780, 9793), False, 'import json\n'), ((12976, 12999), 'json.dumps', 'json.dumps', 
(['pivots_dict'], {}), '(pivots_dict)\n', (12986, 12999), False, 'import json\n'), ((14395, 14408), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14406, 14408), False, 'from collections import OrderedDict\n'), ((17494, 17507), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17505, 17507), False, 'from collections import OrderedDict\n'), ((22955, 22978), 'json.dumps', 'json.dumps', (['pivots_dict'], {}), '(pivots_dict)\n', (22965, 22978), False, 'import json\n'), ((24514, 24536), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (24528, 24536), False, 'import random\n'), ((37552, 37564), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (37560, 37564), False, 'import sys\n'), ((3603, 3618), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3616, 3618), False, 'import pdb\n'), ((13760, 13773), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13771, 13773), False, 'from collections import OrderedDict\n'), ((38249, 38261), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (38257, 38261), False, 'import sys\n'), ((38642, 38654), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (38650, 38654), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
import numpy as np
def plot2d_simplex(simplex, ind):
    """Draw the current 2-D simplex over contours of f(x, y) = sqrt(x^2 + y^2)
    and save the frame as ind zero-padded to 3 digits (e.g. 007.png)."""
    fig_dir = "./"
    plt.cla()
    grid_pts = 1000
    axis_vals = np.linspace(-256, 1024, grid_pts)
    X, Y = np.meshgrid(axis_vals, axis_vals)
    Z = np.sqrt(X ** 2 + Y ** 2)
    plt.contour(X, Y, Z, levels=list(np.arange(0, 1200, 10)))
    plt.gca().set_aspect("equal")
    plt.xlim((-256, 768))
    plt.ylim((-256, 768))
    # Connect the three vertices pairwise to outline the triangle.
    for a, b in ((0, 1), (1, 2), (2, 0)):
        plt.plot([simplex[a].x[0], simplex[b].x[0]],
                 [simplex[a].x[1], simplex[b].x[1]], color="#000000")
    plt.savefig(os.path.join(fig_dir, "{:03d}.png".format(ind)))
| [
"matplotlib.pyplot.xlim",
"numpy.meshgrid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.cla",
"numpy.arange",
"numpy.linspace",
"matplotlib.pyplot.gca",
"numpy.sqrt"
] | [((145, 154), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (152, 154), True, 'import matplotlib.pyplot as plt\n'), ((177, 203), 'numpy.linspace', 'np.linspace', (['(-256)', '(1024)', 'n'], {}), '(-256, 1024, n)\n', (188, 203), True, 'import numpy as np\n'), ((213, 239), 'numpy.linspace', 'np.linspace', (['(-256)', '(1024)', 'n'], {}), '(-256, 1024, n)\n', (224, 239), True, 'import numpy as np\n'), ((251, 270), 'numpy.meshgrid', 'np.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (262, 270), True, 'import numpy as np\n'), ((279, 303), 'numpy.sqrt', 'np.sqrt', (['(X ** 2 + Y ** 2)'], {}), '(X ** 2 + Y ** 2)\n', (286, 303), True, 'import numpy as np\n'), ((404, 425), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-256, 768)'], {}), '((-256, 768))\n', (412, 425), True, 'import matplotlib.pyplot as plt\n'), ((430, 451), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-256, 768)'], {}), '((-256, 768))\n', (438, 451), True, 'import matplotlib.pyplot as plt\n'), ((457, 559), 'matplotlib.pyplot.plot', 'plt.plot', (['[simplex[0].x[0], simplex[1].x[0]]', '[simplex[0].x[1], simplex[1].x[1]]'], {'color': '"""#000000"""'}), "([simplex[0].x[0], simplex[1].x[0]], [simplex[0].x[1], simplex[1].x\n [1]], color='#000000')\n", (465, 559), True, 'import matplotlib.pyplot as plt\n'), ((572, 674), 'matplotlib.pyplot.plot', 'plt.plot', (['[simplex[1].x[0], simplex[2].x[0]]', '[simplex[1].x[1], simplex[2].x[1]]'], {'color': '"""#000000"""'}), "([simplex[1].x[0], simplex[2].x[0]], [simplex[1].x[1], simplex[2].x\n [1]], color='#000000')\n", (580, 674), True, 'import matplotlib.pyplot as plt\n'), ((687, 789), 'matplotlib.pyplot.plot', 'plt.plot', (['[simplex[2].x[0], simplex[0].x[0]]', '[simplex[2].x[1], simplex[0].x[1]]'], {'color': '"""#000000"""'}), "([simplex[2].x[0], simplex[0].x[0]], [simplex[2].x[1], simplex[0].x\n [1]], color='#000000')\n", (695, 789), True, 'import matplotlib.pyplot as plt\n'), ((370, 379), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (377, 379), True, 'import 
matplotlib.pyplot as plt\n'), ((341, 363), 'numpy.arange', 'np.arange', (['(0)', '(1200)', '(10)'], {}), '(0, 1200, 10)\n', (350, 363), True, 'import numpy as np\n')] |
from glob import glob
import io
import itertools
import os
import platform
import sys
import tempfile
from cachetools import LRUCache, cached
import numpy as np
from quaternion import rotate_vectors
from cadquery import Compound, Location
from cadquery.occ_impl.shapes import downcast
from .utils import distance
from OCP.TopAbs import (
TopAbs_EDGE,
TopAbs_FACE,
)
from OCP.TopoDS import TopoDS_Compound, TopoDS_Shape
from OCP.TopExp import TopExp_Explorer
from OCP.StlAPI import StlAPI_Writer
from OCP.gp import gp_Trsf, gp_Quaternion, gp_Vec
from OCP.TopLoc import TopLoc_Location
# Bounding Box
from OCP.TopoDS import TopoDS_Shape
from OCP.BinTools import BinTools
from OCP.Bnd import Bnd_Box
from OCP.BRep import BRep_Tool
from OCP.BRepBndLib import BRepBndLib
from OCP.BRepMesh import BRepMesh_IncrementalMesh
from OCP.BRepTools import BRepTools
from OCP.BRepGProp import BRepGProp
from OCP.GProp import GProp_GProps
MAX_HASH_KEY = 2147483647
#
# Caching helpers
#
def make_key(objs, loc=None, optimal=False):  # pylint: disable=unused-argument
    """Build a hashable cache key for one shape or a list of shapes plus an
    optional location; `optimal` is deliberately ignored."""
    shapes = objs if isinstance(objs, (tuple, list)) else [objs]
    hashes = tuple(shape.HashCode(MAX_HASH_KEY) for shape in shapes)
    return (hashes, loc_to_tq(loc))
def get_size(obj):
    """Recursive approximation of obj's memory footprint (dicts also count
    their key lengths); used as the LRU cache's sizeof function."""
    total = sys.getsizeof(obj)
    if isinstance(obj, dict):
        total += sum(get_size(value) + len(key) for key, value in obj.items())
    elif isinstance(obj, (tuple, list)):
        total += sum(get_size(item) for item in obj)
    return total
cache = LRUCache(maxsize=16 * 1024 * 1024, getsizeof=get_size)
#
# Version
#
def ocp_version():
    """Derive the OCCT version from the versioned libTKBRep shared library name.

    NOTE(review): assumes a conda environment ($CONDA_PREFIX set) with a
    Linux-style ".so.X.Y.Z" suffix; presumably fails on macOS/Windows -
    confirm the intended platforms.
    """
    lib = glob(f"{os.environ['CONDA_PREFIX']}/lib/libTKBRep.*.*.*")[0]
    return lib.split(".so.")[-1]
#
# Bounding Box
#
class BoundingBox(object):
    """Axis-aligned bounding box with copy/dict/OCP-shape constructors.

    Attributes: xmin..zmax plus derived xsize/ysize/zsize, center and max
    (largest absolute coordinate), recomputed by _calc after every change.

    Fix: ``update`` previously executed ``raise "Wrong bounding box param"``,
    which in Python 3 raises a confusing TypeError ("exceptions must derive
    from BaseException"); it now raises ValueError with that message.
    """

    def __init__(self, obj=None, optimal=False):
        """Build from nothing (zero box), another BoundingBox, a dict with
        xmin..zmax keys, or an OCP shape (via Bnd_Box)."""
        self.optimal = optimal
        if obj is None:
            self.xmin = self.xmax = self.ymin = self.ymax = self.zmin = self.zmax = 0
        elif isinstance(obj, BoundingBox):
            self.xmin = obj.xmin
            self.xmax = obj.xmax
            self.ymin = obj.ymin
            self.ymax = obj.ymax
            self.zmin = obj.zmin
            self.zmax = obj.zmax
        elif isinstance(obj, dict):
            self.xmin = obj["xmin"]
            self.xmax = obj["xmax"]
            self.ymin = obj["ymin"]
            self.ymax = obj["ymax"]
            self.zmin = obj["zmin"]
            self.zmax = obj["zmax"]
        else:
            bbox = self._bounding_box(obj)
            self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax = bbox
        self._calc()

    def _center_of_mass(self, obj):
        """Volume center of mass of an OCP shape, as an (x, y, z) tuple."""
        Properties = GProp_GProps()
        BRepGProp.VolumeProperties_s(obj, Properties)
        com = Properties.CentreOfMass()
        return (com.X(), com.Y(), com.Z())

    def _bounding_box(self, obj, tol=1e-6):
        """Compute (xmin, xmax, ymin, ymax, zmin, zmax) for an OCP shape.

        A void box (e.g. for degenerate geometry) falls back to a tol-sized
        cube around the shape's center of mass.
        """
        bbox = Bnd_Box()
        if self.optimal:
            BRepTools.Clean_s(obj)
            BRepBndLib.AddOptimal_s(obj, bbox)
        else:
            BRepBndLib.Add_s(obj, bbox)
        if not bbox.IsVoid():
            values = bbox.Get()
            # Bnd_Box.Get() returns (xmin, ymin, zmin, xmax, ymax, zmax).
            return (values[0], values[3], values[1], values[4], values[2], values[5])
        else:
            c = self._center_of_mass(obj)
            bb = (c[0] - tol, c[0] + tol, c[1] - tol, c[1] + tol, c[2] - tol, c[2] + tol)
            print("\nVoid Bounding Box", bb)
            return bb

    def _calc(self):
        """Refresh the derived size/center/max attributes from the extents."""
        self.xsize = self.xmax - self.xmin
        self.ysize = self.ymax - self.ymin
        self.zsize = self.zmax - self.zmin
        self.center = (
            self.xmin + self.xsize / 2.0,
            self.ymin + self.ysize / 2.0,
            self.zmin + self.zsize / 2.0,
        )
        self.max = max([abs(x) for x in (self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax)])

    def is_empty(self):
        """True when every extent is below 0.01 (effectively a point)."""
        return (
            (abs(self.xmax - self.xmin) < 0.01)
            and (abs(self.ymax - self.ymin) < 0.01)
            and (abs(self.zmax - self.zmin) < 0.01)
        )

    def max_dist_from_center(self):
        """Largest distance from the box center to any of its 8 corners."""
        return max(
            [
                distance(self.center, v)
                for v in itertools.product((self.xmin, self.xmax), (self.ymin, self.ymax), (self.zmin, self.zmax))
            ]
        )

    def max_dist_from_origin(self):
        """Largest distance from the origin to any of the 8 corners."""
        return max(
            [
                np.linalg.norm(v)
                for v in itertools.product((self.xmin, self.xmax), (self.ymin, self.ymax), (self.zmin, self.zmax))
            ]
        )

    def update(self, bb, minimize=False):
        """Grow (default) or shrink (minimize=True) this box to incorporate
        another BoundingBox or a dict with xmin..zmax keys.

        Raises ValueError for any other argument type.
        """
        lower, upper = (max, min) if minimize else (min, max)

        if isinstance(bb, BoundingBox):
            self.xmin = lower(bb.xmin, self.xmin)
            self.xmax = upper(bb.xmax, self.xmax)
            self.ymin = lower(bb.ymin, self.ymin)
            self.ymax = upper(bb.ymax, self.ymax)
            self.zmin = lower(bb.zmin, self.zmin)
            self.zmax = upper(bb.zmax, self.zmax)
        elif isinstance(bb, dict):
            self.xmin = lower(bb["xmin"], self.xmin)
            self.xmax = upper(bb["xmax"], self.xmax)
            self.ymin = lower(bb["ymin"], self.ymin)
            self.ymax = upper(bb["ymax"], self.ymax)
            self.zmin = lower(bb["zmin"], self.zmin)
            self.zmax = upper(bb["zmax"], self.zmax)
        else:
            raise ValueError("Wrong bounding box param")

        self._calc()

    def to_dict(self):
        """Plain-dict form with xmin..zmax keys (inverse of the dict ctor)."""
        return {
            "xmin": self.xmin,
            "xmax": self.xmax,
            "ymin": self.ymin,
            "ymax": self.ymax,
            "zmin": self.zmin,
            "zmax": self.zmax,
        }

    def __repr__(self):
        return "{xmin:%.2f, xmax:%.2f, ymin:%.2f, ymax:%.2f, zmin:%.2f, zmax:%.2f}" % (
            self.xmin,
            self.xmax,
            self.ymin,
            self.ymax,
            self.zmin,
            self.zmax,
        )
@cached(cache, key=make_key)
def bounding_box(objs, loc=None, optimal=False):
    """Memoized BoundingBox of one shape or a list of shapes, optionally
    moved by ``loc`` first. Results are cached in the module LRU cache keyed
    by shape HashCodes + location."""
    if isinstance(objs, (list, tuple)):
        compound = Compound._makeCompound(objs)  # pylint: disable=protected-access
    else:
        compound = objs
    return BoundingBox(compound if loc is None else compound.Moved(loc), optimal=optimal)
def np_bbox(p, t, q):
    """Axis-aligned bounds of a flat vertex buffer, optionally transformed.

    p is reshaped to (-1, 3); when t (translation) and q (quaternion as
    x, y, z, w) are given, the points are rotated then translated first.
    Returns a dict with xmin..zmax keys, or None for an empty buffer.
    """
    if p.size == 0:
        return None

    pts = p.reshape(-1, 3)
    if t is None and q is None:
        verts = pts
    else:
        quat = np.quaternion(q[-1], *q[:-1])  # numpy-quaternion wants w first
        verts = rotate_vectors([quat], pts)[0] + np.asarray(t)

    lo = verts.min(axis=0)
    hi = verts.max(axis=0)
    return {
        "xmin": lo[0], "xmax": hi[0],
        "ymin": lo[1], "ymax": hi[1],
        "zmin": lo[2], "zmax": hi[2],
    }
# Export STL
def write_stl_file(compound, filename, tolerance=None, angular_tolerance=None):
    """Tessellate `compound` and export it as STL; returns the writer status.

    Mesh data is cleared both before and after the export so stale or
    temporary triangulations never leak into later operations.
    """
    BRepTools.Clean_s(compound)  # drop any previous triangulation

    mesher = BRepMesh_IncrementalMesh(compound, tolerance, True, angular_tolerance)
    mesher.Perform()

    status = StlAPI_Writer().Write(compound, filename)

    BRepTools.Clean_s(compound)  # discard the export mesh again
    return status
# OCP serialisation
def serialize(shape):
    """Serialize an OCP shape to bytes via BinTools; None passes through."""
    if shape is None:
        return None
    if platform.system() == "Darwin":
        # NOTE(review): the Darwin-only temp-file detour is inherited from the
        # original code; presumably BinTools' stream overload misbehaves on
        # macOS — confirm before unifying the two paths.
        with tempfile.NamedTemporaryFile() as tf:
            BinTools.Write_s(shape, tf.name)
            with open(tf.name, "rb") as fd:
                return fd.read()
    stream = io.BytesIO()
    BinTools.Write_s(shape, stream)
    return stream.getvalue()
def deserialize(buffer):
    """Rebuild a TopoDS_Shape from bytes produced by serialize(); None passes through."""
    if buffer is None:
        return None
    shape = TopoDS_Shape()
    if platform.system() == "Darwin":
        # NOTE(review): mirror of the Darwin temp-file workaround used when
        # serializing — confirm it is still needed.
        with tempfile.NamedTemporaryFile() as tf:
            with open(tf.name, "wb") as fd:
                fd.write(buffer)
            BinTools.Read_s(shape, tf.name)
    else:
        BinTools.Read_s(shape, io.BytesIO(buffer))
    return shape
# OCP types and accessors
def is_compound(topods_shape):
    """Return True if *topods_shape* is a TopoDS_Compound (or subclass)."""
    return isinstance(topods_shape, TopoDS_Compound)
def is_shape(topods_shape):
    """Return True if *topods_shape* is a TopoDS_Shape (or subclass)."""
    return isinstance(topods_shape, TopoDS_Shape)
def _get_topo(shape, topo):
    """Yield every sub-shape of *shape* of topology type *topo* exactly once.

    Duplicates are filtered via the OCP hash code (a hash collision would
    silently drop a shape — behavior inherited from the original); each
    yielded item is downcast to its concrete TopoDS type.
    """
    seen = set()
    explorer = TopExp_Explorer(shape, topo)
    while explorer.More():
        item = explorer.Current()
        code = item.HashCode(MAX_HASH_KEY)
        if code not in seen:
            seen.add(code)
            yield downcast(item)
        explorer.Next()
def get_faces(shape):
    """Iterate over the unique faces of *shape* (see _get_topo)."""
    return _get_topo(shape, TopAbs_FACE)
def get_edges(shape):
    """Iterate over the unique edges of *shape* (see _get_topo)."""
    return _get_topo(shape, TopAbs_EDGE)
def get_point(vertex):
    """Return the cartesian coordinates of a TopoDS vertex as an (x, y, z) tuple."""
    pnt = BRep_Tool.Pnt_s(vertex)
    return tuple(coord() for coord in (pnt.X, pnt.Y, pnt.Z))
def get_rgb(color):
    """Convert a wrapped color to an (r, g, b) tuple of 0-255 ints.

    A None color maps to the default grey (176, 176, 176).
    """
    if color is None:
        return (176, 176, 176)
    rgb = color.wrapped.GetRGB()
    return tuple(int(255 * channel) for channel in (rgb.Red(), rgb.Green(), rgb.Blue()))
def tq_to_loc(t, q):
    """Build a TopLoc_Location from a translation *t* and a quaternion *q*.

    *q* is unpacked directly into gp_Quaternion, *t* into gp_Vec.
    """
    trsf = gp_Trsf()
    trsf.SetTransformation(gp_Quaternion(*q), gp_Vec(*t))
    return TopLoc_Location(trsf)
def loc_to_tq(loc):
    """Decompose a TopLoc_Location into ((x, y, z), (qx, qy, qz, qw)).

    Returns (None, None) when *loc* is None.
    """
    if loc is None:
        return (None, None)
    trsf = loc.Transformation()
    translation = trsf.TranslationPart()
    rotation = trsf.GetRotation()
    t = (translation.X(), translation.Y(), translation.Z())
    q = (rotation.X(), rotation.Y(), rotation.Z(), rotation.W())
    return (t, q)
def wrapped_or_None(obj):
    """Return obj.wrapped, or None when *obj* itself is None."""
    if obj is None:
        return None
    return obj.wrapped
def __location__repr__(self):
    """Readable repr for cadquery Location: translation + rotation quaternion."""
    t, q = loc_to_tq(self.wrapped)
    fmt = "{:8.3f}".format
    return (
        f"Location: t=({fmt(t[0])}, {fmt(t[1])}, {fmt(t[2])}), "
        f"q=({fmt(q[0])}, {fmt(q[1])}, {fmt(q[2])}, {fmt(q[3])})"
    )
Location.__repr__ = __location__repr__ # type: ignore
| [
"OCP.gp.gp_Quaternion",
"OCP.gp.gp_Vec",
"OCP.StlAPI.StlAPI_Writer",
"OCP.TopExp.TopExp_Explorer",
"cadquery.occ_impl.shapes.downcast",
"OCP.BinTools.BinTools.Read_s",
"numpy.linalg.norm",
"sys.getsizeof",
"glob.glob",
"OCP.Bnd.Bnd_Box",
"cachetools.LRUCache",
"OCP.BRepGProp.BRepGProp.VolumePr... | [((1540, 1594), 'cachetools.LRUCache', 'LRUCache', ([], {'maxsize': '(16 * 1024 * 1024)', 'getsizeof': 'get_size'}), '(maxsize=16 * 1024 * 1024, getsizeof=get_size)\n', (1548, 1594), False, 'from cachetools import LRUCache, cached\n'), ((5933, 5960), 'cachetools.cached', 'cached', (['cache'], {'key': 'make_key'}), '(cache, key=make_key)\n', (5939, 5960), False, 'from cachetools import LRUCache, cached\n'), ((1308, 1326), 'sys.getsizeof', 'sys.getsizeof', (['obj'], {}), '(obj)\n', (1321, 1326), False, 'import sys\n'), ((6542, 6559), 'numpy.min', 'np.min', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (6548, 6559), True, 'import numpy as np\n'), ((6572, 6589), 'numpy.max', 'np.max', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (6578, 6589), True, 'import numpy as np\n'), ((6844, 6871), 'OCP.BRepTools.BRepTools.Clean_s', 'BRepTools.Clean_s', (['compound'], {}), '(compound)\n', (6861, 6871), False, 'from OCP.BRepTools import BRepTools\n'), ((6884, 6954), 'OCP.BRepMesh.BRepMesh_IncrementalMesh', 'BRepMesh_IncrementalMesh', (['compound', 'tolerance', '(True)', 'angular_tolerance'], {}), '(compound, tolerance, True, angular_tolerance)\n', (6908, 6954), False, 'from OCP.BRepMesh import BRepMesh_IncrementalMesh\n'), ((6988, 7003), 'OCP.StlAPI.StlAPI_Writer', 'StlAPI_Writer', ([], {}), '()\n', (7001, 7003), False, 'from OCP.StlAPI import StlAPI_Writer\n'), ((7089, 7116), 'OCP.BRepTools.BRepTools.Clean_s', 'BRepTools.Clean_s', (['compound'], {}), '(compound)\n', (7106, 7116), False, 'from OCP.BRepTools import BRepTools\n'), ((7643, 7657), 'OCP.TopoDS.TopoDS_Shape', 'TopoDS_Shape', ([], {}), '()\n', (7655, 7657), False, 'from OCP.TopoDS import TopoDS_Shape\n'), ((8202, 8230), 'OCP.TopExp.TopExp_Explorer', 'TopExp_Explorer', (['shape', 'topo'], {}), '(shape, topo)\n', (8217, 8230), False, 'from OCP.TopExp import TopExp_Explorer\n'), ((8658, 8681), 'OCP.BRep.BRep_Tool.Pnt_s', 'BRep_Tool.Pnt_s', (['vertex'], {}), '(vertex)\n', (8673, 8681), 
False, 'from OCP.BRep import BRep_Tool\n'), ((8935, 8944), 'OCP.gp.gp_Trsf', 'gp_Trsf', ([], {}), '()\n', (8942, 8944), False, 'from OCP.gp import gp_Trsf, gp_Quaternion, gp_Vec\n'), ((8953, 8970), 'OCP.gp.gp_Quaternion', 'gp_Quaternion', (['*q'], {}), '(*q)\n', (8966, 8970), False, 'from OCP.gp import gp_Trsf, gp_Quaternion, gp_Vec\n'), ((8979, 8989), 'OCP.gp.gp_Vec', 'gp_Vec', (['*t'], {}), '(*t)\n', (8985, 8989), False, 'from OCP.gp import gp_Trsf, gp_Quaternion, gp_Vec\n'), ((9031, 9049), 'OCP.TopLoc.TopLoc_Location', 'TopLoc_Location', (['T'], {}), '(T)\n', (9046, 9049), False, 'from OCP.TopLoc import TopLoc_Location\n'), ((1641, 1698), 'glob.glob', 'glob', (['f"""{os.environ[\'CONDA_PREFIX\']}/lib/libTKBRep.*.*.*"""'], {}), '(f"{os.environ[\'CONDA_PREFIX\']}/lib/libTKBRep.*.*.*")\n', (1645, 1698), False, 'from glob import glob\n'), ((2689, 2703), 'OCP.GProp.GProp_GProps', 'GProp_GProps', ([], {}), '()\n', (2701, 2703), False, 'from OCP.GProp import GProp_GProps\n'), ((2712, 2757), 'OCP.BRepGProp.BRepGProp.VolumeProperties_s', 'BRepGProp.VolumeProperties_s', (['obj', 'Properties'], {}), '(obj, Properties)\n', (2740, 2757), False, 'from OCP.BRepGProp import BRepGProp\n'), ((2901, 2910), 'OCP.Bnd.Bnd_Box', 'Bnd_Box', ([], {}), '()\n', (2908, 2910), False, 'from OCP.Bnd import Bnd_Box\n'), ((6069, 6097), 'cadquery.Compound._makeCompound', 'Compound._makeCompound', (['objs'], {}), '(objs)\n', (6091, 6097), False, 'from cadquery import Compound, Location\n'), ((6423, 6436), 'numpy.asarray', 'np.asarray', (['t'], {}), '(t)\n', (6433, 6436), True, 'import numpy as np\n'), ((6451, 6480), 'numpy.quaternion', 'np.quaternion', (['q[-1]', '*q[:-1]'], {}), '(q[-1], *q[:-1])\n', (6464, 6480), True, 'import numpy as np\n'), ((7231, 7248), 'platform.system', 'platform.system', ([], {}), '()\n', (7246, 7248), False, 'import platform\n'), ((7460, 7472), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (7470, 7472), False, 'import io\n'), ((7481, 7509), 
'OCP.BinTools.BinTools.Write_s', 'BinTools.Write_s', (['shape', 'bio'], {}), '(shape, bio)\n', (7497, 7509), False, 'from OCP.BinTools import BinTools\n'), ((7665, 7682), 'platform.system', 'platform.system', ([], {}), '()\n', (7680, 7682), False, 'import platform\n'), ((7891, 7909), 'io.BytesIO', 'io.BytesIO', (['buffer'], {}), '(buffer)\n', (7901, 7909), False, 'import io\n'), ((7918, 7945), 'OCP.BinTools.BinTools.Read_s', 'BinTools.Read_s', (['shape', 'bio'], {}), '(shape, bio)\n', (7933, 7945), False, 'from OCP.BinTools import BinTools\n'), ((2948, 2970), 'OCP.BRepTools.BRepTools.Clean_s', 'BRepTools.Clean_s', (['obj'], {}), '(obj)\n', (2965, 2970), False, 'from OCP.BRepTools import BRepTools\n'), ((2983, 3017), 'OCP.BRepBndLib.BRepBndLib.AddOptimal_s', 'BRepBndLib.AddOptimal_s', (['obj', 'bbox'], {}), '(obj, bbox)\n', (3006, 3017), False, 'from OCP.BRepBndLib import BRepBndLib\n'), ((3044, 3071), 'OCP.BRepBndLib.BRepBndLib.Add_s', 'BRepBndLib.Add_s', (['obj', 'bbox'], {}), '(obj, bbox)\n', (3060, 3071), False, 'from OCP.BRepBndLib import BRepBndLib\n'), ((7275, 7304), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (7302, 7304), False, 'import tempfile\n'), ((7324, 7356), 'OCP.BinTools.BinTools.Write_s', 'BinTools.Write_s', (['shape', 'tf.name'], {}), '(shape, tf.name)\n', (7340, 7356), False, 'from OCP.BinTools import BinTools\n'), ((7709, 7738), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (7736, 7738), False, 'import tempfile\n'), ((7835, 7866), 'OCP.BinTools.BinTools.Read_s', 'BinTools.Read_s', (['shape', 'tf.name'], {}), '(shape, tf.name)\n', (7850, 7866), False, 'from OCP.BinTools import BinTools\n'), ((4395, 4412), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (4409, 4412), True, 'import numpy as np\n'), ((6493, 6519), 'quaternion.rotate_vectors', 'rotate_vectors', (['[n_q]', 'n_p'], {}), '([n_q], n_p)\n', (6507, 6519), False, 'from quaternion import rotate_vectors\n'), 
((8456, 8470), 'cadquery.occ_impl.shapes.downcast', 'downcast', (['item'], {}), '(item)\n', (8464, 8470), False, 'from cadquery.occ_impl.shapes import downcast\n'), ((4194, 4288), 'itertools.product', 'itertools.product', (['(self.xmin, self.xmax)', '(self.ymin, self.ymax)', '(self.zmin, self.zmax)'], {}), '((self.xmin, self.xmax), (self.ymin, self.ymax), (self.\n zmin, self.zmax))\n', (4211, 4288), False, 'import itertools\n'), ((4438, 4532), 'itertools.product', 'itertools.product', (['(self.xmin, self.xmax)', '(self.ymin, self.ymax)', '(self.zmin, self.zmax)'], {}), '((self.xmin, self.xmax), (self.ymin, self.ymax), (self.\n zmin, self.zmax))\n', (4455, 4532), False, 'import itertools\n')] |
# Tencent is pleased to support the open source community by making PocketFlow available.
#
# Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Uniform Quantization Learner.
Without buckets, the min/max range is computed per layer; with buckets, it is
computed per bucket of `bucket_size` values. In practice, bucketing usually
yields better performance.
"""
import os
from timeit import default_timer as timer
import numpy as np
import tensorflow as tf
from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw
from learners.abstract_learner import AbstractLearner
from learners.distillation_helper import DistillationHelper
from learners.uniform_quantization.utils import UniformQuantization
from learners.uniform_quantization.bit_optimizer import BitOptimizer
FLAGS = tf.app.flags.FLAGS
# Quantize parameters
# Command-line flags controlling the uniform quantization learner.
tf.app.flags.DEFINE_integer('uql_weight_bits', 4,
                            'Number of bits to use for quantizing weights')
tf.app.flags.DEFINE_integer('uql_activation_bits', 4,
                            'Number of bits to use for quantizing activations')
tf.app.flags.DEFINE_boolean('uql_use_buckets', False, 'Use bucketing or not')
tf.app.flags.DEFINE_integer('uql_bucket_size', 256, 'Number of bucket size')
tf.app.flags.DEFINE_integer('uql_quant_epochs', 60, 'To be determined by datasets')
tf.app.flags.DEFINE_string('uql_save_quant_model_path',
                           './uql_quant_models/uql_quant_model.ckpt',
                           'dir to save quantization model')
tf.app.flags.DEFINE_boolean('uql_quantize_all_layers', False,
                            'If False, leaving first and last layers unquantized')
tf.app.flags.DEFINE_string('uql_bucket_type', 'channel',
                           'Two types for now: [channel, split]')
def setup_bnds_decay_rates(model_name, dataset_name):
  """Set up the piecewise-constant learning-rate schedule for fine-tuning.

  NOTE: The bnd_decay_rates here is mgw_size invariant.

  Args:
  * model_name: network name, e.g. 'resnet_20' or 'mobilenet_v1'
  * dataset_name: dataset name, either 'cifar_10' or 'ilsvrc_12'

  Returns:
  * init_lr: initial learning rate
  * bnds: schedule boundaries, in number of iterations
  * decay_rates: per-interval multipliers applied to the base learning rate
  * finetune_steps: total number of fine-tuning iterations

  Raises:
  * ValueError: if no schedule is defined for the (model, dataset) pair
    (previously this fell through to a confusing UnboundLocalError)
  """
  batch_size = FLAGS.batch_size if not FLAGS.enbl_multi_gpu else FLAGS.batch_size * mgw.size()
  nb_batches_per_epoch = int(FLAGS.nb_smpls_train / batch_size)
  mgw_size = int(mgw.size()) if FLAGS.enbl_multi_gpu else 1
  # scale the initial learning rate with the effective (multi-GPU) batch size
  init_lr = FLAGS.lrn_rate_init * FLAGS.batch_size * mgw_size / FLAGS.batch_size_norm \
      if FLAGS.enbl_multi_gpu else FLAGS.lrn_rate_init
  bnds, decay_rates = None, None
  if dataset_name == 'cifar_10':
    if model_name.startswith('resnet'):
      bnds = [nb_batches_per_epoch * 15, nb_batches_per_epoch * 40]
      decay_rates = [1e-3, 1e-4, 1e-5]
  elif dataset_name == 'ilsvrc_12':
    if model_name.startswith('resnet'):
      bnds = [nb_batches_per_epoch * 5, nb_batches_per_epoch * 20]
      decay_rates = [1e-4, 1e-5, 1e-6]
    elif model_name.startswith('mobilenet'):
      bnds = [nb_batches_per_epoch * 5, nb_batches_per_epoch * 30]
      decay_rates = [1e-4, 1e-5, 1e-6]
  if bnds is None or decay_rates is None:
    raise ValueError(
        'no LR schedule defined for model %s on dataset %s' % (model_name, dataset_name))
  finetune_steps = nb_batches_per_epoch * FLAGS.uql_quant_epochs
  init_lr = init_lr if FLAGS.enbl_warm_start else FLAGS.lrn_rate_init
  return init_lr, bnds, decay_rates, finetune_steps
class UniformQuantLearner(AbstractLearner):
  # pylint: disable=too-many-instance-attributes
  '''
  Uniform quantization for weights and activations
  '''
  def __init__(self, sm_writer, model_helper):
    """Constructor function.

    Args:
    * sm_writer: TensorFlow's summary writer
    * model_helper: model helper with definitions of model & dataset
    """
    # class-independent initialization
    super(UniformQuantLearner, self).__init__(sm_writer, model_helper)
    # class-dependent initialization
    if FLAGS.enbl_dst:
      self.helper_dst = DistillationHelper(sm_writer, model_helper, self.mpi_comm)
    # initialize class attributes
    self.ops = {}  # named TF operations (train / eval / init / log / ...)
    self.bit_placeholders = {}  # placeholders feeding per-layer bit widths
    self.statistics = {}  # layer counts & weight sizes collected at graph build
    self.__build_train() # for train
    self.__build_eval() # for eval
    if self.is_primary_worker('local'):
      self.download_model() # pre-trained model is required
    self.auto_barrier()
    # determine the optimal policy.
    bit_optimizer = BitOptimizer(self.dataset_name,
                                 self.weights,
                                 self.statistics,
                                 self.bit_placeholders,
                                 self.ops,
                                 self.layerwise_tune_list,
                                 self.sess_train,
                                 self.sess_eval,
                                 self.saver_train,
                                 self.saver_eval,
                                 self.auto_barrier)
    self.optimal_w_bit_list, self.optimal_a_bit_list = bit_optimizer.run()
    self.auto_barrier()
  def train(self):
    """Fine-tune the quantized model; periodically save & evaluate it."""
    # initialization
    self.sess_train.run(self.ops['init'])
    # mgw_size = int(mgw.size()) if FLAGS.enbl_multi_gpu else 1
    total_iters = self.finetune_steps
    if FLAGS.enbl_warm_start:
      self.__restore_model(is_train=True)  # use the latest model for warm start
    self.auto_barrier()
    if FLAGS.enbl_multi_gpu:
      self.sess_train.run(self.ops['bcast'])
    time_prev = timer()
    # build the quantization bits
    feed_dict = {self.bit_placeholders['w_train']: self.optimal_w_bit_list,
                 self.bit_placeholders['a_train']: self.optimal_a_bit_list}
    for idx_iter in range(total_iters):
      # train the model
      if (idx_iter + 1) % FLAGS.summ_step != 0:
        self.sess_train.run(self.ops['train'], feed_dict=feed_dict)
      else:
        _, summary, log_rslt = self.sess_train.run([self.ops['train'],
                                                    self.ops['summary'],
                                                    self.ops['log']],
                                                   feed_dict=feed_dict)
        time_prev = self.__monitor_progress(summary, log_rslt, time_prev, idx_iter)
      # save & evaluate the model at certain steps
      if (idx_iter + 1) % FLAGS.save_step == 0:
        self.__save_model()
        self.evaluate()
        self.auto_barrier()
    # save the final model
    self.__save_model()
    self.evaluate()
  def evaluate(self):
    """Restore the quantized model and log loss / accuracy on the eval set."""
    # early break for non-primary workers
    if not self.is_primary_worker():
      return
    # evaluate the model
    self.__restore_model(is_train=False)
    losses, accuracies = [], []
    nb_iters = int(np.ceil(float(FLAGS.nb_smpls_eval) / FLAGS.batch_size_eval))
    # build the quantization bits
    feed_dict = {self.bit_placeholders['w_eval']: self.optimal_w_bit_list,
                 self.bit_placeholders['a_eval']: self.optimal_a_bit_list}
    for _ in range(nb_iters):
      eval_rslt = self.sess_eval.run(self.ops['eval'], feed_dict=feed_dict)
      losses.append(eval_rslt[0])
      accuracies.append(eval_rslt[1])
    tf.logging.info('loss: {}'.format(np.mean(np.array(losses))))
    tf.logging.info('accuracy: {}'.format(np.mean(np.array(accuracies))))
    tf.logging.info("Optimal Weight Quantization:{}".format(self.optimal_w_bit_list))
    if FLAGS.uql_use_buckets:
      bucket_storage = self.sess_eval.run(self.ops['bucket_storage'], feed_dict=feed_dict)
      self.__show_bucket_storage(bucket_storage)
  def __build_train(self):
    """Build the training graph: input pipeline, quantized forward pass,
    loss, LR schedule and Adam optimizer; fills self.ops / self.statistics."""
    with tf.Graph().as_default():
      # TensorFlow session
      config = tf.ConfigProto()
      config.gpu_options.visible_device_list = str(mgw.local_rank() \
          if FLAGS.enbl_multi_gpu else 0)
      self.sess_train = tf.Session(config=config)
      # data input pipeline
      with tf.variable_scope(self.data_scope):
        iterator = self.build_dataset_train()
        images, labels = iterator.get_next()
        images.set_shape((FLAGS.batch_size, images.shape[1], images.shape[2],
                          images.shape[3]))
      # model definition - distilled model
      if FLAGS.enbl_dst:
        logits_dst = self.helper_dst.calc_logits(self.sess_train, images)
      # model definition
      with tf.variable_scope(self.model_scope, reuse=tf.AUTO_REUSE):
        # forward pass
        logits = self.forward_train(images)
        self.weights = [v for v in self.trainable_vars if 'kernel' in v.name or 'weight' in v.name]
        if not FLAGS.uql_quantize_all_layers:
          self.weights = self.weights[1:-1]
        self.statistics['num_weights'] = \
            [tf.reshape(v, [-1]).shape[0].value for v in self.weights]
        self.__quantize_train_graph()
        # loss & accuracy
        loss, metrics = self.calc_loss(labels, logits, self.trainable_vars)
        if self.dataset_name == 'cifar_10':
          acc_top1, acc_top5 = metrics['accuracy'], tf.constant(0.)
        elif self.dataset_name == 'ilsvrc_12':
          acc_top1, acc_top5 = metrics['acc_top1'], metrics['acc_top5']
        else:
          raise ValueError("Unrecognized dataset name")
        model_loss = loss
        if FLAGS.enbl_dst:
          dst_loss = self.helper_dst.calc_loss(logits, logits_dst)
          loss += dst_loss
          tf.summary.scalar('dst_loss', dst_loss)
        tf.summary.scalar('model_loss', model_loss)
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('acc_top1', acc_top1)
        tf.summary.scalar('acc_top5', acc_top5)
        self.saver_train = tf.train.Saver(self.vars)
        self.ft_step = tf.get_variable('finetune_step', shape=[], dtype=tf.int32, trainable=False)
        # optimizer & gradients
        init_lr, bnds, decay_rates, self.finetune_steps = \
            setup_bnds_decay_rates(self.model_name, self.dataset_name)
        lrn_rate = tf.train.piecewise_constant(self.ft_step,
                                               [i for i in bnds],
                                               [init_lr * decay_rate for decay_rate in decay_rates])
        # optimizer = tf.train.MomentumOptimizer(lrn_rate, FLAGS.momentum)
        optimizer = tf.train.AdamOptimizer(learning_rate=lrn_rate)
        if FLAGS.enbl_multi_gpu:
          optimizer = mgw.DistributedOptimizer(optimizer)
        grads = optimizer.compute_gradients(loss, self.trainable_vars)
      # sm write graph
      self.sm_writer.add_graph(self.sess_train.graph)
      with tf.control_dependencies(self.update_ops):
        self.ops['train'] = optimizer.apply_gradients(grads, global_step=self.ft_step)
      self.ops['summary'] = tf.summary.merge_all()
      if FLAGS.enbl_dst:
        self.ops['log'] = [lrn_rate, dst_loss, model_loss, loss, acc_top1, acc_top5]
      else:
        self.ops['log'] = [lrn_rate, model_loss, loss, acc_top1, acc_top5]
      self.ops['reset_ft_step'] = tf.assign(self.ft_step, tf.constant(0, dtype=tf.int32))
      self.ops['init'] = tf.global_variables_initializer()
      self.ops['bcast'] = mgw.broadcast_global_variables(0) if FLAGS.enbl_multi_gpu else None
      self.saver_quant = tf.train.Saver(self.vars)
  def __build_eval(self):
    """Build the evaluation graph with quantization nodes inserted."""
    with tf.Graph().as_default():
      # TensorFlow session
      # create a TF session for the current graph
      config = tf.ConfigProto()
      config.gpu_options.visible_device_list = str(mgw.local_rank() \
          if FLAGS.enbl_multi_gpu else 0)
      self.sess_eval = tf.Session(config=config)
      # data input pipeline
      with tf.variable_scope(self.data_scope):
        iterator = self.build_dataset_eval()
        images, labels = iterator.get_next()
        images.set_shape((FLAGS.batch_size, images.shape[1], images.shape[2],
                          images.shape[3]))
        self.images_eval = images
      # model definition - distilled model
      if FLAGS.enbl_dst:
        logits_dst = self.helper_dst.calc_logits(self.sess_eval, images)
      # model definition
      with tf.variable_scope(self.model_scope, reuse=tf.AUTO_REUSE):
        # forward pass
        logits = self.forward_eval(images)
        self.__quantize_eval_graph()
        # loss & accuracy
        loss, metrics = self.calc_loss(labels, logits, self.trainable_vars)
        if self.dataset_name == 'cifar_10':
          acc_top1, acc_top5 = metrics['accuracy'], tf.constant(0.)
        elif self.dataset_name == 'ilsvrc_12':
          acc_top1, acc_top5 = metrics['acc_top1'], metrics['acc_top5']
        else:
          raise ValueError("Unrecognized dataset name")
        if FLAGS.enbl_dst:
          dst_loss = self.helper_dst.calc_loss(logits, logits_dst)
          loss += dst_loss
        # TF operations & model saver
        self.ops['eval'] = [loss, acc_top1, acc_top5]
        self.saver_eval = tf.train.Saver(self.vars)
  def __quantize_train_graph(self):
    """ Insert quantization nodes to the training graph. """
    uni_quant = UniformQuantization(self.sess_train,
                                    FLAGS.uql_bucket_size,
                                    FLAGS.uql_use_buckets,
                                    FLAGS.uql_bucket_type)
    # Find Conv2d Op
    matmul_ops = uni_quant.search_matmul_op(FLAGS.uql_quantize_all_layers)
    act_ops = uni_quant.search_activation_op()
    self.statistics['nb_matmuls'] = len(matmul_ops)
    self.statistics['nb_activations'] = len(act_ops)
    # Replace Conv2d Op with quantized weights
    matmul_op_names = [op.name for op in matmul_ops]
    act_op_names = [op.name for op in act_ops]
    # build the placeholders feeding per-layer weight / activation bit widths
    self.bit_placeholders['w_train'] = tf.placeholder(tf.int64, shape=[self.statistics['nb_matmuls']], name="w_bit_list")
    self.bit_placeholders['a_train'] = tf.placeholder(tf.int64, shape=[self.statistics['nb_activations']], name="a_bit_list")
    w_bit_dict_train = self.__build_quant_dict(matmul_op_names, self.bit_placeholders['w_train'])
    a_bit_dict_train = self.__build_quant_dict(act_op_names, self.bit_placeholders['a_train'])
    uni_quant.insert_quant_op_for_weights(w_bit_dict_train)
    uni_quant.insert_quant_op_for_activations(a_bit_dict_train)
    # add layerwise finetuning. TODO: working not very well
    self.layerwise_tune_list = uni_quant.get_layerwise_tune_op(self.weights) \
        if FLAGS.uql_enbl_rl_layerwise_tune else (None, None)
  def __quantize_eval_graph(self):
    """ Insert quantization nodes to the evaluation graph. """
    uni_quant = UniformQuantization(self.sess_eval,
                                    FLAGS.uql_bucket_size,
                                    FLAGS.uql_use_buckets,
                                    FLAGS.uql_bucket_type)
    # Find matmul ops
    matmul_ops = uni_quant.search_matmul_op(FLAGS.uql_quantize_all_layers)
    act_ops = uni_quant.search_activation_op()
    assert self.statistics['nb_matmuls'] == len(matmul_ops), \
        'the length of matmul_ops on train and eval graphs does not match'
    assert self.statistics['nb_activations'] == len(act_ops), \
        'the length of act_ops on train and eval graphs does not match'
    # Replace Conv2d Op with quantized weights
    matmul_op_names = [op.name for op in matmul_ops]
    act_op_names = [op.name for op in act_ops]
    # build the placeholder for eval
    self.bit_placeholders['w_eval'] = tf.placeholder(tf.int64, shape=[self.statistics['nb_matmuls']], name="w_bit_list")
    self.bit_placeholders['a_eval'] = tf.placeholder(tf.int64, shape=[self.statistics['nb_activations']], name="a_bit_list")
    w_bit_dict_eval = self.__build_quant_dict(matmul_op_names, self.bit_placeholders['w_eval'])
    a_bit_dict_eval = self.__build_quant_dict(act_op_names, self.bit_placeholders['a_eval'])
    uni_quant.insert_quant_op_for_weights(w_bit_dict_eval)
    uni_quant.insert_quant_op_for_activations(a_bit_dict_eval)
    self.ops['bucket_storage'] = uni_quant.bucket_storage
  def __save_model(self):
    """Save the quantized model checkpoint (primary worker only)."""
    # early break for non-primary workers
    if not self.is_primary_worker():
      return
    save_quant_model_path = self.saver_quant.save(self.sess_train,
                                                  FLAGS.uql_save_quant_model_path,
                                                  self.ft_step)
    #tf.logging.info('full precision model saved to ' + save_path)
    tf.logging.info('quantized model saved to ' + save_quant_model_path)
  def __restore_model(self, is_train):
    """Restore the latest checkpoint into the train or eval session."""
    if is_train:
      save_path = tf.train.latest_checkpoint(os.path.dirname(FLAGS.save_path))
      save_dir = os.path.dirname(save_path)
      # NOTE(review): this directory listing looks like leftover debug output
      for item in os.listdir(save_dir):
        print('Print directory: ' + item)
      self.saver_train.restore(self.sess_train, save_path)
    else:
      save_path = tf.train.latest_checkpoint(os.path.dirname(FLAGS.uql_save_quant_model_path))
      self.saver_eval.restore(self.sess_eval, save_path)
    tf.logging.info('model restored from ' + save_path)
  def __monitor_progress(self, summary, log_rslt, time_prev, idx_iter):
    """Write summaries and log training statistics; returns a fresh timestamp
    (None on non-primary workers)."""
    # early break for non-primary workers
    if not self.is_primary_worker():
      return None
    # write summaries for TensorBoard visualization
    self.sm_writer.add_summary(summary, idx_iter)
    # display monitored statistics
    speed = FLAGS.batch_size * FLAGS.summ_step / (timer() - time_prev)
    if FLAGS.enbl_multi_gpu:
      speed *= mgw.size()
    # NOTE: for cifar-10, acc_top5 is 0.
    if FLAGS.enbl_dst:
      lrn_rate, dst_loss, model_loss, loss, acc_top1, acc_top5 = log_rslt[0], \
          log_rslt[1], log_rslt[2], log_rslt[3], log_rslt[4], log_rslt[5]
      tf.logging.info('iter #%d: lr = %e | dst_loss = %.4f | model_loss = %.4f | loss = %.4f | acc_top1 = %.4f | acc_top5 = %.4f | speed = %.2f pics / sec' \
        % (idx_iter + 1, lrn_rate, dst_loss, model_loss, loss, acc_top1, acc_top5, speed))
    else:
      lrn_rate, model_loss, loss, acc_top1, acc_top5 = log_rslt[0], \
          log_rslt[1], log_rslt[2], log_rslt[3], log_rslt[4]
      tf.logging.info('iter #%d: lr = %e | model_loss = %.4f | loss = %.4f | acc_top1 = %.4f | acc_top5 = %.4f | speed = %.2f pics / sec' \
        % (idx_iter + 1, lrn_rate, model_loss, loss, acc_top1, acc_top5, speed))
    return timer()
  def __show_bucket_storage(self, bucket_storage):
    """Log the bucketing overhead relative to the quantized weight storage."""
    # show the bucket storage and ratios
    weight_storage = sum(self.statistics['num_weights']) * FLAGS.uql_weight_bits \
        if not FLAGS.uql_enbl_rl_agent else sum(self.statistics['num_weights']) * FLAGS.uql_equivalent_bits
    tf.logging.info('bucket storage: %d bit / %.3f kb | weight storage: %d bit / %.3f kb | ratio: %.3f' \
      % (bucket_storage, bucket_storage / (8.*1024.), weight_storage, \
        weight_storage / (8.*1024.), bucket_storage * 1./weight_storage))
  @staticmethod
  def __build_quant_dict(keys, values):
    """ Bind keys and values to dictionaries.
    Args:
    * keys: A list of op_names
    * values: A Tensor with len(op_names) elements
    Returns:
    * dict: (key, value) for weight name and quant bits respectively
    """
    dict_ = {}
    for (idx, v) in enumerate(keys):
      dict_[v] = values[idx]
    return dict_
| [
"tensorflow.logging.info",
"tensorflow.reshape",
"tensorflow.ConfigProto",
"tensorflow.app.flags.DEFINE_boolean",
"utils.multi_gpu_wrapper.MultiGpuWrapper.broadcast_global_variables",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.get_variable",
"os.path.dirname",
"tensorflow.variable_scope",
... | [((1443, 1544), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""uql_weight_bits"""', '(4)', '"""Number of bits to use for quantizing weights"""'], {}), "('uql_weight_bits', 4,\n 'Number of bits to use for quantizing weights')\n", (1470, 1544), True, 'import tensorflow as tf\n'), ((1547, 1656), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""uql_activation_bits"""', '(4)', '"""Number of bits to use for quantizing activations"""'], {}), "('uql_activation_bits', 4,\n 'Number of bits to use for quantizing activations')\n", (1574, 1656), True, 'import tensorflow as tf\n'), ((1659, 1736), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""uql_use_buckets"""', '(False)', '"""Use bucketing or not"""'], {}), "('uql_use_buckets', False, 'Use bucketing or not')\n", (1686, 1736), True, 'import tensorflow as tf\n'), ((1737, 1813), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""uql_bucket_size"""', '(256)', '"""Number of bucket size"""'], {}), "('uql_bucket_size', 256, 'Number of bucket size')\n", (1764, 1813), True, 'import tensorflow as tf\n'), ((1814, 1901), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""uql_quant_epochs"""', '(60)', '"""To be determined by datasets"""'], {}), "('uql_quant_epochs', 60,\n 'To be determined by datasets')\n", (1841, 1901), True, 'import tensorflow as tf\n'), ((1898, 2039), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""uql_save_quant_model_path"""', '"""./uql_quant_models/uql_quant_model.ckpt"""', '"""dir to save quantization model"""'], {}), "('uql_save_quant_model_path',\n './uql_quant_models/uql_quant_model.ckpt', 'dir to save quantization model'\n )\n", (1924, 2039), True, 'import tensorflow as tf\n'), ((2037, 2157), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""uql_quantize_all_layers"""', '(False)', '"""If False, leaving first and last 
layers unquantized"""'], {}), "('uql_quantize_all_layers', False,\n 'If False, leaving first and last layers unquantized')\n", (2064, 2157), True, 'import tensorflow as tf\n'), ((2160, 2259), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""uql_bucket_type"""', '"""channel"""', '"""Two types for now: [channel, split]"""'], {}), "('uql_bucket_type', 'channel',\n 'Two types for now: [channel, split]')\n", (2186, 2259), True, 'import tensorflow as tf\n'), ((4261, 4478), 'learners.uniform_quantization.bit_optimizer.BitOptimizer', 'BitOptimizer', (['self.dataset_name', 'self.weights', 'self.statistics', 'self.bit_placeholders', 'self.ops', 'self.layerwise_tune_list', 'self.sess_train', 'self.sess_eval', 'self.saver_train', 'self.saver_eval', 'self.auto_barrier'], {}), '(self.dataset_name, self.weights, self.statistics, self.\n bit_placeholders, self.ops, self.layerwise_tune_list, self.sess_train,\n self.sess_eval, self.saver_train, self.saver_eval, self.auto_barrier)\n', (4273, 4478), False, 'from learners.uniform_quantization.bit_optimizer import BitOptimizer\n'), ((5313, 5320), 'timeit.default_timer', 'timer', ([], {}), '()\n', (5318, 5320), True, 'from timeit import default_timer as timer\n'), ((12760, 12870), 'learners.uniform_quantization.utils.UniformQuantization', 'UniformQuantization', (['self.sess_train', 'FLAGS.uql_bucket_size', 'FLAGS.uql_use_buckets', 'FLAGS.uql_bucket_type'], {}), '(self.sess_train, FLAGS.uql_bucket_size, FLAGS.\n uql_use_buckets, FLAGS.uql_bucket_type)\n', (12779, 12870), False, 'from learners.uniform_quantization.utils import UniformQuantization\n'), ((13444, 13531), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': "[self.statistics['nb_matmuls']]", 'name': '"""w_bit_list"""'}), "(tf.int64, shape=[self.statistics['nb_matmuls']], name=\n 'w_bit_list')\n", (13458, 13531), True, 'import tensorflow as tf\n'), ((13566, 13657), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': 
"[self.statistics['nb_activations']]", 'name': '"""a_bit_list"""'}), "(tf.int64, shape=[self.statistics['nb_activations']], name=\n 'a_bit_list')\n", (13580, 13657), True, 'import tensorflow as tf\n'), ((14288, 14397), 'learners.uniform_quantization.utils.UniformQuantization', 'UniformQuantization', (['self.sess_eval', 'FLAGS.uql_bucket_size', 'FLAGS.uql_use_buckets', 'FLAGS.uql_bucket_type'], {}), '(self.sess_eval, FLAGS.uql_bucket_size, FLAGS.\n uql_use_buckets, FLAGS.uql_bucket_type)\n', (14307, 14397), False, 'from learners.uniform_quantization.utils import UniformQuantization\n'), ((15143, 15230), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': "[self.statistics['nb_matmuls']]", 'name': '"""w_bit_list"""'}), "(tf.int64, shape=[self.statistics['nb_matmuls']], name=\n 'w_bit_list')\n", (15157, 15230), True, 'import tensorflow as tf\n'), ((15264, 15355), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': "[self.statistics['nb_activations']]", 'name': '"""a_bit_list"""'}), "(tf.int64, shape=[self.statistics['nb_activations']], name=\n 'a_bit_list')\n", (15278, 15355), True, 'import tensorflow as tf\n'), ((16128, 16196), 'tensorflow.logging.info', 'tf.logging.info', (["('quantized model saved to ' + save_quant_model_path)"], {}), "('quantized model saved to ' + save_quant_model_path)\n", (16143, 16196), True, 'import tensorflow as tf\n'), ((16684, 16735), 'tensorflow.logging.info', 'tf.logging.info', (["('model restored from ' + save_path)"], {}), "('model restored from ' + save_path)\n", (16699, 16735), True, 'import tensorflow as tf\n'), ((18009, 18016), 'timeit.default_timer', 'timer', ([], {}), '()\n', (18014, 18016), True, 'from timeit import default_timer as timer\n'), ((18305, 18560), 'tensorflow.logging.info', 'tf.logging.info', (["('bucket storage: %d bit / %.3f kb | weight storage: %d bit / %.3f kb | ratio: %.3f'\n % (bucket_storage, bucket_storage / (8.0 * 1024.0), weight_storage, \n weight_storage / (8.0 * 1024.0), 
bucket_storage * 1.0 / weight_storage))"], {}), "(\n 'bucket storage: %d bit / %.3f kb | weight storage: %d bit / %.3f kb | ratio: %.3f'\n % (bucket_storage, bucket_storage / (8.0 * 1024.0), weight_storage, \n weight_storage / (8.0 * 1024.0), bucket_storage * 1.0 / weight_storage))\n", (18320, 18560), True, 'import tensorflow as tf\n'), ((2465, 2475), 'utils.multi_gpu_wrapper.MultiGpuWrapper.size', 'mgw.size', ([], {}), '()\n', (2473, 2475), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((2557, 2567), 'utils.multi_gpu_wrapper.MultiGpuWrapper.size', 'mgw.size', ([], {}), '()\n', (2565, 2567), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((3836, 3894), 'learners.distillation_helper.DistillationHelper', 'DistillationHelper', (['sm_writer', 'model_helper', 'self.mpi_comm'], {}), '(sm_writer, model_helper, self.mpi_comm)\n', (3854, 3894), False, 'from learners.distillation_helper import DistillationHelper\n'), ((7482, 7498), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (7496, 7498), True, 'import tensorflow as tf\n'), ((7635, 7660), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (7645, 7660), True, 'import tensorflow as tf\n'), ((9718, 9838), 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (['self.ft_step', '[i for i in bnds]', '[(init_lr * decay_rate) for decay_rate in decay_rates]'], {}), '(self.ft_step, [i for i in bnds], [(init_lr *\n decay_rate) for decay_rate in decay_rates])\n', (9745, 9838), True, 'import tensorflow as tf\n'), ((10015, 10061), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lrn_rate'}), '(learning_rate=lrn_rate)\n', (10037, 10061), True, 'import tensorflow as tf\n'), ((10465, 10487), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (10485, 10487), True, 'import tensorflow as tf\n'), ((10802, 10835), 'tensorflow.global_variables_initializer', 
'tf.global_variables_initializer', ([], {}), '()\n', (10833, 10835), True, 'import tensorflow as tf\n'), ((10955, 10980), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.vars'], {}), '(self.vars)\n', (10969, 10980), True, 'import tensorflow as tf\n'), ((11134, 11150), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (11148, 11150), True, 'import tensorflow as tf\n'), ((11286, 11311), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (11296, 11311), True, 'import tensorflow as tf\n'), ((16350, 16376), 'os.path.dirname', 'os.path.dirname', (['save_path'], {}), '(save_path)\n', (16365, 16376), False, 'import os\n'), ((16395, 16415), 'os.listdir', 'os.listdir', (['save_dir'], {}), '(save_dir)\n', (16405, 16415), False, 'import os\n'), ((17160, 17170), 'utils.multi_gpu_wrapper.MultiGpuWrapper.size', 'mgw.size', ([], {}), '()\n', (17168, 17170), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((17392, 17638), 'tensorflow.logging.info', 'tf.logging.info', (["('iter #%d: lr = %e | dst_loss = %.4f | model_loss = %.4f | loss = %.4f | acc_top1 = %.4f | acc_top5 = %.4f | speed = %.2f pics / sec'\n % (idx_iter + 1, lrn_rate, dst_loss, model_loss, loss, acc_top1,\n acc_top5, speed))"], {}), "(\n 'iter #%d: lr = %e | dst_loss = %.4f | model_loss = %.4f | loss = %.4f | acc_top1 = %.4f | acc_top5 = %.4f | speed = %.2f pics / sec'\n % (idx_iter + 1, lrn_rate, dst_loss, model_loss, loss, acc_top1,\n acc_top5, speed))\n", (17407, 17638), True, 'import tensorflow as tf\n'), ((17780, 17994), 'tensorflow.logging.info', 'tf.logging.info', (["('iter #%d: lr = %e | model_loss = %.4f | loss = %.4f | acc_top1 = %.4f | acc_top5 = %.4f | speed = %.2f pics / sec'\n % (idx_iter + 1, lrn_rate, model_loss, loss, acc_top1, acc_top5, speed))"], {}), "(\n 'iter #%d: lr = %e | model_loss = %.4f | loss = %.4f | acc_top1 = %.4f | acc_top5 = %.4f | speed = %.2f pics / sec'\n % (idx_iter + 1, lrn_rate, model_loss, loss, acc_top1, 
acc_top5, speed))\n", (17795, 17994), True, 'import tensorflow as tf\n'), ((7701, 7735), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.data_scope'], {}), '(self.data_scope)\n', (7718, 7735), True, 'import tensorflow as tf\n'), ((8130, 8186), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.model_scope'], {'reuse': 'tf.AUTO_REUSE'}), '(self.model_scope, reuse=tf.AUTO_REUSE)\n', (8147, 8186), True, 'import tensorflow as tf\n'), ((9209, 9252), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""model_loss"""', 'model_loss'], {}), "('model_loss', model_loss)\n", (9226, 9252), True, 'import tensorflow as tf\n'), ((9261, 9292), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (9278, 9292), True, 'import tensorflow as tf\n'), ((9301, 9340), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""acc_top1"""', 'acc_top1'], {}), "('acc_top1', acc_top1)\n", (9318, 9340), True, 'import tensorflow as tf\n'), ((9349, 9388), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""acc_top5"""', 'acc_top5'], {}), "('acc_top5', acc_top5)\n", (9366, 9388), True, 'import tensorflow as tf\n'), ((9417, 9442), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.vars'], {}), '(self.vars)\n', (9431, 9442), True, 'import tensorflow as tf\n'), ((9467, 9542), 'tensorflow.get_variable', 'tf.get_variable', (['"""finetune_step"""'], {'shape': '[]', 'dtype': 'tf.int32', 'trainable': '(False)'}), "('finetune_step', shape=[], dtype=tf.int32, trainable=False)\n", (9482, 9542), True, 'import tensorflow as tf\n'), ((10113, 10148), 'utils.multi_gpu_wrapper.MultiGpuWrapper.DistributedOptimizer', 'mgw.DistributedOptimizer', (['optimizer'], {}), '(optimizer)\n', (10137, 10148), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((10308, 10348), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['self.update_ops'], {}), '(self.update_ops)\n', (10331, 10348), True, 'import tensorflow as tf\n'), 
((10745, 10775), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (10756, 10775), True, 'import tensorflow as tf\n'), ((10862, 10895), 'utils.multi_gpu_wrapper.MultiGpuWrapper.broadcast_global_variables', 'mgw.broadcast_global_variables', (['(0)'], {}), '(0)\n', (10892, 10895), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((11352, 11386), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.data_scope'], {}), '(self.data_scope)\n', (11369, 11386), True, 'import tensorflow as tf\n'), ((11813, 11869), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.model_scope'], {'reuse': 'tf.AUTO_REUSE'}), '(self.model_scope, reuse=tf.AUTO_REUSE)\n', (11830, 11869), True, 'import tensorflow as tf\n'), ((12620, 12645), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.vars'], {}), '(self.vars)\n', (12634, 12645), True, 'import tensorflow as tf\n'), ((16299, 16331), 'os.path.dirname', 'os.path.dirname', (['FLAGS.save_path'], {}), '(FLAGS.save_path)\n', (16314, 16331), False, 'import os\n'), ((16573, 16621), 'os.path.dirname', 'os.path.dirname', (['FLAGS.uql_save_quant_model_path'], {}), '(FLAGS.uql_save_quant_model_path)\n', (16588, 16621), False, 'import os\n'), ((17095, 17102), 'timeit.default_timer', 'timer', ([], {}), '()\n', (17100, 17102), True, 'from timeit import default_timer as timer\n'), ((7027, 7043), 'numpy.array', 'np.array', (['losses'], {}), '(losses)\n', (7035, 7043), True, 'import numpy as np\n'), ((7097, 7117), 'numpy.array', 'np.array', (['accuracies'], {}), '(accuracies)\n', (7105, 7117), True, 'import numpy as np\n'), ((7415, 7425), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (7423, 7425), True, 'import tensorflow as tf\n'), ((7550, 7566), 'utils.multi_gpu_wrapper.MultiGpuWrapper.local_rank', 'mgw.local_rank', ([], {}), '()\n', (7564, 7566), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((9161, 9200), 'tensorflow.summary.scalar', 
'tf.summary.scalar', (['"""dst_loss"""', 'dst_loss'], {}), "('dst_loss', dst_loss)\n", (9178, 9200), True, 'import tensorflow as tf\n'), ((11017, 11027), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (11025, 11027), True, 'import tensorflow as tf\n'), ((11202, 11218), 'utils.multi_gpu_wrapper.MultiGpuWrapper.local_rank', 'mgw.local_rank', ([], {}), '()\n', (11216, 11218), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((8798, 8814), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (8809, 8814), True, 'import tensorflow as tf\n'), ((12174, 12190), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (12185, 12190), True, 'import tensorflow as tf\n'), ((8502, 8521), 'tensorflow.reshape', 'tf.reshape', (['v', '[-1]'], {}), '(v, [-1])\n', (8512, 8521), True, 'import tensorflow as tf\n')] |
#
# Copyright 2019-2021 <NAME>
# 2020-2021 <NAME>
# 2019 <NAME>
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# The DI file format is described in detail here:
# http://www.physics.arizona.edu/~smanne/DI/software/fileformats.html
#
import re
from datetime import datetime
import numpy as np
from ..Exceptions import MetadataAlreadyFixedByFile
from ..UniformLineScanAndTopography import Topography
from ..UnitConversion import get_unit_conversion_factor, length_units, mangle_length_unit_utf8
from .Reader import ReaderBase, ChannelInfo
###
class DIReader(ReaderBase):
    # Reader for Digital Instruments (Veeco/Bruker) Nanoscope scanning-probe
    # files. The ASCII header is parsed eagerly in `__init__`; the binary
    # height data is only read when `topography` is called.
    _format = 'di'
    _name = 'Bruker Dimension; Veeco/Digital Instruments Nanoscope'
    _description = '''
Digital Instruments Nanoscope files typically have a three-digit number as
the file extension (.001, .002, .003, ...). Newer versions of this file format
have the extension .spm. This format contains information on the physical size
of the topography map as well as its units. The reader supports V4.3 and later
version of the format.
'''

    def __init__(self, fobj):
        """
        Load Digital Instrument's Nanoscope files.

        Parses the ASCII header (sections introduced by lines starting with
        a backslash-asterisk, key/value pairs starting with a backslash,
        terminated by the 'File list end' marker) and creates one
        `ChannelInfo` per 'Ciao image list' section.

        Arguments
        ---------
        fobj : filename or file object
            File or data stream to open.
        """
        self._fobj = fobj
        close_file = False
        if not hasattr(fobj, 'read'):
            fobj = open(fobj, 'rb')
            close_file = True
        try:
            # Parse the header into a list of (section name, key/value dict)
            # tuples. Header text is latin-1 encoded.
            parameters = []
            section_name = None
            section_dict = {}
            L = fobj.readline().decode('latin-1').strip()
            while L and L.lower() != r'\*file list end':
                if L.startswith('\\*'):
                    # Start of a new section; flush the previous one.
                    if section_name is not None:
                        parameters += [(section_name, section_dict)]
                    new_section_name = L[2:].lower()
                    if section_name is None:
                        # The very first section must be 'File list'.
                        if new_section_name != 'file list':
                            raise IOError("Header must start with the "
                                          "'File list' section.")
                    section_name = new_section_name
                    section_dict = {}
                elif L.startswith('\\'):
                    # Key/value entry within the current section.
                    if section_name is None:
                        raise IOError('Encountered key before section '
                                      'header.')
                    s = L[1:].split(': ', 1)
                    try:
                        key, value = s
                    except ValueError:
                        # Key without a value.
                        key, = s
                        value = ''
                    section_dict[key.lower()] = value.strip()
                else:
                    raise IOError(f"Header line '{L}' does not start with a slash.")
                L = fobj.readline().decode('latin-1').strip()
            if section_name is None:
                raise IOError('No sections found in header.')
            # Flush the final section.
            parameters += [(section_name, section_dict)]

            self._channels = []
            self._offsets = []
            scanner = {}
            equipment = {}
            info = {}
            for n, p in parameters:
                if n == 'file list':
                    if 'date' in p:
                        info['acquisition_time'] = str(datetime.strptime(p['date'], '%I:%M:%S %p %a %b %d %Y'))
                elif n == 'scanner list' or n == 'ciao scan list':
                    scanner.update(p)
                elif n == 'equipment list':
                    equipment.update(p)
                elif n == 'ciao image list':
                    # Each 'Ciao image list' section describes one data
                    # channel; the channel name is embedded in the
                    # '@2:image data' entry.
                    image_data_key = re.match(r'^S \[(.*?)\] ',
                                              p['@2:image data']).group(1)

                    # Grid resolution and physical scan size.
                    nx = int(p['samps/line'])
                    ny = int(p['number of lines'])

                    s = p['scan size'].split(' ', 2)
                    sx = float(s[0])
                    sy = float(s[1])
                    xy_unit = mangle_length_unit_utf8(s[2])

                    # Location and layout of the binary data block.
                    offset = int(p['data offset'])
                    self._offsets.append(offset)
                    length = int(p['data length'])
                    elsize = int(p['bytes/pixel'])
                    binary_scale = 1
                    info['bytes_per_pixel'] = elsize
                    if elsize == 4:
                        binary_scale = 1 / 65536  # Rescale 32-bit integer to a 16-bit range
                    elif elsize != 2:
                        raise IOError(f"Don't know how to handle {elsize} bytes per pixel data.")
                    if nx * ny * elsize != length:
                        raise IOError(f'File reports a data block of length {length}, but computing the size of the '
                                      f'data block from the number of grid points and the per-pixel storage yields '
                                      f'a value of {nx * ny * elsize}.')

                    # The '@2:z scale' entry encodes the hard scale (value
                    # per least significant bit) and its unit.
                    scale_re = re.match(
                        r'^V \[(.*?)\] \(([0-9\.]+) (.*)\/LSB\) (.*) '
                        r'(.*)', p['@2:z scale'])
                    quantity = scale_re.group(1).lower()
                    hard_scale = float(scale_re.group(4)) / 65536
                    hard_unit = scale_re.group(5)

                    # Look up the soft scale for this quantity in the
                    # scanner section; the entry must look like 'V <value>
                    # [<unit>/<unit>]'.
                    s = scanner['@' + quantity].split()
                    if s[0] != 'V' or len(s) < 2:
                        raise IOError('Malformed Nanoscope DI file.')
                    soft_scale = float(s[1])

                    height_unit = None
                    hard_to_soft = 1.0
                    if len(s) > 2:
                        # Check units
                        height_unit, soft_unit = s[2].split('/')
                        hard_to_soft = get_unit_conversion_factor(hard_unit,
                                                                   soft_unit)
                        if hard_to_soft is None:
                            raise RuntimeError(
                                "Units for hard (={}) and soft (={}) "
                                "scale differ for '{}'. Don't know how "
                                "to handle this.".format(hard_unit,
                                                         soft_unit,
                                                         image_data_key))
                    if height_unit in length_units:
                        # Heights carry a length unit; convert the lateral
                        # sizes so a single common unit can be reported.
                        height_unit = mangle_length_unit_utf8(height_unit)
                        if xy_unit != height_unit:
                            fac = get_unit_conversion_factor(xy_unit,
                                                             height_unit)
                            sx *= fac
                            sy *= fac
                            xy_unit = height_unit
                        unit = height_unit
                    else:
                        # Non-length channel (e.g. deflection): report
                        # lateral and data units separately.
                        unit = (xy_unit, height_unit)

                    # Overall conversion from raw integers to physical values.
                    height_scale_factor = hard_scale * hard_to_soft * soft_scale * binary_scale

                    if 'microscope' in equipment:
                        info['instrument'] = {'name': equipment['microscope']}
                    elif 'description' in equipment:
                        info['instrument'] = {'name': equipment['description']}

                    channel = ChannelInfo(self,
                                          len(self._channels),
                                          name=image_data_key,
                                          dim=2,
                                          nb_grid_pts=(nx, ny),
                                          physical_sizes=(sx, sy),
                                          height_scale_factor=height_scale_factor,
                                          periodic=False,
                                          unit=unit,
                                          info=info)
                    self._channels.append(channel)
        finally:
            if close_file:
                fobj.close()

    @property
    def channels(self):
        return self._channels

    def topography(self, channel_index=None, physical_sizes=None,
                   height_scale_factor=None, unit=None, info={}, periodic=False,
                   subdomain_locations=None, nb_subdomain_grid_pts=None):
        # NOTE(review): the mutable default `info={}` is safe here because the
        # dict is only read (via `_info.update`), never mutated.
        if channel_index is None:
            channel_index = self._default_channel_index
        if subdomain_locations is not None or \
                nb_subdomain_grid_pts is not None:
            raise RuntimeError(
                'This reader does not support MPI parallelization.')
        close_file = False
        if not hasattr(self._fobj, 'read'):
            fobj = open(self._fobj, 'rb')
            close_file = True
        else:
            fobj = self._fobj

        channel = self._channels[channel_index]
        if unit is not None:
            raise MetadataAlreadyFixedByFile('unit')
        sx, sy = self._check_physical_sizes(physical_sizes,
                                            channel.physical_sizes)
        nx, ny = channel.nb_grid_pts

        offset = self._offsets[channel_index]
        # Pick the little-endian integer dtype matching the per-pixel storage
        # recorded in the header.
        if channel.info['bytes_per_pixel'] == 2:
            dtype = np.dtype('<i2')
        elif channel.info['bytes_per_pixel'] == 4:
            dtype = np.dtype('<i4')
        else:
            # Bug fix: the original message interpolated the user-supplied
            # `info` dict (raising KeyError) instead of the channel metadata.
            raise IOError(f"Don't know how to handle {channel.info['bytes_per_pixel']} bytes per pixel data.")

        ###################################

        # Read the raw data block for this channel.
        fobj.seek(offset)
        rawdata = fobj.read(nx * ny * dtype.itemsize)
        # NOTE(review): data is reshaped to (nx, ny) and transposed below;
        # verify ordering against 'samps/line'/'number of lines' for
        # non-square maps.
        unscaleddata = np.frombuffer(rawdata, count=nx * ny, dtype=dtype).reshape(nx, ny)

        # internal information from file
        _info = dict(data_source=channel.name)
        _info.update(info)
        if 'acquisition_time' in channel.info:
            _info['acquisition_time'] = channel.info['acquisition_time']
        if 'instrument' in channel.info:
            try:
                # This can be a nested dictionary!
                _info['instrument'].update(channel.info['instrument'])
            except KeyError:
                _info['instrument'] = channel.info['instrument']

        # it is not allowed to provide extra `physical_sizes` here:
        if physical_sizes is not None:
            raise MetadataAlreadyFixedByFile('physical_sizes')

        # the orientation of the heights is modified in order to match
        # the image of gwyddion when plotted with imshow(t.heights().T)
        # or pcolormesh(t.heights().T) for origin in lower left and
        # with inverted y axis (cartesian coordinate system)
        surface = Topography(np.fliplr(unscaleddata.T), physical_sizes=(sx, sy), unit=channel.unit, info=_info,
                              periodic=periodic)
        if height_scale_factor is None:
            height_scale_factor = channel.height_scale_factor
        elif channel.height_scale_factor is not None:
            raise MetadataAlreadyFixedByFile('height_scale_factor')
        if height_scale_factor is not None:
            surface = surface.scale(height_scale_factor)

        if close_file:
            fobj.close()

        return surface

    channels.__doc__ = ReaderBase.channels.__doc__
    topography.__doc__ = ReaderBase.topography.__doc__
| [
"numpy.frombuffer",
"numpy.dtype",
"re.match",
"numpy.fliplr",
"datetime.datetime.strptime"
] | [((10193, 10208), 'numpy.dtype', 'np.dtype', (['"""<i2"""'], {}), "('<i2')\n", (10201, 10208), True, 'import numpy as np\n'), ((11613, 11638), 'numpy.fliplr', 'np.fliplr', (['unscaleddata.T'], {}), '(unscaleddata.T)\n', (11622, 11638), True, 'import numpy as np\n'), ((10280, 10295), 'numpy.dtype', 'np.dtype', (['"""<i4"""'], {}), "('<i4')\n", (10288, 10295), True, 'import numpy as np\n'), ((10562, 10612), 'numpy.frombuffer', 'np.frombuffer', (['rawdata'], {'count': '(nx * ny)', 'dtype': 'dtype'}), '(rawdata, count=nx * ny, dtype=dtype)\n', (10575, 10612), True, 'import numpy as np\n'), ((4337, 4392), 'datetime.datetime.strptime', 'datetime.strptime', (["p['date']", '"""%I:%M:%S %p %a %b %d %Y"""'], {}), "(p['date'], '%I:%M:%S %p %a %b %d %Y')\n", (4354, 4392), False, 'from datetime import datetime\n'), ((6003, 6090), 're.match', 're.match', (['"""^V \\\\[(.*?)\\\\] \\\\(([0-9\\\\.]+) (.*)\\\\/LSB\\\\) (.*) (.*)"""', "p['@2:z scale']"], {}), "('^V \\\\[(.*?)\\\\] \\\\(([0-9\\\\.]+) (.*)\\\\/LSB\\\\) (.*) (.*)', p[\n '@2:z scale'])\n", (6011, 6090), False, 'import re\n'), ((4665, 4712), 're.match', 're.match', (['"""^S \\\\[(.*?)\\\\] """', "p['@2:image data']"], {}), "('^S \\\\[(.*?)\\\\] ', p['@2:image data'])\n", (4673, 4712), False, 'import re\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs centralied training and personalization on EMNIST."""
import collections
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from basisnet.personalization.centralized_emnist import data_processing
from basisnet.personalization.centralized_emnist import emnist_models
from basisnet.personalization.centralized_emnist import training_specs
# Training hyperparameters
flags.DEFINE_integer('client_datasets_random_seed', 1,
                     'Random seed for client sampling.')
flags.DEFINE_float('client_learning_rate', 1e-3,
                   'learning rate for client training.')
# Training loop configuration
flags.DEFINE_string(
    'experiment_name', 'test',
    'The name of this experiment. Will be appended to '
    '--root_output_dir to separate experiment results.')
flags.mark_flag_as_required('experiment_name')
flags.DEFINE_string('root_output_dir', '/tmp/basisnet/centralized_emnist',
                    'Root directory for writing experiment output.')
flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')
flags.DEFINE_integer(
    'rounds_per_eval', 100,
    'How often to evaluate the global model on the validation dataset.')
flags.DEFINE_integer('rounds_per_checkpoint', 100,
                     'How often to checkpoint the global model.')
flags.DEFINE_string('modeldir', '', 'The dir for saving checkpoints and logs.')
# Fixed help string: implicit concatenation was missing a space
# ('...do not usetf_function.').
flags.DEFINE_bool('debug', False, 'If true, reduce batch size and do not use '
                  'tf_function.')
# For personalization
# Fixed help string: implicit concatenation was missing a space
# ('...fine-tuningto use...').
flags.DEFINE_integer(
    'fine_tune_epoch', 20, 'number of epochs for fine-tuning '
    'to use from test set for per-round validation.')
flags.DEFINE_integer('num_basis', 4,
                     'number of basis to learn, 1 = original model.')
flags.DEFINE_float(
    'num_filters_expand', 1,
    'number of expanding Conv channel size.')
flags.DEFINE_float(
    'temp', 1.0, 'temperature for softmax of generating the client embedding.')
# Embedding types accepted by --embedding_type.
_SUPPORTED_EMBEDDING_TYPE = ['lookup']
flags.DEFINE_enum('embedding_type', 'lookup', _SUPPORTED_EMBEDDING_TYPE,
                  'The type of the client embedding.')
flags.DEFINE_boolean('run_sweep', False, 'Whether to'
                     ' run hyper parameter tuning with sweep.')
flags.DEFINE_boolean('digit_only', False, 'digit_only for emnist')
flags.DEFINE_boolean('global_embedding', False,
                     'train with global_embedding only')
flags.DEFINE_boolean('with_dist', False, 'use label distribution as the inputs')
FLAGS = flags.FLAGS
def main(argv):
tf.compat.v2.enable_v2_behavior()
# necessary to enable hyperparameter explorations.
# xm.setup_work_unit()
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
only_digits=FLAGS.digit_only)
if 'test' in FLAGS.experiment_name:
logging.info('Test run ...')
num_client = 20
num_test_client = 20
epochs = 1
else:
num_client = 2500
num_test_client = 900
epochs = 40
train_batch_size = 256
cliend_encodings = {}
for i, idx in enumerate(emnist_train.client_ids):
cliend_encodings[idx] = i
all_client_ids = np.array(emnist_train.client_ids)
np.random.shuffle(all_client_ids)
train_client_ids = all_client_ids[:num_client]
test_client_ids = all_client_ids[num_client:num_client + num_test_client]
train_tuple, _, test_tuple = data_processing.parse_data(
emnist_train,
emnist_test,
train_client_ids,
cliend_encodings,
with_dist=FLAGS.with_dist)
ft_train_tuple, ft_sp_train_tuple, ft_test_tuple = data_processing.parse_data(
emnist_train,
emnist_test,
test_client_ids,
cliend_encodings,
with_dist=FLAGS.with_dist)
dataset = data_processing.pack_dataset(
train_tuple, mode='train', with_dist=FLAGS.with_dist)
val_dataset = data_processing.pack_dataset(
test_tuple, mode='test', with_dist=FLAGS.with_dist)
if len(argv) > 1:
raise app.UsageError('Expected no command-line arguments, '
'got: {}'.format(argv))
task_spec = training_specs.TaskSpec(
fine_tune_epoch=FLAGS.fine_tune_epoch,
num_basis=FLAGS.num_basis,
num_filters_expand=FLAGS.num_filters_expand,
temp=FLAGS.temp,
embedding_type=FLAGS.embedding_type)
model_builder = emnist_models.get_model_builder(
task_spec,
only_digits=FLAGS.digit_only,
batch_size=train_batch_size,
with_dist=FLAGS.with_dist,
global_embedding_only=FLAGS.global_embedding)
basisnet = model_builder()
basisnet.summary()
learning_rate = FLAGS.client_learning_rate
logging.info(learning_rate)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
basisnet.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['accuracy'])
basisnet.fit(
dataset, epochs=epochs, validation_data=val_dataset, verbose=2)
results = basisnet.evaluate(val_dataset)
acc = results[1]
logging.info(acc)
checkpoint_path = FLAGS.modeldir + 'emnist_basis_%d_lr%f_%s.ckpt' % (
FLAGS.num_basis, FLAGS.client_learning_rate, FLAGS.experiment_name)
basisnet.save_weights(checkpoint_path)
# Personalization
per_batch_size = 20
def eval_per_acc(preds, dataset):
pred_cls = np.argmax(preds, -1)
dataset = dataset.unbatch()
per_acc_dict = collections.OrderedDict()
for y_hat, (x, y)in zip(pred_cls, dataset):
clnt_id = str(x['input_id'])
if clnt_id not in per_acc_dict:
per_acc_dict[clnt_id] = {'cnt': 0, 'correct': 0}
per_acc_dict[clnt_id]['cnt'] += 1
per_acc_dict[clnt_id]['correct'] += int(y_hat == y.numpy())
per_acc_list = [d['correct'] / d['cnt'] for d in per_acc_dict.values()]
return per_acc_list
def finetuning(mode,
               ft_dataset,
               ft_dataset_test,
               train_size=1,
               fix_basis=True,
               global_exp=False):
    """Fine-tune a copy of the trained BasisNet and log per-client accuracy.

    Builds a fresh model (batch size `per_batch_size`), copies the globally
    trained weights from `basisnet`, optionally freezes every layer except
    the client embedding, then fine-tunes for FLAGS.fine_tune_epoch epochs,
    logging per-client accuracy before and after each epoch.

    Args:
      mode: tag used only for logging.
      ft_dataset: dataset used for fine-tuning.
      ft_dataset_test: dataset used for per-client evaluation.
      train_size: only logged here — presumably applied by the caller when
        building ft_dataset; TODO confirm.
      fix_basis: if True (and not the global-embedding / single-basis
        configuration), set every layer except 'embedding' non-trainable.
      global_exp: build the model with global_embedding_only=True.

    Returns:
      The fine-tuned local model.
    """
    logging.info('==============')
    logging.info(mode)
    logging.info(train_size)
    logging.info('Bases fixed' if fix_basis else 'Bases not fixed')
    logging.info(
        'Global experiment' if global_exp else 'Personalized experiment')
    logging.info('==============')
    per_model_builder = emnist_models.get_model_builder(
        task_spec,
        only_digits=FLAGS.digit_only,
        batch_size=per_batch_size,
        with_dist=FLAGS.with_dist,
        global_embedding_only=global_exp)
    local_basisnet = per_model_builder()
    local_basisnet.summary()
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
    local_basisnet.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
        metrics=['accuracy'])
    # Start from the globally trained weights.
    local_basisnet.set_weights(basisnet.get_weights())
    if fix_basis:
        if FLAGS.global_embedding or FLAGS.num_basis == 1:
            # local fine-tune the whole network
            pass
        else:
            # only fine-tune the embedding
            logging.info('Fix basis')
            for layer in local_basisnet.layers:
                if layer.name != 'embedding':
                    layer.trainable = False
    # Accuracy of the copied global model, before any fine-tuning.
    preds = local_basisnet.predict(ft_dataset_test)
    per_acc_list = eval_per_acc(preds, ft_dataset_test)
    logging.info('Before fine-tuning')
    logging.info(np.nanmean(per_acc_list))
    logging.info(per_acc_list)
    for ep in range(FLAGS.fine_tune_epoch):
        local_basisnet.fit(
            ft_dataset, epochs=1, verbose=0, validation_data=ft_dataset_test)
        preds = local_basisnet.predict(ft_dataset_test)
        post_acc_list = eval_per_acc(preds, ft_dataset_test)
        logging.info('Fine-tune epoch%d', ep)
        logging.info(np.nanmean(post_acc_list))
        logging.info(post_acc_list)
    return local_basisnet
ft_dataset = data_processing.pack_dataset(
ft_train_tuple,
mode='train',
batch_size=per_batch_size,
with_dist=FLAGS.with_dist)
sp_ft_dataset = data_processing.pack_dataset(
ft_sp_train_tuple,
mode='train',
batch_size=per_batch_size,
with_dist=FLAGS.with_dist)
ft_val_dataset = data_processing.pack_dataset(
ft_test_tuple,
mode='test',
batch_size=per_batch_size,
with_dist=FLAGS.with_dist)
# Not fix bases
finetuning(
mode='test',
ft_dataset=ft_dataset,
ft_dataset_test=ft_val_dataset,
fix_basis=False)
finetuning(
mode='test',
ft_dataset=sp_ft_dataset,
ft_dataset_test=ft_val_dataset,
fix_basis=False,
train_size=0.1)
if FLAGS.num_basis == 1:
return
# Fix bases
finetuning(mode='test', ft_dataset=ft_dataset, ft_dataset_test=ft_val_dataset)
finetuning(
mode='test',
ft_dataset=sp_ft_dataset,
ft_dataset_test=ft_val_dataset,
train_size=0.1)
# Global Acc
local_basisnet = finetuning(
mode='test',
ft_dataset=ft_dataset,
ft_dataset_test=ft_val_dataset,
global_exp=True)
finetuning(
mode='test',
ft_dataset=sp_ft_dataset,
ft_dataset_test=ft_val_dataset,
train_size=0.1,
global_exp=True)
global_embedding = local_basisnet.get_layer('embedding').get_weights()[0][0]
new_embedding = np.tile(global_embedding, (3402, 1))
basisnet.get_layer('embedding').set_weights([new_embedding])
finetuning(mode='test', ft_dataset=ft_dataset, ft_dataset_test=ft_val_dataset)
finetuning(
mode='test',
ft_dataset=sp_ft_dataset,
ft_dataset_test=ft_val_dataset,
train_size=0.1)
if __name__ == '__main__':
    # absl entry point: parses flags, then calls main(argv).
    app.run(main)
| [
"numpy.argmax",
"basisnet.personalization.centralized_emnist.emnist_models.get_model_builder",
"absl.logging.info",
"absl.flags.DEFINE_boolean",
"numpy.tile",
"numpy.nanmean",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"absl.flags.DEFINE_bool",
"absl.flags.mark_flag_as_required",
"ab... | [((1079, 1173), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""client_datasets_random_seed"""', '(1)', '"""Random seed for client sampling."""'], {}), "('client_datasets_random_seed', 1,\n 'Random seed for client sampling.')\n", (1099, 1173), False, 'from absl import flags\n'), ((1191, 1282), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""client_learning_rate"""', '(0.001)', '"""learning rate for client training."""'], {}), "('client_learning_rate', 0.001,\n 'learning rate for client training.')\n", (1209, 1282), False, 'from absl import flags\n'), ((1327, 1482), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""experiment_name"""', '"""test"""', '"""The name of this experiment. Will be append to --root_output_dir to separate experiment results."""'], {}), "('experiment_name', 'test',\n 'The name of this experiment. Will be append to --root_output_dir to separate experiment results.'\n )\n", (1346, 1482), False, 'from absl import flags\n'), ((1490, 1536), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""experiment_name"""'], {}), "('experiment_name')\n", (1517, 1536), False, 'from absl import flags\n'), ((1537, 1664), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""root_output_dir"""', '"""/tmp/basisnet/centralized_emnist"""', '"""Root directory for writing experiment output."""'], {}), "('root_output_dir', '/tmp/basisnet/centralized_emnist',\n 'Root directory for writing experiment output.')\n", (1556, 1664), False, 'from absl import flags\n'), ((1681, 1758), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""total_rounds"""', '(200)', '"""Number of total training rounds."""'], {}), "('total_rounds', 200, 'Number of total training rounds.')\n", (1701, 1758), False, 'from absl import flags\n'), ((1759, 1876), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""rounds_per_eval"""', '(100)', '"""How often to evaluate the global model on the validation dataset."""'], {}), 
"('rounds_per_eval', 100,\n 'How often to evaluate the global model on the validation dataset.')\n", (1779, 1876), False, 'from absl import flags\n'), ((1882, 1981), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""rounds_per_checkpoint"""', '(100)', '"""How often to checkpoint the global model."""'], {}), "('rounds_per_checkpoint', 100,\n 'How often to checkpoint the global model.')\n", (1902, 1981), False, 'from absl import flags\n'), ((2000, 2079), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""modeldir"""', '""""""', '"""The dir for saving checkpoints and logs."""'], {}), "('modeldir', '', 'The dir for saving checkpoints and logs.')\n", (2019, 2079), False, 'from absl import flags\n'), ((2080, 2174), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""debug"""', '(False)', '"""If true, reduce batch size and do not usetf_function."""'], {}), "('debug', False,\n 'If true, reduce batch size and do not usetf_function.')\n", (2097, 2174), False, 'from absl import flags\n'), ((2216, 2350), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""fine_tune_epoch"""', '(20)', '"""number of epochs for fine-tuningto use from test set for per-round validation."""'], {}), "('fine_tune_epoch', 20,\n 'number of epochs for fine-tuningto use from test set for per-round validation.'\n )\n", (2236, 2350), False, 'from absl import flags\n'), ((2355, 2444), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_basis"""', '(4)', '"""number of basis to learn, 1 = original model."""'], {}), "('num_basis', 4,\n 'number of basis to learn, 1 = original model.')\n", (2375, 2444), False, 'from absl import flags\n'), ((2463, 2552), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""num_filters_expand"""', '(1)', '"""number of expanding Conv channel size."""'], {}), "('num_filters_expand', 1,\n 'number of expanding Conv channel size.')\n", (2481, 2552), False, 'from absl import flags\n'), ((2559, 2657), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', 
(['"""temp"""', '(1.0)', '"""temperature for softmax of generating the client embedding."""'], {}), "('temp', 1.0,\n 'temperature for softmax of generating the client embedding.')\n", (2577, 2657), False, 'from absl import flags\n'), ((2700, 2813), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""embedding_type"""', '"""lookup"""', '_SUPPORTED_EMBEDDING_TYPE', '"""The type of the client embedding."""'], {}), "('embedding_type', 'lookup', _SUPPORTED_EMBEDDING_TYPE,\n 'The type of the client embedding.')\n", (2717, 2813), False, 'from absl import flags\n'), ((2829, 2927), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""run_sweep"""', '(False)', '"""Whether to run hyper parameter tunning with sweep."""'], {}), "('run_sweep', False,\n 'Whether to run hyper parameter tunning with sweep.')\n", (2849, 2927), False, 'from absl import flags\n'), ((2949, 3015), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""digit_only"""', '(False)', '"""digit_only for emnist"""'], {}), "('digit_only', False, 'digit_only for emnist')\n", (2969, 3015), False, 'from absl import flags\n'), ((3016, 3103), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""global_embedding"""', '(False)', '"""train with global_embedding only"""'], {}), "('global_embedding', False,\n 'train with global_embedding only')\n", (3036, 3103), False, 'from absl import flags\n'), ((3121, 3206), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""with_dist"""', '(False)', '"""use label distribution as the inputs"""'], {}), "('with_dist', False, 'use label distribution as the inputs'\n )\n", (3141, 3206), False, 'from absl import flags\n'), ((3243, 3276), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.compat.v2.enable_v2_behavior', ([], {}), '()\n', (3274, 3276), True, 'import tensorflow as tf\n'), ((3386, 3456), 'tensorflow_federated.simulation.datasets.emnist.load_data', 'tff.simulation.datasets.emnist.load_data', ([], {'only_digits': 'FLAGS.digit_only'}), 
'(only_digits=FLAGS.digit_only)\n', (3426, 3456), True, 'import tensorflow_federated as tff\n'), ((3821, 3854), 'numpy.array', 'np.array', (['emnist_train.client_ids'], {}), '(emnist_train.client_ids)\n', (3829, 3854), True, 'import numpy as np\n'), ((3857, 3890), 'numpy.random.shuffle', 'np.random.shuffle', (['all_client_ids'], {}), '(all_client_ids)\n', (3874, 3890), True, 'import numpy as np\n'), ((4049, 4169), 'basisnet.personalization.centralized_emnist.data_processing.parse_data', 'data_processing.parse_data', (['emnist_train', 'emnist_test', 'train_client_ids', 'cliend_encodings'], {'with_dist': 'FLAGS.with_dist'}), '(emnist_train, emnist_test, train_client_ids,\n cliend_encodings, with_dist=FLAGS.with_dist)\n', (4075, 4169), False, 'from basisnet.personalization.centralized_emnist import data_processing\n'), ((4250, 4369), 'basisnet.personalization.centralized_emnist.data_processing.parse_data', 'data_processing.parse_data', (['emnist_train', 'emnist_test', 'test_client_ids', 'cliend_encodings'], {'with_dist': 'FLAGS.with_dist'}), '(emnist_train, emnist_test, test_client_ids,\n cliend_encodings, with_dist=FLAGS.with_dist)\n', (4276, 4369), False, 'from basisnet.personalization.centralized_emnist import data_processing\n'), ((4410, 4497), 'basisnet.personalization.centralized_emnist.data_processing.pack_dataset', 'data_processing.pack_dataset', (['train_tuple'], {'mode': '"""train"""', 'with_dist': 'FLAGS.with_dist'}), "(train_tuple, mode='train', with_dist=FLAGS.\n with_dist)\n", (4438, 4497), False, 'from basisnet.personalization.centralized_emnist import data_processing\n'), ((4516, 4601), 'basisnet.personalization.centralized_emnist.data_processing.pack_dataset', 'data_processing.pack_dataset', (['test_tuple'], {'mode': '"""test"""', 'with_dist': 'FLAGS.with_dist'}), "(test_tuple, mode='test', with_dist=FLAGS.with_dist\n )\n", (4544, 4601), False, 'from basisnet.personalization.centralized_emnist import data_processing\n'), ((4753, 4951), 
'basisnet.personalization.centralized_emnist.training_specs.TaskSpec', 'training_specs.TaskSpec', ([], {'fine_tune_epoch': 'FLAGS.fine_tune_epoch', 'num_basis': 'FLAGS.num_basis', 'num_filters_expand': 'FLAGS.num_filters_expand', 'temp': 'FLAGS.temp', 'embedding_type': 'FLAGS.embedding_type'}), '(fine_tune_epoch=FLAGS.fine_tune_epoch, num_basis=\n FLAGS.num_basis, num_filters_expand=FLAGS.num_filters_expand, temp=\n FLAGS.temp, embedding_type=FLAGS.embedding_type)\n', (4776, 4951), False, 'from basisnet.personalization.centralized_emnist import training_specs\n'), ((4992, 5174), 'basisnet.personalization.centralized_emnist.emnist_models.get_model_builder', 'emnist_models.get_model_builder', (['task_spec'], {'only_digits': 'FLAGS.digit_only', 'batch_size': 'train_batch_size', 'with_dist': 'FLAGS.with_dist', 'global_embedding_only': 'FLAGS.global_embedding'}), '(task_spec, only_digits=FLAGS.digit_only,\n batch_size=train_batch_size, with_dist=FLAGS.with_dist,\n global_embedding_only=FLAGS.global_embedding)\n', (5023, 5174), False, 'from basisnet.personalization.centralized_emnist import emnist_models\n'), ((5297, 5324), 'absl.logging.info', 'logging.info', (['learning_rate'], {}), '(learning_rate)\n', (5309, 5324), False, 'from absl import logging\n'), ((5339, 5392), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (5363, 5392), True, 'import tensorflow as tf\n'), ((5696, 5713), 'absl.logging.info', 'logging.info', (['acc'], {}), '(acc)\n', (5708, 5713), False, 'from absl import logging\n'), ((8518, 8635), 'basisnet.personalization.centralized_emnist.data_processing.pack_dataset', 'data_processing.pack_dataset', (['ft_train_tuple'], {'mode': '"""train"""', 'batch_size': 'per_batch_size', 'with_dist': 'FLAGS.with_dist'}), "(ft_train_tuple, mode='train', batch_size=\n per_batch_size, with_dist=FLAGS.with_dist)\n", (8546, 8635), False, 'from 
basisnet.personalization.centralized_emnist import data_processing\n'), ((8674, 8794), 'basisnet.personalization.centralized_emnist.data_processing.pack_dataset', 'data_processing.pack_dataset', (['ft_sp_train_tuple'], {'mode': '"""train"""', 'batch_size': 'per_batch_size', 'with_dist': 'FLAGS.with_dist'}), "(ft_sp_train_tuple, mode='train', batch_size=\n per_batch_size, with_dist=FLAGS.with_dist)\n", (8702, 8794), False, 'from basisnet.personalization.centralized_emnist import data_processing\n'), ((8834, 8949), 'basisnet.personalization.centralized_emnist.data_processing.pack_dataset', 'data_processing.pack_dataset', (['ft_test_tuple'], {'mode': '"""test"""', 'batch_size': 'per_batch_size', 'with_dist': 'FLAGS.with_dist'}), "(ft_test_tuple, mode='test', batch_size=\n per_batch_size, with_dist=FLAGS.with_dist)\n", (8862, 8949), False, 'from basisnet.personalization.centralized_emnist import data_processing\n'), ((9922, 9958), 'numpy.tile', 'np.tile', (['global_embedding', '(3402, 1)'], {}), '(global_embedding, (3402, 1))\n', (9929, 9958), True, 'import numpy as np\n'), ((10260, 10273), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (10267, 10273), False, 'from absl import app\n'), ((3507, 3535), 'absl.logging.info', 'logging.info', (['"""Test run ..."""'], {}), "('Test run ...')\n", (3519, 3535), False, 'from absl import logging\n'), ((5997, 6017), 'numpy.argmax', 'np.argmax', (['preds', '(-1)'], {}), '(preds, -1)\n', (6006, 6017), True, 'import numpy as np\n'), ((6070, 6095), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (6093, 6095), False, 'import collections\n'), ((6672, 6702), 'absl.logging.info', 'logging.info', (['"""=============="""'], {}), "('==============')\n", (6684, 6702), False, 'from absl import logging\n'), ((6707, 6725), 'absl.logging.info', 'logging.info', (['mode'], {}), '(mode)\n', (6719, 6725), False, 'from absl import logging\n'), ((6730, 6754), 'absl.logging.info', 'logging.info', (['train_size'], {}), 
'(train_size)\n', (6742, 6754), False, 'from absl import logging\n'), ((6759, 6822), 'absl.logging.info', 'logging.info', (["('Bases fixed' if fix_basis else 'Bases not fixed')"], {}), "('Bases fixed' if fix_basis else 'Bases not fixed')\n", (6771, 6822), False, 'from absl import logging\n'), ((6827, 6905), 'absl.logging.info', 'logging.info', (["('Global experiment' if global_exp else 'Personalized experiment')"], {}), "('Global experiment' if global_exp else 'Personalized experiment')\n", (6839, 6905), False, 'from absl import logging\n'), ((6919, 6949), 'absl.logging.info', 'logging.info', (['"""=============="""'], {}), "('==============')\n", (6931, 6949), False, 'from absl import logging\n'), ((6975, 7143), 'basisnet.personalization.centralized_emnist.emnist_models.get_model_builder', 'emnist_models.get_model_builder', (['task_spec'], {'only_digits': 'FLAGS.digit_only', 'batch_size': 'per_batch_size', 'with_dist': 'FLAGS.with_dist', 'global_embedding_only': 'global_exp'}), '(task_spec, only_digits=FLAGS.digit_only,\n batch_size=per_batch_size, with_dist=FLAGS.with_dist,\n global_embedding_only=global_exp)\n', (7006, 7143), False, 'from basisnet.personalization.centralized_emnist import emnist_models\n'), ((7265, 7311), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (7289, 7311), True, 'import tensorflow as tf\n'), ((7982, 8016), 'absl.logging.info', 'logging.info', (['"""Before fine-tuning"""'], {}), "('Before fine-tuning')\n", (7994, 8016), False, 'from absl import logging\n'), ((8064, 8090), 'absl.logging.info', 'logging.info', (['per_acc_list'], {}), '(per_acc_list)\n', (8076, 8090), False, 'from absl import logging\n'), ((5451, 5515), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (5496, 5515), True, 'import tensorflow as tf\n'), ((8034, 8058), 
'numpy.nanmean', 'np.nanmean', (['per_acc_list'], {}), '(per_acc_list)\n', (8044, 8058), True, 'import numpy as np\n'), ((8357, 8394), 'absl.logging.info', 'logging.info', (['"""Fine-tune epoch%d"""', 'ep'], {}), "('Fine-tune epoch%d', ep)\n", (8369, 8394), False, 'from absl import logging\n'), ((8447, 8474), 'absl.logging.info', 'logging.info', (['post_acc_list'], {}), '(post_acc_list)\n', (8459, 8474), False, 'from absl import logging\n'), ((7380, 7444), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (7425, 7444), True, 'import tensorflow as tf\n'), ((7723, 7748), 'absl.logging.info', 'logging.info', (['"""Fix basis"""'], {}), "('Fix basis')\n", (7735, 7748), False, 'from absl import logging\n'), ((8414, 8439), 'numpy.nanmean', 'np.nanmean', (['post_acc_list'], {}), '(post_acc_list)\n', (8424, 8439), True, 'import numpy as np\n')] |
"""
Theory:
Morphological transformations are simple operations based on the image shape.
They need two inputs: one is our original image; the second is called a
<structuring element> or <kernel>, which describes the nature of the operation.
"""
import cv2
import numpy as np
def org_imge_kernel():
    """Load the demo image and build the default structuring element.

    Returns:
        (img, kernel): 'morphological_j.png' loaded as a grayscale array,
        and an all-ones 5x5 uint8 kernel.

    Raises:
        FileNotFoundError: if the image file cannot be read.
    """
    img = cv2.imread('morphological_j.png', 0)
    if img is None:
        # cv2.imread returns None (instead of raising) when the file is
        # missing or unreadable; fail fast with a clear error rather than
        # letting a later cv2 call crash cryptically.
        raise FileNotFoundError("cannot read 'morphological_j.png'")
    kernel = np.ones((5, 5), np.uint8)
    return img, kernel
def show_image(images):
    """Display every named image in *images* (a name -> array mapping),
    then block until a key is pressed and close all windows."""
    for title, frame in images.items():
        cv2.imshow(title, frame)
    cv2.waitKey()
    cv2.destroyAllWindows()
def test_erosion():
    """Demonstrate erosion.

    Erosion eats away the boundary of the (white) foreground: a pixel stays
    1 only if every pixel under the sliding kernel is 1, otherwise it is set
    to 0. Boundary pixels are therefore discarded (how many depends on the
    kernel size), so the white region shrinks — useful for removing small
    white noise or detaching two connected objects.
    """
    img, kernel = org_imge_kernel()
    eroded = cv2.erode(img, kernel, iterations=1)
    cv2.imshow("erosion", eroded)
    cv2.imshow("original", img)
    cv2.waitKey()
    cv2.destroyAllWindows()
def test_dilation():
    """Demonstrate dilation, the opposite of erosion: a pixel becomes 1 if
    at least one pixel under the kernel is 1, so the white (foreground)
    region grows."""
    img, kernel = org_imge_kernel()
    grown = cv2.dilate(img, kernel, iterations=1)
    cv2.imshow('original', img)
    cv2.imshow('dilationed', grown)
    cv2.waitKey()
    cv2.destroyAllWindows()
def test_open_close():
    """Demonstrate opening and closing.

    Opening = erosion followed by dilation; removes small white noise.
    Closing = dilation followed by erosion; fills small holes inside the
    foreground objects.
    """
    img, kernel = org_imge_kernel()
    opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    closed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
    cv2.imshow("original", img)
    cv2.imshow("open", opened)
    cv2.imshow("close", closed)
    cv2.waitKey()
    cv2.destroyAllWindows()
def test_morph_gradient():
    """Morphological gradient: the difference between the dilation and the
    erosion of an image — the result resembles the object's outline."""
    img, kernel = org_imge_kernel()
    outline = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
    show_image({"original": img, "gradient": outline})
def test_tophat():
    """Top hat: the difference between the input image and its opening."""
    img, kernel = org_imge_kernel()
    result = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
    show_image({"original": img, "tophat": result})
def test_blackhat():
    """Black hat: the difference between the input image and its closing."""
    img, kernel = org_imge_kernel()
    result = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)
    show_image({"original": img, "blackhat": result})
def test_get_structuring_element():
    """For a non-rectangular kernel (e.g. elliptical or circular), pass the
    shape and size to cv2.getStructuringElement() to obtain it."""
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    print(ellipse)
| [
"cv2.dilate",
"cv2.waitKey",
"cv2.morphologyEx",
"cv2.getStructuringElement",
"cv2.imshow",
"numpy.ones",
"cv2.imread",
"cv2.erode",
"cv2.destroyAllWindows"
] | [((306, 342), 'cv2.imread', 'cv2.imread', (['"""morphological_j.png"""', '(0)'], {}), "('morphological_j.png', 0)\n", (316, 342), False, 'import cv2\n'), ((356, 381), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (363, 381), True, 'import numpy as np\n'), ((490, 503), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (501, 503), False, 'import cv2\n'), ((508, 531), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (529, 531), False, 'import cv2\n'), ((1212, 1248), 'cv2.erode', 'cv2.erode', (['img', 'kernel'], {'iterations': '(1)'}), '(img, kernel, iterations=1)\n', (1221, 1248), False, 'import cv2\n'), ((1253, 1283), 'cv2.imshow', 'cv2.imshow', (['"""erosion"""', 'erosion'], {}), "('erosion', erosion)\n", (1263, 1283), False, 'import cv2\n'), ((1288, 1315), 'cv2.imshow', 'cv2.imshow', (['"""original"""', 'img'], {}), "('original', img)\n", (1298, 1315), False, 'import cv2\n'), ((1320, 1333), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1331, 1333), False, 'import cv2\n'), ((1338, 1361), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1359, 1361), False, 'import cv2\n'), ((1629, 1666), 'cv2.dilate', 'cv2.dilate', (['img', 'kernel'], {'iterations': '(1)'}), '(img, kernel, iterations=1)\n', (1639, 1666), False, 'import cv2\n'), ((1671, 1698), 'cv2.imshow', 'cv2.imshow', (['"""original"""', 'img'], {}), "('original', img)\n", (1681, 1698), False, 'import cv2\n'), ((1703, 1737), 'cv2.imshow', 'cv2.imshow', (['"""dilationed"""', 'dilation'], {}), "('dilationed', dilation)\n", (1713, 1737), False, 'import cv2\n'), ((1742, 1755), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1753, 1755), False, 'import cv2\n'), ((1760, 1783), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1781, 1783), False, 'import cv2\n'), ((2096, 2141), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_OPEN', 'kernel'], {}), '(img, cv2.MORPH_OPEN, kernel)\n', (2112, 2141), False, 'import 
cv2\n'), ((2158, 2204), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(img, cv2.MORPH_CLOSE, kernel)\n', (2174, 2204), False, 'import cv2\n'), ((2209, 2236), 'cv2.imshow', 'cv2.imshow', (['"""original"""', 'img'], {}), "('original', img)\n", (2219, 2236), False, 'import cv2\n'), ((2241, 2269), 'cv2.imshow', 'cv2.imshow', (['"""open"""', 'open_img'], {}), "('open', open_img)\n", (2251, 2269), False, 'import cv2\n'), ((2274, 2304), 'cv2.imshow', 'cv2.imshow', (['"""close"""', 'close_img'], {}), "('close', close_img)\n", (2284, 2304), False, 'import cv2\n'), ((2309, 2322), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (2320, 2322), False, 'import cv2\n'), ((2327, 2350), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2348, 2350), False, 'import cv2\n'), ((2570, 2619), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_GRADIENT', 'kernel'], {}), '(img, cv2.MORPH_GRADIENT, kernel)\n', (2586, 2619), False, 'import cv2\n'), ((2824, 2871), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_TOPHAT', 'kernel'], {}), '(img, cv2.MORPH_TOPHAT, kernel)\n', (2840, 2871), False, 'import cv2\n'), ((3077, 3126), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_BLACKHAT', 'kernel'], {}), '(img, cv2.MORPH_BLACKHAT, kernel)\n', (3093, 3126), False, 'import cv2\n'), ((470, 486), 'cv2.imshow', 'cv2.imshow', (['k', 'v'], {}), '(k, v)\n', (480, 486), False, 'import cv2\n'), ((3458, 3510), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(5, 5)'], {}), '(cv2.MORPH_ELLIPSE, (5, 5))\n', (3483, 3510), False, 'import cv2\n')] |
import time
import prettytable as pt
import numpy as np
import mxnet as mx
from poi.eval.reid.re_ranking import re_ranking, c_distance_opt
import logging
class ReIDMetric(object):
    """Person re-identification evaluator (CMC curve and mAP).

    Workflow: process(results) splits per-instance results into query and
    gallery sets and builds the distance matrix; eval_func() then computes
    the CMC curve and mAP; summarize() logs them as a table.
    """

    def __init__(self, pTest):
        # pTest: config object; must provide .gpus (list of ints),
        # .dist_type ('euclidean' | 'cosine' | 'reranking') and .max_rank.
        self.p = pTest
        self.gpu = self.p.gpus[0]
        # Filled by parse_results():
        self.distmat = None
        self.q_pids = None
        self.g_pids = None
        self.q_camids = None
        self.g_camids = None
        # Filled by eval_func():
        self.cmc = None
        self.mAP = None
        self.logger = None

    def eval_func(self):
        """ Evaluation with market1501 metric. """
        tik = time.time()
        p = self.p
        max_rank = p.max_rank
        num_q, num_g = self.distmat.shape
        self.logger.info("num_q: {}, num_g: {}".format(num_q, num_g))
        if num_g < max_rank:
            max_rank = num_g
            self.logger.info("Note: number of gallery samples is quite small, "
                             "got {} as max_rank".format(num_g))
        # Rank gallery entries for each query by ascending distance.
        indices = np.argsort(self.distmat, axis=1)
        # matches[q, k] == 1 iff the k-th ranked gallery item has the query's pid.
        matches = (self.g_pids[indices] == self.q_pids[:, np.newaxis]).astype(np.int32)
        # compute cmc curve for each query
        all_cmc = []
        all_AP = []
        num_valid_q = 0  # number of valid query
        for q_idx in range(num_q):
            # get query pid and camid
            q_pid = self.q_pids[q_idx]
            q_camid = self.q_camids[q_idx]
            # remove gallery samples that have the same pid and camid with query
            order = indices[q_idx]
            remove = (self.g_pids[order] == q_pid) & (self.g_camids[order] == q_camid)
            keep = np.invert(remove)
            # binary vector, positions with value 1 are correct matches
            orig_cmc = matches[q_idx][keep]
            if not np.any(orig_cmc):
                # this condition is true when query identity does not appear in gallery
                continue
            cmc = orig_cmc.cumsum()
            cmc[cmc > 1] = 1  # clamp to 0/1: "found within top-k" indicator
            all_cmc.append(cmc[:max_rank])
            num_valid_q += 1
            # compute average precision
            num_rel = orig_cmc.sum()
            tmp_cmc = orig_cmc.cumsum()
            tmp_cmc = [x / (i + 1.) for i, x in enumerate(tmp_cmc)]  # precision@k
            tmp_cmc = np.asarray(tmp_cmc) * orig_cmc  # keep precision only at hit ranks
            AP = tmp_cmc.sum() / num_rel
            all_AP.append(AP)
        assert num_valid_q > 0, "Error: all query identities do not appear in gallery."
        # NOTE(review): np.asarray assumes every kept cmc slice has the same
        # length (>= max_rank); a ragged list would break here — confirm.
        all_cmc = np.asarray(all_cmc).astype(np.float32)
        all_cmc = all_cmc.sum(0) / num_valid_q
        mAP = np.mean(all_AP)
        self.cmc = all_cmc
        self.mAP = mAP
        tok = time.time()
        self.logger.info("eval uses {:.1f}".format(tok - tik))

    def parse_results(self, results):
        """Split results into query/gallery sets and build the distance matrix.

        Each element of results is a dict with keys "split", "pid", "cid" and
        "feature": splits starting with "gallery" go to the gallery set,
        splits ending with "query" go to the query set; anything else raises.
        """
        tik = time.time()
        p = self.p
        dist_type = p.dist_type
        q_pids = list()
        g_pids = list()
        q_camids = list()
        g_camids = list()
        q_features = list()
        g_features = list()
        for r in results:
            split = r["split"]
            if split.startswith("gallery"):
                g_pids.append(r["pid"])
                g_camids.append(r["cid"])
                g_features.append(r["feature"])
            elif split.endswith("query"):
                q_pids.append(r["pid"])
                q_camids.append(r["cid"])
                q_features.append(r["feature"])
            else:
                raise ValueError("No setting for split {}".format(split))
        tok1 = time.time()
        self.logger.info(
            "{} instances in gallery and {} in query.".format(len(g_pids), len(q_pids)))
        self.logger.info("collect gallery and query uses {:.1f}".format(tok1 - tik))
        self.q_pids = np.asarray(q_pids)
        self.g_pids = np.asarray(g_pids)
        self.q_camids = np.asarray(q_camids)
        self.g_camids = np.asarray(g_camids)
        if dist_type == "euclidean":
            q_features_np = np.array(q_features, dtype=np.float32)
            g_features_np = np.array(g_features, dtype=np.float32)
            distmat = c_distance_opt(q_features_np, g_features_np, ctx=mx.gpu(self.gpu),
                                    normalize=False)
            self.distmat = distmat
        elif dist_type == "cosine":
            q_features_np = np.array(q_features)
            g_features_np = np.array(g_features)
            distmat = c_distance_opt(q_features_np, g_features_np, ctx=mx.gpu(self.gpu),
                                    normalize=True)
            self.distmat = distmat
        elif dist_type == "reranking":
            self.distmat = re_ranking(q_features, g_features, k1=20, k2=6, lambda_value=0.3,
                                      ctx=mx.gpu(self.gpu))
        else:
            raise ValueError("No setting for dist type {}".format(dist_type))
        tok2 = time.time()
        self.logger.info("compute dist matrix uses {:.1f}".format(tok2 - tok1))

    def process(self, results, logger=None):
        """Run the full evaluation: parse results, then compute CMC/mAP."""
        self.logger = logger if logger is not None else logging.getLogger()
        # parse results into gallery and query
        self.parse_results(results)
        # compute rank# and mAP
        self.eval_func()

    def summarize(self):
        """Log mAP and Rank-1/5/10 as a right-aligned pretty table."""
        table = pt.PrettyTable()
        field_names = ["mAP", "Rank-1", "Rank-5", "Rank-10"]
        row_values = []
        row_values.append("{:.1%}".format(self.mAP))
        for r in [1, 5, 10]:
            row_values.append("{:.1%}".format(self.cmc[r - 1]))
        # right-justify every cell to the widest one so the columns line up
        max_name_length = max([len(name) for name in field_names + row_values])
        field_names = [name.rjust(max_name_length, " ") for name in field_names]
        row_values = [name.rjust(max_name_length, " ") for name in row_values]
        table.field_names = field_names
        table.add_row(row_values)
        self.logger.info("validation results: \n{}".format(table))
if __name__ == "__main__":
    import json

    # Smoke-test the metric on a previously dumped gallery result file.
    result_path = "logs/strong_baseline_market1501_r50v1_xent_tri_cent/market1501_gallery_result.json"
    with open(result_path, "r") as result_file:
        loaded_results = json.load(result_file)

    class Config(object):
        gpus = [0]
        dist_type = "cosine"
        max_rank = 50

    evaluator = ReIDMetric(Config())
    evaluator.process(loaded_results)
    evaluator.summarize()
| [
"json.load",
"numpy.invert",
"numpy.asarray",
"time.time",
"numpy.argsort",
"numpy.any",
"numpy.mean",
"numpy.array",
"prettytable.PrettyTable",
"mxnet.gpu",
"logging.getLogger"
] | [((576, 587), 'time.time', 'time.time', ([], {}), '()\n', (585, 587), False, 'import time\n'), ((970, 1002), 'numpy.argsort', 'np.argsort', (['self.distmat'], {'axis': '(1)'}), '(self.distmat, axis=1)\n', (980, 1002), True, 'import numpy as np\n'), ((2545, 2560), 'numpy.mean', 'np.mean', (['all_AP'], {}), '(all_AP)\n', (2552, 2560), True, 'import numpy as np\n'), ((2626, 2637), 'time.time', 'time.time', ([], {}), '()\n', (2635, 2637), False, 'import time\n'), ((2754, 2765), 'time.time', 'time.time', ([], {}), '()\n', (2763, 2765), False, 'import time\n'), ((3485, 3496), 'time.time', 'time.time', ([], {}), '()\n', (3494, 3496), False, 'import time\n'), ((3720, 3738), 'numpy.asarray', 'np.asarray', (['q_pids'], {}), '(q_pids)\n', (3730, 3738), True, 'import numpy as np\n'), ((3761, 3779), 'numpy.asarray', 'np.asarray', (['g_pids'], {}), '(g_pids)\n', (3771, 3779), True, 'import numpy as np\n'), ((3804, 3824), 'numpy.asarray', 'np.asarray', (['q_camids'], {}), '(q_camids)\n', (3814, 3824), True, 'import numpy as np\n'), ((3849, 3869), 'numpy.asarray', 'np.asarray', (['g_camids'], {}), '(g_camids)\n', (3859, 3869), True, 'import numpy as np\n'), ((4831, 4842), 'time.time', 'time.time', ([], {}), '()\n', (4840, 4842), False, 'import time\n'), ((5227, 5243), 'prettytable.PrettyTable', 'pt.PrettyTable', ([], {}), '()\n', (5241, 5243), True, 'import prettytable as pt\n'), ((6046, 6058), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6055, 6058), False, 'import json\n'), ((1603, 1620), 'numpy.invert', 'np.invert', (['remove'], {}), '(remove)\n', (1612, 1620), True, 'import numpy as np\n'), ((3936, 3974), 'numpy.array', 'np.array', (['q_features'], {'dtype': 'np.float32'}), '(q_features, dtype=np.float32)\n', (3944, 3974), True, 'import numpy as np\n'), ((4003, 4041), 'numpy.array', 'np.array', (['g_features'], {'dtype': 'np.float32'}), '(g_features, dtype=np.float32)\n', (4011, 4041), True, 'import numpy as np\n'), ((5025, 5044), 'logging.getLogger', 
'logging.getLogger', ([], {}), '()\n', (5042, 5044), False, 'import logging\n'), ((1757, 1773), 'numpy.any', 'np.any', (['orig_cmc'], {}), '(orig_cmc)\n', (1763, 1773), True, 'import numpy as np\n'), ((2235, 2254), 'numpy.asarray', 'np.asarray', (['tmp_cmc'], {}), '(tmp_cmc)\n', (2245, 2254), True, 'import numpy as np\n'), ((2445, 2464), 'numpy.asarray', 'np.asarray', (['all_cmc'], {}), '(all_cmc)\n', (2455, 2464), True, 'import numpy as np\n'), ((4284, 4304), 'numpy.array', 'np.array', (['q_features'], {}), '(q_features)\n', (4292, 4304), True, 'import numpy as np\n'), ((4333, 4353), 'numpy.array', 'np.array', (['g_features'], {}), '(g_features)\n', (4341, 4353), True, 'import numpy as np\n'), ((4113, 4129), 'mxnet.gpu', 'mx.gpu', (['self.gpu'], {}), '(self.gpu)\n', (4119, 4129), True, 'import mxnet as mx\n'), ((4425, 4441), 'mxnet.gpu', 'mx.gpu', (['self.gpu'], {}), '(self.gpu)\n', (4431, 4441), True, 'import mxnet as mx\n'), ((4705, 4721), 'mxnet.gpu', 'mx.gpu', (['self.gpu'], {}), '(self.gpu)\n', (4711, 4721), True, 'import mxnet as mx\n')] |
import itertools
import numpy as np
import scipy.ndimage
import scipy.spatial
from .. import utils
from ..preprocessing import image as preprocessing_image
def _generate_centers_and_final_shape(img_shape,
stride,
first_center):
"""
generates final shape and centers for applying a sliding window
"""
dim_ranges = []
for dim_min, dim_max, dim_stride in zip(first_center, img_shape, stride):
dim_ranges.append(range(dim_min, dim_max, dim_stride))
final_shape = tuple([len(dim_range) for dim_range in dim_ranges])
centers = itertools.product(*dim_ranges)
return final_shape, centers
def _generate_patches(img, patch_shape, centers):
    """Yield one zero-filled-border patch of ``patch_shape`` per center."""
    extract = preprocessing_image.get_block_with_center_and_shape
    for c in centers:
        yield extract(img, c, patch_shape, fill_value=0)
def sliding_window_apply(img,
                         fn,
                         patch_shape,
                         batch_size,
                         stride,
                         first_center):
    """Apply ``fn`` to strided patches of ``img`` and reassemble a grid.

    batch_size:
        maximum number of patches fed to ``fn`` per call, -1 for all
        patches at once
    first_center:
        the center point where all iteration starts at (eg. (0, 0))
    """
    assert len(stride) == len(img.shape) == len(first_center)
    # centers of every window plus the shape of the resulting grid
    final_shape, centers = _generate_centers_and_final_shape(img.shape,
                                                             stride,
                                                             first_center)
    patches = _generate_patches(img, patch_shape, centers)
    # group patches so fn never sees more than batch_size at once
    grouped = utils.toolz.partition_all(batch_size, patches)
    outputs = [fn(group) for group in grouped]
    flat = np.concatenate(outputs)
    # fold the flat per-patch results back into the strided grid layout
    return flat.reshape(*final_shape)
def find_maxima(img,
                blur_sigma,
                max_filter_size,
                threshold):
    """Find local maxima of an image.

    The image is Gaussian-blurred with ``blur_sigma``; a pixel counts as
    a maximum when it equals the maximum over a ``max_filter_size``
    neighborhood and exceeds ``threshold``.

    Returns a list of coordinate tuples, one per maximum, so the result
    supports ``len()`` (as required by ``convert_points``).
    """
    blurred = scipy.ndimage.gaussian_filter(img, sigma=blur_sigma)
    maxed = scipy.ndimage.maximum_filter(blurred, size=max_filter_size)
    # list(...) so the result is a reusable sequence on Python 3 as well,
    # where a bare zip() is a one-shot iterator without len()
    points = list(zip(*np.where((maxed == blurred) & (blurred > threshold))))
    return points
def convert_points(points, stride, first_center):
    """Map strided-grid points back to original-image coordinates.

    Each point is scaled elementwise by ``stride`` and offset by
    ``first_center``; an empty input yields an empty (0, ndim) array.
    """
    if not len(points):
        return np.zeros((0, len(stride)))
    return np.array(points) * stride + first_center
def match_true_and_predicted(list_of_true_points,
                             list_of_predicted_points,
                             correctness_threshold):
    """Pair true and predicted points that lie within a threshold.

    For every image, a true point and a predicted point "match" when
    their euclidean distance is below ``correctness_threshold`` (one
    point may participate in several matches).  Returns a dict with four
    lists of per-image point lists: matched/unmatched true points and
    matched/unmatched predicted points, each preserving input order.
    """
    true_matched, true_unmatched = [], []
    pred_matched, pred_unmatched = [], []
    for trues, preds in zip(list_of_true_points, list_of_predicted_points):
        true_hit = [False] * len(trues)
        pred_hit = [False] * len(preds)
        for t_idx, t_pt in enumerate(trues):
            for p_idx, p_pt in enumerate(preds):
                if scipy.spatial.distance.euclidean(t_pt, p_pt) < correctness_threshold:
                    true_hit[t_idx] = True
                    pred_hit[p_idx] = True
        true_matched.append([p for p, hit in zip(trues, true_hit) if hit])
        true_unmatched.append([p for p, hit in zip(trues, true_hit) if not hit])
        pred_matched.append([p for p, hit in zip(preds, pred_hit) if hit])
        pred_unmatched.append([p for p, hit in zip(preds, pred_hit) if not hit])
    return dict(
        true_matched=true_matched,
        true_unmatched=true_unmatched,
        pred_matched=pred_matched,
        pred_unmatched=pred_unmatched,
    )
def localization_metrics(list_of_true_points,
                         list_of_predicted_points,
                         correctness_threshold):
    """Count matched and unmatched true/predicted points.

    Delegates to ``match_true_and_predicted`` and reduces each per-image
    list-of-lists down to a single total count per category.
    """
    matched = match_true_and_predicted(list_of_true_points,
                                       list_of_predicted_points,
                                       correctness_threshold)
    return {key: sum(len(group) for group in groups)
            for key, groups in matched.items()}
| [
"numpy.where",
"numpy.array",
"numpy.concatenate",
"itertools.product"
] | [((637, 667), 'itertools.product', 'itertools.product', (['*dim_ranges'], {}), '(*dim_ranges)\n', (654, 667), False, 'import itertools\n'), ((2117, 2144), 'numpy.concatenate', 'np.concatenate', (['result_list'], {}), '(result_list)\n', (2131, 2144), True, 'import numpy as np\n'), ((2571, 2623), 'numpy.where', 'np.where', (['((maxed == blurred) & (blurred > threshold))'], {}), '((maxed == blurred) & (blurred > threshold))\n', (2579, 2623), True, 'import numpy as np\n'), ((2881, 2897), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (2889, 2897), True, 'import numpy as np\n')] |
import numpy as np
from MLP_Classification.MLP import MLP
from mnist import MNIST
import math
# Load the raw MNIST images/labels from disk.
mndata = MNIST('/Users/elvis/Documents/DSI/python-mnist/data')
train_images, train_labels = mndata.load_training()
test_images, test_labels = mndata.load_testing()

def _scale(image):
    # flatten a 28x28 image and rescale pixel values into [0, 1]
    return np.reshape(np.multiply(1.0/255, image), 28*28)

def _one_hot(label):
    # 10-way one-hot encoding of a digit label
    vec = np.zeros(10)
    vec[label] = 1
    return vec

train_x = [_scale(img) for img in train_images]
train_y = [_one_hot(lbl) for lbl in train_labels]
test_x = [_scale(img) for img in test_images]
test_y = [_one_hot(lbl) for lbl in test_labels]

training_data = zip(train_x, train_y)
testing_data = zip(test_x, test_y)

# 3-layer MLP on the flattened 784-pixel inputs, trained for 30 epochs
network = MLP(28*28, 3)
network.add_layer(32)
network.add_layer(32)
network.add_layer(10)
network.train(training_data, 30, testing_data)
| [
"numpy.zeros",
"numpy.multiply",
"MLP_Classification.MLP.MLP",
"mnist.MNIST"
] | [((104, 157), 'mnist.MNIST', 'MNIST', (['"""/Users/elvis/Documents/DSI/python-mnist/data"""'], {}), "('/Users/elvis/Documents/DSI/python-mnist/data')\n", (109, 157), False, 'from mnist import MNIST\n'), ((757, 772), 'MLP_Classification.MLP.MLP', 'MLP', (['(28 * 28)', '(3)'], {}), '(28 * 28, 3)\n', (760, 772), False, 'from MLP_Classification.MLP import MLP\n'), ((431, 443), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (439, 443), True, 'import numpy as np\n'), ((612, 624), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (620, 624), True, 'import numpy as np\n'), ((376, 415), 'numpy.multiply', 'np.multiply', (['(1.0 / 255)', 'train_images[i]'], {}), '(1.0 / 255, train_images[i])\n', (387, 415), True, 'import numpy as np\n'), ((558, 596), 'numpy.multiply', 'np.multiply', (['(1.0 / 255)', 'test_images[i]'], {}), '(1.0 / 255, test_images[i])\n', (569, 596), True, 'import numpy as np\n')] |
import script_tools as st
import registration as reg
import numpy as np
import landmarks
import dist_table as dt
#import matplotlib
import matplotlib.pyplot as plt
from matplotlib import mlab
import scipy.ndimage
# Machine-specific paths; the Windows variants are kept commented out.
# Windows
#dataset_path = "C:/cygwin64/home/johan/itkAlphaCut/assets/01.png"
#bin_path = "C:/cygwin64/home/johan/itkAlphaCut-release/Release/"
# Linux
# dataset_path: directory with the cilia images, landmark CSVs and outputs
dataset_path = "/home/johof680/work/cilia_dataset/"
# bin_path: directory with the itkAlphaCut registration binaries
bin_path = "/home/johof680/work/itkAlphaCut-4j/build-release/"
def register_cilia_small(ref, flo_list, out_list, landmarks_list, rigid, initial_list):
    """Register each floating image to ``ref`` over the small mask.

    Queues one rigid or affine registration per floating image (seeded
    with the matching entry of ``initial_list``), then a transform pass
    writing "outimg.png" per output directory, and finally transforms
    the landmark files.  Each phase runs via one ``r.run`` call.
    All paths in ``ref``/``flo_list`` are relative to ``dataset_path``.
    """
    im_ext = "png"  # NOTE(review): appears unused in this function
    # create registration object
    parallel_count = 6
    image_dimensions = 2
    r = reg.Registration(parallel_count, bin_path, dim = image_dimensions, enable_logging = True)
    for index in xrange(len(flo_list)):
        flo = flo_list[index]
        pth = out_list[index]
        # per-registration parameters; values are passed as strings
        rpar = r.get_register_param_defaults()
        #rpar.pop("weights1", None)
        #rpar.pop("weights2", None)
        rpar["weights1"] = "hann2"
        rpar["weights2"] = "hann2"
        rpar["multiscale_sampling_factors"] = "1"
        rpar["multiscale_smoothing_sigmas"] = "0"
        rpar["metric"] = "alpha_smd"
        rpar["alpha_levels"] = "7"
        rpar["learning_rate"] = "0.1"
        rpar["alpha_max_distance"] = "128"
        rpar["alpha_outlier_rejection"] = "0.0"
        rpar["sampling_fraction"] = "1.0"#0.05
        rpar["normalization"] = "0.0"
        #rpar["mask1"] = dataset_path + "circle_shrunk.png"
        #rpar["mask2"] = dataset_path + "circle_shrunk.png"
        #rpar.pop("mask1", None)
        #rpar.pop("mask2", None)
        # restrict both images to the small circular mask
        rpar["mask1"] = dataset_path + "small_circle2.png"
        rpar["mask2"] = dataset_path + "small_circle2.png"
        rpar["init_transform"] = initial_list[index]
        if rigid:
            r.register_rigid(dataset_path + ref, dataset_path + flo, pth, rpar)
        else:
            r.register_affine(dataset_path + ref, dataset_path + flo, pth, rpar)
    if rigid:
        name = "Rigid"
    else:
        name = "Affine"
    r.run(name)
    # second pass: apply the completed transforms to the images
    for index in xrange(len(flo_list)):
        flo = flo_list[index]
        pth = out_list[index]
        rpar = r.get_transform_param_defaults()
        r.transform(dataset_path + ref, dataset_path + flo, pth, "outimg.png", pth + "transform_complete.txt")
    r.run("Transform")
    # third pass: transform the landmark CSVs with the same transforms
    for index in xrange(len(landmarks_list)):
        pth = out_list[index]
        r.landmark_transform(landmarks_list[index], pth, "transformed_landmarks.csv", pth + "transform_complete.txt")
    r.run("Landmarks")
def register_cilia_large(ref, flo_list, out_list, landmarks_list, rigid, initial_list):
    """Register each floating image to ``ref`` over the large mask.

    Queues a rigid or affine registration per floating image (seeded
    from ``initial_list``), then a transform pass writing "outimg.png",
    then transforms the landmark files; each phase is executed with one
    ``r.run`` call.  Paths in ``ref``/``flo_list`` are relative to
    ``dataset_path``.
    """
    im_ext = "png"
    # registration driver: 6 parallel jobs over 2-dimensional images
    r = reg.Registration(6, bin_path, dim = 2, enable_logging = True)
    register = r.register_rigid if rigid else r.register_affine
    for index, flo in enumerate(flo_list):
        pth = out_list[index]
        # per-registration parameters; all values are passed as strings
        params = r.get_register_param_defaults()
        params.pop("weights1", None)
        params.pop("weights2", None)
        params["multiscale_sampling_factors"] = "1"
        params["multiscale_smoothing_sigmas"] = "0"
        params["metric"] = "alpha_smd"
        params["alpha_levels"] = "7"
        params["learning_rate"] = "0.5"
        params["alpha_max_distance"] = "128"
        params["alpha_outlier_rejection"] = "0.0"
        params["sampling_fraction"] = "1.0"
        params["normalization"] = "0.01"
        params["mask1"] = dataset_path + "circle_shrunk.png"
        params["mask2"] = dataset_path + "circle_shrunk.png"
        params["init_transform"] = initial_list[index]
        register(dataset_path + ref, dataset_path + flo, pth, params)
    r.run("Rigid" if rigid else "Affine")
    # second pass: apply the completed transforms to the images
    for index, flo in enumerate(flo_list):
        pth = out_list[index]
        r.get_transform_param_defaults()
        r.transform(dataset_path + ref, dataset_path + flo, pth, "outimg.png", pth + "transform_complete.txt")
    r.run("Transform")
    # third pass: transform the landmark CSVs with the same transforms
    for index, landmark in enumerate(landmarks_list):
        pth = out_list[index]
        r.landmark_transform(landmark, pth, "transformed_landmarks.csv", pth + "transform_complete.txt")
    r.run("Landmarks")
def load_distances(out_list, k, n, output):
    """Collect per-registration distances into an (n, k) matrix.

    Reads the first value of "distance.csv" in every output directory,
    writes the resulting matrix as CSV to ``output`` and returns it
    together with the per-row argmin (best candidate per image).
    """
    values = [st.read_csv(path + "distance.csv")[0] for path in out_list]
    d = np.asarray(values, dtype=float).reshape([n, k])
    np.savetxt(output, d, fmt = "%.7f", delimiter = ",")
    return (d, np.argmin(d, axis=1))
def merge_images(d, out_list, k, n, w, h, output):
    """Tile the registered output images into an (n*h) x (k*w) collage.

    d:
        (n, k) distance matrix; the per-row minimum marks the winner
    out_list:
        n*k output directories, each containing "outimg.png"
    w, h:
        width/height of each individual tile in pixels
    The winning tile of every row gets a rectangular border drawn at
    intensity 65335/2, then the collage is written to ``output``.
    NOTE(review): scipy.ndimage.imread and scipy.misc.imsave were removed
    in modern SciPy (>= 1.2/1.3), and scipy.misc is never imported here —
    this function only works against an old SciPy; confirm before reuse.
    """
    result = np.zeros([h*n,w*k])
    # top-left corner of each tile; candidate index kk varies fastest
    inds_x = [kk * w for nn in xrange(n) for kk in xrange(k)]
    inds_y = [nn * h for nn in xrange(n) for kk in xrange(k)]
    inds_x.append(w*k)
    inds_y.append(h*n)
    for (index, out) in enumerate(out_list):
        image_path = out + "outimg.png"
        img = scipy.ndimage.imread(image_path)
        result[inds_y[index]:inds_y[index]+h, inds_x[index]:inds_x[index]+w] = img
    # per-row winner (smallest distance)
    mn = np.argmin(d, axis=1)
    #result[50:150,100:120] = 65335/2
    stroke = 5
    for nn in xrange(n):
        #print(str(mn[nn]))
        # horizontal border segments (top and bottom of the winning tile)
        start_x = (mn[nn]*w)
        end_x = ((mn[nn]+1)*w)
        start_y = (nn * h)
        end_y = ((nn * h)+stroke)
        result[start_y:end_y, start_x:end_x] = 65335/2
        result[(start_y + h):(end_y + h), start_x:end_x] = 65335/2
        # vertical border segments (left and right of the winning tile)
        start_x = (mn[nn]*w)
        end_x = start_x + stroke
        start_y = (nn * h)
        end_y = ((nn+1) * h)
        result[start_y:end_y, start_x:end_x] = 65335/2
        result[start_y:end_y, start_x+w:end_x+w] = 65335/2
        #result[(((nn+1) * h)):((nn+1) * h), ((mn[nn]+1)*w-stroke):(((mn[nn]+1))*w)] = 65335/2
    scipy.misc.imsave(output, result)
def min_distance(p, set):
    """Return ``(distance, index)`` of the point in ``set`` closest to ``p``.

    p:
        1-D coordinate sequence/array
    set:
        2-D array with one candidate point per row (the parameter name
        shadows the ``set`` builtin but is kept for interface
        compatibility)

    Returns ``(inf, -1)`` for an empty ``set``.  Ties resolve to the
    lowest row index, matching the original linear scan; the Python
    loop (which also used the Python-2-only ``xrange``) is replaced by
    a vectorized distance computation.
    """
    candidates = np.asarray(set)
    if candidates.shape[0] == 0:
        return (float('inf'), -1)
    # euclidean distance of every row to p in one vectorized pass
    dists = np.linalg.norm(candidates - np.asarray(p), ord=2, axis=1)
    mini = int(np.argmin(dists))
    return (dists[mini], mini)
def smd(ref, target):
    """Sum of minimum distances from each row of ``ref`` to ``target``.

    ref:
        2-D array, one point per row
    target:
        2-D array of candidate points (see ``min_distance``)

    Uses ``range`` instead of the Python-2-only ``xrange`` so the module
    also runs under Python 3; iteration behavior is unchanged.
    """
    acc = 0
    for i in range(ref.shape[0]):
        # min_distance returns (distance, index); only the distance is summed
        dist, _ = min_distance(ref[i, :], target)
        acc = acc + dist
    return acc
# Stage-1 registrations are rigid; stage 2 refines them affinely.
ttype_first = "rigid"
ttype_second = "affine"
def first(N):
    """Stage 1: try all 9 candidate initial transforms for each image.

    Registers images 2..N+1 against cilia_1.png once per candidate
    transform, stores the distances as an (N, 9) matrix, renders a
    collage highlighting the winners and returns the per-image index of
    the best candidate transform.
    """
    # one entry per (image, candidate) pair; candidate index k varies fastest
    initial_transforms = [dataset_path + "transforms/transform%d.txt" % (k+1) for n in xrange(N) for k in xrange(9)]
    landmark_paths = [dataset_path + "cilia_landmarks_%d.csv" % (n+1) for n in xrange(1, N+1) for k in xrange(9)]
    out_paths = [dataset_path + "first/" + ttype_first + "_%d/%d/" % (k+1, n+1) for n in xrange(N) for k in xrange(9)]
    register_cilia_small("cilia_1.png", ["cilia_" + str(i+1) + ".png" for i in xrange(1, N+1) for k in xrange(9)], out_paths, landmark_paths, ttype_first == "rigid", initial_transforms)
    (d, best_index) = load_distances(out_paths, 9, N, dataset_path + "first/distances.csv")
    merge_images(d, out_paths, 9, N, 129, 129, dataset_path + "first/collage.png")
    return best_index
def second(N, bind):
    """Stage 2: refine each image starting from its best stage-1 result.

    bind:
        per-image index of the winning stage-1 candidate, as returned by
        ``first``
    """
    # seed every refinement with the winning transform written by stage 1
    initial_transforms = [dataset_path + "first/" + (ttype_first + "_%d/%d/transform_" % (bind[n]+1, n+1)) + ttype_first + ".txt" for n in xrange(N)]
    landmark_paths = [dataset_path + "cilia_landmarks_%d.csv" % (n+1) for n in xrange(1, N+1)]
    out_paths = [dataset_path + "second/" + ttype_second + "_%d/%d/" % (1, n+1) for n in xrange(N)]
    register_cilia_large("cilia_1.png", ["cilia_" + str(i+1) + ".png" for i in xrange(1, N+1)], out_paths, landmark_paths, ttype_second == "rigid", initial_transforms)
    (d, best_index2) = load_distances(out_paths, 1, N, dataset_path + "second/distances.csv")
    merge_images(d, out_paths, 1, N, 129, 129, dataset_path + "second/collage.png")
def split_landmarks(lm):
    """Split a landmark matrix into (first-two, odd-rest, even-rest) rows.

    Rows 0-1 form the central-pair group; the remaining rows are split
    by alternating parity starting at row 2.
    """
    return (lm[:2, :], lm[2::2, :], lm[3::2, :])
def eval(ref, landmarks):
    """Print SMD landmark errors of each result against the reference.

    ref:
        path to the reference landmark CSV
    landmarks:
        paths to the (transformed) landmark CSVs to evaluate
    NOTE: the function name shadows the ``eval`` builtin.
    """
    ref_landmarks = st.read_csv(ref)
    (ref_cp_lm, ref_odd_lm, ref_even_lm) = split_landmarks(ref_landmarks)
    # create registration object
    # NOTE(review): ``r`` is never used below — possibly a leftover; confirm
    parallel_count = 6
    image_dimensions = 2
    r = reg.Registration(parallel_count, bin_path, dim = image_dimensions, enable_logging = True)
    all_smd_list = []
    cp_smd_list = []
    outer_smd_list = []
    for landmark_path in landmarks:
        flo_landmarks = st.read_csv(landmark_path)
        (flo_cp_lm, flo_odd_lm, flo_even_lm) = split_landmarks(flo_landmarks)
        cp_lm_smd = smd(ref_cp_lm, flo_cp_lm)
        odd_lm_smd = smd(ref_odd_lm, flo_odd_lm)
        even_lm_smd = smd(ref_even_lm, flo_even_lm)
        outer_lm_smd = odd_lm_smd + even_lm_smd
        all_lm_smd = cp_lm_smd + outer_lm_smd
        # averages per point: divisors assume 2 central-pair and 18 outer
        # landmarks per file (20 total) — confirm against the CSVs
        cp_smd_list.append(cp_lm_smd / 2.0)
        outer_smd_list.append(outer_lm_smd / 18.0)
        all_smd_list.append(all_lm_smd / 20.0)
    final_smd = np.array([all_smd_list, cp_smd_list, outer_smd_list])
    print(final_smd)
    all_smd = np.array(all_smd_list)
    cp_smd = np.array(cp_smd_list)
    outer_smd = np.array(outer_smd_list)
    print("All: %.5f +- %.5f" % (np.mean(all_smd), np.std(all_smd)))
    print("CP: %.5f +- %.5f" % (np.mean(cp_smd), np.std(cp_smd)))
    print("Outer: %.5f +- %.5f" % (np.mean(outer_smd), np.std(outer_smd)))
if __name__ == "__main__":
    # Run the two-stage pipeline over 19 floating images, then evaluate
    # landmark errors after stage 1, after stage 2, and with no registration.
    N = 19
    best_index = first(N)
    #eval_first(N, best_index)
    second(N, best_index)
    eval(dataset_path + "cilia_landmarks_1.csv", [dataset_path + "first/" + (ttype_first + "_%d/%d/transformed_landmarks.csv" % (best_index[n-1]+1, n)) for n in xrange(1, N+1)] )
    eval(dataset_path + "cilia_landmarks_1.csv", [dataset_path + "second/" + (ttype_second + "_1/%d/transformed_landmarks.csv" % n) for n in xrange(1, N+1)] )
    eval(dataset_path + "cilia_landmarks_1.csv", [dataset_path + "cilia_landmarks_%d.csv" % n for n in xrange(2, N+2)])
    # The commented-out code below preserves earlier experiment variants.
    #ttype = "affine"
    #ttypeflag = False
    #second_stage = True
    #if second_stage:
    #    initial_transform = #dataset_path + ttype + "_out_1/1/transform_affine.txt"
    #    out_path = dataset_path + ttype + "_out2_%d/" % (k+1)
    #    register_cilia("cilia_1.png", ["cilia_" + str(i+1) + ".png" for i in xrange(1, N+1)], out_path, ttypeflag, initial_transform)
    #else:
    #    for k in xrange(9):
    #        initial_transform = [dataset_path + "transforms/transform%d.txt" % (k+1) for k in xrange(N)]
    #        out_paths = [dataset_path + ttype + "_out_%d/" % (k+1) for k in xrange(N)]
    #        register_cilia("cilia_1.png", ["cilia_" + str(i+1) + ".png" for i in xrange(1, N+1)], out_path, ttypeflag, initial_transform)
    #if second_stage:
    #    initial_transform = #dataset_path + ttype + "_out_1/1/transform_affine.txt"
    #    out_path = dataset_path + ttype + "_out2_%d/" % (k+1)
    #else:
    #    initial_transform = [dataset_path + "transforms/transform%d.txt" % (k+1),
    #    out_path = dataset_path + ttype + "_out_%d/" % (k+1)
    #register_cilia("cilia_1.png", ["cilia_" + str(i+1) + ".png" for i in xrange(1, 2)], out_path, ttypeflag, initial_transform)
| [
"registration.Registration",
"numpy.std",
"numpy.savetxt",
"numpy.zeros",
"numpy.argmin",
"numpy.mean",
"numpy.array",
"numpy.linalg.norm",
"script_tools.read_csv"
] | [((672, 761), 'registration.Registration', 'reg.Registration', (['parallel_count', 'bin_path'], {'dim': 'image_dimensions', 'enable_logging': '(True)'}), '(parallel_count, bin_path, dim=image_dimensions,\n enable_logging=True)\n', (688, 761), True, 'import registration as reg\n'), ((2613, 2702), 'registration.Registration', 'reg.Registration', (['parallel_count', 'bin_path'], {'dim': 'image_dimensions', 'enable_logging': '(True)'}), '(parallel_count, bin_path, dim=image_dimensions,\n enable_logging=True)\n', (2629, 2702), True, 'import registration as reg\n'), ((4483, 4531), 'numpy.savetxt', 'np.savetxt', (['output', 'd'], {'fmt': '"""%.7f"""', 'delimiter': '""","""'}), "(output, d, fmt='%.7f', delimiter=',')\n", (4493, 4531), True, 'import numpy as np\n'), ((4551, 4571), 'numpy.argmin', 'np.argmin', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (4560, 4571), True, 'import numpy as np\n'), ((4661, 4685), 'numpy.zeros', 'np.zeros', (['[h * n, w * k]'], {}), '([h * n, w * k])\n', (4669, 4685), True, 'import numpy as np\n'), ((5053, 5073), 'numpy.argmin', 'np.argmin', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (5062, 5073), True, 'import numpy as np\n'), ((7861, 7877), 'script_tools.read_csv', 'st.read_csv', (['ref'], {}), '(ref)\n', (7872, 7877), True, 'import script_tools as st\n'), ((8031, 8120), 'registration.Registration', 'reg.Registration', (['parallel_count', 'bin_path'], {'dim': 'image_dimensions', 'enable_logging': '(True)'}), '(parallel_count, bin_path, dim=image_dimensions,\n enable_logging=True)\n', (8047, 8120), True, 'import registration as reg\n'), ((8720, 8773), 'numpy.array', 'np.array', (['[all_smd_list, cp_smd_list, outer_smd_list]'], {}), '([all_smd_list, cp_smd_list, outer_smd_list])\n', (8728, 8773), True, 'import numpy as np\n'), ((8806, 8828), 'numpy.array', 'np.array', (['all_smd_list'], {}), '(all_smd_list)\n', (8814, 8828), True, 'import numpy as np\n'), ((8840, 8861), 'numpy.array', 'np.array', (['cp_smd_list'], {}), '(cp_smd_list)\n', 
(8848, 8861), True, 'import numpy as np\n'), ((8876, 8900), 'numpy.array', 'np.array', (['outer_smd_list'], {}), '(outer_smd_list)\n', (8884, 8900), True, 'import numpy as np\n'), ((4410, 4436), 'script_tools.read_csv', 'st.read_csv', (['distance_path'], {}), '(distance_path)\n', (4421, 4436), True, 'import script_tools as st\n'), ((5868, 5899), 'numpy.linalg.norm', 'np.linalg.norm', (['(p - setp)'], {'ord': '(2)'}), '(p - setp, ord=2)\n', (5882, 5899), True, 'import numpy as np\n'), ((8240, 8266), 'script_tools.read_csv', 'st.read_csv', (['landmark_path'], {}), '(landmark_path)\n', (8251, 8266), True, 'import script_tools as st\n'), ((8935, 8951), 'numpy.mean', 'np.mean', (['all_smd'], {}), '(all_smd)\n', (8942, 8951), True, 'import numpy as np\n'), ((8953, 8968), 'numpy.std', 'np.std', (['all_smd'], {}), '(all_smd)\n', (8959, 8968), True, 'import numpy as np\n'), ((9004, 9019), 'numpy.mean', 'np.mean', (['cp_smd'], {}), '(cp_smd)\n', (9011, 9019), True, 'import numpy as np\n'), ((9021, 9035), 'numpy.std', 'np.std', (['cp_smd'], {}), '(cp_smd)\n', (9027, 9035), True, 'import numpy as np\n'), ((9071, 9089), 'numpy.mean', 'np.mean', (['outer_smd'], {}), '(outer_smd)\n', (9078, 9089), True, 'import numpy as np\n'), ((9091, 9108), 'numpy.std', 'np.std', (['outer_smd'], {}), '(outer_smd)\n', (9097, 9108), True, 'import numpy as np\n')] |
# Module to import functions from in examples for multiprocessing backend
import numpy as np
def stochastic_function_seeded(max_value, random_state):
    """Draw five integers in ``[0, max_value)`` from a seeded generator."""
    generator = np.random.RandomState(random_state)
    return generator.randint(max_value, size=5)
def stochastic_function(max_value):
    """Draw five random integers in ``[0, max_value)``.

    Uses the module-level NumPy RNG, so results vary between calls
    unless the global seed is fixed.
    """
    return np.random.randint(0, max_value, size=5)
def func_async(i, *args):
    """Return twice the first argument; extra positional args are ignored."""
    return i * 2
| [
"numpy.random.randint",
"numpy.random.RandomState"
] | [((162, 197), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (183, 197), True, 'import numpy as np\n'), ((348, 384), 'numpy.random.randint', 'np.random.randint', (['max_value'], {'size': '(5)'}), '(max_value, size=5)\n', (365, 384), True, 'import numpy as np\n')] |
import mxnet as mx
import numpy as np
import minibatch
class TestLoader(mx.io.DataIter):
    """Sequential data iterator over an image list for testing.

    Each batch is built by ``minibatch.get_testbatch`` from the image
    path (first whitespace-separated field) of the next ``batch_size``
    entries of ``imdb``; test batches carry no labels.
    """

    def __init__(self, imdb, batch_size=1, shuffle=False):
        self.imdb = imdb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.size = len(imdb)
        self.index = np.arange(self.size)

        self.cur = 0
        self.data = None
        self.label = None

        self.data_names = ['data']
        self.label_names = []

        self.reset()
        self.get_batch()

    @property
    def provide_data(self):
        # (name, shape) pairs describing the currently staged data arrays
        return [(k, v.shape) for k, v in zip(self.data_names, self.data)]

    @property
    def provide_label(self):
        # test batches carry no labels
        return []

    def reset(self):
        """Rewind to the first sample, reshuffling when enabled."""
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)

    def iter_next(self):
        # True while at least one full batch remains
        return self.cur + self.batch_size <= self.size

    def next(self):
        """Return the next DataBatch or raise StopIteration at the end."""
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # // keeps the batch index an integer on Python 3 as well,
        # where plain / became float division (identical on Python 2)
        return self.cur // self.batch_size

    def getpad(self):
        """Number of padding samples in a final, partial batch."""
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def get_batch(self):
        """Load the next slice of images and stage them as NDArrays."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        imdb = []
        for i in range(cur_from, cur_to):
            idx = self.index[i]
            # first whitespace-separated field of each line is the image path
            imdb.append({'image': self.imdb[idx].strip().split(' ')[0]})
        data, label = minibatch.get_testbatch(imdb)
        self.data = [mx.nd.array(data[name]) for name in self.data_names]
class ImageLoader(mx.io.DataIter):
    """Training data iterator producing image batches with configurable labels.

    Depending on the ``with_cls``/``with_bbox``/``with_landmark`` flags,
    batches carry classification labels, bbox regression targets and/or
    landmark targets built by ``minibatch.get_minibatch``.  With
    ``flip=True`` the dataset is virtually doubled: indices beyond the
    real image count denote horizontally flipped samples.
    """
    def __init__(self, imdb, im_size, with_cls, with_bbox, with_landmark, batch_size, thread_num, flip=True, shuffle=False, ctx=None, work_load_list=None):
        super(ImageLoader, self).__init__()
        self.imdb = imdb
        self.batch_size = batch_size
        self.thread_num = thread_num
        self.im_size = im_size
        self.with_cls = with_cls
        self.with_bbox = with_bbox
        self.with_landmark = with_landmark
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.cur = 0
        self.image_num = len(imdb)
        # flipping doubles the virtual dataset size; indices >= image_num
        # are interpreted as flipped copies in get_batch
        if flip:
            self.size = self.image_num*2
        else:
            self.size = self.image_num
        self.index = np.arange(self.size)
        self.num_classes = 2
        self.batch = None
        self.data = None
        self.label = None
        # choose the label blobs for this configuration; with_type is set
        # whenever a 'type_label' blob is emitted
        if self.with_landmark:
            if self.with_cls:
                if self.with_bbox:
                    self.label_names = ['type_label', 'label', 'bbox_target', 'landmark_target']
                    self.with_type = True
                else:
                    self.label_names = ['type_label', 'label', 'landmark_target']
                    self.with_type = True
            else:
                if self.with_bbox:
                    self.label_names = ['type_label', 'bbox_target', 'landmark_target']
                    self.with_type = True
                else:
                    self.label_names = ['landmark_target']
                    self.with_type = False
        else:
            self.label_names= ['label', 'bbox_target']
            self.with_type = False
        self.reset()
        self.get_batch()
    @property
    def provide_data(self):
        # single 'data' blob with the shape of the currently staged batch
        return [('data', self.data[0].shape)]
    #    return [(k, v.shape) for k, v in zip(self.data_name, self.data)]
    @property
    def provide_label(self):
        # (name, shape) pairs for every configured label blob
        return [(k, v.shape) for k, v in zip(self.label_names, self.label)]
    def reset(self):
        # rewind to the first sample, reshuffling when enabled
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)
    def iter_next(self):
        # True while at least one full batch remains
        return self.cur + self.batch_size <= self.size
    def next(self):
        # return the next DataBatch or raise StopIteration at the end
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration
    def getindex(self):
        # NOTE(review): Python-2-style division; under Python 3 this yields
        # a float batch index — confirm intended interpreter
        return self.cur / self.batch_size
    def getpad(self):
        # number of padding samples in a final, partial batch
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0
    def get_batch(self):
        """Assemble annotations for the next slice and stage the NDArrays.

        Each imdb line is "<path> <label> [targets...]"; label semantics:
        1 = positive, 0 = negative, -1 = part, -2 = landmark sample.
        For flipped samples, bbox x-offsets are negated/swapped and
        landmark x-coordinates are mirrored about 1.0.
        """
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        #print cur_from,cur_to,self.index[cur_from:cur_to]
        imdb = []
        for i in range(cur_from,cur_to):
            idx = self.index[i]
            imdb_ = dict()
            is_flip = False
            # indices past the real image count select the flipped copy
            if idx >= self.image_num:
                imdb_['flipped'] = True
                is_flip = True
                idx = idx - self.image_num
            else:
                imdb_['flipped'] = False
            annotation = self.imdb[idx].strip().split(' ')
            imdb_['image'] = annotation[0]+'.jpg'
            #print(imdb_['image'])
            label = int(annotation[1])
            if self.with_type:
                imdb_['type_label'] = int(label)
            if label == 1: #pos
                if self.with_cls:
                    imdb_['label'] = 1
                if self.with_bbox:
                    bbox_target = np.array(annotation[2:],dtype=np.float32)
                    # mirror the horizontal offsets for flipped samples
                    if is_flip:
                        bbox_target[0], bbox_target[2] = -bbox_target[2], -bbox_target[0]
                    imdb_['bbox_target'] = bbox_target
                if self.with_landmark:
                    imdb_['landmark_target'] = np.zeros((10,))
            elif label == 0: #neg
                if self.with_cls:
                    imdb_['label'] = 0
                if self.with_bbox:
                    imdb_['bbox_target'] = np.zeros((4,))
                if self.with_landmark:
                    imdb_['landmark_target'] = np.zeros((10,))
            elif label == -1:
                if self.with_cls:
                    imdb_['label'] = -1
                if self.with_bbox:
                    bbox_target = np.array(annotation[2:],dtype=np.float32)
                    # mirror the horizontal offsets for flipped samples
                    if is_flip:
                        bbox_target[0], bbox_target[2] = -bbox_target[2], -bbox_target[0]
                    imdb_['bbox_target'] = bbox_target
                if self.with_landmark:
                    imdb_['landmark_target'] = np.zeros((10,))
            elif label == -2: #landmark
                if self.with_cls:
                    imdb_['label'] = -1
                if self.with_bbox:
                    imdb_['bbox_target'] = np.zeros((4,))
                if self.with_landmark:
                    landmark_target = np.array(annotation[2:],dtype=np.float32)
                    # mirror landmark x-coordinates about 1.0 and swap the
                    # left/right point pairs for flipped samples
                    if is_flip:
                        landmark_target[0], landmark_target[1] = 1.0-landmark_target[1], 1.0-landmark_target[0]
                        landmark_target[2] = 1.0-landmark_target[2]
                        landmark_target[3], landmark_target[4] = 1.0-landmark_target[4], 1.0-landmark_target[3]
                    imdb_['landmark_target'] = landmark_target
            imdb.append(imdb_)
        data, label = minibatch.get_minibatch(imdb, self.num_classes, self.im_size, self.with_type, self.with_cls, self.with_bbox, self.with_landmark, self.thread_num)
        self.data = [mx.nd.array(data['data'])]
        self.label = [mx.nd.array(label[name]) for name in self.label_names]
| [
"numpy.zeros",
"minibatch.get_minibatch",
"numpy.arange",
"numpy.array",
"mxnet.nd.array",
"mxnet.cpu",
"minibatch.get_testbatch",
"numpy.random.shuffle"
] | [((293, 313), 'numpy.arange', 'np.arange', (['self.size'], {}), '(self.size)\n', (302, 313), True, 'import numpy as np\n'), ((1997, 2026), 'minibatch.get_testbatch', 'minibatch.get_testbatch', (['imdb'], {}), '(imdb)\n', (2020, 2026), False, 'import minibatch\n'), ((3009, 3029), 'numpy.arange', 'np.arange', (['self.size'], {}), '(self.size)\n', (3018, 3029), True, 'import numpy as np\n'), ((8008, 8163), 'minibatch.get_minibatch', 'minibatch.get_minibatch', (['imdb', 'self.num_classes', 'self.im_size', 'self.with_type', 'self.with_cls', 'self.with_bbox', 'self.with_landmark', 'self.thread_num'], {}), '(imdb, self.num_classes, self.im_size, self.\n with_type, self.with_cls, self.with_bbox, self.with_landmark, self.\n thread_num)\n', (8031, 8163), False, 'import minibatch\n'), ((820, 849), 'numpy.random.shuffle', 'np.random.shuffle', (['self.index'], {}), '(self.index)\n', (837, 849), True, 'import numpy as np\n'), ((2048, 2071), 'mxnet.nd.array', 'mx.nd.array', (['data[name]'], {}), '(data[name])\n', (2059, 2071), True, 'import mxnet as mx\n'), ((4344, 4373), 'numpy.random.shuffle', 'np.random.shuffle', (['self.index'], {}), '(self.index)\n', (4361, 4373), True, 'import numpy as np\n'), ((8175, 8200), 'mxnet.nd.array', 'mx.nd.array', (["data['data']"], {}), "(data['data'])\n", (8186, 8200), True, 'import mxnet as mx\n'), ((8224, 8248), 'mxnet.nd.array', 'mx.nd.array', (['label[name]'], {}), '(label[name])\n', (8235, 8248), True, 'import mxnet as mx\n'), ((2765, 2773), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (2771, 2773), True, 'import mxnet as mx\n'), ((6089, 6131), 'numpy.array', 'np.array', (['annotation[2:]'], {'dtype': 'np.float32'}), '(annotation[2:], dtype=np.float32)\n', (6097, 6131), True, 'import numpy as np\n'), ((6394, 6409), 'numpy.zeros', 'np.zeros', (['(10,)'], {}), '((10,))\n', (6402, 6409), True, 'import numpy as np\n'), ((6608, 6622), 'numpy.zeros', 'np.zeros', (['(4,)'], {}), '((4,))\n', (6616, 6622), True, 'import numpy as np\n'), ((6709, 
6724), 'numpy.zeros', 'np.zeros', (['(10,)'], {}), '((10,))\n', (6717, 6724), True, 'import numpy as np\n'), ((6898, 6940), 'numpy.array', 'np.array', (['annotation[2:]'], {'dtype': 'np.float32'}), '(annotation[2:], dtype=np.float32)\n', (6906, 6940), True, 'import numpy as np\n'), ((7203, 7218), 'numpy.zeros', 'np.zeros', (['(10,)'], {}), '((10,))\n', (7211, 7218), True, 'import numpy as np\n'), ((7423, 7437), 'numpy.zeros', 'np.zeros', (['(4,)'], {}), '((4,))\n', (7431, 7437), True, 'import numpy as np\n'), ((7516, 7558), 'numpy.array', 'np.array', (['annotation[2:]'], {'dtype': 'np.float32'}), '(annotation[2:], dtype=np.float32)\n', (7524, 7558), True, 'import numpy as np\n')] |
import networkx as nx
import scipy.sparse.csgraph
import numpy as np
import gym
import pickle
# Named wall layouts for the Pointmass environment.
# Each entry is a 0/1 occupancy grid: 0 = free cell, 1 = wall.
# Grids are indexed as walls[row, col] and may be upscaled with resize_walls().
WALLS = {
    'Small':
        np.array([[0, 0, 0, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0]]),
    'Cross':
        np.array([[0, 0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 1, 0, 0, 0],
                  [0, 0, 0, 1, 0, 0, 0],
                  [0, 1, 1, 1, 1, 1, 0],
                  [0, 0, 0, 1, 0, 0, 0],
                  [0, 0, 0, 1, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0, 0]]),
    'FourRooms':
        np.array([[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                  [1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1],
                  [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]),
    'Spiral5x5':
        np.array([[0, 0, 0, 0, 0],
                  [0, 1, 1, 1, 1],
                  [0, 1, 0, 0, 1],
                  [0, 1, 1, 0, 1],
                  [0, 0, 0, 0, 1]]),
    'Spiral7x7':
        np.array([[1, 1, 1, 1, 1, 1, 1],
                  [1, 0, 0, 0, 0, 0, 0],
                  [1, 0, 1, 1, 1, 1, 0],
                  [1, 0, 1, 0, 0, 1, 0],
                  [1, 0, 1, 1, 0, 1, 0],
                  [1, 0, 0, 0, 0, 1, 0],
                  [1, 1, 1, 1, 1, 1, 0]]),
    'Spiral9x9':
        np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                  [0, 1, 1, 1, 1, 1, 1, 1, 1],
                  [0, 1, 0, 0, 0, 0, 0, 0, 1],
                  [0, 1, 0, 1, 1, 1, 1, 0, 1],
                  [0, 1, 0, 1, 0, 0, 1, 0, 1],
                  [0, 1, 0, 1, 1, 0, 1, 0, 1],
                  [0, 1, 0, 0, 0, 0, 1, 0, 1],
                  [0, 1, 1, 1, 1, 1, 1, 0, 1],
                  [0, 0, 0, 0, 0, 0, 0, 0, 1]]),
    'Spiral11x11':
        np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                  [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                  [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                  [1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0],
                  [1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0],
                  [1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0],
                  [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0],
                  [1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0],
                  [1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0],
                  [1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]]),
    'Maze5x5':
        # np.array([[0, 0, 0],
        #           [1, 1, 0],
        #           [0, 0, 0]]),
        np.array([[0, 0, 0, 0, 0],
                  [1, 1, 1, 1, 0],
                  [1, 1, 1, 1, 0],
                  [1, 1, 1, 1, 0],
                  [1, 1, 1, 1, 0]]),
    'Maze6x6':
        np.array([[0, 0, 1, 0, 0, 0],
                  [1, 0, 1, 0, 1, 0],
                  [0, 0, 1, 0, 1, 1],
                  [0, 1, 1, 0, 0, 1],
                  [0, 0, 1, 1, 0, 1],
                  [1, 0, 0, 0, 0, 1]]),
    'Maze11x11':
        np.array([[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
                  [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
                  [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
                  [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
                  [0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0],
                  [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
                  [1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0],
                  [1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
                  [0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0],
                  [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]),
    'Tunnel':
        np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                  [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0],
                  [0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0],
                  [0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                  [0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                  [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
                  [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
                  [0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
                  [0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0],
                  [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0],
                  [0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
                  [0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                  [0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                  [0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                  [0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                  [0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                  [0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                  [0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0],
                  [0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0],
                  [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]),
    'U':
        np.array([[0, 0, 0],
                  [0, 1, 0],
                  [0, 1, 0],
                  [0, 1, 0],
                  [1, 1, 0],
                  [1, 1, 0],
                  [0, 1, 0],
                  [0, 1, 0],
                  [0, 1, 0],
                  [0, 0, 0]]),
    'Tree':
        np.array([
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
            [1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1],
            [1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1],
            [1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1],
            [0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0],
            [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],
            [0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0],
        ]),
    'UMulti':
        np.array([
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
            [0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
            [0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0],
            [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0],
            [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
            [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
            [0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0],
            [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0],
            [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
            [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
            [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0],
            [0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0],
            [0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
            [0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]),
    'FlyTrapSmall':
        np.array([
            [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1],
            [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0],
            [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
        ]),
    'FlyTrapBig':
        np.array([
            [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
        ]),
    'Galton':
        np.array([
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0],
            [0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0],
        ]),
}
# Discrete action set: action index -> 2D velocity [d0, d1].
# 0 is a no-op; 1/2 move -/+ along axis 1; 3/4 move -/+ along axis 0.
ACT_DICT = {
    0: [0.,0.],
    1: [0., -1.],
    2: [0., 1.],
    3: [-1., 0.],
    4: [1., 0.],
}
def resize_walls(walls, factor):
    """Upscale a wall map by an integer factor.

    Every cell of *walls* is expanded into a ``factor`` x ``factor`` block,
    so obstacles keep their relative shape at the higher resolution.

    Args:
        walls: 0/1 array indicating obstacle locations.
        factor: (int) factor by which to rescale the environment.

    Returns:
        A (factor * height, factor * width) array.
    """
    height, width = walls.shape
    # Duplicate rows, then columns, `factor` times each.
    scaled = np.repeat(np.repeat(walls, factor, axis=0), factor, axis=1)
    assert scaled.shape == (factor * height, factor * width)
    return scaled
class Pointmass(gym.Env):
    """2D point-mass navigation in a walled grid maze.

    Observations are (row, col) positions normalized to [0, 1]^2. The
    action space is 5 discrete moves (see ``ACT_DICT``), executed with
    Gaussian noise of std ``self.action_noise``. An episode ends when the
    agent is within ``self.epsilon`` of the fixed goal (unnormalized
    units) or after ``max_episode_steps`` steps.
    """

    def __init__(self,
                 difficulty=0,
                 dense_reward=False,
                 ):
        """Initialize the point environment.

        Args:
            difficulty: (int) 0-3; selects the maze, the fixed start/goal
                cells and the episode horizon (see branches below).
            dense_reward: (bool) if True, reward is the negative Euclidean
                distance to the goal; otherwise reward is -1 per step and
                0 on reaching the goal.
        """
        import matplotlib
        matplotlib.use('Agg')  # headless backend; figures are only saved to disk
        import matplotlib.pyplot as plt
        self.plt = plt
        self.fig = self.plt.figure()
        self.action_dim = self.ac_dim = 2
        self.observation_dim = self.obs_dim = 2
        self.env_name = 'pointmass'
        self.is_gym = True
        if difficulty == 0:
            walls = 'Maze5x5'
            resize_factor = 2
            self.fixed_start = np.array([0.5, 0.5]) * resize_factor
            self.fixed_goal = np.array([4.5, 4.5]) * resize_factor
            self.max_episode_steps = 50
        elif difficulty == 1:
            walls = 'Maze6x6'
            resize_factor = 1
            self.fixed_start = np.array([0.5, 0.5]) * resize_factor
            self.fixed_goal = np.array([1.5, 5.5]) * resize_factor
            self.max_episode_steps = 150
        elif difficulty == 2:
            walls = 'FourRooms'
            resize_factor = 2
            self.fixed_start = np.array([1.0, 1.0]) * resize_factor
            self.fixed_goal = np.array([10.0, 10.0]) * resize_factor
            self.max_episode_steps = 100
        elif difficulty == 3:
            #NOTE TO STUDENTS: FEEL FREE TO EDIT THESE PARAMS FOR THE EXTRA CREDIT PROBLEM!
            walls = 'Maze11x11'
            resize_factor = 1
            self.fixed_start = np.array([0.5, 0.5]) * resize_factor
            self.fixed_goal = np.array([0.5, 10.5]) * resize_factor
            self.max_episode_steps = 200
        else:
            print('Invalid difficulty setting')
            # NOTE(review): kept from the original — deliberately raises
            # ZeroDivisionError to abort construction on a bad difficulty.
            return 1/0
        if resize_factor > 1:
            self._walls = resize_walls(WALLS[walls], resize_factor)
        else:
            self._walls = WALLS[walls]
        (height, width) = self._walls.shape
        # All-pairs shortest-path table; used only by the expert oracle
        # (get_optimal_action), never for training rewards.
        self._apsp = self._compute_apsp(self._walls)
        self._height = height
        self._width = width
        self.action_space = gym.spaces.Discrete(5)
        self.observation_space = gym.spaces.Box(
            low=np.array([0, 0]),
            high=np.array([self._height, self._width]),
            dtype=np.float32)
        self.dense_reward = dense_reward
        self.num_actions = 5
        self.epsilon = resize_factor   # goal-reached radius, unnormalized units
        self.action_noise = 0.5        # std-dev of Gaussian action noise
        self.obs_vec = []              # normalized observations of current episode
        self.last_trajectory = None
        self.difficulty = difficulty
        self.num_runs = 0
        self.reset()

    def seed(self, seed):
        """Seed numpy's global RNG (drives action noise and state sampling)."""
        np.random.seed(seed)

    def reset(self, seed=None):
        """Reset to the fixed start state; return the normalized observation."""
        # Fix: was `if seed:`, which silently ignored seed=0.
        if seed is not None:
            self.seed(seed)
        if len(self.obs_vec) > 0:
            # NOTE(review): plot_trajectory() returns None and requires
            # set_logdir() to have been called first — verify with callers.
            self.last_trajectory = self.plot_trajectory()
        self.plt.clf()
        self.timesteps_left = self.max_episode_steps
        self.obs_vec = [self._normalize_obs(self.fixed_start.copy())]
        self.state = self.fixed_start.copy()
        self.num_runs += 1
        return self._normalize_obs(self.state.copy())

    def set_logdir(self, path):
        """Set the directory under which trajectory plots are saved."""
        self.traj_filepath = path + 'last_traj.png'

    def _get_distance(self, obs, goal):
        """Compute the shortest path distance.

        Note: This distance is *not* used for training."""
        (i1, j1) = self._discretize_state(obs.copy())
        (i2, j2) = self._discretize_state(goal.copy())
        return self._apsp[i1, j1, i2, j2]

    def simulate_step(self, state, action):
        """Integrate `action` from `state` in small substeps.

        Each axis is advanced independently and a substep is discarded if
        it would land in a wall, so the point slides along obstacles.
        Does not mutate `state`; returns the resulting position.
        """
        num_substeps = 10
        dt = 1.0 / num_substeps
        num_axis = len(action)
        for _ in np.linspace(0, 1, num_substeps):
            for axis in range(num_axis):
                new_state = state.copy()
                new_state[axis] += dt * action[axis]
                if not self._is_blocked(new_state):
                    state = new_state
        return state

    def get_optimal_action(self, state):
        """Return the greedy expert action toward the fixed goal.

        Simulates each of the 5 actions (noise-free) from the normalized
        `state` and picks the one minimizing shortest-path distance.
        """
        state = self._unnormalize_obs(state)
        best_action = 0
        best_dist = np.inf
        for i in range(self.num_actions):
            action = np.array(ACT_DICT[i])
            s_prime = self.simulate_step(state, action)
            dist = self._get_distance(s_prime, self.fixed_goal)
            if dist < best_dist:
                best_dist = dist
                best_action = i
        return best_action

    def _discretize_state(self, state, resolution=1.0):
        """Map a continuous (unnormalized) state to integer grid indices."""
        # Fix: `np.int` was removed in NumPy 1.24; use the builtin.
        (i, j) = np.floor(resolution * state).astype(int)
        # Round down to the nearest cell if at the boundary.
        if i == self._height:
            i -= 1
        if j == self._width:
            j -= 1
        return (i, j)

    def _normalize_obs(self, obs):
        """Scale an unnormalized (row, col) position into [0, 1]^2."""
        return np.array([
            obs[0] / float(self._height),
            obs[1] / float(self._width)
        ])

    def _unnormalize_obs(self, obs):
        """Inverse of _normalize_obs: [0, 1]^2 back to grid units."""
        return np.array([
            obs[0] * float(self._height),
            obs[1] * float(self._width)
        ])

    def _is_blocked(self, state):
        """Return True if `state` is out of bounds or inside a wall cell."""
        if not self.observation_space.contains(state):
            return True
        (i, j) = self._discretize_state(state)
        return (self._walls[i, j] == 1)

    def step(self, action):
        """Apply a discrete action (with noise); return (obs, reward, done, info)."""
        self.timesteps_left -= 1
        if isinstance(action, np.ndarray):
            action = action.item()
        action = np.array(ACT_DICT[action])
        action = np.random.normal(action, self.action_noise)
        self.state = self.simulate_step(self.state, action)
        dist = np.linalg.norm(self.state - self.fixed_goal)
        done = (dist < self.epsilon) or (self.timesteps_left == 0)
        ns = self._normalize_obs(self.state.copy())
        self.obs_vec.append(ns.copy())
        if self.dense_reward:
            reward = -dist
        else:
            # Sparse: 0 at the goal, -1 everywhere else.
            reward = int(dist < self.epsilon) - 1
        return ns, reward, done, {}

    @property
    def walls(self):
        """The (possibly resized) 0/1 wall map."""
        return self._walls

    @property
    def goal(self):
        """The fixed goal, as a normalized observation."""
        return self._normalize_obs(self.fixed_goal.copy())

    def _compute_apsp(self, walls):
        """All-pairs shortest paths over free cells (8-connected grid)."""
        (height, width) = walls.shape
        g = nx.Graph()
        # Add all the nodes
        for i in range(height):
            for j in range(width):
                if walls[i, j] == 0:
                    g.add_node((i, j))
        # Add all the edges
        for i in range(height):
            for j in range(width):
                for di in [-1, 0, 1]:
                    for dj in [-1, 0, 1]:
                        if di == dj == 0: continue  # Don't add self loops
                        if i + di < 0 or i + di > height - 1: continue  # No cell here
                        if j + dj < 0 or j + dj > width - 1: continue  # No cell here
                        if walls[i, j] == 1: continue  # Don't add edges to walls
                        if walls[i + di, j + dj] == 1: continue  # Don't add edges to walls
                        g.add_edge((i, j), (i + di, j + dj))
        # dist[i, j, k, l] is path from (i, j) -> (k, l); inf if unreachable.
        # Fix: `np.float` was removed in NumPy 1.24; np.inf is identical.
        dist = np.full((height, width, height, width), np.inf)
        for ((i1, j1), dist_dict) in nx.shortest_path_length(g):
            for ((i2, j2), d) in dist_dict.items():
                dist[i1, j1, i2, j2] = d
        return dist

    def render(self, mode=None):
        """Draw the maze, goal and agent; return the frame as an RGB array."""
        self.plot_walls()
        # current and end
        self.plt.plot(self.fixed_goal[0], self.fixed_goal[1], 'go')
        self.plt.plot(self.state[0], self.state[1], 'ko')
        self.plt.pause(0.1)
        # NOTE(review): canvas.tostring_rgb is deprecated in newer
        # matplotlib (removed in 3.10) — confirm the pinned version.
        img = np.frombuffer(self.fig.canvas.tostring_rgb(), dtype=np.uint8)
        img = img.reshape(self.fig.canvas.get_width_height()[::-1] + (3,))
        return img

    def plot_trajectory(self):
        """Save a plot of the current episode's trajectory to traj_filepath."""
        self.plt.clf()
        self.plot_walls()
        obs_vec, goal = np.array(self.obs_vec), self.goal
        self.plt.plot(obs_vec[:, 0], obs_vec[:, 1], 'b-o', alpha=0.3)
        self.plt.scatter([obs_vec[0, 0]], [obs_vec[0, 1]], marker='+',
                         color='red', s=200, label='start')
        self.plt.scatter([obs_vec[-1, 0]], [obs_vec[-1, 1]], marker='+',
                         color='green', s=200, label='end')
        self.plt.scatter([goal[0]], [goal[1]], marker='*',
                         color='green', s=200, label='goal')
        self.plt.legend(loc='upper left')
        self.plt.savefig(self.traj_filepath)

    def get_last_trajectory(self):
        """Return whatever reset() stored for the previous episode's plot."""
        return self.last_trajectory

    def plot_walls(self, walls=None):
        """Shade wall cells of `walls` (default: this env's map, transposed)."""
        if walls is None:
            walls = self._walls.T
        (height, width) = walls.shape
        for (i, j) in zip(*np.where(walls)):
            x = np.array([j, j+1]) / float(width)
            y0 = np.array([i, i]) / float(height)
            y1 = np.array([i+1, i+1]) / float(height)
            self.plt.fill_between(x, y0, y1, color='grey')
        self.plt.xlim([0, 1])
        self.plt.ylim([0, 1])
        self.plt.xticks([])
        self.plt.yticks([])

    def _sample_normalized_empty_state(self):
        """Sample a uniformly random free position, normalized to [0, 1]^2."""
        s = self._sample_empty_state()
        return self._normalize_obs(s)

    def _sample_empty_state(self):
        """Sample a uniformly random position inside a free (non-wall) cell."""
        candidate_states = np.where(self._walls == 0)
        num_candidate_states = len(candidate_states[0])
        state_index = np.random.choice(num_candidate_states)
        # Fix: `np.float` was removed in NumPy 1.24; use the builtin.
        state = np.array([candidate_states[0][state_index],
                          candidate_states[1][state_index]],
                         dtype=float)
        state += np.random.uniform(size=2)  # jitter within the chosen cell
        assert not self._is_blocked(state)
        return state
def refresh_path():
    """Return a fresh trajectory dictionary with empty transition lists.

    Keys: 'observations', 'actions', 'next_observations', 'terminals'
    and 'rewards', each mapped to a new (unshared) empty list.
    """
    keys = ('observations', 'actions', 'next_observations',
            'terminals', 'rewards')
    return {key: [] for key in keys}
if __name__ == '__main__':
    # Roll out a noisy shortest-path expert on the easy maze and pickle
    # the collected trajectories as a list of path dicts.
    env = Pointmass(difficulty=0, dense_reward=False)
    num_samples = 50000
    total_samples = 0
    path = refresh_path()
    all_paths = []
    num_positive_rewards = 0
    while total_samples < num_samples:
        path = refresh_path()
        start_state = env._sample_empty_state()
        # With probability 0.5 the expert chases a random goal instead of
        # the environment's fixed goal (goal relabeling for the dataset).
        bern = (np.random.rand() > 0.5)
        if bern:
            goal_state = env._sample_empty_state()
        else:
            goal_state = env.fixed_goal
        print ('Start: ', start_state, ' Goal state: ', goal_state, total_samples)
        # Fix: the original called env.reset(start_state), but reset()'s only
        # parameter is `seed`, and truth-testing a 2-element array there
        # raises ValueError. Episodes start from the environment's fixed
        # start (start_state is still logged above).
        curr_state = env.reset()
        done = False
        for i in range(env.max_episode_steps):
            action = env.get_optimal_action(goal_state)
            temp_bern = (np.random.rand() < 0.2)  # 20% epsilon-exploration
            if temp_bern:
                action = np.random.randint(5)
            next_state, reward, done, _ = env.step(action)
            if reward >= 0:
                num_positive_rewards += 1
            path['observations'].append(curr_state)
            path['actions'].append(action)
            path['next_observations'].append(next_state)
            path['terminals'].append(done)
            path['rewards'].append(reward)
            # Fix: advance the current observation; the original never
            # updated curr_state, so every recorded transition carried the
            # reset state as its observation.
            curr_state = next_state
            if done == True:
                total_samples += i
                break
        all_paths.append(path)
    print ('Num Positive Rewards: ', num_positive_rewards)
    with open('buffer_debug_final' + str(env.difficulty) +'.pkl', 'wb') as f:
        pickle.dump(all_paths, f)
| [
"networkx.shortest_path_length",
"numpy.random.choice",
"numpy.random.uniform",
"pickle.dump",
"numpy.random.seed",
"numpy.random.rand",
"numpy.floor",
"gym.spaces.Discrete",
"numpy.float",
"matplotlib.use",
"numpy.array",
"numpy.linalg.norm",
"numpy.linspace",
"numpy.random.normal",
"ne... | [((126, 192), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n', (134, 192), True, 'import numpy as np\n'), ((269, 449), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 1,\n 1, 1, 1, 1, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0,\n 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, \n 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]])\n', (277, 449), True, 'import numpy as np\n'), ((575, 992), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0,\n 0, 0, 1, 1, 1, 0, 1, 1], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0,\n 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0,\n 1, 0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0\n ], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [\n 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0,\n 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]])\n', (583, 992), True, 'import numpy as np\n'), ((1177, 1277), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 1, 1, 1, 1], [0, 1, 0, 0, 1], [0, 1, 1, 0, 1], [0, 0,\n 0, 0, 1]]'], {}), '([[0, 0, 0, 0, 0], [0, 1, 1, 1, 1], [0, 1, 0, 0, 1], [0, 1, 1, 0, 1\n ], [0, 0, 0, 0, 1]])\n', (1185, 1277), True, 'import numpy as np\n'), ((1371, 1551), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0], [1, 0, 1, 1, 1, 1, 0], [1, 0,\n 1, 0, 0, 1, 0], [1, 0, 1, 1, 0, 1, 0], 
[1, 0, 0, 0, 0, 1, 0], [1, 1, 1,\n 1, 1, 1, 0]]'], {}), '([[1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0], [1, 0, 1, 1, 1, 1, \n 0], [1, 0, 1, 0, 0, 1, 0], [1, 0, 1, 1, 0, 1, 0], [1, 0, 0, 0, 0, 1, 0],\n [1, 1, 1, 1, 1, 1, 0]])\n', (1379, 1551), True, 'import numpy as np\n'), ((1677, 1961), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 0, 0, 0, \n 0, 0, 0, 1], [0, 1, 0, 1, 1, 1, 1, 0, 1], [0, 1, 0, 1, 0, 0, 1, 0, 1],\n [0, 1, 0, 1, 1, 0, 1, 0, 1], [0, 1, 0, 0, 0, 0, 1, 0, 1], [0, 1, 1, 1, \n 1, 1, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, \n 0, 0, 0, 0, 0, 0, 1], [0, 1, 0, 1, 1, 1, 1, 0, 1], [0, 1, 0, 1, 0, 0, 1,\n 0, 1], [0, 1, 0, 1, 1, 0, 1, 0, 1], [0, 1, 0, 0, 0, 0, 1, 0, 1], [0, 1,\n 1, 1, 1, 1, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1]])\n', (1685, 1961), True, 'import numpy as np\n'), ((2121, 2538), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, \n 0, 1, 1, 1, 1, 1, 1, 1, 1, 0], [1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0], [1, 0,\n 1, 0, 1, 1, 1, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0], [1, 0, 1,\n 0, 1, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0], [1, 0, 1, 1,\n 1, 1, 1, 1, 0, 1, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 0]]'], {}), '([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0], [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0], [1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0\n ], [1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0],\n [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0], [\n 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]])\n', (2129, 2538), True, 'import numpy as np\n'), ((2816, 2916), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [1, 1, 1, 1, 0], [1, 1, 1, 1, 0], [1, 1, 1, 1, 0], [1, 1,\n 1, 1, 0]]'], {}), '([[0, 0, 0, 0, 0], [1, 1, 1, 1, 0], 
[1, 1, 1, 1, 0], [1, 1, 1, 1, 0\n ], [1, 1, 1, 1, 0]])\n', (2824, 2916), True, 'import numpy as np\n'), ((3008, 3142), 'numpy.array', 'np.array', (['[[0, 0, 1, 0, 0, 0], [1, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 1, 1, 0, 0,\n 1], [0, 0, 1, 1, 0, 1], [1, 0, 0, 0, 0, 1]]'], {}), '([[0, 0, 1, 0, 0, 0], [1, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 1,\n 1, 0, 0, 1], [0, 0, 1, 1, 0, 1], [1, 0, 0, 0, 0, 1]])\n', (3016, 3142), True, 'import numpy as np\n'), ((3255, 3672), 'numpy.array', 'np.array', (['[[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, \n 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 0,\n 0, 1, 0, 0, 0, 1, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0], [1, 1, 1,\n 0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0], [0, 0, 0, 0,\n 1, 0, 0, 0, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1,\n 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0\n ], [0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],\n [1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0], [1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0], [\n 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])\n', (3263, 3672), True, 'import numpy as np\n'), ((3854, 5391), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0], [0, 1, 1, 1, 1, 1, 1, \n 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0], [0, 1, 1, 0, 0, 0, 0, 1, 1, \n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, \n 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, \n 0, 0, 1, 0, 1, 0], [0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, \n 1, 
0, 1, 0], [0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0], [0, 1,\n 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0], [0, 1, 0, 1, \n 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 0, 1, 1, 0, \n 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 1, 1, 0, 1, 1, \n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 0, 1, 1, 0, 1, 1, 1, 1, \n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, \n 1, 1, 1, 1, 1, 1, 0], [0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, \n 0, 0, 1, 1, 0], [0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, \n 1, 1, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0], [0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0], [0, 1, 1, 1, \n 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0], [0, 1, 1, 0, 0, 0, \n 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 0, 1, 1, 0, 1, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, \n 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, \n 0, 0, 0, 0, 0, 1, 0, 1, 0], [0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, \n 1, 1, 1, 1, 0, 1, 0], [0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, \n 0, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, \n 0], [0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0], [0,\n 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 0, \n 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 1, 1, \n 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 0, 1, 1, 0, 1, 
\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 0, 1, 1, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, \n 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, \n 0, 0, 1, 0, 0, 1, 1, 0], [0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, \n 1, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, \n 0, 0, 0, 0]])\n', (3862, 5391), True, 'import numpy as np\n'), ((5691, 5815), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0], [1, 1, 0], [1, 1, 0], [0, 1, 0\n ], [0, 1, 0], [0, 1, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0], [1, 1, 0], [1, 1, 0],\n [0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 0, 0]])\n', (5699, 5815), True, 'import numpy as np\n'), ((5995, 6903), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1,\n 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 1, 1, 1, 0, \n 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1], [1, 1, 1, 1, 0, 1, 1, \n 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1], [1, 1, 1, 1, 0, 1, 1, 1, 1, \n 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1], [0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, \n 1, 0, 0, 0, 1, 0, 1, 0, 0, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, \n 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, \n 0, 0, 0, 0, 1, 0], [0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, \n 1, 1, 1, 0], [0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, \n 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1,\n 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0]]'], {}), '([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 1,\n 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1], [1, 1, 1, 1, \n 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1], [1, 1, 1, 1, 0, 1, \n 1, 1, 1, 
1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1], [0, 0, 0, 1, 0, 1, 0, 0, \n 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, \n 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, \n 0, 1, 0, 0, 0, 0, 0, 1, 0], [0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, \n 1, 1, 0, 1, 1, 1, 0], [0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, \n 0, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, \n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, \n 0], [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0]])\n', (6003, 6903), True, 'import numpy as np\n'), ((7041, 8048), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1,\n 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0, 0, 0, 0, 0, \n 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0,\n 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, \n 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 0, 0, 0,\n 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, \n 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0,\n 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, \n 1, 0], [0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0,\n 0, 0, 0, 0, 0, 1, 0, 1, 0], [0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0, 0,\n 0, 0, 0, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0],\n [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0], [0, 1, 
0, 1, 0, 1, 0, 1,\n 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0,\n 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, \n 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1,\n 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, \n 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0,\n 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0], [0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, \n 1, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0]])\n', (7049, 8048), True, 'import numpy as np\n'), ((8273, 9027), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 1,\n 1, 1, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0], [0, 1,\n 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, \n 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 1,\n 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0], [0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0], [0, 1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, \n 0, 1, 0], [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0,\n 0, 1, 0, 1, 0, 0, 0, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, \n 1, 1], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]'], {}), '([[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], [1, 1, 1, 1, 1, 1,\n 1, 0, 1, 1, 1, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 1,\n 0, 1, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0], [0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 1, 0, 1, \n 0, 0, 0, 0, 1, 0], [0, 1, 
0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0], [0, 1,\n 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, \n 1, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]])\n', (8281, 9027), True, 'import numpy as np\n'), ((9208, 10960), 'numpy.array', 'np.array', (['[[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], [1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0], [0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, \n 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [1, \n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, \n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]'], {}), '([[1, 1, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n 1], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, \n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, \n 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n 1], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]])\n', (9216, 10960), True, 'import numpy as np\n'), ((11163, 12406), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 0, \n 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, \n 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, \n 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, \n 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, \n 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, \n 1, 0, 1, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, \n 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0], [0, 1, 1, 1, 1, 1, 1, \n 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, \n 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1,\n 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, \n 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, \n 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, \n 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, \n 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, \n 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, \n 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 
0, 0, 0, 0, 1, 0], [0, 1, 1, 1, \n 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, \n 0, 0, 1, 0, 0, 0, 0]])\n', (11171, 12406), True, 'import numpy as np\n'), ((13622, 13643), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (13636, 13643), False, 'import matplotlib\n'), ((15258, 15280), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(5)'], {}), '(5)\n', (15277, 15280), False, 'import gym\n'), ((15718, 15738), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (15732, 15738), True, 'import numpy as np\n'), ((16649, 16680), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num_substeps'], {}), '(0, 1, num_substeps)\n', (16660, 16680), True, 'import numpy as np\n'), ((18145, 18171), 'numpy.array', 'np.array', (['ACT_DICT[action]'], {}), '(ACT_DICT[action])\n', (18153, 18171), True, 'import numpy as np\n'), ((18185, 18228), 'numpy.random.normal', 'np.random.normal', (['action', 'self.action_noise'], {}), '(action, self.action_noise)\n', (18201, 18228), True, 'import numpy as np\n'), ((18297, 18341), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.state - self.fixed_goal)'], {}), '(self.state - self.fixed_goal)\n', (18311, 18341), True, 'import numpy as np\n'), ((18849, 18859), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (18857, 18859), True, 'import networkx as nx\n'), ((19709, 19735), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['g'], {}), '(g)\n', (19732, 19735), True, 'import networkx as nx\n'), ((21519, 21545), 'numpy.where', 'np.where', (['(self._walls == 0)'], {}), '(self._walls == 0)\n', (21527, 21545), True, 'import numpy as np\n'), ((21616, 21654), 'numpy.random.choice', 'np.random.choice', (['num_candidate_states'], {}), '(num_candidate_states)\n', (21632, 21654), True, 'import numpy as np\n'), ((21667, 21766), 'numpy.array', 'np.array', (['[candidate_states[0][state_index], 
candidate_states[1][state_index]]'], {'dtype': 'np.float'}), '([candidate_states[0][state_index], candidate_states[1][state_index\n ]], dtype=np.float)\n', (21675, 21766), True, 'import numpy as np\n'), ((21818, 21843), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(2)'}), '(size=2)\n', (21835, 21843), True, 'import numpy as np\n'), ((23440, 23465), 'pickle.dump', 'pickle.dump', (['all_paths', 'f'], {}), '(all_paths, f)\n', (23451, 23465), False, 'import pickle\n'), ((17062, 17083), 'numpy.array', 'np.array', (['ACT_DICT[i]'], {}), '(ACT_DICT[i])\n', (17070, 17083), True, 'import numpy as np\n'), ((19659, 19674), 'numpy.float', 'np.float', (['"""inf"""'], {}), "('inf')\n", (19667, 19674), True, 'import numpy as np\n'), ((20303, 20325), 'numpy.array', 'np.array', (['self.obs_vec'], {}), '(self.obs_vec)\n', (20311, 20325), True, 'import numpy as np\n'), ((22395, 22411), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (22409, 22411), True, 'import numpy as np\n'), ((13972, 13992), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (13980, 13992), True, 'import numpy as np\n'), ((14033, 14053), 'numpy.array', 'np.array', (['[4.5, 4.5]'], {}), '([4.5, 4.5])\n', (14041, 14053), True, 'import numpy as np\n'), ((15338, 15354), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (15346, 15354), True, 'import numpy as np\n'), ((15368, 15405), 'numpy.array', 'np.array', (['[self._height, self._width]'], {}), '([self._height, self._width])\n', (15376, 15405), True, 'import numpy as np\n'), ((17359, 17387), 'numpy.floor', 'np.floor', (['(resolution * state)'], {}), '(resolution * state)\n', (17367, 17387), True, 'import numpy as np\n'), ((21037, 21052), 'numpy.where', 'np.where', (['walls'], {}), '(walls)\n', (21045, 21052), True, 'import numpy as np\n'), ((21065, 21085), 'numpy.array', 'np.array', (['[j, j + 1]'], {}), '([j, j + 1])\n', (21073, 21085), True, 'import numpy as np\n'), ((21110, 21126), 'numpy.array', 'np.array', 
(['[i, i]'], {}), '([i, i])\n', (21118, 21126), True, 'import numpy as np\n'), ((21154, 21178), 'numpy.array', 'np.array', (['[i + 1, i + 1]'], {}), '([i + 1, i + 1])\n', (21162, 21178), True, 'import numpy as np\n'), ((22801, 22817), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (22815, 22817), True, 'import numpy as np\n'), ((22862, 22882), 'numpy.random.randint', 'np.random.randint', (['(5)'], {}), '(5)\n', (22879, 22882), True, 'import numpy as np\n'), ((14203, 14223), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (14211, 14223), True, 'import numpy as np\n'), ((14264, 14284), 'numpy.array', 'np.array', (['[1.5, 5.5]'], {}), '([1.5, 5.5])\n', (14272, 14284), True, 'import numpy as np\n'), ((14437, 14457), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (14445, 14457), True, 'import numpy as np\n'), ((14498, 14520), 'numpy.array', 'np.array', (['[10.0, 10.0]'], {}), '([10.0, 10.0])\n', (14506, 14520), True, 'import numpy as np\n'), ((14759, 14779), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (14767, 14779), True, 'import numpy as np\n'), ((14820, 14841), 'numpy.array', 'np.array', (['[0.5, 10.5]'], {}), '([0.5, 10.5])\n', (14828, 14841), True, 'import numpy as np\n')] |
import csv
import cpsc2018
import os
import argparse
import numpy as np
import sys
'''
cspc2018_challenge score
Written by: <NAME>, <NAME>, <NAME>
School of Instrument Science and Engineering
Southeast University, China
<EMAIL>
'''
'''
Score the prediction answers by comparing answers.csv and REFERENCE.csv in validation_set folder,
The scoring uses an F1 measure, which is an average of the nine F1 values from each classification
type. The specific scoring rules can be found on http://www.icbeb.org/Challenge.html.
Matrix A follows the format as:
Predicted
Normal AF I-AVB LBBB RBBB PAC PVC STD STE
Normal N11 N12 N13 N14 N15 N16 N17 N18 N19
AF N21 N22 N23 N24 N25 N26 N27 N28 N29
I-AVB N31 N32 N33 N34 N35 N36 N37 N38 N39
LBBB N41 N42 N43 N44 N45 N46 N47 N48 N49
Reference RBBB N51 N52 N53 N54 N55 N56 N57 N58 N59
PAC N61 N62 N63 N64 N65 N66 N67 N68 N69
PVC N71 N72 N73 N74 N75 N76 N77 N78 N79
STD N81 N82 N83 N84 N85 N86 N87 N88 N89
STE N91 N92 N93 N94 N95 N96 N97 N98 N99
For each of the nine types, F1 is defined as:
Normal: F11=2*N11/(N1x+Nx1) AF: F12=2*N22/(N2x+Nx2) I-AVB: F13=2*N33/(N3x+Nx3) LBBB: F14=2*N44/(N4x+Nx4) RBBB: F15=2*N55/(N5x+Nx5)
PAC: F16=2*N66/(N6x+Nx6) PVC: F17=2*N77/(N7x+Nx7) STD: F18=2*N88/(N8x+Nx8) STE: F19=2*N99/(N9x+Nx9)
The final challenge score is defined as:
F1 = (F11+F12+F13+F14+F15+F16+F17+F18+F19)/9
In addition, we also calculate the F1 measures for each of the four sub-abnormal types:
AF: Faf=2*N22/(N2x+Nx2) Block: Fblock=2*(N33+N44+N55)/(N3x+Nx3+N4x+Nx4+N5x+Nx5)
Premature contraction: Fpc=2*(N66+N77)/(N6x+Nx6+N7x+Nx7) ST-segment change: Fst=2*(N88+N99)/(N8x+Nx8+N9x+Nx9)
The statistics of the predicted answers and the final score are saved to score.txt in the local path.
'''
def score(answers_csv_path, reference_csv_path):
    """Score challenge predictions against the reference labels.

    Builds a 9x9 confusion matrix ``A`` (rows: reference first label,
    columns: predicted label), derives the nine per-class F1 values, their
    mean (the final challenge score), and the F1 values of the four
    sub-abnormal types.  The summary is printed to stdout and also written
    to ``score.txt`` in the current working directory.

    Parameters
    ----------
    answers_csv_path : str
        CSV file with columns 'Recording' and 'Result' (predicted class 1-9).
    reference_csv_path : str
        CSV file with columns 'Recording', 'First_label', 'Second_label',
        'Third_label'; empty label cells denote "no label".
    """
    answers = {}
    reference = {}
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; use float
    A = np.zeros((9, 9), dtype=float)
    with open(answers_csv_path) as f:
        for row in csv.DictReader(f):
            answers.setdefault(row['Recording'], []).append(row['Result'])
    with open(reference_csv_path) as ref:
        for row in csv.DictReader(ref):
            reference.setdefault(row['Recording'], []).append(
                [row['First_label'], row['Second_label'], row['Third_label']])
    for key in answers:
        # if a recording has several answer rows, the last one wins
        # (same behaviour as the original implementation)
        for item in answers[key]:
            predict = int(item)  # int() instead of the removed np.int
        value = []
        for item in reference[key][0]:
            if item == '':
                item = 0  # empty reference cell -> placeholder class 0
            value.append(int(item))
        if predict in value:
            A[predict - 1][predict - 1] += 1
        else:
            A[value[0] - 1][predict - 1] += 1
    # per-class F1: 2 * N_ii / (sum of row i + sum of column i)
    F11, F12, F13, F14, F15, F16, F17, F18, F19 = (
        2 * A[i][i] / (np.sum(A[i, :]) + np.sum(A[:, i])) for i in range(9))
    F1 = (F11 + F12 + F13 + F14 + F15 + F16 + F17 + F18 + F19) / 9
    # F1 measures for the four sub-abnormal types:
    # AF, Block, Premature contraction, ST-segment change
    Faf = 2 * A[1][1] / (np.sum(A[1, :]) + np.sum(A[:, 1]))
    Fblock = 2 * (A[2][2] + A[3][3] + A[4][4]) / (np.sum(A[2:5, :]) + np.sum(A[:, 2:5]))
    Fpc = 2 * (A[5][5] + A[6][6]) / (np.sum(A[5:7, :]) + np.sum(A[:, 5:7]))
    Fst = 2 * (A[7][7] + A[8][8]) / (np.sum(A[7:9, :]) + np.sum(A[:, 7:9]))
    print('Total File Number: ', np.sum(A))
    print("F11: ", F11)
    print("F12: ", F12)
    print("F13: ", F13)
    print("F14: ", F14)
    print("F15: ", F15)
    print("F16: ", F16)
    print("F17: ", F17)
    print("F18: ", F18)
    print("F19: ", F19)
    print("F1: ", F1)
    print("Faf: ", Faf)
    print("Fblock: ", Fblock)
    print("Fpc: ", Fpc)
    print("Fst: ", Fst)
    # the with-statement closes the file; no explicit close() needed
    with open('score.txt', 'w') as score_file:
        print('Total File Number: %d\n' % (np.sum(A)), file=score_file)
        print('F11: %0.3f' % F11, file=score_file)
        print('F12: %0.3f' % F12, file=score_file)
        print('F13: %0.3f' % F13, file=score_file)
        print('F14: %0.3f' % F14, file=score_file)
        print('F15: %0.3f' % F15, file=score_file)
        print('F16: %0.3f' % F16, file=score_file)
        print('F17: %0.3f' % F17, file=score_file)
        print('F18: %0.3f' % F18, file=score_file)
        print('F19: %0.3f\n' % F19, file=score_file)
        print('F1: %0.3f\n' % F1, file=score_file)
        print('Faf: %0.3f' % Faf, file=score_file)
        print('Fblock: %0.3f' % Fblock, file=score_file)
        print('Fpc: %0.3f' % Fpc, file=score_file)
        print('Fst: %0.3f' % Fst, file=score_file)
if __name__ == '__main__':
    # Command-line entry point: score the local answers.csv against the
    # reference file supplied via -r/--reference_path.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-r',
                            '--reference_path',
                            help='path saving reference file')
    cli_args = arg_parser.parse_args()
    score('answers.csv', cli_args.reference_path)
| [
"numpy.sum",
"argparse.ArgumentParser",
"csv.DictReader",
"numpy.zeros",
"numpy.int"
] | [((2247, 2279), 'numpy.zeros', 'np.zeros', (['(9, 9)'], {'dtype': 'np.float'}), '((9, 9), dtype=np.float)\n', (2255, 2279), True, 'import numpy as np\n'), ((5452, 5477), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5475, 5477), False, 'import argparse\n'), ((2335, 2352), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (2349, 2352), False, 'import csv\n'), ((2532, 2551), 'csv.DictReader', 'csv.DictReader', (['ref'], {}), '(ref)\n', (2546, 2551), False, 'import csv\n'), ((4151, 4160), 'numpy.sum', 'np.sum', (['A'], {}), '(A)\n', (4157, 4160), True, 'import numpy as np\n'), ((2831, 2843), 'numpy.int', 'np.int', (['item'], {}), '(item)\n', (2837, 2843), True, 'import numpy as np\n'), ((3127, 3142), 'numpy.sum', 'np.sum', (['A[0, :]'], {}), '(A[0, :])\n', (3133, 3142), True, 'import numpy as np\n'), ((3145, 3160), 'numpy.sum', 'np.sum', (['A[:, 0]'], {}), '(A[:, 0])\n', (3151, 3160), True, 'import numpy as np\n'), ((3187, 3202), 'numpy.sum', 'np.sum', (['A[1, :]'], {}), '(A[1, :])\n', (3193, 3202), True, 'import numpy as np\n'), ((3205, 3220), 'numpy.sum', 'np.sum', (['A[:, 1]'], {}), '(A[:, 1])\n', (3211, 3220), True, 'import numpy as np\n'), ((3247, 3262), 'numpy.sum', 'np.sum', (['A[2, :]'], {}), '(A[2, :])\n', (3253, 3262), True, 'import numpy as np\n'), ((3265, 3280), 'numpy.sum', 'np.sum', (['A[:, 2]'], {}), '(A[:, 2])\n', (3271, 3280), True, 'import numpy as np\n'), ((3307, 3322), 'numpy.sum', 'np.sum', (['A[3, :]'], {}), '(A[3, :])\n', (3313, 3322), True, 'import numpy as np\n'), ((3325, 3340), 'numpy.sum', 'np.sum', (['A[:, 3]'], {}), '(A[:, 3])\n', (3331, 3340), True, 'import numpy as np\n'), ((3367, 3382), 'numpy.sum', 'np.sum', (['A[4, :]'], {}), '(A[4, :])\n', (3373, 3382), True, 'import numpy as np\n'), ((3385, 3400), 'numpy.sum', 'np.sum', (['A[:, 4]'], {}), '(A[:, 4])\n', (3391, 3400), True, 'import numpy as np\n'), ((3427, 3442), 'numpy.sum', 'np.sum', (['A[5, :]'], {}), '(A[5, :])\n', (3433, 3442), True, 
'import numpy as np\n'), ((3445, 3460), 'numpy.sum', 'np.sum', (['A[:, 5]'], {}), '(A[:, 5])\n', (3451, 3460), True, 'import numpy as np\n'), ((3487, 3502), 'numpy.sum', 'np.sum', (['A[6, :]'], {}), '(A[6, :])\n', (3493, 3502), True, 'import numpy as np\n'), ((3505, 3520), 'numpy.sum', 'np.sum', (['A[:, 6]'], {}), '(A[:, 6])\n', (3511, 3520), True, 'import numpy as np\n'), ((3547, 3562), 'numpy.sum', 'np.sum', (['A[7, :]'], {}), '(A[7, :])\n', (3553, 3562), True, 'import numpy as np\n'), ((3565, 3580), 'numpy.sum', 'np.sum', (['A[:, 7]'], {}), '(A[:, 7])\n', (3571, 3580), True, 'import numpy as np\n'), ((3607, 3622), 'numpy.sum', 'np.sum', (['A[8, :]'], {}), '(A[8, :])\n', (3613, 3622), True, 'import numpy as np\n'), ((3625, 3640), 'numpy.sum', 'np.sum', (['A[:, 8]'], {}), '(A[:, 8])\n', (3631, 3640), True, 'import numpy as np\n'), ((3826, 3841), 'numpy.sum', 'np.sum', (['A[1, :]'], {}), '(A[1, :])\n', (3832, 3841), True, 'import numpy as np\n'), ((3844, 3859), 'numpy.sum', 'np.sum', (['A[:, 1]'], {}), '(A[:, 1])\n', (3850, 3859), True, 'import numpy as np\n'), ((3911, 3928), 'numpy.sum', 'np.sum', (['A[2:5, :]'], {}), '(A[2:5, :])\n', (3917, 3928), True, 'import numpy as np\n'), ((3931, 3948), 'numpy.sum', 'np.sum', (['A[:, 2:5]'], {}), '(A[:, 2:5])\n', (3937, 3948), True, 'import numpy as np\n'), ((3987, 4004), 'numpy.sum', 'np.sum', (['A[5:7, :]'], {}), '(A[5:7, :])\n', (3993, 4004), True, 'import numpy as np\n'), ((4007, 4024), 'numpy.sum', 'np.sum', (['A[:, 5:7]'], {}), '(A[:, 5:7])\n', (4013, 4024), True, 'import numpy as np\n'), ((4063, 4080), 'numpy.sum', 'np.sum', (['A[7:9, :]'], {}), '(A[7:9, :])\n', (4069, 4080), True, 'import numpy as np\n'), ((4083, 4100), 'numpy.sum', 'np.sum', (['A[:, 7:9]'], {}), '(A[:, 7:9])\n', (4089, 4100), True, 'import numpy as np\n'), ((2960, 2972), 'numpy.int', 'np.int', (['item'], {}), '(item)\n', (2966, 2972), True, 'import numpy as np\n'), ((4632, 4641), 'numpy.sum', 'np.sum', (['A'], {}), '(A)\n', (4638, 4641), True, 
'import numpy as np\n')] |
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import functools
import platform
import numpy as np
import pytest
from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid
from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField
from pde.tools.misc import module_available
def test_storage_write(tmp_path):
    """test simple memory storage"""
    dim = 5
    field = ScalarField(UnitGrid([dim]))

    storage_classes = {"MemoryStorage": MemoryStorage}
    if module_available("h5py"):
        file_path = tmp_path / "test_storage_write.hdf5"
        storage_classes["FileStorage"] = functools.partial(FileStorage, file_path)

    for name, storage_cls in storage_classes.items():
        # write two identical snapshots at times 0 and 1
        storage = storage_cls(info={"a": 1})
        storage.start_writing(field, info={"b": 2})
        for time in (0, 1):
            field.data = np.arange(dim)
            storage.append(field, time)
        storage.end_writing()

        assert not storage.has_collection

        np.testing.assert_allclose(storage.times, np.arange(2))
        expected = np.arange(dim)
        for stored_field in storage:
            np.testing.assert_array_equal(stored_field.data, expected)
        for idx in range(2):
            np.testing.assert_array_equal(storage[idx].data, expected)
        assert {"a": 1, "b": 2}.items() <= storage.info.items()

        # restarting the writing several times must accumulate the data
        storage = storage_cls()
        storage.clear()
        for step in range(3):
            storage.start_writing(field)
            field.data = np.arange(dim) + step
            storage.append(field, step)
            storage.end_writing()

        np.testing.assert_allclose(
            storage.times, np.arange(3), err_msg="storage class: " + name
        )
def test_storage_truncation(tmp_path):
    """test whether simple trackers can be used"""
    hdf_file = tmp_path / "test_storage_truncation.hdf5"
    for truncate in [True, False]:
        storages = [MemoryStorage()]
        if module_available("h5py"):
            storages.append(FileStorage(hdf_file))
        trackers = [s.tracker(interval=0.01) for s in storages]

        state = ScalarField.random_uniform(UnitGrid([8, 8]), 0.2, 0.3)
        eq = DiffusionPDE()

        # first run fills the storages; optionally discard the data before
        # the second run, which continues the simulation until t = 0.2
        eq.solve(state, t_range=0.1, dt=0.001, tracker=trackers)
        if truncate:
            for storage in storages:
                storage.clear()
        eq.solve(state, t_range=[0.1, 0.2], dt=0.001, tracker=trackers)

        expected_times = np.arange(0.1, 0.201, 0.01)
        if not truncate:
            expected_times = np.r_[np.arange(0, 0.101, 0.01), expected_times]
        for storage in storages:
            err = f"truncate={truncate}, storage={storage}"
            np.testing.assert_allclose(storage.times, expected_times, err_msg=err)

        if any(platform.win32_ver()):
            # close file storages explicitly on Windows
            for storage in storages:
                if isinstance(storage, FileStorage):
                    storage.close()

        assert not storage.has_collection
def test_storing_extract_range(tmp_path):
    """test methods specific to FieldCollections in memory storage"""
    sf = ScalarField(UnitGrid([1]))
    storage_classes = {"MemoryStorage": MemoryStorage}
    if module_available("h5py"):
        file_path = tmp_path / "test_storage_write.hdf5"
        storage_classes["FileStorage"] = functools.partial(FileStorage, file_path)

    for storage_cls in storage_classes.values():
        # write the values 0 and 2 at times 0 and 1
        src = storage_cls()
        src.start_writing(sf)
        for time, value in [(0, 0), (1, 2)]:
            sf.data = np.array([value])
            src.append(sf, time)
        src.end_writing()

        # indexing supports positive and negative indices ...
        np.testing.assert_equal(src[0].data, 0)
        np.testing.assert_equal(src[1].data, 2)
        np.testing.assert_equal(src[-1].data, 2)
        np.testing.assert_equal(src[-2].data, 0)

        # ... but out-of-range access must fail
        with pytest.raises(IndexError):
            src[2]
        with pytest.raises(IndexError):
            src[-3]

        # extracting without arguments copies all time points
        full = src.extract_time_range()
        assert full.times == list(src.times)
        np.testing.assert_allclose(full.data, src.data)

        # a scalar bound keeps only times up to that value
        head = src.extract_time_range(0.5)
        assert head.times == src.times[:1]
        np.testing.assert_allclose(head.data, src.data[:1])

        # a tuple selects the times inside the given interval
        tail = src.extract_time_range((0.5, 1.5))
        assert tail.times == src.times[1:]
        np.testing.assert_allclose(tail.data, src.data[1:])
def test_storing_collection(tmp_path):
    """test methods specific to FieldCollections in memory storage"""
    grid = UnitGrid([2, 2])
    f1 = ScalarField.random_uniform(grid, 0.1, 0.4, label="a")
    f2 = VectorField.random_uniform(grid, 0.1, 0.4, label="b")
    f3 = Tensor2Field.random_uniform(grid, 0.1, 0.4, label="c")
    fc = FieldCollection([f1, f2, f3])

    storage_classes = {"MemoryStorage": MemoryStorage}
    if module_available("h5py"):
        file_path = tmp_path / "test_storage_write.hdf5"
        storage_classes["FileStorage"] = functools.partial(FileStorage, file_path)

    for storage_cls in storage_classes.values():
        # store the collection at two time points
        storage = storage_cls()
        storage.start_writing(fc)
        storage.append(fc, 0)
        storage.append(fc, 1)
        storage.end_writing()

        assert storage.has_collection
        # fields can be extracted by positional index ...
        for index, original in [(0, f1), (1, f2), (2, f3)]:
            assert storage.extract_field(index)[0] == original
        assert storage.extract_field(0)[0].label == "a"
        assert storage.extract_field(0, label="new label")[0].label == "new label"
        assert storage.extract_field(0)[0].label == "a"  # do not alter label
        # ... or by their label
        for label, original in [("a", f1), ("b", f2), ("c", f3)]:
            assert storage.extract_field(label)[0] == original
        with pytest.raises(ValueError):
            storage.extract_field("nonsense")
def test_storage_apply(tmp_path):
    """test the apply function of StorageBase"""
    grid = UnitGrid([2])
    field = ScalarField(grid)

    storage_classes = {"None": None, "MemoryStorage": MemoryStorage}
    if module_available("h5py"):
        file_path = tmp_path / "test_storage_apply.hdf5"
        storage_classes["FileStorage"] = functools.partial(FileStorage, file_path)

    # prepare a storage holding [0, 1] at t=0 and [1, 2] at t=1
    source = MemoryStorage()
    source.start_writing(field, info={"b": 2})
    for time, values in [(0, [0, 1]), (1, [1, 2])]:
        field.data = np.array(values)
        source.append(field, time)
    source.end_writing()

    for name, storage_cls in storage_classes.items():
        out = None if storage_cls is None else storage_cls()
        result = source.apply(lambda x: x + 1, out=out)
        assert storage_cls is None or result is out
        assert len(result) == 2
        np.testing.assert_allclose(result.times, source.times)
        assert result[0] == ScalarField(grid, [1, 2]), name
        assert result[1] == ScalarField(grid, [2, 3]), name

    # applying a function to an empty storage yields an empty storage
    empty = MemoryStorage()
    assert len(empty.apply(lambda x: x + 1)) == 0
def test_storage_copy(tmp_path):
    """test the copy function of StorageBase"""
    grid = UnitGrid([2])
    field = ScalarField(grid)

    storage_classes = {"None": None, "MemoryStorage": MemoryStorage}
    if module_available("h5py"):
        file_path = tmp_path / "test_storage_apply.hdf5"
        storage_classes["FileStorage"] = functools.partial(FileStorage, file_path)

    # prepare a storage with two time points
    source = MemoryStorage()
    source.start_writing(field, info={"b": 2})
    for time, values in [(0, [0, 1]), (1, [1, 2])]:
        field.data = np.array(values)
        source.append(field, time)
    source.end_writing()

    for name, storage_cls in storage_classes.items():
        out = None if storage_cls is None else storage_cls()
        duplicate = source.copy(out=out)
        assert storage_cls is None or duplicate is out
        assert len(duplicate) == 2
        np.testing.assert_allclose(duplicate.times, source.times)
        assert duplicate[0] == source[0], name
        assert duplicate[1] == source[1], name

    # copying an empty storage yields an empty storage
    empty = MemoryStorage()
    assert len(empty.copy()) == 0
@pytest.mark.parametrize("dtype", [bool, complex])
def test_storage_types(dtype, tmp_path):
    """test storing different types"""
    grid = UnitGrid([32])
    field = ScalarField.random_uniform(grid).copy(dtype=dtype)
    if dtype == complex:
        # add an imaginary part so the data is genuinely complex
        field += 1j * ScalarField.random_uniform(grid)

    storage_classes = {"MemoryStorage": MemoryStorage}
    if module_available("h5py"):
        file_path = tmp_path / "test_storage_apply.hdf5"
        storage_classes["FileStorage"] = functools.partial(FileStorage, file_path)

    for storage_cls in storage_classes.values():
        # store the same field twice and check it round-trips unchanged
        storage = storage_cls()
        storage.start_writing(field)
        for time in (0, 1):
            storage.append(field, time)
        storage.end_writing()

        assert len(storage) == 2
        np.testing.assert_allclose(storage.times, [0, 1])
        np.testing.assert_equal(storage[0].data, field.data)
        np.testing.assert_equal(storage[1].data, field.data)
| [
"functools.partial",
"pde.DiffusionPDE",
"numpy.testing.assert_allclose",
"pde.fields.FieldCollection",
"pde.UnitGrid",
"pde.fields.VectorField.random_uniform",
"pde.FileStorage",
"pde.tools.misc.module_available",
"pytest.raises",
"numpy.array",
"numpy.arange",
"pde.fields.ScalarField.random_... | [((7957, 8006), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[bool, complex]'], {}), "('dtype', [bool, complex])\n", (7980, 8006), False, 'import pytest\n'), ((396, 411), 'pde.UnitGrid', 'UnitGrid', (['[dim]'], {}), '([dim])\n', (404, 411), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((424, 441), 'pde.fields.ScalarField', 'ScalarField', (['grid'], {}), '(grid)\n', (435, 441), False, 'from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField\n'), ((505, 529), 'pde.tools.misc.module_available', 'module_available', (['"""h5py"""'], {}), "('h5py')\n", (521, 529), False, 'from pde.tools.misc import module_available\n'), ((3161, 3185), 'pde.tools.misc.module_available', 'module_available', (['"""h5py"""'], {}), "('h5py')\n", (3177, 3185), False, 'from pde.tools.misc import module_available\n'), ((4469, 4485), 'pde.UnitGrid', 'UnitGrid', (['[2, 2]'], {}), '([2, 2])\n', (4477, 4485), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((4495, 4548), 'pde.fields.ScalarField.random_uniform', 'ScalarField.random_uniform', (['grid', '(0.1)', '(0.4)'], {'label': '"""a"""'}), "(grid, 0.1, 0.4, label='a')\n", (4521, 4548), False, 'from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField\n'), ((4558, 4611), 'pde.fields.VectorField.random_uniform', 'VectorField.random_uniform', (['grid', '(0.1)', '(0.4)'], {'label': '"""b"""'}), "(grid, 0.1, 0.4, label='b')\n", (4584, 4611), False, 'from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField\n'), ((4621, 4675), 'pde.fields.Tensor2Field.random_uniform', 'Tensor2Field.random_uniform', (['grid', '(0.1)', '(0.4)'], {'label': '"""c"""'}), "(grid, 0.1, 0.4, label='c')\n", (4648, 4675), False, 'from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField\n'), ((4685, 4714), 'pde.fields.FieldCollection', 'FieldCollection', (['[f1, f2, 
f3]'], {}), '([f1, f2, f3])\n', (4700, 4714), False, 'from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField\n'), ((4778, 4802), 'pde.tools.misc.module_available', 'module_available', (['"""h5py"""'], {}), "('h5py')\n", (4794, 4802), False, 'from pde.tools.misc import module_available\n'), ((5914, 5927), 'pde.UnitGrid', 'UnitGrid', (['[2]'], {}), '([2])\n', (5922, 5927), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((5940, 5957), 'pde.fields.ScalarField', 'ScalarField', (['grid'], {}), '(grid)\n', (5951, 5957), False, 'from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField\n'), ((6035, 6059), 'pde.tools.misc.module_available', 'module_available', (['"""h5py"""'], {}), "('h5py')\n", (6051, 6059), False, 'from pde.tools.misc import module_available\n'), ((6211, 6226), 'pde.MemoryStorage', 'MemoryStorage', ([], {}), '()\n', (6224, 6226), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((6287, 6303), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (6295, 6303), True, 'import numpy as np\n'), ((6345, 6361), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (6353, 6361), True, 'import numpy as np\n'), ((6849, 6864), 'pde.MemoryStorage', 'MemoryStorage', ([], {}), '()\n', (6862, 6864), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((7018, 7031), 'pde.UnitGrid', 'UnitGrid', (['[2]'], {}), '([2])\n', (7026, 7031), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((7044, 7061), 'pde.fields.ScalarField', 'ScalarField', (['grid'], {}), '(grid)\n', (7055, 7061), False, 'from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField\n'), ((7139, 7163), 'pde.tools.misc.module_available', 'module_available', (['"""h5py"""'], {}), "('h5py')\n", (7155, 7163), False, 'from pde.tools.misc import module_available\n'), ((7315, 7330), 'pde.MemoryStorage', 'MemoryStorage', 
([], {}), '()\n', (7328, 7330), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((7391, 7407), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (7399, 7407), True, 'import numpy as np\n'), ((7449, 7465), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (7457, 7465), True, 'import numpy as np\n'), ((7895, 7910), 'pde.MemoryStorage', 'MemoryStorage', ([], {}), '()\n', (7908, 7910), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((8098, 8112), 'pde.UnitGrid', 'UnitGrid', (['[32]'], {}), '([32])\n', (8106, 8112), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((8319, 8343), 'pde.tools.misc.module_available', 'module_available', (['"""h5py"""'], {}), "('h5py')\n", (8335, 8343), False, 'from pde.tools.misc import module_available\n'), ((629, 670), 'functools.partial', 'functools.partial', (['FileStorage', 'file_path'], {}), '(FileStorage, file_path)\n', (646, 670), False, 'import functools\n'), ((844, 858), 'numpy.arange', 'np.arange', (['dim'], {}), '(dim)\n', (853, 858), True, 'import numpy as np\n'), ((913, 927), 'numpy.arange', 'np.arange', (['dim'], {}), '(dim)\n', (922, 927), True, 'import numpy as np\n'), ((1946, 1970), 'pde.tools.misc.module_available', 'module_available', (['"""h5py"""'], {}), "('h5py')\n", (1962, 1970), False, 'from pde.tools.misc import module_available\n'), ((2103, 2119), 'pde.UnitGrid', 'UnitGrid', (['[8, 8]'], {}), '([8, 8])\n', (2111, 2119), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((2136, 2178), 'pde.fields.ScalarField.random_uniform', 'ScalarField.random_uniform', (['grid', '(0.2)', '(0.3)'], {}), '(grid, 0.2, 0.3)\n', (2162, 2178), False, 'from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField\n'), ((2192, 2206), 'pde.DiffusionPDE', 'DiffusionPDE', ([], {}), '()\n', (2204, 2206), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), 
((2460, 2487), 'numpy.arange', 'np.arange', (['(0.1)', '(0.201)', '(0.01)'], {}), '(0.1, 0.201, 0.01)\n', (2469, 2487), True, 'import numpy as np\n'), ((3083, 3096), 'pde.UnitGrid', 'UnitGrid', (['[1]'], {}), '([1])\n', (3091, 3096), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((3285, 3326), 'functools.partial', 'functools.partial', (['FileStorage', 'file_path'], {}), '(FileStorage, file_path)\n', (3302, 3326), False, 'import functools\n'), ((3477, 3490), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (3485, 3490), True, 'import numpy as np\n'), ((3534, 3547), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (3542, 3547), True, 'import numpy as np\n'), ((3607, 3645), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['s1[0].data', '(0)'], {}), '(s1[0].data, 0)\n', (3630, 3645), True, 'import numpy as np\n'), ((3654, 3692), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['s1[1].data', '(2)'], {}), '(s1[1].data, 2)\n', (3677, 3692), True, 'import numpy as np\n'), ((3701, 3740), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['s1[-1].data', '(2)'], {}), '(s1[-1].data, 2)\n', (3724, 3740), True, 'import numpy as np\n'), ((3749, 3788), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['s1[-2].data', '(0)'], {}), '(s1[-2].data, 0)\n', (3772, 3788), True, 'import numpy as np\n'), ((4021, 4065), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['s2.data', 's1.data'], {}), '(s2.data, s1.data)\n', (4047, 4065), True, 'import numpy as np\n'), ((4154, 4202), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['s3.data', 's1.data[:1]'], {}), '(s3.data, s1.data[:1])\n', (4180, 4202), True, 'import numpy as np\n'), ((4298, 4346), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['s4.data', 's1.data[1:]'], {}), '(s4.data, s1.data[1:])\n', (4324, 4346), True, 'import numpy as np\n'), ((4902, 4943), 'functools.partial', 'functools.partial', (['FileStorage', 
'file_path'], {}), '(FileStorage, file_path)\n', (4919, 4943), False, 'import functools\n'), ((6159, 6200), 'functools.partial', 'functools.partial', (['FileStorage', 'file_path'], {}), '(FileStorage, file_path)\n', (6176, 6200), False, 'import functools\n'), ((6655, 6701), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['s2.times', 's1.times'], {}), '(s2.times, s1.times)\n', (6681, 6701), True, 'import numpy as np\n'), ((7263, 7304), 'functools.partial', 'functools.partial', (['FileStorage', 'file_path'], {}), '(FileStorage, file_path)\n', (7280, 7304), False, 'import functools\n'), ((7741, 7787), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['s2.times', 's1.times'], {}), '(s2.times, s1.times)\n', (7767, 7787), True, 'import numpy as np\n'), ((8443, 8484), 'functools.partial', 'functools.partial', (['FileStorage', 'file_path'], {}), '(FileStorage, file_path)\n', (8460, 8484), False, 'import functools\n'), ((8706, 8749), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['s.times', '[0, 1]'], {}), '(s.times, [0, 1])\n', (8732, 8749), True, 'import numpy as np\n'), ((8758, 8804), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['s[0].data', 'field.data'], {}), '(s[0].data, field.data)\n', (8781, 8804), True, 'import numpy as np\n'), ((8813, 8859), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['s[1].data', 'field.data'], {}), '(s[1].data, field.data)\n', (8836, 8859), True, 'import numpy as np\n'), ((1085, 1097), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (1094, 1097), True, 'import numpy as np\n'), ((1661, 1673), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (1670, 1673), True, 'import numpy as np\n'), ((1918, 1933), 'pde.MemoryStorage', 'MemoryStorage', ([], {}), '()\n', (1931, 1933), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((2678, 2739), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['storage.times', 'times'], 
{'err_msg': 'msg'}), '(storage.times, times, err_msg=msg)\n', (2704, 2739), True, 'import numpy as np\n'), ((2756, 2776), 'platform.win32_ver', 'platform.win32_ver', ([], {}), '()\n', (2774, 2776), False, 'import platform\n'), ((3803, 3828), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (3816, 3828), False, 'import pytest\n'), ((3861, 3886), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (3874, 3886), False, 'import pytest\n'), ((5745, 5770), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5758, 5770), False, 'import pytest\n'), ((6726, 6751), 'pde.fields.ScalarField', 'ScalarField', (['grid', '[1, 2]'], {}), '(grid, [1, 2])\n', (6737, 6751), False, 'from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField\n'), ((6782, 6807), 'pde.fields.ScalarField', 'ScalarField', (['grid', '[2, 3]'], {}), '(grid, [2, 3])\n', (6793, 6807), False, 'from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField\n'), ((8125, 8157), 'pde.fields.ScalarField.random_uniform', 'ScalarField.random_uniform', (['grid'], {}), '(grid)\n', (8151, 8157), False, 'from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField\n'), ((8223, 8255), 'pde.fields.ScalarField.random_uniform', 'ScalarField.random_uniform', (['grid'], {}), '(grid)\n', (8249, 8255), False, 'from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField\n'), ((1175, 1189), 'numpy.arange', 'np.arange', (['dim'], {}), '(dim)\n', (1184, 1189), True, 'import numpy as np\n'), ((1277, 1291), 'numpy.arange', 'np.arange', (['dim'], {}), '(dim)\n', (1286, 1291), True, 'import numpy as np\n'), ((1507, 1521), 'numpy.arange', 'np.arange', (['dim'], {}), '(dim)\n', (1516, 1521), True, 'import numpy as np\n'), ((2000, 2017), 'pde.FileStorage', 'FileStorage', (['file'], {}), '(file)\n', (2011, 2017), False, 'from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid\n'), ((2539, 
2564), 'numpy.arange', 'np.arange', (['(0)', '(0.101)', '(0.01)'], {}), '(0, 0.101, 0.01)\n', (2548, 2564), True, 'import numpy as np\n')] |
import sys
import os
import getopt
from six.moves import xrange
import h5py, time
import tensorflow as tf
import numpy as np
from utils import leaky_relu, leaky_relu2, batch_norm_wrapper
from data_loader import load_data_new, load_neighbour, load_labelMatrix, load_structMatrix
from meshvae import meshVAE
### Some hyper-parameters...
# Each value below is a default; most can be overridden on the command line
# (the getopt flag that overrides it is noted in parentheses).
learning_rate = 0.00001  # Adam learning rate for the joint optimizer (-l)
lambda1 = 1              # weight of the stage-1 (per-part) KL loss (-a)
lambda2 = 1              # weight of the stage-1 triplet loss (-b)
lambda3 = 1              # weight of the stage-2 generation loss (-c)
lambda4 = 1              # weight of the stage-2 KL loss (-d)
lambda5 = 1              # weight of the stage-2 triplet loss (-e)
bound = 1                # stage-1 triplet-loss margin (-x)
bound_2 = 1              # stage-2 triplet-loss margin (-y)
latent_zdim = 32         # latent size of each per-part VAE (-m)
latent_zdim_2 = 32       # latent size of the stage-2 VAE (-n)
key_dim = 32             # key/query size of the part-attention mechanism
mat = 'guitar'           # dataset name, used to build `matname` (-p)
epoch_num = 10000        # number of training epochs (-f)
epoch_num_2 = 10000      # only appears in the log-folder name below
batch_size = 32          # mini-batch size (-s)
### Change Datapath for corresponding features...
restore_path = ''        # checkpoint folder to restore from (-r); empty means train from scratch
matpath = './pre_processed_features/guitar/edgefeature.mat'
label_path = './pre_processed_features/guitar/labelMatrix.mat'
struct_path = './pre_processed_features/guitar/structMatrix.mat'
### Parsing...
# Parse command-line flags and overwrite the module-level defaults above.
# Note the long-option names are declared but every branch below matches the
# short form only; `ckpt_path` is only ever defined when -k is supplied.
opts, args = getopt.getopt(sys.argv[1:], "a:b:c:d:e:l:f:x:y:m:n:p:s:r:k:", \
["KLpart", "Trippart", "Part2Global", "KLglobal", "Tripglobal", "learning_rate", "epoch_num", "BoundPart", "BoundGlobal", "HidePart", "HideGlobal", "matname", "batch_size","restore_path","ckpt_path"])
print(opts, args)
for op, value in opts:
    print(op, value)
    if op == "-a":
        lambda1 = float(value)
    elif op == "-b":
        lambda2 = float(value)
    elif op == "-c":
        lambda3 = float(value)
    elif op == "-d":
        lambda4 = float(value)
    elif op == "-e":
        lambda5 = float(value)
    elif op == "-l":
        learning_rate = float(value)
    elif op == "-f":
        epoch_num = int(value)
    elif op == "-x":
        bound = float(value)
    elif op == "-y":
        bound_2 = float(value)
    elif op == "-m":
        latent_zdim = int(value)
    elif op == "-n":
        latent_zdim_2 = int(value)
    elif op == "-p":
        mat = value
    elif op == "-s":
        batch_size = int(value)
    elif op == "-r":
        restore_path = value
    elif op == "-k":
        ckpt_path = value
    else:
        # Unreachable in practice: getopt only yields options from the spec.
        sys.exit()
### Create directories to record training process...
# NOTE(review): `matname` is built here but never used below — confirm it is dead.
matname = './' + mat + '.mat'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand, not all at once
# NOTE(review): this session looks unused — train()/save_z() open their own sessions.
session = tf.Session(config=config)
timecurrent = time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time()))
# Log-folder name encodes the start timestamp plus every hyper-parameter value.
logfolder = './' + timecurrent + "Joint" + "_l" + str(learning_rate)+'_a' + str(lambda1) + '_b' + str(lambda2) + '_c' + str(lambda3) + '_d' + str(lambda4) + "_f"+str(epoch_num_2)+'_x' + str(bound) + '_y' + str(bound_2) + '_m' + str(latent_zdim) + '_n' + str(latent_zdim_2)
##########################################################################################
class risaNET():
    """Joint two-stage VAE network for part-based mesh shape embedding.

    Stage 1: one meshVAE per shape part; the per-part latent means are fused
    by an attention mechanism, gated against the per-part structure features,
    and trained with generation + KL + triplet losses.
    Stage 2: a second fully-connected VAE (`vae_struct`) over the fused
    latent+structure vector, with its own generation + KL + triplet losses.
    Both stages are optimized jointly by a single Adam optimizer.
    """
    def __init__(self, matpath):
        """Build the complete TF graph (both stages) from the edge features at `matpath`."""
        # Get splited dataset: per-part masks, part/model/edge counts and edge features.
        self.mask, self.part_num, self.modelnum, self.edgenum, self.logdr, self.e_nb, self.maxdegree, self.degree = load_data_new(matpath)
        # Set initialize parameters (module-level globals captured at build time).
        self.batch_size = batch_size
        self.bound = bound
        # Instantilize each VAE part and append them to partVAE list
        self.partVAE = []
        for i in range(self.part_num):
            print("# Making Part {}".format(i))
            self.partVAE.append(meshVAE(batch_size, label_path, latent_zdim, bound, lambda1, learning_rate, self.e_nb, self.edgenum, self.degree, self.maxdegree, part_no=i))
        # Pairwise label matrix feeding the triplet losses of both stages.
        self.labelMatrix = load_labelMatrix(label_path)
        self.input_label = tf.placeholder(tf.float32, [self.batch_size, self.batch_size], name='label_batch')
        # ----------------------------------------VAE Set for Stage 1
        # Structure features: 8 values per part.
        self.structMatrix = load_structMatrix(struct_path)
        self.input_struct = tf.placeholder(tf.float32, [None, 8*(self.part_num)], name='struct_batch')
        self.latent_set = []
        for i in range(self.part_num):
            self.latent_set.append(self.partVAE[i].z_mean)
        # Per-part key/query projections for the attention over part latents.
        self.Wk = []
        self.Wq = []
        self.latent_struct = tf.transpose(self.latent_set, perm=[1, 0, 2])
        for p in range(self.part_num):
            self.Wk.append(tf.get_variable("W_key"+str(p), [latent_zdim, key_dim], tf.float32, tf.random_normal_initializer(stddev=0.02)))
            self.Wq.append(tf.get_variable("W_query"+str(p), [latent_zdim, key_dim], tf.float32, tf.random_normal_initializer(stddev=0.02)))
        self.s_r, self.atten_latent_struct = self.attention_mechanism( self.latent_struct, self.Wk, self.Wq, key_dim, latent_zdim)
        self.atten_latent_struct = tf.reshape(self.atten_latent_struct, shape=[tf.shape(self.partVAE[0].z_mean)[0], -1])
        # Soft gate between the geometry embedding and the structure features.
        self.sgeo, self.sstruct = self.geo_struct_attention_mechanism(self.atten_latent_struct, self.input_struct, self.part_num * latent_zdim )
        self.weight_latent = tf.multiply(self.atten_latent_struct, self.sgeo)
        self.weight_struct = tf.multiply(self.input_struct, self.sstruct)
        self.weight_latent_struct = tf.concat([self.weight_latent, self.weight_struct], axis=1)
        # Triplet loss of stage 1: signed margin on pairwise L1 distances,
        # weighted by the +/- label matrix.
        self.distanceMatrix = self.get_l1_matrix(self.weight_latent_struct, name='l1_matrix')
        self.margin = self.distanceMatrix - self.bound
        self.standard = self.margin * self.input_label
        self.sigmoidresult = tf.sigmoid(self.standard)
        self.total_triplet = tf.reduce_sum(self.standard)
        # Sum generation and KL losses over all part VAEs.
        self.generation_loss_set = []
        for i in range(self.part_num):
            self.generation_loss_set.append(self.partVAE[i].generation_loss)
        self.total_generation = tf.reduce_sum(self.generation_loss_set)
        self.KL_loss_set = []
        for i in range(self.part_num):
            self.KL_loss_set.append(self.partVAE[i].KL_loss)
        self.total_KL = tf.reduce_sum(self.KL_loss_set)
        # ------------------------------------------ VAE for stage 2
        self.hiddendim_2 = latent_zdim_2
        self.embedding_inputs = tf.placeholder(tf.float32, [None, self.hiddendim_2], name = 'embedding_inputs')
        self.encode, self.encode_std, self.encode_gauss, self.decode, self.tencode, self.tencode_std, self.tencode_gauss, self.tdecode = self.vae_struct(self.weight_latent_struct, self.embedding_inputs, self.hiddendim_2, name='vae_struct')
        # Stage-2 reconstruction and (Gaussian) KL losses.
        self.generation_loss_2 = 1 * 0.5 * tf.reduce_mean(tf.reduce_sum(tf.pow(self.weight_latent_struct - self.decode, 2), axis=1))
        self.KL_loss_2 = 0.5 * tf.reduce_mean(tf.reduce_sum(tf.square(self.encode) + tf.square(self.encode_std) - tf.log(1e-8 + tf.square(self.encode_std)) - 1, axis=1))
        self.bound_2 = bound_2
        # Stage-2 triplet loss on the stage-2 mean code.
        self.distanceMatrix_2 = self.get_l2_matrix(self.encode, name='l1_matrix_2')
        self.margin_2 = self.distanceMatrix_2 - self.bound_2
        self.standard_2 = self.margin_2 * self.input_label
        self.sigmoidresult_2 = tf.sigmoid(self.standard_2)
        self.triplet_loss_2 = tf.reduce_sum(self.standard_2)
        # Calculate total loss and a single joint Adam optimizer.
        self.cost_stg1 = self.total_generation + lambda1 * self.total_KL + lambda2 * self.total_triplet
        self.cost_stg2 = lambda3 * self.generation_loss_2 + lambda4 * self.KL_loss_2 + lambda5 * self.triplet_loss_2
        self.cost = self.cost_stg1 + self.cost_stg2
        self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
        # TensorBoard summaries for both stages.
        tf.summary.histogram("DisMat", self.distanceMatrix)
        tf.summary.scalar('total_generation', self.total_generation)
        tf.summary.scalar("total_KL", self.total_KL)
        tf.summary.scalar("total_triplet", self.total_triplet)
        tf.summary.scalar('cost', self.cost_stg1)
        tf.summary.histogram("DisMat2", self.distanceMatrix_2)
        tf.summary.scalar('generation2', self.generation_loss_2)
        tf.summary.scalar("KL2", self.KL_loss_2)
        tf.summary.scalar("triplet2", self.triplet_loss_2)
        tf.summary.scalar('cost2', self.cost_stg2)
        self.summaries = tf.summary.merge_all()
        self.saver = tf.train.Saver(max_to_keep=2)

    def get_l1_matrix(self, feature, name='l1_matrix'):
        """Return the L2-normalized [batch, batch] matrix of pairwise L1 distances."""
        with tf.variable_scope(name) as scope:
            # Build `a` so its row blocks repeat each sample batch_size times,
            # and `b` so it repeats the whole batch; |a - b| then covers all pairs.
            a = tf.tile(feature[0], [self.batch_size])
            a = tf.reshape(a, [self.batch_size, -1])
            for i in range(1, self.batch_size):
                tmp = tf.tile(feature[i], [self.batch_size])
                tmp = tf.reshape(tmp, [self.batch_size, -1])
                a = tf.concat([a, tmp], 0)
            b = tf.tile(feature, [self.batch_size, 1])
            abs_val = tf.abs(a - b)
            sum_abs = tf.reduce_sum(abs_val, 1)
            l1_matrix = tf.reshape(sum_abs, [self.batch_size, self.batch_size])
            l1_matrix = tf.nn.l2_normalize(l1_matrix, dim=-1)
            return l1_matrix

    def get_l2_matrix(self, feature, name='l2_matrix'):
        """Return the L2-normalized pairwise squared-Euclidean distance matrix."""
        with tf.variable_scope(name) as scope:
            # ||x_i - x_j||^2 = ||x_i||^2 - 2 x_i.x_j + ||x_j||^2
            r = tf.reduce_sum(feature * feature, 1)
            r = tf.reshape(r, [-1, 1])
            distanceMatrix = r - 2 * tf.matmul(feature, tf.transpose(feature)) + tf.transpose(r)
            distanceMatrix = tf.nn.l2_normalize(distanceMatrix, dim=-1)
            return distanceMatrix

    def attention_mechanism(self, feature, Wk, Wq, key_dim, latent_zdim):
        """Attention over the part axis of `feature` ([batch, part, latent]).

        Returns (Score, Result): the per-part softmax scores [batch, part] and
        the score-weighted features [batch, part, latent].
        """
        with tf.variable_scope("attention_mechanism", reuse=tf.AUTO_REUSE) as scope:
            batch_size = tf.shape(feature)[0]
            Key =[] # part* [batch, key]
            Query = tf.zeros([batch_size, key_dim]) #[batch, key]
            for p in range(self.part_num):
                wk = Wk[p] #[latent, key]
                wq = Wq[p] #[latent, key]
                f = feature[:, p, :]
                f = tf.squeeze(f) #[batch, latent]
                Key.append(tf.matmul(f, wk)) #[batch, key]
                # BUGFIX: the query must use the query projection wq; the
                # original accumulated tf.matmul(f, wk), leaving wq unused.
                Query = Query + tf.matmul(f, wq) #[batch, key]
            Query = tf.expand_dims(Query, axis=2) #[batch, key, 1]
            Key = tf.reshape(Key, [self.part_num, batch_size, key_dim]) #[part, batch, key]
            Key = tf.transpose(Key, perm=[1, 0, 2]) #[batch, part, key]
            Value = tf.squeeze(tf.matmul(Key, Query)) #[batch, part]
            # Numerically guarded softmax over the part axis.
            Score = tf.exp(Value) / (1e-10 + tf.reduce_sum(tf.exp(Value), axis=1, keepdims=True)) #[batch, part]
            Score_t = tf.tile(tf.expand_dims(Score, axis=2), [1, 1, latent_zdim]) #[batch, part, latent]
            Result = tf.multiply(feature, Score_t)
            return Score, Result

    def geo_struct_attention_mechanism(self, geo_feature, struct_feature, input_dim):
        """Two-way attention scoring geometry vs. structure features.

        Returns (Sgeo_t, Sstruct_t): the geometry score tiled to `input_dim`
        and the structure score tiled to part_num * 8, for element-wise gating.
        """
        with tf.variable_scope("geo_struct_attention_mechanism", reuse=tf.AUTO_REUSE) as scope:
            Wgeo1 = tf.get_variable("Wgeo1", [input_dim, latent_zdim], tf.float32, tf.random_normal_initializer(stddev=0.02))
            Wstruct1 = tf.get_variable("Wstruct1", [(self.part_num) * 8, latent_zdim], tf.float32, tf.random_normal_initializer(stddev=0.02))
            Wgeo2 = tf.get_variable("Wgeo2", [latent_zdim, 1], tf.float32, tf.random_normal_initializer(stddev=0.02))
            Wstruct2 = tf.get_variable("Wstruct2", [latent_zdim, 1], tf.float32, tf.random_normal_initializer(stddev=0.02))
            batch_size = tf.shape(geo_feature)[0]
            # Project each modality to a single logit, then softmax over the two.
            Vgeo1 = tf.matmul(geo_feature, Wgeo1)
            Vstruct1 = tf.matmul(struct_feature, Wstruct1)
            Vgeo2 = tf.matmul(Vgeo1, Wgeo2)
            Vstruct2 = tf.matmul(Vstruct1, Wstruct2)
            Value = tf.concat([Vgeo2, Vstruct2], axis=1)
            Score = tf.exp(Value) / (1e-10 + tf.reduce_sum(tf.exp(Value), axis=1, keepdims=True))
            Sgeo = Score[:, 0]
            Sgeo = tf.expand_dims(Sgeo, 1)
            Sgeo_t = tf.tile(Sgeo, [1, input_dim])
            Sstruct = Score[: ,1]
            Sstruct = tf.expand_dims(Sstruct, 1)
            # BUGFIX: tile the structure score, not the geometry score; the
            # original tiled Sgeo here, making the structure gate wrong.
            Sstruct_t = tf.tile(Sstruct, [1, (self.part_num) * 8])
            tf.summary.histogram("Geo_Score", Sgeo)
            tf.summary.histogram("Struct_Score", Sstruct)
            return Sgeo_t, Sstruct_t

    def encoder_symm(self, input_mesh, training = True, keep_prob = 1.0):
        """Stage-2 encoder MLP: fused vector -> (mean, stddev) of size hiddendim_2."""
        with tf.variable_scope("encoder_symm") as scope:
            if(training == False):
                keep_prob = 1.0
                scope.reuse_variables()
            bn = True
            matrix1, bias1, h1 = self.linear(input_mesh, self.part_num*latent_zdim+(self.part_num)*8, 256, name = 'fc_1', training = training, special_activation = False, bn = bn)
            h1 = tf.nn.dropout(h1, keep_prob = keep_prob)
            matrix2, bias2, h2 = self.linear(h1, 256, 128, name = 'fc_2', training = training, special_activation = False, bn = bn)
            h2 = tf.nn.dropout(h2, keep_prob = keep_prob)
            matrix3, bias3, h3 = self.linear(h2, 128, 64, name = 'fc_3', training = training, special_activation = False, bn = bn)
            h3 = tf.nn.dropout(h3, keep_prob = keep_prob)
            _, _, mean = self.linear(h3, 64, self.hiddendim_2, name = 'mean', training = training, no_activation = True, bn = False)
            _, _, stddev = self.linear(h3, 64, self.hiddendim_2, name = 'stddev', training = training, no_activation = True, bn = False)
            # softsign maps to (-1, 1), so stddev = sqrt(softsign(x)+1) stays in [0, sqrt(2)).
            stddev = tf.sqrt(tf.nn.softsign(stddev)+1.0)
            return mean, stddev

    def decoder_symm(self, z, training = True, keep_prob = 1.0):
        """Stage-2 decoder MLP: latent code -> reconstructed fused vector."""
        with tf.variable_scope("decoder_symm") as scope:
            if(training == False):
                keep_prob = 1.0
                scope.reuse_variables()
            bn = True
            matrix1, bias1, h1 = self.linear(z, self.hiddendim_2, 64, name = 'fc_1', training = training, special_activation = False, bn = bn)
            h1 = tf.nn.dropout(h1, keep_prob = keep_prob)
            matrix2, bias2, h2 = self.linear(h1, 64, 128, name = 'fc_2', training = training, special_activation = False, bn = bn)
            h2 = tf.nn.dropout(h2, keep_prob = keep_prob)
            matrix3, bias3, h3 = self.linear(h2, 128, 256, name = 'fc_3', training = training, special_activation = False, bn = bn)
            h3 = tf.nn.dropout(h3, keep_prob = keep_prob)
            matrix3, bias3, output = self.linear(h3, 256, self.part_num*latent_zdim+(self.part_num)*8, name = 'fc_4', training = training, no_activation = True, bn = False)
            return output

    def leaky_relu(self, input_, alpha = 0.1):
        # NOTE(review): `alpha` is ignored, so tf.nn.leaky_relu runs with its
        # own default slope — confirm whether 0.1 was actually intended.
        return tf.nn.leaky_relu(input_)

    def linear(self, input_, input_size, output_size, name='Linear', training = True, special_activation = False, no_activation = False, bn = True, stddev=0.02, bias_start=0.0):
        """Fully-connected layer with optional batch norm and activation.

        Returns (weights, bias, activation). Activation is identity when
        no_activation, tanh when special_activation, otherwise leaky ReLU.
        """
        with tf.variable_scope(name) as scope:
            scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
            matrix = tf.get_variable("weights", [input_size, output_size], tf.float32, tf.random_normal_initializer(stddev=stddev))
            bias = tf.get_variable("bias", [output_size], tf.float32, initializer=tf.constant_initializer(bias_start))
            output = tf.matmul(input_, matrix) + bias
            if bn == False:
                fb = output
            else:
                fb = batch_norm_wrapper(output, is_training = training)
            if no_activation == True:
                fa = fb
            elif special_activation == False:
                fa = self.leaky_relu(fb)
            else:
                fa = tf.nn.tanh(fb)
            return matrix, bias, fa

    def vae_struct(self, input, embedding_inputs, hiddendim_2, name='vae_struct'):
        """Build the stage-2 VAE: training path and a variable-sharing test path."""
        with tf.variable_scope(name) as scope:
            encode, encode_std = self.encoder_symm(input, training = True)
            encode_gauss = encode + encode_std * embedding_inputs
            decode = self.decoder_symm(encode_gauss, training = True)
            tencode, tencode_std = self.encoder_symm(input, training = False)
            tencode_gauss = tencode + tencode_std * embedding_inputs
            tdecode = self.decoder_symm(tencode_gauss, training = False)
            return encode, encode_std, encode_gauss, decode, tencode, tencode_std, tencode_gauss, tdecode

    def train(self, restore=None):
        """Jointly train both stages for epoch_num epochs; log progress and
        save a final checkpoint under `logfolder`.

        restore: optional checkpoint path to warm-start from.
        """
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        if not os.path.isdir(logfolder):
            os.mkdir(logfolder)
        with tf.Session(config=config) as sess:
            tf.global_variables_initializer().run()
            if restore is not None:
                self.saver.restore(sess, restore)
            # FIX: bind the handle to a non-builtin name and close it on exit
            # (the original bound it to `file` and never closed it).
            log_file = open(logfolder + '/' + '_script_result.txt', 'w')
            if not os.path.isdir(logfolder+ '/tb'):
                os.mkdir(logfolder+ '/tb')
            for epoch in xrange(0, epoch_num):
                # Sample the batch from indices with residue 0..3 mod 5
                # (residue 4 is held out of training).
                rand_index = np.random.choice(list(range(0,len(self.logdr[0]),5))+list(range(1,len(self.logdr[0]),5))+list(range(2,len(self.logdr[0]),5))+list(range(3,len(self.logdr[0]),5)), size=self.batch_size)
                # Slice the batch's pairwise label matrix out of the full one.
                input_label = np.zeros(shape=(len(rand_index), len(rand_index)))
                for i in range(len(rand_index)):
                    row = rand_index[i]
                    row = row.repeat(len(rand_index))
                    col = rand_index
                    input_label[i] = self.labelMatrix[row, col]
                timecurrent1 = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                # Gaussian noise for training, zeros for the test-mode inputs.
                eps_s = np.random.normal(size=(len(rand_index), latent_zdim))
                eps_logdr = np.random.normal(size=(len(rand_index), latent_zdim))
                teps_s = np.zeros(shape=(len(rand_index), latent_zdim))
                teps_logdr = np.zeros(shape=(len(rand_index), latent_zdim))
                eps_input = np.random.normal(size=(len(rand_index), latent_zdim_2))
                feed_dict_ofall = {self.input_label: input_label, self.input_struct: self.structMatrix[rand_index], self.embedding_inputs: eps_input}
                for i in range(self.part_num):
                    feed_input_mask = self.mask[i][rand_index]
                    feed_input_mask = np.expand_dims(feed_input_mask, 1)
                    feed_dict_ofall[self.partVAE[i].inputs_logdr] = self.logdr[i][rand_index]
                    feed_dict_ofall[self.partVAE[i].input_mask] = feed_input_mask
                    feed_dict_ofall[self.partVAE[i].eps_logdr] = eps_logdr
                    feed_dict_ofall[self.partVAE[i].eps_logdr_test] = teps_logdr
                    feed_dict_ofall[self.partVAE[i].input_label] = input_label
                _,gen, KL, trip,cost1,gen2, KL2, trip2, cost2, cost_al = sess.run([self.optimizer, self.total_generation, self.total_KL, self.total_triplet, self.cost_stg1, self.generation_loss_2, self.KL_loss_2, self.triplet_loss_2, self.cost_stg2, self.cost],
                                                                                   feed_dict=feed_dict_ofall)
                if np.mod(epoch + 1, 50) == 0:
                    print("%s Epoch: [%4d]G: %.4f K: %.4f T: %.4f cost1: %.4f G2: %.4f K2: %.4f T2: %.4f cost2: %.4f cost: %.8f\n"
                          % (timecurrent1, epoch + 1, gen, KL, trip, cost1, gen2, KL2, trip2, cost2, cost_al))
                    log_file.write("%s Epoch: [%4d]G: %.4f K: %.4f T: %.4f cost1: %.4f G2: %.4f K2: %.4f T2: %.4f cost2: %.4f cost: %.8f\n"
                                   % (timecurrent1, epoch + 1, gen, KL, trip, cost1, gen2, KL2, trip2, cost2, cost_al))
            log_file.close()
            # FIX: save a final checkpoint. The original left all saver.save
            # calls commented out, yet main() immediately calls
            # save_z(logfolder + '/meshvae.model-' + str(epoch_num), ...),
            # which restores exactly this path.
            self.saver.save(sess, logfolder + '/' + 'meshvae.model', global_step=epoch_num)
        return

    def save_z(self, restore, foldername, times=0):
        """Restore `restore` and dump the stage-2 latent code of every model
        to <foldername>/<times>test_index.h5 under key 'feature_vector'."""
        print('###Loading...')
        with tf.Session() as sess:
            self.saver.restore(sess, restore)
            index = list(xrange(len(self.logdr[0])))
            # All noise inputs are zero so the dump is deterministic.
            eps_s = np.zeros(shape=(len(index), latent_zdim))
            eps_logdr = np.zeros(shape=(len(index), latent_zdim))
            teps_s = np.zeros(shape=(len(index), latent_zdim))
            teps_logdr = np.zeros(shape=(len(index), latent_zdim))
            eps_input = np.zeros(shape=(len(index), latent_zdim_2))
            teps_input = np.zeros(shape=(len(index), latent_zdim_2))
            feed_dict_ofall = {self.input_struct: self.structMatrix[index], self.embedding_inputs: eps_input}
            for i in range(self.part_num):
                feed_input_mask = self.mask[i][index]
                feed_input_mask = np.expand_dims(feed_input_mask, 1)
                feed_dict_ofall[self.partVAE[i].inputs_logdr] = self.logdr[i][index]
                feed_dict_ofall[self.partVAE[i].input_mask] = feed_input_mask
                feed_dict_ofall[self.partVAE[i].eps_logdr] = eps_logdr
                feed_dict_ofall[self.partVAE[i].eps_logdr_test] = teps_logdr
            # NOTE(review): this fetches the training-mode encoder mean
            # (self.encode), not the test-mode one (self.tencode) — confirm.
            z = sess.run([self.encode],
                         feed_dict=feed_dict_ofall)
            z = np.squeeze(z)
            print('###Writing...')
            name = foldername + '/' + str(times) +'test_index.h5'
            print(name)
            f = h5py.File(name, 'w')
            f['feature_vector'] = z
            f.close()
        return
##########################################################################################
def main():
    """Entry point: build the network, then either dump latent codes from an
    existing checkpoint (when -r was given) or train first and dump after."""
    net = risaNET(matpath)
    if not restore_path:
        net.train()
        net.save_z(logfolder + '/meshvae.model-' + str(epoch_num), logfolder)
    else:
        net.save_z(restore_path + '/meshvae.model-' + ckpt_path, restore_path)


if __name__ == '__main__':
    main()
| [
"os.mkdir",
"tensorflow.reduce_sum",
"getopt.getopt",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.constant_initializer",
"tensorflow.nn.tanh",
"tensorflow.reshape",
"tensorflow.nn.l2_normalize",
"tensorflow.ConfigProto",
"tensorflow.multiply",
"tensorflow.matmul",
"data_loader.load... | [((846, 1122), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""a:b:c:d:e:l:f:x:y:m:n:p:s:r:k:"""', "['KLpart', 'Trippart', 'Part2Global', 'KLglobal', 'Tripglobal',\n 'learning_rate', 'epoch_num', 'BoundPart', 'BoundGlobal', 'HidePart',\n 'HideGlobal', 'matname', 'batch_size', 'restore_path', 'ckpt_path']"], {}), "(sys.argv[1:], 'a:b:c:d:e:l:f:x:y:m:n:p:s:r:k:', ['KLpart',\n 'Trippart', 'Part2Global', 'KLglobal', 'Tripglobal', 'learning_rate',\n 'epoch_num', 'BoundPart', 'BoundGlobal', 'HidePart', 'HideGlobal',\n 'matname', 'batch_size', 'restore_path', 'ckpt_path'])\n", (859, 1122), False, 'import getopt\n'), ((2066, 2082), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2080, 2082), True, 'import tensorflow as tf\n'), ((2132, 2157), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2142, 2157), True, 'import tensorflow as tf\n'), ((2218, 2229), 'time.time', 'time.time', ([], {}), '()\n', (2227, 2229), False, 'import h5py, time\n'), ((2794, 2816), 'data_loader.load_data_new', 'load_data_new', (['matpath'], {}), '(matpath)\n', (2807, 2816), False, 'from data_loader import load_data_new, load_neighbour, load_labelMatrix, load_structMatrix\n'), ((3328, 3356), 'data_loader.load_labelMatrix', 'load_labelMatrix', (['label_path'], {}), '(label_path)\n', (3344, 3356), False, 'from data_loader import load_data_new, load_neighbour, load_labelMatrix, load_structMatrix\n'), ((3384, 3471), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.batch_size, self.batch_size]'], {'name': '"""label_batch"""'}), "(tf.float32, [self.batch_size, self.batch_size], name=\n 'label_batch')\n", (3398, 3471), True, 'import tensorflow as tf\n'), ((3631, 3661), 'data_loader.load_structMatrix', 'load_structMatrix', (['struct_path'], {}), '(struct_path)\n', (3648, 3661), False, 'from data_loader import load_data_new, load_neighbour, load_labelMatrix, load_structMatrix\n'), ((3690, 3764), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 8 * self.part_num]'], {'name': '"""struct_batch"""'}), "(tf.float32, [None, 8 * self.part_num], name='struct_batch')\n", (3704, 3764), True, 'import tensorflow as tf\n'), ((3963, 4008), 'tensorflow.transpose', 'tf.transpose', (['self.latent_set'], {'perm': '[1, 0, 2]'}), '(self.latent_set, perm=[1, 0, 2])\n', (3975, 4008), True, 'import tensorflow as tf\n'), ((4762, 4810), 'tensorflow.multiply', 'tf.multiply', (['self.atten_latent_struct', 'self.sgeo'], {}), '(self.atten_latent_struct, self.sgeo)\n', (4773, 4810), True, 'import tensorflow as tf\n'), ((4840, 4884), 'tensorflow.multiply', 'tf.multiply', (['self.input_struct', 'self.sstruct'], {}), '(self.input_struct, self.sstruct)\n', (4851, 4884), True, 'import tensorflow as tf\n'), ((4921, 4980), 'tensorflow.concat', 'tf.concat', (['[self.weight_latent, self.weight_struct]'], {'axis': '(1)'}), '([self.weight_latent, self.weight_struct], axis=1)\n', (4930, 4980), True, 'import tensorflow as tf\n'), ((5271, 5296), 'tensorflow.sigmoid', 'tf.sigmoid', (['self.standard'], {}), '(self.standard)\n', (5281, 5296), True, 'import tensorflow as tf\n'), ((5326, 5354), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.standard'], {}), '(self.standard)\n', (5339, 5354), True, 'import tensorflow as tf\n'), ((5609, 5648), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.generation_loss_set'], {}), '(self.generation_loss_set)\n', (5622, 5648), True, 'import tensorflow as tf\n'), ((5812, 5843), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.KL_loss_set'], {}), '(self.KL_loss_set)\n', (5825, 5843), True, 'import tensorflow as tf\n'), ((5994, 6071), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.hiddendim_2]'], {'name': '"""embedding_inputs"""'}), "(tf.float32, [None, self.hiddendim_2], name='embedding_inputs')\n", (6008, 6071), True, 'import tensorflow as tf\n'), ((6947, 6974), 'tensorflow.sigmoid', 'tf.sigmoid', (['self.standard_2'], 
{}), '(self.standard_2)\n', (6957, 6974), True, 'import tensorflow as tf\n'), ((7005, 7035), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.standard_2'], {}), '(self.standard_2)\n', (7018, 7035), True, 'import tensorflow as tf\n'), ((7793, 7844), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""DisMat"""', 'self.distanceMatrix'], {}), "('DisMat', self.distanceMatrix)\n", (7813, 7844), True, 'import tensorflow as tf\n'), ((7853, 7913), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_generation"""', 'self.total_generation'], {}), "('total_generation', self.total_generation)\n", (7870, 7913), True, 'import tensorflow as tf\n'), ((7922, 7966), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_KL"""', 'self.total_KL'], {}), "('total_KL', self.total_KL)\n", (7939, 7966), True, 'import tensorflow as tf\n'), ((7975, 8029), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_triplet"""', 'self.total_triplet'], {}), "('total_triplet', self.total_triplet)\n", (7992, 8029), True, 'import tensorflow as tf\n'), ((8038, 8079), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cost"""', 'self.cost_stg1'], {}), "('cost', self.cost_stg1)\n", (8055, 8079), True, 'import tensorflow as tf\n'), ((8097, 8151), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""DisMat2"""', 'self.distanceMatrix_2'], {}), "('DisMat2', self.distanceMatrix_2)\n", (8117, 8151), True, 'import tensorflow as tf\n'), ((8160, 8216), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""generation2"""', 'self.generation_loss_2'], {}), "('generation2', self.generation_loss_2)\n", (8177, 8216), True, 'import tensorflow as tf\n'), ((8225, 8265), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""KL2"""', 'self.KL_loss_2'], {}), "('KL2', self.KL_loss_2)\n", (8242, 8265), True, 'import tensorflow as tf\n'), ((8274, 8324), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""triplet2"""', 'self.triplet_loss_2'], {}), "('triplet2', 
self.triplet_loss_2)\n", (8291, 8324), True, 'import tensorflow as tf\n'), ((8333, 8375), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cost2"""', 'self.cost_stg2'], {}), "('cost2', self.cost_stg2)\n", (8350, 8375), True, 'import tensorflow as tf\n'), ((8401, 8423), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (8421, 8423), True, 'import tensorflow as tf\n'), ((8445, 8474), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(2)'}), '(max_to_keep=2)\n', (8459, 8474), True, 'import tensorflow as tf\n'), ((14728, 14752), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['input_'], {}), '(input_)\n', (14744, 14752), True, 'import tensorflow as tf\n'), ((16469, 16485), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (16483, 16485), True, 'import tensorflow as tf\n'), ((8545, 8568), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (8562, 8568), True, 'import tensorflow as tf\n'), ((8595, 8633), 'tensorflow.tile', 'tf.tile', (['feature[0]', '[self.batch_size]'], {}), '(feature[0], [self.batch_size])\n', (8602, 8633), True, 'import tensorflow as tf\n'), ((8650, 8686), 'tensorflow.reshape', 'tf.reshape', (['a', '[self.batch_size, -1]'], {}), '(a, [self.batch_size, -1])\n', (8660, 8686), True, 'import tensorflow as tf\n'), ((8917, 8955), 'tensorflow.tile', 'tf.tile', (['feature', '[self.batch_size, 1]'], {}), '(feature, [self.batch_size, 1])\n', (8924, 8955), True, 'import tensorflow as tf\n'), ((8979, 8992), 'tensorflow.abs', 'tf.abs', (['(a - b)'], {}), '(a - b)\n', (8985, 8992), True, 'import tensorflow as tf\n'), ((9016, 9041), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['abs_val', '(1)'], {}), '(abs_val, 1)\n', (9029, 9041), True, 'import tensorflow as tf\n'), ((9067, 9122), 'tensorflow.reshape', 'tf.reshape', (['sum_abs', '[self.batch_size, self.batch_size]'], {}), '(sum_abs, [self.batch_size, self.batch_size])\n', (9077, 9122), True, 'import tensorflow as tf\n'), ((9147, 
9184), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['l1_matrix'], {'dim': '(-1)'}), '(l1_matrix, dim=-1)\n', (9165, 9184), True, 'import tensorflow as tf\n'), ((9288, 9311), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (9305, 9311), True, 'import tensorflow as tf\n'), ((9338, 9373), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(feature * feature)', '(1)'], {}), '(feature * feature, 1)\n', (9351, 9373), True, 'import tensorflow as tf\n'), ((9390, 9412), 'tensorflow.reshape', 'tf.reshape', (['r', '[-1, 1]'], {}), '(r, [-1, 1])\n', (9400, 9412), True, 'import tensorflow as tf\n'), ((9539, 9581), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['distanceMatrix'], {'dim': '(-1)'}), '(distanceMatrix, dim=-1)\n', (9557, 9581), True, 'import tensorflow as tf\n'), ((9704, 9765), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention_mechanism"""'], {'reuse': 'tf.AUTO_REUSE'}), "('attention_mechanism', reuse=tf.AUTO_REUSE)\n", (9721, 9765), True, 'import tensorflow as tf\n'), ((9884, 9915), 'tensorflow.zeros', 'tf.zeros', (['[batch_size, key_dim]'], {}), '([batch_size, key_dim])\n', (9892, 9915), True, 'import tensorflow as tf\n'), ((10287, 10316), 'tensorflow.expand_dims', 'tf.expand_dims', (['Query'], {'axis': '(2)'}), '(Query, axis=2)\n', (10301, 10316), True, 'import tensorflow as tf\n'), ((10352, 10405), 'tensorflow.reshape', 'tf.reshape', (['Key', '[self.part_num, batch_size, key_dim]'], {}), '(Key, [self.part_num, batch_size, key_dim])\n', (10362, 10405), True, 'import tensorflow as tf\n'), ((10444, 10477), 'tensorflow.transpose', 'tf.transpose', (['Key'], {'perm': '[1, 0, 2]'}), '(Key, perm=[1, 0, 2])\n', (10456, 10477), True, 'import tensorflow as tf\n'), ((10806, 10835), 'tensorflow.multiply', 'tf.multiply', (['feature', 'Score_t'], {}), '(feature, Score_t)\n', (10817, 10835), True, 'import tensorflow as tf\n'), ((10969, 11041), 'tensorflow.variable_scope', 'tf.variable_scope', 
(['"""geo_struct_attention_mechanism"""'], {'reuse': 'tf.AUTO_REUSE'}), "('geo_struct_attention_mechanism', reuse=tf.AUTO_REUSE)\n", (10986, 11041), True, 'import tensorflow as tf\n'), ((11633, 11662), 'tensorflow.matmul', 'tf.matmul', (['geo_feature', 'Wgeo1'], {}), '(geo_feature, Wgeo1)\n', (11642, 11662), True, 'import tensorflow as tf\n'), ((11686, 11721), 'tensorflow.matmul', 'tf.matmul', (['struct_feature', 'Wstruct1'], {}), '(struct_feature, Wstruct1)\n', (11695, 11721), True, 'import tensorflow as tf\n'), ((11742, 11765), 'tensorflow.matmul', 'tf.matmul', (['Vgeo1', 'Wgeo2'], {}), '(Vgeo1, Wgeo2)\n', (11751, 11765), True, 'import tensorflow as tf\n'), ((11789, 11818), 'tensorflow.matmul', 'tf.matmul', (['Vstruct1', 'Wstruct2'], {}), '(Vstruct1, Wstruct2)\n', (11798, 11818), True, 'import tensorflow as tf\n'), ((11840, 11876), 'tensorflow.concat', 'tf.concat', (['[Vgeo2, Vstruct2]'], {'axis': '(1)'}), '([Vgeo2, Vstruct2], axis=1)\n', (11849, 11876), True, 'import tensorflow as tf\n'), ((12025, 12048), 'tensorflow.expand_dims', 'tf.expand_dims', (['Sgeo', '(1)'], {}), '(Sgeo, 1)\n', (12039, 12048), True, 'import tensorflow as tf\n'), ((12070, 12099), 'tensorflow.tile', 'tf.tile', (['Sgeo', '[1, input_dim]'], {}), '(Sgeo, [1, input_dim])\n', (12077, 12099), True, 'import tensorflow as tf\n'), ((12156, 12182), 'tensorflow.expand_dims', 'tf.expand_dims', (['Sstruct', '(1)'], {}), '(Sstruct, 1)\n', (12170, 12182), True, 'import tensorflow as tf\n'), ((12207, 12244), 'tensorflow.tile', 'tf.tile', (['Sgeo', '[1, self.part_num * 8]'], {}), '(Sgeo, [1, self.part_num * 8])\n', (12214, 12244), True, 'import tensorflow as tf\n'), ((12259, 12298), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""Geo_Score"""', 'Sgeo'], {}), "('Geo_Score', Sgeo)\n", (12279, 12298), True, 'import tensorflow as tf\n'), ((12311, 12356), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""Struct_Score"""', 'Sstruct'], {}), "('Struct_Score', Sstruct)\n", (12331, 12356), 
True, 'import tensorflow as tf\n'), ((12483, 12516), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder_symm"""'], {}), "('encoder_symm')\n", (12500, 12516), True, 'import tensorflow as tf\n'), ((12855, 12893), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h1'], {'keep_prob': 'keep_prob'}), '(h1, keep_prob=keep_prob)\n', (12868, 12893), True, 'import tensorflow as tf\n'), ((13046, 13084), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h2'], {'keep_prob': 'keep_prob'}), '(h2, keep_prob=keep_prob)\n', (13059, 13084), True, 'import tensorflow as tf\n'), ((13236, 13274), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h3'], {'keep_prob': 'keep_prob'}), '(h3, keep_prob=keep_prob)\n', (13249, 13274), True, 'import tensorflow as tf\n'), ((13712, 13745), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder_symm"""'], {}), "('decoder_symm')\n", (13729, 13745), True, 'import tensorflow as tf\n'), ((14047, 14085), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h1'], {'keep_prob': 'keep_prob'}), '(h1, keep_prob=keep_prob)\n', (14060, 14085), True, 'import tensorflow as tf\n'), ((14237, 14275), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h2'], {'keep_prob': 'keep_prob'}), '(h2, keep_prob=keep_prob)\n', (14250, 14275), True, 'import tensorflow as tf\n'), ((14428, 14466), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h3'], {'keep_prob': 'keep_prob'}), '(h3, keep_prob=keep_prob)\n', (14441, 14466), True, 'import tensorflow as tf\n'), ((14945, 14968), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (14962, 14968), True, 'import tensorflow as tf\n'), ((15845, 15868), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (15862, 15868), True, 'import tensorflow as tf\n'), ((16548, 16572), 'os.path.isdir', 'os.path.isdir', (['logfolder'], {}), '(logfolder)\n', (16561, 16572), False, 'import os\n'), ((16586, 16605), 'os.mkdir', 'os.mkdir', (['logfolder'], {}), '(logfolder)\n', (16594, 16605), False, 'import os\n'), 
((16620, 16645), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (16630, 16645), True, 'import tensorflow as tf\n'), ((16982, 17002), 'six.moves.xrange', 'xrange', (['(0)', 'epoch_num'], {}), '(0, epoch_num)\n', (16988, 17002), False, 'from six.moves import xrange\n'), ((20151, 20163), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (20161, 20163), True, 'import tensorflow as tf\n'), ((21397, 21410), 'numpy.squeeze', 'np.squeeze', (['z'], {}), '(z)\n', (21407, 21410), True, 'import numpy as np\n'), ((21555, 21575), 'h5py.File', 'h5py.File', (['name', '"""w"""'], {}), "(name, 'w')\n", (21564, 21575), False, 'import h5py, time\n'), ((3131, 3275), 'meshvae.meshVAE', 'meshVAE', (['batch_size', 'label_path', 'latent_zdim', 'bound', 'lambda1', 'learning_rate', 'self.e_nb', 'self.edgenum', 'self.degree', 'self.maxdegree'], {'part_no': 'i'}), '(batch_size, label_path, latent_zdim, bound, lambda1, learning_rate,\n self.e_nb, self.edgenum, self.degree, self.maxdegree, part_no=i)\n', (3138, 3275), False, 'from meshvae import meshVAE\n'), ((7723, 7760), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (7745, 7760), True, 'import tensorflow as tf\n'), ((8757, 8795), 'tensorflow.tile', 'tf.tile', (['feature[i]', '[self.batch_size]'], {}), '(feature[i], [self.batch_size])\n', (8764, 8795), True, 'import tensorflow as tf\n'), ((8818, 8856), 'tensorflow.reshape', 'tf.reshape', (['tmp', '[self.batch_size, -1]'], {}), '(tmp, [self.batch_size, -1])\n', (8828, 8856), True, 'import tensorflow as tf\n'), ((8877, 8899), 'tensorflow.concat', 'tf.concat', (['[a, tmp]', '(0)'], {}), '([a, tmp], 0)\n', (8886, 8899), True, 'import tensorflow as tf\n'), ((9494, 9509), 'tensorflow.transpose', 'tf.transpose', (['r'], {}), '(r)\n', (9506, 9509), True, 'import tensorflow as tf\n'), ((9801, 9818), 'tensorflow.shape', 'tf.shape', (['feature'], {}), '(feature)\n', (9809, 9818), True, 'import tensorflow 
as tf\n'), ((10114, 10127), 'tensorflow.squeeze', 'tf.squeeze', (['f'], {}), '(f)\n', (10124, 10127), True, 'import tensorflow as tf\n'), ((10529, 10550), 'tensorflow.matmul', 'tf.matmul', (['Key', 'Query'], {}), '(Key, Query)\n', (10538, 10550), True, 'import tensorflow as tf\n'), ((10587, 10600), 'tensorflow.exp', 'tf.exp', (['Value'], {}), '(Value)\n', (10593, 10600), True, 'import tensorflow as tf\n'), ((10710, 10739), 'tensorflow.expand_dims', 'tf.expand_dims', (['Score'], {'axis': '(2)'}), '(Score, axis=2)\n', (10724, 10739), True, 'import tensorflow as tf\n'), ((11135, 11176), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (11163, 11176), True, 'import tensorflow as tf\n'), ((11277, 11318), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (11305, 11318), True, 'import tensorflow as tf\n'), ((11395, 11436), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (11423, 11436), True, 'import tensorflow as tf\n'), ((11519, 11560), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (11547, 11560), True, 'import tensorflow as tf\n'), ((11587, 11608), 'tensorflow.shape', 'tf.shape', (['geo_feature'], {}), '(geo_feature)\n', (11595, 11608), True, 'import tensorflow as tf\n'), ((11897, 11910), 'tensorflow.exp', 'tf.exp', (['Value'], {}), '(Value)\n', (11903, 11910), True, 'import tensorflow as tf\n'), ((15014, 15057), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', ([], {'scale': '(1.0)'}), '(scale=1.0)\n', (15046, 15057), True, 'import tensorflow as tf\n'), ((15146, 15189), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev'}), '(stddev=stddev)\n', (15174, 15189), True, 'import tensorflow as tf\n'), ((15331, 15356), 
'tensorflow.matmul', 'tf.matmul', (['input_', 'matrix'], {}), '(input_, matrix)\n', (15340, 15356), True, 'import tensorflow as tf\n'), ((15460, 15508), 'utils.batch_norm_wrapper', 'batch_norm_wrapper', (['output'], {'is_training': 'training'}), '(output, is_training=training)\n', (15478, 15508), False, 'from utils import leaky_relu, leaky_relu2, batch_norm_wrapper\n'), ((16881, 16913), 'os.path.isdir', 'os.path.isdir', (["(logfolder + '/tb')"], {}), "(logfolder + '/tb')\n", (16894, 16913), False, 'import os\n'), ((16930, 16957), 'os.mkdir', 'os.mkdir', (["(logfolder + '/tb')"], {}), "(logfolder + '/tb')\n", (16938, 16957), False, 'import os\n'), ((20923, 20957), 'numpy.expand_dims', 'np.expand_dims', (['feed_input_mask', '(1)'], {}), '(feed_input_mask, 1)\n', (20937, 20957), True, 'import numpy as np\n'), ((4143, 4184), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (4171, 4184), True, 'import tensorflow as tf\n'), ((4284, 4325), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (4312, 4325), True, 'import tensorflow as tf\n'), ((6450, 6500), 'tensorflow.pow', 'tf.pow', (['(self.weight_latent_struct - self.decode)', '(2)'], {}), '(self.weight_latent_struct - self.decode, 2)\n', (6456, 6500), True, 'import tensorflow as tf\n'), ((10172, 10188), 'tensorflow.matmul', 'tf.matmul', (['f', 'wk'], {}), '(f, wk)\n', (10181, 10188), True, 'import tensorflow as tf\n'), ((10236, 10252), 'tensorflow.matmul', 'tf.matmul', (['f', 'wk'], {}), '(f, wk)\n', (10245, 10252), True, 'import tensorflow as tf\n'), ((13576, 13598), 'tensorflow.nn.softsign', 'tf.nn.softsign', (['stddev'], {}), '(stddev)\n', (13590, 13598), True, 'import tensorflow as tf\n'), ((15273, 15308), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['bias_start'], {}), '(bias_start)\n', (15296, 15308), True, 'import tensorflow as tf\n'), ((15700, 15714), 
'tensorflow.nn.tanh', 'tf.nn.tanh', (['fb'], {}), '(fb)\n', (15710, 15714), True, 'import tensorflow as tf\n'), ((16667, 16700), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (16698, 16700), True, 'import tensorflow as tf\n'), ((18345, 18379), 'numpy.expand_dims', 'np.expand_dims', (['feed_input_mask', '(1)'], {}), '(feed_input_mask, 1)\n', (18359, 18379), True, 'import numpy as np\n'), ((19529, 19550), 'numpy.mod', 'np.mod', (['(epoch + 1)', '(50)'], {}), '(epoch + 1, 50)\n', (19535, 19550), True, 'import numpy as np\n'), ((4538, 4570), 'tensorflow.shape', 'tf.shape', (['self.partVAE[0].z_mean'], {}), '(self.partVAE[0].z_mean)\n', (4546, 4570), True, 'import tensorflow as tf\n'), ((10626, 10639), 'tensorflow.exp', 'tf.exp', (['Value'], {}), '(Value)\n', (10632, 10639), True, 'import tensorflow as tf\n'), ((11936, 11949), 'tensorflow.exp', 'tf.exp', (['Value'], {}), '(Value)\n', (11942, 11949), True, 'import tensorflow as tf\n'), ((17624, 17635), 'time.time', 'time.time', ([], {}), '()\n', (17633, 17635), False, 'import h5py, time\n'), ((9469, 9490), 'tensorflow.transpose', 'tf.transpose', (['feature'], {}), '(feature)\n', (9481, 9490), True, 'import tensorflow as tf\n'), ((6571, 6593), 'tensorflow.square', 'tf.square', (['self.encode'], {}), '(self.encode)\n', (6580, 6593), True, 'import tensorflow as tf\n'), ((6596, 6622), 'tensorflow.square', 'tf.square', (['self.encode_std'], {}), '(self.encode_std)\n', (6605, 6622), True, 'import tensorflow as tf\n'), ((6639, 6665), 'tensorflow.square', 'tf.square', (['self.encode_std'], {}), '(self.encode_std)\n', (6648, 6665), True, 'import tensorflow as tf\n'), ((1962, 1972), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1970, 1972), False, 'import sys\n')] |
#!/usr/bin/env python3
"""
This module runs DaoChen's version Variational-Quantum-Eigensolver on Helium
Example running it partially using CK infrastructure (assuming the current directory is $HOME/CK/ck-rigetti/program/rigetti-vqe) :
time ck virtual `ck search env:* --tags=forestopenfermion` `ck search env:* --tags=pyquil` `ck search env:* --tags=login,rigetti` `ck search env:* --tags=hackathon` --shell_cmd="./rigetti_vqe_helium.py --minimizer_method=my_minimizer --max_func_evaluations=10"
"""
import json
import time
import inspect
import numpy as np
#from scipy import linalg as la
import pyquil.api
from pyquil.quil import Program
from pyquil.paulis import PauliTerm
from pyquil.gates import *
#from forestopenfermion import pyquilpauli_to_qubitop
#from openfermion.transforms import jordan_wigner, get_fermion_operator, get_sparse_operator
from hackathon_utils import cmdline_parse_and_report
# See https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
#
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize NumPy arrays and booleans.

    Arrays are emitted as (nested) lists and NumPy booleans as plain Python
    bools; every other unknown type is delegated to the base encoder, which
    raises TypeError as usual.
    """

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.bool_):
            return bool(obj)
        # Fall back to the stock behavior for anything we do not handle.
        return super().default(obj)
def daochens_vqe(q_device, ansatz, hamiltonian, start_params, minimizer_function, minimizer_options, sample_number):
    """Run a hand-rolled VQE loop, sampling each Pauli term on the device.

    Parameters:
        q_device           -- pyquil connection (QVM or QPU) used to run circuits
        ansatz             -- callable mapping a parameter vector to a pyquil Program
        hamiltonian        -- PauliSum whose expectation value is minimized
        start_params       -- initial parameter vector for the classical optimizer
        minimizer_function -- custom minimizer called as
                              minimizer_function(objective, start, my_args=report,
                              my_options=...) and returning a dict with key 'x'
        minimizer_options  -- options dict forwarded to the minimizer
        sample_number      -- number of shots per Pauli-term measurement

    Returns:
        (optimizer_output, report): optimizer_output gains 'fun_validated' and
        'fun_exact'; report accumulates per-iteration quantum timing/shot stats.

    NOTE(review): the exact-energy check near the end uses the module-level
    `qvm` simulator rather than the `q_device` argument — presumably deliberate
    so the exact value is always simulator-computed; confirm before refactoring.
    """
    def expectation_estimation(ab, report):
        """Estimate <H> at parameters `ab` by measuring each Pauli term.

        Passing the string 'TestMode' as `report` evaluates the energy without
        accumulating statistics into the shared report dict.
        """
        timestamp_before_ee = time.time()
        state_program = ansatz(ab)
        expectation = 0.0
        report_this_iteration = {
            'total_q_seconds_per_c_iteration' : 0.0,
            'seconds_per_individual_q_run' : [],
            'total_q_shots_per_c_iteration' : 0,
            'shots_per_individual_q_run' : []
        }
        for j, term in enumerate(hamiltonian.terms):
            meas_basis_change = Program()
            qubits_to_measure = []
            if term.id() == "":
                # Identity term: its expectation is exactly 1, no circuit needed.
                meas_outcome = 1.0
            else:
                for index, gate in term:
                    # TODO: vary sample_number with term.coefficient to make VQE more efficient
                    qubits_to_measure.append(index)
                    # Rotate X/Y observables into the computational (Z) basis.
                    if gate == 'X':
                        meas_basis_change.inst(RY(-np.pi/2, index))
                    elif gate == 'Y':
                        meas_basis_change.inst(RX(np.pi/2, index))
                meas_prog = state_program + meas_basis_change
                for qindex in qubits_to_measure:
                    meas_prog.measure(qindex, qindex)
                # Because Rigetti sometimes drops the connection after a few
                # successful runs, retry failed runs a bounded number of times.
                for attempt in range(1, 8):
                    try:
                        timestamp_before_qvm = time.time()
                        result = q_device.run(meas_prog, qubits_to_measure, sample_number)
                        q_run_seconds = time.time() - timestamp_before_qvm
                        q_run_shots = sample_number
                        break
                    except Exception as e:
                        print("Caught exception (%s), attempt number %d" % (str(e), attempt))
                else:
                    # BUGFIX: previously, exhausting all retries fell through with
                    # `result` unbound (first term) or stale (later terms); fail loudly.
                    raise RuntimeError("q_device.run() failed after 7 attempts")
                # Parity estimate: each measured bitstring x contributes (-1)^(sum of bits).
                meas_outcome = np.sum([np.power(-1, np.sum(x)) for x in result])/sample_number
                report_this_iteration['total_q_seconds_per_c_iteration'] += q_run_seconds
                report_this_iteration['seconds_per_individual_q_run'].append( q_run_seconds )
                report_this_iteration['total_q_shots_per_c_iteration'] += q_run_shots
                report_this_iteration['shots_per_individual_q_run'].append( q_run_shots )
            expectation += term.coefficient * meas_outcome
        energy = expectation.real
        report_this_iteration['energy'] = energy
        if report != 'TestMode':
            report['iterations'].append( report_this_iteration )
            report['total_q_seconds'] += report_this_iteration['total_q_seconds_per_c_iteration']
            report['total_q_shots'] += report_this_iteration['total_q_shots_per_c_iteration']
        report_this_iteration['total_seconds_per_c_iteration'] = time.time() - timestamp_before_ee
        print(report_this_iteration, "\n")
        return energy

    report = { 'total_q_seconds': 0, 'total_q_shots':0, 'iterations' : [] }
    # Initial objective function value (evaluated without touching the report).
    fun_initial = expectation_estimation(start_params, 'TestMode')
    print('Initial guess at start_params is: {:.4f}'.format(fun_initial))
    timestamp_before_optimizer = time.time()
    optimizer_output = minimizer_function(expectation_estimation, start_params, my_args=(report), my_options = minimizer_options)
    report['total_seconds'] = time.time() - timestamp_before_optimizer
    # Re-evaluate the objective at the optimum as an independent validation pass.
    fun_validated = expectation_estimation(optimizer_output['x'], 'TestMode')
    print('Validated value at solution is: {:.4f}'.format(fun_validated))
    optimizer_output['fun_validated'] = fun_validated
    # Exact calculation of the energy using matrix multiplication on the simulator:
    progs, coefs = hamiltonian.get_programs()
    expect_coeffs = np.array(qvm.expectation(ansatz(optimizer_output['x']), operator_programs=progs))
    optimizer_output['fun_exact'] = np.real_if_close(np.dot(coefs, expect_coeffs))
    print('Total Q seconds = %f' % report['total_q_seconds'])
    print('Total Q shots = %d' % report['total_q_shots'])
    print('Total seconds = %f' % report['total_seconds'])
    return (optimizer_output, report)
def helium_tiny_ansatz(ab):
    """Explicit two-parameter UCC ansatz circuit for helium.

    ab[0] and ab[1] are the rotation angles of the two excitation terms.
    Returns the pyquil Program implementing the ansatz.
    """
    theta_a = ab[0]
    theta_b = ab[1]
    circuit = Program()
    # Reference state: both qubits flipped to |1>.
    circuit.inst(X(0), X(1))
    # First excitation term, angle a.
    circuit.inst(RX(np.pi/2, 0), H(1))
    circuit.inst(CNOT(0, 1), RZ(theta_a, 1), CNOT(0, 1))
    circuit.inst(RX(-np.pi/2, 0), H(1))
    # Second excitation term, angle b.
    circuit.inst(H(0), RX(np.pi/2, 1))
    circuit.inst(CNOT(0, 1), RZ(theta_b, 1), CNOT(0, 1))
    circuit.inst(H(0), RX(-np.pi/2, 1))
    return circuit
if __name__ == '__main__':
    # Parse the command line (number of ansatz parameters, shot count, device
    # name, minimizer selection/options) and echo the configuration.
    start_params, sample_number, q_device_name, minimizer_method, minimizer_options, minimizer_function, visualize_ansatz = cmdline_parse_and_report(
        num_params = 2,
        q_device_name_default = 'QVM',
        q_device_name_help = "Real devices: '8Q-Agave' or '19Q-Acorn'. Either 'QVM' or '' for remote simulator",
        minimizer_options_default = '{}'
        )
    # ---------------------------------------- pyquil-specific init: ----------------------------------------
    # The simulator connection is always created: daochens_vqe() uses the
    # module-level `qvm` for the exact-energy check even when running on a QPU.
    qvm = pyquil.api.QVMConnection()
    if q_device_name == 'QVM':
        q_device = qvm
    else:
        q_device = pyquil.api.QPUConnection( q_device_name )
    # input molecule and basis set (this is the only user input necessary to perform VQE
    # on the Rigetti quantum computer with a UCC ansatz)
    name = 'helium'
    basis = 'sto-3g'
    # # this input would be then converted to the correct Hamiltonian using the q_chem library
    # # developed here at River Lane
    # import q_chem
    # _, _, hamiltonian, _, _ = q_chem.run_chem(name, basis)
    # in this trial, we instead explicitly supply the hamiltonian for "helium, sto-3g"
    # (hard-coded Pauli decomposition; only valid for this molecule/basis pair)
    hamiltonian = \
        -1.6678202144537553*PauliTerm('I',0) + \
        0.7019459893849936*PauliTerm('Z',0) + \
        0.263928235683768058*PauliTerm.from_list([("Z", 0), ("Z", 1)]) + \
        0.7019459893849936*PauliTerm('Z',1)
    # Transforming the Rigetti-style hamiltonian into numpy-friendly dense form
    # to compute the energy classically:
    #
    #qubitOp = pyquilpauli_to_qubitop(hamiltonian)
    #sparse_hamiltonian_jw = get_sparse_operator(qubitOp)
    #dense_hamiltonian_jw = sparse_hamiltonian_jw.todense()
    #classical_energy = np.amin(la.eigh(dense_hamiltonian_jw)[0])
    # Due to difficulty in reliably installing forestopenfermion + openfermion,
    # the code above is temporarily commented out.
    # The following result has been obtained using the code above:
    #
    classical_energy = -2.8077839575399746
    # ---------------------------------------- run VQE: ----------------------------------------
    (vqe_output, report) = daochens_vqe(q_device, helium_tiny_ansatz, hamiltonian, start_params, minimizer_function, minimizer_options, sample_number)
    # ---------------------------------------- store the results: ----------------------------------------
    # Record the minimizer's own source code so the report is self-describing.
    minimizer_src = inspect.getsource( minimizer_function )
    vqe_input = {
        "q_device_name" : q_device_name,
        "minimizer_method" : minimizer_method,
        "minimizer_options" : minimizer_options,
        "sample_number" : sample_number,
        "minimizer_src" : minimizer_src,
        "classical_energy" : classical_energy,
    }
    output_dict = { "vqe_input" : vqe_input, "vqe_output" : vqe_output, "report" : report }
    # NumpyEncoder handles the ndarray values inside vqe_output/report.
    formatted_json = json.dumps(output_dict, cls=NumpyEncoder, sort_keys = True, indent = 4)
    # print(formatted_json)
    with open('rigetti_vqe_report.json', 'w') as json_file:
        json_file.write( formatted_json )
| [
"numpy.sum",
"pyquil.paulis.PauliTerm",
"json.dumps",
"time.time",
"inspect.getsource",
"numpy.dot",
"hackathon_utils.cmdline_parse_and_report",
"pyquil.quil.Program",
"pyquil.paulis.PauliTerm.from_list",
"json.JSONEncoder.default"
] | [((5121, 5132), 'time.time', 'time.time', ([], {}), '()\n', (5130, 5132), False, 'import time\n'), ((6682, 6898), 'hackathon_utils.cmdline_parse_and_report', 'cmdline_parse_and_report', ([], {'num_params': '(2)', 'q_device_name_default': '"""QVM"""', 'q_device_name_help': '"""Real devices: \'8Q-Agave\' or \'19Q-Acorn\'. Either \'QVM\' or \'\' for remote simulator"""', 'minimizer_options_default': '"""{}"""'}), '(num_params=2, q_device_name_default=\'QVM\',\n q_device_name_help=\n "Real devices: \'8Q-Agave\' or \'19Q-Acorn\'. Either \'QVM\' or \'\' for remote simulator"\n , minimizer_options_default=\'{}\')\n', (6706, 6898), False, 'from hackathon_utils import cmdline_parse_and_report\n'), ((9002, 9039), 'inspect.getsource', 'inspect.getsource', (['minimizer_function'], {}), '(minimizer_function)\n', (9019, 9039), False, 'import inspect\n'), ((9466, 9533), 'json.dumps', 'json.dumps', (['output_dict'], {'cls': 'NumpyEncoder', 'sort_keys': '(True)', 'indent': '(4)'}), '(output_dict, cls=NumpyEncoder, sort_keys=True, indent=4)\n', (9476, 9533), False, 'import json\n'), ((1228, 1263), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (1252, 1263), False, 'import json\n'), ((1698, 1709), 'time.time', 'time.time', ([], {}), '()\n', (1707, 1709), False, 'import time\n'), ((5293, 5304), 'time.time', 'time.time', ([], {}), '()\n', (5302, 5304), False, 'import time\n'), ((5893, 5921), 'numpy.dot', 'np.dot', (['coefs', 'expect_coeffs'], {}), '(coefs, expect_coeffs)\n', (5899, 5921), True, 'import numpy as np\n'), ((2107, 2116), 'pyquil.quil.Program', 'Program', ([], {}), '()\n', (2114, 2116), False, 'from pyquil.quil import Program\n'), ((4727, 4738), 'time.time', 'time.time', ([], {}), '()\n', (4736, 4738), False, 'import time\n'), ((7958, 7975), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', '(1)'], {}), "('Z', 1)\n", (7967, 7975), False, 'from pyquil.paulis import PauliTerm\n'), ((7885, 7926), 
'pyquil.paulis.PauliTerm.from_list', 'PauliTerm.from_list', (["[('Z', 0), ('Z', 1)]"], {}), "([('Z', 0), ('Z', 1)])\n", (7904, 7926), False, 'from pyquil.paulis import PauliTerm\n'), ((7787, 7804), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""I"""', '(0)'], {}), "('I', 0)\n", (7796, 7804), False, 'from pyquil.paulis import PauliTerm\n'), ((7835, 7852), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', '(0)'], {}), "('Z', 0)\n", (7844, 7852), False, 'from pyquil.paulis import PauliTerm\n'), ((3249, 3260), 'time.time', 'time.time', ([], {}), '()\n', (3258, 3260), False, 'import time\n'), ((3400, 3411), 'time.time', 'time.time', ([], {}), '()\n', (3409, 3411), False, 'import time\n'), ((3729, 3738), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (3735, 3738), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from .. import utils
class Dummy(unittest.TestCase):
    """Regression test pinning the output of utils.log_zgrid."""

    def test_log_zgrid(self):
        # Reference grid for the range [0.1, 1] with step 0.1.
        expected = np.array([0.1, 0.21568801, 0.34354303, 0.48484469, 0.64100717, 0.8135934])
        computed = utils.log_zgrid([0.1, 1], 0.1)
        np.testing.assert_allclose(computed, expected, rtol=1e-06, atol=0,
                                   equal_nan=False, err_msg='', verbose=True)
| [
"numpy.array"
] | [((137, 211), 'numpy.array', 'np.array', (['[0.1, 0.21568801, 0.34354303, 0.48484469, 0.64100717, 0.8135934]'], {}), '([0.1, 0.21568801, 0.34354303, 0.48484469, 0.64100717, 0.8135934])\n', (145, 211), True, 'import numpy as np\n')] |
import numpy as np
from time import time
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn. ensemble import AdaBoostClassifier
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
###############################################################################
# define the KNN algorithm call:
k = [3, 5, 7, 9]

def knn(x_train, y_train, x_test, y_test, k):
    """Fit a k-NN classifier per neighbour count in `k`; return the test accuracies."""
    accuracies = []
    for neighbour_count in k:
        model = KNeighborsClassifier(n_neighbors=neighbour_count)
        model.fit(x_train, y_train)
        accuracies.append(model.score(x_test, y_test))
    return accuracies
# #############################################################################
# Define the Adaboost classifier call fot the data sets need to be tested
# return the acc value with training and testing time
# Shared module-level model: each ada_boost() call refits this same instance.
Ada_clf = AdaBoostClassifier(DecisionTreeClassifier(), n_estimators=15, learning_rate=1)

def ada_boost(x_train, y_train, x_test, y_test):
    """Fit the shared AdaBoost model; return (tn, fp, fn, tp, train_time, test_time)."""
    started = time()
    Ada_clf.fit(x_train, y_train)
    fitted = time()
    predictions = Ada_clf.predict(x_test)
    tn, fp, fn, tp = confusion_matrix(y_test, predictions).ravel()
    finished = time()
    return tn, fp, fn, tp, fitted - started, finished - fitted
# #############################################################################
# Define the svm classifier call for the data sets
# return the acc value with training and testing time
# Compute a PCA (eigenvalues) on the dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
def svm_call(x_train, y_train, x_test, y_test, n_components):
    """PCA-reduce the features, fit an RBF SVM, and time training vs. testing.

    Returns (tn, fp, fn, tp, train_time, test_time).
    """
    started = time()
    # Unsupervised dimensionality reduction, fitted on the training split only.
    projector = PCA(n_components=n_components, svd_solver='randomized', whiten=True).fit(x_train)
    reduced_train = projector.transform(x_train)
    reduced_test = projector.transform(x_test)
    classifier = SVC(C=1000, class_weight='balanced', gamma=0.05)
    classifier = classifier.fit(reduced_train, y_train)
    fitted = time()
    predictions = classifier.predict(reduced_test)
    tn, fp, fn, tp = confusion_matrix(y_test, predictions).ravel()
    finished = time()
    return tn, fp, fn, tp, fitted - started, finished - fitted
###############################################################################
k = [3, 5, 7, 9]

def knn(x_train, y_train, x_test, y_test, k):
    """Fit a k-NN classifier per neighbour count in `k` and print confusion
    counts, accuracy (%) and total wall-clock time for each fit/predict cycle.

    NOTE(review): this redefinition shadows the earlier `knn` that returned a
    list of accuracies; any caller after this point gets the printing variant.
    """
    # BUGFIX-adjacent cleanup: removed unused locals (`tf_list`, `task`) and
    # the dead commented-out copy of the earlier implementation.
    for element in k:
        t0 = time()
        model = KNeighborsClassifier(n_neighbors=element)
        model.fit(x_train, y_train)
        predictions = model.predict(x_test)
        tn, fp, fn, tp = confusion_matrix(y_test, predictions).ravel()
        t1 = time()
        # Accuracy = (tn + tp) / total samples, reported as a percentage.
        print("KNN k=%d \t %d \t %d \t %d \t %d \t %.2f%% \t Total: %.2fms" %(element,tn, fp, fn, tp, ((tn+tp)/(tn+fp+fn+tp)*100), (t1-t0)*1000 ))
###############################################################################
## file location is subject to change
data_set_1 = pd.read_csv(r'car_2class.data',header=0)
# data_set_2 = np.loadtxt('/home/aaron/Desktop/page-blocks.txt') # For Aaron
data_set_2 = np.loadtxt('page-blocks.txt') # For Jin
#############################################################################################
# first data set header info (reference)
# buying,maint,door,persons,lug_boot,safety,target_label
# convert all the non-int value to numeric value using sklearn preprocessing labelencoder
le = preprocessing.LabelEncoder()
buying = le.fit_transform(list(data_set_1["buying"]))
maint = le.fit_transform(list(data_set_1["maint"]))
door = le.fit_transform(list(data_set_1["door"]))
persons = le.fit_transform(list(data_set_1["persons"]))
lug_boot = le.fit_transform(list(data_set_1["lug_boot"]))
safety = le.fit_transform(list(data_set_1["safety"]))
target_label = list(data_set_1["target_label"])
# zip all the transformed data into numpy array
label_1 = np.array(target_label)
attributes_1= np.array(list(zip(buying,maint,door,persons,lug_boot,safety)))
# second data set
label_2 = data_set_2[:,-1]
attributes_2= data_set_2[:,:-1]
#############################################################################################
#define the value of K for KNN algorithm
# ----- Cross-validated evaluation on data set 1 (car data) -----
# For each fold count i in 2..6, run KFold CV and report the AdaBoost / SVM /
# KNN confusion-matrix counts, accuracy, and timings for every fold.
print("===========Data Set 1===========")
# cross validation using k fold from sklearn
for i in range (2,7):
    kf = KFold(n_splits=i,shuffle=True)
    counter =0  # fold counter within this split setting
#    ada_train_t_avg = 0
#    ada_total_t_avg = 0
#    svc_train_t_avg = 0
#    svc_total_t_avg = 0
#    knn_total_t_avg = 0
    print("\n\n\n=====Data Set 1 with the number of spilt is %d======" %i)
    for train_index, test_index in kf.split(attributes_1):
        t0 = time()
        # Slice out this fold's train/test partitions.
        x_1_train, x_1_test = attributes_1[train_index], attributes_1[test_index]
        y_1_train, y_1_test = label_1[train_index], label_1[test_index]
        t1 = time()
        counter=counter+1
        # ada_boost / svm_call are defined earlier in this file; both appear to
        # return (tn, fp, fn, tp, train_time, test_time) — TODO confirm the order.
        ada_boost_1 = ada_boost(x_1_train,y_1_train,x_1_test,y_1_test)
        n_components = attributes_1.shape[1]
        svm_1 = svm_call(x_1_train,y_1_train,x_1_test,y_1_test,n_components)
        t2 = time()
        print ("\n===Split: %d, Fold: %d ===" % (i, counter))
        print ("[Algorithm] \t [TN] \t [FP] \t [FN] \t [TP] \t [Accuracy] \t -----------------[Time]----------------")
        # NOTE(review): the Adaboost accuracy below sums indices [0]+[2] (tn+fn?)
        # whereas the SVM line sums [0]+[3] (tn+tp) — these should probably agree;
        # verify against ada_boost's return tuple.
        print ("Adaboost \t %d \t %d \t %d \t %d \t %.2f%% \t Training: %.2fms \t Testing: %.2fms" %(ada_boost_1[0],ada_boost_1[1],ada_boost_1[2],ada_boost_1[3],((ada_boost_1[0]+ada_boost_1[2])/(ada_boost_1[0]+ada_boost_1[1]+ada_boost_1[2]+ada_boost_1[3])*100), (ada_boost_1[4]*1000),(ada_boost_1[5]*1000)))
        print ("SVM \t \t %d \t %d \t %d \t %d \t %.2f%% \t Training: %.2fms \t Testing: %.2fms" %(svm_1[0],svm_1[1],svm_1[2],svm_1[3],((svm_1[0]+svm_1[3])/(svm_1[0]+svm_1[1]+svm_1[2]+svm_1[3])*100), (svm_1[4]*1000),(svm_1[5]*1000)))
        t3 = time()
        # knn prints its own result rows; `k` is presumably a module-level list of
        # neighbour counts defined earlier in the file — TODO confirm.
        knn_1 = knn(x_1_train,y_1_train,x_1_test,y_1_test,k)
        t4 = time()
# print("\nThe KNN total time is %.2f ms\n\n" % (t4-t3))
# ada_train_t_avg = ada_train_t_avg + ada_boost_1[4]
# ada_total_t_avg = ada_total_t_avg + ada_boost_1[5]+t1-t0
# svc_train_t_avg = svc_train_t_avg + svm_1[4]
# svc_total_t_avg = svc_total_t_avg + svm_1[5] +t1-t0
# knn_total_t_avg = knn_total_t_avg + t4-t3 +t1-t0
# ada_train_t_avg = ada_train_t_avg / counter
# ada_total_t_avg = ada_total_t_avg / counter
# svc_train_t_avg = svc_train_t_avg / counter
# svc_total_t_avg = svc_total_t_avg / counter
# knn_total_t_avg = knn_total_t_avg / counter
# print("\nThe Adaboost training time on data set 1 is %.2f ms" % (ada_train_t_avg*1000))
# print("The total computation time of Adaboost classifier on data set 1 is %.2f ms" % (ada_total_t_avg*1000))
# print("The svm training time on data set 1 is %.2f ms" % (svc_train_t_avg*1000))
# print("The total computation time of svm classifier on data set 1 is %.2f ms" % (svc_total_t_avg*1000))
# print("The KNN classifier total computation time on data set 1 is %.2f ms" % (knn_total_t_avg*1000))
# ----- Cross-validated evaluation on data set 2 (page-blocks data) -----
# Same protocol as data set 1: KFold CV for 2..6 splits, reporting AdaBoost /
# SVM / KNN confusion-matrix counts, accuracy, and timings per fold.
print("\n\n\n\n===========Data Set 2===========")
# cross validation using k fold from sklearn
for i in range (2,7):
    kf = KFold(n_splits=i,shuffle= True)
    counter =0  # fold counter within this split setting
    # NOTE(review): these accumulators are initialised but never updated in the
    # live code — the updating/averaging statements are commented out below.
    ada_train_t_avg = 0
    ada_total_t_avg = 0
    svc_train_t_avg = 0
    svc_total_t_avg = 0
    knn_total_t_avg = 0
    print("\n\n\n=====Data Set 2 with the number of spilt is %d======" %i)
    for train_index, test_index in kf.split(attributes_2):
        t0 = time()
        # Slice out this fold's train/test partitions.
        x_2_train, x_2_test = attributes_2[train_index], attributes_2[test_index]
        y_2_train, y_2_test = label_2[train_index], label_2[test_index]
        t1 = time()
        counter=counter+1
        ada_boost_2 = ada_boost(x_2_train,y_2_train,x_2_test,y_2_test)
        # #############################################################################
        # Compute a PCA (eigenvalues) on the dataset (treated as unlabeled
        # dataset): unsupervised feature extraction / dimensionality reduction
        n_components = attributes_2.shape[1]
        svm_2 = svm_call(x_2_train,y_2_train,x_2_test,y_2_test,n_components)
        t2 = time()
        print ("\n===Split: %d, Fold: %d ===" % (i, counter))
        print ("[Algorithm] \t [TN] \t [FP] \t [FN] \t [TP] \t [Accuracy] \t -----[Time]-----")
        # NOTE(review): as for data set 1, the Adaboost accuracy sums [0]+[2]
        # while the SVM line sums [0]+[3] — verify against ada_boost's return order.
        print ("Adaboost \t %d \t %d \t %d \t %d \t %.2f%% \t Training: %.2fms \t Testing: %.2fms" %(ada_boost_2[0],ada_boost_2[1],ada_boost_2[2],ada_boost_2[3],((ada_boost_2[0]+ada_boost_2[2])/(ada_boost_2[0]+ada_boost_2[1]+ada_boost_2[2]+ada_boost_2[3])*100), (ada_boost_2[4]*1000),(ada_boost_2[5]*1000)))
        print ("SVM \t \t %d \t %d \t %d \t %d \t %.2f%% \t Training: %.2fms \t Testing: %.2fms" %(svm_2[0],svm_2[1],svm_2[2],svm_2[3],((svm_2[0]+svm_2[3])/(svm_2[0]+svm_2[1]+svm_2[2]+svm_2[3])*100), (svm_2[4]*1000),(svm_2[5]*1000)))
        t3 = time()
        knn_2 = knn(x_2_train,y_2_train,x_2_test,y_2_test,k)
        t4 = time()
# print("\nThe Adaboost training time is %.2f ms" % (ada_boost_2[4]*1000))
# print("The Adaboost testing time is %.2f ms" % (ada_boost_2[5]*1000))
# print("\nThe svm training time is %.2f ms" % (svm_2[4]*1000))
# print("The svm testing time is %.2f ms\n\n" % (svm_2[5]*1000))
# print("\nThe KNN total time is %.2f ms\n\n" % (t4-t3))
# ada_train_t_avg = ada_train_t_avg + ada_boost_2[4]+t1-t0
# ada_total_t_avg = ada_total_t_avg + ada_boost_2[5]+t1-t0
# svc_train_t_avg = svc_train_t_avg + svm_2[4]
# svc_total_t_avg = svc_total_t_avg + svm_2[5] +t1-t0
# knn_total_t_avg = knn_total_t_avg + t4-t3 +t1-t0
# ada_train_t_avg = ada_train_t_avg / counter
# ada_total_t_avg = ada_total_t_avg / counter
# svc_train_t_avg = svc_train_t_avg / counter
# svc_total_t_avg = svc_total_t_avg / counter
# knn_total_t_avg = knn_total_t_avg / counter
# print("\nThe Adaboost training time on data set 2 is %.2f ms" % (ada_train_t_avg*1000))
# print("The total computation time on data set 2 is %.2f ms" % (ada_total_t_avg*1000))
# print("The svm training time on data set 2 is %.2f ms" % (svc_train_t_avg*1000))
# print("The total computation time on data set 2 is %.2f ms" % (svc_total_t_avg*1000))
# print("The KNN classifier total computation time on data set 2 is %.2f ms" % (knn_total_t_avg*1000))
| [
"pandas.read_csv",
"sklearn.preprocessing.LabelEncoder",
"sklearn.tree.DecisionTreeClassifier",
"time.time",
"sklearn.model_selection.KFold",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.array",
"numpy.loadtxt",
"sklearn.decomposition.PCA",
"sklearn.svm.SVC",
"sklearn.metrics.confusion_matri... | [((3616, 3656), 'pandas.read_csv', 'pd.read_csv', (['"""car_2class.data"""'], {'header': '(0)'}), "('car_2class.data', header=0)\n", (3627, 3656), True, 'import pandas as pd\n'), ((3753, 3782), 'numpy.loadtxt', 'np.loadtxt', (['"""page-blocks.txt"""'], {}), "('page-blocks.txt')\n", (3763, 3782), True, 'import numpy as np\n'), ((4081, 4109), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (4107, 4109), False, 'from sklearn import preprocessing\n'), ((4546, 4568), 'numpy.array', 'np.array', (['target_label'], {}), '(target_label)\n', (4554, 4568), True, 'import numpy as np\n'), ((1033, 1057), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1055, 1057), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1151, 1157), 'time.time', 'time', ([], {}), '()\n', (1155, 1157), False, 'from time import time\n'), ((1297, 1303), 'time.time', 'time', ([], {}), '()\n', (1301, 1303), False, 'from time import time\n'), ((1429, 1435), 'time.time', 'time', ([], {}), '()\n', (1433, 1435), False, 'from time import time\n'), ((1916, 1922), 'time.time', 'time', ([], {}), '()\n', (1920, 1922), False, 'from time import time\n'), ((2108, 2156), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(1000)', 'class_weight': '"""balanced"""', 'gamma': '(0.05)'}), "(C=1000, class_weight='balanced', gamma=0.05)\n", (2111, 2156), False, 'from sklearn.svm import SVC\n'), ((2214, 2220), 'time.time', 'time', ([], {}), '()\n', (2218, 2220), False, 'from time import time\n'), ((2337, 2343), 'time.time', 'time', ([], {}), '()\n', (2341, 2343), False, 'from time import time\n'), ((4990, 5021), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'i', 'shuffle': '(True)'}), '(n_splits=i, shuffle=True)\n', (4995, 5021), False, 'from sklearn.model_selection import KFold\n'), ((8435, 8466), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'i', 'shuffle': '(True)'}), '(n_splits=i, 
shuffle=True)\n', (8440, 8466), False, 'from sklearn.model_selection import KFold\n'), ((619, 660), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'element'}), '(n_neighbors=element)\n', (639, 660), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((2614, 2620), 'time.time', 'time', ([], {}), '()\n', (2618, 2620), False, 'from time import time\n'), ((2651, 2692), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'element'}), '(n_neighbors=element)\n', (2671, 2692), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((2846, 2852), 'time.time', 'time', ([], {}), '()\n', (2850, 2852), False, 'from time import time\n'), ((5313, 5319), 'time.time', 'time', ([], {}), '()\n', (5317, 5319), False, 'from time import time\n'), ((5487, 5493), 'time.time', 'time', ([], {}), '()\n', (5491, 5493), False, 'from time import time\n'), ((5726, 5732), 'time.time', 'time', ([], {}), '()\n', (5730, 5732), False, 'from time import time\n'), ((7087, 7093), 'time.time', 'time', ([], {}), '()\n', (7091, 7093), False, 'from time import time\n'), ((7168, 7174), 'time.time', 'time', ([], {}), '()\n', (7172, 7174), False, 'from time import time\n'), ((8749, 8755), 'time.time', 'time', ([], {}), '()\n', (8753, 8755), False, 'from time import time\n'), ((8923, 8929), 'time.time', 'time', ([], {}), '()\n', (8927, 8929), False, 'from time import time\n'), ((9380, 9386), 'time.time', 'time', ([], {}), '()\n', (9384, 9386), False, 'from time import time\n'), ((10100, 10106), 'time.time', 'time', ([], {}), '()\n', (10104, 10106), False, 'from time import time\n'), ((10181, 10187), 'time.time', 'time', ([], {}), '()\n', (10185, 10187), False, 'from time import time\n'), ((1371, 1411), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'Ada_prediction'], {}), '(y_test, Ada_prediction)\n', (1387, 1411), False, 'from sklearn.metrics import confusion_matrix\n'), ((1933, 2001), 
'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components', 'svd_solver': '"""randomized"""', 'whiten': '(True)'}), "(n_components=n_components, svd_solver='randomized', whiten=True)\n", (1936, 2001), False, 'from sklearn.decomposition import PCA\n'), ((2286, 2320), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'svc_pred'], {}), '(y_test, svc_pred)\n', (2302, 2320), False, 'from sklearn.metrics import confusion_matrix\n'), ((2794, 2826), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'predit'], {}), '(y_test, predit)\n', (2810, 2826), False, 'from sklearn.metrics import confusion_matrix\n')] |
from flask import Flask, render_template, request
import pickle
import pandas as pd
import numpy as np
from woe_enc import WoeEncoder
import __main__
# The pickled artifacts reference WoeEncoder under __main__, so expose it there
# before unpickling.
__main__.WoeEncoder = WoeEncoder

app = Flask(__name__)

def _unpickle(path):
    """Load one pickled training artifact from disk."""
    with open(path, 'rb') as fh:
        return pickle.load(fh)

# Fitted preprocessing transformers and cross-validated model bundles
# produced during training.
ohe_transformer = _unpickle('ohe_transformer.pkl')
cv_ohe = _unpickle('ohe_cv.pkl')
kbd_dict = _unpickle('kbd_dict.pkl')
iter_map_dict = _unpickle('iter_map_dict.pkl')
cv_woe = _unpickle('woe_cv.pkl')
cv_dt = _unpickle('dt_cv.pkl')
# Specifying GET method
@app.route('/',methods=['GET'])
def Home():
    """Render the credit-scoring input form (GET /)."""
    return render_template('index.html')
# Specifying POST method
@app.route("/predict", methods=['POST'])
def predict():
    """Score one credit application posted from the form.

    Parses the form fields, reproduces the training-time feature engineering
    (cluster assignment, Box-Cox transforms, one-hot / binned / interaction
    features), averages the probabilities of three model bundles
    (logistic-on-OHE, logistic-on-WOE, decision tree), and renders the
    accept/refuse decision back into index.html.
    """
    if request.method == 'POST':
        # Getting input from a user.  Fields rendered as "code : label" are
        # split on ':' to recover the numeric code; credit_history and purpose
        # are shifted to 0-based codes (matching the training encoding).
        status = int(str(request.form['status']).split(':')[0].strip())
        duration = int(request.form['duration'])
        credit_history = int(str(request.form['credit_history']).split(':')[0].strip()) - 1
        purpose = int(str(request.form['purpose']).split(':')[0].strip()) - 1
        amount = float(request.form['amount'])
        savings = int(str(request.form['savings']).split(':')[0].strip())
        employment_duration = int(str(request.form['employment_duration']).split(':')[0].strip())
        dti = int(str(request.form['dti']).split(':')[0].strip())
        status_sex = int(str(request.form['status_sex']).split(':')[0].strip())
        other_debtors = int(str(request.form['other_debtors']).split(':')[0].strip())
        present_residence = int(str(request.form['present_residence']).split(':')[0].strip())
        mv_property = int(str(request.form['mv_property']).split(':')[0].strip())
        age = int(request.form['age'])
        other_installment_plans = int(str(request.form['other_installment_plans']).split(':')[0].strip())
        housing = int(str(request.form['housing']).split(':')[0].strip())
        number_credits = int(str(request.form['number_credits']).split(':')[0].strip())
        job = int(str(request.form['job']).split(':')[0].strip())
        people_liable = int(str(request.form['people_liable']).split(':')[0].strip())
        telephone = int(str(request.form['telephone']).split(':')[0].strip())
        foreign_worker = int(str(request.form['foreign_worker']).split(':')[0].strip())
        # Let's create a function which will allow to assign a datapoint
        # to one of the clusters derived during the analysis.
        def get_hcluster(x):
            # The hard coded values below come from the SouthGermanCreditViz.ipynb.
            # The values represent medians for age, amount, and duration
            # respectively in each cluster.
            # Nearest-centroid assignment in raw (unscaled) units; returns a
            # 1-based cluster index.
            h1 = np.array([36, 4736, 30])
            h2 = np.array([49, 1264, 12])
            h3 = np.array([29, 1599.5, 12])
            dh1 = np.linalg.norm(x-h1)
            dh2 = np.linalg.norm(x-h2)
            dh3 = np.linalg.norm(x-h3)
            dh_list = [dh1, dh2, dh3]
            min_index = dh_list.index(np.min(dh_list))
            cluster_index = min_index + 1
            return cluster_index
        x = np.array([age, amount, duration])
        hclusters = get_hcluster(x)
        # Derived feature: remaining lifetime (to 100) per month of the loan.
        dage = (100 - age) / (12 * duration)
        # Let's create a dictionary of lmbda values (Box-Cox transformation lmbda in scipy)
        # for each numeric feature. The values come from the SouthGermanCreditViz.ipynb.
        lmbda_dict = {'age': -0.6524316739182968,
                      'amount': -0.0639326907261038,
                      'duration': 0.09297575561665981,
                      'dage': -0.025199226092440907}
        # Now let's apply Box-Cox transformation to numeric features.
        age = (age**lmbda_dict['age'] - 1) / lmbda_dict['age']
        dage = (dage**lmbda_dict['dage'] - 1) / lmbda_dict['dage']
        amount = (amount**lmbda_dict['amount'] - 1) / lmbda_dict['amount']
        duration = (duration**lmbda_dict['duration'] - 1) / lmbda_dict['duration']
        # Constructing features for the first model (single-row frame; the
        # mv_property value is exposed under the training column name 'property').
        data = pd.DataFrame([job, employment_duration, number_credits,
                             other_debtors, status_sex, foreign_worker,
                             status, credit_history, people_liable, dti,
                             savings, telephone, mv_property, purpose,
                             other_installment_plans, housing,
                             present_residence, hclusters]).T
        cat_col_list = ['job', 'employment_duration', 'number_credits',
                        'other_debtors', 'status_sex', 'foreign_worker',
                        'status', 'credit_history', 'people_liable', 'dti',
                        'savings', 'telephone', 'property', 'purpose',
                        'other_installment_plans', 'housing',
                        'present_residence', 'hclusters']
        data.columns = cat_col_list
        num_data = pd.DataFrame([age, amount, duration, dage]).T
        num_col_list = ['age', 'amount', 'duration', 'dage']
        num_data.columns = num_col_list
        # Applying onehot-encoding to the categorical features
        data_ohe = pd.DataFrame(ohe_transformer.transform(data[cat_col_list]))
        data_ohe.columns = list(ohe_transformer.get_feature_names_out(cat_col_list))
        data_ohe = pd.concat([num_data, data_ohe], axis=1)
        # Getting the output of the first model: average the positive-class
        # probability over the cross-validated estimators.
        lr_ohe_ppred_list = []
        for i_model in cv_ohe['estimator']:
            lr_ohe_ppred = i_model.predict_proba(data_ohe)[:, 1]
            lr_ohe_ppred_list.append(lr_ohe_ppred.reshape(-1, 1))
        ppred_ohe = np.hstack(lr_ohe_ppred_list).mean(axis=1)
        # Constructing features for the second model: binned numerics plus
        # pairwise interaction features mapped through the fitted WOE lookups.
        data = pd.concat([num_data, data], axis=1)
        for col in num_col_list:
            data[f'{col}_bin'] = kbd_dict[col].transform(data[col].to_frame())
            cat_col_list.append(f'{col}_bin')
        iter_list = [['status', 'credit_history'],
                     ['credit_history', 'savings'],
                     ['credit_history', 'duration_bin'],
                     ['savings', 'other_debtors'],
                     ['savings', 'amount_bin'],
                     ['duration_bin', 'other_debtors'],
                     ['duration_bin', 'savings'],
                     ['status', 'age_bin'],
                     ['duration_bin', 'amount_bin'],
                     ['duration_bin', 'age_bin'],
                     ['age_bin', 'other_installment_plans'],
                     ['age_bin', 'dage_bin']]
        for i_list in iter_list:
            temp_iter_feat_name = '_'.join(i_list) + '_iter'
            data[temp_iter_feat_name] = data[i_list[0]].astype(str) + \
                                        '_' + data[i_list[1]].astype(str)
            data[temp_iter_feat_name] = data[temp_iter_feat_name].\
                                        map(iter_map_dict[iter_list.index(i_list)])
            cat_col_list.append(temp_iter_feat_name)
        # Getting the result of the second model
        lr_woe_ppred_list = []
        for i_model in cv_woe['estimator']:
            lr_woe_ppred = i_model.predict_proba(data[cat_col_list])[:, 1]
            lr_woe_ppred_list.append(lr_woe_ppred.reshape(-1, 1))
        ppred_woe = np.hstack(lr_woe_ppred_list).mean(axis=1)
        # Constructing features for the third model
        dt_feat_list = num_col_list.copy()
        dt_feat_list.extend(cat_col_list)
        # Getting the output of the third model
        dt_ppred_list = []
        for i_model in cv_dt['estimator']:
            dt_ppred = i_model.predict_proba(data[dt_feat_list])[:, 1]
            dt_ppred_list.append(dt_ppred.reshape(-1, 1))
        ppred_dt = np.hstack(dt_ppred_list).mean(axis=1)
        # Getting the result of the ensemble model: simple average of the three.
        ppred = (ppred_woe + ppred_ohe + ppred_dt) / 3
        ppred = np.round(ppred[0], 4)
        # Showing the output of the ensemble to the user.
        # NOTE(review): the decision threshold below was presumably chosen during
        # model evaluation — confirm its provenance before changing it.
        if ppred > 0.27562525642456126:
            decision_text = f'Refuse: probability of default is {100*ppred:.2f} %.'
        else:
            decision_text = f'Proceed: probability of default is {100*ppred:.2f} %.'
        return render_template('index.html', prediction_text=decision_text)
    else:
        return render_template('index.html')
if __name__=="__main__":
    # Run the Flask development server (debug mode — not for production use).
    app.run(debug=True)
"pandas.DataFrame",
"flask.Flask",
"numpy.hstack",
"numpy.min",
"pickle.load",
"numpy.array",
"numpy.linalg.norm",
"flask.render_template",
"numpy.round",
"pandas.concat"
] | [((192, 207), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (197, 207), False, 'from flask import Flask, render_template, request\n'), ((316, 333), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (327, 333), False, 'import pickle\n'), ((387, 404), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (398, 404), False, 'import pickle\n'), ((462, 479), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (473, 479), False, 'import pickle\n'), ((547, 564), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (558, 564), False, 'import pickle\n'), ((618, 635), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (629, 635), False, 'import pickle\n'), ((687, 704), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (698, 704), False, 'import pickle\n'), ((785, 814), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (800, 814), False, 'from flask import Flask, render_template, request\n'), ((3303, 3336), 'numpy.array', 'np.array', (['[age, amount, duration]'], {}), '([age, amount, duration])\n', (3311, 3336), True, 'import numpy as np\n'), ((5508, 5547), 'pandas.concat', 'pd.concat', (['[num_data, data_ohe]'], {'axis': '(1)'}), '([num_data, data_ohe], axis=1)\n', (5517, 5547), True, 'import pandas as pd\n'), ((5935, 5970), 'pandas.concat', 'pd.concat', (['[num_data, data]'], {'axis': '(1)'}), '([num_data, data], axis=1)\n', (5944, 5970), True, 'import pandas as pd\n'), ((8092, 8113), 'numpy.round', 'np.round', (['ppred[0]', '(4)'], {}), '(ppred[0], 4)\n', (8100, 8113), True, 'import numpy as np\n'), ((8419, 8479), 'flask.render_template', 'render_template', (['"""index.html"""'], {'prediction_text': 'decision_text'}), "('index.html', prediction_text=decision_text)\n", (8434, 8479), False, 'from flask import Flask, render_template, request\n'), ((8505, 8534), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (8520, 8534), False, 'from 
flask import Flask, render_template, request\n'), ((2894, 2918), 'numpy.array', 'np.array', (['[36, 4736, 30]'], {}), '([36, 4736, 30])\n', (2902, 2918), True, 'import numpy as np\n'), ((2936, 2960), 'numpy.array', 'np.array', (['[49, 1264, 12]'], {}), '([49, 1264, 12])\n', (2944, 2960), True, 'import numpy as np\n'), ((2978, 3004), 'numpy.array', 'np.array', (['[29, 1599.5, 12]'], {}), '([29, 1599.5, 12])\n', (2986, 3004), True, 'import numpy as np\n'), ((3023, 3045), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - h1)'], {}), '(x - h1)\n', (3037, 3045), True, 'import numpy as np\n'), ((3062, 3084), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - h2)'], {}), '(x - h2)\n', (3076, 3084), True, 'import numpy as np\n'), ((3101, 3123), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - h3)'], {}), '(x - h3)\n', (3115, 3123), True, 'import numpy as np\n'), ((4239, 4500), 'pandas.DataFrame', 'pd.DataFrame', (['[job, employment_duration, number_credits, other_debtors, status_sex,\n foreign_worker, status, credit_history, people_liable, dti, savings,\n telephone, mv_property, purpose, other_installment_plans, housing,\n present_residence, hclusters]'], {}), '([job, employment_duration, number_credits, other_debtors,\n status_sex, foreign_worker, status, credit_history, people_liable, dti,\n savings, telephone, mv_property, purpose, other_installment_plans,\n housing, present_residence, hclusters])\n', (4251, 4500), True, 'import pandas as pd\n'), ((5114, 5157), 'pandas.DataFrame', 'pd.DataFrame', (['[age, amount, duration, dage]'], {}), '([age, amount, duration, dage])\n', (5126, 5157), True, 'import pandas as pd\n'), ((3198, 3213), 'numpy.min', 'np.min', (['dh_list'], {}), '(dh_list)\n', (3204, 3213), True, 'import numpy as np\n'), ((5824, 5852), 'numpy.hstack', 'np.hstack', (['lr_ohe_ppred_list'], {}), '(lr_ohe_ppred_list)\n', (5833, 5852), True, 'import numpy as np\n'), ((7483, 7511), 'numpy.hstack', 'np.hstack', (['lr_woe_ppred_list'], {}), '(lr_woe_ppred_list)\n', (7492, 
7511), True, 'import numpy as np\n'), ((7931, 7955), 'numpy.hstack', 'np.hstack', (['dt_ppred_list'], {}), '(dt_ppred_list)\n', (7940, 7955), True, 'import numpy as np\n')] |
#simsets.py
# need a set of sets, to find an ordering over all.
# related to contigs, but with repeated sets.
import numpy as np
import string
import copy
# function to flatten lists
def f(E):
    """Flatten an arbitrarily nested list into a flat list.

    Non-list leaves are returned whole (so a bare value x yields [x] and
    strings are not split into characters).  Iterative extend replaces the
    original E[1:] slicing recursion, which copied the tail at every step
    (O(n^2) in total).
    """
    if not isinstance(E, list):
        return [E]
    flat = []
    for item in E:
        flat.extend(f(item))
    return flat
def simsetdata(ngenes, mean_sample_size, sd_sample_size):
    """Simulate gene sets over a universe of random gene names.

    Generates `ngenes` random 5-letter gene names, then draws random subsets
    (size ~ |N(mean_sample_size, sd_sample_size)|) until every gene appears in
    at least one set, and finally thins the collection by dropping any set
    whose removal keeps full gene coverage.

    Returns (genes, sets, setnames).

    Changes vs. the original: removed the unused `idx` counter, and the
    flatten f(sets) is computed once per coverage check instead of once per
    gene (same counts, far less work).
    """
    def _n_covered(gene_list, set_list):
        # Count how many genes already appear somewhere in set_list.
        flat = set(f(set_list))
        return sum(g in flat for g in gene_list)

    genes = [''.join([string.ascii_lowercase[i] for i in np.random.randint(low=0, high=26, size=5)]) for _ in range(0, ngenes)]
    # Draw sets until every gene is covered.
    sampsize = round(abs(np.random.normal(mean_sample_size, sd_sample_size)))
    set1 = [genes[i] for i in np.random.randint(low=0, high=len(genes), size=sampsize)]
    sets = [set1]
    while _n_covered(genes, sets) < len(genes):
        sampsize = round(abs(np.random.normal(mean_sample_size, sd_sample_size)))
        set1 = [genes[i] for i in np.random.randint(low=0, high=len(genes), size=sampsize)]
        sets.append(set1)
    # Thinning: drop sets whose removal does not reduce gene coverage.
    settry = copy.deepcopy(sets)
    for si in sets:
        settry.remove(si)
        if _n_covered(genes, settry) == len(genes):
            print("good drop point, still got: " + str(_n_covered(genes, settry)) + " genes")
        else:
            settry.append(si)  # coverage dropped, so keep this set
    setnames = [''.join([string.ascii_lowercase[i] for i in np.random.randint(low=0, high=26, size=5)]) for _ in
                range(0, len(settry))]
    return((genes, settry, setnames))
# Next we produce a matrix connecting sets.
def setoverlap(sets):
    """Jaccard similarity for every unordered pair of sets.

    Returns [[i, j, jaccard], ...] ordered most-similar first.  Removed the
    unused `idx` counter.  The sort-ascending-then-reverse is kept deliberately
    (NOT replaced with reverse=True) because it yields a different tie order,
    and downstream joining depends on the ordering.
    """
    mat = []
    for i in range(0, len(sets)):
        for j in range(i, len(sets)):
            intsize = float(len(set(sets[i]).intersection(set(sets[j]))))
            unisize = float(len(set(sets[i]).union(set(sets[j]))))
            jaccard = intsize / unisize
            if i != j:
                mat.append([i, j, jaccard])
                print(str(i) + " " + str(j))
    mat.sort(key=lambda x: x[2])
    mat.reverse()
    return mat
# join sets
def setjoin(a, b):
    """Merge two gene lists as [a-only elements][shared elements][b-only elements]."""
    sa, sb = set(a), set(b)
    return list(sa - sb) + list(sa & sb) + list(sb - sa)
# in each round of convalesce
def roundjoin(allsets, scores):
    """One convalescing round: greedily join the best-scoring disjoint pairs.

    `scores` is the output of setoverlap (rows [i, j, score], best first).
    Each set participates in at most one join per round; any set left
    unpaired is carried forward unchanged.

    BUG FIX: the carried-forward sets used to be appended as
    allsets[scores[i][0]] (duplicating one already-joined set for every
    leftover index) instead of allsets[j].
    """
    setshave = [i for i in range(0, len(allsets))]
    setsused = []
    newsets = []
    for i in range(0, len(scores)):
        si, sj = scores[i][0], scores[i][1]
        if (si not in setsused) and (sj not in setsused):
            newsets.append(setjoin(allsets[si], allsets[sj]))
            print("joined " + str(si) + " " + str(sj))
            setsused.append(si)
            setsused.append(sj)
            setshave.remove(si)
            setshave.remove(sj)
    # Carry forward every set that was not joined this round.
    for j in setshave:
        newsets.append(allsets[j])
    return newsets
def fulljoin(sets):
while(len(sets) > 1):
mat = setoverlap(sets)
nextsets = roundjoin(sets,mat)
mat = setoverlap(nextsets)
sets = nextsets
return(sets[0])
def setmatrix(genes, sets, setnames):
    """Build a string matrix with one row per set: column 0 holds the set name,
    the remaining columns are '1'/'0' flags for each gene's membership."""
    n_rows, n_cols = len(sets), len(genes) + 1
    m = np.empty((n_rows, n_cols), dtype='U128')
    for si in range(n_rows):
        m[si, 0] = setnames[si]
        members = sets[si]
        for gi, gene in enumerate(genes, start=1):
            m[si, gi] = '1' if gene in members else '0'
    return m
def kulczynski2(x, y):
    """Kulczynski-2 similarity between two binary (0/1) vectors.

    With a = #{x==1 & y==1}, b = #{x==1 & y==0}, c = #{x==0 & y==1}, this is
    (a/2)*(2a+b+c) / ((a+b)*(a+c)), i.e. the mean of a/(a+b) and a/(a+c).
    Returns 0 when the denominator is zero (a vector without any 1s).

    Removed the joint-absence counter `d`, which was computed but never used.
    """
    a = 0.0
    b = 0.0
    c = 0.0
    for xv, yv in zip(x, y):
        if xv == 1 and yv == 1:
            a += 1.0
        if xv == 1 and yv == 0:
            b += 1.0
        if xv == 0 and yv == 1:
            c += 1.0
    numer = (a / 2.0) * (2 * a + b + c)
    denom = (a + b) * (a + c)
    if denom != 0:
        return numer / denom
    return 0
def setscores(m, genes):
    """Pairwise Kulczynski-2 similarity between the gene columns of a set matrix.

    Column 0 of `m` holds set names; columns 1..len(genes) are '1'/'0' flags.
    Returns a symmetric len(genes) x len(genes) matrix (diagonal left at 0).

    Optimisations vs. the original (identical results): the string->float
    column conversion is hoisted out of the pair loop, and each symmetric pair
    is computed once and mirrored (kulczynski2 is symmetric in its arguments).
    """
    ngenes = len(genes)
    cols = [[float(v) for v in m[:, gi + 1]] for gi in range(ngenes)]
    scoremat = np.zeros((ngenes, ngenes))
    for gi in range(ngenes):
        for hi in range(gi + 1, ngenes):
            s = kulczynski2(cols[gi], cols[hi])
            scoremat[gi, hi] = s
            scoremat[hi, gi] = s
    return scoremat
def binit(x, t):
    """Binarize: 1 when x is strictly below the threshold t, else 0."""
    return 1 if x < t else 0
def setmat_to_numeric(setmat):
    """Drop the set-name column (col 0) and return the remaining '1'/'0'
    membership flags as a float matrix."""
    n_rows = len(setmat)
    n_cols = len(setmat[0]) - 1
    out = np.zeros((n_rows, n_cols))
    for r in range(n_rows):
        for c in range(n_cols):
            out[r, c] = float(setmat[r, c + 1])
    return out
def permscores(scrmat, setmat, ngenes, perms):
    """Permutation null for the gene-gene scores: draw random membership vectors
    whose expected density matches a randomly chosen observed per-set gene
    count, score each pair with kulczynski2, and return the maximum over
    `perms` draws.  (`scrmat` is accepted for interface compatibility but is
    not used.)"""
    counts = [sum(row) for row in setmat_to_numeric(setmat)]
    print(counts)
    null_scores = []
    for _ in range(perms):
        p1 = float(np.random.choice(a=counts, size=1)) / ngenes
        p2 = float(np.random.choice(a=counts, size=1)) / ngenes
        vec1 = [binit(np.random.sample(), p1) for _ in range(ngenes)]
        vec2 = [binit(np.random.sample(), p2) for _ in range(ngenes)]
        null_scores.append(kulczynski2(vec1, vec2))
    return np.max(null_scores)
def apply_threshold(gesc, cutoff):
    """Return a copy of the score matrix with every entry at or below `cutoff`
    zeroed out; the input array is left untouched."""
    thresholded = copy.deepcopy(gesc)
    below = np.where(thresholded <= cutoff)
    thresholded[below] = 0
    return thresholded
def gen_means_and_sds(sets, idx, jdx, slope):
    """Per-set normal parameters for the simulation.

    Every set gets mean 0 and sd 0.5, except set `idx`, whose mean follows the
    linear trend jdx * slope (jdx indexes the time point).
    """
    set_means = []
    set_sds = []
    for _ in sets:
        # The *0.0 keeps the draw (and hence the global RNG stream) identical
        # to the original deliberately-zeroed random mean.
        set_means.append(np.random.sample() * 0.0)
        set_sds.append(0.5)
    set_means[idx] = jdx * slope
    return (set_means, set_sds)
def gen_expression(gord, sets, set_means, set_sds):
    """Simulate per-gene expression: each gene accumulates one contribution per
    set it belongs to.

    Returns (gexpr, nbexpr): gexpr sums |N(mean_j, sd_j)| draws, nbexpr sums
    negative-binomial draws with p scaled by the set's mean relative to the
    largest set mean.
    """
    # for each set, generate a mean value and a stddev
    gexpr = np.zeros(len(gord)) # expr for each gene
    nbexpr = np.zeros(len(gord))
    for i,g in enumerate(gord):
        for j,s in enumerate(sets):
            if g in s: # if this gene is in this set
                gexpr[i] += abs(np.random.normal(set_means[j], set_sds[j], size=1))
                # NOTE(review): p = set_means[j]/max(set_means) is 0 for sets
                # with a zero mean, which falls outside negative_binomial's
                # valid range 0 < p <= 1 — confirm this branch cannot be hit
                # for zero-mean sets, or clamp p.
                nbexpr[i] += np.random.negative_binomial(n=100, p=set_means[j]/max(set_means), size=1)
    return((gexpr,nbexpr))
#####################
# Going to generate:#
# 1. number of sets
# 2. a binary set-membership matrix
# 3. similarity scores between genes, based on shared set membership
# 4. permutation based score to threshold the similarity scores
# 5. network based on similarity scores
# 6. a gene ordering based on joining sets
# 7. simulated expression values based on shared-set values.
############################################################
# first simulate the gene and gene sets
# Driver script: simulate the sets, score genes, threshold, order, and write
# all artifacts as TSV files in the working directory.
ngenes = 100
genes, sets, setnames = simsetdata(ngenes, 30, 30)
# then generate the set matrix (sema)
sema = setmatrix(genes, sets, setnames)
# can insert gene names into rows, but have to have binary values as strings
np.savetxt(X=sema, fmt='%s', delimiter='\t', fname="setmatrix.tsv")
# then score the gene-gene pairs (gene scores gesc)
gesc = setscores(sema, genes)
# find the permutation based thresholds (max score over 100 permutations)
cutoff = permscores(gesc, sema, ngenes, 100)
gesc2 = apply_threshold(gesc, cutoff)
np.savetxt(X=gesc2, delimiter='\t', fname="scorematrix.tsv")
# then get the gene ordering (gene ordering gord) by iteratively joining sets
gord = fulljoin(sets)
np.savetxt(X=gord, fmt='%s', delimiter='\t', fname="geneorder.tsv")
# generate the set-based expression levels: set 1 follows a linear trend
# (slope 3) over six time points; one expression file is written per point.
expr_file_names = []
for si in range(1,7):
    expr_file_names.append('exprdat_'+str(si)+'.tsv')
    (set_means, set_sds) = gen_means_and_sds(sets, 1, si, 3)
    (gexpr,nbexpr) = gen_expression(gord, sets, set_means, set_sds)
    np.savetxt(X=np.transpose([set_means, set_sds]), fmt='%s', delimiter='\t', fname='set_means_'+str(si)+'.tsv')
    np.savetxt(X=np.transpose([gord, gexpr,nbexpr]), fmt='%s', delimiter='\t', fname='exprdat_'+str(si)+'.tsv')
np.savetxt(X=expr_file_names, fmt='%s', delimiter='\t', fname="filelist.tsv")
print("done")
| [
"copy.deepcopy",
"numpy.savetxt",
"numpy.zeros",
"numpy.transpose",
"numpy.max",
"numpy.where",
"numpy.random.randint",
"numpy.random.normal",
"numpy.random.choice",
"numpy.random.sample"
] | [((8051, 8118), 'numpy.savetxt', 'np.savetxt', ([], {'X': 'sema', 'fmt': '"""%s"""', 'delimiter': '"""\t"""', 'fname': '"""setmatrix.tsv"""'}), "(X=sema, fmt='%s', delimiter='\\t', fname='setmatrix.tsv')\n", (8061, 8118), True, 'import numpy as np\n'), ((8326, 8386), 'numpy.savetxt', 'np.savetxt', ([], {'X': 'gesc2', 'delimiter': '"""\t"""', 'fname': '"""scorematrix.tsv"""'}), "(X=gesc2, delimiter='\\t', fname='scorematrix.tsv')\n", (8336, 8386), True, 'import numpy as np\n'), ((8459, 8526), 'numpy.savetxt', 'np.savetxt', ([], {'X': 'gord', 'fmt': '"""%s"""', 'delimiter': '"""\t"""', 'fname': '"""geneorder.tsv"""'}), "(X=gord, fmt='%s', delimiter='\\t', fname='geneorder.tsv')\n", (8469, 8526), True, 'import numpy as np\n'), ((9024, 9101), 'numpy.savetxt', 'np.savetxt', ([], {'X': 'expr_file_names', 'fmt': '"""%s"""', 'delimiter': '"""\t"""', 'fname': '"""filelist.tsv"""'}), "(X=expr_file_names, fmt='%s', delimiter='\\t', fname='filelist.tsv')\n", (9034, 9101), True, 'import numpy as np\n'), ((1625, 1644), 'copy.deepcopy', 'copy.deepcopy', (['sets'], {}), '(sets)\n', (1638, 1644), False, 'import copy\n'), ((5355, 5381), 'numpy.zeros', 'np.zeros', (['(rows, cols - 1)'], {}), '((rows, cols - 1))\n', (5363, 5381), True, 'import numpy as np\n'), ((6283, 6299), 'numpy.max', 'np.max', (['allscrrs'], {}), '(allscrrs)\n', (6289, 6299), True, 'import numpy as np\n'), ((6350, 6369), 'copy.deepcopy', 'copy.deepcopy', (['gesc'], {}), '(gesc)\n', (6363, 6369), False, 'import copy\n'), ((6382, 6407), 'numpy.where', 'np.where', (['(gesc2 <= cutoff)'], {}), '(gesc2 <= cutoff)\n', (6390, 6407), True, 'import numpy as np\n'), ((825, 875), 'numpy.random.normal', 'np.random.normal', (['mean_sample_size', 'sd_sample_size'], {}), '(mean_sample_size, sd_sample_size)\n', (841, 875), True, 'import numpy as np\n'), ((6589, 6607), 'numpy.random.sample', 'np.random.sample', ([], {}), '()\n', (6605, 6607), True, 'import numpy as np\n'), ((8814, 8848), 'numpy.transpose', 'np.transpose', 
(['[set_means, set_sds]'], {}), '([set_means, set_sds])\n', (8826, 8848), True, 'import numpy as np\n'), ((8928, 8963), 'numpy.transpose', 'np.transpose', (['[gord, gexpr, nbexpr]'], {}), '([gord, gexpr, nbexpr])\n', (8940, 8963), True, 'import numpy as np\n'), ((1174, 1224), 'numpy.random.normal', 'np.random.normal', (['mean_sample_size', 'sd_sample_size'], {}), '(mean_sample_size, sd_sample_size)\n', (1190, 1224), True, 'import numpy as np\n'), ((5923, 5972), 'numpy.random.choice', 'np.random.choice', ([], {'a': 'num_genes_in_each_set', 'size': '(1)'}), '(a=num_genes_in_each_set, size=1)\n', (5939, 5972), True, 'import numpy as np\n'), ((6000, 6049), 'numpy.random.choice', 'np.random.choice', ([], {'a': 'num_genes_in_each_set', 'size': '(1)'}), '(a=num_genes_in_each_set, size=1)\n', (6016, 6049), True, 'import numpy as np\n'), ((6080, 6098), 'numpy.random.sample', 'np.random.sample', ([], {}), '()\n', (6096, 6098), True, 'import numpy as np\n'), ((6153, 6171), 'numpy.random.sample', 'np.random.sample', ([], {}), '()\n', (6169, 6171), True, 'import numpy as np\n'), ((656, 697), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(26)', 'size': '(5)'}), '(low=0, high=26, size=5)\n', (673, 697), True, 'import numpy as np\n'), ((2084, 2125), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(26)', 'size': '(5)'}), '(low=0, high=26, size=5)\n', (2101, 2125), True, 'import numpy as np\n'), ((7168, 7218), 'numpy.random.normal', 'np.random.normal', (['set_means[j]', 'set_sds[j]'], {'size': '(1)'}), '(set_means[j], set_sds[j], size=1)\n', (7184, 7218), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch import nn
from core import *
from collections import namedtuple
from itertools import count
torch.backends.cudnn.benchmark = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cpu = torch.device("cpu")
@cat.register(torch.Tensor)
def _(*tensors):
    """`cat` overload for torch tensors: concatenate along the first dimension."""
    return torch.cat(tensors)

@to_numpy.register(torch.Tensor)
def _(x):
    """`to_numpy` overload: detach from autograd, move to host, convert to ndarray."""
    detached = x.detach()
    return detached.cpu().numpy()

@pad.register(torch.Tensor)
def _(x, border):
    """`pad` overload: reflection-pad both spatial edges by `border` pixels."""
    reflect = nn.ReflectionPad2d(border)
    return reflect(x)

@transpose.register(torch.Tensor)
def _(x, source, target):
    """`transpose` overload: permute dims named by `source` into `target` order."""
    order = [source.index(dim) for dim in target]
    return x.permute(order)
def to(*args, **kwargs):
    """Curried `Tensor.to`: returns a function mapping x -> x.to(*args, **kwargs)."""
    def _mover(x):
        return x.to(*args, **kwargs)
    return _mover
@flip_lr.register(torch.Tensor)
def _(x):
    """`flip_lr` overload: mirror the tensor along its last (width) axis."""
    return torch.flip(x, dims=[-1])
#####################
## dataset
#####################
from functools import lru_cache as cache
@cache(None)
def cifar10(root='./data'):
    """Download (if needed) and return CIFAR-10 as plain arrays.

    Returns {'train'|'valid': {'data': image array, 'targets': labels}} —
    image layout follows torchvision/keras CIFAR-10 (N, 32, 32, 3 uint8;
    confirm against the consumer). Prefers torchvision and falls back to
    tensorflow.keras when torchvision is not installed. Cached so the
    dataset is only loaded once per process.
    """
    try:
        import torchvision
        download = lambda train: torchvision.datasets.CIFAR10(root=root, train=train, download=True)
        return {k: {'data': v.data, 'targets': v.targets} for k,v in [('train', download(train=True)), ('valid', download(train=False))]}
    except ImportError:
        # torchvision unavailable: use the keras copy of the dataset instead.
        from tensorflow.keras import datasets
        (train_images, train_labels), (valid_images, valid_labels) = datasets.cifar10.load_data()
        return {
            'train': {'data': train_images, 'targets': train_labels.squeeze()},
            'valid': {'data': valid_images, 'targets': valid_labels.squeeze()}
        }
cifar10_mean, cifar10_std = [
(125.31, 122.95, 113.87), # equals np.mean(cifar10()['train']['data'], axis=(0,1,2))
(62.99, 62.09, 66.70), # equals np.std(cifar10()['train']['data'], axis=(0,1,2))
]
cifar10_classes= 'airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck'.split(', ')
#####################
## data loading
#####################
class DataLoader():
    """Wrapper around torch's DataLoader that yields half-precision device batches.

    Each batch is a dict: {'input': fp16 tensor on `device`, 'target': long tensor}.
    """
    def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, shuffle=shuffle,
            num_workers=num_workers, pin_memory=True, drop_last=drop_last,
        )
    def __iter__(self):
        # Must run eagerly (before iteration starts) so each epoch gets
        # fresh per-example augmentation choices.
        if self.set_random_choices:
            self.dataset.set_random_choices()
        def to_device(x, y):
            return {'input': x.to(device).half(), 'target': y.to(device).long()}
        return (to_device(x, y) for x, y in self.dataloader)
    def __len__(self):
        return len(self.dataloader)
#GPU dataloading helpers
def chunks(data, splits):
    """Yield consecutive slices of `data` delimited by boundary pairs in `splits`."""
    return (data[lo:hi] for lo, hi in zip(splits, splits[1:]))

def even_splits(N, num_chunks):
    """Cumulative boundaries dividing N items into num_chunks near-equal chunks;
    the first N % num_chunks chunks receive one extra item."""
    large, small = N // num_chunks + 1, N // num_chunks
    sizes = [large] * (N % num_chunks) + [small] * (num_chunks - N % num_chunks)
    return np.cumsum([0] + sizes)
def shuffled(xs, inplace=False):
    """Return `xs` shuffled in place if `inplace`, otherwise a shuffled shallow copy."""
    target = xs if inplace else copy.copy(xs)
    np.random.shuffle(target)
    return target
def transformed(data, targets, transform, max_options=None, unshuffle=False):
    """Apply `transform` to `data` on `device`, batching by random option choice.

    Data is shuffled, split into chunks that each share one randomly chosen
    transform option (cheaper than per-example options), transformed, then
    either restored to the original order (unshuffle=True) or returned with
    `targets` permuted to match the shuffled data.
    """
    i = torch.randperm(len(data), device=device)
    data = data[i]
    # One option per chunk; cap the number of distinct options at max_options.
    options = shuffled(transform.options(data.shape), inplace=True)[:max_options]
    data = torch.cat([transform(x, **choice) for choice, x in zip(options, chunks(data, even_splits(len(data), len(options))))])
    return (data[torch.argsort(i)], targets) if unshuffle else (data, targets[i])
class GPUBatches():
    """Batch iterator over a dataset already resident on `device`.

    Applies `transforms` once per epoch, optionally reshuffles, and yields
    {'input', 'target'} dicts sliced at precomputed batch boundaries.
    """
    def __init__(self, batch_size, transforms=(), dataset=None, shuffle=True, drop_last=False, max_options=None):
        self.dataset, self.transforms, self.shuffle, self.max_options = dataset, transforms, shuffle, max_options
        N = len(dataset['data'])
        # Batch boundary indices; keep the final partial batch unless drop_last.
        self.splits = list(range(0, N+1, batch_size))
        if not drop_last and self.splits[-1] != N:
            self.splits.append(N)
    def __iter__(self):
        data, targets = self.dataset['data'], self.dataset['targets']
        for transform in self.transforms:
            # When shuffle=False, ask the transform step to restore input order.
            data, targets = transformed(data, targets, transform, max_options=self.max_options, unshuffle=not self.shuffle)
        if self.shuffle:
            i = torch.randperm(len(data), device=device)
            data, targets = data[i], targets[i]
        return ({'input': x.clone(), 'target': y} for (x, y) in zip(chunks(data, self.splits), chunks(targets, self.splits)))
    def __len__(self):
        return len(self.splits) - 1
#####################
## Layers
#####################
#Network
class Network(nn.Module):
    """nn.Module assembled from a declarative graph description.

    `net` is converted by build_graph (presumably provided by `core`) into
    {path: (node, input_names)}; each node is registered as an attribute
    (path separators become '_') so parameters are tracked by nn.Module,
    and forward() evaluates nodes in graph order.
    """
    def __init__(self, net):
        super().__init__()
        self.graph = build_graph(net)
        for path, (val, _) in self.graph.items():
            setattr(self, path.replace('/', '_'), val)
    def nodes(self):
        # All graph nodes, without their input lists.
        return (node for node, _ in self.graph.values())
    def forward(self, inputs):
        outputs = dict(inputs)
        for k, (node, ins) in self.graph.items():
            #only compute nodes that are not supplied as inputs.
            if k not in outputs:
                outputs[k] = node(*[outputs[x] for x in ins])
        return outputs
    def half(self):
        # Convert to fp16 except batch-norm layers, kept fp32 for stable stats.
        for node in self.nodes():
            if isinstance(node, nn.Module) and not isinstance(node, nn.BatchNorm2d):
                node.half()
        return self
class Identity(namedtuple('Identity', [])):
    """Graph node that forwards its input unchanged."""
    def __call__(self, x):
        return x

class Add(namedtuple('Add', [])):
    """Graph node computing the elementwise sum of two inputs."""
    def __call__(self, x, y):
        return x + y

class AddWeighted(namedtuple('AddWeighted', ['wx', 'wy'])):
    """Graph node computing the weighted sum wx*x + wy*y."""
    def __call__(self, x, y):
        return self.wx * x + self.wy * y
class Mul(nn.Module):
    """Module scaling its input by a fixed multiplier."""
    def __init__(self, weight):
        super().__init__()
        self.weight = weight

    def __call__(self, x):
        scale = self.weight
        return x * scale

class Flatten(nn.Module):
    """Collapse an (N, C, ...) activation to (N, C).

    NOTE(review): view(N, C) assumes the trailing dims have total size 1 —
    confirm with the upstream pooling layer.
    """
    def forward(self, x):
        n, c = x.size(0), x.size(1)
        return x.view(n, c)

class Concat(nn.Module):
    """Concatenate all inputs along the channel dimension (dim 1)."""
    def forward(self, *xs):
        return torch.cat(xs, dim=1)
class BatchNorm(nn.BatchNorm2d):
    """BatchNorm2d with configurable affine init and optional frozen parameters.

    weight_init/bias_init of None leaves the default initialisation untouched;
    *_freeze=True excludes the parameter from gradient updates.
    """
    def __init__(self, num_features, eps=1e-05, momentum=0.1, weight_freeze=False, bias_freeze=False, weight_init=1.0, bias_init=0.0):
        super().__init__(num_features, eps=eps, momentum=momentum)
        for param, init, freeze in ((self.weight, weight_init, weight_freeze),
                                    (self.bias, bias_init, bias_freeze)):
            if init is not None:
                param.data.fill_(init)
            param.requires_grad = not freeze
class GhostBatchNorm(BatchNorm):
    """BatchNorm that normalises over `num_splits` 'ghost' sub-batches.

    In training, the (N, C, H, W) input is viewed as
    (N/num_splits, C*num_splits, H, W) so statistics are computed per
    sub-batch (a large-batch regulariser). Running stats are kept per split
    and averaged lazily when switching to eval mode.
    NOTE(review): assumes N is divisible by num_splits — confirm caller.
    """
    def __init__(self, num_features, num_splits, **kw):
        super().__init__(num_features, **kw)
        self.num_splits = num_splits
        # Per-split running statistics (override the buffers from BatchNorm).
        self.register_buffer('running_mean', torch.zeros(num_features*self.num_splits))
        self.register_buffer('running_var', torch.ones(num_features*self.num_splits))
    def train(self, mode=True):
        if (self.training is True) and (mode is False): #lazily collate stats when we are going to use them
            self.running_mean = torch.mean(self.running_mean.view(self.num_splits, self.num_features), dim=0).repeat(self.num_splits)
            self.running_var = torch.mean(self.running_var.view(self.num_splits, self.num_features), dim=0).repeat(self.num_splits)
        return super().train(mode)
    def forward(self, input):
        N, C, H, W = input.shape
        if self.training or not self.track_running_stats:
            # Fold the split dimension into channels so batch_norm computes
            # num_splits independent sets of statistics in one call.
            return nn.functional.batch_norm(
                input.view(-1, C*self.num_splits, H, W), self.running_mean, self.running_var,
                self.weight.repeat(self.num_splits), self.bias.repeat(self.num_splits),
                True, self.momentum, self.eps).view(N, C, H, W)
        else:
            # Eval: every example uses the (collated) first num_features stats.
            return nn.functional.batch_norm(
                input, self.running_mean[:self.num_features], self.running_var[:self.num_features],
                self.weight, self.bias, False, self.momentum, self.eps)
# Losses
class CrossEntropyLoss(namedtuple('CrossEntropyLoss', [])):
    """Per-example negative log-likelihood of precomputed log-probabilities."""
    def __call__(self, log_probs, target):
        return nn.functional.nll_loss(log_probs, target, reduction='none')

class KLLoss(namedtuple('KLLoss', [])):
    """Per-example negative mean log-probability (KL to uniform, up to a constant)."""
    def __call__(self, log_probs):
        return log_probs.mean(dim=1).neg()

class Correct(namedtuple('Correct', [])):
    """Per-example boolean: does the argmax class match the target?"""
    def __call__(self, classifier, target):
        predictions = classifier.max(dim=1)[1]
        return predictions == target

class LogSoftmax(namedtuple('LogSoftmax', ['dim'])):
    """Log-softmax along the configured dimension."""
    def __call__(self, x):
        return nn.functional.log_softmax(x, dim=self.dim, _stacklevel=5)
# Loss graph (evaluated by Network): plain cross-entropy plus accuracy.
x_ent_loss = Network({
  'loss':  (nn.CrossEntropyLoss(reduction='none'), ['logits', 'target']),
  'acc': (Correct(), ['logits', 'target'])
})

# Label-smoothing loss graph: (1-alpha)*CE + alpha*KL-to-uniform, sharing
# one log-softmax between both terms.
label_smoothing_loss = lambda alpha: Network({
        'logprobs': (LogSoftmax(dim=1), ['logits']),
        'KL':  (KLLoss(), ['logprobs']),
        'xent':  (CrossEntropyLoss(), ['logprobs', 'target']),
        'loss': (AddWeighted(wx=1-alpha, wy=alpha), ['xent', 'KL']),
        'acc': (Correct(), ['logits', 'target']),
    })
def trainable_params(model):
    """Named parameters of `model` that still require gradients."""
    return {name: param for name, param in model.named_parameters() if param.requires_grad}
#####################
## Optimisers
#####################
from functools import partial
def nesterov_update(w, dw, v, lr, weight_decay, momentum):
    """In-place SGD with Nesterov momentum and decoupled-style weight decay.

    Mutates all three tensors:
        dw <- -lr * (dw + weight_decay * w)   (scaled step)
        v  <- momentum * v + dw               (velocity)
        w  <- w + dw + momentum * v           (Nesterov lookahead step)

    Note: the legacy ``Tensor.add_(Number, Tensor)`` overload was removed in
    recent PyTorch; use the ``alpha=`` keyword form instead.
    """
    dw.add_(w, alpha=weight_decay).mul_(-lr)
    v.mul_(momentum).add_(dw)
    w.add_(dw.add_(v, alpha=momentum))
def norm(x):
    """Per-example L2 norm of x, shaped (N, 1, 1, 1) for broadcasting."""
    flat = x.reshape(x.size(0), -1).float()
    return torch.norm(flat, dim=1)[:, None, None, None]

def LARS_update(w, dw, v, lr, weight_decay, momentum):
    """Layer-wise adaptive rate scaling: rescale lr by ||w|| / ||dw|| (with a
    small floor on the gradient norm), then apply a Nesterov update."""
    trust_ratio = (norm(w) / (norm(dw) + 1e-2)).to(w.dtype)
    nesterov_update(w, dw, v, lr * trust_ratio, weight_decay, momentum)
def zeros_like(weights):
    """Fresh zero buffers matching each weight tensor (momentum state init)."""
    return list(map(torch.zeros_like, weights))

def optimiser(weights, param_schedule, update, state_init):
    """Bundle an update rule, hyper-parameter schedules, weights and per-weight
    state into the plain dict consumed by `opt_step`."""
    weights = list(weights)
    return dict(update=update, param_schedule=param_schedule, step_number=0,
                weights=weights, opt_state=state_init(weights))

def opt_step(update, param_schedule, step_number, weights, opt_state):
    """Advance one optimiser step: evaluate each schedule at the new step count
    and apply `update` in place to every trainable weight."""
    step_number += 1
    hyperparams = {name: schedule(step_number) for name, schedule in param_schedule.items()}
    for w, state in zip(weights, opt_state):
        if w.requires_grad:
            update(w.data, w.grad.data, state, **hyperparams)
    return {'update': update, 'param_schedule': param_schedule, 'step_number': step_number,
            'weights': weights, 'opt_state': opt_state}
LARS = partial(optimiser, update=LARS_update, state_init=zeros_like)
SGD = partial(optimiser, update=nesterov_update, state_init=zeros_like)
#####################
## training
#####################
from itertools import chain
def reduce(batches, state, steps):
    """Fold `steps` over every batch, then once more with batch=None so steps
    can finalise (e.g. collate logs). Mutates and returns `state`.

    Each step is called as step(batch, state) and may return a dict of
    updates to merge into the state (or None).
    Note: intentionally shadows the builtin `reduce` within this module.
    """
    for batch in chain(batches, [None]):
        for step in steps:
            updates = step(batch, state)
            if updates:
                state.update(updates)
    return state
#define keys in the state dict as constants
MODEL = 'model'
LOSS = 'loss'
VALID_MODEL = 'valid_model'
OUTPUT = 'output'
OPTS = 'optimisers'
ACT_LOG = 'activation_log'
WEIGHT_LOG = 'weight_log'
#step definitions
def forward(training_mode):
    """Step factory: run the appropriate model on the batch and apply the loss graph.

    Uses the EMA/validation model when training_mode is False and one exists.
    """
    def step(batch, state):
        if not batch:
            return
        use_main = training_mode or (VALID_MODEL not in state)
        model = state[MODEL] if use_main else state[VALID_MODEL]
        # Switching train/eval mode is expensive; only do it on an actual change.
        if model.training != training_mode:
            model.train(training_mode)
        return {OUTPUT: state[LOSS](model(batch))}
    return step
def forward_tta(tta_transforms):
    """Step factory: average the model's logits over test-time augmentations."""
    def step(batch, state):
        if not batch:
            return
        model = state[VALID_MODEL] if VALID_MODEL in state else state[MODEL]
        if model.training:
            model.train(False)
        per_view = [model({'input': t(batch['input'].clone())})['logits'].detach()
                    for t in tta_transforms]
        logits = torch.stack(per_view, dim=0).mean(dim=0)
        return {OUTPUT: state[LOSS](dict(batch, logits=logits))}
    return step
def backward(dtype=None):
    """Step factory: zero grads on every call, then backprop the summed loss
    (optionally cast to `dtype` first, e.g. fp32 for stable accumulation)."""
    def step(batch, state):
        state[MODEL].zero_grad()
        if batch:
            loss = state[OUTPUT][LOSS]
            total = loss.sum() if dtype is None else loss.to(dtype).sum()
            total.backward()
    return step
def opt_steps(batch, state):
    """Step: advance every optimiser once per real batch (skips the final None)."""
    if batch:
        return {OPTS: [opt_step(**opt) for opt in state[OPTS]]}
    return None
def log_activations(node_names=('loss', 'acc')):
    """Step factory: record selected output nodes per batch; on the final
    (None) batch, collate everything into float64 numpy arrays under ACT_LOG.

    Args:
        node_names: keys of state[OUTPUT] to record each batch.
    """
    def step(batch, state):
        if '_tmp_logs_' not in state:
            state['_tmp_logs_'] = []
        if batch:
            state['_tmp_logs_'].extend((k, state[OUTPUT][k].detach()) for k in node_names)
        else:
            # np.float was removed in NumPy 1.24; the builtin float is the
            # exact drop-in replacement (both mean float64).
            res = {k: to_numpy(torch.cat(xs)).astype(float)
                   for k, xs in group_by_key(state['_tmp_logs_']).items()}
            del state['_tmp_logs_']
            return {ACT_LOG: res}
    return step
def epoch_stats(state):
    """Mean of each logged activation array for the finished epoch."""
    return {name: np.mean(values) for name, values in state[ACT_LOG].items()}
def update_ema(momentum, update_freq=1):
    """Step factory: exponential moving average of MODEL params into VALID_MODEL.

    Runs every `update_freq` batches; rho = momentum**update_freq keeps the
    effective decay consistent with updating on every batch.
    """
    n = iter(count())
    rho = momentum**update_freq
    def step(batch, state):
        if not batch: return
        if (next(n) % update_freq) != 0: return
        # In-place EMA over matching state_dict entries; relies on both
        # models exposing identical key order.
        for v, ema_v in zip(state[MODEL].state_dict().values(), state[VALID_MODEL].state_dict().values()):
            if not v.dtype.is_floating_point: continue #skip things like num_batches_tracked.
            ema_v *= rho
            ema_v += (1-rho)*v
    return step
default_train_steps = (forward(training_mode=True), log_activations(('loss', 'acc')), backward(), opt_steps)
default_valid_steps = (forward(training_mode=False), log_activations(('loss', 'acc')))
def train_epoch(state, timer, train_batches, valid_batches, train_steps=default_train_steps, valid_steps=default_valid_steps,
                on_epoch_end=(lambda state: state)):
    """Run one training epoch then a validation pass, returning timed summaries.

    Validation time is excluded from the running total (DAWNBench rules).
    Returns {'train': {...}, 'valid': {...}, 'total time': seconds}.
    """
    train_summary, train_time = epoch_stats(on_epoch_end(reduce(train_batches, state, train_steps))), timer()
    valid_summary, valid_time = epoch_stats(reduce(valid_batches, state, valid_steps)), timer(include_in_total=False) #DAWNBench rules
    return {
        'train': union({'time': train_time}, train_summary),
        'valid': union({'time': valid_time}, valid_summary),
        'total time': timer.total_time
    }
#on_epoch_end
def log_weights(state, weights):
    """Append a numpy snapshot of `weights` to the WEIGHT_LOG history in state."""
    history = state.setdefault(WEIGHT_LOG, [])
    history.append({k: to_numpy(v.data) for k, v in weights.items()})
    return state
def fine_tune_bn_stats(state, batches, model_key=VALID_MODEL):
    """Refresh batch-norm running statistics by forward passes in train mode
    (no backward pass, so weights are untouched)."""
    reduce(batches, {MODEL: state[model_key]}, [forward(True)])
    return state
#misc
def warmup_cudnn(model, loss, batch):
    """Run one forward+backward pass so cudnn.benchmark selects kernels before
    timing starts, then wait for the GPU to go idle."""
    #run forward and backward pass of the model
    #to allow benchmarking of cudnn kernels
    reduce([batch], {MODEL: model, LOSS: loss}, [forward(True), backward()])
    torch.cuda.synchronize()
#####################
## input whitening
#####################
def cov(X):
    """Sample covariance X^T X / (n - 1) for row-observation matrix X."""
    scaled = X / np.sqrt(X.size(0) - 1)
    return scaled.t() @ scaled
def patches(data, patch_size=(3, 3), dtype=torch.float32):
    """All h-by-w patches (stride 1) of `data`, flattened to (num_patches, C, h, w)."""
    h, w = patch_size
    channels = data.size(1)
    unfolded = data.unfold(2, h, 1).unfold(3, w, 1)
    return unfolded.transpose(1, 3).reshape(-1, channels, h, w).to(dtype)
def eigens(patches):
    """Eigendecomposition of the patch covariance, sorted by decreasing eigenvalue.

    Returns (eigenvalues, eigenvectors) with the eigenvectors reshaped to
    (c*h*w, c, h, w) so they can be used directly as convolution filters.
    """
    n, c, h, w = patches.shape
    Σ = cov(patches.reshape(n, c*h*w))
    # torch.symeig was removed in PyTorch 2.0; torch.linalg.eigh returns the
    # same ascending eigenvalues with eigenvectors in columns.
    Λ, V = torch.linalg.eigh(Σ)
    return Λ.flip(0), V.t().reshape(c*h*w, c, h, w).flip(0)
def whitening_filter(Λ, V, eps=1e-2):
    """Fixed (non-trainable) 3x3 whitening convolution built from the patch
    covariance eigendecomposition; eps regularises small eigenvalues."""
    filt = nn.Conv2d(3, 27, kernel_size=(3, 3), padding=(1, 1), bias=False)
    scale = torch.sqrt(Λ + eps)[:, None, None, None]
    filt.weight.data = V / scale
    filt.weight.requires_grad = False
    return filt
| [
"torch.cuda.synchronize",
"torch.sqrt",
"torch.cat",
"torchvision.datasets.CIFAR10",
"numpy.mean",
"torch.device",
"torch.ones",
"torch.utils.data.DataLoader",
"torch.nn.ReflectionPad2d",
"numpy.cumsum",
"torch.nn.functional.nll_loss",
"torch.nn.functional.log_softmax",
"torch.zeros",
"ite... | [((253, 272), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (265, 272), False, 'import torch\n'), ((871, 882), 'functools.lru_cache', 'cache', (['None'], {}), '(None)\n', (876, 882), True, 'from functools import lru_cache as cache\n'), ((5351, 5377), 'collections.namedtuple', 'namedtuple', (['"""Identity"""', '[]'], {}), "('Identity', [])\n", (5361, 5377), False, 'from collections import namedtuple\n'), ((5427, 5448), 'collections.namedtuple', 'namedtuple', (['"""Add"""', '[]'], {}), "('Add', [])\n", (5437, 5448), False, 'from collections import namedtuple\n'), ((5518, 5557), 'collections.namedtuple', 'namedtuple', (['"""AddWeighted"""', "['wx', 'wy']"], {}), "('AddWeighted', ['wx', 'wy'])\n", (5528, 5557), False, 'from collections import namedtuple\n'), ((7913, 7947), 'collections.namedtuple', 'namedtuple', (['"""CrossEntropyLoss"""', '[]'], {}), "('CrossEntropyLoss', [])\n", (7923, 7947), False, 'from collections import namedtuple\n'), ((8092, 8116), 'collections.namedtuple', 'namedtuple', (['"""KLLoss"""', '[]'], {}), "('KLLoss', [])\n", (8102, 8116), False, 'from collections import namedtuple\n'), ((8215, 8240), 'collections.namedtuple', 'namedtuple', (['"""Correct"""', '[]'], {}), "('Correct', [])\n", (8225, 8240), False, 'from collections import namedtuple\n'), ((8357, 8390), 'collections.namedtuple', 'namedtuple', (['"""LogSoftmax"""', "['dim']"], {}), "('LogSoftmax', ['dim'])\n", (8367, 8390), False, 'from collections import namedtuple\n'), ((10299, 10360), 'functools.partial', 'partial', (['optimiser'], {'update': 'LARS_update', 'state_init': 'zeros_like'}), '(optimiser, update=LARS_update, state_init=zeros_like)\n', (10306, 10360), False, 'from functools import partial\n'), ((10367, 10432), 'functools.partial', 'partial', (['optimiser'], {'update': 'nesterov_update', 'state_init': 'zeros_like'}), '(optimiser, update=nesterov_update, state_init=zeros_like)\n', (10374, 10432), False, 'from functools import partial\n'), ((325, 
338), 'torch.cat', 'torch.cat', (['xs'], {}), '(xs)\n', (334, 338), False, 'import torch\n'), ((751, 770), 'torch.flip', 'torch.flip', (['x', '[-1]'], {}), '(x, [-1])\n', (761, 770), False, 'import torch\n'), ((2798, 2912), 'numpy.cumsum', 'np.cumsum', (['([0] + [N // num_chunks + 1] * (N % num_chunks) + [N // num_chunks] * (\n num_chunks - N % num_chunks))'], {}), '([0] + [N // num_chunks + 1] * (N % num_chunks) + [N // num_chunks\n ] * (num_chunks - N % num_chunks))\n', (2807, 2912), True, 'import numpy as np\n'), ((2984, 3005), 'numpy.random.shuffle', 'np.random.shuffle', (['xs'], {}), '(xs)\n', (3001, 3005), True, 'import numpy as np\n'), ((10720, 10742), 'itertools.chain', 'chain', (['batches', '[None]'], {}), '(batches, [None])\n', (10725, 10742), False, 'from itertools import chain\n'), ((14895, 14919), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (14917, 14919), False, 'import torch\n'), ((15345, 15379), 'torch.symeig', 'torch.symeig', (['Σ'], {'eigenvectors': '(True)'}), '(Σ, eigenvectors=True)\n', (15357, 15379), False, 'import torch\n'), ((15489, 15553), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(27)'], {'kernel_size': '(3, 3)', 'padding': '(1, 1)', 'bias': '(False)'}), '(3, 27, kernel_size=(3, 3), padding=(1, 1), bias=False)\n', (15498, 15553), False, 'from torch import nn\n'), ((209, 234), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (232, 234), False, 'import torch\n'), ((479, 505), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['border'], {}), '(border)\n', (497, 505), False, 'from torch import nn\n'), ((2204, 2348), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'num_workers': 'num_workers', 'pin_memory': '(True)', 'shuffle': 'shuffle', 'drop_last': 'drop_last'}), '(dataset, batch_size=batch_size, num_workers=\n num_workers, pin_memory=True, shuffle=shuffle, drop_last=drop_last)\n', (2231, 2348), False, 'import torch\n'), ((5942, 5958), 
'torch.cat', 'torch.cat', (['xs', '(1)'], {}), '(xs, 1)\n', (5951, 5958), False, 'import torch\n'), ((8008, 8073), 'torch.nn.functional.nll_loss', 'torch.nn.functional.nll_loss', (['log_probs', 'target'], {'reduction': '"""none"""'}), "(log_probs, target, reduction='none')\n", (8036, 8073), False, 'import torch\n'), ((8435, 8494), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['x', 'self.dim'], {'_stacklevel': '(5)'}), '(x, self.dim, _stacklevel=5)\n', (8466, 8494), False, 'import torch\n'), ((9595, 9614), 'torch.zeros_like', 'torch.zeros_like', (['w'], {}), '(w)\n', (9611, 9614), False, 'import torch\n'), ((13010, 13020), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (13017, 13020), True, 'import numpy as np\n'), ((13112, 13119), 'itertools.count', 'count', ([], {}), '()\n', (13117, 13119), False, 'from itertools import count\n'), ((981, 1048), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'root', 'train': 'train', 'download': '(True)'}), '(root=root, train=train, download=True)\n', (1009, 1048), False, 'import torchvision\n'), ((1326, 1354), 'tensorflow.keras.datasets.cifar10.load_data', 'datasets.cifar10.load_data', ([], {}), '()\n', (1352, 1354), False, 'from tensorflow.keras import datasets\n'), ((6654, 6697), 'torch.zeros', 'torch.zeros', (['(num_features * self.num_splits)'], {}), '(num_features * self.num_splits)\n', (6665, 6697), False, 'import torch\n'), ((6741, 6783), 'torch.ones', 'torch.ones', (['(num_features * self.num_splits)'], {}), '(num_features * self.num_splits)\n', (6751, 6783), False, 'import torch\n'), ((7681, 7855), 'torch.nn.functional.batch_norm', 'nn.functional.batch_norm', (['input', 'self.running_mean[:self.num_features]', 'self.running_var[:self.num_features]', 'self.weight', 'self.bias', '(False)', 'self.momentum', 'self.eps'], {}), '(input, self.running_mean[:self.num_features], self\n .running_var[:self.num_features], self.weight, self.bias, False, self.\n momentum, 
self.eps)\n', (7705, 7855), False, 'from torch import nn\n'), ((8531, 8568), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (8550, 8568), False, 'from torch import nn\n'), ((15578, 15597), 'torch.sqrt', 'torch.sqrt', (['(Λ + eps)'], {}), '(Λ + eps)\n', (15588, 15597), False, 'import torch\n'), ((3395, 3411), 'torch.argsort', 'torch.argsort', (['i'], {}), '(i)\n', (3408, 3411), False, 'import torch\n'), ((12803, 12816), 'torch.cat', 'torch.cat', (['xs'], {}), '(xs)\n', (12812, 12816), False, 'import torch\n')] |
import argparse
import logging
import cPickle as pickle
from pdb import set_trace as st
from statistics import median, mean

from numpy import array, einsum, arccos, clip, newaxis, zeros, vstack
from numpy.linalg import norm
from numpy import triu_indices

from gensim.models.keyedvectors import KeyedVectors as vDB
load_vectors=vDB.load_word2vec_format
class streamer(object):
    """Iterable over a TSV corpus: yields the token list of each line's first column.

    Fix: the file handle is now opened with a context manager so it is closed
    after each full iteration instead of leaking.
    """
    def __init__(self, file_name):
        self.file_name = file_name

    def __iter__(self):
        with open(self.file_name) as fh:
            for line in fh:
                yield line.strip().split("\t")[0].split()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--embed", help="""Input file containing pretrained word embeddings.""", required=True)
parser.add_argument("--out", help="""Output file where to write angles and statistics.""", default=None)
parser.add_argument("--sents", help="""Input file containing a document sentence by row.""", required=True)
parser.add_argument("--amount", help="""Amount of inputs to process.""", default=10, type=int)
parser.add_argument("--bin", help="""Binary (word2vec only) or text emebdding format.""", action="store_true")
args = parser.parse_args()
logging.info("Fitting distances from: %s ...\n" % args.embed)
sents=streamer(args.sents)
embedding=load_vectors(args.embed, binary=args.bin, encoding="latin-1")
c=0
if args.out:
fo=open(args.out, "wb")
for s in sents:
if c >= args.amount:
break
sl=len(s) # sentece length
X=[]
for w in set(s):
try:
e = embedding[w]
X.append(e)
except KeyError:
#print ("Word OOv %s\n" % w)
continue
except:
print("No key error nut other stoped the program.")
exit()
X=array(X)
iu2 = triu_indices(X.shape[0], 1)
dotprod_mat=einsum('ij,kj->ik', X, X)
costheta = dotprod_mat / norm(X, axis=1)[:, newaxis]
costheta /= norm(X, axis=1)
angles=arccos(costheta)[iu2]
logging.info("Computed angles sentence %d ..." % c)
if not args.out:
print ("%d %0.4f %0.4f %0.4f %0.4f %s" % (sl, angles.mean(),
median(angles),
angles.max(),
angles.min(),
angles.tolist()))
else:
fo.write("%d\t%0.4f\t%0.4f\t%0.4f\t%0.4f\t%s\n" % (sl, angles.mean(),
median(angles),
angles.max(),
angles.min(),
str(angles.tolist())[1:]\
.strip("]")\
.replace(", "," ") ) )
c+=1
| [
"statistics.median",
"argparse.ArgumentParser",
"numpy.einsum",
"numpy.triu_indices",
"logging.info",
"numpy.array",
"numpy.linalg.norm",
"numpy.arccos"
] | [((592, 617), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (615, 617), False, 'import argparse, logging\n'), ((1200, 1261), 'logging.info', 'logging.info', (["('Fitting distances from: %s ...\\n' % args.embed)"], {}), "('Fitting distances from: %s ...\\n' % args.embed)\n", (1212, 1261), False, 'import argparse, logging\n'), ((1870, 1878), 'numpy.array', 'array', (['X'], {}), '(X)\n', (1875, 1878), False, 'from numpy import array, einsum, arccos, newaxis, zeros, vstack\n'), ((1902, 1929), 'numpy.triu_indices', 'triu_indices', (['X.shape[0]', '(1)'], {}), '(X.shape[0], 1)\n', (1914, 1929), False, 'from numpy import triu_indices\n'), ((1951, 1976), 'numpy.einsum', 'einsum', (['"""ij,kj->ik"""', 'X', 'X'], {}), "('ij,kj->ik', X, X)\n", (1957, 1976), False, 'from numpy import array, einsum, arccos, newaxis, zeros, vstack\n'), ((2058, 2073), 'numpy.linalg.norm', 'norm', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (2062, 2073), False, 'from numpy.linalg import norm\n'), ((2128, 2179), 'logging.info', 'logging.info', (["('Computed angles sentence %d ...' % c)"], {}), "('Computed angles sentence %d ...' % c)\n", (2140, 2179), False, 'import argparse, logging\n'), ((2089, 2105), 'numpy.arccos', 'arccos', (['costheta'], {}), '(costheta)\n', (2095, 2105), False, 'from numpy import array, einsum, arccos, newaxis, zeros, vstack\n'), ((2010, 2025), 'numpy.linalg.norm', 'norm', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (2014, 2025), False, 'from numpy.linalg import norm\n'), ((2330, 2344), 'statistics.median', 'median', (['angles'], {}), '(angles)\n', (2336, 2344), False, 'from statistics import median, mean\n'), ((2709, 2723), 'statistics.median', 'median', (['angles'], {}), '(angles)\n', (2715, 2723), False, 'from statistics import median, mean\n')] |
#!/usr/bin/python3
# ssr101b.py
# This moves camera toward shorter distance averaged.
# <NAME> 2021 12/3
# How to execute
# sudo pigpiod
# pyhton3 ssrXY.py
import modules.keyin as keyin # キーボード入力を監視するモジュール
import modules.rc3c as rc
import modules.vl53_6a as tof
import time
import numpy as np
SLEEP=0.1    # polling interval (s) — currently unused; the sleep below is commented out
PERIOD=0.3   # minimum interval (s) between console status updates
# Hardware setup: motor controller, four VL53 time-of-flight sensors, keyboard.
ssr3=rc.KeyAssign()
tofL,tofR,tofC,tofM=tof.start()  # left, right, centre sensors; M presumably a fourth mount — TODO confirm
key = keyin.Keyboard()
ch="c"
print("##################################")
print("Input q to stop.")
print("left,right,angl, distL,distC,distR,distM")
now=time.time()
init=now
start=now
# Main control loop: read keyboard and sensors, feed them to the controller,
# and print a throttled one-line status until 'q' is pressed.
while ch!="q":
    ch = key.read()
    try:
        distL=tofL.get_distance()
        distR=tofR.get_distance()
        distC=tofC.get_distance()
        distM=tofM.get_distance()
        # Geometric mean of side and centre distances.
        # NOTE(review): dL/dR are computed but never used below.
        dL=np.sqrt(distL*distC)
        dR=np.sqrt(distR*distC)
        left,right,angl=ssr3.update(ch,distL,distR)
        now=time.time()
        if now-start>PERIOD:
            # '\r' with end='' rewrites the same console line in place.
            print("\r %4d %4d %4d %5d %5d %5d %5d" % (left,right,angl,distL,distC,distR,distM),end='')
            start=now
        #time.sleep(SLEEP)
    except KeyboardInterrupt:
        # Ctrl-C: stop the motors before leaving the loop.
        ssr3.stop()
        break
print("\n Bye Bye!")
ssr3.stop()
| [
"modules.keyin.Keyboard",
"time.time",
"modules.rc3c.KeyAssign",
"modules.vl53_6a.start",
"numpy.sqrt"
] | [((324, 338), 'modules.rc3c.KeyAssign', 'rc.KeyAssign', ([], {}), '()\n', (336, 338), True, 'import modules.rc3c as rc\n'), ((359, 370), 'modules.vl53_6a.start', 'tof.start', ([], {}), '()\n', (368, 370), True, 'import modules.vl53_6a as tof\n'), ((378, 394), 'modules.keyin.Keyboard', 'keyin.Keyboard', ([], {}), '()\n', (392, 394), True, 'import modules.keyin as keyin\n'), ((526, 537), 'time.time', 'time.time', ([], {}), '()\n', (535, 537), False, 'import time\n'), ((736, 758), 'numpy.sqrt', 'np.sqrt', (['(distL * distC)'], {}), '(distL * distC)\n', (743, 758), True, 'import numpy as np\n'), ((766, 788), 'numpy.sqrt', 'np.sqrt', (['(distR * distC)'], {}), '(distR * distC)\n', (773, 788), True, 'import numpy as np\n'), ((847, 858), 'time.time', 'time.time', ([], {}), '()\n', (856, 858), False, 'import time\n')] |
"""
This package contains the main convex hull calculation function
to calculate the points of a convex hull from a given array of
points.
"""
import numpy as np
from myConvexHull.point_utils import *
from myConvexHull.dtype import *
from myConvexHull.line_utils import *
from enum import Enum
def convex_hull(points, base_line=None, direction=None) -> Points:
    # type: (Points, NullableLine, Direction) -> Points
    """
    Compute the convex hull of `points` by divide and conquer (quickhull).

    On the first call only `points` is given: the extreme points anchor the
    recursion and the set is split into upper and lower halves. On recursive
    calls, `base_line` is the chord being expanded and `direction` says
    whether the partial hull grows upwards or downwards.

    Returns an array of points tracing the hull boundary.
    """
    if base_line is None:
        # First iteration: anchor on the bottom-leftmost / top-rightmost points.
        (bottom_left, bl_idx) = _get_leftmost_point(points)
        (top_right, tr_idx) = _get_rightmost_point(points)
        points = np.delete(points, [bl_idx, tr_idx], axis=0)
        anchor: Line = (bottom_left, top_right)
        # Guard against a vertical anchor line (undefined slope).
        if is_vertical(anchor):
            above = np.ndarray([0, 2])
            below = np.ndarray([0, 2])
        else:
            (above, below) = split(points, anchor)
        upper_hull = convex_hull(above, anchor, Direction.UPWARDS)
        lower_hull = convex_hull(below, anchor, Direction.DOWNWARDS)
        return first_merge(bottom_left, upper_hull, top_right, lower_hull)

    # Recursive case: expand the base line toward the farthest point.
    if len(points) == 0:
        return np.ndarray([0, 2])
    if len(points) == 1:
        return points
    (apex, apex_idx) = get_farthest_point(points, base_line)
    points = np.delete(points, [apex_idx], axis=0)
    edge_a = (base_line[0], apex)
    edge_b = (apex, base_line[1])
    side_filter = get_upper_points if direction == Direction.UPWARDS else get_lower_points
    # Guard against vertical edges (undefined slope).
    if is_vertical(edge_a):
        hull_a = np.ndarray([0, 2])
    else:
        hull_a = convex_hull(side_filter(points, edge_a), edge_a, direction)
    if is_vertical(edge_b):
        hull_b = np.ndarray([0, 2])
    else:
        hull_b = convex_hull(side_filter(points, edge_b), edge_b, direction)
    return merge(hull_a, apex, hull_b, direction)
def split(points, line) -> tuple[Points, Points]:
    # type: (Points, Line) -> tuple[Points, Points]
    """
    Partition `points` into the subsets lying above and below `line`.

    Returns:
        A tuple (upper_points, lower_points).
    """
    return (get_upper_points(points, line), get_lower_points(points, line))
def first_merge(left_vertex, upper_vertices, right_vertex, lower_vertices) -> Points:
    # type: (Point, Points, Point, Points) -> Points
    """
    Close the hull after the first split: left anchor, upper chain, right
    anchor, lower chain, then back to the left anchor.

    Returns:
        An array of points tracing the full convex hull boundary.
    """
    hull = np.ndarray([0, 2])
    for part in ([left_vertex], upper_vertices, [right_vertex], lower_vertices, [left_vertex]):
        hull = np.append(hull, part, axis=0)
    return hull
def merge(left_vertices, mid_vertex, right_vertices, direction) -> Points:
    # type: (Point, Points, Point, Points) -> Points
    """
    Stitch two partial hulls around `mid_vertex`, ordered by `direction`
    (upwards hulls run left-to-right; downwards hulls right-to-left).

    Returns:
        An array of points forming this part of the hull boundary.
    """
    if direction == Direction.UPWARDS:
        ordered = (left_vertices, [mid_vertex], right_vertices)
    else:
        ordered = (right_vertices, [mid_vertex], left_vertices)
    hull = np.ndarray([0, 2])
    for part in ordered:
        hull = np.append(hull, part, axis=0)
    return hull
def random_color() -> str:
    # type: () -> str
    """
    Generates a random color in the form of string of RGB hex values.
    For example; `#ffffff`, `#123456`, `#a42c30`, etc.
    Returns:
        A random color in the form of string of RGB hex values.
    """
    # Draw all three channels at once; the ``:02x`` format spec
    # zero-pads each channel to two lowercase hex digits, replacing
    # the manual hex()/slicing/"0"-prefix logic.
    r, g, b = np.random.randint(0, 256, size=3)
    return f"#{r:02x}{g:02x}{b:02x}"
def _get_leftmost_point(points):
    # type: (Points) -> tuple[Point, int]
    """
    Gets the bottom-leftmost point and its index from a given
    set of points.
    Args:
        `points`: an array of points to get the bottom-leftmost of
    Returns:
        A tuple containing the bottom-leftmost point and its index,
        or None when `points` is empty.
    """
    if len(points) == 0:
        return None
    best_point: Point = None
    best_index = 0
    # Linear scan using the project's `less_than` ordering.
    for idx, candidate in enumerate(points):
        if best_point is None or less_than(candidate, best_point):
            best_point = candidate
            best_index = idx
    return (best_point, best_index)
def _get_rightmost_point(points):
    # type: (Points) -> tuple[Point, int]
    """
    Gets the top-rightmost point and its index from a given
    set of points.
    Args:
        `points`: an array of points to get the top-rightmost of
    Returns:
        A tuple containing the top-rightmost point and its index,
        or None when `points` is empty.
    """
    if len(points) == 0:
        return None
    best_point: Point = None
    best_index = 0
    # Linear scan using the project's `greater_than` ordering.
    for idx, candidate in enumerate(points):
        if best_point is None or greater_than(candidate, best_point):
            best_point = candidate
            best_index = idx
    return (best_point, best_index)
class Direction(Enum):
    """Vertical direction enumerated values."""
    # Upwards direction
    UPWARDS = 0
    # Downwards direction
    DOWNWARDS = 1
| [
"numpy.append",
"numpy.random.randint",
"numpy.ndarray",
"numpy.delete"
] | [((4428, 4446), 'numpy.ndarray', 'np.ndarray', (['[0, 2]'], {}), '([0, 2])\n', (4438, 4446), True, 'import numpy as np\n'), ((4458, 4496), 'numpy.append', 'np.append', (['hull', '[left_vertex]'], {'axis': '(0)'}), '(hull, [left_vertex], axis=0)\n', (4467, 4496), True, 'import numpy as np\n'), ((4508, 4547), 'numpy.append', 'np.append', (['hull', 'upper_vertices'], {'axis': '(0)'}), '(hull, upper_vertices, axis=0)\n', (4517, 4547), True, 'import numpy as np\n'), ((4559, 4598), 'numpy.append', 'np.append', (['hull', '[right_vertex]'], {'axis': '(0)'}), '(hull, [right_vertex], axis=0)\n', (4568, 4598), True, 'import numpy as np\n'), ((4610, 4649), 'numpy.append', 'np.append', (['hull', 'lower_vertices'], {'axis': '(0)'}), '(hull, lower_vertices, axis=0)\n', (4619, 4649), True, 'import numpy as np\n'), ((4661, 4699), 'numpy.append', 'np.append', (['hull', '[left_vertex]'], {'axis': '(0)'}), '(hull, [left_vertex], axis=0)\n', (4670, 4699), True, 'import numpy as np\n'), ((5407, 5425), 'numpy.ndarray', 'np.ndarray', (['[0, 2]'], {}), '([0, 2])\n', (5417, 5425), True, 'import numpy as np\n'), ((1600, 1634), 'numpy.delete', 'np.delete', (['points', '[index]'], {'axis': '(0)'}), '(points, [index], axis=0)\n', (1609, 1634), True, 'import numpy as np\n'), ((2554, 2599), 'numpy.delete', 'np.delete', (['points', '[lmindex, rmindex]'], {'axis': '(0)'}), '(points, [lmindex, rmindex], axis=0)\n', (2563, 2599), True, 'import numpy as np\n'), ((5480, 5518), 'numpy.append', 'np.append', (['hull', 'left_vertices'], {'axis': '(0)'}), '(hull, left_vertices, axis=0)\n', (5489, 5518), True, 'import numpy as np\n'), ((5534, 5571), 'numpy.append', 'np.append', (['hull', '[mid_vertex]'], {'axis': '(0)'}), '(hull, [mid_vertex], axis=0)\n', (5543, 5571), True, 'import numpy as np\n'), ((5587, 5626), 'numpy.append', 'np.append', (['hull', 'right_vertices'], {'axis': '(0)'}), '(hull, right_vertices, axis=0)\n', (5596, 5626), True, 'import numpy as np\n'), ((5652, 5691), 'numpy.append', 
'np.append', (['hull', 'right_vertices'], {'axis': '(0)'}), '(hull, right_vertices, axis=0)\n', (5661, 5691), True, 'import numpy as np\n'), ((5707, 5744), 'numpy.append', 'np.append', (['hull', '[mid_vertex]'], {'axis': '(0)'}), '(hull, [mid_vertex], axis=0)\n', (5716, 5744), True, 'import numpy as np\n'), ((5760, 5798), 'numpy.append', 'np.append', (['hull', 'left_vertices'], {'axis': '(0)'}), '(hull, left_vertices, axis=0)\n', (5769, 5798), True, 'import numpy as np\n'), ((1436, 1454), 'numpy.ndarray', 'np.ndarray', (['[0, 2]'], {}), '([0, 2])\n', (1446, 1454), True, 'import numpy as np\n'), ((1915, 1933), 'numpy.ndarray', 'np.ndarray', (['[0, 2]'], {}), '([0, 2])\n', (1925, 1933), True, 'import numpy as np\n'), ((2153, 2171), 'numpy.ndarray', 'np.ndarray', (['[0, 2]'], {}), '([0, 2])\n', (2163, 2171), True, 'import numpy as np\n'), ((2730, 2748), 'numpy.ndarray', 'np.ndarray', (['[0, 2]'], {}), '([0, 2])\n', (2740, 2748), True, 'import numpy as np\n'), ((2776, 2794), 'numpy.ndarray', 'np.ndarray', (['[0, 2]'], {}), '([0, 2])\n', (2786, 2794), True, 'import numpy as np\n'), ((6094, 6119), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (6111, 6119), True, 'import numpy as np\n'), ((6137, 6162), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (6154, 6162), True, 'import numpy as np\n'), ((6180, 6205), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (6197, 6205), True, 'import numpy as np\n')] |
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
"""
Spectral Residual algorithm for anomaly detection
"""
import logging
import numpy as np
from merlion.models.anomaly.base import DetectorConfig, DetectorBase
from merlion.transform.resample import TemporalResample
from merlion.utils import TimeSeries, UnivariateTimeSeries
logger = logging.getLogger(__name__)
class SpectralResidualConfig(DetectorConfig):
    """
    Config class for `SpectralResidual` anomaly detector.
    """
    # Default transform: temporal resampling with granularity=None.
    _default_transform = TemporalResample(granularity=None)
    def __init__(self, local_wind_sz=21, q=3, estimated_points=5, predicting_points=5, target_seq_index=None, **kwargs):
        r"""
        :param local_wind_sz: Number of previous saliency points to consider when computing the anomaly score
        :param q: Window size of local frequency average computations
        :param estimated_points: Number of padding points to add to the timeseries for saliency map calculations.
        :param predicting_points: Number of points to consider when computing gradient for padding points
        :param target_seq_index: Index of the univariate whose anomalies we want to detect.
        The Saliency Map is computed as follows:
        .. math::
            R(f) &= \log(A(\mathscr{F}(\textbf{x}))) - \left(\frac{1}{q}\right)_{1 \times q}
            * (A(\mathscr{F}(\textbf{x})) \\
            S_m &= \mathscr{F}^{-1} (R(f))
        where :math:`*` is the convolution operator, and :math:`\mathscr{F}` is the Fourier Transform.
        The anomaly scores then are computed as:
        .. math::
            S(x) = \frac{S(x) - \overline{S(\textbf{x})}}{\overline{S(\textbf{x})}}
        where :math:`\textbf{x}` are the last ``local_wind_sz`` points in the timeseries.
        The ``estimated_points`` and ``predicting_points`` parameters are used to pad the end of the timeseries with reasonable
        values. This is done so that the later points in the timeseries are in the middle of averaging windows rather
        than in the end.
        """
        self.estimated_points = estimated_points  # padding appended before the FFT
        self.q = q  # width of the log-amplitude moving-average window
        self.predicting_points = predicting_points  # points used to estimate the padding gradient
        self.local_wind_sz = local_wind_sz  # saliency history length used when scoring
        self.target_seq_index = target_seq_index  # which univariate to score (set during train() when univariate)
        super().__init__(**kwargs)
class SpectralResidual(DetectorBase):
    """
    Spectral Residual Algorithm for Anomaly Detection.
    Spectral Residual Anomaly Detection algorithm based on the algorithm described by
    `Ren et al. (2019) <https://arxiv.org/abs/1906.03821>`__. After taking the frequency spectrum, compute the
    log deviation from the mean. Use inverse fourier transform to obtain the saliency map. Anomaly scores
    for a point in the time series are obtained by comparing the saliency score of the point to the
    average of the previous points.
    """
    config_class = SpectralResidualConfig
    def __init__(self, config: SpectralResidualConfig = None):
        super().__init__(SpectralResidualConfig() if config is None else config)
        # Averaging kernel of length q, pre-normalized (sums to 1).
        self.q_conv_map = np.ones(self.config.q) / self.config.q
        self.local_wind_sz = self.config.local_wind_sz
        # Unnormalized kernel for summing previous saliency values; it is
        # normalized per-position in get_anomaly_score (see the np.where below).
        self.local_conv_map = np.ones(self.local_wind_sz)
        self.train_data = None
    @property
    def target_seq_index(self) -> int:
        # Index of the univariate whose anomalies are detected.
        return self.config.target_seq_index
    def _get_saliency_map(self, values: np.array) -> np.array:
        # Saliency map: inverse FFT of (log-amplitude minus its local
        # average), keeping the original phases.
        transform = np.fft.fft(values)
        log_amps = np.log(np.abs(transform))
        phases = np.angle(transform)
        avg_log_amps = np.convolve(log_amps, self.q_conv_map, mode="same")  # approximation
        residuals = log_amps - avg_log_amps
        saliency_map = np.abs(np.fft.ifft(np.exp(residuals + 1j * phases)))
        return saliency_map
    def _compute_grad(self, values: np.array) -> int:
        # Average slope between the last point and each of the preceding
        # m points; used to extrapolate padding values.
        m = min(self.config.predicting_points, values.shape[0] - 1)
        x_n = values[-1]
        a = x_n - np.copy(values[-m - 1 : -1])
        b = np.flip(np.arange(1, m + 1))
        averages = a / b
        return np.average(averages)
    def _pad(self, values: np.array) -> np.array:
        # Pad the series tail with a single extrapolated value so that the
        # last real points sit in the middle of the averaging windows.
        grad = self._compute_grad(values)
        m = min(self.config.predicting_points, values.shape[0] - 1)
        item = values[-m] + grad * m
        return np.pad(values, ((0, self.config.estimated_points),), constant_values=item)
    def get_anomaly_score(self, time_series: TimeSeries, time_series_prev: TimeSeries = None) -> TimeSeries:
        time_series, time_series_prev = self.transform_time_series(time_series, time_series_prev)
        univariate_time_series: UnivariateTimeSeries = time_series.univariates[time_series.names[self.target_seq_index]]
        prev_values: UnivariateTimeSeries = (
            time_series_prev.univariates[time_series_prev.names[self.target_seq_index]].copy()
            if time_series_prev
            else UnivariateTimeSeries.empty()
        )
        train_prev_len = prev_values.shape[0]
        values = prev_values
        values = values.concat(univariate_time_series).np_values
        # Pad, compute the saliency map, then strip the padded tail.
        padded_values = self._pad(values) if self.config.estimated_points > 0 else values
        saliency_map = self._get_saliency_map(padded_values)
        if self.config.estimated_points > 0:
            saliency_map = saliency_map[: -self.config.estimated_points]
        # Running average of the previous saliency values; `a` holds the
        # effective window length at each position (capped at local_wind_sz).
        average_values = np.convolve(saliency_map, self.local_conv_map, mode="full")[: values.shape[0]]
        a = np.arange(1, average_values.shape[0] + 1)
        a = np.where(a > self.local_wind_sz, self.local_wind_sz, a)
        average_values = (average_values / a)[:-1]
        # Relative deviation of each saliency value from the running
        # average; 1e-8 guards against division by zero. First score is 0.
        output_values = np.append(np.asarray([0.0]), (saliency_map[1:] - average_values) / (average_values + 1e-8))
        # Drop scores belonging to time_series_prev.
        result_values = output_values[train_prev_len:]
        return TimeSeries(
            {"anom_score": UnivariateTimeSeries(time_stamps=univariate_time_series.time_stamps, values=result_values)}
        )
    def train(
        self, train_data: TimeSeries, anomaly_labels: TimeSeries = None, train_config=None, post_rule_train_config=None
    ) -> TimeSeries:
        train_data = self.train_pre_process(train_data, require_even_sampling=True, require_univariate=False)
        # For univariate data the target index is unambiguous.
        if train_data.dim == 1:
            self.config.target_seq_index = 0
        elif self.target_seq_index is None:
            raise RuntimeError(
                f"Attempting to use the SR algorithm on a {train_data.dim}-variable "
                f"time series, but didn't specify a `target_seq_index` "
                f"indicating which univariate is the target."
            )
        assert 0 <= self.target_seq_index < train_data.dim, (
            f"Expected `target_seq_index` to be between 0 and {train_data.dim} "
            f"(the dimension of the transformed data), but got {self.target_seq_index}"
        )
        train_scores = self.get_anomaly_score(train_data)
        self.train_post_rule(
            anomaly_scores=train_scores, anomaly_labels=anomaly_labels, post_rule_train_config=post_rule_train_config
        )
        return train_scores
| [
"numpy.pad",
"numpy.average",
"numpy.abs",
"numpy.copy",
"merlion.transform.resample.TemporalResample",
"numpy.angle",
"numpy.fft.fft",
"numpy.asarray",
"numpy.ones",
"merlion.utils.UnivariateTimeSeries",
"numpy.where",
"numpy.arange",
"numpy.exp",
"merlion.utils.UnivariateTimeSeries.empty... | [((507, 534), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (524, 534), False, 'import logging\n'), ((683, 717), 'merlion.transform.resample.TemporalResample', 'TemporalResample', ([], {'granularity': 'None'}), '(granularity=None)\n', (699, 717), False, 'from merlion.transform.resample import TemporalResample\n'), ((3363, 3390), 'numpy.ones', 'np.ones', (['self.local_wind_sz'], {}), '(self.local_wind_sz)\n', (3370, 3390), True, 'import numpy as np\n'), ((3604, 3622), 'numpy.fft.fft', 'np.fft.fft', (['values'], {}), '(values)\n', (3614, 3622), True, 'import numpy as np\n'), ((3685, 3704), 'numpy.angle', 'np.angle', (['transform'], {}), '(transform)\n', (3693, 3704), True, 'import numpy as np\n'), ((3728, 3779), 'numpy.convolve', 'np.convolve', (['log_amps', 'self.q_conv_map'], {'mode': '"""same"""'}), "(log_amps, self.q_conv_map, mode='same')\n", (3739, 3779), True, 'import numpy as np\n'), ((4222, 4242), 'numpy.average', 'np.average', (['averages'], {}), '(averages)\n', (4232, 4242), True, 'import numpy as np\n'), ((4456, 4530), 'numpy.pad', 'np.pad', (['values', '((0, self.config.estimated_points),)'], {'constant_values': 'item'}), '(values, ((0, self.config.estimated_points),), constant_values=item)\n', (4462, 4530), True, 'import numpy as np\n'), ((5619, 5660), 'numpy.arange', 'np.arange', (['(1)', '(average_values.shape[0] + 1)'], {}), '(1, average_values.shape[0] + 1)\n', (5628, 5660), True, 'import numpy as np\n'), ((5673, 5728), 'numpy.where', 'np.where', (['(a > self.local_wind_sz)', 'self.local_wind_sz', 'a'], {}), '(a > self.local_wind_sz, self.local_wind_sz, a)\n', (5681, 5728), True, 'import numpy as np\n'), ((3239, 3261), 'numpy.ones', 'np.ones', (['self.config.q'], {}), '(self.config.q)\n', (3246, 3261), True, 'import numpy as np\n'), ((3649, 3666), 'numpy.abs', 'np.abs', (['transform'], {}), '(transform)\n', (3655, 3666), True, 'import numpy as np\n'), ((4112, 4138), 
'numpy.copy', 'np.copy', (['values[-m - 1:-1]'], {}), '(values[-m - 1:-1])\n', (4119, 4138), True, 'import numpy as np\n'), ((4161, 4180), 'numpy.arange', 'np.arange', (['(1)', '(m + 1)'], {}), '(1, m + 1)\n', (4170, 4180), True, 'import numpy as np\n'), ((5051, 5079), 'merlion.utils.UnivariateTimeSeries.empty', 'UnivariateTimeSeries.empty', ([], {}), '()\n', (5077, 5079), False, 'from merlion.utils import TimeSeries, UnivariateTimeSeries\n'), ((5528, 5587), 'numpy.convolve', 'np.convolve', (['saliency_map', 'self.local_conv_map'], {'mode': '"""full"""'}), "(saliency_map, self.local_conv_map, mode='full')\n", (5539, 5587), True, 'import numpy as np\n'), ((5814, 5831), 'numpy.asarray', 'np.asarray', (['[0.0]'], {}), '([0.0])\n', (5824, 5831), True, 'import numpy as np\n'), ((3884, 3917), 'numpy.exp', 'np.exp', (['(residuals + 1.0j * phases)'], {}), '(residuals + 1.0j * phases)\n', (3890, 3917), True, 'import numpy as np\n'), ((6007, 6102), 'merlion.utils.UnivariateTimeSeries', 'UnivariateTimeSeries', ([], {'time_stamps': 'univariate_time_series.time_stamps', 'values': 'result_values'}), '(time_stamps=univariate_time_series.time_stamps, values\n =result_values)\n', (6027, 6102), False, 'from merlion.utils import TimeSeries, UnivariateTimeSeries\n')] |
import math
import time
import copy
import numpy as np
from mcts import MCTS
from node import Node
from collections import OrderedDict
# TODO Implementation is very similar to mcts. Notable exception is children,
# which is now a dict and subgoal related things. Still, needs generalization.
class SMCTS(MCTS):
    """Subgoal-based Monte Carlo Tree Search.

    Extends `MCTS` with macro actions: instead of expanding one
    primitive action per child, `_expand` rolls out random action
    sequences until a subgoal (or terminal state) is reached, and
    stores the whole sequence on the child node.
    """
    def __init__(
            self,
            env,
            gamma=.9,
            c=.4,
            action_coverage=.9,
            err_tolerance=.1,
            horizon=4):
        super(SMCTS, self).__init__(env)
        self.horizon = horizon
        # Number of repeated-discovery hits required before a node is
        # considered fully expanded (log ratio of tolerance/coverage).
        self.threshold = math.ceil(
            math.log(err_tolerance) / math.log(action_coverage)
        )
        self.root_node.children = OrderedDict()
        self.root_node.actions = []
        self.root_node.action = None
    def _is_subgoal(self, obs):
        """TODO: This is a default _is_subgoal implementation (in this case for
        grid world).
        """
        # An observation of gym minigrid has a 7x7 viewport by default. It is
        # also in reverse, i.e. to get the grid in front of the agent we need
        # to use index -1.
        # We define the corners of the grid as subgoals. Additionally, we make
        # sure that the same corner with multiple directions is not a subgoal
        # by simply ignoring half of the possible directions.
        direction = obs['direction']
        if direction == 3 or direction == 2:
            return False
        img = obs['image']
        forward_wall = img[3][-1][0] == 1 and img[3][-2][0] == 2
        left_wall = img[2][-1][0] == 2
        right_wall = img[4][-1][0] == 2
        return forward_wall and (left_wall or right_wall)
    def _gen_node(self, parent_node, env, action, reward, done):
        # Build a child node holding its own env copy and the (discounted)
        # reward accumulated along the macro action that reached it.
        node = Node()
        node.env = env
        node.action = action
        node.reward = reward
        node.is_terminal = done
        node.parent = parent_node
        node.calc_hash()
        node.children = OrderedDict()
        return node
    def _expand(self, parent_node, curr_depth, horizon=4):
        """Refer to Algorithm 1 of the SMCTS paper.
        Discover new or known macro actions until confidence is reached (the
        threshold).
        Horizon makes sure that we do not visit the goal instantly on empty
        grid world, which results in long action sets.

        Returns the hash of a newly discovered child, or None once the
        node is deemed fully expanded.
        """
        # NOTE(review): the `horizon` parameter is unused; the loop below
        # reads self.horizon instead — confirm which one is intended.
        n = 0
        while n < self.threshold:
            actions = []
            curr_reward = 0
            env = copy.copy(parent_node.env)
            i = 0
            while True:
                i += 1
                if i > self.horizon:
                    break
                action = np.random.choice(env.actions)
                obs, reward, done, _ = env.step(action)
                # TODO Make this a wrapper. (see StatePenalty)
                # if reward == 0:
                #     reward = -0.01
                actions.append(action)
                # Discount relative to the absolute tree depth.
                curr_reward += reward * self.gamma ** curr_depth
                curr_depth += 1
                if self._is_subgoal(obs) or done:
                    new_node = self._gen_node(
                        parent_node,
                        copy.copy(env),
                        action,
                        curr_reward,
                        done
                    )
                    new_node.actions = actions
                    node_ = parent_node.children.get(new_node._hash)
                    if node_ is not None:
                        # Known subgoal rediscovered: count it towards the
                        # confidence threshold and keep the better path.
                        n += 1
                        if curr_reward > node_.reward:
                            parent_node.children[new_node._hash].actions = actions  # noqa: E501
                            parent_node.children[new_node._hash].reward = curr_reward  # noqa: E501
                        break
                    else:
                        # New subgoal: register it and stop expanding.
                        parent_node.children[new_node._hash] = new_node
                        self.Q[new_node] = 0
                        self.visits[new_node] = 0
                        return new_node._hash
        parent_node.is_fully_expanded = True
        return None
    def _get_best_node(self, parent_node):
        # TODO Support not only UCB, but also eps greedy and Boltzmann.
        # Collect all children tied for the maximum UCB and pick one of
        # them uniformly at random.
        children = []
        max_ucb = max(
            self._ucb(parent_node, child_node)
            for child_node in parent_node.children.values()
        )
        for child_node in parent_node.children.values():
            ucb_child = self._ucb(parent_node, child_node)
            if ucb_child >= max_ucb:
                children.append(child_node)
        return children[np.random.choice(len(children))]
    def select_expand(self, curr_depth):
        """Select best node until finding a node that is not fully expanded.
        Expand it and return the expanded node (together with length of path
        for gamma).
        """
        path = []
        curr_node = self.root_node
        while True:
            if curr_node.is_terminal:
                break
            if curr_node.is_fully_expanded:
                curr_node = self._get_best_node(curr_node)
                path.extend(curr_node.actions)
            else:
                node_hash = self._expand(curr_node, curr_depth)
                if node_hash is not None:
                    child_node = curr_node.children[node_hash]
                    path.extend(child_node.actions)
                    return child_node, len(path)
        return curr_node, len(path)
    def backup(self, curr_node, q_val):
        # Propagate q_val up to the root, re-discounting each ancestor's
        # stored reward by its distance (in primitive actions) from root.
        curr_node_temp = copy.copy(curr_node)
        total_path_len = 0
        # First pass: total primitive-action length of the path to root.
        while curr_node is not None:
            total_path_len += len(curr_node.actions)
            curr_node = curr_node.parent
        curr_node = curr_node_temp
        # Second pass: accumulate discounted rewards and update Q/visits.
        while curr_node is not None:
            total_path_len -= len(curr_node.actions)
            discount = self.gamma ** total_path_len
            q_val += curr_node.reward * discount
            self.Q[curr_node] += q_val
            self.visits[curr_node] += 1
            curr_node = curr_node.parent
    def run(self, n_iter=100, max_actions=100):
        """Run the search, committing to the best macro action after every
        n_iter simulations, for at most max_actions commitments.

        Returns the executed action list and the elapsed wall-clock time.
        """
        actions = []
        start_time = time.time()
        total_depth = 0
        # NOTE(review): loop variable `j` is unused.
        for j in range(max_actions):
            for _ in range(n_iter):
                node, path_len = self.select_expand(total_depth)
                q_val = self.simulate(node, path_len + total_depth)
                self.backup(node, q_val)
            curr_node = self.root_node
            curr_node = self._get_best_node(curr_node)
            # Commit: execute the chosen macro action in the real env and
            # re-root the tree at the chosen child.
            for action in curr_node.actions:
                self.env.step(action)
            self.root_node = curr_node
            curr_node.parent = None
            total_depth += len(curr_node.actions)
            actions.extend(curr_node.actions)
            if curr_node.is_terminal:
                break
        return actions, time.time() - start_time
| [
"node.Node",
"copy.copy",
"time.time",
"numpy.random.choice",
"collections.OrderedDict",
"math.log"
] | [((710, 723), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (721, 723), False, 'from collections import OrderedDict\n'), ((1762, 1768), 'node.Node', 'Node', ([], {}), '()\n', (1766, 1768), False, 'from node import Node\n'), ((1965, 1978), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1976, 1978), False, 'from collections import OrderedDict\n'), ((5513, 5533), 'copy.copy', 'copy.copy', (['curr_node'], {}), '(curr_node)\n', (5522, 5533), False, 'import copy\n'), ((6131, 6142), 'time.time', 'time.time', ([], {}), '()\n', (6140, 6142), False, 'import time\n'), ((2470, 2496), 'copy.copy', 'copy.copy', (['parent_node.env'], {}), '(parent_node.env)\n', (2479, 2496), False, 'import copy\n'), ((614, 637), 'math.log', 'math.log', (['err_tolerance'], {}), '(err_tolerance)\n', (622, 637), False, 'import math\n'), ((640, 665), 'math.log', 'math.log', (['action_coverage'], {}), '(action_coverage)\n', (648, 665), False, 'import math\n'), ((2651, 2680), 'numpy.random.choice', 'np.random.choice', (['env.actions'], {}), '(env.actions)\n', (2667, 2680), True, 'import numpy as np\n'), ((6850, 6861), 'time.time', 'time.time', ([], {}), '()\n', (6859, 6861), False, 'import time\n'), ((3167, 3181), 'copy.copy', 'copy.copy', (['env'], {}), '(env)\n', (3176, 3181), False, 'import copy\n')] |
import matplotlib.pyplot as plt
import numpy as np
# Scatter N random points: triangles for points inside radius r0,
# circles for points outside, then trace the circular boundary.
N = 100
r0 = 0.6
x = 0.9 * np.random.rand(N)
y = 0.9 * np.random.rand(N)
area = np.pi * (10 * np.random.rand(N)) ** 2  # 0 to 10 point radii
c = np.sqrt(area)
r = np.sqrt(x * x + y * y)
# Each mask hides the points belonging to the *other* region.
inner_area = np.ma.masked_where(r < r0, area)
outer_area = np.ma.masked_where(r >= r0, area)
plt.scatter(x, y, s=inner_area, marker='^', c=c)
plt.scatter(x, y, s=outer_area, marker='o', c=c)
# Show the boundary between the regions:
theta = np.arange(0, np.pi / 2, 0.01)
plt.plot(r0 * np.cos(theta), r0 * np.sin(theta))
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.ma.masked_where",
"matplotlib.pyplot.scatter",
"numpy.sin",
"numpy.arange",
"numpy.cos",
"numpy.random.rand",
"numpy.sqrt"
] | [((189, 202), 'numpy.sqrt', 'np.sqrt', (['area'], {}), '(area)\n', (196, 202), True, 'import numpy as np\n'), ((207, 229), 'numpy.sqrt', 'np.sqrt', (['(x * x + y * y)'], {}), '(x * x + y * y)\n', (214, 229), True, 'import numpy as np\n'), ((234, 266), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(r < r0)', 'area'], {}), '(r < r0, area)\n', (252, 266), True, 'import numpy as np\n'), ((275, 308), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(r >= r0)', 'area'], {}), '(r >= r0, area)\n', (293, 308), True, 'import numpy as np\n'), ((309, 352), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'s': 'area1', 'marker': '"""^"""', 'c': 'c'}), "(x, y, s=area1, marker='^', c=c)\n", (320, 352), True, 'import matplotlib.pyplot as plt\n'), ((353, 396), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'s': 'area2', 'marker': '"""o"""', 'c': 'c'}), "(x, y, s=area2, marker='o', c=c)\n", (364, 396), True, 'import matplotlib.pyplot as plt\n'), ((446, 475), 'numpy.arange', 'np.arange', (['(0)', '(np.pi / 2)', '(0.01)'], {}), '(0, np.pi / 2, 0.01)\n', (455, 475), True, 'import numpy as np\n'), ((520, 530), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (528, 530), True, 'import matplotlib.pyplot as plt\n'), ((77, 94), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (91, 94), True, 'import numpy as np\n'), ((103, 120), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (117, 120), True, 'import numpy as np\n'), ((486, 499), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (492, 499), True, 'import numpy as np\n'), ((504, 517), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (510, 517), True, 'import numpy as np\n'), ((140, 157), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (154, 157), True, 'import numpy as np\n')] |
"""Test basic handler functions."""
import copy
from six import text_type
from webtest import AppError
from webtest import TestApp as App
import numpy as np
from pydap.model import BaseType, StructureType, SequenceType
from pydap.lib import walk
from pydap.exceptions import ConstraintExpressionError
from pydap.handlers.lib import (
load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError,
apply_selection, apply_projection, ConstraintExpression,
IterData)
from pydap.parsers import parse_projection
from pydap.tests.datasets import (
SimpleArray, SimpleSequence, SimpleGrid, VerySimpleSequence,
NestedSequence, SimpleStructure)
import unittest
class TestHandlersLib(unittest.TestCase):
    """Test handler loading."""
    def test_load_handlers(self):
        """Test that handlers can be loaded correctly.
        We use a mock working set, since by default no handlers are installed
        with pydap.
        """
        handlers = load_handlers(MockWorkingSet())
        # assertIn gives a clearer failure message than assertTrue(x in y).
        self.assertIn(MockHandler, handlers)
    def test_get_handler(self):
        """Test that we can load a specific handler."""
        handlers = load_handlers(MockWorkingSet())
        handler = get_handler("file.foo", handlers)
        self.assertIsInstance(handler, MockHandler)
    def test_no_handler_available(self):
        """Test exception raised when file not supported."""
        with self.assertRaises(ExtensionNotSupportedError):
            get_handler("file.bar")
class TestBaseHandler(unittest.TestCase):
    """Test the base handler as a WSGI app."""
    def setUp(self):
        """Create a basic WSGI app."""
        self.app = App(MockHandler(SimpleArray))
    def test_unconstrained_das(self):
        """DAS responses are always unconstrained."""
        res = self.app.get("/.dds")
        self.assertEqual(res.text, """Dataset {
    Byte byte[byte = 5];
    String string[string = 2];
    Int16 short;
} SimpleArray;
""")
        # A projection in the query string restricts the DDS...
        res = self.app.get("/.dds?byte")
        self.assertEqual(res.text, """Dataset {
    Byte byte[byte = 5];
} SimpleArray;
""")
        res = self.app.get("/.das")
        das = res.text
        self.assertEqual(das, """Attributes {
    byte {
    }
    string {
    }
    short {
    }
}
""")
        # ...but check that DAS is unmodified with constraint expression
        res = self.app.get("/.das?byte")
        self.assertEqual(res.text, das)
    def test_exception(self):
        """
        Test exception handling.
        By default pydap will capture all exceptions and return a formatted
        error response.
        """
        with self.assertRaises(AppError):
            self.app.get("/.foo")
    def test_exception_non_captured(self):
        """Test exception handling when not captured."""
        # x-wsgiorg.throw_errors makes the handler propagate exceptions
        # instead of rendering an error response.
        app = App(MockHandler(SimpleArray), extra_environ={
            "x-wsgiorg.throw_errors": True})
        with self.assertRaises(KeyError):
            app.get("/.foo")
    def test_missing_dataset(self):
        """Test exception when dataset is not set."""
        app = App(MockHandler(), extra_environ={
            "x-wsgiorg.throw_errors": True})
        with self.assertRaises(NotImplementedError):
            app.get("/.dds")
class TestApplySelection(unittest.TestCase):
    """Test function that applies selections to the dataset."""
    def setUp(self):
        """Build our own sequence.
        pydap uses lightweight copies of objects that share data. This breaks
        unit tests since the same objects are reused for tests.
        """
        # make a dataset with its own data
        self.dataset = copy.copy(SimpleSequence)
        self.dataset.cast.data = SimpleSequence.cast.data.copy()
    def test_no_selection(self):
        """Test no selection in the query string."""
        # An empty selection must leave the data untouched.
        dataset = apply_selection("", self.dataset)
        np.testing.assert_array_equal(
            dataset.cast.data, self.dataset.cast.data)
    def test_simple_selection(self):
        """Test a simple selection applied to the dataset."""
        dataset = apply_selection(["cast.lon=100"], self.dataset)
        np.testing.assert_array_equal(
            dataset.cast.data,
            self.dataset.cast.data[self.dataset.cast.data["lon"] == 100])
    def test_multiple_selections(self):
        """Test multiple selections applied to dataset."""
        # Multiple selections behave like chained boolean filters.
        dataset = apply_selection(
            ["cast.lon=100", "cast.lat>0"], self.dataset)
        np.testing.assert_array_equal(
            dataset.cast.data,
            self.dataset.cast.data[
                self.dataset.cast.data["lon"] == 100
            ][
                self.dataset.cast.data["lat"] > 0
            ])
class TestApplyProjectionGrid(unittest.TestCase):
    """Test applying projections on a dataset with a grid."""
    def setUp(self):
        """Build dataset with no shared data."""
        self.dataset = copy.copy(SimpleGrid)
        # Copy every leaf array so tests cannot affect each other.
        for var in walk(self.dataset, BaseType):
            var.data = var.data.copy()
    def test_no_projection(self):
        """Test no projections."""
        # An empty projection yields a dataset with no children.
        dataset = apply_projection("", self.dataset)
        self.assertEqual(list(dataset.children()), [])
    def test_simple_projection(self):
        """Test simple projections."""
        dataset = apply_projection(parse_projection("x"), self.dataset)
        self.assertEqual(list(dataset.keys()), ["x"])
    def test_simple_projection_with_index(self):
        """Test simple projections."""
        dataset = apply_projection(parse_projection("x[1]"), self.dataset)
        np.testing.assert_array_equal(
            dataset.x.data, [1])
    def test_array(self):
        """Test that the grid degenerates into a structure."""
        dataset = apply_projection(
            parse_projection("SimpleGrid.SimpleGrid"), self.dataset)
        self.assertIsInstance(dataset.SimpleGrid, StructureType)
    def test_array_slice(self):
        """Test slices applied to a grid."""
        # Slicing the grid must slice the array and its maps consistently.
        dataset = apply_projection(
            parse_projection("SimpleGrid[1]"), self.dataset)
        np.testing.assert_array_equal(
            dataset.SimpleGrid.x.data, self.dataset.SimpleGrid[1].x.data)
        np.testing.assert_array_equal(
            dataset.SimpleGrid.y.data, self.dataset.SimpleGrid[1].y.data)
        np.testing.assert_array_equal(
            dataset.SimpleGrid.SimpleGrid.data,
            self.dataset.SimpleGrid[1:2].SimpleGrid.data)
class TestApplyProjectionSequence(unittest.TestCase):
    """Test applying projections on a dataset with a sequence."""
    def setUp(self):
        """Build dataset with no shared data."""
        self.dataset = copy.copy(VerySimpleSequence)
        self.dataset.sequence.data = VerySimpleSequence.sequence.data.copy()
    def test_sequence_projection(self):
        """Test projection slicing on sequences."""
        # An integer index on a sequence selects a single record.
        dataset = apply_projection(
            parse_projection("sequence[2]"), self.dataset)
        np.testing.assert_array_equal(
            dataset.sequence.data, VerySimpleSequence.sequence.data[2])
class TestInvalidProjection(unittest.TestCase):
    """Test applying a projection to a structure object."""
    def test_structure_projection(self):
        """Test projection slicing on a structure."""
        # Structures are not indexable, so slicing one must fail.
        with self.assertRaises(ConstraintExpressionError):
            apply_projection(parse_projection("types[0]"), SimpleStructure)
class TestConstraintExpression(unittest.TestCase):
    """Test the constraint expression object."""
    def test_str(self):
        """The string representation should echo the expression."""
        self.assertEqual(str(ConstraintExpression("a>1")), "a>1")
    def test_unicode(self):
        """The unicode representation should echo the expression."""
        self.assertEqual(text_type(ConstraintExpression("a>1")), "a>1")
    def test_and(self):
        """ANDed expressions are joined with ``&``."""
        combined = ConstraintExpression("a>1") & ConstraintExpression("b>0")
        self.assertEqual(str(combined), "a>1&b>0")
    def test_or(self):
        """Expressions cannot be ORed."""
        with self.assertRaises(ConstraintExpressionError):
            ConstraintExpression("a>1") | ConstraintExpression("b>0")
class TestIterData(unittest.TestCase):
    """
    Test the ``IterData`` class, used to store flat/nested sequence data.
    A flat ``IterData`` should behave like a Numpy structured array, except
    all operations are stored to be lazily evaluated when the object is
    iterated over.
    """
    def setUp(self):
        """Create a flat IterData."""
        # Sequence template "a" with three flat children; the raw tuples
        # below map positionally onto (b, c, d).
        template = SequenceType("a")
        template["b"] = BaseType("b")
        template["c"] = BaseType("c")
        template["d"] = BaseType("d")
        self.data = IterData([(1, 2, 3), (4, 5, 6)], template)
        # Reference oracle: the same records as a Numpy structured array;
        # every test compares IterData behavior against this array.
        self.array = np.array(np.rec.fromrecords([
            (1, 2, 3),
            (4, 5, 6),
        ], names=["b", "c", "d"]))
    def assertIteratorEqual(self, it1, it2):
        """Assert two iterables yield equal sequences (exhausts both)."""
        self.assertEqual(list(it1), list(it2))
    def test_repr(self):
        """Test the object representation."""
        self.assertEqual(
            repr(self.data),
            "<IterData to stream [(1, 2, 3), (4, 5, 6)]>")
    def test_dtype(self):
        """Test the ``dtype`` property."""
        # Child dtype must match the structured array's field dtype.
        self.assertEqual(self.data["b"].dtype, self.array["b"].dtype)
    def test_iteration(self):
        """Test iteration over data."""
        self.assertIteratorEqual(map(tuple, self.data), map(tuple, self.array))
        self.assertIteratorEqual(self.data["b"], self.array["b"])
    def test_filter(self):
        """Test filtering the object."""
        # Each comparison operator on a child yields a lazy selection that
        # must match the equivalent boolean-mask filter on the record array.
        self.assertIteratorEqual(
            map(tuple, self.data[self.data["b"] == 1]),
            map(tuple, self.array[self.array["b"] == 1]))
        self.assertIteratorEqual(
            map(tuple, self.data[self.data["b"] != 1]),
            map(tuple, self.array[self.array["b"] != 1]))
        self.assertIteratorEqual(
            map(tuple, self.data[self.data["b"] >= 1]),
            map(tuple, self.array[self.array["b"] >= 1]))
        self.assertIteratorEqual(
            map(tuple, self.data[self.data["b"] <= 1]),
            map(tuple, self.array[self.array["b"] <= 1]))
        self.assertIteratorEqual(
            map(tuple, self.data[self.data["b"] > 1]),
            map(tuple, self.array[self.array["b"] > 1]))
        self.assertIteratorEqual(
            map(tuple, self.data[self.data["b"] < 1]),
            map(tuple, self.array[self.array["b"] < 1]))
    def test_slice(self):
        """Test slicing the object."""
        self.assertIteratorEqual(map(tuple, self.data[1:]),
                                 map(tuple, self.array[1:]))
    def test_integer_slice(self):
        """Test slicing with an integer.
        Note that the behavior here is different from Numpy arrays, since the
        data access to ``IterData`` is through iteration it has no direct index
        access.
        """
        # An integer index behaves like a length-1 slice, not item access.
        self.assertIteratorEqual(self.data[0:1], self.array[0:1].tolist())
        self.assertIteratorEqual(self.data[0], self.array[0:1].tolist())
    def test_invalid_child(self):
        """Test accessing a non-existing child."""
        with self.assertRaises(KeyError):
            self.data["e"]
    def test_invalid_key(self):
        """Test accessing using an invalid key."""
        # Non-string, non-slice keys (here a tuple) are rejected.
        with self.assertRaises(KeyError):
            self.data[(1, 2)]
    def test_selecting_children(self):
        """Test that we can select children."""
        # A list of names projects the selected children, preserving the
        # requested order ("d" before "b").
        self.assertIteratorEqual(
            map(tuple, self.data[["d", "b"]]),
            map(tuple, self.array[["d", "b"]]))
    def test_invalid_selection(self):
        """Test invalid selections.
        In theory this should never happen, since ``ConstraintExpression``
        object are constructly directly from existing children.
        """
        # Unknown child name in the expression:
        with self.assertRaises(ConstraintExpressionError):
            self.data[ConstraintExpression("a.e<1")]
        # Non-numeric comparison operand:
        with self.assertRaises(ConstraintExpressionError):
            self.data[ConstraintExpression("a.d<foo")]
    def test_intercomparison_selection(self):
        """Test comparing children in the selection."""
        # Selections may compare two children of the same sequence; again
        # each operator must match the structured-array equivalent.
        self.assertIteratorEqual(
            map(tuple, self.data[self.data["b"] == self.data["c"]]),
            map(tuple, self.array[self.array["b"] == self.array["c"]]))
        self.assertIteratorEqual(
            map(tuple, self.data[self.data["b"] != self.data["c"]]),
            map(tuple, self.array[self.array["b"] != self.array["c"]]))
        self.assertIteratorEqual(
            map(tuple, self.data[self.data["b"] >= self.data["c"]]),
            map(tuple, self.array[self.array["b"] >= self.array["c"]]))
        self.assertIteratorEqual(
            map(tuple, self.data[self.data["b"] <= self.data["c"]]),
            map(tuple, self.array[self.array["b"] <= self.array["c"]]))
        self.assertIteratorEqual(
            map(tuple, self.data[self.data["b"] > self.data["c"]]),
            map(tuple, self.array[self.array["b"] > self.array["c"]]))
        self.assertIteratorEqual(
            map(tuple, self.data[self.data["b"] < self.data["c"]]),
            map(tuple, self.array[self.array["b"] < self.array["c"]]))
    def test_selection_not_in_projection(self):
        """Test selection with variables that are not in the projection."""
        # NOTE(review): the bare expression below discards its result; it
        # appears to be a leftover and could likely be removed.
        self.data[["d", "b"]]
        # Filtering on "c" must still work even though "c" was projected out.
        filtered = self.data[["d", "b"]][self.data["c"] > 3]
        self.assertEqual(filtered, self.array[["d", "b"]][self.array["c"] > 3])
class TestRegexp(unittest.TestCase):
    """Tests for the ``=~`` regular-expression selection operator."""
    def test_regexp(self):
        """Only records whose name matches the pattern survive."""
        seq = SequenceType("sequence")
        seq["name"] = BaseType("name")
        seq.data = IterData([("John", "Paul", "George", "Ringo")], seq)
        matched = seq[ConstraintExpression('sequence.name=~"J.*"')]
        self.assertEqual(list(matched.iterdata()), [("John",)])
class TestNestedIterData(unittest.TestCase):
    """Test ``IterData`` with nested data."""
    def setUp(self):
        """Load data from test dataset."""
        # Nested sequence fixture: each outer record is
        # (lat, lon, elev, time_series), where time_series is itself a
        # sequence of (time, slp, wind) tuples.
        self.data = NestedSequence.location.data
    def test_iteration(self):
        """Test basic iteration."""
        self.assertEqual(list(self.data),
                         [(1, 1, 1, [(10, 11, 12), (21, 22, 23)]),
                          (2, 4, 4, [(15, 16, 17)]),
                          (3, 6, 9, []),
                          (4, 8, 16, [(31, 32, 33), (41, 42, 43),
                                      (51, 52, 53), (61, 62, 63)])])
    def test_children_data(self):
        """Test getting data from a simple child."""
        self.assertEqual(list(self.data["lat"]), [1, 2, 3, 4])
    def test_sequence_children_data(self):
        """Test getting data from a sequence child."""
        # Selecting the nested child yields one list of tuples per outer
        # record (empty list for records with no inner rows).
        self.assertEqual(list(self.data["time_series"]),
                         [[(10, 11, 12), (21, 22, 23)],
                          [(15, 16, 17)],
                          [],
                          [(31, 32, 33), (41, 42, 43),
                           (51, 52, 53), (61, 62, 63)]])
    def test_deep_children_data(self):
        """Test getting data from a sequence child."""
        # Two levels down: one list of "time" values per outer record.
        self.assertEqual(list(self.data["time_series"]["time"]),
                         [[10, 21], [15], [], [31, 41, 51, 61]])
    def test_selecting_children(self):
        """Test that we can select children."""
        # Projection order is preserved: time_series first, then elev.
        self.assertEqual(list(self.data[["time_series", "elev"]]),
                         [([(10, 11, 12), (21, 22, 23)], 1),
                          ([(15, 16, 17)], 4),
                          ([], 9),
                          ([(31, 32, 33), (41, 42, 43),
                            (51, 52, 53), (61, 62, 63)], 16)])
    def test_slice(self):
        """Test slicing the object."""
        # Outer slice with a step: every second record starting at index 1.
        self.assertEqual(list(self.data[1::2]),
                         [(2, 4, 4, [(15, 16, 17)]),
                          (4, 8, 16, [(31, 32, 33), (41, 42, 43),
                                      (51, 52, 53), (61, 62, 63)])])
    def test_children_data_from_slice(self):
        """Test getting children data from a sliced sequence."""
        self.assertEqual(list(self.data[1::2]["lat"]), [2, 4])
    def test_sequence_children_data_from_slice(self):
        """Test getting children data from a sliced sequence."""
        self.assertEqual(list(self.data[1::2]["time_series"]),
                         [[(15, 16, 17)], [(31, 32, 33), (41, 42, 43),
                                           (51, 52, 53), (61, 62, 63)]])
    def test_deep_slice(self):
        """Test slicing the inner sequence."""
        # The slice applies to the *inner* rows of each outer record.
        self.assertEqual(list(self.data["time_series"][::2]),
                         [[(10, 11, 12), (21, 22, 23)], []])
    def test_integer_slice(self):
        """Test slicing with an integer."""
        # As with flat IterData, an int index acts like a length-1 slice.
        self.assertEqual(list(self.data["time_series"][1]), [[(15, 16, 17)]])
    def test_filter_data(self):
        """Test filtering the data."""
        self.assertEqual(list(self.data[self.data["lat"] > 2]),
                         [(3, 6, 9, []), (4, 8, 16,
                                          [(31, 32, 33), (41, 42, 43),
                                           (51, 52, 53), (61, 62, 63)])])
    def test_deep_filter(self):
        """Test deep filtering the data."""
        # Filtering on an inner child keeps every outer record but drops
        # the inner rows that fail the predicate (slp > 11).
        self.assertEqual(list(self.data[self.data["time_series"]["slp"] > 11]),
                         [(1, 1, 1, [(21, 22, 23)]),
                          (2, 4, 4, [(15, 16, 17)]),
                          (3, 6, 9, []),
                          (4, 8, 16, [(31, 32, 33), (41, 42, 43),
                                      (51, 52, 53), (61, 62, 63)])])
class MockWorkingSet(object):
    """Stand-in for a pkg_resources working set used in handler tests."""
    def iter_entry_points(self, group):
        """Yield a single mock entry point wrapping ``MockHandler``.

        The *group* argument is accepted for interface compatibility but
        ignored: every group resolves to the same fake entry point.
        """
        entry_point = MockEntryPoint(MockHandler)
        yield entry_point
class MockHandler(BaseHandler):
    """Minimal concrete handler used to exercise ``BaseHandler``."""
    # Handle any path ending in ".foo".
    extensions = r"^.*\.foo$"
    def __init__(self, dataset=None):
        """Initialize the base handler and attach a debug header."""
        super(MockHandler, self).__init__(dataset)
        self.additional_headers = [("X-debug", "True")]
class MockEntryPoint(object):
    """Stand-in for a setuptools entry point."""
    def __init__(self, handler):
        """Store the handler class and fake entry-point metadata."""
        self.handler = handler
        self.attrs = ('TestHandler', )
        self.module_name = 'pydap.handlers.test'
        self.name = 'test'
    def load(self):
        """Return the wrapped handler class."""
        return self.handler
| [
"pydap.lib.walk",
"pydap.handlers.lib.apply_selection",
"pydap.parsers.parse_projection",
"pydap.tests.datasets.VerySimpleSequence.sequence.data.copy",
"pydap.model.BaseType",
"numpy.testing.assert_array_equal",
"pydap.handlers.lib.ConstraintExpression",
"copy.copy",
"six.text_type",
"pydap.handle... | [((1218, 1251), 'pydap.handlers.lib.get_handler', 'get_handler', (['"""file.foo"""', 'handlers'], {}), "('file.foo', handlers)\n", (1229, 1251), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((3625, 3650), 'copy.copy', 'copy.copy', (['SimpleSequence'], {}), '(SimpleSequence)\n', (3634, 3650), False, 'import copy\n'), ((3684, 3715), 'pydap.tests.datasets.SimpleSequence.cast.data.copy', 'SimpleSequence.cast.data.copy', ([], {}), '()\n', (3713, 3715), False, 'from pydap.tests.datasets import SimpleArray, SimpleSequence, SimpleGrid, VerySimpleSequence, NestedSequence, SimpleStructure\n'), ((3821, 3854), 'pydap.handlers.lib.apply_selection', 'apply_selection', (['""""""', 'self.dataset'], {}), "('', self.dataset)\n", (3836, 3854), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((3863, 3935), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['dataset.cast.data', 'self.dataset.cast.data'], {}), '(dataset.cast.data, self.dataset.cast.data)\n', (3892, 3935), True, 'import numpy as np\n'), ((4067, 4114), 'pydap.handlers.lib.apply_selection', 'apply_selection', (["['cast.lon=100']", 'self.dataset'], {}), "(['cast.lon=100'], self.dataset)\n", (4082, 4114), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((4123, 4238), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['dataset.cast.data', "self.dataset.cast.data[self.dataset.cast.data['lon'] == 100]"], {}), "(dataset.cast.data, self.dataset.cast.data[\n self.dataset.cast.data['lon'] == 100])\n", (4152, 4238), True, 'import numpy as np\n'), ((4377, 4438), 
'pydap.handlers.lib.apply_selection', 'apply_selection', (["['cast.lon=100', 'cast.lat>0']", 'self.dataset'], {}), "(['cast.lon=100', 'cast.lat>0'], self.dataset)\n", (4392, 4438), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((4460, 4610), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['dataset.cast.data', "self.dataset.cast.data[self.dataset.cast.data['lon'] == 100][self.dataset.\n cast.data['lat'] > 0]"], {}), "(dataset.cast.data, self.dataset.cast.data[\n self.dataset.cast.data['lon'] == 100][self.dataset.cast.data['lat'] > 0])\n", (4489, 4610), True, 'import numpy as np\n'), ((4900, 4921), 'copy.copy', 'copy.copy', (['SimpleGrid'], {}), '(SimpleGrid)\n', (4909, 4921), False, 'import copy\n'), ((4941, 4969), 'pydap.lib.walk', 'walk', (['self.dataset', 'BaseType'], {}), '(self.dataset, BaseType)\n', (4945, 4969), False, 'from pydap.lib import walk\n'), ((5098, 5132), 'pydap.handlers.lib.apply_projection', 'apply_projection', (['""""""', 'self.dataset'], {}), "('', self.dataset)\n", (5114, 5132), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((5564, 5614), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['dataset.x.data', '[1]'], {}), '(dataset.x.data, [1])\n', (5593, 5614), True, 'import numpy as np\n'), ((6071, 6167), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['dataset.SimpleGrid.x.data', 'self.dataset.SimpleGrid[1].x.data'], {}), '(dataset.SimpleGrid.x.data, self.dataset.\n SimpleGrid[1].x.data)\n', (6100, 6167), True, 'import numpy as np\n'), ((6184, 6280), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['dataset.SimpleGrid.y.data', 'self.dataset.SimpleGrid[1].y.data'], {}), 
'(dataset.SimpleGrid.y.data, self.dataset.\n SimpleGrid[1].y.data)\n', (6213, 6280), True, 'import numpy as np\n'), ((6297, 6413), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['dataset.SimpleGrid.SimpleGrid.data', 'self.dataset.SimpleGrid[1:2].SimpleGrid.data'], {}), '(dataset.SimpleGrid.SimpleGrid.data, self.\n dataset.SimpleGrid[1:2].SimpleGrid.data)\n', (6326, 6413), True, 'import numpy as np\n'), ((6651, 6680), 'copy.copy', 'copy.copy', (['VerySimpleSequence'], {}), '(VerySimpleSequence)\n', (6660, 6680), False, 'import copy\n'), ((6718, 6757), 'pydap.tests.datasets.VerySimpleSequence.sequence.data.copy', 'VerySimpleSequence.sequence.data.copy', ([], {}), '()\n', (6755, 6757), False, 'from pydap.tests.datasets import SimpleArray, SimpleSequence, SimpleGrid, VerySimpleSequence, NestedSequence, SimpleStructure\n'), ((6954, 7048), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['dataset.sequence.data', 'VerySimpleSequence.sequence.data[2]'], {}), '(dataset.sequence.data, VerySimpleSequence.\n sequence.data[2])\n', (6983, 7048), True, 'import numpy as np\n'), ((7582, 7609), 'pydap.handlers.lib.ConstraintExpression', 'ConstraintExpression', (['"""a>1"""'], {}), "('a>1')\n", (7602, 7609), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((7736, 7763), 'pydap.handlers.lib.ConstraintExpression', 'ConstraintExpression', (['"""a>1"""'], {}), "('a>1')\n", (7756, 7763), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((7882, 7909), 'pydap.handlers.lib.ConstraintExpression', 'ConstraintExpression', (['"""a>1"""'], {}), "('a>1')\n", (7902, 7909), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, 
apply_projection, ConstraintExpression, IterData\n'), ((7924, 7951), 'pydap.handlers.lib.ConstraintExpression', 'ConstraintExpression', (['"""b>0"""'], {}), "('b>0')\n", (7944, 7951), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((8102, 8129), 'pydap.handlers.lib.ConstraintExpression', 'ConstraintExpression', (['"""a>1"""'], {}), "('a>1')\n", (8122, 8129), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((8144, 8171), 'pydap.handlers.lib.ConstraintExpression', 'ConstraintExpression', (['"""b>0"""'], {}), "('b>0')\n", (8164, 8171), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((8632, 8649), 'pydap.model.SequenceType', 'SequenceType', (['"""a"""'], {}), "('a')\n", (8644, 8649), False, 'from pydap.model import BaseType, StructureType, SequenceType\n'), ((8674, 8687), 'pydap.model.BaseType', 'BaseType', (['"""b"""'], {}), "('b')\n", (8682, 8687), False, 'from pydap.model import BaseType, StructureType, SequenceType\n'), ((8712, 8725), 'pydap.model.BaseType', 'BaseType', (['"""c"""'], {}), "('c')\n", (8720, 8725), False, 'from pydap.model import BaseType, StructureType, SequenceType\n'), ((8750, 8763), 'pydap.model.BaseType', 'BaseType', (['"""d"""'], {}), "('d')\n", (8758, 8763), False, 'from pydap.model import BaseType, StructureType, SequenceType\n'), ((8784, 8826), 'pydap.handlers.lib.IterData', 'IterData', (['[(1, 2, 3), (4, 5, 6)]', 'template'], {}), '([(1, 2, 3), (4, 5, 6)], template)\n', (8792, 8826), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, 
IterData\n'), ((13703, 13727), 'pydap.model.SequenceType', 'SequenceType', (['"""sequence"""'], {}), "('sequence')\n", (13715, 13727), False, 'from pydap.model import BaseType, StructureType, SequenceType\n'), ((13755, 13771), 'pydap.model.BaseType', 'BaseType', (['"""name"""'], {}), "('name')\n", (13763, 13771), False, 'from pydap.model import BaseType, StructureType, SequenceType\n'), ((13796, 13853), 'pydap.handlers.lib.IterData', 'IterData', (["[('John', 'Paul', 'George', 'Ringo')]", 'sequence'], {}), "([('John', 'Paul', 'George', 'Ringo')], sequence)\n", (13804, 13853), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((18112, 18147), 'pydap.handlers.lib.BaseHandler.__init__', 'BaseHandler.__init__', (['self', 'dataset'], {}), '(self, dataset)\n', (18132, 18147), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((1479, 1502), 'pydap.handlers.lib.get_handler', 'get_handler', (['"""file.bar"""'], {}), "('file.bar')\n", (1490, 1502), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((5301, 5322), 'pydap.parsers.parse_projection', 'parse_projection', (['"""x"""'], {}), "('x')\n", (5317, 5322), False, 'from pydap.parsers import parse_projection\n'), ((5516, 5540), 'pydap.parsers.parse_projection', 'parse_projection', (['"""x[1]"""'], {}), "('x[1]')\n", (5532, 5540), False, 'from pydap.parsers import parse_projection\n'), ((5766, 5807), 'pydap.parsers.parse_projection', 'parse_projection', (['"""SimpleGrid.SimpleGrid"""'], {}), "('SimpleGrid.SimpleGrid')\n", (5782, 5807), False, 'from pydap.parsers import parse_projection\n'), ((6014, 6047), 'pydap.parsers.parse_projection', 
'parse_projection', (['"""SimpleGrid[1]"""'], {}), "('SimpleGrid[1]')\n", (6030, 6047), False, 'from pydap.parsers import parse_projection\n'), ((6899, 6930), 'pydap.parsers.parse_projection', 'parse_projection', (['"""sequence[2]"""'], {}), "('sequence[2]')\n", (6915, 6930), False, 'from pydap.parsers import parse_projection\n'), ((7789, 7802), 'six.text_type', 'text_type', (['ce'], {}), '(ce)\n', (7798, 7802), False, 'from six import text_type\n'), ((8858, 8923), 'numpy.rec.fromrecords', 'np.rec.fromrecords', (['[(1, 2, 3), (4, 5, 6)]'], {'names': "['b', 'c', 'd']"}), "([(1, 2, 3), (4, 5, 6)], names=['b', 'c', 'd'])\n", (8876, 8923), True, 'import numpy as np\n'), ((13906, 13950), 'pydap.handlers.lib.ConstraintExpression', 'ConstraintExpression', (['"""sequence.name=~"J.*\\""""'], {}), '(\'sequence.name=~"J.*"\')\n', (13926, 13950), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((7352, 7380), 'pydap.parsers.parse_projection', 'parse_projection', (['"""types[0]"""'], {}), "('types[0]')\n", (7368, 7380), False, 'from pydap.parsers import parse_projection\n'), ((11985, 12014), 'pydap.handlers.lib.ConstraintExpression', 'ConstraintExpression', (['"""a.e<1"""'], {}), "('a.e<1')\n", (12005, 12014), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n'), ((12097, 12128), 'pydap.handlers.lib.ConstraintExpression', 'ConstraintExpression', (['"""a.d<foo"""'], {}), "('a.d<foo')\n", (12117, 12128), False, 'from pydap.handlers.lib import load_handlers, get_handler, BaseHandler, ExtensionNotSupportedError, apply_selection, apply_projection, ConstraintExpression, IterData\n')] |
#!/usr/bin/env python
import os
import sys
import json
import copy
import h5py
import numpy as np
import pandas as pd
from types import SimpleNamespace
import torch
from torch import nn
from torch.utils.data import DataLoader
from sklearn.metrics import r2_score
sys.path.append('/storage/yaari/mutation_density/pytorch/nets/')
sys.path.append('/storage/yaari/mutation_density/pytorch/')
from cnn_predictors import *
from mut_dataset import *
def add_noise_to_model(model, noise):
    """Return a CUDA copy of *model* with Gaussian noise added to its weights.

    Args:
        model: torch.nn.Module to perturb (the original is left untouched).
        noise: standard deviation of the zero-mean Gaussian perturbation.

    Returns:
        A deep copy of the model, moved to the GPU, with every parameter
        shifted by independent N(0, noise) samples.
    """
    perturbed = copy.deepcopy(model).cuda()
    # The perturbation must not be tracked by autograd.
    with torch.no_grad():
        for weights in perturbed.parameters():
            jitter = torch.normal(0, noise, weights.size()).cuda()
            weights.add_(jitter)
    return perturbed
def predict(model, data_loader, label_ids):
    """Run *model* over *data_loader* and collect per-task predictions.

    Args:
        model: multi-task torch module; called on batches moved to CUDA and
            expected to return one output tensor per task.
        data_loader: iterable yielding ``(X, t_lst)`` batches, where t_lst
            holds one target tensor per task.
        label_ids: sequence of task identifiers; only its length is used.

    Returns:
        Tuple ``(all_preds, all_true, scores)`` — per-task prediction lists,
        per-task target lists, and one r2 score per task.
    """
    # Removed the unused ``corr_coef_sums`` array and the unused enumerate
    # index from the original implementation.
    n_tasks = len(label_ids)
    all_preds = [[] for _ in range(n_tasks)]
    all_true = [[] for _ in range(n_tasks)]
    for X, t_lst in data_loader:
        y_lst = model(X.cuda())
        with torch.no_grad():
            for i, t in enumerate(t_lst):
                all_preds[i].extend(y_lst[i].data.cpu().numpy().tolist())
                all_true[i].extend(t.data.cpu().numpy().tolist())
    # NOTE(review): sklearn's r2_score signature is (y_true, y_pred); the
    # original passes (preds, true). Kept as-is to preserve reported numbers,
    # but worth confirming with the authors.
    return all_preds, all_true, [r2_score(all_preds[i], all_true[i])
                                 for i in range(n_tasks)]
def test_with_perturbations(model, data_loader, label_ids, samp_num, params, fold, verbose=True):
    """Repeat predictions under random weight perturbations.

    Args:
        model: trained torch module (never modified; copies are perturbed).
        data_loader: loader over the evaluation samples.
        label_ids: task identifiers (only the first task's predictions kept).
        samp_num: number of samples in the loader's dataset.
        params: namespace with ``reps`` (repetitions) and ``alpha`` (noise std).
        fold: fold index, used only for progress messages.
        verbose: print accuracy every 10 repetitions when True.

    Returns:
        ``(samp_num, params.reps)`` array; column *r* holds the first task's
        predictions from the r-th perturbed model.
    """
    preds = np.empty((samp_num, params.reps))
    for rep in range(params.reps):
        noisy_model = add_noise_to_model(model, params.alpha)
        rep_preds, _, acc = predict(noisy_model, data_loader, label_ids)
        preds[:, rep] = rep_preds[0]
        if verbose and rep % 10 == 0:
            print('Fold {}, repetition {}, accuracy: {}'.format(fold, rep, acc))
    return preds
def main():
    """Entry point: k-fold confidence estimation via weight perturbations.

    Usage: kfold_test_model_confidance.py <run_id> <models folder> <cancer ids...>

    For every fold, loads the trained model, predicts on the held-out
    indices, repeats predictions with randomly perturbed weights, and writes
    everything to ``<models_dir>/<run_id>/perturb_predictions.csv``.
    """
    assert len(sys.argv) >= 4, 'Usage: kfold_test_model_confidance.py <run_id> <models folder name> <cancer ids...>'
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    config_path = os.path.join(cur_dir, "../configs/config_confidance_kfold.json")
    with open(config_path, 'r') as f:
        config = json.load(f)
    run_id = sys.argv[1]
    label_ids = sys.argv[3:]
    labels_str = '-'.join(label_ids)
    models_dir = os.path.join(config['base_path'], labels_str, sys.argv[2])
    file_path = config['data_file']
    with h5py.File(file_path, 'r') as h5f:
        chr_idxs = h5f['idx'][:]
    k = config['k']
    params = SimpleNamespace()
    params.reps = config['repetitions']
    params.alpha = config['alpha']
    params.bs = config['bs']
    # Collect per-fold frames and concatenate once at the end;
    # DataFrame.append was deprecated and removed in pandas 2.0.
    fold_frames = []
    # BUG FIX: iterate over all k folds — the original hard-coded range(2),
    # contradicting the "out of {k} folds" progress message below.
    for i in range(k):
        print('Running iteration {} out of {} folds...'.format(i + 1, k))
        test_idxs = np.sort(np.load(os.path.join(models_dir, 'test_indices_fold_{}.npy'.format(i))))
        test_ds = SimpleDatasetFromH5(file_path, label_ids, test_idxs, chr_idxs[test_idxs], 'x_data')
        test_dl = DataLoader(test_ds, batch_size=params.bs, shuffle=False, drop_last=False, pin_memory=True, num_workers=4)
        samp_num = len(test_ds)
        test_chr_idxs = chr_idxs[test_idxs]
        print('Loading model...')
        model = nn.DataParallel(SimpleMultiTaskResNet(test_ds.get_data_shape(), len(label_ids))).cuda()
        state_dict = torch.load(os.path.join(models_dir, 'best_model_fold_{}.pt'.format(i)))
        model.load_state_dict(state_dict)
        model.eval()
        print('Computing prediction and confidance...')
        preds, labels, acc = predict(model, test_dl, label_ids)
        perturp_preds = test_with_perturbations(model, test_dl, label_ids, samp_num, params, i)
        print('Model accuracy: {}'.format(acc))
        print('Storing predictions...')
        fold_pred_df = pd.DataFrame(data=perturp_preds)
        # Genomic coordinates of each test window plus observed/predicted
        # (unperturbed) mutation counts for the first task.
        fold_pred_df['chr'] = test_chr_idxs[:, 0]
        fold_pred_df['s_idx'] = test_chr_idxs[:, 1]
        fold_pred_df['e_idx'] = test_chr_idxs[:, 2]
        fold_pred_df['obs_mut'] = labels[0]
        fold_pred_df['pred_mut'] = preds[0]
        fold_frames.append(fold_pred_df)
    pred_df = pd.concat(fold_frames, ignore_index=True) if fold_frames else pd.DataFrame()
    out_dir = os.path.join(models_dir, run_id)
    out_path = os.path.join(out_dir, 'perturb_predictions.csv')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    print('Saving predictions to {}...'.format(out_path))
    pred_df.to_csv(out_path)
    print('Done!')
# Run the k-fold confidence pipeline only when executed as a script.
if __name__ == '__main__':
    main()
| [
"sys.path.append",
"pandas.DataFrame",
"h5py.File",
"json.load",
"copy.deepcopy",
"os.makedirs",
"torch.utils.data.DataLoader",
"numpy.empty",
"os.path.realpath",
"sklearn.metrics.r2_score",
"os.path.exists",
"torch.no_grad",
"os.path.join",
"types.SimpleNamespace"
] | [((264, 328), 'sys.path.append', 'sys.path.append', (['"""/storage/yaari/mutation_density/pytorch/nets/"""'], {}), "('/storage/yaari/mutation_density/pytorch/nets/')\n", (279, 328), False, 'import sys\n'), ((329, 388), 'sys.path.append', 'sys.path.append', (['"""/storage/yaari/mutation_density/pytorch/"""'], {}), "('/storage/yaari/mutation_density/pytorch/')\n", (344, 388), False, 'import sys\n'), ((1419, 1452), 'numpy.empty', 'np.empty', (['(samp_num, params.reps)'], {}), '((samp_num, params.reps))\n', (1427, 1452), True, 'import numpy as np\n'), ((2023, 2087), 'os.path.join', 'os.path.join', (['cur_dir', '"""../configs/config_confidance_kfold.json"""'], {}), "(cur_dir, '../configs/config_confidance_kfold.json')\n", (2035, 2087), False, 'import os\n'), ((2257, 2315), 'os.path.join', 'os.path.join', (["config['base_path']", 'labels_str', 'sys.argv[2]'], {}), "(config['base_path'], labels_str, sys.argv[2])\n", (2269, 2315), False, 'import os\n'), ((2467, 2484), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '()\n', (2482, 2484), False, 'from types import SimpleNamespace\n'), ((2604, 2618), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2616, 2618), True, 'import pandas as pd\n'), ((4118, 4150), 'os.path.join', 'os.path.join', (['models_dir', 'run_id'], {}), '(models_dir, run_id)\n', (4130, 4150), False, 'import os\n'), ((4166, 4214), 'os.path.join', 'os.path.join', (['out_dir', '"""perturb_predictions.csv"""'], {}), "(out_dir, 'perturb_predictions.csv')\n", (4178, 4214), False, 'import os\n'), ((537, 552), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (550, 552), False, 'import torch\n'), ((1975, 2001), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1991, 2001), False, 'import os\n'), ((2135, 2147), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2144, 2147), False, 'import json\n'), ((2362, 2387), 'h5py.File', 'h5py.File', (['file_path', '"""r"""'], {}), "(file_path, 'r')\n", (2371, 2387), False, 'import 
h5py\n'), ((2950, 3059), 'torch.utils.data.DataLoader', 'DataLoader', (['test_ds'], {'batch_size': 'params.bs', 'shuffle': '(False)', 'drop_last': '(False)', 'pin_memory': '(True)', 'num_workers': '(4)'}), '(test_ds, batch_size=params.bs, shuffle=False, drop_last=False,\n pin_memory=True, num_workers=4)\n', (2960, 3059), False, 'from torch.utils.data import DataLoader\n'), ((3765, 3797), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'perturp_preds'}), '(data=perturp_preds)\n', (3777, 3797), True, 'import pandas as pd\n'), ((4226, 4249), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (4240, 4249), False, 'import os\n'), ((4259, 4279), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (4270, 4279), False, 'import os\n'), ((500, 520), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (513, 520), False, 'import copy\n'), ((979, 994), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (992, 994), False, 'import torch\n'), ((1233, 1268), 'sklearn.metrics.r2_score', 'r2_score', (['all_preds[i]', 'all_true[i]'], {}), '(all_preds[i], all_true[i])\n', (1241, 1268), False, 'from sklearn.metrics import r2_score\n')] |
import mdtraj
import parmed as pmd
import numpy as np
from parmed.tools import change
from openmmtools import forces
from qmhub import *
from get_charges import *
from simtk.openmm import openmm
from simtk.openmm import app
from simtk import unit
def set_restrained_atoms(top_file, coords_file, ligand_selection, receptor_selection):
    """Map ligand/receptor selections to absolute atom indices in the topology.

    Args:
        top_file: topology file path (Amber/Gromacs, read by ParmEd).
        coords_file: coordinate file matching the topology.
        ligand_selection: ParmEd/Amber mask selecting the ligand atoms.
        receptor_selection: ParmEd/Amber mask selecting the receptor atoms.

    Returns:
        Tuple ``(ligand_atom_list, receptor_atom_list)`` of integer indices
        into the full atom list, suitable for OpenMM restraint forces.
    """
    _top = pmd.load_file(top_file, xyz=coords_file)
    atom_list = _top.atoms
    # Index atoms once by (residue number, atom name) instead of rescanning
    # the full atom list for every selected atom (was O(n*m)).
    # NOTE(review): this assumes (residue.number, name) uniquely identifies
    # an atom, which the original matching logic relied on implicitly.
    index_by_key = {(atom.residue.number, atom.name): idx
                    for idx, atom in enumerate(atom_list)}
    ligand_atom_list = [index_by_key[(at.residue.number, at.name)]
                        for at in _top[ligand_selection].atoms]
    receptor_atom_list = [index_by_key[(at.residue.number, at.name)]
                          for at in _top[receptor_selection].atoms]
    return ligand_atom_list, receptor_atom_list
def setup_simulation(top, positions, update, box_vectors=None, restraint=False, ligand_atom_list=None, receptor_atom_list=None):
    '''Setup the openMM system with the current topology and
    the input coordinates or the current positions depending on
    the value of update.
    Standard conditions are assumed (298K, 1bar)
    Input:
        top : Topology object from OpenMM o ParmEd (Gromacs or Amber)
        positions: current positions of atoms
        update: integer of charge update cycle (used to name the DCD file)
        box_vectors: optional periodic box vectors to set on the context
        restraint: if True, add a harmonic restraint between the atom lists
        ligand_atom_list / receptor_atom_list: atom indices for the restraint
    Returns:
        (Simulation, System) — the minimized OpenMM simulation and its system
    '''
    import sys  # BUG FIX: ``sys`` was only in scope via a wildcard import; make it explicit.
    system = top.createSystem(
        nonbondedMethod=app.PME, nonbondedCutoff=1 * unit.nanometer, constraints=app.HBonds)
    # NPT ensemble: Monte Carlo barostat at 1 bar / 298 K.
    system.addForce(openmm.MonteCarloBarostat(1 * unit.bar, 298 * unit.kelvin))
    if restraint:
        if ligand_atom_list is None or receptor_atom_list is None:
            raise Exception("Missing atom list to apply restraints")
        # Separate name so the boolean ``restraint`` parameter is not rebound
        # to the force object (the original shadowed it).
        restraint_force = forces.HarmonicRestraintForce(
            spring_constant=0.2 * unit.kilocalories_per_mole / unit.angstrom**2,
            restrained_atom_indices1=ligand_atom_list,
            restrained_atom_indices2=receptor_atom_list)
        system.addForce(restraint_force)
    integrator = openmm.LangevinIntegrator(
        298 * unit.kelvin, 1 / unit.picosecond, 0.002 * unit.picoseconds)
    simulation = app.Simulation(top.topology, system, integrator)
    # Progress to stdout every 5000 steps; trajectory every 50000 steps.
    simulation.reporters.append(app.StateDataReporter(
        sys.stdout, 5000, step=True, potentialEnergy=True, temperature=True, density=True))
    simulation.reporters.append(app.DCDReporter(f'traj_{update}.dcd', 50000))
    simulation.context.setPositions(positions)
    if box_vectors is not None:
        simulation.context.setPeriodicBoxVectors(*box_vectors)
    simulation.minimizeEnergy()
    return simulation, system
def calculate_charges(simulation, system, ligand_selection, qm_charge, radius=10, method='B3LYP', basis='def2-TZVP'):
    """Run a QM/MM single point on the ligand and derive new atomic charges.

    Writes the current (wrapped) frame to ``output.pdb``, builds a QM region
    from *ligand_selection* and an MM environment shell around it, runs an
    ORCA calculation through QMHub, and reads MBIS charges from the
    resulting molden file.

    Args:
        simulation: OpenMM Simulation whose current state provides positions.
        system: OpenMM System matching the simulation topology.
        ligand_selection: ParmEd/Amber mask defining the QM region.
        qm_charge: total charge of the QM region.
        radius: QM/MM electrostatic cutoff in angstrom (environment shell is
            radius + 2).
        method, basis: ORCA level of theory.

    Returns:
        ``(positions, epol, charges)`` — the positions used, the QM
        polarization energy, and the per-atom MBIS charges.
    """
    positions = simulation.context.getState(
        getPositions=True, enforcePeriodicBox=True).getPositions()
    _box_vectors = simulation.context.getState().getPeriodicBoxVectors()
    simulation.topology.setPeriodicBoxVectors(_box_vectors)
    # NOTE(review): this file handle is never closed explicitly; consider a
    # ``with`` block. Left untouched here.
    app.PDBFile.writeFile(simulation.topology, positions,
                          open('output.pdb', 'w'))
    pdb = pmd.load_file('output.pdb')
    traj = mdtraj.load('output.pdb')
    # Re-wrap molecules into the primary unit cell before carving regions.
    traj.image_molecules()
    frame = pmd.openmm.load_topology(
        pdb.topology, system, traj.openmm_positions(0))
    qm_region = frame[ligand_selection]
    # Environment = everything within (radius + 2) A of the ligand, minus
    # the ligand itself (Amber-mask "<@" distance selection).
    environment = frame[f'{ligand_selection}<@{float(radius)+2.0} & !{ligand_selection}']
    qmmm = QMMM(qm_region, environment, qmSoftware='orca', mmSoftware='openmm', qmCharge=qm_charge, qmMult=1,
          qmEmbedNear='eed', qmEmbedFar=None, qmSwitchingType='Switch', qmCutoff=radius)
    qmmm.run_qm(method=method, basis=basis, calc_forces=False)
    qmmm.parse_output()
    epol = qmmm.system.qm_atoms.qm_pol_energy
    # MBIS charges parsed from ORCA's molden output.
    charges = get_charges('orca.molden.input', 'mbis')
    return positions, epol, charges
def charge_stats(charge_list):
    """Per-atom mean and standard deviation across a list of charge sets.

    *charge_list* holds one per-atom charge sequence per snapshot; the
    statistics are taken across snapshots (axis 0) in float64.
    """
    stacked = np.array(charge_list)
    mean_q = stacked.mean(axis=0, dtype=np.float64)
    std_q = stacked.std(axis=0, dtype=np.float64)
    return mean_q, std_q
def epol_stats(epol_list):
    """Mean and standard deviation of a list of polarization energies."""
    values = np.array(epol_list)
    return values.mean(), values.std()
def make_new_top(top_file, box_vectors, charges_mean, ligand_selection):
    """Write the averaged ligand charges back into the topology file.

    For each ligand atom, sets its charge to the corresponding entry of
    *charges_mean* (rounded to 5 decimals), then overwrites *top_file*.
    Returns the updated ParmEd topology.
    """
    topology = pmd.load_file(top_file)
    topology.box_vectors = box_vectors
    ligand_atoms = topology[ligand_selection].atoms
    for atom_idx, ligand_atom in enumerate(ligand_atoms):
        atom_mask = f'{ligand_selection}&@{ligand_atom.name}'
        new_charge = round(charges_mean[atom_idx], 5)
        change(topology, atom_mask, 'charge', new_charge).execute()
    topology.save(top_file, overwrite=True)
    return topology
| [
"simtk.openmm.openmm.MonteCarloBarostat",
"simtk.openmm.app.StateDataReporter",
"simtk.openmm.app.Simulation",
"mdtraj.load",
"simtk.openmm.openmm.LangevinIntegrator",
"numpy.array",
"parmed.load_file",
"simtk.openmm.app.DCDReporter",
"openmmtools.forces.HarmonicRestraintForce"
] | [((350, 390), 'parmed.load_file', 'pmd.load_file', (['top_file'], {'xyz': 'coords_file'}), '(top_file, xyz=coords_file)\n', (363, 390), True, 'import parmed as pmd\n'), ((2166, 2261), 'simtk.openmm.openmm.LangevinIntegrator', 'openmm.LangevinIntegrator', (['(298 * unit.kelvin)', '(1 / unit.picosecond)', '(0.002 * unit.picoseconds)'], {}), '(298 * unit.kelvin, 1 / unit.picosecond, 0.002 *\n unit.picoseconds)\n', (2191, 2261), False, 'from simtk.openmm import openmm\n'), ((2284, 2332), 'simtk.openmm.app.Simulation', 'app.Simulation', (['top.topology', 'system', 'integrator'], {}), '(top.topology, system, integrator)\n', (2298, 2332), False, 'from simtk.openmm import app\n'), ((3247, 3274), 'parmed.load_file', 'pmd.load_file', (['"""output.pdb"""'], {}), "('output.pdb')\n", (3260, 3274), True, 'import parmed as pmd\n'), ((3286, 3311), 'mdtraj.load', 'mdtraj.load', (['"""output.pdb"""'], {}), "('output.pdb')\n", (3297, 3311), False, 'import mdtraj\n'), ((4040, 4061), 'numpy.array', 'np.array', (['charge_list'], {}), '(charge_list)\n', (4048, 4061), True, 'import numpy as np\n'), ((4259, 4278), 'numpy.array', 'np.array', (['epol_list'], {}), '(epol_list)\n', (4267, 4278), True, 'import numpy as np\n'), ((4447, 4470), 'parmed.load_file', 'pmd.load_file', (['top_file'], {}), '(top_file)\n', (4460, 4470), True, 'import parmed as pmd\n'), ((1553, 1611), 'simtk.openmm.openmm.MonteCarloBarostat', 'openmm.MonteCarloBarostat', (['(1 * unit.bar)', '(298 * unit.kelvin)'], {}), '(1 * unit.bar, 298 * unit.kelvin)\n', (1578, 1611), False, 'from simtk.openmm import openmm\n'), ((2365, 2473), 'simtk.openmm.app.StateDataReporter', 'app.StateDataReporter', (['sys.stdout', '(5000)'], {'step': '(True)', 'potentialEnergy': '(True)', 'temperature': '(True)', 'density': '(True)'}), '(sys.stdout, 5000, step=True, potentialEnergy=True,\n temperature=True, density=True)\n', (2386, 2473), False, 'from simtk.openmm import app\n'), ((2512, 2556), 'simtk.openmm.app.DCDReporter', 
'app.DCDReporter', (['f"""traj_{update}.dcd"""', '(50000)'], {}), "(f'traj_{update}.dcd', 50000)\n", (2527, 2556), False, 'from simtk.openmm import app\n'), ((1731, 1929), 'openmmtools.forces.HarmonicRestraintForce', 'forces.HarmonicRestraintForce', ([], {'spring_constant': '(0.2 * unit.kilocalories_per_mole / unit.angstrom ** 2)', 'restrained_atom_indices1': 'ligand_atom_list', 'restrained_atom_indices2': 'receptor_atom_list'}), '(spring_constant=0.2 * unit.\n kilocalories_per_mole / unit.angstrom ** 2, restrained_atom_indices1=\n ligand_atom_list, restrained_atom_indices2=receptor_atom_list)\n', (1760, 1929), False, 'from openmmtools import forces\n')] |
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import TensorDataset, DataLoader, random_split
import data_handler
import damage_detector
import config
# Surface the exact autograd op that produced NaN/Inf gradients; this slows
# training, so it is typically only left enabled while debugging.
torch.autograd.set_detect_anomaly(True)
device = torch.device("cuda" if config.cuda else "cpu")
detector = damage_detector.Detector()
if config.multi_gpu:
    n_gpu = torch.cuda.device_count()
    print('Multi GPU mode Use {} GPU'.format(n_gpu))
    # Split each batch across all visible GPUs; the real model then lives
    # under detector.module (accounted for when checkpointing below).
    detector = nn.DataParallel(detector)
detector.to(device)
optimizer = optim.Adam(params=detector.parameters(), lr=config.learning_rate)
def get_dataset():
    """Gather images and bounding boxes from every available location.

    Returns:
        Tuple of (images, vboxes), each a flat list aggregated across all
        locations reported by data_handler.get_location().
    """
    all_images = []
    all_boxes = []
    for location in data_handler.get_location():
        images, boxes = data_handler.get_dataset(location)
        all_images.extend(images)
        all_boxes.extend(boxes)
    return all_images, all_boxes
def get_dataset_debug():
    """Load a 20-sample slice from the first location only, for quick runs.

    Returns:
        Tuple of (images, vboxes) truncated to the first 20 entries each.
    """
    first_location = data_handler.get_location()[0]
    images, boxes = data_handler.get_dataset(first_location)
    return images[:20], boxes[:20]
# Build the training set: a small debug slice or the full dataset.
if config.flag_debug:
    vimage, vvbox = get_dataset_debug()
else:
    vimage, vvbox = get_dataset()
# Convert the Python lists to float32 tensors (images and bounding boxes).
vimage = torch.from_numpy(np.array(vimage)).float()
vvbox = torch.from_numpy(np.array(vvbox)).float()
datasets = TensorDataset(vimage, vvbox)
trainloader = DataLoader(datasets, batch_size=config.batch_size, shuffle=True, pin_memory=True, num_workers=4)
# Best (lowest) epoch loss seen so far; the first epoch always initializes it
# via the `epoch == 0` check below, so the starting value is irrelevant.
train_loss_min = 0
for epoch in range(config.epochs):
    detector.train()
    train_loss = 0
    for batch_idx, (image, vbox) in enumerate(trainloader):
        if config.cuda:
            image = image.to(device)
            vbox = vbox.to(device)
        optimizer.zero_grad()
        predict = detector(image)
        loss = damage_detector.loss_function(predict, vbox, device)
        train_loss += loss.item()
        loss.backward()
        optimizer.step()
        print(f'Epoch: {epoch}, Batch: {batch_idx}, loss: {loss.item()}')
    # Checkpoint only when this epoch improved on the best loss so far.
    if (train_loss < train_loss_min) or epoch == 0:
        train_loss_min = train_loss
        if config.multi_gpu:
            # DataParallel wraps the model; save the inner module's weights so
            # the checkpoint also loads into a plain single-GPU Detector.
            torch.save(detector.module.state_dict(), config.fn_model)
        else:
            torch.save(detector.state_dict(), config.fn_model)
    print(f'EpochTotal: {epoch}, loss: {train_loss}') | [
"data_handler.get_dataset",
"data_handler.get_location",
"torch.utils.data.DataLoader",
"damage_detector.loss_function",
"torch.cuda.device_count",
"damage_detector.Detector",
"torch.autograd.set_detect_anomaly",
"torch.utils.data.TensorDataset",
"numpy.array",
"torch.device",
"torch.nn.DataPara... | [((193, 232), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (226, 232), False, 'import torch\n'), ((243, 289), 'torch.device', 'torch.device', (["('cuda' if config.cuda else 'cpu')"], {}), "('cuda' if config.cuda else 'cpu')\n", (255, 289), False, 'import torch\n'), ((302, 328), 'damage_detector.Detector', 'damage_detector.Detector', ([], {}), '()\n', (326, 328), False, 'import damage_detector\n'), ((1308, 1336), 'torch.utils.data.TensorDataset', 'TensorDataset', (['vimage', 'vvbox'], {}), '(vimage, vvbox)\n', (1321, 1336), False, 'from torch.utils.data import TensorDataset, DataLoader, random_split\n'), ((1352, 1453), 'torch.utils.data.DataLoader', 'DataLoader', (['datasets'], {'batch_size': 'config.batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(4)'}), '(datasets, batch_size=config.batch_size, shuffle=True, pin_memory\n =True, num_workers=4)\n', (1362, 1453), False, 'from torch.utils.data import TensorDataset, DataLoader, random_split\n'), ((364, 389), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (387, 389), False, 'import torch\n'), ((460, 485), 'torch.nn.DataParallel', 'nn.DataParallel', (['detector'], {}), '(detector)\n', (475, 485), False, 'from torch import nn, optim\n'), ((625, 652), 'data_handler.get_location', 'data_handler.get_location', ([], {}), '()\n', (650, 652), False, 'import data_handler\n'), ((933, 960), 'data_handler.get_location', 'data_handler.get_location', ([], {}), '()\n', (958, 960), False, 'import data_handler\n'), ((1011, 1045), 'data_handler.get_dataset', 'data_handler.get_dataset', (['location'], {}), '(location)\n', (1035, 1045), False, 'import data_handler\n'), ((751, 785), 'data_handler.get_dataset', 'data_handler.get_dataset', (['location'], {}), '(location)\n', (775, 785), False, 'import data_handler\n'), ((1791, 1843), 'damage_detector.loss_function', 'damage_detector.loss_function', (['predict', 
'vbox', 'device'], {}), '(predict, vbox, device)\n', (1820, 1843), False, 'import damage_detector\n'), ((1219, 1235), 'numpy.array', 'np.array', (['vimage'], {}), '(vimage)\n', (1227, 1235), True, 'import numpy as np\n'), ((1271, 1286), 'numpy.array', 'np.array', (['vvbox'], {}), '(vvbox)\n', (1279, 1286), True, 'import numpy as np\n')] |
import numpy as np
class CostFunction(object):
    """Base class for all cost functions.

    Validates input array dimensions and initializes the attributes shared
    by every concrete cost function.

    Arguments:
        y_train -- actual labels - array with shape (m, 1) (or (m, k))
        y_hat -- predicted labels - array with the same shape as y_train
        X_train -- feature matrix with shape (m, n)

    Raises:
        ValueError: if the input array dimensions are inconsistent.
    """

    def __init__(self, y_train, y_hat, X_train):
        super(CostFunction, self).__init__()
        self._check_array_dims(y_train, y_hat, X_train)
        self.y_train = y_train
        self.y_hat = y_hat
        # Residual between prediction and ground truth, reused by subclasses.
        self.loss = self.y_hat - self.y_train
        self.X_train = X_train
        # Number of training examples.
        self.m = X_train.shape[0]

    def _check_array_dims(self, y_train, y_hat, X_train):
        # Bug fix: the original wrapped `assert` statements in
        # `try/except ValueError`, but assert raises AssertionError, so the
        # handler was dead code; mismatches only escaped as AssertionError
        # (and under `python -O` were not checked at all). Raise ValueError
        # explicitly, as documented in the subclasses' docstrings.
        if y_train.shape != y_hat.shape or y_train.shape[0] != X_train.shape[0]:
            raise ValueError(
                "Array dimensions must match: "
                "y_train: (m, 1), y_hat: (m, 1), X_train: (m, n)"
            )
class MeanSquaredError(CostFunction):
    """Mean squared error cost for regression.

    Arguments:
        y_train -- actual labels - vector with shape (m, 1)
        y_hat -- predicted labels - vector with shape (m, 1)
        X_train -- feature matrix with shape (m, n)

    Properties:
        get_cost: Returns error between predicted and actual labels
        get_grads: Returns gradient of cost with respect to parameters

    Raises:
        ValueError: Check dimensions of input arrays
    """

    def __init__(self, y_train, y_hat, X_train):
        super().__init__(y_train, y_hat, X_train)

    @property
    def get_cost(self):
        # (1 / 2m) * sum(loss^2); the 1/2 cancels the exponent on derivation.
        squared_loss = np.square(self.loss)
        return (1 / (2 * self.m)) * np.sum(squared_loss)

    @property
    def get_grads(self):
        # (1/m) * X^T @ loss
        return (1 / self.m) * np.dot(self.X_train.T, self.loss)
class BinaryCrossEntropy(CostFunction):
    """Binary cross-entropy cost for logistic regression.

    Arguments:
        y_train -- actual labels - vector with shape (m, 1)
        y_hat -- predicted labels - vector with shape (m, 1)
        X_train -- feature matrix with shape (m, n)

    Properties:
        get_cost: Returns error between predicted and actual labels
        get_grads: Returns dict with gradients "dw" and "db"

    Raises:
        ValueError: Check dimensions of input arrays
    """

    def __init__(self, y_train, y_hat, X_train):
        super().__init__(y_train, y_hat, X_train)

    @property
    def get_cost(self):
        # -(1/m) * sum( y*log(yhat) + (1-y)*log(1-yhat) )
        positive_term = self.y_train * np.log(self.y_hat)
        negative_term = (1 - self.y_train) * np.log(1 - self.y_hat)
        return -(1 / self.m) * np.sum(positive_term + negative_term)

    @property
    def get_grads(self):
        scale = 1 / self.m
        return {
            "dw": scale * np.dot(self.X_train.T, self.loss),
            "db": scale * np.sum(self.loss),
        }
class CategoricalCrossEntropy(CostFunction):
    """Categorical (multi-class) cross-entropy cost.

    Arguments:
        y_train -- actual one-hot labels with shape (m, k)
        y_hat -- predicted class probabilities with shape (m, k)
        X_train -- feature matrix with shape (m, n)

    Properties:
        get_cost: Returns error between predicted and actual labels
        get_grads: Returns dict with gradients "dw" and "db"

    Raises:
        ValueError: Check dimensions of input arrays
    """

    def __init__(self, y_train, y_hat, X_train):
        super().__init__(y_train, y_hat, X_train)

    @property
    def get_cost(self):
        # Bug fix: the original referenced self.y_label, which is never
        # defined anywhere (AttributeError at runtime), and swapped the
        # label/prediction roles inside the log. Cross-entropy is
        # -(1/m) * sum(y_train * log(y_hat)).
        return -(1 / self.m) * np.sum(self.y_train * np.log(self.y_hat))

    @property
    def get_grads(self):
        # Bug fix: use self.y_train instead of the undefined self.y_label.
        # dZ = y - y_hat, so dw = -(1/m) X^T dZ == (1/m) X^T (y_hat - y).
        dZ = self.y_train - self.y_hat
        dw = -(1 / self.m) * np.dot(self.X_train.T, dZ)
        db = -(1 / self.m) * np.sum(dZ)
        grads = {
            "dw": dw,
            "db": db
        }
        return grads
| [
"numpy.dot",
"numpy.square",
"numpy.sum",
"numpy.log"
] | [((1675, 1708), 'numpy.dot', 'np.dot', (['self.X_train.T', 'self.loss'], {}), '(self.X_train.T, self.loss)\n', (1681, 1708), True, 'import numpy as np\n'), ((2392, 2410), 'numpy.log', 'np.log', (['self.y_hat'], {}), '(self.y_hat)\n', (2398, 2410), True, 'import numpy as np\n'), ((2454, 2476), 'numpy.log', 'np.log', (['(1 - self.y_hat)'], {}), '(1 - self.y_hat)\n', (2460, 2476), True, 'import numpy as np\n'), ((2509, 2539), 'numpy.sum', 'np.sum', (['(case_true + case_false)'], {}), '(case_true + case_false)\n', (2515, 2539), True, 'import numpy as np\n'), ((2612, 2645), 'numpy.dot', 'np.dot', (['self.X_train.T', 'self.loss'], {}), '(self.X_train.T, self.loss)\n', (2618, 2645), True, 'import numpy as np\n'), ((2675, 2692), 'numpy.sum', 'np.sum', (['self.loss'], {}), '(self.loss)\n', (2681, 2692), True, 'import numpy as np\n'), ((3634, 3660), 'numpy.dot', 'np.dot', (['self.X_train.T', 'dZ'], {}), '(self.X_train.T, dZ)\n', (3640, 3660), True, 'import numpy as np\n'), ((3691, 3701), 'numpy.sum', 'np.sum', (['dZ'], {}), '(dZ)\n', (3697, 3701), True, 'import numpy as np\n'), ((1579, 1599), 'numpy.square', 'np.square', (['self.loss'], {}), '(self.loss)\n', (1588, 1599), True, 'import numpy as np\n'), ((3499, 3519), 'numpy.log', 'np.log', (['self.y_label'], {}), '(self.y_label)\n', (3505, 3519), True, 'import numpy as np\n')] |
'''
This code is part of QuTIpy.
(c) Copyright <NAME>, 2021
This code is licensed under the Apache License, Version 2.0. You may
obtain a copy of this license in the LICENSE.txt file in the root directory
of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
Any modifications or derivative works of this code must retain this
copyright notice, and modified files need to carry a notice indicating
that they have been altered from the originals.
'''
import numpy as np
from qutipy.su import su_structure_constants
def coherence_vector_star_product(n1,n2,d):
    '''
    Computes the star product between two coherence vectors corresponding to states, so that
    n1 and n2 (the coherence vectors) have length d^2-1 each.
    Definition taken from:
    "Characterization of the positivity of the density matrix in terms of
    the coherence vector representation"
    PHYSICAL REVIEW A 68, 062322 (2003)

    Returns a numpy array of length d^2-1.
    '''
    #L=su_generators(d)
    # Symmetric structure constants g[(i,j,k)] of su(d); the code below
    # indexes them with 1-based tuples (i, j, k).
    g=su_structure_constants(d)[1]
    p=[]
    # k-th component of the star product: (d/2) * sum_{i,j} n1_i * n2_j * g_{ijk}
    # (n1/n2 are 0-indexed sequences, hence the i-1 / j-1 offsets).
    for k in range(1,d**2):
        pk=0
        for i in range(1,d**2):
            for j in range(1,d**2):
                pk+=(d/2)*n1[i-1]*n2[j-1]*g[(i,j,k)]
        p.append(pk)
    return np.array(p) | [
"numpy.array",
"qutipy.su.su_structure_constants"
] | [((1221, 1232), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (1229, 1232), True, 'import numpy as np\n'), ((986, 1011), 'qutipy.su.su_structure_constants', 'su_structure_constants', (['d'], {}), '(d)\n', (1008, 1011), False, 'from qutipy.su import su_structure_constants\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.