code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
#!/usr/bin/env python3
import sys
from pathlib import Path
from build import VARIANTS
# docker-compose template for a mailman core/web pair; test_setup fills
# in {variant} to pick the image tags.
# NOTE(review): YAML nesting indentation appears lost in this copy of the
# file -- the service entries should be indented under `services:`;
# verify against the original source before use.
DOCKER_TEST="""version: '2'
services:
mailman-core:
image: core-{variant}
mailman-web:
image: web-{variant}
environment:
- SECRET_KEY=<KEY>
"""
def test_setup(variant):
    """Render the compose template for *variant* into docker-test.yaml."""
    rendered = DOCKER_TEST.format(variant=variant)
    Path('docker-test.yaml').write_text(rendered)
def usage():
    """Print the command-line usage hint to stdout."""
    message = 'usage: python test.py (stable|rolling)'
    print(message)
if __name__ == '__main__':
    # Require exactly one positional argument naming the variant.
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)
    variant = sys.argv[1]
    # VARIANTS is imported from the sibling build module.
    if variant not in VARIANTS:
        usage()
        sys.exit(1)
    test_setup(variant)
|
[
"pathlib.Path",
"sys.exit"
] |
[((535, 546), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (543, 546), False, 'import sys\n'), ((630, 641), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (638, 641), False, 'import sys\n'), ((307, 331), 'pathlib.Path', 'Path', (['"""docker-test.yaml"""'], {}), "('docker-test.yaml')\n", (311, 331), False, 'from pathlib import Path\n')]
|
import sys
import numpy
from scipy import special
import statsmodels.api as sm
from galpy.util import bovy_plot
import define_rcsample
def plot_rcdistancecomparison(plotfilename):
    """Plot the fractional difference between the M_H- and M_Ks-based
    red-clump distances versus the M_Ks-based distance, overlay a lowess
    trend, and save the figure to *plotfilename*."""
    # Get the sample
    sample = define_rcsample.get_rcsample()
    # Fractional distance difference, reused for the scatter and the trend
    frac_diff = (sample['RC_DIST_H'] - sample['RC_DIST']) / sample['RC_DIST']
    # Now plot the difference
    bovy_plot.bovy_print()
    conf_levels = special.erf(numpy.arange(1, 3) / numpy.sqrt(2.))
    bovy_plot.scatterplot(sample['RC_DIST'],
                          frac_diff,
                          conditional=True,
                          levels=conf_levels,
                          linestyle='none', color='k', marker=',',
                          xrange=[0., 7.49], yrange=[-0.075, 0.075],
                          xlabel=r'$M_{K_s}\!-\!\mathrm{based\ distance\,(kpc)}$',
                          ylabel=r'$\mathrm{Fractional\ difference\ of}\ M_H\ \mathrm{vs.}\ M_{K_s}$',
                          onedhistx=True, bins=31)
    bovy_plot.bovy_plot([0., 10.], [0., 0.], '--', lw=2., color='0.75',
                        overplot=True)
    # Overlay a lowess-smoothed trend of the fractional difference
    trend = sm.nonparametric.lowess(frac_diff, sample['RC_DIST'], frac=.3)
    bovy_plot.bovy_plot(trend[:, 0], trend[:, 1], 'w--', lw=2., overplot=True)
    bovy_plot.bovy_end_print(plotfilename)
    return None
if __name__ == '__main__':
    # Usage: python <script> <output plot filename>
    plot_rcdistancecomparison(sys.argv[1])
|
[
"galpy.util.bovy_plot.bovy_plot",
"define_rcsample.get_rcsample",
"galpy.util.bovy_plot.bovy_end_print",
"galpy.util.bovy_plot.bovy_print",
"galpy.util.bovy_plot.scatterplot",
"numpy.arange",
"numpy.sqrt"
] |
[((213, 243), 'define_rcsample.get_rcsample', 'define_rcsample.get_rcsample', ([], {}), '()\n', (241, 243), False, 'import define_rcsample\n'), ((277, 299), 'galpy.util.bovy_plot.bovy_print', 'bovy_plot.bovy_print', ([], {}), '()\n', (297, 299), False, 'from galpy.util import bovy_plot\n'), ((362, 780), 'galpy.util.bovy_plot.scatterplot', 'bovy_plot.scatterplot', (["rcdata['RC_DIST']", "((rcdata['RC_DIST_H'] - rcdata['RC_DIST']) / rcdata['RC_DIST'])"], {'conditional': '(True)', 'levels': 'levels', 'linestyle': '"""none"""', 'color': '"""k"""', 'marker': '""","""', 'xrange': '[0.0, 7.49]', 'yrange': '[-0.075, 0.075]', 'xlabel': '"""$M_{K_s}\\\\!-\\\\!\\\\mathrm{based\\\\ distance\\\\,(kpc)}$"""', 'ylabel': '"""$\\\\mathrm{Fractional\\\\ difference\\\\ of}\\\\ M_H\\\\ \\\\mathrm{vs.}\\\\ M_{K_s}$"""', 'onedhistx': '(True)', 'bins': '(31)'}), "(rcdata['RC_DIST'], (rcdata['RC_DIST_H'] - rcdata[\n 'RC_DIST']) / rcdata['RC_DIST'], conditional=True, levels=levels,\n linestyle='none', color='k', marker=',', xrange=[0.0, 7.49], yrange=[-\n 0.075, 0.075], xlabel=\n '$M_{K_s}\\\\!-\\\\!\\\\mathrm{based\\\\ distance\\\\,(kpc)}$', ylabel=\n '$\\\\mathrm{Fractional\\\\ difference\\\\ of}\\\\ M_H\\\\ \\\\mathrm{vs.}\\\\ M_{K_s}$',\n onedhistx=True, bins=31)\n", (383, 780), False, 'from galpy.util import bovy_plot\n'), ((944, 1035), 'galpy.util.bovy_plot.bovy_plot', 'bovy_plot.bovy_plot', (['[0.0, 10.0]', '[0.0, 0.0]', '"""--"""'], {'lw': '(2.0)', 'color': '"""0.75"""', 'overplot': '(True)'}), "([0.0, 10.0], [0.0, 0.0], '--', lw=2.0, color='0.75',\n overplot=True)\n", (963, 1035), False, 'from galpy.util import bovy_plot\n'), ((1205, 1272), 'galpy.util.bovy_plot.bovy_plot', 'bovy_plot.bovy_plot', (['z[:, 0]', 'z[:, 1]', '"""w--"""'], {'lw': '(2.0)', 'overplot': '(True)'}), "(z[:, 0], z[:, 1], 'w--', lw=2.0, overplot=True)\n", (1224, 1272), False, 'from galpy.util import bovy_plot\n'), ((1270, 1308), 'galpy.util.bovy_plot.bovy_end_print', 'bovy_plot.bovy_end_print', 
(['plotfilename'], {}), '(plotfilename)\n', (1294, 1308), False, 'from galpy.util import bovy_plot\n'), ((324, 342), 'numpy.arange', 'numpy.arange', (['(1)', '(3)'], {}), '(1, 3)\n', (336, 342), False, 'import numpy\n'), ((342, 357), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (352, 357), False, 'import numpy\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# is_equal_as_previous.py
"""
is_equal_as_previous is made to be called multiple times
is_equal_as_previous saves the previous value and compares it with the current value
thus is_equal_as_previous can detect changes in sequences
when is_equal_as_previous is called for the first time it returns True
when is_equal_as_previous is called again with the same value as parameter it returns True
when is_equal_as_previous is called again with another value as parameter it returns False
when is_equal_as_previous is called again with the same value as the previous time it returns True
after is_equal_as_previous is called it has two attributes: current and previous
is_equal_as_previous.current returns the latest given value
is_equal_as_previous.previous returns the value before the latest given value
is_same_as_prev can also be used just to check the current with the previous
when keyword-only parameter: just_check is set to True the previous is kept as the previous
"""
# avg. of 1_000_000: 0.322166
def is_equal_as_previous1(current: object, *, just_check: bool = False) -> bool:
    """ check if the given current is the same as the previous received """
    fn = is_equal_as_previous1  # the function object carries its own state
    try:
        verdict = bool(current == fn.__memory)
    except AttributeError:
        # First ever call: nothing is remembered yet.  Seed the memory and
        # recurse once so the first call always compares equal (True).
        fn.__memory = current
        fn.previous = current
        fn.current = current
        return fn(current)
    fn.previous = fn.__memory  # value seen before this call
    fn.current = current       # value just received
    if not just_check:         # just_check=True keeps the old memory
        fn.__memory = current
    return verdict
# monkey patch: expose the fastest implementation under the generic name
is_equal_as_previous = is_equal_as_previous1
# avg. of 1_000_000: 0.342118
def is_equal_as_previous2(current: object, *, just_check: bool = False) -> bool:
    """ check if the given current value is the same as the previous received value """
    fn = is_equal_as_previous2  # state lives on the function object
    remembered = current  # fallback so the shared tail below is always safe
    try:
        remembered = fn.prev_cur[1]  # value seen on the previous call
    except AttributeError:
        # First ever call: seed the pair and recurse so it compares equal.
        fn.prev_cur = [current, current]
        fn.previous = fn.current = current
        outcome = fn(current)
    else:
        fn.prev_cur = [remembered, current]  # slide the pair forward
        outcome = bool(remembered == current)
    if not just_check:  # keep the previous if just_check is set to True
        fn.previous, fn.current = remembered, current
    return outcome
# avg. of 1_000_000: 1.250343
def is_equal_as_previous3(current: object, *, just_check: bool = False) -> bool:
    """ check if the given current value is the same as the previous received value """
    fn = is_equal_as_previous3  # state is stored on the function object
    # Seed the memory on the very first call so it compares equal (True).
    if not hasattr(fn, '__memory'):
        fn.__memory = current
    prior = fn.__memory
    verdict = bool(prior == current)
    # only checking doesn't advance the memory or re-assign the attributes
    if not just_check:
        fn.__memory = current
        fn.previous, fn.current = prior, current
    # assure that even the first call exposes both attributes
    if not (hasattr(fn, 'previous') and hasattr(fn, 'current')):
        fn.previous, fn.current = prior, current
    return verdict
if __name__ == '__main__':
    # Demo driver: pick one of the three implementations to exercise.
    eq = is_equal_as_previous1
    # eq = is_equal_as_previous2
    # eq = is_equal_as_previous3
    print("the function is not called and thus has no attributes")
    # Before the first call neither attribute exists yet.
    try:
        eq.previous
    except AttributeError:
        print("is_same_as_prev.previous doesn't yet exist")
    try:
        eq.current
    except AttributeError:
        print("is_same_as_prev.current doesn't yet exist")
    print("call is_same_as_prev and print the attributes and the result")
    first: bool = eq(True)
    assert first == True, "the first call of is_same_as_prev is always True"
    print(f"previous: {eq.previous}, current: {eq.current} -> {first}")
    second: bool = eq(False)
    print(f"previous: {eq.previous}, current: {eq.current} -> {second}")
    third: bool = eq(False)
    print(f"previous: {eq.previous}, current: {eq.current} -> {third}")
    print("keep the previous a.k.a. just check")
    four: bool = eq(..., just_check=True)
    print(f"previous: {eq.previous}, current: {eq.current} -> {four}")
    five: bool = eq(None, just_check=True)
    print(f"previous: {eq.previous}, current: {eq.current} -> {five}")
    print("endless loop:")
    import time
    from itertools import cycle
    # NOTE(review): this loop never terminates, so everything after it
    # (including the commented-out timeit benchmarks) is unreachable.
    for cur in cycle([1, 1, 2, 2, 3]):
        result = eq(cur)
        print(f"current: {cur} is same as prev.: {eq.previous}", end="\n" if result else " ")
        if not result:
            print(f"-> {eq.previous} == {eq.current}")
        time.sleep(1)
    import timeit
    # setup = "from __main__ import is_equal_as_previous1, is_equal_as_previous2, is_equal_as_previous3"
    # print('is_equal_as_previous1', timeit.timeit("is_equal_as_previous1(None)", setup, number=1_000_000))
    # print('is_equal_as_previous2', timeit.timeit("is_equal_as_previous2(None)", setup, number=1_000_000))
    # print('is_equal_as_previous3', timeit.timeit("is_equal_as_previous3(None)", setup, number=1_000_000))
|
[
"itertools.cycle",
"time.sleep"
] |
[((5528, 5550), 'itertools.cycle', 'cycle', (['[1, 1, 2, 2, 3]'], {}), '([1, 1, 2, 2, 3])\n', (5533, 5550), False, 'from itertools import cycle\n'), ((5757, 5770), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5767, 5770), False, 'import time\n')]
|
#!/usr/bin/env python
"""
Given the output of fconv_slopes, plot the thermodynamic
gradients corresponding to an initial model.
<NAME>
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import argparse
# Command-line interface: input gradients file plus output options.
parser = argparse.ArgumentParser()
parser.add_argument('infile', type=str,
                    help='Name of file containing thermodynamic gradients to plot.')
parser.add_argument('-f', '--format', type=str, default='png',
                    help='Format of the desired output files. Can be, e.g. "png" or "eps". Defaults to "png".')
parser.add_argument('-rup', '--radius_upper', type=float,
                    help='Upper bound for the plotted radius.')
parser.add_argument('-o', '--outname', type=str, help='Base name of output file to use (w/o extension).')
args = parser.parse_args()
class ConvectiveGradients(object):
    """
    Thermodynamic gradients (actual, adiabatic, Ledoux) of an initial
    model versus radius, as written by fconv_slopes.
    """
    def __init__(self, infile=None):
        # infile holds four whitespace-separated columns:
        # r, actual, adiabatic, ledoux.  With no file, start empty.
        if infile:
            self.r, self.actual, self.adiabatic, self.ledoux = np.loadtxt(infile, unpack=True)
            self.infile = infile
        else:
            self.r = []
            self.actual = []
            self.adiabatic = []
            self.ledoux = []
            self.infile = ''

    def plot(self, fmt=None, rup=None, outname=None, show=False):
        """
        Plot (actual - adiabatic) and (actual - ledoux) versus radius on a
        symlog y-axis and save the figure.

        :param fmt: 'png' saves at 300 dpi; any other value saves eps
        :param rup: optional upper bound for the plotted radius (cm)
        :param outname: output file name; defaults to self.infile + ext
        :param show: if True, also display the figure interactively
        """
        fig = plt.figure()
        ax = fig.add_subplot(111)
        idxup = -1
        if rup:
            ax.set_xlim([0, rup])
            # Get the lowest index where radius > rup; slices below use it
            idxup = np.where(self.r > rup)[0][0]
        ax.set_xlabel(r'$\mathrm{r (cm)}$')
        # FIX: the original computed get_signed_indices(dadiabatic) twice
        # (copy-paste bug -- the second call should have used dledoux); the
        # results only fed plotting code that was commented out, so both
        # the dead code and the unused index computations are removed.
        dadiabatic = self.actual[:idxup] - self.adiabatic[:idxup]
        dledoux = self.actual[:idxup] - self.ledoux[:idxup]
        ax.plot(self.r[:idxup], dadiabatic, color='blue', linestyle='-',
                label=r'adiabatic $\mathrm{\nabla_{conv}}$')
        ax.plot(self.r[:idxup], dledoux, color='red', linestyle='-.',
                label=r'ledoux $\mathrm{\nabla_{conv}}$')
        # Linear threshold of the symlog scale: half the smaller extremum
        mx = max(np.amax(dadiabatic), np.amax(dledoux))
        mn = min(np.amin(dadiabatic), np.amin(dledoux))
        mlin = min(abs(mx), abs(mn))
        # NOTE(review): linthreshy was renamed linthresh in matplotlib 3.3;
        # kept as-is for the matplotlib vintage this script targets.
        plt.yscale('symlog', linthreshy=0.5*mlin)
        ax.set_ylabel(r'$\mathrm{\nabla_{actual} - \nabla_{conv}}$')
        plt.legend()
        if fmt=='png':
            if not outname:
                outname = self.infile + '.png'
            plt.savefig(outname, dpi=300)
        else:
            if not outname:
                outname = self.infile + '.eps'
            plt.savefig(outname)
        if show:
            plt.show()
        plt.close(fig)

    def get_signed_indices(self, dvec):
        """Return (negative, positive) np.where index tuples for dvec."""
        neg_idx = np.where(dvec < 0.0)
        pos_idx = np.where(dvec > 0.0)
        return neg_idx, pos_idx
if __name__=='__main__':
    # Load the gradients named on the command line and plot them.
    cg = ConvectiveGradients(args.infile)
    cg.plot(args.format, args.radius_upper, args.outname)
|
[
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.amin",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.amax",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.loadtxt",
"matplotlib.pyplot.savefig"
] |
[((256, 281), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (279, 281), False, 'import argparse\n'), ((1303, 1315), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1313, 1315), True, 'import matplotlib.pyplot as plt\n'), ((3213, 3256), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""symlog"""'], {'linthreshy': '(0.5 * mlin)'}), "('symlog', linthreshy=0.5 * mlin)\n", (3223, 3256), True, 'import matplotlib.pyplot as plt\n'), ((3333, 3345), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3343, 3345), True, 'import matplotlib.pyplot as plt\n'), ((3734, 3754), 'numpy.where', 'np.where', (['(dvec < 0.0)'], {}), '(dvec < 0.0)\n', (3742, 3754), True, 'import numpy as np\n'), ((3773, 3793), 'numpy.where', 'np.where', (['(dvec > 0.0)'], {}), '(dvec > 0.0)\n', (3781, 3793), True, 'import numpy as np\n'), ((992, 1023), 'numpy.loadtxt', 'np.loadtxt', (['infile'], {'unpack': '(True)'}), '(infile, unpack=True)\n', (1002, 1023), True, 'import numpy as np\n'), ((3073, 3092), 'numpy.amax', 'np.amax', (['dadiabatic'], {}), '(dadiabatic)\n', (3080, 3092), True, 'import numpy as np\n'), ((3094, 3110), 'numpy.amax', 'np.amax', (['dledoux'], {}), '(dledoux)\n', (3101, 3110), True, 'import numpy as np\n'), ((3129, 3148), 'numpy.amin', 'np.amin', (['dadiabatic'], {}), '(dadiabatic)\n', (3136, 3148), True, 'import numpy as np\n'), ((3150, 3166), 'numpy.amin', 'np.amin', (['dledoux'], {}), '(dledoux)\n', (3157, 3166), True, 'import numpy as np\n'), ((3456, 3485), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outname'], {'dpi': '(300)'}), '(outname, dpi=300)\n', (3467, 3485), True, 'import matplotlib.pyplot as plt\n'), ((3587, 3607), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outname'], {}), '(outname)\n', (3598, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3637, 3647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3645, 3647), True, 'import matplotlib.pyplot as plt\n'), ((3660, 3674), 'matplotlib.pyplot.close', 
'plt.close', (['fig'], {}), '(fig)\n', (3669, 3674), True, 'import matplotlib.pyplot as plt\n'), ((1493, 1515), 'numpy.where', 'np.where', (['(self.r > rup)'], {}), '(self.r > rup)\n', (1501, 1515), True, 'import numpy as np\n')]
|
from os import unlink
from os.path import join, isfile
from contextlib import contextmanager
from functools import partial
from itertools import chain, combinations
from git import Repo, Head, Commit
from mock import patch, MagicMock
from jig.tests.testcase import JigTestCase
from jig.exc import (
GitRevListMissing, GitRevListFormatError, GitWorkingDirectoryDirty,
TrackingBranchMissing)
from jig.gitutils.branches import (
parse_rev_range, prepare_working_directory,
_prepare_against_staged_index, _prepare_with_rev_range, Tracked)
@contextmanager
def assert_git_status_unchanged(repository):
    """
    Make sure that the working directory remains in the same rough state.

    :param string repository: Git repo
    """
    def status_lines(path):
        # Drop the first line of `git status --long`; it names the branch.
        return Repo(path).git.status('--long').splitlines()[1:]

    snapshot = status_lines(repository)
    yield
    assert snapshot == status_lines(repository), \
        "Working directory status has changed"
class PrepareTestCase(JigTestCase):
    """
    Base test class for private functions that prepare the working directory.
    """
    def setUp(self):
        super(PrepareTestCase, self).setUp()
        self.reset_gitrepo()

    def reset_gitrepo(self):
        # NOTE(review): deleting the attribute presumably makes the base
        # class re-create a fresh repository directory on next access --
        # confirm against JigTestCase.
        del self.gitrepodir
        # Four commits, one file each, so tests can reference a..d.
        self.commits = [
            self.commit(self.gitrepodir, 'a.txt', 'a'),
            self.commit(self.gitrepodir, 'b.txt', 'b'),
            self.commit(self.gitrepodir, 'c.txt', 'c'),
            self.commit(self.gitrepodir, 'd.txt', 'd')
        ]

    @property
    def repo(self):
        # A fresh Repo object per access; never cached.
        return Repo(self.gitrepodir)

    def diff_head(self):
        # Diff of the index against HEAD (i.e. the staged changes).
        return self.repo.index.diff('HEAD')

    @contextmanager
    def prepare(self):
        # Run the subclass-supplied context manager while asserting the
        # working directory status is unchanged afterwards.
        with assert_git_status_unchanged(self.gitrepodir):
            with self.prepare_context_manager() as subject:
                yield subject
class TestParseRevRange(JigTestCase):
    """
    Git utils revision range parser.
    """
    def setUp(self):
        super(TestParseRevRange, self).setUp()

        self.gitrepo, self.gitrepodir, _ = self.repo_from_fixture('repo01')

    def assertIsRevRange(self, rev_range):
        # A parsed range exposes its endpoints as git Commit objects.
        self.assertIsInstance(rev_range.a, Commit)
        self.assertIsInstance(rev_range.b, Commit)

    def test_bad_format(self):
        """
        If the revision range doesn't match the expected format.
        """
        with self.assertRaises(GitRevListFormatError):
            parse_rev_range(self.gitrepodir, 'A-B')

    def test_bad_format_missing_rev(self):
        """
        If the format is correct but the revisions are missing.
        """
        with self.assertRaises(GitRevListFormatError):
            parse_rev_range(self.gitrepodir, '..B')

        with self.assertRaises(GitRevListFormatError):
            parse_rev_range(self.gitrepodir, 'A..')

    def test_bad_revs(self):
        """
        If the format is good but the revisions do not exist.
        """
        with self.assertRaises(GitRevListMissing):
            parse_rev_range(self.gitrepodir, 'FOO..BAR')

    def test_good_revs(self):
        """
        The revisions exist.
        """
        self.assertIsRevRange(parse_rev_range(self.gitrepodir, 'HEAD^1..HEAD'))

    def test_local_branch(self):
        """
        A branch that is newly created can be referenced.
        """
        self.gitrepo.create_head('feature-branch')

        self.assertIsRevRange(
            parse_rev_range(self.gitrepodir, 'HEAD^1..feature-branch')
        )

    def test_out_of_range(self):
        """
        A revision is out of range.
        """
        with self.assertRaises(GitRevListMissing):
            parse_rev_range(self.gitrepodir, 'HEAD~1000..HEAD')
class TestPrepareAgainstStagedIndex(PrepareTestCase):
    """
    Prepare the working directory against the staged index.
    """
    def prepare_context_manager(self):
        # Subject under test; PrepareTestCase.prepare() wraps this and
        # also asserts that `git status` is unchanged afterwards.
        return _prepare_against_staged_index(self.repo)

    def test_working_directory_clean(self):
        """
        Working directory is clean.
        """
        with self.prepare() as stash:
            # Nothing needs stashing on a pristine tree.
            self.assertIsNone(stash)

    def test_untracked(self):
        """
        Untracked file present.
        """
        self.create_file(self.gitrepodir, 'e.txt', 'e')
        expected_untracked = self.repo.untracked_files

        with self.prepare() as stash:
            self.assertIsNone(stash)
            # Untracked files are left in place rather than stashed.
            self.assertEqual(expected_untracked, self.repo.untracked_files)

    def test_staged(self):
        """
        Staged file.
        """
        self.stage(self.gitrepodir, 'a.txt', 'aa')
        before = self.diff_head()

        with self.prepare() as stash:
            self.assertIsNone(stash)
            # The staged changes are not stashed
            self.assertEqual(before, self.diff_head())

    def test_modified(self):
        """
        Modified file.
        """
        self.modify_file(self.gitrepodir, 'a.txt', 'aa')

        with self.prepare() as stash:
            self.assertIsNotNone(stash)
            # The modifications are stashed
            self.assertEqual([], self.repo.index.diff(None))

    def test_stageremoved(self):
        """
        Staged removal of a file.
        """
        self.stage_remove(self.gitrepodir, 'a.txt')

        with self.prepare() as stash:
            self.assertIsNone(stash)

    def test_fsremoved(self):
        """
        Non-staged removal of a file.
        """
        unlink(join(self.gitrepodir, 'a.txt'))

        with self.prepare() as stash:
            self.assertIsNotNone(stash)
            # The file is temporarily restored
            self.assertTrue(isfile(join(self.gitrepodir, 'a.txt')))

    def test_combinations(self):
        """
        Combine all variations of modification, creation, or removal.
        """
        # Each mutator is tagged with whether it alone should force a stash.
        def modified():
            self.modify_file(self.gitrepodir, 'a.txt', 'aa')
        modified.should_stash = True

        def staged():
            self.stage(self.gitrepodir, 'b.txt', 'bb')
        staged.should_stash = False

        def indexremoved():
            self.stage_remove(self.gitrepodir, 'c.txt')
        indexremoved.should_stash = False

        def fsremoved():
            unlink(join(self.gitrepodir, 'd.txt'))
        fsremoved.should_stash = True

        def untracked():
            self.create_file(self.gitrepodir, 'e.txt', 'e')
        untracked.should_stash = False

        mutation = partial(
            combinations,
            (modified, staged, indexremoved, fsremoved, untracked)
        )

        # Every combination of 2..5 of the mutators above.
        options = chain.from_iterable(list(map(mutation, [2, 3, 4, 5])))

        for option in options:
            # Mutate the Git repository
            list(map(lambda x: x(), option))

            with self.prepare() as stash:
                # A stash is expected iff any applied mutator demands one.
                should_stash = any([x.should_stash for x in option])

                if should_stash:
                    self.assertIsNotNone(stash)
                else:
                    self.assertIsNone(stash)

            # Start from a pristine repository for the next combination.
            self.reset_gitrepo()
class TestPrepareWithRevRange(PrepareTestCase):
    """
    With a given rev range test that we can checkout the repository.
    """
    def prepare_context_manager(self):
        # self.rev_range is set by each test before prepare() is entered.
        rev_range_parsed = parse_rev_range(
            self.repo.working_dir,
            self.rev_range
        )

        return _prepare_with_rev_range(self.repo, rev_range_parsed)

    def test_dirty_working_directory(self):
        """
        Dirty working directory will raise an exception.
        """
        self.rev_range = 'HEAD~3..HEAD~0'

        # Force the working directory to be dirty
        self.modify_file(self.gitrepodir, 'a.txt', 'aa')

        with self.assertRaises(GitWorkingDirectoryDirty):
            self.prepare().__enter__()

    def test_yields_git_named_head(self):
        """
        The object that is yielded is a :py:class:`git.Head`.
        """
        self.rev_range = 'HEAD~1..HEAD~0'

        with self.prepare() as head:
            self.assertIsInstance(head, Head)

    def test_yields_git_detached_head(self):
        """
        If detached HEAD, object that is yielded is a :py:class:`git.Commit`.
        """
        self.rev_range = 'HEAD~1..HEAD~0'

        # Detach the head by checking out the commit hash
        Repo(self.gitrepodir).git.checkout(self.commits[-1].hexsha)

        with self.prepare() as head:
            self.assertIsInstance(head, Commit)

    def test_detached_head_right_side_of_rev_range(self):
        """
        The head object points to the right side of the rev range.
        """
        self.rev_range = 'HEAD~2..HEAD~1'

        # HEAD~1 is going to be our second to last commit
        expected = self.commits[-2]

        with self.prepare():
            # The symbolic ref for HEAD should now be our expected commit
            self.assertEqual(
                Repo(self.gitrepodir).head.commit,
                expected
            )

    def test_returns_to_master(self):
        """
        After exiting the context manager, we should be back on master.
        """
        self.rev_range = 'HEAD~2..HEAD~1'

        with self.prepare():
            pass

        self.assertEqual(
            Repo(self.gitrepodir).head.reference.path,
            'refs/heads/master'
        )

    def test_returns_to_detached_head(self):
        """
        From a detached head upon exiting we should be back where we started.
        """
        self.rev_range = 'HEAD~2..HEAD~1'

        # Detach the head by checking out the commit hash
        Repo(self.gitrepodir).git.checkout(self.commits[-2].hexsha)

        # HEAD~1 is going to be our third to last commit
        expected = self.commits[-3]

        with self.prepare():
            self.assertEqual(
                Repo(self.gitrepodir).head.commit,
                expected
            )

        # And we are back to our detached head we started with
        self.assertEqual(
            Repo(self.gitrepodir).head.commit,
            self.commits[-2]
        )
class TestPrepareWorkingDirectory(JigTestCase):
    """
    Make the working directory suitable for running Jig.
    """
    def setUp(self):
        super(TestPrepareWorkingDirectory, self).setUp()

        self.gitrepo, self.gitrepodir, _ = self.repo_from_fixture('repo01')

    def test_no_rev_range(self):
        """
        Should prepare against the staged index if no rev range.
        """
        prepare_function = \
            'jig.gitutils.branches._prepare_against_staged_index'
        with patch(prepare_function) as p:
            p.return_value = MagicMock()

            with prepare_working_directory(self.gitrepodir):
                pass

        # The patched context manager was actually entered.
        self.assertTrue(p.return_value.__enter__.called)

    def test_rev_range(self):
        """
        Should checkout the Git repo at the end of the rev range.
        """
        prepare_function = \
            'jig.gitutils.branches._prepare_with_rev_range'
        with patch(prepare_function) as p:
            p.return_value = MagicMock()
            rev_range_parsed = parse_rev_range(
                self.gitrepodir, 'HEAD~1..HEAD~0'
            )

            with prepare_working_directory(self.gitrepodir, rev_range_parsed):
                pass

        # The patched context manager was actually entered.
        self.assertTrue(p.return_value.__enter__.called)
class TestTracked(JigTestCase):
    """
    Git repositories can be tracked for CI mode.
    """
    def setUp(self):
        super(TestTracked, self).setUp()

        # Three commits so tests can reference first/last via self.commits.
        self.commits = [
            self.commit(self.gitrepodir, 'a.txt', 'a'),
            self.commit(self.gitrepodir, 'b.txt', 'b'),
            self.commit(self.gitrepodir, 'c.txt', 'c'),
        ]

    def test_tracking_branch_does_not_exist(self):
        """
        Tracking branch does not exist.
        """
        tracked = Tracked(self.gitrepodir)

        self.assertFalse(tracked.exists)

    def test_tracking_branch_exists(self):
        """
        Tracking branch exists.
        """
        # 'jig-ci-last-run' is the default tracking branch name these
        # tests assume Tracked uses.
        tracking_branch = Repo(self.gitrepodir).create_head('jig-ci-last-run')
        tracking_branch.commit = 'HEAD'

        tracked = Tracked(self.gitrepodir)

        self.assertTrue(tracked.exists)

    def test_tracking_branch_by_a_different_name(self):
        """
        Can check existence by a different name than the default.
        """
        name = 'different-tracking-name'
        tracking_branch = Repo(self.gitrepodir).create_head(name)
        tracking_branch.commit = 'HEAD'

        tracked = Tracked(self.gitrepodir, name)

        self.assertTrue(tracked.exists)

    def test_update_defaults_to_head(self):
        """
        Updating the tracking branch defaults to current HEAD.
        """
        tracked = Tracked(self.gitrepodir)

        reference = tracked.update()

        self.assertEqual(
            reference.commit,
            self.commits[-1]
        )

    def test_non_existent_reference(self):
        """
        Without a tracking branch trying to get a reference to it raises.
        """
        tracked = Tracked(self.gitrepodir)

        with self.assertRaises(TrackingBranchMissing):
            tracked.reference

    def test_tracking_branch_reference(self):
        """
        With a tracking branch we can get a reference to it.
        """
        tracking_branch = Repo(self.gitrepodir).create_head('jig-ci-last-run')
        tracking_branch.commit = 'HEAD~2'

        tracked = Tracked(self.gitrepodir)

        self.assertEqual(
            tracked.reference.commit,
            self.commits[0]
        )

    def test_update_takes_commit_hash(self):
        """
        Updating the tracking branch can be done with a commit hash.
        """
        tracked = Tracked(self.gitrepodir)

        tracked.update(self.commits[0].hexsha)

        self.assertEqual(
            tracked.reference.commit,
            self.commits[0]
        )

    def test_update_moves_head_forward(self):
        """
        The tracking branch reference can be moved forward.
        """
        tracking_branch = Repo(self.gitrepodir).create_head('jig-ci-last-run')
        tracking_branch.commit = 'HEAD~2'

        tracked = Tracked(self.gitrepodir)

        tracked.update()

        self.assertEqual(
            tracked.reference.commit,
            self.commits[-1]
        )
|
[
"functools.partial",
"jig.gitutils.branches.prepare_working_directory",
"os.path.join",
"jig.gitutils.branches.parse_rev_range",
"git.Repo",
"jig.gitutils.branches._prepare_with_rev_range",
"mock.patch",
"jig.gitutils.branches._prepare_against_staged_index",
"mock.MagicMock",
"jig.gitutils.branches.Tracked"
] |
[((1679, 1700), 'git.Repo', 'Repo', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (1683, 1700), False, 'from git import Repo, Head, Commit\n'), ((3993, 4033), 'jig.gitutils.branches._prepare_against_staged_index', '_prepare_against_staged_index', (['self.repo'], {}), '(self.repo)\n', (4022, 4033), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((6590, 6667), 'functools.partial', 'partial', (['combinations', '(modified, staged, indexremoved, fsremoved, untracked)'], {}), '(combinations, (modified, staged, indexremoved, fsremoved, untracked))\n', (6597, 6667), False, 'from functools import partial\n'), ((7391, 7445), 'jig.gitutils.branches.parse_rev_range', 'parse_rev_range', (['self.repo.working_dir', 'self.rev_range'], {}), '(self.repo.working_dir, self.rev_range)\n', (7406, 7445), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((7496, 7548), 'jig.gitutils.branches._prepare_with_rev_range', '_prepare_with_rev_range', (['self.repo', 'rev_range_parsed'], {}), '(self.repo, rev_range_parsed)\n', (7519, 7548), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((11969, 11993), 'jig.gitutils.branches.Tracked', 'Tracked', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (11976, 11993), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((12274, 12298), 'jig.gitutils.branches.Tracked', 'Tracked', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (12281, 12298), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((12654, 12684), 
'jig.gitutils.branches.Tracked', 'Tracked', (['self.gitrepodir', 'name'], {}), '(self.gitrepodir, name)\n', (12661, 12684), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((12876, 12900), 'jig.gitutils.branches.Tracked', 'Tracked', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (12883, 12900), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((13196, 13220), 'jig.gitutils.branches.Tracked', 'Tracked', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (13203, 13220), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((13579, 13603), 'jig.gitutils.branches.Tracked', 'Tracked', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (13586, 13603), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((13864, 13888), 'jig.gitutils.branches.Tracked', 'Tracked', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (13871, 13888), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((14311, 14335), 'jig.gitutils.branches.Tracked', 'Tracked', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (14318, 14335), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((2538, 2577), 'jig.gitutils.branches.parse_rev_range', 'parse_rev_range', (['self.gitrepodir', '"""A-B"""'], {}), "(self.gitrepodir, 'A-B')\n", (2553, 2577), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, 
Tracked\n'), ((2777, 2816), 'jig.gitutils.branches.parse_rev_range', 'parse_rev_range', (['self.gitrepodir', '"""..B"""'], {}), "(self.gitrepodir, '..B')\n", (2792, 2816), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((2885, 2924), 'jig.gitutils.branches.parse_rev_range', 'parse_rev_range', (['self.gitrepodir', '"""A.."""'], {}), "(self.gitrepodir, 'A..')\n", (2900, 2924), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((3104, 3148), 'jig.gitutils.branches.parse_rev_range', 'parse_rev_range', (['self.gitrepodir', '"""FOO..BAR"""'], {}), "(self.gitrepodir, 'FOO..BAR')\n", (3119, 3148), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((3266, 3314), 'jig.gitutils.branches.parse_rev_range', 'parse_rev_range', (['self.gitrepodir', '"""HEAD^1..HEAD"""'], {}), "(self.gitrepodir, 'HEAD^1..HEAD')\n", (3281, 3314), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((3527, 3585), 'jig.gitutils.branches.parse_rev_range', 'parse_rev_range', (['self.gitrepodir', '"""HEAD^1..feature-branch"""'], {}), "(self.gitrepodir, 'HEAD^1..feature-branch')\n", (3542, 3585), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((3753, 3804), 'jig.gitutils.branches.parse_rev_range', 'parse_rev_range', (['self.gitrepodir', '"""HEAD~1000..HEAD"""'], {}), "(self.gitrepodir, 'HEAD~1000..HEAD')\n", (3768, 3804), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((5612, 
5642), 'os.path.join', 'join', (['self.gitrepodir', '"""a.txt"""'], {}), "(self.gitrepodir, 'a.txt')\n", (5616, 5642), False, 'from os.path import join, isfile\n'), ((10689, 10712), 'mock.patch', 'patch', (['prepare_function'], {}), '(prepare_function)\n', (10694, 10712), False, 'from mock import patch, MagicMock\n'), ((10748, 10759), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (10757, 10759), False, 'from mock import patch, MagicMock\n'), ((11125, 11148), 'mock.patch', 'patch', (['prepare_function'], {}), '(prepare_function)\n', (11130, 11148), False, 'from mock import patch, MagicMock\n'), ((11184, 11195), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (11193, 11195), False, 'from mock import patch, MagicMock\n'), ((11228, 11278), 'jig.gitutils.branches.parse_rev_range', 'parse_rev_range', (['self.gitrepodir', '"""HEAD~1..HEAD~0"""'], {}), "(self.gitrepodir, 'HEAD~1..HEAD~0')\n", (11243, 11278), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((6375, 6405), 'os.path.join', 'join', (['self.gitrepodir', '"""d.txt"""'], {}), "(self.gitrepodir, 'd.txt')\n", (6379, 6405), False, 'from os.path import join, isfile\n'), ((10778, 10820), 'jig.gitutils.branches.prepare_working_directory', 'prepare_working_directory', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (10803, 10820), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((11327, 11387), 'jig.gitutils.branches.prepare_working_directory', 'prepare_working_directory', (['self.gitrepodir', 'rev_range_parsed'], {}), '(self.gitrepodir, rev_range_parsed)\n', (11352, 11387), False, 'from jig.gitutils.branches import parse_rev_range, prepare_working_directory, _prepare_against_staged_index, _prepare_with_rev_range, Tracked\n'), ((12162, 12183), 'git.Repo', 'Repo', (['self.gitrepodir'], {}), 
'(self.gitrepodir)\n', (12166, 12183), False, 'from git import Repo, Head, Commit\n'), ((12555, 12576), 'git.Repo', 'Repo', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (12559, 12576), False, 'from git import Repo, Head, Commit\n'), ((13465, 13486), 'git.Repo', 'Repo', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (13469, 13486), False, 'from git import Repo, Head, Commit\n'), ((14197, 14218), 'git.Repo', 'Repo', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (14201, 14218), False, 'from git import Repo, Head, Commit\n'), ((796, 812), 'git.Repo', 'Repo', (['repository'], {}), '(repository)\n', (800, 812), False, 'from git import Repo, Head, Commit\n'), ((5806, 5836), 'os.path.join', 'join', (['self.gitrepodir', '"""a.txt"""'], {}), "(self.gitrepodir, 'a.txt')\n", (5810, 5836), False, 'from os.path import join, isfile\n'), ((8435, 8456), 'git.Repo', 'Repo', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (8439, 8456), False, 'from git import Repo, Head, Commit\n'), ((9697, 9718), 'git.Repo', 'Repo', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (9701, 9718), False, 'from git import Repo, Head, Commit\n'), ((10103, 10124), 'git.Repo', 'Repo', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (10107, 10124), False, 'from git import Repo, Head, Commit\n'), ((9018, 9039), 'git.Repo', 'Repo', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (9022, 9039), False, 'from git import Repo, Head, Commit\n'), ((9355, 9376), 'git.Repo', 'Repo', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (9359, 9376), False, 'from git import Repo, Head, Commit\n'), ((9927, 9948), 'git.Repo', 'Repo', (['self.gitrepodir'], {}), '(self.gitrepodir)\n', (9931, 9948), False, 'from git import Repo, Head, Commit\n')]
|
import obs,parenty,socket_controller,threading
def callObs():
obs.main()
socket_controller.init_comm_file()
t = threading.Thread(target = callObs, args=())
t.start()
|
[
"obs.main",
"threading.Thread",
"socket_controller.init_comm_file"
] |
[((79, 113), 'socket_controller.init_comm_file', 'socket_controller.init_comm_file', ([], {}), '()\n', (111, 113), False, 'import obs, parenty, socket_controller, threading\n'), ((118, 159), 'threading.Thread', 'threading.Thread', ([], {'target': 'callObs', 'args': '()'}), '(target=callObs, args=())\n', (134, 159), False, 'import obs, parenty, socket_controller, threading\n'), ((67, 77), 'obs.main', 'obs.main', ([], {}), '()\n', (75, 77), False, 'import obs, parenty, socket_controller, threading\n')]
|
import csv
import pandas as pd
import matplotlib.pyplot as plt
games = []
with open("outputs/games_output.csv", 'r') as data:
for line in csv.DictReader(data):
games.append(line)
teams = []
with open("inputs/VBelo - teams.csv", 'r') as data:
for line in csv.DictReader(data):
teams.append(line)
for i in range(len(teams)):
teams[i]['elo'] = int(teams[i]['elo'])
not_tracked = ['D-III','NAIA','NCCAA','n/a']
start_data = {'Conference':['Big West','Carolinas','EIVA','Independent','MIVA','MPSF','SIAC'],
'Non-Conf Matches':[0,0,0,0,0,0,0],'Non-Conf Wins':[0,0,0,0,0,0,0],'Non-Conf Losses':[0,0,0,0,0,0,0],
'Non-Conf Home Matches':[0,0,0,0,0,0,0],'Non-Conf Home Wins':[0,0,0,0,0,0,0],'Non-Conf Home Losses':[0,0,0,0,0,0,0],
'Non-Conf Away Matches':[0,0,0,0,0,0,0],'Non-Conf Away Wins':[0,0,0,0,0,0,0],'Non-Conf Away Losses':[0,0,0,0,0,0,0],
'Non-Conf Neutral Matches':[0,0,0,0,0,0,0],'Non-Conf Neutral Wins':[0,0,0,0,0,0,0],'Non-Conf Neutral Losses':[0,0,0,0,0,0,0],
}
df = pd.DataFrame(start_data)
df=df.set_index('Conference')
def stats (year):
for i in range(len(games)):
if games[i]['season'] == year and games[i]['r_t1'] != '':
t1_c = ''
t2_c = ''
for x in range(len(teams)):
if games[i]['t1'] == teams[x]['short_name']:
t1_c = teams[x]['conference']
if games[i]['t2'] == teams[x]['short_name']:
t2_c = teams[x]['conference']
if t1_c != t2_c:
if t1_c not in not_tracked:
df.loc[t1_c,'Non-Conf Matches'] += 1
df.loc[t1_c,'Non-Conf Wins'] += int(games[i]['r_t1'])
if games[i]['r_t1'] == '0':
df.loc[t1_c,'Non-Conf Losses'] += 1
if games[i]['n'] == '0':
df.loc[t1_c,'Non-Conf Away Matches'] += 1
df.loc[t1_c,'Non-Conf Away Wins'] += int(games[i]['r_t1'])
if games[i]['r_t1'] == '0':
df.loc[t1_c,'Non-Conf Away Losses'] += 1
if games[i]['n'] == '1':
df.loc[t1_c,'Non-Conf Neutral Matches'] += 1
df.loc[t1_c,'Non-Conf Neutral Wins'] += int(games[i]['r_t1'])
if games[i]['r_t1'] == '0':
df.loc[t1_c,'Non-Conf Neutral Losses'] += 1
if t2_c not in not_tracked:
df.loc[t2_c,'Non-Conf Matches'] += 1
df.loc[t2_c,'Non-Conf Wins'] += int(games[i]['r_t2'])
if games[i]['r_t2'] == '0':
df.loc[t2_c,'Non-Conf Losses'] += 1
if games[i]['n'] == '0':
df.loc[t2_c,'Non-Conf Home Matches'] += 1
df.loc[t2_c,'Non-Conf Home Wins'] += int(games[i]['r_t2'])
if games[i]['r_t2'] == '0':
df.loc[t2_c,'Non-Conf Home Losses'] += 1
if games[i]['n'] == '1':
df.loc[t2_c,'Non-Conf Neutral Matches'] += 1
df.loc[t2_c,'Non-Conf Neutral Wins'] += int(games[i]['r_t2'])
if games[i]['r_t2'] == '0':
df.loc[t2_c,'Non-Conf Neutral Losses'] += 1
pd.set_option('display.max_columns', None)
df.to_csv('outputs/conference_stats_2022.csv')
stats('2022')
|
[
"pandas.DataFrame",
"csv.DictReader",
"pandas.set_option"
] |
[((1004, 1028), 'pandas.DataFrame', 'pd.DataFrame', (['start_data'], {}), '(start_data)\n', (1016, 1028), True, 'import pandas as pd\n'), ((143, 163), 'csv.DictReader', 'csv.DictReader', (['data'], {}), '(data)\n', (157, 163), False, 'import csv\n'), ((272, 292), 'csv.DictReader', 'csv.DictReader', (['data'], {}), '(data)\n', (286, 292), False, 'import csv\n'), ((3360, 3402), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (3373, 3402), True, 'import pandas as pd\n')]
|
from __future__ import print_function
import random
import nltk
from nltk.corpus import treebank
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report
from keras.layers import Dense, Dropout, Activation
from keras.models import Sequential
from keras.utils import np_utils, plot_model
from keras.wrappers.scikit_learn import KerasClassifier
import matplotlib.pyplot as plt
CUSTOM_SEED = 42
def add_basic_features(sentence_terms, index):
""" Compute some very basic word features.
:param sentence_terms: [w1, w2, ...]
:type sentence_terms: list
:param index: the index of the word
:type index: int
:return: dict containing features
:rtype: dict
"""
term = sentence_terms[index]
return {
'nb_terms': len(sentence_terms),
'term': term,
'is_first': index == 0,
'is_last': index == len(sentence_terms) - 1,
'is_capitalized': term[0].upper() == term[0],
'is_all_caps': term.upper() == term,
'is_all_lower': term.lower() == term,
'prefix-1': term[0],
'prefix-2': term[:2],
'prefix-3': term[:3],
'suffix-1': term[-1],
'suffix-2': term[-2:],
'suffix-3': term[-3:],
'prev_word': '' if index == 0 else sentence_terms[index - 1],
'next_word': '' if index == len(sentence_terms) - 1 else sentence_terms[index + 1]
}
def untag(tagged_sentence):
"""
Remove the tag for each tagged term.
:param tagged_sentence: a POS tagged sentence
:type tagged_sentence: list
:return: a list of tags
:rtype: list of strings
"""
return [w for w, _ in tagged_sentence]
def transform_to_dataset(tagged_sentences):
"""
Split tagged sentences to X and y datasets and append some basic features.
:param tagged_sentences: a list of POS tagged sentences
:param tagged_sentences: list of list of tuples (term_i, tag_i)
:return:
"""
X, y = [], []
for pos_tags in tagged_sentences:
for index, (term, class_) in enumerate(pos_tags):
# Add basic NLP features for each sentence term
X.append(add_basic_features(untag(pos_tags), index))
y.append(class_)
return X, y
def build_model(input_dim, hidden_neurons, output_dim):
"""
Construct, compile and return a Keras model which will be used to fit/predict
"""
model = Sequential([
Dense(hidden_neurons, input_dim=input_dim),
Activation('relu'),
Dropout(0.2),
Dense(hidden_neurons),
Activation('relu'),
Dropout(0.2),
Dense(output_dim, activation='softmax')
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):
""" Plot model loss and accuracy through epochs. """
green = '#72C29B'
orange = '#FFA577'
with plt.xkcd():
fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))
ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,
label='training')
ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,
linewidth=5, label='validation')
ax1.set_xlabel('# epoch')
ax1.set_ylabel('loss')
ax1.tick_params('y')
ax1.legend(loc='upper right', shadow=False)
ax1.set_title('Model loss through #epochs', fontweight='bold')
ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,
label='training')
ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,
linewidth=5, label='validation')
ax2.set_xlabel('# epoch')
ax2.set_ylabel('accuracy')
ax2.tick_params('y')
ax2.legend(loc='lower right', shadow=False)
ax2.set_title('Model accuracy through #epochs', fontweight='bold')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
nb_samples = 100
# Ensure reproducibility
np.random.seed(CUSTOM_SEED)
sentences = treebank.tagged_sents(tagset='universal')[:nb_samples]
print('a random sentence: \n-> {}'.format(random.choice(sentences)))
tags = set([tag for sentence in treebank.tagged_sents() for _, tag in sentence])
print('nb_tags: {}\ntags: {}'.format(len(tags), tags))
# We use approximately 60% of the tagged sentences for training,
# 20% as the validation set and 20% to evaluate our model.
train_test_cutoff = int(.80 * len(sentences))
training_sentences = sentences[:train_test_cutoff]
testing_sentences = sentences[train_test_cutoff:]
train_val_cutoff = int(.25 * len(training_sentences))
validation_sentences = training_sentences[:train_val_cutoff]
training_sentences = training_sentences[train_val_cutoff:]
# For training, validation and testing sentences, we split the
# attributes into X (input variables) and y (output variables).
X_train, y_train = transform_to_dataset(training_sentences)
X_test, y_test = transform_to_dataset(testing_sentences)
X_val, y_val = transform_to_dataset(validation_sentences)
# Fit our DictVectorizer with our set of features
dict_vectorizer = DictVectorizer(sparse=False)
dict_vectorizer.fit(X_train + X_test + X_val)
# Convert dict features to vectors
X_train_vect = dict_vectorizer.transform(X_train)
X_test_vect = dict_vectorizer.transform(X_test)
X_val_vect = dict_vectorizer.transform(X_val)
# Fit LabelEncoder with our list of classes
label_encoder = LabelEncoder()
label_encoder.fit(y_train + y_test + y_val)
# Encode class values as integers
y_train_enc = label_encoder.transform(y_train)
y_test_enc = label_encoder.transform(y_test)
y_val_enc = label_encoder.transform(y_val)
# Convert integers to dummy variables (one hot encoded)
y_train_dummy = np_utils.to_categorical(y_train_enc)
y_test_dummy = np_utils.to_categorical(y_test_enc)
y_val_dummy = np_utils.to_categorical(y_val_enc)
# Set model parameters
model_params = {
'build_fn': build_model,
'input_dim': X_train_vect.shape[1],
'hidden_neurons': 512,
'output_dim': y_train_dummy.shape[1],
'epochs': 5,
'batch_size': 256,
'verbose': 1,
'validation_data': (X_val_vect, y_val_dummy),
'shuffle': True
}
# Create a new sklearn classifier
clf = KerasClassifier(**model_params)
# Finally, fit our classifier
hist = clf.fit(X_train_vect, y_train_dummy)
# Plot model performance
plot_model_performance(
train_loss=hist.history.get('loss', []),
train_acc=hist.history.get('acc', []),
train_val_loss=hist.history.get('val_loss', []),
train_val_acc=hist.history.get('val_acc', [])
)
# Evaluate model accuracy
score = clf.score(X_test_vect, y_test_dummy, verbose=0)
print('model accuracy: {}'.format(score))
# Compute classification report
y_preds = clf.predict(X_test_vect)
# Our target names are our label encoded targets
target_names = label_encoder.classes_
# Compute classification report
classif_report = classification_report(
y_true=y_test_enc, y_pred=y_preds,
target_names=target_names
)
print(classif_report)
# Visualize model architecture
plot_model(clf.model, to_file='tmp/model_structure.png', show_shapes=True)
# Finally save model
clf.model.save('/tmp/keras_mlp.h5')
|
[
"nltk.corpus.treebank.tagged_sents",
"keras.wrappers.scikit_learn.KerasClassifier",
"numpy.random.seed",
"matplotlib.pyplot.show",
"keras.layers.Activation",
"keras.layers.Dropout",
"random.choice",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.subplots",
"keras.utils.np_utils.to_categorical",
"keras.utils.plot_model",
"sklearn.feature_extraction.DictVectorizer",
"keras.layers.Dense",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.xkcd"
] |
[((4196, 4214), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4212, 4214), True, 'import matplotlib.pyplot as plt\n'), ((4220, 4230), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4228, 4230), True, 'import matplotlib.pyplot as plt\n'), ((4322, 4349), 'numpy.random.seed', 'np.random.seed', (['CUSTOM_SEED'], {}), '(CUSTOM_SEED)\n', (4336, 4349), True, 'import numpy as np\n'), ((5544, 5572), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {'sparse': '(False)'}), '(sparse=False)\n', (5558, 5572), False, 'from sklearn.feature_extraction import DictVectorizer\n'), ((5897, 5911), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5909, 5911), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((6236, 6272), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train_enc'], {}), '(y_train_enc)\n', (6259, 6272), False, 'from keras.utils import np_utils, plot_model\n'), ((6293, 6328), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test_enc'], {}), '(y_test_enc)\n', (6316, 6328), False, 'from keras.utils import np_utils, plot_model\n'), ((6348, 6382), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_val_enc'], {}), '(y_val_enc)\n', (6371, 6382), False, 'from keras.utils import np_utils, plot_model\n'), ((6805, 6836), 'keras.wrappers.scikit_learn.KerasClassifier', 'KerasClassifier', ([], {}), '(**model_params)\n', (6820, 6836), False, 'from keras.wrappers.scikit_learn import KerasClassifier\n'), ((7578, 7666), 'sklearn.metrics.classification_report', 'classification_report', ([], {'y_true': 'y_test_enc', 'y_pred': 'y_preds', 'target_names': 'target_names'}), '(y_true=y_test_enc, y_pred=y_preds, target_names=\n target_names)\n', (7599, 7666), False, 'from sklearn.metrics import classification_report\n'), ((7757, 7831), 'keras.utils.plot_model', 'plot_model', (['clf.model'], {'to_file': '"""tmp/model_structure.png"""', 
'show_shapes': '(True)'}), "(clf.model, to_file='tmp/model_structure.png', show_shapes=True)\n", (7767, 7831), False, 'from keras.utils import np_utils, plot_model\n'), ((3177, 3187), 'matplotlib.pyplot.xkcd', 'plt.xkcd', ([], {}), '()\n', (3185, 3187), True, 'import matplotlib.pyplot as plt\n'), ((3216, 3248), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'figsize': '(10, 8)'}), '(2, figsize=(10, 8))\n', (3228, 3248), True, 'import matplotlib.pyplot as plt\n'), ((4369, 4410), 'nltk.corpus.treebank.tagged_sents', 'treebank.tagged_sents', ([], {'tagset': '"""universal"""'}), "(tagset='universal')\n", (4390, 4410), False, 'from nltk.corpus import treebank\n'), ((2622, 2664), 'keras.layers.Dense', 'Dense', (['hidden_neurons'], {'input_dim': 'input_dim'}), '(hidden_neurons, input_dim=input_dim)\n', (2627, 2664), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2675, 2693), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2685, 2693), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2704, 2716), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2711, 2716), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2727, 2748), 'keras.layers.Dense', 'Dense', (['hidden_neurons'], {}), '(hidden_neurons)\n', (2732, 2748), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2759, 2777), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2769, 2777), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2788, 2800), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2795, 2800), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((2811, 2850), 'keras.layers.Dense', 'Dense', (['output_dim'], {'activation': '"""softmax"""'}), "(output_dim, activation='softmax')\n", (2816, 2850), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4471, 4495), 'random.choice', 'random.choice', 
(['sentences'], {}), '(sentences)\n', (4484, 4495), False, 'import random\n'), ((4537, 4560), 'nltk.corpus.treebank.tagged_sents', 'treebank.tagged_sents', ([], {}), '()\n', (4558, 4560), False, 'from nltk.corpus import treebank\n')]
|
import pandas as pd
import numpy as np
from .QCBase import VarNames
class Exporter(object):
""" Export class which writes parsed data to a certain format"""
valid_formats = ["pdf", "xlsx", "txt", "csv", "dataframe"]
def __init__(self, data=None):
self.data = data
# for later: add pandas independent functions to export arrays to file
def arrays_to_dframe(self, **kwargs):
""" Using keyworded arguments, expects arrays """
try:
df = pd.DataFrame(kwargs)
except ValueError: #if arrays do not have the same length
d = {}
for key, value in kwargs.items():
d[key] = pd.Series(value)
df = pd.DataFrame(d)
return df
def ExcitedStateSummary(self, results, fname="es_smry", fmt="csv",
ground_state=False):
""" Exports energy related excited state quantities to file
Parameters
----------
results : CCParser.ParseContainer
Parsing container that holds parsed values.
fname : string
Filename prefix.
fmt : string
Output format ('csv', 'xlsx'/'xls' or 'df' for pandas.DataFrame).
ground_state : bool
Whether to include an empty line in the table for the ground state.
"""
if fmt not in Exporter.valid_formats:
raise ValueError("File format '{0:}' not recognized or supported!".format(fmt))
if False in getattr(results, VarNames.has_converged).data:
raise ValueError("Not converged state detected!")
d = {}
# (1) Excitation energies (default minimum)
#if hasattr(results, VarNames.exc_energy_rel):
d[VarNames.exc_energy_rel] = getattr(results, VarNames.exc_energy_rel).data
n_states = len(d[VarNames.exc_energy_rel])
# (2) Oscillator strengths
if hasattr(results, VarNames.osc_str):
d[VarNames.osc_str] = getattr(results, VarNames.osc_str).data
# (3) Amplitudes
if hasattr(results, VarNames.amplitudes):
ampl = getattr(results, VarNames.amplitudes)
pieces = [a.to_dataframe() for a in ampl]
key = [x for x in range(1,len(pieces)+1)]
amp_df = pd.concat(pieces, keys=key, names=["State", "Row ID"])
# prepare MultiIndex (there has to be a better way to do that...)
arrays = [[x for x in range(1, n_states+1)],
[0 for x in range(n_states)]]
tuples = list(zip(*arrays))# asterisk unpacks
df1 = pd.DataFrame(d)
df1.index = pd.MultiIndex.from_tuples(tuples, names=["State", "Row ID"])
df = pd.concat([df1, amp_df], axis=1)
# add row to MultiIndex, see https://stackoverflow.com/q/24917700
if ground_state:
df.loc[(0,0),:] = np.nan
df.sort_index(level=0, inplace=True)
# EXPORT TO FILE or dataframe
fout = fname + "." + fmt
if fmt == "csv":
df.to_csv(fout, encoding="utf-8")
elif fmt == ("xlsx" or "xls"):
writer = pd.ExcelWriter(fout)
df.to_excel(writer, "Sheet1")
writer.save()
elif fmt.lower() == ("dataframe" or "df"):
return df
def ReducedWeights(self, results, nbsfA, extern=None, fmt="print",
fname="AmplAnl", silent=False):
""" Calculate reduced weights based on fragment information.
The reduced weight for a single excitation :math:`i \\rightarrow a` is defined as
:math:`v_{i}^{a} = 0.5\\cdot(c_{i,A}^{2} + c_{a,A}^{2})\\cdot w_{i}^{a}`, with
c and w being the molecular orbital coefficient and transition weight,
respectively.
The MO coefficients from the output first have to be transformed to an
orthonormal basis.
Parameters
----------
results : CCParser.ParseContainer
Container object which contains excited state amplitudes
nbsfA : int
Number of basis functions on System A (assumes system A comes first!)
extern : CCParser.ParseContainer
Optional second container which contains orthonormalisation matrix and/or MO coefficients
fmt : string
Output format. Available are "print", "dataframe", "xlsx" or "csv"
fname : string
Output file name (basename only).
silent : bool
Whether to ignore lengthy printouts.
"""
# consistency
has_extern = True if extern != None else False
if False in getattr(results, VarNames.has_converged).data:
raise ValueError("Not converged state detected!")
if not has_extern and not hasattr(results, VarNames.orthonorm_matrix):
raise AttributeError("Could not find orthonormalization matrix! Was it parsed?")
elif has_extern and not hasattr(extern, VarNames.orthonorm_matrix):
raise AttributeError("Could not find orthonormalization matrix! Was it parsed?")
elif not has_extern and not hasattr(results, VarNames.mo_coefficients):
raise AttributeError("Could not find MO coefficients! Were they parsed?")
elif has_extern and not hasattr(extern, VarNames.mo_coefficients):
raise AttributeError("Could not find MO coefficients! Were they parsed?")
elif not hasattr(results, VarNames.amplitudes):
raise AttributeError("Could not find amplitudes! Were they parsed?")
elif not hasattr(results, VarNames.n_bas):
raise AttributeError("Could not find number of basis functions! Was it parsed?")
else:
# (1) Orthonormalization matrix, hardcoded last
X = getattr(results, VarNames.orthonorm_matrix).get_last() if not \
has_extern else getattr(extern, VarNames.orthonorm_matrix).get_last()
X_inv = np.linalg.inv(X)
# (2) MO coeffiecients, hardcoded last
C = getattr(results, VarNames.mo_coefficients).get_last() if not \
has_extern else getattr(extern, VarNames.mo_coefficients).get_last()
C_prime = C * X_inv # Szabo, Ostlund, page 142
max_mo = C.shape[0]
# (3) Amplitudes
ampl = getattr(results, VarNames.amplitudes)
n_states = len(ampl)
# (4) Number of basis functions
nbsf = getattr(results, VarNames.n_bas).get_last()
# (4) Output variables
sum_weights = [0 for i in range(n_states)]
sum_redweights = [0 for i in range(n_states)]
# --------------
sos_A = [0 for a in range(C_prime.shape[0])]
sos_B = [0 for a in range(C_prime.shape[0])]
for c, vect in enumerate(C_prime):
for n in range(nbsf):
if n < nbsfA:
sos_A[c] += vect[0,n]**2
else:
sos_B[c] += vect[0,n]**2
for i,a in enumerate(ampl):#state
for t in range(len(a.occ)):#transition
if max(a.virt[t]) > max_mo:
if not silent:
print("State {0:>2d}: Omitting transition with weight \
{1:.1%} due to missing MO coefficients.".format(i+1, a.weights[t]))
continue
if len(a.occ[t]) == 1:#single amplitudes
rw = 0.5*(sos_A[a.occ[t][0]-1] + sos_A[a.virt[t][0]-1]) * a.weights[t]
elif len(a.occ[t]) == 2:#double amplitudes
rw = 0.25*(sos_A[a.occ[t][0]-1] + sos_A[a.occ[t][1]-1] +
sos_A[a.virt[t][0]-1] + sos_A[a.virt[t][1]-1]
)*a.weights[t]
else:
raise IndexError("Currently no more than double \
amplitudes are supported!")
sum_weights[i] += a.weights[t]
sum_redweights[i] += rw
#----------------
# Export as
fout = fname + "." + fmt
d = {"State": [i+1 for i in range(n_states)],
"sum_weight" : sum_weights,
"sum_red_weight" : sum_redweights}
df = pd.DataFrame(d)
df = df.assign(diff=df["sum_weight"]-df["sum_red_weight"],
ratio=df["sum_red_weight"]/df["sum_weight"])
if fmt == "print":
print("State | Sum(W) | Sum(P) | Sum(W) - Sum(P) | ratio P/W |\n",50*"-")
for i in range(n_states):
print(" S{0:>2d} | {1:.3f} | {2:.3f} | {3:15.3f} | {4:.1%}".format(
i+1, sum_weights[i], sum_redweights[i], sum_weights[i] -
sum_redweights[i], sum_redweights[i]/sum_weights[i]))
elif fmt == "dataframe":
return df
elif fmt == "csv":
df.to_csv(fout, encoding="utf-8")
elif fmt == "xlsx" or fmt == "xls":
writer = pd.ExcelWriter(fout)
df.to_excel(writer, "Sheet1")
writer.save()
else:
raise ValueError("Output format not supported!")
def MO_Molden(self, results, atom_basis, fname="molecular_orbitals",
tmp_5d=True):
""" Writes molecular orbitals to a molden file.
Expects molecular geometry in Angstrom.
More information on the molden format at
http://www.cmbi.ru.nl/molden/molden_format.html
Parameters
----------
results : CCParser.ParseContainer
Container object which holds MO coefficients.
exponents : dict
Dictionary mapping GTO exponents/coefficients to atoms. Expected
format of dictionary entry is list of strings.
fname : string
Output file name.
"""
from .QCBase import PeriodicTable
import re
C = results.C.get_last()
xyz = results.xyz.get_last()
en = results.mo_energies.get_last()
PeTa = PeriodicTable()
#TODO: Permutator needed in case of different formats (Molcas, Gaussian)
with open(fname+".molden", "w") as out:
out.write("[Molden Format]\n")
# write XYZ
out.write("[Atoms] (Angs)\n")
for i,atom in enumerate(xyz):
num = PeTa.get_atomic_num(atom[0])
out.write("{0:>3}{1:7d}{2:5d}".format(atom[0], i+1, num))
out.write("".join("{0:16.8f}".format(c) for c in atom[1:])+"\n")
# write basis exponents
out.write("[GTO]\n")
for n in range(len(xyz)):
# atom sequence number, 0
out.write("{0:d}{1:5d}\n".format(n+1, 0))
symb = xyz[n][0].upper()
#a = atom.upper()
basis = atom_basis[symb]
for coeff in basis:
# shell label, number of primitives, 1.00
if re.search(r"[SDPF]", coeff[0]):
out.write("{0:}{1:6d}{2:12.6f}\n".format(
coeff[0], int(coeff[1]), float(coeff[2])))
# exponent, contraction coefficient
else:
out.write("{0:18.8e}{1:18.8e}\n".format(
float(coeff[0]), float(coeff[1])))
out.write("\n")
for imo in range(C.shape[0]):#assumes counting from MO 1 !!
out.write("[MO]\nSym=X\n")
if imo < en.n_occ:#occupied
out.write("Ene={0:12.6f}\n".format(en.occ[imo]))
out.write("Spin=alpha\n")
out.write("Occup=1\n")
else:#virtual
out.write("Ene={0:12.6f}\n".format(en.virt[imo]))
out.write("Spin=alpha\n")
out.write("Occup=0\n")
for i in range(C.shape[1]):
out.write("{0:6d}{1: 22.12e}\n".format(i+1,C[imo, i]))
if tmp_5d:
out.write("[5D]\n")
print("MOs written to Molden file.")
|
[
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"numpy.linalg.inv",
"pandas.Series",
"re.search",
"pandas.ExcelWriter",
"pandas.concat"
] |
[((2617, 2632), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (2629, 2632), True, 'import pandas as pd\n'), ((2653, 2713), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['tuples'], {'names': "['State', 'Row ID']"}), "(tuples, names=['State', 'Row ID'])\n", (2678, 2713), True, 'import pandas as pd\n'), ((2736, 2768), 'pandas.concat', 'pd.concat', (['[df1, amp_df]'], {'axis': '(1)'}), '([df1, amp_df], axis=1)\n', (2745, 2768), True, 'import pandas as pd\n'), ((510, 530), 'pandas.DataFrame', 'pd.DataFrame', (['kwargs'], {}), '(kwargs)\n', (522, 530), True, 'import pandas as pd\n'), ((2306, 2360), 'pandas.concat', 'pd.concat', (['pieces'], {'keys': 'key', 'names': "['State', 'Row ID']"}), "(pieces, keys=key, names=['State', 'Row ID'])\n", (2315, 2360), True, 'import pandas as pd\n'), ((721, 736), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (733, 736), True, 'import pandas as pd\n'), ((3165, 3185), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['fout'], {}), '(fout)\n', (3179, 3185), True, 'import pandas as pd\n'), ((687, 703), 'pandas.Series', 'pd.Series', (['value'], {}), '(value)\n', (696, 703), True, 'import pandas as pd\n'), ((11200, 11229), 're.search', 're.search', (['"""[SDPF]"""', 'coeff[0]'], {}), "('[SDPF]', coeff[0])\n", (11209, 11229), False, 'import re\n'), ((6002, 6018), 'numpy.linalg.inv', 'np.linalg.inv', (['X'], {}), '(X)\n', (6015, 6018), True, 'import numpy as np\n'), ((8373, 8388), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (8385, 8388), True, 'import pandas as pd\n'), ((9170, 9190), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['fout'], {}), '(fout)\n', (9184, 9190), True, 'import pandas as pd\n')]
|
#
# Copyright <NAME>, 2012-2014
#
import miner_globals
import m.common as common
from base import *
def p_accumulate_coals(p):
'''command :
| ACCUMULATE'''
p[0] = DefaultAccumulateCommand()
def p_accumulate_command(p):
'''command : ACCUMULATE id_list BY expression'''
p[0] = AccumulateCommand(p[2], p[4])
class AccumulateCommand(TypicalCommand):
NAME = "ACCUMULATE BY"
SHORT_HELP = "ACCUMULATE id [,...] BY accumulatorClass - accumulates coal records"
LONG_HELP = """ACCUMULATE id [,...] BY accumulatorClass
Performs custom accumulation logic.
"""
def __init__(self, accumulatorVariables, accumulatorClass):
TypicalCommand.__init__(self)
self.myAccumulatorVariables = accumulatorVariables
self.myAccumulatorClass = accumulatorClass
self.yieldVal = "(accumulated,)" if len(self.myAccumulatorVariables)==1 else "accumulated"
def getStart(self):
return """
import types
_acc = %s
if isinstance(_acc,types.ClassType):
accumulator = _acc()
else:
accumulator = _acc
""" % self.myAccumulatorClass
def getBody(self):
return """
for accumulated in accumulator.accumulate(%s):
yield %s
""" % (", ".join(self.myAccumulatorVariables), self.yieldVal)
def getEnd(self):
return """
for accumulated in accumulator.finish():
yield %s
""" % self.yieldVal
def getRequiredVariables(self):
return []
def getVariableNames(self):
return self.myAccumulatorVariables
class DefaultAccumulateCommand(TypicalCommand):
    NAME = "ACCUMULATE"
    SHORT_HELP = "ACCUMULATE|<empty> - accumulates coal records"
    LONG_HELP = """ACCUMULATE
| <empty> |
Performs context dependent accumulation.
ACCUMULATE id [,...] BY accumulatorClass[(params)]
Performs custom accumulation
"""
    # Context-dependent variant: the accumulator is looked up from the global
    # registry based on the parent command's variable names. Like
    # AccumulateCommand, the returned strings are generated source code.
    def setParent(self, parent):
        GeneratorBase.setParent(self, parent)
        # (variableName, accumulatorClassName) resolved from the registry,
        # or None when no accumulator fits the parent's variables.
        self.accumulatorTuple = miner_globals.getAccumulator(self.myParent.getVariableNames())
    def getStart(self):
        if not self.accumulatorTuple:
            raise common.CompilationError("Invalid input for accumulation")
        return """	accumulator = %s()\n""" % self.accumulatorTuple[1]
    def getBody(self):
        return """
for accumulated in accumulator.accumulate(%s):
	yield (accumulated, )
""" % self.accumulatorTuple[0]
    def getEnd(self):
        return """
for accumulated in accumulator.finish():
	yield (accumulated, )
"""
    def getRequiredVariables(self):
        return [self.accumulatorTuple[0]]
    def getVariableNames(self):
        return [self.accumulatorTuple[0]]
# Register help text and grammar keywords with the global miner registry at
# import time.
miner_globals.addHelpClass(AccumulateCommand)
miner_globals.addHelpClass(DefaultAccumulateCommand)
miner_globals.addKeyWord(command="ACCUMULATE")
miner_globals.addKeyWord(keyword="BY")
|
[
"miner_globals.addKeyWord",
"m.common.CompilationError",
"miner_globals.addHelpClass"
] |
[((2793, 2838), 'miner_globals.addHelpClass', 'miner_globals.addHelpClass', (['AccumulateCommand'], {}), '(AccumulateCommand)\n', (2819, 2838), False, 'import miner_globals\n'), ((2840, 2892), 'miner_globals.addHelpClass', 'miner_globals.addHelpClass', (['DefaultAccumulateCommand'], {}), '(DefaultAccumulateCommand)\n', (2866, 2892), False, 'import miner_globals\n'), ((2894, 2940), 'miner_globals.addKeyWord', 'miner_globals.addKeyWord', ([], {'command': '"""ACCUMULATE"""'}), "(command='ACCUMULATE')\n", (2918, 2940), False, 'import miner_globals\n'), ((2942, 2980), 'miner_globals.addKeyWord', 'miner_globals.addKeyWord', ([], {'keyword': '"""BY"""'}), "(keyword='BY')\n", (2966, 2980), False, 'import miner_globals\n'), ((2206, 2263), 'm.common.CompilationError', 'common.CompilationError', (['"""Invalid input for accumulation"""'], {}), "('Invalid input for accumulation')\n", (2229, 2263), True, 'import m.common as common\n')]
|
"""
2016 Day 10
https://adventofcode.com/2016/day/10
"""
from dataclasses import dataclass
from typing import Dict, List, Sequence, Tuple
import re
import aocd # type: ignore
# Input-line patterns: initial "value N goes to ..." assignments and
# "bot N gives low to ... and high to ..." give-rules.
re_value = re.compile(r"value (\d+) goes to (\w+) (\d+)")
re_robot = re.compile(r"bot (\d+) gives low to (\w+) (\d+) and high to (\w+) (\d+)")
@dataclass(frozen=True)
class Target:
    """
    Target within the system to deliver numbers to - defined by its genre (bot or output) and its
    reference number.
    """
    genre: str   # "bot" or "output"
    number: int  # index of that bot/output
@dataclass
class Robot:
    """
    A number-sorting robot. It remembers which targets receive its low and high
    numbers; once it has been handed its second number it pushes both back to
    the Environment for delivery to those targets.
    """
    environ: "Environment"
    holding: List[int]
    give_low: Target
    give_high: Target
    def __init__(self, environ: "Environment", match_groups: Sequence[str]):
        lo_genre, lo_num, hi_genre, hi_num = match_groups
        self.environ = environ
        self.holding = []
        self.give_low = Target(lo_genre, int(lo_num))
        self.give_high = Target(hi_genre, int(hi_num))
    def add(self, value: int) -> None:
        """
        Hand a number to the robot. Nothing happens until the second number
        arrives; then the smaller goes to the low target and the larger to the
        high target via environ.deliver(..).
        """
        self.holding.append(value)
        if len(self.holding) != 2:
            return
        self.environ.deliver(self.give_low, min(self.holding))
        self.environ.deliver(self.give_high, max(self.holding))
@dataclass
class Environment:
    """
    An interconnected system of robots and outputs, built from the puzzle's
    instruction text.
    """
    robots: Dict[int, Robot]
    outputs: Dict[int, int]
    def __init__(self, instructions: str):
        self.robots = {}
        self.outputs = {}
        # Create every robot first so the initial deliveries below can cascade
        # through fully-wired robots.
        for robot_definition in re_robot.findall(instructions):
            bot, args = robot_definition[0], robot_definition[1:]
            self.robots[int(bot)] = Robot(self, args)
        # Seed the system with the "value N goes to ..." instructions.
        for (initial_value, to_genre, to_number) in re_value.findall(instructions):
            target = Target(to_genre, int(to_number))
            self.deliver(target, int(initial_value))
    def deliver(self, target: Target, value: int) -> None:
        """
        Deliver the given number to its target - either storing it in the outputs of this
        Environment object, or delivering it to the relevant robot via its .add(..) method.
        """
        if target.genre == "bot":
            self.robots[target.number].add(value)
        elif target.genre == "output":
            self.outputs[target.number] = value
    def find_robot_holding(self, search: Tuple[int, int]) -> int:
        """
        Find the ID number of the robot which ends up holding the given pair of values.
        Returns -1 when no robot holds exactly that pair.
        """
        inventory = set(search)
        for number, bot in self.robots.items():
            if set(bot.holding) == inventory:
                return number
        return -1
def main() -> None:
    """
    Calculate and output the solutions based on the real puzzle input.
    """
    # NOTE: fetches the puzzle input over the network via the aocd helper.
    data = aocd.get_data(year=2016, day=10)
    env = Environment(data)
    print(f"Part 1: {env.find_robot_holding((61, 17))}")
    print(f"Part 2: {env.outputs[0] * env.outputs[1] * env.outputs[2]}")
# Script entry point.
if __name__ == "__main__":
    main()
|
[
"aocd.get_data",
"dataclasses.dataclass",
"re.compile"
] |
[((189, 237), 're.compile', 're.compile', (['"""value (\\\\d+) goes to (\\\\w+) (\\\\d+)"""'], {}), "('value (\\\\d+) goes to (\\\\w+) (\\\\d+)')\n", (199, 237), False, 'import re\n'), ((247, 324), 're.compile', 're.compile', (['"""bot (\\\\d+) gives low to (\\\\w+) (\\\\d+) and high to (\\\\w+) (\\\\d+)"""'], {}), "('bot (\\\\d+) gives low to (\\\\w+) (\\\\d+) and high to (\\\\w+) (\\\\d+)')\n", (257, 324), False, 'import re\n'), ((324, 346), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (333, 346), False, 'from dataclasses import dataclass\n'), ((3184, 3216), 'aocd.get_data', 'aocd.get_data', ([], {'year': '(2016)', 'day': '(10)'}), '(year=2016, day=10)\n', (3197, 3216), False, 'import aocd\n')]
|
#!/usr/bin/env python3
from gunicorn_server import StandaloneApplication
from flask import Flask
import multiprocessing
# Module-level WSGI application object, picked up by the gunicorn workers.
app = Flask(__name__)
@app.route("/")
def ok():
    """Health-check endpoint: always responds with the literal string "OK"."""
    return "OK"
# Script entry point: serve the app with gunicorn on localhost:8080, using the
# conventional (2 * CPU count) + 1 worker sizing.
if __name__ == "__main__":
    gunicorn_app = StandaloneApplication(app, options={
        'bind': '127.0.0.1:8080',
        'workers': (multiprocessing.cpu_count() * 2) + 1,
    })
    gunicorn_app.run()
|
[
"flask.Flask",
"multiprocessing.cpu_count"
] |
[((128, 143), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (133, 143), False, 'from flask import Flask\n'), ((326, 353), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (351, 353), False, 'import multiprocessing\n')]
|
# Copyright © 2019 National Institute of Advanced Industrial Science and Technology (AIST). All rights reserved.
from datetime import datetime, timedelta, timezone
from requests import post, get
from pathlib import Path
import json
from injector import singleton
import os
import shutil
from distutils.dir_util import remove_tree, copy_tree
from sqlalchemy import asc
from qlib.utils.logging import get_logger, log
from reportgenerator import ReportGenerator
from ..across.exception import QAINotFoundException, QAIBadRequestException,\
QAIInvalidRequestException
from ..across.file_checker import FileChecker
from ..controllers.dto.testrunner import PostTestRunnerReq, PostTestRunnerRes, Result, Job, \
GetTestRunnerStatusRes, JobStatus, RunStatus, PostReportGeneratorRes, PostReportGeneratorReq
from ..controllers.dto.testrunner import GetTestRunnerRes
from ..entities.test import TestMapper
from ..entities.test_description import TestDescriptionMapper
from ..entities.ml_component import MLComponentMapper
from ..entities.dowmload import DownloadMapper
from ..entities.setting import SettingMapper
from ..entities.graph import GraphMapper
from ..entities.run import RunMapper
from ..gateways.extensions import sql_db
from ..entities.test_runner import TestRunnerMapper
from sqlalchemy.exc import SQLAlchemyError
logger = get_logger()
@singleton
class TestRunnerService:
    """Launches test-runner jobs and lists the registered test runners."""
    def __init__(self):
        # Base URL of the internal test-runner service, read from settings.
        self.ip_entry_point = SettingMapper.query.get('ip_entry_point').value
        self._file_checker = FileChecker()
    @log(logger)
    def post(self, organizer_id: str, ml_component_id: int, request: PostTestRunnerReq) -> PostTestRunnerRes:
        """Validate the requested test descriptions and ask the runner to launch a job.

        Raises QAINotFoundException / QAIInvalidRequestException when validation or
        the downstream test-runner call fails.
        """
        test = TestMapper.query.\
            filter(TestMapper.ml_component_id == ml_component_id).\
            filter(MLComponentMapper.org_id == organizer_id).first()
        if test is None:
            raise QAINotFoundException('R14000', 'not found test descriptions')
        td_ids = request.test_description_ids
        if len(td_ids) == 0:
            # Build a TestDescriptionMapper list excluding the following:
            # already deleted, already executed (OK or NG).
            tds = TestDescriptionMapper.query. \
                filter(TestDescriptionMapper.test_id == test.id). \
                filter(TestDescriptionMapper.delete_flag == False). \
                all()
            # Drop TDs that have already been executed (OK or NG).
            # sqlalchemy cannot filter on the related table here, so narrow down on the Python side.
            tds = [t for t in tds if
                   (t.run is None) or
                   ((t.run is not None) and (t.run.result != 'OK' and t.run.result != 'NG'))]
            if not tds:
                raise QAINotFoundException('R14001', 'all test descriptions are deleted or executed \n'
                                               'You can\'t re-execute a previously executed TD,'
                                               ' so create a new one or duplicate it.')
            td_ids = [t.id for t in tds]
        else:
            # When td_ids are given explicitly, check each one for deletion / prior execution.
            for td_id in td_ids:
                td = TestDescriptionMapper.query.get(td_id)
                if td is None:
                    raise QAINotFoundException('R14001', f'test description[id={td_id}] is not exists.')
                if td.delete_flag:
                    raise QAINotFoundException('R14001', 'test description[id={}, name={}] are deleted.'
                                         .format(td_id, td.name))
                if (td.run_id is not None) and (td.run.result == 'OK' or td.run.result == 'NG'):
                    raise QAINotFoundException('R14001', 'test description[id={}, name={}] are executed.\n'
                                         'You can\'t re-execute a previously executed TD,'
                                         ' so create a new one or duplicate it.'
                                         .format(td_id, td.name))
        # Hash check: verify the inventory files have not changed since registration.
        for td_id in td_ids:
            td = TestDescriptionMapper.query.get(td_id)
            for inventory_td_mapper in td.inventories:
                file_check_result = self._file_checker.execute(inventory_td_mapper.inventory.file_path,
                                                               inventory_td_mapper.inventory.file_system_id)
                if not file_check_result['exists']:
                    raise QAINotFoundException('R14002', f'inventory file not found.'
                                                         f'file:{inventory_td_mapper.inventory.file_path}')
                if file_check_result['hash_sha256'] != inventory_td_mapper.inventory.file_hash_sha256:
                    # NOTE(review): message typo - "not much" presumably means "not match";
                    # left unchanged because the text is runtime behavior.
                    raise QAIInvalidRequestException('R14003', f'inventory file hash is not much.'
                                                               f'file:{inventory_td_mapper.inventory.file_path}')
        res = post(url=self.ip_entry_point + '/' + organizer_id + '/mlComponents/' + str(ml_component_id) + '/job',
                   headers={'content-type': 'application/json'},
                   json={'TestDescriptionIds': td_ids})
        # Response error check.
        if res.status_code != 200:
            raise QAIInvalidRequestException('R19999', 'testrunner error: {}'.format(res.text))
        job_id = res.json()['JobId']
        return PostTestRunnerRes(
            result=Result(code='R12000', message="job launch success."),
            job=Job(id_=str(job_id),
                start_datetime=datetime.now(timezone(timedelta(hours=+9), 'JST')))
        )
    @log(logger)
    def get_test_runners(self) -> GetTestRunnerRes:
        """Return every registered test runner as template DTOs."""
        test_runners = TestRunnerMapper.query.all()  # fetch all, regardless of organizer_id / ml_component_id
        if test_runners is None:
            raise QAINotFoundException('I54000', 'not found test runners')
        return GetTestRunnerRes(
            result=Result(code='I52000', message="get test runners success."),
            test_runners=[t.to_template_dto() for t in test_runners]
        )
@singleton
class TestRunnerStatusService:
    """Reports the status of the latest test-runner job for an ML component."""
    def __init__(self):
        # Base URL of the internal test-runner service, read from settings.
        self.ip_entry_point = SettingMapper.query.get('ip_entry_point').value
    @log(logger)
    def get(self, organizer_id: str, ml_component_id: int) -> GetTestRunnerStatusRes:
        """Return the job status plus per-run statuses for the component's test.

        Raises QAINotFoundException when the component has no test. Answers with a
        neutral 'NA' placeholder status when no job has been launched yet.
        """
        test = TestMapper.query.\
            filter(TestMapper.ml_component_id == ml_component_id).\
            filter(MLComponentMapper.org_id == organizer_id).first()
        if test is None:
            raise QAINotFoundException('R24000', 'not found test descriptions')
        if test.job_id is None:
            # No job launched yet - placeholder response instead of an error.
            return GetTestRunnerStatusRes(
                result=Result(code='R24001', message="job is not found."),
                job_status=JobStatus(id_=0, status='NA', result='NA', result_detail='OK:0 NG:0 ERR:0 NA:0'),
                run_statuses=[])
        return GetTestRunnerStatusRes(
            result=Result(code='R22000', message="get job status success."),
            job_status=test.job.to_dto(),
            run_statuses=[r.to_dto() for r in test.job.runs]
        )
@singleton
class ReportGeneratorService:
    """Dispatches report-generator commands (parameter updates and PDF generation)."""
    def __init__(self):
        # Command name -> handler; post() dispatches through this table.
        self.func_table = {
            "SetParam": self._invoke_set_params,
            "Generate": self._invoke_report_generate
        }
        self.backend_entry_point = SettingMapper.query.get('backend_entry_point').value
        # Use a different mount destination on Windows than on other platforms.
        if os.name == 'nt':
            mount_dst_path = Path(SettingMapper.query.get('mount_src_path').value)
        else:
            mount_dst_path = Path(SettingMapper.query.get('mount_dst_path').value)
        self.report_home_path = mount_dst_path/'report'
        self._initialize_report_dir(self.report_home_path)
        self.report_generator = ReportGenerator(home_path=str(self.report_home_path)+os.sep)
        self.backend_report_home = mount_dst_path/'backend'/'report_gen'
    def _initialize_report_dir(self, path):
        """Recreate the report working directory and copy in the report template."""
        # Create a clean working directory.
        if path.exists():
            remove_tree(str(path))
        path.mkdir(parents=True, exist_ok=True)
        # Copy the report template next to the working directory.
        copy_src_dir = Path(__file__).joinpath('../../../report/template')
        copy_dst_dir = path / 'template'
        if copy_dst_dir.exists():
            remove_tree(str(copy_dst_dir))
        copy_tree(src=str(copy_src_dir.resolve()), dst=str(copy_dst_dir.resolve()))
    def _invoke_set_params(self, request: PostReportGeneratorReq, _=None) -> {}:
        """Persist per-graph report settings and the opinion text for one TD."""
        # Only the first destination entry is applied.
        td_id = int(request.destination[0])
        td = TestDescriptionMapper.query.get(td_id)
        if td.run is not None:
            for td_graph in td.run.graphs:
                param_graphs = [g for g in request.params.graphs if g.id_ == td_graph.id]
                if len(param_graphs) > 0:
                    param_graph = param_graphs[0]
                    td_graph.report_required = param_graph.report_required
                    if param_graph.report_required:
                        td_graph.report_index = param_graph.report_index
                        td_graph.report_name = param_graph.report_name
                else:
                    # Graphs absent from the request are excluded from the report.
                    td_graph.report_required = False
        if request.params.opinion is not None:
            td.opinion = request.params.opinion
        sql_db.session.commit()
    def _invoke_report_generate(self, request: PostReportGeneratorReq,
                                test_descriptions: [TestDescriptionMapper] = None) -> {}:
        """Build the input JSON for the report generator, produce the PDF and register it for download."""
        # Pre-processing: prepare timestamped in/out folders.
        dt_now_jst = datetime.now(timezone(timedelta(hours=9))).strftime('%Y%m%d%H%M%S')
        base_dir = self.backend_report_home / dt_now_jst
        in_dir = base_dir / 'in'
        in_dir.mkdir(parents=True)
        out_dir = base_dir / 'out'
        out_dir.mkdir(parents=True)
        # Build the input JSON.
        in_json = {}
        target_td_ids = []
        # Build target_td_ids excluding the following:
        # already deleted, never run (None), failed runs (ERR).
        if len(request.destination) == 0:
            # test_descriptions is already filtered to exclude deleted TDs.
            target_td_ids = [td.id for td in test_descriptions if td.run and td.run.result != 'ERR']
        else:
            tmp_td_ids = [int(td_id) for td_id in request.destination]
            for td_id in tmp_td_ids:
                td = TestDescriptionMapper.query\
                    .filter(TestDescriptionMapper.id == td_id)\
                    .filter(TestDescriptionMapper.delete_flag == False).first()
                if td.run and td.run.result != 'ERR':
                    target_td_ids.append(td_id)
        if len(target_td_ids) == 0:
            raise QAINotFoundException('D14004', 'these test description is not running')
        file_path_list = []
        type_list = []
        quality_props_list = []
        td_id__list = []
        required_list = []
        report_name = []
        for td_id in target_td_ids:
            td = TestDescriptionMapper.query.get(td_id)
            if td.run_id is None:
                raise QAINotFoundException('D14002', 'test description\'s result is None')
            # Write the opinion text to a file and register it as a report item.
            if len(td.opinion) != 0:
                opinion_path = in_dir / ('opinion' + str(td_id) + ".txt")
                with open(str(opinion_path), mode='w', encoding='utf-8') as f:
                    f.write(td.opinion)
                file_path_list.append(str(opinion_path))
                type_list.append('text')
                quality_props_list.append(td.quality_dimension_id)
                td_id__list.append(str(td_id))
                required_list.append(True)
                report_name.append('Opinion')
            # Add the TD's graphs, in report order.
            graphs = GraphMapper.query.\
                filter(GraphMapper.run_id == td.run_id).\
                filter(GraphMapper.report_required == True).\
                order_by(asc(GraphMapper.report_index)).\
                all()
            for graph in graphs:
                file_path_list.append(graph.download.path)
                type_list.append(graph.graph_template.resource_type.type)
                quality_props_list.append(td.quality_dimension_id)
                td_id__list.append(str(td_id))
                required_list.append(graph.report_required)
                report_name.append(graph.report_name)
        # Index-keyed maps, as expected by the report generator's input schema.
        in_json['filepath'] = dict(zip(range(len(file_path_list)), file_path_list))
        in_json['type'] = dict(zip(range(len(type_list)), type_list))
        in_json['quality_props'] = dict(zip(range(len(quality_props_list)), quality_props_list))
        in_json['testDescriptionID'] = dict(zip(range(len(td_id__list)), td_id__list))
        in_json['required'] = dict(zip(range(len(required_list)), required_list))
        in_json['name'] = dict(zip(range(len(report_name)), report_name))
        in_json_path = in_dir/'input.json'
        with open(str(in_json_path), 'w', encoding='utf-8') as f:
            json.dump(in_json, f, indent=4, ensure_ascii=False)
        # Generate the report PDF.
        pdf_file_path = self.report_home_path / 'work' / 'report.pdf'
        pdf_file = self.report_generator.report_generate(sql_db, str(in_json_path), str(pdf_file_path))
        if not pdf_file or not Path(pdf_file).exists():
            raise QAINotFoundException('D16000', 'failed report generate')
        # Post-processing: copy the PDF out and register a download record.
        res = {}
        try:
            dst_path = out_dir / Path(pdf_file).name
            shutil.copy(src=pdf_file, dst=str(dst_path))
            dl = DownloadMapper(path=pdf_file)
            sql_db.session.add(dl)
            sql_db.session.commit()
            res['ReportUrl'] = self.backend_entry_point + '/download/' + str(dl.id)
        except Exception as e:
            print('Exception: {}'.format(e))
            sql_db.session.rollback()
            raise e
        return res
    @log(logger)
    def post(self, organizer_id: str, ml_component_id: int, request: PostReportGeneratorReq) -> PostReportGeneratorRes:
        """Validate the request and dispatch the named command via func_table."""
        test = TestMapper.query.\
            filter(TestMapper.ml_component_id == ml_component_id).\
            filter(MLComponentMapper.org_id == organizer_id).first()
        if test is None:
            raise QAINotFoundException('D14000', 'not found test descriptions')
        if request.command not in self.func_table:
            raise QAIBadRequestException('D10001', 'invaid command')
        # Build a TestDescriptionMapper list excluding TDs whose delete_flag is True.
        mapper = TestDescriptionMapper.query. \
            filter(TestDescriptionMapper.test_id == test.id). \
            filter(TestDescriptionMapper.delete_flag == False). \
            all()
        if not mapper:
            raise QAINotFoundException('D14001', 'test descriptions are all deleted')
        try:
            func = self.func_table[request.command]
            out_params = func(request, mapper)
        except Exception as e:
            print('Exception: {}'.format(e))
            sql_db.session.rollback()
            raise e
        return PostReportGeneratorRes(
            result=Result(code='D12000', message="command invoke success."),
            out_params=out_params
        )
|
[
"json.dump",
"qlib.utils.logging.log",
"pathlib.Path",
"datetime.timedelta",
"sqlalchemy.asc",
"qlib.utils.logging.get_logger"
] |
[((1336, 1348), 'qlib.utils.logging.get_logger', 'get_logger', ([], {}), '()\n', (1346, 1348), False, 'from qlib.utils.logging import get_logger, log\n'), ((1538, 1549), 'qlib.utils.logging.log', 'log', (['logger'], {}), '(logger)\n', (1541, 1549), False, 'from qlib.utils.logging import get_logger, log\n'), ((5596, 5607), 'qlib.utils.logging.log', 'log', (['logger'], {}), '(logger)\n', (5599, 5607), False, 'from qlib.utils.logging import get_logger, log\n'), ((6207, 6218), 'qlib.utils.logging.log', 'log', (['logger'], {}), '(logger)\n', (6210, 6218), False, 'from qlib.utils.logging import get_logger, log\n'), ((13807, 13818), 'qlib.utils.logging.log', 'log', (['logger'], {}), '(logger)\n', (13810, 13818), False, 'from qlib.utils.logging import get_logger, log\n'), ((12915, 12966), 'json.dump', 'json.dump', (['in_json', 'f'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(in_json, f, indent=4, ensure_ascii=False)\n', (12924, 12966), False, 'import json\n'), ((8175, 8189), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (8179, 8189), False, 'from pathlib import Path\n'), ((13369, 13383), 'pathlib.Path', 'Path', (['pdf_file'], {}), '(pdf_file)\n', (13373, 13383), False, 'from pathlib import Path\n'), ((9603, 9621), 'datetime.timedelta', 'timedelta', ([], {'hours': '(9)'}), '(hours=9)\n', (9612, 9621), False, 'from datetime import datetime, timedelta, timezone\n'), ((11849, 11878), 'sqlalchemy.asc', 'asc', (['GraphMapper.report_index'], {}), '(GraphMapper.report_index)\n', (11852, 11878), False, 'from sqlalchemy import asc\n'), ((13190, 13204), 'pathlib.Path', 'Path', (['pdf_file'], {}), '(pdf_file)\n', (13194, 13204), False, 'from pathlib import Path\n'), ((5550, 5569), 'datetime.timedelta', 'timedelta', ([], {'hours': '(+9)'}), '(hours=+9)\n', (5559, 5569), False, 'from datetime import datetime, timedelta, timezone\n')]
|
import nmap
import requests
def nScan(ip):
    """Run a fast ("-F") nmap scan of *ip* and return the discovered ports.

    Returns a list of dicts with keys "Port", "Name", "Reason" and "State",
    covering every scanned host and every protocol. An empty list means the
    host was down or exposed no ports.

    Fixes over the previous version:
    - the result was returned from inside the host loop, so only the first
      host was ever reported (and None was returned when no host answered);
    - port details were looked up with the *last* protocol seen rather than
      the protocol each port belongs to, giving wrong data or a KeyError on
      multi-protocol hosts.
    """
    nm = nmap.PortScanner()
    nm.scan(ip, arguments="-F")
    results = []
    for host in nm.all_hosts():
        for proto in nm[host].all_protocols():
            # sorted() gives a stable, ascending port order in the output.
            for port in sorted(nm[host][proto].keys()):
                info = nm[host][proto][port]
                results.append({
                    "Port": str(port),
                    "Name": info['name'],
                    "Reason": info['reason'],
                    "State": info['state'],
                })
    return results
|
[
"nmap.PortScanner"
] |
[((53, 71), 'nmap.PortScanner', 'nmap.PortScanner', ([], {}), '()\n', (69, 71), False, 'import nmap\n')]
|
from flask import render_template
from app import app
@app.route('/')
@app.route('/index')
def index():
    """Serve the configuration page at both '/' and '/index'."""
    return render_template('config.html')
@app.route('/dashboard')
def dashboard():
    """Serve the dashboard page."""
    return render_template('dashboard.html')
|
[
"app.app.route",
"flask.render_template"
] |
[((57, 71), 'app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (66, 71), False, 'from app import app\n'), ((73, 92), 'app.app.route', 'app.route', (['"""/index"""'], {}), "('/index')\n", (82, 92), False, 'from app import app\n'), ((150, 173), 'app.app.route', 'app.route', (['"""/dashboard"""'], {}), "('/dashboard')\n", (159, 173), False, 'from app import app\n'), ((117, 147), 'flask.render_template', 'render_template', (['"""config.html"""'], {}), "('config.html')\n", (132, 147), False, 'from flask import render_template\n'), ((202, 235), 'flask.render_template', 'render_template', (['"""dashboard.html"""'], {}), "('dashboard.html')\n", (217, 235), False, 'from flask import render_template\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import CheckConstraint, Column, Integer, Text
from warehouse import db
from warehouse.utils.attrs import make_repr
class Classifier(db.ModelBase):
    """A trove classifier (e.g. "Programming Language :: Python :: 3")."""

    __tablename__ = "trove_classifiers"
    # BUG FIX: SQLAlchemy's declarative system reads `__table_args__` (a tuple),
    # not `__tableargs__`. With the misspelled attribute the CHECK constraint
    # was silently never attached to the table.
    __table_args__ = (
        CheckConstraint(
            "classifier not ilike 'private ::%'",
            name="ck_disallow_private_top_level_classifier",
        ),
    )

    __repr__ = make_repr("classifier")

    # Surrogate primary key.
    id = Column(Integer, primary_key=True, nullable=False)
    # The classifier string itself; unique across the table.
    classifier = Column(Text, unique=True)
|
[
"sqlalchemy.CheckConstraint",
"warehouse.utils.attrs.make_repr",
"sqlalchemy.Column"
] |
[((768, 875), 'sqlalchemy.CheckConstraint', 'CheckConstraint', (['"""classifier not ilike \'private ::%\'"""'], {'name': '"""ck_disallow_private_top_level_classifier"""'}), '("classifier not ilike \'private ::%\'", name=\n \'ck_disallow_private_top_level_classifier\')\n', (783, 875), False, 'from sqlalchemy import CheckConstraint, Column, Integer, Text\n'), ((910, 933), 'warehouse.utils.attrs.make_repr', 'make_repr', (['"""classifier"""'], {}), "('classifier')\n", (919, 933), False, 'from warehouse.utils.attrs import make_repr\n'), ((944, 993), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'nullable': '(False)'}), '(Integer, primary_key=True, nullable=False)\n', (950, 993), False, 'from sqlalchemy import CheckConstraint, Column, Integer, Text\n'), ((1011, 1036), 'sqlalchemy.Column', 'Column', (['Text'], {'unique': '(True)'}), '(Text, unique=True)\n', (1017, 1036), False, 'from sqlalchemy import CheckConstraint, Column, Integer, Text\n')]
|
# Generated from /Users/tom/Code/soda-core/soda/core/soda/sodacl/antlr/SodaCLAntlr.g4 by ANTLR 4.9.3
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\28")
buf.write("\u01d3\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\3\2\3\2\3\2\3\2\3")
buf.write("\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3")
buf.write("\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3")
buf.write("\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\5")
buf.write("\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3")
buf.write("\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7")
buf.write("\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b")
buf.write("\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f")
buf.write("\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3")
buf.write("\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16")
buf.write("\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\17")
buf.write("\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20")
buf.write("\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20")
buf.write("\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21")
buf.write("\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22")
buf.write("\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22")
buf.write("\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23")
buf.write("\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\25\3\25\3\25\3\25")
buf.write("\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27")
buf.write("\3\27\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\32\3\32\3\32")
buf.write("\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34")
buf.write("\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36")
buf.write("\3\36\3\36\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3!\3!\3!\3")
buf.write("!\3\"\3\"\3#\3#\3$\3$\3%\3%\3&\3&\3\'\3\'\3(\3(\3)\3)")
buf.write("\3*\3*\3+\3+\3,\3,\3,\3-\3-\3-\3.\3.\3.\3/\3/\3/\3\60")
buf.write("\3\60\3\61\3\61\3\62\3\62\3\63\3\63\3\63\3\63\6\63\u01b6")
buf.write("\n\63\r\63\16\63\u01b7\3\63\3\63\3\64\3\64\3\64\3\64\6")
buf.write("\64\u01c0\n\64\r\64\16\64\u01c1\3\64\3\64\3\65\3\65\7")
buf.write("\65\u01c8\n\65\f\65\16\65\u01cb\13\65\3\66\6\66\u01ce")
buf.write("\n\66\r\66\16\66\u01cf\3\67\3\67\2\28\3\3\5\4\7\5\t\6")
buf.write("\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20")
buf.write("\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65")
buf.write("\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60")
buf.write("_\61a\62c\63e\64g\65i\66k\67m8\3\2\7\3\2$$\3\2bb\5\2C")
buf.write("\\aac|\b\2\"\"*+..>@]]__\3\2\62;\2\u01d8\2\3\3\2\2\2\2")
buf.write("\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3")
buf.write("\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2")
buf.write("\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2")
buf.write("\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3")
buf.write("\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61")
buf.write("\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2")
buf.write("\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3")
buf.write("\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M")
buf.write("\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2")
buf.write("W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2")
buf.write("\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2")
buf.write("\2\2k\3\2\2\2\2m\3\2\2\2\3o\3\2\2\2\5\177\3\2\2\2\7\u0084")
buf.write("\3\2\2\2\t\u0096\3\2\2\2\13\u009e\3\2\2\2\r\u00b1\3\2")
buf.write("\2\2\17\u00c5\3\2\2\2\21\u00d8\3\2\2\2\23\u00da\3\2\2")
buf.write("\2\25\u00dc\3\2\2\2\27\u00de\3\2\2\2\31\u00e8\3\2\2\2")
buf.write("\33\u00f6\3\2\2\2\35\u0101\3\2\2\2\37\u0108\3\2\2\2!\u011b")
buf.write("\3\2\2\2#\u012c\3\2\2\2%\u013b\3\2\2\2\'\u014b\3\2\2\2")
buf.write(")\u014d\3\2\2\2+\u0151\3\2\2\2-\u0155\3\2\2\2/\u015d\3")
buf.write("\2\2\2\61\u0161\3\2\2\2\63\u0164\3\2\2\2\65\u0169\3\2")
buf.write("\2\2\67\u016e\3\2\2\29\u0173\3\2\2\2;\u017a\3\2\2\2=\u017f")
buf.write("\3\2\2\2?\u0183\3\2\2\2A\u0187\3\2\2\2C\u018b\3\2\2\2")
buf.write("E\u018d\3\2\2\2G\u018f\3\2\2\2I\u0191\3\2\2\2K\u0193\3")
buf.write("\2\2\2M\u0195\3\2\2\2O\u0197\3\2\2\2Q\u0199\3\2\2\2S\u019b")
buf.write("\3\2\2\2U\u019d\3\2\2\2W\u019f\3\2\2\2Y\u01a2\3\2\2\2")
buf.write("[\u01a5\3\2\2\2]\u01a8\3\2\2\2_\u01ab\3\2\2\2a\u01ad\3")
buf.write("\2\2\2c\u01af\3\2\2\2e\u01b1\3\2\2\2g\u01bb\3\2\2\2i\u01c5")
buf.write("\3\2\2\2k\u01cd\3\2\2\2m\u01d1\3\2\2\2op\7h\2\2pq\7t\2")
buf.write("\2qr\7g\2\2rs\7u\2\2st\7j\2\2tu\7p\2\2uv\7g\2\2vw\7u\2")
buf.write("\2wx\7u\2\2xy\7\"\2\2yz\7w\2\2z{\7u\2\2{|\7k\2\2|}\7p")
buf.write("\2\2}~\7i\2\2~\4\3\2\2\2\177\u0080\7y\2\2\u0080\u0081")
buf.write("\7k\2\2\u0081\u0082\7v\2\2\u0082\u0083\7j\2\2\u0083\6")
buf.write("\3\2\2\2\u0084\u0085\7t\2\2\u0085\u0086\7q\2\2\u0086\u0087")
buf.write("\7y\2\2\u0087\u0088\7a\2\2\u0088\u0089\7e\2\2\u0089\u008a")
buf.write("\7q\2\2\u008a\u008b\7w\2\2\u008b\u008c\7p\2\2\u008c\u008d")
buf.write("\7v\2\2\u008d\u008e\7\"\2\2\u008e\u008f\7u\2\2\u008f\u0090")
buf.write("\7c\2\2\u0090\u0091\7o\2\2\u0091\u0092\7g\2\2\u0092\u0093")
buf.write("\7\"\2\2\u0093\u0094\7c\2\2\u0094\u0095\7u\2\2\u0095\b")
buf.write("\3\2\2\2\u0096\u0097\7f\2\2\u0097\u0098\7g\2\2\u0098\u0099")
buf.write("\7h\2\2\u0099\u009a\7c\2\2\u009a\u009b\7w\2\2\u009b\u009c")
buf.write("\7n\2\2\u009c\u009d\7v\2\2\u009d\n\3\2\2\2\u009e\u009f")
buf.write("\7u\2\2\u009f\u00a0\7c\2\2\u00a0\u00a1\7o\2\2\u00a1\u00a2")
buf.write("\7g\2\2\u00a2\u00a3\7\"\2\2\u00a3\u00a4\7f\2\2\u00a4\u00a5")
buf.write("\7c\2\2\u00a5\u00a6\7{\2\2\u00a6\u00a7\7\"\2\2\u00a7\u00a8")
buf.write("\7n\2\2\u00a8\u00a9\7c\2\2\u00a9\u00aa\7u\2\2\u00aa\u00ab")
buf.write("\7v\2\2\u00ab\u00ac\7\"\2\2\u00ac\u00ad\7y\2\2\u00ad\u00ae")
buf.write("\7g\2\2\u00ae\u00af\7g\2\2\u00af\u00b0\7m\2\2\u00b0\f")
buf.write("\3\2\2\2\u00b1\u00b2\7u\2\2\u00b2\u00b3\7c\2\2\u00b3\u00b4")
buf.write("\7o\2\2\u00b4\u00b5\7g\2\2\u00b5\u00b6\7\"\2\2\u00b6\u00b7")
buf.write("\7f\2\2\u00b7\u00b8\7c\2\2\u00b8\u00b9\7{\2\2\u00b9\u00ba")
buf.write("\7\"\2\2\u00ba\u00bb\7n\2\2\u00bb\u00bc\7c\2\2\u00bc\u00bd")
buf.write("\7u\2\2\u00bd\u00be\7v\2\2\u00be\u00bf\7\"\2\2\u00bf\u00c0")
buf.write("\7o\2\2\u00c0\u00c1\7q\2\2\u00c1\u00c2\7p\2\2\u00c2\u00c3")
buf.write("\7v\2\2\u00c3\u00c4\7j\2\2\u00c4\16\3\2\2\2\u00c5\u00c6")
buf.write("\7c\2\2\u00c6\u00c7\7p\2\2\u00c7\u00c8\7q\2\2\u00c8\u00c9")
buf.write("\7o\2\2\u00c9\u00ca\7c\2\2\u00ca\u00cb\7n\2\2\u00cb\u00cc")
buf.write("\7{\2\2\u00cc\u00cd\7\"\2\2\u00cd\u00ce\7u\2\2\u00ce\u00cf")
buf.write("\7e\2\2\u00cf\u00d0\7q\2\2\u00d0\u00d1\7t\2\2\u00d1\u00d2")
buf.write("\7g\2\2\u00d2\u00d3\7\"\2\2\u00d3\u00d4\7h\2\2\u00d4\u00d5")
buf.write("\7q\2\2\u00d5\u00d6\7t\2\2\u00d6\u00d7\7\"\2\2\u00d7\20")
buf.write("\3\2\2\2\u00d8\u00d9\7f\2\2\u00d9\22\3\2\2\2\u00da\u00db")
buf.write("\7j\2\2\u00db\24\3\2\2\2\u00dc\u00dd\7o\2\2\u00dd\26\3")
buf.write("\2\2\2\u00de\u00df\7x\2\2\u00df\u00e0\7c\2\2\u00e0\u00e1")
buf.write("\7n\2\2\u00e1\u00e2\7w\2\2\u00e2\u00e3\7g\2\2\u00e3\u00e4")
buf.write("\7u\2\2\u00e4\u00e5\7\"\2\2\u00e5\u00e6\7k\2\2\u00e6\u00e7")
buf.write("\7p\2\2\u00e7\30\3\2\2\2\u00e8\u00e9\7o\2\2\u00e9\u00ea")
buf.write("\7w\2\2\u00ea\u00eb\7u\2\2\u00eb\u00ec\7v\2\2\u00ec\u00ed")
buf.write("\7\"\2\2\u00ed\u00ee\7g\2\2\u00ee\u00ef\7z\2\2\u00ef\u00f0")
buf.write("\7k\2\2\u00f0\u00f1\7u\2\2\u00f1\u00f2\7v\2\2\u00f2\u00f3")
buf.write("\7\"\2\2\u00f3\u00f4\7k\2\2\u00f4\u00f5\7p\2\2\u00f5\32")
buf.write("\3\2\2\2\u00f6\u00f7\7e\2\2\u00f7\u00f8\7j\2\2\u00f8\u00f9")
buf.write("\7g\2\2\u00f9\u00fa\7e\2\2\u00fa\u00fb\7m\2\2\u00fb\u00fc")
buf.write("\7u\2\2\u00fc\u00fd\7\"\2\2\u00fd\u00fe\7h\2\2\u00fe\u00ff")
buf.write("\7q\2\2\u00ff\u0100\7t\2\2\u0100\34\3\2\2\2\u0101\u0102")
buf.write("\7h\2\2\u0102\u0103\7k\2\2\u0103\u0104\7n\2\2\u0104\u0105")
buf.write("\7v\2\2\u0105\u0106\7g\2\2\u0106\u0107\7t\2\2\u0107\36")
buf.write("\3\2\2\2\u0108\u0109\7e\2\2\u0109\u010a\7q\2\2\u010a\u010b")
buf.write("\7p\2\2\u010b\u010c\7h\2\2\u010c\u010d\7k\2\2\u010d\u010e")
buf.write("\7i\2\2\u010e\u010f\7w\2\2\u010f\u0110\7t\2\2\u0110\u0111")
buf.write("\7c\2\2\u0111\u0112\7v\2\2\u0112\u0113\7k\2\2\u0113\u0114")
buf.write("\7q\2\2\u0114\u0115\7p\2\2\u0115\u0116\7u\2\2\u0116\u0117")
buf.write("\7\"\2\2\u0117\u0118\7h\2\2\u0118\u0119\7q\2\2\u0119\u011a")
buf.write("\7t\2\2\u011a \3\2\2\2\u011b\u011c\7h\2\2\u011c\u011d")
buf.write("\7q\2\2\u011d\u011e\7t\2\2\u011e\u011f\7\"\2\2\u011f\u0120")
buf.write("\7g\2\2\u0120\u0121\7c\2\2\u0121\u0122\7e\2\2\u0122\u0123")
buf.write("\7j\2\2\u0123\u0124\7\"\2\2\u0124\u0125\7f\2\2\u0125\u0126")
buf.write("\7c\2\2\u0126\u0127\7v\2\2\u0127\u0128\7c\2\2\u0128\u0129")
buf.write("\7u\2\2\u0129\u012a\7g\2\2\u012a\u012b\7v\2\2\u012b\"")
buf.write("\3\2\2\2\u012c\u012d\7h\2\2\u012d\u012e\7q\2\2\u012e\u012f")
buf.write("\7t\2\2\u012f\u0130\7\"\2\2\u0130\u0131\7g\2\2\u0131\u0132")
buf.write("\7c\2\2\u0132\u0133\7e\2\2\u0133\u0134\7j\2\2\u0134\u0135")
buf.write("\7\"\2\2\u0135\u0136\7v\2\2\u0136\u0137\7c\2\2\u0137\u0138")
buf.write("\7d\2\2\u0138\u0139\7n\2\2\u0139\u013a\7g\2\2\u013a$\3")
buf.write("\2\2\2\u013b\u013c\7h\2\2\u013c\u013d\7q\2\2\u013d\u013e")
buf.write("\7t\2\2\u013e\u013f\7\"\2\2\u013f\u0140\7g\2\2\u0140\u0141")
buf.write("\7c\2\2\u0141\u0142\7e\2\2\u0142\u0143\7j\2\2\u0143\u0144")
buf.write("\7\"\2\2\u0144\u0145\7e\2\2\u0145\u0146\7q\2\2\u0146\u0147")
buf.write("\7n\2\2\u0147\u0148\7w\2\2\u0148\u0149\7o\2\2\u0149\u014a")
buf.write("\7p\2\2\u014a&\3\2\2\2\u014b\u014c\7\60\2\2\u014c(\3\2")
buf.write("\2\2\u014d\u014e\7h\2\2\u014e\u014f\7q\2\2\u014f\u0150")
buf.write("\7t\2\2\u0150*\3\2\2\2\u0151\u0152\7c\2\2\u0152\u0153")
buf.write("\7p\2\2\u0153\u0154\7f\2\2\u0154,\3\2\2\2\u0155\u0156")
buf.write("\7d\2\2\u0156\u0157\7g\2\2\u0157\u0158\7v\2\2\u0158\u0159")
buf.write("\7y\2\2\u0159\u015a\7g\2\2\u015a\u015b\7g\2\2\u015b\u015c")
buf.write("\7p\2\2\u015c.\3\2\2\2\u015d\u015e\7p\2\2\u015e\u015f")
buf.write("\7q\2\2\u015f\u0160\7v\2\2\u0160\60\3\2\2\2\u0161\u0162")
buf.write("\7k\2\2\u0162\u0163\7p\2\2\u0163\62\3\2\2\2\u0164\u0165")
buf.write("\7y\2\2\u0165\u0166\7c\2\2\u0166\u0167\7t\2\2\u0167\u0168")
buf.write("\7p\2\2\u0168\64\3\2\2\2\u0169\u016a\7h\2\2\u016a\u016b")
buf.write("\7c\2\2\u016b\u016c\7k\2\2\u016c\u016d\7n\2\2\u016d\66")
buf.write("\3\2\2\2\u016e\u016f\7r\2\2\u016f\u0170\7c\2\2\u0170\u0171")
buf.write("\7u\2\2\u0171\u0172\7u\2\2\u01728\3\2\2\2\u0173\u0174")
buf.write("\7e\2\2\u0174\u0175\7j\2\2\u0175\u0176\7c\2\2\u0176\u0177")
buf.write("\7p\2\2\u0177\u0178\7i\2\2\u0178\u0179\7g\2\2\u0179:\3")
buf.write("\2\2\2\u017a\u017b\7n\2\2\u017b\u017c\7c\2\2\u017c\u017d")
buf.write("\7u\2\2\u017d\u017e\7v\2\2\u017e<\3\2\2\2\u017f\u0180")
buf.write("\7c\2\2\u0180\u0181\7x\2\2\u0181\u0182\7i\2\2\u0182>\3")
buf.write("\2\2\2\u0183\u0184\7o\2\2\u0184\u0185\7k\2\2\u0185\u0186")
buf.write("\7p\2\2\u0186@\3\2\2\2\u0187\u0188\7o\2\2\u0188\u0189")
buf.write("\7c\2\2\u0189\u018a\7z\2\2\u018aB\3\2\2\2\u018b\u018c")
buf.write("\7]\2\2\u018cD\3\2\2\2\u018d\u018e\7_\2\2\u018eF\3\2\2")
buf.write("\2\u018f\u0190\7}\2\2\u0190H\3\2\2\2\u0191\u0192\7\177")
buf.write("\2\2\u0192J\3\2\2\2\u0193\u0194\7*\2\2\u0194L\3\2\2\2")
buf.write("\u0195\u0196\7+\2\2\u0196N\3\2\2\2\u0197\u0198\7.\2\2")
buf.write("\u0198P\3\2\2\2\u0199\u019a\7\'\2\2\u019aR\3\2\2\2\u019b")
buf.write("\u019c\7-\2\2\u019cT\3\2\2\2\u019d\u019e\7/\2\2\u019e")
buf.write("V\3\2\2\2\u019f\u01a0\7#\2\2\u01a0\u01a1\7?\2\2\u01a1")
buf.write("X\3\2\2\2\u01a2\u01a3\7>\2\2\u01a3\u01a4\7@\2\2\u01a4")
buf.write("Z\3\2\2\2\u01a5\u01a6\7>\2\2\u01a6\u01a7\7?\2\2\u01a7")
buf.write("\\\3\2\2\2\u01a8\u01a9\7@\2\2\u01a9\u01aa\7?\2\2\u01aa")
buf.write("^\3\2\2\2\u01ab\u01ac\7?\2\2\u01ac`\3\2\2\2\u01ad\u01ae")
buf.write("\7>\2\2\u01aeb\3\2\2\2\u01af\u01b0\7@\2\2\u01b0d\3\2\2")
buf.write("\2\u01b1\u01b5\7$\2\2\u01b2\u01b6\n\2\2\2\u01b3\u01b4")
buf.write("\7^\2\2\u01b4\u01b6\7$\2\2\u01b5\u01b2\3\2\2\2\u01b5\u01b3")
buf.write("\3\2\2\2\u01b6\u01b7\3\2\2\2\u01b7\u01b5\3\2\2\2\u01b7")
buf.write("\u01b8\3\2\2\2\u01b8\u01b9\3\2\2\2\u01b9\u01ba\7$\2\2")
buf.write("\u01baf\3\2\2\2\u01bb\u01bf\7b\2\2\u01bc\u01c0\n\3\2\2")
buf.write("\u01bd\u01be\7^\2\2\u01be\u01c0\7b\2\2\u01bf\u01bc\3\2")
buf.write("\2\2\u01bf\u01bd\3\2\2\2\u01c0\u01c1\3\2\2\2\u01c1\u01bf")
buf.write("\3\2\2\2\u01c1\u01c2\3\2\2\2\u01c2\u01c3\3\2\2\2\u01c3")
buf.write("\u01c4\7b\2\2\u01c4h\3\2\2\2\u01c5\u01c9\t\4\2\2\u01c6")
buf.write("\u01c8\n\5\2\2\u01c7\u01c6\3\2\2\2\u01c8\u01cb\3\2\2\2")
buf.write("\u01c9\u01c7\3\2\2\2\u01c9\u01ca\3\2\2\2\u01caj\3\2\2")
buf.write("\2\u01cb\u01c9\3\2\2\2\u01cc\u01ce\t\6\2\2\u01cd\u01cc")
buf.write("\3\2\2\2\u01ce\u01cf\3\2\2\2\u01cf\u01cd\3\2\2\2\u01cf")
buf.write("\u01d0\3\2\2\2\u01d0l\3\2\2\2\u01d1\u01d2\7\"\2\2\u01d2")
buf.write("n\3\2\2\2\t\2\u01b5\u01b7\u01bf\u01c1\u01c9\u01cf\2")
return buf.getvalue()
class SodaCLAntlrLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
T__14 = 15
T__15 = 16
T__16 = 17
T__17 = 18
T__18 = 19
FOR = 20
AND = 21
BETWEEN = 22
NOT = 23
IN = 24
WARN = 25
FAIL = 26
PASS = 27
CHANGE = 28
LAST = 29
AVG = 30
MIN = 31
MAX = 32
SQUARE_LEFT = 33
SQUARE_RIGHT = 34
CURLY_LEFT = 35
CURLY_RIGHT = 36
ROUND_LEFT = 37
ROUND_RIGHT = 38
COMMA = 39
PERCENT = 40
PLUS = 41
MINUS = 42
NOT_EQUAL = 43
NOT_EQUAL_SQL = 44
LTE = 45
GTE = 46
EQUAL = 47
LT = 48
GT = 49
IDENTIFIER_DOUBLE_QUOTE = 50
IDENTIFIER_BACKTICK = 51
IDENTIFIER_UNQUOTED = 52
DIGITS = 53
S = 54
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'freshness using'", "'with'", "'row_count same as'", "'default'",
"'same day last week'", "'same day last month'", "'anomaly score for '",
"'d'", "'h'", "'m'", "'values in'", "'must exist in'", "'checks for'",
"'filter'", "'configurations for'", "'for each dataset'", "'for each table'",
"'for each column'", "'.'", "'for'", "'and'", "'between'", "'not'",
"'in'", "'warn'", "'fail'", "'pass'", "'change'", "'last'",
"'avg'", "'min'", "'max'", "'['", "']'", "'{'", "'}'", "'('",
"')'", "','", "'%'", "'+'", "'-'", "'!='", "'<>'", "'<='", "'>='",
"'='", "'<'", "'>'", "' '" ]
symbolicNames = [ "<INVALID>",
"FOR", "AND", "BETWEEN", "NOT", "IN", "WARN", "FAIL", "PASS",
"CHANGE", "LAST", "AVG", "MIN", "MAX", "SQUARE_LEFT", "SQUARE_RIGHT",
"CURLY_LEFT", "CURLY_RIGHT", "ROUND_LEFT", "ROUND_RIGHT", "COMMA",
"PERCENT", "PLUS", "MINUS", "NOT_EQUAL", "NOT_EQUAL_SQL", "LTE",
"GTE", "EQUAL", "LT", "GT", "IDENTIFIER_DOUBLE_QUOTE", "IDENTIFIER_BACKTICK",
"IDENTIFIER_UNQUOTED", "DIGITS", "S" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"T__14", "T__15", "T__16", "T__17", "T__18", "FOR", "AND",
"BETWEEN", "NOT", "IN", "WARN", "FAIL", "PASS", "CHANGE",
"LAST", "AVG", "MIN", "MAX", "SQUARE_LEFT", "SQUARE_RIGHT",
"CURLY_LEFT", "CURLY_RIGHT", "ROUND_LEFT", "ROUND_RIGHT",
"COMMA", "PERCENT", "PLUS", "MINUS", "NOT_EQUAL", "NOT_EQUAL_SQL",
"LTE", "GTE", "EQUAL", "LT", "GT", "IDENTIFIER_DOUBLE_QUOTE",
"IDENTIFIER_BACKTICK", "IDENTIFIER_UNQUOTED", "DIGITS",
"S" ]
grammarFileName = "SodaCLAntlr.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9.3")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
|
[
"io.StringIO"
] |
[((287, 297), 'io.StringIO', 'StringIO', ([], {}), '()\n', (295, 297), False, 'from io import StringIO\n')]
|
"""
4) Sinkhorn vs. blurred Wasserstein distances
==========================================================
Sinkhorn divergences rely on a simple idea:
by **blurring** the transport plan through the addition of
an entropic penalty, we can reduce the effective dimensionality
of the transportation problem and compute **sensible approximations of the
Wasserstein distance at a low computational cost**.
"""
##################################################
# As discussed in previous notebooks, the *vanilla* Sinkhorn loop
# can be symmetrized, de-biased and turned into a genuine
# multiscale algorithm: available through the
# :mod:`SamplesLoss("sinkhorn") <geomloss.SamplesLoss>` layer, the **Sinkhorn divergence**
#
# .. math::
# \text{S}_\varepsilon(\alpha,\beta)~=~ \text{OT}_\varepsilon(\alpha,\beta)
# - \tfrac{1}{2}\text{OT}_\varepsilon(\alpha,\alpha)
# - \tfrac{1}{2}\text{OT}_\varepsilon(\beta,\beta),
#
# is a tractable approximation of the Wasserstein distance
# that **retains its key geometric properties** - positivity, convexity,
# metrization of the convergence in law.
#
# **But is it really the best way of smoothing our transportation problem?**
# When "p = 2" and :math:`\text{C}(x,y)=\tfrac{1}{2}\|x-y\|^2`,
# a very sensible alternative to Sinkhorn divergences is the
# **blurred Wasserstein distance**
#
# .. math::
# \text{B}_\varepsilon(\alpha,\beta) ~=~ \text{W}_2(\,k_{\varepsilon/4}\star\alpha,\,k_{\varepsilon/4}\star\beta\,),
#
# where :math:`\text{W}_2` denotes the *true* Wasserstein distance associated to
# our cost function :math:`\text{C}` and
#
# .. math::
# k_{\varepsilon/4}: (x-y) \mapsto \exp(-\|x-y\|^2 / \tfrac{2}{4}\varepsilon)
#
# is a Gaussian kernel of deviation :math:`\sigma = \sqrt{\varepsilon}/2`.
# On top of making explicit our intuitions on **low-frequency Optimal Transport**, this
# simple divergence enjoys a collection of desirable properties:
#
# - It is the **square of a distance** that metrizes the convergence in law.
# - It takes the "correct" values on atomic **Dirac masses**, lifting
# the ground cost function to the space of positive measures:
#
# .. math::
# \text{B}_\varepsilon(\delta_x,\delta_y)~=~\text{C}(x,y)
# ~=~\tfrac{1}{2}\|x-y\|^2~=~\text{S}_\varepsilon(\delta_x,\delta_y).
#
# - It has the same **asymptotic properties** as the Sinkhorn divergence,
# interpolating between the true Wasserstein distance (when :math:`\varepsilon \rightarrow 0`)
# and a degenerate kernel norm (when :math:`\varepsilon \rightarrow +\infty`).
# - Thanks to the joint convexity of the Wasserstein distance,
# :math:`\text{B}_\varepsilon(\alpha,\beta)` is a **decreasing** function of :math:`\varepsilon`:
# as we remove small-scale details, we lower the overall transport cost.
#
# To compare the Sinkhorn and blurred Wasserstein divergences, a simple experiment
# is to **display their values on pairs of 1D measures** for increasing values of
# the temperature :math:`\varepsilon`:
# having generated random samples :math:`\alpha` and :math:`\beta`
# on the unit interval, we can simply compute :math:`\text{S}_\varepsilon(\alpha,\beta)`
# with our :mod:`SamplesLoss("sinkhorn") <geomloss.SamplesLoss>` layer
# while the blurred Wasserstein loss :math:`\text{B}_\varepsilon(\alpha,\beta)` can be
# quickly approximated with the **addition of a Gaussian noise** followed
# by a **sorting pass**.
##############################################
# Setup
# ---------------------
# Standard imports:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity # display as density curves
import torch
from geomloss import SamplesLoss
use_cuda = torch.cuda.is_available()
# N.B.: We use float64 numbers to get nice limits when blur -> +infinity
dtype = torch.cuda.DoubleTensor if use_cuda else torch.DoubleTensor
###############################################
# Display routine:
t_plot = np.linspace(-0.5, 1.5, 1000)[:, np.newaxis]
def display_samples(ax, x, color, label=None):
"""Displays samples on the unit interval using a density curve."""
kde = KernelDensity(kernel="gaussian", bandwidth=0.005).fit(x.data.cpu().numpy())
dens = np.exp(kde.score_samples(t_plot))
dens[0] = 0
dens[-1] = 0
ax.fill(t_plot, dens, color=color, label=label)
###############################################
# Experiment
# -------------
def rweight():
"""Random weight."""
return torch.rand(1).type(dtype)
N = 100 if not use_cuda else 10 ** 3 # Number of samples per measure
C = 100 if not use_cuda else 10000 # number of copies for the Gaussian blur
for _ in range(5): # Repeat the experiment 5 times
K = 5 # Generate random 1D measures as the superposition of K=5 intervals
t = torch.linspace(0, 1, N // K).type(dtype).view(-1, 1)
X_i = torch.cat([rweight() ** 2 * t + rweight() - 0.5 for k in range(K)], dim=0)
Y_j = torch.cat([rweight() ** 2 * t + rweight() - 0.5 for k in range(K)], dim=0)
# Compute the limits when blur = 0...
x_, _ = X_i.sort(dim=0)
y_, _ = Y_j.sort(dim=0)
true_wass = (0.5 / len(X_i)) * ((x_ - y_) ** 2).sum()
true_wass = true_wass.item()
# and when blur = +infinity:
mean_diff = 0.5 * ((X_i.mean(0) - Y_j.mean(0)) ** 2).sum()
mean_diff = mean_diff.item()
blurs = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0]
sink, bwass = [], []
for blur in blurs:
# Compute the Sinkhorn divergence:
# N.B.: To be super-precise, we use the well-tested "online" backend
# with a very large 'scaling' coefficient
loss = SamplesLoss("sinkhorn", p=2, blur=blur, scaling=0.99, backend="online")
sink.append(loss(X_i, Y_j).item())
# Compute the blurred Wasserstein distance:
x_i = torch.cat([X_i] * C, dim=0)
y_j = torch.cat([Y_j] * C, dim=0)
x_i = x_i + 0.5 * blur * torch.randn(x_i.shape).type(dtype)
y_j = y_j + 0.5 * blur * torch.randn(y_j.shape).type(dtype)
x_, _ = x_i.sort(dim=0)
y_, _ = y_j.sort(dim=0)
wass = (0.5 / len(x_i)) * ((x_ - y_) ** 2).sum()
bwass.append(wass.item())
# Fancy display:
plt.figure(figsize=(12, 5))
if N < 10 ** 5:
ax = plt.subplot(1, 2, 1)
display_samples(ax, X_i, (1.0, 0, 0, 0.5), label="$\\alpha$")
display_samples(ax, Y_j, (0, 0, 1.0, 0.5), label="$\\beta$")
plt.axis([-0.5, 1.5, -0.1, 5.5])
plt.ylabel("density")
ax.legend()
plt.tight_layout()
ax = plt.subplot(1, 2, 2)
plt.plot([0.01, 10], [true_wass, true_wass], "g", label="True Wasserstein")
plt.plot(blurs, sink, "r-o", label="Sinkhorn divergence")
plt.plot(blurs, bwass, "b-o", label="Blurred Wasserstein")
plt.plot(
[0.01, 10], [mean_diff, mean_diff], "m", label="Squared difference of means"
)
ax.set_xscale("log")
ax.legend()
plt.axis([0.01, 10.0, 0.0, 1.5 * bwass[0]])
plt.xlabel("blur $\\sqrt{\\varepsilon}$")
plt.tight_layout()
plt.show()
##################################################
# Conclusion
# --------------
#
# In practice, the Sinkhorn and blurred Wasserstein divergences
# are **nearly indistinguishable**. But as far as we can tell *today*,
# these two loss functions have very different properties:
#
# - :math:`\text{B}_\varepsilon` is **easy to define**, compute in 1D and
# **analyze** from geometric or statistical point of views...
# But cannot (?) be computed efficiently in higher dimensions,
# where the true OT problem is nearly intractable.
# - :math:`\text{S}_\varepsilon` is simply available through
# the :mod:`SamplesLoss("sinkhorn") <geomloss.SamplesLoss>` layer,
# but has a weird, composite definition and is pretty **hard to**
# **study** rigorously - as evidenced by recent, technical proofs
# of `positivity, definiteness (Feydy et al., 2018) <https://arxiv.org/abs/1810.08278>`_
# and `sample complexity (Genevay et al., 2018) <https://arxiv.org/abs/1810.02733>`_.
#
# **So couldn't we get the best of both worlds?**
# In an ideal world, we'd like to tweak the *efficient* multiscale Sinkhorn algorithm
# to compute the *natural* divergence :math:`\text{B}_\varepsilon`...
# but this may be out of reach. A realistic target could be to **quantify**
# **the difference** between these two objects, thus legitimizing the
# use of the :mod:`SamplesLoss("sinkhorn") <geomloss.SamplesLoss>` layer
# as a **cheap proxy** for the intuitive and well-understood *blurred Wasserstein distance*.
#
# In my opinion, investigating the link between these two quantities
# is one of the most interesting questions left open in the field of discrete entropic OT.
# The geometric loss functions implemented in GeomLoss are probably *good enough*
# for most practical purposes,
# but getting a **rigorous understanding** of the multiscale,
# wavelet-like behavior of our algorithms
# as we add small details through an exponential decay of
# the blurring scale :math:`\sqrt{\varepsilon}` would be truly insightful.
# In some sense, couldn't we prove a
# `Hilbert <https://en.wikipedia.org/wiki/Orthonormal_basis>`_-`Plancherel <https://en.wikipedia.org/wiki/Plancherel_theorem>`_
# theorem for the Wasserstein distance?
#
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"geomloss.SamplesLoss",
"sklearn.neighbors.KernelDensity",
"matplotlib.pyplot.axis",
"torch.cat",
"torch.randn",
"matplotlib.pyplot.figure",
"torch.cuda.is_available",
"numpy.linspace",
"torch.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"torch.linspace"
] |
[((3677, 3702), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3700, 3702), False, 'import torch\n'), ((3923, 3951), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(1.5)', '(1000)'], {}), '(-0.5, 1.5, 1000)\n', (3934, 3951), True, 'import numpy as np\n'), ((6170, 6197), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (6180, 6197), True, 'import matplotlib.pyplot as plt\n'), ((6520, 6540), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (6531, 6540), True, 'import matplotlib.pyplot as plt\n'), ((6545, 6620), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.01, 10]', '[true_wass, true_wass]', '"""g"""'], {'label': '"""True Wasserstein"""'}), "([0.01, 10], [true_wass, true_wass], 'g', label='True Wasserstein')\n", (6553, 6620), True, 'import matplotlib.pyplot as plt\n'), ((6625, 6682), 'matplotlib.pyplot.plot', 'plt.plot', (['blurs', 'sink', '"""r-o"""'], {'label': '"""Sinkhorn divergence"""'}), "(blurs, sink, 'r-o', label='Sinkhorn divergence')\n", (6633, 6682), True, 'import matplotlib.pyplot as plt\n'), ((6687, 6745), 'matplotlib.pyplot.plot', 'plt.plot', (['blurs', 'bwass', '"""b-o"""'], {'label': '"""Blurred Wasserstein"""'}), "(blurs, bwass, 'b-o', label='Blurred Wasserstein')\n", (6695, 6745), True, 'import matplotlib.pyplot as plt\n'), ((6750, 6841), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.01, 10]', '[mean_diff, mean_diff]', '"""m"""'], {'label': '"""Squared difference of means"""'}), "([0.01, 10], [mean_diff, mean_diff], 'm', label=\n 'Squared difference of means')\n", (6758, 6841), True, 'import matplotlib.pyplot as plt\n'), ((6896, 6939), 'matplotlib.pyplot.axis', 'plt.axis', (['[0.01, 10.0, 0.0, 1.5 * bwass[0]]'], {}), '([0.01, 10.0, 0.0, 1.5 * bwass[0]])\n', (6904, 6939), True, 'import matplotlib.pyplot as plt\n'), ((6944, 6985), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""blur $\\\\sqrt{\\\\varepsilon}$"""'], {}), "('blur 
$\\\\sqrt{\\\\varepsilon}$')\n", (6954, 6985), True, 'import matplotlib.pyplot as plt\n'), ((6990, 7008), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7006, 7008), True, 'import matplotlib.pyplot as plt\n'), ((7013, 7023), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7021, 7023), True, 'import matplotlib.pyplot as plt\n'), ((5600, 5671), 'geomloss.SamplesLoss', 'SamplesLoss', (['"""sinkhorn"""'], {'p': '(2)', 'blur': 'blur', 'scaling': '(0.99)', 'backend': '"""online"""'}), "('sinkhorn', p=2, blur=blur, scaling=0.99, backend='online')\n", (5611, 5671), False, 'from geomloss import SamplesLoss\n'), ((5782, 5809), 'torch.cat', 'torch.cat', (['([X_i] * C)'], {'dim': '(0)'}), '([X_i] * C, dim=0)\n', (5791, 5809), False, 'import torch\n'), ((5824, 5851), 'torch.cat', 'torch.cat', (['([Y_j] * C)'], {'dim': '(0)'}), '([Y_j] * C, dim=0)\n', (5833, 5851), False, 'import torch\n'), ((6232, 6252), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (6243, 6252), True, 'import matplotlib.pyplot as plt\n'), ((6400, 6432), 'matplotlib.pyplot.axis', 'plt.axis', (['[-0.5, 1.5, -0.1, 5.5]'], {}), '([-0.5, 1.5, -0.1, 5.5])\n', (6408, 6432), True, 'import matplotlib.pyplot as plt\n'), ((6441, 6462), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""density"""'], {}), "('density')\n", (6451, 6462), True, 'import matplotlib.pyplot as plt\n'), ((6491, 6509), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6507, 6509), True, 'import matplotlib.pyplot as plt\n'), ((4097, 4146), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""gaussian"""', 'bandwidth': '(0.005)'}), "(kernel='gaussian', bandwidth=0.005)\n", (4110, 4146), False, 'from sklearn.neighbors import KernelDensity\n'), ((4435, 4448), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (4445, 4448), False, 'import torch\n'), ((4750, 4778), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '(N // K)'], {}), '(0, 
1, N // K)\n', (4764, 4778), False, 'import torch\n'), ((5885, 5907), 'torch.randn', 'torch.randn', (['x_i.shape'], {}), '(x_i.shape)\n', (5896, 5907), False, 'import torch\n'), ((5953, 5975), 'torch.randn', 'torch.randn', (['y_j.shape'], {}), '(y_j.shape)\n', (5964, 5975), False, 'import torch\n')]
|
from dataclasses import dataclass
from typing import Callable, Any, Dict, Set
from PyQt5 import QtCore
from PyQt5.QtGui import QKeyEvent, QKeySequence
from piafedit.gui.common.handler.keyboard_handler import KeyboardHandler
@dataclass
class UIAction:
name: str
tooltip: str
shortcut: str
action: Callable[[], Any]
class _Actions:
def __init__(self):
self.by_name: Dict[str, UIAction] = dict()
self.by_shortcut: Dict[str, UIAction] = dict()
self.shortcuts: Set[str] = set()
def action(self, name: str, tooltip: str = None, shortcut: str = None):
def decorator(fn: Callable[[], Any]):
print('register:', name, fn)
action = UIAction(name, tooltip, shortcut, fn)
self.by_name[name] = action
self.by_shortcut[shortcut] = action
return action
return decorator
def handler(self):
return MyKeyboardHandler(self)
class MyKeyboardHandler(KeyboardHandler):
def __init__(self, actions: _Actions):
self.actions = actions
def keyPressEvent(self, ev: QKeyEvent):
from piafedit.editor_api import P
modifiers = ev.modifiers()
if modifiers == QtCore.Qt.ShiftModifier:
print('Shift+Click')
elif modifiers == QtCore.Qt.ControlModifier:
print('Control+Click')
elif modifiers == (QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier):
print('Control+Shift+Click')
else:
print('Click')
key = QKeySequence(ev.modifiers() | ev.key())
print('-----', key.toString() )
for shortcut, action in self.actions.by_shortcut.items():
sequence = QKeySequence(shortcut)
print(sequence.toString())
P.log.debug(sequence)
if key.matches(sequence):
action.action()
class Ui:
actions = _Actions()
@staticmethod
def action(name: str, tooltip: str = None, shortcut: str = None):
return Ui.actions.action(name, tooltip, shortcut)
|
[
"piafedit.editor_api.P.log.debug",
"PyQt5.QtGui.QKeySequence"
] |
[((1709, 1731), 'PyQt5.QtGui.QKeySequence', 'QKeySequence', (['shortcut'], {}), '(shortcut)\n', (1721, 1731), False, 'from PyQt5.QtGui import QKeyEvent, QKeySequence\n'), ((1783, 1804), 'piafedit.editor_api.P.log.debug', 'P.log.debug', (['sequence'], {}), '(sequence)\n', (1794, 1804), False, 'from piafedit.editor_api import P\n')]
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Profile, Neighbourhood, Business, Post
from cloudinary.models import CloudinaryField
class SignupForm(UserCreationForm):
email = forms.EmailField(max_length=254)
class Meta:
model = User
fields = ('username', 'email', '<PASSWORD>', '<PASSWORD>',)
class UpdateUserForm(forms.ModelForm):
email = forms.EmailField(max_length=254)
class Meta:
model = User
fields = ('username', 'email')
class UpdateProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['name', 'location', 'profile_picture']
class NeighbourHoodForm(forms.ModelForm):
picture = CloudinaryField('image')
class Meta:
model = Neighbourhood
fields = ('picture', 'name', 'location','health','police')
class BusinessForm(forms.ModelForm):
class Meta:
model = Business
fields = ('name', 'email', 'description')
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'post')
|
[
"django.forms.EmailField",
"cloudinary.models.CloudinaryField"
] |
[((278, 310), 'django.forms.EmailField', 'forms.EmailField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (294, 310), False, 'from django import forms\n'), ((469, 501), 'django.forms.EmailField', 'forms.EmailField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (485, 501), False, 'from django import forms\n'), ((776, 800), 'cloudinary.models.CloudinaryField', 'CloudinaryField', (['"""image"""'], {}), "('image')\n", (791, 800), False, 'from cloudinary.models import CloudinaryField\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
from __future__ import annotations
import re
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Optional
from rdflib import URIRef
from oc_ocdm.graph.graph_entity import GraphEntity
from oc_ocdm.decorators import accepts_only
from oc_ocdm.graph.entities.bibliographic_entity import BibliographicEntity
class ResourceEmbodiment(BibliographicEntity):
"""Resource embodiment (short: re): the particular physical or digital format in which a
bibliographic resource was made available by its publisher."""
@accepts_only('re')
def merge(self, other: ResourceEmbodiment) -> None:
super(ResourceEmbodiment, self).merge(other)
media_type: Optional[URIRef] = other.get_media_type()
if media_type is not None:
self.has_media_type(media_type)
starting_page: Optional[str] = other.get_starting_page()
if starting_page is not None:
self.has_starting_page(starting_page)
ending_page: Optional[str] = other.get_ending_page()
if ending_page is not None:
self.has_ending_page(ending_page)
url: Optional[URIRef] = other.get_url()
if url is not None:
self.has_url(url)
# HAS FORMAT
def get_media_type(self) -> Optional[URIRef]:
uri: Optional[URIRef] = self._get_uri_reference(GraphEntity.iri_has_format)
return uri
@accepts_only('thing')
def has_media_type(self, thing_ref: URIRef) -> None:
"""It allows one to specify the IANA media type of the embodiment.
"""
self.remove_media_type()
self.g.add((self.res, GraphEntity.iri_has_format, thing_ref))
def remove_media_type(self) -> None:
self.g.remove((self.res, GraphEntity.iri_has_format, None))
# HAS FIRST PAGE
def get_starting_page(self) -> Optional[str]:
return self._get_literal(GraphEntity.iri_starting_page)
@accepts_only('literal')
def has_starting_page(self, string: str) -> None:
"""The first page of the bibliographic resource according to the current embodiment.
"""
self.remove_starting_page()
if re.search("[-–]+", string) is None:
page_number = string
else:
page_number = re.sub("[-–]+.*$", "", string)
self._create_literal(GraphEntity.iri_starting_page, page_number)
def remove_starting_page(self) -> None:
self.g.remove((self.res, GraphEntity.iri_starting_page, None))
# HAS LAST PAGE
def get_ending_page(self) -> Optional[str]:
return self._get_literal(GraphEntity.iri_ending_page)
@accepts_only('literal')
def has_ending_page(self, string: str) -> None:
"""The last page of the bibliographic resource according to the current embodiment.
"""
self.remove_ending_page()
if re.search("[-–]+", string) is None:
page_number = string
else:
page_number = re.sub("^.*[-–]+", "", string)
self._create_literal(GraphEntity.iri_ending_page, page_number)
def remove_ending_page(self) -> None:
self.g.remove((self.res, GraphEntity.iri_ending_page, None))
# HAS URL
def get_url(self) -> Optional[URIRef]:
uri: Optional[URIRef] = self._get_uri_reference(GraphEntity.iri_has_url)
return uri
@accepts_only('thing')
def has_url(self, thing_ref: URIRef) -> None:
"""The URL at which the embodiment of the bibliographic resource is available.
"""
self.remove_url()
self.g.add((self.res, GraphEntity.iri_has_url, thing_ref))
def remove_url(self) -> None:
self.g.remove((self.res, GraphEntity.iri_has_url, None))
# HAS TYPE
def create_digital_embodiment(self) -> None:
"""It identifies the particular type of the embodiment, either digital or print.
"""
self._create_type(GraphEntity.iri_digital_manifestation)
def create_print_embodiment(self) -> None:
"""It identifies the particular type of the embodiment, either digital or print.
"""
self._create_type(GraphEntity.iri_print_object)
|
[
"re.search",
"oc_ocdm.decorators.accepts_only",
"re.sub"
] |
[((1351, 1369), 'oc_ocdm.decorators.accepts_only', 'accepts_only', (['"""re"""'], {}), "('re')\n", (1363, 1369), False, 'from oc_ocdm.decorators import accepts_only\n'), ((2203, 2224), 'oc_ocdm.decorators.accepts_only', 'accepts_only', (['"""thing"""'], {}), "('thing')\n", (2215, 2224), False, 'from oc_ocdm.decorators import accepts_only\n'), ((2724, 2747), 'oc_ocdm.decorators.accepts_only', 'accepts_only', (['"""literal"""'], {}), "('literal')\n", (2736, 2747), False, 'from oc_ocdm.decorators import accepts_only\n'), ((3420, 3443), 'oc_ocdm.decorators.accepts_only', 'accepts_only', (['"""literal"""'], {}), "('literal')\n", (3432, 3443), False, 'from oc_ocdm.decorators import accepts_only\n'), ((4132, 4153), 'oc_ocdm.decorators.accepts_only', 'accepts_only', (['"""thing"""'], {}), "('thing')\n", (4144, 4153), False, 'from oc_ocdm.decorators import accepts_only\n'), ((2954, 2980), 're.search', 're.search', (['"""[-–]+"""', 'string'], {}), "('[-–]+', string)\n", (2963, 2980), False, 'import re\n'), ((3063, 3093), 're.sub', 're.sub', (['"""[-–]+.*$"""', '""""""', 'string'], {}), "('[-–]+.*$', '', string)\n", (3069, 3093), False, 'import re\n'), ((3645, 3671), 're.search', 're.search', (['"""[-–]+"""', 'string'], {}), "('[-–]+', string)\n", (3654, 3671), False, 'import re\n'), ((3754, 3784), 're.sub', 're.sub', (['"""^.*[-–]+"""', '""""""', 'string'], {}), "('^.*[-–]+', '', string)\n", (3760, 3784), False, 'import re\n')]
|
from setuptools import setup, find_packages
from collections import OrderedDict
long_description="""
This is a minimal engine for the Battlehack20 game.
It lacks the secure of the original engine, but makes up for it
by running 30 times faster.
Read more at the Battlehack website: https://bh2020.battlecode.org.
"""
setup(name='battlehack20-minimal',
version="1.0.6",
description='Battlehack 2020 fancy viewer.',
author='cooljoseph',
long_description=long_description,
author_email='<EMAIL>',
url="https://bh2020.battlecode.org",
license='GNU General Public License v3.0',
packages=find_packages(),
project_urls=OrderedDict((
('Code', 'https://github.com/cooljoseph1/battlehack20-minimal'),
('Documentation', 'https://github.com/cooljoseph1/battlehack20-minimal')
)),
install_requires=[],
python_requires='>=3, <3.8',
zip_safe=False,
include_package_data=True
)
|
[
"collections.OrderedDict",
"setuptools.find_packages"
] |
[((633, 648), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (646, 648), False, 'from setuptools import setup, find_packages\n'), ((669, 830), 'collections.OrderedDict', 'OrderedDict', (["(('Code', 'https://github.com/cooljoseph1/battlehack20-minimal'), (\n 'Documentation', 'https://github.com/cooljoseph1/battlehack20-minimal'))"], {}), "((('Code', 'https://github.com/cooljoseph1/battlehack20-minimal'\n ), ('Documentation',\n 'https://github.com/cooljoseph1/battlehack20-minimal')))\n", (680, 830), False, 'from collections import OrderedDict\n')]
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for inference."""
import tensorflow as tf
def split_and_pad(strategy, batch_size, x):
"""Split and pad for interence."""
per_replica_size = batch_size // strategy.num_replicas_in_sync
def slice_fn(x, i):
begin = min(x.shape[0], i * per_replica_size)
end = min(x.shape[0], (i + 1) * per_replica_size)
indices = tf.range(begin, end, dtype=tf.int32)
return tf.gather(x, tf.pad(indices, [[0, per_replica_size - end + begin]]))
# pylint: disable=g-long-lambda
return tf.nest.map_structure(
lambda x: strategy.experimental_distribute_values_from_function(
lambda ctx: slice_fn(x, ctx.replica_id_in_sync_group)), x)
# pylint: enable=g-long-lambda
def decode_logits(top_k, max_size, logits, default):
"""Get the span from logits."""
logits = tf.transpose(logits, [0, 2, 1])
values, indices = tf.math.top_k(logits, top_k)
width = (
tf.expand_dims(indices[:, 1, :], -2) -
tf.expand_dims(indices[:, 0, :], -1))
mask = tf.logical_and(width >= 0, width <= max_size)
scores = (
tf.expand_dims(values[:, 0, :], -1) + tf.expand_dims(values[:, 1, :], -2))
scores = tf.where(mask, scores, -1e8)
flat_indices = tf.argmax(tf.reshape(scores, (-1, top_k * top_k)), -1)
begin = tf.gather(
indices[:, 0, :], tf.math.floordiv(flat_indices, top_k), batch_dims=1)
end = tf.gather(
indices[:, 1, :], tf.math.mod(flat_indices, top_k), batch_dims=1)
reduced_mask = tf.math.reduce_any(mask, [-1, -2])
return (tf.where(reduced_mask, begin,
default), tf.where(reduced_mask, end, default),
tf.math.reduce_max(scores, [-1, -2]))
@tf.function
def decode_answer(context, begin, end, token_offsets, end_limit):
i = tf.gather(token_offsets, begin, batch_dims=1)
j = tf.gather(token_offsets, tf.minimum(end + 1, end_limit), batch_dims=1)
j = tf.where(end == end_limit, tf.cast(tf.strings.length(context), tf.int64),
j)
return tf.strings.substr(context, i, j - i)
def distributed_logits_fn(model, x):
return model.distribute_strategy.run(
lambda x: model(x, training=False), args=(x,))
|
[
"tensorflow.range",
"tensorflow.math.mod",
"tensorflow.logical_and",
"tensorflow.math.floordiv",
"tensorflow.gather",
"tensorflow.math.reduce_max",
"tensorflow.math.reduce_any",
"tensorflow.reshape",
"tensorflow.pad",
"tensorflow.transpose",
"tensorflow.minimum",
"tensorflow.strings.length",
"tensorflow.math.top_k",
"tensorflow.where",
"tensorflow.strings.substr",
"tensorflow.expand_dims"
] |
[((1447, 1478), 'tensorflow.transpose', 'tf.transpose', (['logits', '[0, 2, 1]'], {}), '(logits, [0, 2, 1])\n', (1459, 1478), True, 'import tensorflow as tf\n'), ((1500, 1528), 'tensorflow.math.top_k', 'tf.math.top_k', (['logits', 'top_k'], {}), '(logits, top_k)\n', (1513, 1528), True, 'import tensorflow as tf\n'), ((1643, 1688), 'tensorflow.logical_and', 'tf.logical_and', (['(width >= 0)', '(width <= max_size)'], {}), '(width >= 0, width <= max_size)\n', (1657, 1688), True, 'import tensorflow as tf\n'), ((1797, 1833), 'tensorflow.where', 'tf.where', (['mask', 'scores', '(-100000000.0)'], {}), '(mask, scores, -100000000.0)\n', (1805, 1833), True, 'import tensorflow as tf\n'), ((2110, 2144), 'tensorflow.math.reduce_any', 'tf.math.reduce_any', (['mask', '[-1, -2]'], {}), '(mask, [-1, -2])\n', (2128, 2144), True, 'import tensorflow as tf\n'), ((2395, 2440), 'tensorflow.gather', 'tf.gather', (['token_offsets', 'begin'], {'batch_dims': '(1)'}), '(token_offsets, begin, batch_dims=1)\n', (2404, 2440), True, 'import tensorflow as tf\n'), ((2629, 2665), 'tensorflow.strings.substr', 'tf.strings.substr', (['context', 'i', '(j - i)'], {}), '(context, i, j - i)\n', (2646, 2665), True, 'import tensorflow as tf\n'), ((978, 1014), 'tensorflow.range', 'tf.range', (['begin', 'end'], {'dtype': 'tf.int32'}), '(begin, end, dtype=tf.int32)\n', (986, 1014), True, 'import tensorflow as tf\n'), ((1549, 1585), 'tensorflow.expand_dims', 'tf.expand_dims', (['indices[:, 1, :]', '(-2)'], {}), '(indices[:, 1, :], -2)\n', (1563, 1585), True, 'import tensorflow as tf\n'), ((1595, 1631), 'tensorflow.expand_dims', 'tf.expand_dims', (['indices[:, 0, :]', '(-1)'], {}), '(indices[:, 0, :], -1)\n', (1609, 1631), True, 'import tensorflow as tf\n'), ((1710, 1745), 'tensorflow.expand_dims', 'tf.expand_dims', (['values[:, 0, :]', '(-1)'], {}), '(values[:, 0, :], -1)\n', (1724, 1745), True, 'import tensorflow as tf\n'), ((1748, 1783), 'tensorflow.expand_dims', 'tf.expand_dims', (['values[:, 1, :]', '(-2)'], 
{}), '(values[:, 1, :], -2)\n', (1762, 1783), True, 'import tensorflow as tf\n'), ((1854, 1893), 'tensorflow.reshape', 'tf.reshape', (['scores', '(-1, top_k * top_k)'], {}), '(scores, (-1, top_k * top_k))\n', (1864, 1893), True, 'import tensorflow as tf\n'), ((1946, 1983), 'tensorflow.math.floordiv', 'tf.math.floordiv', (['flat_indices', 'top_k'], {}), '(flat_indices, top_k)\n', (1962, 1983), True, 'import tensorflow as tf\n'), ((2044, 2076), 'tensorflow.math.mod', 'tf.math.mod', (['flat_indices', 'top_k'], {}), '(flat_indices, top_k)\n', (2055, 2076), True, 'import tensorflow as tf\n'), ((2156, 2194), 'tensorflow.where', 'tf.where', (['reduced_mask', 'begin', 'default'], {}), '(reduced_mask, begin, default)\n', (2164, 2194), True, 'import tensorflow as tf\n'), ((2216, 2252), 'tensorflow.where', 'tf.where', (['reduced_mask', 'end', 'default'], {}), '(reduced_mask, end, default)\n', (2224, 2252), True, 'import tensorflow as tf\n'), ((2265, 2301), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['scores', '[-1, -2]'], {}), '(scores, [-1, -2])\n', (2283, 2301), True, 'import tensorflow as tf\n'), ((2473, 2503), 'tensorflow.minimum', 'tf.minimum', (['(end + 1)', 'end_limit'], {}), '(end + 1, end_limit)\n', (2483, 2503), True, 'import tensorflow as tf\n'), ((1040, 1094), 'tensorflow.pad', 'tf.pad', (['indices', '[[0, per_replica_size - end + begin]]'], {}), '(indices, [[0, per_replica_size - end + begin]])\n', (1046, 1094), True, 'import tensorflow as tf\n'), ((2561, 2587), 'tensorflow.strings.length', 'tf.strings.length', (['context'], {}), '(context)\n', (2578, 2587), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python3
import cv2 as cv
import json
import math
import numpy as np
import os
import sys
from requests.utils import requote_uri
from geojson import FeatureCollection, Feature, Polygon, dumps
config = json.load(open("config.json","r"))
target = config.get('target')
tilesize = config.get('tilesize')
maxzoom = config.get('maxzoom')
spacing = config.get('spacing')
tile_format = '.webp'
LLBOUNDS = [-180.0, 180.0, -180.0, 180.0]
match = None
if len(sys.argv)>=2:
match = sys.argv[1]
# pixel coordinates as x,y
# tile coordinates as t,u
def xy_to_latlon(x,y,zoom):
max_x = -float(math.pow(2,zoom-1) * tilesize)
lat = x / max_x * LLBOUNDS[1]
max_y = float(math.pow(2,zoom-1) * tilesize)
lon = y / max_y * LLBOUNDS[3]
return lat,lon
features = []
prev_x, prev_y, prev_zoom = None, None, None
ymax = -1e10
for source in config.get('sources',[]):
if len(source)<7:
continue
filename, xrel, yrel, imgzoom, title, family, date, location, comment, href = source[:10]
# auto-place after spacing
if xrel=="+":
xrel = prev_x + int((2**imgzoom) * spacing)
xrel = xrel * (2**(imgzoom-prev_zoom))
print("CALCULATED NEW X FROM", prev_x, " AS ", xrel)
if yrel=="+":
yrel = prev_y + int((2**imgzoom) * spacing)
yrel = yrel * (2**(imgzoom-prev_zoom))
print("CALCULATED NEW Y FROM", prev_y, " AS ", yrel)
print("Processing ",filename)
source_im = cv.imread(filename, cv.IMREAD_UNCHANGED)
w,h = source_im.shape[:2]
# auto-place centered
if yrel=="=":
yrel = prev_yc * (2**(imgzoom-prev_zoom)) - int(h/2)
print("CALCULATED NEW Y FROM CENTER", prev_yc, " AS ", yrel)
# auto-place right of previous column
elif yrel==">":
yrel = (ymax + 1.0/100) * (2**imgzoom)
print("CALCULATED NEW Y FROM YMAX", ymax, " AS ", yrel, imgzoom)
else:
ymax = yrel
# might be off by a factor off two, to be verified.
if title:
print(title)
print("PIXEL COORDINATES ", xrel, yrel, xrel+w, yrel+h)
left, top = xy_to_latlon(xrel, yrel, imgzoom)
right, bottom = xy_to_latlon(xrel+w, yrel+h, imgzoom)
poly = Polygon([[(top, left), (top, right), (bottom, right), (bottom, left), (top, left)]])
feat = Feature(geometry=poly, properties = {
"title": title,
"family": family,
"date": date,
"loc": location,
"comment": comment,
"href": href
})
features.append(feat)
#if imgzoom < maxzoom:
# factor = math.pow(2, maxzoom-imgzoom)
# source_im = cv.resize(source_im, (0, 0), fx=factor, fy=factor)
# FIXME: memory issues when blowing up - add maxzoom (and minzoom) to define display range
# calculate outer borders of previous item to calculate relative positions
prev_x = xrel + w
prev_y = yrel + h
prev_yc = yrel + h/2
prev_yr = float(yrel + h) / (2**imgzoom)
if prev_yr > ymax:
ymax = prev_yr
print("NEW YMAX ", ymax, "FROM", yrel, h)
prev_zoom = imgzoom
if match and not match in filename:
continue
zoom = imgzoom
w = h = 256 # just to pass the first check
while zoom > 1 and w > 2 and h > 2:
if zoom <= maxzoom:
# relative zero (center) at the defined zoom level
x0 = math.floor(tilesize * math.pow(2, zoom-1))
y0 = math.floor(tilesize * math.pow(2, zoom-1))
# image coordinates at that zoom level
xi, yi = x0 + xrel, y0 + yrel
# image size
# NOTE: source images should always be transparent png, or overlaps will be covered
w,h = source_im.shape[:2]
wt = math.ceil(w / tilesize)
ht = math.ceil(h / tilesize)
# first tile to consider
t0 = math.floor(xi / tilesize)
u0 = math.floor(yi / tilesize)
# top left of the considered tile
xA = t0 * tilesize
yA = u0 * tilesize
# offset of the image to the first tile
off_x = xi - xA
off_y = yi - yA
off_t = math.floor(off_x / tilesize)
off_u = math.floor(off_y / tilesize)
# CHECK: adjust range to actually cover the location of the translated image
folders={}
for tx in range(0, wt+1): # TODO: try t0-t0+wt
for ty in range(0, ht+1):
# read current background tile
folder = target+"tiles/"+str(zoom)+"/"+str(u0+ty)
tile_url = folder +"/"+str(t0+tx)+tile_format
#print("Loading "+tile_url)
white_tile = np.zeros([tilesize, tilesize, 4],dtype=np.uint8)
#white_tile.fill(255)
bg = cv.imread(tile_url, cv.IMREAD_UNCHANGED)
if bg is None:
bg = white_tile.copy()
bg = cv.cvtColor(bg, cv.COLOR_BGR2BGRA)
# cut relevant section of source_im
from_x = max(0, tx * tilesize - off_x)
from_y = max(0, ty * tilesize - off_y)
to_x = min(w, (tx+1) * tilesize - off_x)
to_y = min(h, (ty+1) * tilesize - off_y)
cutout = source_im[from_x:to_x, from_y:to_y]
# correct location of background
dest_x = max(0, off_x - tx * tilesize)
dest_y = max(0, off_y - ty * tilesize)
dto_x = dest_x + to_x - from_x
dto_y = dest_y + to_y - from_y
# paste cutout onto background
# TODO: actually paste, not overwrite
# eg. overwrite white_tile, then merge with bg
try:
bg[dest_x:dto_x, dest_y:dto_y] = cutout
except:
continue
#print("SOMETHING FAILED")
#cv.imshow('BG',bg)
#print("CUTOUT SIZE:", (from_x, to_x, from_y, to_y))
#print("FROM Y:", (from_y))
#print("TO Y:", (to_y))
#print("H:", h)
#cv.waitKey(1)
#sys.exit(1)
# then write that tile to file
if not folder in folders:
#print("Writing ",folder)
try:
os.makedirs(folder)
folders[folder]=True
except:
pass
cv.imwrite(tile_url, bg)
zoom = zoom - 1
xrel = math.floor(xrel / 2)
yrel = math.floor(yrel / 2)
source_im = cv.resize(source_im, (0, 0), fx=0.5, fy=0.5)
w = math.floor(w / 2)
h = math.floor(h / 2)
fc = FeatureCollection(features)
fp = open(target+"features.geojson", "w")
fp.write(dumps(fc))
fp.close()
def species_link(s):
return '<li><a href="https://setzkasten.relet.net#?{}">{}</a></li>'.format(requote_uri(s),s)
species_list=map(lambda f:f.properties.get('title'), features)
species_links = "\n".join(map(species_link, sorted(species_list)))
fi = open(target+"species_index.html", "w")
fi.write("<html><body><ul>{}<ul></body><html>".format(species_links))
fi.close()
|
[
"cv2.resize",
"geojson.Polygon",
"os.makedirs",
"math.pow",
"geojson.dumps",
"math.ceil",
"cv2.cvtColor",
"cv2.imwrite",
"math.floor",
"geojson.Feature",
"numpy.zeros",
"cv2.imread",
"geojson.FeatureCollection",
"requests.utils.requote_uri"
] |
[((6720, 6747), 'geojson.FeatureCollection', 'FeatureCollection', (['features'], {}), '(features)\n', (6737, 6747), False, 'from geojson import FeatureCollection, Feature, Polygon, dumps\n'), ((1462, 1502), 'cv2.imread', 'cv.imread', (['filename', 'cv.IMREAD_UNCHANGED'], {}), '(filename, cv.IMREAD_UNCHANGED)\n', (1471, 1502), True, 'import cv2 as cv\n'), ((6799, 6808), 'geojson.dumps', 'dumps', (['fc'], {}), '(fc)\n', (6804, 6808), False, 'from geojson import FeatureCollection, Feature, Polygon, dumps\n'), ((2207, 2295), 'geojson.Polygon', 'Polygon', (['[[(top, left), (top, right), (bottom, right), (bottom, left), (top, left)]]'], {}), '([[(top, left), (top, right), (bottom, right), (bottom, left), (top,\n left)]])\n', (2214, 2295), False, 'from geojson import FeatureCollection, Feature, Polygon, dumps\n'), ((2307, 2445), 'geojson.Feature', 'Feature', ([], {'geometry': 'poly', 'properties': "{'title': title, 'family': family, 'date': date, 'loc': location, 'comment':\n comment, 'href': href}"}), "(geometry=poly, properties={'title': title, 'family': family, 'date':\n date, 'loc': location, 'comment': comment, 'href': href})\n", (2314, 2445), False, 'from geojson import FeatureCollection, Feature, Polygon, dumps\n'), ((6540, 6560), 'math.floor', 'math.floor', (['(xrel / 2)'], {}), '(xrel / 2)\n', (6550, 6560), False, 'import math\n'), ((6574, 6594), 'math.floor', 'math.floor', (['(yrel / 2)'], {}), '(yrel / 2)\n', (6584, 6594), False, 'import math\n'), ((6613, 6657), 'cv2.resize', 'cv.resize', (['source_im', '(0, 0)'], {'fx': '(0.5)', 'fy': '(0.5)'}), '(source_im, (0, 0), fx=0.5, fy=0.5)\n', (6622, 6657), True, 'import cv2 as cv\n'), ((6668, 6685), 'math.floor', 'math.floor', (['(w / 2)'], {}), '(w / 2)\n', (6678, 6685), False, 'import math\n'), ((6696, 6713), 'math.floor', 'math.floor', (['(h / 2)'], {}), '(h / 2)\n', (6706, 6713), False, 'import math\n'), ((6920, 6934), 'requests.utils.requote_uri', 'requote_uri', (['s'], {}), '(s)\n', (6931, 6934), False, 'from 
requests.utils import requote_uri\n'), ((692, 713), 'math.pow', 'math.pow', (['(2)', '(zoom - 1)'], {}), '(2, zoom - 1)\n', (700, 713), False, 'import math\n'), ((3735, 3758), 'math.ceil', 'math.ceil', (['(w / tilesize)'], {}), '(w / tilesize)\n', (3744, 3758), False, 'import math\n'), ((3772, 3795), 'math.ceil', 'math.ceil', (['(h / tilesize)'], {}), '(h / tilesize)\n', (3781, 3795), False, 'import math\n'), ((3843, 3868), 'math.floor', 'math.floor', (['(xi / tilesize)'], {}), '(xi / tilesize)\n', (3853, 3868), False, 'import math\n'), ((3882, 3907), 'math.floor', 'math.floor', (['(yi / tilesize)'], {}), '(yi / tilesize)\n', (3892, 3907), False, 'import math\n'), ((4119, 4147), 'math.floor', 'math.floor', (['(off_x / tilesize)'], {}), '(off_x / tilesize)\n', (4129, 4147), False, 'import math\n'), ((4164, 4192), 'math.floor', 'math.floor', (['(off_y / tilesize)'], {}), '(off_y / tilesize)\n', (4174, 4192), False, 'import math\n'), ((609, 630), 'math.pow', 'math.pow', (['(2)', '(zoom - 1)'], {}), '(2, zoom - 1)\n', (617, 630), False, 'import math\n'), ((3411, 3432), 'math.pow', 'math.pow', (['(2)', '(zoom - 1)'], {}), '(2, zoom - 1)\n', (3419, 3432), False, 'import math\n'), ((3467, 3488), 'math.pow', 'math.pow', (['(2)', '(zoom - 1)'], {}), '(2, zoom - 1)\n', (3475, 3488), False, 'import math\n'), ((4643, 4692), 'numpy.zeros', 'np.zeros', (['[tilesize, tilesize, 4]'], {'dtype': 'np.uint8'}), '([tilesize, tilesize, 4], dtype=np.uint8)\n', (4651, 4692), True, 'import numpy as np\n'), ((4752, 4792), 'cv2.imread', 'cv.imread', (['tile_url', 'cv.IMREAD_UNCHANGED'], {}), '(tile_url, cv.IMREAD_UNCHANGED)\n', (4761, 4792), True, 'import cv2 as cv\n'), ((4888, 4922), 'cv2.cvtColor', 'cv.cvtColor', (['bg', 'cv.COLOR_BGR2BGRA'], {}), '(bg, cv.COLOR_BGR2BGRA)\n', (4899, 4922), True, 'import cv2 as cv\n'), ((6479, 6503), 'cv2.imwrite', 'cv.imwrite', (['tile_url', 'bg'], {}), '(tile_url, bg)\n', (6489, 6503), True, 'import cv2 as cv\n'), ((6351, 6370), 'os.makedirs', 
'os.makedirs', (['folder'], {}), '(folder)\n', (6362, 6370), False, 'import os\n')]
|
import math
import numpy as np
import basis.robot_math as rm
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import robot_sim.end_effectors.gripper.robotiq85.robotiq85 as rtq85
import grasping.annotation.utils as gu
import pickle
base = wd.World(cam_pos=[.3, .3, .3], lookat_pos=[0, 0, 0])
gm.gen_frame(length=.05, thickness=.0021).attach_to(base)
# object
object_bunny = cm.CollisionModel("objects/bunnysim.stl")
object_bunny.set_rgba([.9, .75, .35, .3])
object_bunny.attach_to(base)
# hnd_s
# contact_pairs, contact_points = gpa.plan_contact_pairs(object_bunny,
# max_samples=10000,
# min_dist_between_sampled_contact_points=.014,
# angle_between_contact_normals=math.radians(160),
# toggle_sampled_points=True)
# for p in contact_points:
# gm.gen_sphere(p, radius=.002).attach_to(base)
# base.run()
# pickle.dump(contact_pairs, open( "save.p", "wb" ))
contact_pairs = pickle.load(open( "save.p", "rb" ))
for i, cp in enumerate(contact_pairs):
contact_p0, contact_n0 = cp[0]
contact_p1, contact_n1 = cp[1]
rgba = rm.get_rgba_from_cmap(i)
gm.gen_sphere(contact_p0, radius=.002, rgba=rgba).attach_to(base)
gm.gen_arrow(contact_p0, contact_p0+contact_n0*.01, thickness=.0012, rgba = rgba).attach_to(base)
# gm.gen_arrow(contact_p0, contact_p0-contact_n0*.1, thickness=.0012, rgba = rgba).attach_to(base)
gm.gen_sphere(contact_p1, radius=.002, rgba=rgba).attach_to(base)
# gm.gen_dashstick(contact_p0, contact_p1, thickness=.0012, rgba=rgba).attach_to(base)
gm.gen_arrow(contact_p1, contact_p1+contact_n1*.01, thickness=.0012, rgba=rgba).attach_to(base)
# gm.gen_dasharrow(contact_p1, contact_p1+contact_n1*.03, thickness=.0012, rgba=rgba).attach_to(base)
# base.run()
gripper_s = rtq85.Robotiq85()
contact_offset = .002
grasp_info_list = []
for i, cp in enumerate(contact_pairs):
print(f"{i} of {len(contact_pairs)} done!")
contact_p0, contact_n0 = cp[0]
contact_p1, contact_n1 = cp[1]
contact_center = (contact_p0 + contact_p1) / 2
jaw_width = np.linalg.norm(contact_p0 - contact_p1) + contact_offset * 2
if jaw_width > gripper_s.jawwidth_rng[1]:
continue
hndy = contact_n0
hndz = rm.orthogonal_vector(contact_n0)
grasp_info_list += gu.define_grasp_with_rotation(gripper_s,
object_bunny,
gl_jaw_center_pos=contact_center,
gl_jaw_center_z=hndz,
gl_jaw_center_y=hndy,
jaw_width=jaw_width,
gl_rotation_ax=hndy,
rotation_interval=math.radians(30),
toggle_flip=True)
for grasp_info in grasp_info_list:
aw_width, gl_jaw_center, hnd_pos, hnd_rotmat = grasp_info
gripper_s.fix_to(hnd_pos, hnd_rotmat)
gripper_s.jaw_to(aw_width)
gripper_s.gen_meshmodel().attach_to(base)
base.run()
|
[
"modeling.geometric_model.gen_arrow",
"math.radians",
"modeling.collision_model.CollisionModel",
"basis.robot_math.orthogonal_vector",
"modeling.geometric_model.gen_frame",
"robot_sim.end_effectors.gripper.robotiq85.robotiq85.Robotiq85",
"modeling.geometric_model.gen_sphere",
"numpy.linalg.norm",
"visualization.panda.world.World",
"basis.robot_math.get_rgba_from_cmap"
] |
[((305, 360), 'visualization.panda.world.World', 'wd.World', ([], {'cam_pos': '[0.3, 0.3, 0.3]', 'lookat_pos': '[0, 0, 0]'}), '(cam_pos=[0.3, 0.3, 0.3], lookat_pos=[0, 0, 0])\n', (313, 360), True, 'import visualization.panda.world as wd\n'), ((440, 481), 'modeling.collision_model.CollisionModel', 'cm.CollisionModel', (['"""objects/bunnysim.stl"""'], {}), "('objects/bunnysim.stl')\n", (457, 481), True, 'import modeling.collision_model as cm\n'), ((2011, 2028), 'robot_sim.end_effectors.gripper.robotiq85.robotiq85.Robotiq85', 'rtq85.Robotiq85', ([], {}), '()\n', (2026, 2028), True, 'import robot_sim.end_effectors.gripper.robotiq85.robotiq85 as rtq85\n'), ((1319, 1343), 'basis.robot_math.get_rgba_from_cmap', 'rm.get_rgba_from_cmap', (['i'], {}), '(i)\n', (1340, 1343), True, 'import basis.robot_math as rm\n'), ((2453, 2485), 'basis.robot_math.orthogonal_vector', 'rm.orthogonal_vector', (['contact_n0'], {}), '(contact_n0)\n', (2473, 2485), True, 'import basis.robot_math as rm\n'), ((358, 401), 'modeling.geometric_model.gen_frame', 'gm.gen_frame', ([], {'length': '(0.05)', 'thickness': '(0.0021)'}), '(length=0.05, thickness=0.0021)\n', (370, 401), True, 'import modeling.geometric_model as gm\n'), ((2296, 2335), 'numpy.linalg.norm', 'np.linalg.norm', (['(contact_p0 - contact_p1)'], {}), '(contact_p0 - contact_p1)\n', (2310, 2335), True, 'import numpy as np\n'), ((1348, 1398), 'modeling.geometric_model.gen_sphere', 'gm.gen_sphere', (['contact_p0'], {'radius': '(0.002)', 'rgba': 'rgba'}), '(contact_p0, radius=0.002, rgba=rgba)\n', (1361, 1398), True, 'import modeling.geometric_model as gm\n'), ((1418, 1507), 'modeling.geometric_model.gen_arrow', 'gm.gen_arrow', (['contact_p0', '(contact_p0 + contact_n0 * 0.01)'], {'thickness': '(0.0012)', 'rgba': 'rgba'}), '(contact_p0, contact_p0 + contact_n0 * 0.01, thickness=0.0012,\n rgba=rgba)\n', (1430, 1507), True, 'import modeling.geometric_model as gm\n'), ((1623, 1673), 'modeling.geometric_model.gen_sphere', 'gm.gen_sphere', 
(['contact_p1'], {'radius': '(0.002)', 'rgba': 'rgba'}), '(contact_p1, radius=0.002, rgba=rgba)\n', (1636, 1673), True, 'import modeling.geometric_model as gm\n'), ((1784, 1873), 'modeling.geometric_model.gen_arrow', 'gm.gen_arrow', (['contact_p1', '(contact_p1 + contact_n1 * 0.01)'], {'thickness': '(0.0012)', 'rgba': 'rgba'}), '(contact_p1, contact_p1 + contact_n1 * 0.01, thickness=0.0012,\n rgba=rgba)\n', (1796, 1873), True, 'import modeling.geometric_model as gm\n'), ((3073, 3089), 'math.radians', 'math.radians', (['(30)'], {}), '(30)\n', (3085, 3089), False, 'import math\n')]
|
"""A CarController Module."""
from masonite.request import Request
from masonite.view import View
from masonite.controllers import Controller
from app.Car import Car
class CarController(Controller):
"""CarController Controller Class."""
def __init__(self, request: Request):
"""CarController Initializer
Arguments:
request {masonite.request.Request} -- The Masonite Request class.
"""
self.request = request
def show(self, view: View, request: Request):
cars = Car.all()
return view.render("cars.html", {"cars": cars})
def single(self, view: View, request: Request):
param = self.request.id
car = Car.find(param)
return view.render(
"car.html",
{"car": car}
)
|
[
"app.Car.Car.all",
"app.Car.Car.find"
] |
[((531, 540), 'app.Car.Car.all', 'Car.all', ([], {}), '()\n', (538, 540), False, 'from app.Car import Car\n'), ((696, 711), 'app.Car.Car.find', 'Car.find', (['param'], {}), '(param)\n', (704, 711), False, 'from app.Car import Car\n')]
|
import logging
from xwing.mailbox import initialize, spawn, start
initialize()
logging.basicConfig(level='INFO')
async def run_server(mailbox):
while True:
data = await mailbox.recv()
if not data:
break
sender, message = data
await mailbox.send(sender, message)
if __name__ == '__main__':
spawn(run_server, name='server')
start()
|
[
"xwing.mailbox.spawn",
"xwing.mailbox.initialize",
"xwing.mailbox.start",
"logging.basicConfig"
] |
[((67, 79), 'xwing.mailbox.initialize', 'initialize', ([], {}), '()\n', (77, 79), False, 'from xwing.mailbox import initialize, spawn, start\n'), ((81, 114), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '"""INFO"""'}), "(level='INFO')\n", (100, 114), False, 'import logging\n'), ((348, 380), 'xwing.mailbox.spawn', 'spawn', (['run_server'], {'name': '"""server"""'}), "(run_server, name='server')\n", (353, 380), False, 'from xwing.mailbox import initialize, spawn, start\n'), ((385, 392), 'xwing.mailbox.start', 'start', ([], {}), '()\n', (390, 392), False, 'from xwing.mailbox import initialize, spawn, start\n')]
|
from snuba.clickhouse.columns import (
UUID,
Array,
ColumnSet,
DateTime,
Float,
IPv4,
IPv6,
Nested,
)
from snuba.clickhouse.columns import SchemaModifiers as Modifiers
from snuba.clickhouse.columns import String, UInt
from snuba.clusters.storage_sets import StorageSetKey
from snuba.datasets.schemas.tables import WritableTableSchema
from snuba.datasets.storage import WritableTableStorage
from snuba.datasets.storages import StorageKey
from snuba.datasets.storages.transaction_column_processor import (
TransactionColumnProcessor,
)
from snuba.datasets.table_storage import KafkaStreamLoader
from snuba.datasets.transactions_processor import TransactionsMessageProcessor
from snuba.query.processors.arrayjoin_keyvalue_optimizer import (
ArrayJoinKeyValueOptimizer,
)
from snuba.query.processors.mapping_optimizer import MappingOptimizer
from snuba.query.processors.prewhere import PrewhereProcessor
from snuba.web.split import TimeSplitQueryStrategy
columns = ColumnSet(
[
("project_id", UInt(64)),
("event_id", UUID()),
("trace_id", UUID()),
("span_id", UInt(64)),
("transaction_name", String()),
("transaction_hash", UInt(64, Modifiers(readonly=True))),
("transaction_op", String()),
("transaction_status", UInt(8)),
("start_ts", DateTime()),
("start_ms", UInt(16)),
("finish_ts", DateTime()),
("finish_ms", UInt(16)),
("duration", UInt(32)),
("platform", String()),
("environment", String(Modifiers(nullable=True))),
("release", String(Modifiers(nullable=True))),
("dist", String(Modifiers(nullable=True))),
("ip_address_v4", IPv4(Modifiers(nullable=True))),
("ip_address_v6", IPv6(Modifiers(nullable=True))),
("user", String()),
("user_hash", UInt(64, Modifiers(readonly=True))),
("user_id", String(Modifiers(nullable=True))),
("user_name", String(Modifiers(nullable=True))),
("user_email", String(Modifiers(nullable=True))),
("sdk_name", String()),
("sdk_version", String()),
("http_method", String(Modifiers(nullable=True))),
("http_referer", String(Modifiers(nullable=True))),
("tags", Nested([("key", String()), ("value", String())])),
("_tags_flattened", String()),
("_tags_hash_map", Array(UInt(64), Modifiers(readonly=True))),
("contexts", Nested([("key", String()), ("value", String())])),
("_contexts_flattened", String()),
("measurements", Nested([("key", String()), ("value", Float(64))]),),
("partition", UInt(16)),
("offset", UInt(64)),
("message_timestamp", DateTime()),
("retention_days", UInt(16)),
("deleted", UInt(8)),
]
)
schema = WritableTableSchema(
columns=columns,
local_table_name="transactions_local",
dist_table_name="transactions_dist",
storage_set_key=StorageSetKey.TRANSACTIONS,
mandatory_conditions=[],
prewhere_candidates=["event_id", "transaction_name", "transaction", "title"],
)
storage = WritableTableStorage(
storage_key=StorageKey.TRANSACTIONS,
storage_set_key=StorageSetKey.TRANSACTIONS,
schema=schema,
query_processors=[
MappingOptimizer("tags", "_tags_hash_map", "tags_hash_map_enabled"),
TransactionColumnProcessor(),
ArrayJoinKeyValueOptimizer("tags"),
ArrayJoinKeyValueOptimizer("measurements"),
PrewhereProcessor(),
],
stream_loader=KafkaStreamLoader(
processor=TransactionsMessageProcessor(), default_topic="events",
),
query_splitters=[TimeSplitQueryStrategy(timestamp_col="finish_ts")],
writer_options={"insert_allow_materialized_columns": 1},
)
|
[
"snuba.query.processors.mapping_optimizer.MappingOptimizer",
"snuba.query.processors.prewhere.PrewhereProcessor",
"snuba.query.processors.arrayjoin_keyvalue_optimizer.ArrayJoinKeyValueOptimizer",
"snuba.datasets.schemas.tables.WritableTableSchema",
"snuba.web.split.TimeSplitQueryStrategy",
"snuba.clickhouse.columns.SchemaModifiers",
"snuba.datasets.storages.transaction_column_processor.TransactionColumnProcessor",
"snuba.datasets.transactions_processor.TransactionsMessageProcessor",
"snuba.clickhouse.columns.String",
"snuba.clickhouse.columns.UUID",
"snuba.clickhouse.columns.Float",
"snuba.clickhouse.columns.UInt",
"snuba.clickhouse.columns.DateTime"
] |
[((2817, 3089), 'snuba.datasets.schemas.tables.WritableTableSchema', 'WritableTableSchema', ([], {'columns': 'columns', 'local_table_name': '"""transactions_local"""', 'dist_table_name': '"""transactions_dist"""', 'storage_set_key': 'StorageSetKey.TRANSACTIONS', 'mandatory_conditions': '[]', 'prewhere_candidates': "['event_id', 'transaction_name', 'transaction', 'title']"}), "(columns=columns, local_table_name='transactions_local',\n dist_table_name='transactions_dist', storage_set_key=StorageSetKey.\n TRANSACTIONS, mandatory_conditions=[], prewhere_candidates=['event_id',\n 'transaction_name', 'transaction', 'title'])\n", (2836, 3089), False, 'from snuba.datasets.schemas.tables import WritableTableSchema\n'), ((1042, 1050), 'snuba.clickhouse.columns.UInt', 'UInt', (['(64)'], {}), '(64)\n', (1046, 1050), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((1074, 1080), 'snuba.clickhouse.columns.UUID', 'UUID', ([], {}), '()\n', (1078, 1080), False, 'from snuba.clickhouse.columns import UUID, Array, ColumnSet, DateTime, Float, IPv4, IPv6, Nested\n'), ((1104, 1110), 'snuba.clickhouse.columns.UUID', 'UUID', ([], {}), '()\n', (1108, 1110), False, 'from snuba.clickhouse.columns import UUID, Array, ColumnSet, DateTime, Float, IPv4, IPv6, Nested\n'), ((1133, 1141), 'snuba.clickhouse.columns.UInt', 'UInt', (['(64)'], {}), '(64)\n', (1137, 1141), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((1173, 1181), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (1179, 1181), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((1277, 1285), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (1283, 1285), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((1319, 1326), 'snuba.clickhouse.columns.UInt', 'UInt', (['(8)'], {}), '(8)\n', (1323, 1326), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((1350, 1360), 'snuba.clickhouse.columns.DateTime', 'DateTime', ([], {}), '()\n', (1358, 1360), 
False, 'from snuba.clickhouse.columns import UUID, Array, ColumnSet, DateTime, Float, IPv4, IPv6, Nested\n'), ((1384, 1392), 'snuba.clickhouse.columns.UInt', 'UInt', (['(16)'], {}), '(16)\n', (1388, 1392), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((1417, 1427), 'snuba.clickhouse.columns.DateTime', 'DateTime', ([], {}), '()\n', (1425, 1427), False, 'from snuba.clickhouse.columns import UUID, Array, ColumnSet, DateTime, Float, IPv4, IPv6, Nested\n'), ((1452, 1460), 'snuba.clickhouse.columns.UInt', 'UInt', (['(16)'], {}), '(16)\n', (1456, 1460), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((1484, 1492), 'snuba.clickhouse.columns.UInt', 'UInt', (['(32)'], {}), '(32)\n', (1488, 1492), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((1516, 1524), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (1522, 1524), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((1828, 1836), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (1834, 1836), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((2089, 2097), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (2095, 2097), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((2124, 2132), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (2130, 2132), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((2350, 2358), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (2356, 2358), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((2536, 2544), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (2542, 2544), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((2647, 2655), 'snuba.clickhouse.columns.UInt', 'UInt', (['(16)'], {}), '(16)\n', (2651, 2655), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((2677, 2685), 'snuba.clickhouse.columns.UInt', 'UInt', (['(64)'], {}), '(64)\n', (2681, 2685), False, 'from 
snuba.clickhouse.columns import String, UInt\n'), ((2718, 2728), 'snuba.clickhouse.columns.DateTime', 'DateTime', ([], {}), '()\n', (2726, 2728), False, 'from snuba.clickhouse.columns import UUID, Array, ColumnSet, DateTime, Float, IPv4, IPv6, Nested\n'), ((2758, 2766), 'snuba.clickhouse.columns.UInt', 'UInt', (['(16)'], {}), '(16)\n', (2762, 2766), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((2789, 2796), 'snuba.clickhouse.columns.UInt', 'UInt', (['(8)'], {}), '(8)\n', (2793, 2796), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((3277, 3344), 'snuba.query.processors.mapping_optimizer.MappingOptimizer', 'MappingOptimizer', (['"""tags"""', '"""_tags_hash_map"""', '"""tags_hash_map_enabled"""'], {}), "('tags', '_tags_hash_map', 'tags_hash_map_enabled')\n", (3293, 3344), False, 'from snuba.query.processors.mapping_optimizer import MappingOptimizer\n'), ((3354, 3382), 'snuba.datasets.storages.transaction_column_processor.TransactionColumnProcessor', 'TransactionColumnProcessor', ([], {}), '()\n', (3380, 3382), False, 'from snuba.datasets.storages.transaction_column_processor import TransactionColumnProcessor\n'), ((3392, 3426), 'snuba.query.processors.arrayjoin_keyvalue_optimizer.ArrayJoinKeyValueOptimizer', 'ArrayJoinKeyValueOptimizer', (['"""tags"""'], {}), "('tags')\n", (3418, 3426), False, 'from snuba.query.processors.arrayjoin_keyvalue_optimizer import ArrayJoinKeyValueOptimizer\n'), ((3436, 3478), 'snuba.query.processors.arrayjoin_keyvalue_optimizer.ArrayJoinKeyValueOptimizer', 'ArrayJoinKeyValueOptimizer', (['"""measurements"""'], {}), "('measurements')\n", (3462, 3478), False, 'from snuba.query.processors.arrayjoin_keyvalue_optimizer import ArrayJoinKeyValueOptimizer\n'), ((3488, 3507), 'snuba.query.processors.prewhere.PrewhereProcessor', 'PrewhereProcessor', ([], {}), '()\n', (3505, 3507), False, 'from snuba.query.processors.prewhere import PrewhereProcessor\n'), ((3655, 3704), 'snuba.web.split.TimeSplitQueryStrategy', 
'TimeSplitQueryStrategy', ([], {'timestamp_col': '"""finish_ts"""'}), "(timestamp_col='finish_ts')\n", (3677, 3704), False, 'from snuba.web.split import TimeSplitQueryStrategy\n'), ((1222, 1246), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'readonly': '(True)'}), '(readonly=True)\n', (1231, 1246), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((1558, 1582), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'nullable': '(True)'}), '(nullable=True)\n', (1567, 1582), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((1613, 1637), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'nullable': '(True)'}), '(nullable=True)\n', (1622, 1637), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((1665, 1689), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'nullable': '(True)'}), '(nullable=True)\n', (1674, 1689), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((1724, 1748), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'nullable': '(True)'}), '(nullable=True)\n', (1733, 1748), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((1783, 1807), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'nullable': '(True)'}), '(nullable=True)\n', (1792, 1807), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((1870, 1894), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'readonly': '(True)'}), '(readonly=True)\n', (1879, 1894), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((1925, 1949), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'nullable': '(True)'}), '(nullable=True)\n', (1934, 1949), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((1982, 2006), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'nullable': 
'(True)'}), '(nullable=True)\n', (1991, 2006), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((2040, 2064), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2049, 2064), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((2166, 2190), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2175, 2190), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((2226, 2250), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'nullable': '(True)'}), '(nullable=True)\n', (2235, 2250), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((2394, 2402), 'snuba.clickhouse.columns.UInt', 'UInt', (['(64)'], {}), '(64)\n', (2398, 2402), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((2404, 2428), 'snuba.clickhouse.columns.SchemaModifiers', 'Modifiers', ([], {'readonly': '(True)'}), '(readonly=True)\n', (2413, 2428), True, 'from snuba.clickhouse.columns import SchemaModifiers as Modifiers\n'), ((3571, 3601), 'snuba.datasets.transactions_processor.TransactionsMessageProcessor', 'TransactionsMessageProcessor', ([], {}), '()\n', (3599, 3601), False, 'from snuba.datasets.transactions_processor import TransactionsMessageProcessor\n'), ((2287, 2295), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (2293, 2295), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((2308, 2316), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (2314, 2316), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((2469, 2477), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (2475, 2477), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((2490, 2498), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (2496, 2498), False, 'from snuba.clickhouse.columns import String, 
UInt\n'), ((2588, 2596), 'snuba.clickhouse.columns.String', 'String', ([], {}), '()\n', (2594, 2596), False, 'from snuba.clickhouse.columns import String, UInt\n'), ((2609, 2618), 'snuba.clickhouse.columns.Float', 'Float', (['(64)'], {}), '(64)\n', (2614, 2618), False, 'from snuba.clickhouse.columns import UUID, Array, ColumnSet, DateTime, Float, IPv4, IPv6, Nested\n')]
|
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import time
import mock
from oslotest import base as test_base
from oslotest import moxstubout
from oslo_utils import excutils
from oslo_utils import timeutils
# Convenience alias so tests can reference mox matchers (e.g. mox.In) directly.
mox = moxstubout.mox
class Fail1(excutils.CausedByException):
    """Test exception used as the outer (raising) failure."""
    pass
class Fail2(excutils.CausedByException):
    """Test exception used as the underlying cause."""
    pass
class CausedByTest(test_base.BaseTestCase):
    """Exercises exception chaining via excutils.raise_with_cause."""

    def test_caused_by_explicit(self):
        # The cause is handed over explicitly as a keyword argument.
        caught = self.assertRaises(
            Fail1,
            excutils.raise_with_cause,
            Fail1,
            "I was broken",
            cause=Fail2("I have been broken"),
        )
        self.assertIsInstance(caught.cause, Fail2)
        formatted = caught.pformat()
        self.assertIn("I have been broken", formatted)
        self.assertIn("Fail2", formatted)

    def test_caused_by_implicit(self):
        # raise_with_cause picks up the active exception as the cause.
        def raises_chained():
            try:
                raise Fail2("I have been broken")
            except Fail2:
                excutils.raise_with_cause(Fail1, "I was broken")

        caught = self.assertRaises(Fail1, raises_chained)
        self.assertIsInstance(caught.cause, Fail2)
        formatted = caught.pformat()
        self.assertIn("I have been broken", formatted)
        self.assertIn("Fail2", formatted)
class SaveAndReraiseTest(test_base.BaseTestCase):
    """Exercises excutils.save_and_reraise_exception."""

    def test_save_and_reraise_exception(self):
        # The original exception survives the context manager untouched.
        expected = 'foo'
        caught = None
        try:
            try:
                raise Exception(expected)
            except Exception:
                with excutils.save_and_reraise_exception():
                    pass
        except Exception as err:
            caught = err
        self.assertEqual(str(caught), expected)

    @mock.patch('logging.getLogger')
    def test_save_and_reraise_exception_dropped(self, get_logger_mock):
        # Raising inside the block drops the saved exception and logs it.
        fake_log = get_logger_mock()
        expected = 'second exception'
        caught = None
        try:
            try:
                raise Exception('dropped')
            except Exception:
                with excutils.save_and_reraise_exception():
                    raise Exception(expected)
        except Exception as err:
            caught = err
        self.assertEqual(str(caught), expected)
        self.assertTrue(fake_log.error.called)

    def test_save_and_reraise_exception_no_reraise(self):
        """Test that suppressing the reraise works."""
        try:
            raise Exception('foo')
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                ctxt.reraise = False

    @mock.patch('logging.getLogger')
    def test_save_and_reraise_exception_dropped_no_reraise(self,
                                                           get_logger_mock):
        # With reraise=False the dropped exception must not be logged.
        fake_log = get_logger_mock()
        expected = 'second exception'
        caught = None
        try:
            try:
                raise Exception('dropped')
            except Exception:
                with excutils.save_and_reraise_exception(reraise=False):
                    raise Exception(expected)
        except Exception as err:
            caught = err
        self.assertEqual(str(caught), expected)
        self.assertFalse(fake_log.error.called)

    def test_save_and_reraise_exception_provided_logger(self):
        # A caller-supplied logger receives the error report.
        fake_logger = mock.MagicMock()
        try:
            try:
                raise Exception('foo')
            except Exception:
                with excutils.save_and_reraise_exception(logger=fake_logger):
                    raise Exception('second exception')
        except Exception:
            pass
        self.assertTrue(fake_logger.error.called)
class ForeverRetryUncaughtExceptionsTest(test_base.BaseTestCase):
    """Tests for the excutils.forever_retry_uncaught_exceptions decorator.

    Each test uses mox record/replay: it scripts the exact sequence of
    exception_to_raise(), timeutils.now() and logging.exception() calls the
    decorator is expected to make, then replays it via exception_generator().
    """
    def setUp(self):
        super(ForeverRetryUncaughtExceptionsTest, self).setUp()
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = moxfixture.mox
        self.stubs = moxfixture.stubs
    @excutils.forever_retry_uncaught_exceptions
    def exception_generator(self):
        # Raises whatever exception_to_raise() returns and stops once it
        # returns None; the decorator re-invokes this after every raise.
        exc = self.exception_to_raise()
        while exc is not None:
            raise exc
            # NOTE(review): unreachable within a single call -- after the
            # raise, the decorator restarts the method from the top.
            exc = self.exception_to_raise()
    def exception_to_raise(self):
        # Stubbed out with mox by each test to script the exception sequence.
        return None
    def my_time_sleep(self, arg):
        # No-op replacement for time.sleep so retries don't slow the tests.
        pass
    def exc_retrier_common_start(self):
        # Stub everything the retry decorator touches: sleeping, logging,
        # and the wall clock.
        self.stubs.Set(time, 'sleep', self.my_time_sleep)
        self.mox.StubOutWithMock(logging, 'exception')
        self.mox.StubOutWithMock(timeutils, 'now',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(self, 'exception_to_raise')
    def exc_retrier_sequence(self, exc_id=None,
                             exc_count=None, before_timestamp_calls=(),
                             after_timestamp_calls=()):
        # Records one raised exception plus the timestamp/log calls expected
        # around it; exc_count == 0 means no log line is expected.
        self.exception_to_raise().AndReturn(
            Exception('unexpected %d' % exc_id))
        # Timestamp calls that happen before the logging is possibly triggered.
        for timestamp in before_timestamp_calls:
            timeutils.now().AndReturn(timestamp)
        if exc_count != 0:
            logging.exception(mox.In(
                'Unexpected exception occurred %d time(s)' % exc_count))
        # Timestamp calls that happen after the logging is possibly triggered.
        for timestamp in after_timestamp_calls:
            timeutils.now().AndReturn(timestamp)
    def exc_retrier_common_end(self):
        # A final None makes the generator exit; replaying then verifies all
        # recorded expectations.
        self.exception_to_raise().AndReturn(None)
        self.mox.ReplayAll()
        self.exception_generator()
        self.addCleanup(self.stubs.UnsetAll)
    def test_exc_retrier_1exc_gives_1log(self):
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1, exc_count=1,
                                  after_timestamp_calls=[0])
        self.exc_retrier_common_end()
    def test_exc_retrier_same_10exc_1min_gives_1log(self):
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1,
                                  after_timestamp_calls=[0], exc_count=1)
        # By design, the following exceptions don't get logged because they
        # are within the same minute.
        for i in range(2, 11):
            self.exc_retrier_sequence(exc_id=1,
                                      before_timestamp_calls=[i],
                                      exc_count=0)
        self.exc_retrier_common_end()
    def test_exc_retrier_same_2exc_2min_gives_2logs(self):
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1,
                                  after_timestamp_calls=[0], exc_count=1)
        self.exc_retrier_sequence(exc_id=1,
                                  before_timestamp_calls=[65], exc_count=1,
                                  after_timestamp_calls=[65, 66])
        self.exc_retrier_common_end()
    def test_exc_retrier_same_10exc_2min_gives_2logs(self):
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1,
                                  after_timestamp_calls=[0], exc_count=1)
        for ts in [12, 23, 34, 45]:
            self.exc_retrier_sequence(exc_id=1,
                                      before_timestamp_calls=[ts],
                                      exc_count=0)
        # The previous 4 exceptions are counted here
        self.exc_retrier_sequence(exc_id=1,
                                  before_timestamp_calls=[106],
                                  exc_count=5,
                                  after_timestamp_calls=[106, 107])
        # Again, the following are not logged due to being within
        # the same minute
        for ts in [117, 128, 139, 150]:
            self.exc_retrier_sequence(exc_id=1,
                                      before_timestamp_calls=[ts],
                                      exc_count=0)
        self.exc_retrier_common_end()
    def test_exc_retrier_mixed_4exc_1min_gives_2logs(self):
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1,
                                  # The stop watch will be started,
                                  # which will consume one timestamp call.
                                  after_timestamp_calls=[0], exc_count=1)
        # By design, this second 'unexpected 1' exception is not counted. This
        # is likely a rare thing and is a sacrifice for code simplicity.
        self.exc_retrier_sequence(exc_id=1, exc_count=0,
                                  # Since the exception will be the same
                                  # the expiry method will be called, which
                                  # uses up a timestamp call.
                                  before_timestamp_calls=[5])
        self.exc_retrier_sequence(exc_id=2, exc_count=1,
                                  # The watch should get reset, which uses
                                  # up two timestamp calls.
                                  after_timestamp_calls=[10, 20])
        # Again, trailing exceptions within a minute are not counted.
        self.exc_retrier_sequence(exc_id=2, exc_count=0,
                                  # Since the exception will be the same
                                  # the expiry method will be called, which
                                  # uses up a timestamp call.
                                  before_timestamp_calls=[25])
        self.exc_retrier_common_end()
    def test_exc_retrier_mixed_4exc_2min_gives_2logs(self):
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1,
                                  # The stop watch will now be started.
                                  after_timestamp_calls=[0], exc_count=1)
        # Again, this second exception of the same type is not counted
        # for the sake of code simplicity.
        self.exc_retrier_sequence(exc_id=1,
                                  before_timestamp_calls=[10], exc_count=0)
        # The difference between this and the previous case is the log
        # is also triggered by more than a minute expiring.
        self.exc_retrier_sequence(exc_id=2, exc_count=1,
                                  # The stop watch will now be restarted.
                                  after_timestamp_calls=[100, 105])
        self.exc_retrier_sequence(exc_id=2,
                                  before_timestamp_calls=[110], exc_count=0)
        self.exc_retrier_common_end()
    def test_exc_retrier_mixed_4exc_2min_gives_3logs(self):
        self.exc_retrier_common_start()
        self.exc_retrier_sequence(exc_id=1,
                                  # The stop watch will now be started.
                                  after_timestamp_calls=[0], exc_count=1)
        # This time the second 'unexpected 1' exception is counted due
        # to the same exception occurring just as the minute expires.
        self.exc_retrier_sequence(exc_id=1,
                                  before_timestamp_calls=[10], exc_count=0)
        self.exc_retrier_sequence(exc_id=1,
                                  before_timestamp_calls=[100],
                                  after_timestamp_calls=[100, 105],
                                  exc_count=2)
        self.exc_retrier_sequence(exc_id=2, exc_count=1,
                                  after_timestamp_calls=[110, 111])
        self.exc_retrier_common_end()
|
[
"oslo_utils.excutils.raise_with_cause",
"mock.patch",
"oslotest.moxstubout.MoxStubout",
"oslo_utils.excutils.save_and_reraise_exception",
"mock.MagicMock",
"oslo_utils.timeutils.now"
] |
[((2221, 2252), 'mock.patch', 'mock.patch', (['"""logging.getLogger"""'], {}), "('logging.getLogger')\n", (2231, 2252), False, 'import mock\n'), ((3043, 3074), 'mock.patch', 'mock.patch', (['"""logging.getLogger"""'], {}), "('logging.getLogger')\n", (3053, 3074), False, 'import mock\n'), ((3740, 3756), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3754, 3756), False, 'import mock\n'), ((4274, 4297), 'oslotest.moxstubout.MoxStubout', 'moxstubout.MoxStubout', ([], {}), '()\n', (4295, 4297), False, 'from oslotest import moxstubout\n'), ((1546, 1594), 'oslo_utils.excutils.raise_with_cause', 'excutils.raise_with_cause', (['Fail1', '"""I was broken"""'], {}), "(Fail1, 'I was broken')\n", (1571, 1594), False, 'from oslo_utils import excutils\n'), ((2953, 2990), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (2988, 2990), False, 'from oslo_utils import excutils\n'), ((5430, 5445), 'oslo_utils.timeutils.now', 'timeutils.now', ([], {}), '()\n', (5443, 5445), False, 'from oslo_utils import timeutils\n'), ((5744, 5759), 'oslo_utils.timeutils.now', 'timeutils.now', ([], {}), '()\n', (5757, 5759), False, 'from oslo_utils import timeutils\n'), ((2061, 2098), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (2096, 2098), False, 'from oslo_utils import excutils\n'), ((2534, 2571), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (2569, 2571), False, 'from oslo_utils import excutils\n'), ((3426, 3476), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {'reraise': '(False)'}), '(reraise=False)\n', (3461, 3476), False, 'from oslo_utils import excutils\n'), ((3877, 3932), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {'logger': 'fake_logger'}), '(logger=fake_logger)\n', (3912, 3932), False, 'from oslo_utils import 
excutils\n')]
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Common functions used across the UI tabs.
The UI shares several common functions across its tabs. Unlike dep_util, this file
contains functions that specifically reference elements in the tab. This means, if
further extension of the UI is pursued, this file should be reserved for common
functions that are *explicitly* tied to the UI and dep_util for functions that could
be used in contexts outside the UI.
"""
import collections
import datetime
import glob
import os
import shutil
import subprocess
import sys
from PyQt5 import QtCore, QtWidgets
# Resolve the repository layout relative to this file and extend sys.path so
# sibling script packages (aws, render, util) can be imported directly below.
dir_scripts = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dir_root = os.path.dirname(dir_scripts)
sys.path.append(dir_root)
sys.path.append(os.path.join(dir_scripts, "aws"))
sys.path.append(os.path.join(dir_scripts, "render"))
sys.path.append(os.path.join(dir_scripts, "util"))
import dep_util
import glog_check as glog
import scripts.render.config as config
from log_reader import LogReader
from scripts.aws.create import (
get_staging_info,
get_render_pid,
has_render_flag,
run_ssh_command,
)
from scripts.aws.util import AWSUtil
from scripts.render.network import LAN
from scripts.util.system_util import (
get_flags,
get_flags_from_flagfile,
image_type_paths,
run_command,
)
from slider_image_thresholds import SliderWidget
# Filesystem layout, resolved relative to this file.
script_dir = os.path.dirname(os.path.realpath(__file__))
scripts_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
dep_dir = os.path.join(scripts_dir, os.pardir)
dep_bin_dir = os.path.join(dep_dir, "build", "bin")  # non-Python binaries live here
dep_res_dir = os.path.join(dep_dir, "res")
dep_flags_dir = os.path.join(dep_res_dir, "flags")
# NOTE: import-time side effect -- guarantees the flags directory exists.
os.makedirs(dep_flags_dir, exist_ok=True)
source_root = os.path.join(dep_dir, "source")
depth_est_src = os.path.join(source_root, "depth_estimation")
render_src = os.path.join(source_root, "render")
render_scripts = os.path.join(scripts_dir, "render")
# Image types whose processing thresholds are tuned with UI sliders.
type_color_var = "color_variance"
type_fg_mask = "fg_mask"
threshold_sliders = {
    # attr: type, printed name, slider index, max value, default value
    "noise": [type_color_var, "Noise variance", 1, 1.5e-3, 4e-5],
    "detail": [type_color_var, "Detail variance", 2, 2e-2, 1e-3],
    "blur": [type_fg_mask, "Blur radius", 1, 20, 2],
    "closing": [type_fg_mask, "Closing size", 2, 20, 4],
    "thresh": [type_fg_mask, "Threshold", 3, 1, 3e-2],
}
def init(parent):
    """Sets up all the UI global internals (logs, data, and flags) and any
    tab specific components.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    parent.is_refreshing_data = True
    parent.initialize_paths()
    parent.set_default_top_level_paths()
    parent.setup_logs()
    parent.setup_data()
    parent.setup_flags()
    # Optional, tab-specific setup hooks; run only where the tab defines them,
    # preserving this fixed order.
    optional_steps = (
        "retrieve_missing_flagfiles",
        "add_default_flags",
        "setup_thresholds",
        "add_data_type_validators",
        "setup_farm",
        "update_run_button_text",
    )
    for step in optional_steps:
        if step in dir(parent):
            getattr(parent, step)()
    parent.is_refreshing_data = False
def setup_aws_config(parent):
    """Sets up the configuration of the Kubernetes cluster.

    Reads the AWS "create" flagfile (if present) and mirrors its cluster size
    and instance type into the farm widgets.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    if not parent.parent.is_aws:
        return
    create_flagfile = os.path.join(
        parent.path_flags, parent.app_name_to_flagfile[parent.app_aws_create]
    )
    if not os.path.exists(create_flagfile):
        return
    create_flags = get_flags_from_flagfile(create_flagfile)
    if "cluster_size" in create_flags:
        spin = getattr(parent.dlg, f"spin_{parent.tag}_farm_num_workers", None)
        spin.setValue(int(create_flags["cluster_size"]))
    if "instance_type" in create_flags:
        dd = getattr(parent.dlg, f"dd_{parent.tag}_farm_ec2", None)
        dd.setCurrentText(create_flags["instance_type"])
def setup_farm(parent):
    """Sets up the UI to interact with a LAN cluster.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    initialize_farm_groupbox(parent)
    # Broadcast address for the master's /24 subnet.
    subnet_prefix, _ = parent.parent.ui_flags.master.rsplit(".", 1)
    parent.lan = LAN(f"{subnet_prefix}.255")
def get_tooltip(parent, app_name):
    """Gets the help tooltip display of a binary.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        app_name (str): Name of the binary.

    Returns:
        str: Help from the binary.
    """
    # Python entry points live under scripts/, compiled apps under build/bin.
    base_dir = scripts_dir if app_name.endswith(".py") else dep_bin_dir
    tooltip = dep_util.get_tooltip(os.path.join(base_dir, app_name))
    if not tooltip:
        parent.log_reader.log_warning(f"Cannot get tooltip for: {app_name}")
    return tooltip
def initialize_paths(parent):
    """Initializes paths for scripts and flags depending on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    tag = parent.tag
    parent.app_name_to_flagfile = {}
    if tag in ("bg", "depth", "export"):
        parent.app_name = "render/render.py"
    if tag in ("depth", "export"):
        parent.app_aws_clean = "aws/clean.py"
        parent.app_aws_create = "aws/create.py"
        parent.app_name_to_flagfile[parent.app_aws_clean] = "clean.flags"
    # Flagfile name associated with each tab.
    basenames = {
        "calibrate": "calibration.flags",
        "bg": "render_background.flags",
        "depth": "render_depth.flags",
        "export": "render_export.flags",
    }
    if tag in basenames:
        parent.flagfile_basename = basenames[tag]
    if tag == "calibrate":
        parent.app_name = "Calibration"
    elif tag == "depth":
        parent.app_name_to_flagfile[parent.app_aws_create] = "aws_create_video.flags"
    elif tag == "export":
        parent.app_name_to_flagfile[parent.app_aws_create] = "aws_create_export.flags"
        parent.app_aws_download_meshes = "aws/download_meshes.py"
        parent.app_name_to_flagfile[
            parent.app_aws_download_meshes
        ] = "download_meshes.flags"
    parent.app_name_to_flagfile[parent.app_name] = parent.flagfile_basename
    parent.tooltip = get_tooltip(parent, parent.app_name)
    parent.is_refreshing_data = False
    parent.is_process_killed = False
    parent.threshs_tooltip = "Click and drag to pan, scroll to zoom in and out"
    parent.script_dir = script_dir
def setup_logs(parent):
    """Sets up logging system for dialog on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.

    Returns:
        LogReader: Reader configured for the current tab.
    """
    tag = parent.tag
    text_widget = getattr(parent.dlg, f"text_{tag}_log", None)
    tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
    log_tab_idx = tab_widget.count() - 1  # log is always the last tab
    timestamp = dep_util.get_timestamp("%Y%m%d%H%M%S.%f")
    log_file = os.path.join(
        parent.path_logs, f"{type(parent).__name__}_{timestamp}"
    )
    log_reader = LogReader(text_widget, parent, log_file)
    log_reader.set_tab_widget(tab_widget, log_tab_idx)
    return log_reader
def setup_flagfile_tab(parent):
    """Wires the flagfile editor widgets for the specified tab.

    Connects the text edit's change signal and the save button's click signal
    to the parent's handlers; the save button starts out disabled.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    tag = parent.tag
    editor = getattr(parent.dlg, f"text_{tag}_flagfile_edit", None)
    save_button = getattr(parent.dlg, f"btn_{tag}_flagfile_save", None)
    editor.textChanged.connect(parent.on_changed_flagfile_edit)
    save_button.clicked.connect(parent.save_flag_file)
    save_button.setEnabled(False)
def setup_file_explorer(parent):
    """Creates the file explorer rooted on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    tree = parent.dlg.tree_file_explorer
    parent.fs_tree = tree
    parent.fs_model, parent.fs_tree = dep_util.setup_file_explorer(
        tree, parent.path_project
    )
    # Clicking any entry previews it in the tab's image pane.
    parent.fs_tree.clicked.connect(lambda: preview_file(parent))
def preview_file(parent):
    """Displays the file and its label on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    dlg = parent.dlg
    # The project path prefix is stripped from the displayed label.
    prefix = f"{parent.path_project}/"
    dep_util.preview_file(
        parent.fs_model,
        parent.fs_tree,
        dlg.label_preview_image,
        dlg.label_preview_path,
        prefix,
    )
def switch_ui_elements_for_processing(parent, gb, state):
    """Switches element interaction when processing on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        state (str): Identifier of the callback state.
    """
    parent.update_buttons(gb)
    dlg = parent.dlg
    # Toggle every other group box, but never the file explorer.
    for other_gb in dlg.findChildren(QtWidgets.QGroupBox):
        if other_gb != gb and not other_gb.objectName().endswith("_file_explorer"):
            other_gb.setEnabled(state)
    # Toggle the current group box's own controls.
    dep_util.switch_objects_prefix(gb, ["cb_", "dd_", "val_", "label_"], state)
    # Toggle tabs, leaving the image preview and log tabs interactive.
    for widget in dlg.findChildren(QtWidgets.QWidget):
        name = widget.objectName()
        if name.startswith("tab_") and not (
            name.endswith("_preview") or name.endswith("_log")
        ):
            widget.setEnabled(state)
    # Toggle the other top-level sections.
    for section in parent.parent.sections:
        if section != parent:
            dep_util.set_tab_enabled(parent.dlg.w_steps, section.tag, state)
def cancel_process(parent):
    """Stops a running process on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    if parent.is_farm and parent.is_aws:
        # Render runs detached on the cloud, so it must be killed explicitly.
        detached_render = any(
            name == "run_aws_create" or name.startswith("run_export")
            for name in parent.log_reader.get_processes()
        )
        if detached_render:
            aws_util = AWSUtil(
                parent.path_aws_credentials,
                region_name=parent.parent.aws_util.region_name,
            )
            _, ip_staging = get_staging_info(aws_util, parent.path_aws_ip_file)
            if ip_staging:
                pid = get_render_pid(parent.path_aws_key_fn, ip_staging)
                if pid is not None:
                    run_ssh_command(
                        parent.path_aws_key_fn, ip_staging, f"kill -9 {pid}"
                    )
    parent.log_reader.kill_all_processes()
    parent.is_process_killed = True
    if "reset_run_button_text" in dir(parent):
        parent.reset_run_button_text()
def is_cloud_running_process(parent):
    """Checks if a render process is being run on the cloud.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.

    Returns:
        bool: Whether a render with this tab's flag is active on the staging host.
    """
    key_file = parent.path_aws_key_fn
    if not (parent.is_aws and parent.is_farm) or not os.path.isfile(key_file):
        return False
    aws_util = AWSUtil(
        parent.path_aws_credentials, region_name=parent.parent.aws_util.region_name
    )
    _, ip_staging = get_staging_info(
        aws_util, parent.path_aws_ip_file, start_instance=False
    )
    if not ip_staging:
        return False
    if parent.tag not in ("depth", "export"):
        return False
    # Depth renders are distinguished by the run_depth_estimation flag value.
    return has_render_flag(
        key_file, ip_staging, "run_depth_estimation", parent.tag == "depth"
    )
def sync_with_s3(parent, gb, subdirs):
    """Synchronizes data from the local directory to S3.

    Builds one shell command that tars each frame in each subdir and then
    runs `aws s3 sync` per subdir (only .tar files are uploaded), chaining
    everything with `&&` and launching it as a single tracked process.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        subdirs (list[str]): Local path to be synced.
    """
    run_silently = not parent.parent.ui_flags.verbose
    cmds = []
    parent.log_reader.log_notice(f"Syncing frames with S3...")
    for subdir in subdirs:
        local = os.path.join(config.DOCKER_INPUT_ROOT, subdir)
        remote = os.path.join(parent.parent.ui_flags.project_root, subdir)
        # "_levels" directories fan out into one subdirectory per pyramid level.
        if "_levels" in subdir:
            # NOTE(review): "locals" shadows the builtin of the same name.
            locals = [
                os.path.join(local, f"level_{l}") for l in range(len(config.WIDTHS))
            ]
        else:
            locals = [local]
        # Tar frames
        tar_app_path = os.path.join(scripts_dir, "util", "tar_frame.py")
        for local_i in locals:
            frames = dep_util.get_frame_list(local_i)
            if not frames:
                if not run_silently:
                    print(glog.yellow(f"No frames found for S3 syncing in {local_i}"))
                continue
            for frame in frames:
                cmds.append(f"python3.7 {tar_app_path} --src={local_i} --frame={frame}")
        cmds.append(f"aws s3 sync {local} {remote} --exclude '*' --include '*.tar'")
    p_id = f"sync_results_s3_{parent.tag}"
    cmd_and = " && ".join(cmds)
    cmd = f'/bin/sh -c "{cmd_and}"'
    start_process(parent, cmd, gb, p_id, run_silently)
def on_process_finished(parent, p_id):
    """Callback event handler for a process completing on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        p_id (str): PID of completed process.
    """
    # "run*" (or empty) ids clear the whole process list; anything else only
    # clears its own entry.
    if not p_id or p_id.startswith("run"):
        parent.log_reader.remove_processes()
    else:
        parent.log_reader.remove_process(p_id)
    parent.refresh_data()
    started_by_run = p_id.startswith("run")
    if started_by_run and "_export_" not in p_id:
        if "update_frame_names" in dir(parent):
            parent.update_frame_names()
    if "sync_with_s3" in dir(parent) and not parent.is_process_killed:
        if parent.parent.is_aws:
            parent.sync_with_s3()
    if len(parent.log_reader.get_processes()) == 0:
        # Re-enable UI elements
        switch_ui_elements_for_processing(parent, parent.log_reader.gb, True)
        # A completed run may have produced data that unlocks other tabs.
        if started_by_run:
            for section in parent.parent.sections:
                if section != parent:
                    section.refresh_data()
        if "update_run_button_text" in dir(parent):
            parent.update_run_button_text()
        parent.is_process_killed = False
def populate_dropdown(parent, gb, dd):
    """Populates a dropdown on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        dd (QtWidgets.QComboBox): Dropdown UI element.
    """
    project = parent.parent.path_project
    section_name = dep_util.remove_prefix(gb.objectName(), "gb_")
    # Remember the current selection so it can be restored after repopulating.
    previous_text = dd.currentText() if dd.count() > 0 else ""
    file_tag = dep_util.remove_prefix(dd.objectName(), f"dd_{section_name}_")
    options = parent.get_files(file_tag)
    dep_util.populate_dropdown(dd, options, f"{project}/")
    dep_util.update_qt_dropdown(dd, previous_text, add_if_missing=False)
def populate_dropdowns(parent, gb, dd_first=None):
    """Populates the dropdowns on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        dd_first (list[QtWidgets.QGroupBox], optional): Dropdowns to populate first.
    """
    priority = list(dd_first) if dd_first else []
    for dd in priority:
        populate_dropdown(parent, gb, dd)
    # Then every remaining combo box in the group.
    for dd in gb.findChildren(QtWidgets.QComboBox):
        if dd not in priority:
            populate_dropdown(parent, gb, dd)
def refresh_data(parent):
    """Updates UI elements to be in sync with data on disk on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    tag = parent.tag
    tab = getattr(parent.dlg, f"t_{tag}", None)
    if tag in ("bg", "depth", "export"):
        parent.path_rig_json = get_calibrated_rig_json(parent)
    if tag == "depth":
        parent.update_bg_checkbox()
    # Lock dropdown callbacks while the dropdowns are being re-populated.
    parent.is_refreshing_data = True
    for gb in tab.findChildren(QtWidgets.QGroupBox):
        gb.setEnabled(True)
        parent.populate_dropdowns(gb)
        parent.update_buttons(gb)
    if "flagfile_fn" in dir(parent):
        sync_data_and_flagfile(parent, parent.flagfile_fn)
    parent.disable_tab_if_no_data()
    parent.is_refreshing_data = False
def update_flagfile_edit(parent, flagfile_fn, switch_to_flag_tab=False):
    """Updates the edit box for the flagfile on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        flagfile_fn (str): Name of the flagfile.
        switch_to_flag_tab (bool, optional): Whether or not to switch tabs after updating.
    """
    if not os.path.isfile(flagfile_fn):
        return
    tag = parent.tag
    dlg = parent.dlg
    text = getattr(dlg, f"text_{tag}_flagfile_edit", None)
    preview = getattr(dlg, f"w_{tag}_preview", None)
    # Read via a context manager so the file handle is closed deterministically
    # (the previous code left the handle for the garbage collector).
    with open(flagfile_fn) as f:
        text.setPlainText(f.read())
    if switch_to_flag_tab:
        preview.setCurrentIndex(1)
def update_data_or_flags(
    parent, flagfile_fn, flagfile_from_data, switch_to_flag_tab=False
):
    """Updates the flagfile from the UI elements or vice versa on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        flagfile_fn (str): Name of the flagfile.
        flagfile_from_data (bool): Whether to load the flagfile from the data (True) or
            vice versa (False).
        switch_to_flag_tab (bool, optional): Whether or not to switch tabs after updating.
    """
    if not flagfile_fn:
        return
    flags = get_flags_from_flagfile(flagfile_fn)
    if flagfile_from_data:
        parent.update_flags_from_data(flags)
        # Persist the UI state back to disk, keys sorted for stable diffs.
        ordered = collections.OrderedDict(sorted(flags.items()))
        dep_util.write_flagfile(flagfile_fn, ordered)
    else:
        parent.update_data_from_flags(flags)
    # Keep the flagfile editor pane in sync with whatever just changed.
    parent.update_flagfile_edit(flagfile_fn, switch_to_flag_tab)
def sync_data_and_flagfile(
    parent, flagfile_fn, set_label=True, switch_to_flag_tab=False
):
    """Synchronizes displayed UI elements and contents of the flagfile.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        flagfile_fn (str): Name of the flagfile.
        set_label (bool, optional): Whether or not to update the flagfile label in the UI.
        switch_to_flag_tab (bool, optional): Whether or not to switch tabs after updating.
    """
    # Fix: set_label was previously accepted but ignored; honor it now
    # (the default True preserves the old behavior).
    if set_label:
        tag = parent.tag
        dlg = parent.dlg
        label = getattr(dlg, f"label_{tag}_flagfile_path", None)
        label.setText(os.path.basename(flagfile_fn))
    # flag file to data first, then data to flag file for missing info
    flagfile_from_data = False
    parent.update_data_or_flags(flagfile_fn, flagfile_from_data, switch_to_flag_tab)
    parent.update_data_or_flags(flagfile_fn, not flagfile_from_data, switch_to_flag_tab)
def disable_tab_if_no_data(parent, btn_run):
    """Prevents navigation to the tab if the required data is not present on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        btn_run (QtWidgets.QPushButton): UI button for tab switch.
    """
    if btn_run.isEnabled():
        return
    # A disabled Run button signals missing data, so lock the whole tab
    dep_util.set_tab_enabled(parent.dlg.w_steps, parent.tag, enabled=False)
def setup_project(parent, mkdirs=False):
    """Retrieves any missing flagfiles and sets the default flags on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        mkdirs (bool, optional): Whether or not to make the defined directories.
            NOTE(review): currently unused in this function — confirm intent.
    """
    # Guard the refresh so change-callbacks fired while populating widgets do not
    # re-trigger flagfile updates
    parent.is_refreshing_data = True
    parent.log_reader.log_header()
    parent.refresh_data()
    parent.is_refreshing_data = False
def save_flag_file(parent, flagfile_fn):
    """Saves flagfile from the UI to disk on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        flagfile_fn (str): Name of the flagfile.
    """
    if not os.path.isfile(flagfile_fn):
        return
    tag = parent.tag
    dlg = parent.dlg
    text_edit = getattr(dlg, f"text_{tag}_flagfile_edit", None)
    btn_save = getattr(dlg, f"btn_{tag}_flagfile_save", None)
    # The context manager closes the file; the explicit f.close() was redundant
    with open(flagfile_fn, "w") as f:
        f.write(text_edit.toPlainText())
    # Disable save button
    btn_save.setEnabled(False)
    # Update corresponding groupbox
    flagfile_from_data = False  # flagfile to data
    parent.update_data_or_flags(flagfile_fn, flagfile_from_data)
def update_flagfile(parent, flagfile_fn):
    """Regenerates the flagfile contents from the current UI state on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        flagfile_fn (str): Name of the flagfile.
    """
    # flagfile_from_data=True: push current UI state into the flagfile
    parent.update_data_or_flags(flagfile_fn, flagfile_from_data=True)
def retrieve_missing_flagfiles(parent):
    """Copies the missing flagfiles to project for local modification on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    tag = parent.tag
    # NOTE(review): ff_base is only bound for the tags below; any other tag would
    # raise NameError on the ffs_expected line — confirm tags are restricted upstream
    if tag == "calibrate":
        ff_base = "calibration.flags"
    elif tag in ["bg", "depth", "export"]:
        ff_base = "render.flags"
    # Pairs of (source path relative to dep_flags_dir, absolute destination path)
    ffs_expected = [[ff_base, parent.flagfile_fn]]
    if tag in ["depth", "export"]:
        # These tabs additionally need the AWS cluster-creation flagfile
        ff_aws_create = os.path.join(
            parent.path_flags, parent.app_name_to_flagfile[parent.app_aws_create]
        )
        ffs_expected.append(["aws_create.flags", ff_aws_create])
    for ff_src_rel, ff_dst_abs in ffs_expected:
        if not os.path.isfile(ff_dst_abs):
            # Copy the template flagfile into the project so it can be edited locally
            ff_src_abs = os.path.join(dep_flags_dir, ff_src_rel)
            os.makedirs(os.path.dirname(ff_dst_abs), exist_ok=True)
            shutil.copyfile(ff_src_abs, ff_dst_abs)
        update_flagfile(parent, ff_dst_abs)
def add_default_flags(parent):
    """Retrieves the default flags to the local flagfile on the specified tab from
    either the source or scripts binaries.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    # Map from source file to the set of flag names whose defaults we want to pull
    default_flags = {}
    tag = parent.tag
    if tag in ["bg", "depth"]:
        default_flags.update(
            {
                os.path.join(depth_est_src, "DerpCLI.cpp"): {
                    "max_depth_m",
                    "min_depth_m",
                    "resolution",
                    "var_high_thresh",
                    "var_noise_floor",
                }
            }
        )
    if tag == "depth":
        default_flags.update(
            {
                os.path.join(render_scripts, "setup.py"): {"do_temporal_filter"},
                os.path.join(depth_est_src, "TemporalBilateralFilter.cpp"): {
                    "time_radius"
                },
                os.path.join(render_src, "GenerateForegroundMasks.cpp"): {
                    "blur_radius",
                    "morph_closing_size",
                    "threshold",
                },
            }
        )
    elif tag == "export":
        default_flags.update(
            {
                os.path.join(render_src, "SimpleMeshRenderer.cpp"): {"width"},
                os.path.join(render_src, "ConvertToBinary.cpp"): {"output_formats"},
            }
        )
    flagfile_fn = os.path.join(parent.path_flags, parent.flagfile_basename)
    flags = get_flags_from_flagfile(flagfile_fn)
    for source in default_flags:
        if os.path.isfile(source):
            source_flags = get_flags(source)
        else:
            # Fix: this branch was previously a bare (no-op) expression, leaving
            # source_flags unbound (NameError) or stale from a prior iteration
            source_flags = []
        desired_flags = default_flags[source]
        for source_flag in source_flags:
            flag_name = source_flag["name"]
            # Only add the default flag if not already present in current flags
            if flag_name in desired_flags:
                if flag_name not in flags or flags[flag_name] == "":
                    flags[flag_name] = source_flag["default"]
    # Add run flags: each tab enables only the pipeline stages it owns
    if tag == "bg":
        flags["run_generate_foreground_masks"] = False
        flags["run_precompute_resizes"] = True
        flags["run_depth_estimation"] = True
        flags["run_convert_to_binary"] = False
        flags["run_fusion"] = False
        flags["run_simple_mesh_renderer"] = False
        flags["use_foreground_masks"] = False
    elif tag == "depth":
        flags["run_depth_estimation"] = True
        flags["run_precompute_resizes"] = True
        flags["run_precompute_resizes_foreground"] = True
        flags["run_convert_to_binary"] = False
        flags["run_fusion"] = False
        flags["run_simple_mesh_renderer"] = False
    elif tag == "export":
        flags["run_generate_foreground_masks"] = False
        flags["run_precompute_resizes"] = False
        flags["run_precompute_resizes_foreground"] = False
        flags["run_depth_estimation"] = False
    # Overwrite flag file
    sorted_flags = collections.OrderedDict(sorted(flags.items()))
    dep_util.write_flagfile(flagfile_fn, sorted_flags)
def get_calibrated_rig_json(parent):
    """Finds calibrated rig in the project.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.

    Returns:
        str: Name of the calibrated rig (assumes the rig contains "_calibrated.json"),
            or "" if zero or multiple candidates are found.
    """
    rigs = dep_util.get_files_ext(parent.path_rigs, "json", "calibrated")
    if len(rigs) == 1:
        return rigs[0]
    # Zero or multiple matches: warn (if a log is available) and bail out
    if "log_reader" in dir(parent):
        if not rigs:
            parent.log_reader.log_warning(f"No rig files found in {parent.path_rigs}")
        else:
            ps_str = "\n".join(rigs)
            parent.log_reader.log_warning(
                f"Too many rig files found in {parent.path_rigs}:\n{ps_str}"
            )
    return ""
def update_run_button_text(parent, btn):
    """Sets the Run button caption: "Re-attach" when a cloud process already
    exists, otherwise "Run".

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        btn (QtWidgets.QPushButton): The Run button to relabel.
    """
    label = "Re-attach" if is_cloud_running_process(parent) else "Run"
    btn.setText(label)
def update_buttons(parent, gb, ignore=None):
    """Enables buttons and dropdowns according to whether or not data is present on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        ignore (list[QtWidgets.QGroupBox], optional): Buttons to not update.

    Returns:
        tuple[bool, bool, bool]: Whether a process is currently running, whether all
            dropdowns have a selection, and whether all named line edits have a value.
    """
    if not ignore:
        ignore = []
    has_all_dropdowns = True
    for dd in gb.findChildren(QtWidgets.QComboBox):
        if not dd.currentText() and dd not in ignore:
            has_all_dropdowns = False
            break
    has_all_values = True
    for v in gb.findChildren(QtWidgets.QLineEdit):
        # Unnamed line edits are ignored (auto-generated widgets have no objectName)
        if v.objectName() and not v.text() and v not in ignore:
            has_all_values = False
            break
    is_running = parent.log_reader.is_running()
    for btn in gb.findChildren(QtWidgets.QPushButton):
        btn_name = btn.objectName()
        if btn in ignore:
            continue
        if btn_name.endswith("_run"):
            btn.setEnabled(not is_running and has_all_dropdowns and has_all_values)
        elif btn_name.endswith("_cancel"):
            btn.setEnabled(is_running)
        elif btn_name.endswith("_threshs"):
            btn.setEnabled(not is_running and has_all_dropdowns)
        elif btn_name.endswith("_view"):
            btn.setEnabled(not is_running)
        elif btn_name.endswith("_download_meshes"):
            btn.setEnabled(not is_running)
    # Fix: the third element previously duplicated is_running instead of has_all_values
    return is_running, has_all_dropdowns, has_all_values
def on_changed_dropdown(parent, gb, dd):
    """Callback event handler for changed dropdown on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        dd (QtWidgets.QComboBox): Dropdown UI element.
    """
    if not parent.is_refreshing_data:
        name = dd.objectName()
        if not name.endswith(
            "_farm_ec2"
        ):  # farm_ec2 dropdowns are not used in flagfile
            parent.update_flagfile(parent.flagfile_fn)
        # Check if we need to update the threshold image
        if name.endswith(("_camera", "_frame_bg", "_first")):
            # Check if we are already in a threshold tab, else default to color variance
            tag = parent.tag
            tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
            tab_idx = tab_widget.currentIndex()
            if tab_widget.widget(tab_idx).objectName().endswith("_fg_mask"):
                type = type_fg_mask  # NOTE: shadows the builtin `type`
            else:
                type = type_color_var
            if "run_thresholds" in dir(parent):
                parent.run_thresholds(type)
def on_changed_line_edit(parent, gb, le):
    """Callback event handler for changed line edit on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        le (_): Ignore
    """
    # Skip updates triggered while the UI itself is being (re)populated
    if parent.is_refreshing_data:
        return
    parent.update_buttons(gb)
    parent.update_flagfile(parent.flagfile_fn)
def setup_groupbox(gb, callbacks):
    """Sets up callbacks for any groupboxes on the specified tab.

    Args:
        gb (QtWidgets.QGroupBox): Group box for the tab.
        callbacks (dict[QtWidgets.QGroupBox, func : QEvent -> _]): Callbacks for the UI elements.
    """
    # Only checkable group boxes emit toggled(); skip the rest
    if not gb.isCheckable():
        return
    if gb in callbacks:
        gb.toggled.connect(callbacks[gb])
def setup_checkboxes(gb, callbacks):
    """Sets up callbacks for any checkboxes on the specified tab.

    Args:
        gb (QtWidgets.QGroupBox): Group box for the tab.
        callbacks (dict[QtWidgets.QGroupBox, func : QEvent -> _]): Callbacks for the UI elements.
    """
    # Connect only checkboxes that have an explicit callback registered
    for checkbox in gb.findChildren(QtWidgets.QCheckBox):
        if checkbox in callbacks:
            checkbox.stateChanged.connect(callbacks[checkbox])
def setup_dropdowns(parent, gb):
    """Sets up callbacks for any dropdowns on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QComboBox): Group box for the tab.
    """
    if "on_changed_dropdown" in dir(parent):
        for dd in gb.findChildren(QtWidgets.QComboBox):
            # y=gb/z=dd default arguments bind the current loop values,
            # avoiding the late-binding closure pitfall
            dd.currentTextChanged.connect(
                lambda state, y=gb, z=dd: parent.on_changed_dropdown(y, z)
            )
            dd.activated.connect(
                lambda state, y=gb, z=dd: parent.on_changed_dropdown(y, z)
            )
def setup_lineedits(parent, gb):
    """Sets up callbacks for any line edits on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
    """
    if "on_changed_line_edit" not in dir(parent):
        return
    for le in gb.findChildren(QtWidgets.QLineEdit):
        # Defaults y=gb/z=le bind loop variables at definition time
        le.textChanged.connect(
            lambda state, y=gb, z=le: parent.on_changed_line_edit(y, z)
        )
def setup_buttons(parent, gb, callbacks):
    """Sets up callbacks for any buttons on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        callbacks (dict[QtWidgets.QPushButton, func : QEvent -> _]): Callbacks for the UI elements.
    """
    # Buttons without an explicit callback are dispatched on their name suffix
    suffix_to_attr = {
        "_refresh": "refresh",
        "_run": "run_process",
        "_cancel": "cancel_process",
        "_threshs": "run_thresholds",
        "_logs": "get_logs",
    }
    for btn in gb.findChildren(QtWidgets.QPushButton):
        if btn in callbacks:
            callback = callbacks[btn]
        else:
            name = btn.objectName()
            callback = None
            for suffix, attr in suffix_to_attr.items():
                if name.endswith(suffix):
                    callback = getattr(parent, attr)
                    break
            else:
                parent.log_reader.log_error(f"Cannot setup button {name}")
        if callback:
            btn.clicked.connect(callback)
def on_changed_preview(parent):
    """Callback event handler for changed image previews on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    tag = parent.tag
    tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
    tab_idx = tab_widget.currentIndex()
    tab_name = tab_widget.widget(tab_idx).objectName()
    # Only recompute thresholds when switching onto a thresholds tab
    if "_threshs_" in tab_name:
        if tab_name.endswith("_fg_mask"):
            type = type_fg_mask  # NOTE: shadows the builtin `type`
        else:
            type = type_color_var
        if not parent.is_refreshing_data:
            parent.run_thresholds(type)
def setup_preview(parent):
    """Creates preview window in the UI and connects a callback on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    dlg, tag = parent.dlg, parent.tag
    log_widget = getattr(dlg, f"text_{tag}_log", None)
    clear_btn = getattr(dlg, f"btn_{tag}_log_clear", None)
    preview_widget = getattr(dlg, f"w_{tag}_preview", None)
    # Wire the clear button to empty the log pane
    clear_btn.clicked.connect(lambda: log_widget.clear())
    # Start on the first preview tab
    preview_widget.setCurrentIndex(0)
    if "on_changed_preview" in dir(parent):
        preview_widget.currentChanged.connect(parent.on_changed_preview)
def setup_data(parent, callbacks=None):
    """Sets up callbacks and initial UI element statuses on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        callbacks (dict[QtWidgets.QGroupBox, func : QEvent -> _]): Callbacks for the UI elements.
    """
    callbacks = callbacks or {}
    tab = getattr(parent.dlg, f"t_{parent.tag}", None)
    # Wire every supported widget type inside each group box on this tab
    for gb in tab.findChildren(QtWidgets.QGroupBox):
        setup_groupbox(gb, callbacks)
        setup_checkboxes(gb, callbacks)
        setup_dropdowns(parent, gb)
        setup_lineedits(parent, gb)
        setup_buttons(parent, gb, callbacks)
    # Preview tabs
    setup_preview(parent)
def update_noise_detail(parent, noise, detail):
    """Updates noise/detail thresholds interaction on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        noise (float): Noise threshold.
        detail (float): Detail threshold.

    NOTE(review): noise/detail are not read here directly; the slider state is
    pulled when the flagfile is rebuilt from the UI — confirm.
    """
    # Push the current UI state into the flagfile, then refresh the editor pane
    parent.update_data_or_flags(
        parent.flagfile_fn, flagfile_from_data=True, switch_to_flag_tab=False
    )
    parent.update_flagfile_edit(parent.flagfile_fn, switch_to_flag_tab=False)
def update_fg_masks_thresholds(parent, blur, closing, thresh):
    """Updates thresholds and display for the foreground masking on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        blur (int, optional): Gaussian blur radius.
        closing (int, optional): Closure (for sealing holes).
        thresh (int, optional): Threshold applied to segment foreground and background

    NOTE(review): blur/closing/thresh are not read here directly; the slider state
    is pulled when the flagfile is rebuilt from the UI — confirm.
    """
    # Push the current UI state into the flagfile, then refresh the editor pane
    parent.update_data_or_flags(
        parent.flagfile_fn, flagfile_from_data=True, switch_to_flag_tab=False
    )
    parent.update_flagfile_edit(parent.flagfile_fn, switch_to_flag_tab=False)
def log_missing_image(parent, path_color, cam_id, frame):
    """Prints a warning if an image cannot be located.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        path_color (str): Path to the directory with color images.
        cam_id (str): Name of the camera.
        frame (str): Name of the frame (0-padded, six digits).
    """
    message = f"Cannot find frame {cam_id}/{frame} in {path_color}"
    parent.log_reader.log_warning(message)
def update_thresholds_color_variance(parent, path_color, labels=None):
    """Updates the displayed thresholds for color variance on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        path_color (str): Path to the directory with color images.
        labels (list[str], optional): Labels used to filter UI elements to update.
    """
    labels = labels if labels is not None else ("_frame_bg", "_first")
    dlg = parent.dlg
    # NOTE(review): frame/cam_id are only bound if matching dropdowns exist in
    # the dialog; otherwise the calls below raise NameError — confirm the layout
    for dd in parent.dlg.findChildren(QtWidgets.QComboBox):
        name = dd.objectName()
        if name.endswith(labels):
            frame = dd.currentText()
        elif name.endswith("_camera"):
            cam_id = dd.currentText()
    image_path = dep_util.get_level_image_path(path_color, cam_id, frame)
    if not image_path:
        log_missing_image(parent, path_color, cam_id, frame)
        return
    tag = parent.tag
    w_image = getattr(dlg, f"w_{tag}_threshs_image_{type_color_var}", None)
    # Foreground masks are generated at the finest level of the pyramid
    res = max(config.WIDTHS)
    w_image.color_var.set_image(image_path, res)
    noise = float(parent.slider_noise.get_label_text())
    detail = float(parent.slider_detail.get_label_text())
    # Show the image path relative to the project root in the UI
    project = parent.parent.path_project
    fn = dep_util.remove_prefix(image_path, f"{project}/")
    getattr(dlg, f"label_{tag}_threshs_filename_{type_color_var}", None).setText(fn)
    # Force update
    w_image.update_thresholds(noise=noise, detail=detail)
def update_thresholds_fg_mask(parent, paths_color):
    """Updates thresholds and display for the foreground masking using values from UI
    on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        paths_color (list[str]): Paths to the directory with color images.
    """
    dlg = parent.dlg
    # frames[0] = background frame, frames[1] = first foreground frame
    frames = [None] * 2
    # NOTE(review): cam_id is only bound if a "_camera" dropdown exists in the
    # dialog; otherwise the calls below raise NameError — confirm the layout
    for dd in parent.dlg.findChildren(QtWidgets.QComboBox):
        name = dd.objectName()
        if name.endswith("_frame_bg"):
            frames[0] = dd.currentText()
        elif name.endswith("_first"):
            frames[1] = dd.currentText()
        elif name.endswith("_camera"):
            cam_id = dd.currentText()
    bg_image_path = dep_util.get_level_image_path(paths_color[0], cam_id, frames[0])
    if not bg_image_path:
        log_missing_image(parent, paths_color[0], cam_id, frames[0])
        return
    fg_image_path = dep_util.get_level_image_path(paths_color[1], cam_id, frames[1])
    if not fg_image_path:
        log_missing_image(parent, paths_color[1], cam_id, frames[1])
        return
    tag = parent.tag
    w_image = getattr(dlg, f"w_{tag}_threshs_image_{type_fg_mask}", None)
    # Foreground masks are generated at the finest level of the pyramid
    res = max(config.WIDTHS)
    w_image.fg_mask.set_images(bg_image_path, fg_image_path, res)
    blur = float(parent.slider_blur.get_label_text())
    closing = float(parent.slider_closing.get_label_text())
    thresh = float(parent.slider_thresh.get_label_text())
    # Show both image paths relative to the project root in the UI
    project = parent.parent.path_project
    fn_bg = dep_util.remove_prefix(bg_image_path, f"{project}/")
    fn_fg = dep_util.remove_prefix(fg_image_path, f"{project}/")
    getattr(dlg, f"label_{tag}_threshs_filename_{type_fg_mask}", None).setText(
        f"{fn_bg} vs {fn_fg}"
    )
    # Force update
    w_image.update_thresholds(blur=blur, closing=closing, thresh=thresh)
def run_thresholds_after_wait(parent, type):
    """Computes the threshold and displays after a delay on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        type (Union[ColorVariance, ForegroundMask]): Instance where thresholds
            can be run.
    """
    # Apply flag file values in case it had unsaved changes
    parent.save_flag_file()
    tag = parent.tag
    dlg = parent.dlg
    label = getattr(dlg, f"label_{tag}_threshs_tooltip_{type}", None)
    label.setToolTip(parent.threshs_tooltip)
    # Reset zoom before re-rendering the threshold image
    getattr(dlg, f"w_{tag}_threshs_image_{type}", None).set_zoom_level(0)
    if type == type_color_var:
        parent.setup_thresholds_color_variance()
        parent.update_thresholds_color_variance()
    elif type == type_fg_mask:
        parent.setup_thresholds_fg_masks()
        parent.update_thresholds_fg_mask()
def run_thresholds(parent, type):
    """Runs thresholding based on values in the UI and update UI display on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        type (Union[ColorVariance, ForegroundMask]): Instance where thresholds are run.
    """
    tag = parent.tag
    tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
    dep_util.switch_tab(tab_widget, f"_threshs_{type}")
    # HACK: if we try to draw on a widget too quickly after switching tabs the resulting image
    # does not span all the way to the width of the widget. We can wait a few milliseconds to
    # let the UI "settle"
    # Keep the timer on parent so it is not garbage collected before firing
    parent.timer = QtCore.QTimer(parent.parent)
    parent.timer.timeout.connect(lambda: parent.run_thresholds_after_wait(type))
    parent.timer.setSingleShot(True)
    parent.timer.start(10)  # 10ms
def output_has_images(output_dirs):
    """Whether or not outputs already have results.

    Args:
        output_dirs (list[str]): List of directories where outputs will be saved.

    Returns:
        bool: True if any of the output directories contains at least one valid file.
    """
    return any(dep_util.get_first_file_path(d) for d in output_dirs)
def run_process_check_existing_output(parent, gb, app_name, flagfile_fn, p_id):
    """Runs a process, first archiving existing outputs if re-computation was requested.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        app_name (str): Name of the binary being executed.
        flagfile_fn (str): Name of the flagfile.
        p_id (str): PID name of the process to be run.
    """
    tag = parent.tag
    cb_recompute = getattr(parent.dlg, f"cb_{tag}_recompute", None)
    # Fix: needs_rename was previously unbound (NameError) when the recompute
    # checkbox does not exist on the tab; default to False in that case
    needs_rename = cb_recompute is not None and cb_recompute.isChecked()
    if needs_rename:
        # Rename current output directories using timestamp and create new empty ones
        ts = dep_util.get_timestamp()
        for d in parent.output_dirs:
            if not os.path.isdir(d):
                continue
            d_dst = f"{d}_{ts}"
            parent.log_reader.log_notice(
                f"Saving copy of {d} to {d_dst} before re-computing"
            )
            shutil.move(d, d_dst)
            os.makedirs(d, exist_ok=True)
    run_process(parent, gb, app_name, flagfile_fn, p_id, not needs_rename)
def start_process(parent, cmd, gb, p_id, run_silently=False):
    """Runs a terminal process and disables UI element interaction.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        cmd (str): Command to run in the terminal.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        p_id (str): PID name of the process being started.
        run_silently (bool, optional): Whether to suppress echoing the command to the log.
    """
    if not run_silently:
        parent.log_reader.log(f"CMD: {cmd}")
    parent.log_reader.gb = gb
    parent.log_reader.setup_process(p_id)
    parent.log_reader.start_process(p_id, cmd)
    # Switch to log tab
    tag = parent.tag
    tab_widget = getattr(parent.dlg, f"w_{tag}_preview", None)
    dep_util.switch_tab(tab_widget, "_log")
    # Disable UI elements
    parent.switch_ui_elements_for_processing(False)
def run_process(
    parent, gb, app_name=None, flagfile_fn=None, p_id="run", overwrite=False
):
    """Runs an application on the terminal, using the associated flagfile.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        app_name (str, optional): Name of the binary being executed.
        flagfile_fn (str, optional): Name of the flagfile to supply to the binary. this
            will default to the flagfile associated with the binary if unspecified.
        p_id (str, optional): PID name of the process being started.
        overwrite (bool, optional): Whether or not to overwrite the local flagfile on disk.
    """
    # Apply flag file values in case it had unsaved changes
    parent.save_flag_file()
    if not app_name:
        app_name = parent.app_name
    # Python scripts live in scripts_dir; compiled binaries in dep_bin_dir
    is_py_script = app_name.endswith(".py")
    dir = scripts_dir if is_py_script else dep_bin_dir  # NOTE: shadows builtin dir()
    app_path = os.path.join(dir, app_name)
    if not os.path.isfile(app_path):
        parent.log_reader.log_warning(f"App doesn't exist: {app_path}")
        return
    if not flagfile_fn:
        flagfile_fn = parent.flagfile_fn
    # If outputs already exist, go through the archive-or-reuse flow instead
    if output_has_images(parent.output_dirs) and not overwrite:
        run_process_check_existing_output(parent, gb, app_name, flagfile_fn, p_id)
        return
    cmd = f'{app_path} --flagfile="{flagfile_fn}"'
    if is_py_script:
        cmd = f"python3.7 -u {cmd}"
    start_process(parent, cmd, gb, p_id)
def update_thresholds(parent, gb, type):
    """Updates the displayed thresholds for either color variance or foreground masks.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        type (Union[ColorVariance, ForegroundMask]): Instance where thresholds
            can be run.
    """
    if type == type_color_var:
        parent.update_noise_detail(
            parent.slider_noise.get_label_text(),
            parent.slider_detail.get_label_text(),
        )
    elif type == type_fg_mask:
        parent.update_fg_masks_thresholds(
            parent.slider_blur.get_label_text(),
            parent.slider_closing.get_label_text(),
            parent.slider_thresh.get_label_text(),
        )
    # Re-evaluate which buttons are actionable after the threshold change
    parent.update_buttons(gb)
def on_state_changed_partial_360(parent):
    """Callback event handler for changed "partial coverage" checkbox on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    # Skip updates triggered while the UI itself is being (re)populated
    if parent.is_refreshing_data:
        return
    parent.update_flagfile(parent.flagfile_fn)
def on_state_changed_recompute(parent):
    """Callback event handler for changed "recompute" checkbox on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    # Skip updates triggered while the UI itself is being (re)populated
    if parent.is_refreshing_data:
        return
    parent.update_flagfile(parent.flagfile_fn)
def on_state_changed_use_bg(parent, gb):
    """Callback event handler for changed "use background" checkbox on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
    """
    # Skip updates triggered while the UI itself is being (re)populated
    if parent.is_refreshing_data:
        return
    parent.update_buttons(gb)
    parent.update_flagfile(parent.flagfile_fn)
def on_state_changed_farm(parent, state):
    """Callback event handler for changed "AWS" checkbox on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        state (str): Identifier of the callback state (Qt check state; > 0 means checked).
    """
    parent.is_farm = state > 0
    # Skip dependent updates while the UI itself is being (re)populated
    if parent.is_refreshing_data:
        return
    if "update_frame_range_dropdowns" in dir(parent):
        parent.update_frame_range_dropdowns()
    if "update_run_button_text" in dir(parent):
        parent.update_run_button_text()
    parent.update_flagfile(parent.flagfile_fn)
def setup_thresholds(parent, types):
    """Sets necessary thresholds apps on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        types (list[str]): Threshold types whose sliders and image widgets should
            be set up (e.g. color variance, foreground mask).
    """
    tag = parent.tag
    dlg = parent.dlg
    # threshold_sliders maps attr -> (type, printed name, slider number, max, default)
    for attr in threshold_sliders:
        type, printed, num, max, default = threshold_sliders[attr]
        if type in types:
            name = getattr(dlg, f"label_{tag}_threshs_{num}_name_{type}", None)
            hs = getattr(dlg, f"hs_{tag}_threshs_{num}_{type}", None)
            label = getattr(dlg, f"label_{tag}_threshs_{num}_{type}", None)
            slider = SliderWidget(type, attr, name, printed, hs, label, max, default)
            # Expose each slider as parent.slider_<attr> for later lookups
            setattr(parent, f"slider_{attr}", slider)
    for type in types:
        # Pair each threshold image widget with its zoomable viewer
        w_image = getattr(dlg, f"w_{tag}_threshs_image_{type}", None)
        w_viewer = getattr(dlg, f"w_{tag}_image_viewer_{type}", None)
        w_image.set_image_viewer(w_viewer)
def setup_thresholds_color_variance(parent):
    """Sets color variance thresholds apps on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    # Both color-variance sliders share the same change callback
    for slider in (parent.slider_noise, parent.slider_detail):
        slider.setup(callback=parent.on_changed_slider)
def setup_thresholds_fg_masks(parent):
    """Sets up the default thresholds on foreground masks on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    # All three foreground-mask sliders share the same change callback
    for slider in (parent.slider_blur, parent.slider_closing, parent.slider_thresh):
        slider.setup(callback=parent.on_changed_slider)
def update_data_from_flags(
    parent,
    flags,
    dropdowns=None,
    values=None,
    checkboxes=None,
    labels=None,
    prefix=None,
):
    """Populates UI elements on the specified tab from the given flags.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        flags (dict[str, _]): Flags and their corresponding values.
        dropdowns (list[tuple[str, QtWidgets.QComboBox]], optional): Flag-key/dropdown pairs.
        values (list[tuple[str, QtWidgets.QLineEdit]], optional): Flag-key/line-edit pairs.
        checkboxes (list[tuple[str, QtWidgets.QCheckBox]], optional): Flag-key/checkbox pairs.
        labels (list[tuple[str, QtWidgets.QLabel]], optional): Flag-key/label pairs.
        prefix (str, optional): Prefix stripped from flag values when populating the tab;
            defaults to the project root.
    """
    dropdowns = dropdowns or []
    values = values or []
    checkboxes = checkboxes or []
    labels = labels or []
    flagfile = parent.flagfile_basename
    if not prefix:
        prefix = f"{parent.parent.path_project}/"
    # Dropdown and checkbox updates can fail (e.g. missing option); log those
    for key, dd in dropdowns:
        error = dep_util.update_qt_dropdown_from_flags(flags, key, prefix, dd)
        if error:
            parent.log_reader.log_warning(f"{flagfile}: {error}")
    for key, val in values:
        dep_util.update_qt_lineedit_from_flags(flags, key, prefix, val)
    for key, cb in checkboxes:
        error = dep_util.update_qt_checkbox_from_flags(flags, key, prefix, cb)
        if error:
            parent.log_reader.log_warning(f"{flagfile}: {error}")
    for key, label in labels:
        dep_util.update_qt_label_from_flags(flags, key, prefix, label)
def get_notation(parent, attr):
    """Gets standard format for attribute on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        attr (str): Name of the attribute.

    Returns:
        str: Format string corresponding to the display notation.

    Raises:
        ValueError: If attr is not a recognized slider attribute.
    """
    if attr in ["noise", "detail", "thresh"]:
        notation = "{:.3e}"
    elif attr in ["blur", "closing"]:
        notation = "{:d}"
    else:
        parent.log_reader.log_error(f"Invalid slider attr: {attr}")
        # Fix: previously fell through to `return notation` with notation
        # unbound, raising an opaque UnboundLocalError
        raise ValueError(f"Invalid slider attr: {attr}")
    return notation
def on_changed_slider(parent, slider, value):
    """Callback event handler for changes to a slider UI element on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        slider (QtWidgets.QSlider): Slider UI element.
        value (int/float): Value of the slider element.
    """
    type = slider.type  # NOTE: shadows the builtin `type`
    attr = slider.attr
    notation = get_notation(parent, attr)
    if notation == "{:d}":
        # Integer-formatted sliders (blur/closing) must not display decimals
        value = int(value)
    slider.set_label(value, notation)
    tag = parent.tag
    w_image = getattr(parent.dlg, f"w_{tag}_threshs_image_{type}", None)
    if w_image.update_thresholds(**{attr: value}):
        # Update thresholds in flagfile
        parent.update_thresholds(type)
def initialize_farm_groupbox(parent):
    """Sets up the farm render box for the project path, i.e. AWS is displayed if
    rendering on an S3 project path and LAN if on a SMB drive.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    tag = parent.tag
    dlg = parent.dlg
    gb_farm = getattr(dlg, f"gb_{tag}_farm", None)
    grid_s3 = getattr(dlg, f"w_{tag}_farm_s3", None)
    grid_lan = getattr(dlg, f"w_{tag}_farm_lan", None)
    parent.is_aws = parent.parent.is_aws
    parent.is_lan = parent.parent.is_lan
    # Show only the farm UI relevant to the project location (S3 vs LAN vs local)
    if not parent.is_aws and not parent.is_lan:
        gb_farm.hide()
    elif parent.is_aws:
        grid_lan.hide()
    elif parent.is_lan:
        grid_s3.hide()
    parent.ec2_instance_types_cpu = []
    parent.ec2_instance_types_gpu = []
    if parent.is_aws:
        # Get list of EC2 instances
        client = parent.parent.aws_util.session.client("ec2")
        ts = client._service_model.shape_for("InstanceType").enum
        ts = [t for t in ts if not t.startswith(config.EC2_UNSUPPORTED_TYPES)]
        # "c*" types are offered as CPU instances; "p*"/"g*" as GPU instances
        parent.ec2_instance_types_cpu = [t for t in ts if t.startswith("c")]
        parent.ec2_instance_types_gpu = [t for t in ts if t.startswith(("p", "g"))]
    # Check if flagfile has farm attributes
    flagfile_fn = os.path.join(parent.path_flags, parent.flagfile_basename)
    flags = get_flags_from_flagfile(flagfile_fn)
    parent.is_farm = False
    for farm_attr in ["master", "workers", "cloud"]:
        if flags[farm_attr] != "":
            parent.is_farm = True
            break
    # NOTE(review): call_force_refreshing presumably toggles the box without
    # firing its change callback — confirm against its definition
    call_force_refreshing(parent, gb_farm.setChecked, parent.is_farm)
def show_resources(parent):
    """Displays resources used in the container.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.

    Returns:
        str: Resources (memory and CPU) being used.
    """
    # `top` in batch mode with a single iteration yields capturable text output
    cmd = "top -b -n 1"
    return run_command(cmd)
def show_aws_resources(parent):
    """Displays resources used across the AWS cluster.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.

    Returns:
        str: Resources (memory and CPU) being used in the farm, one instance per line.
    """
    instances = parent.parent.aws_util.ec2_get_running_instances()
    return "\n".join(instances)
def get_aws_workers():
    """Get names of the instances in the AWS farm.

    Returns:
        list[str]: Instances IDs of EC2 instances in the farm (one per line,
            newline characters preserved).
    """
    with open(config.DOCKER_AWS_WORKERS) as f:
        return f.readlines()
def set_aws_workers(workers):
    """Sets names of the instances in the AWS farm.

    Args:
        workers: EC2 instances in the farm. NOTE(review): elements are accessed
            via `.id`, so these appear to be instance objects rather than plain
            strings — confirm against callers.
    """
    worker_ids = [worker.id for worker in workers]
    with open(config.DOCKER_AWS_WORKERS, "w") as f:
        f.writelines(worker_ids)
def popup_ec2_dashboard_url(parent):
    """Displays a link to the EC2 dashboard in a popup on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    region = parent.parent.aws_util.region_name
    # Region-qualified URL if a region is configured, global console otherwise
    if region:
        url = f"https://{region}.console.aws.amazon.com/ec2#Instances"
    else:
        url = "https://console.aws.amazon.com/ec2#Instances"
    dep_util.popup_message(parent.parent, url, "EC2 Dashboard")
def popup_logs_locations(parent):
    """Displays the path to local logs in a popup on the specified tab.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
    """
    logs = [parent.log_reader.log_file]
    logs_workers = glob.iglob(f"{parent.path_logs}/Worker-*", recursive=False)
    for log in logs_workers:
        # Only include worker logs modified since the app session started
        ts_log = datetime.datetime.fromtimestamp(os.path.getmtime(log))
        if ts_log > parent.parent.ts_start:
            logs.append(log)
    # Display paths relative to the project root
    project = parent.parent.path_project
    logs = [dep_util.remove_prefix(l, f"{project}/") for l in logs]
    dep_util.popup_message(parent.parent, "\n".join(logs), "Logs")
def run_process_aws(parent, gb, p_id=None):
    """Runs the process to create a cluster on AWS and perform the render job.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
        p_id (str, optional): Identifier for the spawned process; falls
            back to "run_aws_create" when falsy.
    """
    flags = {}
    flags["key_dir"] = os.path.dirname(parent.path_aws_key_fn)
    # Key name is the key file's basename without its extension.
    flags["key_name"] = os.path.splitext(os.path.basename(parent.path_aws_key_fn))[0]
    flags["csv_path"] = parent.path_aws_credentials
    flags["ec2_file"] = parent.path_aws_ip_file
    # UI widget names are parameterized by the tab's tag.
    spin_num_workers = getattr(parent.dlg, f"spin_{parent.tag}_farm_num_workers", None)
    flags["cluster_size"] = int(spin_num_workers.value())
    flags["region"] = parent.parent.aws_util.region_name
    dd_ec2 = getattr(parent.dlg, f"dd_{parent.tag}_farm_ec2", None)
    flags["instance_type"] = dd_ec2.currentText()
    flags["tag"] = parent.tag
    # Overwrite flag file
    app_name = parent.app_aws_create
    flagfile_fn = os.path.join(parent.path_flags, parent.app_name_to_flagfile[app_name])
    dep_util.write_flagfile(flagfile_fn, flags)
    if not p_id:
        p_id = "run_aws_create"
    run_process(parent, gb, app_name, flagfile_fn, p_id)
def on_download_meshes(parent, gb):
    """Downloads meshes from S3. This is a no-op if not an S3 project.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
    """
    if not parent.parent.is_aws:
        return
    subdir = image_type_paths["video_bin"]
    flags = {}
    flags["csv_path"] = parent.path_aws_credentials
    # Mirror the S3 subdirectory into the container's input root.
    flags["local_dir"] = os.path.join(config.DOCKER_INPUT_ROOT, subdir)
    flags["s3_dir"] = os.path.join(parent.parent.ui_flags.project_root, subdir)
    flags["verbose"] = parent.parent.ui_flags.verbose
    flags["watch"] = True  # NOTE: watchdog sometimes gets stale file handles in Windows
    # Overwrite flag file
    app_name = parent.app_aws_download_meshes
    flagfile_fn = os.path.join(parent.path_flags, parent.app_name_to_flagfile[app_name])
    dep_util.write_flagfile(flagfile_fn, flags)
    p_id = "download_meshes"
    run_process(parent, gb, app_name, flagfile_fn, p_id)
def on_terminate_cluster(parent, gb):
    """Terminates a running AWS cluster. This is a no-op if no cluster is up.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        gb (QtWidgets.QGroupBox): Group box for the tab.
    """
    flags = {}
    flags["key_dir"] = os.path.dirname(parent.path_aws_key_fn)
    # Key name is the key file's basename without its extension.
    flags["key_name"] = os.path.splitext(os.path.basename(parent.path_aws_key_fn))[0]
    flags["csv_path"] = parent.path_aws_credentials
    flags["ec2_file"] = parent.path_aws_ip_file
    flags["region"] = parent.parent.aws_util.region_name
    # Overwrite flag file
    flagfile_fn = os.path.join(
        parent.path_flags, parent.app_name_to_flagfile[parent.app_aws_clean]
    )
    dep_util.write_flagfile(flagfile_fn, flags)
    app_name = parent.app_aws_clean
    p_id = "terminate_cluster"
    run_process(parent, gb, app_name, flagfile_fn, p_id)
def get_workers(parent):
    """Finds workers in a LAN farm.

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.

    Returns:
        list[str]: IPs of workers in the local farm; empty when running
            with localhost as the master (nothing to scan).
    """
    is_local_master = parent.parent.ui_flags.master == config.LOCALHOST
    return [] if is_local_master else parent.lan.scan()
def call_force_refreshing(parent, fun, *args):
    """Calls ``fun(*args)`` with the UI marked as refreshing.

    Sets ``parent.is_refreshing_data`` for the duration of the call so
    change-handlers can ignore programmatic updates, then restores it.
    If a refresh is already in progress the flag is left untouched
    (supports re-entrant calls).

    Args:
        parent (App(QDialog)): Object corresponding to the parent UI element.
        fun (callable): Function to invoke.
        *args: Positional arguments forwarded to ``fun``.
    """
    already_refreshing = parent.is_refreshing_data
    if not already_refreshing:
        parent.is_refreshing_data = True
    fun(*args)
    if not already_refreshing:
        parent.is_refreshing_data = False
|
[
"dep_util.preview_file",
"dep_util.write_flagfile",
"os.path.isfile",
"dep_util.remove_prefix",
"dep_util.get_first_file_path",
"dep_util.get_timestamp",
"glob.iglob",
"os.path.join",
"sys.path.append",
"os.path.abspath",
"dep_util.update_qt_dropdown",
"scripts.util.system_util.run_command",
"os.path.dirname",
"dep_util.update_qt_dropdown_from_flags",
"dep_util.get_files_ext",
"os.path.exists",
"scripts.util.system_util.get_flags_from_flagfile",
"dep_util.set_tab_enabled",
"shutil.copyfile",
"dep_util.popup_message",
"glog_check.yellow",
"dep_util.populate_dropdown",
"scripts.aws.create.get_render_pid",
"PyQt5.QtCore.QTimer",
"slider_image_thresholds.SliderWidget",
"scripts.render.network.LAN",
"os.path.basename",
"scripts.aws.create.get_staging_info",
"os.path.realpath",
"scripts.aws.util.AWSUtil",
"scripts.aws.create.has_render_flag",
"dep_util.switch_objects_prefix",
"dep_util.get_frame_list",
"os.makedirs",
"scripts.util.system_util.get_flags",
"dep_util.get_level_image_path",
"os.path.isdir",
"log_reader.LogReader",
"dep_util.switch_tab",
"scripts.aws.create.run_ssh_command",
"dep_util.setup_file_explorer",
"dep_util.update_qt_lineedit_from_flags",
"os.path.getmtime",
"shutil.move",
"dep_util.update_qt_label_from_flags",
"dep_util.update_qt_checkbox_from_flags"
] |
[((852, 880), 'os.path.dirname', 'os.path.dirname', (['dir_scripts'], {}), '(dir_scripts)\n', (867, 880), False, 'import os\n'), ((881, 906), 'sys.path.append', 'sys.path.append', (['dir_root'], {}), '(dir_root)\n', (896, 906), False, 'import sys\n'), ((1679, 1715), 'os.path.join', 'os.path.join', (['scripts_dir', 'os.pardir'], {}), '(scripts_dir, os.pardir)\n', (1691, 1715), False, 'import os\n'), ((1730, 1767), 'os.path.join', 'os.path.join', (['dep_dir', '"""build"""', '"""bin"""'], {}), "(dep_dir, 'build', 'bin')\n", (1742, 1767), False, 'import os\n'), ((1782, 1810), 'os.path.join', 'os.path.join', (['dep_dir', '"""res"""'], {}), "(dep_dir, 'res')\n", (1794, 1810), False, 'import os\n'), ((1827, 1861), 'os.path.join', 'os.path.join', (['dep_res_dir', '"""flags"""'], {}), "(dep_res_dir, 'flags')\n", (1839, 1861), False, 'import os\n'), ((1862, 1903), 'os.makedirs', 'os.makedirs', (['dep_flags_dir'], {'exist_ok': '(True)'}), '(dep_flags_dir, exist_ok=True)\n', (1873, 1903), False, 'import os\n'), ((1919, 1950), 'os.path.join', 'os.path.join', (['dep_dir', '"""source"""'], {}), "(dep_dir, 'source')\n", (1931, 1950), False, 'import os\n'), ((1967, 2012), 'os.path.join', 'os.path.join', (['source_root', '"""depth_estimation"""'], {}), "(source_root, 'depth_estimation')\n", (1979, 2012), False, 'import os\n'), ((2026, 2061), 'os.path.join', 'os.path.join', (['source_root', '"""render"""'], {}), "(source_root, 'render')\n", (2038, 2061), False, 'import os\n'), ((2079, 2114), 'os.path.join', 'os.path.join', (['scripts_dir', '"""render"""'], {}), "(scripts_dir, 'render')\n", (2091, 2114), False, 'import os\n'), ((923, 955), 'os.path.join', 'os.path.join', (['dir_scripts', '"""aws"""'], {}), "(dir_scripts, 'aws')\n", (935, 955), False, 'import os\n'), ((973, 1008), 'os.path.join', 'os.path.join', (['dir_scripts', '"""render"""'], {}), "(dir_scripts, 'render')\n", (985, 1008), False, 'import os\n'), ((1026, 1059), 'os.path.join', 'os.path.join', (['dir_scripts', 
'"""util"""'], {}), "(dir_scripts, 'util')\n", (1038, 1059), False, 'import os\n'), ((1574, 1600), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1590, 1600), False, 'import os\n'), ((1632, 1667), 'os.path.join', 'os.path.join', (['script_dir', 'os.pardir'], {}), '(script_dir, os.pardir)\n', (1644, 1667), False, 'import os\n'), ((4717, 4739), 'scripts.render.network.LAN', 'LAN', (['f"""{ip_begin}.255"""'], {}), "(f'{ip_begin}.255')\n", (4720, 4739), False, 'from scripts.render.network import LAN\n'), ((7374, 7415), 'dep_util.get_timestamp', 'dep_util.get_timestamp', (['"""%Y%m%d%H%M%S.%f"""'], {}), "('%Y%m%d%H%M%S.%f')\n", (7396, 7415), False, 'import dep_util\n'), ((7468, 7514), 'os.path.join', 'os.path.join', (['parent.path_logs', 'f"""{name}_{ts}"""'], {}), "(parent.path_logs, f'{name}_{ts}')\n", (7480, 7514), False, 'import os\n'), ((7532, 7573), 'log_reader.LogReader', 'LogReader', (['qt_text_edit', 'parent', 'log_file'], {}), '(qt_text_edit, parent, log_file)\n', (7541, 7573), False, 'from log_reader import LogReader\n'), ((8530, 8580), 'dep_util.setup_file_explorer', 'dep_util.setup_file_explorer', (['parent.fs_tree', 'path'], {}), '(parent.fs_tree, path)\n', (8558, 8580), False, 'import dep_util\n'), ((8989, 9065), 'dep_util.preview_file', 'dep_util.preview_file', (['parent.fs_model', 'parent.fs_tree', 'frame', 'label', 'prefix'], {}), '(parent.fs_model, parent.fs_tree, frame, label, prefix)\n', (9010, 9065), False, 'import dep_util\n'), ((9788, 9839), 'dep_util.switch_objects_prefix', 'dep_util.switch_objects_prefix', (['gb', 'prefixes', 'state'], {}), '(gb, prefixes, state)\n', (9818, 9839), False, 'import dep_util\n'), ((11716, 11805), 'scripts.aws.util.AWSUtil', 'AWSUtil', (['parent.path_aws_credentials'], {'region_name': 'parent.parent.aws_util.region_name'}), '(parent.path_aws_credentials, region_name=parent.parent.aws_util.\n region_name)\n', (11723, 11805), False, 'from scripts.aws.util import AWSUtil\n'), ((11835, 
11908), 'scripts.aws.create.get_staging_info', 'get_staging_info', (['aws_util', 'parent.path_aws_ip_file'], {'start_instance': '(False)'}), '(aws_util, parent.path_aws_ip_file, start_instance=False)\n', (11851, 11908), False, 'from scripts.aws.create import get_staging_info, get_render_pid, has_render_flag, run_ssh_command\n'), ((12122, 12170), 'scripts.aws.create.has_render_flag', 'has_render_flag', (['key_fn', 'ip_staging', 'flag', 'value'], {}), '(key_fn, ip_staging, flag, value)\n', (12137, 12170), False, 'from scripts.aws.create import get_staging_info, get_render_pid, has_render_flag, run_ssh_command\n'), ((15350, 15399), 'dep_util.populate_dropdown', 'dep_util.populate_dropdown', (['dd', 'ps', 'f"""{project}/"""'], {}), "(dd, ps, f'{project}/')\n", (15376, 15399), False, 'import dep_util\n'), ((15404, 15471), 'dep_util.update_qt_dropdown', 'dep_util.update_qt_dropdown', (['dd', 'dd_prev_text'], {'add_if_missing': '(False)'}), '(dd, dd_prev_text, add_if_missing=False)\n', (15431, 15471), False, 'import dep_util\n'), ((18231, 18267), 'scripts.util.system_util.get_flags_from_flagfile', 'get_flags_from_flagfile', (['flagfile_fn'], {}), '(flagfile_fn)\n', (18254, 18267), False, 'from scripts.util.system_util import get_flags, get_flags_from_flagfile, image_type_paths, run_command\n'), ((19309, 19338), 'os.path.basename', 'os.path.basename', (['flagfile_fn'], {}), '(flagfile_fn)\n', (19325, 19338), False, 'import os\n'), ((24064, 24121), 'os.path.join', 'os.path.join', (['parent.path_flags', 'parent.flagfile_basename'], {}), '(parent.path_flags, parent.flagfile_basename)\n', (24076, 24121), False, 'import os\n'), ((24134, 24170), 'scripts.util.system_util.get_flags_from_flagfile', 'get_flags_from_flagfile', (['flagfile_fn'], {}), '(flagfile_fn)\n', (24157, 24170), False, 'from scripts.util.system_util import get_flags, get_flags_from_flagfile, image_type_paths, run_command\n'), ((25715, 25765), 'dep_util.write_flagfile', 'dep_util.write_flagfile', 
(['flagfile_fn', 'sorted_flags'], {}), '(flagfile_fn, sorted_flags)\n', (25738, 25765), False, 'import dep_util\n'), ((26105, 26167), 'dep_util.get_files_ext', 'dep_util.get_files_ext', (['parent.path_rigs', '"""json"""', '"""calibrated"""'], {}), "(parent.path_rigs, 'json', 'calibrated')\n", (26127, 26167), False, 'import dep_util\n'), ((37632, 37688), 'dep_util.get_level_image_path', 'dep_util.get_level_image_path', (['path_color', 'cam_id', 'frame'], {}), '(path_color, cam_id, frame)\n', (37661, 37688), False, 'import dep_util\n'), ((38202, 38251), 'dep_util.remove_prefix', 'dep_util.remove_prefix', (['image_path', 'f"""{project}/"""'], {}), "(image_path, f'{project}/')\n", (38224, 38251), False, 'import dep_util\n'), ((39147, 39211), 'dep_util.get_level_image_path', 'dep_util.get_level_image_path', (['paths_color[0]', 'cam_id', 'frames[0]'], {}), '(paths_color[0], cam_id, frames[0])\n', (39176, 39211), False, 'import dep_util\n'), ((39343, 39407), 'dep_util.get_level_image_path', 'dep_util.get_level_image_path', (['paths_color[1]', 'cam_id', 'frames[1]'], {}), '(paths_color[1], cam_id, frames[1])\n', (39372, 39407), False, 'import dep_util\n'), ((40009, 40061), 'dep_util.remove_prefix', 'dep_util.remove_prefix', (['bg_image_path', 'f"""{project}/"""'], {}), "(bg_image_path, f'{project}/')\n", (40031, 40061), False, 'import dep_util\n'), ((40074, 40126), 'dep_util.remove_prefix', 'dep_util.remove_prefix', (['fg_image_path', 'f"""{project}/"""'], {}), "(fg_image_path, f'{project}/')\n", (40096, 40126), False, 'import dep_util\n'), ((41634, 41685), 'dep_util.switch_tab', 'dep_util.switch_tab', (['tab_widget', 'f"""_threshs_{type}"""'], {}), "(tab_widget, f'_threshs_{type}')\n", (41653, 41685), False, 'import dep_util\n'), ((41921, 41949), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', (['parent.parent'], {}), '(parent.parent)\n', (41934, 41949), False, 'from PyQt5 import QtCore, QtWidgets\n'), ((44426, 44465), 'dep_util.switch_tab', 'dep_util.switch_tab', (['tab_widget', 
'"""_log"""'], {}), "(tab_widget, '_log')\n", (44445, 44465), False, 'import dep_util\n'), ((45535, 45562), 'os.path.join', 'os.path.join', (['dir', 'app_name'], {}), '(dir, app_name)\n', (45547, 45562), False, 'import os\n'), ((54587, 54644), 'os.path.join', 'os.path.join', (['parent.path_flags', 'parent.flagfile_basename'], {}), '(parent.path_flags, parent.flagfile_basename)\n', (54599, 54644), False, 'import os\n'), ((54657, 54693), 'scripts.util.system_util.get_flags_from_flagfile', 'get_flags_from_flagfile', (['flagfile_fn'], {}), '(flagfile_fn)\n', (54680, 54693), False, 'from scripts.util.system_util import get_flags, get_flags_from_flagfile, image_type_paths, run_command\n'), ((55184, 55210), 'scripts.util.system_util.run_command', 'run_command', (['"""top -b -n 1"""'], {}), "('top -b -n 1')\n", (55195, 55210), False, 'from scripts.util.system_util import get_flags, get_flags_from_flagfile, image_type_paths, run_command\n'), ((56461, 56520), 'dep_util.popup_message', 'dep_util.popup_message', (['parent.parent', 'url', '"""EC2 Dashboard"""'], {}), "(parent.parent, url, 'EC2 Dashboard')\n", (56483, 56520), False, 'import dep_util\n'), ((56785, 56844), 'glob.iglob', 'glob.iglob', (['f"""{parent.path_logs}/Worker-*"""'], {'recursive': '(False)'}), "(f'{parent.path_logs}/Worker-*', recursive=False)\n", (56795, 56844), False, 'import glob\n'), ((57513, 57552), 'os.path.dirname', 'os.path.dirname', (['parent.path_aws_key_fn'], {}), '(parent.path_aws_key_fn)\n', (57528, 57552), False, 'import os\n'), ((58172, 58242), 'os.path.join', 'os.path.join', (['parent.path_flags', 'parent.app_name_to_flagfile[app_name]'], {}), '(parent.path_flags, parent.app_name_to_flagfile[app_name])\n', (58184, 58242), False, 'import os\n'), ((58247, 58290), 'dep_util.write_flagfile', 'dep_util.write_flagfile', (['flagfile_fn', 'flags'], {}), '(flagfile_fn, flags)\n', (58270, 58290), False, 'import dep_util\n'), ((58845, 58891), 'os.path.join', 'os.path.join', 
(['config.DOCKER_INPUT_ROOT', 'subdir'], {}), '(config.DOCKER_INPUT_ROOT, subdir)\n', (58857, 58891), False, 'import os\n'), ((58914, 58971), 'os.path.join', 'os.path.join', (['parent.parent.ui_flags.project_root', 'subdir'], {}), '(parent.parent.ui_flags.project_root, subdir)\n', (58926, 58971), False, 'import os\n'), ((59206, 59276), 'os.path.join', 'os.path.join', (['parent.path_flags', 'parent.app_name_to_flagfile[app_name]'], {}), '(parent.path_flags, parent.app_name_to_flagfile[app_name])\n', (59218, 59276), False, 'import os\n'), ((59281, 59324), 'dep_util.write_flagfile', 'dep_util.write_flagfile', (['flagfile_fn', 'flags'], {}), '(flagfile_fn, flags)\n', (59304, 59324), False, 'import dep_util\n'), ((59722, 59761), 'os.path.dirname', 'os.path.dirname', (['parent.path_aws_key_fn'], {}), '(parent.path_aws_key_fn)\n', (59737, 59761), False, 'import os\n'), ((60050, 60137), 'os.path.join', 'os.path.join', (['parent.path_flags', 'parent.app_name_to_flagfile[parent.app_aws_clean]'], {}), '(parent.path_flags, parent.app_name_to_flagfile[parent.\n app_aws_clean])\n', (60062, 60137), False, 'import os\n'), ((60151, 60194), 'dep_util.write_flagfile', 'dep_util.write_flagfile', (['flagfile_fn', 'flags'], {}), '(flagfile_fn, flags)\n', (60174, 60194), False, 'import dep_util\n'), ((813, 838), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (828, 838), False, 'import os\n'), ((3746, 3834), 'os.path.join', 'os.path.join', (['parent.path_flags', 'parent.app_name_to_flagfile[parent.app_aws_create]'], {}), '(parent.path_flags, parent.app_name_to_flagfile[parent.\n app_aws_create])\n', (3758, 3834), False, 'import os\n'), ((3863, 3894), 'os.path.exists', 'os.path.exists', (['create_flagfile'], {}), '(create_flagfile)\n', (3877, 3894), False, 'import os\n'), ((5119, 5146), 'os.path.join', 'os.path.join', (['dir', 'app_name'], {}), '(dir, app_name)\n', (5131, 5146), False, 'import os\n'), ((10843, 10932), 'scripts.aws.util.AWSUtil', 'AWSUtil', 
(['parent.path_aws_credentials'], {'region_name': 'parent.parent.aws_util.region_name'}), '(parent.path_aws_credentials, region_name=parent.parent.aws_util.\n region_name)\n', (10850, 10932), False, 'from scripts.aws.util import AWSUtil\n'), ((10974, 11025), 'scripts.aws.create.get_staging_info', 'get_staging_info', (['aws_util', 'parent.path_aws_ip_file'], {}), '(aws_util, parent.path_aws_ip_file)\n', (10990, 11025), False, 'from scripts.aws.create import get_staging_info, get_render_pid, has_render_flag, run_ssh_command\n'), ((12653, 12699), 'os.path.join', 'os.path.join', (['config.DOCKER_INPUT_ROOT', 'subdir'], {}), '(config.DOCKER_INPUT_ROOT, subdir)\n', (12665, 12699), False, 'import os\n'), ((12717, 12774), 'os.path.join', 'os.path.join', (['parent.parent.ui_flags.project_root', 'subdir'], {}), '(parent.parent.ui_flags.project_root, subdir)\n', (12729, 12774), False, 'import os\n'), ((13018, 13067), 'os.path.join', 'os.path.join', (['scripts_dir', '"""util"""', '"""tar_frame.py"""'], {}), "(scripts_dir, 'util', 'tar_frame.py')\n", (13030, 13067), False, 'import os\n'), ((17326, 17353), 'os.path.isfile', 'os.path.isfile', (['flagfile_fn'], {}), '(flagfile_fn)\n', (17340, 17353), False, 'import os\n'), ((18531, 18581), 'dep_util.write_flagfile', 'dep_util.write_flagfile', (['flagfile_fn', 'sorted_flags'], {}), '(flagfile_fn, sorted_flags)\n', (18554, 18581), False, 'import dep_util\n'), ((19991, 20062), 'dep_util.set_tab_enabled', 'dep_util.set_tab_enabled', (['parent.dlg.w_steps', 'parent.tag'], {'enabled': '(False)'}), '(parent.dlg.w_steps, parent.tag, enabled=False)\n', (20015, 20062), False, 'import dep_util\n'), ((20772, 20799), 'os.path.isfile', 'os.path.isfile', (['flagfile_fn'], {}), '(flagfile_fn)\n', (20786, 20799), False, 'import os\n'), ((22121, 22209), 'os.path.join', 'os.path.join', (['parent.path_flags', 'parent.app_name_to_flagfile[parent.app_aws_create]'], {}), '(parent.path_flags, parent.app_name_to_flagfile[parent.\n app_aws_create])\n', 
(22133, 22209), False, 'import os\n'), ((24215, 24237), 'os.path.isfile', 'os.path.isfile', (['source'], {}), '(source)\n', (24229, 24237), False, 'import os\n'), ((42431, 42462), 'dep_util.get_first_file_path', 'dep_util.get_first_file_path', (['d'], {}), '(d)\n', (42459, 42462), False, 'import dep_util\n'), ((45574, 45598), 'os.path.isfile', 'os.path.isfile', (['app_path'], {}), '(app_path)\n', (45588, 45598), False, 'import os\n'), ((51442, 51504), 'dep_util.update_qt_dropdown_from_flags', 'dep_util.update_qt_dropdown_from_flags', (['flags', 'key', 'prefix', 'dd'], {}), '(flags, key, prefix, dd)\n', (51480, 51504), False, 'import dep_util\n'), ((51626, 51689), 'dep_util.update_qt_lineedit_from_flags', 'dep_util.update_qt_lineedit_from_flags', (['flags', 'key', 'prefix', 'val'], {}), '(flags, key, prefix, val)\n', (51664, 51689), False, 'import dep_util\n'), ((51738, 51800), 'dep_util.update_qt_checkbox_from_flags', 'dep_util.update_qt_checkbox_from_flags', (['flags', 'key', 'prefix', 'cb'], {}), '(flags, key, prefix, cb)\n', (51776, 51800), False, 'import dep_util\n'), ((51924, 51986), 'dep_util.update_qt_label_from_flags', 'dep_util.update_qt_label_from_flags', (['flags', 'key', 'prefix', 'label'], {}), '(flags, key, prefix, label)\n', (51959, 51986), False, 'import dep_util\n'), ((57072, 57112), 'dep_util.remove_prefix', 'dep_util.remove_prefix', (['l', 'f"""{project}/"""'], {}), "(l, f'{project}/')\n", (57094, 57112), False, 'import dep_util\n'), ((3923, 3963), 'scripts.util.system_util.get_flags_from_flagfile', 'get_flags_from_flagfile', (['create_flagfile'], {}), '(create_flagfile)\n', (3946, 3963), False, 'from scripts.util.system_util import get_flags, get_flags_from_flagfile, image_type_paths, run_command\n'), ((10226, 10284), 'dep_util.set_tab_enabled', 'dep_util.set_tab_enabled', (['parent.dlg.w_steps', 's.tag', 'state'], {}), '(parent.dlg.w_steps, s.tag, state)\n', (10250, 10284), False, 'import dep_util\n'), ((11074, 11124), 
'scripts.aws.create.get_render_pid', 'get_render_pid', (['parent.path_aws_key_fn', 'ip_staging'], {}), '(parent.path_aws_key_fn, ip_staging)\n', (11088, 11124), False, 'from scripts.aws.create import get_staging_info, get_render_pid, has_render_flag, run_ssh_command\n'), ((11655, 11677), 'os.path.isfile', 'os.path.isfile', (['key_fn'], {}), '(key_fn)\n', (11669, 11677), False, 'import os\n'), ((13120, 13152), 'dep_util.get_frame_list', 'dep_util.get_frame_list', (['local_i'], {}), '(local_i)\n', (13143, 13152), False, 'import dep_util\n'), ((22355, 22381), 'os.path.isfile', 'os.path.isfile', (['ff_dst_abs'], {}), '(ff_dst_abs)\n', (22369, 22381), False, 'import os\n'), ((22408, 22447), 'os.path.join', 'os.path.join', (['dep_flags_dir', 'ff_src_rel'], {}), '(dep_flags_dir, ff_src_rel)\n', (22420, 22447), False, 'import os\n'), ((22528, 22567), 'shutil.copyfile', 'shutil.copyfile', (['ff_src_abs', 'ff_dst_abs'], {}), '(ff_src_abs, ff_dst_abs)\n', (22543, 22567), False, 'import shutil\n'), ((24266, 24283), 'scripts.util.system_util.get_flags', 'get_flags', (['source'], {}), '(source)\n', (24275, 24283), False, 'from scripts.util.system_util import get_flags, get_flags_from_flagfile, image_type_paths, run_command\n'), ((43256, 43280), 'dep_util.get_timestamp', 'dep_util.get_timestamp', ([], {}), '()\n', (43278, 43280), False, 'import dep_util\n'), ((49314, 49378), 'slider_image_thresholds.SliderWidget', 'SliderWidget', (['type', 'attr', 'name', 'printed', 'hs', 'label', 'max', 'default'], {}), '(type, attr, name, printed, hs, label, max, default)\n', (49326, 49378), False, 'from slider_image_thresholds import SliderWidget\n'), ((56923, 56944), 'os.path.getmtime', 'os.path.getmtime', (['log'], {}), '(log)\n', (56939, 56944), False, 'import os\n'), ((57594, 57634), 'os.path.basename', 'os.path.basename', (['parent.path_aws_key_fn'], {}), '(parent.path_aws_key_fn)\n', (57610, 57634), False, 'import os\n'), ((59803, 59843), 'os.path.basename', 'os.path.basename', 
(['parent.path_aws_key_fn'], {}), '(parent.path_aws_key_fn)\n', (59819, 59843), False, 'import os\n'), ((11180, 11256), 'scripts.aws.create.run_ssh_command', 'run_ssh_command', (['parent.path_aws_key_fn', 'ip_staging', 'f"""kill -9 {render_pid}"""'], {}), "(parent.path_aws_key_fn, ip_staging, f'kill -9 {render_pid}')\n", (11195, 11256), False, 'from scripts.aws.create import get_staging_info, get_render_pid, has_render_flag, run_ssh_command\n'), ((12847, 12880), 'os.path.join', 'os.path.join', (['local', 'f"""level_{l}"""'], {}), "(local, f'level_{l}')\n", (12859, 12880), False, 'import os\n'), ((22472, 22499), 'os.path.dirname', 'os.path.dirname', (['ff_dst_abs'], {}), '(ff_dst_abs)\n', (22487, 22499), False, 'import os\n'), ((23008, 23050), 'os.path.join', 'os.path.join', (['depth_est_src', '"""DerpCLI.cpp"""'], {}), "(depth_est_src, 'DerpCLI.cpp')\n", (23020, 23050), False, 'import os\n'), ((23362, 23402), 'os.path.join', 'os.path.join', (['render_scripts', '"""setup.py"""'], {}), "(render_scripts, 'setup.py')\n", (23374, 23402), False, 'import os\n'), ((23444, 23502), 'os.path.join', 'os.path.join', (['depth_est_src', '"""TemporalBilateralFilter.cpp"""'], {}), "(depth_est_src, 'TemporalBilateralFilter.cpp')\n", (23456, 23502), False, 'import os\n'), ((23575, 23630), 'os.path.join', 'os.path.join', (['render_src', '"""GenerateForegroundMasks.cpp"""'], {}), "(render_src, 'GenerateForegroundMasks.cpp')\n", (23587, 23630), False, 'import os\n'), ((43581, 43602), 'shutil.move', 'shutil.move', (['d', 'd_dst'], {}), '(d, d_dst)\n', (43592, 43602), False, 'import shutil\n'), ((43619, 43648), 'os.makedirs', 'os.makedirs', (['d'], {'exist_ok': '(True)'}), '(d, exist_ok=True)\n', (43630, 43648), False, 'import os\n'), ((23873, 23923), 'os.path.join', 'os.path.join', (['render_src', '"""SimpleMeshRenderer.cpp"""'], {}), "(render_src, 'SimpleMeshRenderer.cpp')\n", (23885, 23923), False, 'import os\n'), ((23952, 23999), 'os.path.join', 'os.path.join', (['render_src', 
'"""ConvertToBinary.cpp"""'], {}), "(render_src, 'ConvertToBinary.cpp')\n", (23964, 23999), False, 'import os\n'), ((43345, 43361), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (43358, 43361), False, 'import os\n'), ((13243, 13302), 'glog_check.yellow', 'glog.yellow', (['f"""No frames found for S3 syncing in {local_i}"""'], {}), "(f'No frames found for S3 syncing in {local_i}')\n", (13254, 13302), True, 'import glog_check as glog\n')]
|
from flask import Flask

app = Flask(__name__)
# SECURITY: hardcoded secret key committed to source — move this to an
# environment variable or external config before any real deployment.
app.secret_key = 'my secret key 123'
# Imported for side effects: registers this app's view routes.
from .views import *
|
[
"flask.Flask"
] |
[((31, 46), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (36, 46), False, 'from flask import Flask\n')]
|
# This file is licensed under the terms of the MIT license.
# See the LICENSE file in the root of this repository for complete details.
"""Data structure for representing string diagrams"""
from collections import namedtuple
from copy import deepcopy
# Public API of the module.
__all__ = [
    'Ntext',
    'Composition',
    'NotComposable',
    'NotCompatible',
    'ncell',
    'zcell',
    'icell',
]

# Raw record type backing ``Ntext``; see the ``Ntext`` subclass for
# documentation of the individual fields.
_Ntext = namedtuple('Ntext', [
    'cells',
    'dim',
    'source',
    'target',
    'smap',
    'tmap',
])
class Ntext(_Ntext):
    """Represents a string diagram.

    Attributes:
        cells (dict): indicates how cells connect to each other.
        dim (int): the dimension.
        source (Ntext): the source diagram.
        target (Ntext): the target diagram.
        smap (dict): indicates how the source maps to the cells.
        tmap (dict): indicates how the target maps to the cells.
    """
    __slots__ = ()

    # NOTE: the unqualified names called below (``adapt``, ``compatible``,
    # ``composable``) resolve to the module-level functions, not to these
    # methods — the methods are thin delegations.

    def adapt(self, n, perm):
        """Applies a permutation to the cells of the n-source.

        This is useful to allow composition of ntexts when the target of one
        and source of the other coincide only up to permutation.

        The diagram is modified in place; the module-level ``adapt`` has no
        return statement, so this returns None.

        Args:
            n: the n-source is the source (of the source) n times.
            perm: the permutation represented as a list of non repeated
                indices.
                NOTE(review): module-level ``permute`` iterates
                ``perm.items()``, so ``perm`` appears to be a dict mapping
                cell name to an index permutation — verify.
        """
        return adapt(self, n, perm)

    def compatible(self, target):
        """This diagram can be a source for the given target.

        The source and the target satisfy the globular conditions. This means
        that the source of the source is the same as source of the target and
        the target of the source is the same as the target of the target.

        Args:
            target: the target against which compatibility is checked.

        Returns:
            bool
        """
        return compatible(self, target)

    def composable(self, ntext, n):
        """This diagram can be n-composed with the given diagram.

        Returns:
            bool
        """
        return composable(self, ntext, n, None)
class Composition:
    """Accumulates compositions of diagrams.

    Starts from the identity cell on the source of the given diagram and
    grows by repeated ``compose`` calls.
    """

    def __init__(self, ntext):
        # Seed with the identity on the source of ``ntext``.
        self._ntext = icell(ntext.source)

    def ntext(self):
        """Returns a deep copy of the accumulated diagram."""
        return deepcopy(self._ntext)

    def compose(self, factor, n):
        """n-composes ``factor`` onto the accumulated diagram.

        Raises:
            NotComposable: if the accumulated target does not match the
                source of ``factor``.
        """
        composed, ok = compose(self._ntext, factor, n, None)
        if ok:
            self._ntext = composed
        else:
            raise NotComposable('Target of self is not source of factor.')
class Error(Exception):
    """Base class for ntext exceptions."""


class NotComposable(Error):
    """Raised when trying to compose cells.

    Composing cells requires target of the first cell
    to be the same as the source of the second cell.
    """


class NotCompatible(Error):
    """Raised when trying to create a cell.

    Creating a cell with a source and target requires the source and target to
    satisfy the globular conditions. See `Ntext.compatible`.
    """
# source and target are maps from cell name to a list of positions.
NtextCell = namedtuple('NtextCell', ['source', 'target'])
# Reference into a stitched cell list: ``index`` is the slot position,
# ``is_new`` records whether the slot was created by the current stitch.
_ref = namedtuple('_ref', ['index', 'is_new'])
def ncell(name, source, target):
    """Creates a named cell from ``source`` to ``target``.

    Args:
        name: unused name for the new cell.
        source: source diagram.
        target: target diagram.

    Raises:
        NotCompatible: if source and target do not satisfy the globular
            conditions.
    """
    if compatible(source, target):
        return _ncell(name, source, target)
    raise NotCompatible('Source and target are not compatible.')
def _ncell(name, source, target):
    """Builds the (dim+1)-cell ``name`` between compatible faces.

    Internal: assumes compatibility was already checked by ``ncell``.
    The new cell map holds deep copies of the source's cells followed by
    the target's cells, with the new cell's connection tables
    (``ncsource``/``nctarget``) pointing at them.
    """
    def apply_offset(ct, o):
        # Shift every position in connection table ``ct`` (in place) by
        # the per-name offsets in ``o``.
        for m, p in ct.items():
            ct[m] = [j+o[m] for j in p]
    # Assumes name is an unused name.
    source = deepcopy(source)
    target = deepcopy(target)
    dim = source.dim
    ntext = dict()
    smap = dict()
    tmap = dict()
    ncsource = dict()
    nctarget = dict()
    # The new cell itself, wired to the copies made below.
    ntext[name] = [NtextCell(ncsource, nctarget)]
    if dim > 0:
        offset = dict()
        # Source cells keep their original indices.
        for n, cells in source.cells.items():
            ntext[n] = [deepcopy(c) for c in cells]
            smap[n] = list(range(len(cells)))
            ncsource[n] = smap[n][:]
        # Target cells are appended after the source's; remember offsets.
        for n, cells in target.cells.items():
            if n not in ntext:
                ntext[n] = []
            off = len(ntext[n])
            offset[n] = off
            ntext[n].extend(deepcopy(c) for c in cells)
            tmap[n] = list(range(off, len(cells)+off))
            nctarget[n] = tmap[n][:]
        # Re-point the appended cells' connections at shifted indices.
        # NOTE(review): ``offset`` only has keys for names present in
        # ``target.cells``; iterating all of ``ntext`` (which includes
        # ``name`` and source-only names) would raise KeyError here —
        # verify the intended invariant.
        for n, cells in ntext.items():
            for i in range(offset[n], len(cells)):
                for cell in cells[i]:
                    apply_offset(cell.source, offset)
                    apply_offset(cell.target, offset)
    else:
        # 0-dimensional faces: cell maps are occurrence counts.
        sn = source.cells
        ntext[sn] = 1
        smap[sn] = [0]
        ncsource[sn] = [0]
        tn = target.cells
        if tn in ntext:
            ntext[tn] += 1
            tmap[tn] = [1]
            nctarget[tn] = [1]
        else:
            ntext[tn] = 1
            tmap[tn] = [0]
            nctarget[tn] = [0]
    return Ntext(ntext, dim+1, source, target, smap, tmap)
def zcell(name):
    """Creates a 0-cell: a dimension-0 diagram with no faces or maps."""
    return Ntext(cells=name, dim=0, source=None, target=None, smap=None,
                 tmap=None)
def icell(ntext):
    """Builds the identity cell on ``ntext``.

    Args:
        ntext (Ntext): diagram to take the identity of.

    Returns:
        Ntext: an (ntext.dim + 1)-cell whose source and target are both
        (copies of) ``ntext``, with identity face maps.
    """
    dim = ntext.dim + 1
    if ntext.dim == 0:
        # 0-dimensional: the cell map is an occurrence count.
        cells = {ntext.cells: 1}
        smap = {ntext.cells: [0]}
    else:
        cells = deepcopy(ntext.cells)
        smap = dict()
        for n, p in cells.items():
            # BUG FIX: materialize the positions as a list (was
            # ``range(len(p))``). Every other map in this module stores
            # positions as lists; ``compose`` mutates these maps with
            # ``dm[n].extend(...)``, which a ``range`` object does not
            # support, and map equality against list-based maps would
            # always fail (``range(2) != [0, 1]``).
            smap[n] = list(range(len(p)))
    source = deepcopy(ntext)
    target = deepcopy(ntext)
    tmap = deepcopy(smap)
    return Ntext(cells, dim, source, target, smap, tmap)
def pre_stitch(dst, src, mapping):
    """Grows ``dst`` in place so it can hold every cell of ``src``.

    ``dst`` and ``src`` map cell names either to an int (an occurrence
    count, used for 0-dimensional cells) or to a list of cells.  Every
    cell of ``src`` without an entry in ``mapping`` gets a fresh slot at
    the end of ``dst``'s entry, recorded as
    ``mapping[(name, index)] = _ref(new_index, True)``; pre-existing
    ``mapping`` entries are left untouched.  List slots are created as
    ``None`` placeholders for ``stitch`` to fill.
    """
    for name, maps in src.items():
        if isinstance(maps, int):
            # 0-dimensional entry: the value is just a count.
            length = maps
            cells = dst.get(name, 0)
            clength = cells
        else:
            length = len(maps)
            cells = dst.get(name) or []
            clength = len(cells)
            if not cells:
                dst[name] = cells
        for i in range(length):
            ni = (name, i)
            if ni not in mapping:
                # Unmapped source cell: allocate the next free slot.
                mapping[ni] = _ref(clength, True)
                clength += 1
        if isinstance(maps, int):
            dst[name] = clength
        else:
            # Pad with None placeholders for the newly allocated slots.
            cells.extend(None for i in range(len(cells), clength))
def stitch(dst, src, mapping):
    """Merges the cells of ``src`` into ``dst`` (both are ntext maps).

    After ``pre_stitch`` allocates slots, each newly mapped cell of
    ``src`` is copied into its slot with its source/target connection
    tables rewritten through ``mapping`` so that positions refer to
    ``dst``'s indexing.  Integer-valued (0-dimensional) entries need no
    rewriting.
    """
    pre_stitch(dst, src, mapping)
    # dst and src are ntext maps.
    for name, maps in src.items():
        if isinstance(maps, int):
            continue
        cells = dst[name]
        for i, cell in enumerate(maps):
            j, is_new = mapping[(name, i)]
            if is_new:
                assert cells[j] is None
                source = dict()
                target = dict()
                # Translate every referenced position into dst indexing.
                for t, ct in [(source, cell.source), (target, cell.target)]:
                    for n, p in ct.items():
                        t[n] = [mapping[(n, k)].index for k in p]
                cells[j] = NtextCell(source, target)
def compatible(source, target):
    """Checks the globular conditions between ``source`` and ``target``.

    True when source-of-source equals source-of-target and
    target-of-source equals target-of-target.

    Returns:
        bool
    """
    ok = (source.source == target.source
          and source.target == target.target)
    if ok:
        # Matching faces imply matching dimension.
        assert source.dim == target.dim
    return ok
def compose(dst, src, dim, mapping):
    """``dim``-composes ``src`` onto ``dst``.

    Note: mutates ``dst.cells`` in place (via ``stitch``) and, for
    ``dim > 0``, extends ``dst``'s face maps as well.

    Args:
        dst: left factor (Ntext).
        src: right factor (Ntext).
        dim (int): composition dimension; 0 is vertical composition.
        mapping (dict or None): optional out-parameter collecting the
            ``(name, index) -> _ref`` translation of ``src``'s cells.

    Returns:
        tuple: ``(Ntext, True)`` on success, ``(None, False)`` when the
        factors are not composable.
    """
    mapping = dict() if mapping is None else mapping
    c = composable(dst, src, dim, mapping)
    if not c:
        return None, c
    ntext = dst.cells
    stitch(ntext, src.cells, mapping)
    if dim > 0:  # dim 0 is vertical composition.
        dim -= 1
        r = [None]*2
        for k, d, s, dm, sm in [
            (0, dst.source, src.source, dst.smap, src.smap),
            (1, dst.target, src.target, dst.tmap, src.tmap),
        ]:
            cm = dict()
            r[k], c = compose(d, s, dim, cm)
            assert c
            # BUG FIX: iterate (name, positions) pairs of the face map.
            # The original ``for n, p in sm:`` iterated only the dict's
            # keys and tried to unpack each name string into two values.
            for n, p in sm.items():
                if n not in dm:
                    dm[n] = []
                # Record the positions newly created by this sub-compose.
                dm[n].extend(
                    j for j, is_new in (cm[(n, i)] for i in p)
                    if is_new
                )
        source, target = r
    else:
        source = dst.source
        target = deepcopy(src.target)
    smap = dst.smap
    tmap = dict()
    # The composite's target map is src's target translated into the
    # stitched indexing.
    for n, p in src.tmap.items():
        tmap[n] = [mapping[(n, i)].index for i in p]
    return Ntext(ntext, dst.dim, source, target, smap, tmap), True
def composable(dst, src, dim, mapping):
    """Checks whether ``dst`` can be ``dim``-composed with ``src``.

    When ``mapping`` is given, it is filled (as a side effect) with
    ``(name, position) -> _ref`` entries identifying ``src``'s source
    face with ``dst``'s target face.

    Returns:
        bool: True when the faces coincide.
    """
    if dst.dim != src.dim:
        return False
    dst_face, dst_map = get_face(dst, dim, 1)    # target side of dst
    src_face, src_map = get_face(src, dim, -1)   # source side of src
    if mapping is not None:
        for name, positions in src_map.items():
            dst_positions = dst_map[name]
            for i, k in enumerate(positions):
                mapping[(name, k)] = _ref(dst_positions[i], False)
    return src_face == dst_face
def permute(ntext, mapping, perm):
    """Reorders cells (in place) according to ``perm``.

    For every name in ``perm``, both ``ntext[name]`` and
    ``mapping[name]`` are rebuilt in the order given by the index
    permutation, so one is effectively permuted and the other carries
    the inverse relationship.
    """
    for name, order in perm.items():
        ntext[name] = [ntext[name][i] for i in order]
        mapping[name] = [mapping[name][i] for i in order]
def adapt(cell, dim, perm):
    """Permute the dim-source of *cell*; dim 0 is vertical composition."""
    if dim <= 0:
        permute(cell.source, cell.smap, perm)
    else:
        adapt(cell.source, dim - 1, perm)
        adapt(cell.target, dim - 1, perm)
def get_face(cell, dim, side):
    """Return ``(face, position-map)`` of *cell* along dimension *dim*.

    ``side < 0`` selects the source face, any other value the target
    face.  For ``dim > 0`` the face is found recursively in the source
    and its position map is re-indexed through the cell's own smap.
    """
    if dim <= 0:
        if side < 0:
            return cell.source, cell.smap
        return cell.target, cell.tmap
    inner_face, inner_map = get_face(cell.source, dim - 1, side)
    remapped = dict()
    for name, positions in cell.smap.items():
        remapped[name] = [positions[i] for i in inner_map[name]]
    return inner_face, remapped
|
[
"copy.deepcopy",
"collections.namedtuple"
] |
[((390, 463), 'collections.namedtuple', 'namedtuple', (['"""Ntext"""', "['cells', 'dim', 'source', 'target', 'smap', 'tmap']"], {}), "('Ntext', ['cells', 'dim', 'source', 'target', 'smap', 'tmap'])\n", (400, 463), False, 'from collections import namedtuple\n'), ((2964, 3009), 'collections.namedtuple', 'namedtuple', (['"""NtextCell"""', "['source', 'target']"], {}), "('NtextCell', ['source', 'target'])\n", (2974, 3009), False, 'from collections import namedtuple\n'), ((3018, 3057), 'collections.namedtuple', 'namedtuple', (['"""_ref"""', "['index', 'is_new']"], {}), "('_ref', ['index', 'is_new'])\n", (3028, 3057), False, 'from collections import namedtuple\n'), ((3431, 3447), 'copy.deepcopy', 'deepcopy', (['source'], {}), '(source)\n', (3439, 3447), False, 'from copy import deepcopy\n'), ((3461, 3477), 'copy.deepcopy', 'deepcopy', (['target'], {}), '(target)\n', (3469, 3477), False, 'from copy import deepcopy\n'), ((5183, 5198), 'copy.deepcopy', 'deepcopy', (['ntext'], {}), '(ntext)\n', (5191, 5198), False, 'from copy import deepcopy\n'), ((5212, 5227), 'copy.deepcopy', 'deepcopy', (['ntext'], {}), '(ntext)\n', (5220, 5227), False, 'from copy import deepcopy\n'), ((5239, 5253), 'copy.deepcopy', 'deepcopy', (['smap'], {}), '(smap)\n', (5247, 5253), False, 'from copy import deepcopy\n'), ((2200, 2221), 'copy.deepcopy', 'deepcopy', (['self._ntext'], {}), '(self._ntext)\n', (2208, 2221), False, 'from copy import deepcopy\n'), ((5054, 5075), 'copy.deepcopy', 'deepcopy', (['ntext.cells'], {}), '(ntext.cells)\n', (5062, 5075), False, 'from copy import deepcopy\n'), ((7748, 7768), 'copy.deepcopy', 'deepcopy', (['src.target'], {}), '(src.target)\n', (7756, 7768), False, 'from copy import deepcopy\n'), ((3759, 3770), 'copy.deepcopy', 'deepcopy', (['c'], {}), '(c)\n', (3767, 3770), False, 'from copy import deepcopy\n'), ((4067, 4078), 'copy.deepcopy', 'deepcopy', (['c'], {}), '(c)\n', (4075, 4078), False, 'from copy import deepcopy\n')]
|
from common import unittest2
import pyuv
class PrepareTest(unittest2.TestCase):
    """Exercises the pyuv Prepare handle life cycle."""

    def test_prepare1(self):
        self.prepare_cb_called = 0

        def on_prepare(handle):
            # Count the invocation, then stop and dispose of the handle
            # so the loop can exit.
            self.prepare_cb_called += 1
            handle.stop()
            handle.close()

        event_loop = pyuv.Loop.default_loop()
        handle = pyuv.Prepare(event_loop)
        handle.start(on_prepare)
        event_loop.run()
        self.assertEqual(self.prepare_cb_called, 1)
if __name__ == '__main__':
    # Run this test module directly with verbose output.
    unittest2.main(verbosity=2)
|
[
"pyuv.Loop.default_loop",
"pyuv.Prepare",
"common.unittest2.main"
] |
[((491, 518), 'common.unittest2.main', 'unittest2.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (505, 518), False, 'from common import unittest2\n'), ((291, 315), 'pyuv.Loop.default_loop', 'pyuv.Loop.default_loop', ([], {}), '()\n', (313, 315), False, 'import pyuv\n'), ((334, 352), 'pyuv.Prepare', 'pyuv.Prepare', (['loop'], {}), '(loop)\n', (346, 352), False, 'import pyuv\n')]
|
#!/usr/bin/env python
"""
Generate visualisations for the main script
"""
from io import BytesIO
import sys
from PIL import Image, ImageDraw
import dailyprogrammer.challenges.c20170904e2 as challenge
from dailyprogrammer.utils.logging import moduleLogger
# Module-level logger for this visualisation script.
logger = moduleLogger(__name__)
INF = float('inf')  # infinite slope marks a vertical tangent line (see main)
def getY(line, x):
    """
    Return the (truncated-to-int) y coordinate of ``line = (m, c)`` at *x*.
    """
    slope, intercept = line
    return int(slope * x + intercept)
def main(circles, scale=50, units=10, offset=(0, 0)):
    """
    Render *circles*, their convex-hull tangent lines and the minimum
    bounding box, then write the resulting PNG to stdout.

    :param circles: iterable of ``(x, y, radius)`` tuples
    :param scale: pixels per unit
    :param units: square canvas size, in units
    :param offset: ``(dx, dy)`` shift applied to every circle centre
        (use it to keep all coordinates positive on the canvas)
    """
    # Offset circles to get them positive
    circles = list((x+offset[0], y+offset[1], r) for x, y, r in circles)
    dimensions = (units * scale, ) * 2
    im = Image.new("RGB", dimensions, (255, 255, 255))
    draw = ImageDraw.Draw(im)
    # Draw circles
    logger.warn("Drawing circles")
    for circle in circles:
        x, y, r = circle
        # Bounding box of the circle, scaled to pixels.
        coords = [i * scale for i in (x-r, y-r, x+r, y+r)]
        draw.ellipse(coords, outline=(127, 127, 127))
    # Draw tangent lines
    logger.warn("Drawing tangent lines")
    hull = challenge.convexHullDisks(circles)
    for line in hull:
        m, c = line
        c = c * scale
        xs = [-im.width, im.width]
        if abs(m) != INF:
            points = list((x, getY((m, c), x)) for x in xs)
        else:
            # Vertical line: intercept c holds the x position instead.
            points = list((c, y) for y in xs)
        draw.line(points, fill=(255, 0, 0), width=1)
    # Draw smallest box
    logger.warn("Drawing box")
    box = challenge.minimumBounding(circles)
    for i, point in enumerate(box):
        a = box[i]
        try:
            b = box[i+1]
        except IndexError:
            b = box[0]  # wrap around to close the polygon
        points = [a, b]
        points = list(tuple(p * scale for p in point) for point in points)
        draw.line(points, fill=(0, 0, 255), width=1)
    # Flip so y grows upward, matching mathematical convention.
    im = im.transpose(Image.FLIP_TOP_BOTTOM)
    # write to stdout
    imagefile = BytesIO()
    im.save(imagefile, "PNG")
    imagedata = imagefile.getvalue()
    sys.stdout.buffer.write(imagedata)
if __name__ == "__main__":
# challenge
#main([(1,1,2), (2,2,0.5), (-1,-3,2), (5,2,1)],
# scale=25,
# main=20,
# offset=(9, 10))
# demo
main([(3, 3, 1), (3, 6, 1), (4, 7, 1.5), (8, 6, 0.5), (7, 5, 0.75), (5, 4.5, 2), (5.5, 3.5, 0.125)])
|
[
"dailyprogrammer.challenges.c20170904e2.convexHullDisks",
"PIL.Image.new",
"io.BytesIO",
"dailyprogrammer.utils.logging.moduleLogger",
"dailyprogrammer.challenges.c20170904e2.minimumBounding",
"PIL.ImageDraw.Draw",
"sys.stdout.buffer.write"
] |
[((268, 290), 'dailyprogrammer.utils.logging.moduleLogger', 'moduleLogger', (['__name__'], {}), '(__name__)\n', (280, 290), False, 'from dailyprogrammer.utils.logging import moduleLogger\n'), ((689, 734), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'dimensions', '(255, 255, 255)'], {}), "('RGB', dimensions, (255, 255, 255))\n", (698, 734), False, 'from PIL import Image, ImageDraw\n'), ((746, 764), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (760, 764), False, 'from PIL import Image, ImageDraw\n'), ((1063, 1097), 'dailyprogrammer.challenges.c20170904e2.convexHullDisks', 'challenge.convexHullDisks', (['circles'], {}), '(circles)\n', (1088, 1097), True, 'import dailyprogrammer.challenges.c20170904e2 as challenge\n'), ((1465, 1499), 'dailyprogrammer.challenges.c20170904e2.minimumBounding', 'challenge.minimumBounding', (['circles'], {}), '(circles)\n', (1490, 1499), True, 'import dailyprogrammer.challenges.c20170904e2 as challenge\n'), ((1880, 1889), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1887, 1889), False, 'from io import BytesIO\n'), ((1961, 1995), 'sys.stdout.buffer.write', 'sys.stdout.buffer.write', (['imagedata'], {}), '(imagedata)\n', (1984, 1995), False, 'import sys\n')]
|
import cv2
def color_detector():
    """Open the "Color detectors" window with HSV threshold trackbars."""
    def setValues(x):
        # Trackbar callback; the values are read elsewhere, so there is
        # nothing to do here beyond keeping OpenCV's API satisfied.
        print("")

    # creating a window for HUE selector
    cv2.namedWindow("Color detectors")
    trackbar_specs = (
        ("Upper Hue", 153, 180),
        ("Upper Saturation", 255, 255),
        ("Upper Value", 255, 255),
        ("Lower Hue", 64, 180),
        ("Lower Saturation", 72, 255),
        ("Lower Value", 49, 255),
    )
    for label, initial, maximum in trackbar_specs:
        cv2.createTrackbar(label, "Color detectors", initial, maximum, setValues)
|
[
"cv2.createTrackbar",
"cv2.namedWindow"
] |
[((120, 154), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Color detectors"""'], {}), "('Color detectors')\n", (135, 154), False, 'import cv2\n'), ((160, 231), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Upper Hue"""', '"""Color detectors"""', '(153)', '(180)', 'setValues'], {}), "('Upper Hue', 'Color detectors', 153, 180, setValues)\n", (178, 231), False, 'import cv2\n'), ((237, 315), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Upper Saturation"""', '"""Color detectors"""', '(255)', '(255)', 'setValues'], {}), "('Upper Saturation', 'Color detectors', 255, 255, setValues)\n", (255, 315), False, 'import cv2\n'), ((321, 394), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Upper Value"""', '"""Color detectors"""', '(255)', '(255)', 'setValues'], {}), "('Upper Value', 'Color detectors', 255, 255, setValues)\n", (339, 394), False, 'import cv2\n'), ((400, 470), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Lower Hue"""', '"""Color detectors"""', '(64)', '(180)', 'setValues'], {}), "('Lower Hue', 'Color detectors', 64, 180, setValues)\n", (418, 470), False, 'import cv2\n'), ((476, 553), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Lower Saturation"""', '"""Color detectors"""', '(72)', '(255)', 'setValues'], {}), "('Lower Saturation', 'Color detectors', 72, 255, setValues)\n", (494, 553), False, 'import cv2\n'), ((559, 631), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Lower Value"""', '"""Color detectors"""', '(49)', '(255)', 'setValues'], {}), "('Lower Value', 'Color detectors', 49, 255, setValues)\n", (577, 631), False, 'import cv2\n')]
|
import two
# Demo: construct a `ct` instance from module `two` and call its fun() method.
two.ct().fun()
|
[
"two.ct"
] |
[((11, 19), 'two.ct', 'two.ct', ([], {}), '()\n', (17, 19), False, 'import two\n')]
|
#######################
# <NAME> #
# perform.py #
# Copyright 2018-2020 #
# <NAME> #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
from lib.color import *
import random
NAME = "perform"
CATEGORIES = ["actions","users"]
ALIASES = ["miracle", "cast", "ritual"]
SCOST=0
USAGE = "perform <ritual> <optional ...>"
DESCRIPTION = """Perform the ritual called <ritual>.
Current rituals: telepathy, identify, reveal, seer, ghost, cleanse, whirlpool.
TELEPATHY can send someone an anonymous message.
IDENTIFY can show you additional information about an object.
REVEAL can reveal hidden things in a room.
SEER can show you information about the location of someone.
GHOST can hide you almost completely for continual spirit cost.
CLEANSE can cleanse someone and the cursed items they have.
WHIRLPOOL can teleport a sleeping player to a random room.
Ex. `perform telepathy seisatsu Hello there!`
Ex1. `perform reveal`"""
def COMMAND(console, args):
    """Dispatch and perform the named ritual for the acting user.

    :param console: the invoking player's console/session object.
    :param args: argument list; ``args[0]`` selects the ritual, the rest
        are ritual-specific (target user name, message text, ...).
    :return: True on success, False on any failure or refusal.
    """
    # Perform initial checks.
    if not COMMON.check(NAME, console, args, argmin=1, awake=True):
        return False
    # WHIRLPOOL can teleport a sleeping player to a random room.
    if args[0]=="whirlpool":
        SCOST=5  # NOTE(review): local assignment shadows the module-level SCOST
        if not COMMON.check(NAME, console, args, argmin=2, spiritcost=SCOST, awake=True):
            return False
        thisreceiver=' '.join(args[1:])
        targetuser = COMMON.check_user(NAME, console, thisreceiver, room=True, online=True, live=True, reason=False,
                                       wizardskip=["room", "online"])
        if not targetuser:
            # Check for a partial user match, and try running again if there's just one.
            partial = COMMON.match_partial(NAME, console, thisreceiver.lower(), "user")
            if partial:
                partial=["whirlpool"]+partial
                return COMMAND(console, partial)
            console.msg("{0}: No such user in this room.".format(NAME))
            return False
        # Found the user, let's teleport them away.
        userconsole = console.shell.console_by_username(targetuser["name"])
        if not userconsole:
            return False
        elif userconsole["posture"]=="sleeping":
            console.shell.broadcast_room(console,"{0} whispers some words in the ears of {1}.".format(console.user["nick"],targetuser["nick"]))
            # NOTE(review): if the rooms table is empty, random.choice raises
            # IndexError before the `if not destroom` check below can fire.
            destroom=random.choice(console.database.rooms.all())
            thisroom = COMMON.check_room(NAME, console, console.user["room"])
            # Somehow we got a nonexistent room. Log and report it.
            if not destroom:
                console.msg("{0}: ERROR: Tried to teleport a sleeper into a nonexistent room!".format(NAME))
                console.log.error("Tried to teleport a sleeper into a nonexistent room!")
            # Proceed with teleportation.
            else:
                if userconsole["posture_item"]: userconsole["posture_item"]=""
                # Remove us from the current room.
                if targetuser["name"] in thisroom["users"]:
                    thisroom["users"].remove(targetuser["name"])
                # Add us to the destination room.
                if targetuser["name"] not in destroom["users"]:
                    destroom["users"].append(targetuser["name"])
                # Broadcast our teleportation to the origin room.
                console.shell.broadcast_room(console, "{0} vanished from the room.".format(targetuser["nick"]))
                # Set our current room to the new room.
                targetuser["room"] = destroom["id"]
                # Broadcast our arrival to the destination room, but not to ourselves.
                console.shell.broadcast_room(userconsole, "{0} appeared.".format(targetuser["nick"]))
                # Save the origin room, the destination room, and our user document.
                console.database.upsert_room(thisroom)
                console.database.upsert_room(destroom)
                console.database.upsert_user(targetuser)
                # Update console's exit list.
                userconsole.exits = []
                for exi in range(len(destroom["exits"])):
                    userconsole.exits.append(destroom["exits"][exi]["name"])
        else:
            if userconsole.user["pronouns"]=="male":
                console.msg("He is not asleep.")
            elif userconsole.user["pronouns"]=="female":
                console.msg("She is not asleep.")
            elif userconsole.user["pronouns"]=="neutral":
                console.msg("They are not asleep.")
            else: console.msg("{0} is not asleep.".format(userconsole.user["pronouns"].capitalize()))
            return False
    # CLEANSE can cleanse someone and the cursed items they have.
    elif args[0]=="cleanse":
        SCOST=5
        # Should we be able to cleanse ourselves?
        #if thisreceiver==console.user["name"] or thisreceiver==console.user["nick"] or thisreceiver==console.user["nick"].lower():
        #    console.msg("Can't cleanse yourself.")
        #    return False
        if not COMMON.check(NAME, console, args, argmin=2, spiritcost=SCOST, awake=True):
            return False
        thisreceiver = ' '.join(args[1:])
        targetuser = COMMON.check_user(NAME, console, thisreceiver, room=True, online=True, live=True, reason=False,
                                       wizardskip=["room", "online"])
        if not targetuser:
            # Check for a partial user match, and try running again if there's just one.
            partial = COMMON.match_partial(NAME, console, thisreceiver, "user", message=False)
            if partial:
                return COMMAND(console,["cleanse"]+partial)
            console.msg("{0}: No such user in this room.".format(NAME))
            return False
        if console.user["name"]==targetuser["name"]:
            if console.user["pronouns"]=="male":
                msg = "{0} focuses on himself for a moment.".format(console.user["nick"])
            elif console.user["pronouns"]=="female":
                msg = "{0} focuses on herself for a moment.".format(console.user["nick"])
            elif console.user["pronouns"]=="neutral":
                msg = "{0} focuses on themself for a moment.".format(console.user["nick"])
            else:
                msg = "{0} focuses on {1}self for a moment.".format(console.user["nick"],console.user["pronouno"])
        else:
            msg = "{0} focuses on {1} for a moment.".format(console.user["nick"],targetuser["nick"])
        console.shell.broadcast_room(console, msg)
        # Strip the curse flag from every cursed item the target carries.
        for it in targetuser["inventory"]:
            thisitem = COMMON.check_item(NAME, console, it, owner=False, holding=False)
            if thisitem["cursed"]["enabled"]:
                thisitem["cursed"]["enabled"]=False
                console.database.upsert_item(thisitem)
        if not console.user["name"]==targetuser["name"]:
            console.shell.msg_user(targetuser["name"],"{0} cleansed some of your items.".format(console.user["nick"]))
            if targetuser["pronouns"]=="male":
                console.msg("You cleansed some of his items.")
            elif targetuser["pronouns"]=="female":
                console.msg("You cleansed some of her items.")
            elif targetuser["pronouns"]=="neutral":
                console.msg("You cleansed some of their items.")
            else:
                console.msg("You cleansed some of {0} items.".format(targetuser["pronouno"]))
        else:
            console.msg("You cleansed some of your items.")
        return True
    # SEER can show you information about the location of someone.
    elif args[0]=="seer":
        SCOST=5
        if not COMMON.check(NAME, console, args, argmin=2, spiritcost=SCOST):
            return False
        thisreceiver = ' '.join(args[1:])
        # Make sure the named user exists and is online.
        targetuser = COMMON.check_user(NAME, console, thisreceiver, online=True)
        if not targetuser:
            # Check for a partial user match, and try running again if there's just one.
            partial = COMMON.match_partial(NAME, console, thisreceiver, "user", message=False)
            if partial:
                return COMMAND(console,["seer"]+partial)
            console.msg("{0}: No such user was found.".format(NAME))
            return False
        # Look up room.
        targetroom = COMMON.check_room(NAME, console, roomid=targetuser["room"])
        msg = "{0} looks into the distance for a moment.".format(console.user["nick"])
        console.shell.broadcast_room(console, msg)
        console.msg("You see a vision... \n{0}\nThe vision ends...".format(targetroom["desc"]))
        return True
    # GHOST can hide you almost completely for continual spirit cost.
    elif args[0]=="ghost":
        SCOST=50
        if not COMMON.check(NAME, console, args, argmax=1, spiritcost=SCOST):
            return False
        # We are ghosts already, lets appear.
        if console.user["ghost"]:
            # Refund the spirit that COMMON.check just charged for un-ghosting.
            console.user["spirit"]+=50
            msg = "{0} suddenly appears.".format(console.user["nick"])
            console.shell.broadcast_room(console, msg)
            console.user["ghost"]=False
        # We arent ghosts, lets disappear.
        else:
            msg = "{0} mutters a few words and disappears.".format(console.user["nick"])
            console.shell.broadcast_room(console, msg)
            console.user["ghost"]=True
        console.database.upsert_user(console.user)
        return True
    # REVEAL can reveal hidden things in a room.
    elif args[0]=="reveal":
        SCOST=5
        if not COMMON.check(NAME, console, args, argmax=1, spiritcost=SCOST):
            return False
        msg = "{0} tries to reveal hidden things with a ritual.".format(console.user["nick"])
        console.shell.broadcast_room(console, msg)
        destroom = COMMON.check_room(NAME,console)
        dexits = destroom["exits"]
        for dex in range(len(dexits)):
            # Check for randomized chance
            if dexits[dex]["chance"] and dexits[dex]["hidden"]==True:
                if random.randint(1,dexits[dex]["chance"])==1:
                    dexits[dex]["hidden"]=False
        # Random items check.
        ditems = destroom["items"]
        for dit in ditems:
            dit = console.database.item_by_id(dit)
            # Check for randomized chance
            if dit["chance"] and dit["hidden"]==True:
                if dit["truehide"]==True:
                    console.msg("You sense {0} being hidden around here.".format(COMMON.format_item(NAME, dit["name"])))
                elif random.randint(1,dit["chance"])==1:
                    dit["hidden"]=False
        # Should we be able to reveal ghosts?
        #for uss in destroom["users"]:
        #    duss = console.database.user_by_name(uss)
        #    if duss["ghost"]:
        #        if random.randint(1,4)==1:
        #            duss["ghost"]=False
        #            console.shell.msg_user(duss["name"],"Someone revealed you.")
        return True
    # IDENTIFY can show you additional information about an object.
    elif args[0]=="identify":
        SCOST=5
        found_something = False
        partials = []
        target=' '.join(args[1:])
        if not COMMON.check(NAME, console, args, argmin=2, spiritcost=SCOST):
            return False
        # Lookup the current room and perform room checks.
        thisroom = COMMON.check_room(NAME, console)
        if not thisroom:
            return False
        # It wasn't us, so maybe it's an item in the room.
        for itemid in thisroom["items"]:
            item = console.database.item_by_id(itemid)
            # A reference was found to a nonexistent item. Report this and continue searching.
            if not item:
                console.log.error("Item referenced in room does not exist: {room} :: {item}", room=console.user["room"],
                                  item=itemid)
                console.msg("{0}: ERROR: Item referenced in this room does not exist: {1}".format(NAME, itemid))
                continue
            attributes = []
            # Record partial matches.
            if target in item["name"].lower() or target.replace("the ", "", 1) in item["name"].lower():
                partials.append(item["name"].lower())
            # It was an item in the room. Show the item's name, ID, owners, description, and attributes.
            if target in [item["name"].lower(), "the " + item["name"].lower()]:
                # Only enumerate item attributes if we are the item owner or a wizard.
                if item["duplified"]:
                    attributes.append("This thing can be anywhere, somehow at the same time.")
                if item["cursed"]["enabled"]:
                    attributes.append("A dark presence haunts it.")
                if item["glued"]:
                    attributes.append("This object can't be carried with you.")
                if item["truehide"]:
                    attributes.append("Maybe it's invisible, but something truly hides it from sight.")
                if item["hidden"]:
                    attributes.append("Somehow it blends into it's environment.")
                if item["lang"]:
                    attributes.append("You sense that this thing can teach you and alter your language.")
                if item["container"]["enabled"]:
                    attributes.append("Something else could easily fit into the insides of this object.")
                if item["telekey"]:
                    attributes.append("Using this thing would take you somewhere else.")
                # Send the info for this item.
                if len(attributes)>0:
                    console.msg("You sense the {0}. {1}".format(item["name"], ' '.join(attributes)))
                else:
                    console.msg("You sense the {0}.".format(item["name"]))
                console.msg("It seems to be connected to {0}.".format(', '.join(item["owners"])))
                # List content if it's a container
                if item["container"]["enabled"]:
                    if len(item["container"]["inventory"])>0:
                        console.msg("{0} seems to contain some items.".format(item["name"].capitalize()))
                    else:
                        console.msg("{0} seems to be empty.".format(item["name"].capitalize()))
                found_something = True
                msg = "{0} performs a ritual of knowledge.".format(console.user["nick"])
                console.shell.broadcast_room(console, msg)
                return True
        # Maybe it's an item in our inventory.
        for itemid in console.user["inventory"]:
            item = console.database.item_by_id(itemid)
            # A reference was found to a nonexistent item. Report this and continue searching.
            if not item:
                console.log.error("Item referenced in user inventory does not exist: {user} :: {item}",
                                  user=console.user["name"], item=itemid)
                console.msg("{0}: ERROR: Item referenced in your inventory does not exist: {1}".format(NAME, itemid))
                continue
            attributes = []
            # Record partial matches.
            if target in item["name"].lower() or target.replace("the ", "", 1) in item["name"].lower():
                partials.append(item["name"].lower())
            # It was an item in our inventory. Show the item's name, ID, owners, description, and attributes,
            # but only if we didn't already see it in the current room. Also check if the user prepended "the ".
            if target in [item["name"].lower(), "the " + item["name"].lower()]:
                # Only enumerate item attributes if we are the item owner or a wizard.
                if item["duplified"]:
                    attributes.append("This thing can be anywhere, somehow at the same time.")
                if item["cursed"]["enabled"]:
                    attributes.append("A dark presence haunts it.")
                if item["glued"]:
                    attributes.append("This object can't be carried with you.")
                if item["truehide"]:
                    attributes.append("Maybe it's invisible, but something truly hides it from sight.")
                if item["hidden"]:
                    attributes.append("Somehow it blends into it's environment.")
                if item["lang"]:
                    attributes.append("You sense that this thing can teach you and alter your language.")
                if item["container"]["enabled"]:
                    attributes.append("Something else could easily fit into the insides of this object.")
                if item["telekey"]:
                    attributes.append("Using this thing would take you somewhere else.")
                # Send the info for this item.
                if len(attributes)>0:
                    console.msg("You sense the {0}. {1}".format(item["name"], ' '.join(attributes)))
                else:
                    console.msg("You sense the {0}.".format(item["name"]))
                console.msg("It seems to be connected to {0}.".format(', '.join(item["owners"])))
                # Description exists, so show it.
                #if item["desc"]:
                #    console.msg(item["desc"])
                # List content if it's a container
                if item["container"]["enabled"]:
                    if len(item["container"]["inventory"])>0:
                        console.msg("{0} seems to contain some items.".format(item["name"].capitalize()))
                    else:
                        console.msg("{0} seems to be empty.".format(item["name"].capitalize()))
                found_something = True
                msg = "{0} performs a ritual of knowledge.".format(console.user["nick"])
                console.shell.broadcast_room(console, msg)
                return True
        # We didn't find anything by that name. See if we found partial matches.
        if not found_something:
            # Eliminate duplicate matches.
            if partials:
                partials = list(dict.fromkeys(partials))
            # We got exactly one partial match. Assume that one.
            if len(partials) == 1:
                #console.msg("Assuming {0}.".format(partials[0]))
                # Refund the cost charged above; the recursive call re-charges it.
                console.user["spirit"]+=SCOST
                partials[0]="identify "+partials[0]
                return COMMAND(console, partials[0].split(' '))
            # We got up to 5 partial matches. List them.
            elif partials and len(partials) <= 5:
                console.msg("{0}: Did you mean one of: {1}".format(NAME, ', '.join(partials)))
                return False
            # We got too many matches.
            elif len(partials) > 5:
                console.msg("{0}: Too many possible matches.".format(NAME))
                return False
            # Really nothing.
            else:
                console.msg("{0}: No such thing: {1}".format(NAME, ' '.join(args[1:])))
                return False
    # TELEPATHY can send someone an anonymous message.
    elif args[0]=="telepathy":
        SCOST=5
        if not COMMON.check(NAME, console, args, argmin=3, spiritcost=SCOST):
            return False
        # Make sure the named user exists and is online.
        targetuser = COMMON.check_user(NAME, console, args[1].lower(), online=True)
        if not targetuser:
            return False
        # Finished. Message the user, and echo the message to ourselves, if it wasn't a self-message.
        console.shell.msg_user(args[1].lower(), mcolor(CBYELLO,"You hear a whisper in your mind: '{0}'".format(' '.join(args[2:])),targetuser["colors"]))
        if targetuser["name"] != console.user["name"]:
            console.msg(mcolor(CBYELLO,"You plant a message in the mind of {0}, that says: '{1}'".format(targetuser["name"], ' '.join(args[2:])),console.user["colors"]))
        msg = "{0} focuses for a moment to perform a ritual.".format(console.user["nick"])
        console.shell.broadcast_room(console, msg)
        return True
    # Unknown ritual name.
    else:
        console.msg("You never heard of such a ritual.")
        return False
|
[
"random.randint"
] |
[((11296, 11336), 'random.randint', 'random.randint', (['(1)', "dexits[dex]['chance']"], {}), "(1, dexits[dex]['chance'])\n", (11310, 11336), False, 'import random\n'), ((11813, 11845), 'random.randint', 'random.randint', (['(1)', "dit['chance']"], {}), "(1, dit['chance'])\n", (11827, 11845), False, 'import random\n')]
|
# Copyright 2021 Adobe
# All Rights Reserved.
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it.
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib
# matplotlib.use('TkAgg')
import albumentations as A
from skimage import data
import os
from copy import deepcopy
import random
import time
from PIL import Image
from skimage.color import label2rgb
# import beacon_aug as BA
from . import properties
''' flatten the pipeline tree'''
def extract_single_operation(augPipeline):
    """Flatten a (possibly nested) augmentation pipeline.

    :param augPipeline: an albumentations-style composed pipeline.
    :return: list of serialized single-operation dicts, each carrying
        the pipeline's ``__version__`` and one leaf ``transform``.
    """
    def flatten(node, acc=None):
        '''use DFS to unfold the operations'''
        # Fresh accumulator per call; a mutable default would be shared.
        if acc is None:
            acc = []
        for operation in node["transforms"]:  # "OneOf" or "OneOrOther", etc
            class_name = operation['__class_fullname__']
            if "." in class_name and class_name.split(".")[-2] == "composition":
                # Composition node: recurse into its children.
                flatten(operation, acc)
                continue
            acc.append(operation)
        return acc

    transform_dict = A.to_dict(augPipeline)
    flatten_ls = flatten(transform_dict["transform"])
    return [{'__version__': transform_dict['__version__'], 'transform': opr}
            for opr in flatten_ls]
def screenshot_pipeline(augPipeline, image, save_fig_path=None):
    ''' Visualize an augmentation pipeline by displaying the extreme case for all the parameters

    Each operator gets one row with its lower-limit and upper-limit
    rendering applied to *image*.

    :param augPipeline: albumentations-compatible composed pipeline.
    :param image: input image (ndarray) fed to every operator.
    :param save_fig_path: directory to save the screenshot into (optional).
    :return: the matplotlib figure.
    '''
    # get the flattened operator sequence avoiding hierarchical structure
    single_operation_ls = extract_single_operation(augPipeline)
    numOfOperation = len(single_operation_ls)
    fig, axs = plt.subplots(numOfOperation, 3,
                            figsize=(6, 2*numOfOperation),
                            constrained_layout=True)
    axs[0, 1].set_title("Lower Limit")
    axs[0, 2].set_title("Upper Limit")
    for i, single_operation in enumerate(single_operation_ls):
        # Extract the upper and lower limit
        transform_name = single_operation["transform"]['__class_fullname__'].split(".")[-1]
        # deep copy to avoid pointing save location in dict
        lowerAndUpper = [single_operation, deepcopy(single_operation)]
        limit_para_name = None
        # Extract all the limit parameters
        for para in single_operation["transform"]:
            if para == "p":  # change prob to 1 to make it always happen
                lowerAndUpper[0]["transform"][para] = 1
                lowerAndUpper[1]["transform"][para] = 1
            if "limit" in para:
                limit_para_name = para
                original_values = list(single_operation["transform"][para])
                # Pin the range to a single value at each end of the interval.
                lowerAndUpper[0]["transform"][para] = [original_values[0]]*2
                lowerAndUpper[1]["transform"][para] = [original_values[1]]*2
        # plot
        for lu in range(2):  # lower or upper limit
            lu_transform = A.from_dict(lowerAndUpper[lu])
            axs[i, lu+1].imshow(lu_transform(image=image)["image"])
            axs[i, lu+1].axis("off")
        if limit_para_name:
            axs[i, 0].text(0.15, 0.5, transform_name+"\n" + limit_para_name+":" +
                           str(lowerAndUpper[0]["transform"][limit_para_name][0]) + "," +
                           str(lowerAndUpper[1]["transform"][limit_para_name][1]), dict(size=10))
        else:
            axs[i, 0].text(0.15, 0.5, transform_name, dict(size=10))
        axs[i, 0].axis("off")
    if save_fig_path:
        figname = os.path.join(save_fig_path, "aug_pipeline-screenshot.png")
        print("\n...screenshot figure save as : ", figname)
        plt.savefig(figname)
    return fig
def screenshot_library(BA_operator, image_data, save_fig_path=None, individual_fig=False, **kwargs):
    ''' Visualize the augmentation result comparison for all available libraries

    :param BA_operator: a beacon_aug operator class (e.g. ``BA.Brightness``).
    :param image_data: input image (ndarray) to augment.
    :param save_fig_path: directory to save figures into (optional).
    :param individual_fig: save one image per library instead of one
        combined comparison figure.
    :param kwargs: forwarded to the operator's constructor.
    :return: ``(fig, attributes_result)`` where attributes_result holds
        per-library runtime and differentiability flags.

    e.g.
    ----
    import beacon_aug as BA
    from beacon_aug import screenshot
    fig, __ = BA.screenshot.screenshot_library(BA.Brightness(), image_data=image)
    fig.show()
    '''
    avail_libraries = BA_operator(**kwargs).avail_libraries
    numOfLibraries = len(avail_libraries)
    fig, axs = plt.subplots(2, 1 + numOfLibraries,
                            figsize=(4*numOfLibraries, 4),
                            constrained_layout=True)
    fig.suptitle("beacon_aug."+BA_operator.__name__ + " with " +
                 str(kwargs))
    axs[0][0].imshow(image_data)
    axs[0][0].set_title("Raw")
    axs[1][0].text(0.3, 0.5, "Difference to\n" + "raw")
    axs[1][0].axis("off")
    attributes_result = {"runtime": {}, "differentiable": {}}
    for i, library in enumerate(avail_libraries):
        # Time a single application of the operator through this backend.
        t_before = time.time()
        op = BA_operator(always_apply=False, p=1, library=library, **kwargs)
        image_auged = op(image=image_data)["image"]
        t_after = time.time()
        runtime = t_after - t_before
        attributes_result["runtime"][library] = runtime
        attributes_result["differentiable"][library] = properties.isOpDifferentiable(op)
        axs[0][1+i].set_title(library + ":" + '{0:.1f}'.format(runtime*1000) + " (ms)")
        axs[0][1+i].imshow(image_auged)
        # display the difference of original to augmented images
        if image_auged.shape == image_data.shape:
            axs[1][1+i].imshow(image_auged - image_data)
        if save_fig_path and individual_fig == True:
            img_name = os.path.join(save_fig_path, BA_operator.__name__+"-" + library+".jpeg")
            if os.path.isfile(img_name):
                print("\n...screenshot individual figure already existed as : ", img_name)
            else:
                if image_auged.min() < 0:
                    # Normalized output: shift/scale into [0, 1] so imsave accepts it.
                    image_auged = image_auged - image_auged.min()
                    image_auged = image_auged/image_auged.max()
                plt.imsave(img_name, image_auged)
                print("\n...screenshot individual figure save as : ", img_name)
    fig.subplots_adjust(wspace=0)
    if save_fig_path and individual_fig == False:
        fig_name = os.path.join(save_fig_path, BA_operator.__name__+"aug_library-screenshot.png")
        print("\n...screenshot figure save as : ", fig_name)
        plt.savefig(fig_name)
    return fig, attributes_result
def visualize_bboxes(img, bboxes, color=(255, 0, 0), thickness=2, **kwargs):
    '''Return a copy of *img* with each bounding box drawn as a rectangle.

    :param img: source image (left untouched; a copy is drawn on).
    :param bboxes: iterable of ``(x_min, y_min, x_max, y_max)`` boxes;
        a trailing 5th element (class label) is ignored.
    :param color: rectangle color, default red (255, 0, 0).
    :param thickness: rectangle line thickness.
    '''
    image = img.copy()
    for bbox in bboxes:
        if len(bbox) == 5:
            bbox = bbox[:4]  # drop the trailing class label
        x_min, y_min, x_max, y_max = map(int, bbox)  # cv2 needs integer coords
        # cv2.rectangle draws on `image` in place; no reassignment needed.
        cv2.rectangle(image, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness)
    return image
def visualize_kps(img, kps, color=(0, 255, 0), key_point_diameter=2, **kwargs):
'''
'''
image = img.copy()
for kp in kps:
x, y = kp
image = cv2.circle(image, (int(x), int(y)), key_point_diameter, color, -1)
return image
def visualize_titles(img, bbox, title, color=(255, 0, 0), thickness=2, font_thickness=2, font_scale=0.35, **kwargs):
x_min, y_min, x_max, y_max = map(int, bbox) # x_min, y_min, w, h = bbox
# x_min, x_max, y_min, y_max = int(x_min), int(x_min + w), int(y_min), int(y_min + h)
((text_width, text_height), _) = cv2.getTextSize(
title, cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)
cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)),
(x_min + text_width, y_min), color=(255, 0, 0))
cv2.putText(img, title, (x_min, y_min - int(0.3 * text_height)), cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),
font_thickness, lineType=cv2.LINE_AA)
return img
def visualize_targets(image, mask=None, bboxes=None, keypoints=None, image0=None):
''' Stack all the targets '''
target_list = []
if image.ndim == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
target_list.append(image.copy())
if image0 is not None:
if image0.ndim == 2:
image0 = cv2.cvtColor(image0, cv2.COLOR_GRAY2RGB)
target_list.append(image0)
if mask is not None:
target_list.append(cv2.cvtColor((mask*255).astype('uint8'), cv2.COLOR_GRAY2RGB))
if bboxes is not None:
target_list.append(visualize_bboxes(image, bboxes, thickness=10))
if keypoints is not None:
target_list.append(visualize_kps(image, keypoints, key_point_diameter=15))
return np.hstack(target_list)
def augment_and_show(aug, image, mask=None, bboxes=[], keypoints=[], categories=[], category_id_to_name=[], filename=None,
font_scale_orig=0.35, font_scale_aug=0.35, key_point_diameter=15,
show_title=True, **kwargs):
"""
Use from: https://albumentations.ai/docs/examples/showcase/
visualize the image,(mask), (bbox),(kp) superimposed result before and after augmentation
Args:
aug: augmentation pipelineg
image: single image
mask: original mask
bbox: original bounding boxes
keypoints: original keypoints
output:
augmented: augmented image components
f: visualize image
"""
if mask is None:
augmented = aug(image=image, bboxes=bboxes,
keypoints=keypoints, category_id=categories)
else:
augmented = aug(image=image, mask=mask, bboxes=bboxes,
keypoints=keypoints, category_id=categories)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# image_aug = cv2.cvtColor(augmented['image'], cv2.COLOR_BGR2RGB)
image_aug = augmented['image']
visualize_bboxes(image, bboxes, **kwargs)
visualize_bboxes(image_aug, augmented['bboxes'], **kwargs)
visualize_kps(image, keypoints, **kwargs)
visualize_kps(image, augmented["keypoints"], **kwargs)
if show_title:
for bbox, cat_id in zip(bboxes, categories):
visualize_titles(
image, bbox, category_id_to_name[cat_id], font_scale=font_scale_orig, **kwargs)
for bbox, cat_id in zip(augmented['bboxes'], augmented['category_id']):
visualize_titles(
image_aug, bbox, category_id_to_name[cat_id], font_scale=font_scale_aug, **kwargs)
if mask is None:
f, ax = plt.subplots(1, 2, figsize=(16, 8))
ax[0].imshow(image)
ax[0].set_title('Original image')
ax[1].imshow(image_aug)
ax[1].set_title('Augmented image')
else:
f, ax = plt.subplots(2, 2, figsize=(16, 16))
if len(mask.shape) != 3:
mask = label2rgb(mask, bg_label=0)
mask_aug = label2rgb(augmented['mask'], bg_label=0)
else:
import pdb
pdb.set_trace()
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
mask_aug = cv2.cvtColor(augmented['mask'], cv2.COLOR_BGR2RGB)
ax[0, 0].imshow(image)
ax[0, 0].set_title('Original image')
ax[0, 1].imshow(image_aug)
ax[0, 1].set_title('Augmented image')
ax[1, 0].imshow(mask, interpolation='nearest')
ax[1, 0].set_title('Original mask')
ax[1, 1].imshow(mask_aug, interpolation='nearest')
ax[1, 1].set_title('Augmented mask')
f.tight_layout()
if filename is not None:
f.savefig(filename)
return augmented, f
if __name__ == "__main__":
# Load an example image (uint8, 128x128x3).
image = data.astronaut()
# Example of an augmentation pipeline
augPipeline = A.Compose([
A.RandomCrop(256, 256),
A.OneOf([A.RGBShift(),
A.HueSaturationValue()])])
os.makedirs("tmp", exist_ok=True)
screenshot_pipeline(augPipeline, image, save_fig_path="tmp/")
|
[
"skimage.data.astronaut",
"os.path.isfile",
"matplotlib.pyplot.imsave",
"cv2.rectangle",
"os.path.join",
"albumentations.RGBShift",
"albumentations.from_dict",
"cv2.cvtColor",
"matplotlib.pyplot.subplots",
"albumentations.to_dict",
"copy.deepcopy",
"numpy.hstack",
"os.makedirs",
"skimage.color.label2rgb",
"cv2.getTextSize",
"albumentations.HueSaturationValue",
"time.time",
"pdb.set_trace",
"albumentations.RandomCrop",
"matplotlib.pyplot.savefig"
] |
[((1124, 1146), 'albumentations.to_dict', 'A.to_dict', (['augPipeline'], {}), '(augPipeline)\n', (1133, 1146), True, 'import albumentations as A\n'), ((1672, 1765), 'matplotlib.pyplot.subplots', 'plt.subplots', (['numOfOperation', '(3)'], {'figsize': '(6, 2 * numOfOperation)', 'constrained_layout': '(True)'}), '(numOfOperation, 3, figsize=(6, 2 * numOfOperation),\n constrained_layout=True)\n', (1684, 1765), True, 'import matplotlib.pyplot as plt\n'), ((4204, 4301), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1 + numOfLibraries)'], {'figsize': '(4 * numOfLibraries, 4)', 'constrained_layout': '(True)'}), '(2, 1 + numOfLibraries, figsize=(4 * numOfLibraries, 4),\n constrained_layout=True)\n', (4216, 4301), True, 'import matplotlib.pyplot as plt\n'), ((7725, 7801), 'cv2.getTextSize', 'cv2.getTextSize', (['title', 'cv2.FONT_HERSHEY_SIMPLEX', 'font_scale', 'font_thickness'], {}), '(title, cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)\n', (7740, 7801), False, 'import cv2\n'), ((8886, 8908), 'numpy.hstack', 'np.hstack', (['target_list'], {}), '(target_list)\n', (8895, 8908), True, 'import numpy as np\n'), ((11857, 11873), 'skimage.data.astronaut', 'data.astronaut', ([], {}), '()\n', (11871, 11873), False, 'from skimage import data\n'), ((12058, 12091), 'os.makedirs', 'os.makedirs', (['"""tmp"""'], {'exist_ok': '(True)'}), "('tmp', exist_ok=True)\n", (12069, 12091), False, 'import os\n'), ((3546, 3604), 'os.path.join', 'os.path.join', (['save_fig_path', '"""aug_pipeline-screenshot.png"""'], {}), "(save_fig_path, 'aug_pipeline-screenshot.png')\n", (3558, 3604), False, 'import os\n'), ((3673, 3693), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {}), '(figname)\n', (3684, 3693), True, 'import matplotlib.pyplot as plt\n'), ((4832, 4843), 'time.time', 'time.time', ([], {}), '()\n', (4841, 4843), False, 'import time\n'), ((4992, 5003), 'time.time', 'time.time', ([], {}), '()\n', (5001, 5003), False, 'import time\n'), ((6321, 6406), 'os.path.join', 
'os.path.join', (['save_fig_path', "(BA_operator.__name__ + 'aug_library-screenshot.png')"], {}), "(save_fig_path, BA_operator.__name__ + 'aug_library-screenshot.png'\n )\n", (6333, 6406), False, 'import os\n'), ((6469, 6490), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_name'], {}), '(fig_name)\n', (6480, 6490), True, 'import matplotlib.pyplot as plt\n'), ((7038, 7129), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x_min, y_min)', '(x_max, y_max)'], {'color': 'color', 'thickness': 'thickness'}), '(image, (x_min, y_min), (x_max, y_max), color=color, thickness\n =thickness)\n', (7051, 7129), False, 'import cv2\n'), ((8315, 8354), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2RGB'], {}), '(image, cv2.COLOR_GRAY2RGB)\n', (8327, 8354), False, 'import cv2\n'), ((10712, 10747), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 8)'}), '(1, 2, figsize=(16, 8))\n', (10724, 10747), True, 'import matplotlib.pyplot as plt\n'), ((10921, 10957), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(16, 16)'}), '(2, 2, figsize=(16, 16))\n', (10933, 10957), True, 'import matplotlib.pyplot as plt\n'), ((2198, 2224), 'copy.deepcopy', 'deepcopy', (['single_operation'], {}), '(single_operation)\n', (2206, 2224), False, 'from copy import deepcopy\n'), ((2958, 2988), 'albumentations.from_dict', 'A.from_dict', (['lowerAndUpper[lu]'], {}), '(lowerAndUpper[lu])\n', (2969, 2988), True, 'import albumentations as A\n'), ((5607, 5682), 'os.path.join', 'os.path.join', (['save_fig_path', "(BA_operator.__name__ + '-' + library + '.jpeg')"], {}), "(save_fig_path, BA_operator.__name__ + '-' + library + '.jpeg')\n", (5619, 5682), False, 'import os\n'), ((5694, 5718), 'os.path.isfile', 'os.path.isfile', (['img_name'], {}), '(img_name)\n', (5708, 5718), False, 'import os\n'), ((8470, 8510), 'cv2.cvtColor', 'cv2.cvtColor', (['image0', 'cv2.COLOR_GRAY2RGB'], {}), '(image0, cv2.COLOR_GRAY2RGB)\n', (8482, 8510), False, 'import 
cv2\n'), ((11011, 11038), 'skimage.color.label2rgb', 'label2rgb', (['mask'], {'bg_label': '(0)'}), '(mask, bg_label=0)\n', (11020, 11038), False, 'from skimage.color import label2rgb\n'), ((11062, 11102), 'skimage.color.label2rgb', 'label2rgb', (["augmented['mask']"], {'bg_label': '(0)'}), "(augmented['mask'], bg_label=0)\n", (11071, 11102), False, 'from skimage.color import label2rgb\n'), ((11152, 11167), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (11165, 11167), False, 'import pdb\n'), ((11187, 11224), 'cv2.cvtColor', 'cv2.cvtColor', (['mask', 'cv2.COLOR_BGR2RGB'], {}), '(mask, cv2.COLOR_BGR2RGB)\n', (11199, 11224), False, 'import cv2\n'), ((11248, 11298), 'cv2.cvtColor', 'cv2.cvtColor', (["augmented['mask']", 'cv2.COLOR_BGR2RGB'], {}), "(augmented['mask'], cv2.COLOR_BGR2RGB)\n", (11260, 11298), False, 'import cv2\n'), ((11955, 11977), 'albumentations.RandomCrop', 'A.RandomCrop', (['(256)', '(256)'], {}), '(256, 256)\n', (11967, 11977), True, 'import albumentations as A\n'), ((6103, 6136), 'matplotlib.pyplot.imsave', 'plt.imsave', (['img_name', 'image_auged'], {}), '(img_name, image_auged)\n', (6113, 6136), True, 'import matplotlib.pyplot as plt\n'), ((11996, 12008), 'albumentations.RGBShift', 'A.RGBShift', ([], {}), '()\n', (12006, 12008), True, 'import albumentations as A\n'), ((12026, 12048), 'albumentations.HueSaturationValue', 'A.HueSaturationValue', ([], {}), '()\n', (12046, 12048), True, 'import albumentations as A\n')]
|
'''
Vortex OpenSplice
This software and documentation are Copyright 2006 to TO_YEAR ADLINK
Technology Limited, its affiliated companies and licensors. All rights
reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import time
import redis
from api.emulation import (
Config,
EmulationStatus,
WorkerStatus
)
from .emulation_handler import abort_handled
from .utils import parser
class EmulationManager:
def __init__(self):
self.ip_collection = None
self.redis_connection = redis.StrictRedis(
host=Config.FRONTEND_IP, port=Config.REDIS_PORT, password=Config.REDIS_PASSWORD,
encoding="utf-8", decode_responses=True)
@abort_handled
def init(self, emulation_data: dict):
"""
Initialize the redis using the specific emualtion data, then set
emulation status to `init`.
Args:
emulation_data (dict): Emulation data.
"""
print('emulation initialization.')
self.redis_connection.set('emulation_status', EmulationStatus.INIT)
self.redis_connection.set('emulation_time', emulation_data['emulation_time'])
self.ip_collection = [key for key in emulation_data.keys() if 'ip' in key]
for ip in self.ip_collection:
self.redis_connection.hset(ip, 'device_name', emulation_data[ip]['device_name'])
self.redis_connection.hset(ip, 'device_settings', json.dumps(emulation_data[ip]['device_setting']))
self.redis_connection.hset(ip, 'worker_status', WorkerStatus.PREPARE)
@abort_handled
def ready(self):
"""
Waiting for all device is ready to run emulation. If all device is ready,
set the emulation status to `start`.
"""
while not self.check_worker_status(WorkerStatus.READY):
print("Waiting for all of worker is ready.")
if self.redis_connection.get('emulation_status') == EmulationStatus.ABORT:
break
else:
time.sleep(1)
@abort_handled
def start(self):
self.redis_connection.set("emulation_status", EmulationStatus.START)
while not self.check_worker_status(WorkerStatus.DONE):
print("Waiting for emulation finish.")
if self.redis_connection.get('emulation_status') == EmulationStatus.ABORT:
break
else:
time.sleep(1)
@abort_handled
def finish(self):
"""
Waiting for the emulation in each device are finished. After every device finish
the emulations, re-initialize redis table, then set emulation status to `end`.
"""
self.redis_connection.set("emulation_status", EmulationStatus.END)
print("done")
def check_worker_status(self, status):
print(status, end=": ")
worker_status_count = 0
for ip in self.ip_collection:
if self.redis_connection.hget(ip, "worker_status") == status:
worker_status_count += 1
print("{}.".format(ip), end=" ")
else:
print("{}.".format(ip), end=" ")
return worker_status_count == len(self.ip_collection)
|
[
"redis.StrictRedis",
"json.dumps",
"time.sleep"
] |
[((990, 1134), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': 'Config.FRONTEND_IP', 'port': 'Config.REDIS_PORT', 'password': 'Config.REDIS_PASSWORD', 'encoding': '"""utf-8"""', 'decode_responses': '(True)'}), "(host=Config.FRONTEND_IP, port=Config.REDIS_PORT, password\n =Config.REDIS_PASSWORD, encoding='utf-8', decode_responses=True)\n", (1007, 1134), False, 'import redis\n'), ((1913, 1961), 'json.dumps', 'json.dumps', (["emulation_data[ip]['device_setting']"], {}), "(emulation_data[ip]['device_setting'])\n", (1923, 1961), False, 'import json\n'), ((2509, 2522), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2519, 2522), False, 'import time\n'), ((2898, 2911), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2908, 2911), False, 'import time\n')]
|
from pandas import *
from ggplot import *
from datetime import datetime
def get_day(date):
return datetime.strftime(datetime.strptime(date,'%Y-%m-%d').date(),'%a')
def plot_weather_data(turnstile_weather):
'''
You are passed in a dataframe called turnstile_weather.
Use turnstile_weather along with ggplot to make a data visualization
focused on the MTA and weather data we used in assignment #3.
You should feel free to implement something that we discussed in class
(e.g., scatterplots, line plots, or histograms) or attempt to implement
something more advanced if you'd like.
Here are some suggestions for things to investigate and illustrate:
* Ridership by time of day or day of week
* How ridership varies based on Subway station (UNIT)
* Which stations have more exits or entries at different times of day
(You can use UNIT as a proxy for subway station.)
If you'd like to learn more about ggplot and its capabilities, take
a look at the documentation at:
https://pypi.python.org/pypi/ggplot/
You can check out:
https://s3.amazonaws.com/content.udacity-data.com/courses/ud359/turnstile_data_master_with_weather.csv
To see all the columns and data points included in the turnstile_weather
dataframe.
However, due to the limitation of our Amazon EC2 server, we are giving you a random
subset, about 1/3 of the actual data in the turnstile_weather dataframe.
'''
daysn = []
for the_date in turnstile_weather['DATEn']:
daysn.append(get_day(the_date))
turnstile_weather['Dayn'] = daysn
grouped = turnstile_weather.groupby('Dayn',as_index=False).sum()
plot = ggplot(grouped, aes(x='Dayn',y='ENTRIESn_hourly')) + \
geom_bar(aes(weight='ENTRIESn_hourly'), fill='blue') #your code here
return plot
|
[
"datetime.datetime.strptime"
] |
[((121, 156), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (138, 156), False, 'from datetime import datetime\n')]
|
from typing import Optional, Any, Tuple, Union, Sequence
from torch.distributions import Distribution
import torch
class JointDistribution(Distribution):
"""
Defines an object for combining multiple distributions by assuming independence, i.e. we define:
.. math::
p(x_1, x_2, ..., x_n) = p(x_1) \\cdot p(x_2) ... \\cdot p(x_n)
Example:
A basic example can be seen below, where we combine a normal and and exponential distribution:
>>> from torch.distributions import Normal, Exponential
>>> import torch
>>>
>>> distribution = JointDistribution(Normal(0.0, 1.0), Exponential(1.0))
>>> y = distribution.sample((1000,)) # should be 1000 x 2
>>>
>>> log_prob = distribution.log_prob(y)
"""
arg_constraints = {}
def __init__(self, *distributions: Distribution, indices: Sequence[Union[int, slice]] = None, **kwargs):
"""
Initializes the ``JointDistribution`` class.
Args:
distributions: Iterable of ``pytorch.distributions.Distribution`` objects.
indices: Optional parameter specifying which distribution corresponds to which column in input tensors. If
``None``, then is inferred.
kwargs: Key-worded arguments passed to base class.
"""
_indices = indices or self.infer_indices(*distributions)
event_shape = torch.Size([(_indices[-1].stop if isinstance(_indices[-1], slice) else _indices[-1] + 1)])
batch_shape = distributions[0].batch_shape
if any(d.batch_shape != batch_shape for d in distributions):
raise NotImplementedError(f"All batch shapes must be congruent!")
super(JointDistribution, self).__init__(event_shape=event_shape, batch_shape=batch_shape, **kwargs)
if any(len(d.event_shape) > 1 for d in distributions):
raise NotImplementedError(f"Currently cannot handle matrix valued distributions!")
self.distributions = distributions
self.indices = _indices
def expand(self, batch_shape, _instance=None):
return JointDistribution(*(d.expand(batch_shape) for d in self.distributions))
@property
def support(self) -> Optional[Any]:
raise NotImplementedError()
@property
def mean(self):
raise NotImplementedError()
@property
def variance(self):
raise NotImplementedError()
def cdf(self, value):
res = 0.0
for d, m in zip(self.distributions, self.indices):
res *= d.cdf(value[..., m])
return res
def icdf(self, value):
raise NotImplementedError()
def enumerate_support(self, expand=True):
raise NotImplementedError()
def entropy(self):
return sum(d.entropy() for d in self.distributions)
@staticmethod
def infer_indices(*distributions: Distribution) -> Tuple[Union[int, slice]]:
"""
Given a sequence of ``pytorch.distributions.Distribution`` objects, this method infers the indices at which to
slice an input tensor.
Args:
distributions: Sequence of ``pytorch.distributions.Distribution`` objects.
Returns:
A tuple containing indices and/or slices.
Example:
>>> from torch.distributions import Normal, Exponential
>>> import torch
>>> from pyfilter.distributions import JointDistribution
>>>
>>> distributions = Normal(0.0, 1.0), Exponential(1.0)
>>> y = torch.stack([d.sample((1000,)) for d in distributions], dim=-1)
>>>
>>> slices = JointDistribution.infer_indices(*distributions)
>>> log_probs = [d.log_prob(y[..., s]) for d, s in zip(distributions, slices)]
"""
res = tuple()
length = 0
for i, d in enumerate(distributions):
multi_dimensional = len(d.event_shape) > 0
if multi_dimensional:
size = d.event_shape[-1]
slice_ = slice(length, size + 1)
length += slice_.stop
else:
slice_ = length
length += 1
res += (slice_,)
return res
def log_prob(self, value):
# TODO: Add check for wrong dimensions
return sum(d.log_prob(value[..., m]) for d, m in zip(self.distributions, self.indices))
def rsample(self, sample_shape=torch.Size()):
res = tuple(
d.rsample(sample_shape) if len(d.event_shape) > 0 else d.rsample(sample_shape).unsqueeze(-1)
for d in self.distributions
)
return torch.cat(res, dim=-1)
|
[
"torch.cat",
"torch.Size"
] |
[((4485, 4497), 'torch.Size', 'torch.Size', ([], {}), '()\n', (4495, 4497), False, 'import torch\n'), ((4692, 4714), 'torch.cat', 'torch.cat', (['res'], {'dim': '(-1)'}), '(res, dim=-1)\n', (4701, 4714), False, 'import torch\n')]
|
import streamlit as st
import numpy as np
from tensorflow.keras.models import load_model
import librosa
import time
import matplotlib.pyplot as plt
def wav2mfcc(wave, sr=22050,n_mfcc=20, max_len=170):
'''wave is a np array'''
wave = np.asfortranarray(wave)
mfcc = librosa.feature.mfcc(wave, sr=sr, n_mfcc=n_mfcc)
# If maximum length exceeds mfcc lengths then pad the remaining ones
if (max_len > mfcc.shape[1]):
pad_width = max_len - mfcc.shape[1]
mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')
# Else cutoff the remaining parts
else:
mfcc = mfcc[:, :max_len]
return mfcc
def updateplot(wave,txt_output):
"""
update the plot with the wave file
"""
line.set_ydata(wave)
the_plot.pyplot(plt)
text.set_text(txt_output)
# load the model from disk
model_path="models/"
cnn_model=load_model(model_path+'bal_cnn_model_accuracy_98.2_alpha_0.0001.h5')
#-------------------------------------------------
st.title('Firearm Alarm')
st.header('Listening for Firearms in Your Home')
##-----------------------------------------------------------------------------
path="data/external/"
audio_clip1='5-195710-A-10.wav' # ?
audio_clip2='2-121978-A-29.wav' #?
audio_clip3='T_17P.wav'
audio_dict={
'Audio clip 1':audio_clip1,
'Audio clip 2': audio_clip2,
'Audio clip 3': audio_clip3}
#-----------------------------------------------
# select a sidebar to navigate between different options of the app
options=['Test with some sample clips', 'Test with a youtube video']
page=st.sidebar.radio('Select an option',options)
st.sidebar.header('Firearm-Alarm Options')
st.sidebar.markdown('The first option will allow you to test firearm-alarm with some pre-recorded sound clips.')
st.sidebar.markdown('The second option will enable you to have firearm-alarm listen to a youtube clip: https://www.youtube.com/watch?v=1N_m3tsPyP0.')
#-----------------------------------------------
if page==options[0]: #The first option is selected
st.text('The following are a set of sample audio clips that can be input into the model.')
st.audio(path+audio_clip1)
st.text('This is audio clip 1.')
st.audio(path+audio_clip2)
st.text('This is audio clip 2.')
st.audio(path+audio_clip3)
option = st.selectbox('Select the clip you would like the model to analyze.',('Audio clip 1', 'Audio clip 2', 'Audio clip 3'))
st.write('You selected:', option)
if st.button('Analyze '+option):
wave, sr = librosa.load(path+audio_dict[option], mono=True, sr=22050)
mfcc=wav2mfcc(wave,sr=sr)
X_test = np.reshape(mfcc,(1, 20, 170, 1))
Y_predict=cnn_model.predict(X_test)
print(Y_predict)
if Y_predict.round()[0][0]==1 :
st.write("This doesn't sound like a firearm.")
if Y_predict.round()[0][0]==0:
st.write("This is a firearm! Contacting local authorities...")
else:
st.write('Click the button to analyze the audio clip.')
###############################################----------------------------------
elif page==options[1]: #if the second page is selected
st.header('Firearm Alarm in Action')
x = np.arange(0, 4,1/22050)
fig, ax=plt.subplots()
ax.set_ylim(-1, 1)
line, = ax.plot(x, np.zeros(len(x)),color='m',linewidth=2)
plt.xlabel('Time (s)')
plt.ylabel('Sound Wave')
the_plot = st.pyplot(plt)
text=plt.text(0,.8,'',fontsize=14)
sample='data/external/Real_life_gunshot_sound_effects.wav'
if st.button('See an example with Firearm Alarm'):
with st.spinner("Listening..."):
array,sr=librosa.load(sample)
tiempo=librosa.get_duration(array) #time in seconds
for t in range(0,int(tiempo),4):
wave, sr = librosa.load(sample, mono=True,offset=t,duration=4)
## run it through the model
mfcc=wav2mfcc(wave)
X_test = np.reshape(mfcc,(1, 20, 170, 1))
Y_predict=cnn_model.predict(X_test)
if Y_predict.round()[0][0]==1 :
txt_output='No firearm sound(s) detected'
# text.set_text('No firearm sounds detected')
if Y_predict.round()[0][0]==0:
txt_output='Firearm sound(s) detected!'
# text.set_text('Firearm sounds detected!')
updateplot(wave,txt_output)
time.sleep(3)
plt.show()
else:
st.write('Click the button to start listening.')
#-----------------------------------
|
[
"streamlit.title",
"numpy.arange",
"streamlit.sidebar.radio",
"librosa.feature.mfcc",
"librosa.get_duration",
"numpy.pad",
"streamlit.audio",
"streamlit.spinner",
"streamlit.text",
"streamlit.sidebar.markdown",
"streamlit.button",
"numpy.reshape",
"matplotlib.pyplot.subplots",
"streamlit.sidebar.header",
"tensorflow.keras.models.load_model",
"matplotlib.pyplot.show",
"streamlit.header",
"numpy.asfortranarray",
"time.sleep",
"matplotlib.pyplot.text",
"librosa.load",
"streamlit.pyplot",
"matplotlib.pyplot.ylabel",
"streamlit.write",
"streamlit.selectbox",
"matplotlib.pyplot.xlabel"
] |
[((889, 959), 'tensorflow.keras.models.load_model', 'load_model', (["(model_path + 'bal_cnn_model_accuracy_98.2_alpha_0.0001.h5')"], {}), "(model_path + 'bal_cnn_model_accuracy_98.2_alpha_0.0001.h5')\n", (899, 959), False, 'from tensorflow.keras.models import load_model\n'), ((1011, 1036), 'streamlit.title', 'st.title', (['"""Firearm Alarm"""'], {}), "('Firearm Alarm')\n", (1019, 1036), True, 'import streamlit as st\n'), ((1037, 1085), 'streamlit.header', 'st.header', (['"""Listening for Firearms in Your Home"""'], {}), "('Listening for Firearms in Your Home')\n", (1046, 1085), True, 'import streamlit as st\n'), ((1574, 1619), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Select an option"""', 'options'], {}), "('Select an option', options)\n", (1590, 1619), True, 'import streamlit as st\n'), ((1620, 1662), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""Firearm-Alarm Options"""'], {}), "('Firearm-Alarm Options')\n", (1637, 1662), True, 'import streamlit as st\n'), ((1663, 1785), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""The first option will allow you to test firearm-alarm with some pre-recorded sound clips."""'], {}), "(\n 'The first option will allow you to test firearm-alarm with some pre-recorded sound clips.'\n )\n", (1682, 1785), True, 'import streamlit as st\n'), ((1776, 1935), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""The second option will enable you to have firearm-alarm listen to a youtube clip: https://www.youtube.com/watch?v=1N_m3tsPyP0."""'], {}), "(\n 'The second option will enable you to have firearm-alarm listen to a youtube clip: https://www.youtube.com/watch?v=1N_m3tsPyP0.'\n )\n", (1795, 1935), True, 'import streamlit as st\n'), ((244, 267), 'numpy.asfortranarray', 'np.asfortranarray', (['wave'], {}), '(wave)\n', (261, 267), True, 'import numpy as np\n'), ((279, 327), 'librosa.feature.mfcc', 'librosa.feature.mfcc', (['wave'], {'sr': 'sr', 'n_mfcc': 'n_mfcc'}), '(wave, sr=sr, n_mfcc=n_mfcc)\n', 
(299, 327), False, 'import librosa\n'), ((2031, 2131), 'streamlit.text', 'st.text', (['"""The following are a set of sample audio clips that can be input into the model."""'], {}), "(\n 'The following are a set of sample audio clips that can be input into the model.'\n )\n", (2038, 2131), True, 'import streamlit as st\n'), ((2128, 2156), 'streamlit.audio', 'st.audio', (['(path + audio_clip1)'], {}), '(path + audio_clip1)\n', (2136, 2156), True, 'import streamlit as st\n'), ((2161, 2193), 'streamlit.text', 'st.text', (['"""This is audio clip 1."""'], {}), "('This is audio clip 1.')\n", (2168, 2193), True, 'import streamlit as st\n'), ((2199, 2227), 'streamlit.audio', 'st.audio', (['(path + audio_clip2)'], {}), '(path + audio_clip2)\n', (2207, 2227), True, 'import streamlit as st\n'), ((2231, 2263), 'streamlit.text', 'st.text', (['"""This is audio clip 2."""'], {}), "('This is audio clip 2.')\n", (2238, 2263), True, 'import streamlit as st\n'), ((2269, 2297), 'streamlit.audio', 'st.audio', (['(path + audio_clip3)'], {}), '(path + audio_clip3)\n', (2277, 2297), True, 'import streamlit as st\n'), ((2310, 2433), 'streamlit.selectbox', 'st.selectbox', (['"""Select the clip you would like the model to analyze."""', "('Audio clip 1', 'Audio clip 2', 'Audio clip 3')"], {}), "('Select the clip you would like the model to analyze.', (\n 'Audio clip 1', 'Audio clip 2', 'Audio clip 3'))\n", (2322, 2433), True, 'import streamlit as st\n'), ((2432, 2465), 'streamlit.write', 'st.write', (['"""You selected:"""', 'option'], {}), "('You selected:', option)\n", (2440, 2465), True, 'import streamlit as st\n'), ((2474, 2504), 'streamlit.button', 'st.button', (["('Analyze ' + option)"], {}), "('Analyze ' + option)\n", (2483, 2504), True, 'import streamlit as st\n'), ((495, 560), 'numpy.pad', 'np.pad', (['mfcc'], {'pad_width': '((0, 0), (0, pad_width))', 'mode': '"""constant"""'}), "(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')\n", (501, 560), True, 'import numpy as np\n'), 
((2523, 2583), 'librosa.load', 'librosa.load', (['(path + audio_dict[option])'], {'mono': '(True)', 'sr': '(22050)'}), '(path + audio_dict[option], mono=True, sr=22050)\n', (2535, 2583), False, 'import librosa\n'), ((2633, 2666), 'numpy.reshape', 'np.reshape', (['mfcc', '(1, 20, 170, 1)'], {}), '(mfcc, (1, 20, 170, 1))\n', (2643, 2666), True, 'import numpy as np\n'), ((2972, 3027), 'streamlit.write', 'st.write', (['"""Click the button to analyze the audio clip."""'], {}), "('Click the button to analyze the audio clip.')\n", (2980, 3027), True, 'import streamlit as st\n'), ((3171, 3207), 'streamlit.header', 'st.header', (['"""Firearm Alarm in Action"""'], {}), "('Firearm Alarm in Action')\n", (3180, 3207), True, 'import streamlit as st\n'), ((3217, 3243), 'numpy.arange', 'np.arange', (['(0)', '(4)', '(1 / 22050)'], {}), '(0, 4, 1 / 22050)\n', (3226, 3243), True, 'import numpy as np\n'), ((3253, 3267), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3265, 3267), True, 'import matplotlib.pyplot as plt\n'), ((3358, 3380), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (3368, 3380), True, 'import matplotlib.pyplot as plt\n'), ((3385, 3409), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sound Wave"""'], {}), "('Sound Wave')\n", (3395, 3409), True, 'import matplotlib.pyplot as plt\n'), ((3425, 3439), 'streamlit.pyplot', 'st.pyplot', (['plt'], {}), '(plt)\n', (3434, 3439), True, 'import streamlit as st\n'), ((3449, 3482), 'matplotlib.pyplot.text', 'plt.text', (['(0)', '(0.8)', '""""""'], {'fontsize': '(14)'}), "(0, 0.8, '', fontsize=14)\n", (3457, 3482), True, 'import matplotlib.pyplot as plt\n'), ((3551, 3597), 'streamlit.button', 'st.button', (['"""See an example with Firearm Alarm"""'], {}), "('See an example with Firearm Alarm')\n", (3560, 3597), True, 'import streamlit as st\n'), ((2790, 2836), 'streamlit.write', 'st.write', (['"""This doesn\'t sound like a firearm."""'], {}), '("This doesn\'t sound like a 
firearm.")\n', (2798, 2836), True, 'import streamlit as st\n'), ((2890, 2952), 'streamlit.write', 'st.write', (['"""This is a firearm! Contacting local authorities..."""'], {}), "('This is a firearm! Contacting local authorities...')\n", (2898, 2952), True, 'import streamlit as st\n'), ((4532, 4580), 'streamlit.write', 'st.write', (['"""Click the button to start listening."""'], {}), "('Click the button to start listening.')\n", (4540, 4580), True, 'import streamlit as st\n'), ((3612, 3638), 'streamlit.spinner', 'st.spinner', (['"""Listening..."""'], {}), "('Listening...')\n", (3622, 3638), True, 'import streamlit as st\n'), ((3661, 3681), 'librosa.load', 'librosa.load', (['sample'], {}), '(sample)\n', (3673, 3681), False, 'import librosa\n'), ((3701, 3728), 'librosa.get_duration', 'librosa.get_duration', (['array'], {}), '(array)\n', (3721, 3728), False, 'import librosa\n'), ((3818, 3871), 'librosa.load', 'librosa.load', (['sample'], {'mono': '(True)', 'offset': 't', 'duration': '(4)'}), '(sample, mono=True, offset=t, duration=4)\n', (3830, 3871), False, 'import librosa\n'), ((3977, 4010), 'numpy.reshape', 'np.reshape', (['mfcc', '(1, 20, 170, 1)'], {}), '(mfcc, (1, 20, 170, 1))\n', (3987, 4010), True, 'import numpy as np\n'), ((4472, 4485), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (4482, 4485), False, 'import time\n'), ((4503, 4513), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4511, 4513), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
import os
JLDCF_root = "<your_methods_path>/CVPR2020_JL-DCF"
JLDCF = {
"LFSD": dict(path=os.path.join(JLDCF_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(JLDCF_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(JLDCF_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(JLDCF_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(JLDCF_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(JLDCF_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(JLDCF_root, "DUT-RGBD-testing"), suffix=".png"),
}
CoNet_root = "<your_methods_path>/2020-ECCV-CoNet"
CoNet = {
"LFSD": dict(path=os.path.join(CoNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(CoNet_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(CoNet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(CoNet_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(CoNet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(CoNet_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(CoNet_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(CoNet_root, "STERE1000"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(CoNet_root, "DUT-RGBD"), suffix=".png"),
}
BBSNet_root = "<your_methods_path>/ECCV2020_BBSNet"
BBSNet = {
"LFSD": dict(path=os.path.join(BBSNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(BBSNet_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(BBSNet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(BBSNet_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(BBSNet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(BBSNet_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(BBSNet_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(BBSNet_root, "DUT"), suffix=".png"),
}
CMWNet_root = "<your_methods_path>/ECCV2020_CMWNet"
CMWNet = {
"LFSD": dict(path=os.path.join(CMWNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(CMWNet_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(CMWNet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(CMWNet_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(CMWNet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(CMWNet_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(CMWNet_root, "STEREO"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(CMWNet_root, "DUT-RGBD"), suffix=".png"),
}
FRDT_root = "<your_methods_path>/2020-ACMMM-FRDT"
FRDT = {
"LFSD": dict(path=os.path.join(FRDT_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(FRDT_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(FRDT_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(FRDT_root, "RGBD-135"), suffix=".png"),
"SIP": None,
"SSD": dict(path=os.path.join(FRDT_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(FRDT_root, "STEREO"), suffix=".png"),
"STEREO1000": None,
"DUTRGBD": dict(path=os.path.join(FRDT_root, "DUT"), suffix=".png"),
}
S2MA_root = "<your_methods_path>/2020-CVPR-S2MA"
S2MA = {
"LFSD": dict(path=os.path.join(S2MA_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(S2MA_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(S2MA_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(S2MA_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(S2MA_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(S2MA_root, "SSD100"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(S2MA_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(S2MA_root, "DUT-RGBD"), suffix=".png"),
}
UCNet_root = "<your_methods_path>/2020-CVPR-UCNet_Res50/CVPR-UCNet_R50"
UCNet = {
"LFSD": dict(path=os.path.join(UCNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(UCNet_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(UCNet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(UCNet_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(UCNet_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(UCNet_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(UCNet_root, "DUT"), suffix=".png"),
}
UCNet_ABP_root = "<your_methods_path>/2020-CVPR-UCNet_Res50/TPAMI_UCNet_R50_ABP"
UCNet_ABP = {
"LFSD": dict(path=os.path.join(UCNet_ABP_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(UCNet_ABP_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(UCNet_ABP_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(UCNet_ABP_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(UCNet_ABP_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(UCNet_ABP_root, "STERE"), suffix=".png"),
"DUTRGBD": None,
}
UCNet_CVAE_root = "<your_methods_path>/2020-CVPR-UCNet_Res50/TPAMI_UCNet_R50_CVAE"
UCNet_CVAE = {
"LFSD": dict(path=os.path.join(UCNet_CVAE_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(UCNet_CVAE_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(UCNet_CVAE_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(UCNet_CVAE_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(UCNet_CVAE_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(UCNet_CVAE_root, "STERE"), suffix=".png"),
"DUTRGBD": None,
}
CasGNN_root = "<your_methods_path>/2020-ECCV-CasGNN"
CasGNN = {
"LFSD": dict(path=os.path.join(CasGNN_root, "LFSD", "pred"), suffix=".png"),
"NJUD": dict(path=os.path.join(CasGNN_root, "NJUD", "pred"), suffix=".png"),
"NLPR": dict(path=os.path.join(CasGNN_root, "NLPR", "pred"), suffix=".png"),
"RGBD135": dict(path=os.path.join(CasGNN_root, "DES", "pred"), suffix=".png"),
"SIP": None,
"SSD": dict(path=os.path.join(CasGNN_root, "SSD", "pred"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(CasGNN_root, "STERE", "pred"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(CasGNN_root, "DUT-RGBD", "pred"), suffix=".png"),
}
DANet_VGG16_root = "<your_methods_path>/2020-ECCV-DANet_VGG/DANet_vgg16"
DANet_VGG16 = {
"LFSD": dict(path=os.path.join(DANet_VGG16_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DANet_VGG16_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DANet_VGG16_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DANet_VGG16_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DANet_VGG16_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(DANet_VGG16_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(DANet_VGG16_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DANet_VGG16_root, "DUT-RGBD"), suffix=".png"),
}
DANet_VGG19_root = "<your_methods_path>/2020-ECCV-DANet_VGG/DANet_vgg19"
DANet_VGG19 = {
"LFSD": dict(path=os.path.join(DANet_VGG19_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DANet_VGG19_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DANet_VGG19_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DANet_VGG19_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DANet_VGG19_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(DANet_VGG19_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(DANet_VGG19_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DANet_VGG19_root, "DUT-RGBD"), suffix=".png"),
}
PGAR_root = "<your_methods_path>/2020-ECCV-PGAR"
PGAR = {
"LFSD": dict(path=os.path.join(PGAR_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(PGAR_root, "NJUD_test"), suffix=".png"),
"NLPR": dict(path=os.path.join(PGAR_root, "NLPR_test"), suffix=".png"),
"RGBD135": dict(path=os.path.join(PGAR_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(PGAR_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(PGAR_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(PGAR_root, "DUT-RGBD"), suffix=".png"),
}
DisenFuse_root = "<your_methods_path>/2020-TIP-DisenFuse_VGG16"
DisenFuse = {
"LFSD": dict(path=os.path.join(DisenFuse_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DisenFuse_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DisenFuse_root, "NLPR"), suffix=".jpg"),
"RGBD135": dict(path=os.path.join(DisenFuse_root, "DES"), suffix=".bmp"),
"SIP": dict(path=os.path.join(DisenFuse_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(DisenFuse_root, "STEREO1000"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DisenFuse_root, "DUT"), suffix=".png"),
}
DPANet_root = "<your_methods_path>/2020-TIP-DPANet"
DPANet = {
"LFSD": dict(path=os.path.join(DPANet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DPANet_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DPANet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DPANet_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DPANet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(DPANet_root, "SSD100"), suffix=".png"),
"STEREO797": dict(path=os.path.join(DPANet_root, "STEREO797"), suffix=".png"),
"STEREO1000": None,
"DUTRGBD": dict(path=os.path.join(DPANet_root, "DUT"), suffix=".png"),
}
ICNet_root = "<your_methods_path>/2020-TIP-ICNet"
ICNet = {
"LFSD": dict(path=os.path.join(ICNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(ICNet_root, "NJU2K"), suffix=".png"),
"NLPR": dict(path=os.path.join(ICNet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(ICNet_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(ICNet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(ICNet_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(ICNet_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(ICNet_root, "DUT-RGBD"), suffix=".png"),
}
D3Net_root = "<your_methods_path>/2020-TNNLS-D3Net"
D3Net = {
"LFSD": dict(path=os.path.join(D3Net_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(D3Net_root, "NJU2K_TEST"), suffix=".png"),
"NLPR": dict(path=os.path.join(D3Net_root, "NLPR_TEST"), suffix=".png"),
"RGBD135": dict(path=os.path.join(D3Net_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(D3Net_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(D3Net_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(D3Net_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(D3Net_root, "DUT-RGBD_TEST"), suffix=".png"),
}
RD3D_root = "<your_methods_path>/2021-AAAI-RD3D"
RD3D = {
"LFSD": dict(path=os.path.join(RD3D_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(RD3D_root, "NJU2000"), suffix=".png"),
"NLPR": dict(path=os.path.join(RD3D_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(RD3D_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(RD3D_root, "SIP"), suffix=".png"),
"SSD": None,
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(RD3D_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(RD3D_root, "DUT"), suffix=".png"),
}
AFNet_root = "<your_methods_path>/AFNet"
AFNet = {
"LFSD": dict(path=os.path.join(AFNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(AFNet_root, "NJU2K-TEST"), suffix=".png"),
"NLPR": dict(path=os.path.join(AFNet_root, "NLPR-TEST"), suffix=".png"),
"RGBD135": dict(path=os.path.join(AFNet_root, "DES"), suffix=".png"),
"SIP": dict(path=os.path.join(AFNet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(AFNet_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(AFNet_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(AFNet_root, "STERE"), suffix=".png"),
"DUTRGBD": None,
}
CDCP_root = "<your_methods_path>/CDCP"
CDCP = {
"LFSD": dict(path=os.path.join(CDCP_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(CDCP_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(CDCP_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(CDCP_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(CDCP_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(CDCP_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(CDCP_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(CDCP_root, "DUT-RGBD"), suffix=".png"),
}
CPFP_root = "<your_methods_path>/CPFP"
CPFP = {
"LFSD": dict(path=os.path.join(CPFP_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(CPFP_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(CPFP_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(CPFP_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(CPFP_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(CPFP_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(CPFP_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(CPFP_root, "DUT-RGBD"), suffix=".png"),
}
CTMF_root = "<your_methods_path>/CTMF"
CTMF = {
"LFSD": dict(path=os.path.join(CTMF_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(CTMF_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(CTMF_root, "NLPR"), suffix=".jpg"),
"RGBD135": dict(path=os.path.join(CTMF_root, "RGBD135"), suffix=".bmp"),
"SIP": dict(path=os.path.join(CTMF_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(CTMF_root, "SSD"), suffix=".png"),
"STEREO797": None,
"STEREO1000": dict(path=os.path.join(CTMF_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(CTMF_root, "DUT-RGBD"), suffix=".png"),
}
DCMC_root = "<your_methods_path>/DCMC"
DCMC = {
"LFSD": dict(path=os.path.join(DCMC_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DCMC_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DCMC_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DCMC_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DCMC_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(DCMC_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(DCMC_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(DCMC_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DCMC_root, "DUT-RGBD"), suffix=".png"),
}
DES_root = "<your_methods_path>/DES"
DES = {
"LFSD": dict(path=os.path.join(DES_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DES_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DES_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DES_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DES_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(DES_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(DES_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(DES_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DES_root, "DUT-RGBD"), suffix=".png"),
}
DF_root = "<your_methods_path>/DF"
DF = {
"LFSD": dict(path=os.path.join(DF_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DF_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DF_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DF_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DF_root, "SIP/SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(DF_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(DF_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(DF_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DF_root, "DUT-RGBD"), suffix=".png"),
}
DMRA_root = "<your_methods_path>/DMRA"
DMRA = {
"LFSD": dict(path=os.path.join(DMRA_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(DMRA_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(DMRA_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(DMRA_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(DMRA_root, "SIP_FromAuthor"), suffix=".png"),
"SSD": dict(path=os.path.join(DMRA_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(DMRA_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(DMRA_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(DMRA_root, "DUT-RGBD"), suffix=".png"),
}
MB_root = "<your_methods_path>/MB"
MB = {
"LFSD": dict(path=os.path.join(MB_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(MB_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(MB_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(MB_root, "RGBD135"), suffix=".png"),
"SIP": None,
"SSD": dict(path=os.path.join(MB_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(MB_root, "STEREO"), suffix=".png"),
"STEREO1000": None,
"DUTRGBD": dict(path=os.path.join(MB_root, "DUT-RGBD"), suffix=".png"),
}
MMCI_root = "<your_methods_path>/MMCI"
MMCI = {
"LFSD": dict(path=os.path.join(MMCI_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(MMCI_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(MMCI_root, "NLPR"), suffix=".jpg"),
"RGBD135": dict(path=os.path.join(MMCI_root, "RGBD135"), suffix=".bmp"),
"SIP": dict(path=os.path.join(MMCI_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(MMCI_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(MMCI_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(MMCI_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(MMCI_root, "DUT-RGBD"), suffix=".png"),
}
NLPR_root = "<your_methods_path>/NLPR"
NLPR = {
"LFSD": dict(path=os.path.join(NLPR_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(NLPR_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(NLPR_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(NLPR_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(NLPR_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(NLPR_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(NLPR_root, "STEREO-797"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(NLPR_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(NLPR_root, "DUT-RGBD"), suffix=".png"),
}
PCANet_root = "<your_methods_path>/PCANet"
PCANet = {
"LFSD": dict(path=os.path.join(PCANet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(PCANet_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(PCANet_root, "NLPR"), suffix=".jpg"),
"RGBD135": dict(path=os.path.join(PCANet_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(PCANet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(PCANet_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(PCANet_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(PCANet_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(PCANet_root, "DUT-RGBD"), suffix=".png"),
}
PDNet_root = "<your_methods_path>/PDNet"
PDNet = {
"LFSD": dict(path=os.path.join(PDNet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(PDNet_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(PDNet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(PDNet_root, "RGBD135"), suffix=".png"),
"SIP": None,
"SSD": dict(path=os.path.join(PDNet_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(PDNet_root, "STEREO"), suffix=".png"),
"STEREO1000": None,
"DUTRGBD": dict(path=os.path.join(PDNet_root, "DUT-RGBD"), suffix=".png"),
}
TANet_root = "<your_methods_path>/TANet"
TANet = {
"LFSD": dict(path=os.path.join(TANet_root, "LFSD"), suffix=".png"),
"NJUD": dict(path=os.path.join(TANet_root, "NJUD"), suffix=".png"),
"NLPR": dict(path=os.path.join(TANet_root, "NLPR"), suffix=".png"),
"RGBD135": dict(path=os.path.join(TANet_root, "RGBD135"), suffix=".png"),
"SIP": dict(path=os.path.join(TANet_root, "SIP"), suffix=".png"),
"SSD": dict(path=os.path.join(TANet_root, "SSD"), suffix=".png"),
"STEREO797": dict(path=os.path.join(TANet_root, "STEREO"), suffix=".png"),
"STEREO1000": dict(path=os.path.join(TANet_root, "STERE"), suffix=".png"),
"DUTRGBD": dict(path=os.path.join(TANet_root, "DUT-RGBD"), suffix=".png"),
}
|
[
"os.path.join"
] |
[((118, 150), 'os.path.join', 'os.path.join', (['JLDCF_root', '"""LFSD"""'], {}), "(JLDCF_root, 'LFSD')\n", (130, 150), False, 'import os\n'), ((190, 223), 'os.path.join', 'os.path.join', (['JLDCF_root', '"""NJU2K"""'], {}), "(JLDCF_root, 'NJU2K')\n", (202, 223), False, 'import os\n'), ((263, 295), 'os.path.join', 'os.path.join', (['JLDCF_root', '"""NLPR"""'], {}), "(JLDCF_root, 'NLPR')\n", (275, 295), False, 'import os\n'), ((338, 373), 'os.path.join', 'os.path.join', (['JLDCF_root', '"""RGBD135"""'], {}), "(JLDCF_root, 'RGBD135')\n", (350, 373), False, 'import os\n'), ((412, 443), 'os.path.join', 'os.path.join', (['JLDCF_root', '"""SIP"""'], {}), "(JLDCF_root, 'SIP')\n", (424, 443), False, 'import os\n'), ((529, 562), 'os.path.join', 'os.path.join', (['JLDCF_root', '"""STERE"""'], {}), "(JLDCF_root, 'STERE')\n", (541, 562), False, 'import os\n'), ((605, 649), 'os.path.join', 'os.path.join', (['JLDCF_root', '"""DUT-RGBD-testing"""'], {}), "(JLDCF_root, 'DUT-RGBD-testing')\n", (617, 649), False, 'import os\n'), ((753, 785), 'os.path.join', 'os.path.join', (['CoNet_root', '"""LFSD"""'], {}), "(CoNet_root, 'LFSD')\n", (765, 785), False, 'import os\n'), ((825, 857), 'os.path.join', 'os.path.join', (['CoNet_root', '"""NJUD"""'], {}), "(CoNet_root, 'NJUD')\n", (837, 857), False, 'import os\n'), ((897, 929), 'os.path.join', 'os.path.join', (['CoNet_root', '"""NLPR"""'], {}), "(CoNet_root, 'NLPR')\n", (909, 929), False, 'import os\n'), ((972, 1007), 'os.path.join', 'os.path.join', (['CoNet_root', '"""RGBD135"""'], {}), "(CoNet_root, 'RGBD135')\n", (984, 1007), False, 'import os\n'), ((1046, 1077), 'os.path.join', 'os.path.join', (['CoNet_root', '"""SIP"""'], {}), "(CoNet_root, 'SIP')\n", (1058, 1077), False, 'import os\n'), ((1116, 1147), 'os.path.join', 'os.path.join', (['CoNet_root', '"""SSD"""'], {}), "(CoNet_root, 'SSD')\n", (1128, 1147), False, 'import os\n'), ((1192, 1226), 'os.path.join', 'os.path.join', (['CoNet_root', '"""STEREO"""'], {}), "(CoNet_root, 
'STEREO')\n", (1204, 1226), False, 'import os\n'), ((1272, 1309), 'os.path.join', 'os.path.join', (['CoNet_root', '"""STERE1000"""'], {}), "(CoNet_root, 'STERE1000')\n", (1284, 1309), False, 'import os\n'), ((1352, 1388), 'os.path.join', 'os.path.join', (['CoNet_root', '"""DUT-RGBD"""'], {}), "(CoNet_root, 'DUT-RGBD')\n", (1364, 1388), False, 'import os\n'), ((1494, 1527), 'os.path.join', 'os.path.join', (['BBSNet_root', '"""LFSD"""'], {}), "(BBSNet_root, 'LFSD')\n", (1506, 1527), False, 'import os\n'), ((1567, 1601), 'os.path.join', 'os.path.join', (['BBSNet_root', '"""NJU2K"""'], {}), "(BBSNet_root, 'NJU2K')\n", (1579, 1601), False, 'import os\n'), ((1641, 1674), 'os.path.join', 'os.path.join', (['BBSNet_root', '"""NLPR"""'], {}), "(BBSNet_root, 'NLPR')\n", (1653, 1674), False, 'import os\n'), ((1717, 1749), 'os.path.join', 'os.path.join', (['BBSNet_root', '"""DES"""'], {}), "(BBSNet_root, 'DES')\n", (1729, 1749), False, 'import os\n'), ((1788, 1820), 'os.path.join', 'os.path.join', (['BBSNet_root', '"""SIP"""'], {}), "(BBSNet_root, 'SIP')\n", (1800, 1820), False, 'import os\n'), ((1859, 1891), 'os.path.join', 'os.path.join', (['BBSNet_root', '"""SSD"""'], {}), "(BBSNet_root, 'SSD')\n", (1871, 1891), False, 'import os\n'), ((1960, 1994), 'os.path.join', 'os.path.join', (['BBSNet_root', '"""STERE"""'], {}), "(BBSNet_root, 'STERE')\n", (1972, 1994), False, 'import os\n'), ((2037, 2069), 'os.path.join', 'os.path.join', (['BBSNet_root', '"""DUT"""'], {}), "(BBSNet_root, 'DUT')\n", (2049, 2069), False, 'import os\n'), ((2175, 2208), 'os.path.join', 'os.path.join', (['CMWNet_root', '"""LFSD"""'], {}), "(CMWNet_root, 'LFSD')\n", (2187, 2208), False, 'import os\n'), ((2248, 2282), 'os.path.join', 'os.path.join', (['CMWNet_root', '"""NJU2K"""'], {}), "(CMWNet_root, 'NJU2K')\n", (2260, 2282), False, 'import os\n'), ((2322, 2355), 'os.path.join', 'os.path.join', (['CMWNet_root', '"""NLPR"""'], {}), "(CMWNet_root, 'NLPR')\n", (2334, 2355), False, 'import os\n'), ((2398, 
2430), 'os.path.join', 'os.path.join', (['CMWNet_root', '"""DES"""'], {}), "(CMWNet_root, 'DES')\n", (2410, 2430), False, 'import os\n'), ((2469, 2501), 'os.path.join', 'os.path.join', (['CMWNet_root', '"""SIP"""'], {}), "(CMWNet_root, 'SIP')\n", (2481, 2501), False, 'import os\n'), ((2540, 2572), 'os.path.join', 'os.path.join', (['CMWNet_root', '"""SSD"""'], {}), "(CMWNet_root, 'SSD')\n", (2552, 2572), False, 'import os\n'), ((2641, 2676), 'os.path.join', 'os.path.join', (['CMWNet_root', '"""STEREO"""'], {}), "(CMWNet_root, 'STEREO')\n", (2653, 2676), False, 'import os\n'), ((2719, 2756), 'os.path.join', 'os.path.join', (['CMWNet_root', '"""DUT-RGBD"""'], {}), "(CMWNet_root, 'DUT-RGBD')\n", (2731, 2756), False, 'import os\n'), ((2858, 2889), 'os.path.join', 'os.path.join', (['FRDT_root', '"""LFSD"""'], {}), "(FRDT_root, 'LFSD')\n", (2870, 2889), False, 'import os\n'), ((2929, 2960), 'os.path.join', 'os.path.join', (['FRDT_root', '"""NJUD"""'], {}), "(FRDT_root, 'NJUD')\n", (2941, 2960), False, 'import os\n'), ((3000, 3031), 'os.path.join', 'os.path.join', (['FRDT_root', '"""NLPR"""'], {}), "(FRDT_root, 'NLPR')\n", (3012, 3031), False, 'import os\n'), ((3074, 3109), 'os.path.join', 'os.path.join', (['FRDT_root', '"""RGBD-135"""'], {}), "(FRDT_root, 'RGBD-135')\n", (3086, 3109), False, 'import os\n'), ((3165, 3195), 'os.path.join', 'os.path.join', (['FRDT_root', '"""SSD"""'], {}), "(FRDT_root, 'SSD')\n", (3177, 3195), False, 'import os\n'), ((3240, 3273), 'os.path.join', 'os.path.join', (['FRDT_root', '"""STEREO"""'], {}), "(FRDT_root, 'STEREO')\n", (3252, 3273), False, 'import os\n'), ((3340, 3370), 'os.path.join', 'os.path.join', (['FRDT_root', '"""DUT"""'], {}), "(FRDT_root, 'DUT')\n", (3352, 3370), False, 'import os\n'), ((3471, 3502), 'os.path.join', 'os.path.join', (['S2MA_root', '"""LFSD"""'], {}), "(S2MA_root, 'LFSD')\n", (3483, 3502), False, 'import os\n'), ((3542, 3574), 'os.path.join', 'os.path.join', (['S2MA_root', '"""NJU2K"""'], {}), "(S2MA_root, 
'NJU2K')\n", (3554, 3574), False, 'import os\n'), ((3614, 3645), 'os.path.join', 'os.path.join', (['S2MA_root', '"""NLPR"""'], {}), "(S2MA_root, 'NLPR')\n", (3626, 3645), False, 'import os\n'), ((3688, 3722), 'os.path.join', 'os.path.join', (['S2MA_root', '"""RGBD135"""'], {}), "(S2MA_root, 'RGBD135')\n", (3700, 3722), False, 'import os\n'), ((3761, 3791), 'os.path.join', 'os.path.join', (['S2MA_root', '"""SIP"""'], {}), "(S2MA_root, 'SIP')\n", (3773, 3791), False, 'import os\n'), ((3830, 3863), 'os.path.join', 'os.path.join', (['S2MA_root', '"""SSD100"""'], {}), "(S2MA_root, 'SSD100')\n", (3842, 3863), False, 'import os\n'), ((3932, 3964), 'os.path.join', 'os.path.join', (['S2MA_root', '"""STERE"""'], {}), "(S2MA_root, 'STERE')\n", (3944, 3964), False, 'import os\n'), ((4007, 4042), 'os.path.join', 'os.path.join', (['S2MA_root', '"""DUT-RGBD"""'], {}), "(S2MA_root, 'DUT-RGBD')\n", (4019, 4042), False, 'import os\n'), ((4167, 4199), 'os.path.join', 'os.path.join', (['UCNet_root', '"""LFSD"""'], {}), "(UCNet_root, 'LFSD')\n", (4179, 4199), False, 'import os\n'), ((4239, 4272), 'os.path.join', 'os.path.join', (['UCNet_root', '"""NJU2K"""'], {}), "(UCNet_root, 'NJU2K')\n", (4251, 4272), False, 'import os\n'), ((4312, 4344), 'os.path.join', 'os.path.join', (['UCNet_root', '"""NLPR"""'], {}), "(UCNet_root, 'NLPR')\n", (4324, 4344), False, 'import os\n'), ((4387, 4418), 'os.path.join', 'os.path.join', (['UCNet_root', '"""DES"""'], {}), "(UCNet_root, 'DES')\n", (4399, 4418), False, 'import os\n'), ((4457, 4488), 'os.path.join', 'os.path.join', (['UCNet_root', '"""SIP"""'], {}), "(UCNet_root, 'SIP')\n", (4469, 4488), False, 'import os\n'), ((4574, 4607), 'os.path.join', 'os.path.join', (['UCNet_root', '"""STERE"""'], {}), "(UCNet_root, 'STERE')\n", (4586, 4607), False, 'import os\n'), ((4650, 4681), 'os.path.join', 'os.path.join', (['UCNet_root', '"""DUT"""'], {}), "(UCNet_root, 'DUT')\n", (4662, 4681), False, 'import os\n'), ((4819, 4855), 'os.path.join', 'os.path.join', 
(['UCNet_ABP_root', '"""LFSD"""'], {}), "(UCNet_ABP_root, 'LFSD')\n", (4831, 4855), False, 'import os\n'), ((4895, 4932), 'os.path.join', 'os.path.join', (['UCNet_ABP_root', '"""NJU2K"""'], {}), "(UCNet_ABP_root, 'NJU2K')\n", (4907, 4932), False, 'import os\n'), ((4972, 5008), 'os.path.join', 'os.path.join', (['UCNet_ABP_root', '"""NLPR"""'], {}), "(UCNet_ABP_root, 'NLPR')\n", (4984, 5008), False, 'import os\n'), ((5051, 5086), 'os.path.join', 'os.path.join', (['UCNet_ABP_root', '"""DES"""'], {}), "(UCNet_ABP_root, 'DES')\n", (5063, 5086), False, 'import os\n'), ((5125, 5160), 'os.path.join', 'os.path.join', (['UCNet_ABP_root', '"""SIP"""'], {}), "(UCNet_ABP_root, 'SIP')\n", (5137, 5160), False, 'import os\n'), ((5246, 5283), 'os.path.join', 'os.path.join', (['UCNet_ABP_root', '"""STERE"""'], {}), "(UCNet_ABP_root, 'STERE')\n", (5258, 5283), False, 'import os\n'), ((5445, 5482), 'os.path.join', 'os.path.join', (['UCNet_CVAE_root', '"""LFSD"""'], {}), "(UCNet_CVAE_root, 'LFSD')\n", (5457, 5482), False, 'import os\n'), ((5522, 5560), 'os.path.join', 'os.path.join', (['UCNet_CVAE_root', '"""NJU2K"""'], {}), "(UCNet_CVAE_root, 'NJU2K')\n", (5534, 5560), False, 'import os\n'), ((5600, 5637), 'os.path.join', 'os.path.join', (['UCNet_CVAE_root', '"""NLPR"""'], {}), "(UCNet_CVAE_root, 'NLPR')\n", (5612, 5637), False, 'import os\n'), ((5680, 5716), 'os.path.join', 'os.path.join', (['UCNet_CVAE_root', '"""DES"""'], {}), "(UCNet_CVAE_root, 'DES')\n", (5692, 5716), False, 'import os\n'), ((5755, 5791), 'os.path.join', 'os.path.join', (['UCNet_CVAE_root', '"""SIP"""'], {}), "(UCNet_CVAE_root, 'SIP')\n", (5767, 5791), False, 'import os\n'), ((5877, 5915), 'os.path.join', 'os.path.join', (['UCNet_CVAE_root', '"""STERE"""'], {}), "(UCNet_CVAE_root, 'STERE')\n", (5889, 5915), False, 'import os\n'), ((6043, 6084), 'os.path.join', 'os.path.join', (['CasGNN_root', '"""LFSD"""', '"""pred"""'], {}), "(CasGNN_root, 'LFSD', 'pred')\n", (6055, 6084), False, 'import os\n'), ((6124, 6165), 
'os.path.join', 'os.path.join', (['CasGNN_root', '"""NJUD"""', '"""pred"""'], {}), "(CasGNN_root, 'NJUD', 'pred')\n", (6136, 6165), False, 'import os\n'), ((6205, 6246), 'os.path.join', 'os.path.join', (['CasGNN_root', '"""NLPR"""', '"""pred"""'], {}), "(CasGNN_root, 'NLPR', 'pred')\n", (6217, 6246), False, 'import os\n'), ((6289, 6329), 'os.path.join', 'os.path.join', (['CasGNN_root', '"""DES"""', '"""pred"""'], {}), "(CasGNN_root, 'DES', 'pred')\n", (6301, 6329), False, 'import os\n'), ((6385, 6425), 'os.path.join', 'os.path.join', (['CasGNN_root', '"""SSD"""', '"""pred"""'], {}), "(CasGNN_root, 'SSD', 'pred')\n", (6397, 6425), False, 'import os\n'), ((6494, 6536), 'os.path.join', 'os.path.join', (['CasGNN_root', '"""STERE"""', '"""pred"""'], {}), "(CasGNN_root, 'STERE', 'pred')\n", (6506, 6536), False, 'import os\n'), ((6579, 6624), 'os.path.join', 'os.path.join', (['CasGNN_root', '"""DUT-RGBD"""', '"""pred"""'], {}), "(CasGNN_root, 'DUT-RGBD', 'pred')\n", (6591, 6624), False, 'import os\n'), ((6756, 6794), 'os.path.join', 'os.path.join', (['DANet_VGG16_root', '"""LFSD"""'], {}), "(DANet_VGG16_root, 'LFSD')\n", (6768, 6794), False, 'import os\n'), ((6834, 6872), 'os.path.join', 'os.path.join', (['DANet_VGG16_root', '"""NJUD"""'], {}), "(DANet_VGG16_root, 'NJUD')\n", (6846, 6872), False, 'import os\n'), ((6912, 6950), 'os.path.join', 'os.path.join', (['DANet_VGG16_root', '"""NLPR"""'], {}), "(DANet_VGG16_root, 'NLPR')\n", (6924, 6950), False, 'import os\n'), ((6993, 7034), 'os.path.join', 'os.path.join', (['DANet_VGG16_root', '"""RGBD135"""'], {}), "(DANet_VGG16_root, 'RGBD135')\n", (7005, 7034), False, 'import os\n'), ((7073, 7110), 'os.path.join', 'os.path.join', (['DANet_VGG16_root', '"""SIP"""'], {}), "(DANet_VGG16_root, 'SIP')\n", (7085, 7110), False, 'import os\n'), ((7149, 7186), 'os.path.join', 'os.path.join', (['DANet_VGG16_root', '"""SSD"""'], {}), "(DANet_VGG16_root, 'SSD')\n", (7161, 7186), False, 'import os\n'), ((7255, 7294), 'os.path.join', 
'os.path.join', (['DANet_VGG16_root', '"""STERE"""'], {}), "(DANet_VGG16_root, 'STERE')\n", (7267, 7294), False, 'import os\n'), ((7337, 7379), 'os.path.join', 'os.path.join', (['DANet_VGG16_root', '"""DUT-RGBD"""'], {}), "(DANet_VGG16_root, 'DUT-RGBD')\n", (7349, 7379), False, 'import os\n'), ((7511, 7549), 'os.path.join', 'os.path.join', (['DANet_VGG19_root', '"""LFSD"""'], {}), "(DANet_VGG19_root, 'LFSD')\n", (7523, 7549), False, 'import os\n'), ((7589, 7627), 'os.path.join', 'os.path.join', (['DANet_VGG19_root', '"""NJUD"""'], {}), "(DANet_VGG19_root, 'NJUD')\n", (7601, 7627), False, 'import os\n'), ((7667, 7705), 'os.path.join', 'os.path.join', (['DANet_VGG19_root', '"""NLPR"""'], {}), "(DANet_VGG19_root, 'NLPR')\n", (7679, 7705), False, 'import os\n'), ((7748, 7789), 'os.path.join', 'os.path.join', (['DANet_VGG19_root', '"""RGBD135"""'], {}), "(DANet_VGG19_root, 'RGBD135')\n", (7760, 7789), False, 'import os\n'), ((7828, 7865), 'os.path.join', 'os.path.join', (['DANet_VGG19_root', '"""SIP"""'], {}), "(DANet_VGG19_root, 'SIP')\n", (7840, 7865), False, 'import os\n'), ((7904, 7941), 'os.path.join', 'os.path.join', (['DANet_VGG19_root', '"""SSD"""'], {}), "(DANet_VGG19_root, 'SSD')\n", (7916, 7941), False, 'import os\n'), ((8010, 8049), 'os.path.join', 'os.path.join', (['DANet_VGG19_root', '"""STERE"""'], {}), "(DANet_VGG19_root, 'STERE')\n", (8022, 8049), False, 'import os\n'), ((8092, 8134), 'os.path.join', 'os.path.join', (['DANet_VGG19_root', '"""DUT-RGBD"""'], {}), "(DANet_VGG19_root, 'DUT-RGBD')\n", (8104, 8134), False, 'import os\n'), ((8235, 8266), 'os.path.join', 'os.path.join', (['PGAR_root', '"""LFSD"""'], {}), "(PGAR_root, 'LFSD')\n", (8247, 8266), False, 'import os\n'), ((8306, 8342), 'os.path.join', 'os.path.join', (['PGAR_root', '"""NJUD_test"""'], {}), "(PGAR_root, 'NJUD_test')\n", (8318, 8342), False, 'import os\n'), ((8382, 8418), 'os.path.join', 'os.path.join', (['PGAR_root', '"""NLPR_test"""'], {}), "(PGAR_root, 'NLPR_test')\n", (8394, 8418), 
False, 'import os\n'), ((8461, 8495), 'os.path.join', 'os.path.join', (['PGAR_root', '"""RGBD135"""'], {}), "(PGAR_root, 'RGBD135')\n", (8473, 8495), False, 'import os\n'), ((8534, 8564), 'os.path.join', 'os.path.join', (['PGAR_root', '"""SIP"""'], {}), "(PGAR_root, 'SIP')\n", (8546, 8564), False, 'import os\n'), ((8650, 8682), 'os.path.join', 'os.path.join', (['PGAR_root', '"""STERE"""'], {}), "(PGAR_root, 'STERE')\n", (8662, 8682), False, 'import os\n'), ((8725, 8760), 'os.path.join', 'os.path.join', (['PGAR_root', '"""DUT-RGBD"""'], {}), "(PGAR_root, 'DUT-RGBD')\n", (8737, 8760), False, 'import os\n'), ((8881, 8917), 'os.path.join', 'os.path.join', (['DisenFuse_root', '"""LFSD"""'], {}), "(DisenFuse_root, 'LFSD')\n", (8893, 8917), False, 'import os\n'), ((8957, 8993), 'os.path.join', 'os.path.join', (['DisenFuse_root', '"""NJUD"""'], {}), "(DisenFuse_root, 'NJUD')\n", (8969, 8993), False, 'import os\n'), ((9033, 9069), 'os.path.join', 'os.path.join', (['DisenFuse_root', '"""NLPR"""'], {}), "(DisenFuse_root, 'NLPR')\n", (9045, 9069), False, 'import os\n'), ((9112, 9147), 'os.path.join', 'os.path.join', (['DisenFuse_root', '"""DES"""'], {}), "(DisenFuse_root, 'DES')\n", (9124, 9147), False, 'import os\n'), ((9186, 9221), 'os.path.join', 'os.path.join', (['DisenFuse_root', '"""SIP"""'], {}), "(DisenFuse_root, 'SIP')\n", (9198, 9221), False, 'import os\n'), ((9307, 9349), 'os.path.join', 'os.path.join', (['DisenFuse_root', '"""STEREO1000"""'], {}), "(DisenFuse_root, 'STEREO1000')\n", (9319, 9349), False, 'import os\n'), ((9392, 9427), 'os.path.join', 'os.path.join', (['DisenFuse_root', '"""DUT"""'], {}), "(DisenFuse_root, 'DUT')\n", (9404, 9427), False, 'import os\n'), ((9533, 9566), 'os.path.join', 'os.path.join', (['DPANet_root', '"""LFSD"""'], {}), "(DPANet_root, 'LFSD')\n", (9545, 9566), False, 'import os\n'), ((9606, 9639), 'os.path.join', 'os.path.join', (['DPANet_root', '"""NJUD"""'], {}), "(DPANet_root, 'NJUD')\n", (9618, 9639), False, 'import os\n'), 
((9679, 9712), 'os.path.join', 'os.path.join', (['DPANet_root', '"""NLPR"""'], {}), "(DPANet_root, 'NLPR')\n", (9691, 9712), False, 'import os\n'), ((9755, 9791), 'os.path.join', 'os.path.join', (['DPANet_root', '"""RGBD135"""'], {}), "(DPANet_root, 'RGBD135')\n", (9767, 9791), False, 'import os\n'), ((9830, 9862), 'os.path.join', 'os.path.join', (['DPANet_root', '"""SIP"""'], {}), "(DPANet_root, 'SIP')\n", (9842, 9862), False, 'import os\n'), ((9901, 9936), 'os.path.join', 'os.path.join', (['DPANet_root', '"""SSD100"""'], {}), "(DPANet_root, 'SSD100')\n", (9913, 9936), False, 'import os\n'), ((9981, 10019), 'os.path.join', 'os.path.join', (['DPANet_root', '"""STEREO797"""'], {}), "(DPANet_root, 'STEREO797')\n", (9993, 10019), False, 'import os\n'), ((10086, 10118), 'os.path.join', 'os.path.join', (['DPANet_root', '"""DUT"""'], {}), "(DPANet_root, 'DUT')\n", (10098, 10118), False, 'import os\n'), ((10221, 10253), 'os.path.join', 'os.path.join', (['ICNet_root', '"""LFSD"""'], {}), "(ICNet_root, 'LFSD')\n", (10233, 10253), False, 'import os\n'), ((10293, 10326), 'os.path.join', 'os.path.join', (['ICNet_root', '"""NJU2K"""'], {}), "(ICNet_root, 'NJU2K')\n", (10305, 10326), False, 'import os\n'), ((10366, 10398), 'os.path.join', 'os.path.join', (['ICNet_root', '"""NLPR"""'], {}), "(ICNet_root, 'NLPR')\n", (10378, 10398), False, 'import os\n'), ((10441, 10472), 'os.path.join', 'os.path.join', (['ICNet_root', '"""DES"""'], {}), "(ICNet_root, 'DES')\n", (10453, 10472), False, 'import os\n'), ((10511, 10542), 'os.path.join', 'os.path.join', (['ICNet_root', '"""SIP"""'], {}), "(ICNet_root, 'SIP')\n", (10523, 10542), False, 'import os\n'), ((10581, 10612), 'os.path.join', 'os.path.join', (['ICNet_root', '"""SSD"""'], {}), "(ICNet_root, 'SSD')\n", (10593, 10612), False, 'import os\n'), ((10681, 10714), 'os.path.join', 'os.path.join', (['ICNet_root', '"""STERE"""'], {}), "(ICNet_root, 'STERE')\n", (10693, 10714), False, 'import os\n'), ((10757, 10793), 'os.path.join', 
'os.path.join', (['ICNet_root', '"""DUT-RGBD"""'], {}), "(ICNet_root, 'DUT-RGBD')\n", (10769, 10793), False, 'import os\n'), ((10898, 10930), 'os.path.join', 'os.path.join', (['D3Net_root', '"""LFSD"""'], {}), "(D3Net_root, 'LFSD')\n", (10910, 10930), False, 'import os\n'), ((10970, 11008), 'os.path.join', 'os.path.join', (['D3Net_root', '"""NJU2K_TEST"""'], {}), "(D3Net_root, 'NJU2K_TEST')\n", (10982, 11008), False, 'import os\n'), ((11048, 11085), 'os.path.join', 'os.path.join', (['D3Net_root', '"""NLPR_TEST"""'], {}), "(D3Net_root, 'NLPR_TEST')\n", (11060, 11085), False, 'import os\n'), ((11128, 11159), 'os.path.join', 'os.path.join', (['D3Net_root', '"""DES"""'], {}), "(D3Net_root, 'DES')\n", (11140, 11159), False, 'import os\n'), ((11198, 11229), 'os.path.join', 'os.path.join', (['D3Net_root', '"""SIP"""'], {}), "(D3Net_root, 'SIP')\n", (11210, 11229), False, 'import os\n'), ((11268, 11299), 'os.path.join', 'os.path.join', (['D3Net_root', '"""SSD"""'], {}), "(D3Net_root, 'SSD')\n", (11280, 11299), False, 'import os\n'), ((11368, 11401), 'os.path.join', 'os.path.join', (['D3Net_root', '"""STERE"""'], {}), "(D3Net_root, 'STERE')\n", (11380, 11401), False, 'import os\n'), ((11444, 11485), 'os.path.join', 'os.path.join', (['D3Net_root', '"""DUT-RGBD_TEST"""'], {}), "(D3Net_root, 'DUT-RGBD_TEST')\n", (11456, 11485), False, 'import os\n'), ((11586, 11617), 'os.path.join', 'os.path.join', (['RD3D_root', '"""LFSD"""'], {}), "(RD3D_root, 'LFSD')\n", (11598, 11617), False, 'import os\n'), ((11657, 11691), 'os.path.join', 'os.path.join', (['RD3D_root', '"""NJU2000"""'], {}), "(RD3D_root, 'NJU2000')\n", (11669, 11691), False, 'import os\n'), ((11731, 11762), 'os.path.join', 'os.path.join', (['RD3D_root', '"""NLPR"""'], {}), "(RD3D_root, 'NLPR')\n", (11743, 11762), False, 'import os\n'), ((11805, 11839), 'os.path.join', 'os.path.join', (['RD3D_root', '"""RGBD135"""'], {}), "(RD3D_root, 'RGBD135')\n", (11817, 11839), False, 'import os\n'), ((11878, 11908), 'os.path.join', 
'os.path.join', (['RD3D_root', '"""SIP"""'], {}), "(RD3D_root, 'SIP')\n", (11890, 11908), False, 'import os\n'), ((11994, 12026), 'os.path.join', 'os.path.join', (['RD3D_root', '"""STERE"""'], {}), "(RD3D_root, 'STERE')\n", (12006, 12026), False, 'import os\n'), ((12069, 12099), 'os.path.join', 'os.path.join', (['RD3D_root', '"""DUT"""'], {}), "(RD3D_root, 'DUT')\n", (12081, 12099), False, 'import os\n'), ((12193, 12225), 'os.path.join', 'os.path.join', (['AFNet_root', '"""LFSD"""'], {}), "(AFNet_root, 'LFSD')\n", (12205, 12225), False, 'import os\n'), ((12265, 12303), 'os.path.join', 'os.path.join', (['AFNet_root', '"""NJU2K-TEST"""'], {}), "(AFNet_root, 'NJU2K-TEST')\n", (12277, 12303), False, 'import os\n'), ((12343, 12380), 'os.path.join', 'os.path.join', (['AFNet_root', '"""NLPR-TEST"""'], {}), "(AFNet_root, 'NLPR-TEST')\n", (12355, 12380), False, 'import os\n'), ((12423, 12454), 'os.path.join', 'os.path.join', (['AFNet_root', '"""DES"""'], {}), "(AFNet_root, 'DES')\n", (12435, 12454), False, 'import os\n'), ((12493, 12524), 'os.path.join', 'os.path.join', (['AFNet_root', '"""SIP"""'], {}), "(AFNet_root, 'SIP')\n", (12505, 12524), False, 'import os\n'), ((12563, 12594), 'os.path.join', 'os.path.join', (['AFNet_root', '"""SSD"""'], {}), "(AFNet_root, 'SSD')\n", (12575, 12594), False, 'import os\n'), ((12639, 12673), 'os.path.join', 'os.path.join', (['AFNet_root', '"""STEREO"""'], {}), "(AFNet_root, 'STEREO')\n", (12651, 12673), False, 'import os\n'), ((12719, 12752), 'os.path.join', 'os.path.join', (['AFNet_root', '"""STERE"""'], {}), "(AFNet_root, 'STERE')\n", (12731, 12752), False, 'import os\n'), ((12864, 12895), 'os.path.join', 'os.path.join', (['CDCP_root', '"""LFSD"""'], {}), "(CDCP_root, 'LFSD')\n", (12876, 12895), False, 'import os\n'), ((12935, 12966), 'os.path.join', 'os.path.join', (['CDCP_root', '"""NJUD"""'], {}), "(CDCP_root, 'NJUD')\n", (12947, 12966), False, 'import os\n'), ((13006, 13037), 'os.path.join', 'os.path.join', (['CDCP_root', 
'"""NLPR"""'], {}), "(CDCP_root, 'NLPR')\n", (13018, 13037), False, 'import os\n'), ((13080, 13114), 'os.path.join', 'os.path.join', (['CDCP_root', '"""RGBD135"""'], {}), "(CDCP_root, 'RGBD135')\n", (13092, 13114), False, 'import os\n'), ((13153, 13183), 'os.path.join', 'os.path.join', (['CDCP_root', '"""SIP"""'], {}), "(CDCP_root, 'SIP')\n", (13165, 13183), False, 'import os\n'), ((13222, 13252), 'os.path.join', 'os.path.join', (['CDCP_root', '"""SSD"""'], {}), "(CDCP_root, 'SSD')\n", (13234, 13252), False, 'import os\n'), ((13321, 13353), 'os.path.join', 'os.path.join', (['CDCP_root', '"""STERE"""'], {}), "(CDCP_root, 'STERE')\n", (13333, 13353), False, 'import os\n'), ((13396, 13431), 'os.path.join', 'os.path.join', (['CDCP_root', '"""DUT-RGBD"""'], {}), "(CDCP_root, 'DUT-RGBD')\n", (13408, 13431), False, 'import os\n'), ((13522, 13553), 'os.path.join', 'os.path.join', (['CPFP_root', '"""LFSD"""'], {}), "(CPFP_root, 'LFSD')\n", (13534, 13553), False, 'import os\n'), ((13593, 13624), 'os.path.join', 'os.path.join', (['CPFP_root', '"""NJUD"""'], {}), "(CPFP_root, 'NJUD')\n", (13605, 13624), False, 'import os\n'), ((13664, 13695), 'os.path.join', 'os.path.join', (['CPFP_root', '"""NLPR"""'], {}), "(CPFP_root, 'NLPR')\n", (13676, 13695), False, 'import os\n'), ((13738, 13772), 'os.path.join', 'os.path.join', (['CPFP_root', '"""RGBD135"""'], {}), "(CPFP_root, 'RGBD135')\n", (13750, 13772), False, 'import os\n'), ((13811, 13841), 'os.path.join', 'os.path.join', (['CPFP_root', '"""SIP"""'], {}), "(CPFP_root, 'SIP')\n", (13823, 13841), False, 'import os\n'), ((13880, 13910), 'os.path.join', 'os.path.join', (['CPFP_root', '"""SSD"""'], {}), "(CPFP_root, 'SSD')\n", (13892, 13910), False, 'import os\n'), ((13979, 14011), 'os.path.join', 'os.path.join', (['CPFP_root', '"""STERE"""'], {}), "(CPFP_root, 'STERE')\n", (13991, 14011), False, 'import os\n'), ((14054, 14089), 'os.path.join', 'os.path.join', (['CPFP_root', '"""DUT-RGBD"""'], {}), "(CPFP_root, 'DUT-RGBD')\n", 
(14066, 14089), False, 'import os\n'), ((14180, 14211), 'os.path.join', 'os.path.join', (['CTMF_root', '"""LFSD"""'], {}), "(CTMF_root, 'LFSD')\n", (14192, 14211), False, 'import os\n'), ((14251, 14282), 'os.path.join', 'os.path.join', (['CTMF_root', '"""NJUD"""'], {}), "(CTMF_root, 'NJUD')\n", (14263, 14282), False, 'import os\n'), ((14322, 14353), 'os.path.join', 'os.path.join', (['CTMF_root', '"""NLPR"""'], {}), "(CTMF_root, 'NLPR')\n", (14334, 14353), False, 'import os\n'), ((14396, 14430), 'os.path.join', 'os.path.join', (['CTMF_root', '"""RGBD135"""'], {}), "(CTMF_root, 'RGBD135')\n", (14408, 14430), False, 'import os\n'), ((14469, 14499), 'os.path.join', 'os.path.join', (['CTMF_root', '"""SIP"""'], {}), "(CTMF_root, 'SIP')\n", (14481, 14499), False, 'import os\n'), ((14538, 14568), 'os.path.join', 'os.path.join', (['CTMF_root', '"""SSD"""'], {}), "(CTMF_root, 'SSD')\n", (14550, 14568), False, 'import os\n'), ((14637, 14669), 'os.path.join', 'os.path.join', (['CTMF_root', '"""STERE"""'], {}), "(CTMF_root, 'STERE')\n", (14649, 14669), False, 'import os\n'), ((14712, 14747), 'os.path.join', 'os.path.join', (['CTMF_root', '"""DUT-RGBD"""'], {}), "(CTMF_root, 'DUT-RGBD')\n", (14724, 14747), False, 'import os\n'), ((14838, 14869), 'os.path.join', 'os.path.join', (['DCMC_root', '"""LFSD"""'], {}), "(DCMC_root, 'LFSD')\n", (14850, 14869), False, 'import os\n'), ((14909, 14940), 'os.path.join', 'os.path.join', (['DCMC_root', '"""NJUD"""'], {}), "(DCMC_root, 'NJUD')\n", (14921, 14940), False, 'import os\n'), ((14980, 15011), 'os.path.join', 'os.path.join', (['DCMC_root', '"""NLPR"""'], {}), "(DCMC_root, 'NLPR')\n", (14992, 15011), False, 'import os\n'), ((15054, 15088), 'os.path.join', 'os.path.join', (['DCMC_root', '"""RGBD135"""'], {}), "(DCMC_root, 'RGBD135')\n", (15066, 15088), False, 'import os\n'), ((15127, 15157), 'os.path.join', 'os.path.join', (['DCMC_root', '"""SIP"""'], {}), "(DCMC_root, 'SIP')\n", (15139, 15157), False, 'import os\n'), ((15196, 15226), 
'os.path.join', 'os.path.join', (['DCMC_root', '"""SSD"""'], {}), "(DCMC_root, 'SSD')\n", (15208, 15226), False, 'import os\n'), ((15271, 15304), 'os.path.join', 'os.path.join', (['DCMC_root', '"""STEREO"""'], {}), "(DCMC_root, 'STEREO')\n", (15283, 15304), False, 'import os\n'), ((15350, 15382), 'os.path.join', 'os.path.join', (['DCMC_root', '"""STERE"""'], {}), "(DCMC_root, 'STERE')\n", (15362, 15382), False, 'import os\n'), ((15425, 15460), 'os.path.join', 'os.path.join', (['DCMC_root', '"""DUT-RGBD"""'], {}), "(DCMC_root, 'DUT-RGBD')\n", (15437, 15460), False, 'import os\n'), ((15548, 15578), 'os.path.join', 'os.path.join', (['DES_root', '"""LFSD"""'], {}), "(DES_root, 'LFSD')\n", (15560, 15578), False, 'import os\n'), ((15618, 15648), 'os.path.join', 'os.path.join', (['DES_root', '"""NJUD"""'], {}), "(DES_root, 'NJUD')\n", (15630, 15648), False, 'import os\n'), ((15688, 15718), 'os.path.join', 'os.path.join', (['DES_root', '"""NLPR"""'], {}), "(DES_root, 'NLPR')\n", (15700, 15718), False, 'import os\n'), ((15761, 15794), 'os.path.join', 'os.path.join', (['DES_root', '"""RGBD135"""'], {}), "(DES_root, 'RGBD135')\n", (15773, 15794), False, 'import os\n'), ((15833, 15862), 'os.path.join', 'os.path.join', (['DES_root', '"""SIP"""'], {}), "(DES_root, 'SIP')\n", (15845, 15862), False, 'import os\n'), ((15901, 15930), 'os.path.join', 'os.path.join', (['DES_root', '"""SSD"""'], {}), "(DES_root, 'SSD')\n", (15913, 15930), False, 'import os\n'), ((15975, 16007), 'os.path.join', 'os.path.join', (['DES_root', '"""STEREO"""'], {}), "(DES_root, 'STEREO')\n", (15987, 16007), False, 'import os\n'), ((16053, 16084), 'os.path.join', 'os.path.join', (['DES_root', '"""STERE"""'], {}), "(DES_root, 'STERE')\n", (16065, 16084), False, 'import os\n'), ((16127, 16161), 'os.path.join', 'os.path.join', (['DES_root', '"""DUT-RGBD"""'], {}), "(DES_root, 'DUT-RGBD')\n", (16139, 16161), False, 'import os\n'), ((16246, 16275), 'os.path.join', 'os.path.join', (['DF_root', '"""LFSD"""'], {}), 
"(DF_root, 'LFSD')\n", (16258, 16275), False, 'import os\n'), ((16315, 16344), 'os.path.join', 'os.path.join', (['DF_root', '"""NJUD"""'], {}), "(DF_root, 'NJUD')\n", (16327, 16344), False, 'import os\n'), ((16384, 16413), 'os.path.join', 'os.path.join', (['DF_root', '"""NLPR"""'], {}), "(DF_root, 'NLPR')\n", (16396, 16413), False, 'import os\n'), ((16456, 16488), 'os.path.join', 'os.path.join', (['DF_root', '"""RGBD135"""'], {}), "(DF_root, 'RGBD135')\n", (16468, 16488), False, 'import os\n'), ((16527, 16559), 'os.path.join', 'os.path.join', (['DF_root', '"""SIP/SIP"""'], {}), "(DF_root, 'SIP/SIP')\n", (16539, 16559), False, 'import os\n'), ((16598, 16626), 'os.path.join', 'os.path.join', (['DF_root', '"""SSD"""'], {}), "(DF_root, 'SSD')\n", (16610, 16626), False, 'import os\n'), ((16671, 16702), 'os.path.join', 'os.path.join', (['DF_root', '"""STEREO"""'], {}), "(DF_root, 'STEREO')\n", (16683, 16702), False, 'import os\n'), ((16748, 16778), 'os.path.join', 'os.path.join', (['DF_root', '"""STERE"""'], {}), "(DF_root, 'STERE')\n", (16760, 16778), False, 'import os\n'), ((16821, 16854), 'os.path.join', 'os.path.join', (['DF_root', '"""DUT-RGBD"""'], {}), "(DF_root, 'DUT-RGBD')\n", (16833, 16854), False, 'import os\n'), ((16945, 16976), 'os.path.join', 'os.path.join', (['DMRA_root', '"""LFSD"""'], {}), "(DMRA_root, 'LFSD')\n", (16957, 16976), False, 'import os\n'), ((17016, 17047), 'os.path.join', 'os.path.join', (['DMRA_root', '"""NJUD"""'], {}), "(DMRA_root, 'NJUD')\n", (17028, 17047), False, 'import os\n'), ((17087, 17118), 'os.path.join', 'os.path.join', (['DMRA_root', '"""NLPR"""'], {}), "(DMRA_root, 'NLPR')\n", (17099, 17118), False, 'import os\n'), ((17161, 17195), 'os.path.join', 'os.path.join', (['DMRA_root', '"""RGBD135"""'], {}), "(DMRA_root, 'RGBD135')\n", (17173, 17195), False, 'import os\n'), ((17234, 17275), 'os.path.join', 'os.path.join', (['DMRA_root', '"""SIP_FromAuthor"""'], {}), "(DMRA_root, 'SIP_FromAuthor')\n", (17246, 17275), False, 'import 
os\n'), ((17314, 17344), 'os.path.join', 'os.path.join', (['DMRA_root', '"""SSD"""'], {}), "(DMRA_root, 'SSD')\n", (17326, 17344), False, 'import os\n'), ((17389, 17422), 'os.path.join', 'os.path.join', (['DMRA_root', '"""STEREO"""'], {}), "(DMRA_root, 'STEREO')\n", (17401, 17422), False, 'import os\n'), ((17468, 17500), 'os.path.join', 'os.path.join', (['DMRA_root', '"""STERE"""'], {}), "(DMRA_root, 'STERE')\n", (17480, 17500), False, 'import os\n'), ((17543, 17578), 'os.path.join', 'os.path.join', (['DMRA_root', '"""DUT-RGBD"""'], {}), "(DMRA_root, 'DUT-RGBD')\n", (17555, 17578), False, 'import os\n'), ((17663, 17692), 'os.path.join', 'os.path.join', (['MB_root', '"""LFSD"""'], {}), "(MB_root, 'LFSD')\n", (17675, 17692), False, 'import os\n'), ((17732, 17761), 'os.path.join', 'os.path.join', (['MB_root', '"""NJUD"""'], {}), "(MB_root, 'NJUD')\n", (17744, 17761), False, 'import os\n'), ((17801, 17830), 'os.path.join', 'os.path.join', (['MB_root', '"""NLPR"""'], {}), "(MB_root, 'NLPR')\n", (17813, 17830), False, 'import os\n'), ((17873, 17905), 'os.path.join', 'os.path.join', (['MB_root', '"""RGBD135"""'], {}), "(MB_root, 'RGBD135')\n", (17885, 17905), False, 'import os\n'), ((17961, 17989), 'os.path.join', 'os.path.join', (['MB_root', '"""SSD"""'], {}), "(MB_root, 'SSD')\n", (17973, 17989), False, 'import os\n'), ((18034, 18065), 'os.path.join', 'os.path.join', (['MB_root', '"""STEREO"""'], {}), "(MB_root, 'STEREO')\n", (18046, 18065), False, 'import os\n'), ((18132, 18165), 'os.path.join', 'os.path.join', (['MB_root', '"""DUT-RGBD"""'], {}), "(MB_root, 'DUT-RGBD')\n", (18144, 18165), False, 'import os\n'), ((18256, 18287), 'os.path.join', 'os.path.join', (['MMCI_root', '"""LFSD"""'], {}), "(MMCI_root, 'LFSD')\n", (18268, 18287), False, 'import os\n'), ((18327, 18358), 'os.path.join', 'os.path.join', (['MMCI_root', '"""NJUD"""'], {}), "(MMCI_root, 'NJUD')\n", (18339, 18358), False, 'import os\n'), ((18398, 18429), 'os.path.join', 'os.path.join', (['MMCI_root', 
'"""NLPR"""'], {}), "(MMCI_root, 'NLPR')\n", (18410, 18429), False, 'import os\n'), ((18472, 18506), 'os.path.join', 'os.path.join', (['MMCI_root', '"""RGBD135"""'], {}), "(MMCI_root, 'RGBD135')\n", (18484, 18506), False, 'import os\n'), ((18545, 18575), 'os.path.join', 'os.path.join', (['MMCI_root', '"""SIP"""'], {}), "(MMCI_root, 'SIP')\n", (18557, 18575), False, 'import os\n'), ((18614, 18644), 'os.path.join', 'os.path.join', (['MMCI_root', '"""SSD"""'], {}), "(MMCI_root, 'SSD')\n", (18626, 18644), False, 'import os\n'), ((18689, 18722), 'os.path.join', 'os.path.join', (['MMCI_root', '"""STEREO"""'], {}), "(MMCI_root, 'STEREO')\n", (18701, 18722), False, 'import os\n'), ((18768, 18800), 'os.path.join', 'os.path.join', (['MMCI_root', '"""STERE"""'], {}), "(MMCI_root, 'STERE')\n", (18780, 18800), False, 'import os\n'), ((18843, 18878), 'os.path.join', 'os.path.join', (['MMCI_root', '"""DUT-RGBD"""'], {}), "(MMCI_root, 'DUT-RGBD')\n", (18855, 18878), False, 'import os\n'), ((18969, 19000), 'os.path.join', 'os.path.join', (['NLPR_root', '"""LFSD"""'], {}), "(NLPR_root, 'LFSD')\n", (18981, 19000), False, 'import os\n'), ((19040, 19071), 'os.path.join', 'os.path.join', (['NLPR_root', '"""NJUD"""'], {}), "(NLPR_root, 'NJUD')\n", (19052, 19071), False, 'import os\n'), ((19111, 19142), 'os.path.join', 'os.path.join', (['NLPR_root', '"""NLPR"""'], {}), "(NLPR_root, 'NLPR')\n", (19123, 19142), False, 'import os\n'), ((19185, 19219), 'os.path.join', 'os.path.join', (['NLPR_root', '"""RGBD135"""'], {}), "(NLPR_root, 'RGBD135')\n", (19197, 19219), False, 'import os\n'), ((19258, 19288), 'os.path.join', 'os.path.join', (['NLPR_root', '"""SIP"""'], {}), "(NLPR_root, 'SIP')\n", (19270, 19288), False, 'import os\n'), ((19327, 19357), 'os.path.join', 'os.path.join', (['NLPR_root', '"""SSD"""'], {}), "(NLPR_root, 'SSD')\n", (19339, 19357), False, 'import os\n'), ((19402, 19439), 'os.path.join', 'os.path.join', (['NLPR_root', '"""STEREO-797"""'], {}), "(NLPR_root, 'STEREO-797')\n", 
(19414, 19439), False, 'import os\n'), ((19485, 19517), 'os.path.join', 'os.path.join', (['NLPR_root', '"""STERE"""'], {}), "(NLPR_root, 'STERE')\n", (19497, 19517), False, 'import os\n'), ((19560, 19595), 'os.path.join', 'os.path.join', (['NLPR_root', '"""DUT-RGBD"""'], {}), "(NLPR_root, 'DUT-RGBD')\n", (19572, 19595), False, 'import os\n'), ((19692, 19725), 'os.path.join', 'os.path.join', (['PCANet_root', '"""LFSD"""'], {}), "(PCANet_root, 'LFSD')\n", (19704, 19725), False, 'import os\n'), ((19765, 19798), 'os.path.join', 'os.path.join', (['PCANet_root', '"""NJUD"""'], {}), "(PCANet_root, 'NJUD')\n", (19777, 19798), False, 'import os\n'), ((19838, 19871), 'os.path.join', 'os.path.join', (['PCANet_root', '"""NLPR"""'], {}), "(PCANet_root, 'NLPR')\n", (19850, 19871), False, 'import os\n'), ((19914, 19950), 'os.path.join', 'os.path.join', (['PCANet_root', '"""RGBD135"""'], {}), "(PCANet_root, 'RGBD135')\n", (19926, 19950), False, 'import os\n'), ((19989, 20021), 'os.path.join', 'os.path.join', (['PCANet_root', '"""SIP"""'], {}), "(PCANet_root, 'SIP')\n", (20001, 20021), False, 'import os\n'), ((20060, 20092), 'os.path.join', 'os.path.join', (['PCANet_root', '"""SSD"""'], {}), "(PCANet_root, 'SSD')\n", (20072, 20092), False, 'import os\n'), ((20137, 20172), 'os.path.join', 'os.path.join', (['PCANet_root', '"""STEREO"""'], {}), "(PCANet_root, 'STEREO')\n", (20149, 20172), False, 'import os\n'), ((20218, 20252), 'os.path.join', 'os.path.join', (['PCANet_root', '"""STERE"""'], {}), "(PCANet_root, 'STERE')\n", (20230, 20252), False, 'import os\n'), ((20295, 20332), 'os.path.join', 'os.path.join', (['PCANet_root', '"""DUT-RGBD"""'], {}), "(PCANet_root, 'DUT-RGBD')\n", (20307, 20332), False, 'import os\n'), ((20426, 20458), 'os.path.join', 'os.path.join', (['PDNet_root', '"""LFSD"""'], {}), "(PDNet_root, 'LFSD')\n", (20438, 20458), False, 'import os\n'), ((20498, 20530), 'os.path.join', 'os.path.join', (['PDNet_root', '"""NJUD"""'], {}), "(PDNet_root, 'NJUD')\n", (20510, 
20530), False, 'import os\n'), ((20570, 20602), 'os.path.join', 'os.path.join', (['PDNet_root', '"""NLPR"""'], {}), "(PDNet_root, 'NLPR')\n", (20582, 20602), False, 'import os\n'), ((20645, 20680), 'os.path.join', 'os.path.join', (['PDNet_root', '"""RGBD135"""'], {}), "(PDNet_root, 'RGBD135')\n", (20657, 20680), False, 'import os\n'), ((20736, 20767), 'os.path.join', 'os.path.join', (['PDNet_root', '"""SSD"""'], {}), "(PDNet_root, 'SSD')\n", (20748, 20767), False, 'import os\n'), ((20812, 20846), 'os.path.join', 'os.path.join', (['PDNet_root', '"""STEREO"""'], {}), "(PDNet_root, 'STEREO')\n", (20824, 20846), False, 'import os\n'), ((20913, 20949), 'os.path.join', 'os.path.join', (['PDNet_root', '"""DUT-RGBD"""'], {}), "(PDNet_root, 'DUT-RGBD')\n", (20925, 20949), False, 'import os\n'), ((21043, 21075), 'os.path.join', 'os.path.join', (['TANet_root', '"""LFSD"""'], {}), "(TANet_root, 'LFSD')\n", (21055, 21075), False, 'import os\n'), ((21115, 21147), 'os.path.join', 'os.path.join', (['TANet_root', '"""NJUD"""'], {}), "(TANet_root, 'NJUD')\n", (21127, 21147), False, 'import os\n'), ((21187, 21219), 'os.path.join', 'os.path.join', (['TANet_root', '"""NLPR"""'], {}), "(TANet_root, 'NLPR')\n", (21199, 21219), False, 'import os\n'), ((21262, 21297), 'os.path.join', 'os.path.join', (['TANet_root', '"""RGBD135"""'], {}), "(TANet_root, 'RGBD135')\n", (21274, 21297), False, 'import os\n'), ((21336, 21367), 'os.path.join', 'os.path.join', (['TANet_root', '"""SIP"""'], {}), "(TANet_root, 'SIP')\n", (21348, 21367), False, 'import os\n'), ((21406, 21437), 'os.path.join', 'os.path.join', (['TANet_root', '"""SSD"""'], {}), "(TANet_root, 'SSD')\n", (21418, 21437), False, 'import os\n'), ((21482, 21516), 'os.path.join', 'os.path.join', (['TANet_root', '"""STEREO"""'], {}), "(TANet_root, 'STEREO')\n", (21494, 21516), False, 'import os\n'), ((21562, 21595), 'os.path.join', 'os.path.join', (['TANet_root', '"""STERE"""'], {}), "(TANet_root, 'STERE')\n", (21574, 21595), False, 'import 
os\n'), ((21638, 21674), 'os.path.join', 'os.path.join', (['TANet_root', '"""DUT-RGBD"""'], {}), "(TANet_root, 'DUT-RGBD')\n", (21650, 21674), False, 'import os\n')]
|
"""
Extract MNI coordinates for all brain maps.
Created on Fri May 24 11:26:07 2019
@author: <NAME> <<EMAIL>>
"""
import mne
import numpy as np
from summarize_clusters_stc_AT import summarize_clusters_stc_AT
import csv
#%% for one-sample T-test whether ISCs are significant
results_path = '/media/cbru/SMEDY/results/ISCs_comp_against_0/'
fres = {'5.000000e-01-4Hz', '4-8Hz', '8-12Hz', '12-25Hz', '25-45Hz', '55-90Hz'}
condition = '_1'  # 1 speech, 2 rest
win = '_613'  # '_579' #
groups = {'con_', 'dys_'}
# fsaverage ico-5 source space: 10242 vertices per hemisphere,
# stacked lh-then-rh in the cluster summary (flat indices 0..20483).
N_LH_VERTICES = 10242
fsave_vertices = [np.arange(N_LH_VERTICES), np.arange(N_LH_VERTICES)]

for fre in fres:
    for group in groups:
        # Load the permutation-cluster output for this group / frequency band.
        T_obs, clusters, cluster_p_values, H0 = clu =\
            np.load(results_path + 't_clu_' + group + fre + win + condition + '.npy')
        stc_all_cluster_vis = summarize_clusters_stc_AT(clu,
                                                      vertices=fsave_vertices,
                                                      subject='fsaverage')
        # find the max T value and vertex (clusters are all the same size)
        max_T = stc_all_cluster_vis.data[:, 0].max()
        max_vtx = np.where(stc_all_cluster_vis.data[:, 0] == max_T)
        p_cluster_threshold = 0.05
        good_cluster_inds = np.where(cluster_p_values <
                                    p_cluster_threshold)[0]
        # Reset per iteration so a size from a previous band/group cannot
        # leak through when the max vertex is in no significant cluster.
        clu_size = None
        for ii in good_cluster_inds:
            if np.isin(max_vtx, clusters[ii][1]):
                clu_size = len(clusters[ii][1])
        # Map the flat (lh+rh stacked) index to hemisphere + per-hemi vertex.
        # BUGFIX: rh indices start AT 10242 (lh occupies 0..10241), so the
        # test must be ">=" -- the original "> 10242" misclassified rh
        # vertex 0 (flat index 10242) as belonging to the left hemisphere.
        if max_vtx[0][0] >= N_LH_VERTICES:
            hemi = 1  # rh
            vtx = max_vtx[0][0] - N_LH_VERTICES
        else:
            hemi = 0  # lh
            vtx = max_vtx[0][0]
        # transform to mni coordinates
        mni = mne.vertex_to_mni(vtx, hemi, 'fsaverage')[0]
        print(group, fre, clu_size, mni.astype(np.int64), round(max_T, 2))
#%% for ISC group differences
results_dir = '/media/cbru/SMEDY/results/dys_con_contrast/2020_02_redo_subject_perm/'


def _tail_pair(freq):
    """Return the (positive-tail, negative-tail) cluster-file paths for one band."""
    return (results_dir + 't_clu_tail1_' + freq + '_613_1.npy',
            results_dir + 't_clu_tail-1_' + freq + '_613_1.npy')


delta = _tail_pair('5.000000e-01-4Hz')
theta = _tail_pair('4-8Hz')
alpha = _tail_pair('8-12Hz')
beta = _tail_pair('12-25Hz')
gamma1 = _tail_pair('25-45Hz')
gamma2 = _tail_pair('55-90Hz')

all_bands = {delta, theta, alpha, beta, gamma1, gamma2}
#all_bands = {gamma1}
# Cluster threshold Bonferroni-corrected across the six frequency bands.
p_cluster_threshold = 0.05/6
with open(results_dir + 'mni_corrdinates_out.csv', mode='w') as file_out:
mni_out = csv.writer(file_out, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for band in all_bands:
max_T = None
min_T = None
clu_size = None
stc_all_cluster_vis_pos = None
stc_all_cluster_vis_neg = None
stc_all_cluster_vis_both = None
clu = np.load(band[0])
T_obs_pos, clusters_pos, cluster_p_values_pos, H0_pos = clu
good_cluster_inds_pos = np.where(cluster_p_values_pos < p_cluster_threshold)[0]
if not good_cluster_inds_pos.any():
print('')
else:
stc_all_cluster_vis_pos = summarize_clusters_stc_AT(clu, p_thresh=p_cluster_threshold,
tstep=1e-3, tmin=0,
subject='fsaverage',
vertices=None)
clu = np.load(band[1])
T_obs_neg, clusters_neg, cluster_p_values_neg, H0_neg = clu
good_cluster_inds_neg = np.where(cluster_p_values_neg < p_cluster_threshold)[0]
if not good_cluster_inds_neg.any():
print('')
else:
stc_all_cluster_vis_neg = summarize_clusters_stc_AT(clu, p_thresh=p_cluster_threshold,
tstep=1e-3, tmin=0,
subject='fsaverage',
vertices=None)
# combine positive and negative clusters to one source estimate file
if stc_all_cluster_vis_pos is not None and stc_all_cluster_vis_neg is not None:
stc_all_cluster_vis_both = stc_all_cluster_vis_pos.copy()
stc_all_cluster_vis_both.data[:, 0] =\
stc_all_cluster_vis_pos.data[:, 0] + stc_all_cluster_vis_neg.data[:, 0]
elif stc_all_cluster_vis_pos is None and stc_all_cluster_vis_neg is not None:
stc_all_cluster_vis_both = stc_all_cluster_vis_neg.copy()
stc_all_cluster_vis_both.data[:, 0] = stc_all_cluster_vis_neg.data[:, 0]
elif stc_all_cluster_vis_neg is None and stc_all_cluster_vis_pos is not None:
stc_all_cluster_vis_both = stc_all_cluster_vis_pos.copy()
stc_all_cluster_vis_both.data[:, 0] = stc_all_cluster_vis_pos.data[:, 0]
else:
print('Error! There is no data for negative and positive contrasts.')
# find the max T value and vertex, extreme might be negative or positive
# find largest cluster first
# pos
out = []
if good_cluster_inds_pos.any():
for j in range(0, len(good_cluster_inds_pos)):
inds_t, inds_v = [(clusters_pos[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds_pos)][j]
out.append(len(inds_v)) # max cluster is xxth
out2 = out.copy()
out2.sort(reverse=True)
id_max_pos = out.index(out2[0])
max_T = stc_all_cluster_vis_pos.data[:, id_max_pos+1].max()
# neg
out = []
if good_cluster_inds_neg.any():
for j in range(0, len(good_cluster_inds_neg)):
inds_t, inds_v = [(clusters_neg[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds_neg)][j]
out.append(len(inds_v)) # max cluster is xxth
out2 = out.copy()
out2.sort(reverse=True)
id_max_neg = out.index(out2[0])
min_T = stc_all_cluster_vis_neg.data[:, id_max_neg+1].min()
if min_T is None and max_T is None:
print('No pos nor neg clusters')
elif min_T is None: # take only positive clusters
T = max_T
max_vtx = np.where(stc_all_cluster_vis_pos.data[:, id_max_pos+1] ==
stc_all_cluster_vis_pos.data[:, id_max_pos+1].max())
good_cluster_inds = np.where(cluster_p_values_pos < p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters_pos[ii][1]):
clu_size = len(clusters_pos[ii][1])
elif max_T is None: # take only negative clusters
T = min_T
max_vtx = np.where(stc_all_cluster_vis_neg.data[:, id_max_neg+1] ==
stc_all_cluster_vis_neg.data[:, id_max_neg+1].min())
good_cluster_inds = np.where(cluster_p_values_neg < p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters_neg[ii][1]):
clu_size = len(clusters_neg[ii][1])
elif abs(max_T) > abs(min_T): # take only positive clusters
T = max_T
max_vtx = np.where(stc_all_cluster_vis_pos.data[:, id_max_pos+1] ==
stc_all_cluster_vis_pos.data[:, id_max_pos+1].max())
good_cluster_inds = np.where(cluster_p_values_pos < p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters_pos[ii][1]):
clu_size = len(clusters_pos[ii][1])
elif abs(max_T) < abs(min_T): # take only negative clusters
T = min_T
max_vtx = np.where(stc_all_cluster_vis_neg.data[:, id_max_neg+1] ==
stc_all_cluster_vis_neg.data[:, id_max_neg+1].min())
good_cluster_inds = np.where(cluster_p_values_neg < p_cluster_threshold)[0]
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters_neg[ii][1]):
clu_size = len(clusters_neg[ii][1])
else:
print('Something went wrong')
if max_vtx[0][0] > 10242:
hemi = 1 # rh
vtx = max_vtx[0][0] - 10242
else:
hemi = 0 # lh
vtx = max_vtx[0][0]
# transform to mni coordinates
mni = mne.vertex_to_mni(vtx, hemi, 'fsaverage')[0]
print(band, clu_size, mni.astype(np.int64), round(T, 2))
mni_out.writerow([band[0], clu_size, mni.astype(np.str), round(T, 2)])
#%% for Mantel regressions
results_path = '/media/cbru/SMEDY/results/mantel_correlations/2019_05_simple_model/'
clu_files = [
results_path + 'phon_clu_5.000000e-01-4Hz_613_1.npy',
results_path + 'phon_clu_4-8Hz_613_1.npy',
results_path + 'phon_clu_8-12Hz_613_1.npy',
results_path + 'phon_clu_12-25Hz_613_1.npy',
results_path + 'phon_clu_25-45Hz_613_1.npy',
results_path + 'phon_clu_55-90Hz_613_1.npy',
results_path + 'read_clu_5.000000e-01-4Hz_613_1.npy',
results_path + 'read_clu_4-8Hz_613_1.npy',
results_path + 'read_clu_8-12Hz_613_1.npy',
results_path + 'read_clu_12-25Hz_613_1.npy',
results_path + 'read_clu_25-45Hz_613_1.npy',
results_path + 'mem_clu_5.000000e-01-4Hz_613_1.npy',
results_path + 'iq_clu_5.000000e-01-4Hz_613_1.npy'
]
cutoff = 25
with open(results_path + 'mni_corrdinates_out.csv', mode='w') as file_out:
mni_out = csv.writer(file_out, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for file in clu_files:
print(file)
# load clu
clu = np.load(file)
r_obs, clusters = clu
fsave_vertices = [np.arange(10242), np.arange(10242)]
# thresholding by cluster length
good_cluster_inds = []
clusters2 = []
for ii in range(0, len(clusters)):
if len(clusters[ii][1]) > (cutoff-1):
good_cluster_inds.append(ii)
clusters2.append(clusters[ii])
clu2 = r_obs, clusters2, np.zeros(len(clusters2)), _
if not clusters2:
print('All clusters are smaller than the minimal length.')
else:
# Investigating the significant effects / Find max cluster
out = []
for j in range(0, len(good_cluster_inds)):
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][j]
out.append(len(inds_v)) # max cluster is xxth
out2 = out.copy()
out2.sort(reverse=True)
id_max = out.index(out2[0])
clusters[good_cluster_inds[id_max]]
stc_all_cluster_vis = summarize_clusters_stc_AT(clu2, p_thresh=0.05,
tstep=1e-3, tmin=0,
subject='fsaverage',
vertices=fsave_vertices)
max_R = np.absolute(stc_all_cluster_vis.data[:, id_max+1]).max()
R_max = stc_all_cluster_vis.data[:, id_max+1].max()
R_min = stc_all_cluster_vis.data[:, id_max+1].min()
if np.absolute(R_max)<np.absolute(R_min):
max_R = max_R*-1
max_vtx = np.where(np.absolute(stc_all_cluster_vis.data[:, id_max+1]) ==
np.absolute(stc_all_cluster_vis.data[:, id_max+1]).max())
for ii in good_cluster_inds:
if np.isin(max_vtx, clusters[ii][1]):
clu_size = len(clusters[ii][1])
if max_vtx[0][0] > 10242:
hemi = 1 # rh
vtx = max_vtx[0][0] - 10242
else:
hemi = 0 # lh
vtx = max_vtx[0][0]
# transform to mni coordinates
mni = mne.vertex_to_mni(vtx, hemi, 'fsaverage')[0]
print(file, clu_size, mni.astype(np.int64), round(max_R, 2))
mni_out.writerow([file, clu_size, mni.astype(np.str), round(max_R, 2)])
|
[
"numpy.isin",
"numpy.load",
"numpy.absolute",
"csv.writer",
"summarize_clusters_stc_AT.summarize_clusters_stc_AT",
"mne.vertex_to_mni",
"numpy.where",
"numpy.arange"
] |
[((524, 540), 'numpy.arange', 'np.arange', (['(10242)'], {}), '(10242)\n', (533, 540), True, 'import numpy as np\n'), ((542, 558), 'numpy.arange', 'np.arange', (['(10242)'], {}), '(10242)\n', (551, 558), True, 'import numpy as np\n'), ((2864, 2941), 'csv.writer', 'csv.writer', (['file_out'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(file_out, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (2874, 2941), False, 'import csv\n'), ((9996, 10073), 'csv.writer', 'csv.writer', (['file_out'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(file_out, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (10006, 10073), False, 'import csv\n'), ((670, 743), 'numpy.load', 'np.load', (["(results_path + 't_clu_' + group + fre + win + condition + '.npy')"], {}), "(results_path + 't_clu_' + group + fre + win + condition + '.npy')\n", (677, 743), True, 'import numpy as np\n'), ((783, 859), 'summarize_clusters_stc_AT.summarize_clusters_stc_AT', 'summarize_clusters_stc_AT', (['clu'], {'vertices': 'fsave_vertices', 'subject': '"""fsaverage"""'}), "(clu, vertices=fsave_vertices, subject='fsaverage')\n", (808, 859), False, 'from summarize_clusters_stc_AT import summarize_clusters_stc_AT\n'), ((3167, 3183), 'numpy.load', 'np.load', (['band[0]'], {}), '(band[0])\n', (3174, 3183), True, 'import numpy as np\n'), ((3781, 3797), 'numpy.load', 'np.load', (['band[1]'], {}), '(band[1])\n', (3788, 3797), True, 'import numpy as np\n'), ((10154, 10167), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (10161, 10167), True, 'import numpy as np\n'), ((1289, 1337), 'numpy.where', 'np.where', (['(cluster_p_values < p_cluster_threshold)'], {}), '(cluster_p_values < p_cluster_threshold)\n', (1297, 1337), True, 'import numpy as np\n'), ((1430, 1463), 'numpy.isin', 'np.isin', (['max_vtx', 'clusters[ii][1]'], {}), '(max_vtx, clusters[ii][1])\n', (1437, 1463), True, 'import numpy as np\n'), 
((1739, 1780), 'mne.vertex_to_mni', 'mne.vertex_to_mni', (['vtx', 'hemi', '"""fsaverage"""'], {}), "(vtx, hemi, 'fsaverage')\n", (1756, 1780), False, 'import mne\n'), ((3284, 3336), 'numpy.where', 'np.where', (['(cluster_p_values_pos < p_cluster_threshold)'], {}), '(cluster_p_values_pos < p_cluster_threshold)\n', (3292, 3336), True, 'import numpy as np\n'), ((3458, 3579), 'summarize_clusters_stc_AT.summarize_clusters_stc_AT', 'summarize_clusters_stc_AT', (['clu'], {'p_thresh': 'p_cluster_threshold', 'tstep': '(0.001)', 'tmin': '(0)', 'subject': '"""fsaverage"""', 'vertices': 'None'}), "(clu, p_thresh=p_cluster_threshold, tstep=0.001,\n tmin=0, subject='fsaverage', vertices=None)\n", (3483, 3579), False, 'from summarize_clusters_stc_AT import summarize_clusters_stc_AT\n'), ((3898, 3950), 'numpy.where', 'np.where', (['(cluster_p_values_neg < p_cluster_threshold)'], {}), '(cluster_p_values_neg < p_cluster_threshold)\n', (3906, 3950), True, 'import numpy as np\n'), ((4072, 4193), 'summarize_clusters_stc_AT.summarize_clusters_stc_AT', 'summarize_clusters_stc_AT', (['clu'], {'p_thresh': 'p_cluster_threshold', 'tstep': '(0.001)', 'tmin': '(0)', 'subject': '"""fsaverage"""', 'vertices': 'None'}), "(clu, p_thresh=p_cluster_threshold, tstep=0.001,\n tmin=0, subject='fsaverage', vertices=None)\n", (4097, 4193), False, 'from summarize_clusters_stc_AT import summarize_clusters_stc_AT\n'), ((8908, 8949), 'mne.vertex_to_mni', 'mne.vertex_to_mni', (['vtx', 'hemi', '"""fsaverage"""'], {}), "(vtx, hemi, 'fsaverage')\n", (8925, 8949), False, 'import mne\n'), ((10224, 10240), 'numpy.arange', 'np.arange', (['(10242)'], {}), '(10242)\n', (10233, 10240), True, 'import numpy as np\n'), ((10242, 10258), 'numpy.arange', 'np.arange', (['(10242)'], {}), '(10242)\n', (10251, 10258), True, 'import numpy as np\n'), ((11294, 11412), 'summarize_clusters_stc_AT.summarize_clusters_stc_AT', 'summarize_clusters_stc_AT', (['clu2'], {'p_thresh': '(0.05)', 'tstep': '(0.001)', 'tmin': '(0)', 'subject': 
'"""fsaverage"""', 'vertices': 'fsave_vertices'}), "(clu2, p_thresh=0.05, tstep=0.001, tmin=0, subject\n ='fsaverage', vertices=fsave_vertices)\n", (11319, 11412), False, 'from summarize_clusters_stc_AT import summarize_clusters_stc_AT\n'), ((11820, 11838), 'numpy.absolute', 'np.absolute', (['R_max'], {}), '(R_max)\n', (11831, 11838), True, 'import numpy as np\n'), ((11839, 11857), 'numpy.absolute', 'np.absolute', (['R_min'], {}), '(R_min)\n', (11850, 11857), True, 'import numpy as np\n'), ((12139, 12172), 'numpy.isin', 'np.isin', (['max_vtx', 'clusters[ii][1]'], {}), '(max_vtx, clusters[ii][1])\n', (12146, 12172), True, 'import numpy as np\n'), ((12492, 12533), 'mne.vertex_to_mni', 'mne.vertex_to_mni', (['vtx', 'hemi', '"""fsaverage"""'], {}), "(vtx, hemi, 'fsaverage')\n", (12509, 12533), False, 'import mne\n'), ((6914, 6966), 'numpy.where', 'np.where', (['(cluster_p_values_pos < p_cluster_threshold)'], {}), '(cluster_p_values_pos < p_cluster_threshold)\n', (6922, 6966), True, 'import numpy as np\n'), ((7030, 7067), 'numpy.isin', 'np.isin', (['max_vtx', 'clusters_pos[ii][1]'], {}), '(max_vtx, clusters_pos[ii][1])\n', (7037, 7067), True, 'import numpy as np\n'), ((11620, 11672), 'numpy.absolute', 'np.absolute', (['stc_all_cluster_vis.data[:, id_max + 1]'], {}), '(stc_all_cluster_vis.data[:, id_max + 1])\n', (11631, 11672), True, 'import numpy as np\n'), ((11923, 11975), 'numpy.absolute', 'np.absolute', (['stc_all_cluster_vis.data[:, id_max + 1]'], {}), '(stc_all_cluster_vis.data[:, id_max + 1])\n', (11934, 11975), True, 'import numpy as np\n'), ((7401, 7453), 'numpy.where', 'np.where', (['(cluster_p_values_neg < p_cluster_threshold)'], {}), '(cluster_p_values_neg < p_cluster_threshold)\n', (7409, 7453), True, 'import numpy as np\n'), ((7517, 7554), 'numpy.isin', 'np.isin', (['max_vtx', 'clusters_neg[ii][1]'], {}), '(max_vtx, clusters_neg[ii][1])\n', (7524, 7554), True, 'import numpy as np\n'), ((7898, 7950), 'numpy.where', 'np.where', (['(cluster_p_values_pos < 
p_cluster_threshold)'], {}), '(cluster_p_values_pos < p_cluster_threshold)\n', (7906, 7950), True, 'import numpy as np\n'), ((8014, 8051), 'numpy.isin', 'np.isin', (['max_vtx', 'clusters_pos[ii][1]'], {}), '(max_vtx, clusters_pos[ii][1])\n', (8021, 8051), True, 'import numpy as np\n'), ((12008, 12060), 'numpy.absolute', 'np.absolute', (['stc_all_cluster_vis.data[:, id_max + 1]'], {}), '(stc_all_cluster_vis.data[:, id_max + 1])\n', (12019, 12060), True, 'import numpy as np\n'), ((8395, 8447), 'numpy.where', 'np.where', (['(cluster_p_values_neg < p_cluster_threshold)'], {}), '(cluster_p_values_neg < p_cluster_threshold)\n', (8403, 8447), True, 'import numpy as np\n'), ((8511, 8548), 'numpy.isin', 'np.isin', (['max_vtx', 'clusters_neg[ii][1]'], {}), '(max_vtx, clusters_neg[ii][1])\n', (8518, 8548), True, 'import numpy as np\n')]
|
# !/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from resource_management.core.logger import Logger
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.script import Script
from resource_management.core.exceptions import Fail
from unittest import TestCase
import json
Logger.initialize_logger()
class TestStackFeature(TestCase):
"""
EU Upgrade (HDP 2.5 to HDP 2.6)
- STOP
clusterLevelParams/stack_name = HDP
clusterLevelParams/stack_version = 2.5
commandParams/version = 2.5.0.0-1237
- START
clusterLevelParams/stack_name = HDP
clusterLevelParams/stack_version = 2.6
commandParams/version = 2.6.0.0-334
EU Downgrade (HDP 2.6 to HDP 2.5)
- STOP
clusterLevelParams/stack_name = HDP
clusterLevelParams/stack_version = 2.6
commandParams/version = 2.6.0.0-334
- START
clusterLevelParams/stack_name = HDP
clusterLevelParams/stack_version = 2.5
commandParams/version = 2.5.0.0-1237
"""
def test_get_stack_feature_version_missing_params(self):
try:
stack_feature_version = get_stack_feature_version({})
self.assertEqual("2.3.0.0-1234", stack_feature_version)
self.fail("Expected an exception when there are required parameters missing from the dictionary")
except Fail:
pass
def test_get_stack_feature_version_for_install_command(self):
"""
Tests the stack feature version calculated during an install command on a new cluster
:return:
"""
command_json = TestStackFeature._get_cluster_install_command_json()
Script.config = command_json
stack_feature_version = get_stack_feature_version(command_json)
self.assertEqual("2.4", stack_feature_version)
def test_get_stack_feature_version_for_upgrade_restart(self):
"""
Tests the stack feature version calculated during a restart command in an upgrade.
:return:
"""
command_json = TestStackFeature._get_cluster_upgrade_restart_json()
Script.config = command_json
stack_feature_version = get_stack_feature_version(command_json)
self.assertEqual("2.5.9.9-9999", stack_feature_version)
def test_get_stack_feature_version_for_downgrade_restart(self):
"""
Tests the stack feature version calculated during a restart command in a downgrade.
:return:
"""
command_json = TestStackFeature._get_cluster_downgrade_restart_json()
Script.config = command_json
stack_feature_version = get_stack_feature_version(command_json)
self.assertEqual("2.4.0.0-1234", stack_feature_version)
def test_get_stack_feature_version_for_downgrade_stop(self):
"""
Tests the stack feature version calculated during a STOP command in a downgrade.
:return:
"""
command_json = TestStackFeature._get_cluster_downgrade_stop_json()
Script.config = command_json
stack_feature_version = get_stack_feature_version(command_json)
self.assertEqual("2.5.9.9-9999", stack_feature_version)
command_json = TestStackFeature._get_cluster_downgrade_stop_custom_command_json()
Script.config = command_json
stack_feature_version = get_stack_feature_version(command_json)
self.assertEqual("2.5.9.9-9999", stack_feature_version)
def test_get_stack_feature(self):
"""
Tests the stack feature version calculated during a STOP command in a downgrade.
:return:
"""
command_json = TestStackFeature._get_cluster_upgrade_restart_json()
Script.config = command_json
Script.config["configurations"] = {}
Script.config["configurations"]["cluster-env"] = {}
Script.config["configurations"]["cluster-env"]["stack_features"] = {}
Script.config["configurations"]["cluster-env"]["stack_features"] = json.dumps(TestStackFeature._get_stack_feature_json())
stack_feature_version = get_stack_feature_version(command_json)
self.assertTrue(check_stack_feature("stack-feature-1", stack_feature_version))
self.assertTrue(check_stack_feature("stack-feature-2", stack_feature_version))
self.assertFalse(check_stack_feature("stack-feature-3", stack_feature_version))
command_json = TestStackFeature._get_cluster_install_command_json()
Script.config.update(command_json)
stack_feature_version = get_stack_feature_version(command_json)
self.assertTrue(check_stack_feature("stack-feature-1", stack_feature_version))
self.assertTrue(check_stack_feature("stack-feature-2", stack_feature_version))
self.assertFalse(check_stack_feature("stack-feature-3", stack_feature_version))
@staticmethod
def _get_cluster_install_command_json():
"""
Install command JSON with no upgrade and no version information.
:return:
"""
return {
"serviceName":"HDFS",
"roleCommand": "ACTIONEXECUTE",
"clusterLevelParams": {
"stack_name": "HDP",
"stack_version": "2.4",
},
"commandParams": {
"command_timeout": "1800",
"script_type": "PYTHON",
"script": "install_packages.py"
}
}
@staticmethod
def _get_cluster_upgrade_restart_json():
"""
A restart command during an upgrade.
:return:
"""
return {
"serviceName":"HDFS",
"roleCommand":"ACTIONEXECUTE",
"clusterLevelParams": {
"stack_name": "HDP",
"stack_version": "2.4",
},
"commandParams": {
"source_stack": "2.4",
"target_stack": "2.5",
"upgrade_direction": "upgrade",
"version": "2.5.9.9-9999"
},
"upgradeSummary": {
"services":{
"HDFS":{
"sourceRepositoryId":1,
"sourceStackId":"HDP-2.4",
"sourceVersion":"2.4.0.0-1234",
"targetRepositoryId":2,
"targetStackId":"HDP-2.5",
"targetVersion":"2.5.9.9-9999"
}
},
"direction":"UPGRADE",
"type":"rolling_upgrade",
"isRevert":False,
"orchestration":"STANDARD",
"isDowngradeAllowed": True
}
}
@staticmethod
def _get_cluster_downgrade_restart_json():
"""
A restart command during a downgrade.
:return:
"""
return {
"serviceName":"HDFS",
"roleCommand":"ACTIONEXECUTE",
"clusterLevelParams":{
"stack_name":"HDP",
"stack_version":"2.4"
},
"commandParams":{
"source_stack":"2.5",
"target_stack":"2.4",
"upgrade_direction":"downgrade",
"version":"2.4.0.0-1234"
},
"upgradeSummary":{
"services":{
"HDFS":{
"sourceRepositoryId":2,
"sourceStackId":"HDP-2.5",
"sourceVersion":"2.5.9.9-9999",
"targetRepositoryId":1,
"targetStackId":"HDP-2.4",
"targetVersion":"2.4.0.0-1234"
}
},
"direction":"DOWNGRADE",
"type":"rolling_upgrade",
"isRevert":False,
"orchestration":"STANDARD",
"isDowngradeAllowed": True
}
}
@staticmethod
def _get_cluster_downgrade_stop_json():
"""
A STOP command during a downgrade.
:return:
"""
return {
"serviceName":"HDFS",
"roleCommand":"STOP",
"clusterLevelParams":{
"stack_name":"HDP",
"stack_version":"2.5",
},
"commandParams":{
"source_stack":"2.5",
"target_stack":"2.4",
"upgrade_direction":"downgrade",
"version":"2.5.9.9-9999"
},
"upgradeSummary":{
"services":{
"HDFS":{
"sourceRepositoryId":2,
"sourceStackId":"HDP-2.5",
"sourceVersion":"2.5.9.9-9999",
"targetRepositoryId":1,
"targetStackId":"HDP-2.4",
"targetVersion":"2.4.0.0-1234"
}
},
"direction":"DOWNGRADE",
"type":"rolling_upgrade",
"isRevert":False,
"orchestration":"STANDARD",
"isDowngradeAllowed": True
}
}
@staticmethod
def _get_cluster_downgrade_stop_custom_command_json():
"""
A STOP command during a downgrade.
:return:
"""
return {
"serviceName":"HDFS",
"roleCommand":"CUSTOM_COMMAND",
"clusterLevelParams":{
"stack_name":"HDP",
"stack_version":"2.5",
"custom_command":"STOP"
},
"commandParams":{
"source_stack":"2.5",
"target_stack":"2.4",
"upgrade_direction":"downgrade",
"version":"2.5.9.9-9999"
},
"upgradeSummary":{
"services":{
"HDFS":{
"sourceRepositoryId":2,
"sourceStackId":"HDP-2.5",
"sourceVersion":"2.5.9.9-9999",
"targetRepositoryId":1,
"targetStackId":"HDP-2.4",
"targetVersion":"2.4.0.0-1234"
}
},
"direction":"DOWNGRADE",
"type":"rolling_upgrade",
"isRevert":False,
"orchestration":"STANDARD"
}
}
@staticmethod
def _get_stack_feature_json():
"""
A STOP command during a downgrade.
:return:
"""
return {
"HDP": {
"stack_features":[
{
"name":"stack-feature-1",
"description":"Stack Feature 1",
"min_version":"2.2.0.0"
},
{
"name":"stack-feature-2",
"description":"Stack Feature 2",
"min_version":"2.2.0.0",
"max_version":"2.6.0.0"
},
{
"name":"stack-feature-3",
"description":"Stack Feature 3",
"min_version":"2.2.0.0",
"max_version":"2.3.0.0"
}
]
}
}
|
[
"resource_management.libraries.functions.stack_features.check_stack_feature",
"resource_management.libraries.functions.stack_features.get_stack_feature_version",
"resource_management.libraries.script.Script.config.update",
"resource_management.core.logger.Logger.initialize_logger"
] |
[((1174, 1200), 'resource_management.core.logger.Logger.initialize_logger', 'Logger.initialize_logger', ([], {}), '()\n', (1198, 1200), False, 'from resource_management.core.logger import Logger\n'), ((2512, 2551), 'resource_management.libraries.functions.stack_features.get_stack_feature_version', 'get_stack_feature_version', (['command_json'], {}), '(command_json)\n', (2537, 2551), False, 'from resource_management.libraries.functions.stack_features import get_stack_feature_version\n'), ((2919, 2958), 'resource_management.libraries.functions.stack_features.get_stack_feature_version', 'get_stack_feature_version', (['command_json'], {}), '(command_json)\n', (2944, 2958), False, 'from resource_management.libraries.functions.stack_features import get_stack_feature_version\n'), ((3340, 3379), 'resource_management.libraries.functions.stack_features.get_stack_feature_version', 'get_stack_feature_version', (['command_json'], {}), '(command_json)\n', (3365, 3379), False, 'from resource_management.libraries.functions.stack_features import get_stack_feature_version\n'), ((3752, 3791), 'resource_management.libraries.functions.stack_features.get_stack_feature_version', 'get_stack_feature_version', (['command_json'], {}), '(command_json)\n', (3777, 3791), False, 'from resource_management.libraries.functions.stack_features import get_stack_feature_version\n'), ((4001, 4040), 'resource_management.libraries.functions.stack_features.get_stack_feature_version', 'get_stack_feature_version', (['command_json'], {}), '(command_json)\n', (4026, 4040), False, 'from resource_management.libraries.functions.stack_features import get_stack_feature_version\n'), ((4685, 4724), 'resource_management.libraries.functions.stack_features.get_stack_feature_version', 'get_stack_feature_version', (['command_json'], {}), '(command_json)\n', (4710, 4724), False, 'from resource_management.libraries.functions.stack_features import get_stack_feature_version\n'), ((5052, 5086), 
'resource_management.libraries.script.Script.config.update', 'Script.config.update', (['command_json'], {}), '(command_json)\n', (5072, 5086), False, 'from resource_management.libraries.script import Script\n'), ((5116, 5155), 'resource_management.libraries.functions.stack_features.get_stack_feature_version', 'get_stack_feature_version', (['command_json'], {}), '(command_json)\n', (5141, 5155), False, 'from resource_management.libraries.functions.stack_features import get_stack_feature_version\n'), ((1970, 1999), 'resource_management.libraries.functions.stack_features.get_stack_feature_version', 'get_stack_feature_version', (['{}'], {}), '({})\n', (1995, 1999), False, 'from resource_management.libraries.functions.stack_features import get_stack_feature_version\n'), ((4745, 4806), 'resource_management.libraries.functions.stack_features.check_stack_feature', 'check_stack_feature', (['"""stack-feature-1"""', 'stack_feature_version'], {}), "('stack-feature-1', stack_feature_version)\n", (4764, 4806), False, 'from resource_management.libraries.functions.stack_features import check_stack_feature\n'), ((4828, 4889), 'resource_management.libraries.functions.stack_features.check_stack_feature', 'check_stack_feature', (['"""stack-feature-2"""', 'stack_feature_version'], {}), "('stack-feature-2', stack_feature_version)\n", (4847, 4889), False, 'from resource_management.libraries.functions.stack_features import check_stack_feature\n'), ((4912, 4973), 'resource_management.libraries.functions.stack_features.check_stack_feature', 'check_stack_feature', (['"""stack-feature-3"""', 'stack_feature_version'], {}), "('stack-feature-3', stack_feature_version)\n", (4931, 4973), False, 'from resource_management.libraries.functions.stack_features import check_stack_feature\n'), ((5176, 5237), 'resource_management.libraries.functions.stack_features.check_stack_feature', 'check_stack_feature', (['"""stack-feature-1"""', 'stack_feature_version'], {}), "('stack-feature-1', 
stack_feature_version)\n", (5195, 5237), False, 'from resource_management.libraries.functions.stack_features import check_stack_feature\n'), ((5259, 5320), 'resource_management.libraries.functions.stack_features.check_stack_feature', 'check_stack_feature', (['"""stack-feature-2"""', 'stack_feature_version'], {}), "('stack-feature-2', stack_feature_version)\n", (5278, 5320), False, 'from resource_management.libraries.functions.stack_features import check_stack_feature\n'), ((5343, 5404), 'resource_management.libraries.functions.stack_features.check_stack_feature', 'check_stack_feature', (['"""stack-feature-3"""', 'stack_feature_version'], {}), "('stack-feature-3', stack_feature_version)\n", (5362, 5404), False, 'from resource_management.libraries.functions.stack_features import check_stack_feature\n')]
|
import boto3
class Graph:
"""
Represents a Graph object in a DynamoDB table
"""
def __init__(self, table_name, graph_id):
dynamodb = boto3.resource("dynamodb")
self.table = dynamodb.Table(table_name)
self.graph_id = graph_id
def check_status(self, expected_status):
"""
Checks whether the graph has an expected status
"""
response = self.table.get_item(
Key={
'graphId': self.graph_id
}
)
# If the graph does not exist, it cannot have the expected status
graph = response["Item"]
if graph is None:
return False
status = graph["currentState"]
return status == expected_status
def update_status(self, status):
"""
Updates the status of a Graph
"""
self.table.update_item(
Key={
"graphId": self.graph_id
},
UpdateExpression="SET currentState = :state",
ExpressionAttributeValues={
":state": status
}
)
def delete(self):
"""
Deletes the graph from the Table
"""
self.table.delete_item(
Key={
"graphId": self.graph_id
}
)
|
[
"boto3.resource"
] |
[((158, 184), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (172, 184), False, 'import boto3\n')]
|
from .dot_product import DotProduct
import os
from numpy import log2
filedir = os.path.dirname(os.path.realpath(__file__))
dot_product_tb_module_path = os.path.join(filedir, '..', 'src')
dot_product_module_path = os.path.join(filedir, '..', 'src', 'dot_prod_pip.v')
class DotProdPip(DotProduct):
"""
"""
def template_dict(self, inst_name=None):
t_dict = super(DotProdPip, self).template_dict(inst_name)
t_dict['length_counter_bits'] = int(log2(self.length))
return t_dict
|
[
"numpy.log2",
"os.path.realpath",
"os.path.join"
] |
[((153, 187), 'os.path.join', 'os.path.join', (['filedir', '""".."""', '"""src"""'], {}), "(filedir, '..', 'src')\n", (165, 187), False, 'import os\n'), ((214, 266), 'os.path.join', 'os.path.join', (['filedir', '""".."""', '"""src"""', '"""dot_prod_pip.v"""'], {}), "(filedir, '..', 'src', 'dot_prod_pip.v')\n", (226, 266), False, 'import os\n'), ((96, 122), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (112, 122), False, 'import os\n'), ((472, 489), 'numpy.log2', 'log2', (['self.length'], {}), '(self.length)\n', (476, 489), False, 'from numpy import log2\n')]
|
from typing import Tuple
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
except (AttributeError, ImportError):
from wand.display import display
else:
from IPython.display import display
from wand.color import Color
from wand.drawing import Drawing
from wand.image import Image
class TreeVisualizer:
    """
    Draws colored bounding boxes (one color per node label) on rendered
    page images of a pdf document.
    """
    def __init__(self, pdf_file):
        """
        :param pdf_file: path to the pdf file to visualize
        :return:
        """
        self.pdf_file = pdf_file
    def display_boxes(self, tree, html_path, filename_prefix, alternate_colors=False):
        """
        Displays each of the bounding boxes passed in 'boxes' on images of the pdf
        pointed to by pdf_file.
        tree maps page_num -> {label -> [(pnum, pwidth, pheight, top, left,
        bottom, right), ...]}.
        Each annotated page is saved as a PNG under html_path and the list of
        wand Image objects is returned.
        Note: alternate_colors is accepted but not used by this implementation.
        """
        imgs = []
        # One stroke/label color per node type.
        colors = {
            "section_header": Color("blue"),
            "figure": Color("green"),
            "figure_caption": Color("green"),
            "table_caption": Color("red"),
            "list": Color("yellow"),
            "paragraph": Color("gray"),
            "table": Color("red"),
            "header": Color("brown"),
        }
        for i, page_num in enumerate(tree.keys()):
            img = self.pdf_to_img(page_num)
            draw = Drawing()
            # Fully transparent fill so only rectangle outlines are drawn.
            draw.fill_color = Color("rgba(0, 0, 0, 0.0)")
            for clust in tree[page_num]:
                for (pnum, pwidth, pheight, top, left, bottom, right) in tree[page_num][
                    clust
                ]:
                    draw.stroke_color = colors[clust]
                    draw.rectangle(left=left, top=top, right=right, bottom=bottom)
                    # push/pop isolates the label-text styling from the
                    # rectangle stroke settings.
                    draw.push()
                    draw.font_size = 20
                    draw.font_weight = 10
                    draw.fill_color = colors[clust]
                    # Only draw the label when it fits inside the page.
                    if int(left) > 0 and int(top) > 0:
                        draw.text(x=int(left), y=int(top), body=clust)
                    draw.pop()
            draw(img)
            img.save(filename=html_path + filename_prefix + "_page_" + str(i) + ".png")
            imgs.append(img)
        return imgs
    def display_candidates(self, tree, html_path, filename_prefix):
        """
        Displays the bounding boxes corresponding to candidates on an image of the pdf.
        tree has the same shape as in display_boxes; the annotated images are
        shown via wand/IPython display and also saved to html_path.
        """
        imgs = self.display_boxes(
            tree, html_path, filename_prefix, alternate_colors=True
        )
        return display(*imgs)
    def pdf_to_img(self, page_num, pdf_dim=None):
        """
        Converts one pdf page into a wand image.
        :param page_num: page number to convert (index starting at 1)
        :param pdf_dim: optional (width, height); computed from the pdf when None
        :return: wand image object resized to the page dimensions
        """
        if not pdf_dim:
            pdf_dim = get_pdf_dim(self.pdf_file)
        page_width, page_height = pdf_dim
        # wand uses 0-based page indices in the "file[n]" filename syntax.
        img = Image(filename="{}[{}]".format(self.pdf_file, page_num - 1))
        img.resize(page_width, page_height)
        return img
def get_pdf_dim(pdf_file) -> Tuple[int, int]:
    """Return the (width, height) of the first page of *pdf_file*."""
    with open(pdf_file, "rb") as fp:
        document = PDFDocument(PDFParser(fp))
        # Inspect only the first page of the document.
        first_page = next(PDFPage.create_pages(document))
        # mediabox is (x0, y0, x1, y1); the upper corner gives the dimensions.
        _, _, width, height = first_page.mediabox
    return width, height
|
[
"pdfminer.pdfdocument.PDFDocument",
"wand.display.display",
"pdfminer.pdfpage.PDFPage.create_pages",
"wand.color.Color",
"IPython.get_ipython",
"wand.drawing.Drawing",
"pdfminer.pdfparser.PDFParser"
] |
[((2775, 2789), 'wand.display.display', 'display', (['*imgs'], {}), '(*imgs)\n', (2782, 2789), False, 'from wand.display import display\n'), ((3407, 3419), 'pdfminer.pdfparser.PDFParser', 'PDFParser', (['f'], {}), '(f)\n', (3416, 3419), False, 'from pdfminer.pdfparser import PDFParser\n'), ((3434, 3453), 'pdfminer.pdfdocument.PDFDocument', 'PDFDocument', (['parser'], {}), '(parser)\n', (3445, 3453), False, 'from pdfminer.pdfdocument import PDFDocument\n'), ((220, 233), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (231, 233), False, 'from IPython import get_ipython\n'), ((1120, 1133), 'wand.color.Color', 'Color', (['"""blue"""'], {}), "('blue')\n", (1125, 1133), False, 'from wand.color import Color\n'), ((1157, 1171), 'wand.color.Color', 'Color', (['"""green"""'], {}), "('green')\n", (1162, 1171), False, 'from wand.color import Color\n'), ((1203, 1217), 'wand.color.Color', 'Color', (['"""green"""'], {}), "('green')\n", (1208, 1217), False, 'from wand.color import Color\n'), ((1248, 1260), 'wand.color.Color', 'Color', (['"""red"""'], {}), "('red')\n", (1253, 1260), False, 'from wand.color import Color\n'), ((1282, 1297), 'wand.color.Color', 'Color', (['"""yellow"""'], {}), "('yellow')\n", (1287, 1297), False, 'from wand.color import Color\n'), ((1324, 1337), 'wand.color.Color', 'Color', (['"""gray"""'], {}), "('gray')\n", (1329, 1337), False, 'from wand.color import Color\n'), ((1360, 1372), 'wand.color.Color', 'Color', (['"""red"""'], {}), "('red')\n", (1365, 1372), False, 'from wand.color import Color\n'), ((1396, 1410), 'wand.color.Color', 'Color', (['"""brown"""'], {}), "('brown')\n", (1401, 1410), False, 'from wand.color import Color\n'), ((1536, 1545), 'wand.drawing.Drawing', 'Drawing', ([], {}), '()\n', (1543, 1545), False, 'from wand.drawing import Drawing\n'), ((1576, 1603), 'wand.color.Color', 'Color', (['"""rgba(0, 0, 0, 0.0)"""'], {}), "('rgba(0, 0, 0, 0.0)')\n", (1581, 1603), False, 'from wand.color import Color\n'), ((3511, 3536), 
'pdfminer.pdfpage.PDFPage.create_pages', 'PDFPage.create_pages', (['doc'], {}), '(doc)\n', (3531, 3536), False, 'from pdfminer.pdfpage import PDFPage\n')]
|
#%%
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
# Preview the augmented facial-keypoint training data: load the pickled
# DataFrame and plot up to 60 sample images with their keypoints overlaid.
TRAIN_UPDATE_FILE = "C:/kaggle/kaggle_keypoints/pickle/cleandata_updates_augment.pkl"
train = pickle.load(open(TRAIN_UPDATE_FILE, "rb")).reset_index()
print("Size of 'augmentation' set: %d" % train.shape[0])
# %%
fig = plt.figure(figsize=(20,20))
# Keypoint coordinate columns are everything except the raw image column(s).
cols = [c for c in train.columns if not c.startswith('image')]
# Show at most 60 samples (the 6x10 subplot grid below).
rng = np.clip(train.shape[0], 0, 60)
for i in range(rng):
    # Images are stored flattened; reshape to the 96x96 pixel grid.
    img = train.iloc[i].image.reshape(96,96)
    points = train.iloc[i][cols].values
    ax = fig.add_subplot(6,10,i+1)
    ax.imshow(img, cmap='gray')
    # Keypoints are stored interleaved as (x0, y0, x1, y1, ...).
    ax.scatter(points[0::2], points[1::2], color = 'red', s = 20)
    plt.axis('off')
plt.tight_layout()
plt.show()
# %%
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"numpy.clip",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout"
] |
[((314, 342), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (324, 342), True, 'import matplotlib.pyplot as plt\n'), ((411, 441), 'numpy.clip', 'np.clip', (['train.shape[0]', '(0)', '(60)'], {}), '(train.shape[0], 0, 60)\n', (418, 441), True, 'import numpy as np\n'), ((704, 722), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (720, 722), True, 'import matplotlib.pyplot as plt\n'), ((723, 733), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (731, 733), True, 'import matplotlib.pyplot as plt\n'), ((686, 701), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (694, 701), True, 'import matplotlib.pyplot as plt\n')]
|
from tkinter import Tk,scrolledtext,Menu,filedialog,END,messagebox
import tkinter.scrolledtext as ScrolledText
from tkinter import *
from PIL import ImageTk
# root is the root window into which all other widgets go; every tkinter
# application must have exactly one Tk instance.
root=Tk(className=" AJ Text Editor")
# Main editing area: a scrollable text widget filling the window.
textArea=ScrolledText.ScrolledText(root,width=100,height=80,bg='green')
#
#FUNCTIONS
#
def openFile():
    """Prompt for a file and insert its contents at the top of the editor.

    The file is opened in binary mode ('rb') and its contents inserted at
    index '1.0' (line 1, column 0) of the scrolled-text widget.
    """
    file=filedialog.askopenfile(parent=root,mode='rb',title='select a text file')
    # askopenfile returns None when the user cancels the dialog.
    if file is not None:
        try:
            contents=file.read()
            textArea.insert('1.0',contents)
        finally:
            # Close the handle even if reading or inserting fails.
            file.close()
def saveFile():
    """Prompt for a destination file and write the editor contents to it."""
    file=filedialog.asksaveasfile(mode='w')
    # asksaveasfile returns None when the user cancels the dialog.
    if file is not None:
        try:
            # Slice off the last character from get(), as the Text widget
            # always appends an extra trailing newline.
            data=textArea.get('1.0',END+'-1c')
            file.write(data)
        finally:
            # Close the handle even if get/write fails.
            file.close()
def quit():
    """Ask the user for confirmation and, if agreed, close the main window."""
    confirmed = messagebox.askyesno("Quit","Are u sure u want to quit")
    if confirmed:
        root.destroy()
def about():
    """Show the editor's 'about' information dialog."""
    # NOTE(review): showinfo is called with a single positional argument,
    # which tkinter treats as the dialog *title*; a message body was likely
    # intended -- confirm before changing the call.
    messagebox.showinfo("A python Text editor using tkinter!")
#Menu options
menu=Menu(root)
root.configure(background = 'black')
root.config(menu=menu)
fileMenu=Menu(menu)
#Creates a new hierarchical menu by associating a given menu to a parent menu(add_cascade)
menu.add_cascade(label="File",menu=fileMenu)
fileMenu.add_command(label="New")
fileMenu.add_command(label="Open",command=openFile)
fileMenu.add_command(label="Save",command=saveFile)
fileMenu.add_command(label="Print")
helpMenu=Menu(menu)
menu.add_cascade(label="Help")
menu.add_cascade(label="About", command=about)
#fileMenu.add_seperator()
fileMenu.add_command(label="Exit",command=quit)
textArea.pack()
photo = PhotoImage(file = "aa.png")
w = Label(root, image=photo)
w.pack()
root.mainloop()
|
[
"tkinter.Menu",
"tkinter.messagebox.showinfo",
"tkinter.filedialog.asksaveasfile",
"tkinter.messagebox.askyesno",
"tkinter.filedialog.askopenfile",
"tkinter.Tk",
"tkinter.scrolledtext.ScrolledText"
] |
[((403, 434), 'tkinter.Tk', 'Tk', ([], {'className': '""" AJ Text Editor"""'}), "(className=' AJ Text Editor')\n", (405, 434), False, 'from tkinter import Tk, scrolledtext, Menu, filedialog, END, messagebox\n'), ((445, 510), 'tkinter.scrolledtext.ScrolledText', 'ScrolledText.ScrolledText', (['root'], {'width': '(100)', 'height': '(80)', 'bg': '"""green"""'}), "(root, width=100, height=80, bg='green')\n", (470, 510), True, 'import tkinter.scrolledtext as ScrolledText\n'), ((1128, 1138), 'tkinter.Menu', 'Menu', (['root'], {}), '(root)\n', (1132, 1138), False, 'from tkinter import Tk, scrolledtext, Menu, filedialog, END, messagebox\n'), ((1211, 1221), 'tkinter.Menu', 'Menu', (['menu'], {}), '(menu)\n', (1215, 1221), False, 'from tkinter import Tk, scrolledtext, Menu, filedialog, END, messagebox\n'), ((1548, 1558), 'tkinter.Menu', 'Menu', (['menu'], {}), '(menu)\n', (1552, 1558), False, 'from tkinter import Tk, scrolledtext, Menu, filedialog, END, messagebox\n'), ((550, 624), 'tkinter.filedialog.askopenfile', 'filedialog.askopenfile', ([], {'parent': 'root', 'mode': '"""rb"""', 'title': '"""select a text file"""'}), "(parent=root, mode='rb', title='select a text file')\n", (572, 624), False, 'from tkinter import Tk, scrolledtext, Menu, filedialog, END, messagebox\n'), ((739, 773), 'tkinter.filedialog.asksaveasfile', 'filedialog.asksaveasfile', ([], {'mode': '"""w"""'}), "(mode='w')\n", (763, 773), False, 'from tkinter import Tk, scrolledtext, Menu, filedialog, END, messagebox\n'), ((951, 1007), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Quit"""', '"""Are u sure u want to quit"""'], {}), "('Quit', 'Are u sure u want to quit')\n", (970, 1007), False, 'from tkinter import Tk, scrolledtext, Menu, filedialog, END, messagebox\n'), ((1048, 1106), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""A python Text editor using tkinter!"""'], {}), "('A python Text editor using tkinter!')\n", (1067, 1106), False, 'from tkinter import Tk, scrolledtext, 
Menu, filedialog, END, messagebox\n')]
|
# OBSOLETE
import os
from directory_cache import DirectoryCache
from loguru import logger
from typing import List
from lxml import html, etree
from unidecode import unidecode
import re
class ContentTable():
    """
    ContentTable represents the extracted content of an HTML table.
    It holds both a simplified HTML table (reformat()/new_element) and a
    list of rows (rows), where each row is the list of cell text contents.
    """
    def __init__(self, element: html.Element, fail_on_unexpected_tags = True):
        # The raw <table> element this instance was built from.
        self.orig_element = element
        # When True, unknown tags raise; otherwise they are only logged.
        self.fail_on_unexpected_tags = fail_on_unexpected_tags
        self.id = None
        # Simplified copy of the table, built up by _extract_content().
        self._new_element = html.Element("table")
        self.caption = ""
        # List of rows; each row is a list of cell text strings.
        self.rows = []
        self._extract_content()
    def contains_data(self) -> bool:
        " heuristic: True if any cell text looks like numeric data "
        for r in self.rows:
            for c in r:
                if re.search("^[ 0-9,]+$", c): return True
                if re.search(":[ 0-9,]+,", c): return True
        return False
    def reformat(self) -> html.Element:
        " get the newly created (simplified) table element "
        return self._new_element
    def _extract_content(self):
        """
        Pull information from the HTML table:
        1. Ignore TH/TD distinction
        2. remove content that only changes presentation
        3. assume script/comment tags do not contain data
        Creates a new element fragment and List[List[str]];
        embedded UL lists are converted into a comma delimited string.
        """
        #print(f"input table ===>{html.tostring(self.orig_element)}<<====\n")
        self.id = self.orig_element.get("id")
        if self.id != None:
            self._new_element.attrib["id"] = self.id
        # Buffer for stray TD elements found directly under <table>.
        tr_temp = html.Element("tr")
        for x in self.orig_element:
            #print(f"row ===>{html.tostring(x)}<<====\n")
            # -- handle TD that are missing surrounding TR
            if x.tag == "td":
                logger.warning(f"misplaced TD: {html.tostring(x)}")
                tr_temp.append(x)
                continue
                #self._extract_td(x)
            elif len(tr_temp) > 0:
                # Flush the buffered stray TDs as a synthetic row.
                self._extract_tr(tr_temp)
                tr_temp = html.Element("tr")
            if x.tag == "tr":
                self._extract_tr(x)
            elif x.tag == "thead" or x.tag == "tbody" or x.tag == "tfoot":
                for y in x:
                    if y.tag == "tr":
                        self._extract_tr(y)
                    elif self.fail_on_unexpected_tags:
                        raise Exception(f"unexpected tag in tr: {y.tag}")
                    else:
                        logger.warning(f"unexpected tag in tr: {html.tostring(y)}")
            elif x.tag == "colgroup":
                # colgroup carries only presentation info; skip it.
                # logger.warning(f"colgroup: {html.tostring(x)}")
                pass
            elif x.tag == "caption":
                self._extract_caption(x)
            elif self.fail_on_unexpected_tags:
                logger.warning(f"unexpected tag in table: {html.tostring(x)}")
                raise Exception(f"unexpected tag in table: {x.tag}")
            else:
                logger.warning(f"unexpected tag: {html.tostring(x)}")
        #print(f"output table ===>{html.tostring(self.new_element)}<<====\n")
    def _extract_caption(self, caption: html.Element):
        # Record the caption text and append the simplified element.
        elem, s = self._extract_any(caption)
        self.caption = s
        self._new_element.append(elem)
    def _extract_tr(self, tr: html.Element):
        " extract a row: append simplified cells and record their text "
        #print(f"tr ===>{html.tostring(tr)}<<====\n")
        elem = html.Element("tr")
        elem.text = ""
        elem.tail = ""
        cells = []
        for x in tr:
            if x.tag != "td" and x.tag != "th":
                # Non-cell child inside a row: skip comments/scripts,
                # wrap anything else in a synthetic <td>.
                if x.tag == etree.Comment: continue
                if x.tag == "script": continue
                logger.warning(f" adding td around {html.tostring(x)}")
                ch_elem = html.Element("td")
                bad_elem, val = self._extract_any(x)
                if bad_elem != None:
                    ch_elem.append(bad_elem)
                else:
                    ch_elem.text = val
            else:
                ch_elem, val = self._extract_any(x)
                if ch_elem == None: ch_elem = html.Element(x.tag)
            ch_elem.tail = ""
            elem.append(ch_elem)
            cells.append(val)
        self._new_element.append(elem)
        self.rows.append(cells)
    def _extract_any(self, x: html.Element) -> [html.Element, str]:
        " extract/simplify an HTML element (recursive) "
        #print(f"extract any ===>{html.tostring(x)}<<====\n")
        # nested tables are special because we are processing a flattend list so ignore them.
        if x.tag == "table": return html.Element("table"), "[TABLE]"
        # lists are special because we want to build up a comma seperated list
        if x.tag == "ul": return self._extract_list(x)
        if x.tag == etree.Comment: return etree.Comment(), ""
        # no children --> text element
        if len(x) == 0:
            if x.text == None:
                return None, ""
            elem, val = x, self._extract_text(x.text)
            return elem, val
        elem = html.Element(x.tag)
        items = []
        if x.text != None:
            elem.text = x.text
            items.append(x.text)
        for y in x:
            #ignore/strip out layout tags
            if y.tag == etree.Comment: continue
            if y.tag in ["script", "noscript", "br", "hr", "input", "button", "svg", "img", "form"]: continue
            if y.tag in ["span", "div", "h3", "h2", "h1", "small", "strong", "em", "sup", "i",
                         "a", "b", "u", "p", "ul", "label", "sub"]:
                elem_ch, s = self._extract_any(y)
                if elem_ch != None:
                    # For a single-child element, flatten the text in place
                    # instead of keeping the nested wrapper.
                    if len(x) == 1:
                        if s != None and s != "":
                            elem.text = s
                    else:
                        elem.append(elem_ch)
                    if s != None and s != "":
                        items.append(s)
            elif y.tag == "table" or y.tag == "iframe":
                # Keep a placeholder element/marker rather than descending.
                elem.append(html.Element(y.tag))
                items.append(f"[{y.tag.upper()}]")
            else:
                logger.warning(f"unexpected tag {y.tag} ===>{html.tostring(y)}<<====\n")
                elem_ch, s = self._extract_any(y)
                if elem_ch != None:
                    if len(x) == 1:
                        if s != None and s != "":
                            elem.text = s
                    else:
                        elem.append(elem_ch)
                    if s != None and s != "":
                        items.append(s)
        val = " ".join(items)
        return elem, val
    def _extract_list(self, x: html.Element) -> [html.Element, str]:
        " extract a UL list as a comma-delimited string "
        #print(f"list ===>{html.tostring(x)}<<====\n")
        elem = html.Element("ul")
        result = []
        for y in x:
            #print(f"li ===>{html.tostring(y)}<<====\n")
            if y.tag != "li": raise Exception(f"Unexpected tag: {y.tag}")
            if len(y) == 0: continue
            ch_elem, s = self._extract_any(y)
            elem.append(ch_elem)
            if s != None:
                # Escape literal commas so they survive the join below.
                s = s.replace(",", "_comma_")
                result.append(s)
        val = ", ".join(result)
        return elem, val
    def _extract_text(self, s: str) -> str:
        " normalize text: transliterate non-ascii chars and strip whitespace "
        if s == None: return s
        return unidecode(s).strip()
|
[
"lxml.html.tostring",
"unidecode.unidecode",
"lxml.etree.Comment",
"lxml.html.Element",
"re.search"
] |
[((636, 657), 'lxml.html.Element', 'html.Element', (['"""table"""'], {}), "('table')\n", (648, 657), False, 'from lxml import html, etree\n'), ((1789, 1807), 'lxml.html.Element', 'html.Element', (['"""tr"""'], {}), "('tr')\n", (1801, 1807), False, 'from lxml import html, etree\n'), ((3709, 3727), 'lxml.html.Element', 'html.Element', (['"""tr"""'], {}), "('tr')\n", (3721, 3727), False, 'from lxml import html, etree\n'), ((5383, 5402), 'lxml.html.Element', 'html.Element', (['x.tag'], {}), '(x.tag)\n', (5395, 5402), False, 'from lxml import html, etree\n'), ((7103, 7121), 'lxml.html.Element', 'html.Element', (['"""ul"""'], {}), "('ul')\n", (7115, 7121), False, 'from lxml import html, etree\n'), ((923, 949), 're.search', 're.search', (['"""^[ 0-9,]+$"""', 'c'], {}), "('^[ 0-9,]+$', c)\n", (932, 949), False, 'import re\n'), ((982, 1008), 're.search', 're.search', (['""":[ 0-9,]+,"""', 'c'], {}), "(':[ 0-9,]+,', c)\n", (991, 1008), False, 'import re\n'), ((4065, 4083), 'lxml.html.Element', 'html.Element', (['"""td"""'], {}), "('td')\n", (4077, 4083), False, 'from lxml import html, etree\n'), ((4918, 4939), 'lxml.html.Element', 'html.Element', (['"""table"""'], {}), "('table')\n", (4930, 4939), False, 'from lxml import html, etree\n'), ((5129, 5144), 'lxml.etree.Comment', 'etree.Comment', ([], {}), '()\n', (5142, 5144), False, 'from lxml import html, etree\n'), ((7728, 7740), 'unidecode.unidecode', 'unidecode', (['s'], {}), '(s)\n', (7737, 7740), False, 'from unidecode import unidecode\n'), ((2284, 2302), 'lxml.html.Element', 'html.Element', (['"""tr"""'], {}), "('tr')\n", (2296, 2302), False, 'from lxml import html, etree\n'), ((4396, 4415), 'lxml.html.Element', 'html.Element', (['x.tag'], {}), '(x.tag)\n', (4408, 4415), False, 'from lxml import html, etree\n'), ((6337, 6356), 'lxml.html.Element', 'html.Element', (['y.tag'], {}), '(y.tag)\n', (6349, 6356), False, 'from lxml import html, etree\n'), ((2064, 2080), 'lxml.html.tostring', 'html.tostring', (['x'], {}), 
'(x)\n', (2077, 2080), False, 'from lxml import html, etree\n'), ((4016, 4032), 'lxml.html.tostring', 'html.tostring', (['x'], {}), '(x)\n', (4029, 4032), False, 'from lxml import html, etree\n'), ((6488, 6504), 'lxml.html.tostring', 'html.tostring', (['y'], {}), '(y)\n', (6501, 6504), False, 'from lxml import html, etree\n'), ((2776, 2792), 'lxml.html.tostring', 'html.tostring', (['y'], {}), '(y)\n', (2789, 2792), False, 'from lxml import html, etree\n'), ((3105, 3121), 'lxml.html.tostring', 'html.tostring', (['x'], {}), '(x)\n', (3118, 3121), False, 'from lxml import html, etree\n'), ((3262, 3278), 'lxml.html.tostring', 'html.tostring', (['x'], {}), '(x)\n', (3275, 3278), False, 'from lxml import html, etree\n')]
|
from django.test import TestCase
from rapidsms.tests.harness import CreateDataMixin
from rtwilio.forms import TwilioForm
class TwilioFormTest(CreateDataMixin, TestCase):
    """Tests for TwilioForm validation and incoming-data extraction."""

    def _twilio_payload(self):
        # Build a POST payload shaped like Twilio's incoming-SMS webhook.
        return {
            "From": "+12223334444",
            "To": "+19998887777",
            "Body": self.random_string(50),
            "AccountSid": self.random_string(34),
            "SmsSid": self.random_string(34),
        }

    def test_valid_form(self):
        """Form should be valid if GET keys match configuration."""
        form = TwilioForm(self._twilio_payload(),
                          backend_name='rtwilio-backend')
        self.assertTrue(form.is_valid())

    def test_invalid_form(self):
        """Form is invalid if POST keys don't match configuration."""
        bad_payload = {'invalid-phone': '1112223333',
                       'invalid-message': 'hi there'}
        form = TwilioForm(bad_payload, backend_name='rtwilio-backend')
        self.assertFalse(form.is_valid())

    def test_get_incoming_data(self):
        """get_incoming_data should return matching text and connection."""
        payload = self._twilio_payload()
        form = TwilioForm(payload, backend_name='rtwilio-backend')
        form.is_valid()
        incoming = form.get_incoming_data()
        self.assertEqual(payload['Body'], incoming['text'])
        self.assertEqual(payload['From'], incoming['connection'].identity)
        self.assertEqual(payload['SmsSid'],
                         incoming['fields']['external_id'])
        self.assertEqual('rtwilio-backend',
                         incoming['connection'].backend.name)
|
[
"rtwilio.forms.TwilioForm"
] |
[((519, 567), 'rtwilio.forms.TwilioForm', 'TwilioForm', (['data'], {'backend_name': '"""rtwilio-backend"""'}), "(data, backend_name='rtwilio-backend')\n", (529, 567), False, 'from rtwilio.forms import TwilioForm\n'), ((806, 854), 'rtwilio.forms.TwilioForm', 'TwilioForm', (['data'], {'backend_name': '"""rtwilio-backend"""'}), "(data, backend_name='rtwilio-backend')\n", (816, 854), False, 'from rtwilio.forms import TwilioForm\n'), ((1257, 1305), 'rtwilio.forms.TwilioForm', 'TwilioForm', (['data'], {'backend_name': '"""rtwilio-backend"""'}), "(data, backend_name='rtwilio-backend')\n", (1267, 1305), False, 'from rtwilio.forms import TwilioForm\n')]
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
import sys
import time
import getopt
import requests
import sqlite3
import math
import re
import os
from lxml import etree
'''
未启用的两个函数
data_check()
按照主表检查缺少数据,时间非常长,需手动配置
test_page() 输出单页数据
图片服务器:
https://jp.netcdn.space/digital/video/miae00056/miae00056jp-10.jpg
https://pics.dmm.co.jp/digital/video/miae00056/miae00056jp-10.jpg
https://pics.dmm.com/digital/video/miae00056/miae00056jp-10.jpg
小封面:
https://jp.netcdn.space/digital/video/miae00056/miae00056ps.jpg
https://pics.javbus.info/thumb/{{linkid}}.jpg
大封面:
https://jp.netcdn.space/digital/video/miae00056/miae00056pl.jpg
'''
class avmo:
def __init__(self):
#================主要配置================
#目标域名
# self.site = 'avmoo.xyz'
# 单页代理
self.site_url = 'https://moozik.cn/mousehole.php?url=https://avmask.com/cn/'
# 原网址
self.site_url = 'https://avmask.com/cn/'
#sqlite数据库地址
if os.path.exists('avmoo_.db'):
self.sqlite_file = 'avmoo_.db'
else:
self.sqlite_file = 'avmoo.db'
#主函数延时
self.main_sleep = 1
#其他配置初始化
self.config()
#================测试区间================
# self.main(sqlfun.return_dict())
# exit()
'''
#重试缺失地址
# self.data_check()
exit()
'''
#================读取参数================
try:
opts, args = getopt.getopt(
sys.argv[1:],
"hs:e:arp:gtu:c",
['help', 'start', 'end', 'auto', 'retry', 'proxies', 'genre', 'stars', 'sub', 'cover']
)
except:
self.usage()
exit()
#展示说明
if len(sys.argv) == 1:
self.usage()
exit()
opt_dict = {}
opt_r = {
'-h':'-help',
'-s':'-start',
'-e':'-end',
'-a':'-auto',
'-r':'-retry',
'-p':'-proxies',
'-g':'-genre',
'-t':'-stars',
'-u':'-sub',
'-c':'-cover',
}
for op, value in opts:
if op in opt_r:
opt_dict[opt_r[op]] = value
else:
opt_dict[op] = value
if '-help' in opt_dict:
self.usage()
exit()
if '-proxies' in opt_dict:
self.s.proxies['https'] = opt_dict['-proxies']
if '-auto' in opt_dict:
self.auto = True
self.get_last()
if '-cover' in opt_dict:
self.sub_cover = True
if '-start' in opt_dict:
self.start_id = opt_dict['-start']
if '-end' in opt_dict:
self.end_id = opt_dict['-end']
if '-retry' in opt_dict:
self.retry_errorurl()
exit()
if '-sub' in opt_dict:
self.sub_keyword = opt_dict['-sub'].upper()
self.get_sub()
exit()
if '-genre' in opt_dict:
self.genre_update()
exit()
if '-stars' in opt_dict:
self.stars_loop()
exit()
#主程序
self.main(self.get_linkid())
#默认配置
def config(self):
#待insert数据
self.insert_list = []
#遍历linkid
self.abc_sequence = '0123456789abcdefghijklmnopqrstuvwxyz'
#获取sl的字典列表dl
self.dl = {}
for item in range(len(self.abc_sequence)):
self.dl[self.abc_sequence[item]] = item
#字幕爬虫默认不覆盖
self.sub_cover = False
#更新flag
self.last_flag = False
#是否重试
self.flag_retry = True
#开始id
self.start_id = '0000'
#结束id
self.end_id = 'zzzz'
#自动获取start stop
self.auto = False
#插入阈值
self.insert_threshold = 20
#用于重试失败计数
self.retry_counter = 0
#重试阈值
self.retry_threshold = 5
#主表
self.table_main = 'av_list'
#重试表
self.table_retry = 'av_error_linkid'
self.table_genre = 'av_genre'
self.table_stars = 'av_stars'
#表结构
self.column = ['id', 'linkid', 'director', 'director_url', 'studio',
'studio_url', 'label', 'label_url', 'series', 'series_url', 'image_len',
'genre', 'len', 'stars', 'av_id', 'title', 'bigimage', 'release_date', 'stars_url']
#表结构str
self.column_str = ",".join(self.column)
#链接数据库
self.conn()
#站点url
# self.site_url = 'https://{0}/cn'.format(self.site)
#番号主页url
self.movie_url = self.site_url+'/movie/'
#导演 制作 发行 系列
self.director = self.site_url+'/director/'
self.studio = self.site_url+'/studio/'
self.label = self.site_url+'/label/'
self.series = self.site_url+'/series/'
self.genre_url = self.site_url+'/genre/'
self.star_url = self.site_url+'/star/'
#创建会话对象
self.s = requests.Session()
#超时时间
self.s.timeout = 3
self.s.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
#代理
self.s.proxies = {
#'https':'http://127.0.0.1:1080'
}
#sqlite conn
def conn(self):
try:
#链接sqlite
self.CONN = sqlite3.connect(self.sqlite_file, check_same_thread=False)
self.CUR = self.CONN.cursor()
except:
print('connect database fail.')
sys.exit()
#写出命令行格式
def usage(self):
usage = '''
-h(-help):使用说明
-s(-start):开始id
例如:'-s 0000' '-s 1ddd'
-e(-end):结束id
例如:'-e xxxx' '-e zzzz'
-a(-auto):(常用功能)获取当前数据库最新的一个id和网站最新的一个id,补全新增数据
-r(-retry):重试错误链接
-g(-genre):更新类别
-t(-stars):更新演员
-p(-proxies):使用指定的https代理服务器或SOCKS5代理服务器。
例如:'-p http://127.0.0.1:1080,-p socks5://127.0.0.1:52772'
-u(-163sub):使用指定关键字查找视频字幕
例如:'-u IPZ' '-u ABP'
-c(-cover):重新抓取字幕数据
'''
print(usage.replace(' ',''))
def get_subjson(self, response):
json = response.json()
data = []
linkID = 0
for item in json.get('Data'):
linkID = item['linkID']
if self.sub_keyword not in item['mkvName'].replace(' ','-'):
continue
avid_tmp = re.findall('[a-zA-Z0-9]+[ \-]\d{3,}',item['mkvName'])
if avid_tmp == []:
continue
time_tmp = re.findall('\d{4}-\d{2}-\d{2}', item['otherName3'])
if time_tmp != []:
time_tmp = time_tmp[0]
else:
time_tmp = ''
data.append(
(
item['ID'].strip(),
avid_tmp[0].upper().replace(' ', '-'),
time_tmp
)
)
return int(json.get('Count')), data, linkID
#获取字幕
def get_sub(self):
def get_suburl(keyword, item=None):
if item == None:
return 'http://www.163sub.org/search.ashx?q={}'.format(keyword)
else:
return 'http://www.163sub.org/search.ashx?q={}&lastid={}'.format(keyword, item)
av_163sub_log = {
'sub_keyword': self.sub_keyword,
'run_time': time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime()
),
'data_count': '',
'insert_count': '',
}
#查询抓取历史
SELECT_SQL = 'SELECT * FROM av_163sub_log WHERE sub_keyword = "{}" ORDER BY run_time DESC LIMIT 1;'.format(
self.sub_keyword)
self.CUR.execute(SELECT_SQL)
log_data = self.CUR.fetchall()
if log_data != []:
print('上次查询时间:{}\n条数:{}\n有效条数:{}\n'.format(
log_data[0][2], log_data[0][3], log_data[0][4]
))
#查询当前条数
response = self.s.get(get_suburl(self.sub_keyword))
res = self.get_subjson(response)
print('163sub实时数据:{}条'.format(
res[0]))
if False == self.sub_cover and log_data != [] and res[0] == log_data[0][3]:
print('需要重新抓取请添加参数-c(-cover)\n')
exit()
resultArr = []
if res[1] != []:
resultArr.extend(res[1])
av_163sub_log['data_count'] = res[0]
else:
print('没有找到!')
exit()
for item in range(1, math.ceil(res[0] / 10)):
print('当前:', item * 10)
response = self.s.get(get_suburl(self.sub_keyword, res[2]))
res = self.get_subjson(response)
resultArr.extend(res[1])
print(self.sub_keyword, '字幕有效条数为:', len(resultArr))
av_163sub_log['insert_count'] = len(resultArr)
#计算新增的字幕
SELECT_SQL = 'SELECT DISTINCT av_id FROM "av_163sub" where av_id like "{}%" ORDER BY av_id;'.format(
self.sub_keyword)
self.CUR.execute(SELECT_SQL)
fetch_data = self.CUR.fetchall()
if fetch_data != []:
history_data = set([x[0] for x in fetch_data])
new_data = set([x[1] for x in resultArr])
new_sub = new_data - history_data
if len(new_sub) != 0:
print('新增的字幕为:')
print("\n".join(list(new_sub)))
if len(resultArr) > 0:
INSERT_SQL = 'REPLACE INTO av_163sub VALUES({});'.format('),('.join([
'"{}","{}","{}"'.format(x[0], x[1], x[2]) for x in resultArr]))
INSERT_LOG = 'REPLACE INTO av_163sub_log ("sub_keyword","run_time","data_count","insert_count")VALUES("{}","{}","{}","{}");'.format(
av_163sub_log['sub_keyword'],
av_163sub_log['run_time'],
av_163sub_log['data_count'],
av_163sub_log['insert_count'],
)
while True:
try:
self.CUR.execute(INSERT_SQL)
self.CUR.execute(INSERT_LOG)
self.CONN.commit()
break
except:
print('database is locked!')
time.sleep(3)
#主函数,抓取页面内信息
def main(self, looplist):
for item in looplist:
url = self.movie_url + item
time.sleep(self.main_sleep)
try:
res = self.s.get(url)
if res.status_code != 200:
self.insert_retry((item, res.status_code))
print(url, res.status_code)
continue
except:
print(url, 'requests.get error')
self.insert_retry((item, 777))
continue
try:
html = etree.HTML(res.text)
except:
print(url, 'etree.HTML error')
self.insert_retry((item, 888))
continue
#解析页面内容
data = self.movie_page_data(html)
#从linkid获取id
id_column = self.linkid2id(item)
#输出当前进度
print(data[12].ljust(30), data[15].ljust(11), item.ljust(5), id_column)
self.insert_list.append(
"'{0}','{1}','{2}'".format(id_column, item, "','".join(data))
)
#存储数据
if len(self.insert_list) == self.insert_threshold:
self.movie_save()
#插入剩余的数据
self.movie_save()
#重试错误数据
self.retry_errorurl()
#获取最后一次的id
def get_last(self):
sql = "SELECT linkid FROM {0} ORDER BY linkid DESC LIMIT 0,1".format(self.table_main)
self.CUR.execute(sql)
res = self.CUR.fetchall()
self.start_id = res[0][0]
try:
response = self.s.get(self.site_url)
except:
print('timeout.')
exit()
if response.status_code != 200:
print('page error.')
exit()
html = etree.HTML(response.text)
self.end_id = html.xpath('//*[@id="waterfall"]/div[1]/a')[0].attrib.get('href')[-4:]
print('数据库最新ID:{0},线上最新ID:{1}'.format(self.start_id, self.end_id))
print('本次更新数量:{}'.format(self.linkid2id(self.end_id)-self.linkid2id(self.start_id)))
#插入重试表
def insert_retry(self, data):
self.CUR.execute("REPLACE INTO {0}(linkid, status_code, datetime)VALUES('{1[0]}', {1[1]}, '{2}');"
.format(
self.table_retry,
data,
time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime()
)
)
)
self.CONN.commit()
#获取演员
def stars_loop(self, map_list = []):
self.CUR.execute(
'SELECT linkid FROM {} ORDER BY linkid DESC LIMIT 0,1'.format(self.table_stars))
self.start_id = self.CUR.fetchall()[0][0]
self.end_id = '3000'
def get_val(str):
return str.split(':')[1].strip()
page_404_count = 0
if map_list == []:
map_list = self.get_linkid()
print(map_list)
return
for linkid in map_list:
url = self.star_url + linkid
sort_id = self.linkid2id(linkid)
print(linkid, sort_id)
data = {
'id': sort_id,
'linkid': linkid,
'name': '',
'name_history': '',
'birthday': '',
'height': '',
'cup': '',
'bust': '',
'waist': '',
'hips': '',
'hometown': '',
'hobby': '',
'headimg': ''
}
try:
response = self.s.get(url)
html = etree.HTML(response.text)
except:
data['birthday'] = 'error'
self.stars_save(data)
print('出现错误,延时10s')
time.sleep(10)
continue
if response.status_code == 403:
print(data['id'], ' ', data['linkid'],' status_code:403')
exit()
if response.status_code == 404:
#id大于38000的页面,出现404直接结束
if sort_id > 38000:
print('page 404,done!')
exit()
page_404_count += 1
#检查error条目
if map_list == []:
sql = 'SELECT linkid FROM "av_stars" WHERE birthday="error"'
self.CUR.execute(sql)
error_list = self.CUR.fetchall()
map_list = [x[0] for x in error_list]
self.stars_loop(map_list)
if page_404_count == 10:
print('stat=404 count:10')
exit()
else:
print(data['id'],' ',data['linkid'],' ',page_404_count)
data['birthday'] = '404'
self.stars_save(data)
time.sleep(1)
continue
page_404_count = 0
try:
data['name'] = html.xpath(
'/html/head/meta[8]/@content')[0].split(',', 1)[0]
data['headimg'] = html.xpath(
'//*[@id="waterfall"]/div[1]/div/div[1]/img/@src')[0].split('/', 3)[3].replace('mono/actjpgs/nowprinting.gif', '')
print(data)
except:
print(response.text)
exit()
for item_p in html.xpath('//*[@id="waterfall"]/div[1]/div/div[2]/p'):
if item_p.text == None:
continue
if '生日' in item_p.text:
data['birthday'] = get_val(item_p.text)
continue
if '身高' in item_p.text:
data['height'] = get_val(item_p.text)
continue
if '罩杯' in item_p.text:
data['cup'] = get_val(item_p.text)
continue
if '胸围' in item_p.text:
data['bust'] = get_val(item_p.text)
continue
if '腰围' in item_p.text:
data['waist'] = get_val(item_p.text)
continue
if '臀围' in item_p.text:
data['hips'] = get_val(item_p.text)
continue
if '出生地' in item_p.text:
data['hometown'] = get_val(item_p.text)
continue
if '爱好' in item_p.text:
data['hobby'] = get_val(item_p.text)
continue
#讲括号中的名字记录为曾用名
tmp = data['name'].replace('(','(').replace(')','').split('(')
if len(tmp) == 2:
data['name_history'] = tmp[1]
print(
data['birthday'].ljust(13),
data['height'].ljust(7),
data['cup'].ljust(3),
data['bust'].ljust(7),
data['waist'].ljust(7),
data['hips'].ljust(7),
data['name'].ljust(15),
data['hometown']
)
self.stars_save(data)
if data['cup'] == 'F':
time.sleep(5)
elif data['cup'] == 'E':
time.sleep(3)
elif data['cup'] == 'D':
time.sleep(2.5)
elif data['cup'] == 'C':
time.sleep(2)
elif data['cup'] == 'B':
time.sleep(1)
else:
time.sleep(1)
def stars_save(self, data):
insert_sql = 'REPLACE INTO "{}" VALUES({},"{}","{}","{}","{}","{}","{}","{}","{}","{}","{}","{}","{}")'.format(
self.table_stars,
data['id'],
data['linkid'],
data['name'],
data['name_history'],
data['birthday'],
data['height'],
data['cup'],
data['bust'],
data['waist'],
data['hips'],
data['hometown'],
data['hobby'],
data['headimg']
)
self.CUR.execute(insert_sql)
self.CONN.commit()
#遍历urlid
def get_linkid(self):
for abcd in self.abc_map():
if abcd <= self.start_id:
continue
if self.start_id < abcd <= self.end_id:
yield abcd
if abcd > self.end_id:
print('start:{0} end:{1} done!'.format(
self.start_id, self.end_id))
self.movie_save()
exit()
#由urlid获取排序自增id
def linkid2id(self, item):
return self.dl[item[3]] + self.dl[item[2]]*36 + self.dl[item[1]]*1296 + self.dl[item[0]]*46656
#插入数据库
def movie_save(self):
if len(self.insert_list) == 0:
return
self.replace_sql(self.table_main, self.column_str, "),(".join(self.insert_list))
print('INSERT:', len(self.insert_list))
self.insert_list = []
self.retry_counter += 1
if self.flag_retry:
#重试失败地址
if self.retry_counter >= self.retry_threshold:
self.retry_counter = 0
self.retry_errorurl()
def replace_sql(self, table, column, data):
self.CUR.execute("REPLACE INTO {0}({1})VALUES({2});".format(table, column, data))
self.CONN.commit()
#重试
def retry_errorurl(self):
self.CUR.execute("SELECT * FROM {0} WHERE status_code<>'404' ORDER BY linkid;".format(self.table_retry))
res_retry = self.CUR.fetchall()
reslen = len(res_retry)
if reslen == 0:
return
print('error url count:', reslen)
del_list = []
update_list = []
def update_sql(update_list):
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
sql = "REPLACE INTO {0}(linkid, status_code, datetime)VALUES({1});".format(
self.table_retry, "),(".join(["'{0[0]}',{0[1]},'{1}'".format(x, time_now) for x in update_list]))
self.CUR.execute(sql)
self.CONN.commit()
def delete_sql(del_list):
sql = 'DELETE FROM {0} WHERE {1};'.format(
self.table_retry, ' OR '.join([" linkid='{0}' ".format(x) for x in del_list]))
self.CUR.execute(sql)
self.CONN.commit()
for item in res_retry:
retry_linkid = item[0]
reslen -= 1
#统一更新表,提高效率
if len(update_list) == 20:
update_sql(update_list)
update_list = []
print('done 20.')
url = self.movie_url + retry_linkid
try:
response = self.s.get(url)
html = etree.HTML(response.text)
except:
# 重写重试记录
if response.status_code == 404:
update_list.append((retry_linkid, 404))
print(reslen, retry_linkid, 'status_code:404')
continue
if response.status_code != 200:
# 重写重试记录
update_list.append((retry_linkid, response.status_code))
print(reslen, retry_linkid, 'status_code:{}'.format(response.status_code))
continue
print(reslen, retry_linkid, 'success')
data = self.movie_page_data(html)
id = self.linkid2id(retry_linkid)
self.insert_list.append("'{0}','{1}','{2}'".format(id, retry_linkid, "','".join(data)))
del_list.append(retry_linkid)
#存储数据
if len(self.insert_list) == self.insert_threshold:
#插入数据
print(self.insert_threshold, 'insert.')
self.replace_sql(self.table_main, self.column_str, "),(".join(self.insert_list))
if del_list != []:
delete_sql(del_list)
del_list = []
#插入数据
if len(self.insert_list) != 0:
self.replace_sql(self.table_main, self.column_str, "),(".join(self.insert_list))
#删除数据
if len(del_list) != 0:
delete_sql(del_list)
#更新数据
if len(update_list) != 0:
update_sql(update_list)
def movie_page_data(self, html):
print(html.xpath('/html/body/div[2]/div[1]/div[2]/p[1]/span[2]/text()'))
exit()
data = ['' for x in range(17)]
#番号
try:
data[12] = html.xpath('/html/body/div[2]/div[1]/div[2]/p[1]/span[2]/text()')[0]
except:
return data
#获取:导演、制作商、发行商、系列
right_info = html.xpath('/html/body/div[2]/div[1]/div[2]/p/a')
for i in right_info:
if i.text == None:
continue
tmp_text = i.text.replace("'", '"')
tmp_href = i.attrib.get('href')
if self.director in tmp_href:
#导演
data[0] = tmp_text
data[1] = tmp_href.replace(self.director, '')
elif self.studio in tmp_href:
#制作商
data[2] = tmp_text
data[3] = tmp_href.replace(self.studio, '')
elif self.label in tmp_href:
#发行商
data[4] = tmp_text
data[5] = tmp_href.replace(self.label, '')
elif self.series in tmp_href:
#系列
data[6] = tmp_text
data[7] = tmp_href.replace(self.series, '')
#图片个数image_len
data[8] = str(len(html.xpath('//div[@id="sample-waterfall"]/a')))
#获取类别列表genre
data[9] = '|'.join(html.xpath('/html/body/div[2]/div[1]/div[2]/p/span/a/text()')).replace("'", '"')
#时长len
lentext = html.xpath('/html/body/div[2]/div[1]/div[2]/p[3]/text()')
if len(lentext) != 0 and '分钟' in lentext[0]:
data[10] = lentext[0].replace('分钟', '').strip()
else:
data[10] = '0'
#演员stars
data[11] = '|'.join(html.xpath('//div[@id="avatar-waterfall"]/a/span/text()')).replace("'", '"')
#接取除了番号的标题
data[13] = html.xpath('/html/body/div[2]/h3/text()')[0][len(data[12]) + 1:].replace("'", '"')
#封面 截取域名之后的部分
data[14] = '/' + html.xpath('/html/body/div[2]/div[1]/div[1]/a/img/@src')[0].split('/',5)[5]
#发行时间
data[15] = html.xpath('/html/body/div[2]/div[1]/div[2]/p[2]/text()')[0].strip()
#stars_url
stars_url_list = html.xpath('//div[@id="avatar-waterfall"]/a/@href')
if stars_url_list != None and len(stars_url_list)!=0:
data[16] = '|'.join([re.findall('([a-z0-9]+)$',x)[0].rjust(4,'0') for x in stars_url_list])
return data
def abc_map(self):
for i1 in self.abc_sequence:
for i2 in self.abc_sequence:
for i3 in self.abc_sequence:
for i4 in self.abc_sequence:
yield (i1 + i2 + i3 + i4)
#检查被遗漏的页面,并插入数据库
#按照linkid的顺序检查漏掉的番号,并不是从重试表检索
def data_check(self):
self.CUR.execute("SELECT linkid FROM {0} WHERE 1 ORDER BY linkid;".format(self.table_main))
res = self.CUR.fetchall()
res_list = [x[0] for x in res]
res_min = res_list[0]
res_max = res_list[len(res)-1]
miss_list = []
for abcd in self.abc_map():
if abcd <= res_min:
continue
if abcd >= res_max:
break
if abcd in res_list:
continue
else:
miss_list.append(abcd)
continue
print('miss count:', len(miss_list))
print('需要遍历请手动修改代码')
exit()
self.CUR.execute('DELETE FROM "{0}";'.format(self.table_retry))
self.CONN.commit()
if len(miss_list) != 0:
for item in miss_list:
self.CUR.execute('INSERT INTO "{0}" ("linkid") VALUES ("{1}");'.format(self.table_retry, item))
self.CONN.commit()
else:
print("miss_list is empty")
return
#重试错误链接并插入数据库
self.CUR.execute('SELECT linkid FROM "{0}" ORDER BY linkid;'.format(self.table_retry))
res = self.CUR.fetchall()
self.main([x[0] for x in res])
#插入剩余的数据
self.movie_save()
#获取所有类别
def genre_update(self):
html = etree.HTML(self.s.get(self.genre_url).text)
insert_list = []
h4 = html.xpath('/html/body/div[2]/h4/text()')
div = html.xpath('/html/body/div[2]/div')
for div_item in range(len(div)):
g_title = h4[div_item]
a_list = div[div_item].xpath('a')
for a_item in a_list:
if a_item.text == None:
continue
g_name = a_item.text#.replace('・','')
g_id = a_item.attrib.get('href').replace(self.genre_url,'')
insert_list.append("'{0}','{1}','{2}'".format(g_id,g_name,g_title))
sql = "REPLACE INTO {} (id,name,title)VALUES({});".format(self.table_genre, "),(".join(insert_list))
self.CUR.execute(sql)
self.CONN.commit()
print('update record:{}'.format(len(insert_list)))
#测试单个页面
def test_page(self, linkid):
url = self.movie_url + linkid
res = self.s.get(url).text
#解析页面内容
data = self.movie_page_data(etree.HTML(res))
print(data)
if __name__ == '__main__':
avmo()
|
[
"getopt.getopt",
"math.ceil",
"requests.Session",
"os.path.exists",
"time.sleep",
"re.findall",
"sqlite3.connect",
"time.localtime",
"lxml.etree.HTML",
"sys.exit"
] |
[((978, 1005), 'os.path.exists', 'os.path.exists', (['"""avmoo_.db"""'], {}), "('avmoo_.db')\n", (992, 1005), False, 'import os\n'), ((5027, 5045), 'requests.Session', 'requests.Session', ([], {}), '()\n', (5043, 5045), False, 'import requests\n'), ((12199, 12224), 'lxml.etree.HTML', 'etree.HTML', (['response.text'], {}), '(response.text)\n', (12209, 12224), False, 'from lxml import etree\n'), ((1471, 1608), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""hs:e:arp:gtu:c"""', "['help', 'start', 'end', 'auto', 'retry', 'proxies', 'genre', 'stars',\n 'sub', 'cover']"], {}), "(sys.argv[1:], 'hs:e:arp:gtu:c', ['help', 'start', 'end',\n 'auto', 'retry', 'proxies', 'genre', 'stars', 'sub', 'cover'])\n", (1484, 1608), False, 'import getopt\n'), ((5452, 5510), 'sqlite3.connect', 'sqlite3.connect', (['self.sqlite_file'], {'check_same_thread': '(False)'}), '(self.sqlite_file, check_same_thread=False)\n', (5467, 5510), False, 'import sqlite3\n'), ((6546, 6602), 're.findall', 're.findall', (['"""[a-zA-Z0-9]+[ \\\\-]\\\\d{3,}"""', "item['mkvName']"], {}), "('[a-zA-Z0-9]+[ \\\\-]\\\\d{3,}', item['mkvName'])\n", (6556, 6602), False, 'import re\n'), ((6680, 6734), 're.findall', 're.findall', (['"""\\\\d{4}-\\\\d{2}-\\\\d{2}"""', "item['otherName3']"], {}), "('\\\\d{4}-\\\\d{2}-\\\\d{2}', item['otherName3'])\n", (6690, 6734), False, 'import re\n'), ((8681, 8703), 'math.ceil', 'math.ceil', (['(res[0] / 10)'], {}), '(res[0] / 10)\n', (8690, 8703), False, 'import math\n'), ((10551, 10578), 'time.sleep', 'time.sleep', (['self.main_sleep'], {}), '(self.main_sleep)\n', (10561, 10578), False, 'import time\n'), ((27746, 27761), 'lxml.etree.HTML', 'etree.HTML', (['res'], {}), '(res)\n', (27756, 27761), False, 'from lxml import etree\n'), ((5625, 5635), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5633, 5635), False, 'import sys\n'), ((7592, 7608), 'time.localtime', 'time.localtime', ([], {}), '()\n', (7606, 7608), False, 'import time\n'), ((10998, 11018), 'lxml.etree.HTML', 
'etree.HTML', (['res.text'], {}), '(res.text)\n', (11008, 11018), False, 'from lxml import etree\n'), ((14019, 14044), 'lxml.etree.HTML', 'etree.HTML', (['response.text'], {}), '(response.text)\n', (14029, 14044), False, 'from lxml import etree\n'), ((17554, 17567), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (17564, 17567), False, 'import time\n'), ((20191, 20207), 'time.localtime', 'time.localtime', ([], {}), '()\n', (20205, 20207), False, 'import time\n'), ((21132, 21157), 'lxml.etree.HTML', 'etree.HTML', (['response.text'], {}), '(response.text)\n', (21142, 21157), False, 'from lxml import etree\n'), ((12812, 12828), 'time.localtime', 'time.localtime', ([], {}), '()\n', (12826, 12828), False, 'import time\n'), ((14198, 14212), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (14208, 14212), False, 'import time\n'), ((15285, 15298), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (15295, 15298), False, 'import time\n'), ((17621, 17634), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (17631, 17634), False, 'import time\n'), ((10402, 10415), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (10412, 10415), False, 'import time\n'), ((17688, 17703), 'time.sleep', 'time.sleep', (['(2.5)'], {}), '(2.5)\n', (17698, 17703), False, 'import time\n'), ((17757, 17770), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (17767, 17770), False, 'import time\n'), ((17824, 17837), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (17834, 17837), False, 'import time\n'), ((17872, 17885), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (17882, 17885), False, 'import time\n'), ((24985, 25014), 're.findall', 're.findall', (['"""([a-z0-9]+)$"""', 'x'], {}), "('([a-z0-9]+)$', x)\n", (24995, 25014), False, 'import re\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .norm_module import *
from .mask_regression import *
from .sync_batchnorm import SynchronizedBatchNorm2d
BatchNorm = SynchronizedBatchNorm2d
class ResnetGenerator128(nn.Module):
def __init__(self, ch=64, z_dim=128, num_classes=10, output_dim=3):
super(ResnetGenerator128, self).__init__()
self.num_classes = num_classes
self.label_embedding = nn.Embedding(num_classes, 180)
num_w = 128+180
self.fc = nn.utils.spectral_norm(nn.Linear(z_dim, 4*4*16*ch))
self.res1 = ResBlock(ch*16, ch*16, upsample=True, num_w=num_w)
self.res2 = ResBlock(ch*16, ch*8, upsample=True, num_w=num_w)
self.res3 = ResBlock(ch*8, ch*4, upsample=True, num_w=num_w)
self.res4 = ResBlock(ch*4, ch*2, upsample=True, num_w=num_w, psp_module=True)
self.res5 = ResBlock(ch*2, ch*1, upsample=True, num_w=num_w, predict_mask=False)
self.final = nn.Sequential(BatchNorm(ch),
nn.ReLU(),
conv2d(ch, output_dim, 3, 1, 1),
nn.Tanh())
# mapping function
mapping = list()
self.mapping = nn.Sequential(*mapping)
self.alpha1 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha2 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha3 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha4 = nn.Parameter(torch.zeros(1, 184, 1))
self.sigmoid = nn.Sigmoid()
self.mask_regress = MaskRegressNetv2(num_w)
self.init_parameter()
def forward(self, z, bbox, z_im=None, y=None):
b, o = z.size(0), z.size(1)
label_embedding = self.label_embedding(y)
z = z.view(b * o, -1)
label_embedding = label_embedding.view(b * o, -1)
latent_vector = torch.cat((z, label_embedding), dim=1).view(b, o, -1)
w = self.mapping(latent_vector.view(b * o, -1))
# preprocess bbox
bmask = self.mask_regress(w, bbox)
if z_im is None:
z_im = torch.randn((b, 128), device=z.device)
bbox_mask_ = bbox_mask(z, bbox, 64, 64)
# 4x4
x = self.fc(z_im).view(b, -1, 4, 4)
# 8x8
x, stage_mask = self.res1(x, w, bmask)
# 16x16
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha1 = torch.gather(self.sigmoid(self.alpha1).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha1) + seman_bbox * alpha1
x, stage_mask = self.res2(x, w, stage_bbox)
# 32x32
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha2 = torch.gather(self.sigmoid(self.alpha2).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha2) + seman_bbox * alpha2
x, stage_mask = self.res3(x, w, stage_bbox)
# 64x64
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha3 = torch.gather(self.sigmoid(self.alpha3).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha3) + seman_bbox * alpha3
x, stage_mask = self.res4(x, w, stage_bbox)
# 128x128
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha4 = torch.gather(self.sigmoid(self.alpha4).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha4) + seman_bbox * alpha4
x, _ = self.res5(x, w, stage_bbox)
# to RGB
x = self.final(x)
return x, stage_bbox
def init_parameter(self):
for k in self.named_parameters():
if k[1].dim() > 1:
torch.nn.init.orthogonal_(k[1])
if k[0][-4:] == 'bias':
torch.nn.init.constant_(k[1], 0)
class ResnetGenerator256(nn.Module):
def __init__(self, ch=64, z_dim=128, num_classes=10, output_dim=3):
super(ResnetGenerator256, self).__init__()
self.num_classes = num_classes
self.label_embedding = nn.Embedding(num_classes, 180)
num_w = 128+180
self.fc = nn.utils.spectral_norm(nn.Linear(z_dim, 4*4*16*ch))
self.res1 = ResBlock(ch*16, ch*16, upsample=True, num_w=num_w)
self.res2 = ResBlock(ch*16, ch*8, upsample=True, num_w=num_w)
self.res3 = ResBlock(ch*8, ch*8, upsample=True, num_w=num_w)
self.res4 = ResBlock(ch*8, ch*4, upsample=True, num_w=num_w)
self.res5 = ResBlock(ch*4, ch*2, upsample=True, num_w=num_w)
self.res6 = ResBlock(ch*2, ch*1, upsample=True, num_w=num_w, predict_mask=False)
self.final = nn.Sequential(BatchNorm(ch),
nn.ReLU(),
conv2d(ch, output_dim, 3, 1, 1),
nn.Tanh())
# mapping function
mapping = list()
self.mapping = nn.Sequential(*mapping)
self.alpha1 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha2 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha3 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha4 = nn.Parameter(torch.zeros(1, 184, 1))
self.alpha5 = nn.Parameter(torch.zeros(1, 184, 1))
self.sigmoid = nn.Sigmoid()
self.mask_regress = MaskRegressNetv2(num_w)
self.init_parameter()
def forward(self, z, bbox, z_im=None, y=None, include_mask_loss=False):
b, o = z.size(0), z.size(1)
label_embedding = self.label_embedding(y)
z = z.view(b * o, -1)
label_embedding = label_embedding.view(b * o, -1)
latent_vector = torch.cat((z, label_embedding), dim=1).view(b, o, -1)
w = self.mapping(latent_vector.view(b * o, -1))
# preprocess bbox
bmask = self.mask_regress(w, bbox)
if z_im is None:
z_im = torch.randn((b, 128), device=z.device)
bbox_mask_ = bbox_mask(z, bbox, 128, 128)
latent_vector = torch.cat((z, label_embedding), dim=1).view(b, o, -1)
w = self.mapping(latent_vector.view(b * o, -1))
# 4x4
x = self.fc(z_im).view(b, -1, 4, 4)
# 8x8
# label mask
x, stage_mask = self.res1(x, w, bmask)
# 16x16
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha1 = torch.gather(self.sigmoid(self.alpha1).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha1) + seman_bbox * alpha1
x, stage_mask = self.res2(x, w, stage_bbox)
# 32x32
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha2 = torch.gather(self.sigmoid(self.alpha2).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha2) + seman_bbox * alpha2
x, stage_mask = self.res3(x, w, stage_bbox)
# 64x64
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha3 = torch.gather(self.sigmoid(self.alpha3).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha3) + seman_bbox * alpha3
x, stage_mask = self.res4(x, w, stage_bbox)
# 128x128
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha4 = torch.gather(self.sigmoid(self.alpha4).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha4) + seman_bbox * alpha4
x, stage_mask = self.res5(x, w, stage_bbox)
# 256x256
hh, ww = x.size(2), x.size(3)
seman_bbox = batched_index_select(stage_mask, dim=1, index=y.view(b, o, 1, 1)) # size (b, num_o, h, w)
seman_bbox = torch.sigmoid(seman_bbox) * F.interpolate(bbox_mask_, size=(hh, ww), mode='nearest')
alpha5 = torch.gather(self.sigmoid(self.alpha5).expand(b, -1, -1), dim=1, index=y.view(b, o, 1)).unsqueeze(-1)
stage_bbox = F.interpolate(bmask, size=(hh, ww), mode='bilinear') * (1 - alpha5) + seman_bbox * alpha5
x, _ = self.res6(x, w, stage_bbox)
# to RGB
x = self.final(x)
return x
def init_parameter(self):
for k in self.named_parameters():
if k[1].dim() > 1:
torch.nn.init.orthogonal_(k[1])
if k[0][-4:] == 'bias':
torch.nn.init.constant_(k[1], 0)
class ResBlock(nn.Module):
def __init__(self, in_ch, out_ch, h_ch=None, ksize=3, pad=1, upsample=False, num_w=128, predict_mask=True, psp_module=False):
super(ResBlock, self).__init__()
self.upsample = upsample
self.h_ch = h_ch if h_ch else out_ch
self.conv1 = conv2d(in_ch, self.h_ch, ksize, pad=pad)
self.conv2 = conv2d(self.h_ch, out_ch, ksize, pad=pad)
self.b1 = SpatialAdaptiveSynBatchNorm2d(in_ch, num_w=num_w, batchnorm_func=BatchNorm)
self.b2 = SpatialAdaptiveSynBatchNorm2d(self.h_ch, num_w=num_w, batchnorm_func=BatchNorm)
self.learnable_sc = in_ch != out_ch or upsample
if self.learnable_sc:
self.c_sc = conv2d(in_ch, out_ch, 1, 1, 0)
self.activation = nn.ReLU()
self.predict_mask = predict_mask
if self.predict_mask:
if psp_module:
self.conv_mask = nn.Sequential(PSPModule(out_ch, 100),
nn.Conv2d(100, 184, kernel_size=1))
else:
self.conv_mask = nn.Sequential(nn.Conv2d(out_ch, 100, 3, 1, 1),
BatchNorm(100),
nn.ReLU(),
nn.Conv2d(100, 184, 1, 1, 0, bias=True))
def residual(self, in_feat, w, bbox):
x = in_feat
x = self.b1(x, w, bbox)
x = self.activation(x)
if self.upsample:
x = F.interpolate(x, scale_factor=2, mode='nearest')
x = self.conv1(x)
x = self.b2(x, w, bbox)
x = self.activation(x)
x = self.conv2(x)
return x
def shortcut(self, x):
if self.learnable_sc:
if self.upsample:
x = F.interpolate(x, scale_factor=2, mode='nearest')
x = self.c_sc(x)
return x
def forward(self, in_feat, w, bbox):
out_feat = self.residual(in_feat, w, bbox) + self.shortcut(in_feat)
if self.predict_mask:
mask = self.conv_mask(out_feat)
else:
mask = None
return out_feat, mask
def conv2d(in_feat, out_feat, kernel_size=3, stride=1, pad=1, spectral_norm=True):
conv = nn.Conv2d(in_feat, out_feat, kernel_size, stride, pad)
if spectral_norm:
return nn.utils.spectral_norm(conv, eps=1e-4)
else:
return conv
def batched_index_select(input, dim, index):
expanse = list(input.shape)
expanse[0] = -1
expanse[dim] = -1
index = index.expand(expanse)
return torch.gather(input, dim, index)
def bbox_mask(x, bbox, H, W):
b, o, _ = bbox.size()
N = b * o
bbox_1 = bbox.float().view(-1, 4)
x0, y0 = bbox_1[:, 0], bbox_1[:, 1]
ww, hh = bbox_1[:, 2], bbox_1[:, 3]
x0 = x0.contiguous().view(N, 1).expand(N, H)
ww = ww.contiguous().view(N, 1).expand(N, H)
y0 = y0.contiguous().view(N, 1).expand(N, W)
hh = hh.contiguous().view(N, 1).expand(N, W)
X = torch.linspace(0, 1, steps=W).view(1, W).expand(N, W).cuda(device=x.device)
Y = torch.linspace(0, 1, steps=H).view(1, H).expand(N, H).cuda(device=x.device)
X = (X - x0) / ww
Y = (Y - y0) / hh
X_out_mask = ((X < 0) + (X > 1)).view(N, 1, W).expand(N, H, W)
Y_out_mask = ((Y < 0) + (Y > 1)).view(N, H, 1).expand(N, H, W)
out_mask = 1 - (X_out_mask + Y_out_mask).float().clamp(max=1)
return out_mask.view(b, o, H, W)
class PSPModule(nn.Module):
"""
Reference:
Zhao, Hengshuang, et al. *"Pyramid scene parsing network."*
"""
def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6)):
super(PSPModule, self).__init__()
self.stages = []
self.stages = nn.ModuleList([self._make_stage(features, out_features, size) for size in sizes])
self.bottleneck = nn.Sequential(
nn.Conv2d(features+len(sizes)*out_features, out_features, kernel_size=3, padding=1, dilation=1, bias=False),
BatchNorm(out_features),
nn.ReLU(),
nn.Dropout2d(0.1)
)
def _make_stage(self, features, out_features, size):
prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False)
bn = nn.BatchNorm2d(out_features)
return nn.Sequential(prior, conv, bn, nn.ReLU())
def forward(self, feats):
h, w = feats.size(2), feats.size(3)
priors = [F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True) for stage in self.stages] + [feats]
bottle = self.bottleneck(torch.cat(priors, 1))
return bottle
|
[
"torch.nn.Embedding",
"torch.randn",
"torch.cat",
"torch.nn.init.constant_",
"torch.gather",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout2d",
"torch.nn.Tanh",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.Sigmoid",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.nn.utils.spectral_norm",
"torch.sigmoid",
"torch.linspace",
"torch.nn.functional.interpolate",
"torch.nn.init.orthogonal_"
] |
[((12548, 12602), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_feat', 'out_feat', 'kernel_size', 'stride', 'pad'], {}), '(in_feat, out_feat, kernel_size, stride, pad)\n', (12557, 12602), True, 'import torch.nn as nn\n'), ((12875, 12906), 'torch.gather', 'torch.gather', (['input', 'dim', 'index'], {}), '(input, dim, index)\n', (12887, 12906), False, 'import torch\n'), ((447, 477), 'torch.nn.Embedding', 'nn.Embedding', (['num_classes', '(180)'], {}), '(num_classes, 180)\n', (459, 477), True, 'import torch.nn as nn\n'), ((1245, 1268), 'torch.nn.Sequential', 'nn.Sequential', (['*mapping'], {}), '(*mapping)\n', (1258, 1268), True, 'import torch.nn as nn\n'), ((1530, 1542), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1540, 1542), True, 'import torch.nn as nn\n'), ((5075, 5105), 'torch.nn.Embedding', 'nn.Embedding', (['num_classes', '(180)'], {}), '(num_classes, 180)\n', (5087, 5105), True, 'import torch.nn as nn\n'), ((5925, 5948), 'torch.nn.Sequential', 'nn.Sequential', (['*mapping'], {}), '(*mapping)\n', (5938, 5948), True, 'import torch.nn as nn\n'), ((6268, 6280), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6278, 6280), True, 'import torch.nn as nn\n'), ((11070, 11079), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11077, 11079), True, 'import torch.nn as nn\n'), ((12640, 12680), 'torch.nn.utils.spectral_norm', 'nn.utils.spectral_norm', (['conv'], {'eps': '(0.0001)'}), '(conv, eps=0.0001)\n', (12662, 12680), True, 'import torch.nn as nn\n'), ((14461, 14507), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', ([], {'output_size': '(size, size)'}), '(output_size=(size, size))\n', (14481, 14507), True, 'import torch.nn as nn\n'), ((14523, 14583), 'torch.nn.Conv2d', 'nn.Conv2d', (['features', 'out_features'], {'kernel_size': '(1)', 'bias': '(False)'}), '(features, out_features, kernel_size=1, bias=False)\n', (14532, 14583), True, 'import torch.nn as nn\n'), ((14597, 14625), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_features'], {}), 
'(out_features)\n', (14611, 14625), True, 'import torch.nn as nn\n'), ((544, 577), 'torch.nn.Linear', 'nn.Linear', (['z_dim', '(4 * 4 * 16 * ch)'], {}), '(z_dim, 4 * 4 * 16 * ch)\n', (553, 577), True, 'import torch.nn as nn\n'), ((1044, 1053), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1051, 1053), True, 'import torch.nn as nn\n'), ((1158, 1167), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (1165, 1167), True, 'import torch.nn as nn\n'), ((1305, 1327), 'torch.zeros', 'torch.zeros', (['(1)', '(184)', '(1)'], {}), '(1, 184, 1)\n', (1316, 1327), False, 'import torch\n'), ((1364, 1386), 'torch.zeros', 'torch.zeros', (['(1)', '(184)', '(1)'], {}), '(1, 184, 1)\n', (1375, 1386), False, 'import torch\n'), ((1423, 1445), 'torch.zeros', 'torch.zeros', (['(1)', '(184)', '(1)'], {}), '(1, 184, 1)\n', (1434, 1445), False, 'import torch\n'), ((1482, 1504), 'torch.zeros', 'torch.zeros', (['(1)', '(184)', '(1)'], {}), '(1, 184, 1)\n', (1493, 1504), False, 'import torch\n'), ((2103, 2141), 'torch.randn', 'torch.randn', (['(b, 128)'], {'device': 'z.device'}), '((b, 128), device=z.device)\n', (2114, 2141), False, 'import torch\n'), ((2498, 2523), 'torch.sigmoid', 'torch.sigmoid', (['seman_bbox'], {}), '(seman_bbox)\n', (2511, 2523), False, 'import torch\n'), ((2526, 2582), 'torch.nn.functional.interpolate', 'F.interpolate', (['bbox_mask_'], {'size': '(hh, ww)', 'mode': '"""nearest"""'}), "(bbox_mask_, size=(hh, ww), mode='nearest')\n", (2539, 2582), True, 'import torch.nn.functional as F\n'), ((3053, 3078), 'torch.sigmoid', 'torch.sigmoid', (['seman_bbox'], {}), '(seman_bbox)\n', (3066, 3078), False, 'import torch\n'), ((3081, 3137), 'torch.nn.functional.interpolate', 'F.interpolate', (['bbox_mask_'], {'size': '(hh, ww)', 'mode': '"""nearest"""'}), "(bbox_mask_, size=(hh, ww), mode='nearest')\n", (3094, 3137), True, 'import torch.nn.functional as F\n'), ((3608, 3633), 'torch.sigmoid', 'torch.sigmoid', (['seman_bbox'], {}), '(seman_bbox)\n', (3621, 3633), False, 'import 
torch\n'), ((3636, 3692), 'torch.nn.functional.interpolate', 'F.interpolate', (['bbox_mask_'], {'size': '(hh, ww)', 'mode': '"""nearest"""'}), "(bbox_mask_, size=(hh, ww), mode='nearest')\n", (3649, 3692), True, 'import torch.nn.functional as F\n'), ((4165, 4190), 'torch.sigmoid', 'torch.sigmoid', (['seman_bbox'], {}), '(seman_bbox)\n', (4178, 4190), False, 'import torch\n'), ((4193, 4249), 'torch.nn.functional.interpolate', 'F.interpolate', (['bbox_mask_'], {'size': '(hh, ww)', 'mode': '"""nearest"""'}), "(bbox_mask_, size=(hh, ww), mode='nearest')\n", (4206, 4249), True, 'import torch.nn.functional as F\n'), ((5172, 5205), 'torch.nn.Linear', 'nn.Linear', (['z_dim', '(4 * 4 * 16 * ch)'], {}), '(z_dim, 4 * 4 * 16 * ch)\n', (5181, 5205), True, 'import torch.nn as nn\n'), ((5724, 5733), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5731, 5733), True, 'import torch.nn as nn\n'), ((5838, 5847), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (5845, 5847), True, 'import torch.nn as nn\n'), ((5985, 6007), 'torch.zeros', 'torch.zeros', (['(1)', '(184)', '(1)'], {}), '(1, 184, 1)\n', (5996, 6007), False, 'import torch\n'), ((6044, 6066), 'torch.zeros', 'torch.zeros', (['(1)', '(184)', '(1)'], {}), '(1, 184, 1)\n', (6055, 6066), False, 'import torch\n'), ((6103, 6125), 'torch.zeros', 'torch.zeros', (['(1)', '(184)', '(1)'], {}), '(1, 184, 1)\n', (6114, 6125), False, 'import torch\n'), ((6162, 6184), 'torch.zeros', 'torch.zeros', (['(1)', '(184)', '(1)'], {}), '(1, 184, 1)\n', (6173, 6184), False, 'import torch\n'), ((6221, 6243), 'torch.zeros', 'torch.zeros', (['(1)', '(184)', '(1)'], {}), '(1, 184, 1)\n', (6232, 6243), False, 'import torch\n'), ((6868, 6906), 'torch.randn', 'torch.randn', (['(b, 128)'], {'device': 'z.device'}), '((b, 128), device=z.device)\n', (6879, 6906), False, 'import torch\n'), ((7429, 7454), 'torch.sigmoid', 'torch.sigmoid', (['seman_bbox'], {}), '(seman_bbox)\n', (7442, 7454), False, 'import torch\n'), ((7457, 7513), 
'torch.nn.functional.interpolate', 'F.interpolate', (['bbox_mask_'], {'size': '(hh, ww)', 'mode': '"""nearest"""'}), "(bbox_mask_, size=(hh, ww), mode='nearest')\n", (7470, 7513), True, 'import torch.nn.functional as F\n'), ((7983, 8008), 'torch.sigmoid', 'torch.sigmoid', (['seman_bbox'], {}), '(seman_bbox)\n', (7996, 8008), False, 'import torch\n'), ((8011, 8067), 'torch.nn.functional.interpolate', 'F.interpolate', (['bbox_mask_'], {'size': '(hh, ww)', 'mode': '"""nearest"""'}), "(bbox_mask_, size=(hh, ww), mode='nearest')\n", (8024, 8067), True, 'import torch.nn.functional as F\n'), ((8538, 8563), 'torch.sigmoid', 'torch.sigmoid', (['seman_bbox'], {}), '(seman_bbox)\n', (8551, 8563), False, 'import torch\n'), ((8566, 8622), 'torch.nn.functional.interpolate', 'F.interpolate', (['bbox_mask_'], {'size': '(hh, ww)', 'mode': '"""nearest"""'}), "(bbox_mask_, size=(hh, ww), mode='nearest')\n", (8579, 8622), True, 'import torch.nn.functional as F\n'), ((9095, 9120), 'torch.sigmoid', 'torch.sigmoid', (['seman_bbox'], {}), '(seman_bbox)\n', (9108, 9120), False, 'import torch\n'), ((9123, 9179), 'torch.nn.functional.interpolate', 'F.interpolate', (['bbox_mask_'], {'size': '(hh, ww)', 'mode': '"""nearest"""'}), "(bbox_mask_, size=(hh, ww), mode='nearest')\n", (9136, 9179), True, 'import torch.nn.functional as F\n'), ((9652, 9677), 'torch.sigmoid', 'torch.sigmoid', (['seman_bbox'], {}), '(seman_bbox)\n', (9665, 9677), False, 'import torch\n'), ((9680, 9736), 'torch.nn.functional.interpolate', 'F.interpolate', (['bbox_mask_'], {'size': '(hh, ww)', 'mode': '"""nearest"""'}), "(bbox_mask_, size=(hh, ww), mode='nearest')\n", (9693, 9736), True, 'import torch.nn.functional as F\n'), ((11808, 11856), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(x, scale_factor=2, mode='nearest')\n", (11821, 11856), True, 'import torch.nn.functional as F\n'), ((14332, 14341), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14339, 
14341), True, 'import torch.nn as nn\n'), ((14355, 14372), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (14367, 14372), True, 'import torch.nn as nn\n'), ((14672, 14681), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14679, 14681), True, 'import torch.nn as nn\n'), ((14929, 14949), 'torch.cat', 'torch.cat', (['priors', '(1)'], {}), '(priors, 1)\n', (14938, 14949), False, 'import torch\n'), ((1878, 1916), 'torch.cat', 'torch.cat', (['(z, label_embedding)'], {'dim': '(1)'}), '((z, label_embedding), dim=1)\n', (1887, 1916), False, 'import torch\n'), ((2724, 2776), 'torch.nn.functional.interpolate', 'F.interpolate', (['bmask'], {'size': '(hh, ww)', 'mode': '"""bilinear"""'}), "(bmask, size=(hh, ww), mode='bilinear')\n", (2737, 2776), True, 'import torch.nn.functional as F\n'), ((3279, 3331), 'torch.nn.functional.interpolate', 'F.interpolate', (['bmask'], {'size': '(hh, ww)', 'mode': '"""bilinear"""'}), "(bmask, size=(hh, ww), mode='bilinear')\n", (3292, 3331), True, 'import torch.nn.functional as F\n'), ((3834, 3886), 'torch.nn.functional.interpolate', 'F.interpolate', (['bmask'], {'size': '(hh, ww)', 'mode': '"""bilinear"""'}), "(bmask, size=(hh, ww), mode='bilinear')\n", (3847, 3886), True, 'import torch.nn.functional as F\n'), ((4391, 4443), 'torch.nn.functional.interpolate', 'F.interpolate', (['bmask'], {'size': '(hh, ww)', 'mode': '"""bilinear"""'}), "(bmask, size=(hh, ww), mode='bilinear')\n", (4404, 4443), True, 'import torch.nn.functional as F\n'), ((4725, 4756), 'torch.nn.init.orthogonal_', 'torch.nn.init.orthogonal_', (['k[1]'], {}), '(k[1])\n', (4750, 4756), False, 'import torch\n'), ((4809, 4841), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['k[1]', '(0)'], {}), '(k[1], 0)\n', (4832, 4841), False, 'import torch\n'), ((6642, 6680), 'torch.cat', 'torch.cat', (['(z, label_embedding)'], {'dim': '(1)'}), '((z, label_embedding), dim=1)\n', (6651, 6680), False, 'import torch\n'), ((6991, 7029), 'torch.cat', 'torch.cat', (['(z, 
label_embedding)'], {'dim': '(1)'}), '((z, label_embedding), dim=1)\n', (7000, 7029), False, 'import torch\n'), ((7654, 7706), 'torch.nn.functional.interpolate', 'F.interpolate', (['bmask'], {'size': '(hh, ww)', 'mode': '"""bilinear"""'}), "(bmask, size=(hh, ww), mode='bilinear')\n", (7667, 7706), True, 'import torch.nn.functional as F\n'), ((8209, 8261), 'torch.nn.functional.interpolate', 'F.interpolate', (['bmask'], {'size': '(hh, ww)', 'mode': '"""bilinear"""'}), "(bmask, size=(hh, ww), mode='bilinear')\n", (8222, 8261), True, 'import torch.nn.functional as F\n'), ((8764, 8816), 'torch.nn.functional.interpolate', 'F.interpolate', (['bmask'], {'size': '(hh, ww)', 'mode': '"""bilinear"""'}), "(bmask, size=(hh, ww), mode='bilinear')\n", (8777, 8816), True, 'import torch.nn.functional as F\n'), ((9321, 9373), 'torch.nn.functional.interpolate', 'F.interpolate', (['bmask'], {'size': '(hh, ww)', 'mode': '"""bilinear"""'}), "(bmask, size=(hh, ww), mode='bilinear')\n", (9334, 9373), True, 'import torch.nn.functional as F\n'), ((9878, 9930), 'torch.nn.functional.interpolate', 'F.interpolate', (['bmask'], {'size': '(hh, ww)', 'mode': '"""bilinear"""'}), "(bmask, size=(hh, ww), mode='bilinear')\n", (9891, 9930), True, 'import torch.nn.functional as F\n'), ((10191, 10222), 'torch.nn.init.orthogonal_', 'torch.nn.init.orthogonal_', (['k[1]'], {}), '(k[1])\n', (10216, 10222), False, 'import torch\n'), ((10275, 10307), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['k[1]', '(0)'], {}), '(k[1], 0)\n', (10298, 10307), False, 'import torch\n'), ((12097, 12145), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(x, scale_factor=2, mode='nearest')\n", (12110, 12145), True, 'import torch.nn.functional as F\n'), ((11297, 11331), 'torch.nn.Conv2d', 'nn.Conv2d', (['(100)', '(184)'], {'kernel_size': '(1)'}), '(100, 184, kernel_size=1)\n', (11306, 11331), True, 'import torch.nn as nn\n'), ((11398, 11429), 
'torch.nn.Conv2d', 'nn.Conv2d', (['out_ch', '(100)', '(3)', '(1)', '(1)'], {}), '(out_ch, 100, 3, 1, 1)\n', (11407, 11429), True, 'import torch.nn as nn\n'), ((11541, 11550), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11548, 11550), True, 'import torch.nn as nn\n'), ((11599, 11638), 'torch.nn.Conv2d', 'nn.Conv2d', (['(100)', '(184)', '(1)', '(1)', '(0)'], {'bias': '(True)'}), '(100, 184, 1, 1, 0, bias=True)\n', (11608, 11638), True, 'import torch.nn as nn\n'), ((13304, 13333), 'torch.linspace', 'torch.linspace', (['(0)', '(1)'], {'steps': 'W'}), '(0, 1, steps=W)\n', (13318, 13333), False, 'import torch\n'), ((13388, 13417), 'torch.linspace', 'torch.linspace', (['(0)', '(1)'], {'steps': 'H'}), '(0, 1, steps=H)\n', (13402, 13417), False, 'import torch\n')]
|
# Separate script, to use python multiprocessing, that utilizes pickle #
import sys
from multiprocessing import Pool, cpu_count
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
from scipy import optimize
from python.util import get_model_params, model_path, model_epoch
from python.model.vae import build_vae
test_data_path, model, bound, output_path = sys.argv[1], sys.argv[2], int(sys.argv[3]), sys.argv[4]
n_channels, depth, z_dim, n_hid_first, lam, L = get_model_params(model)
test_data = np.load(test_data_path)
# load trained model
input_var = T.matrix('inputs')
z_var = T.vector()
l_z_mean, l_z_stddev, _, _, _, l_x = build_vae(input_var, n_channels=n_channels, depth=depth, z_dim=z_dim,
n_hid_first=n_hid_first, L=1)
with np.load(model_path(model) + str(model_epoch(model)) + '.npz') as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
nn.layers.set_all_param_values(l_x, param_values)
# create encoder function to find initial values for z
encoder = nn.layers.get_output([l_z_mean, l_z_stddev], deterministic=True)
encode = theano.function([input_var], encoder)
# create decoder function
generated_x = nn.layers.get_output(l_x, {l_z_mean: z_var}, deterministic=True)
gen_fn = theano.function([z_var], generated_x)
# create l2 loss to optimize over latent space
z_mean, z_stddev = encode(test_data)
z_0 = z_mean
def loss(z, voxel):
x = gen_fn(z).reshape(n_channels)
return np.linalg.norm(voxel-x)
if bound == 0:
def minimize_voxel(args):
loss, z_0, voxel = args
optimize_result = optimize.minimize(loss, z_0, voxel)
return loss(optimize_result.x, voxel)
else:
boundaries = ((-bound, bound),)
for _ in range(z_dim-1):
boundaries += ((-bound, bound),)
def minimize_voxel(args):
loss, z_0, voxel = args
optimize_result = optimize.minimize(loss, z_0, voxel, bounds=boundaries)
return loss(optimize_result.x, voxel)
args = [(loss, z_0[i], test_data[i]) for i in range(len(test_data))]
p = Pool(cpu_count())
novelty_score = np.array(p.map(minimize_voxel, args))
np.save(output_path, novelty_score)
|
[
"numpy.load",
"numpy.save",
"scipy.optimize.minimize",
"python.util.model_path",
"theano.function",
"multiprocessing.cpu_count",
"python.util.get_model_params",
"lasagne.layers.get_output",
"numpy.linalg.norm",
"python.model.vae.build_vae",
"theano.tensor.vector",
"python.util.model_epoch",
"lasagne.layers.set_all_param_values",
"theano.tensor.matrix"
] |
[((491, 514), 'python.util.get_model_params', 'get_model_params', (['model'], {}), '(model)\n', (507, 514), False, 'from python.util import get_model_params, model_path, model_epoch\n'), ((527, 550), 'numpy.load', 'np.load', (['test_data_path'], {}), '(test_data_path)\n', (534, 550), True, 'import numpy as np\n'), ((585, 603), 'theano.tensor.matrix', 'T.matrix', (['"""inputs"""'], {}), "('inputs')\n", (593, 603), True, 'import theano.tensor as T\n'), ((612, 622), 'theano.tensor.vector', 'T.vector', ([], {}), '()\n', (620, 622), True, 'import theano.tensor as T\n'), ((660, 763), 'python.model.vae.build_vae', 'build_vae', (['input_var'], {'n_channels': 'n_channels', 'depth': 'depth', 'z_dim': 'z_dim', 'n_hid_first': 'n_hid_first', 'L': '(1)'}), '(input_var, n_channels=n_channels, depth=depth, z_dim=z_dim,\n n_hid_first=n_hid_first, L=1)\n', (669, 763), False, 'from python.model.vae import build_vae\n'), ((939, 988), 'lasagne.layers.set_all_param_values', 'nn.layers.set_all_param_values', (['l_x', 'param_values'], {}), '(l_x, param_values)\n', (969, 988), True, 'import lasagne as nn\n'), ((1055, 1119), 'lasagne.layers.get_output', 'nn.layers.get_output', (['[l_z_mean, l_z_stddev]'], {'deterministic': '(True)'}), '([l_z_mean, l_z_stddev], deterministic=True)\n', (1075, 1119), True, 'import lasagne as nn\n'), ((1129, 1166), 'theano.function', 'theano.function', (['[input_var]', 'encoder'], {}), '([input_var], encoder)\n', (1144, 1166), False, 'import theano\n'), ((1208, 1272), 'lasagne.layers.get_output', 'nn.layers.get_output', (['l_x', '{l_z_mean: z_var}'], {'deterministic': '(True)'}), '(l_x, {l_z_mean: z_var}, deterministic=True)\n', (1228, 1272), True, 'import lasagne as nn\n'), ((1282, 1319), 'theano.function', 'theano.function', (['[z_var]', 'generated_x'], {}), '([z_var], generated_x)\n', (1297, 1319), False, 'import theano\n'), ((2147, 2182), 'numpy.save', 'np.save', (['output_path', 'novelty_score'], {}), '(output_path, novelty_score)\n', (2154, 2182), True, 
'import numpy as np\n'), ((1489, 1514), 'numpy.linalg.norm', 'np.linalg.norm', (['(voxel - x)'], {}), '(voxel - x)\n', (1503, 1514), True, 'import numpy as np\n'), ((2080, 2091), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (2089, 2091), False, 'from multiprocessing import Pool, cpu_count\n'), ((1617, 1652), 'scipy.optimize.minimize', 'optimize.minimize', (['loss', 'z_0', 'voxel'], {}), '(loss, z_0, voxel)\n', (1634, 1652), False, 'from scipy import optimize\n'), ((1900, 1954), 'scipy.optimize.minimize', 'optimize.minimize', (['loss', 'z_0', 'voxel'], {'bounds': 'boundaries'}), '(loss, z_0, voxel, bounds=boundaries)\n', (1917, 1954), False, 'from scipy import optimize\n'), ((812, 829), 'python.util.model_path', 'model_path', (['model'], {}), '(model)\n', (822, 829), False, 'from python.util import get_model_params, model_path, model_epoch\n'), ((836, 854), 'python.util.model_epoch', 'model_epoch', (['model'], {}), '(model)\n', (847, 854), False, 'from python.util import get_model_params, model_path, model_epoch\n')]
|
"""User management."""
from functools import lru_cache
from typing import Any
from httpx import Client
from .base import Base
from .database import Database
from .errors import EmailError, LoginError
from .forms import Form
from .storage import Storage
class LoginState(Base):
def __init__(self, client: Client, token: str) -> None:
super().__init__(client, token)
self.token = token
@property # type: ignore
@lru_cache()
def database(self) -> Database:
return Database(self.client, self.token) # type: ignore
@property # type: ignore
@lru_cache()
def forms(self) -> Form:
return Form(self.client, self.token) # type: ignore
@property # type: ignore
@lru_cache()
def storage(self) -> Storage:
return Storage(self.client, self.token) # type: ignore
class User(Base):
def __init__(self, client: Client, root_token: str = None) -> None:
super().__init__(client, root_token)
def _user(self, uri: str, email: str, password: str) -> str:
resp: Any = self._request(uri, {"email": email, "password": password})
return resp # type: ignore
def register(
self,
email: str,
password: str,
) -> LoginState:
"""Register new user.
:param email: User’s email address
:param password: <PASSWORD>’<PASSWORD>
:return: User’s authentication token
"""
token = self._user("/register", email, password)
if "invalid email" in token:
raise EmailError()
return LoginState(self.client, token)
def login(
self,
email: str,
password: str,
) -> LoginState:
"""Validate user by email and password to receive their id and session token.
:param email: User’s email address
:param password: <PASSWORD>’<PASSWORD>
:return: User’s authentication token
"""
token = self._user("/login", email, password)
if "no documents in result" in token:
raise LoginError()
return LoginState(self.client, token)
def send_reset_code(
self,
email: str,
) -> str:
"""Send reset code to user's email.
:param email: User’s email address
:return: Reset code.
"""
resp: Any = self._request(
"/password/resetcode", method="get", params={"e": email}
)
if "invalid email" in resp:
raise EmailError()
return resp # type: ignore
def reset_password(
self,
email: str,
code: str,
password: str,
) -> bool:
"""Reset user password.
:param email: User’s email address
:param code: User’s reset code
:param password: <PASSWORD>
:return: Reset or not
"""
resp: Any = self._request(
"/password/reset",
body={"email": email.lower(), "code": code, "password": password},
)
return resp # type: ignore
|
[
"functools.lru_cache"
] |
[((445, 456), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (454, 456), False, 'from functools import lru_cache\n'), ((594, 605), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (603, 605), False, 'from functools import lru_cache\n'), ((732, 743), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (741, 743), False, 'from functools import lru_cache\n')]
|
from . import Constant
from datetime import date
import os.path
import time
class Clock:
def __init__(self):
self.cached_today = date.today()
self.cached_time = int(time.time() * Constant.Clock.MILLIS_IN_SECOND)
def get_timestamped_directory_name(self, dirname):
return "{}-{}".format(dirname, self.cached_time)
def delta(self, timestamp):
return self.cached_time - timestamp
|
[
"datetime.date.today",
"time.time"
] |
[((138, 150), 'datetime.date.today', 'date.today', ([], {}), '()\n', (148, 150), False, 'from datetime import date\n'), ((178, 189), 'time.time', 'time.time', ([], {}), '()\n', (187, 189), False, 'import time\n')]
|
import datetime
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse as url_reverse
from django.template.defaultfilters import slugify
from django.utils.translation import gettext_lazy as _
class NewsItemManager(models.Manager):
def to_export(self):
"""
Filters all items, that haven't been exported yet.
"""
return self.filter(twitter_id__isnull=True)
def exported(self):
return self.filter(twitter_id__isnull=False)
class NewsItem(models.Model):
"""
A basic news item. The title is mostly something that can end up on
services like Twitter. If the body is not empty, the exteral
representation of such an item also includes a link to the post
and a respectively shortened title.
"""
title = models.CharField(verbose_name=_('Title'), max_length=200)
slug = models.SlugField(verbose_name=_('Slug'), blank=True)
body = models.TextField(verbose_name=_('Body'), blank=True, null=True)
pub_date = models.DateTimeField(verbose_name=_('Published at'),
default=datetime.datetime.now)
author = models.ForeignKey(User, verbose_name=_('Author'), null=True,
blank=True)
twitter_id = models.BigIntegerField(verbose_name=_('Twitter ID'),
blank=True, null=True)
objects = NewsItemManager()
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
if not self.pk and not self.slug:
self.slug = slugify(self.title[:30])
return super(NewsItem, self).save(*args, **kwargs)
def as_twitter_message(self):
if not self.body:
return self.title
item_url = 'http://%s%s' % (
Site.objects.get_current().domain,
url_reverse('news_shortcut', kwargs={'pk': str(self.pk)})
)
return self.title[:-(len(item_url)+4)] + '... ' + item_url
def get_absolute_url(self):
return url_reverse('news_detail', kwargs=dict(
slug=self.slug, pk=self.pk))
def get_twitter_url(self):
return 'http://twitter.com/%s/status/%d' % (settings.TWITTER_USERNAME,
self.twitter_id,)
class Meta:
ordering = ['-pub_date']
|
[
"django.contrib.sites.models.Site.objects.get_current",
"django.template.defaultfilters.slugify",
"django.utils.translation.gettext_lazy"
] |
[((948, 958), 'django.utils.translation.gettext_lazy', '_', (['"""Title"""'], {}), "('Title')\n", (949, 958), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1017, 1026), 'django.utils.translation.gettext_lazy', '_', (['"""Slug"""'], {}), "('Slug')\n", (1018, 1026), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1081, 1090), 'django.utils.translation.gettext_lazy', '_', (['"""Body"""'], {}), "('Body')\n", (1082, 1090), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1164, 1181), 'django.utils.translation.gettext_lazy', '_', (['"""Published at"""'], {}), "('Published at')\n", (1165, 1181), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1300, 1311), 'django.utils.translation.gettext_lazy', '_', (['"""Author"""'], {}), "('Author')\n", (1301, 1311), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1421, 1436), 'django.utils.translation.gettext_lazy', '_', (['"""Twitter ID"""'], {}), "('Twitter ID')\n", (1422, 1436), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1692, 1716), 'django.template.defaultfilters.slugify', 'slugify', (['self.title[:30]'], {}), '(self.title[:30])\n', (1699, 1716), False, 'from django.template.defaultfilters import slugify\n'), ((1918, 1944), 'django.contrib.sites.models.Site.objects.get_current', 'Site.objects.get_current', ([], {}), '()\n', (1942, 1944), False, 'from django.contrib.sites.models import Site\n')]
|
from datetime import datetime
from enum import Enum
from flask import current_app
from itsdangerous import Signer
DATE_FORMAT = '%Y-%m-%d'
DATETIME_FORMAT = '%Y-%m-%dT%H:%M'
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last_values):
return name
def parse_dt(text):
return datetime.strptime(text, DATETIME_FORMAT)
def format_dt(dt):
return dt.strftime(DATETIME_FORMAT)
def range_union(ranges):
"""Take a list of (H, M) tuples and merge any overlapping intervals."""
results = []
# tuples are sorted in increasing order, so we are sure we always have
# the "latest" end time at the back of the list
for start, end in sorted(ranges):
last_end_time = results[-1] if results else None
# if the next start time is earlier than the latest end time, then
# we can merge the intervals
if last_end_time and start <= last_end_time[1]:
results[-1] = (last_end_time[0], max(last_end_time[1], end))
else:
results.append((start, end))
return results
def _get_signature_source_bytes(data, fields=None):
if fields:
data = {k: v for k, v in data.items() if k in fields}
return '-'.join(v for k, v in sorted(data.items())).encode()
def sign_user(user_data, fields=None):
"""Sign user data."""
signer = Signer(current_app.config['SECRET_KEY'], salt='<PASSWORD>')
return dict(
user_data,
signature=signer.get_signature(
_get_signature_source_bytes(user_data, fields)
).decode('ascii'),
)
def check_user_signature(user_data, signature, fields=None):
"""Check that user data matches the signature."""
signer = Signer(current_app.config['SECRET_KEY'], salt='<PASSWORD>')
return signer.verify_signature(
_get_signature_source_bytes(user_data, fields), signature.encode('ascii')
)
|
[
"itsdangerous.Signer",
"datetime.datetime.strptime"
] |
[((322, 362), 'datetime.datetime.strptime', 'datetime.strptime', (['text', 'DATETIME_FORMAT'], {}), '(text, DATETIME_FORMAT)\n', (339, 362), False, 'from datetime import datetime\n'), ((1357, 1416), 'itsdangerous.Signer', 'Signer', (["current_app.config['SECRET_KEY']"], {'salt': '"""<PASSWORD>"""'}), "(current_app.config['SECRET_KEY'], salt='<PASSWORD>')\n", (1363, 1416), False, 'from itsdangerous import Signer\n'), ((1715, 1774), 'itsdangerous.Signer', 'Signer', (["current_app.config['SECRET_KEY']"], {'salt': '"""<PASSWORD>"""'}), "(current_app.config['SECRET_KEY'], salt='<PASSWORD>')\n", (1721, 1774), False, 'from itsdangerous import Signer\n')]
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import Hello, HelloViewSets, UserProfileViewSet, UserLoginApiView, UserProfileFeedViewSet
router = DefaultRouter()
router.register('hello', HelloViewSets, base_name='hello')
router.register('profile', UserProfileViewSet)
router.register('feed', UserProfileFeedViewSet)
urlpatterns = [
path('a/', Hello.as_view() ,name="hello"),
path('', include(router.urls)),
path('login/', UserLoginApiView.as_view(), name="login")
]
|
[
"rest_framework.routers.DefaultRouter",
"django.urls.include"
] |
[((201, 216), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (214, 216), False, 'from rest_framework.routers import DefaultRouter\n'), ((448, 468), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (455, 468), False, 'from django.urls import path, include\n')]
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from logging import DEBUG
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
from jinja2.nativetypes import NativeEnvironment
from pypsrp.powershell import Command
from pypsrp.serializer import TaggedValue
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.microsoft.psrp.hooks.psrp import PsrpHook
from airflow.settings import json
# TODO: Replace with airflow.utils.helpers.exactly_one in Airflow 2.3.
def exactly_one(*args):
return len(set(filter(None, args))) == 1
if TYPE_CHECKING:
from airflow.utils.context import Context
class PsrpOperator(BaseOperator):
"""PowerShell Remoting Protocol operator.
Use one of the 'command', 'cmdlet', or 'powershell' arguments.
The 'securestring' template filter can be used to tag a value for
serialization into a `System.Security.SecureString` (applicable only
for DAGs which have `render_template_as_native_obj=True`).
When using the `cmdlet` or `powershell` arguments and when `do_xcom_push`
is enabled, the command output is converted to JSON by PowerShell using
the `ConvertTo-Json
<https://docs.microsoft.com/en-us/powershell/
module/microsoft.powershell.utility/convertto-json>`__ cmdlet such
that the operator return value is serializable to an XCom value.
:param psrp_conn_id: connection id
:param command: command to execute on remote host. (templated)
:param powershell: powershell to execute on remote host. (templated)
:param cmdlet:
cmdlet to execute on remote host (templated). Also used as the default
value for `task_id`.
:param parameters:
When using the `cmdlet` or `powershell` arguments, use this parameter to
provide parameters (templated). Note that a parameter with a value of `None`
becomes an *argument* (i.e., switch).
:param logging_level:
Logging level for message streams which are received during remote execution.
The default is to include all messages in the task log.
:param runspace_options:
optional dictionary which is passed when creating the runspace pool. See
:py:class:`~pypsrp.powershell.RunspacePool` for a description of the
available options.
:param wsman_options:
optional dictionary which is passed when creating the `WSMan` client. See
:py:class:`~pypsrp.wsman.WSMan` for a description of the available options.
:param psrp_session_init:
Optional command which will be added to the pipeline when a new PowerShell
session has been established, prior to invoking the action specified using
the `cmdlet`, `command`, or `powershell` parameters.
"""
template_fields: Sequence[str] = (
"cmdlet",
"command",
"parameters",
"powershell",
)
template_fields_renderers = {"command": "powershell", "powershell": "powershell"}
ui_color = "#c2e2ff"
def __init__(
self,
*,
psrp_conn_id: str,
command: Optional[str] = None,
powershell: Optional[str] = None,
cmdlet: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
logging_level: int = DEBUG,
runspace_options: Optional[Dict[str, Any]] = None,
wsman_options: Optional[Dict[str, Any]] = None,
psrp_session_init: Optional[Command] = None,
**kwargs,
) -> None:
args = {command, powershell, cmdlet}
if not exactly_one(*args):
raise ValueError("Must provide exactly one of 'command', 'powershell', or 'cmdlet'")
if parameters and not cmdlet:
raise ValueError("Parameters only allowed with 'cmdlet'")
if cmdlet:
kwargs.setdefault('task_id', cmdlet)
super().__init__(**kwargs)
self.conn_id = psrp_conn_id
self.command = command
self.powershell = powershell
self.cmdlet = cmdlet
self.parameters = parameters
self.logging_level = logging_level
self.runspace_options = runspace_options
self.wsman_options = wsman_options
self.psrp_session_init = psrp_session_init
def execute(self, context: "Context") -> Optional[List[Any]]:
with PsrpHook(
self.conn_id,
logging_level=self.logging_level,
runspace_options=self.runspace_options,
wsman_options=self.wsman_options,
on_output_callback=self.log.info if not self.do_xcom_push else None,
) as hook, hook.invoke() as ps:
if self.psrp_session_init is not None:
ps.add_command(self.psrp_session_init)
if self.command:
ps.add_script(f"cmd.exe /c @'\n{self.command}\n'@")
else:
if self.cmdlet:
ps.add_cmdlet(self.cmdlet)
else:
ps.add_script(self.powershell)
if self.parameters:
ps.add_parameters(self.parameters)
if self.do_xcom_push:
ps.add_cmdlet("ConvertTo-Json")
if ps.had_errors:
raise AirflowException("Process failed")
rc = ps.runspace_pool.host.rc
if rc:
raise AirflowException(f"Process exited with non-zero status code: {rc}")
if not self.do_xcom_push:
return None
return [json.loads(output) for output in ps.output]
def get_template_env(self):
# Create a template environment overlay in order to leave the underlying
# environment unchanged.
env = super().get_template_env().overlay()
native = isinstance(env, NativeEnvironment)
def securestring(value: str):
if not native:
raise AirflowException(
"Filter 'securestring' not applicable to non-native templating environment"
)
return TaggedValue("SS", value)
env.filters["securestring"] = securestring
return env
|
[
"pypsrp.serializer.TaggedValue",
"airflow.exceptions.AirflowException",
"airflow.settings.json.loads",
"airflow.providers.microsoft.psrp.hooks.psrp.PsrpHook"
] |
[((5067, 5275), 'airflow.providers.microsoft.psrp.hooks.psrp.PsrpHook', 'PsrpHook', (['self.conn_id'], {'logging_level': 'self.logging_level', 'runspace_options': 'self.runspace_options', 'wsman_options': 'self.wsman_options', 'on_output_callback': '(self.log.info if not self.do_xcom_push else None)'}), '(self.conn_id, logging_level=self.logging_level, runspace_options=\n self.runspace_options, wsman_options=self.wsman_options,\n on_output_callback=self.log.info if not self.do_xcom_push else None)\n', (5075, 5275), False, 'from airflow.providers.microsoft.psrp.hooks.psrp import PsrpHook\n'), ((5967, 6001), 'airflow.exceptions.AirflowException', 'AirflowException', (['"""Process failed"""'], {}), "('Process failed')\n", (5983, 6001), False, 'from airflow.exceptions import AirflowException\n'), ((6074, 6141), 'airflow.exceptions.AirflowException', 'AirflowException', (['f"""Process exited with non-zero status code: {rc}"""'], {}), "(f'Process exited with non-zero status code: {rc}')\n", (6090, 6141), False, 'from airflow.exceptions import AirflowException\n'), ((6218, 6236), 'airflow.settings.json.loads', 'json.loads', (['output'], {}), '(output)\n', (6228, 6236), False, 'from airflow.settings import json\n'), ((6751, 6775), 'pypsrp.serializer.TaggedValue', 'TaggedValue', (['"""SS"""', 'value'], {}), "('SS', value)\n", (6762, 6775), False, 'from pypsrp.serializer import TaggedValue\n'), ((6600, 6703), 'airflow.exceptions.AirflowException', 'AirflowException', (['"""Filter \'securestring\' not applicable to non-native templating environment"""'], {}), '(\n "Filter \'securestring\' not applicable to non-native templating environment"\n )\n', (6616, 6703), False, 'from airflow.exceptions import AirflowException\n')]
|
#
# Created by: <NAME>, September 2002
#
from __future__ import division, print_function, absolute_import
import sys
import subprocess
import time
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy.random import rand, seed
from scipy.linalg import _flapack as flapack
from scipy.linalg import inv
from scipy.linalg import svd
from scipy.linalg.lapack import _compute_lwork
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
class TestFlapackSimple(object):
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm, a1)
if norm in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm in 'Mm':
ref = np.max(np.abs(a1))
elif norm in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack(object):
def test_flapack(self):
if hasattr(flapack, 'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack, 'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers(object):
def test_gels(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
    def test_gelss(self):
        """Exercise ?gelss (least squares via the classic SVD driver).

        Same 3x2 systems and reference values as ``test_gelsd``; checks
        the solution vector and the singular values.
        """
        for dtype in REAL_DTYPES:
            a1 = np.array([[1.0, 2.0],
                           [4.0, 5.0],
                           [7.0, 8.0]], dtype=dtype)
            b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
            gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
                                                  (a1, b1))
            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1
            # Request of sizes (LAPACK workspace query, lwork=-1 convention)
            work, info = gelss_lwork(m, n, nrhs, -1)
            lwork = int(np.real(work))
            v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
            # x has length m; only the leading n entries carry the solution.
            assert_allclose(x[:-1], np.array([-14.333333333333323,
                                              14.999999999999991], dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
            assert_allclose(s, np.array([12.596017180511966,
                                         0.583396253199685], dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
        for dtype in COMPLEX_DTYPES:
            a1 = np.array([[1.0+4.0j, 2.0],
                           [4.0+0.5j, 5.0-3.0j],
                           [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
            b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
            gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
                                                  (a1, b1))
            m, n = a1.shape
            if len(b1.shape) == 2:
                nrhs = b1.shape[1]
            else:
                nrhs = 1
            # Request of sizes
            work, info = gelss_lwork(m, n, nrhs, -1)
            lwork = int(np.real(work))
            v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
            assert_allclose(x[:-1],
                            np.array([1.161753632288328-1.901075709391912j,
                                      1.735882340522193+1.521240901196909j],
                                     dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
            assert_allclose(s, np.array([13.035514762572043,
                                         4.337666985231382], dtype=dtype),
                            rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
class TestRegression(object):
    """Regression tests for historical bugs in the LAPACK wrappers."""

    def test_ticket_1645(self):
        # Regression check for ticket 1645: the RQ factorization wrappers
        # must reject an undersized lwork and accept a valid one.
        for dtype in DTYPES:
            mat = np.zeros((300, 2), dtype=dtype)
            gerqf, = get_lapack_funcs(['gerqf'], [mat])
            # A too-small workspace has to raise.
            assert_raises(Exception, gerqf, mat, lwork=2)
            rq, tau, work, info = gerqf(mat)
            # Pick the Q-assembly routine matching the dtype family
            # (orgrq for real types, ungrq for complex ones).
            if dtype in REAL_DTYPES:
                build_q, = get_lapack_funcs(['orgrq'], [mat])
            elif dtype in COMPLEX_DTYPES:
                build_q, = get_lapack_funcs(['ungrq'], [mat])
            else:
                continue
            assert_raises(Exception, build_q, rq[-2:], tau, lwork=1)
            build_q(rq[-2:], tau, lwork=2)
class TestDpotr(object):
    """Tests for the Cholesky factorization/inversion pair potrf/potri."""

    def test_gh_2691(self):
        # gh-2691: dpotri must honour the 'lower' flag used by dpotrf,
        # for every combination of 'lower' and 'clean'.
        for lower in (True, False):
            for clean in (True, False):
                # Fixed seed -> a deterministic SPD test matrix each pass.
                np.random.seed(42)
                rnd = np.random.normal(size=(3, 3))
                spd = rnd.dot(rnd.T)
                dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (spd, ))
                chol, info = dpotrf(spd, lower, clean=clean)
                result = dpotri(chol, lower)[0]
                # Only the requested triangle of the output is meaningful,
                # so compare just that triangle against the true inverse.
                take = np.tril if lower else np.triu
                assert_allclose(take(result), take(inv(spd)))
class TestDlasd4(object):
    """Test ?lasd4, the secular-equation root finder used internally by
    the divide-and-conquer SVD."""

    def test_sing_val_update(self):
        # Build M = [diag(sigmas[:-1]); 0 | m_vec] and compare its singular
        # values (computed by a full svd) against the roots dlasd4 finds.
        sigmas = np.array([4., 3., 2., 0])
        m_vec = np.array([3.12, 5.7, -4.8, -2.2])
        M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
                                   np.zeros((1, len(m_vec) - 1)))),
                       m_vec[:, np.newaxis]))
        SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
                 check_finite=False)
        it_len = len(sigmas)
        # dlasd4 expects the old singular values in increasing order plus
        # an upper bound, and the update vector in matching order.
        sgm = np.concatenate((sigmas[::-1], (sigmas[0] +
                        it_len*np.sqrt(np.sum(np.power(m_vec, 2))),)))
        mvc = np.concatenate((m_vec[::-1], (0,)))
        lasd4 = get_lapack_funcs('lasd4', (sigmas,))
        roots = []
        for i in range(0, it_len):
            res = lasd4(i, sgm, mvc)
            roots.append(res[1])
            # res[3] is the info flag; positive means the root search failed.
            assert_(res[3] <= 0, "LAPACK root finding dlasd4 failed to find \
the singular value %i" % i)
        roots = np.array(roots)[::-1]
        # BUG FIX: previously the condition and message were wrapped in a
        # single tuple passed to assert_, and a non-empty tuple is always
        # truthy -- so this assertion could never fail.
        assert_(not np.any(np.isnan(roots)), "There are NaN roots")
        assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
                        rtol=100*np.finfo(np.float64).eps)
def test_lartg():
    """Check ?lartg: generate the Givens rotation that zeroes the second
    component of the pair (3, 4) -- or (3, 4j) for complex types."""
    for dtype in 'fdFD':
        lartg = get_lapack_funcs('lartg', dtype=dtype)
        fval = np.array(3, dtype)
        gval = np.array(4, dtype)
        is_complex = np.iscomplexobj(gval)
        if is_complex:
            gval *= 1j
        cs, sn, r = lartg(fval, gval)
        # A 3-4-5 triangle: cos = 3/5 and |r| = 5 regardless of dtype.
        assert_allclose(cs, 3.0/5.0)
        assert_allclose(r, 5.0)
        if is_complex:
            assert_allclose(sn, -4.0j/5.0)
            assert_(type(r) == complex)
            assert_(type(cs) == float)
        else:
            assert_allclose(sn, 4.0/5.0)
def test_rot():
    """Apply plane rotations via srot/drot (BLAS) and crot/zrot (LAPACK)
    and verify every offset/increment/length keyword combination."""
    for dtype in 'fdFD':
        cos_v = 0.6
        sin_v = 0.8
        u = np.ones(4, dtype) * 3
        v = np.ones(4, dtype) * 4
        atol = 10**-(np.finfo(dtype).precision-1)
        if dtype in 'fd':
            # Real rotations live in BLAS.
            rot = get_blas_funcs('rot', dtype=dtype)
            f = 4
        else:
            # Complex rotations (crot/zrot) live in LAPACK.
            rot = get_lapack_funcs('rot', dtype=dtype)
            sin_v *= -1j
            v *= 1j
            f = 4j
        # Each case: keyword arguments -> expected (x, y) after rotation.
        cases = [
            (dict(), [[5, 5, 5, 5], [0, 0, 0, 0]]),
            (dict(n=2), [[5, 5, 3, 3], [0, 0, f, f]]),
            (dict(offx=2, offy=2), [[3, 3, 5, 5], [f, f, 0, 0]]),
            (dict(incx=2, offy=2, n=2), [[5, 3, 5, 3], [f, f, 0, 0]]),
            (dict(offx=2, incy=2, n=2), [[3, 3, 5, 5], [0, f, 0, f]]),
            (dict(offx=2, incx=2, offy=2, incy=2, n=1),
             [[3, 3, 5, 3], [f, f, 0, f]]),
            (dict(incx=-2, incy=-2, n=2), [[5, 3, 5, 3], [0, f, 0, f]]),
        ]
        for kwargs, expected in cases:
            assert_allclose(rot(u, v, cos_v, sin_v, **kwargs), expected,
                            atol=atol)
        # With the overwrite flags the inputs themselves are rotated
        # in place and returned.
        a, b = rot(u, v, cos_v, sin_v, overwrite_x=1, overwrite_y=1)
        assert_(a is u)
        assert_(b is v)
        assert_allclose(a, [5, 5, 5, 5], atol=atol)
        assert_allclose(b, [0, 0, 0, 0], atol=atol)
def test_larfg_larf():
    """One step of Householder tridiagonalization via ?larfg/?larf.

    Generates a reflector clearing a[2:, 0] and applies it from both
    sides of a symmetric/Hermitian matrix; column 0 and row 0 must then
    match the pattern (a00, alpha, 0, 0).
    """
    np.random.seed(1234)
    a0 = np.random.random((4, 4))
    a0 = a0.T.dot(a0)
    a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
    a0j = a0j.T.conj().dot(a0j)
    # our test here will be to do one step of reducing a hermitian matrix to
    # tridiagonal form using householder transforms.
    for dtype in 'fdFD':
        larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
        if dtype in 'FD':
            a = a0j.copy()
        else:
            a = a0.copy()
        # generate a householder transform to clear a[2:,0]
        alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
        # create expected output
        expected = np.zeros_like(a[:, 0])
        expected[0] = a[0, 0]
        expected[1] = alpha
        # assemble householder vector: implicit leading 1, then the tail
        # returned by larfg
        v = np.zeros_like(a[1:, 0])
        v[0] = 1.0
        v[1:] = x
        # apply transform from the left
        a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
        # apply transform from the right
        a[:, 1:] = larf(v, tau, a[:,1:], np.zeros(a.shape[0]), side='R')
        assert_allclose(a[:, 0], expected, atol=1e-5)
        assert_allclose(a[0, :], expected, atol=1e-5)
@pytest.mark.xslow
def test_sgesdd_lwork_bug_workaround():
    # Test that SGESDD lwork is sufficiently large for LAPACK.
    #
    # This checks that the workaround around an apparent LAPACK bug
    # actually works. cf. gh-5401
    #
    # xslow: requires 1GB+ of memory; run in a subprocess so a failure
    # cannot take the test process down with it.
    p = subprocess.Popen([sys.executable, '-c',
                          'import numpy as np; '
                          'from scipy.linalg import svd; '
                          'a = np.zeros([9537, 9537], dtype=np.float32); '
                          'svd(a)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    # Check if an error occurred within 5 sec; the computation can
    # take substantially longer, and we will not wait for it to finish
    for j in range(50):
        time.sleep(0.1)
        if p.poll() is not None:
            returncode = p.returncode
            break
    else:
        # Didn't exit in time -- probably entered computation.  The
        # error is raised before entering computation, so things are
        # probably OK.
        returncode = 0
        p.terminate()
    # BUG FIX: p.stdout.read() returns bytes on Python 3, and the message
    # argument is built eagerly, so concatenating it with a str raised
    # TypeError on every run.  Decode before concatenating.
    assert_equal(returncode, 0,
                 "Code apparently failed: "
                 + p.stdout.read().decode('utf-8', 'replace'))
class TestSytrd(object):
    """Tests for ?sytrd: reduction of a real symmetric matrix to
    tridiagonal form by an orthogonal similarity transform."""

    def test_sytrd(self):
        for dtype in REAL_DTYPES:
            # Assert that a 0x0 matrix raises an error
            A = np.zeros((0, 0), dtype=dtype)
            sytrd, sytrd_lwork = \
                get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))
            assert_raises(ValueError, sytrd, A)
            # Tests for n = 1 currently fail with
            # ```
            # ValueError: failed to create intent(cache|hide)|optional array--
            # must have defined dimensions but got (0,)
            # ```
            # This is a NumPy issue
            # <https://github.com/numpy/numpy/issues/9617>.
            # TODO once the issue has been resolved, test for n=1
            # some upper triangular array
            n = 3
            A = np.zeros((n, n), dtype=dtype)
            A[np.triu_indices_from(A)] = \
                np.arange(1, n*(n+1)//2+1, dtype=dtype)
            # query lwork
            lwork, info = sytrd_lwork(n)
            assert_equal(info, 0)
            # check lower=1 behavior (shouldn't do much since the matrix is
            # upper triangular)
            data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
            assert_equal(info, 0)
            assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
            assert_allclose(d, np.diag(A))
            assert_allclose(e, 0.0)
            assert_allclose(tau, 0.0)
            # and now for the proper test (lower=0 is the default)
            data, d, e, tau, info = sytrd(A, lwork=lwork)
            assert_equal(info, 0)
            # assert Q^T*A*Q = tridiag(e, d, e)
            # build the tridiagonal matrix T from the returned diagonals
            T = np.zeros_like(A, dtype=dtype)
            k = np.arange(A.shape[0])
            T[k, k] = d
            k2 = np.arange(A.shape[0]-1)
            T[k2+1, k2] = e
            T[k2, k2+1] = e
            # build Q from the Householder reflectors stored in 'data'
            Q = np.eye(n, n, dtype=dtype)
            for i in range(n-1):
                v = np.zeros(n, dtype=dtype)
                v[:i] = data[:i, i+1]
                v[i] = 1.0
                H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
                Q = np.dot(H, Q)
            # Make matrix fully symmetric
            i_lower = np.tril_indices(n, -1)
            A[i_lower] = A.T[i_lower]
            QTAQ = np.dot(Q.T, np.dot(A, Q))
            # disable rtol here since some values in QTAQ and T are very close
            # to 0.
            assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
class TestHetrd(object):
    """Tests for ?hetrd: reduction of a complex Hermitian matrix to real
    tridiagonal form by a unitary similarity transform."""

    def test_hetrd(self):
        for real_dtype, complex_dtype in zip(REAL_DTYPES, COMPLEX_DTYPES):
            # Assert that a 0x0 matrix raises an error
            A = np.zeros((0, 0), dtype=complex_dtype)
            hetrd, hetrd_lwork = \
                get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
            assert_raises(ValueError, hetrd, A)
            # Tests for n = 1 currently fail with
            # ```
            # ValueError: failed to create intent(cache|hide)|optional array--
            # must have defined dimensions but got (0,)
            # ```
            # This is a NumPy issue
            # <https://github.com/numpy/numpy/issues/9617>.
            # TODO once the issue has been resolved, test for n=1
            # some upper triangular array; the diagonal is forced real
            # below, as required for a Hermitian matrix
            n = 3
            A = np.zeros((n, n), dtype=complex_dtype)
            A[np.triu_indices_from(A)] = (
                np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
                + 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
                )
            np.fill_diagonal(A, np.real(np.diag(A)))
            # query lwork
            lwork, info = hetrd_lwork(n)
            assert_equal(info, 0)
            # check lower=1 behavior (shouldn't do much since the matrix is
            # upper triangular)
            data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
            assert_equal(info, 0)
            assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
            assert_allclose(d, np.real(np.diag(A)))
            assert_allclose(e, 0.0)
            assert_allclose(tau, 0.0)
            # and now for the proper test (lower=0 is the default)
            data, d, e, tau, info = hetrd(A, lwork=lwork)
            assert_equal(info, 0)
            # assert Q^H*A*Q = tridiag(e, d, e)
            # build the tridiagonal matrix T from the returned diagonals
            T = np.zeros_like(A, dtype=real_dtype)
            k = np.arange(A.shape[0], dtype=int)
            T[k, k] = d
            k2 = np.arange(A.shape[0]-1, dtype=int)
            T[k2+1, k2] = e
            T[k2, k2+1] = e
            # build Q from the Householder reflectors stored in 'data'
            Q = np.eye(n, n, dtype=complex_dtype)
            for i in range(n-1):
                v = np.zeros(n, dtype=complex_dtype)
                v[:i] = data[:i, i+1]
                v[i] = 1.0
                H = np.eye(n, n, dtype=complex_dtype) \
                    - tau[i] * np.outer(v, np.conj(v))
                Q = np.dot(H, Q)
            # Make matrix fully Hermitian
            i_lower = np.tril_indices(n, -1)
            A[i_lower] = np.conj(A.T[i_lower])
            QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))
            # disable rtol here since some values in QHAQ and T are very close
            # to 0.
            assert_allclose(
                QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
                )
def test_gglse():
    """Solve the NAG manual's equality-constrained least-squares examples
    with ?gglse and compare against the published solutions."""
    # Example data taken from NAG manual
    for ind, dtype in enumerate(DTYPES):
        # DTYPES = <s,d,c,z> gglse
        func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
                                            dtype=dtype)
        lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
        # For <s,d>gglse
        if ind < 2:
            a = np.array([[-0.57, -1.28, -0.39, 0.25],
                          [-1.93, 1.08, -0.31, -2.14],
                          [2.30, 0.24, 0.40, -0.35],
                          [-1.93, 0.64, -0.66, 0.08],
                          [0.15, 0.30, 0.15, -2.13],
                          [-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
            c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
            d = np.array([0., 0.], dtype=dtype)
        # For <c,z>gglse  (comment fixed: this branch is the complex one)
        else:
            a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
                          [-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
                          [0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
                          [0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
                          [0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
                          [1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
            c = np.array([[-2.54+0.09j],
                          [1.65-2.26j],
                          [-2.11-3.96j],
                          [1.82+3.30j],
                          [-6.41+3.77j],
                          [2.07+0.66j]])
            d = np.zeros(2, dtype=dtype)
        b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)
        _, _, _, result, _ = func(a, b, c, d, lwork=lwork)
        if ind < 2:
            expected = np.array([0.48904455,
                                  0.99754786,
                                  0.48904455,
                                  0.99754786])
        else:
            expected = np.array([1.08742917-1.96205783j,
                                  -0.74093902+3.72973919j,
                                  1.08742917-1.96205759j,
                                  -0.74093896+3.72973895j])
        assert_array_almost_equal(result, expected, decimal=4)
def test_sycon_hecon():
    """Compare the reciprocal condition number estimated by ?sycon/?hecon
    with the exact 1-norm condition number of a random symmetric or
    Hermitian matrix."""
    seed(1234)
    for idx, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
        # The first four dtypes exercise <s,d,c,z>sycon, the last
        # two <c,z>hecon.
        n = 10
        symmetric_path = idx < 4
        if symmetric_path:
            func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
            funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
            mat = (rand(n, n)).astype(dtype)
        else:
            func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
            funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
            mat = (rand(n, n) + rand(n, n)*1j).astype(dtype)
        # sycon/hecon only reference one triangle, so conjugating is safe;
        # adding 2*I keeps the matrix comfortably well conditioned.
        mat = (mat + mat.conj().T)/2 + 2*np.eye(n, dtype=dtype)
        anorm = np.linalg.norm(mat, 1)
        lwork = _compute_lwork(func_lwork, n)
        ldu, ipiv, _ = functrf(mat, lwork=lwork, lower=1)
        rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
        # The estimate must agree with the true condition number to
        # within a one-fold relative error.
        assert_(abs(1/rcond - np.linalg.cond(mat, p=1))*rcond < 1)
|
[
"numpy.random.seed",
"numpy.triu",
"numpy.abs",
"numpy.ones",
"numpy.isnan",
"numpy.linalg.cond",
"scipy.linalg.svd",
"numpy.linalg.norm",
"scipy.linalg.blas.get_blas_funcs",
"numpy.arange",
"numpy.random.normal",
"numpy.diag",
"numpy.testing.assert_array_almost_equal",
"numpy.zeros_like",
"numpy.testing.assert_almost_equal",
"numpy.power",
"scipy.linalg.inv",
"numpy.finfo",
"pytest.raises",
"numpy.testing.assert_equal",
"numpy.real",
"numpy.triu_indices_from",
"numpy.testing.assert_allclose",
"scipy.linalg.lapack.get_lapack_funcs",
"numpy.tril_indices",
"numpy.conj",
"subprocess.Popen",
"numpy.testing.assert_array_equal",
"scipy.linalg.lapack._compute_lwork",
"time.sleep",
"numpy.testing.assert_",
"numpy.dot",
"numpy.concatenate",
"numpy.outer",
"numpy.iscomplexobj",
"numpy.tril",
"numpy.zeros",
"numpy.random.random",
"numpy.array",
"numpy.random.rand",
"numpy.eye"
] |
[((18500, 18520), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (18514, 18520), True, 'import numpy as np\n'), ((18530, 18554), 'numpy.random.random', 'np.random.random', (['(4, 4)'], {}), '((4, 4))\n', (18546, 18554), True, 'import numpy as np\n'), ((19988, 20193), 'subprocess.Popen', 'subprocess.Popen', (["[sys.executable, '-c',\n 'import numpy as np; from scipy.linalg import svd; a = np.zeros([9537, 9537], dtype=np.float32); svd(a)'\n ]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "([sys.executable, '-c',\n 'import numpy as np; from scipy.linalg import svd; a = np.zeros([9537, 9537], dtype=np.float32); svd(a)'\n ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n", (20004, 20193), False, 'import subprocess\n'), ((28683, 28693), 'numpy.random.seed', 'seed', (['(1234)'], {}), '(1234)\n', (28687, 28693), False, 'from numpy.random import rand, seed\n'), ((2050, 2076), 'numpy.array', 'np.array', (['[[1, 2], [0, 4]]'], {}), '([[1, 2], [0, 4]])\n', (2058, 2076), True, 'import numpy as np\n'), ((2089, 2115), 'numpy.array', 'np.array', (['[[5, 6], [0, 8]]'], {}), '([[5, 6], [0, 8]])\n', (2097, 2115), True, 'import numpy as np\n'), ((2128, 2157), 'numpy.array', 'np.array', (['[[9, 10], [11, 12]]'], {}), '([[9, 10], [11, 12]])\n', (2136, 2157), True, 'import numpy as np\n'), ((3166, 3228), 'numpy.array', 'np.array', (['[[-149, -50, -154], [537, 180, 546], [-27, -9, -25]]'], {}), '([[-149, -50, -154], [537, 180, 546], [-27, -9, -25]])\n', (3174, 3228), True, 'import numpy as np\n'), ((15239, 15267), 'numpy.array', 'np.array', (['[4.0, 3.0, 2.0, 0]'], {}), '([4.0, 3.0, 2.0, 0])\n', (15247, 15267), True, 'import numpy as np\n'), ((15281, 15314), 'numpy.array', 'np.array', (['[3.12, 5.7, -4.8, -2.2]'], {}), '([3.12, 5.7, -4.8, -2.2])\n', (15289, 15314), True, 'import numpy as np\n'), ((15465, 15553), 'scipy.linalg.svd', 'svd', (['M'], {'full_matrices': '(False)', 'compute_uv': '(False)', 'overwrite_a': '(False)', 'check_finite': 
'(False)'}), '(M, full_matrices=False, compute_uv=False, overwrite_a=False,\n check_finite=False)\n', (15468, 15553), False, 'from scipy.linalg import svd\n'), ((15745, 15780), 'numpy.concatenate', 'np.concatenate', (['(m_vec[::-1], (0,))'], {}), '((m_vec[::-1], (0,)))\n', (15759, 15780), True, 'import numpy as np\n'), ((15798, 15834), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (['"""lasd4"""', '(sigmas,)'], {}), "('lasd4', (sigmas,))\n", (15814, 15834), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((16404, 16442), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (['"""lartg"""'], {'dtype': 'dtype'}), "('lartg', dtype=dtype)\n", (16420, 16442), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((16456, 16474), 'numpy.array', 'np.array', (['(3)', 'dtype'], {}), '(3, dtype)\n', (16464, 16474), True, 'import numpy as np\n'), ((16487, 16505), 'numpy.array', 'np.array', (['(4)', 'dtype'], {}), '(4, dtype)\n', (16495, 16505), True, 'import numpy as np\n'), ((16518, 16536), 'numpy.iscomplexobj', 'np.iscomplexobj', (['g'], {}), '(g)\n', (16533, 16536), True, 'import numpy as np\n'), ((16600, 16630), 'numpy.testing.assert_allclose', 'assert_allclose', (['cs', '(3.0 / 5.0)'], {}), '(cs, 3.0 / 5.0)\n', (16615, 16630), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((16637, 16660), 'numpy.testing.assert_allclose', 'assert_allclose', (['r', '(5.0)'], {}), '(r, 5.0)\n', (16652, 16660), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((16673, 16691), 'numpy.iscomplexobj', 'np.iscomplexobj', (['g'], {}), '(g)\n', (16688, 16691), True, 'import numpy as np\n'), ((18327, 18342), 'numpy.testing.assert_', 'assert_', (['(a is u)'], {}), '(a is u)\n', (18334, 18342), False, 'from numpy.testing import assert_equal, 
assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((18351, 18366), 'numpy.testing.assert_', 'assert_', (['(b is v)'], {}), '(b is v)\n', (18358, 18366), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((18375, 18418), 'numpy.testing.assert_allclose', 'assert_allclose', (['a', '[5, 5, 5, 5]'], {'atol': 'atol'}), '(a, [5, 5, 5, 5], atol=atol)\n', (18390, 18418), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((18427, 18470), 'numpy.testing.assert_allclose', 'assert_allclose', (['b', '[0, 0, 0, 0]'], {'atol': 'atol'}), '(b, [0, 0, 0, 0], atol=atol)\n', (18442, 18470), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((18588, 18612), 'numpy.random.random', 'np.random.random', (['(4, 4)'], {}), '((4, 4))\n', (18604, 18612), True, 'import numpy as np\n'), ((18854, 18902), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["['larfg', 'larf']"], {'dtype': 'dtype'}), "(['larfg', 'larf'], dtype=dtype)\n", (18870, 18902), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((19174, 19196), 'numpy.zeros_like', 'np.zeros_like', (['a[:, 0]'], {}), '(a[:, 0])\n', (19187, 19196), True, 'import numpy as np\n'), ((19306, 19329), 'numpy.zeros_like', 'np.zeros_like', (['a[1:, 0]'], {}), '(a[1:, 0])\n', (19319, 19329), True, 'import numpy as np\n'), ((19608, 19654), 'numpy.testing.assert_allclose', 'assert_allclose', (['a[:, 0]', 'expected'], {'atol': '(1e-05)'}), '(a[:, 0], expected, atol=1e-05)\n', (19623, 19654), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((19662, 19708), 'numpy.testing.assert_allclose', 'assert_allclose', 
(['a[0, :]', 'expected'], {'atol': '(1e-05)'}), '(a[0, :], expected, atol=1e-05)\n', (19677, 19708), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((20522, 20537), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (20532, 20537), False, 'import time\n'), ((26548, 26603), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gglse', 'gglse_lwork')"], {'dtype': 'dtype'}), "(('gglse', 'gglse_lwork'), dtype=dtype)\n", (26564, 26603), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((26664, 26705), 'scipy.linalg.lapack._compute_lwork', '_compute_lwork', (['func_lwork'], {'m': '(6)', 'n': '(4)', 'p': '(2)'}), '(func_lwork, m=6, n=4, p=2)\n', (26678, 26705), False, 'from scipy.linalg.lapack import _compute_lwork\n'), ((28022, 28091), 'numpy.array', 'np.array', (['[[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0]]'], {'dtype': 'dtype'}), '([[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0]], dtype=dtype)\n', (28030, 28091), True, 'import numpy as np\n'), ((28598, 28652), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', 'expected'], {'decimal': '(4)'}), '(result, expected, decimal=4)\n', (28623, 28652), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((29472, 29492), 'numpy.linalg.norm', 'np.linalg.norm', (['A', '(1)'], {}), '(A, 1)\n', (29486, 29492), True, 'import numpy as np\n'), ((29509, 29538), 'scipy.linalg.lapack._compute_lwork', '_compute_lwork', (['func_lwork', 'n'], {}), '(func_lwork, n)\n', (29523, 29538), False, 'from scipy.linalg.lapack import _compute_lwork\n'), ((1368, 1400), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['ba', 'a'], {}), '(ba, a)\n', (1393, 1400), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, 
assert_almost_equal, assert_array_equal\n'), ((2394, 2429), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('trsyl',)", '(a1,)'], {}), "(('trsyl',), (a1,))\n", (2410, 2429), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((4649, 4708), 'numpy.array', 'np.array', (['[[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]]'], {'dtype': 'dtype'}), '([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype)\n', (4657, 4708), True, 'import numpy as np\n'), ((4778, 4819), 'numpy.array', 'np.array', (['[16.0, 17.0, 20.0]'], {'dtype': 'dtype'}), '([16.0, 17.0, 20.0], dtype=dtype)\n', (4786, 4819), True, 'import numpy as np\n'), ((4858, 4917), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gels', 'gels_lwork', 'geqrf')", '(a1, b1)'], {}), "(('gels', 'gels_lwork', 'geqrf'), (a1, b1))\n", (4874, 4917), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((5133, 5171), 'scipy.linalg.lapack._compute_lwork', '_compute_lwork', (['gels_lwork', 'm', 'n', 'nrhs'], {}), '(gels_lwork, m, n, nrhs)\n', (5147, 5171), False, 'from scipy.linalg.lapack import _compute_lwork\n'), ((5531, 5565), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['lqr', 'lqr_truth'], {}), '(lqr, lqr_truth)\n', (5549, 5565), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((5621, 5720), 'numpy.array', 'np.array', (['[[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + 0.7j]]'], {'dtype': 'dtype'}), '([[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + \n 0.7j]], dtype=dtype)\n', (5629, 5720), True, 'import numpy as np\n'), ((5775, 5830), 'numpy.array', 'np.array', (['[16.0, 17.0 + 2.0j, 20.0 - 4.0j]'], {'dtype': 'dtype'}), '([16.0, 17.0 + 2.0j, 20.0 - 4.0j], dtype=dtype)\n', (5783, 5830), True, 'import numpy as np\n'), ((5865, 5924), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gels', 'gels_lwork', 'geqrf')", '(a1, b1)'], {}), 
"(('gels', 'gels_lwork', 'geqrf'), (a1, b1))\n", (5881, 5924), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((6140, 6178), 'scipy.linalg.lapack._compute_lwork', '_compute_lwork', (['gels_lwork', 'm', 'n', 'nrhs'], {}), '(gels_lwork, m, n, nrhs)\n', (6154, 6178), False, 'from scipy.linalg.lapack import _compute_lwork\n'), ((6548, 6582), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['lqr', 'lqr_truth'], {}), '(lqr, lqr_truth)\n', (6566, 6582), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((6661, 6720), 'numpy.array', 'np.array', (['[[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]]'], {'dtype': 'dtype'}), '([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype)\n', (6669, 6720), True, 'import numpy as np\n'), ((6790, 6831), 'numpy.array', 'np.array', (['[16.0, 17.0, 20.0]'], {'dtype': 'dtype'}), '([16.0, 17.0, 20.0], dtype=dtype)\n', (6798, 6831), True, 'import numpy as np\n'), ((6865, 6917), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gelsd', 'gelsd_lwork')", '(a1, b1)'], {}), "(('gelsd', 'gelsd_lwork'), (a1, b1))\n", (6881, 6917), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((7844, 7943), 'numpy.array', 'np.array', (['[[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + 0.7j]]'], {'dtype': 'dtype'}), '([[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + \n 0.7j]], dtype=dtype)\n', (7852, 7943), True, 'import numpy as np\n'), ((7998, 8053), 'numpy.array', 'np.array', (['[16.0, 17.0 + 2.0j, 20.0 - 4.0j]'], {'dtype': 'dtype'}), '([16.0, 17.0 + 2.0j, 20.0 - 4.0j], dtype=dtype)\n', (8006, 8053), True, 'import numpy as np\n'), ((8083, 8135), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gelsd', 'gelsd_lwork')", '(a1, b1)'], {}), "(('gelsd', 'gelsd_lwork'), (a1, b1))\n", (8099, 8135), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((9193, 9252), 'numpy.array', 
'np.array', (['[[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]]'], {'dtype': 'dtype'}), '([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype)\n', (9201, 9252), True, 'import numpy as np\n'), ((9322, 9363), 'numpy.array', 'np.array', (['[16.0, 17.0, 20.0]'], {'dtype': 'dtype'}), '([16.0, 17.0, 20.0], dtype=dtype)\n', (9330, 9363), True, 'import numpy as np\n'), ((9397, 9449), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gelss', 'gelss_lwork')", '(a1, b1)'], {}), "(('gelss', 'gelss_lwork'), (a1, b1))\n", (9413, 9449), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((10298, 10397), 'numpy.array', 'np.array', (['[[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + 0.7j]]'], {'dtype': 'dtype'}), '([[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + \n 0.7j]], dtype=dtype)\n', (10306, 10397), True, 'import numpy as np\n'), ((10452, 10507), 'numpy.array', 'np.array', (['[16.0, 17.0 + 2.0j, 20.0 - 4.0j]'], {'dtype': 'dtype'}), '([16.0, 17.0 + 2.0j, 20.0 - 4.0j], dtype=dtype)\n', (10460, 10507), True, 'import numpy as np\n'), ((10537, 10589), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gelss', 'gelss_lwork')", '(a1, b1)'], {}), "(('gelss', 'gelss_lwork'), (a1, b1))\n", (10553, 10589), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((11555, 11614), 'numpy.array', 'np.array', (['[[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]]'], {'dtype': 'dtype'}), '([[1.0, 2.0], [4.0, 5.0], [7.0, 8.0]], dtype=dtype)\n', (11563, 11614), True, 'import numpy as np\n'), ((11684, 11725), 'numpy.array', 'np.array', (['[16.0, 17.0, 20.0]'], {'dtype': 'dtype'}), '([16.0, 17.0, 20.0], dtype=dtype)\n', (11692, 11725), True, 'import numpy as np\n'), ((11759, 11811), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gelsy', 'gelss_lwork')", '(a1, b1)'], {}), "(('gelsy', 'gelss_lwork'), (a1, b1))\n", (11775, 11811), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((12168, 12210), 'numpy.zeros', 
'np.zeros', (['(a1.shape[1], 1)'], {'dtype': 'np.int32'}), '((a1.shape[1], 1), dtype=np.int32)\n', (12176, 12210), True, 'import numpy as np\n'), ((12606, 12705), 'numpy.array', 'np.array', (['[[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + 0.7j]]'], {'dtype': 'dtype'}), '([[1.0 + 4.0j, 2.0], [4.0 + 0.5j, 5.0 - 3.0j], [7.0 - 2.0j, 8.0 + \n 0.7j]], dtype=dtype)\n', (12614, 12705), True, 'import numpy as np\n'), ((12760, 12815), 'numpy.array', 'np.array', (['[16.0, 17.0 + 2.0j, 20.0 - 4.0j]'], {'dtype': 'dtype'}), '([16.0, 17.0 + 2.0j, 20.0 - 4.0j], dtype=dtype)\n', (12768, 12815), True, 'import numpy as np\n'), ((12845, 12897), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('gelsy', 'gelss_lwork')", '(a1, b1)'], {}), "(('gelsy', 'gelss_lwork'), (a1, b1))\n", (12861, 12897), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((13254, 13296), 'numpy.zeros', 'np.zeros', (['(a1.shape[1], 1)'], {'dtype': 'np.int32'}), '((a1.shape[1], 1), dtype=np.int32)\n', (13262, 13296), True, 'import numpy as np\n'), ((13892, 13923), 'numpy.zeros', 'np.zeros', (['(300, 2)'], {'dtype': 'dtype'}), '((300, 2), dtype=dtype)\n', (13900, 13923), True, 'import numpy as np\n'), ((13946, 13978), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["['gerqf']", '[a]'], {}), "(['gerqf'], [a])\n", (13962, 13978), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((13991, 14034), 'pytest.raises', 'assert_raises', (['Exception', 'gerqf', 'a'], {'lwork': '(2)'}), '(Exception, gerqf, a, lwork=2)\n', (14004, 14034), True, 'from pytest import raises as assert_raises\n'), ((15973, 16110), 'numpy.testing.assert_', 'assert_', (['(res[3] <= 0)', "('LAPACK root finding dlasd4 failed to find the singular value %i'\n % i)"], {}), "(res[3] <= 0, \n 'LAPACK root finding dlasd4 failed to find the singular value %i'\n % i)\n", (15980, 16110), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, 
assert_almost_equal, assert_array_equal\n'), ((16121, 16136), 'numpy.array', 'np.array', (['roots'], {}), '(roots)\n', (16129, 16136), True, 'import numpy as np\n'), ((16705, 16737), 'numpy.testing.assert_allclose', 'assert_allclose', (['sn', '(-4.0j / 5.0)'], {}), '(sn, -4.0j / 5.0)\n', (16720, 16737), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((16841, 16871), 'numpy.testing.assert_allclose', 'assert_allclose', (['sn', '(4.0 / 5.0)'], {}), '(sn, 4.0 / 5.0)\n', (16856, 16871), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((17017, 17034), 'numpy.ones', 'np.ones', (['(4)', 'dtype'], {}), '(4, dtype)\n', (17024, 17034), True, 'import numpy as np\n'), ((17051, 17068), 'numpy.ones', 'np.ones', (['(4)', 'dtype'], {}), '(4, dtype)\n', (17058, 17068), True, 'import numpy as np\n'), ((17168, 17202), 'scipy.linalg.blas.get_blas_funcs', 'get_blas_funcs', (['"""rot"""'], {'dtype': 'dtype'}), "('rot', dtype=dtype)\n", (17182, 17202), False, 'from scipy.linalg.blas import get_blas_funcs\n'), ((17253, 17289), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (['"""rot"""'], {'dtype': 'dtype'}), "('rot', dtype=dtype)\n", (17269, 17289), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((18618, 18642), 'numpy.random.random', 'np.random.random', (['(4, 4)'], {}), '((4, 4))\n', (18634, 18642), True, 'import numpy as np\n'), ((19462, 19482), 'numpy.zeros', 'np.zeros', (['a.shape[1]'], {}), '(a.shape[1])\n', (19470, 19482), True, 'import numpy as np\n'), ((19567, 19587), 'numpy.zeros', 'np.zeros', (['a.shape[0]'], {}), '(a.shape[0])\n', (19575, 19587), True, 'import numpy as np\n'), ((21096, 21125), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {'dtype': 'dtype'}), '((0, 0), dtype=dtype)\n', (21104, 21125), True, 'import numpy as np\n'), ((21177, 21225), 
'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('sytrd', 'sytrd_lwork')", '(A,)'], {}), "(('sytrd', 'sytrd_lwork'), (A,))\n", (21193, 21225), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((21238, 21273), 'pytest.raises', 'assert_raises', (['ValueError', 'sytrd', 'A'], {}), '(ValueError, sytrd, A)\n', (21251, 21273), True, 'from pytest import raises as assert_raises\n'), ((21735, 21764), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'dtype'}), '((n, n), dtype=dtype)\n', (21743, 21764), True, 'import numpy as np\n'), ((21824, 21871), 'numpy.arange', 'np.arange', (['(1)', '(n * (n + 1) // 2 + 1)'], {'dtype': 'dtype'}), '(1, n * (n + 1) // 2 + 1, dtype=dtype)\n', (21833, 21871), True, 'import numpy as np\n'), ((21944, 21965), 'numpy.testing.assert_equal', 'assert_equal', (['info', '(0)'], {}), '(info, 0)\n', (21956, 21965), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((22154, 22175), 'numpy.testing.assert_equal', 'assert_equal', (['info', '(0)'], {}), '(info, 0)\n', (22166, 22175), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((22307, 22330), 'numpy.testing.assert_allclose', 'assert_allclose', (['e', '(0.0)'], {}), '(e, 0.0)\n', (22322, 22330), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((22343, 22368), 'numpy.testing.assert_allclose', 'assert_allclose', (['tau', '(0.0)'], {}), '(tau, 0.0)\n', (22358, 22368), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((22507, 22528), 'numpy.testing.assert_equal', 'assert_equal', (['info', '(0)'], {}), '(info, 0)\n', (22519, 22528), False, 'from numpy.testing import assert_equal, 
assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((22634, 22663), 'numpy.zeros_like', 'np.zeros_like', (['A'], {'dtype': 'dtype'}), '(A, dtype=dtype)\n', (22647, 22663), True, 'import numpy as np\n'), ((22680, 22701), 'numpy.arange', 'np.arange', (['A.shape[0]'], {}), '(A.shape[0])\n', (22689, 22701), True, 'import numpy as np\n'), ((22743, 22768), 'numpy.arange', 'np.arange', (['(A.shape[0] - 1)'], {}), '(A.shape[0] - 1)\n', (22752, 22768), True, 'import numpy as np\n'), ((22862, 22887), 'numpy.eye', 'np.eye', (['n', 'n'], {'dtype': 'dtype'}), '(n, n, dtype=dtype)\n', (22868, 22887), True, 'import numpy as np\n'), ((23201, 23223), 'numpy.tril_indices', 'np.tril_indices', (['n', '(-1)'], {}), '(n, -1)\n', (23216, 23223), True, 'import numpy as np\n'), ((23682, 23719), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {'dtype': 'complex_dtype'}), '((0, 0), dtype=complex_dtype)\n', (23690, 23719), True, 'import numpy as np\n'), ((23771, 23819), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('hetrd', 'hetrd_lwork')", '(A,)'], {}), "(('hetrd', 'hetrd_lwork'), (A,))\n", (23787, 23819), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((23832, 23867), 'pytest.raises', 'assert_raises', (['ValueError', 'hetrd', 'A'], {}), '(ValueError, hetrd, A)\n', (23845, 23867), True, 'from pytest import raises as assert_raises\n'), ((24329, 24366), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'complex_dtype'}), '((n, n), dtype=complex_dtype)\n', (24337, 24366), True, 'import numpy as np\n'), ((24690, 24711), 'numpy.testing.assert_equal', 'assert_equal', (['info', '(0)'], {}), '(info, 0)\n', (24702, 24711), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((24900, 24921), 'numpy.testing.assert_equal', 'assert_equal', (['info', '(0)'], {}), '(info, 0)\n', (24912, 24921), False, 'from numpy.testing import assert_equal, 
assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((25068, 25091), 'numpy.testing.assert_allclose', 'assert_allclose', (['e', '(0.0)'], {}), '(e, 0.0)\n', (25083, 25091), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((25104, 25129), 'numpy.testing.assert_allclose', 'assert_allclose', (['tau', '(0.0)'], {}), '(tau, 0.0)\n', (25119, 25129), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((25268, 25289), 'numpy.testing.assert_equal', 'assert_equal', (['info', '(0)'], {}), '(info, 0)\n', (25280, 25289), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((25395, 25429), 'numpy.zeros_like', 'np.zeros_like', (['A'], {'dtype': 'real_dtype'}), '(A, dtype=real_dtype)\n', (25408, 25429), True, 'import numpy as np\n'), ((25446, 25478), 'numpy.arange', 'np.arange', (['A.shape[0]'], {'dtype': 'int'}), '(A.shape[0], dtype=int)\n', (25455, 25478), True, 'import numpy as np\n'), ((25520, 25556), 'numpy.arange', 'np.arange', (['(A.shape[0] - 1)'], {'dtype': 'int'}), '(A.shape[0] - 1, dtype=int)\n', (25529, 25556), True, 'import numpy as np\n'), ((25650, 25683), 'numpy.eye', 'np.eye', (['n', 'n'], {'dtype': 'complex_dtype'}), '(n, n, dtype=complex_dtype)\n', (25656, 25683), True, 'import numpy as np\n'), ((26044, 26066), 'numpy.tril_indices', 'np.tril_indices', (['n', '(-1)'], {}), '(n, -1)\n', (26059, 26066), True, 'import numpy as np\n'), ((26092, 26113), 'numpy.conj', 'np.conj', (['A.T[i_lower]'], {}), '(A.T[i_lower])\n', (26099, 26113), True, 'import numpy as np\n'), ((26767, 26963), 'numpy.array', 'np.array', (['[[-0.57, -1.28, -0.39, 0.25], [-1.93, 1.08, -0.31, -2.14], [2.3, 0.24, 0.4,\n -0.35], [-1.93, 0.64, -0.66, 0.08], [0.15, 0.3, 0.15, 
-2.13], [-0.02, \n 1.03, -1.43, 0.5]]'], {'dtype': 'dtype'}), '([[-0.57, -1.28, -0.39, 0.25], [-1.93, 1.08, -0.31, -2.14], [2.3, \n 0.24, 0.4, -0.35], [-1.93, 0.64, -0.66, 0.08], [0.15, 0.3, 0.15, -2.13],\n [-0.02, 1.03, -1.43, 0.5]], dtype=dtype)\n', (26775, 26963), True, 'import numpy as np\n'), ((27105, 27167), 'numpy.array', 'np.array', (['[-1.5, -2.14, 1.23, -0.54, -1.68, 0.82]'], {'dtype': 'dtype'}), '([-1.5, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)\n', (27113, 27167), True, 'import numpy as np\n'), ((27185, 27218), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'dtype'}), '([0.0, 0.0], dtype=dtype)\n', (27193, 27218), True, 'import numpy as np\n'), ((27272, 27658), 'numpy.array', 'np.array', (['[[0.96 - 0.81j, -0.03 + 0.96j, -0.91 + 2.06j, -0.05 + 0.41j], [-0.98 + \n 1.98j, -1.2 + 0.19j, -0.66 + 0.42j, -0.81 + 0.56j], [0.62 - 0.46j, 1.01 +\n 0.02j, 0.63 - 0.17j, -1.11 + 0.6j], [0.37 + 0.38j, 0.19 - 0.54j, -0.98 -\n 0.36j, 0.22 - 0.2j], [0.83 + 0.51j, 0.2 + 0.01j, -0.17 - 0.46j, 1.47 + \n 1.59j], [1.08 - 0.28j, 0.2 - 0.12j, -0.07 + 1.23j, 0.26 + 0.26j]]'], {}), '([[0.96 - 0.81j, -0.03 + 0.96j, -0.91 + 2.06j, -0.05 + 0.41j], [-\n 0.98 + 1.98j, -1.2 + 0.19j, -0.66 + 0.42j, -0.81 + 0.56j], [0.62 - \n 0.46j, 1.01 + 0.02j, 0.63 - 0.17j, -1.11 + 0.6j], [0.37 + 0.38j, 0.19 -\n 0.54j, -0.98 - 0.36j, 0.22 - 0.2j], [0.83 + 0.51j, 0.2 + 0.01j, -0.17 -\n 0.46j, 1.47 + 1.59j], [1.08 - 0.28j, 0.2 - 0.12j, -0.07 + 1.23j, 0.26 +\n 0.26j]])\n', (27280, 27658), True, 'import numpy as np\n'), ((27740, 27852), 'numpy.array', 'np.array', (['[[-2.54 + 0.09j], [1.65 - 2.26j], [-2.11 - 3.96j], [1.82 + 3.3j], [-6.41 + \n 3.77j], [2.07 + 0.66j]]'], {}), '([[-2.54 + 0.09j], [1.65 - 2.26j], [-2.11 - 3.96j], [1.82 + 3.3j],\n [-6.41 + 3.77j], [2.07 + 0.66j]])\n', (27748, 27852), True, 'import numpy as np\n'), ((27984, 28008), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'dtype'}), '(2, dtype=dtype)\n', (27992, 28008), True, 'import numpy as np\n'), ((28187, 28245), 
'numpy.array', 'np.array', (['[0.48904455, 0.99754786, 0.48904455, 0.99754786]'], {}), '([0.48904455, 0.99754786, 0.48904455, 0.99754786])\n', (28195, 28245), True, 'import numpy as np\n'), ((28382, 28502), 'numpy.array', 'np.array', (['[1.08742917 - 1.96205783j, -0.74093902 + 3.72973919j, 1.08742917 - \n 1.96205759j, -0.74093896 + 3.72973895j]'], {}), '([1.08742917 - 1.96205783j, -0.74093902 + 3.72973919j, 1.08742917 -\n 1.96205759j, -0.74093896 + 3.72973895j])\n', (28390, 28502), True, 'import numpy as np\n'), ((28904, 28948), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (['"""sytrf_lwork"""'], {'dtype': 'dtype'}), "('sytrf_lwork', dtype=dtype)\n", (28920, 28948), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((28979, 29028), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('sycon', 'sytrf')"], {'dtype': 'dtype'}), "(('sycon', 'sytrf'), dtype=dtype)\n", (28995, 29028), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((29136, 29180), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (['"""hetrf_lwork"""'], {'dtype': 'dtype'}), "('hetrf_lwork', dtype=dtype)\n", (29152, 29180), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((29211, 29260), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('hecon', 'hetrf')"], {'dtype': 'dtype'}), "(('hecon', 'hetrf'), dtype=dtype)\n", (29227, 29260), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((3508, 3543), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('lange',)", '(a1,)'], {}), "(('lange',), (a1,))\n", (3524, 3543), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((5262, 5326), 'numpy.array', 'np.array', (['[-14.333333333333323, 14.999999999999991]'], {'dtype': 'dtype'}), '([-14.333333333333323, 14.999999999999991], dtype=dtype)\n', (5270, 5326), True, 'import numpy as np\n'), ((6297, 6405), 'numpy.array', 'np.array', (['[1.161753632288328 - 1.901075709391912j, 1.735882340522193 + 
1.521240901196909j\n ]'], {'dtype': 'dtype'}), '([1.161753632288328 - 1.901075709391912j, 1.735882340522193 + \n 1.521240901196909j], dtype=dtype)\n', (6305, 6405), True, 'import numpy as np\n'), ((7226, 7239), 'numpy.real', 'np.real', (['work'], {}), '(work)\n', (7233, 7239), True, 'import numpy as np\n'), ((7428, 7492), 'numpy.array', 'np.array', (['[-14.333333333333323, 14.999999999999991]'], {'dtype': 'dtype'}), '([-14.333333333333323, 14.999999999999991], dtype=dtype)\n', (7436, 7492), True, 'import numpy as np\n'), ((7627, 7689), 'numpy.array', 'np.array', (['[12.596017180511966, 0.583396253199685]'], {'dtype': 'dtype'}), '([12.596017180511966, 0.583396253199685], dtype=dtype)\n', (7635, 7689), True, 'import numpy as np\n'), ((8451, 8464), 'numpy.real', 'np.real', (['work'], {}), '(work)\n', (8458, 8464), True, 'import numpy as np\n'), ((8729, 8837), 'numpy.array', 'np.array', (['[1.161753632288328 - 1.901075709391912j, 1.735882340522193 + 1.521240901196909j\n ]'], {'dtype': 'dtype'}), '([1.161753632288328 - 1.901075709391912j, 1.735882340522193 + \n 1.521240901196909j], dtype=dtype)\n', (8737, 8837), True, 'import numpy as np\n'), ((8984, 9046), 'numpy.array', 'np.array', (['[13.035514762572044, 4.337666985231382]'], {'dtype': 'dtype'}), '([13.035514762572044, 4.337666985231382], dtype=dtype)\n', (8992, 9046), True, 'import numpy as np\n'), ((9751, 9764), 'numpy.real', 'np.real', (['work'], {}), '(work)\n', (9758, 9764), True, 'import numpy as np\n'), ((9882, 9946), 'numpy.array', 'np.array', (['[-14.333333333333323, 14.999999999999991]'], {'dtype': 'dtype'}), '([-14.333333333333323, 14.999999999999991], dtype=dtype)\n', (9890, 9946), True, 'import numpy as np\n'), ((10081, 10143), 'numpy.array', 'np.array', (['[12.596017180511966, 0.583396253199685]'], {'dtype': 'dtype'}), '([12.596017180511966, 0.583396253199685], dtype=dtype)\n', (10089, 10143), True, 'import numpy as np\n'), ((10891, 10904), 'numpy.real', 'np.real', (['work'], {}), '(work)\n', (10898, 
10904), True, 'import numpy as np\n'), ((11050, 11158), 'numpy.array', 'np.array', (['[1.161753632288328 - 1.901075709391912j, 1.735882340522193 + 1.521240901196909j\n ]'], {'dtype': 'dtype'}), '([1.161753632288328 - 1.901075709391912j, 1.735882340522193 + \n 1.521240901196909j], dtype=dtype)\n', (11058, 11158), True, 'import numpy as np\n'), ((11314, 11376), 'numpy.array', 'np.array', (['[13.035514762572044, 4.337666985231382]'], {'dtype': 'dtype'}), '([13.035514762572044, 4.337666985231382], dtype=dtype)\n', (11322, 11376), True, 'import numpy as np\n'), ((12133, 12146), 'numpy.real', 'np.real', (['work'], {}), '(work)\n', (12140, 12146), True, 'import numpy as np\n'), ((12383, 12447), 'numpy.array', 'np.array', (['[-14.333333333333323, 14.999999999999991]'], {'dtype': 'dtype'}), '([-14.333333333333323, 14.999999999999991], dtype=dtype)\n', (12391, 12447), True, 'import numpy as np\n'), ((13219, 13232), 'numpy.real', 'np.real', (['work'], {}), '(work)\n', (13226, 13232), True, 'import numpy as np\n'), ((13497, 13605), 'numpy.array', 'np.array', (['[1.161753632288328 - 1.901075709391912j, 1.735882340522193 + 1.521240901196909j\n ]'], {'dtype': 'dtype'}), '([1.161753632288328 - 1.901075709391912j, 1.735882340522193 + \n 1.521240901196909j], dtype=dtype)\n', (13505, 13605), True, 'import numpy as np\n'), ((14141, 14173), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["['orgrq']", '[a]'], {}), "(['orgrq'], [a])\n", (14157, 14173), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((14190, 14244), 'pytest.raises', 'assert_raises', (['Exception', 'orgrq', 'rq[-2:]', 'tau'], {'lwork': '(1)'}), '(Exception, orgrq, rq[-2:], tau, lwork=1)\n', (14203, 14244), True, 'from pytest import raises as assert_raises\n'), ((14697, 14715), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (14711, 14715), True, 'import numpy as np\n'), ((14736, 14765), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', 
(14752, 14765), True, 'import numpy as np\n'), ((14831, 14873), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["('potrf', 'potri')", '(a,)'], {}), "(('potrf', 'potri'), (a,))\n", (14847, 14873), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((21779, 21802), 'numpy.triu_indices_from', 'np.triu_indices_from', (['A'], {}), '(A)\n', (21799, 21802), True, 'import numpy as np\n'), ((22283, 22293), 'numpy.diag', 'np.diag', (['A'], {}), '(A)\n', (22290, 22293), True, 'import numpy as np\n'), ((22941, 22965), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (22949, 22965), True, 'import numpy as np\n'), ((23123, 23135), 'numpy.dot', 'np.dot', (['H', 'Q'], {}), '(H, Q)\n', (23129, 23135), True, 'import numpy as np\n'), ((23294, 23306), 'numpy.dot', 'np.dot', (['A', 'Q'], {}), '(A, Q)\n', (23300, 23306), True, 'import numpy as np\n'), ((24381, 24404), 'numpy.triu_indices_from', 'np.triu_indices_from', (['A'], {}), '(A)\n', (24401, 24404), True, 'import numpy as np\n'), ((24426, 24478), 'numpy.arange', 'np.arange', (['(1)', '(n * (n + 1) // 2 + 1)'], {'dtype': 'real_dtype'}), '(1, n * (n + 1) // 2 + 1, dtype=real_dtype)\n', (24435, 24478), True, 'import numpy as np\n'), ((25737, 25769), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'complex_dtype'}), '(n, dtype=complex_dtype)\n', (25745, 25769), True, 'import numpy as np\n'), ((25966, 25978), 'numpy.dot', 'np.dot', (['H', 'Q'], {}), '(H, Q)\n', (25972, 25978), True, 'import numpy as np\n'), ((26141, 26153), 'numpy.conj', 'np.conj', (['Q.T'], {}), '(Q.T)\n', (26148, 26153), True, 'import numpy as np\n'), ((26155, 26167), 'numpy.dot', 'np.dot', (['A', 'Q'], {}), '(A, Q)\n', (26161, 26167), True, 'import numpy as np\n'), ((29432, 29454), 'numpy.eye', 'np.eye', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (29438, 29454), True, 'import numpy as np\n'), ((2624, 2637), 'numpy.dot', 'np.dot', (['a1', 'x'], {}), '(a1, x)\n', (2630, 2637), True, 'import numpy as np\n'), 
((2640, 2653), 'numpy.dot', 'np.dot', (['x', 'b1'], {}), '(x, b1)\n', (2646, 2653), True, 'import numpy as np\n'), ((3035, 3048), 'numpy.dot', 'np.dot', (['a1', 'x'], {}), '(a1, x)\n', (3041, 3048), True, 'import numpy as np\n'), ((3051, 3064), 'numpy.dot', 'np.dot', (['x', 'b1'], {}), '(x, b1)\n', (3057, 3064), True, 'import numpy as np\n'), ((3841, 3881), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['value', 'ref', 'decimal'], {}), '(value, ref, decimal)\n', (3860, 3881), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((4220, 4244), 'numpy.testing.assert_equal', 'assert_equal', (['value', 'ref'], {}), '(value, ref)\n', (4232, 4244), False, 'from numpy.testing import assert_equal, assert_array_almost_equal, assert_, assert_allclose, assert_almost_equal, assert_array_equal\n'), ((12265, 12280), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (12273, 12280), True, 'import numpy as np\n'), ((13351, 13366), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (13359, 13366), True, 'import numpy as np\n'), ((14357, 14389), 'scipy.linalg.lapack.get_lapack_funcs', 'get_lapack_funcs', (["['ungrq']", '[a]'], {}), "(['ungrq'], [a])\n", (14373, 14389), False, 'from scipy.linalg.lapack import get_lapack_funcs\n'), ((14406, 14460), 'pytest.raises', 'assert_raises', (['Exception', 'ungrq', 'rq[-2:]', 'tau'], {'lwork': '(1)'}), '(Exception, ungrq, rq[-2:], tau, lwork=1)\n', (14419, 14460), True, 'from pytest import raises as assert_raises\n'), ((23051, 23076), 'numpy.eye', 'np.eye', (['n', 'n'], {'dtype': 'dtype'}), '(n, n, dtype=dtype)\n', (23057, 23076), True, 'import numpy as np\n'), ((24494, 24546), 'numpy.arange', 'np.arange', (['(1)', '(n * (n + 1) // 2 + 1)'], {'dtype': 'real_dtype'}), '(1, n * (n + 1) // 2 + 1, dtype=real_dtype)\n', (24503, 24546), True, 'import numpy as np\n'), ((24597, 24607), 'numpy.diag', 'np.diag', (['A'], {}), '(A)\n', 
(24604, 24607), True, 'import numpy as np\n'), ((25043, 25053), 'numpy.diag', 'np.diag', (['A'], {}), '(A)\n', (25050, 25053), True, 'import numpy as np\n'), ((25855, 25888), 'numpy.eye', 'np.eye', (['n', 'n'], {'dtype': 'complex_dtype'}), '(n, n, dtype=complex_dtype)\n', (25861, 25888), True, 'import numpy as np\n'), ((29046, 29056), 'numpy.random.rand', 'rand', (['n', 'n'], {}), '(n, n)\n', (29050, 29056), False, 'from numpy.random import rand, seed\n'), ((12088, 12103), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (12096, 12103), True, 'import numpy as np\n'), ((13174, 13189), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (13182, 13189), True, 'import numpy as np\n'), ((15037, 15049), 'numpy.tril', 'np.tril', (['dpt'], {}), '(dpt)\n', (15044, 15049), True, 'import numpy as np\n'), ((15126, 15138), 'numpy.triu', 'np.triu', (['dpt'], {}), '(dpt)\n', (15133, 15138), True, 'import numpy as np\n'), ((15350, 15371), 'numpy.diag', 'np.diag', (['sigmas[0:-1]'], {}), '(sigmas[0:-1])\n', (15357, 15371), True, 'import numpy as np\n'), ((16172, 16187), 'numpy.isnan', 'np.isnan', (['roots'], {}), '(roots)\n', (16180, 16187), True, 'import numpy as np\n'), ((16258, 16278), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (16266, 16278), True, 'import numpy as np\n'), ((16317, 16337), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (16325, 16337), True, 'import numpy as np\n'), ((17094, 17109), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (17102, 17109), True, 'import numpy as np\n'), ((23088, 23102), 'numpy.outer', 'np.outer', (['v', 'v'], {}), '(v, v)\n', (23096, 23102), True, 'import numpy as np\n'), ((29278, 29288), 'numpy.random.rand', 'rand', (['n', 'n'], {}), '(n, n)\n', (29282, 29288), False, 'from numpy.random import rand, seed\n'), ((3978, 3988), 'numpy.abs', 'np.abs', (['a1'], {}), '(a1)\n', (3984, 3988), True, 'import numpy as np\n'), ((5455, 5470), 'numpy.finfo', 'np.finfo', (['dtype'], {}), 
'(dtype)\n', (5463, 5470), True, 'import numpy as np\n'), ((6472, 6487), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (6480, 6487), True, 'import numpy as np\n'), ((7575, 7590), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (7583, 7590), True, 'import numpy as np\n'), ((7768, 7783), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (7776, 7783), True, 'import numpy as np\n'), ((8904, 8919), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (8912, 8919), True, 'import numpy as np\n'), ((9093, 9108), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (9101, 9108), True, 'import numpy as np\n'), ((10029, 10044), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (10037, 10044), True, 'import numpy as np\n'), ((10222, 10237), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (10230, 10237), True, 'import numpy as np\n'), ((11262, 11277), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (11270, 11277), True, 'import numpy as np\n'), ((11455, 11470), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (11463, 11470), True, 'import numpy as np\n'), ((12530, 12545), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (12538, 12545), True, 'import numpy as np\n'), ((13709, 13724), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (13717, 13724), True, 'import numpy as np\n'), ((15059, 15065), 'scipy.linalg.inv', 'inv', (['a'], {}), '(a)\n', (15062, 15065), False, 'from scipy.linalg import inv\n'), ((15148, 15154), 'scipy.linalg.inv', 'inv', (['a'], {}), '(a)\n', (15151, 15154), False, 'from scipy.linalg import inv\n'), ((22221, 22236), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (22229, 22236), True, 'import numpy as np\n'), ((23452, 23467), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (23460, 23467), True, 'import numpy as np\n'), ((24967, 24987), 'numpy.finfo', 'np.finfo', (['real_dtype'], {}), '(real_dtype)\n', (24975, 24987), True, 'import numpy as np\n'), ((25934, 
25944), 'numpy.conj', 'np.conj', (['v'], {}), '(v)\n', (25941, 25944), True, 'import numpy as np\n'), ((26331, 26351), 'numpy.finfo', 'np.finfo', (['real_dtype'], {}), '(real_dtype)\n', (26339, 26351), True, 'import numpy as np\n'), ((29291, 29301), 'numpy.random.rand', 'rand', (['n', 'n'], {}), '(n, n)\n', (29295, 29301), False, 'from numpy.random import rand, seed\n'), ((29729, 29751), 'numpy.linalg.cond', 'np.linalg.cond', (['A'], {'p': '(1)'}), '(A, p=1)\n', (29743, 29751), True, 'import numpy as np\n'), ((3807, 3817), 'numpy.abs', 'np.abs', (['a1'], {}), '(a1)\n', (3813, 3817), True, 'import numpy as np\n'), ((4074, 4084), 'numpy.abs', 'np.abs', (['a1'], {}), '(a1)\n', (4080, 4084), True, 'import numpy as np\n'), ((15706, 15724), 'numpy.power', 'np.power', (['m_vec', '(2)'], {}), '(m_vec, 2)\n', (15714, 15724), True, 'import numpy as np\n'), ((4178, 4188), 'numpy.abs', 'np.abs', (['a1'], {}), '(a1)\n', (4184, 4188), True, 'import numpy as np\n')]
|
import pathlib
import subprocess as sp
import tempfile
def blacking(source_code: str) -> str:
    """Format Python source code with ``black`` and return the result.

    The previous implementation wrote the source to a ``delete=False``
    temp file, piped it through an external ``cat`` process into black,
    and only unlinked the temp file on the success path — leaking it
    whenever black failed, and depending on the non-portable ``cat``
    binary.  Feeding the source to black's stdin directly avoids the
    temp file and the extra process entirely.

    Parameters
    ----------
    source_code:
        The Python source text to format.

    Returns
    -------
    str
        The black-formatted source.

    Raises
    ------
    subprocess.CalledProcessError
        If ``black`` exits with a non-zero status.
    """
    result = sp.run(
        "black -q -".split(),
        input=source_code.encode(),
        stdout=sp.PIPE,
        check=True,
    )
    return result.stdout.decode()
def isorting(source_code: str) -> str:
    """Sort the imports of Python source code with ``isort``.

    Mirrors :func:`blacking`: instead of round-tripping through a
    leaked ``delete=False`` temp file and an external ``cat`` process,
    the source is written straight to isort's stdin.

    Parameters
    ----------
    source_code:
        The Python source text whose imports should be sorted.

    Returns
    -------
    str
        The isort-processed source.

    Raises
    ------
    subprocess.CalledProcessError
        If ``isort`` exits with a non-zero status.
    """
    result = sp.run(
        "isort -q -".split(),
        input=source_code.encode(),
        stdout=sp.PIPE,
        check=True,
    )
    return result.stdout.decode()
|
[
"tempfile.NamedTemporaryFile",
"pathlib.Path"
] |
[((99, 145), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w"""'], {'delete': '(False)'}), "('w', delete=False)\n", (126, 145), False, 'import tempfile\n'), ((500, 546), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w"""'], {'delete': '(False)'}), "('w', delete=False)\n", (527, 546), False, 'import tempfile\n'), ((359, 378), 'pathlib.Path', 'pathlib.Path', (['fname'], {}), '(fname)\n', (371, 378), False, 'import pathlib\n'), ((760, 779), 'pathlib.Path', 'pathlib.Path', (['fname'], {}), '(fname)\n', (772, 779), False, 'import pathlib\n')]
|
# Copyright MelisaDev 2022 - Present
# Full MIT License can be found in `LICENSE.txt` at the project root.
from __future__ import annotations
from enum import IntEnum
from dataclasses import dataclass
from typing import Optional, Dict, Any
from ...utils.conversion import try_enum
from ...utils.api_model import APIModelBase
from ...utils.types import APINullable, UNDEFINED
from ...utils.snowflake import Snowflake
class PremiumTypes(IntEnum):
    """Level of Discord Nitro subscription attached to a user account.

    Attributes
    ----------
    NONE:
        No Nitro subscription of any kind.
    NITRO_CLASSIC:
        Nitro Classic (the non-boost tier).
    NITRO:
        The full Nitro subscription.
    """

    NONE = 0
    NITRO_CLASSIC = 1
    NITRO = 2

    def __int__(self):
        # Expose the raw integer for callers that do int(member) explicitly.
        return self.value
class UserFlags(IntEnum):
    """Bit values describing the badges shown on a Discord user profile.

    Attributes
    ----------
    NONE:
        No flags set.
    STAFF:
        Discord employee.
    PARTNER:
        Owner of a partnered server.
    HYPESQUAD:
        HypeSquad events coordinator.
    BUG_HUNTER_LEVEL_1:
        Bug hunter (level 1).
    HYPESQUAD_ONLINE_HOUSE_1:
        Member of HypeSquad House Bravery.
    HYPESQUAD_ONLINE_HOUSE_2:
        Member of HypeSquad House Brilliance.
    HYPESQUAD_ONLINE_HOUSE_3:
        Member of HypeSquad House Balance.
    PREMIUM_EARLY_SUPPORTER:
        Early supporter of Discord Nitro.
    TEAM_PSEUDO_USER:
        The "user" actually represents a team.
    BUG_HUNTER_LEVEL_2:
        Bug hunter (level 2).
    VERIFIED_BOT:
        Verified bot account.
    VERIFIED_DEVELOPER:
        Early developer of a verified bot.
    CERTIFIED_MODERATOR:
        Discord certified moderator.
    BOT_HTTP_INTERACTIONS:
        Bot that only uses HTTP interactions and is shown in the
        online member list.
    """

    NONE = 0
    STAFF = 1 << 0
    PARTNER = 1 << 1
    HYPESQUAD = 1 << 2
    BUG_HUNTER_LEVEL_1 = 1 << 3
    HYPESQUAD_ONLINE_HOUSE_1 = 1 << 6
    HYPESQUAD_ONLINE_HOUSE_2 = 1 << 7
    HYPESQUAD_ONLINE_HOUSE_3 = 1 << 8
    PREMIUM_EARLY_SUPPORTER = 1 << 9
    TEAM_PSEUDO_USER = 1 << 10
    BUG_HUNTER_LEVEL_2 = 1 << 14
    VERIFIED_BOT = 1 << 16
    VERIFIED_DEVELOPER = 1 << 17
    CERTIFIED_MODERATOR = 1 << 18
    BOT_HTTP_INTERACTIONS = 1 << 19

    def __int__(self):
        # Expose the raw bit value for callers that do int(member).
        return self.value
class VisibilityTypes(IntEnum):
    """Who may see a connection on a user's profile.

    Attributes
    ----------
    NONE:
        Invisible to everyone except the user themselves.
    EVERYONE:
        Visible to everyone.
    """

    NONE = 0
    EVERYONE = 1

    def __int__(self):
        # Expose the raw integer for callers that do int(member).
        return self.value
@dataclass(repr=False)
class User(APIModelBase):
    """Represents a Discord user account.

    Attributes
    ----------
    id: :class:`~melisa.utils.types.Snowflake`
        the user's id
    username: :class:`str`
        the user's username, not unique across the platform
    discriminator: :class:`str`
        the user's 4-digit discord-tag
    avatar: Optional[:class:`str`]
        the user's avatar hash
    bot: APINullable[:class:`bool`]
        whether the user belongs to an OAuth2 application
    system: APINullable[:class:`bool`]
        whether the user is an Official Discord System user (part of the urgent message system)
    mfa_enabled: APINullable[:class:`bool`]
        whether the user has two factor enabled on their account
    banner: APINullable[:class:`str`]
        the user's banner hash
    accent_color: APINullable[:class:`int`]
        the user's banner color encoded as an integer representation of hexadecimal color code
    local: APINullable[:class:`str`]
        the user's chosen language option (attribute keeps its historical
        misspelled name for backward compatibility; filled from the
        API's ``locale`` field)
    verified: APINullable[:class:`bool`]
        whether the email on this account has been verified
    email: APINullable[:class:`str`]
        the user's email
    premium_type: APINullable[:class:`int`]
        the type of Nitro subscription on a user's account
    public_flags: APINullable[:class:`int`]
        the public flags on a user's account
    premium: APINullable[:class:`PremiumTypes`]
        The user their premium type in a usable enum.
    flags: APINullable[:class:`UserFlags`]
        The user's public flags in a usable enum.
    """

    id: APINullable[Snowflake] = UNDEFINED
    username: APINullable[str] = UNDEFINED
    discriminator: APINullable[str] = UNDEFINED
    avatar: APINullable[str] = UNDEFINED
    bot: APINullable[bool] = UNDEFINED
    system: APINullable[bool] = UNDEFINED
    mfa_enabled: APINullable[bool] = UNDEFINED
    banner: APINullable[str] = UNDEFINED
    accent_color: APINullable[int] = UNDEFINED
    local: APINullable[str] = UNDEFINED
    verified: APINullable[bool] = UNDEFINED
    email: APINullable[str] = UNDEFINED
    premium_type: APINullable[int] = UNDEFINED
    public_flags: APINullable[int] = UNDEFINED

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> User:
        """Generate a user from the given data.

        Parameters
        ----------
        data: :class:`dict`
            The dictionary to convert into a user.
        """
        self: User = super().__new__(cls)

        self.id = int(data["id"])
        self.username = data.get("username")
        self.discriminator = data.get("discriminator")
        self.avatar = data.get("avatar")
        self.bot = data.get("bot", False)
        self.system = data.get("system", False)
        # FIX: the payload key is "mfa_enabled" (was misspelled "mfa_enable")
        self.mfa_enabled = data.get("mfa_enabled", False)
        self.banner = data.get("banner")
        self.accent_color = data.get("accent_color")
        # FIX: the API sends "locale"; fall back to the old "local" key so
        # callers feeding the historical shape keep working.
        self.local = data.get("locale", data.get("local"))
        self.verified = data.get("verified", False)
        self.email = data.get("email")
        self.premium_type = try_enum(PremiumTypes, data.get("premium_type"))
        self.public_flags = try_enum(UserFlags, data.get("public_flags"))

        return self

    @property
    def premium(self) -> Optional[PremiumTypes]:
        """Optional[:class:`PremiumTypes`]: The user's Nitro tier, if known."""
        # Guard both None and the UNDEFINED sentinel used as field default.
        if self.premium_type is None or self.premium_type is UNDEFINED:
            return None
        return PremiumTypes(self.premium_type)

    @property
    def flags(self) -> Optional[UserFlags]:
        """Optional[:class:`UserFlags`]: The user's public flags, if known.

        FIX: the previous implementation read ``self.flags`` inside this
        property, recursing forever.  No raw ``flags`` field is stored,
        so the value is derived from ``public_flags``.
        """
        if self.public_flags is None or self.public_flags is UNDEFINED:
            return None
        return try_enum(UserFlags, int(self.public_flags))

    def __str__(self):
        """String representation: ``username#discriminator``."""
        return self.username + "#" + self.discriminator

    @property
    def mention(self):
        """:class:`str`: The user's mention string. (<@id>)"""
        return "<@{}>".format(self.id)

    def avatar_url(self) -> str:
        """Avatar url (from the Discord CDN server)"""
        return "https://cdn.discordapp.com/avatars/{}/{}.png?size=1024".format(
            self.id, self.avatar
        )

    async def create_dm_channel(self):
        """Open a DM channel with this user via the HTTP API.

        NOTE(review): the result is not cached yet, so every call hits
        the API; add cache lookup before the request.
        """
        return await self._http.post(
            "/users/@me/channels", data={"recipient_id": self.id}
        )
|
[
"dataclasses.dataclass"
] |
[((2507, 2528), 'dataclasses.dataclass', 'dataclass', ([], {'repr': '(False)'}), '(repr=False)\n', (2516, 2528), False, 'from dataclasses import dataclass\n')]
|
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from werkzeug.exceptions import (
NotFound,
MethodNotAllowed,
HTTPVersionNotSupported,
)
import re
from app import app
from app.helpers.errors import EmailAlreadyRegisteredError, MobilicError
from app.helpers.mail import InvalidEmailAddressError
from config import MOBILIC_ENV
from app.helpers.livestorm import NoLivestormCredentialsError
# Exception types that must never be reported to Sentry: routine HTTP
# errors plus expected, user-facing application errors.
FILTER_OUT_ERRORS = [
    NotFound,
    MethodNotAllowed,
    HTTPVersionNotSupported,
    EmailAlreadyRegisteredError,
    InvalidEmailAddressError,
    NoLivestormCredentialsError,
]

# MobilicError instances whose message matches any of these patterns are
# also dropped (e.g. failed login attempts are expected noise, not bugs).
FILTER_OUT_RE_FOR_MOBILIC_ERRORS = [
    re.compile(r"^Wrong email/password combination")
]
def filter_errors(event, hint):
    """Sentry ``before_send`` hook that drops expected errors.

    Parameters
    ----------
    event:
        The Sentry event about to be sent.
    hint:
        Extra context; ``hint["exc_info"]`` holds the
        ``(type, value, traceback)`` triple when the event was produced
        by an exception.

    Returns
    -------
    The event to forward to Sentry, or ``None`` to drop it.
    """
    if "exc_info" in hint:
        exc_type, exc_value, tb = hint["exc_info"]
        # issubclass accepts a tuple of classes directly — no need to
        # build a throwaway list of booleans inside any().
        if issubclass(exc_type, tuple(FILTER_OUT_ERRORS)):
            return None
        # Drop MobilicErrors whose message matches a known-benign
        # pattern (lazy generator instead of a materialized list).
        if issubclass(exc_type, MobilicError) and any(
            regexp.search(exc_value.message) is not None
            for regexp in FILTER_OUT_RE_FOR_MOBILIC_ERRORS
        ):
            return None
    return event
def setup_sentry():
    """Initialise the Sentry SDK for this Flask application.

    Configures the DSN from the app config, enables the Flask
    integration, tags events with the current environment, and installs
    the ``filter_errors`` hook so expected errors are never reported.
    """
    options = {
        "dsn": app.config["SENTRY_URL"],
        "integrations": [FlaskIntegration()],
        "environment": MOBILIC_ENV,
        "before_send": filter_errors,
    }
    sentry_sdk.init(**options)
|
[
"sentry_sdk.integrations.flask.FlaskIntegration",
"re.compile"
] |
[((659, 706), 're.compile', 're.compile', (['"""^Wrong email/password combination"""'], {}), "('^Wrong email/password combination')\n", (669, 706), False, 'import re\n'), ((1390, 1408), 'sentry_sdk.integrations.flask.FlaskIntegration', 'FlaskIntegration', ([], {}), '()\n', (1406, 1408), False, 'from sentry_sdk.integrations.flask import FlaskIntegration\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 15:21:02 2018
@author: zbj
"""
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn import metrics
from xgboost import XGBClassifier
import pandas as pd
from sklearn.metrics import accuracy_score
## Full sample: load the 360finance training data from disk.
root = "E:\\liuhongbing\\360finance\\open_data_train_valid1\\open_data_train_valid\\train\\";
train_by_zero = pd.read_table(root+'train_sample.txt', sep='\t')
# Label column plus feature columns 3..5080.
Y = train_by_zero['tag']
X = train_by_zero.iloc[:,3:5081]
# 70/30 train/test split with a fixed seed for reproducibility.
train_x, test_x, train_y, test_y = train_test_split(X, Y, test_size=0.3, random_state=0)
#
#tuned_parameters= [{'n_estimators':[100,200,500],
#                    'max_depth':[3,5,7],  ##range(3,10,2)
#                    'learning_rate':[0.5, 1.0],
#                    'subsample':[0.75,0.8,0.85,0.9]
#                    }]
# Grid-search only over the number of boosting rounds.
tuned_parameters= [{'n_estimators':[100,200,500,1000]
                    }]
# Fixed XGBoost hyper-parameters shared by every grid-search candidate.
train_param = {'max_depth':6,  # tree depth
               'learning_rate':0.3, # learning rate
               'nthread':4,
               'min_child_weight':1,  # minimum leaf-node weight
               'gamma':0.1,
               'subsample':1,  # train on all samples (no row subsampling)
               'reg_lambda':1,   # L2 regularisation
               'colsample_btree':0.8,  # 80% of the features per tree
               'n_estimators':100,
               'scale_pos_weight':2, ## positive/negative class weight ratio
               'seed':100
               }
# NOTE(review): GridSearchCV's `iid` argument was removed in scikit-learn 0.24
# — confirm the pinned scikit-learn version still accepts it.
clf = GridSearchCV(XGBClassifier(**train_param),
                   param_grid=tuned_parameters,
                   scoring='roc_auc',
                   n_jobs=4,
                   iid=False,
                   cv=5)
clf.fit(train_x, train_y)
##clf.grid_scores_, clf.best_params_, clf.best_score_
print(clf.best_params_)
# Evaluate the best estimator on the held-out split.
pred_y = clf.predict(test_x)
accuracy = accuracy_score(test_y, pred_y)
# NOTE(review): "accuarcy" is a typo in the output string.
print("accuarcy: %.2f%%" % (accuracy*100.0))
y_proba=clf.predict_proba(test_x)[:,1]
print("AUC Score (Train): %f" % metrics.roc_auc_score(test_y, y_proba))
|
[
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.roc_auc_score",
"xgboost.XGBClassifier",
"pandas.read_table"
] |
[((429, 479), 'pandas.read_table', 'pd.read_table', (["(root + 'train_sample.txt')"], {'sep': '"""\t"""'}), "(root + 'train_sample.txt', sep='\\t')\n", (442, 479), True, 'import pandas as pd\n'), ((571, 624), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, Y, test_size=0.3, random_state=0)\n', (587, 624), False, 'from sklearn.model_selection import train_test_split\n'), ((1794, 1824), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_y', 'pred_y'], {}), '(test_y, pred_y)\n', (1808, 1824), False, 'from sklearn.metrics import accuracy_score\n'), ((1445, 1473), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {}), '(**train_param)\n', (1458, 1473), False, 'from xgboost import XGBClassifier\n'), ((1943, 1981), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['test_y', 'y_proba'], {}), '(test_y, y_proba)\n', (1964, 1981), False, 'from sklearn import metrics\n')]
|
import json
import sys
sys.path.append('.')
import argparse
import logging
from tqdm import tqdm
from easydict import EasyDict as edict
from evaluate_metric import TransformationLoss, ClassificationLoss
from dataset import ThreeDLoMatchLoader
from benchmark_utils import set_seed, icp_refine
from benchmark_utils_predator import *
from utils.timer import Timer
from SC2_PCR import Matcher
# Fix all RNG seeds up front so the evaluation is reproducible.
set_seed()
from utils.SE3 import *
from collections import defaultdict
def eval_3DLoMatch_scene(loader, matcher, trans_evaluator, cls_evaluator, scene_ind, config):
    """Run registration on every pair of one scene and collect statistics.

    Args:
        loader: dataset loader providing keypoints/features/GT transform per pair.
        matcher: SC2-PCR estimator producing a rigid transform and inlier labels.
        trans_evaluator: computes registration recall / RE / TE per pair.
        cls_evaluator: scores predicted inlier labels against ground truth.
        scene_ind: scene index recorded into the stats array.
        config: evaluation configuration (this function uses ``inlier_threshold``).

    Returns:
        (stats, final_poses): per-pair statistics of shape [N, 12] and the
        estimated 4x4 poses of shape [N, 4, 4].
    """
    num_pair = loader.__len__()
    final_poses = np.zeros([num_pair, 4, 4])
    # 0.success, 1.RE, 2.TE, 3.input inlier number, 4.input inlier ratio, 5. output inlier number
    # 6. output inlier precision, 7. output inlier recall, 8. output inlier F1 score 9. model_time, 10. data_time 11. scene_ind
    stats = np.zeros([num_pair, 12])
    data_timer, model_timer = Timer(), Timer()
    # Inference only — no gradients needed.
    with torch.no_grad():
        # NOTE(review): error_pair is never appended to, so the print below
        # always emits an empty list — looks like leftover debugging code.
        error_pair = []
        for i in tqdm(range(num_pair)):
            #################################
            # 1. load data
            #################################
            data_timer.tic()
            src_keypts, tgt_keypts, src_features, tgt_features, gt_trans = loader.get_data(i)
            data_time = data_timer.toc()
            #################################
            # 2. match descriptor and compute rigid transformation
            #################################
            model_timer.tic()
            pred_trans, pred_labels, src_keypts_corr, tgt_keypts_corr = matcher.estimator(src_keypts, tgt_keypts,
                                                                                           src_features, tgt_features)
            model_time = model_timer.toc()
            #################################
            # 3. generate the ground-truth classification result
            #################################
            # A correspondence is a GT inlier when the warped source point lies
            # within config.inlier_threshold of its target point.
            frag1_warp = transform(src_keypts_corr, gt_trans)
            distance = torch.sum((frag1_warp - tgt_keypts_corr) ** 2, dim=-1) ** 0.5
            gt_labels = (distance < config.inlier_threshold).float()
            #################################
            # 4. evaluate result
            #################################
            loss, recall, Re, Te, rmse = trans_evaluator(pred_trans, gt_trans, src_keypts_corr, tgt_keypts_corr,
                                                          pred_labels)
            class_stats = cls_evaluator(pred_labels, gt_labels)
            #################################
            # record the evaluation results.
            #################################
            # save statistics
            stats[i, 0] = float(recall / 100.0)  # success
            stats[i, 1] = float(Re)  # Re (deg)
            stats[i, 2] = float(Te)  # Te (cm)
            stats[i, 3] = int(torch.sum(gt_labels))  # input inlier number
            stats[i, 4] = float(torch.mean(gt_labels.float()))  # input inlier ratio
            stats[i, 5] = int(torch.sum(gt_labels[pred_labels > 0]))  # output inlier number
            stats[i, 6] = float(class_stats['precision'])  # output inlier precision
            stats[i, 7] = float(class_stats['recall'])  # output inlier recall
            stats[i, 8] = float(class_stats['f1'])  # output inlier f1 score
            stats[i, 9] = model_time
            stats[i, 10] = data_time
            stats[i, 11] = scene_ind
            final_poses[i] = pred_trans[0].detach().cpu().numpy()
        print(error_pair)
    return stats, final_poses
def eval_3DLoMatch(config):
    """Evaluate SC2-PCR on the 3DLoMatch benchmark and log summary metrics.

    Builds the data loader, matcher and evaluators from ``config``, runs the
    per-pair evaluation, then logs aggregate registration statistics.

    Returns:
        The per-pair statistics array produced by :func:`eval_3DLoMatch_scene`.
    """
    loader = ThreeDLoMatchLoader(root=config.data_path,
                                descriptor=config.descriptor,
                                inlier_threshold=config.inlier_threshold,
                                num_node=config.num_node,
                                use_mutual=config.use_mutual,
                                )
    matcher = Matcher(inlier_threshold=config.inlier_threshold,
                      num_node=config.num_node,
                      use_mutual=config.use_mutual,
                      d_thre=config.d_thre,
                      num_iterations=config.num_iterations,
                      ratio=config.ratio,
                      nms_radius=config.nms_radius,
                      max_points=config.max_points,
                      k1=config.k1,
                      k2=config.k2, )
    trans_evaluator = TransformationLoss(re_thre=config.re_thre, te_thre=config.te_thre)
    cls_evaluator = ClassificationLoss()
    allpair_stats, allpair_poses = eval_3DLoMatch_scene(loader, matcher, trans_evaluator, cls_evaluator, 0, config)
    allpair_average = allpair_stats.mean(0)
    # NOTE(review): allpair_status_ndarray is never used, and allpair_average
    # is recomputed identically below — candidates for cleanup.
    allpair_status_ndarray = np.array(allpair_stats, dtype=float)
    # Predator-style benchmark (registration recall per scene).
    benchmark_predator(allpair_poses, gt_folder='benchmarks/3DLoMatch')
    # benchmarking using the registration recall defined in DGR
    allpair_average = allpair_stats.mean(0)
    correct_pair_average = allpair_stats[allpair_stats[:, 0] == 1].mean(0)
    logging.info(f"*" * 40)
    logging.info(f"All {allpair_stats.shape[0]} pairs, Mean Reg Recall={allpair_average[0] * 100:.2f}%, Mean Re={correct_pair_average[1]:.2f}, Mean Te={correct_pair_average[2]:.2f}")
    logging.info(f"\tInput: Mean Inlier Num={allpair_average[3]:.2f}(ratio={allpair_average[4] * 100:.2f}%)")
    logging.info(f"\tOutput: Mean Inlier Num={allpair_average[5]:.2f}(precision={allpair_average[6] * 100:.2f}%, recall={allpair_average[7] * 100:.2f}%, f1={allpair_average[8] * 100:.2f}%)")
    logging.info(f"\tMean model time: {allpair_average[9]:.2f}s, Mean data time: {allpair_average[10]:.2f}s")
    # all_stats_npy = np.concatenate([v for k, v in all_stats.items()], axis=0)
    return allpair_stats
def benchmark_predator(pred_poses, gt_folder):
    """Score estimated poses with the Predator/3DMatch trajectory benchmark.

    Args:
        pred_poses: estimated 4x4 poses for all pairs, concatenated in the
            same scene order as the sorted ground-truth folders.
        gt_folder: directory containing one sub-folder per scene with
            ``gt.log`` / ``gt.info`` trajectory files.

    Logs per-scene precision/recall and rotation/translation errors, then
    mean and n_valid-weighted summary statistics.
    """
    scenes = sorted(os.listdir(gt_folder))
    scene_names = [os.path.join(gt_folder,ele) for ele in scenes]
    re_per_scene = defaultdict(list)
    te_per_scene = defaultdict(list)
    re_all, te_all, precision, recall = [], [], [], []
    n_valids= []
    short_names=['Kitchen','Home 1','Home 2','Hotel 1','Hotel 2','Hotel 3','Study','MIT Lab']
    logging.info(("Scene\t¦ prec.\t¦ rec.\t¦ re\t¦ te\t¦ samples\t¦"))
    start_ind = 0
    for idx,scene in enumerate(scene_names):
        # ground truth info
        gt_pairs, gt_traj = read_trajectory(os.path.join(scene, "gt.log"))
        # Count non-consecutive fragment pairs (the "valid" pairs used for
        # the weighted precision below).
        n_valid=0
        for ele in gt_pairs:
            diff=abs(int(ele[0])-int(ele[1]))
            n_valid+=diff>1
        n_valids.append(n_valid)
        n_fragments, gt_traj_cov = read_trajectory_info(os.path.join(scene,"gt.info"))
        # estimated info
        # est_pairs, est_traj = read_trajectory(os.path.join(est_folder,scenes[idx],'est.log'))
        # Slice this scene's estimated poses out of the concatenated array.
        est_traj = pred_poses[start_ind:start_ind + len(gt_pairs)]
        start_ind = start_ind + len(gt_pairs)
        temp_precision, temp_recall,c_flag = evaluate_registration(n_fragments, est_traj, gt_pairs, gt_pairs, gt_traj, gt_traj_cov)
        # Filter out the estimated rotation matrices
        ext_gt_traj = extract_corresponding_trajectors(gt_pairs,gt_pairs, gt_traj)
        # Rotation/translation errors, keeping only pairs with c_flag == 0.
        re = rotation_error(torch.from_numpy(ext_gt_traj[:,0:3,0:3]), torch.from_numpy(est_traj[:,0:3,0:3])).cpu().numpy()[np.array(c_flag)==0]
        te = translation_error(torch.from_numpy(ext_gt_traj[:,0:3,3:4]), torch.from_numpy(est_traj[:,0:3,3:4])).cpu().numpy()[np.array(c_flag)==0]
        re_per_scene['mean'].append(np.mean(re))
        re_per_scene['median'].append(np.median(re))
        re_per_scene['min'].append(np.min(re))
        re_per_scene['max'].append(np.max(re))
        te_per_scene['mean'].append(np.mean(te))
        te_per_scene['median'].append(np.median(te))
        te_per_scene['min'].append(np.min(te))
        te_per_scene['max'].append(np.max(te))
        re_all.extend(re.reshape(-1).tolist())
        te_all.extend(te.reshape(-1).tolist())
        precision.append(temp_precision)
        recall.append(temp_recall)
        logging.info("{}\t¦ {:.3f}\t¦ {:.3f}\t¦ {:.3f}\t¦ {:.3f}\t¦ {:3d}¦".format(short_names[idx], temp_precision, temp_recall, np.median(re), np.median(te), n_valid))
        # np.save(f'{est_folder}/{scenes[idx]}/flag.npy',c_flag)
    # Precision weighted by the number of valid pairs in each scene.
    weighted_precision = (np.array(n_valids) * np.array(precision)).sum() / np.sum(n_valids)
    logging.info("Mean precision: {:.3f}: +- {:.3f}".format(np.mean(precision),np.std(precision)))
    logging.info("Weighted precision: {:.3f}".format(weighted_precision))
    logging.info("Mean median RRE: {:.3f}: +- {:.3f}".format(np.mean(re_per_scene['median']), np.std(re_per_scene['median'])))
    logging.info("Mean median RTE: {:.3F}: +- {:.3f}".format(np.mean(te_per_scene['median']),np.std(te_per_scene['median'])))
if __name__ == '__main__':
    from config import str2bool
    # Command-line options for the 3DLoMatch evaluation run.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', default='', type=str, help='snapshot dir')
    parser.add_argument('--solver', default='SVD', type=str, choices=['SVD', 'RANSAC'])
    parser.add_argument('--use_icp', default=False, type=str2bool)
    parser.add_argument('--save_npy', default=False, type=str2bool)
    args = parser.parse_args()
    config_path = args.config_path
    # NOTE(review): the file handle returned by open() is never closed —
    # consider a `with` block.
    config = json.load(open(config_path, 'r'))
    config = edict(config)
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = config.CUDA_Devices
    if not os.path.exists("./logs"):
        os.makedirs("./logs")
    log_filename = f'logs/3DLoMatch-{config.descriptor}.log'
    # Append to the per-descriptor log file and mirror output to stdout.
    logging.basicConfig(level=logging.INFO,
                        filename=log_filename,
                        filemode='a',
                        format="")
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    # evaluate on the test set
    stats = eval_3DLoMatch(config)
    if args.save_npy:
        save_path = log_filename.replace('.log', '.npy')
        np.save(save_path, stats)
        print(f"Save the stats in {save_path}")
|
[
"sys.path.append",
"os.listdir",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.makedirs",
"evaluate_metric.TransformationLoss",
"logging.StreamHandler",
"dataset.ThreeDLoMatchLoader",
"os.path.exists",
"logging.getLogger",
"collections.defaultdict",
"logging.info",
"benchmark_utils.set_seed",
"evaluate_metric.ClassificationLoss",
"easydict.EasyDict",
"os.path.join",
"utils.timer.Timer",
"SC2_PCR.Matcher"
] |
[((23, 43), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (38, 43), False, 'import sys\n'), ((389, 399), 'benchmark_utils.set_seed', 'set_seed', ([], {}), '()\n', (397, 399), False, 'from benchmark_utils import set_seed, icp_refine\n'), ((3683, 3861), 'dataset.ThreeDLoMatchLoader', 'ThreeDLoMatchLoader', ([], {'root': 'config.data_path', 'descriptor': 'config.descriptor', 'inlier_threshold': 'config.inlier_threshold', 'num_node': 'config.num_node', 'use_mutual': 'config.use_mutual'}), '(root=config.data_path, descriptor=config.descriptor,\n inlier_threshold=config.inlier_threshold, num_node=config.num_node,\n use_mutual=config.use_mutual)\n', (3702, 3861), False, 'from dataset import ThreeDLoMatchLoader\n'), ((4010, 4296), 'SC2_PCR.Matcher', 'Matcher', ([], {'inlier_threshold': 'config.inlier_threshold', 'num_node': 'config.num_node', 'use_mutual': 'config.use_mutual', 'd_thre': 'config.d_thre', 'num_iterations': 'config.num_iterations', 'ratio': 'config.ratio', 'nms_radius': 'config.nms_radius', 'max_points': 'config.max_points', 'k1': 'config.k1', 'k2': 'config.k2'}), '(inlier_threshold=config.inlier_threshold, num_node=config.num_node,\n use_mutual=config.use_mutual, d_thre=config.d_thre, num_iterations=\n config.num_iterations, ratio=config.ratio, nms_radius=config.nms_radius,\n max_points=config.max_points, k1=config.k1, k2=config.k2)\n', (4017, 4296), False, 'from SC2_PCR import Matcher\n'), ((4524, 4590), 'evaluate_metric.TransformationLoss', 'TransformationLoss', ([], {'re_thre': 'config.re_thre', 'te_thre': 'config.te_thre'}), '(re_thre=config.re_thre, te_thre=config.te_thre)\n', (4542, 4590), False, 'from evaluate_metric import TransformationLoss, ClassificationLoss\n'), ((4611, 4631), 'evaluate_metric.ClassificationLoss', 'ClassificationLoss', ([], {}), '()\n', (4629, 4631), False, 'from evaluate_metric import TransformationLoss, ClassificationLoss\n'), ((5126, 5149), 'logging.info', 'logging.info', (["(f'*' * 40)"], {}), "(f'*' * 
40)\n", (5138, 5149), False, 'import logging\n'), ((5154, 5342), 'logging.info', 'logging.info', (['f"""All {allpair_stats.shape[0]} pairs, Mean Reg Recall={allpair_average[0] * 100:.2f}%, Mean Re={correct_pair_average[1]:.2f}, Mean Te={correct_pair_average[2]:.2f}"""'], {}), "(\n f'All {allpair_stats.shape[0]} pairs, Mean Reg Recall={allpair_average[0] * 100:.2f}%, Mean Re={correct_pair_average[1]:.2f}, Mean Te={correct_pair_average[2]:.2f}'\n )\n", (5166, 5342), False, 'import logging\n'), ((5337, 5453), 'logging.info', 'logging.info', (['f"""\tInput: Mean Inlier Num={allpair_average[3]:.2f}(ratio={allpair_average[4] * 100:.2f}%)"""'], {}), "(\n f'\\tInput: Mean Inlier Num={allpair_average[3]:.2f}(ratio={allpair_average[4] * 100:.2f}%)'\n )\n", (5349, 5453), False, 'import logging\n'), ((5448, 5644), 'logging.info', 'logging.info', (['f"""\tOutput: Mean Inlier Num={allpair_average[5]:.2f}(precision={allpair_average[6] * 100:.2f}%, recall={allpair_average[7] * 100:.2f}%, f1={allpair_average[8] * 100:.2f}%)"""'], {}), "(\n f'\\tOutput: Mean Inlier Num={allpair_average[5]:.2f}(precision={allpair_average[6] * 100:.2f}%, recall={allpair_average[7] * 100:.2f}%, f1={allpair_average[8] * 100:.2f}%)'\n )\n", (5460, 5644), False, 'import logging\n'), ((5639, 5754), 'logging.info', 'logging.info', (['f"""\tMean model time: {allpair_average[9]:.2f}s, Mean data time: {allpair_average[10]:.2f}s"""'], {}), "(\n f'\\tMean model time: {allpair_average[9]:.2f}s, Mean data time: {allpair_average[10]:.2f}s'\n )\n", (5651, 5754), False, 'import logging\n'), ((6030, 6047), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6041, 6047), False, 'from collections import defaultdict\n'), ((6067, 6084), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6078, 6084), False, 'from collections import defaultdict\n'), ((6256, 6320), 'logging.info', 'logging.info', (['"""Scene\t¦ prec.\t¦ rec.\t¦ re\t¦ te\t¦ samples\t¦"""'], {}), "('Scene\\t¦ prec.\\t¦ 
rec.\\t¦ re\\t¦ te\\t¦ samples\\t¦')\n", (6268, 6320), False, 'import logging\n'), ((8957, 8982), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8980, 8982), False, 'import argparse\n'), ((9417, 9430), 'easydict.EasyDict', 'edict', (['config'], {}), '(config)\n', (9422, 9430), True, 'from easydict import EasyDict as edict\n'), ((9640, 9731), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'filename': 'log_filename', 'filemode': '"""a"""', 'format': '""""""'}), "(level=logging.INFO, filename=log_filename, filemode='a',\n format='')\n", (9659, 9731), False, 'import logging\n'), ((928, 935), 'utils.timer.Timer', 'Timer', ([], {}), '()\n', (933, 935), False, 'from utils.timer import Timer\n'), ((937, 944), 'utils.timer.Timer', 'Timer', ([], {}), '()\n', (942, 944), False, 'from utils.timer import Timer\n'), ((5921, 5942), 'os.listdir', 'os.listdir', (['gt_folder'], {}), '(gt_folder)\n', (5931, 5942), False, 'import os\n'), ((5963, 5991), 'os.path.join', 'os.path.join', (['gt_folder', 'ele'], {}), '(gt_folder, ele)\n', (5975, 5991), False, 'import os\n'), ((9518, 9542), 'os.path.exists', 'os.path.exists', (['"""./logs"""'], {}), "('./logs')\n", (9532, 9542), False, 'import os\n'), ((9552, 9573), 'os.makedirs', 'os.makedirs', (['"""./logs"""'], {}), "('./logs')\n", (9563, 9573), False, 'import os\n'), ((9835, 9868), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (9856, 9868), False, 'import logging\n'), ((6463, 6492), 'os.path.join', 'os.path.join', (['scene', '"""gt.log"""'], {}), "(scene, 'gt.log')\n", (6475, 6492), False, 'import os\n'), ((6705, 6735), 'os.path.join', 'os.path.join', (['scene', '"""gt.info"""'], {}), "(scene, 'gt.info')\n", (6717, 6735), False, 'import os\n'), ((9804, 9823), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (9821, 9823), False, 'import logging\n')]
|
"""
文件夹相关
"""
import os
class Directory:
    """Utility wrapping a filesystem directory path."""

    def __init__(self, path: str) -> None:
        """Store the directory path to operate on.

        path: path of the directory
        """
        self.path = path

    def disk_usage(self):
        """Recursively compute and report the disk usage of the directory.

        Prints the accumulated size of every sub-directory as it is
        visited (post-order) and returns the total number of bytes.
        """
        # Inner helper so the recursion does not go through `self`.
        def _usage(target):
            # Plain files contribute only their own size.
            if not os.path.isdir(target):
                return os.path.getsize(target)
            # Directory: its own entry size plus all children, recursively.
            children = (os.path.join(target, name) for name in os.listdir(target))
            size = os.path.getsize(target) + sum(map(_usage, children))
            # Report the accumulated size of this directory.
            print("{0:<7}".format(size), target)
            return size

        return _usage(self.path)
|
[
"os.path.isdir",
"os.path.getsize",
"os.path.join",
"os.listdir"
] |
[((311, 332), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (326, 332), False, 'import os\n'), ((348, 367), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (361, 367), False, 'import os\n'), ((412, 428), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (422, 428), False, 'import os\n'), ((463, 492), 'os.path.join', 'os.path.join', (['path', 'file_name'], {}), '(path, file_name)\n', (475, 492), False, 'import os\n')]
|
import os
from os.path import expanduser
import altair as alt
import numpy as np
import pandas as pd
from scipy.stats.stats import pearsonr
import sqlite3
from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot
from config import dummy_start_date, dummy_end_date, cutoff_date
# %matplotlib inline
# Plot window defaults to the dummy range from config; a configured
# cutoff_date narrows the start of the window.
plot_start_date = dummy_start_date
plot_end_date = dummy_end_date
if cutoff_date is not None:
    plot_start_date = cutoff_date
# One-day timedelta used to convert reading spans into day counts.
day = np.timedelta64(1, 'D')
# Shared colour scale distinguishing fiction from non-fiction.
fiction_scale = alt.Scale(domain=[True, False])
def get_data(library_paths=None):
    """Load the book list from a calibre ``metadata.db`` into a DataFrame.

    Args:
        library_paths: list of calibre library directories; only the first
            entry is read. Defaults to ``['~/books/non-fiction/']``.
            (Default is built inside the function — a mutable default
            argument would be shared between calls.)

    Returns:
        DataFrame with one row per book, fixed dtypes and derived columns
        (words_per_page, words_per_day, finished_*/pub* date parts),
        sorted by reading start date.
    """
    if library_paths is None:
        library_paths = [expanduser('~/books/non-fiction/')]
    db_path = library_paths[0] + 'metadata.db'
    conn = sqlite3.connect(db_path)
    # calibre stores custom columns in numbered tables; map label -> id.
    custom_column_index = dict(pd.read_sql_query("""
        SELECT label, id FROM custom_columns
        """, conn).to_dict(orient='split')['data'])

    def tbl(name):
        # Physical table name of the custom column with the given label.
        return 'custom_column_' + str(custom_column_index[name])

    df = pd.read_sql_query(f"""
        SELECT
            title,
            author_sort AS author,
            series.name AS series,
            series_index,
            pubdate,
            timestamp,
            last_modified,
            languages.lang_code AS language,
            {tbl('started')}.value AS start,
            {tbl('finished')}.value AS end,
            {tbl('words')}.value AS words,
            {tbl('pages')}.value AS pages,
            {tbl('fre')}.value AS fre,
            {tbl('fkg')}.value AS fkg,
            {tbl('gfi')}.value AS gfi,
            ({tbl('shelf')}.value = 'Fiction') AS is_fiction,
            ifnull({tbl('read')}.value, 0) AS is_read
        FROM books
        LEFT OUTER JOIN books_series_link
            ON books.id = books_series_link.book
        LEFT OUTER JOIN series
            ON books_series_link.series = series.id
        JOIN books_languages_link
            ON books.id = books_languages_link.book
        JOIN languages
            ON books_languages_link.lang_code = languages.id
        LEFT OUTER JOIN {tbl('pages')}
            ON {tbl('pages')}.book = books.id
        LEFT OUTER JOIN {tbl('words')}
            ON {tbl('words')}.book = books.id
        LEFT OUTER JOIN {tbl('fre')}
            ON {tbl('fre')}.book = books.id
        LEFT OUTER JOIN {tbl('fkg')}
            ON {tbl('fkg')}.book = books.id
        LEFT OUTER JOIN {tbl('gfi')}
            ON {tbl('gfi')}.book = books.id
        JOIN books_{tbl('shelf')}_link
            ON books_{tbl('shelf')}_link.book = books.id
        JOIN {tbl('shelf')}
            ON {tbl('shelf')}.id = books_{tbl('shelf')}_link.value
        LEFT OUTER JOIN {tbl('started')}
            ON {tbl('started')}.book = books.id
        LEFT OUTER JOIN {tbl('finished')}
            ON {tbl('finished')}.book = books.id
        LEFT OUTER JOIN {tbl('read')} ON {tbl('read')}.book = books.id
        WHERE
            {tbl('shelf')}.value = 'Fiction'
            OR {tbl('shelf')}.value = 'Nonfiction'
    """, conn, parse_dates=['start', 'end', 'pubdate', 'timestamp',
                            'last_modified'])
    # All queries are done — release the SQLite connection (was leaked).
    conn.close()
    # Books with no page count are either simply placeholders, not a
    # proper part of the library, or have just been added. In both
    # cases, it is OK to ignore them.
    df = df.loc[df.pages.notna()]
    # Fix data types
    df.language = df.language.astype('category')
    df.pages = df.pages.astype('int64')
    # We cannot make df.words an int64 column, as some PDF files have
    # no word count associated with them and int64 columns cannot
    # contain NAs.
    df.is_fiction = df.is_fiction.astype(bool)
    df.is_read = df.is_read.astype(bool)
    # Compute intermediate columns
    df.pubdate = df.pubdate.map(to_local)
    df = df.assign(words_per_page=df.words / df.pages,
                   words_per_day=df.words / ((df.end - df.start) / day))

    def to_numeric(x):
        return pd.to_numeric(x, errors='coerce', downcast='integer')

    df = df.assign(finished_year=to_numeric(df.end.map(to_year)),
                   finished_month=to_numeric(df.end.map(to_month)),
                   finished_day=to_numeric(df.end.map(to_day)))
    df = df.assign(pubyear=to_numeric(df.pubdate.map(to_year)),
                   pubmonth=to_numeric(df.pubdate.map(to_month)),
                   pubday=to_numeric(df.pubdate.map(to_day)))
    df.sort_values('start', inplace=True)
    return df
def plot_ranges(df, output='ranges.html'):
    """Print date ranges in which the books have been is_read, how many
    books have been is_read at any given point in time and how many words
    have been is_read per day.
    """
    if cutoff_date is not None:
        # df = df[(df.start >= cutoff_date) & (df.end >= cutoff_date)]
        # Keep books that are unfinished or finished after the cutoff.
        df = df[df.end.isna() | (df.end >= cutoff_date)]
    # Unfinished books have no end date; fill in the dummy end so their
    # bars extend to the edge of the plot. BUGFIX: fillna returns a copy,
    # so the result must be assigned back (the original call was a no-op).
    df = df.assign(end=df.end.fillna(dummy_end_date))
    # Assign a vertical slot to each book so bars do not overlap.
    df = df[df.start.notna()].assign(ys=-allocate_ys(df[df.start.notna()]))
    bars = alt.Chart(df) \
        .mark_bar(clip=True) \
        .encode(
            x=alt.X('start', axis=alt.Axis(labelAngle=45, title='Date')),
            x2='end',
            y=alt.Y('ys:N', axis=None),
            color=alt.Color('is_fiction', scale=fiction_scale, legend=None),
            tooltip='title'
        )
    bars.width = 1600
    # Semi-transparent overlay: density of books being read at any time.
    overlapped = alt.Chart(df[df.start.notna()]) \
        .mark_bar(clip=True, opacity=0.1) \
        .encode(
            x=alt.X('start', axis=None),
            x2='end',
            y=alt.Y('is_fiction', axis=None),
            color=alt.Color('is_fiction', scale=fiction_scale, legend=None)
        )
    overlapped.width = bars.width
    # Reading ranges grouped by series.
    baz = df[df.series.notna()]
    if cutoff_date is not None:
        baz = baz[baz.start.notna() & (baz.end.isna() |
                                       (baz.end >= cutoff_date))]
    else:
        baz = baz[df.start.notna()]
    by_series = alt.Chart(baz) \
        .mark_bar(clip=True, opacity=0.7) \
        .encode(
            x=alt.X('start', axis=alt.Axis(labelAngle=45, title='Date')),
            x2='end',
            y=alt.Y('series', title='Series'),
            tooltip='title'
        )
    by_series.width = bars.width
    # Reading ranges grouped by author.
    baz = df[df.author.notna()]
    if cutoff_date is not None:
        baz = baz[baz.start.notna() & (baz.end.isna() |
                                       (baz.end >= cutoff_date))]
    else:
        baz = baz[df.start.notna()]
    # NOTE(review): attribute assignment does not create a DataFrame
    # column, and 'ys' is not used by the chart below — dead statement?
    baz.ys = -allocate_ys(baz[baz.start.notna()])
    by_author = alt.Chart(baz) \
        .mark_bar(clip=True, opacity=0.7) \
        .encode(
            x=alt.X('start', axis=alt.Axis(labelAngle=45, title='Date')),
            x2='end',
            y=alt.Y('author', title='Author'),
            color='series',
            tooltip='title'
        )
    by_author.width = bars.width
    save_plot(overlapped & bars & by_series, output)
    save_plot(by_author, 'by_author.html')
def plot_yearly(df, y='count()', output='finished.html'):
    """Bar chart per finishing year of the given aggregate, split by
    fiction/non-fiction colour."""
    finished = df[df.is_read & df.end]
    color = alt.Color('is_fiction', scale=fiction_scale)
    chart = alt.Chart(finished).mark_bar().encode(
        x='finished_year:O',
        y=y,
        color=color,
    )
    save_plot(chart, output)
def number_of_books_per_author(df, output='books_per_author.html'):
    """Horizontal bar chart: number of finished books per author."""
    counts = df[df.is_read].author.value_counts()
    table = pd.DataFrame(data={'author': counts.index,
                           'count': counts.values})
    table = table.sort_values('count', ascending=False)
    chart = (
        alt.Chart(table)
        .mark_bar()
        .encode(y=alt.Y('author', sort=None), x='count')
    )
    save_plot(chart, output)
def plot_pubdate(df, output='pubdate.html'):
    """Distribution of publication years/months/days across the library."""
    known = df[df.pubdate.notna()]

    def bars(data, x, y, **mark_kwargs):
        # One bar chart per date component.
        return alt.Chart(data).mark_bar(**mark_kwargs).encode(x=x, y=y)

    years = bars(known, 'pubyear:O', 'count(year):N')
    # Non-fiction years overlaid in orange on top of the full histogram.
    years_nonfiction = bars(known[~known.is_fiction], 'pubyear:O',
                            'count(year):N', color='orange')
    months = bars(known, 'pubmonth:O', 'count(pubmonth):N')
    days = bars(known, 'pubday:O', 'count(pubday):N')
    years.width = 965
    save_plot((years + years_nonfiction) & (months | days), output)
def reading_ease(df):
    """Scatter FRE against FKG and GFI readability scores side by side."""
    scored = df[df.fre.notna() & df.fkg.notna() & df.gfi.notna()]
    color = alt.Color('is_fiction', scale=fiction_scale)

    def scatter(y_field):
        # Semi-transparent scatter of FRE vs the given readability metric.
        return alt.Chart(scored).mark_point(opacity=0.2).encode(
            x='fre', y=y_field, color=color)

    save_plot(scatter('fkg') | scatter('gfi'), 'reading_ease.html')
# blue_patch = mpatches.Patch(label='Fiction')
# orange_patch = mpatches.Patch(label='Nonfiction', color='orange')
#
# def plot_histogram(df):
# "Plot histogram of how many days I needed to is_read a book."
# fig = plt.figure(figsize=(8, 6), dpi=dpi)
# ax = fig.add_subplot(111)
#
# ax.hist([np.array(df[df.is_fiction].duration
# .map(lambda x: x.days).dropna(),
# dtype='float64'),
# np.array(df[~df.is_fiction].duration
# .map(lambda x: x.days).dropna(),
# dtype='float64')],
# histtype='barstacked',
# bins=list(range(-7, 1764, 14)))
#
# plt.title('Number of days spent reading a book')
# plt.legend(handles=[blue_patch, orange_patch])
# plt.xlabel("Number of days spent reading")
# plt.ylabel("Number of books")
#
# plt.savefig('histogram.png')
# return plt.show()
#
#
# def scatter_length_duration(df):
# fig = plt.figure(figsize=(8, 6), dpi=dpi)
# ax = fig.add_subplot(111)
# df = df[df.words > 0]
# fiction = df[df.is_fiction]
# nonfiction = df[~df.is_fiction]
#
# duration = np.array(fiction.duration.map(lambda x: x.days),
# dtype='float64')
# ax.scatter(fiction.words.values, duration)
#
# duration = np.array(nonfiction.duration.map(lambda x: x.days),
# dtype='float64')
# ax.scatter(nonfiction.words.values, duration)
#
# plt.title("Number of words vs. days of reading")
# plt.xlabel("Number of words")
# plt.ylabel("Days spent reading")
# plt.legend(handles=[blue_patch, orange_patch])
#
# plt.savefig('scatter.png')
# return plt.show()
#
#
# def scatter_words_vs_words_per_day(df):
# fig = plt.figure()
# ax = fig.gca()
# ax.set_xscale('log')
# ax.set_yscale('log')
# ax.set_xlabel('Words')
# ax.set_ylabel('Words per day')
# ax.plot(df.words, df.words_per_day, 'o')
# Generate all plots from the calibre library.
os.makedirs('output', exist_ok=True)
df = get_data()
# NOTE(review): avg_words_per_page, chart and the cor_* correlations below
# are computed but never used/saved — presumably for interactive inspection.
avg_words_per_page = df.words.sum() / df.pages[df.words.notna()].sum()
plot_ranges(df)
number_of_books_per_author(df)
plot_yearly(df, output='books_finished.html')
plot_yearly(df, y='sum(pages)', output='pages_finished.html')
plot_yearly(df, y='sum(words)', output='words_finished.html')
plot_pubdate(df)
# Word/page totals per (read, fiction, language) combination.
values = ('words', 'pages')
table = df.pivot_table(values=values,
                       index=('is_read', 'is_fiction', 'language'),
                       aggfunc=np.sum).reset_index()
table = table.assign(combined=list(zip(table.is_fiction, table.is_read)))
chart = alt.Chart(table) \
    .mark_bar() \
    .encode(column='language',
            x='is_read',
            y='words',
            color='language')
# Pairwise correlations between the three readability metrics.
ease_df = df[df.fre.notna() & df.fkg.notna() & df.gfi.notna()]
cor_fre_fkg = pearsonr(ease_df.fre, ease_df.fkg)
cor_fre_gfi = pearsonr(ease_df.fre, ease_df.gfi)
cor_fkg_gfi = pearsonr(ease_df.fkg, ease_df.gfi)
reading_ease(df)
|
[
"pandas.DataFrame",
"os.makedirs",
"altair.Y",
"scipy.stats.stats.pearsonr",
"util.save_plot",
"altair.Chart",
"altair.Axis",
"altair.X",
"numpy.timedelta64",
"sqlite3.connect",
"pandas.read_sql_query",
"altair.Scale",
"os.path.expanduser",
"pandas.to_numeric",
"altair.Color"
] |
[((458, 480), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (472, 480), True, 'import numpy as np\n'), ((497, 528), 'altair.Scale', 'alt.Scale', ([], {'domain': '[True, False]'}), '(domain=[True, False])\n', (506, 528), True, 'import altair as alt\n'), ((10515, 10551), 'os.makedirs', 'os.makedirs', (['"""output"""'], {'exist_ok': '(True)'}), "('output', exist_ok=True)\n", (10526, 10551), False, 'import os\n'), ((11372, 11406), 'scipy.stats.stats.pearsonr', 'pearsonr', (['ease_df.fre', 'ease_df.fkg'], {}), '(ease_df.fre, ease_df.fkg)\n', (11380, 11406), False, 'from scipy.stats.stats import pearsonr\n'), ((11421, 11455), 'scipy.stats.stats.pearsonr', 'pearsonr', (['ease_df.fre', 'ease_df.gfi'], {}), '(ease_df.fre, ease_df.gfi)\n', (11429, 11455), False, 'from scipy.stats.stats import pearsonr\n'), ((11470, 11504), 'scipy.stats.stats.pearsonr', 'pearsonr', (['ease_df.fkg', 'ease_df.gfi'], {}), '(ease_df.fkg, ease_df.gfi)\n', (11478, 11504), False, 'from scipy.stats.stats import pearsonr\n'), ((655, 679), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (670, 679), False, 'import sqlite3\n'), ((6715, 6763), 'util.save_plot', 'save_plot', (['(overlapped & bars & by_series)', 'output'], {}), '(overlapped & bars & by_series, output)\n', (6724, 6763), False, 'from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot\n'), ((6768, 6806), 'util.save_plot', 'save_plot', (['by_author', '"""by_author.html"""'], {}), "(by_author, 'by_author.html')\n", (6777, 6806), False, 'from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot\n'), ((7083, 7107), 'util.save_plot', 'save_plot', (['chart', 'output'], {}), '(chart, output)\n', (7092, 7107), False, 'from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot\n'), ((7245, 7302), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'author': x.index, 'count': x.values}"}), "(data={'author': x.index, 'count': x.values})\n", 
(7257, 7302), True, 'import pandas as pd\n'), ((7505, 7529), 'util.save_plot', 'save_plot', (['chart', 'output'], {}), '(chart, output)\n', (7514, 7529), False, 'from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot\n'), ((8062, 8123), 'util.save_plot', 'save_plot', (['(years + years_nonfiction & (months | days))', 'output'], {}), '(years + years_nonfiction & (months | days), output)\n', (8071, 8123), False, 'from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot\n'), ((8242, 8286), 'altair.Color', 'alt.Color', (['"""is_fiction"""'], {'scale': 'fiction_scale'}), "('is_fiction', scale=fiction_scale)\n", (8251, 8286), True, 'import altair as alt\n'), ((8491, 8528), 'util.save_plot', 'save_plot', (['(a | b)', '"""reading_ease.html"""'], {}), "(a | b, 'reading_ease.html')\n", (8500, 8528), False, 'from util import to_day, to_month, to_year, to_local, allocate_ys, save_plot\n'), ((559, 593), 'os.path.expanduser', 'expanduser', (['"""~/books/non-fiction/"""'], {}), "('~/books/non-fiction/')\n", (569, 593), False, 'from os.path import expanduser\n'), ((3857, 3910), 'pandas.to_numeric', 'pd.to_numeric', (['x'], {'errors': '"""coerce"""', 'downcast': '"""integer"""'}), "(x, errors='coerce', downcast='integer')\n", (3870, 3910), True, 'import pandas as pd\n'), ((5045, 5069), 'altair.Y', 'alt.Y', (['"""ys:N"""'], {'axis': 'None'}), "('ys:N', axis=None)\n", (5050, 5069), True, 'import altair as alt\n'), ((5089, 5146), 'altair.Color', 'alt.Color', (['"""is_fiction"""'], {'scale': 'fiction_scale', 'legend': 'None'}), "('is_fiction', scale=fiction_scale, legend=None)\n", (5098, 5146), True, 'import altair as alt\n'), ((5335, 5360), 'altair.X', 'alt.X', (['"""start"""'], {'axis': 'None'}), "('start', axis=None)\n", (5340, 5360), True, 'import altair as alt\n'), ((5398, 5428), 'altair.Y', 'alt.Y', (['"""is_fiction"""'], {'axis': 'None'}), "('is_fiction', axis=None)\n", (5403, 5428), True, 'import altair as alt\n'), ((5448, 5505), 
'altair.Color', 'alt.Color', (['"""is_fiction"""'], {'scale': 'fiction_scale', 'legend': 'None'}), "('is_fiction', scale=fiction_scale, legend=None)\n", (5457, 5505), True, 'import altair as alt\n'), ((5987, 6018), 'altair.Y', 'alt.Y', (['"""series"""'], {'title': '"""Series"""'}), "('series', title='Series')\n", (5992, 6018), True, 'import altair as alt\n'), ((6578, 6609), 'altair.Y', 'alt.Y', (['"""author"""'], {'title': '"""Author"""'}), "('author', title='Author')\n", (6583, 6609), True, 'import altair as alt\n'), ((7023, 7067), 'altair.Color', 'alt.Color', (['"""is_fiction"""'], {'scale': 'fiction_scale'}), "('is_fiction', scale=fiction_scale)\n", (7032, 7067), True, 'import altair as alt\n'), ((7462, 7488), 'altair.Y', 'alt.Y', (['"""author"""'], {'sort': 'None'}), "('author', sort=None)\n", (7467, 7488), True, 'import altair as alt\n'), ((11148, 11164), 'altair.Chart', 'alt.Chart', (['table'], {}), '(table)\n', (11157, 11164), True, 'import altair as alt\n'), ((712, 797), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""\n SELECT label, id FROM custom_columns\n """', 'conn'], {}), '("""\n SELECT label, id FROM custom_columns\n """,\n conn)\n', (729, 797), True, 'import pandas as pd\n'), ((4871, 4884), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (4880, 4884), True, 'import altair as alt\n'), ((4969, 5006), 'altair.Axis', 'alt.Axis', ([], {'labelAngle': '(45)', 'title': '"""Date"""'}), "(labelAngle=45, title='Date')\n", (4977, 5006), True, 'import altair as alt\n'), ((5799, 5813), 'altair.Chart', 'alt.Chart', (['baz'], {}), '(baz)\n', (5808, 5813), True, 'import altair as alt\n'), ((5911, 5948), 'altair.Axis', 'alt.Axis', ([], {'labelAngle': '(45)', 'title': '"""Date"""'}), "(labelAngle=45, title='Date')\n", (5919, 5948), True, 'import altair as alt\n'), ((6390, 6404), 'altair.Chart', 'alt.Chart', (['baz'], {}), '(baz)\n', (6399, 6404), True, 'import altair as alt\n'), ((6502, 6539), 'altair.Axis', 'alt.Axis', ([], {'labelAngle': '(45)', 'title': 
'"""Date"""'}), "(labelAngle=45, title='Date')\n", (6510, 6539), True, 'import altair as alt\n'), ((6879, 6913), 'altair.Chart', 'alt.Chart', (['df[df.is_read & df.end]'], {}), '(df[df.is_read & df.end])\n', (6888, 6913), True, 'import altair as alt\n'), ((7405, 7419), 'altair.Chart', 'alt.Chart', (['foo'], {}), '(foo)\n', (7414, 7419), True, 'import altair as alt\n'), ((7622, 7635), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (7631, 7635), True, 'import altair as alt\n'), ((7711, 7740), 'altair.Chart', 'alt.Chart', (['df[~df.is_fiction]'], {}), '(df[~df.is_fiction])\n', (7720, 7740), True, 'import altair as alt\n'), ((7842, 7855), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (7851, 7855), True, 'import altair as alt\n'), ((7969, 7982), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (7978, 7982), True, 'import altair as alt\n'), ((8296, 8309), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (8305, 8309), True, 'import altair as alt\n'), ((8395, 8408), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (8404, 8408), True, 'import altair as alt\n')]
|
#########################################################################################
#
# Python run script for parallel OpenMP execution of the MCE / CCS program
# Written by <NAME> 07/10/2020
#
# This script is based on a similar script written by <NAME> using bash. This script
# aims to simplify the running process and make the program more usable, as python is
# widely understood and should make modifications easier to implement.
# The script is designed to compile, copy all required files into an execution folder,
# and submit the program as a job. Included are various checks, output handling,
# parameter setting and module loading procedures. This script can also be used
# for restarting a timed-out simulation by setting the restart parameter to 'YES'.
#
# To run the program, variables must be set/checked in inputs.py and inham.py.
# The following arguments then have to be set in the run folder:
# 1) The number of repeats
# 2) The number of folders/nodes
# 3) The number of parallel cores per folder/node (max 8)
#
# The propagation and basis set generation flags are also set in this file.
# To restart a run, open the run script in the execution folder,
# change the restart parameter to 'YES' and run the copy of this script from the
# execution folder. Starting a run with precalculated basis functions is not yet
# possible for MCE12.
#
#
#########################################################################################
import sys
import socket
import os
import subprocess
import getpass
import random
import shutil
import glob
import csv
import inham
import inputs
#########################################################################################
# VARIABLES TO SET FOR SIMULATION #
#########################################################################################
# Total number of repeat simulations across all nodes/folders.
repeats=400
# Number of nodes/folders the repeats are split over.
nodes=1
# Number of parallel (OpenMP) cores per folder/node (max 8).
cores=1
# Name of the running folder.
# Default : <method>-<system>-<random number> ie CCS-HP-31254
# Otherwise: <method>-<system>-<runfolder string>
Runfolder='clonetests_noclone'
# Generate Basis Set? YES/NO
gen='YES'
# Propagate Basis Set? YES/NO
prop='YES'
# Restart? YES/NO
# To restart a timed-out run, set to 'YES' and rerun this script from the execution folder.
restart='NO'
# Seed value for the random number routine - if you do not specify it
# (leave default value of 0) one will be generated automatically.
SEED=0
#########################################################################################
# END OF INPUTS #
#########################################################################################
# * NO NEED TO SCROLL FURTHER IF USING AS BLACKBOX * #
#########################################################################################
if __name__=="__main__":
    # ------------------------------------------------------------------
    # Main driver: validates the job parameters, builds the execution
    # directory, writes the per-run rundata csv files, compiles the code
    # and finally submits (SGE/qsub) or launches (local) the simulations.
    # ------------------------------------------------------------------
    #Check basic arguments
    if(isinstance(repeats,int)==False):
        sys.exit("Number of repeats must be an integer")
    elif(isinstance(nodes,int)==False):
        sys.exit("Number of folders must be an integer")
    elif(isinstance(cores,int)==False):
        sys.exit("Number of parallel cores must be an integer")
    elif(repeats<1):
        sys.exit("Not enough runs selected. Must be 1 or greater")
    elif(nodes<1):
        sys.exit("Not enough nodes selected. Must be 1 or greater")
    elif(nodes>100):
        sys.exit("Too many nodes. Maximum of 100 simultaneous submisions")
    elif(cores>8):
        sys.exit("Too many cores selected. Maximum of 8 available")
    elif(cores<1):
        sys.exit("Not enough cores selected. Must be 1 or greater")
    elif((repeats/nodes)>5000):
        # Message now matches the limit actually tested (5000, not 500).
        sys.exit("Too many repeats per folder. Must be 5000 or fewer")
    if(restart=="NO"):
        if((inputs.Conjugate_Repeats=='YES')and((repeats%(2*nodes*cores))!=0)):
            sys.exit("Number of repeats not valid for conjugate repetition. Should be integer multiple of 2*cores*nodes")
        elif((repeats%(nodes*cores))!=0):
            sys.exit("Number of repeats must be an integer multiple of cores*folders")
        elif(nodes*cores>100):
            sys.exit("Total number of cores should stay below 100")
        elif(inputs.systems['freqflg']not in{0,1}):
            sys.exit("Frequency flag must be zero or 1")
        else:
            print("Arguments checked")
        # Detect whether we are on the ARC4 HPC login node or a local machine.
        Hostname=socket.gethostname()
        if(Hostname==("login2.arc4.leeds.ac.uk")):
            HPCFLG=1
        else:
            HPCFLG=0
        #Might need grid altering calibration test for chlin451 bash code
        #if [[ -n $( echo $HOSTNAME | fgrep -e "chmlin451" ) ]]; then
        #grdalt=1
        #else
        #grdalt=0
        #fi
        #Makes execution folder and run folder
        if(HPCFLG==0):
            if not os.path.exists("../EXEC"):
                os.mkdir("../EXEC")
            EXDIR="../EXEC"
        else:
            # subprocess.run(['module','load','mkl'])
            os.environ['LOGNAME']  # value unused; kept from the original script
            EXDIR="/nobackup/"+getpass.getuser()
        if(Runfolder=="Default"):
            Runfolder=inputs.method+"-"+inputs.systems["System"]+"-"+str(repeats)+"-"+str(nodes)+"-"+str(cores)
        else:
            Runfolder=inputs.method+"-"+inputs.systems["System"]+"-"+Runfolder
        if os.path.exists(EXDIR+"/"+Runfolder):
            value=input("File already exists do you want to delete it? y/n\n")
            if(value=='y'):
                shutil.rmtree(EXDIR+"/"+Runfolder)
            else:
                sys.exit("Runfolder already exists. Change the Runfolder name or delete/move it")
        os.mkdir(EXDIR+"/"+Runfolder)
        EXDIR1=EXDIR+"/"+Runfolder
        mcerunf=os.getcwd()
        #Builds result file (collation script run after all jobs finish)
        result=open(EXDIR1+"/result.sh","w")
        result.write("python "+mcerunf+"/collate.py $PWD "+(str(repeats))+" "+str(nodes)+" '"+Runfolder+"' "+(str(HPCFLG))+" '"+prop+"'")
        result.close()
        subprocess.run(['chmod', 'u+x', EXDIR1+'/result.sh'])
        #Copies input files
        shutil.copy2("inham.py",EXDIR1)
        shutil.copy2("inputs.py",EXDIR1)
        shutil.copy2("run.py",EXDIR1)
        shutil.copy2("combine.py",EXDIR1)
        #Makes the program input file
        if(inputs.method=="MCE12"):
            # MCE12 runs both MCE versions, so two rundata files are written.
            for i in range(2):
                with open('rundata'+str(i+1)+'.csv','w',newline='')as file:
                    writer = csv.writer(file)
                    writer.writerow([gen,prop,restart,inputs.cmprss,('MCEv'+str(i+1)),int(repeats/nodes),inputs.Conjugate_Repeats])
                    writer.writerow(inputs.systems.values())
                    writer.writerow(inputs.parameters.values())
                    writer.writerow(inputs.Train.values())
                    writer.writerow(inputs.clone.values())
                    writer.writerow(inputs.paramz.values())
                    writer.writerow(inham.EL.values())
                    writer.writerow(inputs.prop.values())
                    if(inputs.systems['System']=='MP'):
                        writer.writerow(inham.MP.values())
                    elif(inputs.systems['System']=='HP'):
                        writer.writerow(inham.HP.values())
                    else:
                        writer.writerow(inham.SB.values())
                # Copy only after the 'with' block has closed (and flushed) the file.
                shutil.copy2('rundata'+str(i+1)+'.csv',EXDIR1)
        else:
            with open('rundata.csv','w',newline='')as file:
                writer = csv.writer(file)
                writer.writerow([gen,prop,restart,inputs.cmprss,inputs.method,int(repeats/nodes),inputs.Conjugate_Repeats])
                writer.writerow(inputs.systems.values())
                writer.writerow(inputs.parameters.values())
                writer.writerow(inputs.Train.values())
                writer.writerow(inputs.clone.values())
                writer.writerow(inputs.paramz.values())
                writer.writerow(inham.EL.values())
                writer.writerow(inputs.prop.values())
                if(inputs.systems['System']=='MP'):
                    writer.writerow(inham.MP.values())
                elif(inputs.systems['System']=='HP'):
                    writer.writerow(inham.HP.values())
                else:
                    writer.writerow(inham.SB.values())
            # Copy only after the 'with' block has closed (and flushed) the file.
            shutil.copy2("rundata.csv",EXDIR1)
        for file in glob.glob(mcerunf+"/*.csv"):
            os.remove(file)
        #Makes subfolders
        if(inputs.method=="MCE12"):
            os.mkdir(EXDIR1+"/MCEv1")
            os.mkdir(EXDIR1+"/MCEv2")
            for j in range(2):
                for i in range (nodes):
                    os.mkdir(EXDIR1+"/MCEv"+str(j+1)+"/run-"+str(i+1))
        else:
            for i in range(nodes):
                path=os.path.join(EXDIR1,"run-"+str(i+1))
                os.mkdir(path)
        #Selects the right make file and executes
        os.chdir("../build")
        if(HPCFLG==1):
            shutil.copy2("../build/makefile_arc","../build/Makefile")
            subprocess.run(["make"])
        else:
            shutil.copy2("../build/makefile_chmlin","../build/Makefile")
            subprocess.run(["make"])
        shutil.copy2("MCE.exe",EXDIR1)
        shutil.copy2("interpolate.exe",EXDIR1)
        shutil.copy2("subavrg.exe",EXDIR1)
        if(inputs.systems['freqflg']==1):
            # Reuse an existing frequency file if present, else generate one.
            if os.path.exists(mcerunf+"/freq.dat"):
                shutil.copy2(mcerunf+"/freq.dat",EXDIR1)
            else:
                subprocess.run(["./integrator.exe"])
                shutil.copy2("freq.dat",EXDIR1)
        os.chdir(EXDIR1)
        EXDIR1=os.getcwd()
        if(gen=='NO'):
            # Propagation-only run: pre-computed basis sets must be copied in.
            if(inputs.method=="AIMC-MCE2"):
                # First pattern was missing the path separator and could never match.
                if (glob.glob(mcerunf+"/Outbs-001-00000-0_*.out")) or (glob.glob(mcerunf+"/Outbs-0001-00000-0_*.out")):
                    print("Outbs-0001-00000-0_*.out found in"+mcerunf)
                    for file in glob.glob(mcerunf+"/Outbs-*.out"):
                        shutil.copy2(file,EXDIR1)
                else:
                    sys.exit("Outbs-001-00000-0_*.out not found in runfolder For AIMC-MCE second pass, all relevant input bases must be present")
                if (glob.glob(mcerunf+"/Clonetrack-001_*.out")) or (glob.glob(mcerunf+"/Clonetrack-0001_*.out")):
                    print("Clonetrack-0001_*.out found in"+mcerunf)
                    for file in glob.glob(mcerunf+"/Clonetrack-*.out"):
                        shutil.copy2(file,EXDIR1)
                else:
                    sys.exit("Clonetrack-001_*.out not found in runfolder For AIMC-MCE second pass, all relevant input bases must be present")
            else:
                if (glob.glob(mcerunf+"/Outbs-001_*.out")) or (glob.glob(mcerunf+"/Outbs-0001_*.out")):
                    print("Outbs-0001_*.out found in"+mcerunf)
                    for file in glob.glob(mcerunf+"/Outbs-*.out"):
                        shutil.copy2(file,EXDIR1)
                else:
                    sys.exit("Outbs-001_*.out not found in runfolder.")
                if(inputs.clone['cloning']=='yes'):
                    if (glob.glob(mcerunf+"/Clonetrack-001_*.out")) or (glob.glob(mcerunf+"/Clonetrack-0001_*.out")):
                        print("Clonetrack-0001_*.out found in"+mcerunf)
                        for file in glob.glob(mcerunf+"/Clonetrack-*.out"):
                            shutil.copy2(file,EXDIR1)
                    else:
                        sys.exit("Clonetrack-001_*.out not found in runfolder")
        # Distribute the executable and input files to each run subfolder.
        if(inputs.method=="MCE12"):
            for j in range(2):
                for i in range (nodes):
                    shutil.copy2("MCE.exe","MCEv"+str(j+1)+"/run-"+str(i+1))
                    shutil.copy2('rundata'+str(j+1)+'.csv',"MCEv"+str(j+1)+"/run-"+str(i+1)+"/rundata.csv")
                    if(inputs.systems['freqflg']==1):
                        # Integer division: range() needs an int (repeats is a
                        # multiple of nodes, checked above).
                        for k in range(repeats//nodes):
                            shutil.copy2("freq.dat","MCEv"+str(j+1)+"/run-"+str(i+1)+"/freq"+str(k+1)+".dat")
        else:
            for i in range (nodes):
                shutil.copy2("MCE.exe","run-"+str(i+1))
                shutil.copy2("rundata.csv","run-"+str(i+1))
                if(inputs.systems['freqflg']==1):
                    for k in range(repeats//nodes):
                        # Relative path: a leading '/' here would have written to
                        # the filesystem root instead of the run subfolder.
                        shutil.copy2("freq.dat","run-"+str(i+1)+"/freq"+str(k+1)+".dat")
                if(gen=='NO'):
                    for file in glob.glob("Outbs-*.out"):
                        shutil.copy2(file,"run-"+str(i+1))
                    if(inputs.clone['cloning']=='yes'):
                        for file in glob.glob("Clonetrack-*.out"):
                            shutil.copy2(file,"run-"+str(i+1))
    elif(restart=='YES'):
        # Restart path: back up existing output bases to ../Outbsbackup and
        # rewrite the rundata files with the generation step disabled.
        Hostname=socket.gethostname()
        if(Hostname==("login2.arc4.leeds.ac.uk")):
            HPCFLG=1
        else:
            HPCFLG=0
        EXDIR1=os.getcwd()
        if not os.path.exists("../Outbsbackup"):
            os.mkdir("../Outbsbackup")
        if(inputs.method=="MCE12"):
            for j in range(2):
                # Index by j (the MCE version); the original used the undefined
                # loop variable i here, which raised a NameError.
                with open('rundata'+str(j+1)+'.csv','w',newline='')as file:
                    writer = csv.writer(file)
                    writer.writerow(['NO',prop,restart,inputs.cmprss,('MCEv'+str(j+1)),int(repeats/nodes),'NO'])
                    writer.writerow(inputs.systems.values())
                    writer.writerow(inputs.parameters.values())
                    writer.writerow(inputs.Train.values())
                    writer.writerow(inputs.clone.values())
                    writer.writerow(inputs.paramz.values())
                    writer.writerow(inham.EL.values())
                    writer.writerow(inputs.prop.values())
                    if(inputs.systems['System']=='MP'):
                        writer.writerow(inham.MP.values())
                    elif(inputs.systems['System']=='HP'):
                        writer.writerow(inham.HP.values())
                    else:
                        writer.writerow(inham.SB.values())
                for i in range(nodes):
                    p=1
                    q=1
                    for file in glob.glob('MCEv'+str(j+1)+'/run-'+str(i+1)+'/Outbs-*.out'):
                        shutil.copy2(file,'../Outbsbackup/Outbs-'+str(j+1)+'_'+str(p)+'.out_'+str(i+1)+'.out')
                        p=p+1
                    if((inputs.clone['cloning'])!='no'):
                        for file in glob.glob('MCEv'+str(j+1)+'/run-'+str(i+1)+'/clonearr-*.out'):
                            # Use q (the clonearr counter) so successive backups
                            # do not overwrite each other.
                            shutil.copy2(file,'../Outbsbackup/clonearr-'+str(j+1)+'_'+str(q)+'.out_'+str(i+1)+'.out')
                            q=q+1
                    shutil.copy2('rundata'+str(j+1)+'.csv',"MCEv"+str(j+1)+"/run-"+str(i+1)+"/rundata.csv")
        else:
            with open('rundata.csv','w',newline='')as file:
                writer = csv.writer(file)
                writer.writerow(['NO',prop,restart,inputs.cmprss,inputs.method,int(repeats/nodes),'NO'])
                writer.writerow(inputs.systems.values())
                writer.writerow(inputs.parameters.values())
                writer.writerow(inputs.Train.values())
                writer.writerow(inputs.clone.values())
                writer.writerow(inputs.paramz.values())
                writer.writerow(inham.EL.values())
                writer.writerow(inputs.prop.values())
                if(inputs.systems['System']=='MP'):
                    writer.writerow(inham.MP.values())
                elif(inputs.systems['System']=='HP'):
                    writer.writerow(inham.HP.values())
                else:
                    writer.writerow(inham.SB.values())
            for i in range (nodes):
                p=1
                q=1
                for file in glob.glob('run-'+str(i+1)+'/Outbs-00*.out'):
                    shutil.copy2(file,'../Outbsbackup/Outbs-'+str(p)+'.out_'+str(i+1)+'.out')
                    p=p+1
                if((inputs.clone['cloning'])!='no'):
                    for file in glob.glob('run-'+str(i+1)+'/clonearr-*.out'):
                        # Use q (the clonearr counter) so successive backups
                        # do not overwrite each other.
                        shutil.copy2(file,'../Outbsbackup/clonearr-'+str(q)+'.out_'+str(i+1)+'.out')
                        q=q+1
                shutil.copy2("rundata.csv","run-"+str(i+1))
    #If on a SGE machine make job submission file
    if(HPCFLG==1):
        number=random.randint(99999,1000000)
        file1="MCE"+str(number)+".sh"
        f=open(file1,"w")
        f.write("#$ -cwd -V \n")
        if(cores!=1):
            f.write("#$ -pe smp "+str(cores)+" \n") #Use shared memory parallel environment
        f.write("#$ -l h_rt=40:00:00 \n")
        f.write("#$ -l h_vmem=4G \n")
        f.write("#$ -t 1-"+str(nodes)+" \n")
        f.write("date \n")
        f.write("cd "+EXDIR1+"/run-$SGE_TASK_ID/ \n")
        f.write("echo "'"Running on $HOSTNAME in folder $PWD" \n')
        f.write("module load mkl \n")
        f.write("time ./MCE.exe \n")
        f.write("date \n")
        f.close()
        if(cores!=1):
            os.environ["OMP_NUM_THREADS"]=str(cores)
        subprocess.call(['qsub',file1])
    else:
        # Local execution: launch one MCE.exe process per run folder.
        if(cores!=1):
            os.environ["OMP_NUM_THREADS"]=str(cores)
        for i in range(nodes):
            SUBDIR=EXDIR1+"/run-"+str(i+1)
            subprocess.Popen('',executable=SUBDIR+"/MCE.exe",cwd=SUBDIR)
|
[
"os.mkdir",
"os.remove",
"getpass.getuser",
"inputs.paramz.values",
"glob.glob",
"shutil.rmtree",
"inputs.parameters.values",
"os.chdir",
"inham.SB.values",
"random.randint",
"inham.MP.values",
"os.path.exists",
"socket.gethostname",
"inputs.prop.values",
"subprocess.Popen",
"inputs.clone.values",
"csv.writer",
"shutil.copy2",
"inputs.Train.values",
"subprocess.call",
"sys.exit",
"subprocess.run",
"os.getcwd",
"inham.EL.values",
"inham.HP.values",
"inputs.systems.values"
] |
[((3155, 3203), 'sys.exit', 'sys.exit', (['"""Number of repeats must be an integer"""'], {}), "('Number of repeats must be an integer')\n", (3163, 3203), False, 'import sys\n'), ((5526, 5565), 'os.path.exists', 'os.path.exists', (["(EXDIR + '/' + Runfolder)"], {}), "(EXDIR + '/' + Runfolder)\n", (5540, 5565), False, 'import os\n'), ((5853, 5886), 'os.mkdir', 'os.mkdir', (["(EXDIR + '/' + Runfolder)"], {}), "(EXDIR + '/' + Runfolder)\n", (5861, 5886), False, 'import os\n'), ((5946, 5957), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5955, 5957), False, 'import os\n'), ((6200, 6255), 'subprocess.run', 'subprocess.run', (["['chmod', 'u+x', EXDIR1 + '/result.sh']"], {}), "(['chmod', 'u+x', EXDIR1 + '/result.sh'])\n", (6214, 6255), False, 'import subprocess\n'), ((6291, 6323), 'shutil.copy2', 'shutil.copy2', (['"""inham.py"""', 'EXDIR1'], {}), "('inham.py', EXDIR1)\n", (6303, 6323), False, 'import shutil\n'), ((6331, 6364), 'shutil.copy2', 'shutil.copy2', (['"""inputs.py"""', 'EXDIR1'], {}), "('inputs.py', EXDIR1)\n", (6343, 6364), False, 'import shutil\n'), ((6372, 6402), 'shutil.copy2', 'shutil.copy2', (['"""run.py"""', 'EXDIR1'], {}), "('run.py', EXDIR1)\n", (6384, 6402), False, 'import shutil\n'), ((6410, 6444), 'shutil.copy2', 'shutil.copy2', (['"""combine.py"""', 'EXDIR1'], {}), "('combine.py', EXDIR1)\n", (6422, 6444), False, 'import shutil\n'), ((8608, 8637), 'glob.glob', 'glob.glob', (["(mcerunf + '/*.csv')"], {}), "(mcerunf + '/*.csv')\n", (8617, 8637), False, 'import glob\n'), ((9184, 9204), 'os.chdir', 'os.chdir', (['"""../build"""'], {}), "('../build')\n", (9192, 9204), False, 'import os\n'), ((9467, 9498), 'shutil.copy2', 'shutil.copy2', (['"""MCE.exe"""', 'EXDIR1'], {}), "('MCE.exe', EXDIR1)\n", (9479, 9498), False, 'import shutil\n'), ((9506, 9545), 'shutil.copy2', 'shutil.copy2', (['"""interpolate.exe"""', 'EXDIR1'], {}), "('interpolate.exe', EXDIR1)\n", (9518, 9545), False, 'import shutil\n'), ((9553, 9588), 'shutil.copy2', 'shutil.copy2', 
(['"""subavrg.exe"""', 'EXDIR1'], {}), "('subavrg.exe', EXDIR1)\n", (9565, 9588), False, 'import shutil\n'), ((9868, 9884), 'os.chdir', 'os.chdir', (['EXDIR1'], {}), '(EXDIR1)\n', (9876, 9884), False, 'import os\n'), ((9900, 9911), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9909, 9911), False, 'import os\n'), ((16635, 16665), 'random.randint', 'random.randint', (['(99999)', '(1000000)'], {}), '(99999, 1000000)\n', (16649, 16665), False, 'import random\n'), ((17353, 17385), 'subprocess.call', 'subprocess.call', (["['qsub', file1]"], {}), "(['qsub', file1])\n", (17368, 17385), False, 'import subprocess\n'), ((3252, 3300), 'sys.exit', 'sys.exit', (['"""Number of folders must be an integer"""'], {}), "('Number of folders must be an integer')\n", (3260, 3300), False, 'import sys\n'), ((4069, 4188), 'sys.exit', 'sys.exit', (['"""Number of repeats not valid for conjugate repetition. Should be integer multiple of 2*cores*nodes"""'], {}), "(\n 'Number of repeats not valid for conjugate repetition. 
Should be integer multiple of 2*cores*nodes'\n )\n", (4077, 4188), False, 'import sys\n'), ((8544, 8579), 'shutil.copy2', 'shutil.copy2', (['"""rundata.csv"""', 'EXDIR1'], {}), "('rundata.csv', EXDIR1)\n", (8556, 8579), False, 'import shutil\n'), ((8649, 8664), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (8658, 8664), False, 'import os\n'), ((8740, 8767), 'os.mkdir', 'os.mkdir', (["(EXDIR1 + '/MCEv1')"], {}), "(EXDIR1 + '/MCEv1')\n", (8748, 8767), False, 'import os\n'), ((8778, 8805), 'os.mkdir', 'os.mkdir', (["(EXDIR1 + '/MCEv2')"], {}), "(EXDIR1 + '/MCEv2')\n", (8786, 8805), False, 'import os\n'), ((9240, 9298), 'shutil.copy2', 'shutil.copy2', (['"""../build/makefile_arc"""', '"""../build/Makefile"""'], {}), "('../build/makefile_arc', '../build/Makefile')\n", (9252, 9298), False, 'import shutil\n'), ((9310, 9334), 'subprocess.run', 'subprocess.run', (["['make']"], {}), "(['make'])\n", (9324, 9334), False, 'import subprocess\n'), ((9361, 9422), 'shutil.copy2', 'shutil.copy2', (['"""../build/makefile_chmlin"""', '"""../build/Makefile"""'], {}), "('../build/makefile_chmlin', '../build/Makefile')\n", (9373, 9422), False, 'import shutil\n'), ((9434, 9458), 'subprocess.run', 'subprocess.run', (["['make']"], {}), "(['make'])\n", (9448, 9458), False, 'import subprocess\n'), ((9646, 9683), 'os.path.exists', 'os.path.exists', (["(mcerunf + '/freq.dat')"], {}), "(mcerunf + '/freq.dat')\n", (9660, 9683), False, 'import os\n'), ((13027, 13047), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (13045, 13047), False, 'import socket\n'), ((13175, 13186), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13184, 13186), False, 'import os\n'), ((17557, 17621), 'subprocess.Popen', 'subprocess.Popen', (['""""""'], {'executable': "(SUBDIR + '/MCE.exe')", 'cwd': 'SUBDIR'}), "('', executable=SUBDIR + '/MCE.exe', cwd=SUBDIR)\n", (17573, 17621), False, 'import subprocess\n'), ((3349, 3404), 'sys.exit', 'sys.exit', (['"""Number of parallel cores must be an integer"""'], 
{}), "('Number of parallel cores must be an integer')\n", (3357, 3404), False, 'import sys\n'), ((4233, 4307), 'sys.exit', 'sys.exit', (['"""Number of repeats must be an integer multiple of cores*folders"""'], {}), "('Number of repeats must be an integer multiple of cores*folders')\n", (4241, 4307), False, 'import sys\n'), ((5031, 5056), 'os.path.exists', 'os.path.exists', (['"""../EXEC"""'], {}), "('../EXEC')\n", (5045, 5056), False, 'import os\n'), ((5074, 5093), 'os.mkdir', 'os.mkdir', (['"""../EXEC"""'], {}), "('../EXEC')\n", (5082, 5093), False, 'import os\n'), ((5255, 5272), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (5270, 5272), False, 'import getpass\n'), ((5686, 5724), 'shutil.rmtree', 'shutil.rmtree', (["(EXDIR + '/' + Runfolder)"], {}), "(EXDIR + '/' + Runfolder)\n", (5699, 5724), False, 'import shutil\n'), ((5755, 5840), 'sys.exit', 'sys.exit', (['"""Runfolder already exists. Change the Runfolder name or delte/move it"""'], {}), "('Runfolder already exists. Change the Runfolder name or delte/move it'\n )\n", (5763, 5840), False, 'import sys\n'), ((7710, 7726), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (7720, 7726), False, 'import csv\n'), ((9699, 9742), 'shutil.copy2', 'shutil.copy2', (["(mcerunf + '/freq.dat')", 'EXDIR1'], {}), "(mcerunf + '/freq.dat', EXDIR1)\n", (9711, 9742), False, 'import shutil\n'), ((9774, 9810), 'subprocess.run', 'subprocess.run', (["['./integrator.exe']"], {}), "(['./integrator.exe'])\n", (9788, 9810), False, 'import subprocess\n'), ((9827, 9859), 'shutil.copy2', 'shutil.copy2', (['"""freq.dat"""', 'EXDIR1'], {}), "('freq.dat', EXDIR1)\n", (9839, 9859), False, 'import shutil\n'), ((13203, 13235), 'os.path.exists', 'os.path.exists', (['"""../Outbsbackup"""'], {}), "('../Outbsbackup')\n", (13217, 13235), False, 'import os\n'), ((13253, 13279), 'os.mkdir', 'os.mkdir', (['"""../Outbsbackup"""'], {}), "('../Outbsbackup')\n", (13261, 13279), False, 'import os\n'), ((3434, 3492), 'sys.exit', 'sys.exit', 
(['"""Not enough runs selected. Must be 1 or greater"""'], {}), "('Not enough runs selected. Must be 1 or greater')\n", (3442, 3492), False, 'import sys\n'), ((4351, 4406), 'sys.exit', 'sys.exit', (['"""Total number of cores should stay below 100"""'], {}), "('Total number of cores should stay below 100')\n", (4359, 4406), False, 'import sys\n'), ((6664, 6680), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (6674, 6680), False, 'import csv\n'), ((7883, 7906), 'inputs.systems.values', 'inputs.systems.values', ([], {}), '()\n', (7904, 7906), False, 'import inputs\n'), ((7940, 7966), 'inputs.parameters.values', 'inputs.parameters.values', ([], {}), '()\n', (7964, 7966), False, 'import inputs\n'), ((8000, 8021), 'inputs.Train.values', 'inputs.Train.values', ([], {}), '()\n', (8019, 8021), False, 'import inputs\n'), ((8055, 8076), 'inputs.clone.values', 'inputs.clone.values', ([], {}), '()\n', (8074, 8076), False, 'import inputs\n'), ((8110, 8132), 'inputs.paramz.values', 'inputs.paramz.values', ([], {}), '()\n', (8130, 8132), False, 'import inputs\n'), ((8166, 8183), 'inham.EL.values', 'inham.EL.values', ([], {}), '()\n', (8181, 8183), False, 'import inham\n'), ((8217, 8237), 'inputs.prop.values', 'inputs.prop.values', ([], {}), '()\n', (8235, 8237), False, 'import inputs\n'), ((10001, 10047), 'glob.glob', 'glob.glob', (["(mcerunf + 'Outbs-001-00000-0_*.out')"], {}), "(mcerunf + 'Outbs-001-00000-0_*.out')\n", (10010, 10047), False, 'import glob\n'), ((10051, 10099), 'glob.glob', 'glob.glob', (["(mcerunf + '/Outbs-0001-00000-0_*.out')"], {}), "(mcerunf + '/Outbs-0001-00000-0_*.out')\n", (10060, 10099), False, 'import glob\n'), ((10203, 10238), 'glob.glob', 'glob.glob', (["(mcerunf + '/Outbs-*.out')"], {}), "(mcerunf + '/Outbs-*.out')\n", (10212, 10238), False, 'import glob\n'), ((10330, 10465), 'sys.exit', 'sys.exit', (['"""Outbs-001-00000-0_*.out not found in runfolder For AIMC-MCE second pass, all relevant input bases must be present"""'], {}), "(\n 
'Outbs-001-00000-0_*.out not found in runfolder For AIMC-MCE second pass, all relevant input bases must be present'\n )\n", (10338, 10465), False, 'import sys\n'), ((10476, 10519), 'glob.glob', 'glob.glob', (["(mcerunf + 'Clonetrack-001_*.out')"], {}), "(mcerunf + 'Clonetrack-001_*.out')\n", (10485, 10519), False, 'import glob\n'), ((10523, 10568), 'glob.glob', 'glob.glob', (["(mcerunf + '/Clonetrack-0001_*.out')"], {}), "(mcerunf + '/Clonetrack-0001_*.out')\n", (10532, 10568), False, 'import glob\n'), ((10669, 10709), 'glob.glob', 'glob.glob', (["(mcerunf + '/Clonetrack-*.out')"], {}), "(mcerunf + '/Clonetrack-*.out')\n", (10678, 10709), False, 'import glob\n'), ((10801, 10933), 'sys.exit', 'sys.exit', (['"""Clonetrack-001_*.out not found in runfolder For AIMC-MCE second pass, all relevant input bases must be present"""'], {}), "(\n 'Clonetrack-001_*.out not found in runfolder For AIMC-MCE second pass, all relevant input bases must be present'\n )\n", (10809, 10933), False, 'import sys\n'), ((10962, 11000), 'glob.glob', 'glob.glob', (["(mcerunf + 'Outbs-001_*.out')"], {}), "(mcerunf + 'Outbs-001_*.out')\n", (10971, 11000), False, 'import glob\n'), ((11004, 11044), 'glob.glob', 'glob.glob', (["(mcerunf + '/Outbs-0001_*.out')"], {}), "(mcerunf + '/Outbs-0001_*.out')\n", (11013, 11044), False, 'import glob\n'), ((11140, 11175), 'glob.glob', 'glob.glob', (["(mcerunf + '/Outbs-*.out')"], {}), "(mcerunf + '/Outbs-*.out')\n", (11149, 11175), False, 'import glob\n'), ((11267, 11318), 'sys.exit', 'sys.exit', (['"""Outbs-001_*.out not found in runfolder."""'], {}), "('Outbs-001_*.out not found in runfolder.')\n", (11275, 11318), False, 'import sys\n'), ((11387, 11430), 'glob.glob', 'glob.glob', (["(mcerunf + 'Clonetrack-001_*.out')"], {}), "(mcerunf + 'Clonetrack-001_*.out')\n", (11396, 11430), False, 'import glob\n'), ((11434, 11479), 'glob.glob', 'glob.glob', (["(mcerunf + '/Clonetrack-0001_*.out')"], {}), "(mcerunf + '/Clonetrack-0001_*.out')\n", (11443, 11479), False, 
'import glob\n'), ((11580, 11620), 'glob.glob', 'glob.glob', (["(mcerunf + '/Clonetrack-*.out')"], {}), "(mcerunf + '/Clonetrack-*.out')\n", (11589, 11620), False, 'import glob\n'), ((11712, 11767), 'sys.exit', 'sys.exit', (['"""Clonetrack-001_*.out not found in runfolder"""'], {}), "('Clonetrack-001_*.out not found in runfolder')\n", (11720, 11767), False, 'import sys\n'), ((12700, 12724), 'glob.glob', 'glob.glob', (['"""Outbs-*.out"""'], {}), "('Outbs-*.out')\n", (12709, 12724), False, 'import glob\n'), ((15152, 15168), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (15162, 15168), False, 'import csv\n'), ((3520, 3579), 'sys.exit', 'sys.exit', (['"""Not enough nodes selected. Must be 1 or greater"""'], {}), "('Not enough nodes selected. Must be 1 or greater')\n", (3528, 3579), False, 'import sys\n'), ((4471, 4515), 'sys.exit', 'sys.exit', (['"""Frequency flag msut be zero or 1"""'], {}), "('Frequency flag msut be zero or 1')\n", (4479, 4515), False, 'import sys\n'), ((4590, 4610), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (4608, 4610), False, 'import socket\n'), ((6849, 6872), 'inputs.systems.values', 'inputs.systems.values', ([], {}), '()\n', (6870, 6872), False, 'import inputs\n'), ((6910, 6936), 'inputs.parameters.values', 'inputs.parameters.values', ([], {}), '()\n', (6934, 6936), False, 'import inputs\n'), ((6974, 6995), 'inputs.Train.values', 'inputs.Train.values', ([], {}), '()\n', (6993, 6995), False, 'import inputs\n'), ((7033, 7054), 'inputs.clone.values', 'inputs.clone.values', ([], {}), '()\n', (7052, 7054), False, 'import inputs\n'), ((7092, 7114), 'inputs.paramz.values', 'inputs.paramz.values', ([], {}), '()\n', (7112, 7114), False, 'import inputs\n'), ((7152, 7169), 'inham.EL.values', 'inham.EL.values', ([], {}), '()\n', (7167, 7169), False, 'import inham\n'), ((7207, 7227), 'inputs.prop.values', 'inputs.prop.values', ([], {}), '()\n', (7225, 7227), False, 'import inputs\n'), ((8327, 8344), 'inham.MP.values', 
'inham.MP.values', ([], {}), '()\n', (8342, 8344), False, 'import inham\n'), ((10262, 10288), 'shutil.copy2', 'shutil.copy2', (['file', 'EXDIR1'], {}), '(file, EXDIR1)\n', (10274, 10288), False, 'import shutil\n'), ((10733, 10759), 'shutil.copy2', 'shutil.copy2', (['file', 'EXDIR1'], {}), '(file, EXDIR1)\n', (10745, 10759), False, 'import shutil\n'), ((11199, 11225), 'shutil.copy2', 'shutil.copy2', (['file', 'EXDIR1'], {}), '(file, EXDIR1)\n', (11211, 11225), False, 'import shutil\n'), ((11644, 11670), 'shutil.copy2', 'shutil.copy2', (['file', 'EXDIR1'], {}), '(file, EXDIR1)\n', (11656, 11670), False, 'import shutil\n'), ((12878, 12907), 'glob.glob', 'glob.glob', (['"""Clonetrack-*.out"""'], {}), "('Clonetrack-*.out')\n", (12887, 12907), False, 'import glob\n'), ((13453, 13469), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (13463, 13469), False, 'import csv\n'), ((15306, 15329), 'inputs.systems.values', 'inputs.systems.values', ([], {}), '()\n', (15327, 15329), False, 'import inputs\n'), ((15363, 15389), 'inputs.parameters.values', 'inputs.parameters.values', ([], {}), '()\n', (15387, 15389), False, 'import inputs\n'), ((15423, 15444), 'inputs.Train.values', 'inputs.Train.values', ([], {}), '()\n', (15442, 15444), False, 'import inputs\n'), ((15478, 15499), 'inputs.clone.values', 'inputs.clone.values', ([], {}), '()\n', (15497, 15499), False, 'import inputs\n'), ((15533, 15555), 'inputs.paramz.values', 'inputs.paramz.values', ([], {}), '()\n', (15553, 15555), False, 'import inputs\n'), ((15589, 15606), 'inham.EL.values', 'inham.EL.values', ([], {}), '()\n', (15604, 15606), False, 'import inham\n'), ((15640, 15660), 'inputs.prop.values', 'inputs.prop.values', ([], {}), '()\n', (15658, 15660), False, 'import inputs\n'), ((3609, 3675), 'sys.exit', 'sys.exit', (['"""Too many nodes. Maximum of 100 simultaneous submisions"""'], {}), "('Too many nodes. 
Maximum of 100 simultaneous submisions')\n", (3617, 3675), False, 'import sys\n'), ((7325, 7342), 'inham.MP.values', 'inham.MP.values', ([], {}), '()\n', (7340, 7342), False, 'import inham\n'), ((8436, 8453), 'inham.HP.values', 'inham.HP.values', ([], {}), '()\n', (8451, 8453), False, 'import inham\n'), ((8513, 8530), 'inham.SB.values', 'inham.SB.values', ([], {}), '()\n', (8528, 8530), False, 'import inham\n'), ((13619, 13642), 'inputs.systems.values', 'inputs.systems.values', ([], {}), '()\n', (13640, 13642), False, 'import inputs\n'), ((13680, 13706), 'inputs.parameters.values', 'inputs.parameters.values', ([], {}), '()\n', (13704, 13706), False, 'import inputs\n'), ((13744, 13765), 'inputs.Train.values', 'inputs.Train.values', ([], {}), '()\n', (13763, 13765), False, 'import inputs\n'), ((13803, 13824), 'inputs.clone.values', 'inputs.clone.values', ([], {}), '()\n', (13822, 13824), False, 'import inputs\n'), ((13862, 13884), 'inputs.paramz.values', 'inputs.paramz.values', ([], {}), '()\n', (13882, 13884), False, 'import inputs\n'), ((13922, 13939), 'inham.EL.values', 'inham.EL.values', ([], {}), '()\n', (13937, 13939), False, 'import inham\n'), ((13977, 13997), 'inputs.prop.values', 'inputs.prop.values', ([], {}), '()\n', (13995, 13997), False, 'import inputs\n'), ((15750, 15767), 'inham.MP.values', 'inham.MP.values', ([], {}), '()\n', (15765, 15767), False, 'import inham\n'), ((3703, 3762), 'sys.exit', 'sys.exit', (['"""Too many cores selected. Maximum of 8 available"""'], {}), "('Too many cores selected. 
Maximum of 8 available')\n", (3711, 3762), False, 'import sys\n'), ((7442, 7459), 'inham.HP.values', 'inham.HP.values', ([], {}), '()\n', (7457, 7459), False, 'import inham\n'), ((7527, 7544), 'inham.SB.values', 'inham.SB.values', ([], {}), '()\n', (7542, 7544), False, 'import inham\n'), ((14095, 14112), 'inham.MP.values', 'inham.MP.values', ([], {}), '()\n', (14110, 14112), False, 'import inham\n'), ((15859, 15876), 'inham.HP.values', 'inham.HP.values', ([], {}), '()\n', (15874, 15876), False, 'import inham\n'), ((15936, 15953), 'inham.SB.values', 'inham.SB.values', ([], {}), '()\n', (15951, 15953), False, 'import inham\n'), ((3790, 3849), 'sys.exit', 'sys.exit', (['"""Not enough cores selected. Must be 1 or greater"""'], {}), "('Not enough cores selected. Must be 1 or greater')\n", (3798, 3849), False, 'import sys\n'), ((14212, 14229), 'inham.HP.values', 'inham.HP.values', ([], {}), '()\n', (14227, 14229), False, 'import inham\n'), ((14297, 14314), 'inham.SB.values', 'inham.SB.values', ([], {}), '()\n', (14312, 14314), False, 'import inham\n'), ((3890, 3952), 'sys.exit', 'sys.exit', (['"""Too many repeats per folder. Must be less than 500"""'], {}), "('Too many repeats per folder. Must be less than 500')\n", (3898, 3952), False, 'import sys\n')]
|
# Chip: ESP32-WROOM-32 (ESP32-D0WDQ6)
# Microprocessor: Dual-Core Xtensa® 32-bit LX6
# Clock: 80MHz to 240MHz
# Crystal: 40MHz
# SPI flash: 4 MB
# Operating voltage: 3.0V-3.6V
# Operating current: 80mA
# Purpose: Read temperature and humidity from DHT sensors
# Notes: The DHT driver is implemented in software and works on all pins
import dht
import machine
# DHT11 wired to GPIO4: measure() performs the sensor transaction,
# temperature()/humidity() return the values cached by the last measure().
d = dht.DHT11(machine.Pin(4))
d.measure()
d.temperature()  # degrees Celsius (per the MicroPython dht driver)
d.humidity()     # relative humidity, percent
# Same call sequence for the higher-resolution DHT22 on the same pin.
d = dht.DHT22(machine.Pin(4))
d.measure()
d.temperature()
d.humidity()
|
[
"machine.Pin"
] |
[((376, 390), 'machine.Pin', 'machine.Pin', (['(4)'], {}), '(4)\n', (387, 390), False, 'import machine\n'), ((448, 462), 'machine.Pin', 'machine.Pin', (['(4)'], {}), '(4)\n', (459, 462), False, 'import machine\n')]
|
from django.db import models
from decimal import Decimal
from uuid import uuid4
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
def random_cover_filename(instance, filename):
    """Upload-path callback: store cover art under ``covers/``.

    The uploaded base name is replaced by a random uuid4 hex string (only
    the extension is kept), so uploads cannot collide or leak file names.
    """
    extension = filename.rsplit('.', 1)[-1]
    return 'covers/{0}.{1}'.format(uuid4().hex, extension)
def random_screen_filename(instance, filename):
    """Upload-path callback: store screenshots under ``screenshots/``.

    Keeps only the extension of the uploaded file and generates a random
    uuid4 hex base name to avoid collisions.
    """
    extension = filename.rsplit('.', 1)[-1]
    return 'screenshots/{0}.{1}'.format(uuid4().hex, extension)
def random_avatar_filename(instance, filename):
    """Upload-path callback: store avatars under ``avatars/``.

    Replaces the uploaded base name with a random uuid4 hex string,
    preserving only the extension.
    """
    extension = filename.rsplit('.', 1)[-1]
    return 'avatars/{0}.{1}'.format(uuid4().hex, extension)
class Platform(models.Model):
    """A gaming platform, grouped by a free-form category string."""
    name = models.CharField(max_length=50)
    category = models.CharField(max_length=50)
    # Optional abbreviated form of the name (may be blank).
    shorthand = models.CharField(max_length=50, blank=True, default='')
    class Meta:
        ordering = ['category', 'name']
    def __str__(self):
        return self.category + " - " + self.name
class Tag(models.Model):
    """A descriptive tag for games, grouped by a free-form category string."""
    name = models.CharField(max_length=50)
    category = models.CharField(max_length=50)
    description = models.CharField(max_length=250, blank=True)
    class Meta:
        ordering = ['category', 'name']
    def __str__(self):
        return self.category + " - " + self.name
class Genre(models.Model):
    """A game genre (flat list, no category grouping)."""
    name = models.CharField(max_length=50)
    def __str__(self):
        return self.name
class Game(models.Model):
    """A video game: artwork, classification data and external site links."""
    name = models.CharField(max_length=150)
    year = models.IntegerField()
    trailer_link = models.URLField(max_length=250, blank=True, default='')
    # Cover image plus up to four optional screenshots.
    image = models.ImageField(upload_to=random_cover_filename, null=True)
    screen1 = models.ImageField(upload_to=random_screen_filename, null=True, blank=True)
    screen2 = models.ImageField(upload_to=random_screen_filename, null=True, blank=True)
    screen3 = models.ImageField(upload_to=random_screen_filename, null=True, blank=True)
    screen4 = models.ImageField(upload_to=random_screen_filename, null=True, blank=True)
    genres = models.ManyToManyField(Genre, blank=True)
    tags = models.ManyToManyField(Tag, blank=True)
    description = models.TextField(blank=True, default='')
    # Alternative titles, free-form text.
    aliases = models.TextField(blank=True, default='')
    # Optional links to external sites/databases about this game.
    main_link = models.URLField(max_length=250, blank=True, default='', verbose_name="Main Website")
    wikipedia_link = models.URLField(max_length=250, blank=True, default='', verbose_name="Wikipedia")
    gamefaqs_link = models.URLField(max_length=250, blank=True, default='', verbose_name="GameFAQs Link")
    steam_link = models.URLField(max_length=250, blank=True, default='', verbose_name="Steam Link")
    howlongtobeat_link = models.URLField(max_length=250, blank=True, default='', verbose_name="HowLongToBeat Link")
    pcgamingwiki_link = models.URLField(max_length=250, blank=True, default='', verbose_name="PCGamingWiki Link")
    winehq_link = models.URLField(max_length=250, blank=True, default='', verbose_name="WineHQ Link")
    mobygames_link = models.URLField(max_length=250, blank=True, default='', verbose_name="MobyGames Link")
    vndb_link = models.URLField(max_length=250, blank=True, default='', verbose_name="VNDB Link")
    # NOTE(review): presumably a cached popularity score recomputed
    # elsewhere ("temp_" prefix) -- confirm before relying on it.
    temp_pop_score = models.DecimalField(max_digits=5, decimal_places=2, blank=True, null=True)
    def __str__(self):
        return self.name + " (" + str(self.year) + ")"
@receiver(post_save, sender=User)
def create_userprofile_signal(sender, instance, created, **kwargs):
    """post_save handler: create a UserProfile for every newly created User.

    New profiles start with the "Sexual Content" tag banned by default.
    """
    if created:
        UserProfile.objects.create(user=instance)
        # NOTE(review): raises Tag.DoesNotExist if the tag row is absent --
        # assumes it is seeded by a fixture/migration; verify.
        sexual_content = Tag.objects.get(name="Sexual Content")
        instance.userprofile.banned_tags.add(sexual_content)
        instance.userprofile.save()
class UserGameListEntry(models.Model):
    """One entry on a user's game list: a catalogued Game plus play data."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    game = models.ForeignKey(Game, on_delete=models.CASCADE)
    platform = models.ForeignKey(Platform, blank=True, null=True, on_delete=models.CASCADE)
    # (code, label) choices for the play status field below.
    statuses = [
        ("PLAN", "Plan to Play"),
        ("PLAY", "Playing"),
        ("CMPL", "Completed"),
        ("DROP", "Dropped"),
        ("HOLD", "Paused"),
        ("IMPT", "Imported")
    ]
    status = models.CharField(max_length=4, choices=statuses, default="PLAN")
    # User rating on a 0-10 scale with two decimals (max_digits=4).
    score = models.DecimalField(max_digits=4, decimal_places=2, blank=True, null=True)
    hours = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)
    comments = models.CharField(max_length=500, blank=True, default='')
    start_date = models.DateField(blank=True, null=True)
    stop_date = models.DateField(blank=True, null=True)
    times_replayed = models.IntegerField(default=0)
    def __str__(self):
        return self.user.username + " - " + self.game.name
class ManualUserGameListEntry(models.Model):
    """Like UserGameListEntry, but for games not in the catalogue:
    stores a free-text name instead of a Game foreign key."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=150)
    platform = models.ForeignKey(Platform, blank=True, null=True, on_delete=models.CASCADE)
    # Same status codes as UserGameListEntry (listed in a different order).
    statuses = [
        ("PLAY", "Playing"),
        ("CMPL", "Completed"),
        ("DROP", "Dropped"),
        ("HOLD", "Paused"),
        ("PLAN", "Plan to Play"),
        ("IMPT", "Imported")
    ]
    status = models.CharField(max_length=4, choices=statuses, default="PLAN")
    score = models.DecimalField(max_digits=4, decimal_places=2, blank=True, null=True)
    hours = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)
    comments = models.CharField(max_length=500, blank=True, default='')
    start_date = models.DateField(blank=True, null=True)
    stop_date = models.DateField(blank=True, null=True)
    times_replayed = models.IntegerField(default=0)
    # NOTE(review): name suggests this entry is never offered for migration
    # to a catalogued Game -- confirm against the migration code.
    never_migrate = models.BooleanField(default=False, null=True)
    def __str__(self):
        return self.user.username + " - " + self.name
class UserGameStatus(models.Model):
    """A status-change event (activity feed item) that other users can like."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    game = models.ForeignKey(Game, on_delete=models.CASCADE)
    statuses = [
        ("PLAY", "Playing"),
        ("CMPL", "Completed"),
        ("DROP", "Dropped"),
        ("HOLD", "Paused"),
        ("PLAN", "Plan to Play")
    ]
    status = models.CharField(max_length=4, choices=statuses)
    created_at = models.DateTimeField(auto_now_add=True)
    liked_by = models.ManyToManyField(User, blank=True, related_name='usergamestatus_liked_by')
    def __str__(self):
        return self.user.username + " " + self.status + " " + self.game.name
class Notification(models.Model):
    """A notification for a user, referencing another object generically
    via a type tag plus the referenced object's integer id."""
    created_at = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    notif_type = models.CharField(max_length=10)
    notif_object_id = models.IntegerField()
class Recommendation(models.Model):
    """A recommended Game occupying one of a user's recommendation slots."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    slot = models.IntegerField()
    game = models.ForeignKey(Game, on_delete=models.CASCADE)
    # NOTE(review): opaque short code describing why/how the game was
    # recommended -- confirm format with the recommender code.
    rec_data = models.CharField(max_length=10)
class CollectionType(models.Model):
    """A category for Collection objects (see Collection.category)."""
    name = models.CharField(max_length=150)
    description = models.TextField(blank=True, default='')
    def __str__(self):
        return self.name
class Collection(models.Model):
    """A curated set of games, grouped under a CollectionType."""
    name = models.CharField(max_length=150)
    category = models.ForeignKey(CollectionType, on_delete=models.CASCADE)
    description = models.TextField(blank=True, default='')
    games = models.ManyToManyField(Game, blank=True)
    def __str__(self):
        return self.category.name + " - " + self.name
class UserProfile(models.Model):
    """Per-user profile data; auto-created by create_userprofile_signal."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    avatar = models.ImageField(upload_to=random_avatar_filename, default='avatars/default.png')
    # Content-filtering preferences: tags/games/collections to hide.
    banned_tags = models.ManyToManyField(Tag, blank=True)
    ignored_games = models.ManyToManyField(Game, blank=True)
    ignored_collections = models.ManyToManyField(Collection, blank=True)
    followed_users = models.ManyToManyField(User, blank=True, related_name='userprofile_followed_users')
class UserSettings(models.Model):
    """Per-user display settings (currently only the rating system)."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # (code, label) choices for how scores are displayed.
    rating_systems = [
        ("SMIL","3-Point Smiley"),
        ("STAR","5-Point Star"),
        ("DCML","10-Point Decimal")
    ]
    score_type = models.CharField(max_length=4, choices=rating_systems, default="DCML")
class TagAdditionRequest(models.Model):
    """A user's request to attach an existing Tag to a Game."""
    game = models.ForeignKey(Game, on_delete=models.CASCADE)
    tag = models.ForeignKey(Tag, on_delete=models.CASCADE)
    requested_by = models.ForeignKey(User, on_delete=models.CASCADE)
    comments = models.CharField(max_length=1000, blank=True, default='')
    def __str__(self):
        return self.game.name + " -> " + self.tag.name
|
[
"django.db.models.URLField",
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.OneToOneField",
"uuid.uuid4",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.dispatch.receiver",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.IntegerField",
"django.db.models.DecimalField",
"django.db.models.DateField",
"django.db.models.DateTimeField"
] |
[((3406, 3438), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (3414, 3438), False, 'from django.dispatch import receiver\n'), ((744, 775), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (760, 775), False, 'from django.db import models\n'), ((791, 822), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (807, 822), False, 'from django.db import models\n'), ((839, 894), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(True)', 'default': '""""""'}), "(max_length=50, blank=True, default='')\n", (855, 894), False, 'from django.db import models\n'), ((1062, 1093), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1078, 1093), False, 'from django.db import models\n'), ((1109, 1140), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1125, 1140), False, 'from django.db import models\n'), ((1159, 1203), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'blank': '(True)'}), '(max_length=250, blank=True)\n', (1175, 1203), False, 'from django.db import models\n'), ((1372, 1403), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1388, 1403), False, 'from django.db import models\n'), ((1490, 1522), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (1506, 1522), False, 'from django.db import models\n'), ((1534, 1555), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1553, 1555), False, 'from django.db import models\n'), ((1575, 1630), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(250)', 'blank': '(True)', 'default': '""""""'}), "(max_length=250, blank=True, default='')\n", (1590, 1630), False, 
'from django.db import models\n'), ((1643, 1704), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'random_cover_filename', 'null': '(True)'}), '(upload_to=random_cover_filename, null=True)\n', (1660, 1704), False, 'from django.db import models\n'), ((1719, 1793), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'random_screen_filename', 'null': '(True)', 'blank': '(True)'}), '(upload_to=random_screen_filename, null=True, blank=True)\n', (1736, 1793), False, 'from django.db import models\n'), ((1808, 1882), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'random_screen_filename', 'null': '(True)', 'blank': '(True)'}), '(upload_to=random_screen_filename, null=True, blank=True)\n', (1825, 1882), False, 'from django.db import models\n'), ((1897, 1971), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'random_screen_filename', 'null': '(True)', 'blank': '(True)'}), '(upload_to=random_screen_filename, null=True, blank=True)\n', (1914, 1971), False, 'from django.db import models\n'), ((1986, 2060), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'random_screen_filename', 'null': '(True)', 'blank': '(True)'}), '(upload_to=random_screen_filename, null=True, blank=True)\n', (2003, 2060), False, 'from django.db import models\n'), ((2074, 2115), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Genre'], {'blank': '(True)'}), '(Genre, blank=True)\n', (2096, 2115), False, 'from django.db import models\n'), ((2127, 2166), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Tag'], {'blank': '(True)'}), '(Tag, blank=True)\n', (2149, 2166), False, 'from django.db import models\n'), ((2185, 2225), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""'}), "(blank=True, default='')\n", (2201, 2225), False, 'from django.db import models\n'), ((2240, 2280), 'django.db.models.TextField', 'models.TextField', 
([], {'blank': '(True)', 'default': '""""""'}), "(blank=True, default='')\n", (2256, 2280), False, 'from django.db import models\n'), ((2297, 2386), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(250)', 'blank': '(True)', 'default': '""""""', 'verbose_name': '"""Main Website"""'}), "(max_length=250, blank=True, default='', verbose_name=\n 'Main Website')\n", (2312, 2386), False, 'from django.db import models\n'), ((2403, 2489), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(250)', 'blank': '(True)', 'default': '""""""', 'verbose_name': '"""Wikipedia"""'}), "(max_length=250, blank=True, default='', verbose_name=\n 'Wikipedia')\n", (2418, 2489), False, 'from django.db import models\n'), ((2505, 2595), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(250)', 'blank': '(True)', 'default': '""""""', 'verbose_name': '"""GameFAQs Link"""'}), "(max_length=250, blank=True, default='', verbose_name=\n 'GameFAQs Link')\n", (2520, 2595), False, 'from django.db import models\n'), ((2608, 2695), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(250)', 'blank': '(True)', 'default': '""""""', 'verbose_name': '"""Steam Link"""'}), "(max_length=250, blank=True, default='', verbose_name=\n 'Steam Link')\n", (2623, 2695), False, 'from django.db import models\n'), ((2716, 2811), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(250)', 'blank': '(True)', 'default': '""""""', 'verbose_name': '"""HowLongToBeat Link"""'}), "(max_length=250, blank=True, default='', verbose_name=\n 'HowLongToBeat Link')\n", (2731, 2811), False, 'from django.db import models\n'), ((2831, 2925), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(250)', 'blank': '(True)', 'default': '""""""', 'verbose_name': '"""PCGamingWiki Link"""'}), "(max_length=250, blank=True, default='', verbose_name=\n 'PCGamingWiki Link')\n", (2846, 2925), False, 'from django.db import models\n'), ((2939, 3027), 
'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(250)', 'blank': '(True)', 'default': '""""""', 'verbose_name': '"""WineHQ Link"""'}), "(max_length=250, blank=True, default='', verbose_name=\n 'WineHQ Link')\n", (2954, 3027), False, 'from django.db import models\n'), ((3044, 3135), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(250)', 'blank': '(True)', 'default': '""""""', 'verbose_name': '"""MobyGames Link"""'}), "(max_length=250, blank=True, default='', verbose_name=\n 'MobyGames Link')\n", (3059, 3135), False, 'from django.db import models\n'), ((3147, 3233), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(250)', 'blank': '(True)', 'default': '""""""', 'verbose_name': '"""VNDB Link"""'}), "(max_length=250, blank=True, default='', verbose_name=\n 'VNDB Link')\n", (3162, 3233), False, 'from django.db import models\n'), ((3250, 3324), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(5)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=5, decimal_places=2, blank=True, null=True)\n', (3269, 3324), False, 'from django.db import models\n'), ((3790, 3839), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (3807, 3839), False, 'from django.db import models\n'), ((3851, 3900), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Game'], {'on_delete': 'models.CASCADE'}), '(Game, on_delete=models.CASCADE)\n', (3868, 3900), False, 'from django.db import models\n'), ((3916, 3992), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Platform'], {'blank': '(True)', 'null': '(True)', 'on_delete': 'models.CASCADE'}), '(Platform, blank=True, null=True, on_delete=models.CASCADE)\n', (3933, 3992), False, 'from django.db import models\n'), ((4209, 4273), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'choices': 'statuses', 'default': 
'"""PLAN"""'}), "(max_length=4, choices=statuses, default='PLAN')\n", (4225, 4273), False, 'from django.db import models\n'), ((4286, 4360), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(4)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=4, decimal_places=2, blank=True, null=True)\n', (4305, 4360), False, 'from django.db import models\n'), ((4373, 4448), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=10, decimal_places=2, blank=True, null=True)\n', (4392, 4448), False, 'from django.db import models\n'), ((4464, 4520), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'blank': '(True)', 'default': '""""""'}), "(max_length=500, blank=True, default='')\n", (4480, 4520), False, 'from django.db import models\n'), ((4538, 4577), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4554, 4577), False, 'from django.db import models\n'), ((4594, 4633), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4610, 4633), False, 'from django.db import models\n'), ((4655, 4685), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4674, 4685), False, 'from django.db import models\n'), ((4826, 4875), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (4843, 4875), False, 'from django.db import models\n'), ((4887, 4919), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (4903, 4919), False, 'from django.db import models\n'), ((4935, 5011), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Platform'], {'blank': '(True)', 'null': '(True)', 'on_delete': 
'models.CASCADE'}), '(Platform, blank=True, null=True, on_delete=models.CASCADE)\n', (4952, 5011), False, 'from django.db import models\n'), ((5228, 5292), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'choices': 'statuses', 'default': '"""PLAN"""'}), "(max_length=4, choices=statuses, default='PLAN')\n", (5244, 5292), False, 'from django.db import models\n'), ((5305, 5379), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(4)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=4, decimal_places=2, blank=True, null=True)\n', (5324, 5379), False, 'from django.db import models\n'), ((5392, 5467), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=10, decimal_places=2, blank=True, null=True)\n', (5411, 5467), False, 'from django.db import models\n'), ((5483, 5539), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'blank': '(True)', 'default': '""""""'}), "(max_length=500, blank=True, default='')\n", (5499, 5539), False, 'from django.db import models\n'), ((5557, 5596), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5573, 5596), False, 'from django.db import models\n'), ((5613, 5652), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5629, 5652), False, 'from django.db import models\n'), ((5674, 5704), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5693, 5704), False, 'from django.db import models\n'), ((5725, 5770), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'null': '(True)'}), '(default=False, null=True)\n', (5744, 5770), False, 'from django.db import models\n'), ((5897, 5946), 
'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (5914, 5946), False, 'from django.db import models\n'), ((5958, 6007), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Game'], {'on_delete': 'models.CASCADE'}), '(Game, on_delete=models.CASCADE)\n', (5975, 6007), False, 'from django.db import models\n'), ((6194, 6242), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'choices': 'statuses'}), '(max_length=4, choices=statuses)\n', (6210, 6242), False, 'from django.db import models\n'), ((6260, 6299), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (6280, 6299), False, 'from django.db import models\n'), ((6315, 6400), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {'blank': '(True)', 'related_name': '"""usergamestatus_liked_by"""'}), "(User, blank=True, related_name='usergamestatus_liked_by'\n )\n", (6337, 6400), False, 'from django.db import models\n'), ((6549, 6588), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (6569, 6588), False, 'from django.db import models\n'), ((6600, 6649), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (6617, 6649), False, 'from django.db import models\n'), ((6667, 6698), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (6683, 6698), False, 'from django.db import models\n'), ((6721, 6742), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (6740, 6742), False, 'from django.db import models\n'), ((6791, 6840), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (6808, 6840), False, 'from django.db import models\n'), 
((6852, 6873), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (6871, 6873), False, 'from django.db import models\n'), ((6885, 6934), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Game'], {'on_delete': 'models.CASCADE'}), '(Game, on_delete=models.CASCADE)\n', (6902, 6934), False, 'from django.db import models\n'), ((6950, 6981), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (6966, 6981), False, 'from django.db import models\n'), ((7030, 7062), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (7046, 7062), False, 'from django.db import models\n'), ((7081, 7121), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""'}), "(blank=True, default='')\n", (7097, 7121), False, 'from django.db import models\n'), ((7215, 7247), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (7231, 7247), False, 'from django.db import models\n'), ((7263, 7322), 'django.db.models.ForeignKey', 'models.ForeignKey', (['CollectionType'], {'on_delete': 'models.CASCADE'}), '(CollectionType, on_delete=models.CASCADE)\n', (7280, 7322), False, 'from django.db import models\n'), ((7341, 7381), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""'}), "(blank=True, default='')\n", (7357, 7381), False, 'from django.db import models\n'), ((7394, 7434), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Game'], {'blank': '(True)'}), '(Game, blank=True)\n', (7416, 7434), False, 'from django.db import models\n'), ((7558, 7610), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (7578, 7610), False, 'from django.db import models\n'), ((7624, 7711), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 
'random_avatar_filename', 'default': '"""avatars/default.png"""'}), "(upload_to=random_avatar_filename, default=\n 'avatars/default.png')\n", (7641, 7711), False, 'from django.db import models\n'), ((7725, 7764), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Tag'], {'blank': '(True)'}), '(Tag, blank=True)\n', (7747, 7764), False, 'from django.db import models\n'), ((7785, 7825), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Game'], {'blank': '(True)'}), '(Game, blank=True)\n', (7807, 7825), False, 'from django.db import models\n'), ((7852, 7898), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Collection'], {'blank': '(True)'}), '(Collection, blank=True)\n', (7874, 7898), False, 'from django.db import models\n'), ((7920, 8008), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {'blank': '(True)', 'related_name': '"""userprofile_followed_users"""'}), "(User, blank=True, related_name=\n 'userprofile_followed_users')\n", (7942, 8008), False, 'from django.db import models\n'), ((8050, 8102), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (8070, 8102), False, 'from django.db import models\n'), ((8253, 8323), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'choices': 'rating_systems', 'default': '"""DCML"""'}), "(max_length=4, choices=rating_systems, default='DCML')\n", (8269, 8323), False, 'from django.db import models\n'), ((8376, 8425), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Game'], {'on_delete': 'models.CASCADE'}), '(Game, on_delete=models.CASCADE)\n', (8393, 8425), False, 'from django.db import models\n'), ((8436, 8484), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Tag'], {'on_delete': 'models.CASCADE'}), '(Tag, on_delete=models.CASCADE)\n', (8453, 8484), False, 'from django.db import models\n'), ((8504, 8553), 'django.db.models.ForeignKey', 
'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (8521, 8553), False, 'from django.db import models\n'), ((8569, 8626), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'blank': '(True)', 'default': '""""""'}), "(max_length=1000, blank=True, default='')\n", (8585, 8626), False, 'from django.db import models\n'), ((320, 327), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (325, 327), False, 'from uuid import uuid4\n'), ((483, 490), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (488, 490), False, 'from uuid import uuid4\n'), ((651, 658), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (656, 658), False, 'from uuid import uuid4\n')]
|
import torch
from torch import nn
class Tacotron2Loss(nn.Module):
    """Tacotron 2 training loss.

    Sum of:
      * MSE between the target mel spectrogram and both the pre- and
        post-net mel predictions, and
      * binary cross-entropy (with logits) on the stop-gate predictions.
    """

    def __init__(self):
        super().__init__()
        # Build the criteria once instead of allocating new loss modules
        # on every forward() call (the original re-created them per call).
        self.mse = nn.MSELoss()
        self.bce = nn.BCEWithLogitsLoss()

    def forward(self, model_output, targets):
        """Compute the combined loss.

        :param model_output: (mel_out, mel_out_postnet, gate_out, _) tuple
        :param targets: (mel_target, gate_target) tuple
        :return: scalar loss tensor
        """
        mel_target, gate_target = targets[0], targets[1]
        # Targets are constants -- make sure no gradient flows into them.
        mel_target.requires_grad = False
        gate_target.requires_grad = False
        gate_target = gate_target.view(-1, 1)
        mel_out, mel_out_postnet, gate_out, _ = model_output
        gate_out = gate_out.view(-1, 1)
        mel_loss = self.mse(mel_out, mel_target) + \
            self.mse(mel_out_postnet, mel_target)
        gate_loss = self.bce(gate_out, gate_target)
        return mel_loss + gate_loss
class TPCWLoss(nn.Module):
    """Cross-entropy between predicted combination weights and target
    GST weight distributions (soft labels)."""

    def __init__(self):
        super().__init__()

    @staticmethod
    def cross_entropy(w_combination, target):
        # Soft-label cross-entropy, averaged over the batch dimension.
        per_sample = (target * torch.log(w_combination)).sum(dim=1)
        return -per_sample.mean()

    def forward(self, w_combination, target):
        """Return the cross-entropy between predicted and target weights.

        :param w_combination: predicted weights, shape (batch, token_num)
            or (batch, atn_head_num, token_num)
        :param target: target GST weights with the same shape
        :return: scalar loss (summed over attention heads if present)
        """
        if w_combination.dim() == 2:
            return self.cross_entropy(w_combination, target)
        head_losses = [
            self.cross_entropy(w_combination[:, head, :], target[:, head, :])
            for head in range(w_combination.size(1))
        ]
        return sum(head_losses)
class TPSELoss(nn.Module):
    """L1 (mean absolute error) loss between predicted and target GSTs."""

    def __init__(self):
        super().__init__()
        # Single criterion instance reused across calls.
        self.l1 = nn.L1Loss()

    def forward(self, predicted_tokens, target):
        """Return the mean absolute difference between the two embeddings.

        :param predicted_tokens: tensor of shape (batch_size, token_dim)
        :param target: tensor of shape (batch_size, token_dim)
        :return: scalar L1 loss
        """
        loss = self.l1(predicted_tokens, target)
        return loss
|
[
"torch.nn.MSELoss",
"torch.nn.BCEWithLogitsLoss",
"torch.log",
"torch.nn.L1Loss"
] |
[((2029, 2040), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (2038, 2040), False, 'from torch import nn\n'), ((604, 626), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (624, 626), False, 'from torch import nn\n'), ((492, 504), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (502, 504), False, 'from torch import nn\n'), ((542, 554), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (552, 554), False, 'from torch import nn\n'), ((857, 881), 'torch.log', 'torch.log', (['w_combination'], {}), '(w_combination)\n', (866, 881), False, 'import torch\n')]
|
import xlrd
import xlwt
import pylightxl as xl
import os
import sys
import re
import PySimpleGUI as sg
from collections import OrderedDict
class SalaryCounter:
def __init__(self, values):
self.values = values
self.payrolls = {}
self.headers = []
self.counter = False
self.main_col_num = None
self.sum = None
@staticmethod
def find_path():
if getattr(sys, 'frozen', False):
# one-file
application_path = os.path.dirname(sys.executable)
# one-folder
# application_path = sys._MEIPASS
else:
application_path = os.path.dirname(os.path.abspath(__file__))
return application_path
def parse_directory(self):
application_path = self.find_path()
for dir_path, _, file_names in os.walk(application_path):
if dir_path == application_path:
for file_name in file_names:
self.check_format(file_name=file_name)
def check_format(self, file_name: str):
pattern = re.compile(r"^([^\\]{1,50}).(xls|xlsx)$")
result = re.search(pattern=pattern, string=file_name)
if result:
if result[2] == 'xls' or result[2] == 'xlsx':
self.payrolls[file_name] = result[2]
    def read(self, payroll: str, format_: str):
        """Read a payroll workbook and return the data rows of its table.

        :param payroll: path to the workbook file
        :param format_: 'xls' (read via xlrd) or 'xlsx' (read via pylightxl)
        :return: list of rows; only cells inside the table region (between
            the header row and the "Итого" row, as tracked by _read_helper)
            are kept, and the first two collected rows are dropped
        """
        to_count = []
        if format_ == 'xls':
            book = xlrd.open_workbook(payroll)
            sheet = book.sheet_by_index(0)
            for row in range(sheet.nrows):
                new_row = []
                for element in sheet.row(row):
                    # _read_helper returns the cell value only while inside
                    # the table region; falsy cells (empty, 0) are skipped.
                    value = self._read_helper(value=element.value, row=sheet.row(row), format_=format_)
                    if value:
                        new_row.append(value)
                if new_row:
                    to_count.append(new_row)
        elif format_ == 'xlsx':
            db = xl.readxl(fn=payroll)
            ws_name = db.ws_names[0]
            for row in db.ws(ws=ws_name).rows:
                new_row = []
                for element in row:
                    value = self._read_helper(value=element, row=row, format_=format_)
                    if value:
                        new_row.append(value)
                if new_row:
                    to_count.append(new_row)
        # Drop the first two collected rows (the header row and, presumably,
        # one more preamble row -- TODO confirm against a real workbook).
        # NOTE(review): raises IndexError if fewer than two rows were read.
        to_count.pop(0)
        to_count.pop(0)
        return to_count
def _read_helper(self, value, row, format_: str):
if value == "Фамилия, имя, отчество":
if format_ == 'xls':
self.headers = [el.value for el in row if
el.value and el.value != "Расписка в получении"]
elif format_ == 'xlsx':
self.headers = [el for el in row if el and el != "Расписка в получении"]
self.main_col_num = ([self.headers.index(i) for i in self.headers if i == "Сумма"])[0]
self.headers.extend([str(i) + "р" for i in self.values])
self.counter = True
elif value == "Итого":
self.counter = False
if self.counter and value:
return value
def count(self, data: list):
result = []
for_person = None
for row in data:
for num, element in enumerate(row):
if num == self.main_col_num:
for_person = self._count_one_instance(salary=element)
if for_person:
row.extend(for_person)
result.append(row)
return result
def _count_one_instance(self, salary: float):
result = []
for value in self.values:
amount = 0
if value not in self.sum:
self.sum.setdefault(value, 0)
while salary >= value:
salary -= value
amount += 1
self.sum[value] += 1
result.append(amount)
return result
def save(self, data: list, format_: str, name: str):
data.append(self.sum.keys())
data.append(self.sum.values())
name = name[:len(name) - (len(format_) + 1)]
file_name = name + "_расчёт" + "." + format_
sheetname = "Зарплаты"
if format_ == 'xls':
workbook = xlwt.Workbook()
sheet = workbook.add_sheet(sheetname=sheetname)
for col_num, column in enumerate(self.headers):
sheet.write(r=0, c=col_num, label=column)
for num, row in enumerate(data, start=1):
for col_num, column in enumerate(row):
sheet.write(r=num, c=col_num, label=column)
workbook.save(filename_or_stream=file_name)
elif format_ == 'xlsx':
new_db = xl.Database()
new_db.add_ws(ws=sheetname)
for col_num, column in enumerate(self.headers, start=1):
new_db.ws(ws=sheetname).update_index(row=1, col=col_num, val=column)
for num, row in enumerate(data, start=2):
for col_num, column in enumerate(row, start=1):
new_db.ws(ws=sheetname).update_index(row=num, col=col_num, val=column)
xl.writexl(db=new_db, fn=file_name)
def run(self):
self.parse_directory()
if not self.payrolls:
raise ImportError("Файлов с расширением .xls/.xlsx в папке нет")
for payroll, format_ in self.payrolls.items():
self.sum = OrderedDict()
to_count = self.read(payroll=payroll, format_=format_)
result = self.count(data=to_count)
self.save(data=result, name=payroll, format_=format_)
class Interface:
def __init__(self):
self.title = 'Счётчик купюр/монет'
self.theme = 'DarkAmber'
self.layout = list()
self.main_window = None
self.values = [5000, 2000, 1000, 500, 200, 100, 50, 10, 5, 2, 1, 0.50, 0.10]
def run(self):
self.start_window()
while True:
event, values = self.main_window.read()
if event == sg.WIN_CLOSED or event == "Отмена":
break
elif event == "Посчитать":
values_to_use = []
for value in self.values:
if values[f'check{value}']:
values_to_use.append(value)
try:
counter = SalaryCounter(values=values_to_use)
counter.run()
except Exception as exc:
exc_popup = self.popup_window(title="Ошибка", text=exc)
pop_event, pop_value = exc_popup.read()
if pop_event == sg.WIN_CLOSED:
exc_popup.close()
self.main_window.close()
def popup_window(self, text: Exception, title: str):
sg.theme(self.theme)
layout = [[sg.Text(text=text)]]
size = 100
popup = sg.Window(
title=title,
layout=layout,
default_button_element_size=(10, 2),
size=(size * 5, size),
element_padding=(10, 10),
auto_size_buttons=False,
)
return popup
def start_window(self):
sg.theme(self.theme)
self.layout.append([sg.Text(text="Пересчитать платёжные ведомости формата .xls/.xlsx, "
"находящиеся в папке?")])
self.layout.append([sg.Text(text="Используемые купюры:")])
for value in self.values:
spaces = (4 - len(str(value))) * " "
self.layout.append([sg.Text(f"{value}{spaces}"), sg.Checkbox(text="", default=True, key=f'check{value}')])
self.layout.append([sg.Button("Посчитать"), sg.Button("Отмена")])
window = sg.Window(
title=self.title,
layout=self.layout,
default_button_element_size=(10, 2),
size=(550, 500),
element_padding=(2, 2),
auto_size_buttons=False,
)
self.main_window = window
if __name__ == '__main__':
ui = Interface()
ui.run()
|
[
"PySimpleGUI.Button",
"PySimpleGUI.Checkbox",
"os.path.abspath",
"xlwt.Workbook",
"PySimpleGUI.theme",
"os.path.dirname",
"os.walk",
"xlrd.open_workbook",
"PySimpleGUI.Text",
"PySimpleGUI.Window",
"pylightxl.writexl",
"collections.OrderedDict",
"pylightxl.readxl",
"pylightxl.Database",
"re.search",
"re.compile"
] |
[((837, 862), 'os.walk', 'os.walk', (['application_path'], {}), '(application_path)\n', (844, 862), False, 'import os\n'), ((1076, 1118), 're.compile', 're.compile', (['"""^([^\\\\\\\\]{1,50}).(xls|xlsx)$"""'], {}), "('^([^\\\\\\\\]{1,50}).(xls|xlsx)$')\n", (1086, 1118), False, 'import re\n'), ((1135, 1179), 're.search', 're.search', ([], {'pattern': 'pattern', 'string': 'file_name'}), '(pattern=pattern, string=file_name)\n', (1144, 1179), False, 'import re\n'), ((6785, 6805), 'PySimpleGUI.theme', 'sg.theme', (['self.theme'], {}), '(self.theme)\n', (6793, 6805), True, 'import PySimpleGUI as sg\n'), ((6882, 7034), 'PySimpleGUI.Window', 'sg.Window', ([], {'title': 'title', 'layout': 'layout', 'default_button_element_size': '(10, 2)', 'size': '(size * 5, size)', 'element_padding': '(10, 10)', 'auto_size_buttons': '(False)'}), '(title=title, layout=layout, default_button_element_size=(10, 2),\n size=(size * 5, size), element_padding=(10, 10), auto_size_buttons=False)\n', (6891, 7034), True, 'import PySimpleGUI as sg\n'), ((7172, 7192), 'PySimpleGUI.theme', 'sg.theme', (['self.theme'], {}), '(self.theme)\n', (7180, 7192), True, 'import PySimpleGUI as sg\n'), ((7713, 7868), 'PySimpleGUI.Window', 'sg.Window', ([], {'title': 'self.title', 'layout': 'self.layout', 'default_button_element_size': '(10, 2)', 'size': '(550, 500)', 'element_padding': '(2, 2)', 'auto_size_buttons': '(False)'}), '(title=self.title, layout=self.layout, default_button_element_size\n =(10, 2), size=(550, 500), element_padding=(2, 2), auto_size_buttons=False)\n', (7722, 7868), True, 'import PySimpleGUI as sg\n'), ((499, 530), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (514, 530), False, 'import os\n'), ((1429, 1456), 'xlrd.open_workbook', 'xlrd.open_workbook', (['payroll'], {}), '(payroll)\n', (1447, 1456), False, 'import xlrd\n'), ((4236, 4251), 'xlwt.Workbook', 'xlwt.Workbook', ([], {}), '()\n', (4249, 4251), False, 'import xlwt\n'), ((5413, 5426), 
'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5424, 5426), False, 'from collections import OrderedDict\n'), ((663, 688), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (678, 688), False, 'import os\n'), ((1921, 1942), 'pylightxl.readxl', 'xl.readxl', ([], {'fn': 'payroll'}), '(fn=payroll)\n', (1930, 1942), True, 'import pylightxl as xl\n'), ((4712, 4725), 'pylightxl.Database', 'xl.Database', ([], {}), '()\n', (4723, 4725), True, 'import pylightxl as xl\n'), ((5141, 5176), 'pylightxl.writexl', 'xl.writexl', ([], {'db': 'new_db', 'fn': 'file_name'}), '(db=new_db, fn=file_name)\n', (5151, 5176), True, 'import pylightxl as xl\n'), ((6825, 6843), 'PySimpleGUI.Text', 'sg.Text', ([], {'text': 'text'}), '(text=text)\n', (6832, 6843), True, 'import PySimpleGUI as sg\n'), ((7222, 7315), 'PySimpleGUI.Text', 'sg.Text', ([], {'text': '"""Пересчитать платёжные ведомости формата .xls/.xlsx, находящиеся в папке?"""'}), "(text=\n 'Пересчитать платёжные ведомости формата .xls/.xlsx, находящиеся в папке?')\n", (7229, 7315), True, 'import PySimpleGUI as sg\n'), ((7380, 7416), 'PySimpleGUI.Text', 'sg.Text', ([], {'text': '"""Используемые купюры:"""'}), "(text='Используемые купюры:')\n", (7387, 7416), True, 'import PySimpleGUI as sg\n'), ((7649, 7671), 'PySimpleGUI.Button', 'sg.Button', (['"""Посчитать"""'], {}), "('Посчитать')\n", (7658, 7671), True, 'import PySimpleGUI as sg\n'), ((7682, 7701), 'PySimpleGUI.Button', 'sg.Button', (['"""Отмена"""'], {}), "('Отмена')\n", (7691, 7701), True, 'import PySimpleGUI as sg\n'), ((7534, 7561), 'PySimpleGUI.Text', 'sg.Text', (['f"""{value}{spaces}"""'], {}), "(f'{value}{spaces}')\n", (7541, 7561), True, 'import PySimpleGUI as sg\n'), ((7563, 7618), 'PySimpleGUI.Checkbox', 'sg.Checkbox', ([], {'text': '""""""', 'default': '(True)', 'key': 'f"""check{value}"""'}), "(text='', default=True, key=f'check{value}')\n", (7574, 7618), True, 'import PySimpleGUI as sg\n')]
|
#!/usr/bin/env python
# coding=utf-8
__author__ = 'zhaoliang'
__email__ = '<EMAIL>'
__created__ = '15/12/19'
import os
CSRF_ENABLED = True
SECRET_KEY = 'guess what you can and try it'
DATABASE_URI = {
'host': 'localhost',
'user': 'root',
'passwd': '<PASSWORD>',
'port': 3306,
'db': 'vansel',
'charset': 'utf8'
}
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
PROTYPE_PATH = os.path.join(ROOT_PATH, 'cache/protype')
UPLOAD_FOLDER = os.path.join(ROOT_PATH, 'cache/data/')
UPLOAD_BASE_URL = 'http://127.0.0.1/vansel/'
|
[
"os.path.dirname",
"os.path.join"
] |
[((412, 452), 'os.path.join', 'os.path.join', (['ROOT_PATH', '"""cache/protype"""'], {}), "(ROOT_PATH, 'cache/protype')\n", (424, 452), False, 'import os\n'), ((470, 508), 'os.path.join', 'os.path.join', (['ROOT_PATH', '"""cache/data/"""'], {}), "(ROOT_PATH, 'cache/data/')\n", (482, 508), False, 'import os\n'), ((370, 395), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (385, 395), False, 'import os\n')]
|
from selenium.webdriver.support import expected_conditions as EC
from pages.page import Page
class HomePage(Page):
PAGE_URL = "https://www.wikipedia.org"
def visit(self):
self.selenium.driver.get(HomePage.PAGE_URL)
self.verify_is_loaded()
def verify_is_loaded(self):
self.selenium.load_wait.until(
EC.title_is("Wikipedia"),
f"The Wikipedia homepage did not load on {self.selenium.driver.current_url}")
|
[
"selenium.webdriver.support.expected_conditions.title_is"
] |
[((350, 374), 'selenium.webdriver.support.expected_conditions.title_is', 'EC.title_is', (['"""Wikipedia"""'], {}), "('Wikipedia')\n", (361, 374), True, 'from selenium.webdriver.support import expected_conditions as EC\n')]
|
# Generated by Django 3.1.1 on 2020-10-16 14:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BlogApp', '0009_newsletter'),
]
operations = [
migrations.AlterField(
model_name='newsletter',
name='email',
field=models.EmailField(max_length=254, unique=True),
),
]
|
[
"django.db.models.EmailField"
] |
[((346, 392), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(254)', 'unique': '(True)'}), '(max_length=254, unique=True)\n', (363, 392), False, 'from django.db import migrations, models\n')]
|
import kakao_reviews as kr
import naverv4_blog as nv4
import gocamp_crawl as gc
import gocamp_link_crawl as gl
import datetime
from camping_server2.bot import slackbot
import camping_server2.config as config
import time
import naverv5_category as nv5
import camping_server2.scraping.ogcamp_crawl as oc
import pandas as pd
def target_list():
"""
get gocamping title list
:return:
gocamping title
"""
datas = pd.read_csv(config.Config.PATH + '/target_list.csv')
name = datas[['title']]
name = name.iloc[:]['title']
base_addr = datas[['addr']]
base_addr = base_addr.iloc[:]['addr']
return list(name), list(base_addr)
def get_nv5_result(camping_list, camping_addrs):
"""
naverv5 category review scraping
:param camping_list, camping_addrs:
:return:
naver map v5 category review crawling result csv
"""
highlight_reviews = []
try:
for i, camping_title in enumerate(camping_list):
s = nv5.CategoryScraping(camping_title)
s.switch_iframe()
title, addr = s.move_tab()
print(title, addr)
if title == '':
continue
category = s.get_categories()
cnt = 1
try:
while True:
try:
target_category = s.click_cagetory(category, cnt)
except:
break
else:
elements = s.scroll_down(config.Config.COUNT)
for j, element in enumerate(elements[:config.Config.COUNT]): # default 100
try:
info = s.get_reviews(camping_title, camping_addrs[i], addr, target_category, j)
highlight_reviews.append(info)
except:
break
cnt += 1
finally:
s.driver.quit()
time.sleep(2)
slackbot.IncomingWebhook.send_msg(f'{datetime.datetime.now()} {i}번째 {camping_title}까지 완료')
finally:
print(highlight_reviews)
s.save_res(highlight_reviews)
slackbot.IncomingWebhook.send_msg(f'crawling completed ! result line num : {len(highlight_reviews)}')
if __name__ == '__main__':
# camping_list, camping_addrs = target_list()
# get_nv5_result(camping_list[:], camping_addrs[:])
# s = kr.Scraping()
# s.get_search(target_list())
# v4 = nv4.Scraping(target_list())
# ids, place_name = v4.get_params()
# res_reviews = v4.get_reviews(ids, place_name)
# crawler = gc.CampCrawler()
# crawler.fetch_camp_list()
# crawler.fetch_camp_details()
# result = crawler.df
camp_link = gl.CampLink()
camp_link.fetch_link_list()
camp_link.fetch_link_details()
# ogcamp = oc.OgcampScraping()
# ogcamp.get_data()
# ogcamp = oc.OgcampScraping()
# ogcamp.get_data()
# ogcamp.get_details()
|
[
"pandas.read_csv",
"gocamp_link_crawl.CampLink",
"time.sleep",
"naverv5_category.CategoryScraping",
"datetime.datetime.now"
] |
[((433, 485), 'pandas.read_csv', 'pd.read_csv', (["(config.Config.PATH + '/target_list.csv')"], {}), "(config.Config.PATH + '/target_list.csv')\n", (444, 485), True, 'import pandas as pd\n'), ((2802, 2815), 'gocamp_link_crawl.CampLink', 'gl.CampLink', ([], {}), '()\n', (2813, 2815), True, 'import gocamp_link_crawl as gl\n'), ((981, 1016), 'naverv5_category.CategoryScraping', 'nv5.CategoryScraping', (['camping_title'], {}), '(camping_title)\n', (1001, 1016), True, 'import naverv5_category as nv5\n'), ((2016, 2029), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2026, 2029), False, 'import time\n'), ((2083, 2106), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2104, 2106), False, 'import datetime\n')]
|
import setuptools
with open('README.rst') as f:
readme = f.read()
setuptools.setup(
name='mkepub-fork',
version='1.2',
description='Simple minimalistic library for creating EPUB3 files',
long_description=readme,
url='https://github.com/ivanleopoldo/mkepub-fork/',
author='anqxyr, ivanleopoldo',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'],
packages=['mkepub'],
package_data={'mkepub': ['templates/*']},
tests_require=['epubcheck', 'pytest', 'pytest-cov', 'python-coveralls'],
install_requires=['jinja2'],
)
|
[
"setuptools.setup"
] |
[((72, 846), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""mkepub-fork"""', 'version': '"""1.2"""', 'description': '"""Simple minimalistic library for creating EPUB3 files"""', 'long_description': 'readme', 'url': '"""https://github.com/ivanleopoldo/mkepub-fork/"""', 'author': '"""anqxyr, ivanleopoldo"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'classifiers': "['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5']", 'packages': "['mkepub']", 'package_data': "{'mkepub': ['templates/*']}", 'tests_require': "['epubcheck', 'pytest', 'pytest-cov', 'python-coveralls']", 'install_requires': "['jinja2']"}), "(name='mkepub-fork', version='1.2', description=\n 'Simple minimalistic library for creating EPUB3 files',\n long_description=readme, url=\n 'https://github.com/ivanleopoldo/mkepub-fork/', author=\n 'anqxyr, ivanleopoldo', author_email='<EMAIL>', license='MIT',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5'], packages=['mkepub'],\n package_data={'mkepub': ['templates/*']}, tests_require=['epubcheck',\n 'pytest', 'pytest-cov', 'python-coveralls'], install_requires=['jinja2'])\n", (88, 846), False, 'import setuptools\n')]
|
# _*_coding:utf-8_*_
__author__ = 'song'
__date__ = '2017/10/24 23:03'
import xadmin
from .models import CityDict,CourseOrg,Teacher
class CityDictAdmin(object):
list_display = ['name', 'desc', 'add_time']
search_fields = ['name', 'desc', 'add_time']
list_filter = ['name', 'desc', 'add_time']
class CourseOrgAdmin(object):
list_display = ['name', 'desc', 'add_time','click_nums','fav_nums','address','city']
search_fields = ['name', 'desc', 'add_time','click_nums','fav_nums','address','city']
list_filter = ['name', 'desc', 'add_time','click_nums','fav_nums','address','city']
class TeacherAdmin(object):
list_display = ['name', 'add_time','org','work_years','work_company','work_position']
search_fields = ['name', 'add_time','org','work_years','work_company','work_position']
list_filter = ['name', 'add_time','org','work_years','work_company','work_position']
xadmin.site.register(CityDict,CityDictAdmin)
xadmin.site.register(CourseOrg,CourseOrgAdmin)
xadmin.site.register(Teacher,TeacherAdmin)
|
[
"xadmin.site.register"
] |
[((931, 976), 'xadmin.site.register', 'xadmin.site.register', (['CityDict', 'CityDictAdmin'], {}), '(CityDict, CityDictAdmin)\n', (951, 976), False, 'import xadmin\n'), ((977, 1024), 'xadmin.site.register', 'xadmin.site.register', (['CourseOrg', 'CourseOrgAdmin'], {}), '(CourseOrg, CourseOrgAdmin)\n', (997, 1024), False, 'import xadmin\n'), ((1025, 1068), 'xadmin.site.register', 'xadmin.site.register', (['Teacher', 'TeacherAdmin'], {}), '(Teacher, TeacherAdmin)\n', (1045, 1068), False, 'import xadmin\n')]
|
# -*- coding: utf-8 -*-
"""Console script for srgssr_publication_data_api."""
import sys
import json
import click
from dotenv import load_dotenv
from sgqlc.types import Variable, non_null
from srgssr_publication_data_api import PublicationDataApi
@click.command()
@click.option('--size', help='number of items', default=10, type=int)
@click.option('--after', help='id of cursor to continue from')
@click.option('--url', help='url of the API', required=True)
@click.option('--username', help='username for basic auth')
@click.option('--password', help='password for basic auth')
def main(size, after, url, username, password):
"""Console script for srgssr_publication_data_api."""
variables = {'first': size}
if after:
variables['after'] = after
client = PublicationDataApi(url, username, password)
op = client.query_op(first=non_null(int), after=str)
op.faro_items(first=Variable('first'), after=Variable('after'))
print(f"Executing GraphQL Query: {op}")
result=client.run_query(op, variables)
if result:
print(result)
return 0
else:
return 1
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
|
[
"sgqlc.types.non_null",
"sgqlc.types.Variable",
"click.option",
"srgssr_publication_data_api.PublicationDataApi",
"click.command"
] |
[((251, 266), 'click.command', 'click.command', ([], {}), '()\n', (264, 266), False, 'import click\n'), ((268, 336), 'click.option', 'click.option', (['"""--size"""'], {'help': '"""number of items"""', 'default': '(10)', 'type': 'int'}), "('--size', help='number of items', default=10, type=int)\n", (280, 336), False, 'import click\n'), ((338, 399), 'click.option', 'click.option', (['"""--after"""'], {'help': '"""id of cursor to continue from"""'}), "('--after', help='id of cursor to continue from')\n", (350, 399), False, 'import click\n'), ((401, 460), 'click.option', 'click.option', (['"""--url"""'], {'help': '"""url of the API"""', 'required': '(True)'}), "('--url', help='url of the API', required=True)\n", (413, 460), False, 'import click\n'), ((462, 520), 'click.option', 'click.option', (['"""--username"""'], {'help': '"""username for basic auth"""'}), "('--username', help='username for basic auth')\n", (474, 520), False, 'import click\n'), ((522, 580), 'click.option', 'click.option', (['"""--password"""'], {'help': '"""password for basic auth"""'}), "('--password', help='password for basic auth')\n", (534, 580), False, 'import click\n'), ((781, 824), 'srgssr_publication_data_api.PublicationDataApi', 'PublicationDataApi', (['url', 'username', 'password'], {}), '(url, username, password)\n', (799, 824), False, 'from srgssr_publication_data_api import PublicationDataApi\n'), ((856, 869), 'sgqlc.types.non_null', 'non_null', (['int'], {}), '(int)\n', (864, 869), False, 'from sgqlc.types import Variable, non_null\n'), ((906, 923), 'sgqlc.types.Variable', 'Variable', (['"""first"""'], {}), "('first')\n", (914, 923), False, 'from sgqlc.types import Variable, non_null\n'), ((931, 948), 'sgqlc.types.Variable', 'Variable', (['"""after"""'], {}), "('after')\n", (939, 948), False, 'from sgqlc.types import Variable, non_null\n')]
|
#!/usr/bin/env python
"""
Aiida interface for twinpy.
"""
import warnings
import numpy as np
from aiida.cmdline.utils.decorators import with_dbenv
from aiida.orm import (load_node,
Node,
QueryBuilder,
)
from aiida.plugins import WorkflowFactory
from aiida_twinpy.common.utils import get_create_node
from twinpy.interfaces.aiida.base import (check_process_class,
_WorkChain)
from twinpy.interfaces.aiida.vasp import (AiidaRelaxWorkChain)
from twinpy.interfaces.aiida.twinboundary \
import AiidaTwinBoudnaryRelaxWorkChain
@with_dbenv()
class AiidaTwinBoudnaryShearWorkChain(_WorkChain):
"""
TwinBoundaryShear work chain class.
"""
def __init__(
self,
node:Node,
):
"""
Args:
node: TwinBoundaryShearWorkChain node.
"""
process_class = 'TwinBoundaryShearWorkChain'
check_process_class(node, process_class)
super().__init__(node=node)
self._shear_strain_ratios = None
self._set_shear_strain_ratios()
self._shear_aiida_relaxes = None
self._set_shear_aiida_relaxes()
self._structure_pks = None
self._set_structure_pks()
self._aiida_twinboundary_relax = None
self._set_aiida_twinboundary_relax()
self._additional_relax_pks = None
self._set_additional_relax_pks()
self._twinboundary_analyzer = None
def _set_shear_strain_ratios(self):
"""
Set shear strain ratios.
"""
conf = self._node.inputs.twinboundary_shear_conf.get_dict()
self._shear_strain_ratios = conf['shear_strain_ratios']
@property
def shear_strain_ratios(self):
"""
Shear strain ratios.
"""
return self._shear_strain_ratios
def _set_structure_pks(self):
"""
Set structure pks.
"""
qb = QueryBuilder()
qb.append(Node, filters={'id':{'==': self._pk}}, tag='wf')
qb.append(
Node,
filters={'label': {'==': 'get_twinboundary_shear_structure'}},
project=['id'],
with_incoming='wf')
cf_pks = [ q[0] for q in qb.all() ]
shear_ratios = [ load_node(q[0]).inputs.shear_strain_ratio.value for q in qb.all() ]
orders = list(np.argsort(shear_ratios))
orig_pks = []
input_pks = []
for ix in orders:
cf = load_node(cf_pks[ix])
orig_pks.append(cf.outputs.twinboundary_shear_structure_orig.pk)
input_pks.append(cf.outputs.twinboundary_shear_structure.pk)
rlx_pks = []
for aiida_rlx, i_struct_pk in zip(self._shear_aiida_relaxes, input_pks):
pks = aiida_rlx.get_pks()
assert pks['initial_structure_pk'] == i_struct_pk, \
"Input structure does not match."
rlx_pks.append(pks['final_structure_pk'])
self._structure_pks = {
'original_structures': orig_pks,
'input_structures': input_pks,
'relax_structures': rlx_pks,
}
@property
def structure_pks(self):
"""
Structure pks.
"""
return self._structure_pks
def _set_aiida_twinboundary_relax(self):
"""
Set twinboundary relax pk.
"""
tb_rlx_wf = WorkflowFactory('twinpy.twinboundary_relax')
tb_rlx_struct_pk = self._node.inputs.twinboundary_relax_structure.pk
tb_rlx = get_create_node(tb_rlx_struct_pk, tb_rlx_wf)
self._aiida_twinboundary_relax \
= AiidaTwinBoudnaryRelaxWorkChain(tb_rlx)
def _set_shear_aiida_relaxes(self):
"""
Set list of AiidaRelaxWorkChain objects.
"""
rlx_wf = WorkflowFactory('vasp.relax')
qb = QueryBuilder()
qb.append(Node, filters={'id':{'==': self._pk}}, tag='wf')
qb.append(rlx_wf, with_incoming='wf', project=['id', 'label'])
qb_all = qb.all()
qb_all.sort(key=lambda qb_all: qb_all[1])
rlx_pks = [ q[0] for q in qb_all ]
self._shear_aiida_relaxes = [ AiidaRelaxWorkChain(load_node(pk))
for pk in rlx_pks ]
def _set_additional_relax_pks(self):
"""
Set additional relax pks.
"""
addi_struct_pks = [ self._node.inputs.__getattr__(key).pk
for key in dir(self._node.inputs)
if 'additional_relax__structure' in key ]
self._additional_relax_pks = \
[ get_create_node(pk, rlx_wf).pk for pk in addi_struct_pks ]
@property
def shear_aiida_relaxes(self):
"""
List of AiidaRelaxWorkChain class objects.
"""
return self._shear_aiida_relaxes
def set_twinboundary_analyzer(self,
twinboundary_phonon_pk:int=None,
hexagonal_relax_pk:int=None,
hexagonal_phonon_pk:int=None,
):
"""
Set twinboundary analyzer.
Args:
twinboudnary_phonon_pk: Twinboundary phonon calculation pk.
hexagonal_relax_pk: Hexagonal relax calculation pk.
hexagonal_phonon_pk: Hexagonal phonon calculation pk.
"""
tb_rlx_pk = self._aiida_twinboundary_relax.pk
addi_rlx_pks = self._additional_relax_pks
aiida_tb = AiidaTwinBoudnaryRelaxWorkChain(load_node(tb_rlx_pk))
self._twinboundary_analyzer = aiida_tb.get_twinboundary_analyzer(
twinboundary_phonon_pk=twinboundary_phonon_pk,
additional_relax_pks=addi_rlx_pks,
hexagonal_relax_pk=hexagonal_relax_pk,
hexagonal_phonon_pk=hexagonal_phonon_pk,
)
@property
def twinboundary_analyzer(self):
"""
TwinBoundaryAnalyzer class object.
"""
return self._twinboundary_analyzer
def get_twinboundary_shear_analyzer(self,
shear_phonon_pks:list,
):
"""
Get twinboundary shear analyzer.
Args:
shaer_phonon_pks: List of phonon pks.
Raises:
RuntimeError: Property twinboundary_analyzer is not set.
Note:
Length of phono_pks list must be the same as that of shear strain
ratios. If there is no phonon result, set please set None.
"""
if self._twinboundary_analyzer is None:
raise RuntimeError("Please set twinboundary_analyzer before.")
assert len(self._shear_strain_ratios) == len(shear_phonon_pks), \
"Length of shear_phonon_pks does not match with shear_strain_ratios."
tb_anal = self._twinboundary_analyzer
shr_rlx_pks = \
[ aiida_rlx.pk for aiida_rlx in self._shear_aiida_relaxes ]
ratios = self._shear_strain_ratios
if len(shr_rlx_pks) != len(ratios):
warnings.warn("Some RelaxWorkChain has not finished normally. "
+"They are ignored.")
tb_shear_analyzer = \
tb_anal.get_twinboundary_shear_analyzer_from_relax_pks(
shear_relax_pks=shr_rlx_pks,
shear_strain_ratios=ratios[:len(shr_rlx_pks)],
shear_phonon_pks=shear_phonon_pks[:len(shr_rlx_pks)],
)
return tb_shear_analyzer
def get_pks(self):
"""
Get workflow pks.
Returns:
dict: Workflow pks.
"""
wf_pks = {
'twinboundary_relax_pk': self._aiida_twinboundary_relax.pk,
'additional_relax_pks': self._additional_relax_pks,
'shear_aiida_relax_pks': [ shr_rlx.pk for shr_rlx
in self._shear_aiida_relaxes ],
}
return wf_pks
|
[
"aiida.orm.QueryBuilder",
"twinpy.interfaces.aiida.base.check_process_class",
"aiida.orm.load_node",
"aiida.plugins.WorkflowFactory",
"twinpy.interfaces.aiida.twinboundary.AiidaTwinBoudnaryRelaxWorkChain",
"numpy.argsort",
"aiida_twinpy.common.utils.get_create_node",
"warnings.warn",
"aiida.cmdline.utils.decorators.with_dbenv"
] |
[((646, 658), 'aiida.cmdline.utils.decorators.with_dbenv', 'with_dbenv', ([], {}), '()\n', (656, 658), False, 'from aiida.cmdline.utils.decorators import with_dbenv\n'), ((991, 1031), 'twinpy.interfaces.aiida.base.check_process_class', 'check_process_class', (['node', 'process_class'], {}), '(node, process_class)\n', (1010, 1031), False, 'from twinpy.interfaces.aiida.base import check_process_class, _WorkChain\n'), ((1989, 2003), 'aiida.orm.QueryBuilder', 'QueryBuilder', ([], {}), '()\n', (2001, 2003), False, 'from aiida.orm import load_node, Node, QueryBuilder\n'), ((3445, 3489), 'aiida.plugins.WorkflowFactory', 'WorkflowFactory', (['"""twinpy.twinboundary_relax"""'], {}), "('twinpy.twinboundary_relax')\n", (3460, 3489), False, 'from aiida.plugins import WorkflowFactory\n'), ((3584, 3628), 'aiida_twinpy.common.utils.get_create_node', 'get_create_node', (['tb_rlx_struct_pk', 'tb_rlx_wf'], {}), '(tb_rlx_struct_pk, tb_rlx_wf)\n', (3599, 3628), False, 'from aiida_twinpy.common.utils import get_create_node\n'), ((3688, 3727), 'twinpy.interfaces.aiida.twinboundary.AiidaTwinBoudnaryRelaxWorkChain', 'AiidaTwinBoudnaryRelaxWorkChain', (['tb_rlx'], {}), '(tb_rlx)\n', (3719, 3727), False, 'from twinpy.interfaces.aiida.twinboundary import AiidaTwinBoudnaryRelaxWorkChain\n'), ((3859, 3888), 'aiida.plugins.WorkflowFactory', 'WorkflowFactory', (['"""vasp.relax"""'], {}), "('vasp.relax')\n", (3874, 3888), False, 'from aiida.plugins import WorkflowFactory\n'), ((3902, 3916), 'aiida.orm.QueryBuilder', 'QueryBuilder', ([], {}), '()\n', (3914, 3916), False, 'from aiida.orm import load_node, Node, QueryBuilder\n'), ((2402, 2426), 'numpy.argsort', 'np.argsort', (['shear_ratios'], {}), '(shear_ratios)\n', (2412, 2426), True, 'import numpy as np\n'), ((2516, 2537), 'aiida.orm.load_node', 'load_node', (['cf_pks[ix]'], {}), '(cf_pks[ix])\n', (2525, 2537), False, 'from aiida.orm import load_node, Node, QueryBuilder\n'), ((5598, 5618), 'aiida.orm.load_node', 'load_node', (['tb_rlx_pk'], {}), 
'(tb_rlx_pk)\n', (5607, 5618), False, 'from aiida.orm import load_node, Node, QueryBuilder\n'), ((7158, 7248), 'warnings.warn', 'warnings.warn', (["('Some RelaxWorkChain has not finished normally. ' + 'They are ignored.')"], {}), "('Some RelaxWorkChain has not finished normally. ' +\n 'They are ignored.')\n", (7171, 7248), False, 'import warnings\n'), ((4232, 4245), 'aiida.orm.load_node', 'load_node', (['pk'], {}), '(pk)\n', (4241, 4245), False, 'from aiida.orm import load_node, Node, QueryBuilder\n'), ((4669, 4696), 'aiida_twinpy.common.utils.get_create_node', 'get_create_node', (['pk', 'rlx_wf'], {}), '(pk, rlx_wf)\n', (4684, 4696), False, 'from aiida_twinpy.common.utils import get_create_node\n'), ((2312, 2327), 'aiida.orm.load_node', 'load_node', (['q[0]'], {}), '(q[0])\n', (2321, 2327), False, 'from aiida.orm import load_node, Node, QueryBuilder\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: rander_model.py
import os
from utils import txt_load, gzip_dump
OUTPUT_DIR = "../src/main/resources/"
fileList = [
"dict.big",
"dict.std",
"dict.small",
"idf.std",
]
for file in fileList:
txtFile = os.path.join(OUTPUT_DIR, file+".txt")
gzFile = os.path.join(OUTPUT_DIR, file+".gz")
gzip_dump(txt_load(txtFile), gzFile)
|
[
"os.path.join",
"utils.txt_load"
] |
[((287, 326), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', "(file + '.txt')"], {}), "(OUTPUT_DIR, file + '.txt')\n", (299, 326), False, 'import os\n'), ((338, 376), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', "(file + '.gz')"], {}), "(OUTPUT_DIR, file + '.gz')\n", (350, 376), False, 'import os\n'), ((389, 406), 'utils.txt_load', 'txt_load', (['txtFile'], {}), '(txtFile)\n', (397, 406), False, 'from utils import txt_load, gzip_dump\n')]
|
#!/usr/local/python3.4.6/bin/python3.4
# DISCLAIMER
#
# This is a sample script for demo and reference purpose only.
# It is subject to change for content updates without warning.
#
# DESCRIPTION
# This sample script uses two back-to-back Ixia ports.
# Supports both IxNetwork API server and Linux API server connection.
#
# - Configure two IPv4 Topology Groups
# - Configure BGP and network advertising routes.
# - Start protocols
# - Verify protocol session
# - Create Traffic Item
# - Apply Traffic
# - Start Traffic
# - Get stats
#
import requests, json, sys, os, time, traceback
import IxN_RestApiPy3
# Which REST API server do you want to connect to: linux or windows
connectToApiServer = 'linux'
# Settings for Windows (native IxNetwork API server).
if connectToApiServer == 'windows':
    ixNetRestServerIp = '192.168.70.127'
    ixNetRestServerPort = '11009'
# Settings for Linux API Server
if connectToApiServer == 'linux':
    linuxServerIp = '192.168.70.137'
    username = 'admin'
    password = '<PASSWORD>'  # placeholder -- fill in the real Linux API server password
    deleteLinuxSessionWhenDone = True
    # Set to True if the Linux API Server is newly installed.
    # We need to set the license server settings once.
    isLinuxApiServerNewlyInstalled = False
    licenseServerIp = '192.168.70.127'
    licenseMode = 'subscription' ;# IxVM uses subscription. Physical chassis uses perpetual.
    licenseTier = 'tier3'
    linuxServerUrl = 'https://%s' % linuxServerIp
# Chassis that owns the two back-to-back test ports.
ixChassisIp = '192.168.70.10'
# Each entry presumably [chassisIp, cardNumber, portNumber] -- TODO confirm against IxN_RestApiPy3.createVports.
portList = [[ixChassisIp, '1', '1'],
            [ixChassisIp, '2', '1']]
# For connecting to Linux API server that supports SSL. Provide your SSL certificate here.
verifySslCert = False
# Main test flow: connect, build the two IPv4/BGP topologies, run traffic,
# collect flow statistics, then tear down the Linux session if requested.
try:
    # ---------- Connect to the chosen REST API server ----------
    # If connecting to Linux API server
    if connectToApiServer == 'linux':
        # This will disable all the SSL warnings on your terminal.
        requests.packages.urllib3.disable_warnings()
        returnList = IxN_RestApiPy3.connectToLinuxApiServer(linuxServerIp, username=username, password=password)
        sessionUrl, sessionId, apiKey = returnList
        if isLinuxApiServerNewlyInstalled:
            # One-time global license-server configuration for a fresh install.
            IxN_RestApiPy3.linuxServerConfigGlobalLicenseServer(linuxServerIp, licenseServerIp,
                                                                licenseMode, licenseTier,
                                                                apiKey, verifySslCert=verifySslCert)
        IxN_RestApiPy3.linuxServerConfigNewSessionLicense(sessionUrl, linuxServerIp, apiKey, verifySslCert=verifySslCert)
    # If connecting to Windows API server
    if connectToApiServer == 'windows':
        sessionUrl = IxN_RestApiPy3.getSessionUrl(ixNetRestServerIp, ixNetRestServerPort)
        apiKey = None
        sessionId = sessionUrl.split('/ixnetwork')[0]
    # ---------- Blank config, chassis and ports ----------
    IxN_RestApiPy3.newBlankConfig(sessionUrl, apiKey=apiKey, verifySslCert=verifySslCert)
    IxN_RestApiPy3.connectIxChassis(sessionUrl, chassisIp=ixChassisIp, apiKey=apiKey, verifySslCert=verifySslCert)
    IxN_RestApiPy3.createVports(sessionUrl, portList, apiKey=apiKey, verifySslCert=verifySslCert)
    IxN_RestApiPy3.assignPorts(sessionUrl, portList, apiKey=apiKey, verifySslCert=verifySslCert)
    # ---------- Two IPv4 topology groups, one per port ----------
    topologyObj1 = IxN_RestApiPy3.createTopologyNgpf(sessionUrl,
                                                     portList=[portList[0]],
                                                     topologyName='MyTopo1',
                                                     apiKey=apiKey, verifySslCert=verifySslCert)
    deviceGroupObj1 = IxN_RestApiPy3.createDeviceGroupNgpf(topologyObj1,
                                                           multiplier=1,
                                                           deviceGroupName='myDG1',
                                                           apiKey=apiKey, verifySslCert=verifySslCert)
    topologyObj2 = IxN_RestApiPy3.createTopologyNgpf(sessionUrl,
                                                     portList=[portList[1]],
                                                     topologyName='MyTopo2',
                                                     apiKey=apiKey, verifySslCert=verifySslCert)
    deviceGroupObj2 = IxN_RestApiPy3.createDeviceGroupNgpf(topologyObj2,
                                                           multiplier=1,
                                                           deviceGroupName='myDG2',
                                                           apiKey=apiKey, verifySslCert=verifySslCert)
    # Ethernet layer: incrementing MACs, both sides on VLAN 103.
    ethernetObj1 = IxN_RestApiPy3.createEthernetNgpf(deviceGroupObj1,
                                                     ethernetName='MyEth1',
                                                     macAddress={'start': '00:01:01:00:00:01',
                                                                 'direction': 'increment',
                                                                 'step': '00:00:00:00:00:01'},
                                                     macAddressPortStep='disabled',
                                                     vlanId={'start': 103,
                                                             'direction': 'increment',
                                                             'step': 0},
                                                     apiKey=apiKey, verifySslCert=verifySslCert)
    ethernetObj2 = IxN_RestApiPy3.createEthernetNgpf(deviceGroupObj2,
                                                     ethernetName='MyEth2',
                                                     macAddress={'start': '00:01:02:00:00:01',
                                                                 'direction': 'increment',
                                                                 'step': '00:00:00:00:00:01'},
                                                     macAddressPortStep='disabled',
                                                     vlanId={'start': 103,
                                                             'direction': 'increment',
                                                             'step': 0},
                                                     apiKey=apiKey, verifySslCert=verifySslCert)
    # IPv4 layer: 1.1.1.1 <-> 1.1.1.2, each side using the other as gateway.
    ipv4Obj1 = IxN_RestApiPy3.createIpv4Ngpf(ethernetObj1,
                                             ipv4Address={'start': '1.1.1.1',
                                                          'direction': 'increment',
                                                          'step': '0.0.0.1'},
                                             ipv4AddressPortStep='disabled',
                                             gateway={'start': '1.1.1.2',
                                                      'direction': 'increment',
                                                      'step': '0.0.0.0'},
                                             gatewayPortStep='disabled',
                                             prefix=24,
                                             resolveGateway=True, apiKey=apiKey, verifySslCert=verifySslCert)
    ipv4Obj2 = IxN_RestApiPy3.createIpv4Ngpf(ethernetObj2,
                                             ipv4Address={'start': '1.1.1.2',
                                                          'direction': 'increment',
                                                          'step': '0.0.0.1'},
                                             ipv4AddressPortStep='disabled',
                                             gateway={'start': '1.1.1.1',
                                                      'direction': 'increment',
                                                      'step': '0.0.0.0'},
                                             gatewayPortStep='disabled',
                                             prefix=24,
                                             resolveGateway=True, apiKey=apiKey, verifySslCert=verifySslCert)
    # flap = true or false.
    # If there is only one host IP interface, then single value = True or False.
    # If there are multiple host IP interfaces, then single value = a list ['true', 'false']
    # Provide a list of total true or false according to the total amount of host IP interfaces.
    bgpObj1 = IxN_RestApiPy3.configBgp(ipv4Obj1,
                                       apiKey=apiKey,
                                       verifySslCert=verifySslCert,
                                       name='bgp_1',
                                       enableBgp=True,
                                       holdTimer=90,
                                       dutIp={'start': '1.1.1.2',
                                              'direction': 'increment',
                                              'step': '0.0.0.0'},
                                       enableGracefulRestart=False,
                                       restartTime=45,
                                       type='internal',
                                       enableBgpIdSameasRouterId=True,
                                       staleTime=0,
                                       flap=False)
    bgpObj2 = IxN_RestApiPy3.configBgp(ipv4Obj2,
                                       apiKey=apiKey,
                                       verifySslCert=verifySslCert,
                                       name='bgp_2',
                                       enableBgp=True,
                                       holdTimer=90,
                                       dutIp={'start': '1.1.1.1',
                                              'direction': 'increment',
                                              'step': '0.0.0.0'},
                                       enableGracefulRestart=False,
                                       restartTime=45,
                                       type='internal',
                                       enableBgpIdSameasRouterId=True,
                                       staleTime=0,
                                       flap=False)
    # Each BGP peer advertises a block of 100 /24 routes.
    networkGroupObj1 = IxN_RestApiPy3.configNetworkGroup(deviceGroupObj1,
                                                         name='networkGroup1',
                                                         multiplier=100,
                                                         networkAddress={'start': '172.16.17.32',
                                                                         'step': '0.0.0.1',
                                                                         'direction': 'increment'},
                                                         prefixLength=24,
                                                         apiKey=apiKey, verifySslCert=verifySslCert)
    networkGroupObj2 = IxN_RestApiPy3.configNetworkGroup(deviceGroupObj2,
                                                         name='networkGroup2',
                                                         multiplier=100,
                                                         networkAddress={'start': '172.16.17.32',
                                                                         'step': '0.0.0.1',
                                                                         'direction': 'increment'},
                                                         prefixLength=24,
                                                         apiKey=apiKey, verifySslCert=verifySslCert)
    # ---------- Start protocols and verify sessions came up ----------
    IxN_RestApiPy3.startAllProtocols(sessionUrl, apiKey=apiKey, verifySslCert=verifySslCert)
    IxN_RestApiPy3.verifyProtocolSessionsNgpf([ipv4Obj1, ipv4Obj2, bgpObj1, bgpObj2],
                                              apiKey=apiKey, verifySslCert=verifySslCert)
    # For all parameter options, please go to the API configTrafficItem
    # mode = create or modify
    trafficStatus = IxN_RestApiPy3.configTrafficItem(sessionUrl, apiKey=apiKey,
                                                     verifySslCert=verifySslCert,
                                                     mode='create',
                                                     trafficItem={
                                                         'name': 'Topo1 to Topo2',
                                                         'trafficType': 'ipv4',
                                                         'biDirectional': True,
                                                         'srcDestMesh': 'one-to-one',
                                                         'routeMesh': 'oneToOne',
                                                         'allowSelfDestined': False,
                                                         'trackBy': ['flowGroup0', 'vlanVlanId0']
                                                     },
                                                     endpoints=[{'name': 'Flow-Group-1',
                                                                 'sources': [topologyObj1],
                                                                 'destinations': [topologyObj2]}],
                                                     configElements=[{'transmissionType': 'fixedFrameCount',
                                                                      'frameCount': 50000,
                                                                      'frameRate': 88,
                                                                      'frameRateType': 'percentLineRate',
                                                                      'frameSize': 128}])
    trafficItemObj = trafficStatus[0]
    endpointObjList = trafficStatus[1]
    configElementObjList = trafficStatus[2]
    # ---------- Run traffic ----------
    IxN_RestApiPy3.regenerateTrafficItems(sessionUrl, apiKey=apiKey, verifySslCert=verifySslCert)
    IxN_RestApiPy3.applyTraffic(sessionUrl, apiKey=apiKey, verifySslCert=verifySslCert)
    IxN_RestApiPy3.startTraffic(sessionUrl, apiKey=apiKey, verifySslCert=verifySslCert)
    IxN_RestApiPy3.checkTrafficState(sessionUrl,
                                     expectedState=['started', 'startedWaitingForStats'],
                                     apiKey=apiKey, verifySslCert=verifySslCert)
    # If you're sending a large number of packets, you need to set timeout to a larger number.
    IxN_RestApiPy3.checkTrafficState(sessionUrl,
                                     expectedState=['stopped'],
                                     timeout=45, apiKey=apiKey, verifySslCert=verifySslCert)
    # ---------- Collect and print flow statistics ----------
    stats = IxN_RestApiPy3.getStats(sessionUrl, viewName='Flow Statistics', apiKey=apiKey)
    print('\n{txPort:10} {txFrames:15} {rxPort:10} {rxFrames:15} {frameLoss:10}'.format(
        txPort='txPort', txFrames='txFrames', rxPort='rxPort', rxFrames='rxFrames', frameLoss='frameLoss'))
    print('-'*90)
    for flowGroup, values in stats.items():
        txPort = values['Tx Port']
        rxPort = values['Rx Port']
        txFrames = values['Tx Frames']
        rxFrames = values['Rx Frames']
        frameLoss = values['Frames Delta']
        print('{txPort:10} {txFrames:15} {rxPort:10} {rxFrames:15} {frameLoss:10} '.format(
            txPort=txPort, txFrames=txFrames, rxPort=rxPort, rxFrames=rxFrames, frameLoss=frameLoss))
    if connectToApiServer == 'linux' and deleteLinuxSessionWhenDone:
        IxN_RestApiPy3.linuxServerStopAndDeleteSession(sessionId, apiKey=apiKey, verifySslCert=verifySslCert)
except IxN_RestApiPy3.IxNetRestUtilityException as errMsg:
    print('\nTest failed! {0}\n'.format(errMsg))
    if connectToApiServer == 'linux' and 'sessionId' in locals() and deleteLinuxSessionWhenDone:
        IxN_RestApiPy3.linuxServerStopAndDeleteSession(sessionId, apiKey=apiKey, verifySslCert=verifySslCert)
except Exception:
    # BUG FIX: the original formatted traceback.print_exc(), which prints to
    # stderr and returns None, so the message read "Test failed! None".
    # format_exc() returns the traceback as a string, which is what we want here.
    print('\nTest failed! {0}\n'.format(traceback.format_exc()))
    if connectToApiServer == 'linux' and 'sessionId' in locals() and deleteLinuxSessionWhenDone:
        IxN_RestApiPy3.linuxServerStopAndDeleteSession(sessionId, apiKey=apiKey, verifySslCert=verifySslCert)
except KeyboardInterrupt:
    print('\nAborting ...')
    if connectToApiServer == 'linux' and 'sessionId' in locals() and deleteLinuxSessionWhenDone:
        IxN_RestApiPy3.linuxServerStopAndDeleteSession(sessionId, apiKey=apiKey, verifySslCert=verifySslCert)
|
[
"requests.packages.urllib3.disable_warnings",
"IxN_RestApiPy3.startAllProtocols",
"IxN_RestApiPy3.createIpv4Ngpf",
"IxN_RestApiPy3.createEthernetNgpf",
"IxN_RestApiPy3.assignPorts",
"IxN_RestApiPy3.getStats",
"IxN_RestApiPy3.newBlankConfig",
"IxN_RestApiPy3.regenerateTrafficItems",
"traceback.print_exc",
"IxN_RestApiPy3.applyTraffic",
"IxN_RestApiPy3.createTopologyNgpf",
"IxN_RestApiPy3.connectToLinuxApiServer",
"IxN_RestApiPy3.linuxServerStopAndDeleteSession",
"IxN_RestApiPy3.connectIxChassis",
"IxN_RestApiPy3.linuxServerConfigNewSessionLicense",
"IxN_RestApiPy3.configNetworkGroup",
"IxN_RestApiPy3.createDeviceGroupNgpf",
"IxN_RestApiPy3.configTrafficItem",
"IxN_RestApiPy3.checkTrafficState",
"IxN_RestApiPy3.startTraffic",
"IxN_RestApiPy3.configBgp",
"IxN_RestApiPy3.createVports",
"IxN_RestApiPy3.getSessionUrl",
"IxN_RestApiPy3.linuxServerConfigGlobalLicenseServer",
"IxN_RestApiPy3.verifyProtocolSessionsNgpf"
] |
[((2751, 2841), 'IxN_RestApiPy3.newBlankConfig', 'IxN_RestApiPy3.newBlankConfig', (['sessionUrl'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '(sessionUrl, apiKey=apiKey, verifySslCert=\n verifySslCert)\n', (2780, 2841), False, 'import IxN_RestApiPy3\n'), ((2841, 2956), 'IxN_RestApiPy3.connectIxChassis', 'IxN_RestApiPy3.connectIxChassis', (['sessionUrl'], {'chassisIp': 'ixChassisIp', 'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '(sessionUrl, chassisIp=ixChassisIp, apiKey=\n apiKey, verifySslCert=verifySslCert)\n', (2872, 2956), False, 'import IxN_RestApiPy3\n'), ((2955, 3052), 'IxN_RestApiPy3.createVports', 'IxN_RestApiPy3.createVports', (['sessionUrl', 'portList'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '(sessionUrl, portList, apiKey=apiKey,\n verifySslCert=verifySslCert)\n', (2982, 3052), False, 'import IxN_RestApiPy3\n'), ((3053, 3149), 'IxN_RestApiPy3.assignPorts', 'IxN_RestApiPy3.assignPorts', (['sessionUrl', 'portList'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '(sessionUrl, portList, apiKey=apiKey,\n verifySslCert=verifySslCert)\n', (3079, 3149), False, 'import IxN_RestApiPy3\n'), ((3166, 3307), 'IxN_RestApiPy3.createTopologyNgpf', 'IxN_RestApiPy3.createTopologyNgpf', (['sessionUrl'], {'portList': '[portList[0]]', 'topologyName': '"""MyTopo1"""', 'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), "(sessionUrl, portList=[portList[0]],\n topologyName='MyTopo1', apiKey=apiKey, verifySslCert=verifySslCert)\n", (3199, 3307), False, 'import IxN_RestApiPy3\n'), ((3486, 3623), 'IxN_RestApiPy3.createDeviceGroupNgpf', 'IxN_RestApiPy3.createDeviceGroupNgpf', (['topologyObj1'], {'multiplier': '(1)', 'deviceGroupName': '"""myDG1"""', 'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), "(topologyObj1, multiplier=1,\n deviceGroupName='myDG1', apiKey=apiKey, verifySslCert=verifySslCert)\n", (3522, 3623), False, 'import IxN_RestApiPy3\n'), ((3817, 3958), 'IxN_RestApiPy3.createTopologyNgpf', 
'IxN_RestApiPy3.createTopologyNgpf', (['sessionUrl'], {'portList': '[portList[1]]', 'topologyName': '"""MyTopo2"""', 'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), "(sessionUrl, portList=[portList[1]],\n topologyName='MyTopo2', apiKey=apiKey, verifySslCert=verifySslCert)\n", (3850, 3958), False, 'import IxN_RestApiPy3\n'), ((4137, 4274), 'IxN_RestApiPy3.createDeviceGroupNgpf', 'IxN_RestApiPy3.createDeviceGroupNgpf', (['topologyObj2'], {'multiplier': '(1)', 'deviceGroupName': '"""myDG2"""', 'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), "(topologyObj2, multiplier=1,\n deviceGroupName='myDG2', apiKey=apiKey, verifySslCert=verifySslCert)\n", (4173, 4274), False, 'import IxN_RestApiPy3\n'), ((4468, 4791), 'IxN_RestApiPy3.createEthernetNgpf', 'IxN_RestApiPy3.createEthernetNgpf', (['deviceGroupObj1'], {'ethernetName': '"""MyEth1"""', 'macAddress': "{'start': '00:01:01:00:00:01', 'direction': 'increment', 'step':\n '00:00:00:00:00:01'}", 'macAddressPortStep': '"""disabled"""', 'vlanId': "{'start': 103, 'direction': 'increment', 'step': 0}", 'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), "(deviceGroupObj1, ethernetName='MyEth1',\n macAddress={'start': '00:01:01:00:00:01', 'direction': 'increment',\n 'step': '00:00:00:00:00:01'}, macAddressPortStep='disabled', vlanId={\n 'start': 103, 'direction': 'increment', 'step': 0}, apiKey=apiKey,\n verifySslCert=verifySslCert)\n", (4501, 4791), False, 'import IxN_RestApiPy3\n'), ((5316, 5639), 'IxN_RestApiPy3.createEthernetNgpf', 'IxN_RestApiPy3.createEthernetNgpf', (['deviceGroupObj2'], {'ethernetName': '"""MyEth2"""', 'macAddress': "{'start': '00:01:02:00:00:01', 'direction': 'increment', 'step':\n '00:00:00:00:00:01'}", 'macAddressPortStep': '"""disabled"""', 'vlanId': "{'start': 103, 'direction': 'increment', 'step': 0}", 'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), "(deviceGroupObj2, ethernetName='MyEth2',\n macAddress={'start': '00:01:02:00:00:01', 'direction': 'increment',\n 'step': 
'00:00:00:00:00:01'}, macAddressPortStep='disabled', vlanId={\n 'start': 103, 'direction': 'increment', 'step': 0}, apiKey=apiKey,\n verifySslCert=verifySslCert)\n", (5349, 5639), False, 'import IxN_RestApiPy3\n'), ((6156, 6506), 'IxN_RestApiPy3.createIpv4Ngpf', 'IxN_RestApiPy3.createIpv4Ngpf', (['ethernetObj1'], {'ipv4Address': "{'start': '1.1.1.1', 'direction': 'increment', 'step': '0.0.0.1'}", 'ipv4AddressPortStep': '"""disabled"""', 'gateway': "{'start': '1.1.1.2', 'direction': 'increment', 'step': '0.0.0.0'}", 'gatewayPortStep': '"""disabled"""', 'prefix': '(24)', 'resolveGateway': '(True)', 'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), "(ethernetObj1, ipv4Address={'start': '1.1.1.1',\n 'direction': 'increment', 'step': '0.0.0.1'}, ipv4AddressPortStep=\n 'disabled', gateway={'start': '1.1.1.2', 'direction': 'increment',\n 'step': '0.0.0.0'}, gatewayPortStep='disabled', prefix=24,\n resolveGateway=True, apiKey=apiKey, verifySslCert=verifySslCert)\n", (6185, 6506), False, 'import IxN_RestApiPy3\n'), ((7000, 7350), 'IxN_RestApiPy3.createIpv4Ngpf', 'IxN_RestApiPy3.createIpv4Ngpf', (['ethernetObj2'], {'ipv4Address': "{'start': '1.1.1.2', 'direction': 'increment', 'step': '0.0.0.1'}", 'ipv4AddressPortStep': '"""disabled"""', 'gateway': "{'start': '1.1.1.1', 'direction': 'increment', 'step': '0.0.0.0'}", 'gatewayPortStep': '"""disabled"""', 'prefix': '(24)', 'resolveGateway': '(True)', 'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), "(ethernetObj2, ipv4Address={'start': '1.1.1.2',\n 'direction': 'increment', 'step': '0.0.0.1'}, ipv4AddressPortStep=\n 'disabled', gateway={'start': '1.1.1.1', 'direction': 'increment',\n 'step': '0.0.0.0'}, gatewayPortStep='disabled', prefix=24,\n resolveGateway=True, apiKey=apiKey, verifySslCert=verifySslCert)\n", (7029, 7350), False, 'import IxN_RestApiPy3\n'), ((8160, 8492), 'IxN_RestApiPy3.configBgp', 'IxN_RestApiPy3.configBgp', (['ipv4Obj1'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert', 'name': 
'"""bgp_1"""', 'enableBgp': '(True)', 'holdTimer': '(90)', 'dutIp': "{'start': '1.1.1.2', 'direction': 'increment', 'step': '0.0.0.0'}", 'enableGracefulRestart': '(False)', 'restartTime': '(45)', 'type': '"""internal"""', 'enableBgpIdSameasRouterId': '(True)', 'staleTime': '(0)', 'flap': '(False)'}), "(ipv4Obj1, apiKey=apiKey, verifySslCert=\n verifySslCert, name='bgp_1', enableBgp=True, holdTimer=90, dutIp={\n 'start': '1.1.1.2', 'direction': 'increment', 'step': '0.0.0.0'},\n enableGracefulRestart=False, restartTime=45, type='internal',\n enableBgpIdSameasRouterId=True, staleTime=0, flap=False)\n", (8184, 8492), False, 'import IxN_RestApiPy3\n'), ((9068, 9400), 'IxN_RestApiPy3.configBgp', 'IxN_RestApiPy3.configBgp', (['ipv4Obj2'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert', 'name': '"""bgp_2"""', 'enableBgp': '(True)', 'holdTimer': '(90)', 'dutIp': "{'start': '1.1.1.1', 'direction': 'increment', 'step': '0.0.0.0'}", 'enableGracefulRestart': '(False)', 'restartTime': '(45)', 'type': '"""internal"""', 'enableBgpIdSameasRouterId': '(True)', 'staleTime': '(0)', 'flap': '(False)'}), "(ipv4Obj2, apiKey=apiKey, verifySslCert=\n verifySslCert, name='bgp_2', enableBgp=True, holdTimer=90, dutIp={\n 'start': '1.1.1.1', 'direction': 'increment', 'step': '0.0.0.0'},\n enableGracefulRestart=False, restartTime=45, type='internal',\n enableBgpIdSameasRouterId=True, staleTime=0, flap=False)\n", (9092, 9400), False, 'import IxN_RestApiPy3\n'), ((9985, 10233), 'IxN_RestApiPy3.configNetworkGroup', 'IxN_RestApiPy3.configNetworkGroup', (['deviceGroupObj1'], {'name': '"""networkGroup1"""', 'multiplier': '(100)', 'networkAddress': "{'start': '172.16.17.32', 'step': '0.0.0.1', 'direction': 'increment'}", 'prefixLength': '(24)', 'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), "(deviceGroupObj1, name='networkGroup1',\n multiplier=100, networkAddress={'start': '172.16.17.32', 'step':\n '0.0.0.1', 'direction': 'increment'}, prefixLength=24, apiKey=apiKey,\n 
verifySslCert=verifySslCert)\n", (10018, 10233), False, 'import IxN_RestApiPy3\n'), ((10687, 10935), 'IxN_RestApiPy3.configNetworkGroup', 'IxN_RestApiPy3.configNetworkGroup', (['deviceGroupObj2'], {'name': '"""networkGroup2"""', 'multiplier': '(100)', 'networkAddress': "{'start': '172.16.17.32', 'step': '0.0.0.1', 'direction': 'increment'}", 'prefixLength': '(24)', 'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), "(deviceGroupObj2, name='networkGroup2',\n multiplier=100, networkAddress={'start': '172.16.17.32', 'step':\n '0.0.0.1', 'direction': 'increment'}, prefixLength=24, apiKey=apiKey,\n verifySslCert=verifySslCert)\n", (10720, 10935), False, 'import IxN_RestApiPy3\n'), ((11370, 11463), 'IxN_RestApiPy3.startAllProtocols', 'IxN_RestApiPy3.startAllProtocols', (['sessionUrl'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '(sessionUrl, apiKey=apiKey, verifySslCert=\n verifySslCert)\n', (11402, 11463), False, 'import IxN_RestApiPy3\n'), ((11464, 11593), 'IxN_RestApiPy3.verifyProtocolSessionsNgpf', 'IxN_RestApiPy3.verifyProtocolSessionsNgpf', (['[ipv4Obj1, ipv4Obj2, bgpObj1, bgpObj2]'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '([ipv4Obj1, ipv4Obj2, bgpObj1,\n bgpObj2], apiKey=apiKey, verifySslCert=verifySslCert)\n', (11505, 11593), False, 'import IxN_RestApiPy3\n'), ((11759, 12353), 'IxN_RestApiPy3.configTrafficItem', 'IxN_RestApiPy3.configTrafficItem', (['sessionUrl'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert', 'mode': '"""create"""', 'trafficItem': "{'name': 'Topo1 to Topo2', 'trafficType': 'ipv4', 'biDirectional': True,\n 'srcDestMesh': 'one-to-one', 'routeMesh': 'oneToOne',\n 'allowSelfDestined': False, 'trackBy': ['flowGroup0', 'vlanVlanId0']}", 'endpoints': "[{'name': 'Flow-Group-1', 'sources': [topologyObj1], 'destinations': [\n topologyObj2]}]", 'configElements': "[{'transmissionType': 'fixedFrameCount', 'frameCount': 50000, 'frameRate': \n 88, 'frameRateType': 'percentLineRate', 'frameSize': 128}]"}), 
"(sessionUrl, apiKey=apiKey, verifySslCert=\n verifySslCert, mode='create', trafficItem={'name': 'Topo1 to Topo2',\n 'trafficType': 'ipv4', 'biDirectional': True, 'srcDestMesh':\n 'one-to-one', 'routeMesh': 'oneToOne', 'allowSelfDestined': False,\n 'trackBy': ['flowGroup0', 'vlanVlanId0']}, endpoints=[{'name':\n 'Flow-Group-1', 'sources': [topologyObj1], 'destinations': [\n topologyObj2]}], configElements=[{'transmissionType': 'fixedFrameCount',\n 'frameCount': 50000, 'frameRate': 88, 'frameRateType':\n 'percentLineRate', 'frameSize': 128}])\n", (11791, 12353), False, 'import IxN_RestApiPy3\n'), ((13602, 13699), 'IxN_RestApiPy3.regenerateTrafficItems', 'IxN_RestApiPy3.regenerateTrafficItems', (['sessionUrl'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '(sessionUrl, apiKey=apiKey,\n verifySslCert=verifySslCert)\n', (13639, 13699), False, 'import IxN_RestApiPy3\n'), ((13701, 13789), 'IxN_RestApiPy3.applyTraffic', 'IxN_RestApiPy3.applyTraffic', (['sessionUrl'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '(sessionUrl, apiKey=apiKey, verifySslCert=\n verifySslCert)\n', (13728, 13789), False, 'import IxN_RestApiPy3\n'), ((13790, 13878), 'IxN_RestApiPy3.startTraffic', 'IxN_RestApiPy3.startTraffic', (['sessionUrl'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '(sessionUrl, apiKey=apiKey, verifySslCert=\n verifySslCert)\n', (13817, 13878), False, 'import IxN_RestApiPy3\n'), ((13879, 14024), 'IxN_RestApiPy3.checkTrafficState', 'IxN_RestApiPy3.checkTrafficState', (['sessionUrl'], {'expectedState': "['started', 'startedWaitingForStats']", 'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), "(sessionUrl, expectedState=['started',\n 'startedWaitingForStats'], apiKey=apiKey, verifySslCert=verifySslCert)\n", (13911, 14024), False, 'import IxN_RestApiPy3\n'), ((14195, 14326), 'IxN_RestApiPy3.checkTrafficState', 'IxN_RestApiPy3.checkTrafficState', (['sessionUrl'], {'expectedState': "['stopped']", 'timeout': '(45)', 'apiKey': 'apiKey', 
'verifySslCert': 'verifySslCert'}), "(sessionUrl, expectedState=['stopped'],\n timeout=45, apiKey=apiKey, verifySslCert=verifySslCert)\n", (14227, 14326), False, 'import IxN_RestApiPy3\n'), ((14410, 14488), 'IxN_RestApiPy3.getStats', 'IxN_RestApiPy3.getStats', (['sessionUrl'], {'viewName': '"""Flow Statistics"""', 'apiKey': 'apiKey'}), "(sessionUrl, viewName='Flow Statistics', apiKey=apiKey)\n", (14433, 14488), False, 'import IxN_RestApiPy3\n'), ((1835, 1879), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', ([], {}), '()\n', (1877, 1879), False, 'import requests, json, sys, os, time, traceback\n'), ((1902, 1997), 'IxN_RestApiPy3.connectToLinuxApiServer', 'IxN_RestApiPy3.connectToLinuxApiServer', (['linuxServerIp'], {'username': 'username', 'password': 'password'}), '(linuxServerIp, username=username,\n password=password)\n', (1940, 1997), False, 'import IxN_RestApiPy3\n'), ((2384, 2501), 'IxN_RestApiPy3.linuxServerConfigNewSessionLicense', 'IxN_RestApiPy3.linuxServerConfigNewSessionLicense', (['sessionUrl', 'linuxServerIp', 'apiKey'], {'verifySslCert': 'verifySslCert'}), '(sessionUrl, linuxServerIp,\n apiKey, verifySslCert=verifySslCert)\n', (2433, 2501), False, 'import IxN_RestApiPy3\n'), ((2602, 2670), 'IxN_RestApiPy3.getSessionUrl', 'IxN_RestApiPy3.getSessionUrl', (['ixNetRestServerIp', 'ixNetRestServerPort'], {}), '(ixNetRestServerIp, ixNetRestServerPort)\n', (2630, 2670), False, 'import IxN_RestApiPy3\n'), ((15329, 15434), 'IxN_RestApiPy3.linuxServerStopAndDeleteSession', 'IxN_RestApiPy3.linuxServerStopAndDeleteSession', (['sessionId'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '(sessionId, apiKey=apiKey,\n verifySslCert=verifySslCert)\n', (15375, 15434), False, 'import IxN_RestApiPy3\n'), ((2101, 2256), 'IxN_RestApiPy3.linuxServerConfigGlobalLicenseServer', 'IxN_RestApiPy3.linuxServerConfigGlobalLicenseServer', (['linuxServerIp', 'licenseServerIp', 'licenseMode', 'licenseTier', 'apiKey'], 
{'verifySslCert': 'verifySslCert'}), '(linuxServerIp,\n licenseServerIp, licenseMode, licenseTier, apiKey, verifySslCert=\n verifySslCert)\n', (2152, 2256), False, 'import IxN_RestApiPy3\n'), ((15653, 15758), 'IxN_RestApiPy3.linuxServerStopAndDeleteSession', 'IxN_RestApiPy3.linuxServerStopAndDeleteSession', (['sessionId'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '(sessionId, apiKey=apiKey,\n verifySslCert=verifySslCert)\n', (15699, 15758), False, 'import IxN_RestApiPy3\n'), ((15961, 16066), 'IxN_RestApiPy3.linuxServerStopAndDeleteSession', 'IxN_RestApiPy3.linuxServerStopAndDeleteSession', (['sessionId'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '(sessionId, apiKey=apiKey,\n verifySslCert=verifySslCert)\n', (16007, 16066), False, 'import IxN_RestApiPy3\n'), ((16231, 16336), 'IxN_RestApiPy3.linuxServerStopAndDeleteSession', 'IxN_RestApiPy3.linuxServerStopAndDeleteSession', (['sessionId'], {'apiKey': 'apiKey', 'verifySslCert': 'verifySslCert'}), '(sessionId, apiKey=apiKey,\n verifySslCert=verifySslCert)\n', (16277, 16336), False, 'import IxN_RestApiPy3\n'), ((15824, 15845), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (15843, 15845), False, 'import requests, json, sys, os, time, traceback\n')]
|
#!/usr/bin/env python
"""Packaging script for robpy (a Robot Framework pure Python test runner)."""
import os

# BUG FIX: setup() must come from setuptools, not distutils.core --
# distutils' setup() silently ignores setuptools-only keywords such as
# install_requires, so the robotframework dependency was never recorded.
from setuptools import setup, find_packages

name = '<NAME>'
# I might be just a little bit too much afraid of those bots..
# The e-mail address is assembled at runtime: "<first>.<last>" + '@' + "gmail.com".
address = name.lower().replace(' ', '.') + chr(64) + 'gmail.com'

setup(name='robpy',
      version='0.1',
      description='Test runner - Robot Framework pure Python runner',
      author=name,
      author_email=address,
      url='https://github.com/mkorpela/robpy',
      packages=find_packages(),
      scripts=[os.path.join('scripts', 'robpy'), os.path.join('scripts', 'robpy.bat')],
      license='Apache License, Version 2.0',
      install_requires=['robotframework'])
|
[
"os.path.join",
"setuptools.find_packages"
] |
[((465, 480), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (478, 480), False, 'from setuptools import find_packages\n'), ((499, 531), 'os.path.join', 'os.path.join', (['"""scripts"""', '"""robpy"""'], {}), "('scripts', 'robpy')\n", (511, 531), False, 'import os\n'), ((533, 569), 'os.path.join', 'os.path.join', (['"""scripts"""', '"""robpy.bat"""'], {}), "('scripts', 'robpy.bat')\n", (545, 569), False, 'import os\n')]
|
import random
import stdio
# Roll two fair six-sided dice and report their sum.
die1 = random.randrange(1, 7)
die2 = random.randrange(1, 7)
total = die1 + die2
stdio.writeln(f"Sum after rolling 2 dices: {die1} + {die2} = {total}")
|
[
"random.randrange"
] |
[((32, 54), 'random.randrange', 'random.randrange', (['(1)', '(7)'], {}), '(1, 7)\n', (48, 54), False, 'import random\n'), ((59, 81), 'random.randrange', 'random.randrange', (['(1)', '(7)'], {}), '(1, 7)\n', (75, 81), False, 'import random\n')]
|
import re
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional, Sequence, Union
from PyQt5.QtCore import QEventLoop, pyqtSignal, QTimer
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QDialog, QWidget, QApplication
from Cat.CatPythonGUI.GUI import CORNERS, PythonGUI
from Cat.CatPythonGUI.GUI.Widgets import HTMLDelegate
from Cat.CatPythonGUI.GUI.codeEditor import Position, SearchOptions, SearchMode
from Cat.CatPythonGUI.GUI.framelessWindow.catFramelessWindowMixin import CatFramelessWindowMixin
from Cat.CatPythonGUI.GUI.treeBuilders import DataTreeBuilder
from Cat.CatPythonGUI.utilities import connectOnlyOnce
from Cat.icons import icons
from Cat.utils import escapeForXml
from Cat.utils.collections_ import OrderedMultiDict
from Cat.utils.profiling import TimedMethod
from gui.datapackEditorGUI import ContextMenuEntries, makeTextSearcher
from model.Model import Datapack
from model.pathUtils import FilePath, ZipFilePool, loadTextFile
from session.session import getSession
@dataclass(unsafe_hash=True)
class Occurrence:
	"""A single search hit: which file it is in, where, and the rendered line."""
	file: FilePath      # file the match was found in
	position: Position  # Position(lineNr, column) of the match start (see search())
	line: str           # HTML snippet of the matched line with the match in <b> tags
@dataclass
class SearchResult:
	"""Mutable state and result of one search-all run."""
	filesToSearch: list[FilePath] = field(default_factory=list)  # all files scheduled to be searched
	occurrences: OrderedMultiDict[FilePath, Occurrence] = field(default_factory=OrderedMultiDict)  # hits, grouped per file in insertion order
	filesSearched: int = 0  # progress counter, updated while the search runs
	error: Optional[Exception] = None  # set when compiling the search expression fails
class SearchAllDialog(CatFramelessWindowMixin, QDialog):
	"""'Search All' dialog: searches every file of the selected datapacks for an
	expression (normal or regex) and shows the hits in a tree, grouped per file.
	"""
	def __init__(self, parent: Optional[QWidget] = None):
		super().__init__(GUICls=PythonGUI, parent=parent)
		# Datapacks ticked in the sidebar; only their files are searched.
		self._includedDatapacks: list[Datapack] = []
		self.searchExpr: str = ''
		self.searchOptions: SearchOptions = SearchOptions(
			searchMode=SearchMode.Normal,
			isCaseSensitive=False,
			isMultiLine=False,
		)
		self._searchResult: SearchResult = SearchResult()
		# Delegate that renders the HTML occurrence lines in the results tree.
		self.htmlDelegate = HTMLDelegate()
		self.setWindowTitle('Search')
	# Emitted with the number of files searched so far; drives the progress bar
	# and the incremental redraw of the results sub-GUI.
	progressSignal = pyqtSignal(int)
	def resetUserInterface(self):
		"""Discard previous results and prime a fresh SearchResult with the file list."""
		allFilePaths = self.filePathsToSearch
		self._searchResult = SearchResult(allFilePaths)
	def OnSidebarGUI(self, gui: PythonGUI):
		"""Sidebar: one checkbox per datapack of the current session world."""
		includedDatapacks = []
		with gui.vLayout(preventVStretch=True, verticalSpacing=0):
			for dp in getSession().world.datapacks:
				if gui.checkboxLeft(None, dp.name):
					includedDatapacks.append(dp)
		self._includedDatapacks = includedDatapacks
	def OnGUI(self, gui: PythonGUI):
		"""Main pane: search field + button, options, progress bar and results tree."""
		with gui.vLayout(preventVStretch=False):
			with gui.vLayout(preventVStretch=False):
				with gui.hLayout(horizontalSpacing=0):
					self.searchExpr = gui.codeField(self.searchExpr, isMultiline=False, roundedCorners=CORNERS.NONE)
					# Return key or the search button starts a search; singleShot
					# defers it so the GUI can repaint first.
					if gui.toolButton(icon=icons.search, overlap=(1, 0), roundedCorners=(False, True, False, True), default=True, windowShortcut=QKeySequence("Return")):
						self.resetUserInterface()
						QTimer.singleShot(1, self.search)
				# In regex mode, surface pattern compile errors immediately.
				if self.searchOptions.searchMode == SearchMode.RegEx:
					try:
						re.compile(self.searchExpr)
					except Exception as e:
						gui.helpBox(str(e), 'error', hasLabel=False)
				self._searchOptionsGUI(gui)
			gui.progressBar(self.progressSignal, min=0, max=len(self._searchResult.filesToSearch), value=self._searchResult.filesSearched, format='', textVisible=True)
			# The summary line lives in its own sub-GUI so it can be redrawn
			# from the progress signal without repainting the whole dialog.
			resultsGUI = gui.subGUI(PythonGUI, self._resultsGUI1, suppressRedrawLogging=False)
			connectOnlyOnce(self, self.progressSignal, lambda i: resultsGUI.redrawGUI(), 'resultsGUI')
			resultsGUI.redrawGUI()
		self._resultsGUI2(gui)
	def _searchOptionsGUI(self, gui: PythonGUI):
		"""Radio buttons for search mode plus the case/multiline toggles."""
		so = self.searchOptions
		# ============ Search Options: ============
		with gui.hLayout(preventHStretch=True):
			# Search Mode:
			so.searchMode = SearchMode.Normal if gui.radioButton(so.searchMode == SearchMode.Normal, 'Normal', group='searchMode', id=0) else so.searchMode
			so.searchMode = SearchMode.RegEx if gui.radioButton(so.searchMode == SearchMode.RegEx, 'RegEx', group='searchMode', id=2) else so.searchMode
			# Search Options:
			so.isCaseSensitive = gui.toggleLeft(so.isCaseSensitive, 'Case sensitive')
			# Multiline only makes sense for regex patterns.
			so.isMultiLine = gui.toggleLeft(so.isMultiLine, 'Multiline', enabled=so.searchMode == SearchMode.RegEx)
	def _resultsGUI1(self, gui: PythonGUI) -> None:
		"""One-line summary: either the search error or the hit/file counts."""
		if self._searchResult.error is not None:
			gui.helpBox(f'error during search: {self._searchResult.error}', style='error')
		else:
			gui.label(f'found {len(self._searchResult.occurrences)} occurrences in {len(self._searchResult.occurrences.uniqueKeys())} files ({self._searchResult.filesSearched} files searched total): (double-click to open)')
	def _resultsGUI2(self, gui: PythonGUI) -> None:
		"""Results tree: files at the top level, their occurrences as children."""
		def labelMaker(x: Union[SearchResult, FilePath, Occurrence], i: int) -> str:
			# Occurrence rows show the (HTML) matched line; file rows show a summary.
			if isinstance(x, Occurrence):
				return x.line
			else:
				countInFile = len(self._searchResult.occurrences.getall(x))
				if isinstance(x, tuple):
					filename = x[1].rpartition('/')[2]  # NOTE(review): computed but never used -- likely meant to appear in the label
					return f'(unknown) - ({countInFile}) - "{str(x[0])}"'
				elif isinstance(x, str):
					filename = x.rpartition('/')[2]  # NOTE(review): computed but never used
					# NOTE(review): '(countInFile)' is literal text here, not an
					# interpolation -- probably meant '({countInFile})' as above.
					return f'(unknown) - (countInFile) - "{str(x[0])}"'
			return '<root>'
		def openDocument(x: Union[FilePath, Occurrence], *, s=self):
			# Double-click: open the file, jumping to the match for occurrences.
			if isinstance(x, Occurrence):
				s.parent()._tryOpenOrSelectDocument(x.file, x.position)
			else:
				s.parent()._tryOpenOrSelectDocument(x)
		def onContextMenu(x: Union[SearchResult, FilePath, Occurrence], column: int, *, s=self):
			# Right-click: standard file context menu for file and occurrence rows.
			if isinstance(x, Occurrence):
				with gui.popupMenu(atMousePosition=True) as menu:
					menu.addItems(ContextMenuEntries.fileItems(x.file, s.parent()._tryOpenOrSelectDocument))
			elif not isinstance(x, SearchResult):
				with gui.popupMenu(atMousePosition=True) as menu:
					menu.addItems(ContextMenuEntries.fileItems(x, s.parent()._tryOpenOrSelectDocument))
		def childrenMaker(x: Union[SearchResult, FilePath, Occurrence], *, s=self) -> Sequence:
			# Root -> files with hits; file -> its occurrences; occurrence -> leaf.
			if isinstance(x, SearchResult):
				return list(x.occurrences.uniqueKeys())
				# return [(fp, x.occurrences.getall(fp)) for fp in x.occurrences.uniqueKeys()]
			elif isinstance(x, Occurrence):
				return tuple()
			else:
				return self._searchResult.occurrences.getall(x)
		gui.tree(
			DataTreeBuilder(
				self._searchResult,
				childrenMaker,  # lambda x: x.occurrences.items() if isinstance(x, SearchResult) else [],
				labelMaker,
				None, None, 1,
				showRoot=False,
				onDoubleClick=lambda x: openDocument(x),
				onContextMenu=onContextMenu
			),
			headerVisible=True,
			itemDelegate=self.htmlDelegate
		)
	@property
	def filePathsToSearch(self) -> list[FilePath]:
		"""All files of the currently checked datapacks, flattened into one list."""
		filePathsToSearch: list[FilePath] = []
		for datapack in self._includedDatapacks:
			filePathsToSearch.extend(datapack.files)
		return filePathsToSearch
	@TimedMethod()
	def search(self) -> None:
		"""Run the search over all scheduled files, filling self._searchResult.

		Emits progressSignal every 100 files and pumps the Qt event loop so the
		progress bar stays responsive; always redraws the GUI when done.
		"""
		searchResult = self._searchResult
		try:
			try:
				searcher = makeTextSearcher(self.searchExpr, self.searchOptions)
			except Exception as e:
				# Bad expression (e.g. invalid regex): record it and bail out.
				searchResult.error = e
				return
			with ZipFilePool() as zipFilePool:
				for i, filePath in enumerate(searchResult.filesToSearch):
					self._searchResult.filesSearched = i + 1
					if i % 100 == 0:
						self.progressSignal.emit(i+1)
						QApplication.processEvents(QEventLoop.ExcludeUserInputEvents, 1)
					try:
						text = loadTextFile(filePath, zipFilePool)
					except UnicodeDecodeError:
						# Binary / non-text file: skip it silently.
						continue
					lastStart = 0
					lastLineNr = 0
					for matchStart, matchEnd in searcher(text):
						# Expand the match to full line bounds for display.
						start = text.rfind('\n', 0, matchStart)
						start = start + 1 # skip \n at beginning of line # if start != -1 else 0
						end = text.find('\n', matchEnd)
						end = end if end != -1 else len(text)
						# HTML line with the match wrapped in <b>, XML-escaped.
						occurrenceStr = f'<font>{escapeForXml(text[start:matchStart])}<b>{escapeForXml(text[matchStart:matchEnd])}</b>{escapeForXml(text[matchEnd:end])}</font>'
						# Track the line number incrementally instead of recounting from 0.
						lastLineNr += text.count('\n', lastStart, start)
						lastStart = start
						searchResult.occurrences.add(filePath, Occurrence(filePath, Position(lastLineNr, matchStart - start), occurrenceStr))
		finally:
			self._gui.redrawGUI()
|
[
"PyQt5.QtCore.pyqtSignal",
"Cat.CatPythonGUI.GUI.codeEditor.Position",
"model.pathUtils.loadTextFile",
"Cat.CatPythonGUI.GUI.Widgets.HTMLDelegate",
"session.session.getSession",
"PyQt5.QtGui.QKeySequence",
"Cat.utils.escapeForXml",
"Cat.utils.profiling.TimedMethod",
"PyQt5.QtCore.QTimer.singleShot",
"gui.datapackEditorGUI.makeTextSearcher",
"dataclasses.field",
"Cat.CatPythonGUI.GUI.codeEditor.SearchOptions",
"PyQt5.QtWidgets.QApplication.processEvents",
"model.pathUtils.ZipFilePool",
"dataclasses.dataclass",
"re.compile"
] |
[((1041, 1068), 'dataclasses.dataclass', 'dataclass', ([], {'unsafe_hash': '(True)'}), '(unsafe_hash=True)\n', (1050, 1068), False, 'from dataclasses import dataclass, field\n'), ((1200, 1227), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (1205, 1227), False, 'from dataclasses import dataclass, field\n'), ((1283, 1322), 'dataclasses.field', 'field', ([], {'default_factory': 'OrderedMultiDict'}), '(default_factory=OrderedMultiDict)\n', (1288, 1322), False, 'from dataclasses import dataclass, field\n'), ((1905, 1920), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['int'], {}), '(int)\n', (1915, 1920), False, 'from PyQt5.QtCore import QEventLoop, pyqtSignal, QTimer\n'), ((6638, 6651), 'Cat.utils.profiling.TimedMethod', 'TimedMethod', ([], {}), '()\n', (6649, 6651), False, 'from Cat.utils.profiling import TimedMethod\n'), ((1663, 1752), 'Cat.CatPythonGUI.GUI.codeEditor.SearchOptions', 'SearchOptions', ([], {'searchMode': 'SearchMode.Normal', 'isCaseSensitive': '(False)', 'isMultiLine': '(False)'}), '(searchMode=SearchMode.Normal, isCaseSensitive=False,\n isMultiLine=False)\n', (1676, 1752), False, 'from Cat.CatPythonGUI.GUI.codeEditor import Position, SearchOptions, SearchMode\n'), ((1838, 1852), 'Cat.CatPythonGUI.GUI.Widgets.HTMLDelegate', 'HTMLDelegate', ([], {}), '()\n', (1850, 1852), False, 'from Cat.CatPythonGUI.GUI.Widgets import HTMLDelegate\n'), ((6746, 6799), 'gui.datapackEditorGUI.makeTextSearcher', 'makeTextSearcher', (['self.searchExpr', 'self.searchOptions'], {}), '(self.searchExpr, self.searchOptions)\n', (6762, 6799), False, 'from gui.datapackEditorGUI import ContextMenuEntries, makeTextSearcher\n'), ((6872, 6885), 'model.pathUtils.ZipFilePool', 'ZipFilePool', ([], {}), '()\n', (6883, 6885), False, 'from model.pathUtils import FilePath, ZipFilePool, loadTextFile\n'), ((2184, 2196), 'session.session.getSession', 'getSession', ([], {}), '()\n', (2194, 2196), False, 'from session.session import getSession\n'), 
((2794, 2827), 'PyQt5.QtCore.QTimer.singleShot', 'QTimer.singleShot', (['(1)', 'self.search'], {}), '(1, self.search)\n', (2811, 2827), False, 'from PyQt5.QtCore import QEventLoop, pyqtSignal, QTimer\n'), ((2902, 2929), 're.compile', 're.compile', (['self.searchExpr'], {}), '(self.searchExpr)\n', (2912, 2929), False, 'import re\n'), ((7074, 7138), 'PyQt5.QtWidgets.QApplication.processEvents', 'QApplication.processEvents', (['QEventLoop.ExcludeUserInputEvents', '(1)'], {}), '(QEventLoop.ExcludeUserInputEvents, 1)\n', (7100, 7138), False, 'from PyQt5.QtWidgets import QDialog, QWidget, QApplication\n'), ((7162, 7197), 'model.pathUtils.loadTextFile', 'loadTextFile', (['filePath', 'zipFilePool'], {}), '(filePath, zipFilePool)\n', (7174, 7197), False, 'from model.pathUtils import FilePath, ZipFilePool, loadTextFile\n'), ((2731, 2753), 'PyQt5.QtGui.QKeySequence', 'QKeySequence', (['"""Return"""'], {}), "('Return')\n", (2743, 2753), False, 'from PyQt5.QtGui import QKeySequence\n'), ((7573, 7609), 'Cat.utils.escapeForXml', 'escapeForXml', (['text[start:matchStart]'], {}), '(text[start:matchStart])\n', (7585, 7609), False, 'from Cat.utils import escapeForXml\n'), ((7614, 7653), 'Cat.utils.escapeForXml', 'escapeForXml', (['text[matchStart:matchEnd]'], {}), '(text[matchStart:matchEnd])\n', (7626, 7653), False, 'from Cat.utils import escapeForXml\n'), ((7659, 7691), 'Cat.utils.escapeForXml', 'escapeForXml', (['text[matchEnd:end]'], {}), '(text[matchEnd:end])\n', (7671, 7691), False, 'from Cat.utils import escapeForXml\n'), ((7848, 7888), 'Cat.CatPythonGUI.GUI.codeEditor.Position', 'Position', (['lastLineNr', '(matchStart - start)'], {}), '(lastLineNr, matchStart - start)\n', (7856, 7888), False, 'from Cat.CatPythonGUI.GUI.codeEditor import Position, SearchOptions, SearchMode\n')]
|
from subprocess import call
import os
from sstcam_sandbox.d181123_tf_cell_check import TF_Storage, \
TF_Sampling, all_files
def process(file, tf):
r0_path = file.r0_path
r1_name = os.path.basename(r0_path).replace("_r0", "_r1")
r1_path = os.path.join(tf.r1_dir, r1_name)
ped_path = file.ped_path
tf_path = tf.path
output_dir = os.path.dirname(r1_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
apply_calibration = "apply_calibration -i {} -p {} -t {} -o {}"
cmd = apply_calibration.format(r0_path, ped_path, tf_path, r1_path)
print(cmd)
call(cmd, shell=True)
def main():
for f in all_files:
process(f, TF_Sampling())
process(f, TF_Storage())
if __name__ == '__main__':
main()
|
[
"os.makedirs",
"os.path.basename",
"os.path.dirname",
"os.path.exists",
"subprocess.call",
"sstcam_sandbox.d181123_tf_cell_check.TF_Storage",
"os.path.join",
"sstcam_sandbox.d181123_tf_cell_check.TF_Sampling"
] |
[((256, 288), 'os.path.join', 'os.path.join', (['tf.r1_dir', 'r1_name'], {}), '(tf.r1_dir, r1_name)\n', (268, 288), False, 'import os\n'), ((358, 382), 'os.path.dirname', 'os.path.dirname', (['r1_path'], {}), '(r1_path)\n', (373, 382), False, 'import os\n'), ((614, 635), 'subprocess.call', 'call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (618, 635), False, 'from subprocess import call\n'), ((394, 420), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (408, 420), False, 'import os\n'), ((430, 453), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (441, 453), False, 'import os\n'), ((194, 219), 'os.path.basename', 'os.path.basename', (['r0_path'], {}), '(r0_path)\n', (210, 219), False, 'import os\n'), ((693, 706), 'sstcam_sandbox.d181123_tf_cell_check.TF_Sampling', 'TF_Sampling', ([], {}), '()\n', (704, 706), False, 'from sstcam_sandbox.d181123_tf_cell_check import TF_Storage, TF_Sampling, all_files\n'), ((727, 739), 'sstcam_sandbox.d181123_tf_cell_check.TF_Storage', 'TF_Storage', ([], {}), '()\n', (737, 739), False, 'from sstcam_sandbox.d181123_tf_cell_check import TF_Storage, TF_Sampling, all_files\n')]
|
import os
import sys
import argparse
import json
import re
SCHEMA_PATH = os.path.abspath("../resources/schema")
def main():
parser = argparse.ArgumentParser(description="Do stuff")
parser.add_argument("--schema", default=None)
arguments = parser.parse_args()
schema_file = arguments.schema
schema_file_path = SCHEMA_PATH + os.sep + schema_file
name = schema_file.replace(".json", "")
with open(schema_file_path, "r") as myfile:
schema = myfile.read()
schema_json = json.loads(schema)
autogenerate_parse(schema_json, name)
def autogenerate_parse(schema_json, name):
myfile = open(name + ".py", "w")
final_string = "\n"
final_string += "def parse_{0}({0}, data):\n".format(name)
for key in schema_json.keys():
py_key = convert_var(key)
key_json = schema_json[key]
if key == "kind":
continue
if type(schema_json[key]) is not dict:
final_string += "{3}{0}.{2} = data['{1}']\n".format(
name, key, py_key, 4 * " "
)
continue
final_string += "\n"
final_string += 4 * " " + "# " + key + "\n"
final_string += "{2}{1}_data = data.get('{0}', False)\n".format(
key, py_key, 4 * " "
)
final_string += "{1}if {0}_data:\n".format(py_key, 4 * " ")
for key2 in key_json.keys():
if type(key_json[key2]) is dict:
continue
py_key2 = convert_var(key2)
key2_json = key_json[key2]
final_string += "{4}{0}.{3} = {2}_data.get('{1}', None)\n".format(
name, key2, py_key, py_key2, 8 * " "
)
final_string += "\n"
final_string += "{1}return {0}".format(name, 4 * " ")
myfile.write(final_string)
myfile.close()
# print(final_string)
def convert_var(var_name):
return re.sub("(?<!^)(?=[A-Z])", "_", var_name).lower()
def contains_only_primitive_keys(json_dict):
for key in json_dict.keys():
t = type(json_dict[key])
if t is not dict:
return True
return False
if __name__ == "__main__":
main()
|
[
"os.path.abspath",
"re.sub",
"argparse.ArgumentParser",
"json.loads"
] |
[((74, 112), 'os.path.abspath', 'os.path.abspath', (['"""../resources/schema"""'], {}), "('../resources/schema')\n", (89, 112), False, 'import os\n'), ((140, 187), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Do stuff"""'}), "(description='Do stuff')\n", (163, 187), False, 'import argparse\n'), ((511, 529), 'json.loads', 'json.loads', (['schema'], {}), '(schema)\n', (521, 529), False, 'import json\n'), ((1888, 1928), 're.sub', 're.sub', (['"""(?<!^)(?=[A-Z])"""', '"""_"""', 'var_name'], {}), "('(?<!^)(?=[A-Z])', '_', var_name)\n", (1894, 1928), False, 'import re\n')]
|
# ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# ######################################################################################################################
from typing import List
from aws_cdk.aws_stepfunctions import (
StateMachineFragment,
State,
INextable,
Choice,
Pass,
Condition,
Map,
JsonPath,
)
from aws_cdk.core import Construct, Duration
from personalize.aws_lambda.functions import (
CreateFilter,
)
class FilterFragment(StateMachineFragment):
def __init__(
self,
scope: Construct,
id: str,
create_filter: CreateFilter,
):
super().__init__(scope, id)
# total allowed elapsed duration ~ 11m30s
retry_config = {
"backoff_rate": 1.25,
"interval": Duration.seconds(8),
"max_attempts": 15,
}
self.prepare_filter_input = Pass(
self,
"Prepare Filter Input Data",
input_path="$.datasetGroupArn",
result_path="$.filter.serviceConfig.datasetGroupArn",
)
self.create_filter = create_filter.state(
self,
"Create Filter",
input_path="$.filter",
**retry_config,
)
self.not_required = Pass(self, "Filters Not Required")
self.create_filters = Map(
self,
"Create Filters",
items_path="$.filters",
parameters={
"datasetGroupArn.$": "$.datasetGroup.serviceConfig.datasetGroupArn",
"filter.$": "$$.Map.Item.Value",
},
result_path=JsonPath.DISCARD,
)
self.start = (
Choice(self, "Check if Filters Required")
.when(
Condition.is_present("$.filters[0]"),
self.create_filters.iterator(
self.prepare_filter_input.next(self.create_filter)
),
)
.otherwise(self.not_required)
)
@property
def start_state(self) -> State:
return self.start.start_state
@property
def end_states(self) -> List[INextable]:
return [self.not_required, self.create_filters]
|
[
"aws_cdk.aws_stepfunctions.Choice",
"aws_cdk.core.Duration.seconds",
"aws_cdk.aws_stepfunctions.Condition.is_present",
"aws_cdk.aws_stepfunctions.Map",
"aws_cdk.aws_stepfunctions.Pass"
] |
[((2179, 2308), 'aws_cdk.aws_stepfunctions.Pass', 'Pass', (['self', '"""Prepare Filter Input Data"""'], {'input_path': '"""$.datasetGroupArn"""', 'result_path': '"""$.filter.serviceConfig.datasetGroupArn"""'}), "(self, 'Prepare Filter Input Data', input_path='$.datasetGroupArn',\n result_path='$.filter.serviceConfig.datasetGroupArn')\n", (2183, 2308), False, 'from aws_cdk.aws_stepfunctions import StateMachineFragment, State, INextable, Choice, Pass, Condition, Map, JsonPath\n'), ((2562, 2596), 'aws_cdk.aws_stepfunctions.Pass', 'Pass', (['self', '"""Filters Not Required"""'], {}), "(self, 'Filters Not Required')\n", (2566, 2596), False, 'from aws_cdk.aws_stepfunctions import StateMachineFragment, State, INextable, Choice, Pass, Condition, Map, JsonPath\n'), ((2627, 2832), 'aws_cdk.aws_stepfunctions.Map', 'Map', (['self', '"""Create Filters"""'], {'items_path': '"""$.filters"""', 'parameters': "{'datasetGroupArn.$': '$.datasetGroup.serviceConfig.datasetGroupArn',\n 'filter.$': '$$.Map.Item.Value'}", 'result_path': 'JsonPath.DISCARD'}), "(self, 'Create Filters', items_path='$.filters', parameters={\n 'datasetGroupArn.$': '$.datasetGroup.serviceConfig.datasetGroupArn',\n 'filter.$': '$$.Map.Item.Value'}, result_path=JsonPath.DISCARD)\n", (2630, 2832), False, 'from aws_cdk.aws_stepfunctions import StateMachineFragment, State, INextable, Choice, Pass, Condition, Map, JsonPath\n'), ((2079, 2098), 'aws_cdk.core.Duration.seconds', 'Duration.seconds', (['(8)'], {}), '(8)\n', (2095, 2098), False, 'from aws_cdk.core import Construct, Duration\n'), ((3054, 3090), 'aws_cdk.aws_stepfunctions.Condition.is_present', 'Condition.is_present', (['"""$.filters[0]"""'], {}), "('$.filters[0]')\n", (3074, 3090), False, 'from aws_cdk.aws_stepfunctions import StateMachineFragment, State, INextable, Choice, Pass, Condition, Map, JsonPath\n'), ((2977, 3018), 'aws_cdk.aws_stepfunctions.Choice', 'Choice', (['self', '"""Check if Filters Required"""'], {}), "(self, 'Check if Filters Required')\n", 
(2983, 3018), False, 'from aws_cdk.aws_stepfunctions import StateMachineFragment, State, INextable, Choice, Pass, Condition, Map, JsonPath\n')]
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
import logging
import time
import threading
from functools import partial
from .partition_context import PartitionContext
from .ownership_manager import OwnershipManager
from .common import CloseReason
from. _eventprocessor_mixin import EventProcessorMixin
_LOGGER = logging.getLogger(__name__)
class EventProcessor(EventProcessorMixin): # pylint:disable=too-many-instance-attributes
"""
An EventProcessor constantly receives events from one or multiple partitions of the Event Hub
in the context of a given consumer group.
"""
def __init__(self, eventhub_client, consumer_group, on_event, **kwargs):
self._consumer_group = consumer_group
self._eventhub_client = eventhub_client
self._namespace = eventhub_client._address.hostname # pylint: disable=protected-access
self._eventhub_name = eventhub_client.eventhub_name
self._event_handler = on_event
self._partition_id = kwargs.get("partition_id", None)
self._error_handler = kwargs.get("on_error", None)
self._partition_initialize_handler = kwargs.get("on_partition_initialize", None)
self._partition_close_handler = kwargs.get("on_partition_close", None)
self._checkpoint_store = kwargs.get("checkpoint_store", None)
self._initial_event_position = kwargs.get("initial_event_position", "-1")
self._initial_event_position_inclusive = kwargs.get("initial_event_position_inclusive", False)
self._load_balancing_interval = kwargs.get("load_balancing_interval", 10.0)
self._ownership_timeout = self._load_balancing_interval * 2
self._partition_contexts = {}
# Receive parameters
self._owner_level = kwargs.get("owner_level", None)
if self._checkpoint_store and self._owner_level is None:
self._owner_level = 0
self._prefetch = kwargs.get("prefetch", None)
self._track_last_enqueued_event_properties = kwargs.get("track_last_enqueued_event_properties", False)
self._id = str(uuid.uuid4())
self._running = False
self._lock = threading.RLock()
self._consumers = {}
self._ownership_manager = OwnershipManager(
self._eventhub_client,
self._consumer_group,
self._id,
self._checkpoint_store,
self._ownership_timeout,
self._partition_id
)
def __repr__(self):
return 'EventProcessor: id {}'.format(self._id)
def _cancel_tasks_for_partitions(self, to_cancel_partitions):
with self._lock:
for partition_id in to_cancel_partitions:
if partition_id in self._consumers:
self._consumers[partition_id].stop = True
if to_cancel_partitions:
_LOGGER.info("EventProcesor %r has cancelled partitions %r", self._id, to_cancel_partitions)
def _create_tasks_for_claimed_ownership(self, claimed_partitions, checkpoints=None):
with self._lock:
for partition_id in claimed_partitions:
if partition_id not in self._consumers:
if partition_id in self._partition_contexts:
partition_context = self._partition_contexts[partition_id]
else:
partition_context = PartitionContext(
self._namespace,
self._eventhub_name,
self._consumer_group,
partition_id,
self._checkpoint_store
)
self._partition_contexts[partition_id] = partition_context
checkpoint = checkpoints.get(partition_id) if checkpoints else None
initial_event_position, event_postition_inclusive =\
self.get_init_event_position(partition_id, checkpoint)
event_received_callback = partial(self._on_event_received, partition_context)
self._consumers[partition_id] = self.create_consumer(partition_id,
initial_event_position,
event_postition_inclusive,
event_received_callback)
if self._partition_initialize_handler:
self._handle_callback(
[self._partition_initialize_handler,
self._partition_contexts[partition_id]]
)
def _handle_callback(self, callback_and_args):
callback = callback_and_args[0]
try:
callback(*callback_and_args[1:])
except Exception as exp: # pylint:disable=broad-except
partition_context = callback_and_args[1]
if self._error_handler and callback != self._error_handler:
self._handle_callback([self._error_handler, partition_context, exp])
else:
_LOGGER.warning(
"EventProcessor instance %r of eventhub %r partition %r consumer group %r"
" has another error during running process_error(). The exception is %r.",
self._id,
partition_context.eventhub_name,
partition_context.partition_id,
partition_context.consumer_group,
exp
)
def _on_event_received(self, partition_context, event):
with self._context(event):
if self._track_last_enqueued_event_properties:
partition_context._last_received_event = event # pylint: disable=protected-access
self._handle_callback([self._event_handler, partition_context, event])
def _load_balancing(self):
"""Start the EventProcessor.
The EventProcessor will try to claim and balance partition ownership with other `EventProcessor`
and start receiving EventData from EventHub and processing events.
:return: None
"""
while self._running:
try:
checkpoints = self._ownership_manager.get_checkpoints() if self._checkpoint_store else None
claimed_partition_ids = self._ownership_manager.claim_ownership()
if claimed_partition_ids:
to_cancel_list = set(self._consumers.keys()) - set(claimed_partition_ids)
self._create_tasks_for_claimed_ownership(claimed_partition_ids, checkpoints)
else:
_LOGGER.info("EventProcessor %r hasn't claimed an ownership. It keeps claiming.", self._id)
to_cancel_list = set(self._consumers.keys())
if to_cancel_list:
self._cancel_tasks_for_partitions(to_cancel_list)
except Exception as err: # pylint:disable=broad-except
_LOGGER.warning("An exception (%r) occurred during balancing and claiming ownership for "
"eventhub %r consumer group %r. Retrying after %r seconds",
err, self._eventhub_name, self._consumer_group, self._load_balancing_interval)
# ownership_manager.get_checkpoints() and ownership_manager.claim_ownership() may raise exceptions
# when there are load balancing and/or checkpointing (checkpoint_store isn't None).
# They're swallowed here to retry every self._load_balancing_interval seconds.
# Meanwhile this event processor won't lose the partitions it has claimed before.
# If it keeps failing, other EventProcessors will start to claim ownership of the partitions
# that this EventProcessor is working on. So two or multiple EventProcessors may be working
# on the same partition.
time.sleep(self._load_balancing_interval)
def _close_consumer(self, partition_id, consumer, reason):
consumer.close()
with self._lock:
del self._consumers[partition_id]
_LOGGER.info(
"PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r"
" is being closed. Reason is: %r",
self._id,
self._partition_contexts[partition_id].eventhub_name,
self._partition_contexts[partition_id].partition_id,
self._partition_contexts[partition_id].consumer_group,
reason
)
if self._partition_close_handler:
self._handle_callback([self._partition_close_handler, self._partition_contexts[partition_id], reason])
self._ownership_manager.release_ownership(partition_id)
def start(self):
if self._running:
_LOGGER.info("EventProcessor %r has already started.", self._id)
return
_LOGGER.info("EventProcessor %r is being started", self._id)
self._running = True
thread = threading.Thread(target=self._load_balancing)
thread.daemon = True
thread.start()
while self._running:
for partition_id, consumer in list(self._consumers.items()):
if consumer.stop:
self._close_consumer(partition_id, consumer, CloseReason.OWNERSHIP_LOST)
continue
try:
consumer.receive()
except Exception as error: # pylint:disable=broad-except
_LOGGER.warning(
"PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r"
" has met an error. The exception is %r.",
self._id,
self._partition_contexts[partition_id].eventhub_name,
self._partition_contexts[partition_id].partition_id,
self._partition_contexts[partition_id].consumer_group,
error
)
if self._error_handler:
self._handle_callback([self._error_handler, self._partition_contexts[partition_id], error])
self._close_consumer(partition_id, consumer, CloseReason.OWNERSHIP_LOST)
with self._lock:
for partition_id, consumer in list(self._consumers.items()):
self._close_consumer(partition_id, consumer, CloseReason.SHUTDOWN)
def stop(self):
"""Stop the EventProcessor.
The EventProcessor will stop receiving events from EventHubs and release the ownership of the partitions
it is working on.
Other running EventProcessor will take over these released partitions.
A stopped EventProcessor can be restarted by calling method `start` again.
:return: None
"""
if not self._running:
_LOGGER.info("EventProcessor %r has already been stopped.", self._id)
return
self._running = False
_LOGGER.info("EventProcessor %r has been stopped.", self._id)
|
[
"threading.Thread",
"functools.partial",
"uuid.uuid4",
"threading.RLock",
"time.sleep",
"logging.getLogger"
] |
[((626, 653), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (643, 653), False, 'import logging\n'), ((2451, 2468), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (2466, 2468), False, 'import threading\n'), ((9490, 9535), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._load_balancing'}), '(target=self._load_balancing)\n', (9506, 9535), False, 'import threading\n'), ((2386, 2398), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2396, 2398), False, 'import uuid\n'), ((8378, 8419), 'time.sleep', 'time.sleep', (['self._load_balancing_interval'], {}), '(self._load_balancing_interval)\n', (8388, 8419), False, 'import time\n'), ((4329, 4380), 'functools.partial', 'partial', (['self._on_event_received', 'partition_context'], {}), '(self._on_event_received, partition_context)\n', (4336, 4380), False, 'from functools import partial\n')]
|
#!/usr/bin/env python3
#
# Project homepage: https://github.com/mwoolweaver
# Licence: <http://unlicense.org/>
# Created by <NAME> <<EMAIL>>
# ================================================================================
import pandas
from lib.debug import debuginfo, debuginfoDBV, debuginfoDBVV, debuginfoDBVVV, sqlError
def fetchEntries(filesWeNeed):
listWeNeed = []
for fileNeeded in filesWeNeed:
location = pandas.read_csv(fileNeeded,delimiter='\t',encoding='utf-8')
test3 = list(location.itertuples(index=False, name=None))
listWeNeed.append(test3)
#
return (listWeNeed)
def fetchGroups(fileNeeded):
location = pandas.read_csv(fileNeeded,delimiter='\t',encoding='utf-8')
groups = list(location.itertuples(index=False, name=None))
lists =[]
for group in groups:
lists.append(r"domains/" + group[4])
#
return (groups, lists)
|
[
"pandas.read_csv"
] |
[((668, 729), 'pandas.read_csv', 'pandas.read_csv', (['fileNeeded'], {'delimiter': '"""\t"""', 'encoding': '"""utf-8"""'}), "(fileNeeded, delimiter='\\t', encoding='utf-8')\n", (683, 729), False, 'import pandas\n'), ((433, 494), 'pandas.read_csv', 'pandas.read_csv', (['fileNeeded'], {'delimiter': '"""\t"""', 'encoding': '"""utf-8"""'}), "(fileNeeded, delimiter='\\t', encoding='utf-8')\n", (448, 494), False, 'import pandas\n')]
|
# -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sexpdata'
copyright = u'2012, <NAME>'
# The short X.Y version.
version = '0.0.4'
# The full version, including alpha/beta/rc tags.
release = '0.0.4.dev1'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
html_theme = 'default'
#html_theme_options = {}
html_static_path = [] # default: ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'sexpdatadoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto/manual]).
latex_documents = [
('index', 'sexpdata.tex', u'sexpdata Documentation',
u'Takafumi Arakaki', 'manual'),
]
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sexpdata', u'sexpdata Documentation',
[u'Takafumi Arakaki'], 1)
]
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sexpdata', u'sexpdata Documentation',
u'Takafumi Arakaki', 'sexpdata', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
[
"os.path.abspath"
] |
[((66, 90), 'os.path.abspath', 'os.path.abspath', (['"""../.."""'], {}), "('../..')\n", (81, 90), False, 'import os\n')]
|
import gym
import pybullet as p
import numpy as np
from gym_delta_robot_trampoline.resources.delta_robot_trampoline import Omnid_Simulator
import matplotlib.pyplot as plt
import os
import pybullet_data
"""
Action space (1,3) : [theta_1_torque, theta_2_torque, theta_3_torque]
Observation space (1,18) : [3 joint_positions, 3 joint velocities, 3 eef positions, 3 eef velocities, 3
3 ball positions, 3 ball velocities]
"""
FAIL_ALTITUDE = 0.20
BONUS_ALTITUDE_DIFF = 0.16
MAX_STEP_NUM = 800
class DeltaRobotTrampolineEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
self.step_counter = 0
#TODO
# self.client = p.connect(p.DIRECT)
self.client = p.connect(p.GUI)
p.resetDebugVisualizerCamera(cameraDistance=1.5, cameraYaw=0, cameraPitch=-40, cameraTargetPosition=[0.05,-0.35,0.2])
self.action_space = gym.spaces.box.Box(
low=np.array([-100] * 3),
high=np.array([100] * 3))
self.observation_space = gym.spaces.box.Box(
low=np.array([-np.pi/4, -np.pi/4, -np.pi/4, -100, -100, -100, \
-5, -5, -5, -50, -50, -50, \
-20, -20, 0, -50, -50, -50]),
high=np.array([np.pi/2, np.pi/2, np.pi/2, 100, 100, 100, \
5, 5, 5, 50, 50, 50, \
20, 20, 20, 50, 50, 50]))
self.np_random, _ = gym.utils.seeding.np_random()
#enable visualization
#TODO
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,1)
def reset(self):
p.resetSimulation()
# episode params
self.step_counter = 0
self.above_BONUS_ALTITUDE_DIFF = False
p.loadURDF(os.path.join(pybullet_data.getDataPath(), "plane.urdf")) #loads from the root pybullet library
p.setGravity(0,0,-10)
p.setRealTimeSimulation(0)
#set up the robot and the ball
self.omnid_simulator = Omnid_Simulator()
initialized = False
self.omnid_simulator.attachBallToRobot() # we want the robot to land safely onto the robot.
while not initialized:
self.omnid_simulator.updateStates()
if self.omnid_simulator.ballonRobot():
self.omnid_simulator.detachBallFromRobot() #now we can let the ball move freely!
initialized = True
p.stepSimulation()
self.observation = self.omnid_simulator.updateStates().astype(np.float32)
return self.observation
def step(self, action):
self.omnid_simulator.applyJointTorque({"theta_1": action[0], \
"theta_2": action[1], \
"theta_3": action[2]})
p.stepSimulation()
self.step_counter += 1
self.observation = self.omnid_simulator.updateStates()
#z < 0, -100. else, if get over height threshold, we get 100.
z= self.observation[14]
if z < FAIL_ALTITUDE:
reward = -25
done = True
else:
height_diff = z - self.observation[8]
if height_diff >= BONUS_ALTITUDE_DIFF:
done = False
if not self.above_BONUS_ALTITUDE_DIFF:
reward = 50
self.above_BONUS_ALTITUDE_DIFF = True
self.step_counter = 0
else:
reward = 0
else: #ball is above the platform but lower than the relative height threshold
if self.above_BONUS_ALTITUDE_DIFF:
self.above_BONUS_ALTITUDE_DIFF = False
reward = -0.1
done = False
if self.step_counter >= MAX_STEP_NUM:
done = True
info = {"eef position: ": self.observation[6:9], \
"ball position: ": self.observation[12:15]}
return self.observation.astype(np.float32), reward, done, info
def render(self, mode='human'):
""" Render is an interface function. Since we are using GUI, we do not need this.
We use GUI because computing view matrices and projection matrices is much slower. """
pass
def close(self):
p.disconnect(self.client)
def seed(self, seed=None):
self.np_random, seed = gym.utils.seeding.np_random(seed)
return [seed]
|
[
"pybullet.setRealTimeSimulation",
"pybullet.resetSimulation",
"pybullet.stepSimulation",
"pybullet.setGravity",
"pybullet.configureDebugVisualizer",
"gym_delta_robot_trampoline.resources.delta_robot_trampoline.Omnid_Simulator",
"pybullet.resetDebugVisualizerCamera",
"pybullet.disconnect",
"numpy.array",
"pybullet_data.getDataPath",
"pybullet.connect",
"gym.utils.seeding.np_random"
] |
[((716, 732), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (725, 732), True, 'import pybullet as p\n'), ((741, 865), 'pybullet.resetDebugVisualizerCamera', 'p.resetDebugVisualizerCamera', ([], {'cameraDistance': '(1.5)', 'cameraYaw': '(0)', 'cameraPitch': '(-40)', 'cameraTargetPosition': '[0.05, -0.35, 0.2]'}), '(cameraDistance=1.5, cameraYaw=0, cameraPitch=-\n 40, cameraTargetPosition=[0.05, -0.35, 0.2])\n', (769, 865), True, 'import pybullet as p\n'), ((1427, 1456), 'gym.utils.seeding.np_random', 'gym.utils.seeding.np_random', ([], {}), '()\n', (1454, 1456), False, 'import gym\n'), ((1510, 1563), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_RENDERING', '(1)'], {}), '(p.COV_ENABLE_RENDERING, 1)\n', (1536, 1563), True, 'import pybullet as p\n'), ((1593, 1612), 'pybullet.resetSimulation', 'p.resetSimulation', ([], {}), '()\n', (1610, 1612), True, 'import pybullet as p\n'), ((1840, 1863), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-10)'], {}), '(0, 0, -10)\n', (1852, 1863), True, 'import pybullet as p\n'), ((1870, 1896), 'pybullet.setRealTimeSimulation', 'p.setRealTimeSimulation', (['(0)'], {}), '(0)\n', (1893, 1896), True, 'import pybullet as p\n'), ((1968, 1985), 'gym_delta_robot_trampoline.resources.delta_robot_trampoline.Omnid_Simulator', 'Omnid_Simulator', ([], {}), '()\n', (1983, 1985), False, 'from gym_delta_robot_trampoline.resources.delta_robot_trampoline import Omnid_Simulator\n'), ((2771, 2789), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (2787, 2789), True, 'import pybullet as p\n'), ((4244, 4269), 'pybullet.disconnect', 'p.disconnect', (['self.client'], {}), '(self.client)\n', (4256, 4269), True, 'import pybullet as p\n'), ((4333, 4366), 'gym.utils.seeding.np_random', 'gym.utils.seeding.np_random', (['seed'], {}), '(seed)\n', (4360, 4366), False, 'import gym\n'), ((2388, 2406), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (2404, 2406), True, 
'import pybullet as p\n'), ((924, 944), 'numpy.array', 'np.array', (['([-100] * 3)'], {}), '([-100] * 3)\n', (932, 944), True, 'import numpy as np\n'), ((963, 982), 'numpy.array', 'np.array', (['([100] * 3)'], {}), '([100] * 3)\n', (971, 982), True, 'import numpy as np\n'), ((1054, 1177), 'numpy.array', 'np.array', (['[-np.pi / 4, -np.pi / 4, -np.pi / 4, -100, -100, -100, -5, -5, -5, -50, -50,\n -50, -20, -20, 0, -50, -50, -50]'], {}), '([-np.pi / 4, -np.pi / 4, -np.pi / 4, -100, -100, -100, -5, -5, -5,\n -50, -50, -50, -20, -20, 0, -50, -50, -50])\n', (1062, 1177), True, 'import numpy as np\n'), ((1242, 1350), 'numpy.array', 'np.array', (['[np.pi / 2, np.pi / 2, np.pi / 2, 100, 100, 100, 5, 5, 5, 50, 50, 50, 20, \n 20, 20, 50, 50, 50]'], {}), '([np.pi / 2, np.pi / 2, np.pi / 2, 100, 100, 100, 5, 5, 5, 50, 50, \n 50, 20, 20, 20, 50, 50, 50])\n', (1250, 1350), True, 'import numpy as np\n'), ((1749, 1776), 'pybullet_data.getDataPath', 'pybullet_data.getDataPath', ([], {}), '()\n', (1774, 1776), False, 'import pybullet_data\n')]
|
"""cc_licenses URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
# Third-party
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views.generic import TemplateView
# First-party/Local
from licenses.views import branch_status, translation_status
urlpatterns = [
url(r"^admin/", admin.site.urls),
path("", TemplateView.as_view(template_name="home.html"), name="home"),
url(
r"status/(?P<id>\d+)/$",
branch_status,
name="branch_status",
),
url(
r"status/$",
translation_status,
name="translation_status",
),
url(r"licenses/", include("licenses.urls")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# Third-party
import debug_toolbar
urlpatterns += [
url(r"^__debug__/", include(debug_toolbar.urls)),
]
|
[
"django.views.generic.TemplateView.as_view",
"django.conf.urls.static.static",
"django.conf.urls.url",
"django.urls.include"
] |
[((1329, 1390), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (1335, 1390), False, 'from django.conf.urls.static import static\n'), ((964, 995), 'django.conf.urls.url', 'url', (['"""^admin/"""', 'admin.site.urls'], {}), "('^admin/', admin.site.urls)\n", (967, 995), False, 'from django.conf.urls import url\n'), ((1078, 1143), 'django.conf.urls.url', 'url', (['"""status/(?P<id>\\\\d+)/$"""', 'branch_status'], {'name': '"""branch_status"""'}), "('status/(?P<id>\\\\d+)/$', branch_status, name='branch_status')\n", (1081, 1143), False, 'from django.conf.urls import url\n'), ((1180, 1242), 'django.conf.urls.url', 'url', (['"""status/$"""', 'translation_status'], {'name': '"""translation_status"""'}), "('status/$', translation_status, name='translation_status')\n", (1183, 1242), False, 'from django.conf.urls import url\n'), ((1011, 1058), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""home.html"""'}), "(template_name='home.html')\n", (1031, 1058), False, 'from django.views.generic import TemplateView\n'), ((1298, 1322), 'django.urls.include', 'include', (['"""licenses.urls"""'], {}), "('licenses.urls')\n", (1305, 1322), False, 'from django.urls import include, path\n'), ((1504, 1531), 'django.urls.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (1511, 1531), False, 'from django.urls import include, path\n')]
|
from rpython.rlib.rbigint import BASE10
from typhon.nano import checkpoints
from typhon.quoting import quoteChar, quoteStr
def doNanoSmallCaps(expr):
expr = checkpoints.AddCheckpoints().visitExpr(expr)
expr = checkpoints.CollectCheckpoints().visitExpr(expr)
return expr
class PrettySmallCaps(checkpoints.CheckpointIR.makePassTo(None)):
def __init__(self):
self.buf = []
def asUnicode(self):
return u"".join(self.buf)
def write(self, s):
self.buf.append(s)
def visitNullExpr(self):
self.write(u"null")
def visitCharExpr(self, c):
self.write(quoteChar(c[0]))
def visitDoubleExpr(self, d):
self.write(u"%f" % d)
def visitIntExpr(self, i):
self.write(i.format(BASE10).decode("utf-8"))
def visitStrExpr(self, s):
self.write(quoteStr(s))
def visitAssignExpr(self, name, rvalue):
self.write(name)
self.write(u" := ")
self.visitExpr(rvalue)
def visitBindingExpr(self, name):
self.write(u"&&")
self.write(name)
def visitCallExpr(self, obj, verb, args, namedArgs):
self.visitExpr(obj)
self.write(u".")
self.write(verb)
self.write(u"(")
if args:
self.visitExpr(args[0])
for arg in args[1:]:
self.write(u", ")
self.visitExpr(arg)
if namedArgs:
self.visitNamedArg(args[0])
for namedArg in namedArgs[1:]:
self.write(u", ")
self.visitNamedArg(namedArg)
self.write(u")")
def visitDefExpr(self, patt, ex, rvalue):
if isinstance(patt, checkpoints.CheckpointIR.VarPatt):
self.write(u"def ")
self.visitPatt(patt)
if not isinstance(ex, checkpoints.CheckpointIR.NullExpr):
self.write(u" exit ")
self.visitExpr(ex)
self.write(u" := ")
self.visitExpr(rvalue)
def visitEscapeOnlyExpr(self, patt, body):
self.write(u"escape ")
self.visitPatt(patt)
self.write(u" {")
self.visitExpr(body)
self.write(u"}")
def visitEscapeExpr(self, patt, body, catchPatt, catchBody):
self.write(u"escape ")
self.visitPatt(patt)
self.write(u" {")
self.visitExpr(body)
self.write(u"} catch ")
self.visitPatt(catchPatt)
self.write(u" {")
self.visitExpr(catchBody)
self.write(u"}")
def visitFinallyExpr(self, body, atLast):
self.write(u"try {")
self.visitExpr(body)
self.write(u"} finally {")
self.visitExpr(atLast)
self.write(u"}")
def visitHideExpr(self, body):
self.write(u"{")
self.visitExpr(body)
self.write(u"}")
def visitIfExpr(self, test, cons, alt):
self.write(u"if (")
self.visitExpr(test)
self.write(u") {")
self.visitExpr(cons)
self.write(u"} else {")
self.visitExpr(alt)
self.write(u"}")
def visitMetaContextExpr(self):
self.write(u"meta.context()")
def visitMetaStateExpr(self):
self.write(u"meta.state()")
def visitNounExpr(self, name):
self.write(name)
def visitObjectExpr(self, doc, patt, auditors, methods, matchers):
self.write(u"object ")
self.visitPatt(patt)
if auditors:
self.write(u" as ")
self.visitExpr(auditors[0])
auditors = auditors[1:]
if auditors:
self.write(u" implements ")
self.visitExpr(auditors[0])
for auditor in auditors[1:]:
self.write(u", ")
self.visitExpr(auditor)
self.write(u" {")
for method in methods:
self.visitMethod(method)
for matcher in matchers:
self.visitMatcher(matcher)
self.write(u"}")
def visitSeqExpr(self, exprs):
if exprs:
self.visitExpr(exprs[0])
for expr in exprs[1:]:
self.write(u"; ")
self.visitExpr(expr)
def visitTryExpr(self, body, catchPatt, catchBody):
self.write(u"try {")
self.visitExpr(body)
self.write(u"} catch ")
self.visitPatt(catchPatt)
self.write(u" {")
self.visitExpr(catchBody)
self.write(u"}")
def visitIgnorePatt(self, guard):
self.write(u"_")
if not isinstance(guard, checkpoints.CheckpointIR.NullExpr):
self.write(u" :")
self.visitExpr(guard)
def visitBindingPatt(self, name):
self.write(u"&&")
self.write(name)
def visitFinalPatt(self, name, guard):
self.write(name)
if not isinstance(guard, checkpoints.CheckpointIR.NullExpr):
self.write(u" :")
self.visitExpr(guard)
def visitVarPatt(self, name, guard):
self.write(u"var ")
self.write(name)
if not isinstance(guard, checkpoints.CheckpointIR.NullExpr):
self.write(u" :")
self.visitExpr(guard)
def visitListPatt(self, patts):
self.write(u"[")
if patts:
self.visitPatt(patts[0])
for patt in patts[1:]:
self.write(u", ")
self.visitPatt(patt)
self.write(u"]")
def visitViaPatt(self, trans, patt):
self.write(u"via (")
self.visitExpr(trans)
self.write(u") ")
self.visitPatt(patt)
def visitNamedArgExpr(self, key, value):
self.visitExpr(key)
self.write(u" => ")
self.visitExpr(value)
def visitNamedPattern(self, key, patt, default):
self.visitExpr(key)
self.write(u" => ")
self.visitPatt(patt)
self.write(u" := ")
self.visitExpr(default)
def visitMatcherExpr(self, patt, body):
self.write(u"match ")
self.visitPatt(patt)
self.write(u" {")
self.visitExpr(body)
self.write(u"}")
def visitMethodExpr(self, doc, verb, patts, namedPatts, guard, body):
self.write(u"method ")
self.write(verb)
self.write(u"(")
if patts:
self.visitPatt(patts[0])
for patt in patts[1:]:
self.write(u", ")
self.visitPatt(patt)
if patts and namedPatts:
self.write(u", ")
if namedPatts:
self.visitNamedPatt(namedPatts[0])
for namedPatt in namedPatts[1:]:
self.write(u", ")
self.visitNamedPatt(namedPatt)
self.write(u")")
if not isinstance(guard, checkpoints.CheckpointIR.NullExpr):
self.write(u" :")
self.visitExpr(guard)
self.write(u" {")
self.visitExpr(body)
self.write(u"}")
def visitCheckpointExpr(self, count):
self.write(u"meta.checkpoint(")
self.write(u"%d" % count)
self.write(u")")
|
[
"typhon.nano.checkpoints.AddCheckpoints",
"typhon.nano.checkpoints.CollectCheckpoints",
"typhon.quoting.quoteStr",
"typhon.quoting.quoteChar",
"typhon.nano.checkpoints.CheckpointIR.makePassTo"
] |
[((307, 348), 'typhon.nano.checkpoints.CheckpointIR.makePassTo', 'checkpoints.CheckpointIR.makePassTo', (['None'], {}), '(None)\n', (342, 348), False, 'from typhon.nano import checkpoints\n'), ((163, 191), 'typhon.nano.checkpoints.AddCheckpoints', 'checkpoints.AddCheckpoints', ([], {}), '()\n', (189, 191), False, 'from typhon.nano import checkpoints\n'), ((219, 251), 'typhon.nano.checkpoints.CollectCheckpoints', 'checkpoints.CollectCheckpoints', ([], {}), '()\n', (249, 251), False, 'from typhon.nano import checkpoints\n'), ((620, 635), 'typhon.quoting.quoteChar', 'quoteChar', (['c[0]'], {}), '(c[0])\n', (629, 635), False, 'from typhon.quoting import quoteChar, quoteStr\n'), ((838, 849), 'typhon.quoting.quoteStr', 'quoteStr', (['s'], {}), '(s)\n', (846, 849), False, 'from typhon.quoting import quoteChar, quoteStr\n')]
|
from django.contrib import admin
from .models import CsvDownload
class CsvDownloadAdmin(admin.ModelAdmin):
list_display = ("user", "timestamp", "row_count", "filename")
list_filter = ("timestamp",)
search_fields = ("user", "filename")
raw_id_fields = ("user",)
readonly_fields = (
"user",
"timestamp",
"filename",
"row_count",
"columns",
)
admin.site.register(CsvDownload, CsvDownloadAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((409, 459), 'django.contrib.admin.site.register', 'admin.site.register', (['CsvDownload', 'CsvDownloadAdmin'], {}), '(CsvDownload, CsvDownloadAdmin)\n', (428, 459), False, 'from django.contrib import admin\n')]
|
""" Default Runner & Worker components
Local Runner
Memmap Interface (numpy)
Template Preprocessor
JSON Postprocessor
NumpytxtPostprocessor
HDF5Postprocessor
"""
from .runner import Runner, RunnerInterface
from .worker import Interface, Preprocessor, Postprocessor, Worker
import subprocess
from multiprocessing import Process
from time import sleep
import logging
import numpy as np
import os
from shutil import rmtree
# === Local Runner === #
@Runner.register('local')
class LocalRunner(Runner):
""" Runner for executing simulations locally
- forks the worker, thereby having less overhead (especially with a custom python Worker)
- per default uses all available CPUs
"""
def spawn_run(self, params=None, wait=False):
super().spawn_run(params, wait)
if self.run_config['custom'] or not self.config['fork']:
env = self.env.copy()
env['PROFIT_RUN_ID'] = str(self.next_run_id)
if self.run_config['custom']:
cmd = self.run_config['command']
else:
cmd = 'profit-worker'
self.runs[self.next_run_id] = subprocess.Popen(cmd, shell=True, env=env, cwd=self.base_config['run_dir'])
if wait:
self.runs[self.next_run_id].wait()
del self.runs[self.next_run_id]
else:
def work():
worker = Worker.from_config(self.run_config, self.next_run_id)
worker.main()
os.chdir(self.base_config['run_dir'])
process = Process(target=work)
self.runs[self.next_run_id] = process
process.start()
if wait:
process.join()
del self.runs[self.next_run_id]
os.chdir(self.base_config['base_dir'])
self.next_run_id += 1
def spawn_array(self, params_array, blocking=True):
""" spawn an array of runs, maximum 'parallel' at the same time, blocking until all are done """
if not blocking:
raise NotImplementedError
for params in params_array:
self.spawn_run(params)
while len(self.runs) >= self.config['parallel']:
sleep(self.config['sleep'])
self.check_runs(poll=True)
while len(self.runs):
sleep(self.config['sleep'])
self.check_runs(poll=True)
def check_runs(self, poll=False):
""" check the status of runs via the interface """
self.interface.poll()
if self.run_config['custom'] or not self.config['fork']:
for run_id, process in list(self.runs.items()): # preserve state before deletions
if self.interface.internal['DONE'][run_id]:
process.wait() # just to make sure
del self.runs[run_id]
elif poll and process.poll() is not None:
del self.runs[run_id]
else:
for run_id, process in list(self.runs.items()): # preserve state before deletions
if self.interface.internal['DONE'][run_id]:
process.join() # just to make sure
del self.runs[run_id]
elif poll and process.exitcode is not None:
process.terminate()
del self.runs[run_id]
def cancel_all(self):
if self.run_config['custom'] or not self.config['fork']:
for process in self.runs.values():
process.terminate()
else:
for process in self.runs.values():
process.terminate()
self.runs = {}
# === Numpy Memmap Inerface === #
@RunnerInterface.register('memmap')
class MemmapRunnerInterface(RunnerInterface):
""" Runner-Worker Interface using a memory mapped numpy array
- expected to be very fast with the *local* Runner as each Worker can access the array directly (unverified)
- expected to be inefficient if used on a cluster with a shared filesystem (unverified)
- reliable
- known issue: resizing the array (to add more runs) is dangerous, needs a workaround
(e.g. several arrays in the same file)
"""
def __init__(self, config, size, input_config, output_config, *, logger_parent: logging.Logger = None):
super().__init__(config, size, input_config, output_config, logger_parent=logger_parent)
init_data = np.zeros(size, dtype=self.input_vars + self.internal_vars + self.output_vars)
np.save(self.config['path'], init_data)
try:
self._memmap = np.load(self.config['path'], mmap_mode='r+')
except FileNotFoundError:
self.runner.logger.error(
f'{self.__class__.__name__} could not load {self.config["path"]} (cwd: {os.getcwd()})')
raise
# should return views on memmap
self.input = self._memmap[[v[0] for v in self.input_vars]]
self.output = self._memmap[[v[0] for v in self.output_vars]]
self.internal = self._memmap[[v[0] for v in self.internal_vars]]
def resize(self, size):
""" Resizing Memmap Runner Interfac
Attention: this is dangerous and may lead to unexpected errors!
The problem is that the memory mapped file is overwritten.
Any Workers which have this file mapped will run into severe problems.
Possible future workarounds: multiple files or multiple headers in one file.
"""
if size <= self.size:
self.logger.warning('shrinking RunnerInterface is not supported')
return
self.logger.warning('resizing MemmapRunnerInterface is dangerous')
self.clean()
init_data = np.zeros(size, dtype=self.input_vars + self.internal_vars + self.output_vars)
np.save(self.config['path'], init_data)
try:
self._memmap = np.load(self.config['path'], mmap_mode='r+')
except FileNotFoundError:
self.runner.logger.error(
f'{self.__class__.__name__} could not load {self.config["path"]} (cwd: {os.getcwd()})')
raise
self.input = self._memmap[[v[0] for v in self.input_vars]]
self.output = self._memmap[[v[0] for v in self.output_vars]]
self.internal = self._memmap[[v[0] for v in self.internal_vars]]
def clean(self):
if os.path.exists(self.config['path']):
os.remove(self.config['path'])#
@Interface.register('memmap')
class MemmapInterface(Interface):
""" Runner-Worker Interface using a memory mapped numpy array
counterpart to :py:class:`MemmapRunnerInterface`
"""
def __init__(self, config, run_id: int, *, logger_parent: logging.Logger = None):
super().__init__(config, run_id, logger_parent=logger_parent)
# ToDo: multiple arrays after another to allow extending the file dynamically
try:
self._memmap = np.load(self.config['path'], mmap_mode='r+')
except FileNotFoundError:
self.worker.logger.error(
f'{self.__class__.__name__} could not load {self.config["path"]} (cwd: {os.getcwd()})')
raise
# should return views on memmap
inputs, outputs = [], []
k = 0
for k, key in enumerate(self._memmap.dtype.names):
if key == 'DONE':
break
inputs.append(key)
for key in self._memmap.dtype.names[k:]:
if key not in ['DONE', 'TIME']:
outputs.append(key)
self.input = self._memmap[inputs][run_id]
self.output = self._memmap[outputs][run_id]
self._data = self._memmap[run_id]
def done(self):
self._memmap['TIME'] = self.time
self._memmap['DONE'] = True
self._memmap.flush()
def clean(self):
if os.path.exists(self.config['path']):
os.remove(self.config['path'])
# === Template Preprocessor === #
@Preprocessor.register('template')
class TemplatePreprocessor(Preprocessor):
""" Preprocessor which substitutes the variables with a given template
- copies the given template directory to the target run directory
- searches all files for variables templates of the form {name} and replaces them with their values
- for file formats which use curly braces (e.g. json) the template identifier is {{name}}
- substitution can be restricted to certain files by specifying `param_files`
- relative symbolic links are converted to absolute symbolic links on copying
- linked files are ignored with `param_files: all`, but if specified explicitly the link target is copied to the run
directory and then substituted
"""
def pre(self, data, run_dir):
# No call to super()! replaces the default preprocessing
from profit.pre import fill_run_dir_single
if os.path.exists(run_dir):
rmtree(run_dir)
fill_run_dir_single(data, self.config['path'], run_dir, ignore_path_exists=True,
param_files=self.config['param_files'])
os.chdir(run_dir)
# === JSON Postprocessor === #
@Postprocessor.register('json')
class JSONPostprocessor(Postprocessor):
""" Postprocessor to read output from a JSON file
- variables are assumed to be stored with the correct key and able to be converted immediately
- not extensively tested
"""
def post(self, data):
import json
with open(self.config['path']) as f:
output = json.load(f)
for key, value in output.items():
data[key] = value
# === Numpy Text Postprocessor === #
@Postprocessor.register('numpytxt')
class NumpytxtPostprocessor(Postprocessor):
""" Postprocessor to read output from a tabular text file (e.g. csv, tsv) with numpy ``genfromtxt``
- the data is assumed to be row oriented
- vector variables are spread across the row and have to be in the right order, only the name of the variable should
be specified once in ``names``
- ``names`` which are not specified as output variables are ignored
- additional options are passed directly to ``numpy.genfromtxt()`
"""
def post(self, data):
dtype = [(name, float, data.dtype[name].shape if name in data.dtype.names else ())
for name in self.config['names']]
try:
raw = np.genfromtxt(self.config['path'], dtype=dtype, **self.config['options'])
except OSError:
self.logger.error(f'output file {self.config["path"]} not found')
self.logger.info(f'cwd = {os.getcwd()}')
dirname = os.path.dirname(self.config['path']) or '.'
self.logger.info(f'ls {dirname} = {os.listdir(dirname)}')
raise
for key in self.config['names']:
if key in data.dtype.names:
data[key] = raw[key]
# === HDF5 Postprocessor === #
@Postprocessor.register('hdf5')
class HDF5Postprocessor(Postprocessor):
""" Postprocessor to read output from a HDF5 file
- variables are assumed to be stored with the correct key and able to be converted immediately
- not extensively tested
"""
def post(self, data):
import h5py
with h5py.File(self.config['path'], 'r') as f:
for key in f.keys():
data[key] = f[key]
|
[
"os.listdir",
"subprocess.Popen",
"numpy.save",
"numpy.load",
"os.remove",
"json.load",
"h5py.File",
"profit.pre.fill_run_dir_single",
"os.getcwd",
"os.path.dirname",
"numpy.zeros",
"os.path.exists",
"numpy.genfromtxt",
"time.sleep",
"shutil.rmtree",
"multiprocessing.Process",
"os.chdir"
] |
[((4409, 4486), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': '(self.input_vars + self.internal_vars + self.output_vars)'}), '(size, dtype=self.input_vars + self.internal_vars + self.output_vars)\n', (4417, 4486), True, 'import numpy as np\n'), ((4495, 4534), 'numpy.save', 'np.save', (["self.config['path']", 'init_data'], {}), "(self.config['path'], init_data)\n", (4502, 4534), True, 'import numpy as np\n'), ((5718, 5795), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': '(self.input_vars + self.internal_vars + self.output_vars)'}), '(size, dtype=self.input_vars + self.internal_vars + self.output_vars)\n', (5726, 5795), True, 'import numpy as np\n'), ((5804, 5843), 'numpy.save', 'np.save', (["self.config['path']", 'init_data'], {}), "(self.config['path'], init_data)\n", (5811, 5843), True, 'import numpy as np\n'), ((6367, 6402), 'os.path.exists', 'os.path.exists', (["self.config['path']"], {}), "(self.config['path'])\n", (6381, 6402), False, 'import os\n'), ((7826, 7861), 'os.path.exists', 'os.path.exists', (["self.config['path']"], {}), "(self.config['path'])\n", (7840, 7861), False, 'import os\n'), ((8856, 8879), 'os.path.exists', 'os.path.exists', (['run_dir'], {}), '(run_dir)\n', (8870, 8879), False, 'import os\n'), ((8917, 9042), 'profit.pre.fill_run_dir_single', 'fill_run_dir_single', (['data', "self.config['path']", 'run_dir'], {'ignore_path_exists': '(True)', 'param_files': "self.config['param_files']"}), "(data, self.config['path'], run_dir, ignore_path_exists=\n True, param_files=self.config['param_files'])\n", (8936, 9042), False, 'from profit.pre import fill_run_dir_single\n'), ((9074, 9091), 'os.chdir', 'os.chdir', (['run_dir'], {}), '(run_dir)\n', (9082, 9091), False, 'import os\n'), ((1134, 1209), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'env': 'env', 'cwd': "self.base_config['run_dir']"}), "(cmd, shell=True, env=env, cwd=self.base_config['run_dir'])\n", (1150, 1209), False, 'import subprocess\n'), ((1489, 1526), 
'os.chdir', 'os.chdir', (["self.base_config['run_dir']"], {}), "(self.base_config['run_dir'])\n", (1497, 1526), False, 'import os\n'), ((1549, 1569), 'multiprocessing.Process', 'Process', ([], {'target': 'work'}), '(target=work)\n', (1556, 1569), False, 'from multiprocessing import Process\n'), ((1760, 1798), 'os.chdir', 'os.chdir', (["self.base_config['base_dir']"], {}), "(self.base_config['base_dir'])\n", (1768, 1798), False, 'import os\n'), ((2316, 2343), 'time.sleep', 'sleep', (["self.config['sleep']"], {}), "(self.config['sleep'])\n", (2321, 2343), False, 'from time import sleep\n'), ((4576, 4620), 'numpy.load', 'np.load', (["self.config['path']"], {'mmap_mode': '"""r+"""'}), "(self.config['path'], mmap_mode='r+')\n", (4583, 4620), True, 'import numpy as np\n'), ((5885, 5929), 'numpy.load', 'np.load', (["self.config['path']"], {'mmap_mode': '"""r+"""'}), "(self.config['path'], mmap_mode='r+')\n", (5892, 5929), True, 'import numpy as np\n'), ((6416, 6446), 'os.remove', 'os.remove', (["self.config['path']"], {}), "(self.config['path'])\n", (6425, 6446), False, 'import os\n'), ((6924, 6968), 'numpy.load', 'np.load', (["self.config['path']"], {'mmap_mode': '"""r+"""'}), "(self.config['path'], mmap_mode='r+')\n", (6931, 6968), True, 'import numpy as np\n'), ((7875, 7905), 'os.remove', 'os.remove', (["self.config['path']"], {}), "(self.config['path'])\n", (7884, 7905), False, 'import os\n'), ((8893, 8908), 'shutil.rmtree', 'rmtree', (['run_dir'], {}), '(run_dir)\n', (8899, 8908), False, 'from shutil import rmtree\n'), ((9502, 9514), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9511, 9514), False, 'import json\n'), ((10365, 10438), 'numpy.genfromtxt', 'np.genfromtxt', (["self.config['path']"], {'dtype': 'dtype'}), "(self.config['path'], dtype=dtype, **self.config['options'])\n", (10378, 10438), True, 'import numpy as np\n'), ((11231, 11266), 'h5py.File', 'h5py.File', (["self.config['path']", '"""r"""'], {}), "(self.config['path'], 'r')\n", (11240, 11266), False, 
'import h5py\n'), ((2203, 2230), 'time.sleep', 'sleep', (["self.config['sleep']"], {}), "(self.config['sleep'])\n", (2208, 2230), False, 'from time import sleep\n'), ((10616, 10652), 'os.path.dirname', 'os.path.dirname', (["self.config['path']"], {}), "(self.config['path'])\n", (10631, 10652), False, 'import os\n'), ((4781, 4792), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4790, 4792), False, 'import os\n'), ((6090, 6101), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6099, 6101), False, 'import os\n'), ((7129, 7140), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7138, 7140), False, 'import os\n'), ((10579, 10590), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10588, 10590), False, 'import os\n'), ((10707, 10726), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (10717, 10726), False, 'import os\n')]
|
# <NAME> 3/5/20
import os
from time_stepping import *
num_sensors: int = 20
sensing_radius: float = 0.2
timestep_size: float = 0.01
unit_square: Boundary = RectangularDomain(spacing=sensing_radius)
# noinspection PyTypeChecker
billiard: MotionModel = BilliardMotion(dt=timestep_size, boundary=unit_square, vel=1, n_int_sensors=num_sensors)
output_dir: str = "./output"
filename_base: str = "data"
n_runs: int = 1
# Unlike the animation, each simulation needs to create its own simulation object
def simulate() -> float:
simulation = EvasionPathSimulation(boundary=unit_square,
motion_model=billiard,
n_int_sensors=num_sensors,
sensing_radius=sensing_radius,
dt=timestep_size)
return simulation.run()
def output_data(filename: str, data_points: list) -> None:
with open(filename, 'a+') as file:
for d in data_points:
if type(d) != str:
file.writelines("%.2f\n" % d)
else:
file.writelines(str(d) + "\n")
def run_experiment() -> None:
times = [simulate() for _ in range(n_runs)]
filename = output_dir + "/" + filename_base + ".txt"
output_data(filename, times)
def main() -> None:
if not os.path.exists(output_dir):
os.mkdir(output_dir)
run_experiment()
if __name__ == "__main__":
main()
|
[
"os.mkdir",
"os.path.exists"
] |
[((1348, 1374), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1362, 1374), False, 'import os\n'), ((1384, 1404), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (1392, 1404), False, 'import os\n')]
|
from Bio import pairwise2
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.pairwise2 import format_alignment
adjacency_list = {
"A": {"R", "W", "M"},
"G": {"R", "K", "S"},
"T": {"W", "K", "Y"},
"C": {"M", "S", "Y"},
"R": {"D", "V"},
"W": {"D", "H"},
"M": {"V", "H"},
"K": {"D", "B"},
"S": {"V", "B"},
"Y": {"H", "B"},
"D": {"N"},
"V": {"N"},
"H": {"N"},
"B": {"N"},
"-": {"N"},
"N": {"N"}
}
def g(symbol_a: str, symbol_b: str) -> str:
def g_internal(symbols_a, symbols_b):
if len(symbols_a.intersection(symbols_b)) > 0:
return next(iter(symbols_a.intersection(symbols_b)))
else:
with_parents_a = symbols_a.union(*[adjacency_list[k] for k in symbols_a])
with_parents_b = symbols_b.union(*[adjacency_list[k] for k in symbols_b])
if len(with_parents_a.intersection(symbols_b)) > 0:
return next(iter(with_parents_a.intersection(symbols_b)))
elif len(with_parents_b.intersection(symbols_a)) > 0:
return next(iter(with_parents_b.intersection(symbols_a)))
else:
return g_internal(with_parents_a, with_parents_b)
return g_internal({symbol_a}, {symbol_b})
def generalize(seq_a: str, seq_b: str) -> str:
return "".join(g(symbol_a, symbol_b) for symbol_a, symbol_b in zip(seq_a, seq_b))
def level(symbol: str) -> int:
if symbol in ["A", "G", "T", "C"]:
return 1
elif symbol in ["R", "W", "M", "K", "S", "Y"]:
return 2
elif symbol in ["D", "V", "H", "B", "-"]:
return 3
else:
return 4
def d(symbol_a: str, symbol_b: str) -> int:
return 2*level(g(symbol_a, symbol_b)) - level(symbol_a) - level(symbol_b)
def dist(seq_a: str, seq_b: str) -> int:
return sum(d(symbol_a, symbol_b) for symbol_a, symbol_b in zip(seq_a, seq_b))
def seq_str(sequence: SeqRecord) -> str:
return str(sequence.seq)
def pairwise_align(seq_a: SeqRecord, seq_b: SeqRecord):
# this is slow af, but I haven't found a way to make the pairwise2 matcher behave the same as CLUSTAL
# (it always introduces gaps for sequences of the same length)
print(f"Pairwise aligning sequence {seq_a.id} to {seq_b.id}")
# penalize gaps like the default parameters of CLUSTALW according to https://www.genome.jp/tools-bin/clustalw
alignments = pairwise2.align.globalxs(seq_str(seq_a), seq_str(seq_b), -10.0, -0.1)
print(format_alignment(*alignments[0]))
return SeqRecord(Seq(alignments[0].seqA), id=seq_a.id), SeqRecord(Seq(alignments[0].seqB), id=seq_b.id)
def build_pairwise_alignments(sequences: "list[SeqRecord]"):
    """Align every pair of sequences and record their pairwise distances.

    Args:
        sequences: input records to compare pairwise.

    Returns:
        dict with two keys:
          - "pairwise_alignments": {i: {j: (aligned_i, aligned_j, distance)}}
            for every pair i < j
          - "min_dist_pair": (i, j, distance) of the closest pair, or None
            when fewer than two sequences are given
    """
    pairwise_alignments = {}
    min_dist_pair = None
    for i in range(len(sequences)):
        row = {}
        for j in range(i + 1, len(sequences)):
            aligned_i, aligned_j = pairwise_align(sequences[i], sequences[j])
            # Named `pair_dist` rather than `d`: the original shadowed the
            # module-level symbol-distance function `d` with this local.
            pair_dist = dist(seq_str(aligned_i), seq_str(aligned_j))
            row[j] = (aligned_i, aligned_j, pair_dist)
            if min_dist_pair is None or min_dist_pair[2] > pair_dist:
                min_dist_pair = (i, j, pair_dist)
        pairwise_alignments[i] = row
    return {
        "pairwise_alignments": pairwise_alignments,
        "min_dist_pair": min_dist_pair,
    }
def fix_odd_sequence_count(sequences, pairwise_alignments, min_dist_pair):
    """If the sequence count is odd, merge the closest pair into one entry.

    Mutates and returns ``pairwise_alignments``: removes the two sequences
    of ``min_dist_pair``, then inserts their generalization ``f`` (under
    the fresh index ``len(sequences)``) with freshly computed alignments
    and distances against every remaining sequence. No-op when the count
    is even.
    """
    # if there's an odd number of sequences, replace the min distance pair by its generalization f in the distance map
    if len(sequences) % 2 == 1:
        # NOTE(review): `d` is unpacked but never used, and it shadows the
        # module-level distance function of the same name within this scope.
        a, b, d = min_dist_pair
        print(f"Min-Distance Pair: {min_dist_pair}")
        # remove sequences a and b from the distance map
        pairwise_alignments.pop(a, None)
        pairwise_alignments.pop(b, None)
        # Also drop a/b from every remaining row (they appear as inner keys
        # for all i < a and i < b).
        for pairwise_alignments_i in pairwise_alignments.values():
            pairwise_alignments_i.pop(a, None)
            pairwise_alignments_i.pop(b, None)
        # add f with index "len(sequences)" to the distance map, calculate the pairwise distances to the other seqs
        # NOTE(review): generalize() is fed the ORIGINAL (unaligned)
        # sequences a and b; zip() inside generalize silently truncates if
        # their lengths differ — confirm the inputs are pre-aligned or
        # equal-length.
        seq_f = SeqRecord(Seq(generalize(seq_str(sequences[a]), seq_str(sequences[b]))), id="f")
        # Adding a key to each INNER dict while iterating the OUTER dict's
        # items is safe (the outer key set is unchanged).
        for (i, pairwise_alignments_i) in pairwise_alignments.items():
            aligned_seq_i, aligned_seq_f = pairwise_align(sequences[i], seq_f)
            fd = dist(seq_str(aligned_seq_i), seq_str(aligned_seq_f))
            pairwise_alignments_i[len(sequences)] = (aligned_seq_i, aligned_seq_f, fd)
    return pairwise_alignments
|
[
"Bio.Seq.Seq",
"Bio.pairwise2.format_alignment"
] |
[((2492, 2524), 'Bio.pairwise2.format_alignment', 'format_alignment', (['*alignments[0]'], {}), '(*alignments[0])\n', (2508, 2524), False, 'from Bio.pairwise2 import format_alignment\n'), ((2548, 2571), 'Bio.Seq.Seq', 'Seq', (['alignments[0].seqA'], {}), '(alignments[0].seqA)\n', (2551, 2571), False, 'from Bio.Seq import Seq\n'), ((2597, 2620), 'Bio.Seq.Seq', 'Seq', (['alignments[0].seqB'], {}), '(alignments[0].seqB)\n', (2600, 2620), False, 'from Bio.Seq import Seq\n')]
|