code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import os
from django.core.wsgi import get_wsgi_application
from rest_base.utils import dotenv
# Point Django at the project's settings module unless the environment already set one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
# Load environment variables from the .env file one directory above this module
# before the application is instantiated, so settings can read them.
dotenv.load(os.path.join(os.path.dirname(__file__), '../.env'))
# WSGI entry point consumed by the application server.
application = get_wsgi_application()
| [
"os.environ.setdefault",
"os.path.dirname",
"django.core.wsgi.get_wsgi_application"
] | [((97, 164), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""project.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'project.settings')\n", (118, 164), False, 'import os\n'), ((244, 266), 'django.core.wsgi.get_wsgi_application', 'get_wsgi_application', ([], {}), '()\n', (264, 266), False, 'from django.core.wsgi import get_wsgi_application\n'), ((190, 215), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (205, 215), False, 'import os\n')] |
from .context import CoCoDataset
import os
from torchvision import transforms
import torch.utils.data as data
from src.data_loader import get_loader
from context import COCO_SMALL
from context import clean_sentence
def test_coco_dataset():
    """Smoke-test CoCoDataset: construct it, wrap it in a DataLoader and pull one batch."""
    # Standard training-image pre-processing pipeline.
    transform_train = transforms.Compose([
        transforms.Resize(256),             # smaller edge of image resized to 256
        transforms.RandomCrop(224),         # get 224x224 crop from random location
        transforms.RandomHorizontalFlip(),  # horizontally flip image with probability=0.5
        transforms.ToTensor(),              # convert the PIL Image to a tensor
        transforms.Normalize((0.485, 0.456, 0.406),   # normalize image for pre-trained model
                             (0.229, 0.224, 0.225))])
    cocoapi_loc = COCO_SMALL
    dataset = CoCoDataset(transform=transform_train,
                          mode="train",
                          batch_size=3,
                          vocab_threshold=5,
                          vocab_file='../vocab.pkl',
                          start_word="<start>",
                          end_word="<end>",
                          unk_word="<unk>",
                          annotations_file=os.path.join(cocoapi_loc, 'cocoapi/annotations/captions_val2014.json'),
                          vocab_from_file=False,
                          img_folder=os.path.join(cocoapi_loc, 'cocoapi/images/val2014/'))
    # Plain DataLoader around the COCO dataset.
    data_loader = data.DataLoader(dataset=dataset,
                                  num_workers=4
                                  )
    images, captions = next(iter(data_loader))
    print(images.shape)
    print(captions.shape)
def test_data_loader():
    """Smoke-test get_loader: build the loader, fetch one batch and decode its captions."""
    # Define a transform to pre-process the training images.
    transform_train = transforms.Compose([
        transforms.Resize(256),             # smaller edge of image resized to 256
        transforms.RandomCrop(224),         # get 224x224 crop from random location
        transforms.RandomHorizontalFlip(),  # horizontally flip image with probability=0.5
        transforms.ToTensor(),              # convert the PIL Image to a tensor
        transforms.Normalize((0.485, 0.456, 0.406),   # normalize image for pre-trained model
                             (0.229, 0.224, 0.225))])
    # Obtain the data loader over the small local COCO copy.
    data_loader = get_loader(transform=transform_train,
                             mode='train',
                             batch_size=10,
                             vocab_threshold=5,
                             vocab_from_file=False,
                             cocoapi_loc=COCO_SMALL  # uncomment for running on local
                             )
    print('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))
    images, captions = next(iter(data_loader))
    print('images.shape:', images.shape)
    print('captions.shape:', captions.shape)
    print(captions)
    print(data_loader.dataset.vocab.idx2word)
    # Round-trip every caption through the vocabulary back to words.
    for caption in captions:
        sentence = clean_sentence(caption, data_loader)
        print(caption)
        print(sentence)
"src.data_loader.get_loader",
"os.path.join",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomCrop",
"context.clean_sentence",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor"
] | [((981, 1033), 'os.path.join', 'os.path.join', (['cocoapi_loc', '"""cocoapi/images/val2014/"""'], {}), "(cocoapi_loc, 'cocoapi/images/val2014/')\n", (993, 1033), False, 'import os\n'), ((1057, 1127), 'os.path.join', 'os.path.join', (['cocoapi_loc', '"""cocoapi/annotations/captions_val2014.json"""'], {}), "(cocoapi_loc, 'cocoapi/annotations/captions_val2014.json')\n", (1069, 1127), False, 'import os\n'), ((1739, 1786), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'dataset', 'num_workers': '(4)'}), '(dataset=dataset, num_workers=4)\n', (1754, 1786), True, 'import torch.utils.data as data\n'), ((2664, 2827), 'src.data_loader.get_loader', 'get_loader', ([], {'transform': 'transform_train', 'mode': '"""train"""', 'batch_size': 'batch_size', 'vocab_threshold': 'vocab_threshold', 'vocab_from_file': '(False)', 'cocoapi_loc': 'COCO_SMALL'}), "(transform=transform_train, mode='train', batch_size=batch_size,\n vocab_threshold=vocab_threshold, vocab_from_file=False, cocoapi_loc=\n COCO_SMALL)\n", (2674, 2827), False, 'from src.data_loader import get_loader\n'), ((3362, 3398), 'context.clean_sentence', 'clean_sentence', (['caption', 'data_loader'], {}), '(caption, data_loader)\n', (3376, 3398), False, 'from context import clean_sentence\n'), ((293, 315), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (310, 315), False, 'from torchvision import transforms\n'), ((365, 391), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(224)'], {}), '(224)\n', (386, 391), False, 'from torchvision import transforms\n'), ((442, 475), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (473, 475), False, 'from torchvision import transforms\n'), ((533, 554), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (552, 554), False, 'from torchvision import transforms\n'), ((601, 667), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 
0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (621, 667), False, 'from torchvision import transforms\n'), ((2092, 2114), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (2109, 2114), False, 'from torchvision import transforms\n'), ((2164, 2190), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(224)'], {}), '(224)\n', (2185, 2190), False, 'from torchvision import transforms\n'), ((2241, 2274), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2272, 2274), False, 'from torchvision import transforms\n'), ((2332, 2353), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2351, 2353), False, 'from torchvision import transforms\n'), ((2400, 2466), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (2420, 2466), False, 'from torchvision import transforms\n')] |
#!/usr/bin/env python3
#
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file A test driver for testing retro-fuse filesystem handlers.
#
import os
import sys
import unittest
import argparse
# Name of this script, used when printing error messages.
scriptName = os.path.basename(__file__)
# Absolute, symlink-resolved directory that contains this script.
scriptDirName = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
class TestResult(unittest.TestResult):
    """unittest result class that prints one short status line per test.

    Behaves like unittest.TextTestResult but labels each test with its
    short description (the first line of its docstring).
    """

    def __init__(self, stream, descriptions, verbosity):
        super(TestResult, self).__init__(stream, descriptions, verbosity)
        # Stream must provide write()/writeln()/flush()
        # (e.g. unittest's _WritelnDecorator, as supplied by TextTestRunner).
        self.stream = stream

    def getDescription(self, test):
        # shortDescription() returns None when the test has no docstring;
        # fall back to str(test) so we never write None to the stream.
        return test.shortDescription() or str(test)

    def startTest(self, test):
        super(TestResult, self).startTest(test)
        self.stream.write(self.getDescription(test))
        self.stream.write(" ... ")
        self.stream.flush()

    def addSuccess(self, test):
        super(TestResult, self).addSuccess(test)
        self.stream.writeln("PASS")

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        self.stream.writeln("ERROR")

    def addFailure(self, test, err):
        super(TestResult, self).addFailure(test, err)
        self.stream.writeln("FAIL")

    def addSkip(self, test, reason):
        super(TestResult, self).addSkip(test, reason)
        self.stream.writeln("skipped {0!r}".format(reason))

    def addExpectedFailure(self, test, err):
        super(TestResult, self).addExpectedFailure(test, err)
        self.stream.writeln("expected failure")

    def addUnexpectedSuccess(self, test):
        super(TestResult, self).addUnexpectedSuccess(test)
        self.stream.writeln("unexpected success")

    def printErrors(self):
        # Summarize all collected errors and failures after the run.
        self.stream.writeln()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)

    def printErrorList(self, flavour, errors):
        # Print "FLAVOUR: description" followed by the traceback for each entry.
        for test, err in errors:
            self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
            self.stream.writeln("%s" % err)
# Parse command line arguments
argParser = argparse.ArgumentParser()
argParser.add_argument('-s', '--simh', dest='simhCmd', default='pdp11',
                       help='Path to pdp11 simh executable')
argParser.add_argument('-v', '--verbose', dest='verbosity', action='store_const', const=2, default=1,
                       help='Verbose output')
argParser.add_argument('-q', '--quiet', dest='verbosity', action='store_const', const=0,
                       help='Quiet output')
argParser.add_argument('-f', '--failfast', dest='failfast', action='store_true', default=False,
                       help='Stop on first test failure')
argParser.add_argument('-k', '--keep', dest='keepFS', action='store_true', default=False,
                       help='Retain the test filesystem on exit')
argParser.add_argument('-i', '--fs-image', dest='fsImage',
                       help='Use specified file/device as backing store for test filesystem (implies -k)')
argParser.add_argument('fsHandler', help='Filesystem handler executable to be tested')
testOpts = argParser.parse_args()
# A user-supplied filesystem image must survive the test run.
if testOpts.fsImage is not None:
    testOpts.keepFS = True
# Verify access to filesystem handler executable
if not os.access(testOpts.fsHandler, os.F_OK):
    print(f'{scriptName}: File not found: {testOpts.fsHandler}', file=sys.stderr)
    sys.exit(1)
if not os.access(testOpts.fsHandler, os.X_OK):
    print(f'{scriptName}: Unable to execute filesystem handler: {testOpts.fsHandler}', file=sys.stderr)
    sys.exit(1)
# Load the appropriate test cases
# The handler's basename selects which test module to run.
fsHandlerBaseName = os.path.basename(testOpts.fsHandler)
if fsHandlerBaseName == 'bsd29fs':
    import BSD29Tests
    testSuite = unittest.TestLoader().loadTestsFromModule(BSD29Tests)
elif fsHandlerBaseName == 'v7fs':
    import V7Tests
    testSuite = unittest.TestLoader().loadTestsFromModule(V7Tests)
elif fsHandlerBaseName == 'v6fs':
    import V6Tests
    testSuite = unittest.TestLoader().loadTestsFromModule(V6Tests)
else:
    print(f'{scriptName}: Unknown filesystem handler: {testOpts.fsHandler}', file=sys.stderr)
    print('Expected a file named v6fs, v7fs or bsd29fs', file=sys.stderr)
    sys.exit(1)
# Run the tests
# In quiet mode, discard the per-test output entirely.
if testOpts.verbosity > 0:
    resultStream = sys.stderr
else:
    resultStream = open(os.devnull, 'a')
testRunner = unittest.TextTestRunner(stream=resultStream, resultclass=TestResult, verbosity=testOpts.verbosity, failfast=testOpts.failfast)
result = testRunner.run(testSuite)
# Exit status mirrors the overall test outcome.
sys.exit(0 if result.wasSuccessful() else 1)
| [
"argparse.ArgumentParser",
"os.access",
"os.path.realpath",
"os.path.basename",
"sys.exit",
"unittest.TextTestRunner",
"unittest.TestLoader"
] | [((744, 770), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (760, 770), False, 'import os\n'), ((2561, 2586), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2584, 2586), False, 'import argparse\n'), ((4081, 4117), 'os.path.basename', 'os.path.basename', (['testOpts.fsHandler'], {}), '(testOpts.fsHandler)\n', (4097, 4117), False, 'import os\n'), ((4809, 4939), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'stream': 'resultStream', 'resultclass': 'TestResult', 'verbosity': 'testOpts.verbosity', 'failfast': 'testOpts.failfast'}), '(stream=resultStream, resultclass=TestResult,\n verbosity=testOpts.verbosity, failfast=testOpts.failfast)\n', (4832, 4939), False, 'import unittest\n'), ((3721, 3759), 'os.access', 'os.access', (['testOpts.fsHandler', 'os.F_OK'], {}), '(testOpts.fsHandler, os.F_OK)\n', (3730, 3759), False, 'import os\n'), ((3847, 3858), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3855, 3858), False, 'import sys\n'), ((3866, 3904), 'os.access', 'os.access', (['testOpts.fsHandler', 'os.X_OK'], {}), '(testOpts.fsHandler, os.X_OK)\n', (3875, 3904), False, 'import os\n'), ((4014, 4025), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4022, 4025), False, 'import sys\n'), ((819, 845), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (835, 845), False, 'import os\n'), ((4191, 4212), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (4210, 4212), False, 'import unittest\n'), ((4663, 4674), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4671, 4674), False, 'import sys\n'), ((4314, 4335), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (4333, 4335), False, 'import unittest\n'), ((4434, 4455), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (4453, 4455), False, 'import unittest\n')] |
import numpy as np
from stardist import star_dist, relabel_image_stardist
import pytest
from utils import random_image, real_image2d, check_similar, circle_image
@pytest.mark.parametrize('img', (real_image2d()[1], random_image((128, 123))))
@pytest.mark.parametrize('n_rays', (4, 16, 32))
def test_types(img, n_rays):
    """star_dist (cpp mode) must be invariant to the integer dtype of the input."""
    mode = "cpp"
    reference = star_dist(img, n_rays=n_rays, mode=mode)
    for dtype in (np.int8, np.int16, np.int32,
                  np.uint8, np.uint16, np.uint32):
        converted = star_dist(img.astype(dtype), n_rays=n_rays, mode=mode)
        print("test_stardist2D (mode {mode}) for shape {img.shape} and type {dtype}".format(
            mode=mode, img=img, dtype=dtype))
        check_similar(reference, converted)
@pytest.mark.gpu
@pytest.mark.parametrize('img', (real_image2d()[1], random_image((128, 123))))
@pytest.mark.parametrize('n_rays', (4, 16, 32))
def test_types_gpu(img, n_rays):
    """star_dist (opencl mode) must be invariant to the integer dtype of the input."""
    mode = "opencl"
    reference = star_dist(img, n_rays=n_rays, mode=mode)
    for dtype in (np.int8, np.int16, np.int32,
                  np.uint8, np.uint16, np.uint32):
        converted = star_dist(img.astype(dtype), n_rays=n_rays, mode=mode)
        print("test_stardist2D with mode {mode} for shape {img.shape} and type {dtype}".format(
            mode=mode, img=img, dtype=dtype))
        check_similar(reference, converted)
@pytest.mark.gpu
@pytest.mark.parametrize('img', (real_image2d()[1], random_image((128, 123))))
@pytest.mark.parametrize('n_rays', (4, 16, 32))
def test_cpu_gpu(img, n_rays):
    """The CPU (cpp) and GPU (opencl) star_dist implementations must agree."""
    distances_cpu = star_dist(img, n_rays=n_rays, mode="cpp")
    distances_gpu = star_dist(img, n_rays=n_rays, mode="opencl")
    check_similar(distances_cpu, distances_gpu)
@pytest.mark.parametrize('n_rays', (32,64))
@pytest.mark.parametrize('eps', ((1,1),(.4,1.3)))
def test_relabel_consistency(n_rays, eps, plot = False):
    """ test whether an already star-convex label image gets perfectly relabeled"""
    # img = random_image((128, 123))
    # build an elliptical label image, then make it exactly star-convex-representable
    # with n_rays by relabeling it once
    lbl1 = circle_image(shape=(32,32), radius=8, eps = eps)
    lbl1 = relabel_image_stardist(lbl1, n_rays)
    # relabeling a second time should reproduce (almost) the same foreground
    lbl2 = relabel_image_stardist(lbl1, n_rays)
    # fraction of lbl1's foreground pixels that are NOT foreground in both images
    rel_error = 1-np.count_nonzero(np.bitwise_and(lbl1>0, lbl2>0))/np.count_nonzero(lbl1>0)
    print(rel_error)
    # allow up to 10% disagreement
    assert rel_error<1e-1
    if plot:
        import matplotlib.pyplot as plt
        plt.figure(num=1, figsize=(8,4))
        plt.subplot(1,3,1);plt.imshow(lbl1);plt.title("GT")
        plt.subplot(1,3,2);plt.imshow(lbl2);plt.title("Reco")
        plt.subplot(1,3,3);plt.imshow(1*(lbl1>0)+2*(lbl2>0));plt.title("Overlay")
        plt.tight_layout()
        plt.show()
    return lbl1, lbl2
if __name__ == '__main__':
    # Manual run: execute the relabel-consistency check with plotting enabled.
    lbl1, lbl2 = test_relabel_consistency(32,eps = (.7,1), plot = True)
| [
"matplotlib.pyplot.imshow",
"stardist.star_dist",
"utils.check_similar",
"utils.circle_image",
"utils.random_image",
"numpy.count_nonzero",
"pytest.mark.parametrize",
"stardist.relabel_image_stardist",
"matplotlib.pyplot.figure",
"numpy.bitwise_and",
"matplotlib.pyplot.tight_layout",
"matplotl... | [((244, 290), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rays"""', '(4, 16, 32)'], {}), "('n_rays', (4, 16, 32))\n", (267, 290), False, 'import pytest\n'), ((819, 865), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rays"""', '(4, 16, 32)'], {}), "('n_rays', (4, 16, 32))\n", (842, 865), False, 'import pytest\n'), ((1404, 1450), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rays"""', '(4, 16, 32)'], {}), "('n_rays', (4, 16, 32))\n", (1427, 1450), False, 'import pytest\n'), ((1632, 1675), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rays"""', '(32, 64)'], {}), "('n_rays', (32, 64))\n", (1655, 1675), False, 'import pytest\n'), ((1676, 1728), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""eps"""', '((1, 1), (0.4, 1.3))'], {}), "('eps', ((1, 1), (0.4, 1.3)))\n", (1699, 1728), False, 'import pytest\n'), ((346, 386), 'stardist.star_dist', 'star_dist', (['img'], {'n_rays': 'n_rays', 'mode': 'mode'}), '(img, n_rays=n_rays, mode=mode)\n', (355, 386), False, 'from stardist import star_dist, relabel_image_stardist\n'), ((928, 968), 'stardist.star_dist', 'star_dist', (['img'], {'n_rays': 'n_rays', 'mode': 'mode'}), '(img, n_rays=n_rays, mode=mode)\n', (937, 968), False, 'from stardist import star_dist, relabel_image_stardist\n'), ((1494, 1535), 'stardist.star_dist', 'star_dist', (['img'], {'n_rays': 'n_rays', 'mode': '"""cpp"""'}), "(img, n_rays=n_rays, mode='cpp')\n", (1503, 1535), False, 'from stardist import star_dist, relabel_image_stardist\n'), ((1548, 1592), 'stardist.star_dist', 'star_dist', (['img'], {'n_rays': 'n_rays', 'mode': '"""opencl"""'}), "(img, n_rays=n_rays, mode='opencl')\n", (1557, 1592), False, 'from stardist import star_dist, relabel_image_stardist\n'), ((1597, 1624), 'utils.check_similar', 'check_similar', (['s_cpp', 's_ocl'], {}), '(s_cpp, s_ocl)\n', (1610, 1624), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((1914, 
1961), 'utils.circle_image', 'circle_image', ([], {'shape': '(32, 32)', 'radius': '(8)', 'eps': 'eps'}), '(shape=(32, 32), radius=8, eps=eps)\n', (1926, 1961), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((1979, 2015), 'stardist.relabel_image_stardist', 'relabel_image_stardist', (['lbl1', 'n_rays'], {}), '(lbl1, n_rays)\n', (2001, 2015), False, 'from stardist import star_dist, relabel_image_stardist\n'), ((2028, 2064), 'stardist.relabel_image_stardist', 'relabel_image_stardist', (['lbl1', 'n_rays'], {}), '(lbl1, n_rays)\n', (2050, 2064), False, 'from stardist import star_dist, relabel_image_stardist\n'), ((699, 719), 'utils.check_similar', 'check_similar', (['gt', 'x'], {}), '(gt, x)\n', (712, 719), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((216, 240), 'utils.random_image', 'random_image', (['(128, 123)'], {}), '((128, 123))\n', (228, 240), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((1284, 1304), 'utils.check_similar', 'check_similar', (['gt', 'x'], {}), '(gt, x)\n', (1297, 1304), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((791, 815), 'utils.random_image', 'random_image', (['(128, 123)'], {}), '((128, 123))\n', (803, 815), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((1376, 1400), 'utils.random_image', 'random_image', (['(128, 123)'], {}), '((128, 123))\n', (1388, 1400), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((2267, 2300), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(1)', 'figsize': '(8, 4)'}), '(num=1, figsize=(8, 4))\n', (2277, 2300), True, 'import matplotlib.pyplot as plt\n'), ((2308, 2328), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (2319, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2327, 2343), 'matplotlib.pyplot.imshow', 
'plt.imshow', (['lbl1'], {}), '(lbl1)\n', (2337, 2343), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2359), 'matplotlib.pyplot.title', 'plt.title', (['"""GT"""'], {}), "('GT')\n", (2353, 2359), True, 'import matplotlib.pyplot as plt\n'), ((2368, 2388), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (2379, 2388), True, 'import matplotlib.pyplot as plt\n'), ((2387, 2403), 'matplotlib.pyplot.imshow', 'plt.imshow', (['lbl2'], {}), '(lbl2)\n', (2397, 2403), True, 'import matplotlib.pyplot as plt\n'), ((2404, 2421), 'matplotlib.pyplot.title', 'plt.title', (['"""Reco"""'], {}), "('Reco')\n", (2413, 2421), True, 'import matplotlib.pyplot as plt\n'), ((2430, 2450), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (2441, 2450), True, 'import matplotlib.pyplot as plt\n'), ((2449, 2492), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(1 * (lbl1 > 0) + 2 * (lbl2 > 0))'], {}), '(1 * (lbl1 > 0) + 2 * (lbl2 > 0))\n', (2459, 2492), True, 'import matplotlib.pyplot as plt\n'), ((2483, 2503), 'matplotlib.pyplot.title', 'plt.title', (['"""Overlay"""'], {}), "('Overlay')\n", (2492, 2503), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2530), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2528, 2530), True, 'import matplotlib.pyplot as plt\n'), ((2539, 2549), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2547, 2549), True, 'import matplotlib.pyplot as plt\n'), ((197, 211), 'utils.real_image2d', 'real_image2d', ([], {}), '()\n', (209, 211), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((772, 786), 'utils.real_image2d', 'real_image2d', ([], {}), '()\n', (784, 786), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), ((1357, 1371), 'utils.real_image2d', 'real_image2d', ([], {}), '()\n', (1369, 1371), False, 'from utils import random_image, real_image2d, check_similar, circle_image\n'), 
((2133, 2159), 'numpy.count_nonzero', 'np.count_nonzero', (['(lbl1 > 0)'], {}), '(lbl1 > 0)\n', (2149, 2159), True, 'import numpy as np\n'), ((2101, 2135), 'numpy.bitwise_and', 'np.bitwise_and', (['(lbl1 > 0)', '(lbl2 > 0)'], {}), '(lbl1 > 0, lbl2 > 0)\n', (2115, 2135), True, 'import numpy as np\n')] |
import math
import functools
from scipy.stats import binom
import numpy as np
import itertools
import sys
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from copy import copy
def combine_distribs(deletes, inserts):
    """
    Combine the insert and delete models/distributions into a single one.
    :param deletes: ndarray - delete distribution
    :param inserts: ndarray - insert distribution
    :return: ndarray - combined array of the same length
    """
    # number of insert coefficients to use: start one past the count of zero
    # entries in the delete distribution, then extend while the insert mass
    # is still non-negligible
    used = int(np.sum(deletes == 0.0)) + 1
    while used < len(inserts) and inserts[used] > 0.0001:
        used += 1
    length = len(deletes)
    combined = np.zeros_like(deletes, dtype=float)
    # shift-and-add: every insert coefficient shifts the (scaled) delete
    # distribution one position to the right
    for shift, weight in enumerate(inserts[:used]):
        combined[shift:] += (deletes * weight)[:length - shift]
    return combined
def const_rate(n, p1=0.0, p2=1.0, p3=1.0):
    """
    Constant rate function: the allele number has no influence on the rate.
    :param n: int - allele number (unused)
    :param p1: float - constant parameter
    :param p2: float - linear parameter (unused)
    :param p3: float - additional parameter (unused)
    :return: float - p1
    """
    # n, p2 and p3 exist only to share the common rate-function signature
    return p1
def linear_rate(n, p1=0.0, p2=1.0, p3=1.0):
    """
    Linear rate function of the allele number.
    :param n: int - allele number
    :param p1: float - constant parameter (intercept)
    :param p2: float - linear parameter (slope)
    :param p3: float - additional parameter (unused)
    :return: float - p1 + p2 * n
    """
    slope_term = p2 * n
    return p1 + slope_term
def n2_rate(n, p1=0.0, p2=1.0, p3=1.0):
    """
    Quadratic rate function of the allele number.
    :param n: int - allele number
    :param p1: float - constant parameter
    :param p2: float - linear parameter
    :param p3: float - quadratic parameter
    :return: float - p1 + p2 * n + p3 * n * n
    """
    linear_part = p2 * n
    quadratic_part = p3 * n * n
    return p1 + linear_part + quadratic_part
def exp_rate(n, p1=0.0, p2=1.0, p3=1.0):
    """
    Exponential rate function of the allele number.
    :param n: int - allele number
    :param p1: float - constant parameter
    :param p2: float - linear (scale) parameter
    :param p3: float - exponential parameter
    :return: float - p1 + p2 * e^(p3 * n)
    """
    growth = math.exp(p3 * n)
    return p1 + p2 * growth
def clip(value, minimal, maximal):
    """
    Clip a value into the range <minimal, maximal>.
    The lower bound is applied first and the upper bound second, so the
    upper bound wins when the range is degenerate (minimal > maximal).
    :param value: ? - value
    :param minimal: ? - minimal value
    :param maximal: ? - maximal value
    :return: ? - clipped value
    """
    bounded_below = minimal if minimal >= value else value
    return bounded_below if bounded_below <= maximal else maximal
def model_full(rng, model_params, n, rate_func=linear_rate):
    """
    Create the binomial model covering both deletions and insertions of STRs.
    :param rng: int - max_range of distribution
    :param model_params: 4-tuple - parameters for inserts and deletes
    :param n: int - target allele number
    :param rate_func: function - rate function for deletes
    :return: ndarray - combined distribution
    """
    p1, p2, p3, q = model_params
    support = np.arange(rng)
    # deletion success probability is 1 - rate(n), clipped to a valid probability
    delete_prob = clip(1 - rate_func(n, p1, p2, p3), 0.0, 1.0)
    deletes = binom.pmf(support, n, delete_prob)
    inserts = binom.pmf(support, n, q)
    return combine_distribs(deletes, inserts)
def model_template(rng, model_params, rate_func=linear_rate):
    """
    Partial-application helper for model creation.
    :param rng: int - max_range of distribution
    :param model_params: 4-tuple - parameters for inserts and deletes
    :param rate_func: function - rate function for deletes
    :return: partial function with a single remaining parameter - n - the target allele number
    """
    model_for_allele = functools.partial(model_full, rng, model_params, rate_func=rate_func)
    return model_for_allele
class Inference:
    """ Class for inference of alleles. """
    # Minimal number of STR repetitions considered by the inference.
    MIN_REPETITIONS = 1
    # default parameters for inference (used when no params file is supplied);
    # layout matches read_params(): (p1, p2, p3, q)
    DEFAULT_MODEL_PARAMS = (-0.0107736, 0.00244419, 0.0, 0.00440608)
    DEFAULT_FIT_FUNCTION = "linear"
def __init__(self, read_distribution, params_file, str_rep=3, minl_primer1=5, minl_primer2=5, minl_str=5, p_bckg_closed=None, p_bckg_open=None, p_expanded=None):
"""
Initialization of the Inference class + setup of all models and their probabilities.
:param read_distribution: ndarray(int) - read distribution
:param params_file: str - filename of parameters
:param str_rep: int - length of the STR
:param minl_primer1: int - minimal length of the left primer
:param minl_primer2: int - minimal length of the right primer
:param minl_str: int - minimal length of the STR
:param p_bckg_closed: float - probability of the background model for closed observation
:param p_bckg_open: float - probability of the background model for open observation
:param p_expanded: float - probability of the expanded model (if None it is equal to other models)
"""
# assign variables
self.str_rep = str_rep
self.minl_primer1 = minl_primer1
self.minl_primer2 = minl_primer2
self.minl_str = minl_str
self.read_distribution = read_distribution
self.sum_reads_log = np.log(np.sum(read_distribution))
self.sum_reads = np.sum(read_distribution)
self.params_file = params_file
self.p_expanded = p_expanded
self.p_bckg_closed = p_bckg_closed
self.p_bckg_open = p_bckg_open
    def construct_models(self, min_rep, max_rep, e_model):
        """
        Construct all models needed for the current inference.
        :param min_rep: int - minimal allele to model
        :param max_rep: int - maximal allele to model (exclusive)
        :param e_model: int - allele number used for the expanded-allele model
        :return: None
        """
        # extract params and resolve the rate-function name to a callable
        model_params, rate_func_str = self.read_params(self.params_file)
        str_to_func = {"linear": linear_rate, "const": const_rate, "exponential": exp_rate, "square": n2_rate}
        rate_func = const_rate  # fallback when the name is unknown
        if rate_func_str in str_to_func.keys():
            rate_func = str_to_func[rate_func_str]
        # save min_rep and max_rep
        self.min_rep = min_rep
        self.max_rep = max_rep  # non-inclusive
        self.max_with_e = e_model + 1  # non-inclusive
        # get models: one distribution per candidate allele
        mt = model_template(self.max_with_e, model_params, rate_func)
        # background model: uniform over all modeled repetition counts >= min_rep
        self.background_model = np.concatenate([np.zeros(self.min_rep, dtype=float), np.ones(self.max_with_e - self.min_rep, dtype=float) / float(self.max_with_e - self.min_rep)])
        self.expanded_model = mt(self.max_with_e - 1)
        self.allele_models = {i: mt(i) for i in range(min_rep, max_rep)}
        self.models = {'E': self.expanded_model, 'B': self.background_model}
        self.models.update(self.allele_models)
        # get model likelihoods (prior weights of the individual models)
        open_to_closed = 10.0
        l_others = 1.0
        l_bckg_open = 0.01
        l_exp = 1.01
        l_bckg_model_open = 1.0
        if self.p_expanded is None:
            self.p_expanded = l_exp
        # derive whichever background probabilities were not supplied by the caller;
        # the closed and open background priors keep a fixed 1:10 ratio
        if self.p_bckg_open is None and self.p_bckg_closed is None:
            self.p_bckg_open = l_bckg_open
            self.p_bckg_closed = self.p_bckg_open / open_to_closed
        if self.p_bckg_closed is None:
            self.p_bckg_closed = self.p_bckg_open / open_to_closed
        if self.p_bckg_open is None:
            self.p_bckg_open = self.p_bckg_closed * open_to_closed
        self.model_probabilities = {'E': self.p_expanded, 'B': l_bckg_model_open}
        self.model_probabilities.update({i: l_others for i in self.allele_models.keys()})
def read_params(self, params_file):
"""
Reads all parameters written with write_params(print_all=True)
:param params_file: str - filename to read parameters from, if None, load default params
:return: 4-tuple, 2-tuple, function - parameters for model, read count drop, and error function for model distributions
"""
if params_file is None:
return self.DEFAULT_MODEL_PARAMS, self.DEFAULT_FIT_FUNCTION
# read 2nd and last line of the file
with open(params_file) as f:
lines = f.readlines()
fit_function = lines[1].strip().split()[1]
split = list(map(float, lines[-1].strip().split()))
if len(split) < 4:
print("ERROR: parameters were not read successfully, using defaults!", file=sys.stderr)
return self.DEFAULT_MODEL_PARAMS, self.DEFAULT_FIT_FUNCTION
# extract parameters from last line of file
model_params = tuple(split[0:4])
return model_params, fit_function
def likelihood_rl(self, rl):
"""
Likelihood of a read with this length.
:param rl: int - read length
:return: float - likelihood of a read this long
"""
# print('rl', self.read_distribution[rl] / float(self.sum_reads))
return self.read_distribution[rl] / float(self.sum_reads)
    @staticmethod
    def likelihood_model(model, g):
        """
        Likelihood of an observed repetition count under a model distribution.
        :param model: ndarray - model that we evaluate
        :param g: int - observed read count
        :return: float - likelihood of a read coming from this model
        """
        # direct lookup: the model is a discrete distribution indexed by repetition count
        return model[g]
    def likelihood_intersection(self, model_i, model_j, g):
        """
        Likelihood of the observation under the overlap of two model distributions.
        :param model_i: ndarray - first model distribution
        :param model_j: ndarray - second model distribution
        :param g: int - observed repetition count
        :return: float - the smaller of the two model likelihoods at g
        """
        return min(model_i[g], model_j[g])
    def likelihood_coverage(self, true_length, rl, closed=True):
        """
        Likelihood of generating a read with this length over this allele.
        :param true_length: int - true number of repetitions of an STR
        :param rl: int - read length
        :param closed: bool - if the read is closed - i.e. both primers are there
            (NOTE(review): not used in the computation below - confirm intent)
        :return: float - likelihood of a read being generated with these attributes
        """
        # presumably the count of read placements fully inside STR + primers - TODO confirm
        whole_inside_str = max(0, true_length * self.str_rep + self.minl_primer1 + self.minl_primer2 - rl + 1)
        # closed_overlapping = max(0, rl - self.minl_primer1 - self.minl_primer2 - true_length * self.str_rep + 1)
        # presumably the count of placements overlapping the STR by >= minl_str - TODO confirm
        open_overlapping = max(0, rl + true_length * self.str_rep - 2 * self.minl_str + 1)
        assert open_overlapping > whole_inside_str, '%d open %d whole inside %d %d %d' % (open_overlapping, whole_inside_str, true_length, rl, self.minl_str)
        # uniform probability over the remaining valid placements
        return 1.0 / float(open_overlapping - whole_inside_str)
    def likelihood_read_allele(self, model, observed, rl, closed=True):
        """
        Likelihood of generation of a read with the observed allele count and read length.
        :param model: ndarray - model for the allele
        :param observed: int - observed allele count
        :param rl: int - read length
        :param closed: bool - if the read is closed - i.e. both primers are there
        :return: float - likelihood of the read under this model
        """
        if closed:
            # closed read: the observed repetition count is the true one
            return self.likelihood_rl(rl) * self.likelihood_model(model, observed) * self.likelihood_coverage(observed, rl, True)
        else:
            # open read: the observed count is only a lower bound on the true length,
            # so average over all candidate true lengths (plus the expanded allele)
            number_of_options = 0
            partial_likelihood = 0
            for true_length in itertools.chain(range(observed, self.max_rep), [self.max_with_e - 1]):
                partial_likelihood += self.likelihood_model(model, true_length) * self.likelihood_coverage(true_length, rl, False)
                number_of_options += 1
            return self.likelihood_rl(rl) * partial_likelihood / float(number_of_options)
def likelihood_read_intersection(self, model_i, model_j, observed, rl, closed=True):
    """Likelihood that a read with this observed count and length fits both models.

    :param model_i: ndarray - model for the first allele
    :param model_j: ndarray - model for the second allele
    :param observed: int - observed allele count
    :param rl: int - read length
    :param closed: bool - if the read is closed, i.e. both primers are there
    :return: float - likelihood of generating such a read under both models
    """
    if closed:
        return (self.likelihood_rl(rl)
                * self.likelihood_intersection(model_i, model_j, observed)
                * self.likelihood_coverage(observed, rl, True))
    # open evidence: average over every true length compatible with the observation
    total = 0
    options = 0
    for length in itertools.chain(range(observed, self.max_rep), [self.max_with_e - 1]):
        total += self.likelihood_intersection(model_i, model_j, length) * self.likelihood_coverage(length, rl, False)
        options += 1
    return self.likelihood_rl(rl) * total / float(options)
def likelihood_read(self, observed, rl, model_index1, model_index2, closed=True):
    """Likelihood that one read was generated by either of two allele models.

    :param observed: int - observed allele count
    :param rl: int - read length
    :param model_index1: char/int - model index for the left allele
    :param model_index2: char/int - model index for the right allele
    :param closed: bool - if the read is closed, i.e. both primers are there
    :return: float - likelihood of this read being generated
    """
    prior_first = self.model_probabilities[model_index1]
    prior_second = self.model_probabilities[model_index2]
    # per-allele contributions, each weighted by the prior of its model
    lh_first = prior_first * self.likelihood_read_allele(self.models[model_index1], observed, rl, closed)
    lh_second = prior_second * self.likelihood_read_allele(self.models[model_index2], observed, rl, closed)
    # background contribution (reads explained by neither allele)
    prior_bckg = self.p_bckg_closed if closed else self.p_bckg_open
    lh_bckg = prior_bckg * self.likelihood_read_allele(self.models['B'], observed, rl, closed)
    assert not np.isnan(lh_first)
    assert not np.isnan(lh_second)
    assert not np.isnan(lh_bckg)
    # the models are treated as independent, so their contributions add up
    return lh_first + lh_second + lh_bckg
def infer(self, annotations, filt_annotations, index_rep, verbose=True):
"""
Does all of the inference, computes for which 2 combination of alleles are these annotations and parameters the best.
argmax_{G1, G2} P(G1, G2 | AL, COV, RL) ~ P(AL, COV, RL | G1, G2) * P(G1, G2) = prod_{read_i} P(al_i, cov_i, rl_i | G1, G2) * P(G1, G2) =independent G1 G2=
= prod_{read_i} P(al_i, cov_i, rl_i | G1) * P(al_i, cov_i, rl_i | G2) * P(G1) * P(G2) {here G1, G2 is from possible alleles, background, and expanded, priors are from params}
P(al_i, cov_i, rl_i | G1) - 2 options: 1. closed evidence (al_i = X), we know X; 2. open evidence (al_i >= X), cl_i == True if i is closed
1.: P(al_i, cov_i, rl_i, cl_i | G1) = P(rl_i is from read distribution) * p(allele is al_i | G1) * P(read generated closed evidence | rl_i, al_i)
2.: P(rl_i is from r.distr.) * P(allele is >= al_i | G1) * P(read generated open evidence | rl_i, al_i)
:param annotations: iterator(reads) - closed reads (both primers set)
:param filt_annotations: iterator(reads) - open reads (only one primer set)
:param index_rep: int - index of a repetition
:param verbose: bool - print more stuff?
:return: dict(tuple(int, int):float) - directory of model indices to their likelihood
"""
# NOTE(review): annotations and filt_annotations are each iterated twice below,
# so they must be re-iterable lists, not one-shot iterators as the docstring says.
# generate closed observed and read_length arrays
observed_annots = list(map(lambda x: x.module_repetitions[index_rep], annotations))
rl_annots = list(map(lambda x: len(x.read.sequence), annotations))
closed_annots = np.ones_like(observed_annots, dtype=bool)
# generate open observed and read_length arrays
observed_fa = list(map(lambda x: x.module_repetitions[index_rep], filt_annotations))
rl_fa = list(map(lambda x: len(x.read.sequence), filt_annotations))
closed_fa = np.zeros_like(observed_fa, dtype=bool)
# join them and keep the information if they are open or closed
observed_arr = np.concatenate([observed_annots, observed_fa]).astype(int)
rl_arr = np.concatenate([rl_annots, rl_fa]).astype(int)
closed_arr = np.concatenate([closed_annots, closed_fa]).astype(bool)
# generate the boundaries:
# candidate allele range is the observed range widened by `overhead` on each side
overhead = 3
if len(observed_annots) == 0:
max_rep = max(observed_fa) + overhead # non-inclusive
min_rep = max(self.MIN_REPETITIONS, max(observed_fa) - overhead) # inclusive
else:
max_rep = max(observed_annots) + overhead + 1 # non-inclusive
min_rep = max(self.MIN_REPETITIONS, min(observed_annots) - overhead) # inclusive
# expanded allele
e_allele = max_rep
if len(observed_fa) > 0:
e_allele = max(max_rep, max(observed_fa) + 1)
# generate all the models
self.construct_models(min_rep, max_rep, e_allele)
# enumerate every unordered allele pair in [min_rep, max_rep), plus pairs
# with the expanded ('E') and background ('B') models
tested_models = []
for model_index1 in range(min_rep, max_rep):
for model_index2 in range(model_index1, max_rep):
tested_models.append((model_index1, model_index2))
tested_models.append((model_index1, 'E'))
# tested_models.append(('B', model_index1))
tested_models.append(('B', 'B'))
tested_models.append(('E', 'E'))
# go through every model and evaluate:
# each pair gets the sum of per-read log-likelihoods
evaluated_models = {}
for m1, m2 in tested_models:
evaluated_models[(m1, m2)] = 0
if verbose:
print('model', m1, m2)
# go through every reads
for obs, rl, closed in zip(observed_arr, rl_arr, closed_arr):
lh = self.likelihood_read(obs, rl, m1, m2, closed=closed)
# TODO weighted sum according to the closeness/openness of reads?
evaluated_models[(m1, m2)] += np.log(lh)
if verbose:
print('model', m1, m2, 'log-likelihood', evaluated_models[(m1, m2)])
return evaluated_models
def print_pcolor(self, lh_dict, display_file, name, lognorm=True):
"""
Get maximum likelihood option and alternatively print it to image file.
:param lh_dict: dict(tuple(int, int):float) - directory of model indices to their likelihood
:param display_file: str - filename for pcolor image output (None to skip plotting)
:param name: str - name to use in title
:param lognorm: bool - use loglog scale in displaying likelihood array
:return: tuple(ndarray, tuple, tuple) - likelihood array, numeric best option, symbolic best option
"""
# convert to a numpy array:
# 'B' maps to row/col 0, 'E' to row 0 / col max_rep
lh_array = np.zeros((self.max_rep, self.max_rep + 1))
for (k1, k2), v in lh_dict.items():
if k1 == 'B':
k1 = 0
if k2 == 'B':
k2 = 0
if k1 == 'E':
k1 = 0
if k2 == 'E':
k2 = self.max_rep
lh_array[k1, k2] = v
# print(lh_dict, lh_array)
# get minimal and maximal likelihood
# NOTE(review): (lh_array != np.nan) is always True (NaN never compares equal);
# np.isnan would be needed to actually filter NaNs here.
ind_good = (lh_array < 0.0) & (lh_array > -1e10) & (lh_array != np.nan)
if len(lh_array[ind_good]) == 0:
# NOTE(review): this early return yields 2 values while the normal path
# yields 3 -- callers that unpack 3 values (e.g. all_call) will fail here.
return lh_array, (0, 0)
# NOTE(review): np.NINF was removed in NumPy 2.0; -np.inf is the portable spelling.
lh_array[~ind_good] = np.NINF
z_min, z_max = min(lh_array[ind_good]), max(lh_array[ind_good])
max_str = len(lh_array)
# generate image file if specified:
if display_file is not None:
plt.figure()
if lognorm:
# -log(-x) maps large negative log-likelihoods into a displayable range
lh_view = -np.log(-lh_array)
z_min = -np.log(-z_min)
z_max = -np.log(-z_max)
else:
lh_view = lh_array
# background:
# paint a small corner patch with the background likelihood for the legend
bg_size = max(2, (len(lh_view) - self.min_rep) // 6)
if len(lh_view) - self.min_rep <= 6:
bg_size = 1
lh_view[-bg_size:, self.min_rep:self.min_rep + bg_size] = lh_view[0, 0]
# expanded
lh_view[-bg_size:, self.min_rep + bg_size:self.min_rep + 2 * bg_size] = lh_view[0, self.max_rep]
# plotting
plt.title("%s likelihood of each option for %s" % ("Loglog" if lognorm else "Log", name))
plt.xlabel('2nd allele')
plt.ylabel('1st allele')
start_ticks = 5
step_ticks = 5
plt.xticks(np.concatenate([np.array(range(start_ticks - self.min_rep, max_str - self.min_rep, step_ticks)), [max_str - self.min_rep]]) + 0.5,
list(range(start_ticks, max_str, step_ticks)) + ['E(>%d)' % (self.max_with_e - 2)])
plt.yticks(np.array(range(start_ticks - self.min_rep, max_str - self.min_rep, step_ticks)) + 0.5, range(start_ticks, max_str, step_ticks))
# values below vmin (the masked NINF cells) render gray
palette = copy(plt.cm.jet)
palette.set_under('gray', 1.0)
plt.pcolor(lh_view[self.min_rep:, self.min_rep:], cmap=palette, vmin=z_min, vmax=z_max)
plt.colorbar()
# draw dividing line:
plt.plot([max_str - self.min_rep, max_str - self.min_rep], [0, max_str - self.min_rep], 'k', linewidth=3)
# background:
plt.text(float(bg_size) / 2.0, max_str - self.min_rep - float(bg_size) / 2.0, 'BG', size=20, horizontalalignment='center',
verticalalignment='center', path_effects=[PathEffects.withStroke(linewidth=2.5, foreground="w")])
# expanded
plt.text(bg_size + float(bg_size) / 2.0, max_str - self.min_rep - float(bg_size) / 2.0, 'Exp', size=20, horizontalalignment='center',
verticalalignment='center', path_effects=[PathEffects.withStroke(linewidth=2.5, foreground="w")])
# save
plt.savefig(display_file + '.pdf')
plt.savefig(display_file + '.png')
plt.close()
# output best option
best = sorted(np.unravel_index(np.argmax(lh_array), lh_array.shape))
# and convert it to symbols
if best[0] == 0 and best[1] == 0:
best_sym = ('B', 'B')
else:
best_sym = list(map(lambda x: 'E' if x == self.max_rep or x == 0 else x, best))
return lh_array, best, best_sym
def get_confidence(self, lh_array, predicted):
    """Convert a grid of log-likelihoods into normalized confidences.

    :param lh_array: 2D-ndarray - log likelihoods of the prediction
    :param predicted: tuple(int, int) - predicted alleles
    :return: tuple of 7 floats - confidence of the whole prediction, of the
        first and second allele alone, of the exact background option, of
        any-background, of the exact expanded option, and of any-expanded
    """
    # softmax-style normalization; subtracting the maximum keeps exp() stable
    weights = np.exp(lh_array - np.max(lh_array))
    total = np.sum(weights)
    row, col = predicted
    confidence = weights[row, col] / total
    confidence1 = np.sum(weights[row, :]) / total
    confidence2 = np.sum(weights[:, col]) / total
    confidence_back = weights[0, 0] / total
    confidence_back_all = np.sum(weights[0, :]) / total
    confidence_exp = weights[0, self.max_rep] / total
    confidence_exp_all = np.sum(weights[:, self.max_rep]) / total
    return (confidence, confidence1, confidence2, confidence_back,
            confidence_back_all, confidence_exp, confidence_exp_all)
@staticmethod
def write_output(file_desc, predicted, conf, name):
    """Write the report of one prediction to a path or an open file object.

    :param file_desc: str or file object - where to write to
    :param predicted: tuple(int/char, int/char) - predicted alleles
    :param conf: tuple(float, ...) - confidences of the prediction
        (whole, 1st allele, 2nd allele, B/B, any-B, B/E, any-E)
    :param name: str/int - name/number of the sample
    :return: None
    """
    def _report(stream, predicted, conf, name):
        # one line per figure; formats kept stable for downstream parsing
        print("Predicted alleles for %s: (confidence = %5.1f%%)" % (str(name), conf[0] * 100.0), file=stream)
        print("\t%3s (confidence = %5.1f%%)" % (str(predicted[0]), conf[1] * 100.0), file=stream)
        print("\t%3s (confidence = %5.1f%%)" % (str(predicted[1]), conf[2] * 100.0), file=stream)
        print("B B %7.3f%%" % (conf[3] * 100.0), file=stream)
        print("all B %7.3f%%" % (conf[4] * 100.0), file=stream)
        print("B E %7.3f%%" % (conf[5] * 100.0), file=stream)
        print("all E %7.3f%%" % (conf[6] * 100.0), file=stream)
    if type(file_desc) is str:
        # a plain path: open (and reliably close) the file ourselves
        with open(file_desc, 'w') as stream:
            _report(stream, predicted, conf, name)
    else:
        _report(file_desc, predicted, conf, name)
def all_call(self, annotations, filt_annotations, index_rep, file_pcolor, file_output, name):
    """Run the whole All_call pipeline: inference, pcolor image and report.

    :param annotations: list(Annotation) - good (blue) annotations
    :param filt_annotations: list(Annotation) - (grey) annotations with one primer
    :param index_rep: int - index of a repetition
    :param file_pcolor: str - file prefix for a pcolor image
    :param file_output: str - file for all_call output
    :param name: str - name of the sample
    :return: None
    """
    # nothing can be inferred when there is no evidence at all
    if not annotations and not filt_annotations:
        # write output
        # self.write_output(file_output, ('B', 'B'), (0.0, 0.0, 0.0), name)
        return None
    # infer per-model log-likelihoods
    likelihoods = self.infer(annotations, filt_annotations, index_rep, verbose=False)
    # render the pcolor image and pick the best option
    lh_array, predicted, predicted_sym = self.print_pcolor(likelihoods, file_pcolor, name)
    # confidence of the chosen option
    confidences = self.get_confidence(lh_array, predicted)
    # write the report
    self.write_output(file_output, predicted_sym, confidences, name)
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.pcolor",
"numpy.log",
"copy.copy",
"math.exp",
"matplotlib.patheffects.withStroke",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.concatenate",
"matplotlib.pyp... | [((691, 726), 'numpy.zeros_like', 'np.zeros_like', (['deletes'], {'dtype': 'float'}), '(deletes, dtype=float)\n', (704, 726), True, 'import numpy as np\n'), ((3544, 3613), 'functools.partial', 'functools.partial', (['model_full', 'rng', 'model_params'], {'rate_func': 'rate_func'}), '(model_full, rng, model_params, rate_func=rate_func)\n', (3561, 3613), False, 'import functools\n'), ((2999, 3013), 'numpy.arange', 'np.arange', (['rng'], {}), '(rng)\n', (3008, 3013), True, 'import numpy as np\n'), ((3088, 3102), 'numpy.arange', 'np.arange', (['rng'], {}), '(rng)\n', (3097, 3102), True, 'import numpy as np\n'), ((5108, 5133), 'numpy.sum', 'np.sum', (['read_distribution'], {}), '(read_distribution)\n', (5114, 5133), True, 'import numpy as np\n'), ((16179, 16220), 'numpy.ones_like', 'np.ones_like', (['observed_annots'], {'dtype': 'bool'}), '(observed_annots, dtype=bool)\n', (16191, 16220), True, 'import numpy as np\n'), ((16467, 16505), 'numpy.zeros_like', 'np.zeros_like', (['observed_fa'], {'dtype': 'bool'}), '(observed_fa, dtype=bool)\n', (16480, 16505), True, 'import numpy as np\n'), ((19163, 19205), 'numpy.zeros', 'np.zeros', (['(self.max_rep, self.max_rep + 1)'], {}), '((self.max_rep, self.max_rep + 1))\n', (19171, 19205), True, 'import numpy as np\n'), ((2243, 2259), 'math.exp', 'math.exp', (['(p3 * n)'], {}), '(p3 * n)\n', (2251, 2259), False, 'import math\n'), ((5056, 5081), 'numpy.sum', 'np.sum', (['read_distribution'], {}), '(read_distribution)\n', (5062, 5081), True, 'import numpy as np\n'), ((14002, 14030), 'numpy.isnan', 'np.isnan', (['allele2_likelihood'], {}), '(allele2_likelihood)\n', (14010, 14030), True, 'import numpy as np\n'), ((14050, 14078), 'numpy.isnan', 'np.isnan', (['allele1_likelihood'], {}), '(allele1_likelihood)\n', (14058, 14078), True, 'import numpy as np\n'), ((14098, 14126), 'numpy.isnan', 'np.isnan', (['bckgrnd_likelihood'], {}), '(bckgrnd_likelihood)\n', (14106, 14126), True, 'import numpy as np\n'), ((19966, 
19978), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19976, 19978), True, 'import matplotlib.pyplot as plt\n'), ((20603, 20696), 'matplotlib.pyplot.title', 'plt.title', (["('%s likelihood of each option for %s' % ('Loglog' if lognorm else 'Log', name)\n )"], {}), "('%s likelihood of each option for %s' % ('Loglog' if lognorm else\n 'Log', name))\n", (20612, 20696), True, 'import matplotlib.pyplot as plt\n'), ((20705, 20729), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""2nd allele"""'], {}), "('2nd allele')\n", (20715, 20729), True, 'import matplotlib.pyplot as plt\n'), ((20742, 20766), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""1st allele"""'], {}), "('1st allele')\n", (20752, 20766), True, 'import matplotlib.pyplot as plt\n'), ((21256, 21272), 'copy.copy', 'copy', (['plt.cm.jet'], {}), '(plt.cm.jet)\n', (21260, 21272), False, 'from copy import copy\n'), ((21328, 21419), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['lh_view[self.min_rep:, self.min_rep:]'], {'cmap': 'palette', 'vmin': 'z_min', 'vmax': 'z_max'}), '(lh_view[self.min_rep:, self.min_rep:], cmap=palette, vmin=z_min,\n vmax=z_max)\n', (21338, 21419), True, 'import matplotlib.pyplot as plt\n'), ((21428, 21442), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (21440, 21442), True, 'import matplotlib.pyplot as plt\n'), ((21490, 21599), 'matplotlib.pyplot.plot', 'plt.plot', (['[max_str - self.min_rep, max_str - self.min_rep]', '[0, max_str - self.min_rep]', '"""k"""'], {'linewidth': '(3)'}), "([max_str - self.min_rep, max_str - self.min_rep], [0, max_str -\n self.min_rep], 'k', linewidth=3)\n", (21498, 21599), True, 'import matplotlib.pyplot as plt\n'), ((22197, 22231), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(display_file + '.pdf')"], {}), "(display_file + '.pdf')\n", (22208, 22231), True, 'import matplotlib.pyplot as plt\n'), ((22244, 22278), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(display_file + '.png')"], {}), "(display_file + '.png')\n", (22255, 
22278), True, 'import matplotlib.pyplot as plt\n'), ((22291, 22302), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22300, 22302), True, 'import matplotlib.pyplot as plt\n'), ((23085, 23101), 'numpy.max', 'np.max', (['lh_array'], {}), '(lh_array)\n', (23091, 23101), True, 'import numpy as np\n'), ((23126, 23147), 'numpy.exp', 'np.exp', (['lh_corr_array'], {}), '(lh_corr_array)\n', (23132, 23147), True, 'import numpy as np\n'), ((23170, 23219), 'numpy.exp', 'np.exp', (['lh_corr_array[predicted[0], predicted[1]]'], {}), '(lh_corr_array[predicted[0], predicted[1]])\n', (23176, 23219), True, 'import numpy as np\n'), ((23412, 23439), 'numpy.exp', 'np.exp', (['lh_corr_array[0, 0]'], {}), '(lh_corr_array[0, 0])\n', (23418, 23439), True, 'import numpy as np\n'), ((23549, 23587), 'numpy.exp', 'np.exp', (['lh_corr_array[0, self.max_rep]'], {}), '(lh_corr_array[0, self.max_rep])\n', (23555, 23587), True, 'import numpy as np\n'), ((6272, 6307), 'numpy.zeros', 'np.zeros', (['self.min_rep'], {'dtype': 'float'}), '(self.min_rep, dtype=float)\n', (6280, 6307), True, 'import numpy as np\n'), ((16602, 16648), 'numpy.concatenate', 'np.concatenate', (['[observed_annots, observed_fa]'], {}), '([observed_annots, observed_fa])\n', (16616, 16648), True, 'import numpy as np\n'), ((16678, 16712), 'numpy.concatenate', 'np.concatenate', (['[rl_annots, rl_fa]'], {}), '([rl_annots, rl_fa])\n', (16692, 16712), True, 'import numpy as np\n'), ((16746, 16788), 'numpy.concatenate', 'np.concatenate', (['[closed_annots, closed_fa]'], {}), '([closed_annots, closed_fa])\n', (16760, 16788), True, 'import numpy as np\n'), ((18416, 18426), 'numpy.log', 'np.log', (['lh'], {}), '(lh)\n', (18422, 18426), True, 'import numpy as np\n'), ((22372, 22391), 'numpy.argmax', 'np.argmax', (['lh_array'], {}), '(lh_array)\n', (22381, 22391), True, 'import numpy as np\n'), ((23258, 23296), 'numpy.exp', 'np.exp', (['lh_corr_array[predicted[0], :]'], {}), '(lh_corr_array[predicted[0], :])\n', (23264, 23296), 
True, 'import numpy as np\n'), ((23336, 23374), 'numpy.exp', 'np.exp', (['lh_corr_array[:, predicted[1]]'], {}), '(lh_corr_array[:, predicted[1]])\n', (23342, 23374), True, 'import numpy as np\n'), ((23486, 23513), 'numpy.exp', 'np.exp', (['lh_corr_array[0, :]'], {}), '(lh_corr_array[0, :])\n', (23492, 23513), True, 'import numpy as np\n'), ((23633, 23671), 'numpy.exp', 'np.exp', (['lh_corr_array[:, self.max_rep]'], {}), '(lh_corr_array[:, self.max_rep])\n', (23639, 23671), True, 'import numpy as np\n'), ((6309, 6361), 'numpy.ones', 'np.ones', (['(self.max_with_e - self.min_rep)'], {'dtype': 'float'}), '(self.max_with_e - self.min_rep, dtype=float)\n', (6316, 6361), True, 'import numpy as np\n'), ((20031, 20048), 'numpy.log', 'np.log', (['(-lh_array)'], {}), '(-lh_array)\n', (20037, 20048), True, 'import numpy as np\n'), ((20074, 20088), 'numpy.log', 'np.log', (['(-z_min)'], {}), '(-z_min)\n', (20080, 20088), True, 'import numpy as np\n'), ((20114, 20128), 'numpy.log', 'np.log', (['(-z_max)'], {}), '(-z_max)\n', (20120, 20128), True, 'import numpy as np\n'), ((21821, 21874), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(2.5)', 'foreground': '"""w"""'}), "(linewidth=2.5, foreground='w')\n", (21843, 21874), True, 'import matplotlib.patheffects as PathEffects\n'), ((22109, 22162), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(2.5)', 'foreground': '"""w"""'}), "(linewidth=2.5, foreground='w')\n", (22131, 22162), True, 'import matplotlib.patheffects as PathEffects\n')] |
"""
Metrics to calculate and manipulate the ROC Convex Hull on a classification task given scores.
"""
# Author: <NAME> <<EMAIL>>
from collections import namedtuple
from math import sqrt
from typing import List, Dict, Tuple, Union
# DESCRIPTION:
#
# This program computes the convex hull of a set of ROC points
# (technically, the upper left triangular convex hull, bounded
# by (0,0) and (1,1)). The ROC Convex Hull is used to find dominant
# (and locally best) classifiers in ROC space. For more information
# on the ROC convex hull and its uses, see the references below.
#
# FP and TP are the False Positive (X axis) and True Positive (Y axis)
# values for the point.
#
#
# REFERENCES:
#
# The first paper below is probably best for an introduction and
# general discussion of the ROC Convex Hull and its uses.
#
# 1) <NAME>. and <NAME>. "Analysis and visualization of
# classifier performance: Comparison under imprecise class and cost
# distributions". In Proceedings of the Third International
# Conference on Knowledge Discovery and Data Mining (KDD-97),
# pp.43-48. AAAI Press.
#
# 2) <NAME>. and <NAME>. "Robust Classification Systems for
# Imprecise Environments".
#
# 3) <NAME>., <NAME>., and <NAME>. "The Case
# Against Accuracy Estimation for Comparing Induction Algorithms".
# Available from:
#
#
# BUG REPORTS / SUGGESTIONS / QUESTIONS: <NAME> <<EMAIL>>
#
#
"""
Typical use is something like this:
rocch = ROCCH(keep_intermediate=False)
for clf in classifiers:
y_scores = clf.decision_function(y_test)
rocch.fit(clfname, roc_curve(y_scores, y_true))
...
plt.plot(rocch.hull())
rocch.describe()
"""
# ROC point: (FP rate, TP rate) plus the name of the classifier producing it.
# ``clfname`` defaults to the empty string so plain (x, y) points are accepted.
Point = namedtuple("Point", ["x", "y", "clfname"], defaults=("",))

# Sentinel slope used for vertical hull segments.
INFINITY: float = float("inf")
class ROCCH( object ):
"""ROC Convex Hull.

Incrementally maintains the upper-left convex hull of classifier
performance points in ROC space, bounded by the trivial classifiers
AllNeg at (0, 0) and AllPos at (1, 1).
"""
# current hull points, kept ordered by increasing FP rate (x)
_hull: List[Point]
def __init__(self, keep_intermediate=False):
"""Initialize an empty hull spanning AllNeg (0,0) to AllPos (1,1).

:param keep_intermediate: keep colinear (intermediate) hull points
    instead of dropping them.
"""
self.keep_intermediate = keep_intermediate
# NOTE(review): populated nowhere in this class -- fit() does not record into it.
self.classifiers: Dict[str, List[Tuple]] = { }
self._hull = [Point( 0, 0, "AllNeg" ), Point( 1, 1, "AllPos" )]
def fit(self, clfname: str, points):
"""Fit (add) a classifier's ROC points to the ROCCH.
:param clfname: A classifier name or identifier. This is only used to record the
identity of the classifier producing the points. It can be anything, such as a
(classifier, threshold) pair.
TODO: Let clfname be a string or a list; add some way to incorporate info per point so we
can associate each point with a parameter.
:param points: A sequence of ROC points, contained in a list or array. Each point should
be an (FP, TP) pair. TODO: Make this more general.
:return: None
"""
# merge the new points with the existing hull and re-scan left to right
points_instances = [Point( x, y, clfname ) for (x, y) in points]
points_instances.extend( self._hull )
points_instances.sort( key=lambda pt: pt.x )
hull = []
# TODO: Make this more efficient by simply using pointers rather than append-pop.
# Graham-scan style sweep: push each point, then pop middle points that
# break convexity of the top three.
while points_instances:
hull.append( points_instances.pop( 0 ) )
# Now test the top three on new_hull
test_top = True
while len( hull ) >= 3 and test_top:
turn_dir = turn( *hull[-3:] )
if turn_dir > 0: # CCW turn, this introduced a concavity.
hull.pop( -2 )
elif turn_dir == 0: # Co-linear, should we keep it?
if not self.keep_intermediate:
# No, treat it as if it's under the hull
hull.pop( -2 )
else: # Treat this as convex
test_top = False
else: # CW turn, this is convex
test_top = False
self._hull = hull
def _check_hull(self) -> None:
"""Check a list of hull points for convexity.
This is a simple utility function for testing.
Throws an AssertionError if a hull segment is concave or if the terminal AllNeg and
AllPos are not present.
Colinear segments (turn==0) will be considered violations unless keep_intermediate is on.
"""
hull = self._hull
assert len( hull ) >= 2, "Hull is damaged"
assert hull[0].clfname == "AllNeg", "First hull point is not AllNeg"
assert hull[-1].clfname == "AllPos", "Last hull point is not AllPos"
for hull_idx in range( len( hull ) - 2 ):
segment = hull[hull_idx: hull_idx + 3]
turn_val = turn( *segment )
assert turn_val <= 0, f"Concavity in hull: {segment}"
if not self.keep_intermediate:
assert turn_val < 0, "Intermediate (colinear) point in hull"
@property
def hull(self) -> List[Tuple]:
"""
Return a list of points constituting the convex hull of classifiers in ROC space.
Returns a list of tuples (FP, TP, CLF) where each (FP,TP) is a point in ROC space
and CLF is the classifier producing that performance point.
"""
# Defined just in case postprocessing needs to be done.
return self._hull
def dominant_classifiers(self) -> List[Tuple]:
"""
Return a list describing the hull in terms of the dominant classifiers.
Start at point (1,1) and work counter-clockwise down the hull to (0,0).
Iso-performance line slope starts at 0.0 and works up to infinity.
:return: A list of (slope_min, slope_max, point) triples, where ``point``
    is the hull vertex that is optimal for iso-performance line slopes
    in the interval [slope_min, slope_max].
:rtype: List[Tuple]
"""
slope = 0.0
last_point = None
last_slope = None
segment_right_boundary: Union[Point,None] = None
dominant_list: List[Tuple] = []
# TODO: Check for hull uninitialized.
point: Point
# walk the hull; each change of segment slope closes one dominance interval
for point in self._hull:
if last_point is not None:
slope: float = calculate_slope( point, last_point )
else:
segment_right_boundary = point
if last_slope is not None:
if self.keep_intermediate or last_slope != slope:
dominant_list.append( (last_slope, slope, segment_right_boundary) )
last_slope = slope
segment_right_boundary = point
else: # last_slope is undefined
last_slope = slope
last_point = point
if last_slope != INFINITY:
slope = INFINITY
# Output final point
dominant_list.append( (last_slope, slope, segment_right_boundary) )
return dominant_list
def best_classifiers_for_conditions(self, class_ratio=1.0, cost_ratio=1.0):
"""
Given a set of operating conditions (class and cost ratios), return best classifiers.
Given a class ratio (P/N) and a cost ratio (cost(FP),cost(FN)), return a set of
classifiers that will perform optimally for those conditions. The class ratio is the
fraction of positives per negative. The cost ratio is the cost of a False Positive
divided by the cost of a False Negative.
The return value will be a list of either one or two classifiers. If the conditions
identify a single best classifier, the result will be simply:
[ (clf, 1.0) ]
indicating that clf should be chosen.
If the conditions are between the performance of two classifiers, the result will be:
[ (clf1, p1), (clf2, p2) ]
indicating that clf1's decisions should be sampled at a rate of p1 and clf2's at a rate
of p2, with p1 and p2 summing to 1.
:param class_ratio, float: The ratio of positives to negatives: P/N
:param cost_ratio, float: The ratio of the cost of a False Positive error to a False
Negative Error: cost(FP)/cost(FN)
:return:
:rtype:
"""
# NOTE(review): the default arguments (1.0, 1.0) fail these open-interval
# asserts, and no implementation follows them -- the method is unfinished.
assert 0 < class_ratio < 1.0, "Class ratio must be between 0 and 1"
assert 0 < cost_ratio < 1.0, "Cost ratio must be between 0 and 1"
def calculate_slope(pt1, pt2: Point):
"""
Return the slope from pt1 to pt2, or inf if slope is infinite
:param pt1:
:type pt1: Point
:param pt2:
:type pt2: Point
:return:
:rtype: float
"""
dx = pt2.x - pt1.x
dy = pt2.y - pt1.y
if dx == 0:
return INFINITY
else:
return dy / dx
def _check_hull(hull):
    """Check a list of hull points for convexity.

    Simple testing utility: raises AssertionError if any three consecutive
    hull points make a counter-clockwise (concave) turn. Colinear segments
    (turn == 0) are not considered violations.

    :param hull: A list of Point instances describing an ROC convex hull.
    :return: None
    """
    for start in range(len(hull) - 2):
        segment = hull[start:start + 3]
        assert turn(*segment) <= 0, f"Concavity in hull: {segment}"
def ROC_order(pt1, pt2: Point) -> bool:
"""Predicate for determining ROC_order for sorting.
Either pt1's x is ahead of pt2's x, or the x's are equal and pt1's y is ahead of pt2's y.
"""
return (pt1.x < pt2.x) or (pt1.x == pt2.x and pt1.y < pt2.y)
def compute_theta(p1, p2: Point) -> float:
"""Compute theta, an ordering function on a point pair.
Theta has the same properties as the angle between the horizontal axis and
the line segment between the points, but is much faster to compute than
arctangent. Range is 0 to 360. Defined on P.353 of _Algorithms in C_.
"""
dx = p2.x - p1.x
ax = abs( dx )
dy = p2.y - p1.y
ay = abs( dy )
if dx == 0 and dy == 0:
t = 0
else:
t = dy / (ax + ay)
# Adjust for quadrants two through four
if dx < 0:
t = 2 - t
elif dy < 0:
t = 4 + t
return t * 90.0
def euclidean(p1, p2: Point) -> float:
"""Compute Euclidean distance.
"""
return sqrt( (p1.x - p2.x)**2 + (p1.y - p2.y)**2 )
def turn(a, b, c: Point) -> float:
"""Determine the turn direction going from a to b to c.
Going from a->b->c, is the turn clockwise, counterclockwise, or straight.
positive => CCW
negative => CW
zero => colinear
See: https://algs4.cs.princeton.edu/91primitives/
>>> a = Point(1,1)
>>> b = Point(2,2)
>>> turn(a, b, Point(3,2))
-1
>>> turn(a, b, Point(2,3))
1
>>> turn(a, b, Point(3,3))
0
>>> turn(a, b, Point(1.5, 1.5)) == 0
True
>>> turn(a, b, Point(1.5,1.7)) > 0
True
:param Point a:
:param Point b:
:param Point c:
:rtype: float
"""
return (b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y)
# Run the module's doctests when the file is executed directly.
if __name__ == "__main__":
import doctest
doctest.testmod()
# End of rocch.py
| [
"doctest.testmod",
"collections.namedtuple",
"math.sqrt"
] | [((1651, 1693), 'collections.namedtuple', 'namedtuple', (['"""Point"""', "['x', 'y', 'clfname']"], {}), "('Point', ['x', 'y', 'clfname'])\n", (1661, 1693), False, 'from collections import namedtuple\n'), ((10010, 10055), 'math.sqrt', 'sqrt', (['((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2)'], {}), '((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2)\n', (10014, 10055), False, 'from math import sqrt\n'), ((10815, 10832), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (10830, 10832), False, 'import doctest\n')] |
import os
import re
from setuptools import find_packages, setup
def get_version(package):
    """Extract ``__version__`` from ``<package>/__init__.py`` next to this file."""
    init_path = os.path.join(os.path.dirname(__file__), package, "__init__.py")
    with open(init_path, "rb") as init_file:
        source = init_file.read().decode("utf-8")
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", source).group(1)
# Read the license text up front so the file handle is closed deterministically
# (the previous bare open('LICENSE').read() inside the setup() call leaked the
# handle until interpreter shutdown).
with open('LICENSE') as license_file:
    license_text = license_file.read()

setup(
    name='cli-pto',
    author='<NAME>',
    description='A CLI text editor with encryption.',
    version=get_version('cli_pto'),
    url='https://github.com/ozencb/cli-pto',
    packages=find_packages(),
    install_requires=['prompt-toolkit', 'Pygments', 'pycryptodome'],
    entry_points={'console_scripts': 'cli-pto = cli_pto.clipto:main'},
    license=license_text,
    keywords=['text', 'editor', 'encryption', 'encrypted', 'password', 'manager']
)
| [
"os.path.dirname",
"setuptools.find_packages",
"re.search"
] | [((117, 142), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (132, 142), False, 'import os\n'), ((517, 532), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (530, 532), False, 'from setuptools import find_packages, setup\n'), ((254, 309), 're.search', 're.search', (['"""__version__ = [\'"]([^\'"]+)[\'"]"""', 'init_py'], {}), '(\'__version__ = [\\\'"]([^\\\'"]+)[\\\'"]\', init_py)\n', (263, 309), False, 'import re\n')] |
from django.shortcuts import render
# Create your views here.
from user.forms import UserForm
def register(request):
    """Render the registration page; create the user when a valid form is POSTed."""
    if request.method == 'POST':
        form = UserForm(request.POST)
        if form.is_valid():
            form.save()
    else:
        form = UserForm()
    # NOTE(review): the view re-renders after a successful save instead of
    # redirecting (no Post/Redirect/Get) -- confirm this is intended.
    return render(request, 'user/registeration/register.html', {'form': form})
| [
"django.shortcuts.render",
"user.forms.UserForm"
] | [((131, 141), 'user.forms.UserForm', 'UserForm', ([], {}), '()\n', (139, 141), False, 'from user.forms import UserForm\n'), ((276, 343), 'django.shortcuts.render', 'render', (['request', '"""user/registeration/register.html"""', "{'form': form}"], {}), "(request, 'user/registeration/register.html', {'form': form})\n", (282, 343), False, 'from django.shortcuts import render\n'), ((190, 212), 'user.forms.UserForm', 'UserForm', (['request.POST'], {}), '(request.POST)\n', (198, 212), False, 'from user.forms import UserForm\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import (QApplication, QDialog,
QProgressBar, QPushButton, QMessageBox)
import matplotlib.pyplot as plt
from matplotlib import style
import T2H, PLOT
import flopy
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
#%%
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("T2H Graphical User Interface")
MainWindow.resize(1280, 800)
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
#%% QFrames
self.frame_1 = QtWidgets.QFrame(self.centralWidget)
self.frame_1.setGeometry(QtCore.QRect(810, 70, 461, 201))
self.frame_1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_1.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_1.setObjectName("frame_2")
self.frame_2 = QtWidgets.QFrame(self.centralWidget)
self.frame_2.setGeometry(QtCore.QRect(810, 280, 461, 101))
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.frame_3 = QtWidgets.QFrame(self.centralWidget)
self.frame_3.setGeometry(QtCore.QRect(810, 390, 461, 31))
self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_3.setObjectName("frame_3")
#%% QLabels
self.sedK = QtWidgets.QLabel(self.frame_2)
self.sedK.setGeometry(QtCore.QRect(30, 10, 141, 16))
self.sedK.setObjectName("sedK")
self.aqK = QtWidgets.QLabel(self.frame_2)
self.aqK.setGeometry(QtCore.QRect(30, 40, 141, 16))
self.aqK.setObjectName("aqK")
self.faultK = QtWidgets.QLabel(self.frame_2)
self.faultK.setGeometry(QtCore.QRect(30, 70, 141, 16))
self.faultK.setObjectName("faultK")
self.sedKN = QtWidgets.QLabel(self.centralWidget)
self.sedKN.setGeometry(QtCore.QRect(910, 500, 141, 16))
self.sedKN.setObjectName("sedKN")
self.sedKNlabel = QtWidgets.QLabel(self.centralWidget)
self.sedKNlabel.setGeometry(QtCore.QRect(1100, 500, 61, 16))
self.sedKNlabel.setObjectName("sedKNlabel")
self.aquiferKNlabel = QtWidgets.QLabel(self.centralWidget)
self.aquiferKNlabel.setGeometry(QtCore.QRect(1100, 520, 61, 16))
self.aquiferKNlabel.setObjectName("aquiferKNlabel")
self.aqKN = QtWidgets.QLabel(self.centralWidget)
self.aqKN.setGeometry(QtCore.QRect(910, 520, 81, 16))
self.aqKN.setObjectName("aqKN")
self.faultKN = QtWidgets.QLabel(self.centralWidget)
self.faultKN.setGeometry(QtCore.QRect(910, 540, 81, 16))
self.faultKN.setObjectName("faultKN")
self.faultKNlabel = QtWidgets.QLabel(self.centralWidget)
self.faultKNlabel.setGeometry(QtCore.QRect(1100, 540, 61, 16))
self.faultKNlabel.setObjectName("faultKNlabel")
self.label_21 = QtWidgets.QLabel(self.frame_3)
self.label_21.setGeometry(QtCore.QRect(10, 7, 141, 16))
self.label_21.setObjectName("label_21")
self.visoptionsLabel = QtWidgets.QLabel(self.centralWidget)
self.visoptionsLabel.setGeometry(QtCore.QRect(20, 540, 141, 16))
self.visoptionsLabel.setObjectName("visoptionsLabel")
self.fileLabel = QtWidgets.QLabel(self.centralWidget)
self.fileLabel.setGeometry(QtCore.QRect(810, 4, 60, 16))
self.fileLabel.setObjectName("fileLabel")
self.fileLabel_path = QtWidgets.QLabel(self.centralWidget)
self.fileLabel_path.setGeometry(QtCore.QRect(880, 4, 320, 16))
self.fileLabel_path.setObjectName("fileLabel_path")
self.label = QtWidgets.QLabel(self.centralWidget)
self.label.setGeometry(QtCore.QRect(814, 51, 241, 16))
self.label.setObjectName("label")
self.nz = QtWidgets.QLabel(self.centralWidget)
self.nz.setGeometry(QtCore.QRect(840, 104, 141, 16))
self.nz.setObjectName("nz")
self.targetperiod = QtWidgets.QLabel(self.centralWidget)
self.targetperiod.setGeometry(QtCore.QRect(840, 80, 151, 16))
self.targetperiod.setObjectName("targetperiod")
self.nzfixed = QtWidgets.QLabel(self.centralWidget)
self.nzfixed.setGeometry(QtCore.QRect(840, 128, 141, 16))
self.nzfixed.setObjectName("nzfixed")
self.constrecharge = QtWidgets.QLabel(self.centralWidget)
self.constrecharge.setGeometry(QtCore.QRect(840, 176, 151, 16))
self.constrecharge.setObjectName("constrecharge")
#
self.hiniratio = QtWidgets.QLabel(self.centralWidget)
self.hiniratio.setGeometry(QtCore.QRect(840, 242, 151, 16))
self.hiniratio.setObjectName("hiniratio")
self.datvar = QtWidgets.QLabel(self.centralWidget)
self.datvar.setGeometry(QtCore.QRect(840, 152, 161, 16))
self.datvar.setObjectName("datvar")
# Recharge input
self.constrecharge_2 = QtWidgets.QLabel(self.centralWidget)
self.constrecharge_2.setGeometry(QtCore.QRect(840, 200, 151, 16))
self.constrecharge_2.setObjectName("constrecharge_2")
# Image pane
self.image = QtWidgets.QLabel(self.centralWidget)
self.image.setGeometry(QtCore.QRect(10, 10, 780, 520))
self.image.setObjectName("image")
self.pixmap = QtGui.QPixmap("logo.png")
self.image.setPixmap(self.pixmap)
#%% QLineEdits
self.sedKlineEdit = QtWidgets.QLineEdit(self.frame_2)
self.sedKlineEdit.setGeometry(QtCore.QRect(260, 10, 113, 21))
self.sedKlineEdit.setObjectName("sedKlineEdit")
self.sedKlineEdit.setText("547.5")
#
self.aqKlineEdit = QtWidgets.QLineEdit(self.frame_2)
self.aqKlineEdit.setGeometry(QtCore.QRect(260, 40, 113, 21))
self.aqKlineEdit.setObjectName("aqKlineEdit")
self.aqKlineEdit.setText("36.5")
#
self.faultKlineEdit = QtWidgets.QLineEdit(self.frame_2)
self.faultKlineEdit.setGeometry(QtCore.QRect(260, 70, 113, 21))
self.faultKlineEdit.setObjectName("faultKlineEdit")
self.faultKlineEdit.setText("0.0365")
#
self.nzfline = QtWidgets.QLineEdit(self.centralWidget)
self.nzfline.setGeometry(QtCore.QRect(1070, 128, 113, 21))
self.nzfline.setObjectName("nzfline")
self.nzfline.setText("10")
#
self.nzline = QtWidgets.QLineEdit(self.centralWidget)
self.nzline.setGeometry(QtCore.QRect(1070, 104, 113, 21))
self.nzline.setObjectName("nzline")
self.nzline.setText("40")
#
self.datline = QtWidgets.QLineEdit(self.centralWidget)
self.datline.setGeometry(QtCore.QRect(1070, 152, 113, 21))
self.datline.setObjectName("datline")
self.datline.setText("-10000")
#
self.hiniratioLineEdit = QtWidgets.QLineEdit(self.centralWidget)
self.hiniratioLineEdit.setGeometry(QtCore.QRect(1070, 242, 113, 21))
self.hiniratioLineEdit.setObjectName("hiniratioLineEdit")
self.hiniratioLineEdit.setText("0.9")
#
self.datvarline = QtWidgets.QLineEdit(self.centralWidget)
self.datvarline.setGeometry(QtCore.QRect(1070, 176, 113, 21))
self.datvarline.setObjectName("datvarline")
self.datvarline.setText("-3000")
self.rchline = QtWidgets.QLineEdit(self.centralWidget)
self.rchline.setGeometry(QtCore.QRect(1070, 200, 113, 21))
self.rchline.setObjectName("rchline")
self.rchline.setText("0.05")
# Ma input lineedit
self.maline = QtWidgets.QLineEdit(self.centralWidget)
self.maline.setGeometry(QtCore.QRect(1070, 80, 113, 21))
self.maline.setObjectName("maline")
self.maline.setText("12.5")
#%% QPushButtons
self.load = QtWidgets.QPushButton(self.centralWidget)
self.load.setGeometry(QtCore.QRect(1100, -1, 71, 32))
self.load.setObjectName("loadButton")
self.load.clicked.connect(self.fileloader)
self.load1 = QtWidgets.QPushButton(self.centralWidget)
self.load1.setGeometry(QtCore.QRect(1170, -1, 101, 32))
self.load1.setObjectName("loadButton1")
self.load1.clicked.connect(self.fileloader)
self.applyButton = QtWidgets.QPushButton(self.frame_1)
self.applyButton.setGeometry(QtCore.QRect(380, 60, 81, 81))
self.applyButton.setObjectName("applyButton")
self.applyButton.clicked.connect(self.applyclicked)
self.fileDialog_3 = QtWidgets.QPushButton(self.frame_2)
self.fileDialog_3.setGeometry(QtCore.QRect(380, 20, 81, 71))
self.fileDialog_3.setObjectName("fileDialog_3")
self.fileDialog_3.clicked.connect(self.applyCalClicked)
# Model run button
self.ModelRunButton = QtWidgets.QPushButton(self.centralWidget)
self.ModelRunButton.setGeometry(QtCore.QRect(640, 620, 113, 32))
self.ModelRunButton.setObjectName("ModelRunButton")
self.ModelRunButton.clicked.connect(self.run)
self.QuitButton = QtWidgets.QPushButton(self.centralWidget)
self.QuitButton.setGeometry(QtCore.QRect(760, 620, 113, 32))
self.QuitButton.setObjectName("QuitButton")
self.QuitButton.clicked.connect(QCoreApplication.instance().quit)
self.VtkOutputButton = QtWidgets.QPushButton(self.centralWidget)
self.VtkOutputButton.setGeometry(QtCore.QRect(880, 620, 113, 32))
self.VtkOutputButton.setObjectName("VtkOutputButton")
# self.VtkOutputButton.clicked.connect(self.vtk)
self.PlotButton = QtWidgets.QPushButton(self.centralWidget)
self.PlotButton.setGeometry(QtCore.QRect(460, 560, 113, 32))
self.PlotButton.setObjectName("PlotButton")
self.PlotButton.clicked.connect(self.plot)
#%% QGraphicsViews
self.figure = plt.figure(figsize=(12,12))
self.canvas = FigureCanvas(self.figure)
#%% QComboBoxes
# File combo box
self.fileBox = QtWidgets.QComboBox(self.centralWidget)
self.fileBox.setGeometry(QtCore.QRect(808, 25, 461, 26))
self.fileBox.setObjectName("fileBox")
# Solver selection combo box
self.solverBox = QtWidgets.QComboBox(self.frame_3)
self.solverBox.setGeometry(QtCore.QRect(63, 2, 281, 26))
self.solverBox.setObjectName("solverBox")
self.solverBox.addItem("xMD")
self.solverBox.addItem("GMRES")
#
self.visComboBox = QtWidgets.QComboBox(self.centralWidget)
self.visComboBox.setGeometry(QtCore.QRect(10, 560, 441, 26))
self.visComboBox.setObjectName("visComboBox")
self.visComboBox.addItem("Cross Section")
self.visComboBox.addItem("Fault Plane")
self.visComboBox.addItem("Vertical Flow Barriers (VFB)")
self.visComboBox.addItem("Horizontal Flow Barriers (HFB)")
#%% QCheckBoxes
#
self.elevdependentChecker = QtWidgets.QCheckBox(self.centralWidget)
self.elevdependentChecker.setGeometry(QtCore.QRect(860, 220, 231, 20))
self.elevdependentChecker.setObjectName("elevdependentChecker")
#%% QProgressBars
self.progress = QProgressBar(self.centralWidget)
self.progress.setGeometry(10, 620, 600, 25)
self.progress.setMaximum(100)
#%% Mainwindows
MainWindow.setCentralWidget(self.centralWidget)
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 1024, 22))
self.menuBar.setObjectName("menuBar")
self.menuT2H_Main = QtWidgets.QMenu(self.menuBar)
self.menuT2H_Main.setObjectName("menuT2H_Main")
self.menuT2H_Checker = QtWidgets.QMenu(self.menuBar)
self.menuT2H_Checker.setObjectName("menuT2H_Checker")
self.menuT2H_Plot = QtWidgets.QMenu(self.menuBar)
self.menuT2H_Plot.setObjectName("menuT2H_Plot")
MainWindow.setMenuBar(self.menuBar)
self.mainToolBar = QtWidgets.QToolBar(MainWindow)
self.mainToolBar.setObjectName("mainToolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.menuBar.addAction(self.menuT2H_Main.menuAction())
self.menuBar.addAction(self.menuT2H_Checker.menuAction())
self.menuBar.addAction(self.menuT2H_Plot.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
#%% Functions
def applyclicked(self):
self.Ma = float(self.maline.text())
self.Ma = format(self.Ma, '.1f')
self.nz = int(self.nzline.text())
self.nz_fixed = int(self.nzfline.text())
self.dx = 1000
self.dy = 1000
self.inz = self.nz - self.nz_fixed
self.dat = int(self.datline.text())
self.dat_var = int(self.datvarline.text())
self.idat = self.dat - self.dat_var
self.rech = float(self.rchline.text())
self.perm_sed = float(self.sedKlineEdit.text())
self.hratio = float(self.hiniratioLineEdit.text())
self.Kconst = float(self.aqKlineEdit.text())
self.hydchr = self.Kconst/1000
self.target_row = 101
self.iskip = 4
self.ivtk = 1
self.h_tol = 1e-4
self.fileLabel_path.setText("/tisc_output/topo_" + self.Ma +"0Ma.txt")
self.ans = QMessageBox.question(self.centralWidget, "Confirmation",\
"Are these correct?\n" + "Period: " + self.Ma\
+ "Ma\n" + "Nz: " + str(self.nz) +"\n" + "Datum: "\
+ str(self.dat) + " m\n", QMessageBox.Yes, QMessageBox.No)
if self.ans == QMessageBox.Yes:
self.rchline.setEnabled(False)
self.maline.setEnabled(False)
self.nzline.setEnabled(False)
self.nzfline.setEnabled(False)
self.datline.setEnabled(False)
self.datvarline.setEnabled(False)
self.hiniratioLineEdit.setEnabled(False)
QMessageBox.about(self.centralWidget, "Confirmed", "Properties confirmed")
else:
QMessageBox.about(self.centralWidget, "Check values", "Check values again!")
def applyCalClicked(self):
self.perm_sed = self.sedKlineEdit.text()
self.Kconst = self.aqKlineEdit.text()
self.hydchr = self.faultKlineEdit.text()
self.sedKNlabel.setText(str(float(self.perm_sed)/float(self.rchline.text())))
self.aquiferKNlabel.setText(str(float(self.Kconst)/float(self.rchline.text())))
self.faultKNlabel.setText(str(float(self.hydchr)/float(self.rchline.text())))
self.ans = QMessageBox.question(self.centralWidget, "Confirmation",\
"Are these correct?\n" + "Period: " + self.Ma\
+ "Ma\n" + "Nz: " + str(self.nz) +"\n" + "Datum: "\
+ str(self.dat) + " m\n", QMessageBox.Yes, QMessageBox.No)
if self.ans == QMessageBox.Yes:
self.sedKlineEdit.setEnabled(False)
self.aqKlineEdit.setEnabled(False)
self.faultKlineEdit.setEnabled(False)
QMessageBox.about(self.centralWidget, "Confirmed", "Properties confirmed")
else:
QMessageBox.about(self.centralWidget, "Check values", "Check values again!")
#%%
def run(self):
self.Ma = float(self.maline.text())
self.Ma = format(self.Ma, '.1f')
self.nz = int(self.nzline.text())
self.nz_fixed = int(self.nzfline.text())
self.dx = 1000
self.dy = 1000
self.inz = self.nz - self.nz_fixed
self.dat = int(self.datline.text())
self.dat_var = int(self.datvarline.text())
self.idat = self.dat - self.dat_var
self.rech = float(self.rchline.text())
self.perm_sed = float(self.sedKlineEdit.text())
self.hratio = float(self.hiniratioLineEdit.text())
self.Kconst = float(self.aqKlineEdit.text())
self.hydchr = self.Kconst/1000
self.target_row = 101
self.iskip = 4
self.ivtk = 1
self.h_tol = 1e-4
self.model = T2H.main(self.Ma, self.nz, self.nz_fixed, self.inz, self.dx,\
self.dy, self.dat, self.dat_var, self.idat\
, self.rech, self.perm_sed, self.target_row,\
self.Kconst, self.hratio, self.hydchr,\
self.iskip, self.ivtk, self.h_tol)
self.mf = self.model.mf
self.mf.dis.check()
self.mf.write_input()
self.mf.run_model()
return self.mf
def plot(self):
try:
self.mf
except AttributeError:
QMessageBox.about(self.centralWidget, "Warning", "Please run a model first")
else:
self.vcb = self.visComboBox.itemData
print(self.vcb)
if self.vcb == "Cross Section":
figheadxsect, axheadxsect = plt.subplots(figsize=(40,5))
self.mfxsect = PLOT.fmfxsect(self.mf, self.model.mfdis, self.target_row, axheadxsect).mfxsect
self.a = PLOT.head(self.mf, self.model.fdirmodel).a
self.headc = PLOT.headc(self.mfxsect, self.a)
self.headcontour = self.headc.headcontour
self.gdplot = self.mfxsect.plot_grid(color='r', linewidths=0.2)
self.BCplot = self.mfxsect.plot_ibound(self.model.ibound, color_noflow = 'black',\
color_ch = 'blue', head = self.a)
self.canvas.draw()
print("plot")
def fileloader(self):
self.path = os.getcwd() + "/tisc_output/"
self.l = os.listdir(self.path)
self.bdtopo = [0]*len(self.l)
self.topo = [0]*len(self.l)
self.fault = [0]*len(self.l)
self.sedthick = [0]*len(self.l)
for file in range(len(self.l)):
if self.l[file].startswith("bdtopo"):
if os.stat(self.path+self.l[file]).st_size > 5: # greater than 5 bytes
self.bdtopo[file] = float(self.l[file][7:]\
.split("Ma.txt")[0])
elif self.l[file].startswith("topo"):
if os.stat(self.path+self.l[file]).st_size > 5: # greater than 5 bytes
self.topo[file] = float(self.l[file][5:]\
.split("Ma.txt")[0])
elif self.l[file].startswith("fault"):
if os.stat(self.path+self.l[file]).st_size > 5: # greater than 5 bytes
self.fault[file] = float(self.l[file][6:]\
.split("Ma.txt")[0])
elif self.l[file].startswith("sedthick"):
if os.stat(self.path+self.l[file]).st_size > 5: # greater than 5 bytes
self.sedthick[file] = float(self.l[file][9:]\
.split("Ma.txt")[0])
self.a = list(filter((0).__ne__, self.topo))
self.a.sort()
self.b = list(filter((0).__ne__, self.bdtopo))
self.b.sort()
self.c = list(filter((0).__ne__, self.fault))
self.c.sort()
self.d = list(filter((0).__ne__, self.sedthick))
self.d.sort()
self.df = []
for nfile in range(len(self.a)):
if self.b.count(self.a[nfile]) == 1:
if self.c.count(self.a[nfile]) == 1:
if self.d.count(self.a[nfile]) == 1:
data = [self.a[nfile], "y", "y", "y", "y"]
self.df.append(data)
elif self.d.count(self.a[nfile]) == 0:
data = [self.a[nfile], "y", "y", "y", "n"]
self.df.append(data)
elif self.c.count(self.a[nfile]) == 0:
if self.d.count(self.a[nfile]) == 1:
data = [self.a[nfile], "y", "y", "n", "y"]
self.df.append(data)
elif self.d.count(self.a[nfile]) == 0:
data = [self.a[nfile], "y", "y", "n", "n"]
self.df.append(data)
elif self.b.count(self.a[nfile]) == 0:
if self.c.count(self.a[nfile]) == 1:
if self.d.count(self.a[nfile]) == 1:
data = [self.a[nfile], "y", "n", "y", "y"]
self.df.append(data)
elif self.d.count(self.a[nfile]) == 0:
data = [self.a[nfile], "y", "n", "y", "n"]
self.df.append(data)
elif self.c.count(self.a[nfile]) == 0:
if self.d.count(self.a[nfile]) == 1:
data = [self.a[nfile], "y", "n", "n", "y"]
self.df.append(data)
elif self.d.count(self.a[nfile]) == 0:
data = [self.a[nfile], "y", "n", "n", "n"]
self.df.append(data)
for age in range(len(self.a)):
if self.df[age][2] == "y" and self.df[age][3] == "y" and self.df[age][4] == "y":
self.fileBox.addItem("Snapshot:" + str(self.df[age][0]) + "Ma | Faults | Sediments")
elif self.df[age][2] == "y" and self.df[age][3] == "y" and self.df[age][4] == "n":
self.fileBox.addItem("Snapshot:" + str(self.df[age][0]) + "Ma | Faults | No Sediments")
elif self.df[age][2] == "y" and self.df[age][3] == "n" and self.df[age][4] == "y":
self.fileBox.addItem("Snapshot:" + str(self.df[age][0]) + "Ma | No Faults | Sediments")
elif self.df[age][2] == "y" and self.df[age][3] == "n" and self.df[age][4] == "n":
self.fileBox.addItem("Snapshot:" + str(self.df[age][0]) + "Ma | No Faults | No Sediments")
#%%
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "T2H Graphical User Interface"))
self.applyButton.setText(_translate("MainWindow", "Apply"))
self.sedK.setText(_translate("MainWindow", "Sediment K (m/yr)"))
self.aqK.setText(_translate("MainWindow", "Aquifer K (m/yr)"))
self.faultK.setText(_translate("MainWindow", "Fault zone K (m/yr)"))
self.fileDialog_3.setText(_translate("MainWindow", "Apply"))
self.sedKN.setText(_translate("MainWindow", "Sediment K / N:"))
self.sedKNlabel.setText(_translate("MainWindow", "N/A"))
self.aquiferKNlabel.setText(_translate("MainWindow", "N/A"))
self.aqKN.setText(_translate("MainWindow", "Aquifer K / N:"))
self.faultKN.setText(_translate("MainWindow", "Fault K / N:"))
self.faultKNlabel.setText(_translate("MainWindow", "N/A"))
self.label_21.setText(_translate("MainWindow", "Solver"))
self.ModelRunButton.setText(_translate("MainWindow", "Execute"))
self.load.setText(_translate("MainWindow", "Load"))
self.load1.setText(_translate("MainWindow", "Set selected"))
self.QuitButton.setText(_translate("MainWindow", "Abort"))
self.VtkOutputButton.setText(_translate("MainWindow", "VTK output"))
self.PlotButton.setText(_translate("MainWindow", "Plot"))
self.visoptionsLabel.setText(_translate("MainWindow", "Visualization options"))
self.fileLabel.setText(_translate("MainWindow", "File: "))
self.fileLabel_path.setText(_translate("MainWindow", "path"))
self.label.setText(_translate("MainWindow", "*dx = dy = 1,000 m fixed in this version"))
self.nz.setText(_translate("MainWindow", "Number of layers (nz)"))
self.targetperiod.setText(_translate("MainWindow", "Target period (Ma)"))
self.nzfixed.setText(_translate("MainWindow", "Fixed layers (nz_fixed)"))
self.constrecharge.setText(_translate("MainWindow", "Datum of variable dz (m)"))
self.hiniratio.setText(_translate("MainWindow", "Initial head ratio to topo."))
self.elevdependentChecker.setText(_translate("MainWindow", "Elevation-dependent recharge"))
self.datvar.setText(_translate("MainWindow", "Model datum (m)"))
self.constrecharge_2.setText(_translate("MainWindow", "Const. Recharge (m/yr)"))
self.menuT2H_Main.setTitle(_translate("MainWindow", "T2H Main"))
self.menuT2H_Checker.setTitle(_translate("MainWindow", "T2H Checker"))
self.menuT2H_Plot.setTitle(_translate("MainWindow", "T2H Plot"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [
"matplotlib.backends.qt_compat.QtWidgets.QCheckBox",
"PLOT.fmfxsect",
"matplotlib.backends.qt_compat.QtWidgets.QMenu",
"matplotlib.backends.qt_compat.QtCore.QMetaObject.connectSlotsByName",
"matplotlib.backends.qt_compat.QtWidgets.QMainWindow",
"matplotlib.backends.qt_compat.QtWidgets.QPushButton",
"mat... | [((581, 591), 'matplotlib.backends.qt_compat.is_pyqt5', 'is_pyqt5', ([], {}), '()\n', (589, 591), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((25636, 25668), 'matplotlib.backends.qt_compat.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (25658, 25668), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((25686, 25709), 'matplotlib.backends.qt_compat.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (25707, 25709), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((1072, 1101), 'matplotlib.backends.qt_compat.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (1089, 1101), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((1196, 1232), 'matplotlib.backends.qt_compat.QtWidgets.QFrame', 'QtWidgets.QFrame', (['self.centralWidget'], {}), '(self.centralWidget)\n', (1212, 1232), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((1503, 1539), 'matplotlib.backends.qt_compat.QtWidgets.QFrame', 'QtWidgets.QFrame', (['self.centralWidget'], {}), '(self.centralWidget)\n', (1519, 1539), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((1803, 1839), 'matplotlib.backends.qt_compat.QtWidgets.QFrame', 'QtWidgets.QFrame', (['self.centralWidget'], {}), '(self.centralWidget)\n', (1819, 1839), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((2126, 2156), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.frame_2'], {}), '(self.frame_2)\n', (2142, 2156), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((2286, 2316), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.frame_2'], {}), '(self.frame_2)\n', (2302, 2316), False, 'from matplotlib.backends.qt_compat import 
QtCore, QtWidgets, is_pyqt5\n'), ((2446, 2476), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.frame_2'], {}), '(self.frame_2)\n', (2462, 2476), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((2609, 2645), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (2625, 2645), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((2787, 2823), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (2803, 2823), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((2984, 3020), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (3000, 3020), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((3183, 3219), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (3199, 3219), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((3354, 3390), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (3370, 3390), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((3539, 3575), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (3555, 3575), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((3728, 3758), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.frame_3'], {}), '(self.frame_3)\n', (3744, 3758), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((3903, 3939), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', 
(['self.centralWidget'], {}), '(self.centralWidget)\n', (3919, 3939), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((4109, 4145), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (4125, 4145), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((4300, 4336), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (4316, 4336), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((4498, 4534), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (4514, 4534), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((4667, 4703), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (4683, 4703), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((4830, 4866), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (4846, 4866), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((5025, 5061), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (5041, 5061), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((5204, 5240), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (5220, 5240), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((5407, 5443), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (5423, 5443), False, 'from 
matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((5585, 5621), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (5601, 5621), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((5798, 5834), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (5814, 5834), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((6022, 6058), 'matplotlib.backends.qt_compat.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralWidget'], {}), '(self.centralWidget)\n', (6038, 6058), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((6186, 6211), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""logo.png"""'], {}), "('logo.png')\n", (6199, 6211), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6297, 6330), 'matplotlib.backends.qt_compat.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.frame_2'], {}), '(self.frame_2)\n', (6316, 6330), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((6537, 6570), 'matplotlib.backends.qt_compat.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.frame_2'], {}), '(self.frame_2)\n', (6556, 6570), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((6775, 6808), 'matplotlib.backends.qt_compat.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.frame_2'], {}), '(self.frame_2)\n', (6794, 6808), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((7020, 7059), 'matplotlib.backends.qt_compat.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralWidget'], {}), '(self.centralWidget)\n', (7039, 7059), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((7240, 7279), 'matplotlib.backends.qt_compat.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', 
(['self.centralWidget'], {}), '(self.centralWidget)\n', (7259, 7279), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((7457, 7496), 'matplotlib.backends.qt_compat.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralWidget'], {}), '(self.centralWidget)\n', (7476, 7496), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((7692, 7731), 'matplotlib.backends.qt_compat.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralWidget'], {}), '(self.centralWidget)\n', (7711, 7731), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((7966, 8005), 'matplotlib.backends.qt_compat.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralWidget'], {}), '(self.centralWidget)\n', (7985, 8005), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((8193, 8232), 'matplotlib.backends.qt_compat.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralWidget'], {}), '(self.centralWidget)\n', (8212, 8232), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((8442, 8481), 'matplotlib.backends.qt_compat.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralWidget'], {}), '(self.centralWidget)\n', (8461, 8481), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((8673, 8714), 'matplotlib.backends.qt_compat.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralWidget'], {}), '(self.centralWidget)\n', (8694, 8714), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((8907, 8948), 'matplotlib.backends.qt_compat.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralWidget'], {}), '(self.centralWidget)\n', (8928, 8948), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((9152, 9187), 'matplotlib.backends.qt_compat.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.frame_1'], {}), 
'(self.frame_1)\n', (9173, 9187), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((9402, 9437), 'matplotlib.backends.qt_compat.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.frame_2'], {}), '(self.frame_2)\n', (9423, 9437), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((9686, 9727), 'matplotlib.backends.qt_compat.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralWidget'], {}), '(self.centralWidget)\n', (9707, 9727), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((9950, 9991), 'matplotlib.backends.qt_compat.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralWidget'], {}), '(self.centralWidget)\n', (9971, 9991), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((10227, 10268), 'matplotlib.backends.qt_compat.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralWidget'], {}), '(self.centralWidget)\n', (10248, 10268), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((10496, 10537), 'matplotlib.backends.qt_compat.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralWidget'], {}), '(self.centralWidget)\n', (10517, 10537), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((10755, 10783), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (10765, 10783), True, 'import matplotlib.pyplot as plt\n'), ((10805, 10830), 'matplotlib.backends.backend_qt4agg.FigureCanvas', 'FigureCanvas', (['self.figure'], {}), '(self.figure)\n', (10817, 10830), False, 'from matplotlib.backends.backend_qt4agg import FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((10896, 10935), 'matplotlib.backends.qt_compat.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.centralWidget'], {}), '(self.centralWidget)\n', (10915, 10935), False, 'from matplotlib.backends.qt_compat import 
QtCore, QtWidgets, is_pyqt5\n'), ((11109, 11142), 'matplotlib.backends.qt_compat.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.frame_3'], {}), '(self.frame_3)\n', (11128, 11142), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((11373, 11412), 'matplotlib.backends.qt_compat.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.centralWidget'], {}), '(self.centralWidget)\n', (11392, 11412), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((11837, 11876), 'matplotlib.backends.qt_compat.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['self.centralWidget'], {}), '(self.centralWidget)\n', (11856, 11876), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((12071, 12103), 'PyQt5.QtWidgets.QProgressBar', 'QProgressBar', (['self.centralWidget'], {}), '(self.centralWidget)\n', (12083, 12103), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QProgressBar, QPushButton, QMessageBox\n'), ((12300, 12330), 'matplotlib.backends.qt_compat.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (12318, 12330), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((12468, 12497), 'matplotlib.backends.qt_compat.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menuBar'], {}), '(self.menuBar)\n', (12483, 12497), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((12585, 12614), 'matplotlib.backends.qt_compat.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menuBar'], {}), '(self.menuBar)\n', (12600, 12614), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((12705, 12734), 'matplotlib.backends.qt_compat.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menuBar'], {}), '(self.menuBar)\n', (12720, 12734), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((12871, 12901), 'matplotlib.backends.qt_compat.QtWidgets.QToolBar', 
'QtWidgets.QToolBar', (['MainWindow'], {}), '(MainWindow)\n', (12889, 12901), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((13064, 13096), 'matplotlib.backends.qt_compat.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (13084, 13096), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((13444, 13493), 'matplotlib.backends.qt_compat.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (13481, 13493), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((17173, 17403), 'T2H.main', 'T2H.main', (['self.Ma', 'self.nz', 'self.nz_fixed', 'self.inz', 'self.dx', 'self.dy', 'self.dat', 'self.dat_var', 'self.idat', 'self.rech', 'self.perm_sed', 'self.target_row', 'self.Kconst', 'self.hratio', 'self.hydchr', 'self.iskip', 'self.ivtk', 'self.h_tol'], {}), '(self.Ma, self.nz, self.nz_fixed, self.inz, self.dx, self.dy, self.\n dat, self.dat_var, self.idat, self.rech, self.perm_sed, self.target_row,\n self.Kconst, self.hratio, self.hydchr, self.iskip, self.ivtk, self.h_tol)\n', (17181, 17403), False, 'import T2H, PLOT\n'), ((18782, 18803), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (18792, 18803), False, 'import os\n'), ((1266, 1297), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(810)', '(70)', '(461)', '(201)'], {}), '(810, 70, 461, 201)\n', (1278, 1297), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((1573, 1605), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(810)', '(280)', '(461)', '(101)'], {}), '(810, 280, 461, 101)\n', (1585, 1605), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((1873, 1904), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(810)', '(390)', '(461)', '(31)'], {}), '(810, 390, 461, 31)\n', (1885, 1904), 
False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((2187, 2216), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(30)', '(10)', '(141)', '(16)'], {}), '(30, 10, 141, 16)\n', (2199, 2216), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((2346, 2375), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(30)', '(40)', '(141)', '(16)'], {}), '(30, 40, 141, 16)\n', (2358, 2375), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((2509, 2538), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(30)', '(70)', '(141)', '(16)'], {}), '(30, 70, 141, 16)\n', (2521, 2538), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((2677, 2708), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(910)', '(500)', '(141)', '(16)'], {}), '(910, 500, 141, 16)\n', (2689, 2708), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((2860, 2891), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(1100)', '(500)', '(61)', '(16)'], {}), '(1100, 500, 61, 16)\n', (2872, 2891), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((3061, 3092), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(1100)', '(520)', '(61)', '(16)'], {}), '(1100, 520, 61, 16)\n', (3073, 3092), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((3250, 3280), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(910)', '(520)', '(81)', '(16)'], {}), '(910, 520, 81, 16)\n', (3262, 3280), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((3424, 3454), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(910)', '(540)', '(81)', '(16)'], {}), '(910, 540, 81, 16)\n', (3436, 3454), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, 
is_pyqt5\n'), ((3614, 3645), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(1100)', '(540)', '(61)', '(16)'], {}), '(1100, 540, 61, 16)\n', (3626, 3645), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((3793, 3821), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(10)', '(7)', '(141)', '(16)'], {}), '(10, 7, 141, 16)\n', (3805, 3821), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((3981, 4011), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(20)', '(540)', '(141)', '(16)'], {}), '(20, 540, 141, 16)\n', (3993, 4011), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((4181, 4209), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(810)', '(4)', '(60)', '(16)'], {}), '(810, 4, 60, 16)\n', (4193, 4209), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((4377, 4406), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(880)', '(4)', '(320)', '(16)'], {}), '(880, 4, 320, 16)\n', (4389, 4406), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((4566, 4596), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(814)', '(51)', '(241)', '(16)'], {}), '(814, 51, 241, 16)\n', (4578, 4596), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((4732, 4763), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(840)', '(104)', '(141)', '(16)'], {}), '(840, 104, 141, 16)\n', (4744, 4763), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((4905, 4935), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(840)', '(80)', '(151)', '(16)'], {}), '(840, 80, 151, 16)\n', (4917, 4935), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((5095, 5126), 'matplotlib.backends.qt_compat.QtCore.QRect', 
'QtCore.QRect', (['(840)', '(128)', '(141)', '(16)'], {}), '(840, 128, 141, 16)\n', (5107, 5126), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((5280, 5311), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(840)', '(176)', '(151)', '(16)'], {}), '(840, 176, 151, 16)\n', (5292, 5311), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((5479, 5510), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(840)', '(242)', '(151)', '(16)'], {}), '(840, 242, 151, 16)\n', (5491, 5510), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((5654, 5685), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(840)', '(152)', '(161)', '(16)'], {}), '(840, 152, 161, 16)\n', (5666, 5685), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((5876, 5907), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(840)', '(200)', '(151)', '(16)'], {}), '(840, 200, 151, 16)\n', (5888, 5907), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((6090, 6120), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(10)', '(10)', '(780)', '(520)'], {}), '(10, 10, 780, 520)\n', (6102, 6120), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((6369, 6399), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(260)', '(10)', '(113)', '(21)'], {}), '(260, 10, 113, 21)\n', (6381, 6399), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((6608, 6638), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(260)', '(40)', '(113)', '(21)'], {}), '(260, 40, 113, 21)\n', (6620, 6638), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((6849, 6879), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(260)', '(70)', '(113)', '(21)'], {}), '(260, 70, 
113, 21)\n', (6861, 6879), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((7093, 7125), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(1070)', '(128)', '(113)', '(21)'], {}), '(1070, 128, 113, 21)\n', (7105, 7125), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((7312, 7344), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(1070)', '(104)', '(113)', '(21)'], {}), '(1070, 104, 113, 21)\n', (7324, 7344), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((7530, 7562), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(1070)', '(152)', '(113)', '(21)'], {}), '(1070, 152, 113, 21)\n', (7542, 7562), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((7775, 7807), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(1070)', '(242)', '(113)', '(21)'], {}), '(1070, 242, 113, 21)\n', (7787, 7807), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((8042, 8074), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(1070)', '(176)', '(113)', '(21)'], {}), '(1070, 176, 113, 21)\n', (8054, 8074), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((8266, 8298), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(1070)', '(200)', '(113)', '(21)'], {}), '(1070, 200, 113, 21)\n', (8278, 8298), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((8514, 8545), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(1070)', '(80)', '(113)', '(21)'], {}), '(1070, 80, 113, 21)\n', (8526, 8545), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((8745, 8775), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(1100)', '(-1)', '(71)', '(32)'], {}), '(1100, -1, 71, 32)\n', (8757, 8775), False, 'from 
matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((8980, 9011), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(1170)', '(-1)', '(101)', '(32)'], {}), '(1170, -1, 101, 32)\n', (8992, 9011), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((9225, 9254), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(380)', '(60)', '(81)', '(81)'], {}), '(380, 60, 81, 81)\n', (9237, 9254), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((9476, 9505), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(380)', '(20)', '(81)', '(71)'], {}), '(380, 20, 81, 71)\n', (9488, 9505), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((9768, 9799), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(640)', '(620)', '(113)', '(32)'], {}), '(640, 620, 113, 32)\n', (9780, 9799), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((10028, 10059), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(760)', '(620)', '(113)', '(32)'], {}), '(760, 620, 113, 32)\n', (10040, 10059), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((10310, 10341), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(880)', '(620)', '(113)', '(32)'], {}), '(880, 620, 113, 32)\n', (10322, 10341), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((10574, 10605), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(460)', '(560)', '(113)', '(32)'], {}), '(460, 560, 113, 32)\n', (10586, 10605), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((10969, 10999), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(808)', '(25)', '(461)', '(26)'], {}), '(808, 25, 461, 26)\n', (10981, 10999), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, 
is_pyqt5\n'), ((11178, 11206), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(63)', '(2)', '(281)', '(26)'], {}), '(63, 2, 281, 26)\n', (11190, 11206), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((11450, 11480), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(10)', '(560)', '(441)', '(26)'], {}), '(10, 560, 441, 26)\n', (11462, 11480), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((11923, 11954), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(860)', '(220)', '(231)', '(20)'], {}), '(860, 220, 231, 20)\n', (11935, 11954), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((12364, 12392), 'matplotlib.backends.qt_compat.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(1024)', '(22)'], {}), '(0, 0, 1024, 22)\n', (12376, 12392), False, 'from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\n'), ((15055, 15129), 'PyQt5.QtWidgets.QMessageBox.about', 'QMessageBox.about', (['self.centralWidget', '"""Confirmed"""', '"""Properties confirmed"""'], {}), "(self.centralWidget, 'Confirmed', 'Properties confirmed')\n", (15072, 15129), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QProgressBar, QPushButton, QMessageBox\n'), ((15156, 15232), 'PyQt5.QtWidgets.QMessageBox.about', 'QMessageBox.about', (['self.centralWidget', '"""Check values"""', '"""Check values again!"""'], {}), "(self.centralWidget, 'Check values', 'Check values again!')\n", (15173, 15232), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QProgressBar, QPushButton, QMessageBox\n'), ((16179, 16253), 'PyQt5.QtWidgets.QMessageBox.about', 'QMessageBox.about', (['self.centralWidget', '"""Confirmed"""', '"""Properties confirmed"""'], {}), "(self.centralWidget, 'Confirmed', 'Properties confirmed')\n", (16196, 16253), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QProgressBar, QPushButton, QMessageBox\n'), 
((16280, 16356), 'PyQt5.QtWidgets.QMessageBox.about', 'QMessageBox.about', (['self.centralWidget', '"""Check values"""', '"""Check values again!"""'], {}), "(self.centralWidget, 'Check values', 'Check values again!')\n", (16297, 16356), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QProgressBar, QPushButton, QMessageBox\n'), ((18735, 18746), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (18744, 18746), False, 'import os\n'), ((17755, 17831), 'PyQt5.QtWidgets.QMessageBox.about', 'QMessageBox.about', (['self.centralWidget', '"""Warning"""', '"""Please run a model first"""'], {}), "(self.centralWidget, 'Warning', 'Please run a model first')\n", (17772, 17831), False, 'from PyQt5.QtWidgets import QApplication, QDialog, QProgressBar, QPushButton, QMessageBox\n'), ((18011, 18040), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(40, 5)'}), '(figsize=(40, 5))\n', (18023, 18040), True, 'import matplotlib.pyplot as plt\n'), ((18247, 18279), 'PLOT.headc', 'PLOT.headc', (['self.mfxsect', 'self.a'], {}), '(self.mfxsect, self.a)\n', (18257, 18279), False, 'import T2H, PLOT\n'), ((18071, 18141), 'PLOT.fmfxsect', 'PLOT.fmfxsect', (['self.mf', 'self.model.mfdis', 'self.target_row', 'axheadxsect'], {}), '(self.mf, self.model.mfdis, self.target_row, axheadxsect)\n', (18084, 18141), False, 'import T2H, PLOT\n'), ((18175, 18215), 'PLOT.head', 'PLOT.head', (['self.mf', 'self.model.fdirmodel'], {}), '(self.mf, self.model.fdirmodel)\n', (18184, 18215), False, 'import T2H, PLOT\n'), ((19064, 19097), 'os.stat', 'os.stat', (['(self.path + self.l[file])'], {}), '(self.path + self.l[file])\n', (19071, 19097), False, 'import os\n'), ((19317, 19350), 'os.stat', 'os.stat', (['(self.path + self.l[file])'], {}), '(self.path + self.l[file])\n', (19324, 19350), False, 'import os\n'), ((19567, 19600), 'os.stat', 'os.stat', (['(self.path + self.l[file])'], {}), '(self.path + self.l[file])\n', (19574, 19600), False, 'import os\n'), ((19822, 19855), 'os.stat', 'os.stat', 
(['(self.path + self.l[file])'], {}), '(self.path + self.l[file])\n', (19829, 19855), False, 'import os\n')] |
'''
file: donkey_env.py
author: <NAME>
date: 2018-08-31
'''
import os
from threading import Thread
import numpy as np
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from donkey_gym.envs.donkey_sim import DonkeyUnitySimContoller
from donkey_gym.envs.donkey_proc import DonkeyUnityProcess
class DonkeyEnv(gym.Env):
    """OpenAI Gym environment for the Donkey Unity simulator.

    Launches the simulator binary as a subprocess (unless configured to
    self-start) and communicates with it through a DonkeyUnitySimContoller.

    Configuration environment variables:
      DONKEY_SIM_PATH     - path to the simulator executable
                            (falls back to "self_start": launch it yourself)
      DONKEY_SIM_PORT     - TCP port for the simulator (default 9090)
      DONKEY_SIM_HEADLESS - '1' to run the simulator without a window
    """

    metadata = {
        "render.modes": ["human", "rgb_array"],
    }

    # Order of the components in the continuous action vector.
    ACTION = ["steer", "throttle"]

    def __init__(self, level, time_step=0.05, frame_skip=2):
        """Create the environment and start/connect to the simulator.

        :param level: scene (track) index to load in the simulator
        :param time_step: simulation step length, in seconds
        :param frame_skip: how many times each action is repeated per step()
        """
        print("starting DonkeyGym env")

        # start Unity simulation subprocess
        self.proc = DonkeyUnityProcess()

        # Catch only KeyError (variable missing) instead of a bare except,
        # so SystemExit/KeyboardInterrupt are never silently swallowed.
        try:
            exe_path = os.environ['DONKEY_SIM_PATH']
        except KeyError:
            print("Missing DONKEY_SIM_PATH environment var. Using defaults")
            # you must start the executable on your own
            exe_path = "self_start"

        try:
            port = int(os.environ['DONKEY_SIM_PORT'])
        except (KeyError, ValueError):
            # KeyError: variable missing; ValueError: value is not an integer.
            print("Missing DONKEY_SIM_PORT environment var. Using defaults")
            port = 9090

        try:
            headless = os.environ['DONKEY_SIM_HEADLESS'] == '1'
        except KeyError:
            print("Missing DONKEY_SIM_HEADLESS environment var. Using defaults")
            headless = False

        self.proc.start(exe_path, headless=headless, port=port)

        # start simulation com
        self.viewer = DonkeyUnitySimContoller(level=level, time_step=time_step, port=port)

        # steering
        # TODO(r7vme): Add throttle
        self.action_space = spaces.Box(low=np.array([-1.0]), high=np.array([1.0]))

        # camera sensor data
        self.observation_space = spaces.Box(0, 255, self.viewer.get_sensor_size(), dtype=np.uint8)

        # simulation related variables.
        self.seed()

        # Frame Skipping
        self.frame_skip = frame_skip

        # wait until the simulator scene is fully loaded
        self.viewer.wait_until_loaded()

    def close(self):
        """Shut down the simulator subprocess."""
        self.proc.quit()

    def seed(self, seed=None):
        """Seed the environment's random number generator.

        :param seed: optional seed value
        :return: list containing the seed actually used
        """
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):
        """Apply *action* frame_skip times in a row.

        :param action: [steer, throttle] action vector
        :return: (observation, reward, done, info) of the final repeated frame
        """
        for _ in range(self.frame_skip):
            self.viewer.take_action(action)
            observation, reward, done, info = self.viewer.observe()
        return observation, reward, done, info

    def reset(self):
        """Reset the simulator and return the first observation."""
        self.viewer.reset()
        observation, reward, done, info = self.viewer.observe()
        return observation

    def render(self, mode="human", close=False):
        """Render the scene; when *close* is True, quit the viewer first."""
        if close:
            self.viewer.quit()
        return self.viewer.render(mode)

    def is_game_over(self):
        """Return True when the simulator reports the episode is over."""
        return self.viewer.is_game_over()
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##
class GeneratedRoadsEnv(DonkeyEnv):
    """DonkeyEnv preconfigured for the generated-roads scene (level 0)."""

    def __init__(self):
        super().__init__(level=0)
class WarehouseEnv(DonkeyEnv):
    """DonkeyEnv preconfigured for the warehouse scene (level 1)."""

    def __init__(self):
        super().__init__(level=1)
class AvcSparkfunEnv(DonkeyEnv):
    """DonkeyEnv preconfigured for the AVC Sparkfun scene (level 2)."""

    def __init__(self):
        super().__init__(level=2)
class GeneratedTrackEnv(DonkeyEnv):
    """DonkeyEnv preconfigured for the generated-track scene (level 3)."""

    def __init__(self):
        super().__init__(level=3)
| [
"gym.utils.seeding.np_random",
"donkey_gym.envs.donkey_sim.DonkeyUnitySimContoller",
"numpy.array",
"donkey_gym.envs.donkey_proc.DonkeyUnityProcess"
] | [((705, 725), 'donkey_gym.envs.donkey_proc.DonkeyUnityProcess', 'DonkeyUnityProcess', ([], {}), '()\n', (723, 725), False, 'from donkey_gym.envs.donkey_proc import DonkeyUnityProcess\n'), ((1535, 1603), 'donkey_gym.envs.donkey_sim.DonkeyUnitySimContoller', 'DonkeyUnitySimContoller', ([], {'level': 'level', 'time_step': 'time_step', 'port': 'port'}), '(level=level, time_step=time_step, port=port)\n', (1558, 1603), False, 'from donkey_gym.envs.donkey_sim import DonkeyUnitySimContoller\n'), ((2199, 2222), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (2216, 2222), False, 'from gym.utils import seeding\n'), ((1707, 1723), 'numpy.array', 'np.array', (['[-1.0]'], {}), '([-1.0])\n', (1715, 1723), True, 'import numpy as np\n'), ((1730, 1745), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1738, 1745), True, 'import numpy as np\n')] |
"""
____ _____ _ _ _
| _ \ | __ \ (_) | | |
| |_) |_ _ | |__) |_ _ _ __ _____| |__ _ _| |_ ___
| _ <| | | | | ___/ _` | '__|_ / | '_ \| | | | __/ _ \
| |_) | |_| | | | | (_| | | / /| | |_) | |_| | || __/
|____/ \__, | |_| \__,_|_| /___|_|_.__/ \__, |\__\___|
__/ | __/ |
|___/ |___/
____________________________________
/ Si necesitas ayuda, contáctame en \
\ https://parzibyte.me /
------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
Creado por Parzibyte (https://parzibyte.me).
------------------------------------------------------------------------------------------------
| IMPORTANTE |
Si vas a borrar este encabezado, considera:
Seguirme: https://parzibyte.me/blog/sigueme/
Y compartir mi blog con tus amigos
También tengo canal de YouTube: https://www.youtube.com/channel/UCroP4BTWjfM0CkGB6AFUoBg?sub_confirmation=1
Twitter: https://twitter.com/parzibyte
Facebook: https://facebook.com/parzibyte.fanpage
Instagram: https://instagram.com/parzibyte
Hacer una donación vía PayPal: https://paypal.me/LuisCabreraBenito
------------------------------------------------------------------------------------------------
"""
from flask import Flask, render_template, request, redirect, session, flash
# Create the Flask application instance for this module.
app = Flask(__name__)
"""
Clave secreta. Esta debe ser aleatoria, puedes generarla tú.
Primero instala Python y agrega python a la PATH: https://parzibyte.me/blog/2019/10/08/instalar-python-pip-64-bits-windows/
Luego abre una terminal y ejecuta:
python
Entrarás a la CLI de Python, ahí ejecuta:
import os; print(os.urandom(16));
Eso te dará algo como:
b'\x11\xad\xec\t\x99\x8f\xfa\x86\xe8A\xd9\x1a\xf6\x12Z\xf4'
Simplemente remplaza la clave que se ve a continuación con los bytes aleatorios que generaste
"""
# Secret key Flask uses to sign session cookies. The (Spanish) note above
# explains how to generate random bytes with os.urandom(16).
# NOTE(review): '\<KEY>' looks like a redacted placeholder — replace it with
# real random bytes before deploying.
app.secret_key = b'\<KEY>'
"""
Definición de rutas
"""
# Protegida. Solo pueden entrar los que han iniciado sesión
@app.route("/escritorio")
def escritorio():
return render_template("escritorio.html")
# Formulario para iniciar sesión
@app.route("/login")
def login():
return render_template("login.html")
# Manejar login
@app.route("/hacer_login", methods=["POST"])
def hacer_login():
correo = request.form["correo"]
palabra_secreta = request.form["palabra_secreta"]
# Aquí comparamos. Lo hago así de fácil por simplicidad
# En la vida real debería ser con una base de datos y una contraseña <PASSWORD>
if correo == "<EMAIL>" and palabra_secreta == "123":
# Si coincide, iniciamos sesión y además redireccionamos
session["usuario"] = correo
# Aquí puedes colocar más datos. Por ejemplo
# session["nivel"] = "administrador"
return redirect("/escritorio")
else:
# Si NO coincide, lo regresamos
flash("Correo o contraseña incorrectos")
return redirect("/login")
# Cerrar sesión
@app.route("/logout")
def logout():
session.pop("usuario", None)
return redirect("/login")
# Un "middleware" que se ejecuta antes de responder a cualquier ruta. Aquí verificamos si el usuario ha iniciado sesión
@app.before_request
def antes_de_cada_peticion():
    """Run before every request: force anonymous users to the login page.

    The login endpoints and static assets are always allowed through;
    every other path requires 'usuario' to be present in the session.
    """
    ruta = request.path
    rutas_publicas = ("/login", "/hacer_login", "/logout")
    es_publica = ruta in rutas_publicas or ruta.startswith("/static")
    if "usuario" not in session and not es_publica:
        flash("Inicia sesión para continuar")
        return redirect("/login")
    # Already logged in (or a public route): fall through and let Flask proceed.
# Start the development server, listening on all interfaces, port 8000.
# NOTE(review): debug=True enables the interactive Werkzeug debugger and
# must not be used in production.
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=8000, debug=True)
| [
"flask.render_template",
"flask.flash",
"flask.Flask",
"flask.redirect",
"flask.session.pop"
] | [((1540, 1555), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1545, 1555), False, 'from flask import Flask, render_template, request, redirect, session, flash\n'), ((2218, 2252), 'flask.render_template', 'render_template', (['"""escritorio.html"""'], {}), "('escritorio.html')\n", (2233, 2252), False, 'from flask import Flask, render_template, request, redirect, session, flash\n'), ((2334, 2363), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html')\n", (2349, 2363), False, 'from flask import Flask, render_template, request, redirect, session, flash\n'), ((3167, 3195), 'flask.session.pop', 'session.pop', (['"""usuario"""', 'None'], {}), "('usuario', None)\n", (3178, 3195), False, 'from flask import Flask, render_template, request, redirect, session, flash\n'), ((3207, 3225), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (3215, 3225), False, 'from flask import Flask, render_template, request, redirect, session, flash\n'), ((2952, 2975), 'flask.redirect', 'redirect', (['"""/escritorio"""'], {}), "('/escritorio')\n", (2960, 2975), False, 'from flask import Flask, render_template, request, redirect, session, flash\n'), ((3034, 3074), 'flask.flash', 'flash', (['"""Correo o contraseña incorrectos"""'], {}), "('Correo o contraseña incorrectos')\n", (3039, 3074), False, 'from flask import Flask, render_template, request, redirect, session, flash\n'), ((3090, 3108), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (3098, 3108), False, 'from flask import Flask, render_template, request, redirect, session, flash\n'), ((3671, 3708), 'flask.flash', 'flash', (['"""Inicia sesión para continuar"""'], {}), "('Inicia sesión para continuar')\n", (3676, 3708), False, 'from flask import Flask, render_template, request, redirect, session, flash\n'), ((3724, 3742), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (3732, 3742), False, 'from flask import Flask, 
render_template, request, redirect, session, flash\n')] |
import json
import traceback
from db_adapter.exceptions import DatabaseAdapterError
from db_adapter.logger import logger
"""
Source JSON Object would looks like this
e.g.:
{
'model' : 'wrfSE',
'version' : 'v3',
'parameters': { }
}
{
'model' : 'OBS_WATER_LEVEL',
'version' : '',
'parameters': {
"CHANNEL_CELL_MAP" : {
"594" : "Wellawatta", "1547": "Ingurukade", "3255": "Yakbedda", "3730": "Wellampitiya",
"7033": "<NAME>"
}, "FLOOD_PLAIN_CELL_MAP": { }
}
}
"""
def get_source_by_id(pool, id_):
    """Fetch a single source record by its primary key.

    :param pool: database connection pool
    :param id_: source id
    :return: the matching row if it exists, otherwise None
    """
    conn = pool.connection()
    try:
        with conn.cursor() as cur:
            matched = cur.execute("SELECT * FROM `source` WHERE `id`=%s", id_)
            return cur.fetchone() if matched > 0 else None
    except Exception as exception:
        error_message = "Retrieving source with source_id {} failed".format(id_)
        logger.error(error_message)
        traceback.print_exc()
        raise exception
    finally:
        if conn is not None:
            conn.close()
def get_source_id(pool, model, version) -> str:
    """Look up the id of the source identified by (model, version).

    :param pool: database connection pool
    :param model: model name
    :param version: model version
    :return: str: the source id if the record exists, otherwise None
    """
    conn = pool.connection()
    try:
        with conn.cursor() as cur:
            query = "SELECT `id` FROM `source` WHERE `model`=%s and `version`=%s"
            if cur.execute(query, (model, version)) > 0:
                return cur.fetchone()['id']
            return None
    except Exception as exception:
        error_message = "Retrieving source id: model={} and version={} failed.".format(model, version)
        logger.error(error_message)
        traceback.print_exc()
        raise exception
    finally:
        if conn is not None:
            conn.close()
def add_source(pool, model, version, parameters=None):
    """Insert a new source row unless (model, version) already exists.

    :param pool: database connection pool
    :param model: model name (string)
    :param version: model version (string)
    :param parameters: JSON-serializable dict of source parameters
    :return: True when a row was inserted into the 'Source' table, else False
    """
    conn = pool.connection()
    try:
        if get_source_id(pool=pool, model=model, version=version) is not None:
            logger.info("Source with model={} and version={} already exists in the database".format(model, version))
            return False
        with conn.cursor() as cur:
            insert_sql = "INSERT INTO `source` (`model`, `version`, `parameters`) VALUES ( %s, %s, %s)"
            inserted = cur.execute(insert_sql, (model, version, json.dumps(parameters)))
            conn.commit()
            return inserted > 0
    except Exception as exception:
        conn.rollback()
        error_message = "Insertion of source: model={}, version={} and parameters={} failed".format(model, version, parameters)
        logger.error(error_message)
        traceback.print_exc()
        raise exception
    finally:
        if conn is not None:
            conn.close()
def add_sources(sources, pool):
    """Add a batch of sources to the Source table.

    :param sources: list of dicts describing source attributes, each with
        'model', 'version' and 'parameters' keys, e.g.::

            {
                'model'     : 'wrfSE',
                'version'   : 'v3',
                'parameters': { }
            }
            {
                'model'     : 'OBS_WATER_LEVEL',
                'version'   : '',
                'parameters': {
                    "CHANNEL_CELL_MAP"     : {
                        "594" : "Wellawatta", "1547": "Ingurukade", "3255": "Yakbedda", "3730": "Wellampitiya",
                        "7033": "<NAME>"
                        }, "FLOOD_PLAIN_CELL_MAP": { }
                    }
            }

    :param pool: database connection pool
    :return: None; prints the outcome of each insertion and the model name
    """
    for spec in sources:
        model = spec.get('model')
        outcome = add_source(pool=pool, model=model, version=spec.get('version'),
                             parameters=spec.get('parameters'))
        print(outcome)
        print(model)
def delete_source(pool, model, version):
    """Remove the source identified by (model, version) from the Source table.

    :param pool: database connection pool
    :param model: model name (str)
    :param version: model version (str)
    :return: True when a row was deleted, False when no such record exists
    """
    conn = pool.connection()
    try:
        with conn.cursor() as cur:
            deleted = cur.execute("DELETE FROM `source` WHERE `model`=%s and `version`=%s",
                                  (model, version))
            conn.commit()
            if deleted > 0:
                return True
            logger.info("There's no record of source in the database with model={} and version={}".format(model, version))
            return False
    except Exception as exception:
        conn.rollback()
        error_message = "Deleting source with model={} and version={} failed.".format(model, version)
        logger.error(error_message)
        traceback.print_exc()
        raise exception
    finally:
        if conn is not None:
            conn.close()
def delete_source_by_id(pool, id_):
    """
    Delete the source with the given primary key from the Source table.

    :param pool: database connection pool
    :param id_: source id (primary key)
    :return: True when the row was deleted, False when no such id exists
    :raises Exception: re-raises any database error after rolling back
    """
    connection = pool.connection()
    try:
        with connection.cursor() as cursor:
            deleted = cursor.execute("DELETE FROM `source` WHERE `id`=%s", id_)
            connection.commit()
            if deleted > 0:
                return True
            logger.info("There's no record of source in the database with the source id {}".format(id_))
            return False
    except Exception as exception:
        connection.rollback()
        error_message = "Deleting source with id {} failed.".format(id_)
        logger.error(error_message)
        traceback.print_exc()
        raise exception
    finally:
        # Always return the connection to the pool.
        if connection is not None:
            connection.close()
def get_source_parameters(pool, model, version):
    """
    Retrieve the `parameters` field of the source matching (model, version).

    :param pool: database connection pool
    :param model: str
    :param version: str
    :return: the stored parameters when the source exists, otherwise None
    :raises Exception: re-raises any database error
    """
    connection = pool.connection()
    try:
        with connection.cursor() as cursor:
            matched = cursor.execute(
                "SELECT `parameters` FROM `source` WHERE `model`=%s and `version`=%s",
                (model, version),
            )
            if matched <= 0:
                return None
            return cursor.fetchone()['parameters']
    except Exception as exception:
        error_message = "Retrieving source parameters: model={} and version={} failed.".format(model, version)
        logger.error(error_message)
        traceback.print_exc()
        raise exception
    finally:
        # Always return the connection to the pool.
        if connection is not None:
            connection.close()
| [
"db_adapter.logger.logger.error",
"traceback.print_exc",
"json.dumps"
] | [((1322, 1349), 'db_adapter.logger.logger.error', 'logger.error', (['error_message'], {}), '(error_message)\n', (1334, 1349), False, 'from db_adapter.logger import logger\n'), ((1358, 1379), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1377, 1379), False, 'import traceback\n'), ((2245, 2272), 'db_adapter.logger.logger.error', 'logger.error', (['error_message'], {}), '(error_message)\n', (2257, 2272), False, 'from db_adapter.logger import logger\n'), ((2281, 2302), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2300, 2302), False, 'import traceback\n'), ((3563, 3590), 'db_adapter.logger.logger.error', 'logger.error', (['error_message'], {}), '(error_message)\n', (3575, 3590), False, 'from db_adapter.logger import logger\n'), ((3599, 3620), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3618, 3620), False, 'import traceback\n'), ((5535, 5562), 'db_adapter.logger.logger.error', 'logger.error', (['error_message'], {}), '(error_message)\n', (5547, 5562), False, 'from db_adapter.logger import logger\n'), ((5571, 5592), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5590, 5592), False, 'import traceback\n'), ((6518, 6545), 'db_adapter.logger.logger.error', 'logger.error', (['error_message'], {}), '(error_message)\n', (6530, 6545), False, 'from db_adapter.logger import logger\n'), ((6554, 6575), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6573, 6575), False, 'import traceback\n'), ((7511, 7538), 'db_adapter.logger.logger.error', 'logger.error', (['error_message'], {}), '(error_message)\n', (7523, 7538), False, 'from db_adapter.logger import logger\n'), ((7547, 7568), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7566, 7568), False, 'import traceback\n'), ((3089, 3111), 'json.dumps', 'json.dumps', (['parameters'], {}), '(parameters)\n', (3099, 3111), False, 'import json\n')] |
import unittest
from katas.kyu_7.binary_addition import add_binary
class AddBinaryTestCase(unittest.TestCase):
    """Checks add_binary against hand-computed binary sums."""

    def test_equals(self):
        self.assertEqual('10', add_binary(1, 1))

    def test_equals_2(self):
        self.assertEqual('1', add_binary(0, 1))

    def test_equals_3(self):
        self.assertEqual('1', add_binary(1, 0))

    def test_equals_4(self):
        self.assertEqual('100', add_binary(2, 2))

    def test_equals_5(self):
        self.assertEqual('111111', add_binary(51, 12))
| [
"katas.kyu_7.binary_addition.add_binary"
] | [((166, 182), 'katas.kyu_7.binary_addition.add_binary', 'add_binary', (['(1)', '(1)'], {}), '(1, 1)\n', (176, 182), False, 'from katas.kyu_7.binary_addition import add_binary\n'), ((245, 261), 'katas.kyu_7.binary_addition.add_binary', 'add_binary', (['(0)', '(1)'], {}), '(0, 1)\n', (255, 261), False, 'from katas.kyu_7.binary_addition import add_binary\n'), ((323, 339), 'katas.kyu_7.binary_addition.add_binary', 'add_binary', (['(1)', '(0)'], {}), '(1, 0)\n', (333, 339), False, 'from katas.kyu_7.binary_addition import add_binary\n'), ((401, 417), 'katas.kyu_7.binary_addition.add_binary', 'add_binary', (['(2)', '(2)'], {}), '(2, 2)\n', (411, 417), False, 'from katas.kyu_7.binary_addition import add_binary\n'), ((481, 499), 'katas.kyu_7.binary_addition.add_binary', 'add_binary', (['(51)', '(12)'], {}), '(51, 12)\n', (491, 499), False, 'from katas.kyu_7.binary_addition import add_binary\n')] |
from presentation.models import Author, Follower, Post, Comment
from django.shortcuts import get_object_or_404
from presentation.Serializers.comment_serializer import CommentSerializer
from rest_framework import viewsets, status
from django.http import JsonResponse
from rest_framework.response import Response
import uuid
from urllib.parse import urlparse
from . import urlutil
'''
URL: ://service/author/{author_id}/posts/{post_id}/comments
GET get comments of the post
POST if you post an object of “type”:”comment”, it will add your comment to the post
'''
def getAuthorIDFromRequestURL(request, id):
    """Build the absolute author URL from the request's safe host and the author id."""
    safe_host = urlutil.getSafeURL(request.build_absolute_uri())
    return f"{safe_host}/author/{id}"
def getPostIDFromRequestURL(request, id):
    """Return the relative post path fragment for *id* (*request* is unused)."""
    return "/posts/{}".format(id)
def getCommentIDFromRequestURL(request, id):
    """Return the relative comment path fragment for *id* (*request* is unused)."""
    return "/comments/{}".format(id)
class CommentViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for comments attached to a post.

    URL: ://service/author/{author_id}/posts/{post_id}/comments
    GET   returns the comments of the post
    POST  adds a "type":"comment" object as a comment on the post
    """
    serializer_class = CommentSerializer
    queryset = Comment.objects.all()

    # GET a list of comments of the post
    def list(self, request, *args, **kwargs):
        """Return all comments of the addressed post as a JSON array."""
        author_id = getAuthorIDFromRequestURL(
            request, self.kwargs['author_id'])
        post_id = getPostIDFromRequestURL(
            request, self.kwargs['post_id'])
        post_id = author_id + post_id
        queryset = Comment.objects.filter(post=post_id)
        if queryset.exists():
            comments = list(queryset.values())
            return JsonResponse(comments, safe=False)
        else:
            return JsonResponse([], safe=False)

    # GET a single comment using comment_id
    def retrieve(self, request, *args, **kwargs):
        """Return a single comment addressed by its full URL id.

        Bug fix: the previous code called ``Comment.objects.get(...)`` and
        then ``queryset.exists()``; ``get()`` returns a model instance (or
        raises ``DoesNotExist``) and has no ``exists()`` method, so this
        path always crashed.  ``filter()`` returns a queryset, so the
        emptiness check works and a missing comment yields a 404 response.
        """
        # NOTE(review): [:-1] presumably strips a trailing slash from the
        # request URL - confirm against the URL configuration.
        comment_id = request.build_absolute_uri()[:-1]
        queryset = Comment.objects.filter(id=comment_id)
        if queryset.exists():
            serializer = CommentSerializer(queryset.first())
            return Response(serializer.data, 200)
        else:
            return Response({"msg": "No comment for given id"}, 404)

    # POST a new comment under a post
    def create(self, request, *args, **kwargs):
        """Create a comment on the addressed post from the request payload."""
        request_data = request.data.copy()
        # assume the id of the commenter is part of the data
        # CHANGE THIS LATER!
        commenter_id = request_data.get('author', None)
        author_id = getAuthorIDFromRequestURL(
            request, self.kwargs['author_id'])
        post_id = getPostIDFromRequestURL(
            request, self.kwargs['post_id'])
        post_id = author_id + post_id
        comment = request_data.get('comment', None)
        content_type = request_data.get('contentType', None)
        # Generate a unique id for the new comment (hex is already a str).
        cuuid = uuid.uuid4().hex
        comment_id = f"{post_id}/comments/{cuuid}"
        comment_data = {'type': 'comment', 'author': commenter_id, 'comment': comment, 'contentType': content_type,
                        'post': post_id, 'id': comment_id}
        serializer = self.serializer_class(data=comment_data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, 200)
        else:
            return Response(serializer.errors,
                            status=400)

    def delete(self, request, *args, **kwargs):
        """Delete a comment; responds 404 when the post or comment is missing."""
        author_id = getAuthorIDFromRequestURL(
            request, self.kwargs['author_id'])
        post_id = getPostIDFromRequestURL(
            request, self.kwargs['post_id'])
        post_id = author_id + post_id
        # Raises Http404 when the post has no comments at all.
        comments = get_object_or_404(Comment, post=post_id)
        comment_id = getCommentIDFromRequestURL(
            request, self.kwargs['comment_id'])
        comment_id = post_id + comment_id
        comment = get_object_or_404(Comment, id=comment_id)
        try:
            comment.delete()
        except ValueError:
            return Response("No such a comment. Deletion fails.", 500)
        return Response("Delete successful")
| [
"presentation.models.Comment.objects.filter",
"django.http.JsonResponse",
"presentation.Serializers.comment_serializer.CommentSerializer",
"presentation.models.Comment.objects.get",
"django.shortcuts.get_object_or_404",
"presentation.models.Comment.objects.all",
"uuid.uuid4",
"rest_framework.response.... | [((1033, 1054), 'presentation.models.Comment.objects.all', 'Comment.objects.all', ([], {}), '()\n', (1052, 1054), False, 'from presentation.models import Author, Follower, Post, Comment\n'), ((1382, 1418), 'presentation.models.Comment.objects.filter', 'Comment.objects.filter', ([], {'post': 'post_id'}), '(post=post_id)\n', (1404, 1418), False, 'from presentation.models import Author, Follower, Post, Comment\n'), ((1855, 1889), 'presentation.models.Comment.objects.get', 'Comment.objects.get', ([], {'id': 'comment_id'}), '(id=comment_id)\n', (1874, 1889), False, 'from presentation.models import Author, Follower, Post, Comment\n'), ((3573, 3613), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Comment'], {'post': 'post_id'}), '(Comment, post=post_id)\n', (3590, 3613), False, 'from django.shortcuts import get_object_or_404\n'), ((3771, 3812), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Comment'], {'id': 'comment_id'}), '(Comment, id=comment_id)\n', (3788, 3812), False, 'from django.shortcuts import get_object_or_404\n'), ((3996, 4025), 'rest_framework.response.Response', 'Response', (['"""Delete successful"""'], {}), "('Delete successful')\n", (4004, 4025), False, 'from rest_framework.response import Response\n'), ((1589, 1623), 'django.http.JsonResponse', 'JsonResponse', (['comments'], {'safe': '(False)'}), '(comments, safe=False)\n', (1601, 1623), False, 'from django.http import JsonResponse\n'), ((1657, 1685), 'django.http.JsonResponse', 'JsonResponse', (['[]'], {'safe': '(False)'}), '([], safe=False)\n', (1669, 1685), False, 'from django.http import JsonResponse\n'), ((1945, 1972), 'presentation.Serializers.comment_serializer.CommentSerializer', 'CommentSerializer', (['queryset'], {}), '(queryset)\n', (1962, 1972), False, 'from presentation.Serializers.comment_serializer import CommentSerializer\n'), ((1992, 2022), 'rest_framework.response.Response', 'Response', (['serializer.data', '(200)'], {}), 
'(serializer.data, 200)\n', (2000, 2022), False, 'from rest_framework.response import Response\n'), ((2056, 2105), 'rest_framework.response.Response', 'Response', (["{'msg': 'No comment for given id'}", '(404)'], {}), "({'msg': 'No comment for given id'}, 404)\n", (2064, 2105), False, 'from rest_framework.response import Response\n'), ((3153, 3183), 'rest_framework.response.Response', 'Response', (['serializer.data', '(200)'], {}), '(serializer.data, 200)\n', (3161, 3183), False, 'from rest_framework.response import Response\n'), ((3217, 3256), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': '(400)'}), '(serializer.errors, status=400)\n', (3225, 3256), False, 'from rest_framework.response import Response\n'), ((2764, 2776), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2774, 2776), False, 'import uuid\n'), ((3929, 3980), 'rest_framework.response.Response', 'Response', (['"""No such a comment. Deletion fails."""', '(500)'], {}), "('No such a comment. Deletion fails.', 500)\n", (3937, 3980), False, 'from rest_framework.response import Response\n')] |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
String functions on Koalas Series
"""
from typing import TYPE_CHECKING
import numpy as np
from pyspark.sql.types import StringType, BinaryType, BooleanType
from databricks.koalas.base import _wrap_accessor_pandas
if TYPE_CHECKING:
import databricks.koalas as ks
class StringMethods(object):
    """String methods for Koalas Series"""

    def __init__(self, series: 'ks.Series'):
        if not isinstance(series.spark_type, (StringType, BinaryType)):
            raise ValueError(
                "Cannot call StringMethods on type {}"
                .format(series.spark_type))
        self._data = series
        self.name = self._data.name

    def _apply(self, func, return_type) -> 'ks.Series':
        """Map a pandas ``.str`` accessor function over the wrapped series.

        Every public string method funnels through this helper, which wraps
        the pandas-level function with ``_wrap_accessor_pandas`` and
        re-aliases the result so the series keeps its original name.
        """
        return _wrap_accessor_pandas(self, func, return_type).alias(self.name)

    # Methods
    def capitalize(self) -> 'ks.Series':
        """
        Convert Strings in the series to be capitalized.
        """
        return self._apply(lambda x: x.str.capitalize(), StringType())

    def lower(self) -> 'ks.Series':
        """
        Convert strings in the Series/Index to all lowercase.
        """
        return self._apply(lambda x: x.str.lower(), StringType())

    def upper(self) -> 'ks.Series':
        """
        Convert strings in the Series/Index to all uppercase.
        """
        return self._apply(lambda x: x.str.upper(), StringType())

    def swapcase(self) -> 'ks.Series':
        """
        Convert strings in the Series/Index to be swapcased.
        """
        return self._apply(lambda x: x.str.swapcase(), StringType())

    # np.nan is the canonical spelling (the np.NaN alias was removed in
    # NumPy 2.0); both name the identical float object, so callers see no
    # change in the default.
    def startswith(self, pattern, na=np.nan) -> 'ks.Series':
        """
        Test if the start of each string element matches a pattern.

        Equivalent to :func:`str.startswith`.

        Parameters
        ----------
        pattern : str
            Character sequence. Regular expressions are not accepted.
        na : object, default NaN
            Object shown if element is not a string.

        Returns
        -------
        Series of bool
            Koalas Series of booleans indicating whether the given pattern
            matches the start of each string element.
        """
        return self._apply(lambda x: x.str.startswith(pattern, na), BooleanType())

    def endswith(self, pattern, na=np.nan) -> 'ks.Series':
        """
        Test if the end of each string element matches a pattern.

        Equivalent to :func:`str.endswith`.

        Parameters
        ----------
        pattern : str
            Character sequence. Regular expressions are not accepted.
        na : object, default NaN
            Object shown if element is not a string.

        Returns
        -------
        Series of bool
            Koalas Series of booleans indicating whether the given pattern
            matches the end of each string element.
        """
        return self._apply(lambda x: x.str.endswith(pattern, na), BooleanType())

    def strip(self, to_strip=None) -> 'ks.Series':
        """
        Remove leading and trailing characters.

        Strip whitespaces (including newlines) or a set of specified
        characters from each string in the Series/Index from left and
        right sides. Equivalent to :func:`str.strip`.

        Parameters
        ----------
        to_strip : str
            Specifying the set of characters to be removed. All combinations
            of this set of characters will be stripped. If None then
            whitespaces are removed.

        Returns
        -------
        Series of str
        """
        return self._apply(lambda x: x.str.strip(to_strip), StringType())

    def lstrip(self, to_strip=None) -> 'ks.Series':
        """
        Remove leading characters.

        Strip whitespaces (including newlines) or a set of specified
        characters from each string in the Series/Index from left side.
        Equivalent to :func:`str.lstrip`.

        Parameters
        ----------
        to_strip : str
            Specifying the set of characters to be removed. All combinations
            of this set of characters will be stripped. If None then
            whitespaces are removed.

        Returns
        -------
        Series of str
        """
        return self._apply(lambda x: x.str.lstrip(to_strip), StringType())

    def rstrip(self, to_strip=None) -> 'ks.Series':
        """
        Remove trailing characters.

        Strip whitespaces (including newlines) or a set of specified
        characters from each string in the Series/Index from right side.
        Equivalent to :func:`str.rstrip`.

        Parameters
        ----------
        to_strip : str
            Specifying the set of characters to be removed. All combinations
            of this set of characters will be stripped. If None then
            whitespaces are removed.

        Returns
        -------
        Series of str
        """
        return self._apply(lambda x: x.str.rstrip(to_strip), StringType())

    def get(self, i) -> 'ks.Series':
        """
        Extract element from each string in the Series/Index at the
        specified position.

        Parameters
        ----------
        i : int
            Position of element to extract.

        Returns
        -------
        Series of objects
        """
        return self._apply(lambda x: x.str.get(i), StringType())

    def isalnum(self) -> 'ks.Series':
        """
        Check whether all characters in each string are alphanumeric.

        This is equivalent to running the Python string method
        :func:`str.isalnum` for each element of the Series/Index.
        If a string has zero characters, False is returned for that check.
        """
        return self._apply(lambda x: x.str.isalnum(), BooleanType())

    def isalpha(self) -> 'ks.Series':
        """
        Check whether all characters in each string are alphabetic.

        This is equivalent to running the Python string method
        :func:`str.isalpha` for each element of the Series/Index.
        If a string has zero characters, False is returned for that check.
        """
        return self._apply(lambda x: x.str.isalpha(), BooleanType())

    def isdigit(self) -> 'ks.Series':
        """
        Check whether all characters in each string are digits.

        This is equivalent to running the Python string method
        :func:`str.isdigit` for each element of the Series/Index.
        If a string has zero characters, False is returned for that check.
        """
        return self._apply(lambda x: x.str.isdigit(), BooleanType())

    def isspace(self) -> 'ks.Series':
        """
        Check whether all characters in each string are whitespaces.

        This is equivalent to running the Python string method
        :func:`str.isspace` for each element of the Series/Index.
        If a string has zero characters, False is returned for that check.
        """
        return self._apply(lambda x: x.str.isspace(), BooleanType())

    def islower(self) -> 'ks.Series':
        """
        Check whether all characters in each string are lowercase.

        This is equivalent to running the Python string method
        :func:`str.islower` for each element of the Series/Index.
        If a string has zero characters, False is returned for that check.
        """
        return self._apply(lambda x: x.str.islower(), BooleanType())

    def isupper(self) -> 'ks.Series':
        """
        Check whether all characters in each string are uppercase.

        This is equivalent to running the Python string method
        :func:`str.isupper` for each element of the Series/Index.
        If a string has zero characters, False is returned for that check.
        """
        return self._apply(lambda x: x.str.isupper(), BooleanType())

    def istitle(self) -> 'ks.Series':
        """
        Check whether all characters in each string are titlecase.

        This is equivalent to running the Python string method
        :func:`str.istitle` for each element of the Series/Index.
        If a string has zero characters, False is returned for that check.
        """
        return self._apply(lambda x: x.str.istitle(), BooleanType())

    def isnumeric(self) -> 'ks.Series':
        """
        Check whether all characters in each string are numeric.

        This is equivalent to running the Python string method
        :func:`str.isnumeric` for each element of the Series/Index.
        If a string has zero characters, False is returned for that check.
        """
        return self._apply(lambda x: x.str.isnumeric(), BooleanType())

    def isdecimal(self) -> 'ks.Series':
        """
        Check whether all characters in each string are decimals.

        This is equivalent to running the Python string method
        :func:`str.isdecimal` for each element of the Series/Index.
        If a string has zero characters, False is returned for that check.
        """
        return self._apply(lambda x: x.str.isdecimal(), BooleanType())
| [
"pyspark.sql.types.StringType",
"pyspark.sql.types.BooleanType"
] | [((1494, 1506), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1504, 1506), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((1762, 1774), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (1772, 1774), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((2030, 2042), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2040, 2042), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((2303, 2315), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (2313, 2315), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((3067, 3080), 'pyspark.sql.types.BooleanType', 'BooleanType', ([], {}), '()\n', (3078, 3080), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((3822, 3835), 'pyspark.sql.types.BooleanType', 'BooleanType', ([], {}), '()\n', (3833, 3835), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((4594, 4606), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (4604, 4606), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((5344, 5356), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (5354, 5356), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((6096, 6108), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (6106, 6108), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((6556, 6568), 'pyspark.sql.types.StringType', 'StringType', ([], {}), '()\n', (6566, 6568), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((7041, 7054), 'pyspark.sql.types.BooleanType', 'BooleanType', ([], {}), '()\n', (7052, 7054), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((7525, 7538), 'pyspark.sql.types.BooleanType', 'BooleanType', ([], {}), '()\n', (7536, 7538), 
False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((8005, 8018), 'pyspark.sql.types.BooleanType', 'BooleanType', ([], {}), '()\n', (8016, 8018), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((8490, 8503), 'pyspark.sql.types.BooleanType', 'BooleanType', ([], {}), '()\n', (8501, 8503), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((8973, 8986), 'pyspark.sql.types.BooleanType', 'BooleanType', ([], {}), '()\n', (8984, 8986), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((9456, 9469), 'pyspark.sql.types.BooleanType', 'BooleanType', ([], {}), '()\n', (9467, 9469), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((9939, 9952), 'pyspark.sql.types.BooleanType', 'BooleanType', ([], {}), '()\n', (9950, 9952), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((10426, 10439), 'pyspark.sql.types.BooleanType', 'BooleanType', ([], {}), '()\n', (10437, 10439), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n'), ((10914, 10927), 'pyspark.sql.types.BooleanType', 'BooleanType', ([], {}), '()\n', (10925, 10927), False, 'from pyspark.sql.types import StringType, BinaryType, BooleanType\n')] |
from src import most_invoices
EXPECTED_RESULT = (14, 'Berlin')
def test_most_invoices() -> None:
    """The city with the most invoices must match the expected (count, city)."""
    assert most_invoices.get_city_with_most_invoices() == EXPECTED_RESULT
| [
"src.most_invoices.get_city_with_most_invoices"
] | [((120, 163), 'src.most_invoices.get_city_with_most_invoices', 'most_invoices.get_city_with_most_invoices', ([], {}), '()\n', (161, 163), False, 'from src import most_invoices\n')] |
from __future__ import annotations
import csv
import pathlib
import textwrap
from operator import itemgetter
from typing import Any
from typing import Callable
from typing import Dict
from typing import IO
from typing import Iterable
from typing import Iterator
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Set
from typing import Tuple
from typing import TypeVar
from typing import Union
from typing import cast
import openpyxl
import xlsxwriter
from lark import ParseError
from spinta import commands
from spinta import spyna
from spinta.backends import Backend
from spinta.backends.components import BackendOrigin
from spinta.components import Context
from spinta.datasets.components import Resource
from spinta.dimensions.comments.components import Comment
from spinta.dimensions.enum.components import EnumItem
from spinta.components import Model
from spinta.components import Namespace
from spinta.components import Property
from spinta.core.enums import Access
from spinta.core.ufuncs import unparse
from spinta.datasets.components import Dataset
from spinta.dimensions.enum.components import Enums
from spinta.dimensions.lang.components import LangData
from spinta.dimensions.prefix.components import UriPrefix
from spinta.exceptions import MultipleErrors
from spinta.exceptions import PropertyNotFound
from spinta.manifests.components import Manifest
from spinta.manifests.helpers import load_manifest_nodes
from spinta.manifests.tabular.components import ACCESS
from spinta.manifests.tabular.components import BackendRow
from spinta.manifests.tabular.components import BaseRow
from spinta.manifests.tabular.components import CommentData
from spinta.manifests.tabular.components import DESCRIPTION
from spinta.manifests.tabular.components import DatasetRow
from spinta.manifests.tabular.components import ParamRow
from spinta.manifests.tabular.components import EnumRow
from spinta.manifests.tabular.components import ID
from spinta.manifests.tabular.components import MANIFEST_COLUMNS
from spinta.manifests.tabular.components import ManifestColumn
from spinta.manifests.tabular.components import ManifestRow
from spinta.manifests.tabular.components import ManifestTableRow
from spinta.manifests.tabular.components import ModelRow
from spinta.manifests.tabular.components import PREPARE
from spinta.manifests.tabular.components import PROPERTY
from spinta.manifests.tabular.components import PrefixRow
from spinta.manifests.tabular.components import PropertyRow
from spinta.manifests.tabular.components import REF
from spinta.manifests.tabular.components import ResourceRow
from spinta.manifests.tabular.components import SOURCE
from spinta.manifests.tabular.components import TITLE
from spinta.manifests.tabular.components import TabularFormat
from spinta.manifests.tabular.constants import DATASET
from spinta.manifests.tabular.formats.gsheets import read_gsheets_manifest
from spinta.spyna import SpynaAST
from spinta.types.datatype import Ref
from spinta.utils.data import take
from spinta.utils.schema import NA
from spinta.utils.schema import NotAvailable
# A parsed manifest row: (line number, row data keyed by column name).
ParsedRow = Tuple[int, Dict[str, Any]]

# Primary dimension columns of a manifest row; at most one of these may be
# filled per row (enforced by `_detect_dimension`).
MAIN_DIMENSIONS = [
    'dataset',
    'resource',
    'base',
    'model',
    'property',
]
# Additional dimension names accepted in the `type` column when no main
# dimension column is filled ('' marks a plain continuation row).
EXTRA_DIMENSIONS = [
    '',
    'prefix',
    'enum',
    'param',
    'comment',
    'ns',
    'lang',
]
class TabularManifestError(Exception):
    """Raised when a tabular manifest is malformed (unknown columns,
    conflicting dimensions, duplicate definitions, ...)."""
    pass
def _detect_header(
    path: Optional[str],
    line: int,  # Line number
    row: Iterable[str],
) -> List[str]:
    """Normalise a raw header row and reject columns outside the known set."""
    header = [cell.strip().lower() for cell in row]
    unexpected = set(header[:len(DATASET)]) - set(DATASET)
    if unexpected:
        # Report unknown columns in the order they appear in the header.
        unexpected = ', '.join(sorted(unexpected, key=header.index))
        raise TabularManifestError(
            f"{path}:{line}: Unknown columns: {unexpected}."
        )
    return header
def _detect_dimension(
    path: Optional[pathlib.Path],
    line: str,  # Line number with a prefix (depends on manifest format)
    row: Dict[str, str],
) -> Optional[str]:
    """Return the dimension name of a row, or '' for a continuation row."""
    found = [dim for dim in MAIN_DIMENSIONS if row[dim]]
    if len(found) > 1:
        found = ', '.join(found)
        raise TabularManifestError(
            f"{path}:{line}: In one row only single dimension can be used, "
            f"but found more than one: {found}"
        )
    if found:
        return found[0]
    if row['type']:
        if row['type'] not in EXTRA_DIMENSIONS:
            raise TabularManifestError(
                f"{path}:{line}:type: Unknown additional dimension name "
                f"{row['type']}."
            )
        return row['type']
    return ''
def _parse_spyna(
    reader: TabularReader,
    formula: str,
) -> Union[SpynaAST, NotAvailable, None]:
    """Parse a formula into a Spyna AST; an empty formula yields NA.

    A parse failure is reported through ``reader.error`` (which raises a
    TabularManifestError), so no AST is returned in that case.
    """
    if not formula:
        return NA
    try:
        return spyna.parse(formula)
    except ParseError as e:
        reader.error(f"Error while parsing formula {formula!r}:\n{e}")
class TabularReader:
    """Base class for stateful readers of one manifest dimension.

    A reader is entered when its dimension starts, fed rows via `read` and
    `append`, and left once a newly-detected dimension `release`s it.
    """
    state: State
    path: str
    line: str  # Line number (with a format-specific prefix) used in error messages.
    type: str
    name: str
    data: ManifestRow  # Used when `appendable` is False
    rows: List[Dict[str, Any]]  # Used when `appendable` is True
    appendable: bool = False  # Tells if reader is appendable.
    def __init__(
        self,
        state: State,
        path: str,
        line: str,
    ):
        self.state = state
        self.path = path
        self.line = line
        self.data = {}
        self.rows = []
    def __str__(self):
        return f"<{type(self).__name__} name={self.name!r}>"
    def read(self, row: Dict[str, str]) -> None:
        """Parse the dimension's own row; implemented by subclasses."""
        raise NotImplementedError
    def append(self, row: Dict[str, str]) -> None:
        """Handle a continuation row; by default updates are rejected."""
        if any(row.values()):
            self.error(
                f"Updates are not supported in context of {self.type!r}."
            )
    def release(self, reader: TabularReader = None) -> bool:
        """Return True when this reader may be closed in favour of `reader`."""
        raise NotImplementedError
    def items(self) -> Iterator[ParsedRow]:
        """Yield (line, data) pairs: all collected rows when appendable, else one."""
        if self.appendable:
            for data in self.rows:
                yield self.line, data
        else:
            yield self.line, self.data
    def enter(self) -> None:
        raise NotImplementedError
    def leave(self) -> None:
        raise NotImplementedError
    def error(self, message: str) -> None:
        """Raise a TabularManifestError pointing at the current file location."""
        raise TabularManifestError(f"{self.path}:{self.line}: {message}")
class ManifestReader(TabularReader):
    """Top-level reader; tracks dataset and namespace names for the file."""

    type: str = 'manifest'
    datasets: Set[str]
    namespaces: Set[str]
    data: ManifestTableRow

    def read(self, row: ManifestRow) -> None:
        self.name = str(self.path)
        self.data = {'type': 'manifest'}

    def release(self, reader: TabularReader = None) -> bool:
        # The manifest reader is only released at the very end of the input.
        return reader is None

    def enter(self) -> None:
        self.datasets = set()
        self.namespaces = set()
        self.state.manifest = self

    def leave(self) -> None:
        self.state.manifest = None
class DatasetReader(TabularReader):
    """Reads a `dataset` dimension row into a DatasetRow dict."""
    type: str = 'dataset'
    data: DatasetRow
    def read(self, row: Dict[str, str]) -> None:
        """Parse the dataset row; a duplicate dataset name is an error."""
        self.name = row['dataset']
        if row['dataset'] in self.state.manifest.datasets:
            self.error("Dataset already defined.")
        self.data = {
            'type': 'dataset',
            'id': row['id'],
            'name': row['dataset'],
            'level': row['level'],
            'access': row['access'],
            'title': row['title'],
            'description': row['description'],
            'resources': {},  # Filled in later by ResourceReader.
        }
    def release(self, reader: TabularReader = None) -> bool:
        """Released by end of input or by a new manifest/dataset reader."""
        return reader is None or isinstance(reader, (
            ManifestReader,
            DatasetReader,
        ))
    def enter(self) -> None:
        self.state.dataset = self
    def leave(self) -> None:
        self.state.dataset = None
class ResourceReader(TabularReader):
    """Reads a `resource` dimension row.

    Outside of a dataset context the row defines a manifest-level backend;
    inside a dataset it defines one of the dataset's resources.
    """
    type: str = 'resource'
    data: Union[BackendRow, ResourceRow]
    def read(self, row: Dict[str, str]) -> None:
        """Dispatch to backend or resource parsing based on dataset context."""
        self.name = row['resource']
        if self.state.dataset is None:
            self.read_backend(row)
        else:
            self.read_resource(row)
    def read_backend(self, row: Dict[str, str]) -> None:
        """Register the row as a manifest-level backend definition."""
        # Backends will be loaded using
        # `spinta.manifests.helpers._load_manifest_backends`.
        if 'backends' not in self.state.manifest.data:
            self.state.manifest.data['backends'] = {}
        backends = self.state.manifest.data['backends']
        if self.name in backends:
            self.error(
                f"Backend {self.name!r} with the same name already defined."
            )
        self.data = {
            'type': row['type'],
            'name': self.name,
            'dsn': row['source'],  # Connection string comes from the source column.
            'title': row['title'],
            'description': row['description'],
        }
        backends[self.name] = self.data
    def read_resource(self, row: Dict[str, str]) -> None:
        """Register the row as a resource of the current dataset."""
        dataset = self.state.dataset.data
        if self.name in dataset['resources']:
            self.error("Resource with the same name already defined in ")
        self.data = {
            'type': row['type'],
            'backend': row['ref'],
            'external': row['source'],
            'prepare': _parse_spyna(self, row[PREPARE]),
            'level': row['level'],
            'access': row['access'],
            'title': row['title'],
            'description': row['description'],
        }
        dataset['resources'][self.name] = self.data
    def release(self, reader: TabularReader = None) -> bool:
        """Released by end of input or any manifest/dataset/resource reader."""
        return reader is None or isinstance(reader, (
            ManifestReader,
            DatasetReader,
            ResourceReader,
        ))
    def enter(self) -> None:
        self.state.resource = self
    def leave(self) -> None:
        self.state.resource = None
class BaseReader(TabularReader):
type: str = 'base'
data: BaseRow
def read(self, row: Dict[str, str]) -> None:
self.name = row['base']
dataset = self.state.dataset.data if self.state.dataset else None
self.data = {
'model': get_relative_model_name(dataset, row['base']),
'pk': row['ref'],
}
def release(self, reader: TabularReader = None) -> bool:
return reader is None or isinstance(reader, (
ManifestReader,
DatasetReader,
ResourceReader,
BaseReader,
))
def enter(self) -> None:
self.state.base = self
def leave(self) -> None:
self.state.base = None
class ModelReader(TabularReader):
type: str = 'model'
data: ModelRow
def read(self, row: Dict[str, str]) -> None:
dataset = self.state.dataset
resource = self.state.resource
base = self.state.base
name = get_relative_model_name(
dataset.data if dataset else None,
row['model'],
)
if self.state.rename_duplicates:
dup = 1
_name = name
while _name in self.state.models:
_name = f'{name}_{dup}'
dup += 1
name = _name
elif name in self.state.models:
self.error(f"Model {name!r} with the same name is already defined.")
self.name = name
self.data = {
'type': 'model',
'id': row['id'],
'name': name,
'base': base.name if base else None,
'level': row['level'],
'access': row['access'],
'title': row['title'],
'description': row['description'],
'properties': {},
'external': {
'dataset': dataset.name if dataset else '',
'resource': resource.name if dataset and resource else '',
'pk': (
[x.strip() for x in row['ref'].split(',')]
if row['ref'] else []
),
'name': row['source'],
'prepare': _parse_spyna(self, row[PREPARE]),
},
}
if resource and not dataset:
self.data['backend'] = resource.name
def release(self, reader: TabularReader = None) -> bool:
return reader is None or isinstance(reader, (
ManifestReader,
DatasetReader,
ResourceReader,
BaseReader,
ModelReader,
))
def enter(self) -> None:
self.state.model = self
self.state.models.add(self.name)
def leave(self) -> None:
self.state.model = None
def _parse_property_ref(ref: str) -> Tuple[str, List[str]]:
if '[' in ref:
ref = ref.rstrip(']')
ref_model, ref_props = ref.split('[', 1)
ref_props = [p.strip() for p in ref_props.split(',')]
else:
ref_model = ref
ref_props = []
return ref_model, ref_props
class PropertyReader(TabularReader):
type: str = 'property'
data: PropertyRow
enums: Set[str]
def read(self, row: Dict[str, str]) -> None:
self.name = row['property']
if self.state.model is None:
context = self.state.stack[-1]
self.error(
f"Property {self.name!r} must be defined in a model context. "
f"Now it is defined in {context.name!r} {context.type} context."
)
if row['property'] in self.state.model.data['properties']:
self.error(
f"Property {self.name!r} with the same name is already "
f"defined for this {self.state.model.name!r} model."
)
self.data = {
'type': row['type'],
'prepare': _parse_spyna(self, row[PREPARE]),
'level': row['level'],
'access': row['access'],
'uri': row['uri'],
'title': row['title'],
'description': row['description'],
}
dataset = self.state.dataset.data if self.state.dataset else None
if row['ref']:
if row['type'] in ('ref', 'backref', 'generic'):
ref_model, ref_props = _parse_property_ref(row['ref'])
self.data['model'] = get_relative_model_name(dataset, ref_model)
self.data['refprops'] = ref_props
else:
# TODO: Detect if ref is a unit or an enum.
self.data['enum'] = row['ref']
if dataset or row['source']:
self.data['external'] = {
'name': row['source'],
'prepare': self.data.pop('prepare'),
}
self.state.model.data['properties'][row['property']] = self.data
def release(self, reader: TabularReader = None) -> bool:
return reader is None or isinstance(reader, (
ManifestReader,
DatasetReader,
ResourceReader,
BaseReader,
ModelReader,
PropertyReader,
))
def enter(self) -> None:
self.state.prop = self
def leave(self) -> None:
self.state.prop = None
class AppendReader(TabularReader):
    """Handles continuation rows (rows with an empty dimension cell).

    The collected row is handed to the reader currently on top of the
    stack via its ``append()`` method when this reader leaves the stack.
    """
    type: str = 'append'
    data: ManifestRow
    def read(self, row: ManifestRow) -> None:
        self.name = row[REF]
        self.data = row
    def release(self, reader: TabularReader = None) -> bool:
        # A continuation row never keeps any other reader open.
        return True
    def enter(self) -> None:
        pass
    def leave(self) -> None:
        # Forward the collected row to the innermost open reader.
        self.state.stack[-1].append(self.data)
class PrefixReader(TabularReader):
type: str = 'prefix'
data: PrefixRow
def read(self, row: Dict[str, str]) -> None:
if not row['ref']:
# `ref` is a required parameter.
return
self.name = row['ref']
node = (
self.state.prop or
self.state.model or
self.state.base or
self.state.resource or
self.state.dataset or
self.state.manifest
)
if 'prefixes' not in node.data:
node.data['prefixes'] = {}
prefixes = node.data['prefixes']
if self.name in prefixes:
self.error(
f"Prefix {self.name!r} with the same name is already "
f"defined for this {node.name!r} {node.type}."
)
self.data = {
'id': row['id'],
'eid': f'{self.path}:{self.line}',
'type': self.type,
'name': self.name,
'uri': row['uri'],
'title': row['title'],
'description': row['description'],
}
prefixes[self.name] = self.data
def append(self, row: Dict[str, str]) -> None:
self.read(row)
def release(self, reader: TabularReader = None) -> bool:
return not isinstance(reader, AppendReader)
def enter(self) -> None:
pass
def leave(self) -> None:
pass
class NamespaceReader(TabularReader):
type: str = 'ns'
appendable: bool = True
def read(self, row: Dict[str, str]) -> None:
if not row['ref']:
# `ref` is a required parameter.
return
self.name = row['ref']
manifest = self.state.manifest
if self.name in manifest.namespaces:
self.error(
f"Namespace {self.name!r} with the same name is already "
f"defined."
)
manifest.namespaces.add(self.name)
self.rows.append({
'id': row['id'],
'type': self.type,
'name': self.name,
'title': row['title'],
'description': row['description'],
})
def append(self, row: Dict[str, str]) -> None:
self.read(row)
def release(self, reader: TabularReader = None) -> bool:
return not isinstance(reader, AppendReader)
def enter(self) -> None:
pass
def leave(self) -> None:
pass
class ParamReader(TabularReader):
type: str = 'param'
data: ParamRow
name: str = None
def _get_node(self) -> TabularReader:
return (
self.state.prop or
self.state.model or
self.state.base or
self.state.resource or
self.state.dataset or
self.state.manifest
)
def _get_data(self, name: str, row: ManifestRow):
return {
'name': name,
'source': [row[SOURCE]],
'prepare': [_parse_spyna(self, row[PREPARE])],
'title': row[TITLE],
'description': row[DESCRIPTION],
}
def _ensure_params_list(self, node: TabularReader, name: str) -> None:
if 'params' not in node.data:
node.data['params'] = {}
if name not in node.data['params']:
node.data['params'][name] = []
def _check_param_name(self, node: TabularReader, name: str) -> None:
if 'params' in node.data and name in node.data['params']:
self.error(
f"Parameter {name!r} with the same name already defined!"
)
def read(self, row: ManifestRow) -> None:
node = self._get_node()
self.name = row[REF]
if not self.name:
self.error("Parameter must have a name.")
self._check_param_name(node, self.name)
self._ensure_params_list(node, self.name)
self.data = self._get_data(self.name, row)
node.data['params'][self.name].append(self.data)
def append(self, row: ManifestRow) -> None:
node = self._get_node()
if row[REF]:
self.name = row[REF]
self._check_param_name(node, self.name)
self._ensure_params_list(node, self.name)
self.data = self._get_data(self.name, row)
node.data['params'][self.name].append(self.data)
def release(self, reader: TabularReader = None) -> bool:
return not isinstance(reader, (AppendReader, LangReader))
def enter(self) -> None:
pass
def leave(self) -> None:
pass
class EnumReader(TabularReader):
    """Reads ``enum`` dimension rows.

    Enum items are attached to the nearest open node (property, model,
    base, resource, dataset or manifest), keyed first by enum name and
    then by each item's source value.
    """
    type: str = 'enum'
    data: EnumRow
    name: str = None

    def read(self, row: ManifestRow) -> None:
        if row[REF]:
            self.name = row[REF]
        else:
            # Continuation rows may omit `ref`; reuse the last enum name.
            self.name = self.name or ''

        if not any([
            row[SOURCE],
            row[PREPARE],
            row[ACCESS],
            row[TITLE],
            row[DESCRIPTION],
        ]):
            # A fully empty row only (re)sets the current enum name.
            return

        # An enum item is keyed by its source value, falling back to the
        # prepare formula.  Note: the previous `str(row[SOURCE]) or ...`
        # turned a missing (None) source into the truthy string 'None',
        # silently creating a bogus 'None' item and never falling back;
        # treat None and '' explicitly as "no source given".
        if row[SOURCE] is None or row[SOURCE] == '':
            source = row[PREPARE]
        else:
            # Cast to str: spreadsheet readers can yield numeric cells.
            source = str(row[SOURCE])
        if not source:
            self.error(
                "At least source or prepare must be specified for an enum."
            )

        self.data = {
            'name': self.name,
            'source': row[SOURCE],
            'prepare': _parse_spyna(self, row[PREPARE]),
            'access': row[ACCESS],
            'title': row[TITLE],
            'description': row[DESCRIPTION],
        }

        node = (
            self.state.prop or
            self.state.model or
            self.state.base or
            self.state.resource or
            self.state.dataset or
            self.state.manifest
        )
        if 'enums' not in node.data:
            node.data['enums'] = {}
        if self.name not in node.data['enums']:
            node.data['enums'][self.name] = {}
        enum = node.data['enums'][self.name]
        if source in enum:
            self.error(
                f"Enum {self.name!r} item {source!r} with the same value is "
                f"already defined."
            )
        enum[source] = self.data

    def append(self, row: ManifestRow) -> None:
        self.read(row)

    def release(self, reader: TabularReader = None) -> bool:
        return not isinstance(reader, (AppendReader, LangReader))

    def enter(self) -> None:
        pass

    def leave(self) -> None:
        pass
class LangReader(TabularReader):
type: str = 'lang'
def read(self, row: ManifestRow) -> None:
reader = self.state.stack[-1]
if not isinstance(reader, (
DatasetReader,
ResourceReader,
BaseReader,
ModelReader,
PropertyReader,
EnumReader,
)):
self.error(f'Language metadata is not supported on {reader.type}.')
return
if 'lang' not in reader.data:
reader.data['lang'] = {}
lang = reader.data['lang']
self.name = row[REF]
if self.name in lang:
self.error(
f"Language {self.name!r} with the same name is already "
f"defined for this {reader.name!r} {reader.type}."
)
lang[self.name] = {
'id': row[ID],
'eid': f'{self.path}:{self.line}',
'type': self.type,
'ref': self.name,
'title': row[TITLE],
'description': row[DESCRIPTION],
}
def append(self, row: ManifestRow) -> None:
self.read(row)
def release(self, reader: TabularReader = None) -> bool:
return not isinstance(reader, AppendReader)
def enter(self) -> None:
pass
def leave(self) -> None:
pass
class CommentReader(TabularReader):
type: str = 'comment'
data: CommentData
def read(self, row: ManifestRow) -> None:
reader = self.state.stack[-1]
if 'comments' not in reader.data:
reader.data['comments'] = []
comments = reader.data['comments']
comments.append({
'id': row[ID],
'parent': row[REF],
'author': row[SOURCE],
'access': row[ACCESS],
# TODO: parse datetime
'created': row[TITLE],
'comment': row[DESCRIPTION],
})
def append(self, row: ManifestRow) -> None:
self.read(row)
def release(self, reader: TabularReader = None) -> bool:
return not isinstance(reader, AppendReader)
def enter(self) -> None:
pass
def leave(self) -> None:
pass
# Maps the dimension name detected for a row (see `_detect_dimension`)
# to the reader class that knows how to parse rows of that dimension.
READERS = {
    # Main dimensions
    'dataset': DatasetReader,
    'resource': ResourceReader,
    'base': BaseReader,
    'model': ModelReader,
    'property': PropertyReader,
    # Extra dimensions
    '': AppendReader,  # empty dimension cell: continuation of previous reader
    'prefix': PrefixReader,
    'ns': NamespaceReader,
    'param': ParamReader,
    'enum': EnumReader,
    'lang': LangReader,
    'comment': CommentReader,
}
class State:
    """Shared parser state: a stack of open dimension readers plus quick
    references to the innermost reader of each dimension kind.
    """
    stack: List[TabularReader]  # currently open readers, outermost first
    backends: Dict[str, Dict[str, str]] = None
    models: Set[str]  # all model names read so far (used for duplicate renaming)
    manifest: ManifestReader = None
    dataset: DatasetReader = None
    resource: ResourceReader = None
    base: BaseReader = None
    model: ModelReader = None
    prop: PropertyReader = None
    rename_duplicates: bool = False  # rename clashing models instead of erroring
    def __init__(self):
        self.stack = []
        self.models = set()
    def release(self, reader: TabularReader = None) -> Iterator[ParsedRow]:
        """Close readers superseded by *reader*, yielding their finished
        schema rows, then push *reader* (if given) onto the stack.
        """
        # Walk the stack from the innermost reader outwards.
        for parent in list(reversed(self.stack)):
            if parent.release(reader):
                # Only these reader kinds emit schema rows of their own.
                if isinstance(parent, (
                    ManifestReader,
                    NamespaceReader,
                    DatasetReader,
                    ModelReader,
                )):
                    yield from parent.items()
                self.stack.pop()
                parent.leave()
            else:
                # Remaining readers still enclose *reader*'s context.
                break
        if reader:
            reader.enter()
            self.stack.append(reader)
def _read_tabular_manifest_rows(
    path: Optional[str],
    rows: Iterator[Tuple[str, List[str]]],
    *,
    rename_duplicates: bool = True,
) -> Iterator[ParsedRow]:
    """Parse ``(line, cells)`` pairs into schema rows.

    The first pair must be the header row; every following row is
    dispatched to the dimension reader selected by `_detect_dimension`.
    """
    _, header = next(rows, (None, None))
    if header is None:
        # Looks like an empty file.
        return
    header = _detect_header(path, 1, header)
    # Cells missing from a row default to an empty string.
    defaults = {k: '' for k in MANIFEST_COLUMNS}
    state = State()
    state.rename_duplicates = rename_duplicates
    # Every manifest implicitly opens with a manifest-level reader.
    reader = ManifestReader(state, path, '1')
    reader.read({})
    yield from state.release(reader)
    for line, row in rows:
        row = dict(zip(header, row))
        row = {**defaults, **row}
        dimension = _detect_dimension(path, line, row)
        Reader = READERS[dimension]
        reader = Reader(state, path, line)
        reader.read(row)
        # Releasing flushes readers superseded by the new one.
        yield from state.release(reader)
    # Flush whatever is still open at end of input.
    yield from state.release()
def read_tabular_manifest(
    format_: TabularFormat = None,
    *,
    path: str = None,
    file: IO = None,
    rename_duplicates: bool = False,
) -> Iterator[ParsedRow]:
    """Read a tabular manifest in the given format and yield parsed rows."""
    loaders = {
        TabularFormat.GSHEETS: lambda: read_gsheets_manifest(path),
        TabularFormat.CSV: lambda: _read_csv_manifest(path, file),
        TabularFormat.ASCII: lambda: _read_txt_manifest(path, file),
        TabularFormat.XLSX: lambda: _read_xlsx_manifest(path),
    }
    loader = loaders.get(format_)
    if loader is None:
        raise ValueError(f"Unknown tabular manifest format {format_!r}.")
    yield from _read_tabular_manifest_rows(
        path,
        loader(),
        rename_duplicates=rename_duplicates,
    )
def _read_txt_manifest(
    path: str,
    file: IO[str] = None,
) -> Iterator[Tuple[str, List[str]]]:
    """Yield numbered rows from an ASCII-table manifest stream or file."""
    if file:
        yield from _read_ascii_tabular_manifest(file)
        return
    # utf-8-sig transparently strips a BOM if one is present.
    with pathlib.Path(path).open(encoding='utf-8-sig') as stream:
        yield from _read_ascii_tabular_manifest(stream)
def _read_csv_manifest(
path: str,
file: IO[str] = None,
) -> Iterator[Tuple[str, List[str]]]:
if file:
rows = csv.reader(file)
for i, row in enumerate(rows, 1):
yield str(i), row
else:
with pathlib.Path(path).open(encoding='utf-8-sig') as f:
rows = csv.reader(f)
for i, row in enumerate(rows, 1):
yield str(i), row
def _read_xlsx_manifest(path: str) -> Iterator[Tuple[str, List[str]]]:
wb = openpyxl.load_workbook(path)
yield '1', DATASET
for sheet in wb:
rows = sheet.iter_rows(values_only=True)
cols = next(rows, None)
if cols is None:
continue
cols = normalizes_columns(cols)
cols = [cols.index(c) if c in cols else None for c in DATASET]
for i, row in enumerate(rows, 2):
row = [row[c] if c is not None else None for c in cols]
yield f'{sheet.title}:{i}', row
def striptable(table):
    """Dedent *table* and strip surrounding whitespace."""
    dedented = textwrap.dedent(table)
    return dedented.strip()
def _join_escapes(row: List[str]) -> List[str]:
res = []
for v in row:
if res and res[-1] and res[-1].endswith('\\'):
res[-1] = res[-1][:-1] + '|' + v
else:
res.append(v)
return res
def _read_ascii_tabular_manifest(
    lines: Iterable[str],
    *,
    check_column_names: bool = True,
) -> Iterator[Tuple[str, List[str]]]:
    """Parse an ASCII (pipe-separated) manifest table into numbered rows."""
    lines = (line.strip() for line in lines)
    lines = filter(None, lines)  # skip blank lines
    # Read header
    header = next(lines, None)
    if header is None:
        return
    header = normalizes_columns(
        header.split('|'),
        check_column_names=check_column_names,
    )
    yield '1', header
    # Find index where dimension columns end.
    dim = sum(1 for h in header if h in DATASET[:6])
    for i, line in enumerate(lines, 2):
        # An escaped pipe (`\|`) belongs to the cell, not the separator.
        row = _join_escapes(line.split('|'))
        row = [x.strip() for x in row]
        row = row[:len(header)]
        # Short rows are padded with empty cells; padding is inserted at
        # the end of the dimension columns so that trailing (non-dimension)
        # cells keep their column positions.
        rem = len(header) - len(row)
        row = row[:dim - rem] + [''] * rem + row[dim - rem:]
        assert len(header) == len(row), line
        yield str(i), row
def read_ascii_tabular_rows(
manifest: str,
*,
strip: bool = False,
check_column_names: bool = True,
) -> Iterator[List[str]]:
if strip:
manifest = striptable(manifest)
rows = _read_ascii_tabular_manifest(
manifest.splitlines(),
check_column_names=check_column_names,
)
for line, row in rows:
yield row
def read_ascii_tabular_manifest(
manifest: str,
*,
strip: bool = False,
rename_duplicates: bool = False,
) -> Iterator[ParsedRow]:
if strip:
manifest = striptable(manifest)
rows = _read_ascii_tabular_manifest(manifest.splitlines())
yield from _read_tabular_manifest_rows(
None,
rows,
rename_duplicates=rename_duplicates,
)
def load_ascii_tabular_manifest(
context: Context,
manifest: Manifest,
manifest_ascii_table: str,
*,
strip: bool = False,
) -> None:
schemas = read_ascii_tabular_manifest(manifest_ascii_table, strip=strip)
load_manifest_nodes(context, manifest, schemas)
commands.link(context, manifest)
def get_relative_model_name(dataset: dict, name: str) -> str:
    """Resolve *name* into an absolute model name.

    A leading ``/`` marks an already absolute name (the slash is
    dropped); otherwise the dataset name is prepended when a dataset
    context is given.
    """
    if name.startswith('/'):
        return name[1:]
    if dataset is None:
        return name
    return f"{dataset['name']}/{name}"
def to_relative_model_name(model: "Model", dataset: "Dataset" = None) -> str:
    """Convert absolute model `name` to relative.

    Inverse of `get_relative_model_name`: a model inside *dataset* loses
    the dataset prefix; a model from elsewhere keeps its absolute name,
    marked with a leading ``/``.
    """
    if dataset is None:
        return model.name
    # Match on the full path segment (prefix plus '/'): a bare
    # startswith(dataset.name) would wrongly strip dataset 'ds' from a
    # model named 'dsx/city'.
    prefix = dataset.name + '/'
    if model.name.startswith(prefix):
        return model.name[len(prefix):]
    else:
        return '/' + model.name
def tabular_eid(model: Model):
    """Return the model's eid when it is an integer, otherwise 0."""
    eid = model.eid
    if isinstance(eid, int):
        return eid
    return 0
class OrderBy(NamedTuple):
func: Callable[[Union[Dataset, Model, Property, EnumItem]], Any]
reverse: bool = False
def _order_datasets_by_access(dataset: Dataset):
return dataset.access or Access.private
def _order_datasets_by_name(dataset: Dataset):
return dataset.name
DATASETS_ORDER_BY = {
'access': OrderBy(_order_datasets_by_access, reverse=True),
'default': OrderBy(_order_datasets_by_name),
}
def _order_models_by_access(model: Model):
return model.access or Access.private
MODELS_ORDER_BY = {
'access': OrderBy(_order_models_by_access, reverse=True),
'default': OrderBy(tabular_eid),
}
def _order_properties_by_access(prop: Property):
return prop.access or Access.private
PROPERTIES_ORDER_BY = {
'access': OrderBy(_order_properties_by_access, reverse=True),
}
T = TypeVar('T', Dataset, Model, Property, EnumItem)
def sort(
    ordering: Dict[str, OrderBy],
    items: Iterable[T],
    order_by: Optional[str],
) -> Iterable[T]:
    """Sort *items* by the ordering named *order_by*.

    Falls back to the ``'default'`` ordering when no name is requested;
    with no applicable ordering, items are returned unsorted.
    """
    if order_by:
        chosen = ordering[order_by]
    else:
        chosen = ordering.get('default')
    if chosen is None:
        return items
    return sorted(items, key=chosen.func, reverse=chosen.reverse)
def _prefixes_to_tabular(
prefixes: Dict[str, UriPrefix],
*,
separator: bool = False,
) -> Iterator[ManifestRow]:
first = True
for name, prefix in prefixes.items():
yield torow(DATASET, {
'id': prefix.id,
'type': prefix.type if first else '',
'ref': name,
'uri': prefix.uri,
'title': prefix.title,
'description': prefix.description,
})
first = False
if separator and prefixes:
yield torow(DATASET, {})
def _backends_to_tabular(
backends: Dict[str, Backend],
*,
separator: bool = False,
) -> Iterator[ManifestRow]:
for name, backend in backends.items():
yield torow(DATASET, {
'type': backend.type,
'resource': name,
'source': backend.config.get('dsn'),
})
if separator and backends:
yield torow(DATASET, {})
def _namespaces_to_tabular(
namespaces: Dict[str, Namespace],
*,
separator: bool = False,
) -> Iterator[ManifestRow]:
namespaces = {
k: ns
for k, ns in namespaces.items() if not ns.generated
}
first = True
for name, ns in namespaces.items():
yield torow(DATASET, {
'type': ns.type if first else '',
'ref': name,
'title': ns.title,
'description': ns.description,
})
first = False
if separator and namespaces:
yield torow(DATASET, {})
def _order_enums_by_access(item: EnumItem):
return item.access or Access.private
ENUMS_ORDER_BY = {
'access': OrderBy(_order_enums_by_access, reverse=True),
}
def _enums_to_tabular(
enums: Optional[Enums],
*,
external: bool = True,
access: Access = Access.private,
order_by: ManifestColumn = None,
separator: bool = False,
) -> Iterator[ManifestRow]:
if enums is None:
return
for name, enum in enums.items():
first = True
items = sort(ENUMS_ORDER_BY, enum.values(), order_by)
for item in items:
if item.access is not None and item.access < access:
continue
yield torow(DATASET, {
'type': 'enum' if first else '',
'ref': name if first else '',
'source': item.source if external else '',
'prepare': unparse(item.prepare),
'access': item.given.access,
'title': item.title,
'description': item.description,
})
if lang := list(_lang_to_tabular(item.lang)):
first = True
yield from lang
else:
first = False
if separator and enums:
yield torow(DATASET, {})
def _lang_to_tabular(
lang: Optional[LangData],
) -> Iterator[ManifestRow]:
if lang is None:
return
first = True
for name, data in sorted(lang.items(), key=itemgetter(0)):
yield torow(DATASET, {
'type': 'lang' if first else '',
'ref': name if first else '',
'title': data['title'],
'description': data['description'],
})
first = False
def _comments_to_tabular(
comments: Optional[List[Comment]],
*,
access: Access = Access.private,
) -> Iterator[ManifestRow]:
if comments is None:
return
first = True
for comment in comments:
if comment.access < access:
return
yield torow(DATASET, {
'id': comment.id,
'type': 'comment' if first else '',
'ref': comment.parent,
'source': comment.author,
'access': comment.given.access,
'title': comment.created,
'description': comment.comment,
})
first = False
def _dataset_to_tabular(
dataset: Dataset,
*,
external: bool = True,
access: Access = Access.private,
order_by: ManifestColumn = None,
) -> Iterator[ManifestRow]:
yield torow(DATASET, {
'id': dataset.id,
'dataset': dataset.name,
'level': dataset.level,
'access': dataset.given.access,
'title': dataset.title,
'description': dataset.description,
})
yield from _lang_to_tabular(dataset.lang)
yield from _prefixes_to_tabular(dataset.prefixes, separator=True)
yield from _enums_to_tabular(
dataset.ns.enums,
external=external,
access=access,
order_by=order_by,
)
def _resource_to_tabular(
resource: Resource,
*,
external: bool = True,
) -> Iterator[ManifestRow]:
backend = resource.backend
yield torow(DATASET, {
'resource': resource.name,
'source': resource.external if external else '',
'prepare': unparse(resource.prepare or NA) if external else '',
'type': resource.type,
'ref': (
backend.name
if (
external and
backend and
backend.origin != BackendOrigin.resource
)
else ''
),
'level': resource.level,
'access': resource.given.access,
'title': resource.title,
'description': resource.description,
})
yield from _lang_to_tabular(resource.lang)
def _property_to_tabular(
prop: Property,
*,
external: bool = True,
access: Access = Access.private,
order_by: ManifestColumn = None,
) -> Iterator[ManifestRow]:
if prop.name.startswith('_'):
return
if prop.access < access:
return
data = {
'property': prop.place,
'type': prop.dtype.name,
'level': prop.level,
'access': prop.given.access,
'uri': prop.uri,
'title': prop.title,
'description': prop.description,
}
if external and prop.external:
if isinstance(prop.external, list):
# data['source'] = ', '.join(x.name for x in prop.external)
# data['prepare'] = ', '.join(
# unparse(x.prepare or NA)
# for x in prop.external if x.prepare
# )
raise DeprecationWarning(
"Source can't be a list, use prepare instead."
)
elif prop.external:
data['source'] = prop.external.name
data['prepare'] = unparse(prop.external.prepare or NA)
if isinstance(prop.dtype, Ref):
model = prop.model
if model.external and model.external.dataset:
data['ref'] = to_relative_model_name(
prop.dtype.model,
model.external.dataset,
)
pkeys = prop.dtype.model.external.pkeys
rkeys = prop.dtype.refprops
if rkeys and pkeys != rkeys:
rkeys = ', '.join([p.place for p in rkeys])
data['ref'] += f'[{rkeys}]'
else:
data['ref'] = prop.dtype.model.name
elif prop.enum is not None:
data['ref'] = prop.given.enum
elif prop.unit is not None:
data['ref'] = prop.given.unit
yield torow(DATASET, data)
yield from _comments_to_tabular(prop.comments, access=access)
yield from _lang_to_tabular(prop.lang)
yield from _enums_to_tabular(
prop.enums,
external=external,
access=access,
order_by=order_by,
)
def _model_to_tabular(
model: Model,
*,
external: bool = True,
access: Access = Access.private,
order_by: ManifestColumn = None,
) -> Iterator[ManifestRow]:
data = {
'id': model.id,
'model': model.name,
'level': model.level,
'access': model.given.access,
'title': model.title,
'description': model.description,
}
if model.external and model.external.dataset:
data['model'] = to_relative_model_name(
model,
model.external.dataset,
)
if external and model.external:
data.update({
'source': model.external.name,
'prepare': unparse(model.external.prepare or NA),
})
if (
not model.external.unknown_primary_key and
all(p.access >= access for p in model.external.pkeys)
):
# Add `ref` only if all properties are available in the
# resulting manifest.
data['ref'] = ', '.join([
p.name for p in model.external.pkeys
])
yield torow(DATASET, data)
yield from _comments_to_tabular(model.comments, access=access)
yield from _lang_to_tabular(model.lang)
props = sort(PROPERTIES_ORDER_BY, model.properties.values(), order_by)
for prop in props:
yield from _property_to_tabular(
prop,
external=external,
access=access,
order_by=order_by,
)
def datasets_to_tabular(
manifest: Manifest,
*,
external: bool = True, # clean content of source and prepare
access: Access = Access.private,
internal: bool = False, # internal models with _ prefix like _txn
order_by: ManifestColumn = None,
) -> Iterator[ManifestRow]:
yield from _prefixes_to_tabular(manifest.prefixes, separator=True)
yield from _backends_to_tabular(manifest.backends, separator=True)
yield from _namespaces_to_tabular(manifest.namespaces, separator=True)
yield from _enums_to_tabular(
manifest.enums,
external=external,
access=access,
order_by=order_by,
separator=True,
)
seen_datasets = set()
dataset = None
resource = None
models = manifest.models if internal else take(manifest.models)
models = sort(MODELS_ORDER_BY, models.values(), order_by)
separator = False
for model in models:
if model.access < access:
continue
if model.external:
if dataset is None or dataset.name != model.external.dataset.name:
dataset = model.external.dataset
if dataset:
seen_datasets.add(dataset.name)
resource = None
separator = True
yield from _dataset_to_tabular(
dataset,
external=external,
access=access,
order_by=order_by,
)
if external and model.external and model.external.resource and (
resource is None or
resource.name != model.external.resource.name
):
resource = model.external.resource
if resource:
separator = True
yield from _resource_to_tabular(resource, external=external)
if separator:
yield torow(DATASET, {})
else:
separator = False
yield from _model_to_tabular(
model,
external=external,
access=access,
order_by=order_by,
)
datasets = sort(DATASETS_ORDER_BY, manifest.datasets.values(), order_by)
for dataset in datasets:
if dataset.name in seen_datasets:
continue
yield from _dataset_to_tabular(
dataset,
external=external,
access=access,
order_by=order_by,
)
for resource in dataset.resources.values():
yield from _resource_to_tabular(resource)
def torow(keys, values) -> ManifestRow:
    """Build a row dict with exactly *keys*; missing entries become None."""
    return dict((key, values.get(key)) for key in keys)
def render_tabular_manifest(
    manifest: Manifest,
    cols: List[ManifestColumn] = None,
    *,
    sizes: Dict[ManifestColumn, int] = None,
) -> str:
    """Render the whole *manifest* as an ASCII manifest table."""
    tabular_rows = datasets_to_tabular(manifest)
    return render_tabular_manifest_rows(tabular_rows, cols, sizes=sizes)
def render_tabular_manifest_rows(
    rows: Iterable[ManifestRow],
    cols: List[ManifestColumn] = None,
    *,
    sizes: Dict[ManifestColumn, int] = None,
) -> str:
    """Render manifest rows as an ASCII table string.

    The dimension columns up to PROPERTY are folded into an indented
    hierarchy area; remaining columns are padded to their widest value.
    """
    cols = cols or MANIFEST_COLUMNS
    hs = 1 if ID in cols else 0  # hierarchical cols start
    he = cols.index(PROPERTY)  # hierarchical cols end
    hsize = 1  # hierarchical column size
    bsize = 3  # border size
    if sizes is None:
        # Initial widths: header width for id and non-hierarchical columns.
        sizes = dict(
            [(c, len(c)) for c in cols[:hs]] +
            [(c, 1) for c in cols[hs:he]] +
            [(c, len(c)) for c in cols[he:]]
        )
    rows = list(rows)
    # First pass: measure every cell and grow column widths to fit.
    for row in rows:
        for i, col in enumerate(cols):
            val = '' if row[col] is None else str(row[col])
            if col == ID:
                # ids are abbreviated to two characters when rendered
                sizes[col] = 2
            elif i < he:
                # A hierarchical value spans its own column plus all deeper
                # dimension columns; widen PROPERTY if it does not fit.
                size = (hsize + bsize) * (he - hs - i) + sizes[PROPERTY]
                if size < len(val):
                    sizes[PROPERTY] += len(val) - size
            elif sizes[col] < len(val):
                sizes[col] = len(val)
    # Header line: column names truncated/padded to their final widths.
    line = []
    for col in cols:
        size = sizes[col]
        line.append(col[:size].ljust(size))
    lines = [line]
    # Second pass: render each row.
    for row in rows:
        if ID in cols:
            line = [row[ID][:2] if row[ID] else '  ']
        else:
            line = []
        # Find the first non-empty dimension cell; its index is the depth.
        for i, col in enumerate(cols[hs:he + 1]):
            val = row[col] or ''
            if val:
                depth = i
                break
        else:
            val = ''
            depth = 0
        # Indent by one empty cell per skipped dimension level.
        line += [' ' * hsize] * depth
        size = (hsize + bsize) * (he - hs - depth) + sizes[PROPERTY]
        line += [val.ljust(size)]
        for col in cols[he + 1:]:
            val = '' if row[col] is None else str(row[col])
            val = val.replace('|', '\\|')  # escape literal pipes
            size = sizes[col]
            line.append(val.ljust(size))
        lines.append(line)
    lines = [' | '.join(line) for line in lines]
    lines = [l.rstrip() for l in lines]
    return '\n'.join(lines)
SHORT_NAMES = {
'd': 'dataset',
'r': 'resource',
'b': 'base',
'm': 'model',
'p': 'property',
't': 'type',
}
def normalizes_columns(
    cols: List[str],
    *,
    check_column_names: bool = True,
) -> List[ManifestColumn]:
    """Normalize raw header cells into canonical manifest column names.

    Short aliases (``d``, ``r``, ``b``, ``m``, ``p``, ``t``) are
    expanded.  Unknown names at the end of the header are silently
    dropped; unknown names followed by a known column are kept in place
    but reported as invalid (raises when *check_column_names* is True).
    """
    result: List[ManifestColumn] = []
    unknown: List[str] = []
    invalid: List[str] = []
    for col in cols:
        col = col or ''
        col = col.strip().lower()
        col = SHORT_NAMES.get(col, col)
        col = cast(ManifestColumn, col)
        if col not in MANIFEST_COLUMNS:
            unknown.append(col)
        else:
            if unknown:
                # Unknown columns in the middle of the header are kept to
                # preserve cell positions, but flagged as invalid below.
                result += unknown
                invalid += unknown
                unknown = []
            result.append(col)
    if check_column_names and invalid:
        if len(invalid) == 1:
            raise PropertyNotFound(property=invalid[0])
        else:
            raise MultipleErrors(
                PropertyNotFound(property=col) for col in invalid
            )
    return result
def write_tabular_manifest(
path: str,
rows: Union[
Manifest,
Iterable[ManifestRow],
None,
] = None,
cols: List[ManifestColumn] = None,
) -> None:
cols = cols or DATASET
if rows is None:
rows = []
elif isinstance(rows, Manifest):
rows = datasets_to_tabular(rows)
rows = ({c: row[c] for c in cols} for row in rows)
if path.endswith('.csv'):
_write_csv(pathlib.Path(path), rows, cols)
elif path.endswith('.xlsx'):
_write_xlsx(pathlib.Path(path), rows, cols)
else:
raise ValueError(f"Unknown tabular manifest format {path!r}.")
def _write_csv(
path: pathlib.Path,
rows: Iterator[ManifestRow],
cols: List[ManifestColumn],
) -> None:
with path.open('w') as f:
writer = csv.DictWriter(f, fieldnames=cols)
writer.writeheader()
writer.writerows(rows)
def _write_xlsx(
path: pathlib.Path,
rows: Iterator[ManifestRow],
cols: List[ManifestColumn],
) -> None:
workbook = xlsxwriter.Workbook(path, {
'strings_to_formulas': False,
'strings_to_urls': False,
})
bold = workbook.add_format({'bold': True})
formats = {
'id': workbook.add_format({
'align': 'right',
'valign': 'top',
}),
'dataset': workbook.add_format({
'bold': True,
'valign': 'top',
'font_color': '#127622',
}),
'resource': workbook.add_format({
'valign': 'top',
}),
'base': workbook.add_format({
'valign': 'top',
}),
'model': workbook.add_format({
'bold': True,
'valign': 'top',
'font_color': '#127622',
}),
'property': workbook.add_format({
'valign': 'top',
'font_color': '#127622',
}),
'type': workbook.add_format({
'valign': 'top',
}),
'ref': workbook.add_format({
'valign': 'top',
'font_color': '#127622',
}),
'source': workbook.add_format({
'valign': 'top',
'font_color': '#c9211e',
}),
'prepare': workbook.add_format({
'valign': 'top',
'font_color': '#c9211e',
}),
'level': workbook.add_format({
'valign': 'top',
}),
'access': workbook.add_format({
'valign': 'top',
}),
'uri': workbook.add_format({
'valign': 'top',
'font_color': '#284f80',
}),
'title': workbook.add_format({
'valign': 'top',
'text_wrap': True,
}),
'description': workbook.add_format({
'valign': 'top',
'text_wrap': True,
}),
}
sheet = workbook.add_worksheet()
sheet.freeze_panes(1, 0) # Freeze the first row.
sheet.set_column('A:E', 2) # id, d, r, b, m
sheet.set_column('F:F', 20) # property
sheet.set_column('I:J', 20) # source, prepare
sheet.set_column('N:N', 20) # title
sheet.set_column('O:O', 30) # description
for j, col in enumerate(cols):
sheet.write(0, j, col, bold)
for i, row in enumerate(rows, 1):
for j, col in enumerate(cols):
val = row[col]
fmt = formats.get(col)
sheet.write(i, j, val, fmt)
workbook.close()
| [
"csv.DictWriter",
"textwrap.dedent",
"spinta.manifests.helpers.load_manifest_nodes",
"spinta.core.ufuncs.unparse",
"spinta.commands.link",
"openpyxl.load_workbook",
"spinta.manifests.tabular.formats.gsheets.read_gsheets_manifest",
"pathlib.Path",
"typing.cast",
"spinta.utils.data.take",
"spinta.... | [((32209, 32257), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'Dataset', 'Model', 'Property', 'EnumItem'], {}), "('T', Dataset, Model, Property, EnumItem)\n", (32216, 32257), False, 'from typing import TypeVar\n'), ((27941, 27969), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['path'], {}), '(path)\n', (27963, 27969), False, 'import openpyxl\n'), ((30575, 30622), 'spinta.manifests.helpers.load_manifest_nodes', 'load_manifest_nodes', (['context', 'manifest', 'schemas'], {}), '(context, manifest, schemas)\n', (30594, 30622), False, 'from spinta.manifests.helpers import load_manifest_nodes\n'), ((30627, 30659), 'spinta.commands.link', 'commands.link', (['context', 'manifest'], {}), '(context, manifest)\n', (30640, 30659), False, 'from spinta import commands\n'), ((48585, 48672), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (['path', "{'strings_to_formulas': False, 'strings_to_urls': False}"], {}), "(path, {'strings_to_formulas': False, 'strings_to_urls':\n False})\n", (48604, 48672), False, 'import xlsxwriter\n'), ((26658, 26685), 'spinta.manifests.tabular.formats.gsheets.read_gsheets_manifest', 'read_gsheets_manifest', (['path'], {}), '(path)\n', (26679, 26685), False, 'from spinta.manifests.tabular.formats.gsheets import read_gsheets_manifest\n'), ((27582, 27598), 'csv.reader', 'csv.reader', (['file'], {}), '(file)\n', (27592, 27598), False, 'import csv\n'), ((42274, 42295), 'spinta.utils.data.take', 'take', (['manifest.models'], {}), '(manifest.models)\n', (42278, 42295), False, 'from spinta.utils.data import take\n'), ((47017, 47042), 'typing.cast', 'cast', (['ManifestColumn', 'col'], {}), '(ManifestColumn, col)\n', (47021, 47042), False, 'from typing import cast\n'), ((48356, 48390), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'cols'}), '(f, fieldnames=cols)\n', (48370, 48390), False, 'import csv\n'), ((4850, 4870), 'spinta.spyna.parse', 'spyna.parse', (['formula'], {}), '(formula)\n', (4861, 4870), False, 'from spinta import 
spyna\n'), ((27765, 27778), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (27775, 27778), False, 'import csv\n'), ((28445, 28467), 'textwrap.dedent', 'textwrap.dedent', (['table'], {}), '(table)\n', (28460, 28467), False, 'import textwrap\n'), ((35591, 35604), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (35601, 35604), False, 'from operator import itemgetter\n'), ((47369, 47406), 'spinta.exceptions.PropertyNotFound', 'PropertyNotFound', ([], {'property': 'invalid[0]'}), '(property=invalid[0])\n', (47385, 47406), False, 'from spinta.exceptions import PropertyNotFound\n'), ((47993, 48011), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (48005, 48011), False, 'import pathlib\n'), ((38992, 39028), 'spinta.core.ufuncs.unparse', 'unparse', (['(prop.external.prepare or NA)'], {}), '(prop.external.prepare or NA)\n', (38999, 39028), False, 'from spinta.core.ufuncs import unparse\n'), ((40680, 40717), 'spinta.core.ufuncs.unparse', 'unparse', (['(model.external.prepare or NA)'], {}), '(model.external.prepare or NA)\n', (40687, 40717), False, 'from spinta.core.ufuncs import unparse\n'), ((48078, 48096), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (48090, 48096), False, 'import pathlib\n'), ((27342, 27360), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (27354, 27360), False, 'import pathlib\n'), ((27694, 27712), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (27706, 27712), False, 'import pathlib\n'), ((37431, 37462), 'spinta.core.ufuncs.unparse', 'unparse', (['(resource.prepare or NA)'], {}), '(resource.prepare or NA)\n', (37438, 37462), False, 'from spinta.core.ufuncs import unparse\n'), ((47471, 47501), 'spinta.exceptions.PropertyNotFound', 'PropertyNotFound', ([], {'property': 'col'}), '(property=col)\n', (47487, 47501), False, 'from spinta.exceptions import PropertyNotFound\n'), ((35011, 35032), 'spinta.core.ufuncs.unparse', 'unparse', (['item.prepare'], {}), '(item.prepare)\n', (35018, 35032), 
False, 'from spinta.core.ufuncs import unparse\n')] |
import glob, os
import subprocess
from difflib import context_diff
class TestRunner(object):
    """Runs every '*.in' testcase through the project's run script and
    compares the program output against the matching '*.out' file.

    The supplied context must provide: repo_dir, testcasedir, runscript,
    and a logger with info/warn/error methods.
    """
    def __init__(self, context):
        self.context = context
        self.error_count = 0
        self.test_count = 0
        self.success_count = 0
    def run(self):
        """Discover testcase inputs and run them all, updating the counters."""
        os.chdir(self.context.repo_dir)
        search = os.path.join(self.context.testcasedir, '*.in')
        problem_files = glob.glob(search)
        if not problem_files:
            self.warn("No problem files found. Does directory '" +
                      self.context.testcasedir + "' exist?")
            return
        file = self.context.runscript
        if not os.path.isfile(file):
            self.error("Could not find file '{}' to run the program.".format(file))
            return
        for problem_file in problem_files:
            self.test_count += 1
            output = self.get_output(problem_file)
            expected_output = self.get_expected_output(problem_file)
            if self.compare(problem_file, output, expected_output):
                self.success_count += 1
            else:
                self.error_count += 1
    def compare(self, problem_file, output, expected_output):
        """Return True when output matches expected_output exactly;
        log the full diff context otherwise."""
        diff = ''.join(context_diff(output, expected_output,
                                    fromfile='program output', tofile='expected'))
        if not diff:
            self.info("Testing '" + problem_file + "'. Result: output CORRECT")
            return True
        else:
            self.error("Testing '" + problem_file + "'. Result: output DIFFERENT")
            self.error("  Expected:")
            for line in expected_output.split('\n'):
                self.error("    " + line)
            self.error("  Actual:")
            for line in output.split('\n'):
                self.error("    " + line)
            return False
    def info(self, message):
        self.context.logger.info(message)
    def warn(self, message):
        self.context.logger.warn(message)
    def error(self, message):
        self.context.logger.error(message)
    def get_expected_output(self, problem_file):
        """Read the expected output from the '.out' file next to the input."""
        expected_output_file = problem_file[:-2] + 'out'
        with open(expected_output_file, 'r') as file:
            return file.read()
    def get_output(self, problem_file):
        """Run the runscript with problem_file on stdin; return stdout as str."""
        runscript = os.path.join(self.context.repo_dir, self.context.runscript)
        out = ''
        err = ''
        try:
            with open(problem_file, 'r') as input:
                # universal_newlines=True makes communicate() return str
                # instead of bytes -- the original code crashed under
                # Python 3 on err.split('\n') and compared bytes to str
                # in context_diff.
                p = subprocess.Popen(runscript,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                    stdin=input, universal_newlines=True)
                out, err = p.communicate()
                p.wait()
            if len(err) > 0:
                self.warn('Stderr is outputting text:')
                for line in err.split('\n'):
                    self.warn(line)
        except Exception as e:
            self.error('Caught unexpected error: ' + str(e))
        return out
| [
"subprocess.Popen",
"os.path.join",
"os.getcwd",
"os.chdir",
"os.path.isfile",
"difflib.context_diff",
"glob.glob"
] | [((274, 285), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (283, 285), False, 'import glob, os\n'), ((294, 325), 'os.chdir', 'os.chdir', (['self.context.repo_dir'], {}), '(self.context.repo_dir)\n', (302, 325), False, 'import glob, os\n'), ((344, 390), 'os.path.join', 'os.path.join', (['self.context.testcasedir', '"""*.in"""'], {}), "(self.context.testcasedir, '*.in')\n", (356, 390), False, 'import glob, os\n'), ((415, 432), 'glob.glob', 'glob.glob', (['search'], {}), '(search)\n', (424, 432), False, 'import glob, os\n'), ((1242, 1330), 'difflib.context_diff', 'context_diff', (['output', 'expected_output'], {'fromfile': '"""program output"""', 'tofile': '"""expected"""'}), "(output, expected_output, fromfile='program output', tofile=\n 'expected')\n", (1254, 1330), False, 'from difflib import context_diff\n'), ((2401, 2460), 'os.path.join', 'os.path.join', (['self.context.repo_dir', 'self.context.runscript'], {}), '(self.context.repo_dir, self.context.runscript)\n', (2413, 2460), False, 'import glob, os\n'), ((669, 689), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (683, 689), False, 'import glob, os\n'), ((2579, 2671), 'subprocess.Popen', 'subprocess.Popen', (['runscript'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'stdin': 'input'}), '(runscript, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n stdin=input)\n', (2595, 2671), False, 'import subprocess\n')] |
# coding=utf-8
import optparse
import torch
import time
import pickle
from torch.autograd import Variable
from loader import *
from utils import *
# python -m visdom.server
# Command-line interface: evaluation-only companion to the training script.
optparser = optparse.OptionParser()
optparser.add_option(
    "-t", "--test", default="data/eng.testb",
    help="Test set location"
)
optparser.add_option(
    '--score', default='evaluation/temp/score.txt',
    help='score file location'
)
optparser.add_option(
    "-g", '--use_gpu', default='1',
    type='int', help='whether or not to ues gpu'
)
optparser.add_option(
    '--loss', default='loss.txt',
    help='loss file location'
)
optparser.add_option(
    '--model_path', default='models/test',
    help='model path'
)
optparser.add_option(
    '--map_path', default='models/mapping.pkl',
    help='model path'
)
optparser.add_option(
    '--char_mode', choices=['CNN', 'LSTM'], default='CNN',
    help='char_CNN or char_LSTM'
)
opts = optparser.parse_args()[0]
# The mapping pickle was written at training time and bundles the
# vocabularies, hyper-parameters and pre-trained word embeddings.
mapping_file = opts.map_path
with open(mapping_file, 'rb') as f:
    mappings = pickle.load(f)
word_to_id = mappings['word_to_id']
tag_to_id = mappings['tag_to_id']
# Invert tag_to_id so predicted indices map back to tag strings.
id_to_tag = {k[1]: k[0] for k in tag_to_id.items()}
char_to_id = mappings['char_to_id']
parameters = mappings['parameters']
word_embeds = mappings['word_embeds']
# GPU is used only when requested on the CLI *and* actually available.
use_gpu = opts.use_gpu == 1 and torch.cuda.is_available()
assert os.path.isfile(opts.test)
assert parameters['tag_scheme'] in ['iob', 'iobes']
# eval_script / eval_temp are presumably provided by the star-imports
# (loader/utils) -- TODO confirm.
if not os.path.isfile(eval_script):
    raise Exception('CoNLL evaluation script not found at "%s"' % eval_script)
if not os.path.exists(eval_temp):
    os.makedirs(eval_temp)
lower = parameters['lower']
zeros = parameters['zeros']
tag_scheme = parameters['tag_scheme']
# Load and preprocess the test set with the same settings used in training.
test_sentences = load_sentences(opts.test, lower, zeros)
update_tag_scheme(test_sentences, tag_scheme)
test_data = prepare_dataset(
    test_sentences, word_to_id, char_to_id, tag_to_id, lower
)
model = torch.load(opts.model_path)
# File stem of the model path; used to label prediction/score files later.
model_name = opts.model_path.split('/')[-1].split('.')[0]
if use_gpu:
    model.cuda()
model.eval()
def eval(model, datas):
    """Run ``model`` over ``datas``, write a CoNLL-style prediction file,
    score it with the external evaluation script, and print a confusion
    matrix.

    NOTE: this function shadows the builtin ``eval``; the name is kept
    because the call site below depends on it.

    Args:
        model: trained tagger, called as model(words, chars, caps, lengths, d).
        datas: iterable of dicts with keys 'tags', 'str_words', 'chars',
            'caps' and 'words' (as produced by prepare_dataset).
    """
    prediction = []
    # Confusion matrix over real tags only; the last two tag ids are
    # excluded (presumably special start/stop tags -- TODO confirm).
    confusion_matrix = torch.zeros((len(tag_to_id) - 2, len(tag_to_id) - 2))
    for data in datas:
        ground_truth_id = data['tags']
        words = data['str_words']
        chars2 = data['chars']
        caps = data['caps']
        if parameters['char_mode'] == 'LSTM':
            # Sort words by character-sequence length (descending) and
            # record the permutation in d: d[sorted_pos] = original_pos.
            chars2_sorted = sorted(chars2, key=lambda p: len(p), reverse=True)
            d = {}
            for i, ci in enumerate(chars2):
                for j, cj in enumerate(chars2_sorted):
                    if ci == cj and not j in d and not i in d.values():
                        d[j] = i
                        continue
            chars2_length = [len(c) for c in chars2_sorted]
            char_maxl = max(chars2_length)
            # Right-pad every character sequence to the longest one.
            chars2_mask = np.zeros((len(chars2_sorted), char_maxl), dtype='int')
            for i, c in enumerate(chars2_sorted):
                chars2_mask[i, :chars2_length[i]] = c
            chars2_mask = Variable(torch.LongTensor(chars2_mask))
        if parameters['char_mode'] == 'CNN':
            # CNN path keeps the original word order; only padding is needed.
            d = {}
            chars2_length = [len(c) for c in chars2]
            char_maxl = max(chars2_length)
            chars2_mask = np.zeros((len(chars2_length), char_maxl), dtype='int')
            for i, c in enumerate(chars2):
                chars2_mask[i, :chars2_length[i]] = c
            chars2_mask = Variable(torch.LongTensor(chars2_mask))
        dwords = Variable(torch.LongTensor(data['words']))
        dcaps = Variable(torch.LongTensor(caps))
        if use_gpu:
            val, out = model(dwords.cuda(), chars2_mask.cuda(), dcaps.cuda(),chars2_length, d)
        else:
            val, out = model(dwords, chars2_mask, dcaps, chars2_length, d)
        predicted_id = out
        # Emit one "word true_tag predicted_tag" line per token.
        for (word, true_id, pred_id) in zip(words, ground_truth_id, predicted_id):
            line = ' '.join([word, id_to_tag[true_id], id_to_tag[pred_id]])
            prediction.append(line)
            confusion_matrix[true_id, pred_id] += 1
        # Blank line separates sentences in the CoNLL format.
        prediction.append('')
    predf = eval_temp + '/pred.' + model_name
    scoref = eval_temp + '/score.' + model_name
    with open(predf, 'w') as f:
        f.write('\n'.join(prediction))
    # Score the prediction file with the external CoNLL evaluation script.
    os.system('%s < %s > %s' % (eval_script, predf, scoref))
    with open(scoref, 'r') as f:
        for l in f.readlines():
            print(l.strip())
    # Header row of the confusion matrix: one column per tag plus "Percent".
    print(("{: >2}{: >7}{: >7}%s{: >9}" % ("{: >7}" * confusion_matrix.size(0))).format(
        "ID", "NE", "Total",
        *([id_to_tag[i] for i in range(confusion_matrix.size(0))] + ["Percent"])
    ))
    # One row per true tag: counts per predicted tag and per-tag accuracy.
    for i in range(confusion_matrix.size(0)):
        print(("{: >2}{: >7}{: >7}%s{: >9}" % ("{: >7}" * confusion_matrix.size(0))).format(
            str(i), id_to_tag[i], str(confusion_matrix[i].sum().item()),
            *([confusion_matrix[i][j] for j in range(confusion_matrix.size(0))] +
              ["%.3f" % (confusion_matrix[i][i] * 100. / max(1, confusion_matrix[i].sum()))])
        ))
# Time a full evaluation pass over the prepared test data.
t = time.time()
eval(model, test_data)
print(time.time() - t) | [
"torch.LongTensor",
"torch.load",
"pickle.load",
"optparse.OptionParser",
"torch.cuda.is_available",
"time.time"
] | [((189, 212), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (210, 212), False, 'import optparse\n'), ((1903, 1930), 'torch.load', 'torch.load', (['opts.model_path'], {}), '(opts.model_path)\n', (1913, 1930), False, 'import torch\n'), ((5002, 5013), 'time.time', 'time.time', ([], {}), '()\n', (5011, 5013), False, 'import time\n'), ((1031, 1045), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1042, 1045), False, 'import pickle\n'), ((1313, 1338), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1336, 1338), False, 'import torch\n'), ((5043, 5054), 'time.time', 'time.time', ([], {}), '()\n', (5052, 5054), False, 'import time\n'), ((3478, 3509), 'torch.LongTensor', 'torch.LongTensor', (["data['words']"], {}), "(data['words'])\n", (3494, 3509), False, 'import torch\n'), ((3536, 3558), 'torch.LongTensor', 'torch.LongTensor', (['caps'], {}), '(caps)\n', (3552, 3558), False, 'import torch\n'), ((3015, 3044), 'torch.LongTensor', 'torch.LongTensor', (['chars2_mask'], {}), '(chars2_mask)\n', (3031, 3044), False, 'import torch\n'), ((3420, 3449), 'torch.LongTensor', 'torch.LongTensor', (['chars2_mask'], {}), '(chars2_mask)\n', (3436, 3449), False, 'import torch\n')] |
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
def get_version(filename):
    """Extract the ``__version__`` string from a module source file.

    Scans *filename* for every ``__name__ = 'value'`` dunder assignment
    and returns the value bound to ``__version__`` (raises KeyError when
    no such assignment exists).
    """
    from re import findall
    with open(filename) as source:
        pairs = findall(r"__([a-z]+)__ = '([^']+)'", source.read())
    return dict(pairs)['version']
# -- Sphinx project configuration -------------------------------------------
project = 'carreralib'
copyright = '2015-2017 <NAME>'
# Read the version straight from the package so it is defined in one place.
# Use a str path: the original passed a bytes literal (b'...'), which only
# works by accident -- bytes filesystem paths are discouraged in Python 3.
version = get_version('../carreralib/__init__.py')
release = version
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.todo'
]
exclude_patterns = ['_build']
master_doc = 'index'
html_theme = 'default'
| [
"os.path.abspath"
] | [((41, 62), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (56, 62), False, 'import os\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
@author: sherlock
@contact: <EMAIL>
"""
import logging
import os
import sys
sys.path.append('.')
from fastreid.config import get_cfg
from projects.InterpretationReID.interpretationreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from fastreid.utils.checkpoint import Checkpointer
from projects.InterpretationReID.interpretationreid.evaluation import ReidEvaluator
import projects.InterpretationReID.interpretationreid as PII
from fastreid.utils.logger import setup_logger
class Trainer(DefaultTrainer):
    """DefaultTrainer specialisation wiring in the InterpretationReID
    data loaders and evaluator."""
    @classmethod
    def build_train_loader(cls, cfg):
        """Build the training data loader via the project-specific factory.

        Returns:
            iterable
        """
        logging.getLogger(__name__).info("Prepare training set")
        return PII.add_build_reid_train_loader(cfg)
    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
        """Build the test data loader for *dataset_name*.

        Returns:
            iterable
        """
        return PII.add_build_reid_test_loader(cfg, dataset_name)
    @classmethod
    def build_evaluator(cls, cfg, num_query, output_folder=None):
        """Create the ReID evaluator used during testing."""
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        # NOTE(review): output_folder is computed but never handed to
        # ReidEvaluator -- confirm whether the evaluator should receive it.
        return ReidEvaluator(cfg, num_query)
def setup(args):
    """Build and freeze the run configuration from command-line arguments.

    Applies the InterpretationReID extensions, then layers the config file
    and CLI overrides on top before freezing.
    """
    config = get_cfg()
    PII.add_interpretation_config(config)
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    return config
def main(args):
    """Entry point for one (possibly distributed) worker.

    In --eval-only mode the trained weights are loaded and the model is
    evaluated; otherwise a Trainer is built and training runs to
    completion.  Dead commented-out debug code from the original has
    been removed.
    """
    cfg = setup(args)
    if args.eval_only:
        cfg.defrost()
        cfg.MODEL.BACKBONE.PRETRAIN = False
        model = Trainer.build_model(cfg)
        # Load trained weights before evaluation.
        Checkpointer(model).load(cfg.MODEL.WEIGHTS)
        return Trainer.test(cfg, model)
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.INTERPRETATION.PRETRAIN_MODEL:
        trainer.load_n_or_not()
    return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| [
"logging.getLogger",
"projects.InterpretationReID.interpretationreid.add_build_reid_test_loader",
"fastreid.utils.checkpoint.Checkpointer",
"projects.InterpretationReID.interpretationreid.add_build_reid_train_loader",
"projects.InterpretationReID.interpretationreid.add_interpretation_config",
"os.path.joi... | [((123, 143), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (138, 143), False, 'import sys\n'), ((1635, 1644), 'fastreid.config.get_cfg', 'get_cfg', ([], {}), '()\n', (1642, 1644), False, 'from fastreid.config import get_cfg\n'), ((1649, 1683), 'projects.InterpretationReID.interpretationreid.add_interpretation_config', 'PII.add_interpretation_config', (['cfg'], {}), '(cfg)\n', (1678, 1683), True, 'import projects.InterpretationReID.interpretationreid as PII\n'), ((1782, 1806), 'projects.InterpretationReID.interpretationreid.engine.default_setup', 'default_setup', (['cfg', 'args'], {}), '(cfg, args)\n', (1795, 1806), False, 'from projects.InterpretationReID.interpretationreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((2875, 3009), 'projects.InterpretationReID.interpretationreid.engine.launch', 'launch', (['main', 'args.num_gpus'], {'num_machines': 'args.num_machines', 'machine_rank': 'args.machine_rank', 'dist_url': 'args.dist_url', 'args': '(args,)'}), '(main, args.num_gpus, num_machines=args.num_machines, machine_rank=\n args.machine_rank, dist_url=args.dist_url, args=(args,))\n', (2881, 3009), False, 'from projects.InterpretationReID.interpretationreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((852, 879), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (869, 879), False, 'import logging\n'), ((939, 975), 'projects.InterpretationReID.interpretationreid.add_build_reid_train_loader', 'PII.add_build_reid_train_loader', (['cfg'], {}), '(cfg)\n', (970, 975), True, 'import projects.InterpretationReID.interpretationreid as PII\n'), ((1257, 1306), 'projects.InterpretationReID.interpretationreid.add_build_reid_test_loader', 'PII.add_build_reid_test_loader', (['cfg', 'dataset_name'], {}), '(cfg, dataset_name)\n', (1287, 1306), True, 'import projects.InterpretationReID.interpretationreid as PII\n'), ((1511, 1540), 
'projects.InterpretationReID.interpretationreid.evaluation.ReidEvaluator', 'ReidEvaluator', (['cfg', 'num_query'], {}), '(cfg, num_query)\n', (1524, 1540), False, 'from projects.InterpretationReID.interpretationreid.evaluation import ReidEvaluator\n'), ((1454, 1495), 'os.path.join', 'os.path.join', (['cfg.OUTPUT_DIR', '"""inference"""'], {}), "(cfg.OUTPUT_DIR, 'inference')\n", (1466, 1495), False, 'import os\n'), ((2794, 2819), 'projects.InterpretationReID.interpretationreid.engine.default_argument_parser', 'default_argument_parser', ([], {}), '()\n', (2817, 2819), False, 'from projects.InterpretationReID.interpretationreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch\n'), ((2002, 2021), 'fastreid.utils.checkpoint.Checkpointer', 'Checkpointer', (['model'], {}), '(model)\n', (2014, 2021), False, 'from fastreid.utils.checkpoint import Checkpointer\n')] |
#!/usr/bin/env python
'''Tests for the likelihood.py module'''
from time import perf_counter_ns
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
from scipy.stats import gamma
import likelihood
# Fit parameters sized for the small fixtures: only the first two
# baseline-intensity bands are used (the NaN slots are never selected).
SMALL_FIT_PARAMS = {
    'baseline_intensities': np.asarray([1, 2, np.nan, np.nan]),
    'r_h': 1.5,
    'r_c': 0.5
}
# Integer-valued gamma parameters so expected values stay hand-checkable.
SIMPLE_DIST_PARAMS = {
    'self_excitation_shape': 2,
    'self_excitation_scale': 1,
    'discharge_excitation_shape': 3,
    'discharge_excitation_scale': 2
}
# Fixture CSVs with care-home IDs as the header row.
SMALL_CASES_FILE = 'tests/fixtures/small.csv'
SMALL_COVARIATES_FILE = 'tests/fixtures/small_covariates.csv'
# Parameters representative of a full-scale run (all four size bands).
LARGE_FIT_PARAMS = {
    'baseline_intensities': np.asarray([0.3, 0.4, 0.6, 0.9]),
    'r_h': 1.5,
    'r_c': 0.5
}
# Non-trivial gamma parameters used by the performance benchmarks.
FULL_DIST_PARAMS = {
    'self_excitation_shape': 2.6,
    'self_excitation_scale': 2.5,
    'discharge_excitation_shape': 2.6,
    'discharge_excitation_scale': 2.5
}
def test_gamma_pdf():
    '''Our gamma pdf must agree numerically with scipy's implementation.'''
    shape = FULL_DIST_PARAMS['self_excitation_shape']
    scale = FULL_DIST_PARAMS['self_excitation_scale']
    points = np.linspace(0, 10, 100)
    assert_almost_equal(
        gamma.pdf(points, a=shape, scale=scale),
        likelihood.gamma_pdf(points, shape, scale)
    )
@pytest.mark.parametrize(
    "test_element,result_dtype",
    [(123_456_789, np.uint32), (65_535, np.uint16), (255, np.uint8)]
)
def test_compactify(test_element, result_dtype):
    '''Compactifying must shrink to the smallest dtype that still holds the
    largest element, without changing any value.'''
    original = np.asarray([[1, 2], [3, test_element]], dtype=np.uint32)
    compacted = likelihood.compactify(original)
    assert compacted.dtype == result_dtype
    assert_array_equal(original, compacted)
def test_read_and_tidy_data():
    '''Reading a CSV with care-home IDs as the header row must yield the
    sorted IDs and the matching value columns.'''
    ids, values = likelihood.read_and_tidy_data(SMALL_CASES_FILE)
    assert_array_equal(ids, [14, 16, 35])
    expected_values = [[4, 1, 6], [4, 0, 3], [6, 66, 2]]
    assert_array_equal(values, expected_values)
@pytest.fixture
def small_cases():
    '''Get a small data file that could be cases or discharges.'''
    # Returns the (ids, values) pair produced by read_and_tidy_data.
    return likelihood.read_and_tidy_data(SMALL_CASES_FILE)
@pytest.fixture
def small_covariates():
    '''Get a small data file containing covariates.'''
    # Returns the (ids, values) pair for the covariates fixture CSV.
    return likelihood.read_and_tidy_data(SMALL_COVARIATES_FILE)
def test_carehome_intensity_null(small_cases, small_covariates):
    '''The null model maps each care home's size band straight to a constant
    baseline intensity, identical on every day.'''
    _, cases = small_cases
    _, covariates = small_covariates
    result = likelihood.carehome_intensity_null(
        covariates=covariates,
        cases=cases,
        fit_params=SMALL_FIT_PARAMS
    )
    expected = [[1, 2, 2], [1, 2, 2], [1, 2, 2]]
    assert_array_equal(result, expected)
def test_single_excitation(small_cases):
    '''Excitation terms of the form
    e_i(t) = \\sum_{s<t} f(t - s) triggers_i(s)
    must be accumulated correctly over time.'''
    _, cases = small_cases
    result = likelihood.single_excitation(cases, 2, 1)
    expected = [[0, 0, 0], [1.472, 0.368, 2.207], [2.554, 0.271, 2.728]]
    assert_almost_equal(result, expected, decimal=3)
def test_cached_single_excitation(small_cases):
    '''
    Test that the caching of the single_excitation function works correctly.
    '''
    _, cases = small_cases
    # The array is frozen read-only before caching -- presumably the cache
    # requires immutable inputs (see the writeable-flag handling in the
    # performance tests below) -- TODO confirm.
    cases.flags.writeable = False
    shape = SIMPLE_DIST_PARAMS['self_excitation_shape']
    scale = SIMPLE_DIST_PARAMS['self_excitation_scale']
    uncached_start = perf_counter_ns()
    uncached_excitation = likelihood.single_excitation(cases, shape, scale)
    uncached_end = perf_counter_ns()
    first_excitation = likelihood.cached_single_excitation(
        cases, shape, scale
    )
    # The cached result must match the plain computation.
    assert_array_equal(uncached_excitation, first_excitation)
    cached_start = perf_counter_ns()
    cached_excitation = likelihood.cached_single_excitation(
        cases, shape, scale
    )
    cached_end = perf_counter_ns()
    assert_array_equal(uncached_excitation, cached_excitation)
    # Cached version should be quicker
    # NOTE(review): wall-clock comparisons like this are inherently flaky
    # on loaded CI machines -- consider asserting object identity instead.
    assert (cached_end - cached_start) < (uncached_end - uncached_start)
def test_carehome_intensity_no_discharges(small_cases, small_covariates):
    '''With r_h unset, the intensity is the baseline plus the
    self-excitation term only.'''
    _, cases = small_cases
    _, covariates = small_covariates
    params_without_rh = {**SMALL_FIT_PARAMS, 'r_h': None}
    result = likelihood.carehome_intensity(
        covariates=covariates,
        cases=cases,
        fit_params=params_without_rh,
        dist_params=SIMPLE_DIST_PARAMS
    )
    expected = [[1, 2, 2], [1.736, 2.184, 3.104], [2.277, 2.135, 3.364]]
    assert_almost_equal(result, expected, decimal=3)
def test_carehome_intensity_with_discharges(small_cases, small_covariates):
    '''With discharge data supplied, the discharge-excitation term is added
    on top of the baseline and self-excitation terms.'''
    _, cases = small_cases
    _, covariates = small_covariates
    # Reversing the case series gives a distinct-but-related discharge series.
    result = likelihood.carehome_intensity(
        covariates=covariates,
        cases=cases,
        fit_params=SMALL_FIT_PARAMS,
        dist_params=SIMPLE_DIST_PARAMS,
        discharges=cases[::-1]
    )
    expected = [[1, 2, 2], [2.077, 5.937, 3.217], [3.332, 11.240, 3.810]]
    assert_almost_equal(result, expected, decimal=3)
@pytest.mark.parametrize("mean, cv, expected_shape, expected_scale",
                         [(1, 1, 1, 1), (6.5, 0.62, 2.601, 2.499)])
def test_calculate_gamma_parameters(mean, cv, expected_shape, expected_scale):
    '''Converting (mean, coefficient of variation) into scipy-style gamma
    (shape, scale) parameters must reproduce the known values.'''
    result = likelihood.calculate_gamma_parameters(mean, cv)
    assert_almost_equal(result, [expected_shape, expected_scale], decimal=3)
def test_likelihood():
    '''The log-likelihood of a small cases/intensity pair matches the
    known value.'''
    observed_cases = np.asarray([[3, 1, 0, 1], [1, 0, 2, 1], [0, 0, 0, 1]])
    intensities = np.asarray(
        [[1, 3, 1.5, 6], [4.2, 3.1, 7, 1.4], [2, 5.1, 4.2, 8.9]]
    )
    assert_almost_equal(
        likelihood.likelihood(intensities, observed_cases), -39.145, decimal=3
    )
def test_calculate_likelihood_from_files_no_discharges():
    '''End-to-end likelihood from the fixture files with the discharge
    term disabled (r_h unset).'''
    params = {**SMALL_FIT_PARAMS, 'r_h': None}
    assert_almost_equal(
        likelihood.calculate_likelihood_from_files(
            SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
            fit_params=params, dist_params=SIMPLE_DIST_PARAMS
        ),
        -187.443, decimal=3
    )
def test_calculate_likelihood_from_files_no_cases():
    '''End-to-end likelihood from the fixture files with the case
    (self-excitation) term disabled (r_c=0).'''
    params = {**SMALL_FIT_PARAMS, 'r_c': 0}
    assert_almost_equal(
        likelihood.calculate_likelihood_from_files(
            SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
            discharges_file=SMALL_CASES_FILE,
            fit_params=params, dist_params=SIMPLE_DIST_PARAMS
        ),
        -189.046, decimal=3
    )
def test_calculate_likelihood_from_files_no_discharges_or_cases():
    '''End-to-end likelihood from the fixture files with both excitation
    terms disabled: reduces to the baseline-only model.'''
    params = {**SMALL_FIT_PARAMS, 'r_h': None, 'r_c': 0}
    assert_almost_equal(
        likelihood.calculate_likelihood_from_files(
            SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
            fit_params=params, dist_params=SIMPLE_DIST_PARAMS
        ),
        -196.466, decimal=3
    )
def test_calculate_likelihood_from_files_with_discharges():
    '''End-to-end likelihood from the fixture files with both excitation
    terms active.'''
    assert_almost_equal(
        likelihood.calculate_likelihood_from_files(
            SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
            discharges_file=SMALL_CASES_FILE,
            fit_params=SMALL_FIT_PARAMS, dist_params=SIMPLE_DIST_PARAMS
        ),
        -182.761, decimal=3
    )
def test_calculate_likelihood_from_files_missing_discharges():
    '''Test that an error is generated when r_h is provided but discharge data
    are not'''
    # SMALL_FIT_PARAMS sets r_h=1.5 but no discharges_file is supplied,
    # which the library rejects with an AssertionError.
    with pytest.raises(AssertionError):
        likelihood.calculate_likelihood_from_files(
            SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
            fit_params=SMALL_FIT_PARAMS, dist_params=SIMPLE_DIST_PARAMS
        )
@pytest.mark.parametrize(
    'r_c, r_h, expect',
    [(0, 0, 196.466),
     (0.5, 1.5, 182.761),
     (0.5, 0, 187.443),
     (0, 1.5, 189.046)]
)
def test_fittable_likelihood(r_c, r_h, expect):
    '''The scipy-fittable closure must reproduce the known likelihood
    magnitudes for each (r_c, r_h) combination.'''
    fittable_likelihood = likelihood.get_fittable_likelihood(
        SMALL_CASES_FILE, SMALL_COVARIATES_FILE, SMALL_CASES_FILE
    )
    fit_params = np.asarray(
        [r_c, r_h, *SMALL_FIT_PARAMS['baseline_intensities']]
    )
    # Distribution parameters are passed positionally in this fixed order.
    dist_args = (
        SIMPLE_DIST_PARAMS['self_excitation_shape'],
        SIMPLE_DIST_PARAMS['self_excitation_scale'],
        SIMPLE_DIST_PARAMS['discharge_excitation_shape'],
        SIMPLE_DIST_PARAMS['discharge_excitation_scale'],
    )
    assert_almost_equal(
        fittable_likelihood(fit_params, *dist_args), expect, decimal=3
    )
@pytest.fixture
def large_test_data():
    '''Generate test data of the size expected from SAIL.'''
    # Scenario sizes: daily counts over ~6 months for 1000 care homes,
    # with cases/discharges concentrated in a subset of homes.
    max_categories = 4
    num_care_homes = 1000
    num_cases = 2000
    num_case_homes = 330
    num_discharges = 3000
    num_discharge_homes = 500
    num_days = 181
    num_covariates = 1
    max_carehome_id = 32767
    cases = np.zeros((num_days, num_care_homes), dtype=np.int8)
    discharges = np.zeros((num_days, num_care_homes), dtype=np.int8)
    covariates = np.zeros((num_covariates, num_care_homes), dtype=np.int8)
    # For runs with the same version of numpy, we should get the same
    # test data each time. Not guaranteed to work between versions
    # because default_rng can change.
    # NOTE: the exact sequence of rng calls below must not be reordered,
    # or the generated data (and any derived expectations) will change.
    rng = np.random.default_rng(seed=0)
    care_home_ids = rng.choice(
        max_carehome_id, size=num_care_homes, replace=False
    )
    # Scatter the events uniformly over (day, home) pairs, one at a time.
    for sample_array, num_instances, num_places in (
            (cases, num_cases, num_case_homes),
            (discharges, num_discharges, num_discharge_homes)
    ):
        for _ in range(num_instances):
            sample_array[rng.integers(num_days), rng.integers(num_places)] += 1
    covariates[0] = rng.choice(max_categories, size=num_care_homes)
    # Freeze the arrays read-only (the cache-enabled code paths rely on
    # the writeable flag -- see the performance tests below).
    for array in care_home_ids, cases, covariates, discharges:
        array.flags.writeable = False
    return care_home_ids, cases, covariates, discharges
def test_intensity_performance_base(large_test_data, benchmark):
    '''
    Benchmark the null-model intensity calculation on SAIL-sized data.
    '''
    _, cases, covariates, _ = large_test_data
    call_kwargs = dict(
        fit_params={**LARGE_FIT_PARAMS, 'r_h': None, 'r_c': None},
        covariates=covariates,
        cases=cases,
    )
    # Warm-up call so numba JIT compilation is excluded from the timing.
    likelihood.carehome_intensity_null(**call_kwargs)
    benchmark(likelihood.carehome_intensity_null, **call_kwargs)
@pytest.mark.parametrize("use_cache", [True, False])
def test_intensity_performance_self(large_test_data, benchmark, use_cache):
    '''
    Benchmark intensity with the self-excitation term, cached and uncached.
    '''
    _, cases, covariates, _ = large_test_data
    if not use_cache:
        # Writeable arrays are not cached, so this disables the cache.
        cases.flags.writeable = True
        covariates.flags.writeable = True
    call_kwargs = dict(
        fit_params={**LARGE_FIT_PARAMS, 'r_h': None},
        covariates=covariates,
        cases=cases,
        dist_params=FULL_DIST_PARAMS,
    )
    # Warm-up call so numba JIT compilation is excluded from the timing.
    likelihood.carehome_intensity(**call_kwargs)
    benchmark(likelihood.carehome_intensity, **call_kwargs)
@pytest.mark.parametrize("use_cache", [True, False])
def test_intensity_performance_hospitals(
        large_test_data, benchmark, use_cache
):
    '''
    Benchmark intensity with both the self- and discharge-excitation
    terms active, cached and uncached.
    '''
    _, cases, covariates, discharges = large_test_data
    if not use_cache:
        # Writeable arrays are not cached, so this disables the cache.
        cases.flags.writeable = True
        covariates.flags.writeable = True
        discharges.flags.writeable = True
    call_kwargs = dict(
        fit_params=LARGE_FIT_PARAMS,
        covariates=covariates,
        cases=cases,
        discharges=discharges,
        dist_params=FULL_DIST_PARAMS,
    )
    # Warm-up call so numba JIT compilation is excluded from the timing.
    likelihood.carehome_intensity(**call_kwargs)
    benchmark(likelihood.carehome_intensity, **call_kwargs)
def test_likelihood_performance(large_test_data, benchmark):
    '''
    Benchmark the likelihood computation given a precomputed intensity.
    '''
    _, cases, covariates, discharges = large_test_data
    precomputed_intensity = likelihood.carehome_intensity(
        fit_params=LARGE_FIT_PARAMS,
        covariates=covariates,
        cases=cases,
        discharges=discharges,
        dist_params=FULL_DIST_PARAMS
    )
    benchmark(likelihood.likelihood, precomputed_intensity, cases)
| [
"numpy.random.default_rng",
"likelihood.read_and_tidy_data",
"time.perf_counter_ns",
"likelihood.likelihood",
"scipy.stats.gamma.pdf",
"numpy.asarray",
"likelihood.cached_single_excitation",
"numpy.testing.assert_almost_equal",
"numpy.linspace",
"numpy.testing.assert_array_equal",
"likelihood.si... | [((1212, 1332), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_element,result_dtype"""', '[(123456789, np.uint32), (65535, np.uint16), (255, np.uint8)]'], {}), "('test_element,result_dtype', [(123456789, np.uint32\n ), (65535, np.uint16), (255, np.uint8)])\n", (1235, 1332), False, 'import pytest\n'), ((5434, 5548), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mean, cv, expected_shape, expected_scale"""', '[(1, 1, 1, 1), (6.5, 0.62, 2.601, 2.499)]'], {}), "('mean, cv, expected_shape, expected_scale', [(1, 1,\n 1, 1), (6.5, 0.62, 2.601, 2.499)])\n", (5457, 5548), False, 'import pytest\n'), ((8580, 8706), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""r_c, r_h, expect"""', '[(0, 0, 196.466), (0.5, 1.5, 182.761), (0.5, 0, 187.443), (0, 1.5, 189.046)]'], {}), "('r_c, r_h, expect', [(0, 0, 196.466), (0.5, 1.5, \n 182.761), (0.5, 0, 187.443), (0, 1.5, 189.046)])\n", (8603, 8706), False, 'import pytest\n'), ((11357, 11408), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_cache"""', '[True, False]'], {}), "('use_cache', [True, False])\n", (11380, 11408), False, 'import pytest\n'), ((12105, 12156), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_cache"""', '[True, False]'], {}), "('use_cache', [True, False])\n", (12128, 12156), False, 'import pytest\n'), ((297, 331), 'numpy.asarray', 'np.asarray', (['[1, 2, np.nan, np.nan]'], {}), '([1, 2, np.nan, np.nan])\n', (307, 331), True, 'import numpy as np\n'), ((688, 720), 'numpy.asarray', 'np.asarray', (['[0.3, 0.4, 0.6, 0.9]'], {}), '([0.3, 0.4, 0.6, 0.9])\n', (698, 720), True, 'import numpy as np\n'), ((956, 979), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (967, 979), True, 'import numpy as np\n'), ((1481, 1537), 'numpy.asarray', 'np.asarray', (['[[1, 2], [3, test_element]]'], {'dtype': 'np.uint32'}), '([[1, 2], [3, test_element]], dtype=np.uint32)\n', (1491, 1537), True, 'import numpy as 
np\n'), ((1551, 1579), 'likelihood.compactify', 'likelihood.compactify', (['array'], {}), '(array)\n', (1572, 1579), False, 'import likelihood\n'), ((1624, 1657), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['array', 'result'], {}), '(array, result)\n', (1642, 1657), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((1817, 1864), 'likelihood.read_and_tidy_data', 'likelihood.read_and_tidy_data', (['SMALL_CASES_FILE'], {}), '(SMALL_CASES_FILE)\n', (1846, 1864), False, 'import likelihood\n'), ((1869, 1906), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['ids', '[14, 16, 35]'], {}), '(ids, [14, 16, 35])\n', (1887, 1906), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((1911, 1973), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['values', '[[4, 1, 6], [4, 0, 3], [6, 66, 2]]'], {}), '(values, [[4, 1, 6], [4, 0, 3], [6, 66, 2]])\n', (1929, 1973), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((2110, 2157), 'likelihood.read_and_tidy_data', 'likelihood.read_and_tidy_data', (['SMALL_CASES_FILE'], {}), '(SMALL_CASES_FILE)\n', (2139, 2157), False, 'import likelihood\n'), ((2266, 2318), 'likelihood.read_and_tidy_data', 'likelihood.read_and_tidy_data', (['SMALL_COVARIATES_FILE'], {}), '(SMALL_COVARIATES_FILE)\n', (2295, 2318), False, 'import likelihood\n'), ((2611, 2714), 'likelihood.carehome_intensity_null', 'likelihood.carehome_intensity_null', ([], {'covariates': 'covariates', 'cases': 'cases', 'fit_params': 'SMALL_FIT_PARAMS'}), '(covariates=covariates, cases=cases,\n fit_params=SMALL_FIT_PARAMS)\n', (2645, 2714), False, 'import likelihood\n'), ((2745, 2809), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['intensity', '[[1, 2, 2], [1, 2, 2], [1, 2, 2]]'], {}), '(intensity, [[1, 2, 2], [1, 2, 2], [1, 2, 2]])\n', (2763, 2809), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((3027, 3068), 
'likelihood.single_excitation', 'likelihood.single_excitation', (['cases', '(2)', '(1)'], {}), '(cases, 2, 1)\n', (3055, 3068), False, 'import likelihood\n'), ((3073, 3179), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['excitation', '[[0, 0, 0], [1.472, 0.368, 2.207], [2.554, 0.271, 2.728]]'], {'decimal': '(3)'}), '(excitation, [[0, 0, 0], [1.472, 0.368, 2.207], [2.554, \n 0.271, 2.728]], decimal=3)\n', (3092, 3179), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((3543, 3560), 'time.perf_counter_ns', 'perf_counter_ns', ([], {}), '()\n', (3558, 3560), False, 'from time import perf_counter_ns\n'), ((3587, 3636), 'likelihood.single_excitation', 'likelihood.single_excitation', (['cases', 'shape', 'scale'], {}), '(cases, shape, scale)\n', (3615, 3636), False, 'import likelihood\n'), ((3656, 3673), 'time.perf_counter_ns', 'perf_counter_ns', ([], {}), '()\n', (3671, 3673), False, 'from time import perf_counter_ns\n'), ((3697, 3753), 'likelihood.cached_single_excitation', 'likelihood.cached_single_excitation', (['cases', 'shape', 'scale'], {}), '(cases, shape, scale)\n', (3732, 3753), False, 'import likelihood\n'), ((3772, 3829), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['uncached_excitation', 'first_excitation'], {}), '(uncached_excitation, first_excitation)\n', (3790, 3829), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((3850, 3867), 'time.perf_counter_ns', 'perf_counter_ns', ([], {}), '()\n', (3865, 3867), False, 'from time import perf_counter_ns\n'), ((3892, 3948), 'likelihood.cached_single_excitation', 'likelihood.cached_single_excitation', (['cases', 'shape', 'scale'], {}), '(cases, shape, scale)\n', (3927, 3948), False, 'import likelihood\n'), ((3980, 3997), 'time.perf_counter_ns', 'perf_counter_ns', ([], {}), '()\n', (3995, 3997), False, 'from time import perf_counter_ns\n'), ((4002, 4060), 'numpy.testing.assert_array_equal', 'assert_array_equal', 
(['uncached_excitation', 'cached_excitation'], {}), '(uncached_excitation, cached_excitation)\n', (4020, 4060), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((4496, 4626), 'likelihood.carehome_intensity', 'likelihood.carehome_intensity', ([], {'covariates': 'covariates', 'cases': 'cases', 'fit_params': 'fit_params_no_rh', 'dist_params': 'SIMPLE_DIST_PARAMS'}), '(covariates=covariates, cases=cases,\n fit_params=fit_params_no_rh, dist_params=SIMPLE_DIST_PARAMS)\n', (4525, 4626), False, 'import likelihood\n'), ((4665, 4770), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['intensity', '[[1, 2, 2], [1.736, 2.184, 3.104], [2.277, 2.135, 3.364]]'], {'decimal': '(3)'}), '(intensity, [[1, 2, 2], [1.736, 2.184, 3.104], [2.277, \n 2.135, 3.364]], decimal=3)\n', (4684, 4770), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((5099, 5257), 'likelihood.carehome_intensity', 'likelihood.carehome_intensity', ([], {'covariates': 'covariates', 'cases': 'cases', 'fit_params': 'SMALL_FIT_PARAMS', 'dist_params': 'SIMPLE_DIST_PARAMS', 'discharges': 'discharges'}), '(covariates=covariates, cases=cases,\n fit_params=SMALL_FIT_PARAMS, dist_params=SIMPLE_DIST_PARAMS, discharges\n =discharges)\n', (5128, 5257), False, 'import likelihood\n'), ((5299, 5403), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['intensity', '[[1, 2, 2], [2.077, 5.937, 3.217], [3.332, 11.24, 3.81]]'], {'decimal': '(3)'}), '(intensity, [[1, 2, 2], [2.077, 5.937, 3.217], [3.332, \n 11.24, 3.81]], decimal=3)\n', (5318, 5403), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((5784, 5831), 'likelihood.calculate_gamma_parameters', 'likelihood.calculate_gamma_parameters', (['mean', 'cv'], {}), '(mean, cv)\n', (5821, 5831), False, 'import likelihood\n'), ((5836, 5921), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['[shape, scale]', '[expected_shape, expected_scale]'], {'decimal': '(3)'}), 
'([shape, scale], [expected_shape, expected_scale], decimal=3\n )\n', (5855, 5921), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((6037, 6091), 'numpy.asarray', 'np.asarray', (['[[3, 1, 0, 1], [1, 0, 2, 1], [0, 0, 0, 1]]'], {}), '([[3, 1, 0, 1], [1, 0, 2, 1], [0, 0, 0, 1]])\n', (6047, 6091), True, 'import numpy as np\n'), ((6108, 6176), 'numpy.asarray', 'np.asarray', (['[[1, 3, 1.5, 6], [4.2, 3.1, 7, 1.4], [2, 5.1, 4.2, 8.9]]'], {}), '([[1, 3, 1.5, 6], [4.2, 3.1, 7, 1.4], [2, 5.1, 4.2, 8.9]])\n', (6118, 6176), True, 'import numpy as np\n'), ((6205, 6244), 'likelihood.likelihood', 'likelihood.likelihood', (['intensity', 'cases'], {}), '(intensity, cases)\n', (6226, 6244), False, 'import likelihood\n'), ((6249, 6296), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', '(-39.145)'], {'decimal': '(3)'}), '(result, -39.145, decimal=3)\n', (6268, 6296), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((6539, 6692), 'likelihood.calculate_likelihood_from_files', 'likelihood.calculate_likelihood_from_files', (['SMALL_CASES_FILE', 'SMALL_COVARIATES_FILE'], {'fit_params': 'fit_params_no_rh', 'dist_params': 'SIMPLE_DIST_PARAMS'}), '(SMALL_CASES_FILE,\n SMALL_COVARIATES_FILE, fit_params=fit_params_no_rh, dist_params=\n SIMPLE_DIST_PARAMS)\n', (6581, 6692), False, 'import likelihood\n'), ((6710, 6758), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', '(-187.443)'], {'decimal': '(3)'}), '(result, -187.443, decimal=3)\n', (6729, 6758), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((6988, 7175), 'likelihood.calculate_likelihood_from_files', 'likelihood.calculate_likelihood_from_files', (['SMALL_CASES_FILE', 'SMALL_COVARIATES_FILE'], {'discharges_file': 'SMALL_CASES_FILE', 'fit_params': 'fit_params_no_rh', 'dist_params': 'SIMPLE_DIST_PARAMS'}), '(SMALL_CASES_FILE,\n SMALL_COVARIATES_FILE, discharges_file=SMALL_CASES_FILE, fit_params=\n 
fit_params_no_rh, dist_params=SIMPLE_DIST_PARAMS)\n', (7030, 7175), False, 'import likelihood\n'), ((7201, 7249), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', '(-189.046)'], {'decimal': '(3)'}), '(result, -189.046, decimal=3)\n', (7220, 7249), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((7525, 7678), 'likelihood.calculate_likelihood_from_files', 'likelihood.calculate_likelihood_from_files', (['SMALL_CASES_FILE', 'SMALL_COVARIATES_FILE'], {'fit_params': 'fit_params_no_rh', 'dist_params': 'SIMPLE_DIST_PARAMS'}), '(SMALL_CASES_FILE,\n SMALL_COVARIATES_FILE, fit_params=fit_params_no_rh, dist_params=\n SIMPLE_DIST_PARAMS)\n', (7567, 7678), False, 'import likelihood\n'), ((7696, 7744), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', '(-196.466)'], {'decimal': '(3)'}), '(result, -196.466, decimal=3)\n', (7715, 7744), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((7928, 8115), 'likelihood.calculate_likelihood_from_files', 'likelihood.calculate_likelihood_from_files', (['SMALL_CASES_FILE', 'SMALL_COVARIATES_FILE'], {'discharges_file': 'SMALL_CASES_FILE', 'fit_params': 'SMALL_FIT_PARAMS', 'dist_params': 'SIMPLE_DIST_PARAMS'}), '(SMALL_CASES_FILE,\n SMALL_COVARIATES_FILE, discharges_file=SMALL_CASES_FILE, fit_params=\n SMALL_FIT_PARAMS, dist_params=SIMPLE_DIST_PARAMS)\n', (7970, 8115), False, 'import likelihood\n'), ((8141, 8189), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['result', '(-182.761)'], {'decimal': '(3)'}), '(result, -182.761, decimal=3)\n', (8160, 8189), False, 'from numpy.testing import assert_array_equal, assert_almost_equal\n'), ((8929, 9026), 'likelihood.get_fittable_likelihood', 'likelihood.get_fittable_likelihood', (['SMALL_CASES_FILE', 'SMALL_COVARIATES_FILE', 'SMALL_CASES_FILE'], {}), '(SMALL_CASES_FILE, SMALL_COVARIATES_FILE,\n SMALL_CASES_FILE)\n', (8963, 9026), False, 'import likelihood\n'), ((9054, 9119), 
'numpy.asarray', 'np.asarray', (["[r_c, r_h, *SMALL_FIT_PARAMS['baseline_intensities']]"], {}), "([r_c, r_h, *SMALL_FIT_PARAMS['baseline_intensities']])\n", (9064, 9119), True, 'import numpy as np\n'), ((9807, 9858), 'numpy.zeros', 'np.zeros', (['(num_days, num_care_homes)'], {'dtype': 'np.int8'}), '((num_days, num_care_homes), dtype=np.int8)\n', (9815, 9858), True, 'import numpy as np\n'), ((9876, 9927), 'numpy.zeros', 'np.zeros', (['(num_days, num_care_homes)'], {'dtype': 'np.int8'}), '((num_days, num_care_homes), dtype=np.int8)\n', (9884, 9927), True, 'import numpy as np\n'), ((9945, 10002), 'numpy.zeros', 'np.zeros', (['(num_covariates, num_care_homes)'], {'dtype': 'np.int8'}), '((num_covariates, num_care_homes), dtype=np.int8)\n', (9953, 10002), True, 'import numpy as np\n'), ((10189, 10218), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': '(0)'}), '(seed=0)\n', (10210, 10218), True, 'import numpy as np\n'), ((11249, 11293), 'likelihood.carehome_intensity_null', 'likelihood.carehome_intensity_null', ([], {}), '(**kwargs)\n', (11283, 11293), False, 'import likelihood\n'), ((12007, 12046), 'likelihood.carehome_intensity', 'likelihood.carehome_intensity', ([], {}), '(**kwargs)\n', (12036, 12046), False, 'import likelihood\n'), ((12854, 12893), 'likelihood.carehome_intensity', 'likelihood.carehome_intensity', ([], {}), '(**kwargs)\n', (12883, 12893), False, 'import likelihood\n'), ((13199, 13356), 'likelihood.carehome_intensity', 'likelihood.carehome_intensity', ([], {'fit_params': 'LARGE_FIT_PARAMS', 'covariates': 'covariates', 'cases': 'cases', 'discharges': 'discharges', 'dist_params': 'FULL_DIST_PARAMS'}), '(fit_params=LARGE_FIT_PARAMS, covariates=\n covariates, cases=cases, discharges=discharges, dist_params=\n FULL_DIST_PARAMS)\n', (13228, 13356), False, 'import likelihood\n'), ((1121, 1155), 'scipy.stats.gamma.pdf', 'gamma.pdf', (['x'], {'a': 'shape', 'scale': 'scale'}), '(x, a=shape, scale=scale)\n', (1130, 1155), False, 'from 
scipy.stats import gamma\n'), ((1165, 1202), 'likelihood.gamma_pdf', 'likelihood.gamma_pdf', (['x', 'shape', 'scale'], {}), '(x, shape, scale)\n', (1185, 1202), False, 'import likelihood\n'), ((8359, 8388), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (8372, 8388), False, 'import pytest\n'), ((8398, 8551), 'likelihood.calculate_likelihood_from_files', 'likelihood.calculate_likelihood_from_files', (['SMALL_CASES_FILE', 'SMALL_COVARIATES_FILE'], {'fit_params': 'SMALL_FIT_PARAMS', 'dist_params': 'SIMPLE_DIST_PARAMS'}), '(SMALL_CASES_FILE,\n SMALL_COVARIATES_FILE, fit_params=SMALL_FIT_PARAMS, dist_params=\n SIMPLE_DIST_PARAMS)\n', (8440, 8551), False, 'import likelihood\n')] |
import requests, os
from dotenv import load_dotenv
load_dotenv()
API_TOKEN = os.getenv('API_TOKEN')
def sendMessage(user_id: str, text: str, max_retries: int = 1):
url = f'https://api.telegram.org/bot{API_TOKEN}/sendMessage'
payload = {
"chat_id": user_id,
"text": text
}
for i in range(max_retries):
r = requests.get(url, params=payload)
isOk = False
try:
isOk = r.json()["ok"] == True
except:
pass
if isOk == True:
return isOk
return isOk | [
"requests.get",
"os.getenv",
"dotenv.load_dotenv"
] | [((52, 65), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (63, 65), False, 'from dotenv import load_dotenv\n'), ((79, 101), 'os.getenv', 'os.getenv', (['"""API_TOKEN"""'], {}), "('API_TOKEN')\n", (88, 101), False, 'import requests, os\n'), ((348, 381), 'requests.get', 'requests.get', (['url'], {'params': 'payload'}), '(url, params=payload)\n', (360, 381), False, 'import requests, os\n')] |
from numpy.testing import assert_array_almost_equal as array_assert
from badboids.boids import SimulationParameters
def test_simulation_parameters_init():
"""Tests Simulation Parameters constructor"""
# Arrange
formation_flying_distance = 800
formation_flying_strength = 0.10
alert_distance = 8
move_to_middle_strength = 0.2
delta_t = 1.5
# Act
sut = SimulationParameters(formation_flying_distance, formation_flying_strength, alert_distance,
move_to_middle_strength, delta_t)
# Assert
array_assert(sut.formation_flying_distance, formation_flying_distance)
array_assert(sut.formation_flying_strength, formation_flying_strength)
array_assert(sut.alert_distance, alert_distance)
array_assert(sut.move_to_middle_strength, move_to_middle_strength)
array_assert(sut.delta_t, delta_t)
def test_get_defaults():
"""Tests Simulation Parameters get defaults method"""
# Arrange
expected_formation_flying_distance = 10000
expected_formation_flying_strength = 0.125
expected_alert_distance = 100
expected_move_to_middle_strength = 0.01
expected_delta_t = 1.0
# Act
parameters = SimulationParameters.get_defaults()
# Assert
assert parameters.formation_flying_distance == expected_formation_flying_distance
assert parameters.formation_flying_strength == expected_formation_flying_strength
assert parameters.alert_distance == expected_alert_distance
assert parameters.move_to_middle_strength == expected_move_to_middle_strength
assert parameters.delta_t == expected_delta_t
| [
"badboids.boids.SimulationParameters",
"numpy.testing.assert_array_almost_equal",
"badboids.boids.SimulationParameters.get_defaults"
] | [((392, 520), 'badboids.boids.SimulationParameters', 'SimulationParameters', (['formation_flying_distance', 'formation_flying_strength', 'alert_distance', 'move_to_middle_strength', 'delta_t'], {}), '(formation_flying_distance, formation_flying_strength,\n alert_distance, move_to_middle_strength, delta_t)\n', (412, 520), False, 'from badboids.boids import SimulationParameters\n'), ((566, 636), 'numpy.testing.assert_array_almost_equal', 'array_assert', (['sut.formation_flying_distance', 'formation_flying_distance'], {}), '(sut.formation_flying_distance, formation_flying_distance)\n', (578, 636), True, 'from numpy.testing import assert_array_almost_equal as array_assert\n'), ((641, 711), 'numpy.testing.assert_array_almost_equal', 'array_assert', (['sut.formation_flying_strength', 'formation_flying_strength'], {}), '(sut.formation_flying_strength, formation_flying_strength)\n', (653, 711), True, 'from numpy.testing import assert_array_almost_equal as array_assert\n'), ((716, 764), 'numpy.testing.assert_array_almost_equal', 'array_assert', (['sut.alert_distance', 'alert_distance'], {}), '(sut.alert_distance, alert_distance)\n', (728, 764), True, 'from numpy.testing import assert_array_almost_equal as array_assert\n'), ((769, 835), 'numpy.testing.assert_array_almost_equal', 'array_assert', (['sut.move_to_middle_strength', 'move_to_middle_strength'], {}), '(sut.move_to_middle_strength, move_to_middle_strength)\n', (781, 835), True, 'from numpy.testing import assert_array_almost_equal as array_assert\n'), ((840, 874), 'numpy.testing.assert_array_almost_equal', 'array_assert', (['sut.delta_t', 'delta_t'], {}), '(sut.delta_t, delta_t)\n', (852, 874), True, 'from numpy.testing import assert_array_almost_equal as array_assert\n'), ((1202, 1237), 'badboids.boids.SimulationParameters.get_defaults', 'SimulationParameters.get_defaults', ([], {}), '()\n', (1235, 1237), False, 'from badboids.boids import SimulationParameters\n')] |
#-*- coding: utf-8 -*-
from django_town.core.settings import OAUTH2_SETTINGS
try:
if not OAUTH2_SETTINGS.ACCESS_TOKEN_SECRET_KEY:
raise ImportError
except KeyError:
# import traceback
# traceback.print_exc()
raise ImportError
from django.db import models
from django.conf import settings
from django.contrib import admin
from django_town.cache.model import CachingModel
from django_town.core.fields import JSONField
from django_town.utils import generate_random_from_vschar_set
class Service(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
# class ServiceSecretKey(CachingModel):
# cache_key_format = "_ut_o2ss:%(service__pk)d"
#
# service = models.ForeignKey(Service, unique=True)
# secret_key = models.CharField(max_length=OAUTH2_SETTINGS.SERVICE_SECRET_KEY_LENGTH,
# default=lambda: generate_random_from_vschar_set(
# OAUTH2_SETTINGS.SERVICE_SECRET_KEY_LENGTH))
def _generate_random_from_vschar_set_for_client_id():
return generate_random_from_vschar_set(OAUTH2_SETTINGS.CLIENT_ID_LENGTH)
def _generate_random_from_vschar_set_for_client_secret():
return generate_random_from_vschar_set(OAUTH2_SETTINGS.CLIENT_ID_LENGTH)
class Client(CachingModel):
IOS_CLIENT = 1
CLIENT_TYPE = (
(0, "Web"),
(1, "iOS"),
(2, "Android"),
(3, "Win"),
)
cache_key_format = "_ut_o2c:%(client_id)s"
name = models.CharField(max_length=200)
service = models.ForeignKey(Service)
client_id = models.CharField(max_length=OAUTH2_SETTINGS.CLIENT_ID_LENGTH, unique=True,
default=_generate_random_from_vschar_set_for_client_id)
client_secret = models.CharField(max_length=OAUTH2_SETTINGS.CLIENT_SECRET_LENGTH,
default=_generate_random_from_vschar_set_for_client_secret)
redirect_uris = JSONField(blank=True)
default_redirect_uri = models.URLField()
available_scope = JSONField(blank=True)
client_type = models.IntegerField(default=IOS_CLIENT, choices=CLIENT_TYPE)
client_min_version = models.CharField(max_length=20, default="")
client_cur_version = models.CharField(max_length=20, default="")
client_store_id = models.CharField(max_length=30, default="")
def __unicode__(self):
return self.name
def _generate_random_from_vschar_set_for_secret_key():
return generate_random_from_vschar_set(OAUTH2_SETTINGS.USER_SECRET_KEY_LENGTH)
class UserClientSecretKey(CachingModel):
cache_key_format = "_ut_o2u:%(user_id)d:%(client__pk)d"
user_id = models.IntegerField()
client = models.ForeignKey(Client)
secret_key = models.CharField(max_length=OAUTH2_SETTINGS.USER_SECRET_KEY_LENGTH,
default=_generate_random_from_vschar_set_for_secret_key)
unique_together = (("user_id", "client"),)
class Scope(models.Model):
name = models.CharField(max_length=30, unique=True)
class ClientAdmin(admin.ModelAdmin):
readonly_fields = ['client_id', 'client_secret']
admin.site.register(Client, admin.ModelAdmin)
admin.site.register(Service, admin.ModelAdmin)
| [
"django_town.utils.generate_random_from_vschar_set",
"django_town.core.fields.JSONField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.contrib.admin.site.register",
"django.db.models.URLField",
"django.db.models.CharField"
] | [((3154, 3199), 'django.contrib.admin.site.register', 'admin.site.register', (['Client', 'admin.ModelAdmin'], {}), '(Client, admin.ModelAdmin)\n', (3173, 3199), False, 'from django.contrib import admin\n'), ((3200, 3246), 'django.contrib.admin.site.register', 'admin.site.register', (['Service', 'admin.ModelAdmin'], {}), '(Service, admin.ModelAdmin)\n', (3219, 3246), False, 'from django.contrib import admin\n'), ((547, 579), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (563, 579), False, 'from django.db import models\n'), ((1109, 1174), 'django_town.utils.generate_random_from_vschar_set', 'generate_random_from_vschar_set', (['OAUTH2_SETTINGS.CLIENT_ID_LENGTH'], {}), '(OAUTH2_SETTINGS.CLIENT_ID_LENGTH)\n', (1140, 1174), False, 'from django_town.utils import generate_random_from_vschar_set\n'), ((1246, 1311), 'django_town.utils.generate_random_from_vschar_set', 'generate_random_from_vschar_set', (['OAUTH2_SETTINGS.CLIENT_ID_LENGTH'], {}), '(OAUTH2_SETTINGS.CLIENT_ID_LENGTH)\n', (1277, 1311), False, 'from django_town.utils import generate_random_from_vschar_set\n'), ((1533, 1565), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1549, 1565), False, 'from django.db import models\n'), ((1580, 1606), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Service'], {}), '(Service)\n', (1597, 1606), False, 'from django.db import models\n'), ((1623, 1757), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 'OAUTH2_SETTINGS.CLIENT_ID_LENGTH', 'unique': '(True)', 'default': '_generate_random_from_vschar_set_for_client_id'}), '(max_length=OAUTH2_SETTINGS.CLIENT_ID_LENGTH, unique=True,\n default=_generate_random_from_vschar_set_for_client_id)\n', (1639, 1757), False, 'from django.db import models\n'), ((1807, 1937), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 'OAUTH2_SETTINGS.CLIENT_SECRET_LENGTH', 'default': 
'_generate_random_from_vschar_set_for_client_secret'}), '(max_length=OAUTH2_SETTINGS.CLIENT_SECRET_LENGTH, default=\n _generate_random_from_vschar_set_for_client_secret)\n', (1823, 1937), False, 'from django.db import models\n'), ((1986, 2007), 'django_town.core.fields.JSONField', 'JSONField', ([], {'blank': '(True)'}), '(blank=True)\n', (1995, 2007), False, 'from django_town.core.fields import JSONField\n'), ((2035, 2052), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (2050, 2052), False, 'from django.db import models\n'), ((2075, 2096), 'django_town.core.fields.JSONField', 'JSONField', ([], {'blank': '(True)'}), '(blank=True)\n', (2084, 2096), False, 'from django_town.core.fields import JSONField\n'), ((2115, 2175), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'IOS_CLIENT', 'choices': 'CLIENT_TYPE'}), '(default=IOS_CLIENT, choices=CLIENT_TYPE)\n', (2134, 2175), False, 'from django.db import models\n'), ((2201, 2244), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '""""""'}), "(max_length=20, default='')\n", (2217, 2244), False, 'from django.db import models\n'), ((2270, 2313), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '""""""'}), "(max_length=20, default='')\n", (2286, 2313), False, 'from django.db import models\n'), ((2336, 2379), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'default': '""""""'}), "(max_length=30, default='')\n", (2352, 2379), False, 'from django.db import models\n'), ((2502, 2573), 'django_town.utils.generate_random_from_vschar_set', 'generate_random_from_vschar_set', (['OAUTH2_SETTINGS.USER_SECRET_KEY_LENGTH'], {}), '(OAUTH2_SETTINGS.USER_SECRET_KEY_LENGTH)\n', (2533, 2573), False, 'from django_town.utils import generate_random_from_vschar_set\n'), ((2692, 2713), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2711, 2713), False, 'from django.db 
import models\n'), ((2727, 2752), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Client'], {}), '(Client)\n', (2744, 2752), False, 'from django.db import models\n'), ((2770, 2899), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 'OAUTH2_SETTINGS.USER_SECRET_KEY_LENGTH', 'default': '_generate_random_from_vschar_set_for_secret_key'}), '(max_length=OAUTH2_SETTINGS.USER_SECRET_KEY_LENGTH, default\n =_generate_random_from_vschar_set_for_secret_key)\n', (2786, 2899), False, 'from django.db import models\n'), ((3016, 3060), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'unique': '(True)'}), '(max_length=30, unique=True)\n', (3032, 3060), False, 'from django.db import models\n')] |
# List files in a directory. Useful for testing the path
from local_tools import *
from image_renamer import ImageRenamer
if confirm_config('path'):
img_path = get_config('path')
fl = ImageRenamer(img_path)
for ff in fl.image_files():
print(ff)
| [
"image_renamer.ImageRenamer"
] | [((192, 214), 'image_renamer.ImageRenamer', 'ImageRenamer', (['img_path'], {}), '(img_path)\n', (204, 214), False, 'from image_renamer import ImageRenamer\n')] |
#!/usr/bin/env python3
import logging
import argparse
from pathlib import Path
from vosk import list_models, list_languages
from vosk.transcriber.transcriber import Transcriber
parser = argparse.ArgumentParser(
description = 'Transcribe audio file and save result in selected format')
parser.add_argument(
'--model', '-m', type=str,
help='model path')
parser.add_argument(
'--list-models', default=False, action='store_true',
help='list available models')
parser.add_argument(
'--list-languages', default=False, action='store_true',
help='list available languages')
parser.add_argument(
'--model-name', '-n', type=str,
help='select model by name')
parser.add_argument(
'--lang', '-l', default='en-us', type=str,
help='select model by language')
parser.add_argument(
'--input', '-i', type=str,
help='audiofile')
parser.add_argument(
'--output', '-o', default='', type=str,
help='optional output filename path')
parser.add_argument(
'--output-type', '-t', default='txt', type=str,
help='optional arg output data type')
parser.add_argument(
'--log-level', default='INFO',
help='logging level')
def main():
args = parser.parse_args()
log_level = args.log_level.upper()
logging.getLogger().setLevel(log_level)
if args.list_models == True:
list_models()
return
if args.list_languages == True:
list_languages()
return
if not args.input:
logging.info('Please specify input file or directory')
exit(1)
if not Path(args.input).exists():
logging.info("File/folder '%s' does not exist, please specify an existing file/directory" % (args.input))
exit(1)
transcriber = Transcriber(args)
if Path(args.input).is_dir():
transcriber.process_dir(args)
return
elif Path(args.input).is_file():
transcriber.process_file(args)
else:
logging.info('Wrong arguments')
exit(1)
if __name__ == "__main__":
main()
| [
"vosk.list_languages",
"logging.getLogger",
"argparse.ArgumentParser",
"pathlib.Path",
"vosk.list_models",
"vosk.transcriber.transcriber.Transcriber",
"logging.info"
] | [((189, 289), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Transcribe audio file and save result in selected format"""'}), "(description=\n 'Transcribe audio file and save result in selected format')\n", (212, 289), False, 'import argparse\n'), ((1814, 1831), 'vosk.transcriber.transcriber.Transcriber', 'Transcriber', (['args'], {}), '(args)\n', (1825, 1831), False, 'from vosk.transcriber.transcriber import Transcriber\n'), ((1417, 1430), 'vosk.list_models', 'list_models', ([], {}), '()\n', (1428, 1430), False, 'from vosk import list_models, list_languages\n'), ((1491, 1507), 'vosk.list_languages', 'list_languages', ([], {}), '()\n', (1505, 1507), False, 'from vosk import list_models, list_languages\n'), ((1555, 1609), 'logging.info', 'logging.info', (['"""Please specify input file or directory"""'], {}), "('Please specify input file or directory')\n", (1567, 1609), False, 'import logging\n'), ((1673, 1786), 'logging.info', 'logging.info', (['("File/folder \'%s\' does not exist, please specify an existing file/directory" %\n args.input)'], {}), '(\n "File/folder \'%s\' does not exist, please specify an existing file/directory"\n % args.input)\n', (1685, 1786), False, 'import logging\n'), ((1335, 1354), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1352, 1354), False, 'import logging\n'), ((1840, 1856), 'pathlib.Path', 'Path', (['args.input'], {}), '(args.input)\n', (1844, 1856), False, 'from pathlib import Path\n'), ((2014, 2045), 'logging.info', 'logging.info', (['"""Wrong arguments"""'], {}), "('Wrong arguments')\n", (2026, 2045), False, 'import logging\n'), ((1638, 1654), 'pathlib.Path', 'Path', (['args.input'], {}), '(args.input)\n', (1642, 1654), False, 'from pathlib import Path\n'), ((1929, 1945), 'pathlib.Path', 'Path', (['args.input'], {}), '(args.input)\n', (1933, 1945), False, 'from pathlib import Path\n')] |
import numpy as np
import cv2
from imutils.object_detection import non_max_suppression
import pytesseract
from matplotlib import pyplot as plt
def ocr(images):
results = []
for image in images:
args = {"image": image, "east": "frozen_east_text_detection.pb", "min_confidence": 0.5, "width": 320,
"height": 320}
args['image'] = image
image = cv2.imread(args['image'])
orig = image.copy()
(origH, origW) = image.shape[:2]
(newW, newH) = (args["width"], args["height"])
rW = origW / float(newW)
rH = origH / float(newH)
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
(123.68, 116.78, 103.94), swapRB=True, crop=False)
net = cv2.dnn.readNet(args["east"])
layerNames = [
"feature_fusion/Conv_7/Sigmoid",
"feature_fusion/concat_3"]
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
def predictions(prob_score, geo):
(numR, numC) = prob_score.shape[2:4]
boxes = []
confidence_val = []
for y in range(0, numR):
scoresData = prob_score[0, 0, y]
x0 = geo[0, 0, y]
x1 = geo[0, 1, y]
x2 = geo[0, 2, y]
x3 = geo[0, 3, y]
anglesData = geo[0, 4, y]
for i in range(0, numC):
if scoresData[i] < args["min_confidence"]:
continue
(offX, offY) = (i * 4.0, y * 4.0)
angle = anglesData[i]
cos = np.cos(angle)
sin = np.sin(angle)
h = x0[i] + x2[i]
w = x1[i] + x3[i]
endX = int(offX + (cos * x1[i]) + (sin * x2[i]))
endY = int(offY - (sin * x1[i]) + (cos * x2[i]))
startX = int(endX - w)
startY = int(endY - h)
boxes.append((startX, startY, endX, endY))
confidence_val.append(scoresData[i])
return (boxes, confidence_val)
(boxes, confidence_val) = predictions(scores, geometry)
boxes = non_max_suppression(np.array(boxes), probs=confidence_val)
result = []
for (startX, startY, endX, endY) in boxes:
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
r = orig[startY:endY, startX:endX]
configuration = ("-l eng --oem 1 --psm 8")
text = pytesseract.image_to_string(r, config=configuration)
result.append(text)
results.append(result)
return results
print(ocr(["./images/car_wash.png"])) | [
"cv2.dnn.blobFromImage",
"numpy.array",
"numpy.cos",
"pytesseract.image_to_string",
"numpy.sin",
"cv2.resize",
"cv2.imread",
"cv2.dnn.readNet"
] | [((391, 416), 'cv2.imread', 'cv2.imread', (["args['image']"], {}), "(args['image'])\n", (401, 416), False, 'import cv2\n'), ((627, 658), 'cv2.resize', 'cv2.resize', (['image', '(newW, newH)'], {}), '(image, (newW, newH))\n', (637, 658), False, 'import cv2\n'), ((708, 805), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image', '(1.0)', '(W, H)', '(123.68, 116.78, 103.94)'], {'swapRB': '(True)', 'crop': '(False)'}), '(image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=\n True, crop=False)\n', (729, 805), False, 'import cv2\n'), ((853, 882), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (["args['east']"], {}), "(args['east'])\n", (868, 882), False, 'import cv2\n'), ((2370, 2385), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (2378, 2385), True, 'import numpy as np\n'), ((2749, 2801), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['r'], {'config': 'configuration'}), '(r, config=configuration)\n', (2776, 2801), False, 'import pytesseract\n'), ((1747, 1760), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1753, 1760), True, 'import numpy as np\n'), ((1787, 1800), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1793, 1800), True, 'import numpy as np\n')] |
""" Tests for validators
"""
from decimal import Decimal
from unittest.mock import Mock
import random
import string
from styler_validation import validators as va
from styler_validation import messages as msg
class MyModel:
pass
class TestIsRequired:
def test_is_required(self):
val = va.is_required()
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.REQUIRED_VALUE,)
def test_valid(self):
val = va.is_required()
model = MyModel()
model.prop = 'something'
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_accepts(self):
val = va.is_required(accepts={0})
model = MyModel()
model.prop = 0
valid, error = val(model, 'prop')
assert valid
assert error is None
class TestIsInteger:
    """Tests for the ``is_integer`` validator."""

    def test_is_integer(self):
        """A numeric string passes."""
        check = va.is_integer()
        target = MyModel()
        target.prop = '123'

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_is_none(self):
        """``None`` passes without error."""
        check = va.is_integer()
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_type_mismatch(self):
        """A value of an unconvertible type (a set) is rejected."""
        check = va.is_integer()
        target = MyModel()
        target.prop = {'123'}

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)

    def test_value_mismatch(self):
        """A string that is not a valid integer is rejected."""
        check = va.is_integer()
        target = MyModel()
        target.prop = 'abc123'

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)
class TestIsBetween:
    """Tests for the ``is_between`` range validator."""

    def test_is_between(self):
        """A value inside the [min_, max_] interval passes."""
        check = va.is_between(min_=0, max_=10)
        target = MyModel()
        target.prop = 2

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_outside_interval(self):
        """Values outside the interval report the violated bound."""
        check = va.is_between(min_=0, max_=10)
        target = MyModel()
        target.prop = 20
        target.prop2 = -1

        ok, err = check(target, 'prop')
        ok2, err2 = check(target, 'prop2')

        assert not ok
        assert err == (msg.LESS_OR_EQUAL_THAN, 10)
        assert not ok2
        assert err2 == (msg.GREATER_OR_EQUAL_THAN, 0)

    def test_no_interval_set(self):
        """Without configured bounds every value passes."""
        check = va.is_between()
        target = MyModel()
        target.prop = 20
        target.prop2 = -1

        ok, err = check(target, 'prop')
        ok2, err2 = check(target, 'prop2')

        assert ok
        assert err is None
        assert ok2
        assert err2 is None

    def test_one_sided_interval(self):
        """Only the configured bound of a one-sided interval is enforced."""
        lower_only = va.is_between(min_=0)
        upper_only = va.is_between(max_=10)
        target = MyModel()
        target.prop = 20
        target.prop2 = -1

        ok, err = lower_only(target, 'prop')
        ok2, err2 = lower_only(target, 'prop2')
        assert ok
        assert err is None
        assert not ok2
        assert err2 == (msg.GREATER_OR_EQUAL_THAN, 0)

        ok, err = upper_only(target, 'prop')
        ok2, err2 = upper_only(target, 'prop2')
        assert not ok
        assert err == (msg.LESS_OR_EQUAL_THAN, 10)
        assert ok2
        assert err2 is None

    def test_none(self):
        """``None`` passes without error."""
        check = va.is_between(min_=0, max_=10)
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_type_mismatch(self):
        """A non-comparable value (a set) is rejected."""
        check = va.is_between(min_=0, max_=10)
        target = MyModel()
        target.prop = {'123'}

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)
class TestIsInside:
    """Tests for the ``is_inside`` membership validator."""

    def test_is_inside(self):
        """A value contained in the accepted set passes."""
        check = va.is_inside(accepted={'a', 'b'})
        target = MyModel()
        target.prop = 'b'

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_not_inside(self):
        """A value outside the accepted set is rejected."""
        check = va.is_inside(accepted={'a', 'b'})
        target = MyModel()
        target.prop = 'c'

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)

    def test_none(self):
        """``None`` passes even though it is not in the accepted set."""
        check = va.is_inside(accepted={'a', 'b'})
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None
class TestIsOfType:
    """Tests for the ``is_of_type`` validator."""

    def test_is_of_type(self):
        """An instance of the expected type passes."""
        check = va.is_of_type(Decimal)
        target = MyModel()
        target.prop = Decimal('12.33')

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_not_type(self):
        """An instance of a different type is rejected."""
        check = va.is_of_type(Decimal)
        target = MyModel()
        target.prop = '12.33'

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)

    def test_none(self):
        """``None`` passes without error."""
        check = va.is_of_type(Decimal)
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None
class TestIsMoney:
    """Tests for the ``is_money`` validator."""

    def test_is_money(self):
        """A positive Decimal amount passes."""
        check = va.is_money()
        target = MyModel()
        target.prop = Decimal('12.33')

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_not_allow_zero(self):
        """With allow_zero=False a zero amount reports NOT_ZERO."""
        check = va.is_money(allow_zero=False)
        target = MyModel()
        target.prop = Decimal('0.0')

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.NOT_ZERO,)

    def test_negative(self):
        """A negative amount reports POSITIVE_VALUE."""
        check = va.is_money()
        target = MyModel()
        target.prop = Decimal('-12.33')

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.POSITIVE_VALUE,)

    def test_none(self):
        """``None`` passes without error."""
        check = va.is_money()
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_type_mismatch(self):
        """A value of an unconvertible type (a set) is rejected."""
        check = va.is_money()
        target = MyModel()
        target.prop = {'sdfads'}

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)

    def test_value_mismatch(self):
        """A string that is not a valid amount is rejected."""
        check = va.is_money()
        target = MyModel()
        target.prop = 'sdfads'

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)
class TestIsValidTime:
    """Tests for the ``is_valid_time`` validator."""

    def test_is_valid_time(self):
        """A well-formed HH:MM string passes."""
        check = va.is_valid_time()
        target = MyModel()
        target.prop = '12:33'

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_not_valid(self):
        """A string with an out-of-range minute value is rejected."""
        check = va.is_valid_time()
        target = MyModel()
        target.prop = '12:73'

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_TIME,)

    def test_none(self):
        """``None`` passes without error."""
        check = va.is_valid_time()
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None
class TestIsGreaterThanField:
    """Tests for the ``is_greater_than_field`` cross-field validator."""

    def test_is_greater_than_field(self):
        """Passes when the value is strictly greater than the other field."""
        check = va.is_greater_than_field('prop2')
        target = MyModel()
        target.prop = 333
        target.prop2 = 222

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_not_valid(self):
        """Fails with a dotted reference to the compared field."""
        check = va.is_greater_than_field('prop2')
        target = MyModel()
        target.prop = 11
        target.prop2 = 12

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.GREATER_THAN, 'mymodel.prop2')

    def test_none(self):
        """A ``None`` comparison field is rejected as invalid."""
        check = va.is_greater_than_field('prop2')
        target = MyModel()
        target.prop = 1
        target.prop2 = None

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)

    def test_default(self):
        """With default=True a ``None`` comparison field passes."""
        check = va.is_greater_than_field('prop2', default=True)
        target = MyModel()
        target.prop = 1
        target.prop2 = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_allow_equal(self):
        """With allow_equal=True equal values pass."""
        check = va.is_greater_than_field('prop2', allow_equal=True)
        target = MyModel()
        target.prop = 1
        target.prop2 = 1

        ok, err = check(target, 'prop')

        assert ok
        assert err is None
class TestIsLessThanField:
    """Tests for the ``is_less_than_field`` cross-field validator."""

    def test_is_less_than_field(self):
        """Passes when the value is strictly less than the other field."""
        check = va.is_less_than_field('prop2')
        target = MyModel()
        target.prop = 111
        target.prop2 = 222

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_not_valid(self):
        """Fails with a dotted reference to the compared field."""
        check = va.is_less_than_field('prop2')
        target = MyModel()
        target.prop = 13
        target.prop2 = 12

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.LESS_THAN, 'mymodel.prop2')

    def test_none(self):
        """A ``None`` comparison field is rejected as invalid."""
        check = va.is_less_than_field('prop2')
        target = MyModel()
        target.prop = 1
        target.prop2 = None

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)

    def test_default(self):
        """With default=True a ``None`` comparison field passes."""
        check = va.is_less_than_field('prop2', default=True)
        target = MyModel()
        target.prop = 1
        target.prop2 = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_allow_equal(self):
        """With allow_equal=True equal values pass."""
        check = va.is_less_than_field('prop2', allow_equal=True)
        target = MyModel()
        target.prop = 1
        target.prop2 = 1

        ok, err = check(target, 'prop')

        assert ok
        assert err is None
class TestIsGreaterThanNumber:
    """Tests for the ``is_greater_than_number`` validator."""

    def test_is_greater_than_number(self):
        """Passes when the value is strictly greater than the limit."""
        check = va.is_greater_than_number(10)
        target = MyModel()
        target.prop = 111

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_not_valid(self):
        """Fails with the limit in the error tuple."""
        check = va.is_greater_than_number(10)
        target = MyModel()
        target.prop = 1

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.GREATER_THAN, 10)

    def test_none(self):
        """``None`` is rejected as invalid."""
        check = va.is_greater_than_number(10)
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)

    def test_default(self):
        """With default=True a ``None`` value passes."""
        check = va.is_greater_than_number(10, default=True)
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_allow_equal(self):
        """With allow_equal=True a value equal to the limit passes."""
        check = va.is_greater_than_number(10, allow_equal=True)
        target = MyModel()
        target.prop = 10

        ok, err = check(target, 'prop')

        assert ok
        assert err is None
class TestIsLessThanNumber:
    """Tests for the ``is_less_than_number`` validator."""

    def test_is_less_than_number(self):
        """Passes when the value is strictly less than the limit."""
        check = va.is_less_than_number(10)
        target = MyModel()
        target.prop = 1

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_not_valid(self):
        """Fails with the limit in the error tuple."""
        check = va.is_less_than_number(10)
        target = MyModel()
        target.prop = 11

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.LESS_THAN, 10)

    def test_none(self):
        """``None`` is rejected as invalid."""
        check = va.is_less_than_number(10)
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)

    def test_default(self):
        """With default=True a ``None`` value passes."""
        check = va.is_less_than_number(10, default=True)
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_allow_equal(self):
        """With allow_equal=True a value equal to the limit passes."""
        check = va.is_less_than_number(10, allow_equal=True)
        target = MyModel()
        target.prop = 10

        ok, err = check(target, 'prop')

        assert ok
        assert err is None
class TestIsNotEmpty:
    """Tests for the ``is_not_empty`` validator."""

    def test_is_not_empty(self):
        """A non-blank string passes."""
        check = va.is_not_empty()
        target = MyModel()
        target.prop = 'something'

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_invalid(self):
        """A whitespace-only string reports NOT_EMPTY."""
        check = va.is_not_empty()
        target = MyModel()
        target.prop = '   '

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.NOT_EMPTY,)

    def test_none(self):
        """A non-string value is rejected as invalid."""
        # NOTE(review): despite its name, this test uses 0 rather than None.
        check = va.is_not_empty()
        target = MyModel()
        target.prop = 0

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)

    def test_default(self):
        """With default=True a ``None`` value passes."""
        check = va.is_not_empty(default=True)
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None
class TestObjectValidator:
    """Tests for the ``object_validator`` child-object validator."""

    def test_child_object(self):
        """A single valid child yields success and an empty error dict."""
        child = Mock()
        child.is_valid.return_value = (True, {})
        target = MyModel()
        target.prop = child

        ok, err = va.object_validator()(target, 'prop')

        assert ok
        assert err == {}
        child.is_valid.assert_called_once()

    def test_multiple_child_objects(self):
        """Errors from every invalid child in a list are merged."""
        children = [Mock(), Mock(), Mock()]
        children[0].is_valid.return_value = (False, {'error1': 'error'})
        children[1].is_valid.return_value = (True, {})
        children[2].is_valid.return_value = (False, {'error3': 'error'})
        target = MyModel()
        target.prop = children

        ok, err = va.object_validator()(target, 'prop')

        assert not ok
        assert err == {
            'error1': 'error',
            'error3': 'error'
        }
        for child in children:
            child.is_valid.assert_called_once()

    def test_none(self):
        """``None`` passes with an empty error dict."""
        target = MyModel()
        target.prop = None

        ok, err = va.object_validator()(target, 'prop')

        assert ok
        assert err == {}
class TestIsUuid:
    """Tests for the ``is_uuid`` validator."""

    def test_is_uuid(self):
        """A canonical UUID string passes."""
        check = va.is_uuid()
        target = MyModel()
        target.prop = '42fb4cf1-bd85-469c-8266-9dfcd54796a4'

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_invalid(self):
        """An arbitrary string is rejected."""
        check = va.is_uuid()
        target = MyModel()
        target.prop = 'anything'

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)

    def test_none(self):
        """``None`` passes without error."""
        check = va.is_uuid()
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_type_mismatch(self):
        """A non-string value is rejected."""
        check = va.is_uuid()
        target = MyModel()
        target.prop = 1234

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)
class TestIf_:
    """Tests for the conditional ``if_`` validator combinator."""

    def test_if_true(self):
        """When the predicate is true the wrapped validator runs."""
        inner = Mock(return_value=(True, None))
        check = va.if_(lambda x: True, inner)
        target = MyModel()
        target.prop = '123'

        ok, err = check(target, 'prop')

        assert ok
        assert err is None
        inner.assert_called_once()

    def test_if_false(self):
        """When the predicate is false the wrapped validator is skipped."""
        inner = Mock(return_value=(True, None))
        check = va.if_(lambda x: False, inner)
        target = MyModel()
        target.prop = '123'

        ok, err = check(target, 'prop')

        assert ok
        assert err is None
        inner.assert_not_called()
class TestMaxLength:
    """Tests for the ``max_length`` string-length validator."""

    def test_valid_max_length(self):
        """A string shorter than the limit passes."""
        check = va.max_length(255)
        target = MyModel()
        target.prop = 'string_with_length_under_255'

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_invalid_max_length(self):
        """A string longer than the limit reports STRING_TOO_LONG."""
        limit = 255
        check = va.max_length(limit)
        target = MyModel()
        alphabet = string.ascii_uppercase + string.digits
        target.prop = ''.join(random.choices(alphabet, k=limit + 1))

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.STRING_TOO_LONG, limit)

    def test_none(self):
        """``None`` passes without error."""
        check = va.max_length(255)
        target = MyModel()
        target.prop = None

        ok, err = check(target, 'prop')

        assert ok
        assert err is None

    def test_invalid_type(self):
        """A non-string value is rejected."""
        check = va.max_length(255)
        target = MyModel()
        target.prop = 1

        ok, err = check(target, 'prop')

        assert not ok
        assert err == (msg.INVALID_VALUE,)
| [
"styler_validation.validators.is_integer",
"styler_validation.validators.is_greater_than_number",
"styler_validation.validators.is_money",
"styler_validation.validators.is_valid_time",
"random.choices",
"styler_validation.validators.if_",
"styler_validation.validators.is_greater_than_field",
"styler_v... | [((307, 323), 'styler_validation.validators.is_required', 'va.is_required', ([], {}), '()\n', (321, 323), True, 'from styler_validation import validators as va\n'), ((532, 548), 'styler_validation.validators.is_required', 'va.is_required', ([], {}), '()\n', (546, 548), True, 'from styler_validation import validators as va\n'), ((745, 772), 'styler_validation.validators.is_required', 'va.is_required', ([], {'accepts': '{0}'}), '(accepts={0})\n', (759, 772), True, 'from styler_validation import validators as va\n'), ((984, 999), 'styler_validation.validators.is_integer', 'va.is_integer', ([], {}), '()\n', (997, 999), True, 'from styler_validation import validators as va\n'), ((1190, 1205), 'styler_validation.validators.is_integer', 'va.is_integer', ([], {}), '()\n', (1203, 1205), True, 'from styler_validation import validators as va\n'), ((1401, 1416), 'styler_validation.validators.is_integer', 'va.is_integer', ([], {}), '()\n', (1414, 1416), True, 'from styler_validation import validators as va\n'), ((1636, 1651), 'styler_validation.validators.is_integer', 'va.is_integer', ([], {}), '()\n', (1649, 1651), True, 'from styler_validation import validators as va\n'), ((1890, 1920), 'styler_validation.validators.is_between', 'va.is_between', ([], {'min_': '(0)', 'max_': '(10)'}), '(min_=0, max_=10)\n', (1903, 1920), True, 'from styler_validation import validators as va\n'), ((2116, 2146), 'styler_validation.validators.is_between', 'va.is_between', ([], {'min_': '(0)', 'max_': '(10)'}), '(min_=0, max_=10)\n', (2129, 2146), True, 'from styler_validation import validators as va\n'), ((2522, 2537), 'styler_validation.validators.is_between', 'va.is_between', ([], {}), '()\n', (2535, 2537), True, 'from styler_validation import validators as va\n'), ((2858, 2879), 'styler_validation.validators.is_between', 'va.is_between', ([], {'min_': '(0)'}), '(min_=0)\n', (2871, 2879), True, 'from styler_validation import validators as va\n'), ((2895, 2917), 
'styler_validation.validators.is_between', 'va.is_between', ([], {'max_': '(10)'}), '(max_=10)\n', (2908, 2917), True, 'from styler_validation import validators as va\n'), ((3475, 3505), 'styler_validation.validators.is_between', 'va.is_between', ([], {'min_': '(0)', 'max_': '(10)'}), '(min_=0, max_=10)\n', (3488, 3505), True, 'from styler_validation import validators as va\n'), ((3701, 3731), 'styler_validation.validators.is_between', 'va.is_between', ([], {'min_': '(0)', 'max_': '(10)'}), '(min_=0, max_=10)\n', (3714, 3731), True, 'from styler_validation import validators as va\n'), ((4004, 4042), 'styler_validation.validators.is_inside', 'va.is_inside', ([], {'accepted': 'accepted_values'}), '(accepted=accepted_values)\n', (4016, 4042), True, 'from styler_validation import validators as va\n'), ((4271, 4309), 'styler_validation.validators.is_inside', 'va.is_inside', ([], {'accepted': 'accepted_values'}), '(accepted=accepted_values)\n', (4283, 4309), True, 'from styler_validation import validators as va\n'), ((4552, 4590), 'styler_validation.validators.is_inside', 'va.is_inside', ([], {'accepted': 'accepted_values'}), '(accepted=accepted_values)\n', (4564, 4590), True, 'from styler_validation import validators as va\n'), ((4804, 4826), 'styler_validation.validators.is_of_type', 'va.is_of_type', (['Decimal'], {}), '(Decimal)\n', (4817, 4826), True, 'from styler_validation import validators as va\n'), ((4874, 4890), 'decimal.Decimal', 'Decimal', (['"""12.33"""'], {}), "('12.33')\n", (4881, 4890), False, 'from decimal import Decimal\n'), ((5029, 5051), 'styler_validation.validators.is_of_type', 'va.is_of_type', (['Decimal'], {}), '(Decimal)\n', (5042, 5051), True, 'from styler_validation import validators as va\n'), ((5261, 5283), 'styler_validation.validators.is_of_type', 'va.is_of_type', (['Decimal'], {}), '(Decimal)\n', (5274, 5283), True, 'from styler_validation import validators as va\n'), ((5494, 5507), 'styler_validation.validators.is_money', 'va.is_money', 
([], {}), '()\n', (5505, 5507), True, 'from styler_validation import validators as va\n'), ((5555, 5571), 'decimal.Decimal', 'Decimal', (['"""12.33"""'], {}), "('12.33')\n", (5562, 5571), False, 'from decimal import Decimal\n'), ((5716, 5745), 'styler_validation.validators.is_money', 'va.is_money', ([], {'allow_zero': '(False)'}), '(allow_zero=False)\n', (5727, 5745), True, 'from styler_validation import validators as va\n'), ((5793, 5807), 'decimal.Decimal', 'Decimal', (['"""0.0"""'], {}), "('0.0')\n", (5800, 5807), False, 'from decimal import Decimal\n'), ((5961, 5974), 'styler_validation.validators.is_money', 'va.is_money', ([], {}), '()\n', (5972, 5974), True, 'from styler_validation import validators as va\n'), ((6022, 6039), 'decimal.Decimal', 'Decimal', (['"""-12.33"""'], {}), "('-12.33')\n", (6029, 6039), False, 'from decimal import Decimal\n'), ((6195, 6208), 'styler_validation.validators.is_money', 'va.is_money', ([], {}), '()\n', (6206, 6208), True, 'from styler_validation import validators as va\n'), ((6404, 6417), 'styler_validation.validators.is_money', 'va.is_money', ([], {}), '()\n', (6415, 6417), True, 'from styler_validation import validators as va\n'), ((6640, 6653), 'styler_validation.validators.is_money', 'va.is_money', ([], {}), '()\n', (6651, 6653), True, 'from styler_validation import validators as va\n'), ((6897, 6915), 'styler_validation.validators.is_valid_time', 'va.is_valid_time', ([], {}), '()\n', (6913, 6915), True, 'from styler_validation import validators as va\n'), ((7110, 7128), 'styler_validation.validators.is_valid_time', 'va.is_valid_time', ([], {}), '()\n', (7126, 7128), True, 'from styler_validation import validators as va\n'), ((7337, 7355), 'styler_validation.validators.is_valid_time', 'va.is_valid_time', ([], {}), '()\n', (7353, 7355), True, 'from styler_validation import validators as va\n'), ((7590, 7623), 'styler_validation.validators.is_greater_than_field', 'va.is_greater_than_field', (['"""prop2"""'], {}), 
"('prop2')\n", (7614, 7623), True, 'from styler_validation import validators as va\n'), ((7840, 7873), 'styler_validation.validators.is_greater_than_field', 'va.is_greater_than_field', (['"""prop2"""'], {}), "('prop2')\n", (7864, 7873), True, 'from styler_validation import validators as va\n'), ((8118, 8151), 'styler_validation.validators.is_greater_than_field', 'va.is_greater_than_field', (['"""prop2"""'], {}), "('prop2')\n", (8142, 8151), True, 'from styler_validation import validators as va\n'), ((8385, 8432), 'styler_validation.validators.is_greater_than_field', 'va.is_greater_than_field', (['"""prop2"""'], {'default': '(True)'}), "('prop2', default=True)\n", (8409, 8432), True, 'from styler_validation import validators as va\n'), ((8650, 8701), 'styler_validation.validators.is_greater_than_field', 'va.is_greater_than_field', (['"""prop2"""'], {'allow_equal': '(True)'}), "('prop2', allow_equal=True)\n", (8674, 8701), True, 'from styler_validation import validators as va\n'), ((8951, 8981), 'styler_validation.validators.is_less_than_field', 'va.is_less_than_field', (['"""prop2"""'], {}), "('prop2')\n", (8972, 8981), True, 'from styler_validation import validators as va\n'), ((9198, 9228), 'styler_validation.validators.is_less_than_field', 'va.is_less_than_field', (['"""prop2"""'], {}), "('prop2')\n", (9219, 9228), True, 'from styler_validation import validators as va\n'), ((9470, 9500), 'styler_validation.validators.is_less_than_field', 'va.is_less_than_field', (['"""prop2"""'], {}), "('prop2')\n", (9491, 9500), True, 'from styler_validation import validators as va\n'), ((9734, 9778), 'styler_validation.validators.is_less_than_field', 'va.is_less_than_field', (['"""prop2"""'], {'default': '(True)'}), "('prop2', default=True)\n", (9755, 9778), True, 'from styler_validation import validators as va\n'), ((9996, 10044), 'styler_validation.validators.is_less_than_field', 'va.is_less_than_field', (['"""prop2"""'], {'allow_equal': '(True)'}), "('prop2', 
allow_equal=True)\n", (10017, 10044), True, 'from styler_validation import validators as va\n'), ((10302, 10331), 'styler_validation.validators.is_greater_than_number', 'va.is_greater_than_number', (['(10)'], {}), '(10)\n', (10327, 10331), True, 'from styler_validation import validators as va\n'), ((10522, 10551), 'styler_validation.validators.is_greater_than_number', 'va.is_greater_than_number', (['(10)'], {}), '(10)\n', (10547, 10551), True, 'from styler_validation import validators as va\n'), ((10757, 10786), 'styler_validation.validators.is_greater_than_number', 'va.is_greater_than_number', (['(10)'], {}), '(10)\n', (10782, 10786), True, 'from styler_validation import validators as va\n'), ((10996, 11039), 'styler_validation.validators.is_greater_than_number', 'va.is_greater_than_number', (['(10)'], {'default': '(True)'}), '(10, default=True)\n', (11021, 11039), True, 'from styler_validation import validators as va\n'), ((11233, 11280), 'styler_validation.validators.is_greater_than_number', 'va.is_greater_than_number', (['(10)'], {'allow_equal': '(True)'}), '(10, allow_equal=True)\n', (11258, 11280), True, 'from styler_validation import validators as va\n'), ((11509, 11535), 'styler_validation.validators.is_less_than_number', 'va.is_less_than_number', (['(10)'], {}), '(10)\n', (11531, 11535), True, 'from styler_validation import validators as va\n'), ((11724, 11750), 'styler_validation.validators.is_less_than_number', 'va.is_less_than_number', (['(10)'], {}), '(10)\n', (11746, 11750), True, 'from styler_validation import validators as va\n'), ((11954, 11980), 'styler_validation.validators.is_less_than_number', 'va.is_less_than_number', (['(10)'], {}), '(10)\n', (11976, 11980), True, 'from styler_validation import validators as va\n'), ((12190, 12230), 'styler_validation.validators.is_less_than_number', 'va.is_less_than_number', (['(10)'], {'default': '(True)'}), '(10, default=True)\n', (12212, 12230), True, 'from styler_validation import validators as va\n'), 
((12424, 12468), 'styler_validation.validators.is_less_than_number', 'va.is_less_than_number', (['(10)'], {'allow_equal': '(True)'}), '(10, allow_equal=True)\n', (12446, 12468), True, 'from styler_validation import validators as va\n'), ((12684, 12701), 'styler_validation.validators.is_not_empty', 'va.is_not_empty', ([], {}), '()\n', (12699, 12701), True, 'from styler_validation import validators as va\n'), ((12898, 12915), 'styler_validation.validators.is_not_empty', 'va.is_not_empty', ([], {}), '()\n', (12913, 12915), True, 'from styler_validation import validators as va\n'), ((13117, 13134), 'styler_validation.validators.is_not_empty', 'va.is_not_empty', ([], {}), '()\n', (13132, 13134), True, 'from styler_validation import validators as va\n'), ((13341, 13370), 'styler_validation.validators.is_not_empty', 'va.is_not_empty', ([], {'default': '(True)'}), '(default=True)\n', (13356, 13370), True, 'from styler_validation import validators as va\n'), ((13595, 13601), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (13599, 13601), False, 'from unittest.mock import Mock\n'), ((13718, 13739), 'styler_validation.validators.object_validator', 'va.object_validator', ([], {}), '()\n', (13737, 13739), True, 'from styler_validation import validators as va\n'), ((13937, 13943), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (13941, 13943), False, 'from unittest.mock import Mock\n'), ((14029, 14035), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (14033, 14035), False, 'from unittest.mock import Mock\n'), ((14103, 14109), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (14107, 14109), False, 'from unittest.mock import Mock\n'), ((14265, 14286), 'styler_validation.validators.object_validator', 'va.object_validator', ([], {}), '()\n', (14284, 14286), True, 'from styler_validation import validators as va\n'), ((14680, 14701), 'styler_validation.validators.object_validator', 'va.object_validator', ([], {}), '()\n', (14699, 14701), True, 'from styler_validation import validators 
as va\n'), ((14856, 14868), 'styler_validation.validators.is_uuid', 'va.is_uuid', ([], {}), '()\n', (14866, 14868), True, 'from styler_validation import validators as va\n'), ((15092, 15104), 'styler_validation.validators.is_uuid', 'va.is_uuid', ([], {}), '()\n', (15102, 15104), True, 'from styler_validation import validators as va\n'), ((15317, 15329), 'styler_validation.validators.is_uuid', 'va.is_uuid', ([], {}), '()\n', (15327, 15329), True, 'from styler_validation import validators as va\n'), ((15525, 15537), 'styler_validation.validators.is_uuid', 'va.is_uuid', ([], {}), '()\n', (15535, 15537), True, 'from styler_validation import validators as va\n'), ((15770, 15801), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True, None)'}), '(return_value=(True, None))\n', (15774, 15801), False, 'from unittest.mock import Mock\n'), ((15816, 15850), 'styler_validation.validators.if_', 'va.if_', (['(lambda x: True)', 'validation'], {}), '(lambda x: True, validation)\n', (15822, 15850), True, 'from styler_validation import validators as va\n'), ((16089, 16120), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True, None)'}), '(return_value=(True, None))\n', (16093, 16120), False, 'from unittest.mock import Mock\n'), ((16135, 16170), 'styler_validation.validators.if_', 'va.if_', (['(lambda x: False)', 'validation'], {}), '(lambda x: False, validation)\n', (16141, 16170), True, 'from styler_validation import validators as va\n'), ((16431, 16449), 'styler_validation.validators.max_length', 'va.max_length', (['(255)'], {}), '(255)\n', (16444, 16449), True, 'from styler_validation import validators as va\n'), ((16697, 16718), 'styler_validation.validators.max_length', 'va.max_length', (['length'], {}), '(length)\n', (16710, 16718), True, 'from styler_validation import validators as va\n'), ((17044, 17062), 'styler_validation.validators.max_length', 'va.max_length', (['(255)'], {}), '(255)\n', (17057, 17062), True, 'from styler_validation import validators as 
va\n'), ((17257, 17275), 'styler_validation.validators.max_length', 'va.max_length', (['(255)'], {}), '(255)\n', (17270, 17275), True, 'from styler_validation import validators as va\n'), ((16774, 16835), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': '(256)'}), '(string.ascii_uppercase + string.digits, k=256)\n', (16788, 16835), False, 'import random\n')] |
from django.core.files.storage import FileSystemStorage
from django.db import models
# Create your models here.
from datetime import date
from django.urls import reverse #Used to generate URLs by reversing the URL patterns
from django.contrib.auth.models import User #Blog author or commenter
def attachment_path(instance, filename):
    """Build the upload path for an attachment belonging to an animal.

    The path is keyed by the id of the related animal, e.g.
    ``media/animal/<id>/attachments/<filename>``.
    """
    return f"media/animal/{instance.animal.id}/attachments/{filename}"
""" Metoda vrací cestu k uploadovanému plakátu. """
def poster_path(instance, filename):
    """Build the upload path for an animal's poster image.

    The path is keyed by the animal's name, e.g.
    ``animals/<name>/foto/<filename>``.
    """
    return f"animals/{instance.name}/foto/{filename}"
class Type(models.Model):
    """Lookup table for a kind of animal (e.g. mammal)."""

    # Unique, human-readable name of the type.
    name = models.CharField(max_length=50, unique=True, verbose_name="Type of animal", help_text='Enter a type of animal (e.g. Savec)')

    class Meta:
        # Default queryset ordering: alphabetically by name.
        ordering = ["name"]

    def __str__(self):
        """Return the type name."""
        return self.name
class Animal(models.Model):
    """An animal with a poster image, Latin name and free-text description."""

    # Unique display name of the animal.
    name = models.CharField(max_length=50, unique=True, verbose_name="Name of animal")
    # Poster image; stored under the directory returned by poster_path.
    poster = models.ImageField(upload_to=poster_path, verbose_name="Poster")
    # Unique scientific (Latin) name.
    latin = models.CharField(max_length=50, unique=True, verbose_name="Latin name for animal")
    # Free-text description of the animal.
    description = models.CharField(max_length=5000, verbose_name="Description of animal")

    # Metadata
    class Meta:
        ordering = ["name"]

    # Methods
    def __str__(self):
        """Return the animal's name, poster path and Latin name."""
        return f"{self.name}, {str(self.poster)}, {str(self.latin)}"

    def get_absolute_url(self):
        """Return the URL of the detail page for this animal."""
        return reverse('animal-detail', args=[str(self.id)])
class Attachment(models.Model):
    """A file (audio, image, text, video, ...) attached to an Animal."""

    # Fields
    # Required attachment title, up to 200 characters.
    title = models.CharField(max_length=200, verbose_name="Title")
    # Timestamp of the last update; refreshed automatically on every save.
    last_update = models.DateTimeField(auto_now=True)
    # Uploaded file; the upload_to callable stores it in the directory
    # returned by attachment_path.
    file = models.FileField(upload_to=attachment_path, null=True, verbose_name="File")
    # Predefined attachment types as (value, label) choice tuples.
    TYPE_OF_ATTACHMENT = (
        ('audio', 'Audio'),
        ('image', 'Image'),
        ('text', 'Text'),
        ('video', 'Video'),
        ('other', 'Other'),
    )
    # Field with predefined choices for the attachment type (currently disabled).
    #type = models.CharField(max_length=5, choices=TYPE_OF_ATTACHMENT, blank=True, default='image',
    #                        help_text='Select allowed attachment type', verbose_name="Attachment type")
    # Foreign key linking the attachment to its animal (N:1 relation).
    # on_delete=models.CASCADE preserves referential integrity: deleting the
    # animal also deletes all of its attachments.
    # NOTE(review): the field is named "film" although it references Animal;
    # renaming would require a schema migration, so it is only flagged here.
    film = models.ForeignKey(Animal, on_delete=models.CASCADE)

    # Metadata
    class Meta:
        # Most recently updated attachments first.
        ordering = ["-last_update"]

    # Methods
    def __str__(self):
        """Return the attachment title."""
        # Fixed: the original returned f"{self.title})" with a stray ")".
        return f"{self.title}"
"django.db.models.ForeignKey",
"django.db.models.FileField",
"django.db.models.DateTimeField",
"django.db.models.ImageField",
"django.db.models.CharField"
] | [((614, 742), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'unique': '(True)', 'verbose_name': '"""Type of animal"""', 'help_text': '"""Enter a type of animal (e.g. Savec)"""'}), "(max_length=50, unique=True, verbose_name='Type of animal',\n help_text='Enter a type of animal (e.g. Savec)')\n", (630, 742), False, 'from django.db import models\n'), ((879, 954), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'unique': '(True)', 'verbose_name': '"""Name of animal"""'}), "(max_length=50, unique=True, verbose_name='Name of animal')\n", (895, 954), False, 'from django.db import models\n'), ((968, 1031), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'poster_path', 'verbose_name': '"""Poster"""'}), "(upload_to=poster_path, verbose_name='Poster')\n", (985, 1031), False, 'from django.db import models\n'), ((1044, 1131), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'unique': '(True)', 'verbose_name': '"""Latin name for animal"""'}), "(max_length=50, unique=True, verbose_name=\n 'Latin name for animal')\n", (1060, 1131), False, 'from django.db import models\n'), ((1145, 1216), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5000)', 'verbose_name': '"""Description of animal"""'}), "(max_length=5000, verbose_name='Description of animal')\n", (1161, 1216), False, 'from django.db import models\n'), ((1772, 1826), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Title"""'}), "(max_length=200, verbose_name='Title')\n", (1788, 1826), False, 'from django.db import models\n'), ((1931, 1966), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1951, 1966), False, 'from django.db import models\n'), ((2124, 2199), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': 'attachment_path', 'null': '(True)', 'verbose_name': 
'"""File"""'}), "(upload_to=attachment_path, null=True, verbose_name='File')\n", (2140, 2199), False, 'from django.db import models\n'), ((2977, 3028), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Animal'], {'on_delete': 'models.CASCADE'}), '(Animal, on_delete=models.CASCADE)\n', (2994, 3028), False, 'from django.db import models\n')] |
"""
github : https://github.com/amingolnari/Deep-Learning-Course
Author : <NAME>
Keras Version : 2.2.4
Date : 4/12/2018
Keras CNN Classification on MNIST Data
Code 301
"""
## If your GPU is AMD , you can use PlaidML Backend
# import os
# os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D
from keras.optimizers import SGD
from keras.datasets import mnist
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
# Load MNIST Data (Download for First)
def LoadData():
    """Load MNIST, scale pixels to [0, 1], and one-hot encode the labels."""
    (train_x, train_y), (test_x, test_y) = mnist.load_data()
    # Add a channel axis and normalize pixel intensities to [0, 1].
    train_x = train_x.reshape(60000, 28, 28, 1).astype('float32') / 255
    test_x = test_x.reshape(10000, 28, 28, 1).astype('float32') / 255
    # e.g. label 2 -> [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
    train_y = to_categorical(train_y, 10)
    test_y = to_categorical(test_y, 10)
    return (train_x, test_x), (train_y, test_y)
def BuildModel():
    """Assemble the CNN: three conv layers, two max-pools, then a dense classifier."""
    model = Sequential()
    # Only the first (input) layer needs input_shape.
    model.add(Conv2D(128, (5, 5), activation='relu', padding='same',
                     input_shape=(28, 28, 1)))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(MaxPool2D((2, 2), padding='same'))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(MaxPool2D((2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.25))
    model.add(Dense(300, activation='tanh'))     # hidden layer #1
    model.add(Dense(200, activation='sigmoid'))  # hidden layer #2
    model.add(Dense(10, activation='softmax'))   # output layer
    return model
def PlotHistory(history):
    """Plot train/validation loss and accuracy, save the figure, and show it."""
    plt.title('Keras Model loss/accuracy')
    plt.ylabel('loss/accuracy')
    plt.xlabel('epochs')
    # accuracy curves
    plt.plot(history.history['acc'], '.-')
    plt.plot(history.history['val_acc'], '-.')
    # loss curves
    plt.plot(history.history['loss'], '-*')
    plt.plot(history.history['val_loss'], '*-')
    plt.legend(['Train loss', 'Validation loss', 'Train acc', 'Validation acc'],
               loc='upper right')
    plt.grid(True, linestyle='-.')
    plt.tick_params(labelcolor='b', labelsize='medium', width=3)
    # assumes an 'images/' directory exists next to the script — TODO confirm
    plt.gcf().savefig('images/model loss.jpg')
    plt.show()
    return
def main():
    """Train the CNN on MNIST and report the held-out test accuracy."""
    (Xtrain, Xtest), (Ytrain, Ytest) = LoadData()
    model = BuildModel()
    model.summary()
    # One-hot labels, hence categorical crossentropy; plain SGD optimizer.
    model.compile(loss = 'categorical_crossentropy',
                  optimizer = SGD(lr = 0.1),
                  metrics = ['accuracy'])
    # 30% of the training set is held out for validation during fitting.
    History = model.fit(Xtrain, Ytrain,
                        batch_size = 256,
                        epochs = 100,
                        validation_split = .3)
    PlotHistory(History)
    score, acc = model.evaluate(Xtest, Ytest)
    print('Test Accuracy : ', acc)
if __name__ == "__main__":
    main()
| [
"keras.layers.Conv2D",
"matplotlib.pyplot.grid",
"keras.layers.Flatten",
"keras.datasets.mnist.load_data",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tick_params",
"keras.models.Sequential",
"keras.utils.np_utils.... | [((640, 657), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (655, 657), False, 'from keras.datasets import mnist\n'), ((866, 892), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['Ytrain', '(10)'], {}), '(Ytrain, 10)\n', (880, 892), False, 'from keras.utils.np_utils import to_categorical\n'), ((960, 985), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['Ytest', '(10)'], {}), '(Ytest, 10)\n', (974, 985), False, 'from keras.utils.np_utils import to_categorical\n'), ((1062, 1074), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1072, 1074), False, 'from keras.models import Sequential\n'), ((1998, 2036), 'matplotlib.pyplot.title', 'plt.title', (['"""Keras Model loss/accuracy"""'], {}), "('Keras Model loss/accuracy')\n", (2007, 2036), True, 'import matplotlib.pyplot as plt\n'), ((2039, 2066), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss/accuracy"""'], {}), "('loss/accuracy')\n", (2049, 2066), True, 'import matplotlib.pyplot as plt\n'), ((2069, 2089), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (2079, 2089), True, 'import matplotlib.pyplot as plt\n'), ((2106, 2144), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['acc']", '""".-"""'], {}), "(history.history['acc'], '.-')\n", (2114, 2144), True, 'import matplotlib.pyplot as plt\n'), ((2147, 2189), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_acc']", '"""-."""'], {}), "(history.history['val_acc'], '-.')\n", (2155, 2189), True, 'import matplotlib.pyplot as plt\n'), ((2201, 2240), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']", '"""-*"""'], {}), "(history.history['loss'], '-*')\n", (2209, 2240), True, 'import matplotlib.pyplot as plt\n'), ((2243, 2286), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']", '"""*-"""'], {}), "(history.history['val_loss'], '*-')\n", (2251, 2286), True, 'import matplotlib.pyplot as plt\n'), ((2289, 
2388), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train loss', 'Validation loss', 'Train acc', 'Validation acc']"], {'loc': '"""upper right"""'}), "(['Train loss', 'Validation loss', 'Train acc', 'Validation acc'],\n loc='upper right')\n", (2299, 2388), True, 'import matplotlib.pyplot as plt\n'), ((2387, 2417), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'linestyle': '"""-."""'}), "(True, linestyle='-.')\n", (2395, 2417), True, 'import matplotlib.pyplot as plt\n'), ((2422, 2482), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelcolor': '"""b"""', 'labelsize': '"""medium"""', 'width': '(3)'}), "(labelcolor='b', labelsize='medium', width=3)\n", (2437, 2482), True, 'import matplotlib.pyplot as plt\n'), ((2497, 2506), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2504, 2506), True, 'import matplotlib.pyplot as plt\n'), ((2548, 2558), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2556, 2558), True, 'import matplotlib.pyplot as plt\n'), ((1089, 1192), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(5, 5)', 'activation': '"""relu"""', 'padding': '"""same"""', 'input_shape': '(28, 28, 1)'}), "(filters=128, kernel_size=(5, 5), activation='relu', padding='same',\n input_shape=(28, 28, 1))\n", (1095, 1192), False, 'from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D\n'), ((1326, 1399), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), padding='same', activation='relu')\n", (1332, 1399), False, 'from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D\n'), ((1465, 1508), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (1474, 1508), False, 'from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D\n'), ((1529, 1602), 'keras.layers.Conv2D', 'Conv2D', ([], 
{'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'padding': '"""same"""'}), "(filters=64, kernel_size=(3, 3), activation='relu', padding='same')\n", (1535, 1602), False, 'from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D\n'), ((1668, 1695), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1677, 1695), False, 'from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D\n'), ((1714, 1723), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1721, 1723), False, 'from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D\n'), ((1739, 1752), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1746, 1752), False, 'from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D\n'), ((1769, 1798), 'keras.layers.Dense', 'Dense', (['(300)'], {'activation': '"""tanh"""'}), "(300, activation='tanh')\n", (1774, 1798), False, 'from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D\n'), ((1834, 1866), 'keras.layers.Dense', 'Dense', (['(200)'], {'activation': '"""sigmoid"""'}), "(200, activation='sigmoid')\n", (1839, 1866), False, 'from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D\n'), ((1902, 1933), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (1907, 1933), False, 'from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D\n'), ((2761, 2772), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.1)'}), '(lr=0.1)\n', (2764, 2772), False, 'from keras.optimizers import SGD\n')] |
# derwin.py - testing a window within a window
import curses
def main(stdscr):
    """Draw a bordered window nested inside another bordered window."""
    screen_h, screen_w = stdscr.getmaxyx()
    # Outer container: one cell smaller than the screen, offset by (1, 1).
    outer = curses.newwin(screen_h - 1, screen_w - 1, 1, 1)
    outer_h, outer_w = outer.getmaxyx()
    # Inner box derived from the container, inset by one cell on each side.
    inner = outer.derwin(outer_h - 2, outer_w - 2, 1, 1)
    # Show the container's size inside the inner box.
    inner.addstr(1, 1, f"{outer_h}x{outer_w}")
    outer.box()
    inner.box()
    # Render everything and wait for a keypress.
    outer.refresh()
    outer.getch()
# main
if __name__ == "__main__":
    curses.wrapper(main)
| [
"curses.wrapper",
"curses.newwin"
] | [((174, 209), 'curses.newwin', 'curses.newwin', (['(sh - 1)', '(sw - 1)', '(1)', '(1)'], {}), '(sh - 1, sw - 1, 1, 1)\n', (187, 209), False, 'import curses\n'), ((600, 620), 'curses.wrapper', 'curses.wrapper', (['main'], {}), '(main)\n', (614, 620), False, 'import curses\n')] |
import sys
import logging
from unicorn import *
from unicorn.arm_const import *
from androidemu.emulator import Emulator
from UnicornTraceDebugger import udbg
# Verbose logging to stdout so the emulator's trace is visible.
logging.basicConfig(stream=sys.stdout,
                    level=logging.DEBUG,
                    format="%(asctime)s %(levelname)7s %(name)34s | %(message)s")
logger = logging.getLogger(__name__)
# Android emulator with libc and the target native library loaded (no init run).
emulator = Emulator()
libc = emulator.load_library('jnilibs/libc.so', do_init=False)
libso = emulator.load_library('jnilibs/libnative-lib.so', do_init=False)
# data segment
data_base = 0xa00000
data_size = 0x10000 * 3
emulator.mu.mem_map(data_base, data_size)
emulator.mu.mem_write(data_base, b'123')
# R0 points at the input buffer when emulation starts.
emulator.mu.reg_write(UC_ARM_REG_R0, data_base)
try:
    dbg = udbg.UnicornDebugger(emulator.mu)
    # Start/end are offsets into the loaded library; the +1 sets the low bit —
    # presumably to select ARM Thumb mode, TODO confirm against the binary.
    addr_start = 0xcbc66000 + 0x9B68 + 1
    addr_end = 0xcbc66000 + 0x9C2C
    emulator.mu.emu_start(addr_start, addr_end)
    # The emulated function leaves a pointer to its 16-byte result in R2.
    r2 = emulator.mu.reg_read(UC_ARM_REG_R2)
    result = emulator.mu.mem_read(r2, 16)
    print(result.hex())
except UcError as e:
    # On failure, dump the last ~100 traced addresses (relative to the lib base).
    list_tracks = dbg.get_tracks()
    for addr in list_tracks[-100:-1]:
        print(hex(addr - 0xcbc66000))
    print (e)
| [
"logging.basicConfig",
"UnicornTraceDebugger.udbg.UnicornDebugger",
"androidemu.emulator.Emulator",
"logging.getLogger"
] | [((160, 286), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(levelname)7s %(name)34s | %(message)s"""'}), "(stream=sys.stdout, level=logging.DEBUG, format=\n '%(asctime)s %(levelname)7s %(name)34s | %(message)s')\n", (179, 286), False, 'import logging\n'), ((299, 326), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (316, 326), False, 'import logging\n'), ((339, 349), 'androidemu.emulator.Emulator', 'Emulator', ([], {}), '()\n', (347, 349), False, 'from androidemu.emulator import Emulator\n'), ((695, 728), 'UnicornTraceDebugger.udbg.UnicornDebugger', 'udbg.UnicornDebugger', (['emulator.mu'], {}), '(emulator.mu)\n', (715, 728), False, 'from UnicornTraceDebugger import udbg\n')] |
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from nav_msgs.msg import Odometry
from trajectory_msgs.msg import JointTrajectory
from control_msgs.msg import JointTrajectoryControllerState
def callback_odom(data):
    """Log an incoming odometry message to stdout."""
    print("odom\n%s" % data)
def callback_JointTrajectory(data):
    """Log an incoming gripper command message to stdout."""
    print("gripper_controller/command\n%s" % data)
def callback_gripper_JointTrajectory(data):
    """Log an incoming gripper controller state message to stdout."""
    print("gripper_controller\n%s" % data)
def listener():
    """Register this process as a ROS node and subscribe to the gripper state topic."""
    # anonymous=True appends a random suffix so several instances can run at once.
    rospy.init_node("listener12", anonymous=True)
    # rospy.Subscriber("/odom", Odometry, callback_odom)
    # rospy.Subscriber("/arm_1/gripper_controller/command", JointTrajectory, callback_JointTrajectory)
    rospy.Subscriber(
        "/arm_1/gripper_controller/state",
        JointTrajectoryControllerState,
        callback_gripper_JointTrajectory,
    )
    # Block forever, handing control to the ROS callback loop.
    rospy.spin()
if __name__ == "__main__":
    listener()
| [
"rospy.init_node",
"rospy.Subscriber",
"rospy.spin"
] | [((474, 519), 'rospy.init_node', 'rospy.init_node', (['"""listener12"""'], {'anonymous': '(True)'}), "('listener12', anonymous=True)\n", (489, 519), False, 'import rospy\n'), ((685, 806), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/arm_1/gripper_controller/state"""', 'JointTrajectoryControllerState', 'callback_gripper_JointTrajectory'], {}), "('/arm_1/gripper_controller/state',\n JointTrajectoryControllerState, callback_gripper_JointTrajectory)\n", (701, 806), False, 'import rospy\n'), ((838, 850), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (848, 850), False, 'import rospy\n')] |
#Botpic:https://upload.wikimedia.org/wikipedia/commons/thumb/b/b8/Red_Rose_Photography.jpg/800px-Red_Rose_Photography.jpg
#Botpic:https://commons.wikimedia.org/wiki/File:Red_Rose_Photography.jpg
#reference:https://www.youtube.com/watch?v=SPTfmiYiuok
import discord
import os
import requests
import json
import math, random
from replit import db
from keep_alive import keep_alive
import asyncpraw, asyncprawcore
#import commands
import time, asyncio, datetime
from discord.ext import tasks
from discord import Member
from discord.ext.commands import has_permissions, MissingPermissions
from prawcore import NotFound
import ffmpeg
from discord import FFmpegPCMAudio
from dotenv import load_dotenv
from youtube_search import YoutubeSearch
# Load secrets (tokens, reddit credentials) from the local .env file.
load_dotenv()
client = discord.Client()
# Cache every user so reaction-removal events can resolve members.
# The privileged "members" intent must also be enabled at https://discord.com/developers/ in the bot section.
intents = discord.Intents.default()
intents.members = True
# NOTE(review): 'global' at module scope is a no-op in Python; these two lines have no effect.
global playing, stream
global currently_playing_message
def say_hello():
    """Print the current local time (the periodic-timer hooks remain disabled)."""
    now = time.ctime()
    print(now)
#---------- To keep the bot alive --------------------------
#1. keeping the bot alive
'''
#------------------- adding a background task -----------------
status = cycle(['with Python','JetHub'])
@bot.event
async def on_ready():
change_status.start()
print("Your bot is ready")
@tasks.loop(seconds=10)
async def change_status():
await bot.change_presence(activity=discord.Game(next(status)))
#--------------------------------------------------------------
3. Setup the Uptime Robot :
create an account on uptime robot.
After creating an account, go to the dashboard and click on Add new monitor (preview)
select monitor type Http(s) (preview)
then go to to ur project on repl.it and copy the url from the top of the console and paste it in url section of the monitor (preview)
now set the monitoring interval to every 5 mins (so that it will ping the bot every 5 mins) and click on create monitor twice (preview)
That’s it…Now go to ur project on repl.it and hit the Run button
'''
class MySchedule:
    """Delivers a (possibly delayed, possibly anonymous) message to a channel as an embed."""

    async def schedule_message(self,
                               author='anonymous',
                               message='please provide a message',
                               id=863298114949218324,
                               seconds=0):
        """Wait `seconds`, then post `message` into the channel with the given id.

        author: display name of the sender; 'anonymous' hides the identity.
        id: target Discord channel id.
        seconds: delay before sending.
        """
        print('received:')
        print(author, message, id, seconds)
        if author == 'anonymous':
            description = 'command: .anon your_message'
        else:
            author = author + ' <scheduled_message>'
            description = "command: .schedule time_in_seconds your_message"
        # Fixed: the original used the blocking time.sleep() inside an async
        # coroutine, which froze the whole event loop (every bot command) while
        # waiting. asyncio.sleep yields control back to the loop instead.
        await asyncio.sleep(seconds)
        print('author : ', author)
        embed = discord.Embed(title=author, colour=discord.Color.blue())
        embed.add_field(
            name=message,
            value=description,
        )
        channel = bot.get_channel(id=id)
        await channel.send(embed=embed)
# Module-level Async PRAW client used by all of the reddit helpers below.
# Secrets are read from environment variables (populated via load_dotenv above).
reddit = asyncpraw.Reddit(
    client_id="nnhGBCiBxSJysTobl6SLPQ",
    client_secret=os.environ['rd_client_secret'],
    password=os.environ['rd_pass'],
    user_agent="praw_test",
    username="Alternative-Ad-8849",
)
async def sub_exists(subreddit_name):
    """Return True when the given subreddit exists on reddit, else False."""
    # Accept names given as 'r/foo' or '/r/foo' as well as bare 'foo'.
    if subreddit_name.startswith(('/r/', 'r/')):
        subreddit_name = subreddit_name.split('r/')[-1]
    try:
        # fetch=True forces a network request; Async PRAW is lazy by default.
        await reddit.subreddit(subreddit_name, fetch=True)
        return True
    except asyncprawcore.Redirect:
        # Reddit redirects to reddit.com/search when the subreddit does not exist.
        return False
def get_nude():
    """Return the URL of a randomly picked hot submission from r/BustyPetite."""
    # NOTE(review): this uses the blocking-PRAW call style, but `reddit` above is
    # an asyncpraw client, so .subreddit(...) returns a coroutine here — this
    # function likely fails at runtime as written; confirm against asyncpraw docs.
    memes_submissions = reddit.subreddit('BustyPetite').hot()
    print('got memes')
    post_to_pick = random.randint(1, 15)
    print('choosen random')
    # Each next() builds a new generator over the same underlying iterator, so
    # every pass advances it by one non-stickied submission.
    for i in range(0, post_to_pick):
        print('for loop:{}'.format(i))
        submission = next(x for x in memes_submissions if not x.stickied)
    return (submission.url)
def get_crazy(sub_reddit_name='memes'):
    """Yield URLs for a randomly positioned window of hot submissions."""
    # NOTE(review): blocking-PRAW call style on an asyncpraw client — see the
    # note on get_nude above; confirm this works at runtime.
    memes_submissions = reddit.subreddit(sub_reddit_name).hot()
    #print('got memes')
    #post_to_pick = random.randint(1, 15)
    #print('choosen random')
    # Random window: start somewhere in [100, 1000], span up to 100 posts.
    start = random.randint(100, 1000)
    end = random.randint(start, start + 100)
    print('start:{} end:{}'.format(start, end))
    for i in range(start, end):
        #print('for loop:{}'.format(i))
        submission = next(x for x in memes_submissions if not x.stickied)
        yield (submission.url)
def get_memes_crazy():
    """Yield the URLs of the first 50 non-stickied hot submissions of r/memes."""
    # NOTE(review): blocking-PRAW call style on an asyncpraw client — see the
    # note on get_nude above; confirm this works at runtime.
    memes_submissions = reddit.subreddit('memes').hot()
    print('got memes')
    #post_to_pick = random.randint(1, 50)
    print('choosen random')
    for i in range(0, 50): #post_to_pick):
        print('for loop:{}'.format(i))
        submission = next(x for x in memes_submissions if not x.stickied)
        yield (submission.url)
    #return submission
async def get_one(sub_reddit='memes'):
    """Fetch one random-rising submission from `sub_reddit` and wrap it in an embed."""
    target = await reddit.subreddit(sub_reddit)
    # Iterate a randomly sized slice of random_rising and keep the last item.
    async for submission in target.random_rising(limit=random.randint(1, 150)):
        pass
    card = discord.Embed(title=submission.title,
                         url=submission.url,
                         description=submission.selftext,
                         colour=discord.Color.red())
    card.set_image(url=submission.url)
    return (card)
from discord.ext import commands
# Command framework entry point: '.' prefix; built-in help disabled in favour of the custom .help command below.
bot = commands.Bot(command_prefix='.', help_command=None, intents=intents)
'''
class MyHelpCommand(commands.MinimalHelpCommand):
async def send_pages(self):
destination = self.get_destination()
e = discord.Embed(colour=discord.Color.blurple(), description='')
for page in self.paginator.pages:
e.description += page
await destination.send(embed=e)
bot.help_command = MyHelpCommand()'''
# My sample help command:
@bot.command(name='help',
             brief='`.help` for help',
             help='Please enter `.help` for help')
async def help(ctx, args=None):
    """Send an embed listing every command, or the details of one command."""
    help_embed = discord.Embed(
        title="Encouragement Bot Help!",
        #url="https:ioee.herokuapp.com/",
        description=
        "Type `.help <command name>` for more details about each command. e.g. `.help joke`",
    )
    command_names_list = [x.name for x in bot.commands]
    # If there are no arguments, just list the commands:
    if not args:
        # Fixed: the original prepended a field whose value was the literal
        # placeholder string 'value', which was shown to users.
        for cmd in bot.commands:
            help_embed.add_field(
                name='***{}***'.format(cmd.name),
                value='> {}\n\n\n'.format(bot.get_command(cmd.name).brief),
                inline=False,
            )
        help_embed.add_field(
            name="Details",
            value=
            "Type `.help <command name>` for more details about each command.",
            inline=False)
    # If the argument is a command, get the help text from that command:
    elif args in command_names_list:
        help_embed.add_field(name=args,
                             value=str(bot.get_command(args).brief) + ' \n' +
                             str(bot.get_command(args).help))
    # If someone is just trolling:
    else:
        help_embed.add_field(name="Nope.",
                             value="Don't think I got that command, boss!")
    await ctx.send(embed=help_embed)
# My sample help command:
@bot.command(name='share_info',
             brief='`.share_info` for share_info',
             help='Please enter `.share_info` for mero_share_info')
async def info(ctx, args=None):
    """Fetch the latest MeroShare info and post it, unless it repeats the channel's last message."""
    response = requests.get('http://ioee.herokuapp.com/meroshare/')
    response = response.text.strip()
    print(response)
    # Fixed: default prev_message so a failed history lookup can no longer
    # raise NameError in the comparison below.
    prev_message = None
    try:
        previous_messages = await ctx.channel.history(limit=1).flatten()
        prev_message = previous_messages[0].content
        print('previous_message:')
        print(prev_message)
    except (IndexError, discord.DiscordException):
        # No previous message, or history fetch failed — treat as "not a repeat".
        pass
    if (str(prev_message).strip() != response):
        print('not same messages:prev_message and rseponse')
        await ctx.send(response)
    else:
        print('same message as previous message, so not sending')
@bot.command(name='ping',
             brief=" short_help:to test if bot responding ",
             help='long_help: e.g. .ping')
async def ping(ctx, subreddit='jokes', no_of_posts=1, user='.'):
    """Reply 'pong ' to confirm the bot is responsive; extra args are accepted but unused."""
    await ctx.send('pong ')
    print('Ping-Pong is invoked: ', user, ctx)
print('Ping-Pong is invoked: ', user, ctx)
@bot.command(name='embed', help='e.g.`.embed`', brief='embedding help')
async def embed(ctx):
    """Send a demo embed showcasing Discord's markdown/formatting options."""
    embed = discord.Embed(title="Text Formatting",
                          url="https://realdrewdata.medium.com/",
                          description="Here are some ways to format text",
                          colour=discord.Color.blue())
    embed.set_author(
        name="RealDrewData",
        url="https://twitter.com/RealDrewData",
        icon_url=
        "https://cdn-images-1.medium.com/fit/c/32/32/1*QVYjh50XJuOLQBeH_RZoGw.jpeg"
    )
    #embed.set_author(name=ctx.author.display_name, url="https://twitter.com/RealDrewData", icon_url=ctx.author.avatar_url)
    embed.set_thumbnail(url="https://i.imgur.com/axLm3p6.jpeg")
    # One field per formatting style; backslashes keep the markers literal.
    embed.add_field(name="*Italics*",
                    value="Surround your text in asterisks (\*)",
                    inline=False)
    embed.add_field(name="**Bold**",
                    value="Surround your text in double asterisks (\*\*)",
                    inline=False)
    embed.add_field(name="__Underline__",
                    value="Surround your text in double underscores (\_\_)",
                    inline=False)
    embed.add_field(name="~~Strikethrough~~",
                    value="Surround your text in double tildes (\~\~)",
                    inline=False)
    embed.add_field(name="`Code Chunks`",
                    value="Surround your text in backticks (\`)",
                    inline=False)
    embed.add_field(name="Blockquotes",
                    value="> Start your text with a greater than symbol (\>)",
                    inline=False)
    embed.add_field(name="Secrets",
                    value="||Surround your text with double pipes (\|\|)||",
                    inline=False)
    embed.set_footer(text="Learn more here: realdrewdata.medium.com")
    await ctx.send(embed=embed)
@bot.command(name='schedule',
             brief='to schedule message to be sent in any group.',
             help='e.g. `.schedule 10 scheduled for ten seconds.')
async def schedule(ctx, seconds: int = 3, *, message='Hello There'):
    """Delete the invoking message and re-post it in this channel after `seconds`."""
    print('Seconds: ', seconds)
    msg = str(message)
    # Remove the invocation so the pending schedule stays hidden.
    await ctx.message.delete()
    id = ctx.channel.id
    author = str(ctx.message.author).split('#')[0]
    # Simplified: the original randomly picked one of five identical MySchedule
    # slots from a throwaway dict — a single instance behaves the same.
    scheduler = MySchedule()
    await scheduler.schedule_message(author=author,
                                    message=msg,
                                    id=id,
                                    seconds=int(seconds))
'''@bot.command()
async def schedule(ctx, message='Hello There', seconds = 3):
#print(ctx.channel.id)
m=str(message)
id = ctx.message.id
print('\n\n\n{}\n\n'.format(m))
author = str(ctx.message.author).split('#')[0]
await ctx.message.delete()
#id=ctx.channel.id
channel = bot.get_channel(id=id)
print(id)
print(channel)
#await channel.send('hi')
#await schedule_message(author, m, id, seconds = seconds)
#print(ctx.message)
#await ctx.message.delete(ctx.message)
#await channel.send('hi')
#await ctx.send('pong')
#print('Im invoked')'''
@bot.command(name='anon',
             brief='to send message anonymously',
             help='e.g. `.anon Guess who!`')
async def anon(ctx, *, message='please provide a message'):
    """Delete the invoking message and re-post its content anonymously."""
    msg = str(message)
    await ctx.message.delete()
    id = ctx.channel.id
    # Simplified: the original wrapped the instance in a one-entry dict for no effect.
    scheduler = MySchedule()
    await scheduler.schedule_message('anonymous', msg, id)
    print('send')
    print(msg, id)
#await schedule_message(author='', message=msg, id=id)
@bot.command(name="echo",
             pass_context=True,
             brief='ehhoes/repeat the message deleting the user\'s message',
             help='e.g. `.echo I am echoed`')
async def echo(ctx, *, message='please provide a message'):
    """Repeat the given message after deleting the user's invocation."""
    msg = message
    # Deleting can fail (e.g. missing permissions); failures are ignored on purpose.
    try:
        await ctx.message.delete()
    except:
        pass
    await ctx.send(msg)
@echo.error
async def echo_error(ctx, error):
    """Tell the user when .echo failed due to missing permissions."""
    if isinstance(error, MissingPermissions):
        text = "Sorry {}, you do not have permissions to do that!".format(
            ctx.message.author)
        # Fixed: Client.send_message() was removed in discord.py 1.0;
        # send through the invocation context instead.
        await ctx.send(text)
@bot.command(name='unleash',
             brief='unleashes the subreddit to a channel',
             help='e.g.To unleash r/jokes `.unleash jokes`')
async def unleash(ctx, subreddit='none'):
    """Register `subreddit` to be auto-posted into the current channel."""
    if subreddit == 'none':
        await ctx.send('Please enter the subreddit to be unleashed')
        return
    print(ctx.channel.id)
    if not await sub_exists(subreddit):
        await ctx.send('Sorry! subreddit r/{} doesnot exists.'.format(
            subreddit, ctx.channel))
        return
    # Fixed: ensure both the top-level mapping and this channel's list exist
    # (the original raised KeyError on a fresh database, and never appended the
    # subreddit — or replied — for a channel seen for the first time).
    if "unleash" not in db.keys():
        db['unleash'] = {}
    channel_key = str(ctx.channel.id)
    if channel_key not in db['unleash']:
        db['unleash'][channel_key] = []
    if str(subreddit) not in db['unleash'][channel_key]:
        db['unleash'][channel_key].append(str(subreddit))
        await ctx.send('unleashing r/{} to {}'.format(subreddit, ctx.channel))
    else:
        await ctx.send('r/{} already unleashed to {}'.format(subreddit, ctx.channel))
@bot.command(name='contain',
             brief='to contain/stop unleashed subreddit message',
             help='e.g. `.contain jokes`')
async def contain(ctx, subreddit='none'):
    """Stop auto-posting `subreddit` into the current channel."""
    if subreddit == 'none':
        await ctx.send('Please enter the subreddit to be unleashed')
        return
    print(ctx.channel.id)
    channel_key = str(ctx.channel.id)
    # Fixed: also guard the top-level key — db['unleash'] raised KeyError
    # when nothing had ever been unleashed.
    if ("unleash" in db.keys() and channel_key in db['unleash']
            and str(subreddit) in db['unleash'][channel_key]):
        db['unleash'][channel_key].remove(str(subreddit))
        await ctx.send(
            'successfully contained subreddit r/{} from {}'.format(
                subreddit, ctx.channel))
    else:
        await ctx.send('Subreddit r/{} not unleashed in .'.format(
            subreddit, ctx.channel))
@bot.command(
    name='go',
    brief='to see memes from r/memes or nude from r/\'BustyPetite\'',
    help='e.g. `.go meme`, `.go meme crazy`, `.go nude`, `.go nude crazy`')
async def go(ctx, what='', what2=''):
    """Post one or many submissions from r/memes ('meme') or r/BustyPetite ('nude')."""
    print('wHat:{} what2:{}'.format(what, what2))
    source = 'BustyPetite' if what == 'nude' else 'memes'
    if what2 == 'crazy' and what in ('nude', 'meme'):
        # Stream a random window of hot posts, one message per URL.
        for url in get_crazy(source):
            await ctx.send(url)
    else:
        # Fixed: get_one() is a coroutine and was previously sent unawaited,
        # so the bot posted '<coroutine object ...>'. Await it and send the embed.
        embed = await get_one(source)
        await ctx.send(embed=embed)
#name='', brief='', help='e.g. `.`'
'''@bot.command(name='', brief='', help='e.g. `.`')
async def h(ctx, what='general'):
#await ctx.send('pong')
if str(what).lower()=='general':
for command in commands:
await ctx.send(command)
elif str(what).lower() == 'fuse':
for command in fuse_help_commands:
await ctx.send(command)'''
@bot.command(
    name='add_user',
    brief='to activate fuse user\'s auto attendance',
    help='get code from https://ioee.herokuapp.com e.g. `.add_user *code*`')
async def add_user(ctx, pseudo_id):
    """Activate auto-attendance for the user identified by `pseudo_id` and confirm."""
    add_fuse_user(pseudo_id)
    await ctx.send("User {} has been activated successfully.".format(pseudo_id)
                   )
@bot.command(name='check',
             brief='checks if live class has been started',
             help='e.g. `.check`')
async def check(ctx, pseudo_id):
    # NOTE(review): despite the name, this registers the user via add_fuse_user
    # and only reports that manual checking is unavailable — confirm intent.
    add_fuse_user(pseudo_id)
    await ctx.send("Sorry mannual checking is unavailable for a while")
#await test.start(True)
@bot.command(
    name='remove_user',
    brief='to deactivate fuse auto attadance of specific',
    help='get code from https://ioee.herokuapp.com e.g. `.remove_user *code*`')
async def remove_user(ctx, pseudo_id):
    """Deactivate a fuse-classroom pseudo id.

    BUG FIX: remove_fuse_user() was called twice (once unconditionally,
    once under the "users" key check); the unconditional call raised
    KeyError when the db had no "users" entry yet.
    """
    if "users" in db.keys():
        remove_fuse_user(pseudo_id)
    await ctx.send("User {} has been removed successfully.".format(pseudo_id))
#------------------------------------------
@bot.command(name='joke',
             brief='to get jokes',
             help='e.g. `.joke`, `.joke 10`')
async def joke(ctx, n=1):
    """Send one joke, or n jokes when n > 1.

    BUG FIX: the multi-joke branch called ctx.send() without awaiting it, so
    the coroutine was created and discarded and no joke was ever sent.
    """
    if n == 1:
        await ctx.send("\n\n\nJoke:" + str(get_joke()))
    else:
        for each in get_jokes(n):
            await ctx.send("\n\n\nJoke:" + str(each))
@bot.command(name='jokes',
             brief='to get jokes',
             help='e.g. `.jokes`, `.jokes 10`')
async def jokes(ctx, n=5, subreddit='jokes'):
    # Relay n top posts from the given subreddit into the current channel.
    channel_key = str(ctx.channel.id)
    await unleash_reddit(subreddit, channel_key, n)
@bot.command(name='riddle', brief='to get a riddle', help='e.g. `.riddle`')
async def riddle(ctx):
    # Send one riddle from the riddle source.
    await ctx.send(get_riddles())
@bot.command(name='quote', brief='to get an inspiring quote', help='e.g. `.`')
async def quote(ctx):
    # Fetch and send a single random quote.
    await ctx.send(get_quote())
@bot.command(name='inspire',
             brief='To get inspirational message',
             help='e.g. `.inspire`')
async def inspire(ctx):
    # Same source as `.quote`: one random inspirational quote.
    await ctx.send(get_quote())
@bot.command(name='puns', brief='To get puns', help='e.g. `.puns`')
async def puns(ctx):
    # Send whatever the puns source currently returns.
    await ctx.send(get_puns())
@bot.command(name='one_liners',
             brief='to get one liner jokes',
             help='e.g. `.one_liners`')
async def one_liners(ctx):
    # Feature not wired up yet; reply with the standard unavailable notice.
    await ctx.send("Sorry mannual checking is unavailable for a while")
@bot.command(name='meme',
             brief='to display meme from r/memes',
             help='e.g. `.meme`')
async def meme(ctx, what='memes'):
    # Fetch a single post embed for the subreddit and send it.
    post_embed = await get_one(what)
    await ctx.send(embed=post_embed)
@bot.command(name='memes',
             brief='to display memes from r/memes',
             help='e.g. `.memes 3`')
async def memes(ctx, n=5, subreddit='memes'):
    # Relay n top posts from the subreddit into this channel.
    channel_key = str(ctx.channel.id)
    await unleash_reddit(subreddit, channel_key, n)
@bot.command(name='reddit',
             brief='to display subreddit from r/subreddit',
             help='e.g. `.reddit motivation 3`')
async def reddit_posts(ctx, subreddit='motivation', n=3):
    """Relay n top posts from an arbitrary subreddit.

    BUG FIX: the function was named `memes`, silently rebinding the
    module-level name of the previous `.memes` command. Renamed
    (`reddit_posts`, not `reddit`, to avoid shadowing the praw client);
    the Discord command name is unchanged via name='reddit'.
    """
    await unleash_reddit(subreddit, str(ctx.channel.id), n)
@bot.command(name='deactivate',
             brief='to deactivate the bot',
             help='e.g. `.deactivate`')
async def deactivate(ctx):
    # Persist the off switch; `.activate` flips it back.
    db["responding"] = False
    await ctx.send(
        "Encouragement bot is deactivated.\nPlease enter: .activate to activate."
    )
@bot.command(name='activate', brief='to activate bot', help='e.g. `.activate`')
async def activate(ctx):
    # Re-enable encouragement responses.
    db["responding"] = True
    await ctx.send("Encouragement bot is enabled.. sorry for being rude.")
@bot.command(
    name='mute',
    brief=
    'to mute sucesful/unsuccessful attendance attempts of fuse auto attend.',
    help='e.g. `.mute successful`, `.mute unsuccessful`')
async def mute(ctx, what):
    # Map each valid target to (db flag, confirmation message); anything
    # else is silently ignored, matching the original if/elif behavior.
    targets = {
        'unsuccessful': ("unsuccessful_logs",
                         "unsuccessful attending_logs are muted."),
        'successful':
        ("successful_logs",
         "successful attending_log are muted. to unmute please enter: .unmute successful"
         ),
    }
    if what in targets:
        flag, confirmation = targets[what]
        db[flag] = False
        await ctx.send(confirmation)
@bot.command(
    name='unmute',
    brief=
    'to unmute sucesful/unsuccessful attendance attempts of fuse auto attend.',
    help='e.g. `.unmute successful`, `.unmute unsuccessful`')
async def unmute(ctx, what):
    """Re-enable successful/unsuccessful fuse attendance log messages.

    BUG FIX: the confirmation messages were swapped -- unmuting the
    unsuccessful logs reported "successful ... unmuted" and vice versa
    (plus a "usuccessful" typo and wrong follow-up hint).
    """
    if what == 'unsuccessful':
        db["unsuccessful_logs"] = True
        await ctx.send("unsuccessful attending_logs are unmuted.")
    elif what == 'successful':
        db["successful_logs"] = True
        await ctx.send(
            "successful attending_logs are unmuted. to mute please enter: .mute successful"
        )
#print('\n\nwhat==\'\'', end=' ')
#print(what=='')
# db["responding"] = False
# await message.channel.send("Encouragement bot is deactivated.\nPlease Enter: .activate to activate")
@bot.command(name='list',
             brief='to list the current encouraging messages',
             help='e.g. `.list`')
async def list_items(ctx, what='encouragements'):
    """List registered users or the stored encouragement messages.

    BUG FIX: the function was named `list`, so after decoration the
    module-level name `list` was rebound to the discord Command object and
    every later builtin call such as list(db["users"]) (remove_fuse_user,
    delete_encouragment, sanitize_db, auto_attend, ...) raised at runtime.
    The Discord command name stays 'list' via the decorator.
    """
    if what == 'users':
        users = []
        if "users" in db.keys():
            users = list(db["users"])
        await ctx.send('Users: ' + str(users))
    else:
        encouragements = []
        if "encouragements" in db.keys():
            encouragements = list(db["encouragements"])
        await ctx.send(encouragements)
@bot.command(name='delete',
             brief='To delete encouragement message',
             help='e.g. `.delete Every passsing second is makingyou better`')
async def delete(ctx, *, index):
    """Delete an encouragement message from the db and echo the new list.

    BUG FIXES: `index` is now consume-rest so multi-word messages (as in
    the help example) are captured whole, and the ".del" prefix is only
    stripped when present -- the old unconditional
    index.split(".del", 1)[1] raised IndexError under command parsing,
    where the argument never contains the command prefix.
    """
    encouragements = []
    if "encouragements" in db.keys():
        if ".del" in index:
            index = index.split(".del", 1)[1]
        index = index.strip()
        delete_encouragment(index)
        encouragements = list(db["encouragements"])
    await ctx.send(encouragements)
@bot.command(name='new',
             brief='To add new encouraging message to database',
             help='e.g. `.new Every passsing second is makingyou better`')
async def new(ctx, *, msg):
    """Store a new encouragement message in the db.

    BUG FIX: `msg` is now consume-rest (keyword-only), so multi-word
    messages -- as shown in the help example -- are captured whole instead
    of only their first word.
    """
    encouraging_message = msg.strip()
    update_encouragements(encouraging_message)
    await ctx.send("New encouraging message added.")
@bot.command(name='avatar',
             brief='To see avatar of specific member in the group',
             help='e.g. `.avatar @Encouragement Bot`')
async def avatar(ctx, *, avamember: discord.Member = None):
    """Send a member's avatar URL; defaults to the command author.

    BUG FIX: `.avatar` without a mention left avamember as None and
    raised AttributeError on `.avatar_url`.
    """
    member = avamember or ctx.author
    await ctx.send(member.avatar_url)
# _______________________________________________________________________
# ---------------------------- For Music Bot : https://medium.com/pythonland/build-a-discord-bot-in-python-that-plays-music-and-send-gifs-856385e605a1
# _______________________________________________________________________
import os, youtube_dl
import ffmpeg
@bot.command(
    name='join',
    help='Tells the bot to join the voice channel before playing music ')
async def join(ctx):
    # Guard clause: the caller must already be in a voice channel.
    voice_state = ctx.message.author.voice
    if not voice_state:
        await ctx.send("{} is not connected to a voice channel".format(
            ctx.message.author.name))
        return
    await voice_state.channel.connect()
@bot.command(name='leave', help='To make the bot leave the voice channel')
async def leave(ctx):
    # Disconnect if we hold an active voice connection in this guild.
    voice_client = ctx.message.guild.voice_client
    if not voice_client.is_connected():
        await ctx.send("The bot is not connected to a voice channel.")
        return
    await voice_client.disconnect()
class YTDLSource(discord.PCMVolumeTransformer):
    """Volume-controlled audio source whose metadata comes from youtube_dl."""
    def __init__(self, source, *, data, volume=0.5):
        super().__init__(source, volume)
        # Raw youtube_dl info dict for this entry.
        self.data = data
        self.title = data.get('title')
        self.url = ""
    @classmethod
    async def from_url(cls, url, *, loop=None, stream=False, download=False):
        """Resolve `url` (free-text search or link) via youtube_dl.

        Returns (stream_URL, thumbnails, title, webpage_url). With
        download=True the audio is saved under ./downloads and then renamed
        to '<title>.mp3' relative to the CWD (the `.d` command later reads
        that exact path). `loop`/`stream` are accepted but unused here.
        """
        SAVE_PATH = os.path.join(os.getcwd(), 'downloads')
        ydl_opts = {
            'format': 'bestaudio/best',
            'restrictfilenames': True,
            'noplaylist': True,
            'nocheckcertificate': True,
            'ignoreerrors': False,
            'logtostderr': False,
            'quiet': True,
            'no_warnings': True,
            'default_search': 'auto',
            'source_address':
            '0.0.0.0',  # bind to ipv4 since ipv6 addresses cause issues sometimes
            'preferredcodec': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'webm',
                'preferredquality': '192',
            }],
            'outtmpl':SAVE_PATH + '/%(title)s.%(ext)s',
        }
        # "ytsearch:" makes youtube_dl treat plain text as a search query;
        # the first result is used.
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
          data = ydl.extract_info(f"ytsearch:{url}", download=download)['entries'][0]
        URL = data['url']
        thumbnails = data['thumbnails']
        title = data['title']
        vid_url = data['webpage_url']
        print(URL)
        # Renaming files if downloaded.
        # NOTE(review): this renames EVERY completed file in ./downloads to
        # the same '<title>.mp3' target (in the CWD) -- confirm only one
        # finished download is ever present at a time.
        if download==True:
          files = os.listdir(os.path.join(os.getcwd(), 'downloads'))
          for file_name in files:
            if not file_name.endswith('.part'):
                # .part files are still-in-progress downloads; skip them.
                file_name = os.path.join(os.getcwd(), 'downloads', file_name)
                os.rename(file_name, title + '.mp3')
        return(URL,thumbnails, title, vid_url)
@bot.command(name='p',
             brief='To play song note: Please enter: `.join` first',
             help="example: `.play gangnam style`")
async def play(ctx, *, url):
    # Resolve a YouTube search/link, stream it into the caller's voice
    # channel, and post a now-playing embed with control reactions.
    global playing
    # Remember what is playing so the reaction handlers can re-download it.
    playing = url
    if not ctx.message.author.voice:
        await ctx.send("{} is not connected to a voice channel".format(
            ctx.message.author.name))
        return
    else:
        channel = ctx.message.author.voice.channel
        try:
            global player
            player = await channel.connect()
        except:
            # Already connected: keep the existing global player.
            pass
    #joined the channel
    try:
        server = ctx.message.guild
        voice_channel = server.voice_client
        async with ctx.typing():
            URL, thumbnails, title, vid_url = await YTDLSource.from_url(url, loop=bot.loop)
            # Stop anything already playing before starting the new track.
            player.stop()
            player.play(discord.FFmpegPCMAudio(URL))
            print('vid_url:{}, thumbnails:{}, title:{}, URL:{},url:{}'.format(vid_url, thumbnails, title, URL, url))
            embed=discord.Embed(title=title,
                                color=0x00FFFF,
                                url=vid_url)
            embed.set_author(name=ctx.message.author)
            embed.set_thumbnail(url=thumbnails[0]['url'])
            embed.timestamp = datetime.datetime.utcnow()
            embed.set_footer(text=f'Added by {ctx.author}')
            message = await ctx.send(embed=embed)
            # Control reactions: pause, stop, download (see on_reaction_add).
            emos=['⏸️','⏹️', '⬇️']
            for emoji in emos:
                await message.add_reaction(emoji)
    except Exception as e:
        print(e)
        await ctx.send("The bot is not connected to a voice channel.")
#Downloads videb name/url and returns full filename
async def download_from_youtube(url):
SAVE_PATH = os.path.join(os.getcwd(), 'downloads')
ydl_opts = {
'format': 'bestaudio/best',
'preferredcodec': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'webm',
'preferredquality': '192',
}],'outtmpl':SAVE_PATH + '/%(title)s.%(ext)s',
}
print(' downloading!!! ')
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
try:
ydl.download([url])
except:
video = ydl.extract_info(f"ytsearch:{url}", download=True)['entries'][0]
else:
video = ydl.extract_info(url, download=False)
#return video
#print('type_of'+str(type(video)))
# Didnot work for filename we extracted did not match with actual file_name
'''file_name=str(video['title'] + '-' +video['id'] + '.' +video['formats'][3]['ext'])
file_name = file_name.replace('/','_')
'''
files = os.listdir(os.path.join(os.getcwd(), 'downloads'))
for file_name in files:
if not file_name.endswith('.part'):
# To download files as .mp3
mp3_format = os.path.join(os.getcwd(), 'downloads', file_name.replace(file_name.split('.')[-1], 'mp3'))
file_name = os.path.join(os.getcwd(), 'downloads', file_name)
os.rename(file_name, mp3_format)
print('file_name: {}'.format(file_name))
print('mp3_format: {}'.format(mp3_format))
return(mp3_format)
@bot.command(name='d',
             brief='To download song note: Please enter: `.d song name` ',
             help="example: `.d gangnam style`")
async def d(ctx, *, url: str):
    """Download a song by name/url, upload the mp3 to the channel, then
    delete the local file.

    IMPROVEMENT: os.makedirs(..., exist_ok=True) replaces the racy
    `'downloads' in os.listdir()` check before os.mkdir().
    """
    os.makedirs('downloads', exist_ok=True)
    print('Try download')
    async with ctx.typing():
        URL, thumbnails, title, vid_url = await YTDLSource.from_url(
            url, loop=bot.loop, download=True)
        full_downloaded_file_name = title + '.mp3'
        await ctx.send(file=discord.File(full_downloaded_file_name))
        # Clean up the local copy once Discord has it.
        os.remove(full_downloaded_file_name)
        print('  downloaded!!!  ')
@bot.command(name='pause', help='This command pauses the song')
async def pause(ctx):
    # Pause only when something is actually playing.
    voice_client = ctx.message.guild.voice_client
    if not voice_client.is_playing():
        await ctx.send("The bot is not playing anything at the moment.")
        return
    await voice_client.pause()
@bot.command(name='resume', help='Resumes the song')
async def resume(ctx):
    # Resume only makes sense when the player is currently paused.
    voice_client = ctx.message.guild.voice_client
    if not voice_client.is_paused():
        await ctx.send(
            "The bot was not playing anything before this. Use play_song command"
        )
        return
    await voice_client.resume()
@bot.command(name='stop', help='Stops the song')
async def stop(ctx):
    # Acknowledge with a stop-sign reaction, then stop playback if any.
    await ctx.message.add_reaction('🛑')
    voice_client = ctx.message.guild.voice_client
    if voice_client.is_playing():
        await voice_client.stop()
    else:
        await ctx.send("The bot is not playing anything at the moment.")
#To make leave voice channel if bot is alone in voice channel
@bot.event
async def on_voice_state_update(member, before, after):
    # Leave voice when the bot ends up alone in its channel.
    print('\n\n Fired on_voice_state_update function \n\n')
    voice_client = member.guild.voice_client
    if voice_client is not None and len(voice_client.channel.members) == 1:
        await voice_client.disconnect()
@bot.command(aliases=['donation', 'support'])
async def donate(ctx, url: str = 'http://stream.radioparadise.com/rock-128'):
    # Post the donation/support embed.
    # NOTE(review): the `url` parameter is never used here -- looks copied
    # from playfm; confirm before removing.
    embed=discord.Embed(title='Support:',
                        description='''Thank you :-) \nesewa/khalti id:\n 9840445934 \n\n Paytreon:\nhttps://www.patreon.com/join/7095305? \n\n Coinbase:\n https://commerce.coinbase.com/checkout/63a4b635-8510-459f-b091-a4f0697993e6
 \n\n
 And please vote for me here: https://top.gg/bot/862191340355715093/vote
 ''',
                        color=0x00FFFF,
                        )
    embed.set_author(
        name=ctx.message.author,
    )
    embed.timestamp = datetime.datetime.utcnow()
    embed.set_footer(text=f'Added by {ctx.author}')
    message = await ctx.send(embed=embed)
#_______________________________________________________________________
# ----------------------------- ---------------------------------------
# _______________________________________________________________________
# ----------------------------- FM Player -----------------------------
from discord import FFmpegPCMAudio
from discord.ext.commands import Bot
from dotenv import load_dotenv
load_dotenv()
#To be implemented
global streams
streams = None
def start_load_streams():
    """Load the FM station list from test_fm_list.json into the module
    global `streams`, unless it is already a populated sequence.

    IMPROVEMENT: the bare `except:` is narrowed to the exceptions the
    `streams[0]` probe can actually raise (None -> TypeError, empty ->
    IndexError/KeyError), so unrelated errors are no longer swallowed.
    """
    global streams
    try:
        streams[0]
    except (TypeError, IndexError, KeyError):
        with open('test_fm_list.json', 'r') as F:
            streams = json.load(F)
#To get current, next, previous streams
def get_stream(which=None, current=None):
global streams
try:
streams[0]
print('Streams already defined')
except:
with open('test_fm_list.json','r') as F:
streams = json.load(F)
streams = streams['stream_links']
print(streams)
#global streams_url
#streams=streams['stream_links']
#streams_url = [i['url'] for i in streams]
finally:
if current==None:
current={
"name": "Radio Nepal",
"city" : "kathmandu",
"url": "https://radionepal.news/live/audio/mp3",
"image": "https://radionepal.gov.np/wp-content/themes/rdnp/images/logo-en.png",
"desc": "am/sw/fm radio",
"longDesc": "Radio Nepal, oldest radio of nepal."
}
if which=='next':
nxt = streams.index(current) + 1
# Triggred to get next station at the end of stations list
if nxt >= len(streams):
nxt -= len(streams)
current = streams[nxt]
print(nxt)
elif which=='prev':
prev = streams.index(current) - 1
print(prev)
# Triggred to get previous station at the beginning of stations list
if prev < 0:
prev += len(streams)
print('current:{}, prev:{}'.format(streams.index(current),prev))
current = streams[prev]
return(current)
@bot.command(aliases=['fm', 'radio'])
async def playfm(ctx, url: str = 'http://stream.radioparadise.com/rock-128'):
    # Start FM playback in the caller's voice channel and post a
    # now-playing embed with prev/pause/stop/next reaction controls.
    # NOTE(review): the `url` parameter is unused; the station comes from
    # get_stream() -- confirm before removing.
    global playing
    # Mode flag read by the reaction handlers ('fm' vs a track query).
    playing = "fm"
    global currently_playing_message
    global stream
    stream = get_stream()
    channel = ctx.message.author.voice.channel
    global player
    try:
        player = await channel.connect()
    except:
        # Already connected: reuse the existing global player.
        pass
    player.play(FFmpegPCMAudio(stream['url']))
    embed=discord.Embed(title=stream['name'],
                        description=stream['longDesc'],
                        color=0x00FFFF,
                        url=stream['url'])
    embed.set_author(
        name=ctx.message.author,
    )
    embed.set_thumbnail(url=stream['image'])
    embed.timestamp = datetime.datetime.utcnow()
    embed.set_footer(text=f'Added by {ctx.author}')
    # Kept globally so reaction handlers can edit it on station change.
    currently_playing_message = await ctx.send(embed=embed)
    emos=['⏮️', '⏸️', '⏹️', '⏭️']#, '⏺️', '⬇️']
    for emoji in emos:
        await currently_playing_message.add_reaction(emoji)
def get_embed(reaction, user, stream):
    # Build the now-playing embed shown after an FM station change.
    station_embed = discord.Embed(title=stream['name'],
                                  color=0x00FFFF,
                                  url=stream['url'])
    station_embed.set_author(name=user)
    station_embed.set_thumbnail(url=stream['image'])
    station_embed.timestamp = datetime.datetime.utcnow()
    station_embed.set_footer(text=f'Added by {user}')
    return station_embed
@bot.event
async def on_reaction_add(reaction, user, a=''):
    # Playback control via reactions: stop, pause/resume toggle,
    # next/previous FM station, and (non-FM) download of the current track.
    global stream
    if not user.bot:
        # stop emoji
        if str(reaction.emoji) == "⏹️":
            player.stop()
        # pause emoji (acts as a toggle)
        elif str(reaction.emoji) == "⏸️":
            if player.is_playing():
                player.pause()
                print('paused')
            else:
                player.resume()
                print('resume')
        # next emoji -- only meaningful while the FM mode is active
        elif str(reaction.emoji) == "⏭️":
            if playing=='fm':
                print('Playing next, current:{}'.format(stream))
                stream = get_stream('next',stream)
                player.stop()
                player.play(FFmpegPCMAudio(stream['url']))
                embed=get_embed(reaction, user, stream)
                await currently_playing_message.edit(embed=embed)
        # previous emoji
        elif str(reaction.emoji) == "⏮️":
            if playing=='fm':
                print('Playing next, current:{}'.format(stream))
                stream = get_stream('prev', stream)
                player.stop()
                player.play(FFmpegPCMAudio(stream['url']))
                embed=get_embed(reaction, user, stream)
                await currently_playing_message.edit(embed=embed)
            print('Playing next')
        # download emoji -- re-downloads the track remembered in `playing`
        elif str(reaction.emoji) == "⬇️":
            if playing!='fm':
                if not 'downloads' in os.listdir():
                    os.mkdir('downloads')
                print('Try download')
                async with reaction.message.channel.typing():
                    URL, thumbnails, title, vid_url = await YTDLSource.from_url(playing, loop=bot.loop, download=True)
                    full_downloaded_file_name = title + '.mp3'
                    await reaction.message.channel.send(file=discord.File(full_downloaded_file_name))
                    os.remove(full_downloaded_file_name)
                    print('  downloaded!!!  ')
        else:
            # Unknown emoji: echo it back onto the message.
            await reaction.message.add_reaction(reaction)
#print('hii')
#print(reaction)
#print(reaction.message)
#print(user)
#if user.bot:
# return
#else:
# previous_messages = await channel.history(limit=1).flatten()
# prev_message.add_reaction('♥️')
'''if emoji == "emoji 1":
fixed_channel = bot.get_channel(channel_id)
await fixed_channel.send(embed=embed)
elif emoji == "emoji 2":
#do stuff
elif emoji == "emoji 3":
#do stuff
else:
return'''
@bot.event
async def on_reaction_remove(reaction, user):
    # Mirror of on_reaction_add so toggling a reaction off also acts
    # (e.g. un-pressing pause resumes).
    # NOTE(review): the download branch here fires when playing == 'fm'
    # (the add handler requires playing != 'fm') -- confirm which is
    # intended; one of the two looks inverted.
    print('\nremoved reaction\n')
    global stream
    if not user.bot:
        # stop emoji
        if str(reaction.emoji) == "⏹️":
            player.stop()
        # pause emoji (toggle)
        elif str(reaction.emoji) == "⏸️":
            if player.is_playing():
                player.pause()
                print('paused')
            else:
                player.resume()
                print('resume')
        # next emoji
        elif str(reaction.emoji) == "⏭️":
            if playing=='fm':
                print('Playing next, current:{}'.format(stream))
                stream = get_stream('next',stream)
                player.stop()
                player.play(FFmpegPCMAudio(stream['url']))
                embed=get_embed(reaction, user, stream)
                await currently_playing_message.edit(embed=embed)
        # previous emoji
        elif str(reaction.emoji) == "⏮️":
            if playing=='fm':
                print('Playing next, current:{}'.format(stream))
                stream = get_stream('prev', stream)
                player.stop()
                player.play(FFmpegPCMAudio(stream['url']))
                embed=get_embed(reaction, user, stream)
                await currently_playing_message.edit(embed=embed)
            print('Playing next')
        # download emoji
        elif str(reaction.emoji) == "⬇️":
            if playing=='fm':
                if not 'downloads' in os.listdir():
                    os.mkdir('downloads')
                print('Try download')
                async with reaction.message.channel.typing():
                    full_downloaded_file_name = await download_from_youtube(playing)
                    await reaction.message.channel.send(file=discord.File(full_downloaded_file_name))
                    os.remove(full_downloaded_file_name)
                    print('  downloaded!!!  ')
        else:
            await reaction.message.add_reaction(reaction)
# _____________________________________________________
# ///////////////////// FM Player /////////////////////
# _____________________________________________________
@bot.command(aliases=['s', 'sto'])
async def stopfm(ctx):
    # Halt whatever the global voice player is currently playing.
    player.stop()
@bot.command(
    name='disable_unleashing',
    brief='To disable/stop add unleashing all reddit posts to the server',
    help='e.g. `.disable_unleashing`')
async def disable_unleashing(ctx):
    # Stop the background unleashing loop; stopping twice raises, which we
    # report as "already disabled".
    try:
        unleashing.stop()
        await ctx.send('unleashing disabled successfully.')
    except:
        await ctx.send('already disabled.')
@bot.command(
    name='enable_unleashing',
    brief=
    'To enable/start unleashing previously stopped reddit posts to the server',
    help='e.g. `.enable_unleashing`')
async def enable_unleashing(ctx):
    # Start the background unleashing loop; starting twice raises, which we
    # report as "already enabled".
    try:
        unleashing.start()
        await ctx.send('unleashing enabled successfully.')
    except:
        await ctx.send('already enabled.')
@bot.command(name='disable_autoattend',
             brief='To disable/stop autoattending in fuse classroom.',
             help='e.g. `.disable_autoattend`')
async def disable_autoattend(ctx):
    """Stop the fuse auto-attend background loop.

    BUG FIX: the brief said "To start autoattending" and the help example
    referenced `.start_unleashing` -- both copy-paste mistakes in the
    user-facing help text.
    """
    try:
        auto_attend.stop()
        await ctx.send('fuse auto-attend disabled successfully.')
    except:
        await ctx.send('already disabled.')
@bot.command(name='enable_autoattend',
             brief='To enable/start autoattending in fuse classroom.',
             help='e.g. `.enable_autoattend`')
async def enable_autoattend(ctx):
    """Start the fuse auto-attend background loop.

    BUG FIX: the help example referenced `.enable_unleashing` (wrong
    command) and the brief contained the typo 'enable/stopsrt'.
    """
    try:
        auto_attend.start()
        await ctx.send('fuse auto-attend enabled successfully.')
    except:
        await ctx.send('already enabled.')
@bot.command(name='video_embed_test', brief='', help='e')
async def video(ctx):
    # Experimental command: try to embed a video in a Discord embed.
    # NOTE(review): discord.Embed has set_image but no public set_video
    # method -- this likely raises AttributeError at runtime; confirm
    # against the installed discord.py version before relying on it.
    embed = discord.Embed(
        title='title',
        url='https://thumbs2.redgifs.com/WelcomeSweetTadpole-mobile.mp4',
        description='body',
        colour=discord.Color.red())
    embed.set_image(
        url="https://thumbs2.redgifs.com/WelcomeSweetTadpole-mobile.mp4")
    embed.set_video(
        url="https://www.redgifs.com/watch/blissfulimperfectyardant")
    await ctx.send(embed=embed)
'''
async def my_background_task():
await client.wait_until_ready()
counter = 0
channel = client.get_channel(id=123456789) # replace with channel_id
while not client.is_closed():
counter += 1
await channel.send(counter)
print(counter)
await asyncio.sleep(60) # task runs every 60 seconds''' ''
# Trigger words that make the bot reply with an encouragement.
sad_words = [
    "sad", "depressed", "unhappy", "angry", "miserable", "depressing", "hurt",
    "pain"
]
# Built-in encouragements, extended at runtime via db["encouragements"].
starter_encouragements = [
    "Cheer up!",
    "You are a great person / bot!",
]
# General help lines (name kept as-is: 'commandss' is referenced elsewhere).
commandss = [
    '\".h fuse\" or \".help fuse\" -> for fuse_auto_attend help',
    'fuse auto-attend registration at: https://ioee.herokuapp.com/',
    '\".inspire\" or \".quote\" -> to display quote ',
    '\".joke\" -> to display joke',
    '\".meme\" -> displays best random meme',
    '\".riddle\" -> displays best random riddle',
    '\".puns\" -> displays best random puns',
    '\".knock knock\" -> displays knock knock joke',
    '\".deactivate\" -> deactivates the bot .activate -> activates the bot',
    '\".new inspirational_message\" -> Adds new inspirationsl message to db',
    '\".del inspirational_message\" -> deletes inspirational message from db',
    '\".list\" -> lists the current inspirational messages',
]
# Help lines specific to the fuse auto-attendance feature.
fuse_help_commands = [
    '\".h\" or \".help\" - for general help',
    '----------- ------------------------- -----------',
    'fuse auto-attend registration at: https://ioee.herokuapp.com/',
    '---------------------------------',
    '\".add_user user_token\" -> to add user for auto-fuse attandance',
    '.remove_user user_token -> to remove user',
    '\".list_user\" -> to list available users',
    '\".check class\" or \".snoop class\" -> checks if live class started.',
    '\".mute unsuccessful\" -> to mute unsuccessful attending_logs. ie. hide \"Live Class not started\" messages',
    '\".mute successful\" -> to mute successful attending_logs ie. hide messages when attended successfully',
    '\".unmute unsuccessful\" -> to unmute unsuccessful attending_logs ie. show \"Live Class not started\" messages',
    '\".umute successful\" -> to unmute successful attending_logs ie. show messages when attended successfully',
]
#from discord.ext import commands
#bot = commands.Bot(command_prefix='.')
#@bot.command()
#async def test(ctx):
# await ctx.send('I heard you! {0}'.format(ctx.author))
'''print('--------------Test Mode--------------------------------')
print(client.servers)
print('-------------------------------------------------------')'''
# First-run defaults for the persisted feature flags.
if "responding" not in db.keys():
    db["responding"] = True
if "unsuccessful_logs" not in db.keys():
    db["unsuccessful_logs"] = False
if "successful_logs" not in db.keys():
    db["successful_logs"] = True
def get_quote():
    """Fetch one random quote from zenquotes.io as '<quote> -<author>'.

    IMPROVEMENT: parse via response.json() for consistency with
    get_joke()/get_jokes() instead of json.loads(response.text).
    """
    response = requests.get("https://zenquotes.io/api/random")
    json_data = response.json()
    quote = json_data[0]['q'] + " -" + json_data[0]['a']
    return quote
def get_joke():
    # Fetch one random joke, formatted as 'title : body - author'.
    data = requests.get("https://imao.herokuapp.com/jokes/api/random/").json()
    return str(data['title']) + ' : ' + str(data['body']) + ' - ' + str(
        data['author'])
def get_jokes(no_of_jokes):
    # Fetch `no_of_jokes` jokes, each formatted as 'title : body - author'.
    response = requests.get("https://imao.herokuapp.com/jokes/api/{}/".format(
        int(no_of_jokes)))
    return [
        str(j['title']) + ' : ' + str(j['body']) + ' - ' + str(j['author'])
        for j in response.json()['jokes']
    ]
def get_puns():
    # Placeholder until a real puns source is wired up.
    return 'Puns are comming very very soon!'
def get_riddles():
    # Placeholder until a real riddles source is wired up.
    return 'Riddles are comming very very soon!'
def add_fuse_user(pseudoid):
    # Store a pseudo id in db["users"], creating the list on first use;
    # duplicates are ignored.
    if "users" not in db.keys():
        db["users"] = [pseudoid]
        return
    users = db["users"]
    if pseudoid not in users:
        users.append(pseudoid)
        db["users"] = users
def remove_fuse_user(pseudoid):
    # Drop a pseudo id from db["users"] if it is currently registered.
    registered = list(db["users"])
    if pseudoid in registered:
        registered.remove(pseudoid)
        db["users"] = registered
def update_encouragements(encouraging_message):
    # Append a new encouragement to db["encouragements"], creating the
    # list on first use.
    if "encouragements" not in db.keys():
        db["encouragements"] = [encouraging_message]
        return
    stored = db["encouragements"]
    stored.append(encouraging_message)
    db["encouragements"] = stored
def delete_encouragment(index):
    # Remove the given encouragement text from db["encouragements"], if present.
    stored = list(db["encouragements"])
    if index in stored:
        stored.remove(index)
        db["encouragements"] = stored
def sanitize_db():
    # De-duplicate db["users"] and strip quote characters / whitespace
    # from every id.
    unique_users = list(set(list(db["users"])))
    db["users"] = [
        u.replace('\'', '').replace('\"', '').strip() for u in unique_users
    ]
    print('Users sanitized. \n Users:')
    print(list(db["users"]))
def attend_each(usr):
    """Hit the attendance endpoint for one pseudo id; return the response body.

    BUG FIX: the URL was missing '//' after the scheme ('https:ioee...'),
    unlike every other attend call site in this file, so requests could not
    reach the host.
    """
    custom_url = 'https://ioee.herokuapp.com/attend/{}/'.format(usr)
    response = requests.get(custom_url)
    return (response.text)
#---------------Working------------------------
# For scrapping quotes every 1 min.
@tasks.loop(minutes=1)
async def start_scrapping():
    # Every minute: merge freshly fetched zenquotes into quotes.json,
    # skipping quotes already on disk.
    with open('quotes.json', 'r') as f:
        saved_quotes = json.load(f)['quotes']
    fresh = requests.get('https://zenquotes.io/api/quotes').json()
    added = 0
    for q in fresh:
        if q not in saved_quotes:
            saved_quotes.append(q)
            added += 1
    with open('quotes.json', 'w') as file:
        json.dump({'quotes': saved_quotes}, file, indent=4)
    print('Saved {} quotes, total:{}'.format(added, len(saved_quotes)))
@tasks.loop(minutes=30)
async def auto_attend(mannual_attempt=False):
    # Every 30 min (weekdays, 03:05-11:45 server time): hit the attendance
    # endpoint for each registered user and report results to the log
    # channel, deduplicating against the channel's last message.
    intents = discord.Intents.default()
    intents.members = True
    print("Users: ")
    users = list(db['users'])
    print(users)
    # Limit attend attempts to roughly 9:00-17:45 local, i.e. 03:05-11:45
    # in the host clock's timezone.
    now = datetime.datetime.now()
    morning = now.replace(hour=3, minute=5, second=0, microsecond=0)
    evening = now.replace(hour=11, minute=45, second=0, microsecond=0)
    if (now.strftime("%A") != "Saturday") and (now >= morning
                                               and now <= evening):
        # Fixed log-channel id for attendance reports.
        channel = bot.get_channel(id=862205194283253763)
        users = []
        if "users" in db.keys():
            users = db["users"]
        for user in users:
            custom_url = 'https://ioee.herokuapp.com/attend/{}/'.format(user)
            print(custom_url)
            response = requests.get(str(custom_url))
            response = response.text.strip()
            print(response)
            print(response)
            # Respect the mute flags unless this is a manual attempt.
            if response == "Live Class not started" and db[
                    "unsuccessful_logs"] == False and mannual_attempt == False:
                continue
            elif db["successful_logs"] == False and mannual_attempt == False:
                await channel.send(
                    "Successful attending attempt_logs are muted. to unmute please enter: .unmute unsuccessful"
                )
            else:
                try:
                    previous_messages = await channel.history(
                        limit=1).flatten()
                    prev_message = previous_messages[0].content
                    print('previous_message:')
                    print(prev_message)
                except:
                    # History unavailable: use a sentinel so comparison
                    # below still works.
                    prev_message = "Your Attendance is done.Discord prevened previous message view"
                # Only post when the status actually changed, to avoid
                # spamming the channel with identical messages.
                if (str(prev_message).strip() != response):
                    print('not same messages:prev_message and rseponse')
                    await channel.send(response)
                else:
                    print('same message as previous message, so not sending')
                    pass
#----------- To list discord servers ---------
@tasks.loop(hours=25)
async def share_info():
    """Post the meroshare info page text to the share channel when it changed.

    BUG FIX: `prev_message` was only assigned inside the try block; when
    the history lookup failed, the `except: pass` left it unbound and the
    comparison below raised NameError. It is now initialized first, and the
    bare except is narrowed to Exception.
    """
    intents = discord.Intents.default()
    intents.members = True
    channel = bot.get_channel(id=882664470692909056)
    response = requests.get('http://ioee.herokuapp.com/meroshare/')
    response = response.text.strip()
    print(response)
    prev_message = ''
    try:
        previous_messages = await channel.history(limit=1).flatten()
        prev_message = previous_messages[0].content
        print('previous_message:')
        print(prev_message)
    except Exception:
        pass
    # Only post when the page content differs from the last channel message.
    if (str(prev_message).strip() != response):
        print('not same messages:prev_message and rseponse')
        await channel.send(response)
    else:
        print('same message as previous message, so not sending')
@tasks.loop(hours=25)
async def meroshare(mannual_attempt=False):
    # NOTE(review): despite the name, this body duplicates auto_attend --
    # it hits the /attend/ endpoint per user, not a meroshare endpoint,
    # only with a different channel id and time window. Confirm whether
    # this is an unfinished copy-paste.
    intents = discord.Intents.default()
    intents.members = True
    # Window 01:00-10:15 in the host clock's timezone, weekdays only.
    now = datetime.datetime.now()
    morning = now.replace(hour=1, minute=00, second=0, microsecond=0)
    evening = now.replace(hour=10, minute=15, second=0, microsecond=0)
    if (now.strftime("%A") != "Saturday") and (now >= morning
                                               and now <= evening):
        # Fixed channel id for this task's reports.
        channel = bot.get_channel(id=882655060050444288)
        users = []
        if "users" in db.keys():
            users = db["users"]
        for user in users:
            custom_url = 'https://ioee.herokuapp.com/attend/{}/'.format(user)
            print(custom_url)
            response = requests.get(str(custom_url))
            response = response.text.strip()
            print(response)
            print(response)
            # Respect the mute flags unless this is a manual attempt.
            if response == "Live Class not started" and db[
                    "unsuccessful_logs"] == False and mannual_attempt == False:
                continue
            elif db["successful_logs"] == False and mannual_attempt == False:
                await channel.send(
                    "Successful attending attempt_logs are muted. to unmute please enter: .unmute unsuccessful"
                )
            else:
                try:
                    previous_messages = await channel.history(limit=1
                                                              ).flatten()
                    prev_message = previous_messages[0].content
                    print('previous_message:')
                    print(prev_message)
                except:
                    # History unavailable: sentinel keeps the comparison valid.
                    prev_message = "Your Attendance is done.Discord prevened previous message view"
                # Only post when the status actually changed.
                if (str(prev_message).strip() != response):
                    print('not same messages:prev_message and rseponse')
                    await channel.send(response)
                else:
                    print('same message as previous message, so not sending')
                    pass
#----------- To list discord servers ---------
class OwnerCommands(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print("OwnerCommands Is Ready")
@bot.command()
async def servers(ctx):
discord.Intents.members = True
activeservers = bot.guilds
embed = discord.Embed(
title='Servers and members',
description='',
colour=discord.Color.green(),
)
'''for guild in activeservers:
print('guild.channels')
print(guild.channels)
embed.add_field(
name = str(guild.name) + ' ({}) own({})'.format(guild.member_count, guild.owner),
value=str([i.name for i in guild.members]),
)
'''
print('members:')
for i in bot.guilds[1:]:
a = i.fetch_members(limit=None)
aa = []
async for ii in a:
aa.append(ii.name)
#print(i, ii)
embed.add_field(name=str(i) +
' ({}) own({})'.format(i.member_count, i.owner),
value=str(aa))
await ctx.send(embed=embed)
#print(a)
# print(channel)
#await ctx.send(guild.name)
#print(guild.name)
def setup(client):
bot.add_cog(OwnerCommands(bot))
#-------------------------
async def unleash_reddit_jokes(subreddit, channel_id, no_of_posts=7):
channel = bot.get_channel(id=int(channel_id))
for n, submission in enumerate(
reddit.subreddit('jokes').top('day', limit=int(no_of_posts / 2))):
print('Unleash for loop:{}'.format(n))
title = submission.title
body = submission.selftext
embed = discord.Embed(title=title,
url=submission.url,
description=body,
colour=discord.Color.blue())
embed.set_image(url=submission.url)
await channel.send(embed=embed)
for n, submission in enumerate(
reddit.subreddit('jokes').hot(limit=no_of_posts -
int(no_of_posts / 4))):
print('Unleash for loop:{}'.format(n))
title = str(submission.title)[:256]
body = submission.selftext
embed = discord.Embed(title=title,
url=submission.url,
description=body,
colour=discord.Color.blue())
await channel.send(embed=embed)
for n, submission in enumerate(
reddit.subreddit('jokes').new(limit=no_of_posts -
math.ceil(no_of_posts / 4))):
print('Unleash for loop:{}'.format(n))
title = submission.title
body = submission.selftext
embed = discord.Embed(title=title,
url=submission.url,
description=body,
colour=discord.Color.blue())
await channel.send(embed=embed)
async def unleash_reddit(subreddit, channel_id, no_of_posts=5):
channel = bot.get_channel(id=int(channel_id))
submissions = await reddit.subreddit(subreddit)
donot_proceed = 0
#To display hot post if only one is to be fetched
if no_of_posts == 1:
donot_proceed = 1
no_of_posts = 2
async for submission in submissions.hot(limit=int(no_of_posts / 4)):
print('Unleash for loop:{}'.format(0))
title = submission.title
body = submission.selftext
embed = discord.Embed(title=title,
url=submission.url,
description=body,
colour=discord.Color.red())
embed.set_image(url=submission.url)
print('Submission_url: ', submission.url)
try:
#To filter lenthy messages > 2500 letters
if len(str(body)) < 2500:
image_formats = ['jpg', 'jpeg', 'png']
#checks if image_format in submission.url
if sum([(i in str(submission.url)) for i in image_formats]):
await channel.send(embed=embed)
else:
await channel.send(submission.url)
except:
pass
if donot_proceed != 1:
async for submission in submissions.top('day',
limit=int(no_of_posts / 2)):
print('Unleash for loop:{}'.format('n'))
title = submission.title
body = submission.selftext
embed = discord.Embed(title=title,
url=submission.url,
description=body,
colour=discord.Color.red())
embed.set_image(url=submission.url)
print('Submission_url: \"', submission.url, '\"')
if submission.url == '':
print('Guess What')
try:
if len(str(body)) < 2500:
image_formats = ['jpg', 'jpeg', 'png']
#checks if image_format in submission.url
if sum([(i in str(submission.url))
for i in image_formats]):
await channel.send(embed=embed)
else:
await channel.send(submission.url)
except:
pass
async for submission in submissions.new(limit=no_of_posts -
math.ceil(no_of_posts / 4)):
print('Unleash for loop:{}'.format(0))
title = submission.title
body = submission.selftext
embed = discord.Embed(title=title,
url=submission.url,
description=body,
colour=discord.Color.red())
embed.set_image(url=submission.url)
print('Submission_url: ', submission.url)
try:
if len(str(body)) < 2500:
image_formats = ['jpg', 'jpeg', 'png']
#checks if image_format in submission.url
if sum([(i in str(submission.url))
for i in image_formats]):
await channel.send(embed=embed)
else:
await channel.send(submission.url)
except:
pass
'''
async def unleash_reddit(subreddit, channel_id, no_of_posts=5):
channel = bot.get_channel(id=int(channel_id))
submissions_top = await reddit.subreddit(subreddit)
submissions_hot = await reddit.subreddit(subreddit)
submissions_new = await reddit.subreddit(subreddit)
#30% top, 40%hot, 30%new
for i in range(0, no_of_posts):
print('Unleash for loop:{}'.format(i))
if i < int(no_of_posts/3):
submission=random.choice([x async for x in submissions_top.top(limit=25)])
print(a)
''async for x in submissions_top.top(limit=15):
if not x.stickied:
submission = x
#submission = next(x async for x in submissions_top.top('all') if not x.stickied)''
elif i < int(no_of_posts/7):
#submission = next(x async for x in submissions_hot.hot('all') if not x.stickied)
submission=random.choice([x async for x in submissions_top.hot(limit=35)])
else:
#submission = next(x async for x in submissions_new.new('all') if not x.stickied)
submission=random.choice([x async for x in #submissions_top.new(limit=15)])
embed=discord.Embed(
title=submission.title,
description=submission.selftext,
#description=submission.title,
colour=discord.Color.green())
embed.set_image(url=submission.url)
await channel.send(embed=embed)'''
@tasks.loop(hours=6)
async def unleashing():
print('\nstart Unleashing')
intents = discord.Intents.default()
#discord.Intents.members = True
intents.members = True
intents.all()
for channel_id in dict(db['unleash']).keys():
for each_subreddit in db['unleash'][str(channel_id)]:
await unleash_reddit(each_subreddit, str(channel_id), 10)
print('Unleashed')
@bot.event
async def on_ready():
print('We have logged in as \"{0.user.name}\"'.format(bot))
print(bot.user.id)
#For fuse attendance trying
#auto_attend.start()
#For viewing share_info
#share_info.start()
#for unleashing from reddit
unleashing.start()
start_scrapping.start()
game = discord.Game("Chilling out.")
streaming = discord.Streaming(name='pubg lite',
url="https://www.twitch.tv/caterpileer")
#movie.url="https://thumbs2.redgifs.com/WelcomeSweetTadpole-mobile.mp4"
await bot.change_presence(status=discord.Status.online, activity=streaming)
#await bot.process_commands(message)
@bot.event
async def on_message(message):
# we do not want the bot to reply to itself
if message.author == bot.user:
return
if message.content.startswith('.guess'):
await message.channel.send('Guess a number between 1 to 10')
def guess_check(m):
return m.content.isdigit()
guess = await bot.wait_for('message', timeout=5.0, check=guess_check)
answer = random.randint(1, 10)
if guess is None:
fmt = 'Sorry, you took too long. It was {}.'
await message.channel.send(fmt.format(answer))
return
if int(guess.content) == answer:
await message.channel.send('You are right!')
else:
await message.channel.send(
'Sorry. It is actually {}.'.format(answer))
else:
if message.guild is None and message.author != bot.user:
#await channel.send(str(message.author) + str(message.content))
embed = discord.Embed(title=message.author,
description=message.content)
channel = bot.get_channel(id=873477235477184522)
await channel.send(embed=embed)
print(str(message.content))
if any(word in str(message) for word in sad_words):
options = starter_encouragements
if "encouragements" in db.keys():
#print(list(db["encouragements"]))
options = options #+ list(db["encouragements"])
await message.channel.send(random.choice(options))
await bot.process_commands(message)
#await message.channel.send('hello')
#if message!='fuck':
# await message.add_reaction('♥️')
# return
'''print('Author_id:')
print(message.author.id)
#message.author.send_message('hi')
print('Hello')
msg = message.content.strip()
if msg==".help" or msg == '.h':
for command in commands:
await message.channel.send(command)
elif msg==".help fuse" or msg == '.h fuse':
for command in fuse_help_commands:
await message.channel.send(command)
elif msg.startswith(".add_user"):
pseudocode = msg.split(".add_user",1)[1].replace('\"','').replace('\'','').strip()
add_fuse_user(pseudocode)
await message.channel.send("User {} has been added/activated successfully.".format(pseudocode))
elif msg.startswith(".remove_user"):
users = []
if "users" in db.keys():
pseudocode = msg.split(".remove_user",1)[1].strip()
remove_fuse_user(pseudocode)
await message.channel.send("User {} has been removed successfully.".format(pseudocode))
elif msg==".snoop class" or msg==".check class" or message == ".check live-class" or message == ".check live_class" or message == ".check live class":
await message.channel.send("Sorry mannual checking is unavailable for a while");
await test.start(True)
\'''if db["successful_logs"] == False:
await message.channel.send("Successful attending attempt_logs are muted. to unmute please enter: .unmute unsuccessful")
await message.channel.send("Checking Live Classes: ")
m = test_mannual()
await message.channel.send(m)\'''
elif msg.startswith(".deactivate"):
#switch = msg.split(".deac ",1)[1].lower()
db["responding"] = False
await message.channel.send("Encouragement bot is deactivated.\nPlease Enter: .activate to activate")
elif msg.startswith(".activate"):
db["responding"] = True
await message.channel.send("Encouragement bot is activated.\nPlease enter: .deactivate to deactivate.")
elif msg == (".mute unsuccessful") or msg == (".mute unsuccessful logs"):
db["unsuccessful_logs"] = False
await message.channel.send("unsuccessful attending_logs are muted.")
elif msg == (".unmute unsuccessful") or msg == (".unmute unsuccessful logs"):
db["unsuccessful_logs"] = True
await message.channel.send("unsuccessful attending_logs are unmuted.")
elif msg == (".mute successful") or msg == (".mute successful logs"):
db["successful_logs"] = False
await message.channel.send("successful attending_log are muted. to unmute please enter: .unmute successful")
elif msg == (".unmute successful") or msg == (".unmute successful logs"):
db["successful_logs"] = True
await message.channel.send("successful attending_logs are unmuted.")
if db["responding"]:
if msg == ".list_users" or msg == ".list_user":
users = []
if "users" in db.keys():
users = list(db["users"])
await message.channel.send('Users: '+str(users))
elif msg.startswith('.inspire') or msg.startswith('.quote'):
quote = get_quote()
await message.channel.send(quote)
elif msg.startswith('.joke'):
if msg == '.joke':
joke = get_joke()
await message.channel.send("\n\n\nJoke:" + str(joke))
else:
try:
n = int(msg.split(' ')[1].strip())
except:
n = int(msg.split(' ')[2].strip())
jokes = list(get_jokes(n))
for joke in jokes:
await message.channel.send("\n\n\nJoke:" + str(joke))
elif msg.startswith('.riddle'):
riddle = get_riddles()
await message.channel.send(riddle)
elif msg.startswith('.puns'):
puns = get_puns()
await message.channel.send(puns)
elif msg.startswith('.memes') or msg.startswith('.knock knock'):
await message.channel.send('Sorry! ' + str(msg) + ' are comming very very soon!' )
elif msg.startswith(".new"):
encouraging_message = msg.split(".new ",1)[1].strip()
update_encouragements(encouraging_message)
await message.channel.send("New encouraging message added.")
elif msg.startswith(".del"):
encouragements = []
if "encouragements" in db.keys():
index = msg.split(".del",1)[1].strip()
delete_encouragment(index)
encouragements = list(db["encouragements"])
await message.channel.send(encouragements)
elif msg.startswith(".list"):
encouragements = []
if "encouragements" in db.keys():
encouragements = list(db["encouragements"])
await message.channel.send(encouragements)'''
keep_alive()
bot.run(os.environ['TOKEN'])
#client.loop.create_task(my_background_task())
bot.run('token') #
| [
"discord.Game",
"discord.FFmpegPCMAudio",
"keep_alive.keep_alive",
"time.sleep",
"youtube_dl.YoutubeDL",
"discord.Client",
"os.remove",
"discord.ext.commands.Cog.listener",
"time.ctime",
"os.listdir",
"discord.ext.commands.Bot",
"dotenv.load_dotenv",
"os.mkdir",
"discord.Color.blue",
"di... | [((742, 755), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (753, 755), False, 'from dotenv import load_dotenv\n'), ((766, 782), 'discord.Client', 'discord.Client', ([], {}), '()\n', (780, 782), False, 'import discord\n'), ((939, 964), 'discord.Intents.default', 'discord.Intents.default', ([], {}), '()\n', (962, 964), False, 'import discord\n'), ((3617, 3813), 'asyncpraw.Reddit', 'asyncpraw.Reddit', ([], {'client_id': '"""nnhGBCiBxSJysTobl6SLPQ"""', 'client_secret': "os.environ['rd_client_secret']", 'password': "os.environ['rd_pass']", 'user_agent': '"""praw_test"""', 'username': '"""Alternative-Ad-8849"""'}), "(client_id='nnhGBCiBxSJysTobl6SLPQ', client_secret=os.\n environ['rd_client_secret'], password=os.environ['rd_pass'], user_agent\n ='praw_test', username='Alternative-Ad-8849')\n", (3633, 3813), False, 'import asyncpraw, asyncprawcore\n'), ((7479, 7547), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""."""', 'help_command': 'None', 'intents': 'intents'}), "(command_prefix='.', help_command=None, intents=intents)\n", (7491, 7547), False, 'from discord.ext import commands\n'), ((36495, 36508), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (36506, 36508), False, 'from dotenv import load_dotenv\n'), ((52610, 52631), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'minutes': '(1)'}), '(minutes=1)\n', (52620, 52631), False, 'from discord.ext import tasks\n'), ((53266, 53288), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'minutes': '(30)'}), '(minutes=30)\n', (53276, 53288), False, 'from discord.ext import tasks\n'), ((56435, 56455), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'hours': '(25)'}), '(hours=25)\n', (56445, 56455), False, 'from discord.ext import tasks\n'), ((57184, 57204), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'hours': '(25)'}), '(hours=25)\n', (57194, 57204), False, 'from discord.ext import tasks\n'), ((68295, 68314), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'hours': '(6)'}), 
'(hours=6)\n', (68305, 68314), False, 'from discord.ext import tasks\n'), ((75590, 75602), 'keep_alive.keep_alive', 'keep_alive', ([], {}), '()\n', (75600, 75602), False, 'from keep_alive import keep_alive\n'), ((4574, 4595), 'random.randint', 'random.randint', (['(1)', '(15)'], {}), '(1, 15)\n', (4588, 4595), False, 'import math, random\n'), ((5016, 5041), 'random.randint', 'random.randint', (['(100)', '(1000)'], {}), '(100, 1000)\n', (5030, 5041), False, 'import math, random\n'), ((5052, 5086), 'random.randint', 'random.randint', (['start', '(start + 100)'], {}), '(start, start + 100)\n', (5066, 5086), False, 'import math, random\n'), ((8158, 8312), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Encouragement Bot Help!"""', 'description': '"""Type `.help <command name>` for more details about each command. e.g. `.help joke`"""'}), "(title='Encouragement Bot Help!', description=\n 'Type `.help <command name>` for more details about each command. e.g. `.help joke`'\n )\n", (8171, 8312), False, 'import discord\n'), ((10353, 10405), 'requests.get', 'requests.get', (['"""http://ioee.herokuapp.com/meroshare/"""'], {}), "('http://ioee.herokuapp.com/meroshare/')\n", (10365, 10405), False, 'import requests\n'), ((14147, 14167), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (14161, 14167), False, 'import math, random\n'), ((35401, 35756), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Support:"""', 'description': '"""Thank you :-) \nesewa/khalti id:\n 9840445934 \n\n Paytreon:\nhttps://www.patreon.com/join/7095305? \n\n Coinbase:\n https://commerce.coinbase.com/checkout/63a4b635-8510-459f-b091-a4f0697993e6\n \n \n\n\n And please vote for me here: https://top.gg/bot/862191340355715093/vote\n """', 'color': '(65535)'}), '(title=\'Support:\', description=\n """Thank you :-) \nesewa/khalti id:\n 9840445934 \n\n Paytreon:\nhttps://www.patreon.com/join/7095305? 
\n\n Coinbase:\n https://commerce.coinbase.com/checkout/63a4b635-8510-459f-b091-a4f0697993e6\n \n \n\n\n And please vote for me here: https://top.gg/bot/862191340355715093/vote\n """\n , color=65535)\n', (35414, 35756), False, 'import discord\n'), ((35970, 35996), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (35994, 35996), False, 'import time, asyncio, datetime\n'), ((39004, 39108), 'discord.Embed', 'discord.Embed', ([], {'title': "stream['name']", 'description': "stream['longDesc']", 'color': '(65535)', 'url': "stream['url']"}), "(title=stream['name'], description=stream['longDesc'], color=\n 65535, url=stream['url'])\n", (39017, 39108), False, 'import discord\n'), ((39340, 39366), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (39364, 39366), False, 'import time, asyncio, datetime\n'), ((39785, 39852), 'discord.Embed', 'discord.Embed', ([], {'title': "stream['name']", 'color': '(65535)', 'url': "stream['url']"}), "(title=stream['name'], color=65535, url=stream['url'])\n", (39798, 39852), False, 'import discord\n'), ((40100, 40126), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (40124, 40126), False, 'import time, asyncio, datetime\n'), ((49923, 49932), 'replit.db.keys', 'db.keys', ([], {}), '()\n', (49930, 49932), False, 'from replit import db\n'), ((49993, 50002), 'replit.db.keys', 'db.keys', ([], {}), '()\n', (50000, 50002), False, 'from replit import db\n'), ((50069, 50078), 'replit.db.keys', 'db.keys', ([], {}), '()\n', (50076, 50078), False, 'from replit import db\n'), ((50147, 50194), 'requests.get', 'requests.get', (['"""https://zenquotes.io/api/random"""'], {}), "('https://zenquotes.io/api/random')\n", (50159, 50194), False, 'import requests\n'), ((50211, 50236), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (50221, 50236), False, 'import json\n'), ((50346, 50406), 'requests.get', 'requests.get', 
(['"""https://imao.herokuapp.com/jokes/api/random/"""'], {}), "('https://imao.herokuapp.com/jokes/api/random/')\n", (50358, 50406), False, 'import requests\n'), ((52469, 52493), 'requests.get', 'requests.get', (['custom_url'], {}), '(custom_url)\n', (52481, 52493), False, 'import requests\n'), ((53349, 53374), 'discord.Intents.default', 'discord.Intents.default', ([], {}), '()\n', (53372, 53374), False, 'import discord\n'), ((53749, 53772), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (53770, 53772), False, 'import time, asyncio, datetime\n'), ((56494, 56519), 'discord.Intents.default', 'discord.Intents.default', ([], {}), '()\n', (56517, 56519), False, 'import discord\n'), ((56616, 56668), 'requests.get', 'requests.get', (['"""http://ioee.herokuapp.com/meroshare/"""'], {}), "('http://ioee.herokuapp.com/meroshare/')\n", (56628, 56668), False, 'import requests\n'), ((57263, 57288), 'discord.Intents.default', 'discord.Intents.default', ([], {}), '()\n', (57286, 57288), False, 'import discord\n'), ((57664, 57687), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (57685, 57687), False, 'import time, asyncio, datetime\n'), ((60490, 60513), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (60511, 60513), False, 'from discord.ext import commands\n'), ((68385, 68410), 'discord.Intents.default', 'discord.Intents.default', ([], {}), '()\n', (68408, 68410), False, 'import discord\n'), ((69038, 69067), 'discord.Game', 'discord.Game', (['"""Chilling out."""'], {}), "('Chilling out.')\n", (69050, 69067), False, 'import discord\n'), ((69084, 69160), 'discord.Streaming', 'discord.Streaming', ([], {'name': '"""pubg lite"""', 'url': '"""https://www.twitch.tv/caterpileer"""'}), "(name='pubg lite', url='https://www.twitch.tv/caterpileer')\n", (69101, 69160), False, 'import discord\n'), ((1074, 1086), 'time.ctime', 'time.ctime', ([], {}), '()\n', (1084, 1086), False, 'import time, asyncio, datetime\n'), ((20566, 
20575), 'replit.db.keys', 'db.keys', ([], {}), '()\n', (20573, 20575), False, 'from replit import db\n'), ((25670, 25679), 'replit.db.keys', 'db.keys', ([], {}), '()\n', (25677, 25679), False, 'from replit import db\n'), ((31008, 31060), 'discord.Embed', 'discord.Embed', ([], {'title': 'title', 'color': '(65535)', 'url': 'vid_url'}), '(title=title, color=65535, url=vid_url)\n', (31021, 31060), False, 'import discord\n'), ((31251, 31277), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (31275, 31277), False, 'import time, asyncio, datetime\n'), ((31782, 31793), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (31791, 31793), False, 'import os, youtube_dl\n'), ((32100, 32130), 'youtube_dl.YoutubeDL', 'youtube_dl.YoutubeDL', (['ydl_opts'], {}), '(ydl_opts)\n', (32120, 32130), False, 'import os, youtube_dl\n'), ((33445, 33466), 'os.mkdir', 'os.mkdir', (['"""downloads"""'], {}), "('downloads')\n", (33453, 33466), False, 'import os, youtube_dl\n'), ((33794, 33830), 'os.remove', 'os.remove', (['full_downloaded_file_name'], {}), '(full_downloaded_file_name)\n', (33803, 33830), False, 'import os, youtube_dl\n'), ((38938, 38967), 'discord.FFmpegPCMAudio', 'FFmpegPCMAudio', (["stream['url']"], {}), "(stream['url'])\n", (38952, 38967), False, 'from discord import FFmpegPCMAudio\n'), ((51092, 51101), 'replit.db.keys', 'db.keys', ([], {}), '()\n', (51099, 51101), False, 'from replit import db\n'), ((51569, 51578), 'replit.db.keys', 'db.keys', ([], {}), '()\n', (51576, 51578), False, 'from replit import db\n'), ((52723, 52735), 'json.load', 'json.load', (['f'], {}), '(f)\n', (52732, 52735), False, 'import json\n'), ((53145, 53196), 'json.dump', 'json.dump', (["{'quotes': saved_quotes}", 'file'], {'indent': '(4)'}), "({'quotes': saved_quotes}, file, indent=4)\n", (53154, 53196), False, 'import json\n'), ((69813, 69834), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (69827, 69834), False, 'import math, random\n'), ((2850, 2869), 
'time.sleep', 'time.sleep', (['seconds'], {}), '(seconds)\n', (2860, 2869), False, 'import time, asyncio, datetime\n'), ((6286, 6308), 'random.randint', 'random.randint', (['(1)', '(150)'], {}), '(1, 150)\n', (6300, 6308), False, 'import math, random\n'), ((7319, 7338), 'discord.Color.red', 'discord.Color.red', ([], {}), '()\n', (7336, 7338), False, 'import discord\n'), ((12007, 12027), 'discord.Color.blue', 'discord.Color.blue', ([], {}), '()\n', (12025, 12027), False, 'import discord\n'), ((25154, 25163), 'replit.db.keys', 'db.keys', ([], {}), '()\n', (25161, 25163), False, 'from replit import db\n'), ((25321, 25330), 'replit.db.keys', 'db.keys', ([], {}), '()\n', (25328, 25330), False, 'from replit import db\n'), ((27852, 27863), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (27861, 27863), False, 'import os, youtube_dl\n'), ((29006, 29036), 'youtube_dl.YoutubeDL', 'youtube_dl.YoutubeDL', (['ydl_opts'], {}), '(ydl_opts)\n', (29026, 29036), False, 'import os, youtube_dl\n'), ((32679, 32690), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (32688, 32690), False, 'import os, youtube_dl\n'), ((33023, 33055), 'os.rename', 'os.rename', (['file_name', 'mp3_format'], {}), '(file_name, mp3_format)\n', (33032, 33055), False, 'import os, youtube_dl\n'), ((33425, 33437), 'os.listdir', 'os.listdir', ([], {}), '()\n', (33435, 33437), False, 'import os, youtube_dl\n'), ((47106, 47125), 'discord.Color.red', 'discord.Color.red', ([], {}), '()\n', (47123, 47125), False, 'import discord\n'), ((52826, 52873), 'requests.get', 'requests.get', (['"""https://zenquotes.io/api/quotes"""'], {}), "('https://zenquotes.io/api/quotes')\n", (52838, 52873), False, 'import requests\n'), ((54366, 54375), 'replit.db.keys', 'db.keys', ([], {}), '()\n', (54373, 54375), False, 'from replit import db\n'), ((58282, 58291), 'replit.db.keys', 'db.keys', ([], {}), '()\n', (58289, 58291), False, 'from replit import db\n'), ((70381, 70445), 'discord.Embed', 'discord.Embed', ([], {'title': 'message.author', 
'description': 'message.content'}), '(title=message.author, description=message.content)\n', (70394, 70445), False, 'import discord\n'), ((3411, 3431), 'discord.Color.blue', 'discord.Color.blue', ([], {}), '()\n', (3429, 3431), False, 'import discord\n'), ((30839, 30866), 'discord.FFmpegPCMAudio', 'discord.FFmpegPCMAudio', (['URL'], {}), '(URL)\n', (30861, 30866), False, 'import discord\n'), ((32856, 32867), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (32865, 32867), False, 'import os, youtube_dl\n'), ((32971, 32982), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (32980, 32982), False, 'import os, youtube_dl\n'), ((36719, 36731), 'json.load', 'json.load', (['F'], {}), '(F)\n', (36728, 36731), False, 'import json\n'), ((36991, 37003), 'json.load', 'json.load', (['F'], {}), '(F)\n', (37000, 37003), False, 'import json\n'), ((60826, 60847), 'discord.Color.green', 'discord.Color.green', ([], {}), '()\n', (60845, 60847), False, 'import discord\n'), ((62317, 62337), 'discord.Color.blue', 'discord.Color.blue', ([], {}), '()\n', (62335, 62337), False, 'import discord\n'), ((62892, 62912), 'discord.Color.blue', 'discord.Color.blue', ([], {}), '()\n', (62910, 62912), False, 'import discord\n'), ((63418, 63438), 'discord.Color.blue', 'discord.Color.blue', ([], {}), '()\n', (63436, 63438), False, 'import discord\n'), ((64166, 64185), 'discord.Color.red', 'discord.Color.red', ([], {}), '()\n', (64183, 64185), False, 'import discord\n'), ((70762, 70771), 'replit.db.keys', 'db.keys', ([], {}), '()\n', (70769, 70771), False, 'from replit import db\n'), ((29413, 29424), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (29422, 29424), False, 'import os, youtube_dl\n'), ((29816, 29852), 'os.rename', 'os.rename', (['file_name', "(title + '.mp3')"], {}), "(file_name, title + '.mp3')\n", (29825, 29852), False, 'import os, youtube_dl\n'), ((33743, 33782), 'discord.File', 'discord.File', (['full_downloaded_file_name'], {}), '(full_downloaded_file_name)\n', (33755, 33782), False, 'import 
discord\n'), ((63095, 63121), 'math.ceil', 'math.ceil', (['(no_of_posts / 4)'], {}), '(no_of_posts / 4)\n', (63104, 63121), False, 'import math, random\n'), ((65222, 65241), 'discord.Color.red', 'discord.Color.red', ([], {}), '()\n', (65239, 65241), False, 'import discord\n'), ((66005, 66031), 'math.ceil', 'math.ceil', (['(no_of_posts / 4)'], {}), '(no_of_posts / 4)\n', (66014, 66031), False, 'import math, random\n'), ((66355, 66374), 'discord.Color.red', 'discord.Color.red', ([], {}), '()\n', (66372, 66374), False, 'import discord\n'), ((29759, 29770), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (29768, 29770), False, 'import os, youtube_dl\n'), ((70932, 70954), 'random.choice', 'random.choice', (['options'], {}), '(options)\n', (70945, 70954), False, 'import math, random\n'), ((41052, 41081), 'discord.FFmpegPCMAudio', 'FFmpegPCMAudio', (["stream['url']"], {}), "(stream['url'])\n", (41066, 41081), False, 'from discord import FFmpegPCMAudio\n'), ((43826, 43855), 'discord.FFmpegPCMAudio', 'FFmpegPCMAudio', (["stream['url']"], {}), "(stream['url'])\n", (43840, 43855), False, 'from discord import FFmpegPCMAudio\n'), ((41585, 41614), 'discord.FFmpegPCMAudio', 'FFmpegPCMAudio', (["stream['url']"], {}), "(stream['url'])\n", (41599, 41614), False, 'from discord import FFmpegPCMAudio\n'), ((44374, 44403), 'discord.FFmpegPCMAudio', 'FFmpegPCMAudio', (["stream['url']"], {}), "(stream['url'])\n", (44388, 44403), False, 'from discord import FFmpegPCMAudio\n'), ((41958, 41979), 'os.mkdir', 'os.mkdir', (['"""downloads"""'], {}), "('downloads')\n", (41966, 41979), False, 'import os, youtube_dl\n'), ((42416, 42452), 'os.remove', 'os.remove', (['full_downloaded_file_name'], {}), '(full_downloaded_file_name)\n', (42425, 42452), False, 'import os, youtube_dl\n'), ((44747, 44768), 'os.mkdir', 'os.mkdir', (['"""downloads"""'], {}), "('downloads')\n", (44755, 44768), False, 'import os, youtube_dl\n'), ((45061, 45097), 'os.remove', 'os.remove', (['full_downloaded_file_name'], {}), 
'(full_downloaded_file_name)\n', (45070, 45097), False, 'import os, youtube_dl\n'), ((41928, 41940), 'os.listdir', 'os.listdir', ([], {}), '()\n', (41938, 41940), False, 'import os, youtube_dl\n'), ((44717, 44729), 'os.listdir', 'os.listdir', ([], {}), '()\n', (44727, 44729), False, 'import os, youtube_dl\n'), ((42359, 42398), 'discord.File', 'discord.File', (['full_downloaded_file_name'], {}), '(full_downloaded_file_name)\n', (42371, 42398), False, 'import discord\n'), ((45004, 45043), 'discord.File', 'discord.File', (['full_downloaded_file_name'], {}), '(full_downloaded_file_name)\n', (45016, 45043), False, 'import discord\n')] |
# Code Taken from https://github.com/LYH-YF/MWPToolkit
# -*- encoding: utf-8 -*-
# @Author: <NAME>
# @Time: 2021/08/29 22:05:03
# @File: transformer_layer.py
import torch
import math
from torch import nn
from torch.nn import functional as F
from transformers.activations import gelu_new as gelu_bert
from module.Attention.multi_head_attention import MultiHeadAttention
from module.Attention.multi_head_attention import EPTMultiHeadAttention
from module.Attention.group_attention import GroupAttention
from utils.utils import clones
class TransformerLayer(nn.Module):
    r"""One Transformer layer (post-norm style): a multi-head self-attention
    sub-layer, an optional external (cross) multi-head attention sub-layer
    (only for conditional decoders), and a point-wise feed-forward sub-layer.
    Each sub-layer is followed by dropout, a residual connection and layer
    normalization.

    Args:
        embedding_size (int): dimensionality of the token representations.
        ffn_size (int): hidden size of the position-wise feed-forward network.
        num_heads (int): number of attention heads.
        attn_dropout_ratio (float): dropout applied to attention outputs.
        attn_weight_dropout_ratio (float): dropout applied to attention weights.
        ffn_dropout_ratio (float): dropout applied to the feed-forward output.
        with_external (bool): if True, add the external (cross) attention
            sub-layer so the layer can attend over encoder states.
    """
    def __init__(self, embedding_size, ffn_size, num_heads, attn_dropout_ratio=0.0, attn_weight_dropout_ratio=0.0, ffn_dropout_ratio=0.0, with_external=False):
        super(TransformerLayer, self).__init__()
        self.multi_head_attention = MultiHeadAttention(embedding_size, num_heads, attn_weight_dropout_ratio)

        self.feed_forward_1 = nn.Linear(embedding_size, ffn_size)
        self.feed_forward_2 = nn.Linear(ffn_size, embedding_size)

        self.attn_layer_norm = nn.LayerNorm(embedding_size, eps=1e-6)
        self.ffn_layer_norm = nn.LayerNorm(embedding_size, eps=1e-6)

        self.attn_dropout = nn.Dropout(attn_dropout_ratio)
        self.ffn_dropout = nn.Dropout(ffn_dropout_ratio)

        self.with_external = with_external

        if self.with_external:
            self.external_multi_head_attention = MultiHeadAttention(embedding_size, num_heads, attn_weight_dropout_ratio)
            # Consistency fix: use the same eps as the other layer norms.
            # The original left this one at PyTorch's default eps=1e-5 while
            # attn_layer_norm and ffn_layer_norm use eps=1e-6.
            self.external_layer_norm = nn.LayerNorm(embedding_size, eps=1e-6)
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize feed-forward weights with N(0, 0.02) and zero the biases."""
        nn.init.normal_(self.feed_forward_1.weight, std=0.02)
        nn.init.normal_(self.feed_forward_2.weight, std=0.02)
        nn.init.constant_(self.feed_forward_1.bias, 0.)
        nn.init.constant_(self.feed_forward_2.bias, 0.)

    def gelu(self, x):
        """Exact (erf-based) GELU activation: x * Phi(x)."""
        return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

    def forward(self, x, kv=None, self_padding_mask=None, self_attn_mask=None, external_states=None, external_padding_mask=None):
        """Run one Transformer layer.

        Args:
            x (torch.Tensor): input representations (queries).
            kv (torch.Tensor, optional): if given, used as keys/values of the
                first attention sub-layer instead of ``x`` itself.
            self_padding_mask (torch.bool): padding mask for the multi-head
                attention sub-layer.
            self_attn_mask (torch.bool): attention mask for the multi-head
                attention sub-layer.
            external_states (torch.Tensor): external context for the decoder,
                e.g. hidden states from the encoder.
            external_padding_mask (torch.bool): padding mask for the external
                states.

        Returns:
            tuple: (output, self-attention weights, external attention weights;
            the last is ``None`` when ``with_external`` is False).
        """
        residual = x
        if kv is None:
            # Plain self-attention: queries, keys and values all come from x.
            x, self_attn_weights = self.multi_head_attention(query=x, key=x, value=x, key_padding_mask=self_padding_mask, attn_mask=self_attn_mask)
        else:
            x, self_attn_weights = self.multi_head_attention(query=x, key=kv, value=kv, key_padding_mask=self_padding_mask, attn_mask=self_attn_mask)
        x = self.attn_dropout(x)
        x = self.attn_layer_norm(residual + x)

        if self.with_external:
            # Cross-attention over the external (e.g. encoder) states.
            residual = x
            x, external_attn_weights = self.external_multi_head_attention(query=x, key=external_states, value=external_states, key_padding_mask=external_padding_mask)
            x = self.attn_dropout(x)
            x = self.external_layer_norm(residual + x)
        else:
            external_attn_weights = None

        # Position-wise feed-forward sub-layer with GELU activation.
        residual = x
        x = self.feed_forward_2(self.gelu(self.feed_forward_1(x)))
        x = self.ffn_dropout(x)
        x = self.ffn_layer_norm(residual + x)
        return x, self_attn_weights, external_attn_weights
class GAEncoderLayer(nn.Module):
    """Group-attentional encoder layer.

    A self-attention sublayer followed by a feed-forward sublayer, each
    wrapped in a residual :class:`SublayerConnection`.
    """
    def __init__(self, size, self_attn, feed_forward, dropout):
        super(GAEncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        # Two identical residual wrappers: [0] for attention, [1] for FFN.
        self.sublayer = clones(SublayerConnection(size, dropout), 2)
        self.size = size

    def forward(self, x, mask):
        """Apply masked self-attention, then the feed-forward network."""
        attend = lambda t: self.self_attn(t, t, t, mask)
        x = self.sublayer[0](x, attend)
        return self.sublayer[1](x, self.feed_forward)
class SublayerConnection(nn.Module):
    """Residual wrapper: ``x + dropout(sublayer(norm(x)))``.

    Note that, for code simplicity, the layer norm is applied *before* the
    sublayer rather than after it.
    """
    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = nn.LayerNorm(size, eps=1e-6)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Run *sublayer* on the normalised input and add the residual."""
        normed = self.norm(x)
        return x + self.dropout(sublayer(normed))
class LayerNorm(nn.Module):
    """Construct a layernorm module (see citation for details).

    Normalises over the last dimension using the (unbiased) standard
    deviation, then applies a learnable per-feature scale ``a_2`` and shift
    ``b_2``; ``eps`` keeps the division numerically stable.
    """
    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))   # scale, initialised to 1
        self.b_2 = nn.Parameter(torch.zeros(features))  # shift, initialised to 0
        self.eps = eps

    def forward(self, x):
        centred = x - x.mean(-1, keepdim=True)
        denom = x.std(-1, keepdim=True) + self.eps
        return self.a_2 * centred / denom + self.b_2
class PositionwiseFeedForward(nn.Module):
    """Implements the FFN equation: ``w_2(dropout(relu(w_1(x))))``."""
    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)  # expand: d_model -> d_ff
        self.w_2 = nn.Linear(d_ff, d_model)  # project back: d_ff -> d_model
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))
class EPTTransformerLayer(nn.Module):
    """
    Class for Transformer Encoder/Decoder layer (follows the paper, 'Attention is all you need')
    """
    def __init__(self, hidden_dim = None, num_decoder_heads = None, layernorm_eps = None,intermediate_dim= None):
        """
        Initialize TransformerLayer class

        :param int hidden_dim: dimension H of the hidden states
        :param int num_decoder_heads: number of attention heads
        :param float layernorm_eps: epsilon used by the three post layer-norms
        :param int intermediate_dim: dimension I of the feed-forward expansion

        NOTE(review): the ``None`` defaults are placeholders only — all four
        arguments must be supplied, otherwise the ``nn.Linear``/``nn.LayerNorm``
        constructions below will fail.
        """
        super().__init__()
        # Self-attention layer
        self.attn = EPTMultiHeadAttention(hidden_dim=hidden_dim, num_heads=num_decoder_heads,
                                          layernorm_eps=layernorm_eps, dropout=0.0)
        # Source-Target attention layer
        self.mem = EPTMultiHeadAttention(hidden_dim=hidden_dim, num_heads=num_decoder_heads,
                                         layernorm_eps=layernorm_eps, dropout=0.0)
        # Dropout for self-attention
        self.dropout_attn = nn.Dropout(0.0)
        # Dropout for source-target attention
        self.dropout_mem = nn.Dropout(0.0)
        # Dropout for expansion before outputting
        self.dropout_expand = nn.Dropout(0.0)
        # Dropout for outputting
        self.dropout_out = nn.Dropout(0.0)
        # Linear transformation layer for expansion (H -> I) where I = vector dimension of intermediate state
        self.lin_expand = nn.Linear(hidden_dim, intermediate_dim)
        # Linear transformation layer for output (I -> H)
        self.lin_collapse = nn.Linear(intermediate_dim, hidden_dim)
        # Post Layer Normalization for self-attention
        self.norm_attn = nn.LayerNorm(hidden_dim, eps=layernorm_eps)
        # Post Layer Normalization for source-target attention
        self.norm_mem = nn.LayerNorm(hidden_dim, eps=layernorm_eps)
        # Post Layer Normalization for outputting
        self.norm_out = nn.LayerNorm(hidden_dim, eps=layernorm_eps)
    def forward(self, target, target_ignorance_mask=None, target_attention_mask=None,
                memory=None, memory_ignorance_mask=None):
        """
        Forward-computation of Transformer Encoder/Decoder layers

        Args:
            target (torch.Tensor): FloatTensor indicating Sequence of target vectors. Shape [batch_size, target_length, hidden_size].
            target_ignorance_mask (torch.Tensor): BoolTensor indicating Mask for target tokens that should be ignored. Shape [batch_size, target_length].
            target_attention_mask (torch.Tensor) : BoolTensor indicating Target-to-target Attention mask for target tokens. Shape [target_length, target_length].
            memory (torch.Tensor): FloatTensor indicating Sequence of source vectors. Shape [batch_size, sequence_length, hidden_size]. This can be None when you want to use this layer as an encoder layer.
            memory_ignorance_mask (torch.Tensor): BoolTensor indicating Mask for source tokens that should be ignored. Shape [batch_size, sequence_length].
        Returns:
            torch.FloatTensor: Decoder hidden states per each target token, shape [batch_size, sequence_length, hidden_size].
        """
        # Self-attention sublayer: dropout + residual + post layer-norm
        attented = self.attn(query=target, attention_mask=target_attention_mask,
                             key_ignorance_mask=target_ignorance_mask)
        target = target + self.dropout_attn(attented)
        target = self.norm_attn(target)
        # Source-target attention: queries come from the target, keys/values
        # from the source memory.  Skipped when used as an encoder layer.
        if memory is not None:
            attented = self.mem(query=target, key_value=memory, key_ignorance_mask=memory_ignorance_mask)
            target = target + self.dropout_mem(attented)
            target = self.norm_mem(target)
        # Feed-forward sublayer: expand (H->I), gelu, collapse (I->H).
        # NOTE(review): `gelu_bert` is defined elsewhere in this module —
        # presumably the BERT gelu approximation; confirm before relying on it.
        output = self.lin_collapse(self.dropout_expand(gelu_bert(self.lin_expand(target))))
        target = target + self.dropout_out(output)
        target = self.norm_out(target)
        return target
| [
"module.Attention.multi_head_attention.EPTMultiHeadAttention",
"torch.nn.Dropout",
"torch.nn.init.constant_",
"torch.nn.LayerNorm",
"math.sqrt",
"torch.nn.Linear",
"module.Attention.multi_head_attention.MultiHeadAttention",
"torch.zeros",
"torch.nn.init.normal_",
"torch.ones"
] | [((1572, 1644), 'module.Attention.multi_head_attention.MultiHeadAttention', 'MultiHeadAttention', (['embedding_size', 'num_heads', 'attn_weight_dropout_ratio'], {}), '(embedding_size, num_heads, attn_weight_dropout_ratio)\n', (1590, 1644), False, 'from module.Attention.multi_head_attention import MultiHeadAttention\n'), ((1675, 1710), 'torch.nn.Linear', 'nn.Linear', (['embedding_size', 'ffn_size'], {}), '(embedding_size, ffn_size)\n', (1684, 1710), False, 'from torch import nn\n'), ((1741, 1776), 'torch.nn.Linear', 'nn.Linear', (['ffn_size', 'embedding_size'], {}), '(ffn_size, embedding_size)\n', (1750, 1776), False, 'from torch import nn\n'), ((1809, 1848), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['embedding_size'], {'eps': '(1e-06)'}), '(embedding_size, eps=1e-06)\n', (1821, 1848), False, 'from torch import nn\n'), ((1878, 1917), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['embedding_size'], {'eps': '(1e-06)'}), '(embedding_size, eps=1e-06)\n', (1890, 1917), False, 'from torch import nn\n'), ((1946, 1976), 'torch.nn.Dropout', 'nn.Dropout', (['attn_dropout_ratio'], {}), '(attn_dropout_ratio)\n', (1956, 1976), False, 'from torch import nn\n'), ((2004, 2033), 'torch.nn.Dropout', 'nn.Dropout', (['ffn_dropout_ratio'], {}), '(ffn_dropout_ratio)\n', (2014, 2033), False, 'from torch import nn\n'), ((2374, 2427), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.feed_forward_1.weight'], {'std': '(0.02)'}), '(self.feed_forward_1.weight, std=0.02)\n', (2389, 2427), False, 'from torch import nn\n'), ((2436, 2489), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.feed_forward_2.weight'], {'std': '(0.02)'}), '(self.feed_forward_2.weight, std=0.02)\n', (2451, 2489), False, 'from torch import nn\n'), ((2498, 2546), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.feed_forward_1.bias', '(0.0)'], {}), '(self.feed_forward_1.bias, 0.0)\n', (2515, 2546), False, 'from torch import nn\n'), ((2554, 2602), 'torch.nn.init.constant_', 'nn.init.constant_', 
(['self.feed_forward_2.bias', '(0.0)'], {}), '(self.feed_forward_2.bias, 0.0)\n', (2571, 2602), False, 'from torch import nn\n'), ((4804, 4833), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['size'], {'eps': '(1e-06)'}), '(size, eps=1e-06)\n', (4816, 4833), False, 'from torch import nn\n'), ((4856, 4875), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (4866, 4875), False, 'from torch import nn\n'), ((5739, 5763), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_ff'], {}), '(d_model, d_ff)\n', (5748, 5763), False, 'from torch import nn\n'), ((5783, 5807), 'torch.nn.Linear', 'nn.Linear', (['d_ff', 'd_model'], {}), '(d_ff, d_model)\n', (5792, 5807), False, 'from torch import nn\n'), ((5831, 5850), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (5841, 5850), False, 'from torch import nn\n'), ((6430, 6549), 'module.Attention.multi_head_attention.EPTMultiHeadAttention', 'EPTMultiHeadAttention', ([], {'hidden_dim': 'hidden_dim', 'num_heads': 'num_decoder_heads', 'layernorm_eps': 'layernorm_eps', 'dropout': '(0.0)'}), '(hidden_dim=hidden_dim, num_heads=num_decoder_heads,\n layernorm_eps=layernorm_eps, dropout=0.0)\n', (6451, 6549), False, 'from module.Attention.multi_head_attention import EPTMultiHeadAttention\n'), ((6644, 6763), 'module.Attention.multi_head_attention.EPTMultiHeadAttention', 'EPTMultiHeadAttention', ([], {'hidden_dim': 'hidden_dim', 'num_heads': 'num_decoder_heads', 'layernorm_eps': 'layernorm_eps', 'dropout': '(0.0)'}), '(hidden_dim=hidden_dim, num_heads=num_decoder_heads,\n layernorm_eps=layernorm_eps, dropout=0.0)\n', (6665, 6763), False, 'from module.Attention.multi_head_attention import EPTMultiHeadAttention\n'), ((6865, 6880), 'torch.nn.Dropout', 'nn.Dropout', (['(0.0)'], {}), '(0.0)\n', (6875, 6880), False, 'from torch import nn\n'), ((6954, 6969), 'torch.nn.Dropout', 'nn.Dropout', (['(0.0)'], {}), '(0.0)\n', (6964, 6969), False, 'from torch import nn\n'), ((7050, 7065), 'torch.nn.Dropout', 'nn.Dropout', 
(['(0.0)'], {}), '(0.0)\n', (7060, 7065), False, 'from torch import nn\n'), ((7126, 7141), 'torch.nn.Dropout', 'nn.Dropout', (['(0.0)'], {}), '(0.0)\n', (7136, 7141), False, 'from torch import nn\n'), ((7279, 7318), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'intermediate_dim'], {}), '(hidden_dim, intermediate_dim)\n', (7288, 7318), False, 'from torch import nn\n'), ((7405, 7444), 'torch.nn.Linear', 'nn.Linear', (['intermediate_dim', 'hidden_dim'], {}), '(intermediate_dim, hidden_dim)\n', (7414, 7444), False, 'from torch import nn\n'), ((7525, 7568), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['hidden_dim'], {'eps': 'layernorm_eps'}), '(hidden_dim, eps=layernorm_eps)\n', (7537, 7568), False, 'from torch import nn\n'), ((7656, 7699), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['hidden_dim'], {'eps': 'layernorm_eps'}), '(hidden_dim, eps=layernorm_eps)\n', (7668, 7699), False, 'from torch import nn\n'), ((7774, 7817), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['hidden_dim'], {'eps': 'layernorm_eps'}), '(hidden_dim, eps=layernorm_eps)\n', (7786, 7817), False, 'from torch import nn\n'), ((2159, 2231), 'module.Attention.multi_head_attention.MultiHeadAttention', 'MultiHeadAttention', (['embedding_size', 'num_heads', 'attn_weight_dropout_ratio'], {}), '(embedding_size, num_heads, attn_weight_dropout_ratio)\n', (2177, 2231), False, 'from module.Attention.multi_head_attention import MultiHeadAttention\n'), ((2271, 2299), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['embedding_size'], {}), '(embedding_size)\n', (2283, 2299), False, 'from torch import nn\n'), ((5261, 5281), 'torch.ones', 'torch.ones', (['features'], {}), '(features)\n', (5271, 5281), False, 'import torch\n'), ((5315, 5336), 'torch.zeros', 'torch.zeros', (['features'], {}), '(features)\n', (5326, 5336), False, 'import torch\n'), ((2672, 2686), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (2681, 2686), False, 'import math\n')] |
from correlate import *
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import rc, rcParams
# Global matplotlib styling applied at import time: 1pt-wide axes and bold
# 10pt fonts for every figure produced by this module.
rc('axes', linewidth=1)
rc('font', weight='bold', size=10)
def barplots(prefixes, regions, stains, corre, error, name, folder, ylim):
    """Render one correlation bar chart per stain (see :func:`barplot`)."""
    for single_stain in stains:
        barplot(prefixes, regions, single_stain, corre, error, name, folder, ylim)
def barplot(prefixes, regions, stain, corre, error, name, folder, ylim):
    """Draw a bar chart of per-region correlations (with error bars) for one
    stain and save it as ``bar_<stain>_<name>.png`` inside *folder*."""
    palette = ['#bfe2e3', '#69869c', '#36896e', '#c22e00', '#c6d645', '#ffd3b6', '#b2b2b2', '#4724a9',
               '#9bc84d', '#7141ae', '#d2a782', '#933b61', '#435299', '#d88770', '#765aa8', '#719795']
    # One bar per prefix, in the order given.
    heights = [corre[prefix][stain] for prefix in prefixes]
    spreads = [error[prefix][stain] for prefix in prefixes]
    fig, ax = plt.subplots(dpi=300, figsize=(6, 3))
    positions = list(range(len(regions)))
    ax.bar(positions, heights, yerr=spreads, capsize=2, color=palette[:len(prefixes)])
    ax.set_ylabel('Spearman\'s rank correlation', fontweight='bold')
    # Pad the supplied [min, max] range slightly so bars do not touch the frame.
    ax.set_ylim(ylim[0]-0.05, ylim[1]+0.05)
    ax.set_xticks(positions)
    ax.set_xticklabels(regions, rotation=45, ha="right")
    ax.grid(which='major', axis='both', linestyle='--')
    plt.savefig(folder + 'bar_{}_{}.png'.format(stain, name), bbox_inches='tight')
    plt.close()
if __name__ == "__main__":
    # Correlate per-region model attributions (layer 'block2BN') with stain
    # scores inside a fixed follow-up window, then plot the ranked results.
    years = 2
    layername = 'block2BN'
    # time_threshold is the follow-up window in days; `type` selects the
    # outcome measure (here cognitive, 'COG').
    time_threshold, type = 365*years, 'COG'
    folder = type + '_correlation_{}_years/'.format(years)
    if not os.path.exists(folder):
        os.mkdir(folder)
    # NOTE(review): `os`, `collections`, `prefixes`, `regions`, `stains`,
    # `prefix_idx`, `file_interval_info` and `get_correlation` all arrive via
    # `from correlate import *` — consider importing them explicitly.
    interval = file_interval_info(type)
    y_lim = [0, 0]  # running [min, max] correlation, used to scale the bar plots
    corre = collections.defaultdict(dict)
    error = collections.defaultdict(dict)
    # pool rows: [negated correlation sum (sort key), prefix, region label]
    pool = [[0, prefixes[i], regions[i]] for i in range(len(regions))]
    for i, region in enumerate(prefixes):
        for stain in stains:
            corr, std = get_correlation(region + '_' + stain, prefix_idx[region], time_threshold, interval, folder, type, layername, missing=0)
            corre[region][stain] = corr
            error[region][stain] = 0  # std is discarded; bars get zero error
            y_lim[1] = max(y_lim[1], corr)
            y_lim[0] = min(y_lim[0], corr)
            pool[i][0] -= corr
    # Sort regions by descending total correlation (key is negated above).
    pool.sort()
    prefixes = [p[1] for p in pool]
    regions = [p[2] for p in pool]
    barplots(prefixes, regions, stains, corre, error, '{}days_{}shap_{}'.format(time_threshold, type, layername), folder, y_lim)
| [
"matplotlib.pyplot.subplots",
"matplotlib.rc",
"matplotlib.pyplot.close"
] | [((133, 156), 'matplotlib.rc', 'rc', (['"""axes"""'], {'linewidth': '(1)'}), "('axes', linewidth=1)\n", (135, 156), False, 'from matplotlib import rc, rcParams\n'), ((157, 191), 'matplotlib.rc', 'rc', (['"""font"""'], {'weight': '"""bold"""', 'size': '(10)'}), "('font', weight='bold', size=10)\n", (159, 191), False, 'from matplotlib import rc, rcParams\n'), ((835, 872), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': '(300)', 'figsize': '(6, 3)'}), '(dpi=300, figsize=(6, 3))\n', (847, 872), True, 'import matplotlib.pyplot as plt\n'), ((1330, 1341), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1339, 1341), True, 'import matplotlib.pyplot as plt\n')] |
# Generated by Django 2.2.1 on 2019-05-16 23:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the SocialProfile table, a one-to-one
    extension of the user model holding a third-party login identity."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='SocialProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): the choice key is a string ('1') while the column
                # is a SmallIntegerField — stored integers will never equal the
                # declared choice key; confirm against the current model.
                ('provider', models.SmallIntegerField(choices=[('1', 'Facebook')])),
                ('social_id', models.CharField(max_length=255, unique=True)),
                ('photo', models.TextField(blank=True)),
                ('extra_data', models.TextField(blank=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='social_profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.AutoField",
"django.db.models.SmallIntegerField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((442, 535), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (458, 535), False, 'from django.db import migrations, models\n'), ((563, 616), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'choices': "[('1', 'Facebook')]"}), "(choices=[('1', 'Facebook')])\n", (587, 616), False, 'from django.db import migrations, models\n'), ((649, 694), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (665, 694), False, 'from django.db import migrations, models\n'), ((723, 751), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (739, 751), False, 'from django.db import migrations, models\n'), ((785, 813), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (801, 813), False, 'from django.db import migrations, models\n'), ((841, 970), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""social_profile"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE,\n related_name='social_profile', to=settings.AUTH_USER_MODEL)\n", (861, 970), False, 'from django.db import migrations, models\n')] |
from rest_framework import serializers, exceptions
from greenbudget.lib.rest_framework_utils.serializers import (
EnhancedModelSerializer)
from greenbudget.app.account.models import BudgetAccount, TemplateAccount
from greenbudget.app.tagging.serializers import ColorField
from greenbudget.app.subaccount.models import (
BudgetSubAccount, TemplateSubAccount)
from .models import (
Group,
BudgetAccountGroup,
TemplateAccountGroup,
BudgetSubAccountGroup,
TemplateSubAccountGroup
)
class GroupSerializer(EnhancedModelSerializer):
    """Base serializer for :obj:`Group` instances.

    Besides the usual read-only bookkeeping fields, both `create` and
    `update` prune sibling groups that end up with no children.  Assigning
    children to one group can empty another, and neither model-level
    `post_save` signals nor `track_model` hooks fire when children move via
    the reverse-FK `children` attribute, so the cleanup has to happen here,
    at the request/response boundary.

    TODO: investigate a more robust mechanism; at the very least a CRON task
    should sweep remnant empty groups.
    """
    id = serializers.IntegerField(read_only=True)
    name = serializers.CharField(
        required=False,
        allow_blank=False,
        allow_null=False
    )
    created_by = serializers.PrimaryKeyRelatedField(read_only=True)
    updated_by = serializers.PrimaryKeyRelatedField(read_only=True)
    created_at = serializers.DateTimeField(read_only=True)
    updated_at = serializers.DateTimeField(read_only=True)
    estimated = serializers.FloatField(read_only=True)
    color = ColorField(content_type_model=Group, required=False)

    class Meta:
        model = Group
        fields = (
            'id', 'name', 'created_by', 'created_at', 'updated_by',
            'updated_at', 'color', 'estimated')

    def validate_name(self, value):
        """Ensure the name is unique among the sibling groups of the parent."""
        parent = self.context.get('parent')
        if parent is None:
            parent = self.instance.parent
        validator = serializers.UniqueTogetherValidator(
            queryset=parent.groups.all(),
            fields=('name', ),
        )
        validator({'name': value}, self)
        return value

    def _prune_empty_siblings(self, instance):
        # Delete any other group under the same parent that no longer has
        # children (its children may have just been moved onto `instance`).
        for group in instance.parent.groups.all():
            if group != instance and group.children.count() == 0:
                group.delete()

    def create(self, *args, **kwargs):
        """Create the group, then delete newly-emptied sibling groups."""
        instance = super().create(*args, **kwargs)
        self._prune_empty_siblings(instance)
        return instance

    def update(self, *args, **kwargs):
        """Update the group, then delete newly-emptied sibling groups."""
        instance = super().update(*args, **kwargs)
        self._prune_empty_siblings(instance)
        return instance
class AbstractAccountGroupSerializer(GroupSerializer):
    """Mixin that validates every child account belongs to the same budget
    as the group's parent."""
    class Meta:
        abstract = True

    def validate_children(self, value):
        """Reject any child whose budget differs from the group's parent."""
        parent = self.context.get('parent')
        if parent is None:
            parent = self.instance.parent
        for child in value:
            if child.budget == parent:
                continue
            raise exceptions.ValidationError(
                "The %s %s does not belong to the same %s "
                "that the Group does (%s)." % (
                    type(child).__name__, child.pk, type(parent).__name__,
                    parent.pk)
            )
        return value
class BudgetAccountGroupSerializer(AbstractAccountGroupSerializer):
    """Serializer for groups of budget accounts; exposes the computed
    `actual` and `variance` metrics on top of the base Group fields."""
    actual = serializers.FloatField(read_only=True)
    variance = serializers.FloatField(read_only=True)
    # Children must be active budget accounts; membership in a different
    # budget is rejected by AbstractAccountGroupSerializer.validate_children.
    children = serializers.PrimaryKeyRelatedField(
        many=True,
        required=True,
        queryset=BudgetAccount.objects.active()
    )

    class Meta(GroupSerializer.Meta):
        model = BudgetAccountGroup
        fields = GroupSerializer.Meta.fields + (
            'children', 'actual', 'variance')
class TemplateAccountGroupSerializer(AbstractAccountGroupSerializer):
    """Serializer for groups of template accounts (no actual/variance —
    templates carry only estimates)."""
    # Children must be active template accounts.
    children = serializers.PrimaryKeyRelatedField(
        many=True,
        required=True,
        queryset=TemplateAccount.objects.active()
    )

    class Meta(GroupSerializer.Meta):
        model = TemplateAccountGroup
        fields = GroupSerializer.Meta.fields + ('children', )
class AbstractSubAccountGroupSerializer(GroupSerializer):
    """Mixin that validates every child sub-account shares both the group's
    parent and that parent's budget."""
    class Meta:
        abstract = True

    def validate_children(self, value):
        """Reject children attached to a different parent or budget."""
        parent = self.context.get('parent')
        if parent is None:
            parent = self.instance.parent
        for child in value:
            if child.parent != parent:
                raise exceptions.ValidationError(
                    "The %s %s does not belong to the same %s "
                    "that the Group does (%s)." % (
                        type(child).__name__, child.pk, type(parent).__name__,
                        parent.pk)
                )
            # Is this check necessary? Would this otherwise be constrained
            # by model restrictions?
            if child.budget != parent.budget:
                raise exceptions.ValidationError(
                    "The %s %s does not belong to the same %s "
                    "that the Group does (%s)." % (
                        type(child).__name__, child.pk,
                        type(child.budget).__name__, parent.pk)
                )
        return value
class BudgetSubAccountGroupSerializer(AbstractSubAccountGroupSerializer):
    """Serializer for groups of budget sub-accounts; exposes the computed
    `actual` and `variance` metrics on top of the base Group fields."""
    actual = serializers.FloatField(read_only=True)
    variance = serializers.FloatField(read_only=True)
    # Children must be active budget sub-accounts; parent/budget mismatches
    # are rejected by AbstractSubAccountGroupSerializer.validate_children.
    children = serializers.PrimaryKeyRelatedField(
        many=True,
        required=True,
        queryset=BudgetSubAccount.objects.active()
    )

    class Meta(GroupSerializer.Meta):
        model = BudgetSubAccountGroup
        fields = GroupSerializer.Meta.fields + (
            'children', 'actual', 'variance')
class TemplateSubAccountGroupSerializer(AbstractSubAccountGroupSerializer):
    """Serializer for groups of template sub-accounts (no actual/variance)."""
    # Children must be active template sub-accounts.
    children = serializers.PrimaryKeyRelatedField(
        many=True,
        required=True,
        queryset=TemplateSubAccount.objects.active()
    )

    class Meta(GroupSerializer.Meta):
        model = TemplateSubAccountGroup
        fields = GroupSerializer.Meta.fields + ('children', )
| [
"rest_framework.serializers.DateTimeField",
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.PrimaryKeyRelatedField",
"greenbudget.app.subaccount.models.BudgetSubAccount.objects.active",
"greenbudget.app.subaccount.models.TemplateSubAccount.objects.active",
"greenbudget.app.account.m... | [((568, 608), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (592, 608), False, 'from rest_framework import serializers, exceptions\n'), ((620, 694), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'allow_blank': '(False)', 'allow_null': '(False)'}), '(required=False, allow_blank=False, allow_null=False)\n', (641, 694), False, 'from rest_framework import serializers, exceptions\n'), ((742, 792), 'rest_framework.serializers.PrimaryKeyRelatedField', 'serializers.PrimaryKeyRelatedField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (776, 792), False, 'from rest_framework import serializers, exceptions\n'), ((810, 860), 'rest_framework.serializers.PrimaryKeyRelatedField', 'serializers.PrimaryKeyRelatedField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (844, 860), False, 'from rest_framework import serializers, exceptions\n'), ((878, 919), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (903, 919), False, 'from rest_framework import serializers, exceptions\n'), ((937, 978), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (962, 978), False, 'from rest_framework import serializers, exceptions\n'), ((995, 1033), 'rest_framework.serializers.FloatField', 'serializers.FloatField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (1017, 1033), False, 'from rest_framework import serializers, exceptions\n'), ((1046, 1098), 'greenbudget.app.tagging.serializers.ColorField', 'ColorField', ([], {'content_type_model': 'Group', 'required': '(False)'}), '(content_type_model=Group, required=False)\n', (1056, 1098), False, 'from greenbudget.app.tagging.serializers import ColorField\n'), ((7202, 7240), 'rest_framework.serializers.FloatField', 
'serializers.FloatField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (7224, 7240), False, 'from rest_framework import serializers, exceptions\n'), ((7256, 7294), 'rest_framework.serializers.FloatField', 'serializers.FloatField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (7278, 7294), False, 'from rest_framework import serializers, exceptions\n'), ((9164, 9202), 'rest_framework.serializers.FloatField', 'serializers.FloatField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (9186, 9202), False, 'from rest_framework import serializers, exceptions\n'), ((9218, 9256), 'rest_framework.serializers.FloatField', 'serializers.FloatField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (9240, 9256), False, 'from rest_framework import serializers, exceptions\n'), ((7405, 7435), 'greenbudget.app.account.models.BudgetAccount.objects.active', 'BudgetAccount.objects.active', ([], {}), '()\n', (7433, 7435), False, 'from greenbudget.app.account.models import BudgetAccount, TemplateAccount\n'), ((7793, 7825), 'greenbudget.app.account.models.TemplateAccount.objects.active', 'TemplateAccount.objects.active', ([], {}), '()\n', (7823, 7825), False, 'from greenbudget.app.account.models import BudgetAccount, TemplateAccount\n'), ((9367, 9400), 'greenbudget.app.subaccount.models.BudgetSubAccount.objects.active', 'BudgetSubAccount.objects.active', ([], {}), '()\n', (9398, 9400), False, 'from greenbudget.app.subaccount.models import BudgetSubAccount, TemplateSubAccount\n'), ((9767, 9802), 'greenbudget.app.subaccount.models.TemplateSubAccount.objects.active', 'TemplateSubAccount.objects.active', ([], {}), '()\n', (9800, 9802), False, 'from greenbudget.app.subaccount.models import BudgetSubAccount, TemplateSubAccount\n')] |
# -*- coding: utf-8 -*-
"""
INTRO
@author: <NAME>. Created on Tue May 21 11:57:52 2019
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft,
Delft, the Netherlands
"""
import inspect
from screws.freeze.main import FrozenOnly
from typing import Dict, Union
import numpy as np
from screws.decorators.classproperty.main import classproperty
class DomainInputBase(FrozenOnly):
    def __init__(self, domain_name='domain without name'):
        """Initialise an (empty) 2-D domain-input description.

        :param domain_name: human-readable name of the domain.
        """
        self.domain_name = domain_name
        self._ndim_ = 2  # this base class describes 2-D domains
        # Region geometry / topology containers; populated later through the
        # corresponding property setters.
        self._region_corner_coordinates_ = None
        self._region_edge_types_ = None
        self._boundary_region_edges_ = None
        self._region_interpolators_ = None
        self._boundary_names_ = None
        self._periodic_boundary_pairs_ = dict()
        self._periodic_boundaries_ = set()
        self._region_sequence_ = None
        self._region_type_wr2_metric_ = None
        self._internal_parameters_ = list()
        # Record the subclass' __init__ keyword names so `internal_parameters`
        # can later be validated against them; subclasses may only declare
        # keyword arguments (no *args / **kwargs).
        INSP = inspect.getfullargspec(self.__init__)
        self.__arg_names___ = INSP[0][1:]
        assert INSP[1] is None and INSP[2] is None, "A domain input class can not have *args and **kwargs."
        assert len(INSP[3]) == len(self.__arg_names___), "A domain input class can only have kwargs."
        self._freeze_self_()
@property
def internal_parameters(self):
"""Internal parameters only affect internal metric, does not affect the domain shape."""
return self._internal_parameters_
@internal_parameters.setter
def internal_parameters(self, internal_parameters):
if isinstance(internal_parameters, list):
pass
elif isinstance(internal_parameters, str):
internal_parameters = [internal_parameters,]
elif isinstance(internal_parameters, (tuple, set)):
internal_parameters = list(internal_parameters)
else:
raise NotImplementedError(f"internal_parameters = {internal_parameters} not acceptable.")
assert isinstance(internal_parameters, list), \
f"please put internal_parameters({internal_parameters}) in list."
if len(internal_parameters) > 0:
assert all([ip in self.__arg_names___ for ip in internal_parameters])
self._internal_parameters_ = internal_parameters
    @property
    def domain_name(self):
        """The (str) name of this domain."""
        return self._domain_name_

    @domain_name.setter
    def domain_name(self, dn):
        # The name is used in error messages and in region-name validation
        # (region names may not equal the domain name).
        assert isinstance(dn, str), " <DomainInput> : domain name needs to be str."
        self._domain_name_ = dn
@property
def ndim(self):
""" dimensions n. """
return self._ndim_
@property
def region_interpolators(self):
return self._region_interpolators_
@region_interpolators.setter
def region_interpolators(self, region_interpolators):
self._region_interpolators_ = region_interpolators
def ___PRIVATE_region_name_requirement_checker___(self, regionDict):
"""
Requirements:
1). must be str
2). != domain name.
3). length > 2
4). Starts with 'R:'
5). can only have letters and _
"""
for R in regionDict:
assert isinstance(R, str), f"region name={R} wrong, need be str!"
assert R != self.domain_name, f"region name == domain.name! wrong!"
assert len(R) > 2, f"region name = {R} too short, must > 2."
assert R[0:2] == 'R:', f"regions name = {R} does not start with 'R:'"
R2 = R[2:].replace('_', '')
assert R2.isalpha(),f"region_name = {R} wrong, can only have letter and _ (at >2)."
@property
def region_corner_coordinates(self):
"""
Store the regions 4 corners' coordinates.
Returns
-------
region_coordinates : dict
A dict whose keys represent the regions names, and values represent
the coordinates of regions corner points.
In 2D: (UL, DL, UR, DR).
L: Left, R: Right, U: Upper, D: Down
"""
return self._region_corner_coordinates_
@region_corner_coordinates.setter
def region_corner_coordinates(self, _dict_):
assert isinstance(_dict_, dict), " <DomainInput> : region_coordinates needs to be a dict."
self.___PRIVATE_region_name_requirement_checker___(_dict_)
for R in _dict_:
assert np.shape(_dict_[R])[0] == 4, \
" <DomainInput> : region_coordinates[{}]={} is wrong.".format(R, _dict_[R])
self._region_corner_coordinates_ = _dict_
@property
def region_edge_types(self):
"""
Store the regions' boundaries' types.
Returns
-------
region_boundary_type : dict
A dict that contains the region boundary info. The keys indicate
the region boundary, the value indicate the info. value[0] indicate
the type, value[1:] indicate the rest info which will be parsed
into full information. The not mentioned regions boundaries will be
set into default type: ('plane',)
Notice that the value will be sent to edge_geometry. And
if this info (value[1:]) to be parsed, it will be done there in
edge_geometry. And the value is stored in the
`edge_geometry.edge_types`.
"""
return self._region_edge_types_
@region_edge_types.setter
def region_edge_types(self, _dict_):
assert self.region_corner_coordinates is not None, " <DomainInput> : please first set region_coordinates."
assert isinstance(_dict_, dict), " <DomainInput> : region_boundary_type needs to be a dict."
for item in _dict_:
R, S = item.split('-')
assert R in self.region_corner_coordinates and S in ('U', 'D', 'L', 'R'), \
" <DomainInput> : region_edge_type key {} is wrong.".format(item)
self._region_edge_types_ = _dict_
def ___PRIVATE_boundary_name_requirement_checker___(self, boundaryRegionSidesDict):
"""
Requirements:
1). != domain name.
2). Length > 2
3). Can not start with 'R:' (So it must be different from regions names).
4). Only have letters
"""
for boundary_name in boundaryRegionSidesDict.keys():
assert boundary_name != self.domain_name
assert len(boundary_name) > 2, f"boundary_name = {boundary_name} is too short (>2 must)."
assert boundary_name[0:2] != 'R:', f"boundary_name = {boundary_name} wrong."
assert boundary_name.isalpha(), f"boundary_name = {boundary_name} wrong, boundary_name can only contain letters."
@property
def boundary_region_edges(self):
"""
Store the domain boundary information.
Returns
-------
domain_boundary : dict
For example:
{'Down': ("Body_center-D", 'Body_back-D', ...),
'West': ("Body_center-R", 'Body_back-R', ...),
......}
This means we have domain boundaries: South, West and so on.
"""
return self._boundary_region_edges_
@boundary_region_edges.setter
def boundary_region_edges(self, _dict_):
assert self.region_corner_coordinates is not None, " <DomainInput> : please first set region_coordinates."
assert isinstance(_dict_, dict), " <DomainInput> : domain_boundary needs to be a dict."
self.___PRIVATE_boundary_name_requirement_checker___(_dict_)
for boundary_names in _dict_.keys():
assert isinstance(boundary_names, str) and boundary_names != '' and '-' not in boundary_names, \
" <DomainInput> : boundary_names = {} is wrong.".format(boundary_names)
assert boundary_names not in self.region_corner_coordinates.keys(), \
" <DomainInput>: boundary_names={} is taken by one of the regions.".format(boundary_names)
for item in _dict_:
if isinstance(_dict_[item], str):
_dict_[item] = (_dict_[item],)
if isinstance(_dict_[item], list) or isinstance(_dict_[item], tuple):
for item_i in _dict_[item]:
R, S = item_i.split('-')
assert R in self.region_corner_coordinates and S in ('U', 'D', 'L', 'R'), \
" <DomainInput> : domain_boundary[{}]={} is wrong.".format(item, _dict_[item])
else:
raise Exception(" <DomainInput> : boundary_region_edges input value accepts only str, tuple of list.")
self._boundary_region_edges_ = _dict_
self._boundary_names_ = list(_dict_.keys())
def ___PRIVATE_periodic_boundary_requirement_checker___(self, pBd):
"""
Here we only do a simple check. We make sure that the keys are in format of:
0). boundary_name_1=boundary_name_2.
1). A boundary name at most appear in one pair.
"""
assert isinstance(pBd, dict)
bnPOOL = set()
for pair in pBd:
assert '=' in pair
bn1, bn2 = pair.split('=')
lengthPOOL = len(bnPOOL)
assert bn1 in self._boundary_names_ and bn2 in self._boundary_names_
bnPOOL.add(bn1)
bnPOOL.add(bn2)
newLengthPOOL = len(bnPOOL)
assert newLengthPOOL == lengthPOOL + 2, "Boundary(s) used for multiple periodic pairs!"
self._periodic_boundaries_ = bnPOOL
@property
def periodic_boundary_pairs(self):
return self._periodic_boundary_pairs_
@periodic_boundary_pairs.setter
def periodic_boundary_pairs(self, pBd):
""" """
self.___PRIVATE_periodic_boundary_requirement_checker___(pBd)
self._periodic_boundary_pairs_ = pBd
@property
def periodic_boundaries(self):
"""(set) Return a set of all boundary names those involved in the periodic boundary setting."""
return self._periodic_boundaries_
@property
def periodic_boundaries_involved_regions(self):
"""The regions that involve periodic boundaries."""
regions = set()
for pb in self.periodic_boundaries:
region_sides = self.boundary_region_edges[pb]
for rs in region_sides:
rn = rs.split('-')[0]
if rn not in regions:
regions.add(rn)
return regions
@property
def region_sequence(self):
"""
This will fix the sequence of regions by fix their names in property
region_names or regions.names. This is very important for numbering. Sometimes, a bad
regions sequence can make the numbering wrong.
"""
return self._region_sequence_
@region_sequence.setter
def region_sequence(self, rS: tuple):
assert len(rS) == len(self.region_corner_coordinates.keys())
assert all([rSi in self.region_corner_coordinates for rSi in rS]) & \
all([rSi in rS for rSi in self.region_corner_coordinates.keys()]), \
f"region_sequence={rS} has invalid regions name(s)."
self._region_sequence_ = rS
@property
def region_type_wr2_metric(self):
return self._region_type_wr2_metric_
@region_type_wr2_metric.setter
def region_type_wr2_metric(self, rTwr2M: Union[str, Dict[str, str]]):
if isinstance(rTwr2M, str):
_D_ = dict()
for region_name in self.region_corner_coordinates:
_D_[region_name] = rTwr2M
rTwr2M = _D_
assert isinstance(rTwr2M, dict), "region_type_wr2_metric needs to be a dictionary."
for key in rTwr2M:
assert key in self.region_corner_coordinates, f"Region name={key} not valid."
self._region_type_wr2_metric_ = rTwr2M
# class properties -------------------------
@classproperty
def statistic(cls):
raise NotImplementedError()
@classproperty
def random_parameters(cls):
raise NotImplementedError()
| [
"numpy.shape",
"inspect.getfullargspec"
] | [((1001, 1038), 'inspect.getfullargspec', 'inspect.getfullargspec', (['self.__init__'], {}), '(self.__init__)\n', (1023, 1038), False, 'import inspect\n'), ((4446, 4465), 'numpy.shape', 'np.shape', (['_dict_[R]'], {}), '(_dict_[R])\n', (4454, 4465), True, 'import numpy as np\n')] |
from app import db
from app.models.serializer import Serializer
class Weather(db.Model, Serializer):
    """Cached weather snapshot, keyed by the requesting client's IP."""

    id = db.Column(db.Integer, primary_key=True)
    # 15 chars fits a dotted IPv4 address; one cached row per IP.
    ip = db.Column(db.String(15), index=True, unique=True)
    country = db.Column(db.String(80))
    # presumably a flag image URL (hence the 512-char width) — confirm.
    flag = db.Column(db.String(512))
    town = db.Column(db.String(80))
    tendency = db.Column(db.String(80))
    wind_speed = db.Column(db.String(20))
    temperature_min = db.Column(db.String(20))
    temperature_max = db.Column(db.String(20))
    temperature = db.Column(db.String(20))
    humidity = db.Column(db.String(40))
    clouds = db.Column(db.String(80))

    def __repr__(self):
        return f'<Weather {self.town} : {self.temperature}>'

    def serialize(self):
        """Delegate to the shared Serializer mixin."""
        return Serializer.serialize(self)
| [
"app.db.String",
"app.db.Column",
"app.models.serializer.Serializer.serialize"
] | [((112, 151), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (121, 151), False, 'from app import db\n'), ((171, 184), 'app.db.String', 'db.String', (['(15)'], {}), '(15)\n', (180, 184), False, 'from app import db\n'), ((235, 248), 'app.db.String', 'db.String', (['(80)'], {}), '(80)\n', (244, 248), False, 'from app import db\n'), ((271, 285), 'app.db.String', 'db.String', (['(512)'], {}), '(512)\n', (280, 285), False, 'from app import db\n'), ((308, 321), 'app.db.String', 'db.String', (['(80)'], {}), '(80)\n', (317, 321), False, 'from app import db\n'), ((348, 361), 'app.db.String', 'db.String', (['(80)'], {}), '(80)\n', (357, 361), False, 'from app import db\n'), ((390, 403), 'app.db.String', 'db.String', (['(20)'], {}), '(20)\n', (399, 403), False, 'from app import db\n'), ((437, 450), 'app.db.String', 'db.String', (['(20)'], {}), '(20)\n', (446, 450), False, 'from app import db\n'), ((484, 497), 'app.db.String', 'db.String', (['(20)'], {}), '(20)\n', (493, 497), False, 'from app import db\n'), ((527, 540), 'app.db.String', 'db.String', (['(20)'], {}), '(20)\n', (536, 540), False, 'from app import db\n'), ((567, 580), 'app.db.String', 'db.String', (['(40)'], {}), '(40)\n', (576, 580), False, 'from app import db\n'), ((605, 618), 'app.db.String', 'db.String', (['(80)'], {}), '(80)\n', (614, 618), False, 'from app import db\n'), ((757, 783), 'app.models.serializer.Serializer.serialize', 'Serializer.serialize', (['self'], {}), '(self)\n', (777, 783), False, 'from app.models.serializer import Serializer\n')] |
from pgdrive.component.blocks.curve import Curve
from pgdrive.component.blocks.first_block import FirstPGBlock
from pgdrive.component.blocks.std_t_intersection import StdTInterSection
from pgdrive.component.blocks.straight import Straight
from pgdrive.component.road.road_network import RoadNetwork
from pgdrive.tests.vis_block.vis_block_base import TestBlock
if __name__ == "__main__":
    # Visual smoke test: chain first block -> curve -> straight -> T
    # intersection, then attach a curve to every socket of the intersection.
    test = TestBlock(True)

    from pgdrive.engine.asset_loader import initialize_asset_loader

    initialize_asset_loader(test)

    global_network = RoadNetwork()
    first = FirstPGBlock(global_network, 3.0, 2, test.render, test.world, 1)

    curve = Curve(1, first.get_socket(0), global_network, 1)
    curve.construct_block(test.render, test.world)

    straight = Straight(2, curve.get_socket(0), global_network, 1)
    straight.construct_block(test.render, test.world)

    intersection = StdTInterSection(3, straight.get_socket(0), global_network, 1)
    print(intersection.construct_block(test.render, test.world))

    # `id` shadowed the builtin of the same name; use a descriptive counter.
    block_id = 4
    for socket_idx in range(intersection.SOCKET_NUM):
        block = Curve(block_id, intersection.get_socket(socket_idx), global_network, block_id + 1)
        block.construct_block(test.render, test.world)
        block_id += 1

    test.show_bounding_box(global_network)
    test.run()
| [
"pgdrive.component.blocks.first_block.FirstPGBlock",
"pgdrive.tests.vis_block.vis_block_base.TestBlock",
"pgdrive.component.road.road_network.RoadNetwork",
"pgdrive.engine.asset_loader.initialize_asset_loader"
] | [((399, 414), 'pgdrive.tests.vis_block.vis_block_base.TestBlock', 'TestBlock', (['(True)'], {}), '(True)\n', (408, 414), False, 'from pgdrive.tests.vis_block.vis_block_base import TestBlock\n'), ((488, 517), 'pgdrive.engine.asset_loader.initialize_asset_loader', 'initialize_asset_loader', (['test'], {}), '(test)\n', (511, 517), False, 'from pgdrive.engine.asset_loader import initialize_asset_loader\n'), ((540, 553), 'pgdrive.component.road.road_network.RoadNetwork', 'RoadNetwork', ([], {}), '()\n', (551, 553), False, 'from pgdrive.component.road.road_network import RoadNetwork\n'), ((566, 630), 'pgdrive.component.blocks.first_block.FirstPGBlock', 'FirstPGBlock', (['global_network', '(3.0)', '(2)', 'test.render', 'test.world', '(1)'], {}), '(global_network, 3.0, 2, test.render, test.world, 1)\n', (578, 630), False, 'from pgdrive.component.blocks.first_block import FirstPGBlock\n')] |
#!/usr/bin/env python3
"""
Author : <NAME> <<EMAIL>>
Date : 2021-11-09
Purpose: Find the similarities between sequences.
"""
import argparse
# --------------------------------------------------
# --------------------------------------------------
def get_args():
    """Get command-line arguments."""

    parser = argparse.ArgumentParser(
        description='Rock the Casbah',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('file',
                        help='Input file',
                        metavar='FILE',
                        type=argparse.FileType('rt'))

    return parser.parse_args()


# --------------------------------------------------
def main():
    """Print every sequence followed by a consensus line.

    The consensus line contains '|' in columns where all sequences carry
    the same character and 'X' where they differ.
    """

    args = get_args()
    sequences = [line.strip() for line in args.file]

    # zip(*sequences) walks the sequences column by column, which removes
    # the index bookkeeping (and the pylint-appeasing dead call to
    # all_same) of the previous version, and no longer raises IndexError
    # on empty input or sequences shorter than the first one.
    out_seq = ''.join('|' if all_same(column) else 'X'
                      for column in zip(*sequences))

    for seq in sequences:
        print(seq)
    print(out_seq)


# --------------------------------------------------
def all_same(list1):
    """Return True if all items in list1 are equal (or list1 is empty)."""

    return all(x == list1[0] for x in list1)


# --------------------------------------------------
if __name__ == '__main__':
    main()
| [
"argparse.FileType",
"argparse.ArgumentParser"
] | [((266, 381), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Rock the Casbah"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Rock the Casbah', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (289, 381), False, 'import argparse\n'), ((539, 562), 'argparse.FileType', 'argparse.FileType', (['"""rt"""'], {}), "('rt')\n", (556, 562), False, 'import argparse\n')] |
import pygame
import my_colors as color
pygame.init()

WIDTH, HEIGHT = 800, 600
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Search the green pixel at the coordinates (x=10, y=100)")

# Repaint the two marker pixels, then block until the next event arrives;
# leave the loop when the window is closed.
while True:
    screen.set_at((1, 1), color.white)
    screen.set_at((10, 100), color.green)
    pygame.display.update()
    if pygame.event.wait().type == pygame.QUIT:
        break

pygame.quit()
print("Goodbye!")
| [
"pygame.init",
"pygame.quit",
"pygame.display.set_mode",
"pygame.event.wait",
"pygame.display.set_caption",
"pygame.display.update"
] | [((41, 54), 'pygame.init', 'pygame.init', ([], {}), '()\n', (52, 54), False, 'import pygame\n'), ((147, 183), 'pygame.display.set_mode', 'pygame.display.set_mode', (['screen_size'], {}), '(screen_size)\n', (170, 183), False, 'import pygame\n'), ((184, 274), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Search the green pixel at the coordinates (x=10, y=100)"""'], {}), "(\n 'Search the green pixel at the coordinates (x=10, y=100)')\n", (210, 274), False, 'import pygame\n'), ((501, 514), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (512, 514), False, 'import pygame\n'), ((386, 409), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (407, 409), False, 'import pygame\n'), ((422, 441), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (439, 441), False, 'import pygame\n')] |
#!/usr/bin/env python3
import os
import re
import glob
import sys
import operator
import ast
import argparse
###############################################################################################
# This script was written as part of the analysis conducted on the output generated by #
# hmmsearch, when the ViPhOG database was searched against UniProtKB. The ViPhOG profile HMM #
# files were stored in different directories, each containing maximum 2000 files and named #
# using a sequential number from 1 to 16 (hmm1...hmm16). For each one of these a corresponding#
# output directory was generated, each containing a domtbl output file for each of the files #
# stored in the hmm directories. The output directories were named using the same sequential #
# numbers as the directories storing the hmm files (hmm1domtbl...hmm16domtbl). #
###############################################################################################
parser = argparse.ArgumentParser(description = "Step 3: Generate summary tables for each taxonomic rank. Make sure to run the script from within the directory containing the domtbl output directories (check comment block for guidance) and following the scripts that execute Step 1 and Step 2")
parser.add_argument("-i", "--input", dest = "input_file", help = "Path to summary chunk file", required = True)

if len(sys.argv) == 1:
    # No arguments at all: show usage instead of failing on parse.
    parser.print_help()
else:
    args = parser.parse_args()
    summ_file = args.input_file
    with open(summ_file) as input_file:
        header_line = input_file.readline().rstrip()
        # Columns 0 and 1 hold the ViPhOG id / metadata; taxonomic-rank
        # columns start at index 2. Remember (column_index, rank_name).
        taxa_ranks = []
        for x,y in enumerate(header_line.split("\t")):
            if x >= 2:
                taxa_ranks.append((x, y))
        for x,y in taxa_ranks:
            # One output table per rank: rewind past the header so each
            # pass sees every data row again.
            input_file.seek(0)
            next(input_file)
            with open(f"{os.path.splitext(summ_file)[0]}_{y}.tsv", "w") as output_file:
                output_file.write("ViPhOG\t#_taxons\tMost_significant\tMax_min_score\tOverlapping_taxons\tNext_max_score\n")
                for line in input_file:
                    line = line.rstrip()
                    viphog_id = line.split("\t")[0]
                    # The rank column holds a python-literal list of hit
                    # tuples; indices 0, 2, 3 (name, max score, min score)
                    # are used below — confirm tuple layout against Step 2.
                    rank_hits = ast.literal_eval(line.split("\t")[x])
                    total_hits = len(rank_hits)
                    most_significant = ""
                    score_range = ""
                    overlap = ""
                    next_max_score = ""
                    if total_hits > 0:
                        # Sort by max score (tuple index 2), best hit first.
                        rank_hits_sorted = sorted(rank_hits, key = operator.itemgetter(2), reverse = True)
                        most_significant = rank_hits_sorted[0][0]
                        score_range = (rank_hits_sorted[0][2], rank_hits_sorted[0][3])
                        if total_hits > 1:
                            # Taxa whose max score reaches into the best
                            # hit's (max, min) range count as overlapping.
                            overlap = []
                            for elem in rank_hits_sorted[1:]:
                                if elem[2] >= score_range[1]:
                                    overlap.append((elem[0], elem[2]))
                            if len(overlap) < 1:
                                overlap = ""
                            next_max_score = rank_hits_sorted[1][2]
                    output_file.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(viphog_id, total_hits, most_significant, score_range, overlap, next_max_score))
"operator.itemgetter",
"os.path.splitext",
"argparse.ArgumentParser"
] | [((985, 1277), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Step 3: Generate summary tables for each taxonomic rank. Make sure to run the script from within the directory containing the domtbl output directories (check comment block for guidance) and following the scripts that execute Step 1 and Step 2"""'}), "(description=\n 'Step 3: Generate summary tables for each taxonomic rank. Make sure to run the script from within the directory containing the domtbl output directories (check comment block for guidance) and following the scripts that execute Step 1 and Step 2'\n )\n", (1008, 1277), False, 'import argparse\n'), ((1770, 1797), 'os.path.splitext', 'os.path.splitext', (['summ_file'], {}), '(summ_file)\n', (1786, 1797), False, 'import os\n'), ((2290, 2312), 'operator.itemgetter', 'operator.itemgetter', (['(2)'], {}), '(2)\n', (2309, 2312), False, 'import operator\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from openstack.telemetry.v2 import sample
# Canonical v2 sample body used as a fixture by the tests below.
SAMPLE = {
    'id': None,
    'metadata': {'1': 'one'},
    'meter': '2',
    'project_id': '3',
    'recorded_at': '4',
    'resource_id': '5',
    'source': '6',
    'timestamp': '7',
    'type': '8',
    'unit': '9',
    'user_id': '10',
    'volume': '11.1',
}

# Legacy sample body using the old counter_* field names; the tests check
# it maps onto the same resource attributes.
OLD_SAMPLE = {
    'counter_name': '1',
    'counter_type': '2',
    'counter_unit': '3',
    'counter_volume': '4',
    'message_id': None,
    'project_id': '5',
    'recorded_at': '6',
    'resource_id': '7',
    'resource_metadata': '8',
    'source': '9',
    'timestamp': '10',
    'user_id': '11',
}
class TestSample(testtools.TestCase):
    """Unit tests for the telemetry v2 Sample resource."""

    def test_basic(self):
        # Resource wiring: keys, base path, service type, allowed operations.
        sot = sample.Sample(SAMPLE)
        self.assertIsNone(sot.resource_key)
        self.assertIsNone(sot.resources_key)
        self.assertEqual('/meters/%(meter)s', sot.base_path)
        self.assertEqual('metering', sot.service.service_type)
        self.assertTrue(sot.allow_create)
        self.assertFalse(sot.allow_retrieve)
        self.assertFalse(sot.allow_update)
        self.assertFalse(sot.allow_delete)
        self.assertTrue(sot.allow_list)

    def test_make_new(self):
        # New-style (v2) field names map directly onto resource attributes.
        sot = sample.Sample(SAMPLE)
        self.assertIsNone(sot.id)
        self.assertEqual(SAMPLE['metadata'], sot.metadata)
        self.assertEqual(SAMPLE['meter'], sot.meter)
        self.assertEqual(SAMPLE['project_id'], sot.project_id)
        self.assertEqual(SAMPLE['recorded_at'], sot.recorded_at)
        self.assertEqual(SAMPLE['resource_id'], sot.resource_id)
        self.assertIsNone(sot.sample_id)
        self.assertEqual(SAMPLE['source'], sot.source)
        self.assertEqual(SAMPLE['timestamp'], sot.generated_at)
        self.assertEqual(SAMPLE['type'], sot.type)
        self.assertEqual(SAMPLE['unit'], sot.unit)
        self.assertEqual(SAMPLE['user_id'], sot.user_id)
        self.assertEqual(SAMPLE['volume'], sot.volume)

    def test_make_old(self):
        # Legacy counter_* field names are translated to the new attributes.
        sot = sample.Sample(OLD_SAMPLE)
        self.assertIsNone(sot.id)
        self.assertIsNone(sot.sample_id),  # NOTE: stray trailing comma — harmless tuple expression
        self.assertEqual(OLD_SAMPLE['counter_name'], sot.meter)
        self.assertEqual(OLD_SAMPLE['counter_type'], sot.type)
        self.assertEqual(OLD_SAMPLE['counter_unit'], sot.unit)
        self.assertEqual(OLD_SAMPLE['counter_volume'], sot.volume)
        self.assertEqual(OLD_SAMPLE['project_id'], sot.project_id)
        self.assertEqual(OLD_SAMPLE['recorded_at'], sot.recorded_at)
        self.assertEqual(OLD_SAMPLE['resource_id'], sot.resource_id)
        self.assertEqual(OLD_SAMPLE['resource_metadata'], sot.metadata)
        self.assertEqual(OLD_SAMPLE['source'], sot.source)
        self.assertEqual(OLD_SAMPLE['timestamp'], sot.generated_at)
        self.assertEqual(OLD_SAMPLE['user_id'], sot.user_id)

    def test_list(self):
        # list() should GET the meter path and build one Sample per record.
        sess = mock.Mock()
        resp = mock.Mock()
        resp.body = [SAMPLE, OLD_SAMPLE]
        sess.get = mock.Mock(return_value=resp)
        path_args = {'meter': 'name_of_meter'}

        found = sample.Sample.list(sess, path_args=path_args)
        self.assertEqual(2, len(found))
        first = found[0]
        self.assertIsNone(first.id)
        self.assertIsNone(first.sample_id)
        self.assertEqual(SAMPLE['metadata'], first.metadata)
        self.assertEqual(SAMPLE['meter'], first.meter)
        self.assertEqual(SAMPLE['project_id'], first.project_id)
        self.assertEqual(SAMPLE['recorded_at'], first.recorded_at)
        self.assertEqual(SAMPLE['resource_id'], first.resource_id)
        self.assertEqual(SAMPLE['source'], first.source)
        self.assertEqual(SAMPLE['timestamp'], first.generated_at)
        self.assertEqual(SAMPLE['type'], first.type)
        self.assertEqual(SAMPLE['unit'], first.unit)
        self.assertEqual(SAMPLE['user_id'], first.user_id)
        self.assertEqual(SAMPLE['volume'], first.volume)

    def test_create(self):
        # create() should POST the sample body wrapped in a one-element list
        # and leave the (server-assigned) id unset.
        sess = mock.Mock()
        resp = mock.Mock()
        resp.body = [SAMPLE]
        sess.post = mock.Mock(return_value=resp)

        data = {'id': None,
                'meter': 'temperature',
                'project_id': 'project',
                'resource_id': 'resource',
                'type': 'gauge',
                'unit': 'instance',
                'volume': '98.6'}

        new_sample = sample.Sample.new(**data)
        new_sample.create(sess)

        url = '/meters/temperature'
        sess.post.assert_called_with(url, service=new_sample.service,
                                     json=[data])
        self.assertIsNone(new_sample.id)
| [
"mock.Mock",
"openstack.telemetry.v2.sample.Sample.list",
"openstack.telemetry.v2.sample.Sample",
"openstack.telemetry.v2.sample.Sample.new"
] | [((1269, 1290), 'openstack.telemetry.v2.sample.Sample', 'sample.Sample', (['SAMPLE'], {}), '(SAMPLE)\n', (1282, 1290), False, 'from openstack.telemetry.v2 import sample\n'), ((1761, 1782), 'openstack.telemetry.v2.sample.Sample', 'sample.Sample', (['SAMPLE'], {}), '(SAMPLE)\n', (1774, 1782), False, 'from openstack.telemetry.v2 import sample\n'), ((2540, 2565), 'openstack.telemetry.v2.sample.Sample', 'sample.Sample', (['OLD_SAMPLE'], {}), '(OLD_SAMPLE)\n', (2553, 2565), False, 'from openstack.telemetry.v2 import sample\n'), ((3405, 3416), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3414, 3416), False, 'import mock\n'), ((3432, 3443), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3441, 3443), False, 'import mock\n'), ((3504, 3532), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'resp'}), '(return_value=resp)\n', (3513, 3532), False, 'import mock\n'), ((3597, 3642), 'openstack.telemetry.v2.sample.Sample.list', 'sample.Sample.list', (['sess'], {'path_args': 'path_args'}), '(sess, path_args=path_args)\n', (3615, 3642), False, 'from openstack.telemetry.v2 import sample\n'), ((4490, 4501), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4499, 4501), False, 'import mock\n'), ((4517, 4528), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4526, 4528), False, 'import mock\n'), ((4578, 4606), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'resp'}), '(return_value=resp)\n', (4587, 4606), False, 'import mock\n'), ((4884, 4909), 'openstack.telemetry.v2.sample.Sample.new', 'sample.Sample.new', ([], {}), '(**data)\n', (4901, 4909), False, 'from openstack.telemetry.v2 import sample\n')] |
# -*- coding: utf8 -*-
"""
======================================
Project Name: NLP
File Name: linears
Author: czh
Create Date: 2021/11/15
--------------------------------------
Change Activity:
======================================
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.nn.parameter import Parameter
class Linears(nn.Module):
    """Additive pairwise scorer between two sequences.

    Both inputs are linearly projected, combined additively across every
    pair of positions, passed through tanh, and mapped to ``output_dim``
    scores per pair.
    """

    def __init__(self, input_dim: int, output_dim: int = 1, bias: bool = True):
        super().__init__()
        self.fn1 = nn.Linear(input_dim, input_dim)
        self.fn2 = nn.Linear(input_dim, input_dim)
        self.fn3 = nn.Linear(input_dim, output_dim, bias=bias)
        # Orthogonal init for all three projection matrices (same order as
        # construction, so the RNG stream is unchanged).
        for layer in (self.fn1, self.fn2, self.fn3):
            nn.init.orthogonal_(layer.weight, gain=1)

    def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor):
        # For 3-D inputs: (bsz, len1, 1, dim) + (bsz, 1, len2, dim)
        # broadcasts to (bsz, len1, len2, dim).
        left = self.fn1(hidden_states).unsqueeze(2)
        right = self.fn2(encoder_hidden_states).unsqueeze(1)
        pair_scores = self.fn3(torch.tanh(left + right))
        return pair_scores.squeeze()
class EntityLinears(nn.Module):
    """Pair of Linears scorers producing head and tail logits."""

    def __init__(self, input_dim: int, output_dim: int = 1, bias: bool = True):
        super().__init__()
        self.head = Linears(input_dim=input_dim, output_dim=output_dim, bias=bias)
        self.tail = Linears(input_dim=input_dim, output_dim=output_dim, bias=bias)

    def forward(self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor):
        # Each element: [bsz, num_triples, seq_len, output_dim] (per the
        # original author's note).
        head_logits = self.head(hidden_states, encoder_hidden_states)
        tail_logits = self.tail(hidden_states, encoder_hidden_states)
        return head_logits, tail_logits
class FeedForwardNetwork(nn.Module):
    """Two-layer MLP: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, input_size, hidden_size, output_size, dropout_rate=0):
        super().__init__()
        self.dropout_rate = dropout_rate
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        hidden = func.relu(self.linear1(x))
        # Dropout only fires in training mode (and when dropout_rate > 0).
        hidden = func.dropout(hidden, p=self.dropout_rate, training=self.training)
        return self.linear2(hidden)
class PoolerStartLogits(nn.Module):
    """Span-extraction start head (bert_ner_span): one linear projection."""

    def __init__(self, hidden_size, num_classes):
        super().__init__()
        self.dense = nn.Linear(hidden_size, num_classes)

    def forward(self, hidden_states):
        return self.dense(hidden_states)
class PoolerEndLogits(nn.Module):
    """Span-extraction end head (bert_ner_span).

    Conditions the end-token scores on the start-position representation
    by concatenating it to the hidden states before projecting.
    """

    def __init__(self, hidden_size, num_classes):
        super(PoolerEndLogits, self).__init__()
        # NOTE(review): forward() feeds dense_0 the concatenation of
        # hidden_states and start_positions along the last dim, so the
        # `hidden_size` passed here must already equal that combined width
        # (not just the encoder hidden size) — confirm against callers.
        self.dense_0 = nn.Linear(hidden_size, hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(hidden_size)
        self.dense_1 = nn.Linear(hidden_size, num_classes)

    def forward(self, hidden_states, start_positions=None):
        # start_positions is effectively required despite the None default:
        # torch.cat would fail on None.
        x = self.dense_0(torch.cat([hidden_states, start_positions], dim=-1))
        x = self.activation(x)
        x = self.LayerNorm(x)
        x = self.dense_1(x)
        return x
class MultiNonLinearClassifier(nn.Module):
    """Two-layer classification head with a configurable activation.

    hidden_size -> intermediate_hidden_size -> num_label, with the chosen
    activation (gelu/relu/tanh) and dropout between the two linear layers.
    """

    def __init__(self, hidden_size, num_label, dropout_rate, act_func="gelu", intermediate_hidden_size=None):
        super().__init__()
        self.num_label = num_label
        if intermediate_hidden_size is None:
            self.intermediate_hidden_size = hidden_size
        else:
            self.intermediate_hidden_size = intermediate_hidden_size
        self.classifier1 = nn.Linear(hidden_size, self.intermediate_hidden_size)
        self.classifier2 = nn.Linear(self.intermediate_hidden_size, self.num_label)
        self.dropout = nn.Dropout(dropout_rate)
        self.act_func = act_func

    def forward(self, input_features):
        hidden = self.classifier1(input_features)
        activation = {"gelu": func.gelu, "relu": func.relu, "tanh": func.tanh}.get(self.act_func)
        if activation is None:
            # Unknown activation name: same failure mode as the elif chain.
            raise ValueError
        hidden = self.dropout(activation(hidden))
        return self.classifier2(hidden)
class SingleLinearClassifier(nn.Module):
    """Single linear projection used as a classification head."""

    def __init__(self, hidden_size, num_label):
        super().__init__()
        self.num_label = num_label
        self.classifier = nn.Linear(hidden_size, num_label)

    def forward(self, input_features):
        return self.classifier(input_features)
class BERTTaggerClassifier(nn.Module):
    """Tagger head: two linear layers with activation and dropout between.

    Structurally the same as MultiNonLinearClassifier.
    """

    def __init__(self, hidden_size, num_label, dropout_rate, act_func="gelu", intermediate_hidden_size=None):
        super().__init__()
        self.num_label = num_label
        self.intermediate_hidden_size = intermediate_hidden_size if intermediate_hidden_size is not None else hidden_size
        self.classifier1 = nn.Linear(hidden_size, self.intermediate_hidden_size)
        self.classifier2 = nn.Linear(self.intermediate_hidden_size, self.num_label)
        self.dropout = nn.Dropout(dropout_rate)
        self.act_func = act_func

    def forward(self, input_features):
        hidden = self.classifier1(input_features)
        if self.act_func == "gelu":
            hidden = func.gelu(hidden)
        elif self.act_func == "relu":
            hidden = func.relu(hidden)
        elif self.act_func == "tanh":
            hidden = func.tanh(hidden)
        else:
            raise ValueError
        return self.classifier2(self.dropout(hidden))
class ClassifierLayer(nn.Module):
    """Per-class linear scorer: one weight row (and optional bias) per class.

    From https://github.com/Akeepers/LEAR/blob/master/utils/model_utils.py
    """

    def __init__(self, class_num, out_features, bias=True):
        super().__init__()
        self.class_num = class_num
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(class_num, out_features))
        if bias:
            self.bias = Parameter(torch.Tensor(class_num))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        # Same initialization scheme nn.Linear uses.
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, inputs):
        # Broadcasted elementwise product with the per-class weights,
        # reduced over the feature axis -> one score per class.
        scores = (inputs * self.weight).sum(-1)
        if self.bias is not None:
            scores = scores + self.bias
        return scores

    def extra_repr(self):
        return f'class_num={self.class_num}, out_features={self.out_features}, bias={self.bias is not None}'
class MultiNonLinearClassifierForMultiLabel(nn.Module):
# https://github.com/Akeepers/LEAR/blob/master/utils/model_utils.py
def __init__(self, hidden_size, num_label, dropout_rate):
super(MultiNonLinearClassifierForMultiLabel, self).__init__()
self.num_label = num_label
self.classifier1 = nn.Linear(hidden_size, hidden_size)
self.classifier2 = ClassifierLayer(num_label, hidden_size)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, input_features):
features_output1 = self.classifier1(input_features)
features_output1 = func.gelu(features_output1)
features_output1 = self.dropout(features_output1)
features_output2 = self.classifier2(features_output1)
return features_output2
| [
"torch.mul",
"torch.nn.functional.tanh",
"torch.nn.Dropout",
"torch.nn.Tanh",
"torch.nn.LayerNorm",
"torch.nn.functional.gelu",
"torch.Tensor",
"math.sqrt",
"torch.nn.init.orthogonal_",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.sum",
"torch.nn.Linear",
"torch.nn.functional.relu",... | [((537, 568), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'input_dim'], {}), '(input_dim, input_dim)\n', (546, 568), True, 'import torch.nn as nn\n'), ((588, 619), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'input_dim'], {}), '(input_dim, input_dim)\n', (597, 619), True, 'import torch.nn as nn\n'), ((639, 682), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'output_dim'], {'bias': 'bias'}), '(input_dim, output_dim, bias=bias)\n', (648, 682), True, 'import torch.nn as nn\n'), ((692, 736), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.fn1.weight'], {'gain': '(1)'}), '(self.fn1.weight, gain=1)\n', (711, 736), True, 'import torch.nn as nn\n'), ((745, 789), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.fn2.weight'], {'gain': '(1)'}), '(self.fn2.weight, gain=1)\n', (764, 789), True, 'import torch.nn as nn\n'), ((798, 842), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.fn3.weight'], {'gain': '(1)'}), '(self.fn3.weight, gain=1)\n', (817, 842), True, 'import torch.nn as nn\n'), ((1901, 1935), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (1910, 1935), True, 'import torch.nn as nn\n'), ((1959, 1994), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1968, 1994), True, 'import torch.nn as nn\n'), ((2378, 2413), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (2387, 2413), True, 'import torch.nn as nn\n'), ((2699, 2734), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (2708, 2734), True, 'import torch.nn as nn\n'), ((2761, 2770), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2768, 2770), True, 'import torch.nn as nn\n'), ((2796, 2821), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['hidden_size'], {}), '(hidden_size)\n', (2808, 2821), True, 'import torch.nn as nn\n'), ((2845, 2880), 
'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (2854, 2880), True, 'import torch.nn as nn\n'), ((3518, 3571), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'self.intermediate_hidden_size'], {}), '(hidden_size, self.intermediate_hidden_size)\n', (3527, 3571), True, 'import torch.nn as nn\n'), ((3599, 3655), 'torch.nn.Linear', 'nn.Linear', (['self.intermediate_hidden_size', 'self.num_label'], {}), '(self.intermediate_hidden_size, self.num_label)\n', (3608, 3655), True, 'import torch.nn as nn\n'), ((3679, 3703), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (3689, 3703), True, 'import torch.nn as nn\n'), ((4528, 4561), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_label'], {}), '(hidden_size, num_label)\n', (4537, 4561), True, 'import torch.nn as nn\n'), ((5075, 5128), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'self.intermediate_hidden_size'], {}), '(hidden_size, self.intermediate_hidden_size)\n', (5084, 5128), True, 'import torch.nn as nn\n'), ((5156, 5212), 'torch.nn.Linear', 'nn.Linear', (['self.intermediate_hidden_size', 'self.num_label'], {}), '(self.intermediate_hidden_size, self.num_label)\n', (5165, 5212), True, 'import torch.nn as nn\n'), ((5236, 5260), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (5246, 5260), True, 'import torch.nn as nn\n'), ((6766, 6796), 'torch.mul', 'torch.mul', (['inputs', 'self.weight'], {}), '(inputs, self.weight)\n', (6775, 6796), False, 'import torch\n'), ((6834, 6850), 'torch.sum', 'torch.sum', (['x', '(-1)'], {}), '(x, -1)\n', (6843, 6850), False, 'import torch\n'), ((7436, 7471), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (7445, 7471), True, 'import torch.nn as nn\n'), ((7562, 7586), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (7572, 7586), True, 'import torch.nn as nn\n'), ((7714, 7741), 
'torch.nn.functional.gelu', 'func.gelu', (['features_output1'], {}), '(features_output1)\n', (7723, 7741), True, 'import torch.nn.functional as func\n'), ((2967, 3018), 'torch.cat', 'torch.cat', (['[hidden_states, start_positions]'], {'dim': '(-1)'}), '([hidden_states, start_positions], dim=-1)\n', (2976, 3018), False, 'import torch\n'), ((3904, 3931), 'torch.nn.functional.gelu', 'func.gelu', (['features_output1'], {}), '(features_output1)\n', (3913, 3931), True, 'import torch.nn.functional as func\n'), ((5461, 5488), 'torch.nn.functional.gelu', 'func.gelu', (['features_output1'], {}), '(features_output1)\n', (5470, 5488), True, 'import torch.nn.functional as func\n'), ((6202, 6239), 'torch.Tensor', 'torch.Tensor', (['class_num', 'out_features'], {}), '(class_num, out_features)\n', (6214, 6239), False, 'import torch\n'), ((6574, 6624), 'torch.nn.init._calculate_fan_in_and_fan_out', 'nn.init._calculate_fan_in_and_fan_out', (['self.weight'], {}), '(self.weight)\n', (6611, 6624), True, 'import torch.nn as nn\n'), ((6679, 6721), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.bias', '(-bound)', 'bound'], {}), '(self.bias, -bound, bound)\n', (6695, 6721), True, 'import torch.nn as nn\n'), ((4001, 4028), 'torch.nn.functional.relu', 'func.relu', (['features_output1'], {}), '(features_output1)\n', (4010, 4028), True, 'import torch.nn.functional as func\n'), ((5558, 5585), 'torch.nn.functional.relu', 'func.relu', (['features_output1'], {}), '(features_output1)\n', (5567, 5585), True, 'import torch.nn.functional as func\n'), ((6292, 6315), 'torch.Tensor', 'torch.Tensor', (['class_num'], {}), '(class_num)\n', (6304, 6315), False, 'import torch\n'), ((6502, 6514), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (6511, 6514), False, 'import math\n'), ((6649, 6666), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (6658, 6666), False, 'import math\n'), ((4098, 4125), 'torch.nn.functional.tanh', 'func.tanh', (['features_output1'], {}), '(features_output1)\n', 
(4107, 4125), True, 'import torch.nn.functional as func\n'), ((5655, 5682), 'torch.nn.functional.tanh', 'func.tanh', (['features_output1'], {}), '(features_output1)\n', (5664, 5682), True, 'import torch.nn.functional as func\n')] |
import requests
def company(SIREN):
r = requests.get('https://entreprise.data.gouv.fr/api/sirene/v2/siren/'+SIREN+'')
json_object = r.json()
settings = dict()
if json_object['sirene']['status'] == 404:
return None
if json_object['sirene']['data']['siege_social']['nom_raison_sociale'] != None :
settings['name'] = json_object['sirene']['data']['siege_social']['nom_raison_sociale']
else :
settings['name'] = ''
try :
settings['address'] = json_object['sirene']['data']['siege_social']['numero_voie'] + ' ' + json_object['sirene']['data']['siege_social']['type_voie'] + ' ' + json_object['sirene']['data']['siege_social']['libelle_voie'] + ' ' + json_object['sirene']['data']['siege_social']['code_postal'] + ' ' + json_object['sirene']['data']['siege_social']['libelle_commune']
except :
settings['address'] = ''
if json_object['sirene']['data']['total_results'] != None :
settings['group'] = json_object['sirene']['data']['total_results']
else :
settings['group'] = ''
Dictionnaire_effectifs = {'NN': "No staff members", '00': '0', '01': "1-2", '02': "3-5",
'03': "6-9", '11': "10-19", '12': "20-49", '21': "50-99", '22': "100-199",
'31': "200-249", '32': "250-499", '41': "500-999", '42': "1000-1999",
'51': "2000-4999", '52': "5000-9999", '53': "+10 000"}
if json_object['sirene']['data']['siege_social']['tranche_effectif_salarie'] != None :
settings['staff'] = Dictionnaire_effectifs[json_object['sirene']['data']['siege_social']['tranche_effectif_salarie']]
else :
settings['staff'] = ''
if json_object['sirene']['data']['siege_social']['libelle_activite_principale_entreprise'] != None :
settings['activity'] = json_object['sirene']['data']['siege_social']['libelle_activite_principale_entreprise']
else :
settings['activity'] = ''
return settings
| [
"requests.get"
] | [((48, 133), 'requests.get', 'requests.get', (["('https://entreprise.data.gouv.fr/api/sirene/v2/siren/' + SIREN + '')"], {}), "('https://entreprise.data.gouv.fr/api/sirene/v2/siren/' + SIREN +\n '')\n", (60, 133), False, 'import requests\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=unused-argument
import json
def list_peering_legacy(cmd, client,
peering_location=None,
kind=None):
return client.list(peering_location=peering_location, kind=kind)
def create_peering_asn(cmd, client,
name,
peer_asn=None,
emails=None,
phone=None,
peer_name=None,
validation_state=None):
body = {}
body['peer_asn'] = peer_asn # number
body.setdefault('peer_contact_info', {})['emails'] = None if emails is None else emails.split(',')
body.setdefault('peer_contact_info', {})['phone'] = None if phone is None else phone.split(',')
body['peer_name'] = peer_name # str
body['validation_state'] = validation_state # str
return client.create_or_update(peer_asn_name=name, peer_asn=body)
def update_peering_asn(cmd, client,
name,
peer_asn=None,
emails=None,
phone=None,
peer_name=None,
validation_state=None):
body = client.get(peer_asn_name=name).as_dict()
body.peer_asn = peer_asn # number
body.peer_contact_info.emails = None if emails is None else emails.split(',')
body.peer_contact_info.phone = None if phone is None else phone.split(',')
body.peer_name = peer_name # str
body.validation_state = validation_state # str
return client.create_or_update(peer_asn_name=name, peer_asn=body)
def delete_peering_asn(cmd, client,
name):
return client.delete(peer_asn_name=name)
def list_peering_asn(cmd, client):
return client.list_by_subscription()
def list_peering_location(cmd, client,
kind=None,
direct_peering_type=None):
return client.list(kind=kind, direct_peering_type=direct_peering_type)
def create_peering(cmd, client,
resource_group,
name,
kind,
location,
sku_name=None,
sku_tier=None,
sku_family=None,
sku_size=None,
direct_connections=None,
direct_peer_asn=None,
direct_direct_peering_type=None,
exchange_connections=None,
exchange_peer_asn=None,
peering_location=None,
tags=None):
body = {}
body.setdefault('sku', {})['name'] = sku_name # str
body.setdefault('sku', {})['tier'] = sku_tier # str
body.setdefault('sku', {})['family'] = sku_family # str
body.setdefault('sku', {})['size'] = sku_size # str
body['kind'] = kind # str
body.setdefault('direct', {})['connections'] = json.loads(direct_connections) if isinstance(direct_connections, str) else direct_connections
body.setdefault('direct', {}).setdefault('peer_asn', {})['id'] = direct_peer_asn
body.setdefault('direct', {})['direct_peering_type'] = direct_direct_peering_type # str
# body.setdefault('exchange', {})['connections'] = json.loads(exchange_connections) if isinstance(exchange_connections, str) else exchange_connections
# body.setdefault('exchange', {}).setdefault('peer_asn', {})['id'] = exchange_peer_asn
body['peering_location'] = peering_location # str
body['location'] = location # str
body['tags'] = tags # dictionary
return client.create_or_update(resource_group_name=resource_group, peering_name=name, peering=body)
def update_peering(cmd, client,
resource_group,
name,
sku_name=None,
sku_tier=None,
sku_family=None,
sku_size=None,
kind=None,
direct_connections=None,
direct_peer_asn=None,
direct_direct_peering_type=None,
exchange_connections=None,
exchange_peer_asn=None,
peering_location=None,
location=None,
tags=None):
body = client.get(resource_group_name=resource_group, peering_name=name).as_dict()
body.sku.name = sku_name # str
body.sku.tier = sku_tier # str
body.sku.family = sku_family # str
body.sku.size = sku_size # str
body.kind = kind # str
body.direct.connections = json.loads(direct_connections) if isinstance(direct_connections, str) else direct_connections
body.direct.peer_asn = direct_peer_asn
body.direct.direct_peering_type = direct_direct_peering_type # str
body.exchange.connections = json.loads(exchange_connections) if isinstance(exchange_connections, str) else exchange_connections
body.exchange.peer_asn = exchange_peer_asn
body.peering_location = peering_location # str
body.location = location # str
body.tags = tags # dictionary
return client.create_or_update(resource_group_name=resource_group, peering_name=name, peering=body)
def delete_peering(cmd, client,
resource_group,
name):
return client.delete(resource_group_name=resource_group, peering_name=name)
def list_peering(cmd, client,
resource_group):
if resource_group is not None:
return client.list_by_resource_group(resource_group_name=resource_group)
return client.list_by_subscription()
def list_peering_service_location(cmd, client):
return client.list()
def create_peering_service_prefix(cmd, client,
resource_group,
peering_service_name,
name,
prefix=None):
return client.create_or_update(resource_group_name=resource_group, peering_service_name=peering_service_name, prefix_name=name, prefix=prefix)
def update_peering_service_prefix(cmd, client,
resource_group,
peering_service_name,
name,
prefix=None):
return client.create_or_update(resource_group_name=resource_group, peering_service_name=peering_service_name, prefix_name=name, prefix=prefix)
def delete_peering_service_prefix(cmd, client,
resource_group,
peering_service_name,
name):
return client.delete(resource_group_name=resource_group, peering_service_name=peering_service_name, prefix_name=name)
def list_peering_service_prefix(cmd, client,
resource_group,
peering_service_name):
return client.list_by_peering_service(resource_group_name=resource_group, peering_service_name=peering_service_name)
def list_peering_service_provider(cmd, client):
return client.list()
def create_peering_service(cmd, client,
resource_group,
name,
location,
peering_service_location=None,
peering_service_provider=None,
tags=None):
body = {}
body['peering_service_location'] = peering_service_location # str
body['peering_service_provider'] = peering_service_provider # str
body['location'] = location # str
body['tags'] = tags # dictionary
return client.create_or_update(resource_group_name=resource_group, peering_service_name=name, peering_service=body)
def update_peering_service(cmd, client,
resource_group,
name,
peering_service_location=None,
peering_service_provider=None,
location=None,
tags=None):
body = client.get(resource_group_name=resource_group, peering_service_name=name).as_dict()
body.peering_service_location = peering_service_location # str
body.peering_service_provider = peering_service_provider # str
body.location = location # str
body.tags = tags # dictionary
return client.create_or_update(resource_group_name=resource_group, peering_service_name=name, peering_service=body)
def delete_peering_service(cmd, client,
resource_group,
name):
return client.delete(resource_group_name=resource_group, peering_service_name=name)
def list_peering_service(cmd, client,
resource_group):
if resource_group is not None:
return client.list_by_resource_group(resource_group_name=resource_group)
return client.list_by_subscription()
| [
"json.loads"
] | [((3391, 3421), 'json.loads', 'json.loads', (['direct_connections'], {}), '(direct_connections)\n', (3401, 3421), False, 'import json\n'), ((5033, 5063), 'json.loads', 'json.loads', (['direct_connections'], {}), '(direct_connections)\n', (5043, 5063), False, 'import json\n'), ((5274, 5306), 'json.loads', 'json.loads', (['exchange_connections'], {}), '(exchange_connections)\n', (5284, 5306), False, 'import json\n')] |
"""Coverage based QC calculations.
"""
import glob
import os
import subprocess
from bcbio.bam import ref, readstats, utils
from bcbio.distributed import transaction
from bcbio.heterogeneity import chromhacks
import bcbio.pipeline.datadict as dd
from bcbio.provenance import do
from bcbio.variation import coverage as cov
from bcbio.variation import bedutils
def run(bam_file, data, out_dir):
"""Run coverage QC analysis
"""
out = dict()
out_dir = utils.safe_makedir(out_dir)
if dd.get_coverage(data) and dd.get_coverage(data) not in ["None"]:
merged_bed_file = bedutils.clean_file(dd.get_coverage_merged(data), data, prefix="cov-", simple=True)
target_name = "coverage"
elif dd.get_coverage_interval(data) != "genome":
merged_bed_file = dd.get_variant_regions_merged(data)
target_name = "variant_regions"
else:
merged_bed_file = None
target_name = "genome"
avg_depth = cov.get_average_coverage(target_name, merged_bed_file, data)
if target_name == "coverage":
out_files = cov.coverage_region_detailed_stats(target_name, merged_bed_file, data, out_dir)
else:
out_files = []
out['Avg_coverage'] = avg_depth
samtools_stats_dir = os.path.join(out_dir, os.path.pardir, 'samtools')
from bcbio.qc import samtools
samtools_stats = samtools.run(bam_file, data, samtools_stats_dir)["metrics"]
out["Total_reads"] = total_reads = int(samtools_stats["Total_reads"])
out["Mapped_reads"] = mapped = int(samtools_stats["Mapped_reads"])
out["Mapped_paired_reads"] = int(samtools_stats["Mapped_paired_reads"])
out['Duplicates'] = dups = int(samtools_stats["Duplicates"])
if total_reads:
out["Mapped_reads_pct"] = 100.0 * mapped / total_reads
if mapped:
out['Duplicates_pct'] = 100.0 * dups / mapped
if dd.get_coverage_interval(data) == "genome":
mapped_unique = mapped - dups
else:
mapped_unique = readstats.number_of_mapped_reads(data, bam_file, keep_dups=False)
out['Mapped_unique_reads'] = mapped_unique
if merged_bed_file:
ontarget = readstats.number_of_mapped_reads(
data, bam_file, keep_dups=False, bed_file=merged_bed_file, target_name=target_name)
out["Ontarget_unique_reads"] = ontarget
if mapped_unique:
out["Ontarget_pct"] = 100.0 * ontarget / mapped_unique
out['Offtarget_pct'] = 100.0 * (mapped_unique - ontarget) / mapped_unique
if dd.get_coverage_interval(data) != "genome":
# Skip padded calculation for WGS even if the "coverage" file is specified
# the padded statistic makes only sense for exomes and panels
padded_bed_file = bedutils.get_padded_bed_file(out_dir, merged_bed_file, 200, data)
ontarget_padded = readstats.number_of_mapped_reads(
data, bam_file, keep_dups=False, bed_file=padded_bed_file, target_name=target_name + "_padded")
out["Ontarget_padded_pct"] = 100.0 * ontarget_padded / mapped_unique
if total_reads:
out['Usable_pct'] = 100.0 * ontarget / total_reads
indexcov_files = _goleft_indexcov(bam_file, data, out_dir)
out_files += [x for x in indexcov_files if x and utils.file_exists(x)]
out = {"metrics": out}
if len(out_files) > 0:
out["base"] = out_files[0]
out["secondary"] = out_files[1:]
return out
def _goleft_indexcov(bam_file, data, out_dir):
"""Use goleft indexcov to estimate coverage distributions using BAM index.
Only used for whole genome runs as captures typically don't have enough data
to be useful for index-only summaries.
"""
if not dd.get_coverage_interval(data) == "genome":
return []
out_dir = utils.safe_makedir(os.path.join(out_dir, "indexcov"))
out_files = [os.path.join(out_dir, "%s-indexcov.%s" % (dd.get_sample_name(data), ext))
for ext in ["roc", "ped", "bed.gz"]]
if not utils.file_uptodate(out_files[-1], bam_file):
with transaction.tx_tmpdir(data) as tmp_dir:
tmp_dir = utils.safe_makedir(os.path.join(tmp_dir, dd.get_sample_name(data)))
gender_chroms = [x.name for x in ref.file_contigs(dd.get_ref_file(data)) if chromhacks.is_sex(x.name)]
gender_args = "--sex %s" % (",".join(gender_chroms)) if gender_chroms else ""
cmd = "goleft indexcov --directory {tmp_dir} {gender_args} -- {bam_file}"
try:
do.run(cmd.format(**locals()), "QC: goleft indexcov")
except subprocess.CalledProcessError as msg:
if not ("indexcov: no usable" in str(msg) or
("indexcov: expected" in str(msg) and "sex chromosomes, found:" in str(msg))):
raise
for out_file in out_files:
orig_file = os.path.join(tmp_dir, os.path.basename(out_file))
if utils.file_exists(orig_file):
utils.copy_plus(orig_file, out_file)
# MultiQC needs non-gzipped/BED inputs so unpack the file
out_bed = out_files[-1].replace(".bed.gz", ".tsv")
if utils.file_exists(out_files[-1]) and not utils.file_exists(out_bed):
with transaction.file_transaction(data, out_bed) as tx_out_bed:
cmd = "gunzip -c %s > %s" % (out_files[-1], tx_out_bed)
do.run(cmd, "Unpack indexcov BED file")
out_files[-1] = out_bed
return [x for x in out_files if utils.file_exists(x)]
| [
"bcbio.bam.readstats.number_of_mapped_reads",
"bcbio.distributed.transaction.tx_tmpdir",
"bcbio.bam.utils.safe_makedir",
"bcbio.bam.utils.file_exists",
"bcbio.heterogeneity.chromhacks.is_sex",
"bcbio.qc.samtools.run",
"bcbio.variation.coverage.coverage_region_detailed_stats",
"bcbio.pipeline.datadict.... | [((466, 493), 'bcbio.bam.utils.safe_makedir', 'utils.safe_makedir', (['out_dir'], {}), '(out_dir)\n', (484, 493), False, 'from bcbio.bam import ref, readstats, utils\n'), ((953, 1013), 'bcbio.variation.coverage.get_average_coverage', 'cov.get_average_coverage', (['target_name', 'merged_bed_file', 'data'], {}), '(target_name, merged_bed_file, data)\n', (977, 1013), True, 'from bcbio.variation import coverage as cov\n'), ((1244, 1293), 'os.path.join', 'os.path.join', (['out_dir', 'os.path.pardir', '"""samtools"""'], {}), "(out_dir, os.path.pardir, 'samtools')\n", (1256, 1293), False, 'import os\n'), ((501, 522), 'bcbio.pipeline.datadict.get_coverage', 'dd.get_coverage', (['data'], {}), '(data)\n', (516, 522), True, 'import bcbio.pipeline.datadict as dd\n'), ((1068, 1147), 'bcbio.variation.coverage.coverage_region_detailed_stats', 'cov.coverage_region_detailed_stats', (['target_name', 'merged_bed_file', 'data', 'out_dir'], {}), '(target_name, merged_bed_file, data, out_dir)\n', (1102, 1147), True, 'from bcbio.variation import coverage as cov\n'), ((1349, 1397), 'bcbio.qc.samtools.run', 'samtools.run', (['bam_file', 'data', 'samtools_stats_dir'], {}), '(bam_file, data, samtools_stats_dir)\n', (1361, 1397), False, 'from bcbio.qc import samtools\n'), ((1857, 1887), 'bcbio.pipeline.datadict.get_coverage_interval', 'dd.get_coverage_interval', (['data'], {}), '(data)\n', (1881, 1887), True, 'import bcbio.pipeline.datadict as dd\n'), ((1973, 2038), 'bcbio.bam.readstats.number_of_mapped_reads', 'readstats.number_of_mapped_reads', (['data', 'bam_file'], {'keep_dups': '(False)'}), '(data, bam_file, keep_dups=False)\n', (2005, 2038), False, 'from bcbio.bam import ref, readstats, utils\n'), ((2130, 2251), 'bcbio.bam.readstats.number_of_mapped_reads', 'readstats.number_of_mapped_reads', (['data', 'bam_file'], {'keep_dups': '(False)', 'bed_file': 'merged_bed_file', 'target_name': 'target_name'}), '(data, bam_file, keep_dups=False, bed_file=\n 
merged_bed_file, target_name=target_name)\n', (2162, 2251), False, 'from bcbio.bam import ref, readstats, utils\n'), ((3821, 3854), 'os.path.join', 'os.path.join', (['out_dir', '"""indexcov"""'], {}), "(out_dir, 'indexcov')\n", (3833, 3854), False, 'import os\n'), ((4012, 4056), 'bcbio.bam.utils.file_uptodate', 'utils.file_uptodate', (['out_files[-1]', 'bam_file'], {}), '(out_files[-1], bam_file)\n', (4031, 4056), False, 'from bcbio.bam import ref, readstats, utils\n'), ((5173, 5205), 'bcbio.bam.utils.file_exists', 'utils.file_exists', (['out_files[-1]'], {}), '(out_files[-1])\n', (5190, 5205), False, 'from bcbio.bam import ref, readstats, utils\n'), ((527, 548), 'bcbio.pipeline.datadict.get_coverage', 'dd.get_coverage', (['data'], {}), '(data)\n', (542, 548), True, 'import bcbio.pipeline.datadict as dd\n'), ((612, 640), 'bcbio.pipeline.datadict.get_coverage_merged', 'dd.get_coverage_merged', (['data'], {}), '(data)\n', (634, 640), True, 'import bcbio.pipeline.datadict as dd\n'), ((718, 748), 'bcbio.pipeline.datadict.get_coverage_interval', 'dd.get_coverage_interval', (['data'], {}), '(data)\n', (742, 748), True, 'import bcbio.pipeline.datadict as dd\n'), ((788, 823), 'bcbio.pipeline.datadict.get_variant_regions_merged', 'dd.get_variant_regions_merged', (['data'], {}), '(data)\n', (817, 823), True, 'import bcbio.pipeline.datadict as dd\n'), ((3726, 3756), 'bcbio.pipeline.datadict.get_coverage_interval', 'dd.get_coverage_interval', (['data'], {}), '(data)\n', (3750, 3756), True, 'import bcbio.pipeline.datadict as dd\n'), ((4071, 4098), 'bcbio.distributed.transaction.tx_tmpdir', 'transaction.tx_tmpdir', (['data'], {}), '(data)\n', (4092, 4098), False, 'from bcbio.distributed import transaction\n'), ((5214, 5240), 'bcbio.bam.utils.file_exists', 'utils.file_exists', (['out_bed'], {}), '(out_bed)\n', (5231, 5240), False, 'from bcbio.bam import ref, readstats, utils\n'), ((5255, 5298), 'bcbio.distributed.transaction.file_transaction', 'transaction.file_transaction', 
(['data', 'out_bed'], {}), '(data, out_bed)\n', (5283, 5298), False, 'from bcbio.distributed import transaction\n'), ((5394, 5433), 'bcbio.provenance.do.run', 'do.run', (['cmd', '"""Unpack indexcov BED file"""'], {}), "(cmd, 'Unpack indexcov BED file')\n", (5400, 5433), False, 'from bcbio.provenance import do\n'), ((5498, 5518), 'bcbio.bam.utils.file_exists', 'utils.file_exists', (['x'], {}), '(x)\n', (5515, 5518), False, 'from bcbio.bam import ref, readstats, utils\n'), ((2502, 2532), 'bcbio.pipeline.datadict.get_coverage_interval', 'dd.get_coverage_interval', (['data'], {}), '(data)\n', (2526, 2532), True, 'import bcbio.pipeline.datadict as dd\n'), ((2749, 2814), 'bcbio.variation.bedutils.get_padded_bed_file', 'bedutils.get_padded_bed_file', (['out_dir', 'merged_bed_file', '(200)', 'data'], {}), '(out_dir, merged_bed_file, 200, data)\n', (2777, 2814), False, 'from bcbio.variation import bedutils\n'), ((2849, 2982), 'bcbio.bam.readstats.number_of_mapped_reads', 'readstats.number_of_mapped_reads', (['data', 'bam_file'], {'keep_dups': '(False)', 'bed_file': 'padded_bed_file', 'target_name': "(target_name + '_padded')"}), "(data, bam_file, keep_dups=False, bed_file=\n padded_bed_file, target_name=target_name + '_padded')\n", (2881, 2982), False, 'from bcbio.bam import ref, readstats, utils\n'), ((3288, 3308), 'bcbio.bam.utils.file_exists', 'utils.file_exists', (['x'], {}), '(x)\n', (3305, 3308), False, 'from bcbio.bam import ref, readstats, utils\n'), ((4962, 4990), 'bcbio.bam.utils.file_exists', 'utils.file_exists', (['orig_file'], {}), '(orig_file)\n', (4979, 4990), False, 'from bcbio.bam import ref, readstats, utils\n'), ((3915, 3939), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (3933, 3939), True, 'import bcbio.pipeline.datadict as dd\n'), ((4174, 4198), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (4192, 4198), True, 'import bcbio.pipeline.datadict as dd\n'), ((4289, 
4314), 'bcbio.heterogeneity.chromhacks.is_sex', 'chromhacks.is_sex', (['x.name'], {}), '(x.name)\n', (4306, 4314), False, 'from bcbio.heterogeneity import chromhacks\n'), ((4915, 4941), 'os.path.basename', 'os.path.basename', (['out_file'], {}), '(out_file)\n', (4931, 4941), False, 'import os\n'), ((5012, 5048), 'bcbio.bam.utils.copy_plus', 'utils.copy_plus', (['orig_file', 'out_file'], {}), '(orig_file, out_file)\n', (5027, 5048), False, 'from bcbio.bam import ref, readstats, utils\n'), ((4263, 4284), 'bcbio.pipeline.datadict.get_ref_file', 'dd.get_ref_file', (['data'], {}), '(data)\n', (4278, 4284), True, 'import bcbio.pipeline.datadict as dd\n')] |
from concepts.letter_addition import LetterAddition
from learners.sim_memoryless_learner import SimMemorylessLearner
def test_see_example():
concept = LetterAddition(6)
learner = SimMemorylessLearner(concept, list(range(0, 7)))
learner.see_example(((0, 1), 10))
| [
"concepts.letter_addition.LetterAddition"
] | [((157, 174), 'concepts.letter_addition.LetterAddition', 'LetterAddition', (['(6)'], {}), '(6)\n', (171, 174), False, 'from concepts.letter_addition import LetterAddition\n')] |
import logging
import pandas as pd
from datetime import datetime
from typing import (
Any,
Callable,
Dict,
Hashable,
Iterable,
List,
NamedTuple,
Optional,
Pattern,
Set,
Tuple,
Union,
)
logger = logging.getLogger(__name__)
# add_jde_batch() {{{1
def add_jde_batch(df: pd.DataFrame,
col_prefix: str = 'ed',
userid: str = 'userid',
batch: str = 'ABC',
start: int = 100,
step: int = 100) -> pd.DataFrame:
''' Add 'standard' JDE timestamp/default columns.
For given dataframe, adds the following standard Z-file columns.
User ID (edus)
Batch Number (edbt)
Transaction Number (edtn)
Line Number (edln)
Examples
--------
from piper.defaults import *
from piper.jde import *
.. code-block:
%%piper
sample_sales() >>
select('-target_profit', '-location', '-month') >>
reset_index(drop=True) >>
add_jde_batch(start=3) >>
head(tablefmt='plain')
edus edbt edtn edln product target_sales actual_sales actual_profit
0 userid ABC_20210331 1 3 Beachwear 31749 29209 1753
1 userid ABC_20210331 1 103 Beachwear 37833 34050 5448
2 userid ABC_20210331 1 203 Jeans 29485 31549 4417
3 userid ABC_20210331 1 303 Jeans 37524 40901 4090
Parameters
----------
df : the pandas dataframe object
col_prefix : 2 character (e.g. 'ed') column name prefix to be
applied to the added columns
userid : default userid text value
batch : 2 character prefix to concatenated to current timestamp
trans_no : start number in xxln column
step : step increment in xxln column
Returns
-------
A pandas dataframe
'''
timestamp = datetime.now().strftime('_%Y%m%d')
start_position = 0
range_seq = range(start, (df.shape[0]+1)*step, step)
df.insert(start_position, f'{col_prefix}us', userid)
df.insert(start_position+1, f'{col_prefix}bt', batch + timestamp)
df.insert(start_position+2, f'{col_prefix}tn', 1)
df.insert(start_position+3, f'{col_prefix}ln', pd.Series(range_seq))
return df
| [
"logging.getLogger",
"datetime.datetime.now",
"pandas.Series"
] | [((265, 292), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (282, 292), False, 'import logging\n'), ((2496, 2516), 'pandas.Series', 'pd.Series', (['range_seq'], {}), '(range_seq)\n', (2505, 2516), True, 'import pandas as pd\n'), ((2137, 2151), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2149, 2151), False, 'from datetime import datetime\n')] |
import os
import shutil
import logging
class BColors(object):
HEADER = "\033[95m"
OK_BLUE = "\033[94m"
OK_CYAN = "\033[96m"
OK_GREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
END_C = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def touch(fname: str, times=None, create_dirs: bool = False):
if create_dirs:
base_dir = os.path.dirname(fname)
if not os.path.exists(base_dir):
os.makedirs(base_dir)
with open(fname, "a"):
os.utime(fname, times)
def touch_dir(base_dir: str) -> None:
if not os.path.exists(base_dir):
os.makedirs(base_dir)
def accuracy(output, target, topk=(1,)):
    """Compute precision@k of *output* scores for each k in *topk*.

    Args:
        output: Tensor of shape (batch, num_classes) with class scores.
        target: Tensor of shape (batch,) with ground-truth class indices.
        topk: Iterable of k values to evaluate.

    Returns:
        List of 0-dim tensors, one per k, holding precision in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): a row slice is not guaranteed
        # contiguous, and .view() raises on non-contiguous input in recent
        # PyTorch releases; reshape is identical when the slice is contiguous.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def top1_accuracy(output, target):
    """Top-1 precision (percent) as a plain Python float."""
    (top1,) = accuracy(output, target, topk=(1,))
    return top1.item()
def log(*args, **kwargs):
    """Logging hook stub: intentionally accepts and discards all arguments."""
    pass
def log_dict(*args, **kwargs):
    """Structured-logging hook stub: intentionally a no-op."""
    pass
def initialize_logger(log_root):
    """Reset *log_root* and configure the "stats" and "debug" loggers.

    The directory is wiped and recreated.  The "stats" logger writes one
    message per line to <log_root>/stats (easy to post-process); the
    "debug" logger echoes to the console and also to <log_root>/debug.
    """
    if os.path.exists(log_root):
        shutil.rmtree(log_root)
    os.makedirs(log_root)
    print(f"Logging files to {log_root}")
    plain = logging.Formatter("%(message)s")
    # "stats": file only; one dict per line; easy to process
    stats_file = logging.FileHandler(os.path.join(log_root, "stats"))
    stats_file.setLevel(logging.INFO)
    stats_file.setFormatter(plain)
    stats_logger = logging.getLogger("stats")
    stats_logger.setLevel(logging.INFO)
    stats_logger.addHandler(stats_file)
    # "debug": console plus a raw (unformatted) file copy
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(plain)
    debug_logger = logging.getLogger("debug")
    debug_logger.setLevel(logging.INFO)
    debug_logger.addHandler(console)
    debug_file = logging.FileHandler(os.path.join(log_root, "debug"))
    debug_file.setLevel(logging.INFO)
    debug_logger.addHandler(debug_file)
| [
"logging.getLogger",
"os.path.exists",
"logging.StreamHandler",
"os.makedirs",
"logging.Formatter",
"os.path.join",
"os.utime",
"os.path.dirname",
"shutil.rmtree"
] | [((1556, 1582), 'logging.getLogger', 'logging.getLogger', (['"""stats"""'], {}), "('stats')\n", (1573, 1582), False, 'import logging\n'), ((1819, 1845), 'logging.getLogger', 'logging.getLogger', (['"""debug"""'], {}), "('debug')\n", (1836, 1845), False, 'import logging\n'), ((1895, 1918), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1916, 1918), False, 'import logging\n'), ((383, 405), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (398, 405), False, 'import os\n'), ((516, 538), 'os.utime', 'os.utime', (['fname', 'times'], {}), '(fname, times)\n', (524, 538), False, 'import os\n'), ((590, 614), 'os.path.exists', 'os.path.exists', (['base_dir'], {}), '(base_dir)\n', (604, 614), False, 'import os\n'), ((624, 645), 'os.makedirs', 'os.makedirs', (['base_dir'], {}), '(base_dir)\n', (635, 645), False, 'import os\n'), ((1311, 1335), 'os.path.exists', 'os.path.exists', (['log_root'], {}), '(log_root)\n', (1325, 1335), False, 'import os\n'), ((1345, 1366), 'os.makedirs', 'os.makedirs', (['log_root'], {}), '(log_root)\n', (1356, 1366), False, 'import os\n'), ((1385, 1408), 'shutil.rmtree', 'shutil.rmtree', (['log_root'], {}), '(log_root)\n', (1398, 1408), False, 'import shutil\n'), ((1417, 1438), 'os.makedirs', 'os.makedirs', (['log_root'], {}), '(log_root)\n', (1428, 1438), False, 'import os\n'), ((1651, 1682), 'os.path.join', 'os.path.join', (['log_root', '"""stats"""'], {}), "(log_root, 'stats')\n", (1663, 1682), False, 'import os\n'), ((1734, 1766), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (1751, 1766), False, 'import logging\n'), ((1969, 2001), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (1986, 2001), False, 'import logging\n'), ((2064, 2095), 'os.path.join', 'os.path.join', (['log_root', '"""debug"""'], {}), "(log_root, 'debug')\n", (2076, 2095), False, 'import os\n'), ((421, 445), 'os.path.exists', 'os.path.exists', 
(['base_dir'], {}), '(base_dir)\n', (435, 445), False, 'import os\n'), ((459, 480), 'os.makedirs', 'os.makedirs', (['base_dir'], {}), '(base_dir)\n', (470, 480), False, 'import os\n')] |
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from setuptools import setup
import os
import re
import shutil
# matches a line of the form: __version__ = "x.y.z"
_versionRE = re.compile(r'__version__\s*=\s*\"([^\"]+)\"')
# read the version number from the settings file so it lives in one place
with open('drawBot/drawBotSettings.py', "r") as settings:
    code = settings.read()
found = _versionRE.search(code)
assert found is not None, "drawBot __version__ not found"
__version__ = found.group(1)
# bundled command-line helper binaries shipped inside the package
externalTools = ("ffmpeg", "gifsicle", "mkbitmap", "potrace")
externalToolsSourceRoot = os.path.join(os.path.dirname(__file__), "Resources", "externalTools")
externalToolsDestRoot = os.path.join(os.path.dirname(__file__), "drawBot", "context", "tools")
# copy all external tools into the drawBot.context.tools folder for packaging
for externalTool in externalTools:
    source = os.path.join(externalToolsSourceRoot, externalTool)
    dest = os.path.join(externalToolsDestRoot, externalTool)
    shutil.copyfile(source, dest)
    # copyfile does not preserve permissions: mark the tool executable (rwxrwxr-x)
    os.chmod(dest, 0o775)
setup(name="drawBot",
    version=__version__,
    description="DrawBot is a powerful tool that invites you to write simple Python scripts to generate two-dimensional graphics. The builtin graphics primitives support rectangles, ovals, (bezier) paths, polygons, text objects and transparency.",
    author="<NAME>, <NAME>, <NAME>",
    author_email="<EMAIL>",
    url="http://drawbot.com",
    license="BSD",
    packages=[
        "drawBot",
        "drawBot.context",
        "drawBot.context.tools",
        "drawBot.ui"
    ],
    package_data={
        "drawBot": [
            "context/tools/ffmpeg",
            "context/tools/gifsicle",
            "context/tools/mkbitmap",
            "context/tools/potrace"
        ]
    },
    install_requires=[
        "pyobjc",
        "fontTools",
        "booleanOperations",
        "pillow"
    ],
    include_package_data=True,
    )
# remove the copied tools again so the source tree stays clean after building
for externalTool in externalTools:
    dest = os.path.join(externalToolsDestRoot, externalTool)
    os.remove(dest)
| [
"re.compile",
"os.path.join",
"setuptools.setup",
"os.chmod",
"os.path.dirname",
"shutil.copyfile",
"os.remove"
] | [((165, 214), 're.compile', 're.compile', (['"""__version__\\\\s*=\\\\s*\\\\"([^\\\\"]+)\\\\\\""""'], {}), '(\'__version__\\\\s*=\\\\s*\\\\"([^\\\\"]+)\\\\"\')\n', (175, 214), False, 'import re\n'), ((1015, 1745), 'setuptools.setup', 'setup', ([], {'name': '"""drawBot"""', 'version': '__version__', 'description': '"""DrawBot is a powerful tool that invites you to write simple Python scripts to generate two-dimensional graphics. The builtin graphics primitives support rectangles, ovals, (bezier) paths, polygons, text objects and transparency."""', 'author': '"""<NAME>, <NAME>, <NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""http://drawbot.com"""', 'license': '"""BSD"""', 'packages': "['drawBot', 'drawBot.context', 'drawBot.context.tools', 'drawBot.ui']", 'package_data': "{'drawBot': ['context/tools/ffmpeg', 'context/tools/gifsicle',\n 'context/tools/mkbitmap', 'context/tools/potrace']}", 'install_requires': "['pyobjc', 'fontTools', 'booleanOperations', 'pillow']", 'include_package_data': '(True)'}), "(name='drawBot', version=__version__, description=\n 'DrawBot is a powerful tool that invites you to write simple Python scripts to generate two-dimensional graphics. 
The builtin graphics primitives support rectangles, ovals, (bezier) paths, polygons, text objects and transparency.'\n , author='<NAME>, <NAME>, <NAME>', author_email='<EMAIL>', url=\n 'http://drawbot.com', license='BSD', packages=['drawBot',\n 'drawBot.context', 'drawBot.context.tools', 'drawBot.ui'], package_data\n ={'drawBot': ['context/tools/ffmpeg', 'context/tools/gifsicle',\n 'context/tools/mkbitmap', 'context/tools/potrace']}, install_requires=[\n 'pyobjc', 'fontTools', 'booleanOperations', 'pillow'],\n include_package_data=True)\n", (1020, 1745), False, 'from setuptools import setup\n'), ((578, 603), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (593, 603), False, 'import os\n'), ((672, 697), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (687, 697), False, 'import os\n'), ((840, 891), 'os.path.join', 'os.path.join', (['externalToolsSourceRoot', 'externalTool'], {}), '(externalToolsSourceRoot, externalTool)\n', (852, 891), False, 'import os\n'), ((903, 952), 'os.path.join', 'os.path.join', (['externalToolsDestRoot', 'externalTool'], {}), '(externalToolsDestRoot, externalTool)\n', (915, 952), False, 'import os\n'), ((957, 986), 'shutil.copyfile', 'shutil.copyfile', (['source', 'dest'], {}), '(source, dest)\n', (972, 986), False, 'import shutil\n'), ((991, 1010), 'os.chmod', 'os.chmod', (['dest', '(509)'], {}), '(dest, 509)\n', (999, 1010), False, 'import os\n'), ((1974, 2023), 'os.path.join', 'os.path.join', (['externalToolsDestRoot', 'externalTool'], {}), '(externalToolsDestRoot, externalTool)\n', (1986, 2023), False, 'import os\n'), ((2028, 2043), 'os.remove', 'os.remove', (['dest'], {}), '(dest)\n', (2037, 2043), False, 'import os\n')] |
# Copyright 2013 Google Inc. All Rights Reserved.
"""Deletes a Cloud SQL instance."""
from googlecloudapis.apitools.base import py as apitools_base
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.util import console_io
from googlecloudsdk.sql import util
class Delete(base.Command):
  """Deletes a Cloud SQL instance."""

  @staticmethod
  def Args(parser):
    """Args is called by calliope to gather arguments for this command.

    Args:
      parser: An argparse parser that you can use to add arguments that go
          on the command line after this command. Positional arguments are
          allowed.
    """
    parser.add_argument(
        'instance',
        help='Cloud SQL instance ID.')

  def Run(self, args):
    """Deletes a Cloud SQL instance.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      A dict object representing the operations resource describing the delete
      operation if the delete was successful.
    Raises:
      HttpException: A http error response was received while executing api
          request.
      ToolException: An error other than http error occured while executing the
          command.
    """
    # Clients and the resource registry are injected by the surrounding CLI.
    sql_client = self.context['sql_client']
    sql_messages = self.context['sql_messages']
    resources = self.context['registry']

    util.ValidateInstanceName(args.instance)
    instance_ref = resources.Parse(args.instance, collection='sql.instances')

    # Destructive operation: require interactive confirmation first.
    if not console_io.PromptContinue(
        'All of the instance data will be lost when the instance is deleted.'):
      return None
    try:
      result = sql_client.instances.Delete(
          sql_messages.SqlInstancesDeleteRequest(
              instance=instance_ref.instance,
              project=instance_ref.project))

      operation_ref = resources.Create(
          'sql.operations',
          operation=result.operation,
          project=instance_ref.project,
          instance=instance_ref.instance,
      )

      # Fetch the operation once (result ignored) before reporting success.
      unused_operation = sql_client.operations.Get(operation_ref.Request())
      log.DeletedResource(instance_ref)
    except apitools_base.HttpError as error:
      # Translate raw HTTP failures into the CLI's exception type.
      raise exceptions.HttpException(util.GetErrorMessage(error))

  def Display(self, unused_args, result):
    """Display prints information about what just happened to stdout.

    Args:
      unused_args: The same as the args in Run.
      result: A dict object representing the operations resource describing the
          delete operation if the delete was successful.
    """
    self.format(result)
| [
"googlecloudsdk.sql.util.ValidateInstanceName",
"googlecloudsdk.core.util.console_io.PromptContinue",
"googlecloudsdk.sql.util.GetErrorMessage",
"googlecloudsdk.core.log.DeletedResource"
] | [((1461, 1501), 'googlecloudsdk.sql.util.ValidateInstanceName', 'util.ValidateInstanceName', (['args.instance'], {}), '(args.instance)\n', (1486, 1501), False, 'from googlecloudsdk.sql import util\n'), ((1592, 1693), 'googlecloudsdk.core.util.console_io.PromptContinue', 'console_io.PromptContinue', (['"""All of the instance data will be lost when the instance is deleted."""'], {}), "(\n 'All of the instance data will be lost when the instance is deleted.')\n", (1617, 1693), False, 'from googlecloudsdk.core.util import console_io\n'), ((2192, 2225), 'googlecloudsdk.core.log.DeletedResource', 'log.DeletedResource', (['instance_ref'], {}), '(instance_ref)\n', (2211, 2225), False, 'from googlecloudsdk.core import log\n'), ((2309, 2336), 'googlecloudsdk.sql.util.GetErrorMessage', 'util.GetErrorMessage', (['error'], {}), '(error)\n', (2329, 2336), False, 'from googlecloudsdk.sql import util\n')] |
# lines 1-4 imports the necessary libraries
import pygame
import os
import random
import math
import sys
import hlp
import intro
import dsb # this is the last module with the description files
'''
declaring some global variables beacause in Python, we can set global variables that can be used in future functions
setting the variables false allows us to activate them in the game loop, or vice versa
creating empty lists as global variables allows us to access them outside of the functions they are being used
'''
# UI state flags: toggled by buttons in the event loops, read each frame
cursor = False        # True while the user is placing a new segment by hand
randomLine = False    # True when a random segment should be generated
randomTimer = True
run = False           # True while the Bentley-Ottmann sweep line is running
stop = False
start = False
clear = False         # True when all drawn state should be reset
# shared drawing state, appended to as segments/intersections are created
lines = []            # list of [(x1, y1), (x2, y2)] segment endpoint pairs
colours = []
brutecolours = []
points = []           # intersection points found so far, as (x, y) tuples
line_name = []
intersect_name = []
orderList = []
# initialise Pygame library; required before any other pygame call
pygame.init()
line_colour = pygame.Color(50, 50, 120)
# initialise a fullscreen 1280 * 550 window with a caption
display = pygame.display.set_mode((1280, 550), pygame.FULLSCREEN |
                                 pygame.DOUBLEBUF | pygame.HWSURFACE)
pygame.display.set_caption("Line Segment Intersection Visualisation Tool")
# clock limits how many frames are rendered per second
clock = pygame.time.Clock()
# cursor image shown while inserting a line segment
pointer = pygame.image.load(os.path.join("resources", "pointer.png"))
# BitterFont text used throughout the program
bitterfont = os.path.abspath("resources/bitterfont.otf")
def AddPoints(p):
    """Record intersection point *p* in the global ``points`` list.

    Duplicates are ignored so every intersection is stored exactly once.

    Args:
        p: An (x, y) tuple of integer screen coordinates.
    """
    # make sure we're referring to the module-level points list
    global points
    # a membership test replaces the previous manual scan-and-return loop
    if p not in points:
        points.append(p)
def TransValue(OldValue, oldMax, oldMin, newMax=350, newMin=0):
    """Linearly rescale *OldValue* from [oldMin, oldMax] to [newMin, newMax].

    The defaults map onto the 0-350 range used by the display grid, so
    existing three-argument calls behave exactly as before; the target
    range is now a parameter instead of a hard-coded constant.

    Args:
        OldValue: Value to convert.
        oldMax: Upper bound of the source range.
        oldMin: Lower bound of the source range.
        newMax: Upper bound of the target range (default 350).
        newMin: Lower bound of the target range (default 0).

    Returns:
        The rescaled value truncated to an int.

    Raises:
        ZeroDivisionError: If oldMax == oldMin (degenerate source range).
    """
    old_span = oldMax - oldMin
    new_span = newMax - newMin
    return int(((OldValue - oldMin) * new_span) / old_span + newMin)
def GenerateRandomLine():
    """Create one line segment with random endpoints inside the grid area.

    Endpoints are drawn as x in [51, 449] and y in [50, 449]; the four
    randrange calls happen in the same order as before (x1, y1, x2, y2),
    so the random stream is consumed identically.
    """
    start_point = (random.randrange(51, 450), random.randrange(50, 450))
    end_point = (random.randrange(51, 450), random.randrange(50, 450))
    # hand the new segment to the shared segment-registration helper
    AddNewLine([start_point, end_point])
def CheckIntersect(p1, p2, q1, q2):
    """Test whether segment p1-p2 intersects segment q1-q2.

    Solves the two line equations with Cramer's rule; when the crossing
    point lies within both segments it is recorded via AddPoints().

    Args:
        p1, p2: (x, y) endpoints of the first segment.
        q1, q2: (x, y) endpoints of the second segment.

    Returns:
        True if the segments intersect, False otherwise.  NOTE: collinear
        overlapping segments report False because the determinant is zero.
    """
    # line through p in the form a1*x + b1*y = c1
    a1 = p2[1] - p1[1]
    b1 = p1[0] - p2[0]
    c1 = a1 * p1[0] + b1 * p1[1]
    # line through q in the form a2*x + b2*y = c2
    a2 = q2[1] - q1[1]
    b2 = q1[0] - q2[0]
    c2 = a2 * q1[0] + b2 * q1[1]
    d = (a1 * b2 - a2 * b1)  # determinant; zero means parallel or same line
    if d == 0:
        # was a bare ``return`` (None); return False so callers always get a bool
        return False
    x = int((c1 * b2 - c2 * b1) / d)  # solving for x
    y = int((a1 * c2 - a2 * c1) / d)  # solving for y
    # the crossing point must lie inside both segments' bounding boxes
    if min(p1[0], p2[0]) <= x <= max(p1[0], p2[0]) and min(p1[1], p2[1]) <= y <= max(p1[1], p2[1]):
        if min(q1[0], q2[0]) <= x <= max(q1[0], q2[0]) and min(q1[1], q2[1]) <= y <= max(q1[1], q2[1]):
            AddPoints((x, y))
            return True
    return False
def BruteForceMain():
    '''
    Brute-Force algorithm screen: main display/event loop.

    Renders the grid, the drawn segments and their pairwise intersections,
    and re-tests every screen column each frame (the O(n*n) brute force).
    Runs until the user presses Back (returns to the intro) or exits.
    '''
    # module-level state shared with the other screens
    global cursor, lines, brutecolours, points, randomLine, randomTimer, run, stop, clear, intersect_name
    global display, line_name, orderList
    pygame.display.set_caption("Brute-Force Algorithm")  # window caption
    # (re)create the fullscreen surface for this screen
    display = pygame.display.set_mode((1280, 550), pygame.FULLSCREEN)
    cursor = False  # not placing a segment until "Insert Segment" is clicked
    randomLine = False  # no pending random segment yet
    clickedPos = []  # endpoints clicked so far while inserting a segment
    orderList = []  # intersection names in left-to-right order, rebuilt per frame
    efficiency = 0  # displayed O(n*n) operation count
    eventQueue = []  # kept for symmetry with the Bentley-Ottmann screen
    back = 0  # set non-zero by the Back button to leave this loop
    while True:  # frame loop
        display.fill((0, 0, 0))  # clear the frame to black
        for event in pygame.event.get():  # drain this frame's input events
            if event.type == pygame.QUIT:
                pygame.quit()  # shut pygame down cleanly
                exit()  # leave the interpreter
            # keyboard handling: ESC quits
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_ESCAPE:
                    exit()
            # while inserting, a left click inside the grid records an endpoint
            if cursor == True and event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:  # left mouse button
                    mouse_pos = pygame.mouse.get_pos()
                    # NOTE(review): mouse_pos is assigned but unused; the checks
                    # below read ``pos``, which is the cursor position saved on
                    # the PREVIOUS frame (see the cursor branch further down).
                    # This works in practice but event.pos would be correct.
                    if 50 < pos[0] < 450 and 50 < pos[1] < 450:
                        # record the click as a segment endpoint
                        clickedPos.append(pos)
            if event.type == pygame.MOUSEBUTTONUP:
                randomTimer = True  # re-arm the random-segment timer
        # draw the 40x40 background grid (both orientations per pass)
        for i in range(0, 41):
            pos = i * 10 + 50  # grid lines every 10 px starting at x/y = 50
            pygame.draw.line(display, line_colour, (50, pos), (450, pos), 1)
            pygame.draw.line(display, line_colour, (pos, 50), (pos, 450), 1)
        i = 0  # index into the per-segment colour/name lists
        for line in lines:
            # each segment keeps its own colour; line[0]/line[1] are endpoints
            pygame.draw.line(display, brutecolours[i], line[0], line[1], 1)
            # label the segment at its first endpoint
            hlp.AddText(line_name[i], line[0])
            i += 1
        orderList = []  # rebuild the left-to-right intersection order
        i = 50
        # brute-force pass: test every screen column against every known point
        while i < 450:
            j = 0
            for point in points:
                if point[0] == i:  # point's x coordinate matches this column
                    orderList.append(intersect_name[j])
                j += 1
            i += 1
        n = len(lines)  # number of segments drawn so far
        for point in points:
            # mark each intersection with a small red circle
            pygame.draw.circle(display, hlp.red, point, 3)
        efficiency = n * n  # O(n*n) pairwise-comparison count for the display
        if cursor == True:  # segment-insertion mode: draw a custom cursor
            pygame.mouse.set_visible(False)
            pos = pygame.mouse.get_pos()  # remembered for next frame's events
            # draw the pointer sprite at the mouse position
            display.blit(pointer, pos)
            if len(clickedPos) > 0:
                # first endpoint placed: preview the segment to the cursor
                pygame.draw.circle(display, hlp.white, clickedPos[0], 2)
                pygame.draw.line(display, hlp.white, clickedPos[0], pos, 1)
            if len(clickedPos) >= 2:  # both endpoints placed: commit the segment
                AddNewLine([clickedPos[0], clickedPos[1]])
                cursor = False  # leave insertion mode
                clickedPos = []  # reset for the next segment
        else:
            # normal mode: use the regular system mouse cursor
            pygame.mouse.set_visible(True)
        if randomLine == True:  # "Random Segment" button was clicked
            GenerateRandomLine()
            randomLine = False  # one segment per click
            randomTimer = False
        if clear == True:  # "Clear" button: reset every piece of drawn state
            lines = []
            colours = []
            # NOTE(review): ``colours`` is not in this function's global
            # statement, so this line only creates an unused local; the
            # module-level colours list is untouched here.
            brutecolours = []
            points = []
            orderList = []
            efficiency = 0
            eventQueue = []
            intersect_name = []
            line_name = []
            clear = False
        # static frame furniture: corner labels and control buttons
        hlp.AddText("(0,0)", (30, 25))
        hlp.AddText("(50,0)", (430, 25))
        hlp.AddText("(0,50)", (30, 450))
        hlp.AddText("(50,50)", (430, 450))
        hlp.Button("Clear", 200, 5, 100, 30, ClearActive)
        hlp.Button("Random Segment", 50, 500, 180, 30, RandomActive)
        hlp.Button("Insert Segment", 280, 500, 180, 35, CursorActive)
        hlp.Button("Exit", 500, 5, 100,
                   30, sys.exit)
        back = hlp.ButtonWithReturn("Back", 900, 5, 100, 30, 1)
        if back > 0:  # Back clicked: leave the frame loop
            break
        nxt = hlp.ButtonWithReturn("Next", 700, 5, 100, 30, 1)
        if nxt > 0:  # Next clicked: show the algorithm description page
            hlp.Description(dsb.bf_desc)
        # info panel frame on the right-hand side
        pygame.draw.rect(display, line_colour, [500, 50, 750, 490], 2)
        hlp.AddText("Brute-Force Algorithm", (520, 70))
        hlp.AddText("Order List:", (520, 120))
        # layout cursor for the order list: index plus x/y text positions
        i, o_x, o_y = 0, 540, 150
        # print the intersection names, wrapping to a new row every 14 items
        for val in orderList:
            hlp.AddText(val, (o_x, o_y), (255, 255, 255))
            o_x += 50  # advance 50 px per item
            i += 1
            if i % 14 == 0:  # row full
                o_x = 540  # carriage return
                o_y += 20  # line feed
        hlp.AddText("Efficiency O(n*n):", (520, 480))
        hlp.AddText(str(efficiency), (540, 505), (255, 255, 255))
        # present the finished frame
        pygame.display.flip()
        # cap the frame rate
        clock.tick(90)
    intro.Introduction2()  # Back pressed: return to the introduction screen
def BentleyMain():
'''
this function is the Bentley-Ottmann Algorithm function with main display loop
'''
global cursor, lines, colours, points, randomLine, randomTimer, run, stop, clear, intersect_name
# first the lines are accessing necessary global variables
global display, line_name, orderList
pygame.display.set_caption("Bentley-Ottmann Algorithm") # adding a caption
# setting the display for the algorithm
display = pygame.display.set_mode((1280, 550), pygame.FULLSCREEN)
cursor = False # until while true line, which is the main loop, lines below creating the default values
randomLine = False # again the default placeholder for the randomline
clickedPos = [] # default place holder value for position
efficiency = 0 # default place holder value for algorithm efficieny
eventQueue = [] # event queue place holder, empty now
orderList = [] # same for the order list, empty now all these values will be appended during the game loop
x = 50 # location of the x value on the screen
back = 0 # if this becomes one, you go back
while True: # starting the game loop
# pygame method to fill the screen. takes colours and a display object
display.fill((0, 0, 0))
# pygame method, iterates over the events in pygame to determine what we are doing with every event
for event in pygame.event.get():
if event.type == pygame.QUIT: # this one quits
pygame.quit() # putting the quit pygame method
exit() # takes the user from GUI to the script for exiting
# Here is to tell the computer to recognise if a keybord key is pressed.
if event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE: # if that keyboard key is ESC
exit() # call for the exit function.
'''
if mouse clicked on the below coordinates, create a line
pygame GUI property detecting when mouse click is on
MOUSEBUTTONDOWN and MOUSEBUTTONUP should be used as a small loops so that the computer can understand when that instance of the mouse movement is over
'''
if cursor == True and event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1: # pygame method defining the button in the GUI
mouse_pos = pygame.mouse.get_pos() # displays the mouse position on the screen
# pygame property pos[0] is the mouse cursor in the X axis and pos[1] is the Y axis
if 50 < pos[0] < 450 and 50 < pos[1] < 450:
# here it adds the clicked postion corresponding to the positon of the mouse
clickedPos.append(pos)
if event.type == pygame.MOUSEBUTTONUP:
randomTimer = True # turning the random from false to true so the timer can activate
for i in range(0, 41): # choosing coordinates for drawing, exiting the previous iteration, range (0,41) goes between 0 and 40
# for the pygame method of drawing below, we need to determine the position on the screen as a tuple object
pos = i * 10 + 50
# pygame method, takes display, colour, and positions of where the lines start and end. i.e, starts in (50,pos) ends in (450,pos), 1 at the end is the width of the line
pygame.draw.line(display, line_colour, (50, pos), (450, pos), 1)
# same as above but takes pos as y, by doing so and iterating through the range, you cover all the plane
pygame.draw.line(display, line_colour, (pos, 50), (pos, 450), 1)
i = 0 # index determining for data structure, taking it back to zero
for line in lines: # iterating through lines which is a global variable for the priority queue aka eventQueue
'''
having [i] next to colour allows me to colour each line differently
each line has tuple object in the global variable
line[0] accesses the nth item's first coordinates in the iteration and drawing ends in the line[1], nth item's second object
'''
pygame.draw.line(display, colours[i], line[0], line[1], 1)
# calling the addText function that was created before in the script
hlp.AddText(line_name[i], line[0])
'''
nested indexing, as I am accessing the first item of the first item in the line object which is in the lines global variable
result of this nested indexing should access a point of x- coordinated saved in a tuple
'''
if x == line[0][0]:
# if that begining point of the line's x coordinates equals to the preset x, then append the queue list with the name of this line
eventQueue.append(line_name[i])
if x == line[1][0]: # again the nested indexing
# removes the line from the queue if the end of the line's x coordinates equals to x variable
eventQueue.remove(line_name[i])
# increasing the index number at the end of the iteration loop so I can access the other items saved
i += 1
if stop == True: # tells to stop if stop is clicked
run = False # turns off the run, if it is stop, then run must be false
x = 50 # set x to default
# if I don't make the stop false at the end of this clause, there would be a logic error as stop must be false after it was used otherwise, it will be true forever
stop = False
if run == True: # tells it to start if run is clicked
cursor = False # when it is running cursor can't draw any newlines
randomLine = False # again no new random lines too
x += 1 # since I am scanning, the x value should scan the screen pixel after pixel, thus, adding 1 to the x value
# this draws the scan line on the screen
pygame.draw.line(display, hlp.red, (x, 50), (x, 450), 1)
# j and k are placeholders to keep track of the index
j = 0
k = 0
# iterating through points to draw the intersection circle in the run
for point in points:
# if the first item's x value is smaller or equal to the present x variable
if point[0] <= x:
# use this pygame method to draw a small circle where the lines intersect
pygame.draw.circle(display, hlp.white, point, 3)
k += 1 # increase the placeholders value
if point[0] == x: # if x value is already equal to the preset x
# then append the orderList with the name of the intersection
orderList.append(intersect_name[j])
j += 1 # increase the j once more
if k > 0: # so it means there is already an intersection
n = len(lines) # check how many lines were drawn already
if n > 0: # if the number of lines are more than 0, it means that there are existing lines
# measure the algorithm's speed
efficiency = (n + k) * math.log10(n)
'''
since the display stars from 50th pixel, I substract 50 from that, and the script uses //8 as divide without remainers to convert the x values pixel to coordinates
this is so it can be used to name the incident of intersection
'''
c = (x - 50) // 8
# adding the text as well for the intersection
hlp.AddText("(X, Y) = (" + str(c) + ", 0)",
(200, 470), (255, 255, 255))
if cursor == True: # arrange the mouse cursors
pygame.mouse.set_visible(False)
pos = pygame.mouse.get_pos() # this is a pygame method for mouse cursor
# the cursor with the existing pointer image, pygame method called display.blit which adds a spirit to the screen
display.blit(pointer, pos)
# if you clicked on the screen, this checks the number of clicks and starts drawing
if len(clickedPos) > 0:
# again pygame method to draw, if clicked then draw this
pygame.draw.circle(display, hlp.white, clickedPos[0], 2)
# if clicked then draw this
pygame.draw.line(display, hlp.white, clickedPos[0], pos, 1)
if len(clickedPos) >= 2: # if the cursor is in a positon which is longer than 2 that can draw lines, if you clicked on more or equal to 2 times, which means begining and end for the lines
# then add lines according to the points saved in the clickedPos object. [0] is the begining index and clickedPos[1] is the ending index.
AddNewLine([clickedPos[0], clickedPos[1]])
cursor = False # disable the cursor after drawing
clickedPos = [] # empty the placeholder after drawing the line
else: # now you are entering into the scene of mouse action
# again pygame GUI method enabling mouse action on the screen to interact
pygame.mouse.set_visible(True)
if randomLine == True: # if mouse clicked on the randomline
GenerateRandomLine() # then create a random line, calling the existing function
randomLine = False # turn it off after drawing so it would not keep drawing forever
randomTimer = False # and stop the timer so it won't go forever
if run == True and x > 450: # if run function is enabled however the x value is out of the screen
x = 50 # put x back to the default of 50
run = False # and disable the run
if clear == True: # clear action is enabled, clear back all the placeholders to default
lines = [] # everything is back to the default value
colours = [] # everything is back to the default value
points = [] # everything is back to the default value
orderList = [] # everything is back to the default value
efficiency = 0 # everything is back to the default value
eventQueue = [] # everything is back to the default value
intersect_name = [] # everything is back to the default value
line_name = [] # everything is back to the default value
x = 50 # everything is back to the default value
run = False # everything is back to the default value
clear = False # everything is back to the default value
'''
adding text positions and texts for the frame
calling existing functions, giving text, position and when applicable the action
my helper functions are button and addtext that help me in my larger script
'''
# adding text positions and texts for the frame
hlp.AddText("(0,0)", (30, 25))
hlp.AddText("(50,0)", (430, 25))
hlp.AddText("(0,50)", (30, 450))
hlp.AddText("(50,50)", (430, 450))
# drawing buttons and determining positions
hlp.Button("Run", 80, 5, 100, 35, RunActive)
hlp.Button("Stop", 200, 5, 100, 35, StopActive)
hlp.Button("Clear", 320, 5, 100, 30, ClearActive)
hlp.Button("Random Segment", 50, 500, 180, 30, RandomActive)
hlp.Button("Insert Segment", 280, 500, 180, 35, CursorActive)
hlp.Button("Exit", 500, 5, 100,
30, sys.exit)
back = hlp.ButtonWithReturn("Back", 900, 5, 100, 30, 1)
if back > 0: # if back has a value, which means it has been clicked, stop the bigger loop that we started, i.e. the game loop, and break the game loop
break
# calls the helper function
nxt = hlp.ButtonWithReturn("Next", 700, 5, 100, 30, 1)
if nxt > 0: # so if the next button is clicked
# calls for the description function
hlp.Description(dsb.bo_desc)
text = ["If you are learning to play, it is recommended", # and displays this text
"you chose your own starting area."]
# pygame method to draw an object
pygame.draw.rect(display, line_colour, [500, 50, 750, 490], 2)
# adding the text on the given location
hlp.AddText("Bentley-Ottmann Algorithm", (520, 70))
# adding the text on the given location
hlp.AddText("Event Queue:", (520, 120))
# creating indexing i and x, y positions to display on the GUI, this is an important way to assign values to a tuplae object
i, o_x, o_y = 0, 540, 150
'''
iterating through the existing values in the eventQueue
because we don't want the texts to overlap on the screen
most of the numbers below are finetuning to prevent overlapping of the texts for the order list and the eventqueue list
'''
for val in eventQueue:
# val is each text saved in the eventQueue, and these values are not to overlap on the screen
hlp.AddText(val, (o_x, o_y), (255, 255, 255))
o_x += 30 # therefore for each value, I'm adding +30 for each one
i += 1 # adding one to the index to access to the next item
if i % 23 == 0: # 23rd item appears on the righest point on the screen so for the next one you need to go on the y axis
o_x = 540 # text is on the edge, there no more horizontol space
# text needs to appear on the next line, so adding 20 onto the y axis, vertical move
o_y += 20
hlp.AddText("Order List:", (520, 200)) # adding the text
i, o_x, o_y = 0, 540, 230
for val in orderList: # same as above iteration but for the order list this time
hlp.AddText(val, (o_x, o_y), (255, 255, 255))
o_x += 50 # adding to x axis
i += 1 # increasing the index
if i % 14 == 0: # this is 14, because the text has less horizontal space to appear.
o_x = 540 # reached the end of the line
o_y += 20 # go to the next line, move vertical, thus adding to the y value
# adding the text on the given location
hlp.AddText("Efficiency O((n+k)logn):", (520, 480))
# adding the text on the given location
hlp.AddText(str(efficiency), (540, 505), (255, 255, 255))
# updates the screen every turn
pygame.display.flip()
# will not run more than 30 frames per second
clock.tick(30)
intro.Introduction2() # calls back the introduction function
def ShamosHoeyMain():
    '''
    Main display loop for the Shamos-Hoey algorithm visualisation.

    Draws the grid and the stored line segments, sweeps a vertical scan
    line across the plot and stops at the first intersection point found
    (Shamos-Hoey only answers whether ANY pair of segments intersects,
    so one point is enough). Also renders the event queue, the
    O(n log n) efficiency estimate and the control buttons.
    '''
    global cursor, lines, colours, points, randomLine, randomTimer, run, stop, clear, intersect_name
    global display, line_name  # shared drawing state used by the helper functions
    pygame.display.set_caption("Shamos-Hoey Algorithm")  # window caption
    # full-screen surface for this algorithm's screen
    display = pygame.display.set_mode((1280, 550), pygame.FULLSCREEN)
    cursor = False      # defaults before entering the main loop below
    randomLine = False  # no random segment requested yet
    clickedPos = []     # endpoints clicked while drawing a segment by hand
    firstPoint = None   # first intersection point identified by the sweep
    efficiency = 0      # efficiency estimate shown in the side panel
    eventQueue = []     # segment names currently crossed by the scan line
    run = False
    x = 50              # scan-line x position (the plot starts at pixel 50)
    back = 0            # becomes positive when the Back button is pressed
    while True:  # game loop
        display.fill((0, 0, 0))  # clear the frame
        for event in pygame.event.get():  # process queued pygame events
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_ESCAPE:  # ESC also exits
                    exit()
            # while in draw mode a left click places a segment endpoint
            if cursor == True and event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    # BUG FIX: this was assigned to `mouse_pos` while the
                    # checks below read the stale `pos` left over from a
                    # previous frame; assign to `pos` as the other screens do.
                    pos = pygame.mouse.get_pos()
                    if 50 < pos[0] < 450 and 50 < pos[1] < 450:  # inside the plot area
                        clickedPos.append(pos)
            if event.type == pygame.MOUSEBUTTONUP:
                randomTimer = True  # re-arm the Random Segment button
        for i in range(0, 41):  # draw the 40x40 background grid
            pos = i * 10 + 50
            pygame.draw.line(display, line_colour, (50, pos), (450, pos), 1)
            pygame.draw.line(display, line_colour, (pos, 50), (pos, 450), 1)
        i = 0
        for line in lines:  # draw every stored segment in its own colour
            pygame.draw.line(display, colours[i], line[0], line[1], 1)
            hlp.AddText(line_name[i], line[0])  # label the segment at its start
            if x == line[0][0]:  # scan line reached the segment's left end
                eventQueue.append(line_name[i])
            if x == line[1][0]:  # scan line passed the segment's right end
                eventQueue.remove(line_name[i])
            i += 1
        if stop == True:  # Stop button pressed: reset the sweep
            run = False
            x = 50
            # consume the flag so it does not stay set forever
            stop = False
            eventQueue = []
        if run == True:  # Run pressed: advance the sweep one pixel per frame
            cursor = False      # no manual drawing while running
            randomLine = False  # no random segments while running
            x += 1
            # draw the red scan line
            pygame.draw.line(display, hlp.red, (x, 50), (x, 450), 1)
            for point in points:
                if point[0] == x:  # first intersection reached: stop the sweep
                    firstPoint = point
                    run = False
                    x = 50
                    eventQueue = []
                    efficiency = 0
                    break
            n = len(lines)  # number of existing segments
            if n > 0:
                efficiency = n * math.log10(n)  # O(n log n) estimate
            # convert the pixel position back to plot units (8 px per unit,
            # plot origin at pixel 50)
            c = (x - 50) // 8
            hlp.AddText("(X, Y) = (" + str(c) + ", 0)", (200, 470),
                        hlp.white)
        if firstPoint != None:  # mark the detected intersection
            pygame.draw.circle(display, hlp.white, firstPoint, 3)
        if cursor == True:  # draw mode: replace the OS cursor with the pointer image
            pygame.mouse.set_visible(False)
            pos = pygame.mouse.get_pos()
            display.blit(pointer, pos)
            if len(clickedPos) > 0:  # first endpoint placed: preview the segment
                pygame.draw.circle(display, hlp.white, clickedPos[0], 2)
                pygame.draw.line(display, hlp.white, clickedPos[0], pos, 1)
            if len(clickedPos) >= 2:  # both endpoints placed: store the segment
                AddNewLine([clickedPos[0], clickedPos[1]])
                cursor = False   # leave draw mode after one segment
                clickedPos = []  # reset for the next segment
        else:
            pygame.mouse.set_visible(True)
        if randomLine == True:  # Random Segment button pressed
            GenerateRandomLine()
            randomLine = False   # one segment per click
            randomTimer = False  # disarm until the next mouse release
        if run == True and x > 450:  # scan line left the plot: reset
            x = 50
            run = False
        if clear == True:  # Clear button pressed: wipe all state
            lines = []
            colours = []
            points = []
            efficiency = 0
            firstPoint = None
            eventQueue = []
            intersect_name = []
            line_name = []
            x = 50
            run = False
            clear = False
        # axis labels and control buttons
        hlp.AddText("(0,0)", (30, 25))
        hlp.AddText("(50,0)", (430, 25))
        hlp.AddText("(0,50)", (30, 450))
        hlp.AddText("(50,50)", (430, 450))
        hlp.Button("Run", 80, 5, 100, 35, RunActive)
        hlp.Button("Stop", 200, 5, 100, 35, StopActive)
        hlp.Button("Clear", 320, 5, 100, 30, ClearActive)
        hlp.Button("Random Segment", 50, 500, 180, 30, RandomActive)
        hlp.Button("Insert Segment", 280, 500, 180, 35, CursorActive)
        hlp.Button("Exit", 500, 5, 100,
                   30, sys.exit)
        back = hlp.ButtonWithReturn("Back", 900, 5, 100, 30, 1)
        if back > 0:  # Back pressed: leave the game loop
            break
        nxt = hlp.ButtonWithReturn("Next", 700, 5, 100, 30, 1)
        if nxt > 0:  # Next pressed: show the algorithm description
            hlp.Description(dsb.sh_desc)
        # side panel: algorithm name, event queue and efficiency
        pygame.draw.rect(display, line_colour, [500, 50, 750, 490], 2)
        hlp.AddText("<NAME>", (520, 70))
        hlp.AddText("Event Queue:", (520, 120))
        i, o_x, o_y = 0, 540, 150
        for val in eventQueue:  # lay the queue entries out in rows of 23
            hlp.AddText(val, (o_x, o_y), hlp.white)
            o_x += 30  # advance horizontally for each entry
            i += 1
            if i % 23 == 0:  # row full: wrap to the next line
                o_x = 540
                o_y += 20
        hlp.AddText("Efficiency O(nlogn):", (520, 200))
        hlp.AddText(str(efficiency), (540, 230), hlp.white)
        pygame.display.flip()  # present the frame
        clock.tick(30)  # cap at 30 frames per second
    intro.Introduction2()  # return to the introduction menu
def Efficiency():
    '''
    Bar-chart comparison screen for the three algorithms' efficiency.

    The user enters a segment count n and an intersection count k; one
    bar per algorithm is drawn using the standard cost formulas:
    brute force n^2, Bentley-Ottmann (n + k) log n and Shamos-Hoey
    n log n, scaled to fit the 350-pixel-tall chart.
    '''
    pygame.display.set_caption("Efficiency Comparison")
    display = pygame.display.set_mode(
        (1280, 550), pygame.FULLSCREEN | pygame.DOUBLEBUF)
    n = 0        # number of segments entered by the user
    k = 0        # number of intersections entered by the user
    posX1 = 180  # "Insert Line" button position
    posX2 = 400  # "Insert Intersect" button position
    posY = 20
    bPos = 450   # chart baseline; bars grow upwards from here
    bo = 0       # Bentley-Ottmann bar height in pixels
    bf = 0       # brute-force bar height in pixels
    sh = 0       # Shamos-Hoey bar height in pixels
    bog = 0      # unscaled Bentley-Ottmann cost (label text)
    bfg = 0      # unscaled brute-force cost (label text)
    shg = 0      # unscaled Shamos-Hoey cost (label text)
    while True:  # game loop
        display.fill((0, 0, 0))
        # display.blit(hlp.dscbg,(0,0))
        for event in pygame.event.get():  # process queued pygame events
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_ESCAPE:  # ESC also exits
                    exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    pos = pygame.mouse.get_pos()
                    # click landed on the "Insert Line" button
                    if posX1 < pos[0] < posX1 + 130 and posY < pos[1] < posY + 60:
                        lineTxt = hlp.InsertNumber("Line Number:")
                        if lineTxt != "":
                            try:
                                n = int(lineTxt)
                            # BUG FIX: was a bare `except:`, which also
                            # swallows SystemExit/KeyboardInterrupt; only a
                            # non-numeric string needs handling here.
                            except ValueError:
                                n = 0
                    # same handling for the "Insert Intersect" button
                    elif posX2 < pos[0] < posX2 + 170 and posY < pos[1] < posY + 60:
                        intersectTxt = hlp.InsertNumber("Intersect Number :")
                        if intersectTxt != "":
                            try:
                                k = int(intersectTxt)
                            except ValueError:  # BUG FIX: was a bare except
                                k = 0
        if n > 0:
            # established cost formulas for each algorithm
            bo = int((n + k) * math.log10(n))
            bog = bo  # keep the true value for the label
            bf = int(n * n)
            bfg = bf  # keep the true value for the label
            sh = int(n * math.log10(n))
            shg = sh  # keep the true value for the label
        if bo > 350 or bf > 350 or sh > 350:  # scale the bars into the chart
            m = max(bo, bf, sh)
            bo = int((bo / m) * 350)
            bf = int((bf / m) * 350)
            sh = int((sh / m) * 350)
        if bo == 0:  # keep every bar at least 1 px tall so it stays visible
            bo = 1
        if bf == 0:
            bf = 1
        if sh == 0:
            sh = 1
        # buttons, labels and chart axes
        hlp.Button("Insert Line", posX1, posY, 130, 30, None)
        hlp.Button("Insert Intersect", posX2, posY, 160, 30, None)
        hlp.AddText("Line: " + str(n), (600, 20), hlp.white)
        hlp.AddText("Intersect: " + str(k), (600, 50), hlp.white)
        hlp.AddText("BF", (180, 460), hlp.white)
        hlp.AddText("BO", (330, 460), hlp.white)
        hlp.AddText("SH", (480, 460), hlp.white)
        pygame.draw.line(display, line_colour, (100, 100), (100, 500), 2)
        pygame.draw.line(display, line_colour, (50, 450), (650, 450), 2)
        if bf > 0:  # brute-force bar with its cost label above it
            hlp.AddText(str(bfg), (165, bPos - bf - 30), hlp.white)
            pygame.draw.rect(display, hlp.button_colour, (165, bPos - bf, 50, bf)
                             )
        if bo > 0:  # Bentley-Ottmann bar with its cost label above it
            hlp.AddText(str(bog), (315, bPos - bo - 30), hlp.white)
            pygame.draw.rect(display, hlp.button_colour, (315, bPos - bo, 50, bo)
                             )
        if sh > 0:  # Shamos-Hoey bar with its cost label above it
            hlp.AddText(str(shg), (465, bPos - sh - 30), hlp.white)
            pygame.draw.rect(display, hlp.button_colour,
                             (465, bPos - sh, 50, sh))
        # navigation buttons
        hlp.Button("Exit", 350, 500, 100,
                   30, sys.exit)
        back = hlp.ButtonWithReturn("Back", 650, 500, 100, 30, 1)
        if back > 0:  # Back pressed: leave the game loop
            break
        nxt = hlp.ButtonWithReturn("Next", 500, 500, 100, 30, 1)
        if nxt > 0:  # Next pressed: show the description
            hlp.Description(dsb.effic_desc)
        pygame.display.flip()  # present the frame
        clock.tick(60)  # cap at 60 frames per second
    intro.Introduction2()  # return to the introduction menu
def Efficiency2():
    '''
    Line-graph comparison screen for the three algorithms' efficiency.

    Precomputes cost curves for n = 10..1000 segments (Shamos-Hoey
    n log n, brute force n^2, Bentley-Ottmann (n + k) log n with the
    worst-case k = n(n-1)/2), normalises them to a shared scale and
    plots all three once the user presses Start.
    '''
    pygame.display.set_caption("Efficiency Comparison")
    display = pygame.display.set_mode(
        (1280, 550), pygame.FULLSCREEN | pygame.DOUBLEBUF)
    n = range(10, 1001)  # segment counts covered by the curves
    bet = False  # becomes True when the Start button is clicked
    posX1 = 180  # position to appear
    posX2 = 400  # position to appear
    posY = 20  # position to appear
    bPos = 450  # position to appear
    sheffc = [i * math.log10(i) for i in n]  # Shamos-Hoey cost per n
    bfeffc = [i**2 for i in n]  # brute-force cost per n
    boeffc = [((i + (((i**2) - i) / 2)) * math.log10(i)) for i in n]  # Bentley-Ottmann worst-case cost per n
    topalg = sheffc + bfeffc + boeffc  # pool all costs for a common scale
    mx = max(topalg)  # global maximum used in normalisation
    mn = min(topalg)  # global minimum used in normalisation
    # normalise the Shamos-Hoey curve onto the shared scale
    transsheffc = [TransValue(i, mx, mn) for i in sheffc]
    # down-sample to 550 values: one per horizontal pixel of the chart
    transshefc2 = random.sample(transsheffc, 550)
    transshefc2.sort()  # restore monotonic order after sampling
    shno = 0  # horizontal pixel index
    shpoints = []  # (x, y) pixel points of the Shamos-Hoey curve
    # build the pixel points; the +2/+4 offsets in the later ranges nudge the
    # curve down slightly — presumably to keep it visually separated from the
    # other curves (TODO confirm intent)
    for i in transshefc2[:200]:
        shpoints.append((100 + shno, 450 - int(i)))
        shno += 1
    for i in transshefc2[200:349]:
        shpoints.append((100 + shno, 450 - (int(i + 2))))
        shno += 1
    for i in transshefc2[349:]:
        shpoints.append((100 + shno, 450 - (int(i + 4))))
        shno += 1
    # same normalise / sample / point-building pipeline for brute force
    transbfeffc = [TransValue(i, mx, mn) for i in bfeffc]
    transbfeffc2 = random.sample(transbfeffc, 550)
    transbfeffc2.sort()
    bfno = 0
    bfpoints = []
    for i in(transbfeffc2):
        bfpoints.append((100 + bfno, 450 - int(i)))
        bfno += 1
    # and again for Bentley-Ottmann
    transboeffc = [TransValue(i, mx, mn) for i in boeffc]
    transboeffc2 = random.sample(transboeffc, 550)
    transboeffc2.sort()
    bono = 0
    bopoints = []
    for i in(transboeffc2):
        bopoints.append((100 + bono, 450 - int(i)))
        bono += 1
    while True:  # game loop
        display.fill((0, 0, 0))
        # display.blit(hlp.dscbg,(0,0))
        for event in pygame.event.get():  # process queued pygame events
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_ESCAPE:  # ESC also exits
                    exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    pos = pygame.mouse.get_pos()
                    # click landed on the Start button: reveal the curves
                    if posX2 < pos[0] < posX2 + 170 and posY < pos[1] < posY + 60:
                        bet = True
        # buttons, axis labels and the rotated y-axis caption
        hlp.Button("Start", posX2, posY, 160, 30, None)
        hlp.AddText("Lines: 10, 100, 1000", (600, 20), hlp.white)
        hlp.AddText("10", (115, 460), hlp.white)
        hlp.AddText("100", (350, 460), hlp.white)
        hlp.AddText("1000", (650, 460), hlp.white)
        hlp.AddText("max", (50, 100), hlp.white)
        hlp.AddText("0", (50, 460), hlp.white)
        sidefont = pygame.font.Font(bitterfont, 16)
        sidetext = sidefont.render("Algorithm Efficiency", True, hlp.white)
        sidetext = pygame.transform.rotate(sidetext, 90)  # vertical axis label
        display.blit(sidetext, (70, 235))
        # chart axes
        pygame.draw.line(display, line_colour, (100, 100), (100, 500), 2)
        pygame.draw.line(display, line_colour, (50, 450), (650, 450), 2)
        if bet:  # Start pressed: draw the three curves plus the legend
            pygame.draw.lines(display, (62, 150, 81), False, bfpoints, 4)
            pygame.draw.lines(display, (255, 255, 0), False, shpoints, 4)
            pygame.draw.lines(display, (255, 0, 0), False, bopoints, 4)
            hlp.AddText("Brute Force", (750, 150), hlp.white)
            hlp.AddText("Bentley-Ottmann", (750, 250), hlp.white)
            hlp.AddText("Shamos-Hoey", (750, 350), hlp.white)
            pygame.draw.line(display, (62, 150, 81), (720, 160), (740, 160), 4)
            pygame.draw.line(display, (255, 0, 0), (720, 260), (740, 260), 4)
            pygame.draw.line(display, (255, 255, 0), (720, 360), (740, 360), 4)
            # NOTE(review): with n = range(10, 1001), indices 9/499/989 are
            # n = 19/509/999, not the advertised 10/100/1000 — confirm the
            # intended indices (0/90/990 would match the label)
            hlp.AddText("n=10;100;1000", (720, 390), hlp.white)
            hlp.AddText("Brute Force = " + str(round(bfeffc[9])) + "; " + str(
                round(bfeffc[499])) + "; " + str(round(bfeffc[989])), (720, 405), hlp.white)
            hlp.AddText("Bentley-Ottmann = " + str(round(boeffc[9])) + "; " + str(
                round(boeffc[499])) + "; " + str(round(boeffc[989])), (720, 420), hlp.white)
            hlp.AddText("Shamos-Hoey = " + str(round(sheffc[9])) + "; " + str(
                round(sheffc[499])) + "; " + str(round(sheffc[989])), (720, 435), hlp.white)
        # navigation buttons
        hlp.Button("Exit", 350, 500, 100,
                   30, sys.exit)
        back = hlp.ButtonWithReturn("Back", 650, 500, 100, 30, 1)
        if back > 0:  # Back pressed: leave the game loop
            break
        nxt = hlp.ButtonWithReturn("Next", 500, 500, 100, 30, 1)
        if nxt > 0:  # Next pressed: show the description
            hlp.Description(dsb.effic_desc)
        pygame.display.flip()  # updates the screen every turn
        clock.tick(60)  # cap at 60 frames per second
    intro.Introduction2()  # calls back the introduction function
def AddNewColour():
    '''
    Append a freshly generated random colour to the global palette.

    Each stored line segment is drawn in its own colour; the three RGB
    channels are drawn independently from 1..254, so the colour is never
    pure black or pure white.
    '''
    global colours  # palette shared with the drawing loops
    channels = [random.randrange(1, 255) for _ in range(3)]  # r, g, b in order
    colours.append(pygame.Color(*channels))
def AddNewLine(newLine):
    '''
    Store a new line segment and record its intersections.

    The segment is named by its 1-based creation order. Every existing
    segment is tested against the new one; each intersecting pair is
    recorded as "<old name>.<new name>" in the global intersect_name
    list. The segment's endpoints are stored left-to-right, then a new
    colour is assigned and the brute-force colour table is refreshed.
    '''
    global lines, line_name, intersect_name
    name = str(len(lines) + 1)  # 1-based name for the new segment
    for idx, existing in enumerate(lines):
        # record each pair of segments that intersect
        if CheckIntersect(newLine[0], newLine[1], existing[0], existing[1]):
            intersect_name.append(line_name[idx] + "." + name)
    segment = newLine
    if(newLine[0][0] > newLine[1][0]):
        # normalise endpoint order so the leftmost point comes first
        segment = [newLine[1], newLine[0]]
    lines.append(segment)
    line_name.append(name)
    AddNewColour()  # give the segment its own colour
    ChangeColour()  # refresh the brute-force highlight colours
def ChangeColour():
    '''
    Rebuild the brute-force colour table from the main palette.

    Copies the global colours list into brutecolours, then repaints both
    members of every recorded intersecting pair white. Pair names have
    the form "<a>.<b>" where a and b are 1-based segment numbers.
    '''
    global intersect_name, colours, brutecolours
    brutecolours = list(colours)  # work on a copy; the palette stays intact
    for pair in intersect_name:
        parts = pair.split(".")  # "<a>.<b>" -> ["a", "b"]
        brutecolours[int(parts[0]) - 1] = hlp.white  # highlight first segment
        brutecolours[int(parts[1]) - 1] = hlp.white  # highlight second segment
def CursorActive():
    '''
    Enable draw mode: the main loop swaps the system cursor for the
    pointer image so the user can click twice to place a segment.
    '''
    global cursor
    cursor = True  # consumed (and reset) by the active main loop
def RandomActive():
    '''
    Request one random segment from the main loop.

    The request is only honoured while randomTimer is armed (it is armed
    on mouse release and disarmed after a segment is drawn), which stops
    a held button from generating segments forever.
    '''
    global randomLine, randomTimer
    if randomTimer == True:  # only act while the timer is armed
        randomLine = True
def RunActive():
    '''
    Start the sweep: clear any results from a previous run, then raise
    the run flag consumed by the active main loop.
    '''
    global run, orderList
    orderList = []  # discard results of any earlier sweep first
    run = True      # the main loop starts advancing the scan line
def StopActive():
    '''
    Raise the stop flag; the active main loop halts and resets the
    sweep on its next frame (and clears the flag again).
    '''
    global stop
    stop = True
def ClearActive():
    '''
    Raise the clear flag; the active main loop wipes all drawing state
    (segments, colours, intersections) on its next frame.
    '''
    global clear
    clear = True
def StartGame():
    '''Raise the flag that dismisses the introduction menu.'''
    global start
    start = True
| [
"pygame.init",
"pygame.quit",
"pygame.font.Font",
"math.log10",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.mouse.get_pos",
"hlp.InsertNumber",
"pygame.draw.rect",
"random.sample",
"random.randrange",
"pygame.Color",
"pygame.time.Clock",
"hlp.AddText",
"os.path.abspath",
... | [((802, 815), 'pygame.init', 'pygame.init', ([], {}), '()\n', (813, 815), False, 'import pygame\n'), ((831, 856), 'pygame.Color', 'pygame.Color', (['(50)', '(50)', '(120)'], {}), '(50, 50, 120)\n', (843, 856), False, 'import pygame\n'), ((921, 1018), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(1280, 550)', '(pygame.FULLSCREEN | pygame.DOUBLEBUF | pygame.HWSURFACE)'], {}), '((1280, 550), pygame.FULLSCREEN | pygame.DOUBLEBUF |\n pygame.HWSURFACE)\n', (944, 1018), False, 'import pygame\n'), ((1049, 1123), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Line Segment Intersection Visualisation Tool"""'], {}), "('Line Segment Intersection Visualisation Tool')\n", (1075, 1123), False, 'import pygame\n'), ((1210, 1229), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1227, 1229), False, 'import pygame\n'), ((1459, 1502), 'os.path.abspath', 'os.path.abspath', (['"""resources/bitterfont.otf"""'], {}), "('resources/bitterfont.otf')\n", (1474, 1502), False, 'import os\n'), ((1357, 1397), 'os.path.join', 'os.path.join', (['"""resources"""', '"""pointer.png"""'], {}), "('resources', 'pointer.png')\n", (1369, 1397), False, 'import os\n'), ((2659, 2684), 'random.randrange', 'random.randrange', (['(51)', '(450)'], {}), '(51, 450)\n', (2675, 2684), False, 'import random\n'), ((2732, 2757), 'random.randrange', 'random.randrange', (['(50)', '(450)'], {}), '(50, 450)\n', (2748, 2757), False, 'import random\n'), ((2805, 2830), 'random.randrange', 'random.randrange', (['(51)', '(450)'], {}), '(51, 450)\n', (2821, 2830), False, 'import random\n'), ((2878, 2903), 'random.randrange', 'random.randrange', (['(50)', '(450)'], {}), '(50, 450)\n', (2894, 2903), False, 'import random\n'), ((4690, 4741), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Brute-Force Algorithm"""'], {}), "('Brute-Force Algorithm')\n", (4716, 4741), False, 'import pygame\n'), ((4820, 4875), 'pygame.display.set_mode', 'pygame.display.set_mode', 
(['(1280, 550)', 'pygame.FULLSCREEN'], {}), '((1280, 550), pygame.FULLSCREEN)\n', (4843, 4875), False, 'import pygame\n'), ((15578, 15599), 'intro.Introduction2', 'intro.Introduction2', ([], {}), '()\n', (15597, 15599), False, 'import intro\n'), ((15970, 16025), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Bentley-Ottmann Algorithm"""'], {}), "('Bentley-Ottmann Algorithm')\n", (15996, 16025), False, 'import pygame\n'), ((16104, 16159), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(1280, 550)', 'pygame.FULLSCREEN'], {}), '((1280, 550), pygame.FULLSCREEN)\n', (16127, 16159), False, 'import pygame\n'), ((30273, 30294), 'intro.Introduction2', 'intro.Introduction2', ([], {}), '()\n', (30292, 30294), False, 'import intro\n'), ((30650, 30701), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Shamos-Hoey Algorithm"""'], {}), "('Shamos-Hoey Algorithm')\n", (30676, 30701), False, 'import pygame\n'), ((30780, 30835), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(1280, 550)', 'pygame.FULLSCREEN'], {}), '((1280, 550), pygame.FULLSCREEN)\n', (30803, 30835), False, 'import pygame\n'), ((44195, 44216), 'intro.Introduction2', 'intro.Introduction2', ([], {}), '()\n', (44214, 44216), False, 'import intro\n'), ((44358, 44409), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Efficiency Comparison"""'], {}), "('Efficiency Comparison')\n", (44384, 44409), False, 'import pygame\n'), ((44424, 44498), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(1280, 550)', '(pygame.FULLSCREEN | pygame.DOUBLEBUF)'], {}), '((1280, 550), pygame.FULLSCREEN | pygame.DOUBLEBUF)\n', (44447, 44498), False, 'import pygame\n'), ((51292, 51313), 'intro.Introduction2', 'intro.Introduction2', ([], {}), '()\n', (51311, 51313), False, 'import intro\n'), ((51456, 51507), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Efficiency Comparison"""'], {}), "('Efficiency Comparison')\n", (51482, 51507), False, 'import 
pygame\n'), ((51522, 51596), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(1280, 550)', '(pygame.FULLSCREEN | pygame.DOUBLEBUF)'], {}), '((1280, 550), pygame.FULLSCREEN | pygame.DOUBLEBUF)\n', (51545, 51596), False, 'import pygame\n'), ((52519, 52550), 'random.sample', 'random.sample', (['transsheffc', '(550)'], {}), '(transsheffc, 550)\n', (52532, 52550), False, 'import random\n'), ((54467, 54498), 'random.sample', 'random.sample', (['transbfeffc', '(550)'], {}), '(transbfeffc, 550)\n', (54480, 54498), False, 'import random\n'), ((54793, 54824), 'random.sample', 'random.sample', (['transboeffc', '(550)'], {}), '(transboeffc, 550)\n', (54806, 54824), False, 'import random\n'), ((59095, 59116), 'intro.Introduction2', 'intro.Introduction2', ([], {}), '()\n', (59114, 59116), False, 'import intro\n'), ((59377, 59401), 'random.randrange', 'random.randrange', (['(1)', '(255)'], {}), '(1, 255)\n', (59393, 59401), False, 'import random\n'), ((59435, 59459), 'random.randrange', 'random.randrange', (['(1)', '(255)'], {}), '(1, 255)\n', (59451, 59459), False, 'import random\n'), ((59495, 59519), 'random.randrange', 'random.randrange', (['(1)', '(255)'], {}), '(1, 255)\n', (59511, 59519), False, 'import random\n'), ((59565, 59586), 'pygame.Color', 'pygame.Color', (['r', 'g', 'b'], {}), '(r, g, b)\n', (59577, 59586), False, 'import pygame\n'), ((5698, 5716), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (5714, 5716), False, 'import pygame\n'), ((12968, 12998), 'hlp.AddText', 'hlp.AddText', (['"""(0,0)"""', '(30, 25)'], {}), "('(0,0)', (30, 25))\n", (12979, 12998), False, 'import hlp\n'), ((13007, 13039), 'hlp.AddText', 'hlp.AddText', (['"""(50,0)"""', '(430, 25)'], {}), "('(50,0)', (430, 25))\n", (13018, 13039), False, 'import hlp\n'), ((13048, 13080), 'hlp.AddText', 'hlp.AddText', (['"""(0,50)"""', '(30, 450)'], {}), "('(0,50)', (30, 450))\n", (13059, 13080), False, 'import hlp\n'), ((13089, 13123), 'hlp.AddText', 'hlp.AddText', (['"""(50,50)"""', 
'(430, 450)'], {}), "('(50,50)', (430, 450))\n", (13100, 13123), False, 'import hlp\n'), ((13132, 13181), 'hlp.Button', 'hlp.Button', (['"""Clear"""', '(200)', '(5)', '(100)', '(30)', 'ClearActive'], {}), "('Clear', 200, 5, 100, 30, ClearActive)\n", (13142, 13181), False, 'import hlp\n'), ((13190, 13250), 'hlp.Button', 'hlp.Button', (['"""Random Segment"""', '(50)', '(500)', '(180)', '(30)', 'RandomActive'], {}), "('Random Segment', 50, 500, 180, 30, RandomActive)\n", (13200, 13250), False, 'import hlp\n'), ((13259, 13320), 'hlp.Button', 'hlp.Button', (['"""Insert Segment"""', '(280)', '(500)', '(180)', '(35)', 'CursorActive'], {}), "('Insert Segment', 280, 500, 180, 35, CursorActive)\n", (13269, 13320), False, 'import hlp\n'), ((13329, 13374), 'hlp.Button', 'hlp.Button', (['"""Exit"""', '(500)', '(5)', '(100)', '(30)', 'sys.exit'], {}), "('Exit', 500, 5, 100, 30, sys.exit)\n", (13339, 13374), False, 'import hlp\n'), ((13409, 13457), 'hlp.ButtonWithReturn', 'hlp.ButtonWithReturn', (['"""Back"""', '(900)', '(5)', '(100)', '(30)', '(1)'], {}), "('Back', 900, 5, 100, 30, 1)\n", (13429, 13457), False, 'import hlp\n'), ((13686, 13734), 'hlp.ButtonWithReturn', 'hlp.ButtonWithReturn', (['"""Next"""', '(700)', '(5)', '(100)', '(30)', '(1)'], {}), "('Next', 700, 5, 100, 30, 1)\n", (13706, 13734), False, 'import hlp\n'), ((13931, 13993), 'pygame.draw.rect', 'pygame.draw.rect', (['display', 'line_colour', '[500, 50, 750, 490]', '(2)'], {}), '(display, line_colour, [500, 50, 750, 490], 2)\n', (13947, 13993), False, 'import pygame\n'), ((14050, 14097), 'hlp.AddText', 'hlp.AddText', (['"""Brute-Force Algorithm"""', '(520, 70)'], {}), "('Brute-Force Algorithm', (520, 70))\n", (14061, 14097), False, 'import hlp\n'), ((14155, 14193), 'hlp.AddText', 'hlp.AddText', (['"""Order List:"""', '(520, 120)'], {}), "('Order List:', (520, 120))\n", (14166, 14193), False, 'import hlp\n'), ((15267, 15312), 'hlp.AddText', 'hlp.AddText', (['"""Efficiency O(n*n):"""', '(520, 480)'], {}), 
"('Efficiency O(n*n):', (520, 480))\n", (15278, 15312), False, 'import hlp\n'), ((15475, 15496), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (15494, 15496), False, 'import pygame\n'), ((17036, 17054), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (17052, 17054), False, 'import pygame\n'), ((26639, 26669), 'hlp.AddText', 'hlp.AddText', (['"""(0,0)"""', '(30, 25)'], {}), "('(0,0)', (30, 25))\n", (26650, 26669), False, 'import hlp\n'), ((26678, 26710), 'hlp.AddText', 'hlp.AddText', (['"""(50,0)"""', '(430, 25)'], {}), "('(50,0)', (430, 25))\n", (26689, 26710), False, 'import hlp\n'), ((26719, 26751), 'hlp.AddText', 'hlp.AddText', (['"""(0,50)"""', '(30, 450)'], {}), "('(0,50)', (30, 450))\n", (26730, 26751), False, 'import hlp\n'), ((26760, 26794), 'hlp.AddText', 'hlp.AddText', (['"""(50,50)"""', '(430, 450)'], {}), "('(50,50)', (430, 450))\n", (26771, 26794), False, 'import hlp\n'), ((26855, 26899), 'hlp.Button', 'hlp.Button', (['"""Run"""', '(80)', '(5)', '(100)', '(35)', 'RunActive'], {}), "('Run', 80, 5, 100, 35, RunActive)\n", (26865, 26899), False, 'import hlp\n'), ((26908, 26955), 'hlp.Button', 'hlp.Button', (['"""Stop"""', '(200)', '(5)', '(100)', '(35)', 'StopActive'], {}), "('Stop', 200, 5, 100, 35, StopActive)\n", (26918, 26955), False, 'import hlp\n'), ((26964, 27013), 'hlp.Button', 'hlp.Button', (['"""Clear"""', '(320)', '(5)', '(100)', '(30)', 'ClearActive'], {}), "('Clear', 320, 5, 100, 30, ClearActive)\n", (26974, 27013), False, 'import hlp\n'), ((27022, 27082), 'hlp.Button', 'hlp.Button', (['"""Random Segment"""', '(50)', '(500)', '(180)', '(30)', 'RandomActive'], {}), "('Random Segment', 50, 500, 180, 30, RandomActive)\n", (27032, 27082), False, 'import hlp\n'), ((27091, 27152), 'hlp.Button', 'hlp.Button', (['"""Insert Segment"""', '(280)', '(500)', '(180)', '(35)', 'CursorActive'], {}), "('Insert Segment', 280, 500, 180, 35, CursorActive)\n", (27101, 27152), False, 'import hlp\n'), ((27161, 27206), 'hlp.Button', 
'hlp.Button', (['"""Exit"""', '(500)', '(5)', '(100)', '(30)', 'sys.exit'], {}), "('Exit', 500, 5, 100, 30, sys.exit)\n", (27171, 27206), False, 'import hlp\n'), ((27241, 27289), 'hlp.ButtonWithReturn', 'hlp.ButtonWithReturn', (['"""Back"""', '(900)', '(5)', '(100)', '(30)', '(1)'], {}), "('Back', 900, 5, 100, 30, 1)\n", (27261, 27289), False, 'import hlp\n'), ((27518, 27566), 'hlp.ButtonWithReturn', 'hlp.ButtonWithReturn', (['"""Next"""', '(700)', '(5)', '(100)', '(30)', '(1)'], {}), "('Next', 700, 5, 100, 30, 1)\n", (27538, 27566), False, 'import hlp\n'), ((27916, 27978), 'pygame.draw.rect', 'pygame.draw.rect', (['display', 'line_colour', '[500, 50, 750, 490]', '(2)'], {}), '(display, line_colour, [500, 50, 750, 490], 2)\n', (27932, 27978), False, 'import pygame\n'), ((28035, 28086), 'hlp.AddText', 'hlp.AddText', (['"""Bentley-Ottmann Algorithm"""', '(520, 70)'], {}), "('Bentley-Ottmann Algorithm', (520, 70))\n", (28046, 28086), False, 'import hlp\n'), ((28143, 28182), 'hlp.AddText', 'hlp.AddText', (['"""Event Queue:"""', '(520, 120)'], {}), "('Event Queue:', (520, 120))\n", (28154, 28182), False, 'import hlp\n'), ((29329, 29367), 'hlp.AddText', 'hlp.AddText', (['"""Order List:"""', '(520, 200)'], {}), "('Order List:', (520, 200))\n", (29340, 29367), False, 'import hlp\n'), ((29956, 30007), 'hlp.AddText', 'hlp.AddText', (['"""Efficiency O((n+k)logn):"""', '(520, 480)'], {}), "('Efficiency O((n+k)logn):', (520, 480))\n", (29967, 30007), False, 'import hlp\n'), ((30170, 30191), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (30189, 30191), False, 'import pygame\n'), ((31675, 31693), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (31691, 31693), False, 'import pygame\n'), ((41174, 41204), 'hlp.AddText', 'hlp.AddText', (['"""(0,0)"""', '(30, 25)'], {}), "('(0,0)', (30, 25))\n", (41185, 41204), False, 'import hlp\n'), ((41213, 41245), 'hlp.AddText', 'hlp.AddText', (['"""(50,0)"""', '(430, 25)'], {}), "('(50,0)', (430, 25))\n", (41224, 
41245), False, 'import hlp\n'), ((41254, 41286), 'hlp.AddText', 'hlp.AddText', (['"""(0,50)"""', '(30, 450)'], {}), "('(0,50)', (30, 450))\n", (41265, 41286), False, 'import hlp\n'), ((41295, 41329), 'hlp.AddText', 'hlp.AddText', (['"""(50,50)"""', '(430, 450)'], {}), "('(50,50)', (430, 450))\n", (41306, 41329), False, 'import hlp\n'), ((41390, 41434), 'hlp.Button', 'hlp.Button', (['"""Run"""', '(80)', '(5)', '(100)', '(35)', 'RunActive'], {}), "('Run', 80, 5, 100, 35, RunActive)\n", (41400, 41434), False, 'import hlp\n'), ((41443, 41490), 'hlp.Button', 'hlp.Button', (['"""Stop"""', '(200)', '(5)', '(100)', '(35)', 'StopActive'], {}), "('Stop', 200, 5, 100, 35, StopActive)\n", (41453, 41490), False, 'import hlp\n'), ((41499, 41548), 'hlp.Button', 'hlp.Button', (['"""Clear"""', '(320)', '(5)', '(100)', '(30)', 'ClearActive'], {}), "('Clear', 320, 5, 100, 30, ClearActive)\n", (41509, 41548), False, 'import hlp\n'), ((41557, 41617), 'hlp.Button', 'hlp.Button', (['"""Random Segment"""', '(50)', '(500)', '(180)', '(30)', 'RandomActive'], {}), "('Random Segment', 50, 500, 180, 30, RandomActive)\n", (41567, 41617), False, 'import hlp\n'), ((41626, 41687), 'hlp.Button', 'hlp.Button', (['"""Insert Segment"""', '(280)', '(500)', '(180)', '(35)', 'CursorActive'], {}), "('Insert Segment', 280, 500, 180, 35, CursorActive)\n", (41636, 41687), False, 'import hlp\n'), ((41696, 41741), 'hlp.Button', 'hlp.Button', (['"""Exit"""', '(500)', '(5)', '(100)', '(30)', 'sys.exit'], {}), "('Exit', 500, 5, 100, 30, sys.exit)\n", (41706, 41741), False, 'import hlp\n'), ((41776, 41824), 'hlp.ButtonWithReturn', 'hlp.ButtonWithReturn', (['"""Back"""', '(900)', '(5)', '(100)', '(30)', '(1)'], {}), "('Back', 900, 5, 100, 30, 1)\n", (41796, 41824), False, 'import hlp\n'), ((42053, 42101), 'hlp.ButtonWithReturn', 'hlp.ButtonWithReturn', (['"""Next"""', '(700)', '(5)', '(100)', '(30)', '(1)'], {}), "('Next', 700, 5, 100, 30, 1)\n", (42073, 42101), False, 'import hlp\n'), ((42299, 42361), 
'pygame.draw.rect', 'pygame.draw.rect', (['display', 'line_colour', '[500, 50, 750, 490]', '(2)'], {}), '(display, line_colour, [500, 50, 750, 490], 2)\n', (42315, 42361), False, 'import pygame\n'), ((42491, 42523), 'hlp.AddText', 'hlp.AddText', (['"""<NAME>"""', '(520, 70)'], {}), "('<NAME>', (520, 70))\n", (42502, 42523), False, 'import hlp\n'), ((42580, 42619), 'hlp.AddText', 'hlp.AddText', (['"""Event Queue:"""', '(520, 120)'], {}), "('Event Queue:', (520, 120))\n", (42591, 42619), False, 'import hlp\n'), ((43888, 43935), 'hlp.AddText', 'hlp.AddText', (['"""Efficiency O(nlogn):"""', '(520, 200)'], {}), "('Efficiency O(nlogn):', (520, 200))\n", (43899, 43935), False, 'import hlp\n'), ((44092, 44113), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (44111, 44113), False, 'import pygame\n'), ((45414, 45432), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (45430, 45432), False, 'import pygame\n'), ((48958, 49011), 'hlp.Button', 'hlp.Button', (['"""Insert Line"""', 'posX1', 'posY', '(130)', '(30)', 'None'], {}), "('Insert Line', posX1, posY, 130, 30, None)\n", (48968, 49011), False, 'import hlp\n'), ((49020, 49078), 'hlp.Button', 'hlp.Button', (['"""Insert Intersect"""', 'posX2', 'posY', '(160)', '(30)', 'None'], {}), "('Insert Intersect', posX2, posY, 160, 30, None)\n", (49030, 49078), False, 'import hlp\n'), ((49214, 49254), 'hlp.AddText', 'hlp.AddText', (['"""BF"""', '(180, 460)', 'hlp.white'], {}), "('BF', (180, 460), hlp.white)\n", (49225, 49254), False, 'import hlp\n'), ((49263, 49303), 'hlp.AddText', 'hlp.AddText', (['"""BO"""', '(330, 460)', 'hlp.white'], {}), "('BO', (330, 460), hlp.white)\n", (49274, 49303), False, 'import hlp\n'), ((49312, 49352), 'hlp.AddText', 'hlp.AddText', (['"""SH"""', '(480, 460)', 'hlp.white'], {}), "('SH', (480, 460), hlp.white)\n", (49323, 49352), False, 'import hlp\n'), ((49456, 49521), 'pygame.draw.line', 'pygame.draw.line', (['display', 'line_colour', '(100, 100)', '(100, 500)', '(2)'], {}), 
'(display, line_colour, (100, 100), (100, 500), 2)\n', (49472, 49521), False, 'import pygame\n'), ((49625, 49689), 'pygame.draw.line', 'pygame.draw.line', (['display', 'line_colour', '(50, 450)', '(650, 450)', '(2)'], {}), '(display, line_colour, (50, 450), (650, 450), 2)\n', (49641, 49689), False, 'import pygame\n'), ((50854, 50901), 'hlp.Button', 'hlp.Button', (['"""Exit"""', '(350)', '(500)', '(100)', '(30)', 'sys.exit'], {}), "('Exit', 350, 500, 100, 30, sys.exit)\n", (50864, 50901), False, 'import hlp\n'), ((50936, 50986), 'hlp.ButtonWithReturn', 'hlp.ButtonWithReturn', (['"""Back"""', '(650)', '(500)', '(100)', '(30)', '(1)'], {}), "('Back', 650, 500, 100, 30, 1)\n", (50956, 50986), False, 'import hlp\n'), ((51040, 51090), 'hlp.ButtonWithReturn', 'hlp.ButtonWithReturn', (['"""Next"""', '(500)', '(500)', '(100)', '(30)', '(1)'], {}), "('Next', 500, 500, 100, 30, 1)\n", (51060, 51090), False, 'import hlp\n'), ((51163, 51184), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (51182, 51184), False, 'import pygame\n'), ((55438, 55456), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (55454, 55456), False, 'import pygame\n'), ((56473, 56520), 'hlp.Button', 'hlp.Button', (['"""Start"""', 'posX2', 'posY', '(160)', '(30)', 'None'], {}), "('Start', posX2, posY, 160, 30, None)\n", (56483, 56520), False, 'import hlp\n'), ((56529, 56586), 'hlp.AddText', 'hlp.AddText', (['"""Lines: 10, 100, 1000"""', '(600, 20)', 'hlp.white'], {}), "('Lines: 10, 100, 1000', (600, 20), hlp.white)\n", (56540, 56586), False, 'import hlp\n'), ((56595, 56635), 'hlp.AddText', 'hlp.AddText', (['"""10"""', '(115, 460)', 'hlp.white'], {}), "('10', (115, 460), hlp.white)\n", (56606, 56635), False, 'import hlp\n'), ((56644, 56685), 'hlp.AddText', 'hlp.AddText', (['"""100"""', '(350, 460)', 'hlp.white'], {}), "('100', (350, 460), hlp.white)\n", (56655, 56685), False, 'import hlp\n'), ((56694, 56736), 'hlp.AddText', 'hlp.AddText', (['"""1000"""', '(650, 460)', 'hlp.white'], 
{}), "('1000', (650, 460), hlp.white)\n", (56705, 56736), False, 'import hlp\n'), ((56745, 56785), 'hlp.AddText', 'hlp.AddText', (['"""max"""', '(50, 100)', 'hlp.white'], {}), "('max', (50, 100), hlp.white)\n", (56756, 56785), False, 'import hlp\n'), ((56794, 56832), 'hlp.AddText', 'hlp.AddText', (['"""0"""', '(50, 460)', 'hlp.white'], {}), "('0', (50, 460), hlp.white)\n", (56805, 56832), False, 'import hlp\n'), ((56852, 56884), 'pygame.font.Font', 'pygame.font.Font', (['bitterfont', '(16)'], {}), '(bitterfont, 16)\n', (56868, 56884), False, 'import pygame\n'), ((56980, 57017), 'pygame.transform.rotate', 'pygame.transform.rotate', (['sidetext', '(90)'], {}), '(sidetext, 90)\n', (57003, 57017), False, 'import pygame\n'), ((57163, 57228), 'pygame.draw.line', 'pygame.draw.line', (['display', 'line_colour', '(100, 100)', '(100, 500)', '(2)'], {}), '(display, line_colour, (100, 100), (100, 500), 2)\n', (57179, 57228), False, 'import pygame\n'), ((57332, 57396), 'pygame.draw.line', 'pygame.draw.line', (['display', 'line_colour', '(50, 450)', '(650, 450)', '(2)'], {}), '(display, line_colour, (50, 450), (650, 450), 2)\n', (57348, 57396), False, 'import pygame\n'), ((58657, 58704), 'hlp.Button', 'hlp.Button', (['"""Exit"""', '(350)', '(500)', '(100)', '(30)', 'sys.exit'], {}), "('Exit', 350, 500, 100, 30, sys.exit)\n", (58667, 58704), False, 'import hlp\n'), ((58739, 58789), 'hlp.ButtonWithReturn', 'hlp.ButtonWithReturn', (['"""Back"""', '(650)', '(500)', '(100)', '(30)', '(1)'], {}), "('Back', 650, 500, 100, 30, 1)\n", (58759, 58789), False, 'import hlp\n'), ((58843, 58893), 'hlp.ButtonWithReturn', 'hlp.ButtonWithReturn', (['"""Next"""', '(500)', '(500)', '(100)', '(30)', '(1)'], {}), "('Next', 500, 500, 100, 30, 1)\n", (58863, 58893), False, 'import hlp\n'), ((58966, 58987), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (58985, 58987), False, 'import pygame\n'), ((7720, 7784), 'pygame.draw.line', 'pygame.draw.line', (['display', 'line_colour', '(50, 
pos)', '(450, pos)', '(1)'], {}), '(display, line_colour, (50, pos), (450, pos), 1)\n', (7736, 7784), False, 'import pygame\n'), ((7914, 7978), 'pygame.draw.line', 'pygame.draw.line', (['display', 'line_colour', '(pos, 50)', '(pos, 450)', '(1)'], {}), '(display, line_colour, (pos, 50), (pos, 450), 1)\n', (7930, 7978), False, 'import pygame\n'), ((8501, 8564), 'pygame.draw.line', 'pygame.draw.line', (['display', 'brutecolours[i]', 'line[0]', 'line[1]', '(1)'], {}), '(display, brutecolours[i], line[0], line[1], 1)\n', (8517, 8564), False, 'import pygame\n'), ((8662, 8696), 'hlp.AddText', 'hlp.AddText', (['line_name[i]', 'line[0]'], {}), '(line_name[i], line[0])\n', (8673, 8696), False, 'import hlp\n'), ((9896, 9942), 'pygame.draw.circle', 'pygame.draw.circle', (['display', 'hlp.red', 'point', '(3)'], {}), '(display, hlp.red, point, 3)\n', (9914, 9942), False, 'import pygame\n'), ((10101, 10132), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(False)'], {}), '(False)\n', (10125, 10132), False, 'import pygame\n'), ((10151, 10173), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (10171, 10173), False, 'import pygame\n'), ((11509, 11539), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(True)'], {}), '(True)\n', (11533, 11539), False, 'import pygame\n'), ((13852, 13880), 'hlp.Description', 'hlp.Description', (['dsb.bf_desc'], {}), '(dsb.bf_desc)\n', (13867, 13880), False, 'import hlp\n'), ((14820, 14865), 'hlp.AddText', 'hlp.AddText', (['val', '(o_x, o_y)', '(255, 255, 255)'], {}), '(val, (o_x, o_y), (255, 255, 255))\n', (14831, 14865), False, 'import hlp\n'), ((19058, 19122), 'pygame.draw.line', 'pygame.draw.line', (['display', 'line_colour', '(50, pos)', '(450, pos)', '(1)'], {}), '(display, line_colour, (50, pos), (450, pos), 1)\n', (19074, 19122), False, 'import pygame\n'), ((19252, 19316), 'pygame.draw.line', 'pygame.draw.line', (['display', 'line_colour', '(pos, 50)', '(pos, 450)', '(1)'], {}), '(display, line_colour, (pos, 
50), (pos, 450), 1)\n', (19268, 19316), False, 'import pygame\n'), ((19839, 19897), 'pygame.draw.line', 'pygame.draw.line', (['display', 'colours[i]', 'line[0]', 'line[1]', '(1)'], {}), '(display, colours[i], line[0], line[1], 1)\n', (19855, 19897), False, 'import pygame\n'), ((19991, 20025), 'hlp.AddText', 'hlp.AddText', (['line_name[i]', 'line[0]'], {}), '(line_name[i], line[0])\n', (20002, 20025), False, 'import hlp\n'), ((21660, 21716), 'pygame.draw.line', 'pygame.draw.line', (['display', 'hlp.red', '(x, 50)', '(x, 450)', '(1)'], {}), '(display, hlp.red, (x, 50), (x, 450), 1)\n', (21676, 21716), False, 'import pygame\n'), ((23467, 23498), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(False)'], {}), '(False)\n', (23491, 23498), False, 'import pygame\n'), ((23517, 23539), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (23537, 23539), False, 'import pygame\n'), ((24875, 24905), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(True)'], {}), '(True)\n', (24899, 24905), False, 'import pygame\n'), ((27684, 27712), 'hlp.Description', 'hlp.Description', (['dsb.bo_desc'], {}), '(dsb.bo_desc)\n', (27699, 27712), False, 'import hlp\n'), ((28782, 28827), 'hlp.AddText', 'hlp.AddText', (['val', '(o_x, o_y)', '(255, 255, 255)'], {}), '(val, (o_x, o_y), (255, 255, 255))\n', (28793, 28827), False, 'import hlp\n'), ((29523, 29568), 'hlp.AddText', 'hlp.AddText', (['val', '(o_x, o_y)', '(255, 255, 255)'], {}), '(val, (o_x, o_y), (255, 255, 255))\n', (29534, 29568), False, 'import hlp\n'), ((33717, 33781), 'pygame.draw.line', 'pygame.draw.line', (['display', 'line_colour', '(50, pos)', '(450, pos)', '(1)'], {}), '(display, line_colour, (50, pos), (450, pos), 1)\n', (33733, 33781), False, 'import pygame\n'), ((33911, 33975), 'pygame.draw.line', 'pygame.draw.line', (['display', 'line_colour', '(pos, 50)', '(pos, 450)', '(1)'], {}), '(display, line_colour, (pos, 50), (pos, 450), 1)\n', (33927, 33975), False, 'import pygame\n'), ((34498, 
34556), 'pygame.draw.line', 'pygame.draw.line', (['display', 'colours[i]', 'line[0]', 'line[1]', '(1)'], {}), '(display, colours[i], line[0], line[1], 1)\n', (34514, 34556), False, 'import pygame\n'), ((34650, 34684), 'hlp.AddText', 'hlp.AddText', (['line_name[i]', 'line[0]'], {}), '(line_name[i], line[0])\n', (34661, 34684), False, 'import hlp\n'), ((36373, 36429), 'pygame.draw.line', 'pygame.draw.line', (['display', 'hlp.red', '(x, 50)', '(x, 450)', '(1)'], {}), '(display, hlp.red, (x, 50), (x, 450), 1)\n', (36389, 36429), False, 'import pygame\n'), ((37949, 38002), 'pygame.draw.circle', 'pygame.draw.circle', (['display', 'hlp.white', 'firstPoint', '(3)'], {}), '(display, hlp.white, firstPoint, 3)\n', (37967, 38002), False, 'import pygame\n'), ((38071, 38102), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(False)'], {}), '(False)\n', (38095, 38102), False, 'import pygame\n'), ((38121, 38143), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (38141, 38143), False, 'import pygame\n'), ((39406, 39436), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(True)'], {}), '(True)\n', (39430, 39436), False, 'import pygame\n'), ((42220, 42248), 'hlp.Description', 'hlp.Description', (['dsb.sh_desc'], {}), '(dsb.sh_desc)\n', (42235, 42248), False, 'import hlp\n'), ((43264, 43303), 'hlp.AddText', 'hlp.AddText', (['val', '(o_x, o_y)', 'hlp.white'], {}), '(val, (o_x, o_y), hlp.white)\n', (43275, 43303), False, 'import hlp\n'), ((49890, 49960), 'pygame.draw.rect', 'pygame.draw.rect', (['display', 'hlp.button_colour', '(165, bPos - bf, 50, bf)'], {}), '(display, hlp.button_colour, (165, bPos - bf, 50, bf))\n', (49906, 49960), False, 'import pygame\n'), ((50234, 50304), 'pygame.draw.rect', 'pygame.draw.rect', (['display', 'hlp.button_colour', '(315, bPos - bo, 50, bo)'], {}), '(display, hlp.button_colour, (315, bPos - bo, 50, bo))\n', (50250, 50304), False, 'import pygame\n'), ((50694, 50764), 'pygame.draw.rect', 'pygame.draw.rect', 
(['display', 'hlp.button_colour', '(465, bPos - sh, 50, sh)'], {}), '(display, hlp.button_colour, (465, bPos - sh, 50, sh))\n', (50710, 50764), False, 'import pygame\n'), ((51123, 51154), 'hlp.Description', 'hlp.Description', (['dsb.effic_desc'], {}), '(dsb.effic_desc)\n', (51138, 51154), False, 'import hlp\n'), ((51832, 51845), 'math.log10', 'math.log10', (['i'], {}), '(i)\n', (51842, 51845), False, 'import math\n'), ((52059, 52072), 'math.log10', 'math.log10', (['i'], {}), '(i)\n', (52069, 52072), False, 'import math\n'), ((57427, 57488), 'pygame.draw.lines', 'pygame.draw.lines', (['display', '(62, 150, 81)', '(False)', 'bfpoints', '(4)'], {}), '(display, (62, 150, 81), False, bfpoints, 4)\n', (57444, 57488), False, 'import pygame\n'), ((57501, 57562), 'pygame.draw.lines', 'pygame.draw.lines', (['display', '(255, 255, 0)', '(False)', 'shpoints', '(4)'], {}), '(display, (255, 255, 0), False, shpoints, 4)\n', (57518, 57562), False, 'import pygame\n'), ((57575, 57634), 'pygame.draw.lines', 'pygame.draw.lines', (['display', '(255, 0, 0)', '(False)', 'bopoints', '(4)'], {}), '(display, (255, 0, 0), False, bopoints, 4)\n', (57592, 57634), False, 'import pygame\n'), ((57647, 57696), 'hlp.AddText', 'hlp.AddText', (['"""Brute Force"""', '(750, 150)', 'hlp.white'], {}), "('Brute Force', (750, 150), hlp.white)\n", (57658, 57696), False, 'import hlp\n'), ((57709, 57762), 'hlp.AddText', 'hlp.AddText', (['"""Bentley-Ottmann"""', '(750, 250)', 'hlp.white'], {}), "('Bentley-Ottmann', (750, 250), hlp.white)\n", (57720, 57762), False, 'import hlp\n'), ((57775, 57824), 'hlp.AddText', 'hlp.AddText', (['"""Shamos-Hoey"""', '(750, 350)', 'hlp.white'], {}), "('Shamos-Hoey', (750, 350), hlp.white)\n", (57786, 57824), False, 'import hlp\n'), ((57837, 57904), 'pygame.draw.line', 'pygame.draw.line', (['display', '(62, 150, 81)', '(720, 160)', '(740, 160)', '(4)'], {}), '(display, (62, 150, 81), (720, 160), (740, 160), 4)\n', (57853, 57904), False, 'import pygame\n'), ((57917, 57982), 
'pygame.draw.line', 'pygame.draw.line', (['display', '(255, 0, 0)', '(720, 260)', '(740, 260)', '(4)'], {}), '(display, (255, 0, 0), (720, 260), (740, 260), 4)\n', (57933, 57982), False, 'import pygame\n'), ((57995, 58062), 'pygame.draw.line', 'pygame.draw.line', (['display', '(255, 255, 0)', '(720, 360)', '(740, 360)', '(4)'], {}), '(display, (255, 255, 0), (720, 360), (740, 360), 4)\n', (58011, 58062), False, 'import pygame\n'), ((58076, 58127), 'hlp.AddText', 'hlp.AddText', (['"""n=10;100;1000"""', '(720, 390)', 'hlp.white'], {}), "('n=10;100;1000', (720, 390), hlp.white)\n", (58087, 58127), False, 'import hlp\n'), ((58926, 58957), 'hlp.Description', 'hlp.Description', (['dsb.effic_desc'], {}), '(dsb.effic_desc)\n', (58941, 58957), False, 'import hlp\n'), ((5794, 5807), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (5805, 5807), False, 'import pygame\n'), ((10604, 10660), 'pygame.draw.circle', 'pygame.draw.circle', (['display', 'hlp.white', 'clickedPos[0]', '(2)'], {}), '(display, hlp.white, clickedPos[0], 2)\n', (10622, 10660), False, 'import pygame\n'), ((10721, 10780), 'pygame.draw.line', 'pygame.draw.line', (['display', 'hlp.white', 'clickedPos[0]', 'pos', '(1)'], {}), '(display, hlp.white, clickedPos[0], pos, 1)\n', (10737, 10780), False, 'import pygame\n'), ((17132, 17145), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (17143, 17145), False, 'import pygame\n'), ((23970, 24026), 'pygame.draw.circle', 'pygame.draw.circle', (['display', 'hlp.white', 'clickedPos[0]', '(2)'], {}), '(display, hlp.white, clickedPos[0], 2)\n', (23988, 24026), False, 'import pygame\n'), ((24087, 24146), 'pygame.draw.line', 'pygame.draw.line', (['display', 'hlp.white', 'clickedPos[0]', 'pos', '(1)'], {}), '(display, hlp.white, clickedPos[0], pos, 1)\n', (24103, 24146), False, 'import pygame\n'), ((31771, 31784), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (31782, 31784), False, 'import pygame\n'), ((38501, 38557), 'pygame.draw.circle', 'pygame.draw.circle', (['display', 
'hlp.white', 'clickedPos[0]', '(2)'], {}), '(display, hlp.white, clickedPos[0], 2)\n', (38519, 38557), False, 'import pygame\n'), ((38618, 38677), 'pygame.draw.line', 'pygame.draw.line', (['display', 'hlp.white', 'clickedPos[0]', 'pos', '(1)'], {}), '(display, hlp.white, clickedPos[0], pos, 1)\n', (38634, 38677), False, 'import pygame\n'), ((45510, 45523), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (45521, 45523), False, 'import pygame\n'), ((55534, 55547), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (55545, 55547), False, 'import pygame\n'), ((6705, 6727), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (6725, 6727), False, 'import pygame\n'), ((18043, 18065), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (18063, 18065), False, 'import pygame\n'), ((22174, 22222), 'pygame.draw.circle', 'pygame.draw.circle', (['display', 'hlp.white', 'point', '(3)'], {}), '(display, hlp.white, point, 3)\n', (22192, 22222), False, 'import pygame\n'), ((32702, 32724), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (32722, 32724), False, 'import pygame\n'), ((37252, 37265), 'math.log10', 'math.log10', (['n'], {}), '(n)\n', (37262, 37265), False, 'import math\n'), ((46156, 46178), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (46176, 46178), False, 'import pygame\n'), ((56180, 56202), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (56200, 56202), False, 'import pygame\n'), ((22902, 22915), 'math.log10', 'math.log10', (['n'], {}), '(n)\n', (22912, 22915), False, 'import math\n'), ((46493, 46525), 'hlp.InsertNumber', 'hlp.InsertNumber', (['"""Line Number:"""'], {}), "('Line Number:')\n", (46509, 46525), False, 'import hlp\n'), ((47212, 47250), 'hlp.InsertNumber', 'hlp.InsertNumber', (['"""Intersect Number :"""'], {}), "('Intersect Number :')\n", (47228, 47250), False, 'import hlp\n'), ((47630, 47643), 'math.log10', 'math.log10', (['n'], {}), '(n)\n', (47640, 47643), False, 'import 
math\n'), ((48064, 48077), 'math.log10', 'math.log10', (['n'], {}), '(n)\n', (48074, 48077), False, 'import math\n')] |
import aiohttp
import asyncio
import async_timeout
import logging
from collections import namedtuple, deque
from .events import Events
from html.parser import HTMLParser
log = logging.getLogger(__name__)


class Parser(HTMLParser):
    """Minimal HTML parser that forwards character data to an optional callback.

    Start/end tags are only logged at debug level; ``handle_data`` passes the
    raw text chunk to ``self.callback`` when one has been registered via
    :meth:`set_callback`.
    """

    # Class-level default so handle_data() is safe even when set_callback()
    # was never called (previously this raised AttributeError).
    callback = None

    def handle_starttag(self, tag, attrs):
        log.debug("Encountered a start tag: {}".format(tag))

    def handle_endtag(self, tag):
        log.debug("Encountered an end tag : {}".format(tag))

    def handle_data(self, data):
        log.debug("Encountered some data : {}".format(data))
        if self.callback:
            self.callback(data)

    def set_callback(self, callback):
        """Register a callable invoked with every chunk of character data."""
        self.callback = callback
class E2210(object):
    ''' Moxa iologik E2210 module
        12 inputs and 8 outputs
    '''
    # Hardware channel counts of the E2210 unit.
    MAX_INPUTS = 12
    MAX_OUTPUTS = 8
    # CGI endpoints exposed by the module's built-in HTTP server.
    GET_PATH = 'getParam.cgi'
    SET_PATH = 'setParam.cgi'
    # System registers queried by the 'get_sys_info' command.
    SYS_INFO = ['DATE', 'TIME', 'IP', 'LOC', 'DESC',
                'FWR_V', 'MOD_NAME', 'SN_NUM', 'MAC_ADDR']
    def __init__(self, loop,
                 url=None,
                 events=None,
                 line=0,
                 addr=1,
                 handle_events=None):
        """Build the command table and start polling the module's HTTP server.

        :param loop: asyncio event loop reference (polling itself is scheduled
            via asyncio.ensure_future on the running loop).
        :param url: base URL of the module, e.g. 'http://192.168.127.254'.
        :param events: Events container; a fresh one is created when omitted.
        :param line: line number used when naming digital-input events.
        :param addr: module address used when naming digital-input events.
        :param handle_events: callable handed the events by processor().
        """
        self.loop = loop or None
        self.url = url
        self.line = line
        self.addr = addr
        self.events = events or Events()
        self.parser = Parser()
        self.parser.set_callback(self.received)
        self.handle_events = handle_events
        # connection stays None until the first request settles.
        self.connection = None
        self.changed = True
        self.fail = True
        # Factory for queue entries: (name, 'get'/'set', query string, done flag).
        self.command = namedtuple('Command', 'name method params completed')
        # Local mirror of module state, updated as replies arrive in received().
        self.setting = {'System': {},
                        'DIMode': ['DI' for i in range(self.MAX_INPUTS)],
                        'DIStatus': [0 for i in range(self.MAX_INPUTS)],
                        'DIFilter': [200 for i in range(self.MAX_INPUTS)],
                        'DOMode': ['DO' for i in range(self.MAX_OUTPUTS)],
                        'DOStatus': [1 for i in range(self.MAX_OUTPUTS)]
                        }
        # Pre-built query strings for every supported command.
        self.CMDS = {
            'get_sys_info': ('get',
                '&'.join(['{}=?'.format(i) for i in self.SYS_INFO])),
            'get_di_mode': ('get',
                '&'.join(['DIMode_{:02d}=?'.format(i) for i in range(self.MAX_INPUTS)])),
            'set_di_mode': ('set',
                '&'.join(['DIMode_{:02d}=0'.format(i) for i in range(self.MAX_INPUTS)])),
            'get_di_status': ('get',
                '&'.join(['DIStatus_{:02d}=?'.format(i) for i in range(self.MAX_INPUTS)])),
            'set_di_filter_low': ('set',
                '&'.join(['DIFilter_{:02d}={}'.format(i, self.setting['DIFilter'][i]) for i in range(0, self.MAX_OUTPUTS//2)])),
            'set_di_filter_high': ('set',
                '&'.join(['DIFilter_{:02d}={}'.format(i, self.setting['DIFilter'][i]) for i in range(self.MAX_OUTPUTS//2, self.MAX_OUTPUTS)])),
            'get_do_mode': ('get',
                '&'.join(['DOMode_{:02d}=?'.format(i) for i in range(self.MAX_OUTPUTS)])),
            'set_do_mode': ('set',
                '&'.join(['DOMode_{:02d}=0'.format(i) for i in range(self.MAX_OUTPUTS)])),
            'get_do_status': ('get',
                '&'.join(['DOStatus_{:02d}=?'.format(i) for i in range(self.MAX_OUTPUTS)])),
            'set_do_status': ('set',
                '&'.join(['DOStatus_{:02d}=1'.format(i) for i in range(self.MAX_OUTPUTS)])),
        }
        # Seed the queue with every command once for the initial sync.
        self.cmd_deque = deque()
        for name in self.CMDS:
            self.append_cmd(name)
        # start to poll http server
        self.restart_poll()
    def poll(self):
        """Placeholder; continuous polling is driven by loop_polling()."""
        pass
    def do_output(self, addr, which, action, deadtime):
        """Queue (at the front) a command switching digital output ``which``.

        ``action == 'Activate'`` writes status 0, anything else writes 1.
        ``addr`` and ``deadtime`` are accepted but unused here.
        """
        if which >= self.MAX_OUTPUTS or which < 0:
            return
        status = (action == 'Activate' and 0 or 1)
        params = 'DOStatus_{:02d}={}'.format(which, status)
        self.cmd_deque.appendleft(self.command('do_outputs',
                                               'set', params, False))
    def append_cmd(self, cmd_name=None):
        """Append the named pre-built command to the work queue, if known."""
        cmd = self.CMDS.get(cmd_name)
        if cmd:
            self.cmd_deque.append(self.command(cmd_name,
                                               cmd[0], cmd[1], False))
    def received(self, data):
        """Parse one 'REGISTER=VALUE' chunk from the parser and update state.

        DIStatus changes additionally emit an 'Auxiliary Input' event named
        MXI_<line>_<addr>_<channel>; unknown registers are only logged.
        """
        log.debug("Encountered some data : {}".format(data))
        l = data.split('=')
        if len(l) != 2:
            return
        reg = l[0]
        val = l[1]
        if reg in self.SYS_INFO:
            self.setting['System'][reg] = val
        elif reg.startswith('DIMode'):
            n = int(reg.split('_')[1])
            if n < 0 or n >= self.MAX_INPUTS:
                return
            self.setting['DIMode'][n] = (val == '0' and 'DI' or 'COUNTER')
        elif reg.startswith('DIStatus'):
            n = int(reg.split('_')[1])
            if n < 0 or n >= self.MAX_INPUTS:
                return
            self.setting['DIStatus'][n] = (val == '0' and 'ALARM' or 'NORMAL')
            event_type = 'Auxiliary Input'
            event = 'MXI_{}_{}_{}'.format(self.line, self.addr, n)
            condition = (val == '0' and True or False)
            self.events.append(event, event_type, condition)
        elif reg.startswith('DIFilter'):
            n = int(reg.split('_')[1])
            if n < 0 or n >= self.MAX_INPUTS:
                return
            self.setting['DIFilter'][n] = int(val)
        elif reg.startswith('DOMode'):
            n = int(reg.split('_')[1])
            if n < 0 or n >= self.MAX_OUTPUTS:
                return
            self.setting['DOMode'][n] = (val == '0' and 'DO' or 'PULSE')
        elif reg.startswith('DOStatus'):
            n = int(reg.split('_')[1])
            if n < 0 or n >= self.MAX_OUTPUTS:
                return
            self.setting['DOStatus'][n] = (val == '0' and 'OFF' or 'ON')
        else:
            log.warn("Do not care it: {}".format(data))
    def processor(self):
        """Hand accumulated events to the registered handler, if any."""
        if not self.events:
            return
        if callable(self.handle_events):
            return self.handle_events(self.events)
        else:
            log.warn('No master to processor {}'.format(self.events))
    def restart_poll(self):
        """Schedule the polling coroutine on the running event loop."""
        asyncio.ensure_future(self.loop_polling())
    async def _fetch(self, params, method='get'):
        """GET the matching CGI endpoint and feed the reply to the parser.

        Replies with status outside 200..300 are silently ignored; a 20 s
        timeout bounds each request.
        """
        endpoint = (method == 'get' and self.GET_PATH or self.SET_PATH)
        async with aiohttp.ClientSession() as session:
            with async_timeout.timeout(20):
                async with session.get('{}/{}?{}'.format(self.url,
                                                        endpoint,
                                                        params)) as response:
                    if response.status >= 200 and response.status <= 300:
                        self.parser.feed(await response.text())
    async def _request(self):
        """Send the next queued command; refill with status reads when empty."""
        try:
            self.cmd = self.cmd_deque.popleft()
        except IndexError:
            self.append_cmd('get_di_status')
            self.append_cmd('get_do_status')
            self.cmd = self.cmd_deque.popleft()
        log.debug('Request: {}'.format(self.cmd.name))
        x = await self._fetch(self.cmd.params,
                              method=self.cmd.method)
    async def loop_polling(self):
        """Poll the module forever; requeue the command and back off 10 s on
        failure, otherwise continue at a 0.5 s cadence."""
        try:
            while True:
                try:
                    await self._request()
                    self.connection = True
                    self.processor()
                except Exception as err:
                    log.error("Cmd {} failed, with Error: {} "
                              "Will retry in {} seconds"
                              .format(self.cmd.name, err, 10))
                    self.connection = False
                if self.connection is not True:
                    self.changed = True
                    self.cmd_deque.append(self.cmd)
                    self.fail = True
                    await asyncio.sleep(10)
                else:
                    self.changed = False
                    self.fail = False
                    log.info("{} Successfully requested. ".format(self.cmd.name))
                # poll connection state every 1s
                await asyncio.sleep(0.5)
        except asyncio.CancelledError:
            self.connection = False
        except Exception as err:
            log.error("Failed to access http server with Error: {}".format(err))
            self.connection = False
| [
"logging.getLogger",
"aiohttp.ClientSession",
"collections.namedtuple",
"collections.deque",
"async_timeout.timeout",
"asyncio.sleep"
] | [((178, 205), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (195, 205), False, 'import logging\n'), ((1520, 1573), 'collections.namedtuple', 'namedtuple', (['"""Command"""', '"""name method params completed"""'], {}), "('Command', 'name method params completed')\n", (1530, 1573), False, 'from collections import namedtuple, deque\n'), ((3557, 3564), 'collections.deque', 'deque', ([], {}), '()\n', (3562, 3564), False, 'from collections import namedtuple, deque\n'), ((6466, 6489), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (6487, 6489), False, 'import aiohttp\n'), ((6519, 6544), 'async_timeout.timeout', 'async_timeout.timeout', (['(20)'], {}), '(20)\n', (6540, 6544), False, 'import async_timeout\n'), ((8269, 8287), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (8282, 8287), False, 'import asyncio\n'), ((7997, 8014), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (8010, 8014), False, 'import asyncio\n')] |
from ..models import Classify, Fields, Asset, AssetBind, ClassifyBind
from django.db.models import Q
from collections import OrderedDict
from django.forms.models import model_to_dict
class OperateInstance:
    """Stateless lookup helpers around the Classify/Asset models and the
    ClassifyBind/AssetBind relation tables.

    Every helper is a ``@staticmethod`` query returning the matching model
    instance / queryset on success, and ``None`` (or ``[]`` for the
    aggregation helpers) when nothing matches.
    """

    @staticmethod
    def get_classify(id):
        """Return the Classify row with primary key ``id``, or None."""
        return Classify.objects.filter(id=id).first()

    @staticmethod
    def get_children_classify(p_tid):
        """Return the child Classify rows whose ``pid`` equals ``p_tid``."""
        children_classify = Classify.objects.filter(pid=p_tid)
        if children_classify:
            return children_classify
        return None

    @staticmethod
    def get_parent_classify_classify(pid):
        """Return the parent Classify row with primary key ``pid``, or None."""
        parent_classify_classify_obj = Classify.objects.filter(id=pid).first()
        if parent_classify_classify_obj:
            return parent_classify_classify_obj
        return None

    @staticmethod
    def get_parent_classify_bind(pid):
        """Return ClassifyBind rows whose parent classify id is ``pid``."""
        parent_bind_obj = ClassifyBind.objects.filter(parent_classify_id=pid)
        if parent_bind_obj:
            return parent_bind_obj
        return None

    @staticmethod
    def get_child_classify_bind(cid):
        """Return ClassifyBind rows whose child classify id is ``cid``."""
        child_classify_obj = ClassifyBind.objects.filter(child_classify_id=cid)
        if child_classify_obj:
            return child_classify_obj
        return None

    @staticmethod
    def get_classify_bind(pid, cid):
        """Return the ClassifyBind row linking parent ``pid`` to child ``cid``."""
        classify_bind_obj = ClassifyBind.objects.filter(
            parent_classify_id=pid, child_classify_id=cid
        ).first()
        if classify_bind_obj:
            return classify_bind_obj
        return None

    @staticmethod
    def get_abs_asset_bind(p_id, c_id):
        """Return the AssetBind row linking parent asset ``p_id`` to child
        asset ``c_id``, or None."""
        asset_bind = AssetBind.objects.filter(
            parent_asset_id=p_id, child_asset_id=c_id
        ).first()
        if asset_bind:
            return asset_bind
        return None

    @staticmethod
    def get_asset_bind(t_id):
        """Return AssetBind rows for the classify-bind id ``t_id``."""
        asset_bind = AssetBind.objects.filter(classify_bind_id=t_id)
        if asset_bind:
            return asset_bind
        return None

    @staticmethod
    def get_parent_asset_bind(t_id, p_id):
        """Return AssetBind rows for bind ``t_id`` whose parent asset is ``p_id``."""
        asset_bind = AssetBind.objects.filter(
            classify_bind=t_id, parent_asset_id=p_id
        )
        if asset_bind:
            return asset_bind
        return None

    @staticmethod
    def get_child_asset_bind(t_id, c_id):
        """Return AssetBind rows for bind ``t_id`` whose child asset is ``c_id``."""
        asset_bind = AssetBind.objects.filter(
            classify_bind_id=t_id, child_asset_id=c_id
        )
        if asset_bind:
            return asset_bind
        return None

    @staticmethod
    def get_c_asset_bind(c_id):
        """Return AssetBind rows whose child asset id is ``c_id``."""
        asset_bind = AssetBind.objects.filter(child_asset_id=c_id)
        if asset_bind:
            return asset_bind
        return None

    @staticmethod
    def get_asset(id):
        """Return the Asset row with primary key ``id``, or None."""
        asset_obj = Asset.objects.filter(id=id).first()
        if asset_obj:
            return asset_obj
        return None

    @staticmethod
    def get_classify_asset(id, cid):
        """Return the Asset with pk ``id`` belonging to classify ``cid``, or None.

        NOTE(review): this filters on ``classify_classify_id`` while the other
        helpers use ``classify_id`` -- verify against the Asset model fields.
        """
        asset_obj = Asset.objects.filter(id=id, classify_classify_id=cid).first()
        if asset_obj:
            return asset_obj
        return None

    @staticmethod
    def get_all_asset(s_id):
        """Return every Asset row under classify ``s_id``, or None."""
        asset_all_obj = Asset.objects.filter(classify_id=s_id)
        if asset_all_obj:
            return asset_all_obj
        return None

    @staticmethod
    def get_classify_field(c_id):
        """Return the Fields row attached to classify ``c_id``, or None."""
        field_obj = Fields.objects.filter(classify_id=c_id).first()
        if field_obj:
            return field_obj
        return None

    @staticmethod
    def get_all_field_map(c_id):
        """Return the Classify row ``c_id`` as a ``values()`` queryset, or None."""
        field_all = Classify.objects.filter(id=c_id).values()
        if field_all:
            return field_all
        return None

    @staticmethod
    def get_asset_bind_exists(c_id):
        """Return AssetBind rows where ``c_id`` appears as parent OR child asset."""
        field_all = AssetBind.objects.filter(
            Q(parent_asset_id=c_id) | Q(child_asset_id=c_id)
        )
        if field_all:
            return field_all
        return None

    @staticmethod
    def get_p_bind_asset(id, pid):
        """Collect, for parent asset ``id`` under classify ``pid``, every bound
        child classify together with its bound child assets.

        Returns a list of OrderedDicts carrying classify metadata, the child
        classify's field definition, and the bound assets as plain dicts.
        """
        classify_bind = OperateInstance.get_parent_classify_bind(pid)
        l_c = []
        if classify_bind:
            for t_r in classify_bind:
                data = OrderedDict()
                asset_re_all = OperateInstance.get_parent_asset_bind(t_r.id, id)
                data['classify_name'] = t_r.child_classify.name
                data['classify_id'] = t_r.child_classify.id
                data['parent_classify_name'] = t_r.child_classify.pid.name
                data['fields'] = t_r.child_classify.fields.fields
                if asset_re_all:
                    data['data'] = [model_to_dict(i.child_asset) for i in asset_re_all]
                else:
                    data['data'] = []
                l_c.append(data)
            return l_c
        return []

    @staticmethod
    def get_c_bind_asset(id, cid):
        """Collect, for child asset ``id`` under classify ``cid``, every bound
        parent classify together with its bound parent assets.

        Bug fix: the ``@staticmethod`` decorator was missing, so calling this
        through an instance would have bound the instance to ``id``.
        """
        classify_bind = OperateInstance.get_child_classify_bind(cid)
        l_c = []
        if classify_bind:
            for t_r in classify_bind:
                asset_re_all = OperateInstance.get_child_asset_bind(t_r.id, id)
                if not asset_re_all:
                    continue
                data = OrderedDict()
                data['classify_name'] = t_r.parent_classify.name
                data['classify_id'] = t_r.parent_classify.id
                data['parent_classify_name'] = t_r.parent_classify.pid.name
                data['fields'] = t_r.parent_classify.fields.fields
                if asset_re_all:
                    data['data'] = [model_to_dict(i.parent_asset) for i in asset_re_all]
                else:
                    data['data'] = []
                l_c.append(data)
            return l_c
        return []

    @staticmethod
    def get_p_classify_bind(pid):
        """Return ClassifyBind rows with parent classify ``pid``, or []."""
        parent_bind_obj = ClassifyBind.objects.filter(parent_classify_id=pid)
        if parent_bind_obj:
            return parent_bind_obj
        return []

    @staticmethod
    def get_c_classify_bind(cid):
        """Return ClassifyBind rows with child classify ``cid``, or [].

        Bug fix: the ``@staticmethod`` decorator was missing here as well.
        """
        parent_bind_obj = ClassifyBind.objects.filter(child_classify_id=cid)
        if parent_bind_obj:
            return parent_bind_obj
        return []
| [
"django.db.models.Q",
"django.forms.models.model_to_dict",
"collections.OrderedDict"
] | [((4506, 4529), 'django.db.models.Q', 'Q', ([], {'parent_asset_id': 'c_id'}), '(parent_asset_id=c_id)\n', (4507, 4529), False, 'from django.db.models import Q\n'), ((4532, 4554), 'django.db.models.Q', 'Q', ([], {'child_asset_id': 'c_id'}), '(child_asset_id=c_id)\n', (4533, 4554), False, 'from django.db.models import Q\n'), ((4924, 4937), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4935, 4937), False, 'from collections import OrderedDict\n'), ((5979, 5992), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5990, 5992), False, 'from collections import OrderedDict\n'), ((5353, 5381), 'django.forms.models.model_to_dict', 'model_to_dict', (['i.child_asset'], {}), '(i.child_asset)\n', (5366, 5381), False, 'from django.forms.models import model_to_dict\n'), ((6331, 6360), 'django.forms.models.model_to_dict', 'model_to_dict', (['i.parent_asset'], {}), '(i.parent_asset)\n', (6344, 6360), False, 'from django.forms.models import model_to_dict\n')] |
import os
import torch
from modeling.unet import *
from modeling.bAttenUnet import MDecoderUNet, MMultiBAUNet, MMultiBUNet
def build_model(args, nchannels, nclass, model='unet'):
    """Factory for the segmentation networks used in this project.

    Parameters
    ----------
    args : namespace carrying ``dropout`` and ``drop_p`` settings
    nchannels : number of input channels
    nclass : number of output classes
    model : architecture key; an unknown key raises ``NotImplementedError``
    """
    # Keyword sets shared by the variants: every network gets the
    # channel/class/bilinear settings, most additionally take the dropout
    # configuration from ``args``.
    base = dict(n_channels=nchannels, n_classes=nclass, bilinear=True)
    with_dropout = dict(base, dropout=args.dropout, dropp=args.drop_p)

    registry = {
        'unet': (UNet, with_dropout),
        'batten-unet': (MDecoderUNet, dict(base, attention='attn')),
        'prob-unet': (ProbUNet, with_dropout),
        'multi-unet': (MultiUNet, with_dropout),
        'decoder-unet': (DecoderUNet, with_dropout),
        'multi-bunet': (MMultiBUNet, with_dropout),
        'multi-atten-bunet': (MMultiBAUNet, with_dropout),
        'attn-unet': (DecoderUNet, dict(with_dropout, attention='attn')),
        'pattn-unet': (DecoderUNet, dict(with_dropout, attention='prob')),
        'pattn-unet-al': (DecoderUNet, dict(with_dropout, attention='prob-al')),
        # single-head variant: the output channel count is fixed at one
        'battn-unet-one': (MDecoderUNet, dict(base, n_classes=1, attention='attn')),
    }

    if model not in registry:
        raise NotImplementedError
    net_cls, net_kwargs = registry[model]
    return net_cls(**net_kwargs)
def build_transfer_learning_model(args, nchannels, nclass, pretrained, model='unet'):
    """Build a UNet whose encoder/decoder trunks are copied from a
    pretrained BraTS checkpoint.

    :param args: namespace carrying ``dropout`` and ``drop_p`` settings
    :param nchannels: number of input channels (must match the checkpoint)
    :param nclass: number of classes of the new output head
    :param pretrained: path to the pretrained model parameters
    :param model: kept for interface symmetry with build_model (unused)
    :raises RuntimeError: when ``pretrained`` does not point to a file
    """
    # Hard-coded class count (3) matches the BraTS pretraining task.
    # Bug fix: honour the ``nchannels`` parameter instead of silently
    # reading ``args.nchannels`` and ignoring the argument.
    pre_model = UNet(
        n_channels=nchannels,
        n_classes=3,
        bilinear=True,
        dropout=args.dropout,
        dropp=args.drop_p
    )
    if not os.path.isfile(pretrained):
        raise RuntimeError("no checkpoint found at {}".format(pretrained))
    params = torch.load(pretrained)
    pre_model.load_state_dict(params['state_dict'])
    m = UNet(
        n_channels=nchannels,
        n_classes=nclass,
        bilinear=pre_model.bilinear,
        dropout=args.dropout,
        dropp=args.drop_p
    )
    assert nchannels == pre_model.n_channels
    # Share the encoder and decoder trunks; only the output head is left
    # freshly initialised for the new task.
    m.inc = pre_model.inc
    m.down1 = pre_model.down1
    m.down2 = pre_model.down2
    m.down3 = pre_model.down3
    m.down4 = pre_model.down4
    m.up1 = pre_model.up1
    m.up2 = pre_model.up2
    m.up3 = pre_model.up3
    m.up4 = pre_model.up4
    return m
| [
"modeling.bAttenUnet.MDecoderUNet",
"modeling.bAttenUnet.MMultiBAUNet",
"modeling.bAttenUnet.MMultiBUNet",
"torch.load",
"os.path.isfile"
] | [((3316, 3338), 'torch.load', 'torch.load', (['pretrained'], {}), '(pretrained)\n', (3326, 3338), False, 'import torch\n'), ((3200, 3226), 'os.path.isfile', 'os.path.isfile', (['pretrained'], {}), '(pretrained)\n', (3214, 3226), False, 'import os\n'), ((439, 528), 'modeling.bAttenUnet.MDecoderUNet', 'MDecoderUNet', ([], {'n_channels': 'nchannels', 'n_classes': 'nclass', 'bilinear': '(True)', 'attention': '"""attn"""'}), "(n_channels=nchannels, n_classes=nclass, bilinear=True,\n attention='attn')\n", (451, 528), False, 'from modeling.bAttenUnet import MDecoderUNet, MMultiBAUNet, MMultiBUNet\n'), ((1302, 1414), 'modeling.bAttenUnet.MMultiBUNet', 'MMultiBUNet', ([], {'n_channels': 'nchannels', 'n_classes': 'nclass', 'bilinear': '(True)', 'dropout': 'args.dropout', 'dropp': 'args.drop_p'}), '(n_channels=nchannels, n_classes=nclass, bilinear=True, dropout=\n args.dropout, dropp=args.drop_p)\n', (1313, 1414), False, 'from modeling.bAttenUnet import MDecoderUNet, MMultiBAUNet, MMultiBUNet\n'), ((1534, 1647), 'modeling.bAttenUnet.MMultiBAUNet', 'MMultiBAUNet', ([], {'n_channels': 'nchannels', 'n_classes': 'nclass', 'bilinear': '(True)', 'dropout': 'args.dropout', 'dropp': 'args.drop_p'}), '(n_channels=nchannels, n_classes=nclass, bilinear=True, dropout\n =args.dropout, dropp=args.drop_p)\n', (1546, 1647), False, 'from modeling.bAttenUnet import MDecoderUNet, MMultiBAUNet, MMultiBUNet\n'), ((2536, 2621), 'modeling.bAttenUnet.MDecoderUNet', 'MDecoderUNet', ([], {'n_channels': 'nchannels', 'n_classes': '(1)', 'bilinear': '(True)', 'attention': '"""attn"""'}), "(n_channels=nchannels, n_classes=1, bilinear=True, attention='attn'\n )\n", (2548, 2621), False, 'from modeling.bAttenUnet import MDecoderUNet, MMultiBAUNet, MMultiBUNet\n')] |
import KeyPressModule as kp
from djitellopy import tello
import time
import cv2
global img  # NOTE(review): no-op at module level; `img` is simply a module global set in the main loop
kp.init()  # initialise the keyboard listener
skynet = tello.Tello()  # module-wide drone connection handle
skynet.connect()
print(skynet.get_battery())  # battery sanity check before flying
skynet.streamon()  # start the video feed consumed below
def getKeyboardInput():
    """Translate the currently pressed keys into a Tello RC command.

    Returns ``[left_right, fwd_back, up_down, yaw]`` velocities; as side
    effects, 't' triggers takeoff, 'l' lands (then pauses 3 s) and 's'
    saves a snapshot of the global ``img`` frame (then pauses 1 s).
    """
    speed = 50

    def axis(key_a, value_a, key_b, value_b):
        # First matching key wins, mirroring the original if/elif pairs.
        if kp.getKey(key_a):
            return value_a
        if kp.getKey(key_b):
            return value_b
        return 0

    left_right = axis("LEFT", -speed, "RIGHT", speed)
    fwd_back = axis("UP", speed, "DOWN", -speed)
    up_down = axis("u", speed, "d", -speed)
    yaw = axis("c", speed, "a", -speed)

    if kp.getKey("t"):
        skynet.takeoff()
    if kp.getKey("l"):
        skynet.land()
        time.sleep(3)
    if kp.getKey('s'):
        cv2.imwrite(f'Resources/Images/{time.time()}.jpg', img)
        time.sleep(1)

    return [left_right, fwd_back, up_down, yaw]
while True:
keyVals = getKeyboardInput()
skynet.send_rc_control(keyVals[0], keyVals[1], keyVals[2], keyVals[3])
img = skynet.get_frame_read().frame
#timg = cv2.resize(img, (360, 240))
cv2.imshow("Image", img)
cv2.waitKey(1) | [
"time.sleep",
"cv2.imshow",
"KeyPressModule.init",
"djitellopy.tello.Tello",
"time.time",
"cv2.waitKey",
"KeyPressModule.getKey"
] | [((92, 101), 'KeyPressModule.init', 'kp.init', ([], {}), '()\n', (99, 101), True, 'import KeyPressModule as kp\n'), ((112, 125), 'djitellopy.tello.Tello', 'tello.Tello', ([], {}), '()\n', (123, 125), False, 'from djitellopy import tello\n'), ((270, 287), 'KeyPressModule.getKey', 'kp.getKey', (['"""LEFT"""'], {}), "('LEFT')\n", (279, 287), True, 'import KeyPressModule as kp\n'), ((349, 364), 'KeyPressModule.getKey', 'kp.getKey', (['"""UP"""'], {}), "('UP')\n", (358, 364), True, 'import KeyPressModule as kp\n'), ((425, 439), 'KeyPressModule.getKey', 'kp.getKey', (['"""u"""'], {}), "('u')\n", (434, 439), True, 'import KeyPressModule as kp\n'), ((497, 511), 'KeyPressModule.getKey', 'kp.getKey', (['"""c"""'], {}), "('c')\n", (506, 511), True, 'import KeyPressModule as kp\n'), ((569, 583), 'KeyPressModule.getKey', 'kp.getKey', (['"""t"""'], {}), "('t')\n", (578, 583), True, 'import KeyPressModule as kp\n'), ((609, 623), 'KeyPressModule.getKey', 'kp.getKey', (['"""l"""'], {}), "('l')\n", (618, 623), True, 'import KeyPressModule as kp\n'), ((662, 676), 'KeyPressModule.getKey', 'kp.getKey', (['"""s"""'], {}), "('s')\n", (671, 676), True, 'import KeyPressModule as kp\n'), ((998, 1022), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'img'], {}), "('Image', img)\n", (1008, 1022), False, 'import cv2\n'), ((1027, 1041), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1038, 1041), False, 'import cv2\n'), ((310, 328), 'KeyPressModule.getKey', 'kp.getKey', (['"""RIGHT"""'], {}), "('RIGHT')\n", (319, 328), True, 'import KeyPressModule as kp\n'), ((386, 403), 'KeyPressModule.getKey', 'kp.getKey', (['"""DOWN"""'], {}), "('DOWN')\n", (395, 403), True, 'import KeyPressModule as kp\n'), ((461, 475), 'KeyPressModule.getKey', 'kp.getKey', (['"""d"""'], {}), "('d')\n", (470, 475), True, 'import KeyPressModule as kp\n'), ((533, 547), 'KeyPressModule.getKey', 'kp.getKey', (['"""a"""'], {}), "('a')\n", (542, 547), True, 'import KeyPressModule as kp\n'), ((640, 653), 'time.sleep', 
'time.sleep', (['(3)'], {}), '(3)\n', (650, 653), False, 'import time\n'), ((749, 762), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (759, 762), False, 'import time\n'), ((718, 729), 'time.time', 'time.time', ([], {}), '()\n', (727, 729), False, 'import time\n')] |
from foolbox import zoo
import numpy as np
import foolbox
import sys
import pytest
from foolbox.zoo.model_loader import ModelLoader
from os.path import join, dirname
@pytest.fixture(autouse=True)
def unload_foolbox_model_module():
    """Drop any cached 'foolbox_model'/'model' modules before every test.

    Forces each test to import the zoo model from scratch, keeping tests
    atomic and free of cross-test side effects.
    """
    for name in ("foolbox_model", "model"):
        sys.modules.pop(name, None)
# (model repo URL/path, expected input shape) pairs fed to the parametrized test below.
test_data = [
    # private repo won't work on travis
    # ('https://github.com/bethgelab/AnalysisBySynthesis.git', (1, 28, 28)),
    # ('https://github.com/bethgelab/convex_adversarial.git', (1, 28, 28)),
    # ('https://github.com/bethgelab/mnist_challenge.git', 784)
    (join("file://", dirname(__file__), "data/model_repo"), (3, 224, 224))
]
@pytest.mark.parametrize("url, dim", test_data)
def test_loading_model(url, dim):
    """End-to-end check: fetch a zoo model and classify a random image."""
    model = zoo.get_model(url)

    # Random dummy input with the shape the model expects.
    dummy = np.zeros(dim, dtype=np.float32)
    dummy[:] = np.random.randn(*dummy.shape)

    logits = model.forward_one(dummy)
    probabilities = foolbox.utils.softmax(logits)
    predicted_class = np.argmax(logits)

    # Sanity checks: a valid class index and a (near-)normalised distribution.
    assert predicted_class >= 0
    assert np.sum(probabilities) >= 0.9999

    # TODO: delete fmodel
def test_non_default_module_throws_error():
    # Only the default loader key is registered; any other key must fail.
    with pytest.raises(RuntimeError):
        ModelLoader.get(key="other")
| [
"numpy.argmax",
"foolbox.zoo.model_loader.ModelLoader.get",
"pytest.mark.parametrize",
"numpy.zeros",
"numpy.sum",
"pytest.raises",
"os.path.dirname",
"foolbox.utils.softmax",
"pytest.fixture",
"foolbox.zoo.get_model",
"numpy.random.randn"
] | [((169, 197), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (183, 197), False, 'import pytest\n'), ((853, 899), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""url, dim"""', 'test_data'], {}), "('url, dim', test_data)\n", (876, 899), False, 'import pytest\n'), ((967, 985), 'foolbox.zoo.get_model', 'zoo.get_model', (['url'], {}), '(url)\n', (980, 985), False, 'from foolbox import zoo\n'), ((1022, 1053), 'numpy.zeros', 'np.zeros', (['dim'], {'dtype': 'np.float32'}), '(dim, dtype=np.float32)\n', (1030, 1053), True, 'import numpy as np\n'), ((1065, 1090), 'numpy.random.randn', 'np.random.randn', (['*x.shape'], {}), '(*x.shape)\n', (1080, 1090), True, 'import numpy as np\n'), ((1166, 1195), 'foolbox.utils.softmax', 'foolbox.utils.softmax', (['logits'], {}), '(logits)\n', (1187, 1195), False, 'import foolbox\n'), ((1218, 1235), 'numpy.argmax', 'np.argmax', (['logits'], {}), '(logits)\n', (1227, 1235), True, 'import numpy as np\n'), ((1299, 1320), 'numpy.sum', 'np.sum', (['probabilities'], {}), '(probabilities)\n', (1305, 1320), True, 'import numpy as np\n'), ((1413, 1440), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1426, 1440), False, 'import pytest\n'), ((1450, 1478), 'foolbox.zoo.model_loader.ModelLoader.get', 'ModelLoader.get', ([], {'key': '"""other"""'}), "(key='other')\n", (1465, 1478), False, 'from foolbox.zoo.model_loader import ModelLoader\n'), ((794, 811), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (801, 811), False, 'from os.path import join, dirname\n')] |
"""
Copyright 2015-2017 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from unittest import TestCase
from unittest.mock import (
Mock,
patch,
)
import requests.exceptions
from requests import Response
from nistbeacon import (
NistBeacon,
NistBeaconValue,
)
from tests.test_data.nist_records import local_record_json_db
class TestNistBeacon(TestCase):
    # Fixtures: timestamps of known records in the local JSON test database,
    # converted once per class into NistBeaconValue objects. All HTTP traffic
    # below is mocked via @patch('requests.get').
    @classmethod
    def setUpClass(cls):
        cls.init_timestamp = 1378395540
        cls.expected_first = local_record_json_db[cls.init_timestamp]
        cls.expected_first_next = local_record_json_db[cls.init_timestamp + 60]
        cls.reference_previous = 1447872960
        cls.reference_timestamp = 1447873020
        cls.reference_next = 1447873080
        cls.expected_current = local_record_json_db[cls.reference_timestamp]
        cls.expected_next = local_record_json_db[cls.reference_next]
        cls.expected_previous = local_record_json_db[cls.reference_previous]
        # Perform conversions from json data to record objects
        cls.expected_first = NistBeaconValue.from_json(cls.expected_first)
        cls.expected_first_next = NistBeaconValue.from_json(
            cls.expected_first_next
        )
        cls.expected_current = NistBeaconValue.from_json(cls.expected_current)
        cls.expected_next = NistBeaconValue.from_json(cls.expected_next)
        cls.expected_previous = NistBeaconValue.from_json(
            cls.expected_previous
        )
    # get_first_record must return an equal-but-distinct value object on every
    # call, with or without downloading.
    @patch('requests.get')
    def test_get_first_record(self, requests_get_patched):
        mock_response = Mock(spec=Response)
        mock_response.status_code = 200
        mock_response.text = self.expected_first.xml
        requests_get_patched.return_value = mock_response
        expected = self.expected_first
        actual_download_false = NistBeacon.get_first_record(download=False)
        actual_download_true = NistBeacon.get_first_record(download=True)
        self.assertEqual(expected, actual_download_false)
        self.assertEqual(expected, actual_download_true)
        self.assertIsNot(expected, actual_download_false)
        self.assertIsNot(expected, actual_download_true)
        self.assertIsNot(actual_download_false, actual_download_true)
    # get_next should parse the mocked XML into the record after the reference.
    @patch('requests.get')
    def test_get_next(self, requests_get_patched):
        mock_response = Mock(spec=Response)
        mock_response.status_code = 200
        mock_response.text = self.expected_next.xml
        requests_get_patched.return_value = mock_response
        next_record = NistBeacon.get_next(self.reference_timestamp)
        self.assertEqual(self.expected_next, next_record)
    # get_previous should parse the mocked XML into the record before the reference.
    @patch('requests.get')
    def test_get_previous(self, requests_get_patched):
        mock_response = Mock(spec=Response)
        mock_response.status_code = 200
        mock_response.text = self.expected_previous.xml
        requests_get_patched.return_value = mock_response
        previous_record = NistBeacon.get_previous(
            self.reference_timestamp
        )
        self.assertEqual(self.expected_previous, previous_record)
    # get_record should return the record at the exact reference timestamp.
    @patch('requests.get')
    def test_get_record(self, requests_get_patched):
        mock_response = Mock(spec=Response)
        mock_response.status_code = 200
        mock_response.text = self.expected_current.xml
        requests_get_patched.return_value = mock_response
        record = NistBeacon.get_record(self.reference_timestamp)
        self.assertEqual(self.expected_current, record)
    # A successful last-record fetch yields a NistBeaconValue instance.
    @patch('requests.get')
    def test_get_last_record(self, requests_get_patched):
        mock_response = Mock(spec=Response)
        mock_response.status_code = 200
        mock_response.text = self.expected_current.xml
        requests_get_patched.return_value = mock_response
        last_record = NistBeacon.get_last_record()
        self.assertIsInstance(last_record, NistBeaconValue)
    # HTTP 404 must translate into a None result, not an exception.
    @patch('requests.get')
    def test_get_last_record_404(self, requests_get_patched):
        mock_response = Mock(spec=Response)
        mock_response.status_code = 404
        requests_get_patched.return_value = mock_response
        self.assertIsNone(NistBeacon.get_last_record())
    # Every requests-level exception type must be swallowed and yield None.
    @patch('requests.get')
    def test_get_last_record_exceptions(self, requests_get_patched):
        exceptions_to_test = [
            requests.exceptions.RequestException(),
            requests.exceptions.ConnectionError(),
            requests.exceptions.HTTPError(),
            requests.exceptions.URLRequired(),
            requests.exceptions.TooManyRedirects(),
            requests.exceptions.Timeout(),
        ]
        for exception_to_test in exceptions_to_test:
            requests_get_patched.side_effect = exception_to_test
            self.assertIsNone(NistBeacon.get_last_record())
    # chain_check(None) is invalid input and must report False.
    @patch('requests.get')
    def test_chain_check_empty_input(self, requests_get_patched):
        mock_response = Mock(spec=Response)
        mock_response.status_code = 404
        requests_get_patched.return_value = mock_response
        # noinspection PyTypeChecker
        self.assertFalse(NistBeacon.chain_check(None))
    # Middle-of-chain record: current, previous and next all resolve.
    # side_effect order matters: current, then previous, then next.
    @patch('requests.get')
    def test_chain_check_majority(self, requests_get_patched):
        first_response = Mock(spec=Response)
        first_response.status_code = 200
        first_response.text = self.expected_current.xml
        previous_response = Mock(spec=Response)
        previous_response.status_code = 200
        previous_response.text = self.expected_previous.xml
        next_response = Mock(spec=Response)
        next_response.status_code = 200
        next_response.text = self.expected_next.xml
        requests_get_patched.side_effect = [
            first_response,
            previous_response,
            next_response,
        ]
        self.assertTrue(
            NistBeacon.chain_check(
                self.expected_current.timestamp
            )
        )
    # Very first record in the chain: no previous record exists (404).
    @patch('requests.get')
    def test_chain_check_init(self, requests_get_patched):
        first_response = Mock(spec=Response)
        first_response.status_code = 200
        first_response.text = self.expected_first.xml
        previous_response = Mock(spec=Response)
        previous_response.status_code = 404
        next_response = Mock(spec=Response)
        next_response.status_code = 200
        next_response.text = self.expected_first_next.xml
        requests_get_patched.side_effect = [
            first_response,
            previous_response,
            next_response,
        ]
        self.assertTrue(
            NistBeacon.chain_check(
                self.init_timestamp,
            )
        )
    # Latest record in the chain: no next record exists yet (404).
    @patch('requests.get')
    def test_chain_check_last(self, requests_get_patched):
        first_response = Mock(spec=Response)
        first_response.status_code = 200
        first_response.text = self.expected_current.xml
        previous_response = Mock(spec=Response)
        previous_response.status_code = 200
        previous_response.text = self.expected_previous.xml
        next_response = Mock(spec=Response)
        next_response.status_code = 404
        requests_get_patched.side_effect = [
            first_response,
            previous_response,
            next_response,
        ]
        self.assertTrue(
            NistBeacon.chain_check(
                self.expected_current.timestamp,
            )
        )
    # Neither neighbour resolves: the chain cannot be verified -> False.
    @patch('requests.get')
    def test_chain_check_no_records_around(self, requests_get_patched):
        first_response = Mock(spec=Response)
        first_response.status_code = 200
        first_response.text = self.expected_current.xml
        none_response = Mock(spec=Response)
        none_response.status_code = 404
        requests_get_patched.side_effect = [
            first_response,
            none_response,
            none_response,
        ]
        self.assertFalse(
            NistBeacon.chain_check(
                self.expected_current.timestamp
            )
        )
| [
"nistbeacon.NistBeacon.get_next",
"unittest.mock.Mock",
"nistbeacon.NistBeacon.get_first_record",
"nistbeacon.NistBeacon.get_last_record",
"nistbeacon.NistBeaconValue.from_json",
"nistbeacon.NistBeacon.get_previous",
"nistbeacon.NistBeacon.chain_check",
"nistbeacon.NistBeacon.get_record",
"unittest.... | [((1955, 1976), 'unittest.mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (1960, 1976), False, 'from unittest.mock import Mock, patch\n'), ((2729, 2750), 'unittest.mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (2734, 2750), False, 'from unittest.mock import Mock, patch\n'), ((3129, 3150), 'unittest.mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (3134, 3150), False, 'from unittest.mock import Mock, patch\n'), ((3575, 3596), 'unittest.mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (3580, 3596), False, 'from unittest.mock import Mock, patch\n'), ((3975, 3996), 'unittest.mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (3980, 3996), False, 'from unittest.mock import Mock, patch\n'), ((4370, 4391), 'unittest.mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (4375, 4391), False, 'from unittest.mock import Mock, patch\n'), ((4659, 4680), 'unittest.mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (4664, 4680), False, 'from unittest.mock import Mock, patch\n'), ((5266, 5287), 'unittest.mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (5271, 5287), False, 'from unittest.mock import Mock, patch\n'), ((5595, 5616), 'unittest.mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (5600, 5616), False, 'from unittest.mock import Mock, patch\n'), ((6394, 6415), 'unittest.mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (6399, 6415), False, 'from unittest.mock import Mock, patch\n'), ((7122, 7143), 'unittest.mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (7127, 7143), False, 'from unittest.mock import Mock, patch\n'), ((7866, 7887), 'unittest.mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (7871, 7887), False, 'from unittest.mock import Mock, patch\n'), ((1540, 1585), 
'nistbeacon.NistBeaconValue.from_json', 'NistBeaconValue.from_json', (['cls.expected_first'], {}), '(cls.expected_first)\n', (1565, 1585), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((1620, 1670), 'nistbeacon.NistBeaconValue.from_json', 'NistBeaconValue.from_json', (['cls.expected_first_next'], {}), '(cls.expected_first_next)\n', (1645, 1670), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((1725, 1772), 'nistbeacon.NistBeaconValue.from_json', 'NistBeaconValue.from_json', (['cls.expected_current'], {}), '(cls.expected_current)\n', (1750, 1772), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((1801, 1845), 'nistbeacon.NistBeaconValue.from_json', 'NistBeaconValue.from_json', (['cls.expected_next'], {}), '(cls.expected_next)\n', (1826, 1845), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((1878, 1926), 'nistbeacon.NistBeaconValue.from_json', 'NistBeaconValue.from_json', (['cls.expected_previous'], {}), '(cls.expected_previous)\n', (1903, 1926), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((2060, 2079), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (2064, 2079), False, 'from unittest.mock import Mock, patch\n'), ((2303, 2346), 'nistbeacon.NistBeacon.get_first_record', 'NistBeacon.get_first_record', ([], {'download': '(False)'}), '(download=False)\n', (2330, 2346), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((2378, 2420), 'nistbeacon.NistBeacon.get_first_record', 'NistBeacon.get_first_record', ([], {'download': '(True)'}), '(download=True)\n', (2405, 2420), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((2826, 2845), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (2830, 2845), False, 'from unittest.mock import Mock, patch\n'), ((3019, 3064), 'nistbeacon.NistBeacon.get_next', 'NistBeacon.get_next', (['self.reference_timestamp'], {}), '(self.reference_timestamp)\n', (3038, 
3064), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((3230, 3249), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (3234, 3249), False, 'from unittest.mock import Mock, patch\n'), ((3431, 3480), 'nistbeacon.NistBeacon.get_previous', 'NistBeacon.get_previous', (['self.reference_timestamp'], {}), '(self.reference_timestamp)\n', (3454, 3480), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((3674, 3693), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (3678, 3693), False, 'from unittest.mock import Mock, patch\n'), ((3865, 3912), 'nistbeacon.NistBeacon.get_record', 'NistBeacon.get_record', (['self.reference_timestamp'], {}), '(self.reference_timestamp)\n', (3886, 3912), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((4079, 4098), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (4083, 4098), False, 'from unittest.mock import Mock, patch\n'), ((4275, 4303), 'nistbeacon.NistBeacon.get_last_record', 'NistBeacon.get_last_record', ([], {}), '()\n', (4301, 4303), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((4478, 4497), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (4482, 4497), False, 'from unittest.mock import Mock, patch\n'), ((5378, 5397), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (5382, 5397), False, 'from unittest.mock import Mock, patch\n'), ((5705, 5724), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (5709, 5724), False, 'from unittest.mock import Mock, patch\n'), ((5851, 5870), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (5855, 5870), False, 'from unittest.mock import Mock, patch\n'), ((6000, 6019), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (6004, 6019), False, 'from unittest.mock import Mock, patch\n'), ((6500, 6519), 
'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (6504, 6519), False, 'from unittest.mock import Mock, patch\n'), ((6644, 6663), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (6648, 6663), False, 'from unittest.mock import Mock, patch\n'), ((6733, 6752), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (6737, 6752), False, 'from unittest.mock import Mock, patch\n'), ((7228, 7247), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (7232, 7247), False, 'from unittest.mock import Mock, patch\n'), ((7374, 7393), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (7378, 7393), False, 'from unittest.mock import Mock, patch\n'), ((7523, 7542), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (7527, 7542), False, 'from unittest.mock import Mock, patch\n'), ((7985, 8004), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (7989, 8004), False, 'from unittest.mock import Mock, patch\n'), ((8127, 8146), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Response'}), '(spec=Response)\n', (8131, 8146), False, 'from unittest.mock import Mock, patch\n'), ((4623, 4651), 'nistbeacon.NistBeacon.get_last_record', 'NistBeacon.get_last_record', ([], {}), '()\n', (4649, 4651), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((5559, 5587), 'nistbeacon.NistBeacon.chain_check', 'NistBeacon.chain_check', (['None'], {}), '(None)\n', (5581, 5587), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((6292, 6347), 'nistbeacon.NistBeacon.chain_check', 'NistBeacon.chain_check', (['self.expected_current.timestamp'], {}), '(self.expected_current.timestamp)\n', (6314, 6347), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((7031, 7074), 'nistbeacon.NistBeacon.chain_check', 'NistBeacon.chain_check', (['self.init_timestamp'], {}), '(self.init_timestamp)\n', 
(7053, 7074), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((7763, 7818), 'nistbeacon.NistBeacon.chain_check', 'NistBeacon.chain_check', (['self.expected_current.timestamp'], {}), '(self.expected_current.timestamp)\n', (7785, 7818), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((8364, 8419), 'nistbeacon.NistBeacon.chain_check', 'NistBeacon.chain_check', (['self.expected_current.timestamp'], {}), '(self.expected_current.timestamp)\n', (8386, 8419), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n'), ((5230, 5258), 'nistbeacon.NistBeacon.get_last_record', 'NistBeacon.get_last_record', ([], {}), '()\n', (5256, 5258), False, 'from nistbeacon import NistBeacon, NistBeaconValue\n')] |
import cv2
import numpy as np
import os
from auto_pose.meshrenderer import meshrenderer
from auto_pose.ae.utils import lazy_property
class PoseVisualizer:
    """Overlays rendered 6D pose estimates onto camera images.

    Estimated objects are rendered with an offscreen mesh renderer and
    alpha-blended in green over the (optionally downsampled) input image.
    A second, RGB-only set of estimates can be blended in blue, and
    detection boxes with labels are drawn on top.
    """

    def __init__(self, mp_pose_estimator, downsample=1, vertex_scale=False):
        """
        Args:
            mp_pose_estimator: estimator exposing ``train_args`` and either
                ``class_2_objpath`` or (for BOP evaluation) ``class_2_codebook``.
            downsample: integer factor by which the visualization is shrunk.
            vertex_scale: falsy -> read VERTEX_SCALE from train_args;
                truthy -> use 1.0 (models presumably already in mm — TODO confirm).
        """
        self.downsample = downsample
        self.vertex_scale = [mp_pose_estimator.train_args.getint('Dataset', 'VERTEX_SCALE')] if not vertex_scale else [1.]
        if hasattr(mp_pose_estimator, 'class_2_objpath'):
            self.classes, self.ply_model_paths = zip(*mp_pose_estimator.class_2_objpath.items())
        else:
            # For BOP evaluation (sry!):
            # BUGFIX: wrap in list() — a dict view has no .index(), which
            # render_poses relies on under Python 3.
            self.classes = list(mp_pose_estimator.class_2_codebook.keys())
            all_model_paths = eval(mp_pose_estimator.train_args.get('Paths', 'MODEL_PATH'))
            base_path = '/'.join(all_model_paths[0].split('/')[:-3])
            # BUGFIX: '{:02d}' (space flag removed) — '{: 02d}' inserted a
            # literal blank into the filename instead of zero-padding.
            itodd_paths = [os.path.join(base_path, 'itodd/models/obj_0000{:02d}.ply'.format(i)) for i in range(29)]
            all_model_paths = all_model_paths + itodd_paths
            all_model_paths = [model_p.replace('YCB_VideoDataset/original2sixd/bop_models/', 'bop/original/ycbv/models_eval/') for model_p in all_model_paths]
            self.ply_model_paths = []
            # match each codebook name against the known model files
            for cb_name in mp_pose_estimator.class_2_codebook.values():
                for model_path in all_model_paths:
                    bop_dataset = cb_name.split('_')[0]
                    bop_dataset = 'ycbv' if bop_dataset == 'original2sixd' else bop_dataset
                    model_type, obj, obj_id = cb_name.split('_')[-3:]
                    model_name = obj + '_' + obj_id
                    if bop_dataset in model_path and model_name in model_path:
                        self.ply_model_paths.append(model_path)
        print(('renderer', 'Model paths: ', self.ply_model_paths))

    @lazy_property
    def renderer(self):
        """Lazily constructed offscreen renderer over all known models."""
        return meshrenderer.Renderer(self.ply_model_paths,
                        samples=1,
                        vertex_tmp_store_folder='.',
                        vertex_scale=float(self.vertex_scale[0]))  # 1000 for models in meters

    def render_poses(self, image, camK, pose_ests, dets, vis_bbs=True, vis_mask=False, all_pose_estimates_rgb=None, depth_image=None, waitKey=True):
        """Draw pose estimates (and optionally detections / depth overlay)
        onto `image`, display them, and return the annotated image.

        Args:
            image: BGR camera image (H, W, 3).
            camK: 3x3 intrinsic matrix.
            pose_ests: pose estimates with `.name` and 4x4 `.trafo`.
            dets: detections with normalized box coords and a `classes` dict.
            all_pose_estimates_rgb: optional second estimate set (blue overlay).
            depth_image: optional depth map for a separate overlay window.
            waitKey: block on cv2.waitKey(0) if True, else poll with waitKey(1).

        Returns:
            The annotated BGR image at the downsampled resolution.
        """
        # BUGFIX: integer division — Python 3 '/' yields floats, which break
        # cv2.resize and the renderer's W/H arguments.
        W_d = image.shape[1] // self.downsample
        H_d = image.shape[0] // self.downsample
        # Hoisted: class index of every estimate, reused for both renders.
        obj_ids = [self.classes.index(pose_est.name) for pose_est in pose_ests]
        print(obj_ids)
        bgr, depth, _ = self.renderer.render_many(obj_ids=obj_ids,
                        W=W_d,
                        H=H_d,
                        K=camK.copy(),
                        Rs=[pose_est.trafo[:3, :3] for pose_est in pose_ests],
                        ts=[pose_est.trafo[:3, 3] for pose_est in pose_ests],
                        near=10,
                        far=10000)
        image_show = cv2.resize(image, (W_d, H_d))
        if all_pose_estimates_rgb is not None:
            image_show_rgb = image_show.copy()
        # Blend the rendered objects in green over the camera image.
        g_y = np.zeros_like(bgr)
        g_y[:, :, 1] = bgr[:, :, 1]
        image_show[bgr > 0] = g_y[bgr > 0] * 2. / 3. + image_show[bgr > 0] * 1. / 3.
        if all_pose_estimates_rgb is not None:
            # BUGFIX: previously iterated an undefined name `all_class_idcs`
            # (NameError); reuse the class indices computed above.
            bgr, depth, _ = self.renderer.render_many(obj_ids=obj_ids,
                            W=W_d,
                            H=H_d,
                            K=camK.copy(),
                            Rs=[pose_est.trafo[:3, :3] for pose_est in pose_ests],
                            ts=[pose_est.trafo[:3, 3] for pose_est in pose_ests],
                            near=10,
                            far=10000)
            bgr = cv2.resize(bgr, (W_d, H_d))
            b_y = np.zeros_like(bgr)
            b_y[:, :, 0] = bgr[:, :, 0]
            image_show_rgb[bgr > 0] = b_y[bgr > 0] * 2. / 3. + image_show_rgb[bgr > 0] * 1. / 3.
        if np.any(depth_image):
            depth_show = depth_image.copy()
            depth_show = np.dstack((depth_show, depth_show, depth_show))
            depth_show[bgr[:, :, 0] > 0] = g_y[bgr[:, :, 0] > 0] * 2. / 3. + depth_show[bgr[:, :, 0] > 0] * 1. / 3.
            cv2.imshow('depth_refined_pose', depth_show)
        if vis_bbs:
            for det in dets:
                # det coords are normalized; scale to the downsampled size
                xmin, ymin, xmax, ymax = int(det.xmin * W_d), int(det.ymin * H_d), int(det.xmax * W_d), int(det.ymax * H_d)
                label, score = list(det.classes.items())[0]
                try:
                    cv2.putText(image_show, '%s : %1.3f' % (label, score), (xmin, ymax + 20), cv2.FONT_ITALIC, .5, (0, 0, 255), 2)
                    cv2.rectangle(image_show, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)
                    if all_pose_estimates_rgb is not None:
                        cv2.putText(image_show_rgb, '%s : %1.3f' % (label, score), (xmin, ymax + 20), cv2.FONT_ITALIC, .5, (0, 0, 255), 2)
                        cv2.rectangle(image_show_rgb, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)
                except Exception:
                    # narrowed from bare `except:` which also caught SystemExit
                    print('failed to plot boxes')
        if all_pose_estimates_rgb is not None:
            cv2.imshow('rgb_pose', image_show_rgb)
        cv2.imshow('refined_pose', image_show)
        if waitKey:
            cv2.waitKey(0)
        else:
            cv2.waitKey(1)
        return (image_show)
| [
"cv2.rectangle",
"numpy.dstack",
"numpy.any",
"cv2.imshow",
"cv2.putText",
"cv2.waitKey",
"cv2.resize",
"numpy.zeros_like"
] | [((2879, 2908), 'cv2.resize', 'cv2.resize', (['image', '(W_d, H_d)'], {}), '(image, (W_d, H_d))\n', (2889, 2908), False, 'import cv2\n'), ((3016, 3034), 'numpy.zeros_like', 'np.zeros_like', (['bgr'], {}), '(bgr)\n', (3029, 3034), True, 'import numpy as np\n'), ((3798, 3817), 'numpy.any', 'np.any', (['depth_image'], {}), '(depth_image)\n', (3804, 3817), True, 'import numpy as np\n'), ((5251, 5289), 'cv2.imshow', 'cv2.imshow', (['"""refined_pose"""', 'image_show'], {}), "('refined_pose', image_show)\n", (5261, 5289), False, 'import cv2\n'), ((3599, 3626), 'cv2.resize', 'cv2.resize', (['bgr', '(W_d, H_d)'], {}), '(bgr, (W_d, H_d))\n', (3609, 3626), False, 'import cv2\n'), ((3644, 3662), 'numpy.zeros_like', 'np.zeros_like', (['bgr'], {}), '(bgr)\n', (3657, 3662), True, 'import numpy as np\n'), ((3888, 3935), 'numpy.dstack', 'np.dstack', (['(depth_show, depth_show, depth_show)'], {}), '((depth_show, depth_show, depth_show))\n', (3897, 3935), True, 'import numpy as np\n'), ((4048, 4092), 'cv2.imshow', 'cv2.imshow', (['"""depth_refined_pose"""', 'depth_show'], {}), "('depth_refined_pose', depth_show)\n", (4058, 4092), False, 'import cv2\n'), ((5204, 5242), 'cv2.imshow', 'cv2.imshow', (['"""rgb_pose"""', 'image_show_rgb'], {}), "('rgb_pose', image_show_rgb)\n", (5214, 5242), False, 'import cv2\n'), ((5322, 5336), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5333, 5336), False, 'import cv2\n'), ((5363, 5377), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5374, 5377), False, 'import cv2\n'), ((4599, 4714), 'cv2.putText', 'cv2.putText', (['image_show', "('%s : %1.3f' % (label, score))", '(xmin, ymax + 20)', 'cv2.FONT_ITALIC', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(image_show, '%s : %1.3f' % (label, score), (xmin, ymax + 20),\n cv2.FONT_ITALIC, 0.5, (0, 0, 255), 2)\n", (4610, 4714), False, 'import cv2\n'), ((4725, 4794), 'cv2.rectangle', 'cv2.rectangle', (['image_show', '(xmin, ymin)', '(xmax, ymax)', '(255, 0, 0)', '(2)'], {}), '(image_show, (xmin, 
ymin), (xmax, ymax), (255, 0, 0), 2)\n', (4738, 4794), False, 'import cv2\n'), ((4870, 4990), 'cv2.putText', 'cv2.putText', (['image_show_rgb', "('%s : %1.3f' % (label, score))", '(xmin, ymax + 20)', 'cv2.FONT_ITALIC', '(0.5)', '(0, 0, 255)', '(2)'], {}), "(image_show_rgb, '%s : %1.3f' % (label, score), (xmin, ymax + 20\n ), cv2.FONT_ITALIC, 0.5, (0, 0, 255), 2)\n", (4881, 4990), False, 'import cv2\n'), ((5004, 5077), 'cv2.rectangle', 'cv2.rectangle', (['image_show_rgb', '(xmin, ymin)', '(xmax, ymax)', '(255, 0, 0)', '(2)'], {}), '(image_show_rgb, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)\n', (5017, 5077), False, 'import cv2\n')] |
"""
User module
"""
import discord
import random
import asyncio
from discord.ext import commands
from discord.ext.commands import has_permissions, MissingPermissions, BadArgument
import requests, json, pyfiglet
from datetime import timedelta, datetime
class User(commands.Cog):
    """User-facing commands: small talk, weather lookups, member info,
    joke ``git`` subcommands, helper embeds and ascii art."""

    # NOTE(review): an API key does not belong in source control — move to
    # an environment variable / config file and rotate the key.
    api_key = "<KEY>"
    base_url = "http://api.openweathermap.org/data/2.5/weather?"

    def __init__(self, bot):
        self.bot = bot

    def get_embed(self, _title, _description, icon):
        """Return a dark-theme embed with the given title, body and thumbnail."""
        embed = discord.Embed(title=_title, description=_description, color= discord.Color.dark_theme())
        embed.set_thumbnail(url=icon)
        return embed

    def get_weather(self, city_name):
        """Fetch current weather for `city_name` from OpenWeatherMap.

        Returns a human-readable summary string, or a fallback message
        when the API reports the city as unknown (cod == "404").
        """
        complete_url = self.base_url + "appid=" + self.api_key + "&q=" + city_name
        response = requests.get(complete_url)
        x = response.json()
        if x["cod"] != "404":
            # "main" block carries temperature / pressure / humidity
            y = x["main"]
            current_temperature = y["temp"]
            current_pressure = y["pressure"]
            current_humidiy = y["humidity"]
            # first "weather" entry carries the textual description
            z = x["weather"]
            weather_description = z[0]["description"]
            result = ("Temperature (in kelvin unit) = " + str(current_temperature) + "\natmospheric pressure (in hPa unit) = " + str(current_pressure) + "\nhumidity (in percentage) = " + str(current_humidiy) + "\ndescription = " + str(weather_description))
            return result
        else:
            print(" City Not Found : " + city_name)
            return "That city might be in moon"

    @commands.command()
    async def say(self, ctx, *, arg):
        """Repeat `arg` back into the channel."""
        async with ctx.channel.typing():
            thing = arg
            await (ctx.channel).send(thing)
        # BUGFIX: missing f-prefix printed the placeholder literally
        print(f"Event: Repeated {ctx.author.name}: ", arg)

    @commands.command()
    async def hi(self, ctx):
        """Greet the invoking user."""
        async with ctx.channel.typing():
            thing = "hello human!"
            await (ctx.channel).send(thing)
        print("Event: I said Hi to ", ctx.author.name)

    @commands.command()
    async def info(self, ctx, *, member: discord.Member):
        """Show join dates and role count for `member`."""
        async with ctx.channel.typing():
            await asyncio.sleep(2)
            avatar = member.avatar_url
            fmt = 'Joined basement on {0.joined_at} \njoined Discord on {0.created_at} \nThis member has {1} roles.'
            msg = self.get_embed("Info of {0.display_name}".format(member), fmt.format(member, len(member.roles)), avatar)
            await ctx.send(embed=msg)
        print(ctx.author.name, " checked info of ", member.name)

    @info.error
    async def info_error(self, ctx, error):
        """Error handler for `info`: unknown member lookups."""
        if isinstance(error, commands.BadArgument):
            await ctx.send('I could not find that member...')

    @commands.command(pass_context=True)
    async def weather(self, ctx, a: str):
        """Reply with the current weather for city `a`."""
        async with ctx.channel.typing():
            msg = self.get_weather(a)
            await asyncio.sleep(2)
        await ctx.send(embed=discord.Embed(title=f"Weather status at {a}", description=msg, color=discord.Color.dark_theme()))
        print("Event. weather checked on user's command: ", ctx.author.name, ", location: ", a)

    @commands.command()
    async def bing(self, ctx):
        """Joke command: point the user at Bing."""
        async with ctx.channel.typing():
            thing = discord.Embed(title="Bong!", description="Sounds like something " + "https://www.bing.com/"+" would know!", color=discord.Color.dark_theme())
            await (ctx.channel).send(embed=thing)
        print("Event. I binged, bong! : ", ctx.author.name)

    @commands.command()
    async def google(self, ctx):
        """Remind users to search before asking."""
        await ctx.send("It is quite important that you **google your problems before asking** someone. Most of your questions have already been answered at least once online because you are definitely not the only one with this particular question. Additionally, each programming language, API, or program should be well documented in its official documentation. \nRefer to this page: https://duck-dev.github.io/general/how-to-google/")
        print("Event. how to google! : ", ctx.author.name)

    @commands.command()
    async def dontasktoask(self, ctx):
        """Link the 'don't ask to ask' etiquette page."""
        async with ctx.channel.typing():
            thing = discord.Embed(title="Don't ask to ask, Just ask!", description="Ask your question, instead of asking to help \nhttps://dontasktoask.com/", color=discord.Color.dark_theme())
            await (ctx.channel).send(embed = thing)
        print("Event. ", ctx.author.name, " did ask to ask!")

    @commands.command(name='goodnight', aliases=['night', 'gn'])
    async def goodnight(self, ctx, *, args = "nothing"):
        """Wish the channel a good night."""
        async with ctx.channel.typing():
            thing = discord.Embed(title="Good Night", description="Sleep tight", color= discord.Color.dark_theme())
            await (ctx.channel).send(embed=thing)
        print(f"Event. {ctx.author.name} said good night")

    @commands.command(name='goodmorning', aliases=['morning', 'gm'])
    async def goodmorning(self, ctx, *, args = "nothing"):
        """Wish the channel a good morning."""
        async with ctx.channel.typing():
            thing = discord.Embed(title="Good Morning", description="Wishing you a good day", color= discord.Color.dark_theme())
            await (ctx.channel).send(embed=thing)
        print(f"Event. {ctx.author.name} said good morning")

    @commands.group()
    async def git(self, ctx):
        """
        A set of funny ~~useful~~ git commands.
        """
        if ctx.invoked_subcommand is None:
            await ctx.send('> See: `[]help git`')

    @git.command()
    async def push(self, ctx, remote: str, branch: str):
        """Pretend to push `remote` to `branch`."""
        await ctx.send('Pushing {} to {}'.format(remote, branch))

    @git.command()
    async def blame(self, ctx, branch: str):
        """Pretend to blame `branch`."""
        await ctx.send('#blame{}'.format(branch))

    @git.command()
    async def lick(self, ctx, user):
        """Joke command: lick `user` with a random verdict."""
        if random.choice([True, False]):
            await ctx.send('*licks {}, Mmm tastes good*'.format(user))
        else:
            await ctx.send('*licks {}, euh tastes kinda bad*'.format(user))

    @git.command()
    async def commit(self, ctx, *, message: str):
        """Pretend to commit `message`."""
        await ctx.send('Commiting {}'.format(message))

    @git.command()
    async def pull(self, ctx, branch: str):
        """Pretend to pull `branch`."""
        await ctx.send('Pulling {}'.format(branch))

    @git.command()
    async def status(self, ctx, user: discord.Member=None):
        """Show a git-style status, optionally for another member."""
        if user:
            await ctx.send("On branch {0}\nYour branch is up to date with 'origin/main'. \nstatus: {1}".format(user.display_name, user.status))
        else:
            await ctx.send("On branch main\nYour branch is up to date with 'origin/main'. \nstatus: {}".format(ctx.author.status))

    @git.command()
    async def merge(self, ctx, thing, anotherthing):
        """Pretend to merge `thing` into `anotherthing`."""
        await ctx.send('Merging {0} to {1}'.format(thing, anotherthing))

    @git.command()
    async def add(self, ctx, *, thing):
        """Pretend to stage `thing`, with fake diff stats."""
        msg = await ctx.send('Adding {0}...'.format(thing))
        await asyncio.sleep(2)
        await msg.edit(content='Added {0} to changes.\n`{1} additions and {2} deletions.`'.format(thing, random.randint(10, 1000), random.randint(10, 1000)))

    @git.command()
    async def out(self, ctx, *, thing):
        """Joke command: 'git out'."""
        await ctx.send('https://tenor.com/view/the-office-steve-carell-please-leave-get-out-move-gif-3579774')

    @commands.command(name='codeblocks', aliases=['codeblock', 'cb', 'myst'])
    async def codeblocks(self, ctx, *args):
        """Explain how to format code in Discord messages."""
        async with ctx.channel.typing():
            thing = discord.Embed(title="Code Blocks", description="""**__Use codeblocks to send code in a message!__**
To make a codeblock, surround your code with \`\`\`
\`\`\`cs
// your code here
\`\`\`
`In order use C# syntax highlighting add cs after the three back ticks`
To send lengthy code, paste it into <https://paste.myst.rs/> and send the link of the paste into chat.""", color=discord.Color.dark_theme())
            await (ctx.channel).send(embed=thing)
        print(f"Event: {ctx.author.name} used codeblocks")

    @commands.command(name='example', aliases=['Example', 'eg', 'eg.'])
    async def example(self, ctx, *args):
        """Explain how to write a minimal, reproducible example."""
        async with ctx.channel.typing():
            thing = discord.Embed(title="Example", description="""**__How to create a Minimal, Reproducible Example__**
When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to reproduce the problem. This is referred to by community members as creating a minimal, reproducible example (**reprex**), a minimal, complete and verifiable example (**mcve**), or a minimal, workable example (**mwe**). Regardless of how it's communicated to you, it boils down to ensuring your code that reproduces the problem follows the following guidelines:
**__Your code examples should be…__**
» **Minimal** – Use as little code as possible that still produces the same problem
» **Complete** – Provide all parts someone else needs to reproduce your problem in the question itself
» **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
            """, color=discord.Color.dark_theme())
            await (ctx.channel).send(embed=thing)
        print(f"Event: {ctx.author.name} used example")

    @commands.command(name='pastemyst', aliases=['pm', 'pastebin', 'PasteMyst', 'paste'])
    async def pastemyst(self, ctx, *, args = "nothing"):
        """Explain how to share long code via PasteMyst."""
        async with ctx.channel.typing():
            thing = discord.Embed(title="How to use PasteMyst", description="> 1. paste your code in https://paste.myst.rs/\n> 2. copy the link of the website completely\n> 3. send the link into chat.", color=discord.Color.dark_theme())
            await (ctx.channel).send(embed=thing)
        print(f"Event: {ctx.author.name} used how to use pastemyst")

    @commands.group(name="ascii")
    async def ascii(self, ctx):
        """Ascii art group; without a subcommand, show the module help."""
        if ctx.invoked_subcommand is None:
            await ctx.trigger_typing()
            embed = discord.Embed(title="Ascii Modules", description="use []ascii <module>", color = discord.Color.dark_theme())
            embed.add_field(name="Word", value="Shows ascii art of given text.", inline=False)
            embed.add_field(name="Fonts", value="See available Fonts.", inline=False)
            embed.set_footer(text="use []ascii <module> <args>")
            await ctx.send(embed=embed)

    @ascii.command(name="word", aliases=["w", "Word", "W"])
    async def word(self, ctx, word:str = "hey", font:str = "standard"):
        """Render `word` as ascii art in the given pyfiglet font."""
        try:
            result = pyfiglet.figlet_format(word, font = font)
        except Exception:
            # narrowed from bare `except:`; pyfiglet raises on unknown fonts
            result = f"There is no font called {font}."
        await ctx.send("```\n" + result + "\n```")

    @ascii.command(name="fonts", aliases=["font", "f"])
    async def fonts(self, ctx, page:int=1):
        """Show one page of the available pyfiglet fonts."""
        total_pages = 4
        # BUGFIX: initialize before the try — a failed json.load previously
        # left page_data/page_no unbound and crashed with NameError below.
        page_data = None
        page_no = 0
        with open('./cogs/fonts.json', 'r') as f:
            try:
                data = json.load(f)
                if page == 1:
                    page_data = data['fonts1']
                    page_no = 1
                elif page == 2:
                    page_data = data['fonts2']
                    page_no = 2
                elif page == 3:
                    page_data = data['fonts3']
                    page_no = 3
                elif page == 4:
                    page_data = data['fonts4']
                    page_no = 4
                elif page is None:
                    page_data = data['fonts1']
                    page_no = 1
                else:
                    page_data = "more fonts will be added in future"
                    page_no = 0
            except Exception:
                # narrowed from bare `except:`
                print("fonts.json loading error")
            if page_data is not None:
                Separator = "\n"
                fields = Separator.join(page_data)
                # embedding
                embed = discord.Embed(color = discord.Color.dark_theme())
                embed.set_author(name='Ascii Art')
                embed.add_field(name='Fonts page', value=fields, inline=False)
                if page_no != 0:
                    embed.set_footer(text=f"page: {page_no}/{total_pages}")
                else:
                    embed.set_footer(text="use []ascii fonts <page_no>")
                await ctx.send(embed=embed)
            else:
                print("looks like there's a problem with page_data")
#===================================== ADD COG ======================================#
def setup(bot):
    """Standard discord.py extension entry point: attach the User cog."""
    user_cog = User(bot)
    bot.add_cog(user_cog)
| [
"random.choice",
"random.randint",
"pyfiglet.figlet_format",
"requests.get",
"discord.ext.commands.group",
"discord.Color.dark_theme",
"asyncio.sleep",
"json.load",
"discord.ext.commands.command"
] | [((2097, 2115), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2113, 2115), False, 'from discord.ext import commands\n'), ((2324, 2342), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2340, 2342), False, 'from discord.ext import commands\n'), ((2550, 2568), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2566, 2568), False, 'from discord.ext import commands\n'), ((3264, 3299), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (3280, 3299), False, 'from discord.ext import commands\n'), ((3686, 3704), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (3702, 3704), False, 'from discord.ext import commands\n'), ((4060, 4078), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (4076, 4078), False, 'from discord.ext import commands\n'), ((4616, 4634), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (4632, 4634), False, 'from discord.ext import commands\n'), ((5028, 5087), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""goodnight"""', 'aliases': "['night', 'gn']"}), "(name='goodnight', aliases=['night', 'gn'])\n", (5044, 5087), False, 'from discord.ext import commands\n'), ((5418, 5481), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""goodmorning"""', 'aliases': "['morning', 'gm']"}), "(name='goodmorning', aliases=['morning', 'gm'])\n", (5434, 5481), False, 'from discord.ext import commands\n'), ((5829, 5845), 'discord.ext.commands.group', 'commands.group', ([], {}), '()\n', (5843, 5845), False, 'from discord.ext import commands\n'), ((7837, 7909), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""codeblocks"""', 'aliases': "['codeblock', 'cb', 'myst']"}), "(name='codeblocks', aliases=['codeblock', 'cb', 'myst'])\n", (7853, 7909), False, 'from discord.ext import commands\n'), ((8533, 8599), 
'discord.ext.commands.command', 'commands.command', ([], {'name': '"""example"""', 'aliases': "['Example', 'eg', 'eg.']"}), "(name='example', aliases=['Example', 'eg', 'eg.'])\n", (8549, 8599), False, 'from discord.ext import commands\n'), ((9771, 9859), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""pastemyst"""', 'aliases': "['pm', 'pastebin', 'PasteMyst', 'paste']"}), "(name='pastemyst', aliases=['pm', 'pastebin', 'PasteMyst',\n 'paste'])\n", (9787, 9859), False, 'from discord.ext import commands\n'), ((10313, 10341), 'discord.ext.commands.group', 'commands.group', ([], {'name': '"""ascii"""'}), "(name='ascii')\n", (10327, 10341), False, 'from discord.ext import commands\n'), ((786, 812), 'requests.get', 'requests.get', (['complete_url'], {}), '(complete_url)\n', (798, 812), False, 'import requests, json, pyfiglet\n'), ((6371, 6399), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (6384, 6399), False, 'import random\n'), ((7481, 7497), 'asyncio.sleep', 'asyncio.sleep', (['(2)'], {}), '(2)\n', (7494, 7497), False, 'import asyncio\n'), ((11040, 11079), 'pyfiglet.figlet_format', 'pyfiglet.figlet_format', (['word'], {'font': 'font'}), '(word, font=font)\n', (11062, 11079), False, 'import requests, json, pyfiglet\n'), ((557, 583), 'discord.Color.dark_theme', 'discord.Color.dark_theme', ([], {}), '()\n', (581, 583), False, 'import discord\n'), ((2686, 2702), 'asyncio.sleep', 'asyncio.sleep', (['(2)'], {}), '(2)\n', (2699, 2702), False, 'import asyncio\n'), ((3439, 3455), 'asyncio.sleep', 'asyncio.sleep', (['(2)'], {}), '(2)\n', (3452, 3455), False, 'import asyncio\n'), ((11421, 11433), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11430, 11433), False, 'import requests, json, pyfiglet\n'), ((3911, 3937), 'discord.Color.dark_theme', 'discord.Color.dark_theme', ([], {}), '()\n', (3935, 3937), False, 'import discord\n'), ((4880, 4906), 'discord.Color.dark_theme', 'discord.Color.dark_theme', ([], {}), '()\n', 
(4904, 4906), False, 'import discord\n'), ((5274, 5300), 'discord.Color.dark_theme', 'discord.Color.dark_theme', ([], {}), '()\n', (5298, 5300), False, 'import discord\n'), ((5683, 5709), 'discord.Color.dark_theme', 'discord.Color.dark_theme', ([], {}), '()\n', (5707, 5709), False, 'import discord\n'), ((8389, 8415), 'discord.Color.dark_theme', 'discord.Color.dark_theme', ([], {}), '()\n', (8413, 8415), False, 'import discord\n'), ((9630, 9656), 'discord.Color.dark_theme', 'discord.Color.dark_theme', ([], {}), '()\n', (9654, 9656), False, 'import discord\n'), ((10163, 10189), 'discord.Color.dark_theme', 'discord.Color.dark_theme', ([], {}), '()\n', (10187, 10189), False, 'import discord\n'), ((10557, 10583), 'discord.Color.dark_theme', 'discord.Color.dark_theme', ([], {}), '()\n', (10581, 10583), False, 'import discord\n'), ((7603, 7627), 'random.randint', 'random.randint', (['(10)', '(1000)'], {}), '(10, 1000)\n', (7617, 7627), False, 'import random\n'), ((7629, 7653), 'random.randint', 'random.randint', (['(10)', '(1000)'], {}), '(10, 1000)\n', (7643, 7653), False, 'import random\n'), ((12380, 12406), 'discord.Color.dark_theme', 'discord.Color.dark_theme', ([], {}), '()\n', (12404, 12406), False, 'import discord\n'), ((3554, 3580), 'discord.Color.dark_theme', 'discord.Color.dark_theme', ([], {}), '()\n', (3578, 3580), False, 'import discord\n')] |
"""Admin sites for the ``django-tinylinks`` app."""
from django.contrib import admin
from django.template.defaultfilters import truncatechars
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from tinylinks.forms import TinylinkAdminForm
from tinylinks.models import Tinylink, TinylinkLog
class TinylinkAdmin(admin.ModelAdmin):
    """Admin configuration for ``Tinylink`` objects."""

    list_display = (
        'short_url',
        'url_truncated',
        'amount_of_views',
        'user',
        'last_checked',
        'status',
        'validation_error',
    )
    search_fields = ['short_url', 'long_url']
    form = TinylinkAdminForm
    fieldsets = [
        ('Tinylink', {'fields': ['user', 'long_url', 'short_url', ]}),
    ]

    def url_truncated(self, obj):
        """Return the long URL shortened to at most 60 characters."""
        return truncatechars(obj.long_url, 60)
    url_truncated.short_description = _('Long URL')

    def status(self, obj):
        """Return a human-readable health indicator for the link."""
        return _('Link broken') if obj.is_broken else _('OK')
    status.short_description = _('Status')


admin.site.register(Tinylink, TinylinkAdmin)
class TinylinkLogAdmin(admin.ModelAdmin):
    """Admin configuration for ``TinylinkLog`` click-log entries."""

    date_hierarchy = 'datetime'
    list_display = ('tinylink', 'datetime', 'remote_ip', 'tracked')
    readonly_fields = ('datetime',)


admin.site.register(TinylinkLog, TinylinkLogAdmin)
| [
"django.contrib.admin.site.register",
"django.utils.translation.ugettext_lazy",
"django.template.defaultfilters.truncatechars"
] | [((1003, 1047), 'django.contrib.admin.site.register', 'admin.site.register', (['Tinylink', 'TinylinkAdmin'], {}), '(Tinylink, TinylinkAdmin)\n', (1022, 1047), False, 'from django.contrib import admin\n'), ((1230, 1280), 'django.contrib.admin.site.register', 'admin.site.register', (['TinylinkLog', 'TinylinkLogAdmin'], {}), '(TinylinkLog, TinylinkLogAdmin)\n', (1249, 1280), False, 'from django.contrib import admin\n'), ((826, 839), 'django.utils.translation.ugettext_lazy', '_', (['"""Long URL"""'], {}), "('Long URL')\n", (827, 839), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((989, 1000), 'django.utils.translation.ugettext_lazy', '_', (['"""Status"""'], {}), "('Status')\n", (990, 1000), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((755, 786), 'django.template.defaultfilters.truncatechars', 'truncatechars', (['obj.long_url', '(60)'], {}), '(obj.long_url, 60)\n', (768, 786), False, 'from django.template.defaultfilters import truncatechars\n'), ((940, 956), 'django.utils.translation.ugettext_lazy', '_', (['"""Link broken"""'], {}), "('Link broken')\n", (941, 956), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((917, 924), 'django.utils.translation.ugettext_lazy', '_', (['"""OK"""'], {}), "('OK')\n", (918, 924), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import requests
import json
def main():
    """POST a sample JSON payload to the local user endpoint and dump the reply.

    On a 2xx response the headers and the decoded JSON body are printed;
    otherwise the status code is reported.
    """
    host = "http://localhost:5006"
    urlpattern = "/user/"
    response = requests.post(f"{host}{urlpattern}", json={'key1': 'random value'})
    # Accept only the 2xx success family.
    if 200 <= response.status_code < 300:
        for k, v in response.headers.items():
            print(f"{k} -> {v}")
        print("=" * 50)
        # response.json() honours the encoding advertised by the server,
        # unlike json.loads(response.text) which relies on a guessed decode.
        body = response.json()
        for k, v in body.items():
            print(f"{k} -> {v}")
    else:
        print(f"Something bad happened: {response.status_code}")


if __name__ == "__main__":
    main()
| [
"json.loads",
"requests.post"
] | [((119, 186), 'requests.post', 'requests.post', (['f"""{host}{urlpattern}"""'], {'json': "{'key1': 'random value'}"}), "(f'{host}{urlpattern}', json={'key1': 'random value'})\n", (132, 186), False, 'import requests\n'), ((353, 378), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (363, 378), False, 'import json\n')] |
#!/usr/bin/python
import os
import glob
import traceback
import datetime
import dandan
from flask import Flask
from flask import abort
from flask import send_file
from flask import send_from_directory
from flask import render_template
from werkzeug.routing import BaseConverter
import config
__VERSION__ = "0.0.1.1"
dirname = os.path.dirname(os.path.abspath(__file__))
favicon = os.path.join(dirname, "static/images/favicon.ico")
class RegexConverter(BaseConverter):
    """URL converter that matches an arbitrary regular expression.

    Used as ``@server.route("/<regex('.+'):filename>")``: the single
    converter argument becomes the pattern werkzeug compiles for the rule.
    """

    def __init__(self, map, *args):
        # NOTE(review): BaseConverter.__init__ is deliberately not called;
        # the two attributes it would set are assigned directly instead —
        # confirm this still matches the installed werkzeug version.
        self.map = map
        self.regex = args[0]
# Flask application instance; the custom converter is registered under the
# name "regex" so routes can use <regex('...'):name> placeholders.
server = Flask(__name__)
server.url_map.converters['regex'] = RegexConverter
def get_data():
    """Build the base template context shared by every rendered page."""
    context = dandan.value.AttrDict()
    context.info.name = "Files"
    context.info.current_time = datetime.datetime.now()
    return context
def get_info(filepath):
    """Collect display metadata (name, size, mtime, kind) for one path."""
    entry = dandan.value.AttrDict()
    entry.filepath = filepath
    entry.basename = os.path.basename(filepath)
    entry.size = os.path.getsize(filepath)
    entry.mtime = datetime.datetime.fromtimestamp(os.path.getmtime(filepath))
    # A path that is neither a regular file nor a directory gets no type.
    if os.path.isdir(filepath):
        entry.type = 'dir'
    elif os.path.isfile(filepath):
        entry.type = "file"
    return entry
def get_response(filename=""):
    """Serve ``filename`` from the basket directory.

    Plain files are streamed back directly; directories are rendered as an
    HTML listing.  Missing paths yield a short diagnostic string.
    """
    basket = os.path.join(dirname, "basket")
    if not os.path.exists(basket):
        # abort(404)
        return "basket not exists."
    filepath = os.path.join(basket, filename)
    if not os.path.exists(filepath):
        # abort(404)
        return "file not exists {}".format(filepath)
    if os.path.isfile(filepath):
        return send_file(filepath)
    # Directory: render the listing template with one entry per child.
    data = get_data()
    data.filename = filename
    data.title = '/{}/'.format(filename) if filename else "/"
    data.items = [
        get_info(os.path.join(filepath, child))
        for child in os.listdir(filepath)
    ]
    return render_template("index.html", **data)
@server.route('/')
@server.route("/<regex('.+'):filename>")
def index(filename=""):
    """Dispatch every request: the favicon is special-cased, everything else
    is served out of the basket directory."""
    wants_favicon = filename == "favicon.ico" and os.path.exists(favicon)
    if wants_favicon:
        return send_file(favicon)
    return get_response(filename)
def main():
    """Start the Flask development server using the values from ``config``."""
    try:
        print("run server {}:{}".format(config.host, config.port))
        server.run(
            host=config.host,
            port=config.port,
            debug=config.debug,
            threaded=True,
        )
    except Exception:
        # Dump the failure instead of letting the interpreter abort noisily.
        traceback.print_exc()
        return


if __name__ == "__main__":
    main()
| [
"flask.render_template",
"os.path.exists",
"os.path.getsize",
"os.listdir",
"flask.Flask",
"dandan.value.AttrDict",
"os.path.join",
"os.path.isfile",
"datetime.datetime.now",
"os.path.isdir",
"os.path.basename",
"os.path.abspath",
"os.path.getmtime",
"flask.send_file",
"traceback.print_e... | [((406, 456), 'os.path.join', 'os.path.join', (['dirname', '"""static/images/favicon.ico"""'], {}), "(dirname, 'static/images/favicon.ico')\n", (418, 456), False, 'import os\n'), ((602, 617), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (607, 617), False, 'from flask import Flask\n'), ((368, 393), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (383, 393), False, 'import os\n'), ((704, 727), 'dandan.value.AttrDict', 'dandan.value.AttrDict', ([], {}), '()\n', (725, 727), False, 'import dandan\n'), ((788, 811), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (809, 811), False, 'import datetime\n'), ((874, 897), 'dandan.value.AttrDict', 'dandan.value.AttrDict', ([], {}), '()\n', (895, 897), False, 'import dandan\n'), ((949, 975), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (965, 975), False, 'import os\n'), ((993, 1018), 'os.path.getsize', 'os.path.getsize', (['filepath'], {}), '(filepath)\n', (1008, 1018), False, 'import os\n'), ((1105, 1129), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (1119, 1129), False, 'import os\n'), ((1286, 1317), 'os.path.join', 'os.path.join', (['dirname', '"""basket"""'], {}), "(dirname, 'basket')\n", (1298, 1317), False, 'import os\n'), ((1435, 1465), 'os.path.join', 'os.path.join', (['basket', 'filename'], {}), '(basket, filename)\n', (1447, 1465), False, 'import os\n'), ((1590, 1614), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (1604, 1614), False, 'import os\n'), ((1670, 1690), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (1680, 1690), False, 'import os\n'), ((1941, 1978), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html', **data)\n", (1956, 1978), False, 'from flask import render_template\n'), ((1069, 1095), 'os.path.getmtime', 'os.path.getmtime', (['filepath'], {}), '(filepath)\n', (1085, 1095), False, 'import 
os\n'), ((1169, 1192), 'os.path.isdir', 'os.path.isdir', (['filepath'], {}), '(filepath)\n', (1182, 1192), False, 'import os\n'), ((1330, 1352), 'os.path.exists', 'os.path.exists', (['basket'], {}), '(basket)\n', (1344, 1352), False, 'import os\n'), ((1478, 1502), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (1492, 1502), False, 'import os\n'), ((1632, 1651), 'flask.send_file', 'send_file', (['filepath'], {}), '(filepath)\n', (1641, 1651), False, 'from flask import send_file\n'), ((2108, 2131), 'os.path.exists', 'os.path.exists', (['favicon'], {}), '(favicon)\n', (2122, 2131), False, 'import os\n'), ((2149, 2167), 'flask.send_file', 'send_file', (['favicon'], {}), '(favicon)\n', (2158, 2167), False, 'from flask import send_file\n'), ((1875, 1904), 'os.path.join', 'os.path.join', (['filepath', 'child'], {}), '(filepath, child)\n', (1887, 1904), False, 'import os\n'), ((2436, 2457), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2455, 2457), False, 'import traceback\n')] |
#!/usr/bin/env python3
from subprocess import Popen, PIPE, DEVNULL, run
import socket
import sys
import traceback
import argparse
import time
import logging
import os
logger = logging.getLogger("django")
def startProcess(command, port):
    """Start the httptracker server process.

    Probes ``port`` on the local host first; if something is already
    listening there, an ``OSError`` is raised instead of spawning a second
    instance.  All other errors simply propagate to the caller.

    :param command: argv-style list for the server process.
    :param port: TCP port the server is expected to bind.
    :raises OSError: if the port is already in use.
    """
    # Probe the port; connect_ex returns 0 when something already accepts.
    # ``with`` guarantees the socket is closed on every path (the original
    # leaked it when the port was busy or connect_ex raised).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        result = sock.connect_ex((socket.gethostbyname(socket.gethostname()), int(port)))
    if result == 0:
        raise OSError("[Errno 98] Address already in use")
    # popen = Popen(command, stdout=DEVNULL, stderr=PIPE)
    os.popen(' '.join(command))
    time.sleep(3)
    # The former except clauses only re-raised; they are gone.  The
    # ``except PermissionError`` clause was unreachable (PermissionError is
    # an OSError subclass and followed a blanket ``except Exception``).
    print("Start httptracker is success.")
def stopProcess(command):
    """Stop a running httptracker server process.

    Kills any process whose command line matches ``command`` via ``pkill``.
    Errors propagate to the caller — the original try/except chain only
    re-raised (and its ``except PermissionError`` clause was unreachable
    behind ``except Exception``), so it is removed.

    :param command: argv-style list identifying the process to kill.
    """
    killCommand = ['pkill', '-f', ' '.join(command)]
    run(killCommand)
    time.sleep(2)
    print("Stop httptracker is success.")
class CustomArgparse(argparse.ArgumentParser):
    """ArgumentParser that prints a friendlier error banner before exiting."""

    def error(self, message):
        """Report *message* on stderr, show the usage text, then exit(2)."""
        if message == "":
            banner = "[Error] Argument is wrong...<(^^;)\n"
        else:
            banner = "[Error] " + message + "\n"
        print(banner, file=sys.stderr)
        self.print_help()
        sys.exit(2)
def main():
    """Command-line entry point.

    Parses ``--mode``/``--port``/``--ipaddress`` and starts, restarts or
    stops the httptracker server accordingly.  Argparse failures exit via
    ``CustomArgparse.error`` (caught below); any other error is reported on
    stderr as a single line.
    """
    try:
        parser = CustomArgparse(
            prog="httptracker",
            description="Track HTTP request to the end of the host.\nex) httptracker --mode start -p 80",
            add_help=True
        )
        parser.add_argument(
            "-m",
            "--mode",
            dest="mode",
            nargs=1,
            required=True,
            help="[Required]Select modes which are 'start', 'restart', 'stop' to execute httptracker.",
        )
        parser.add_argument(
            "-p",
            "--port",
            dest="port",
            nargs=1,
            type=int,
            default=8000,
            required=False,
            help="[Optional]Direct port which httptracker process use. Default is 8000/tcp."
        )
        parser.add_argument(
            "-i",
            "--ipaddress",
            dest="ipaddress",
            nargs=1,
            type=str,
            default="0.0.0.0",
            required=False,
            help="[Optional]Direct listen ip address which httptracker process use. Default is 0.0.0.0 .",
        )
        args = parser.parse_args()
        # nargs=1 options come back as one-element lists while untouched
        # defaults stay scalars; normalise both shapes explicitly instead of
        # the fragile "value != default" comparison used before (which only
        # worked because a list never equals a scalar).
        ipaddress = args.ipaddress[0] if isinstance(args.ipaddress, list) else args.ipaddress
        port = str(args.port[0]) if isinstance(args.port, list) else str(args.port)
        command = ["python3", os.path.dirname(__file__) + "/manage.py", "runserver", ipaddress + ":" + port]
        if args.mode:
            mode = args.mode[0]
            if mode == "start":
                startProcess(command, port)
            elif mode == "restart":
                stopProcess(command)
                startProcess(command, port)
            elif mode == "stop":
                stopProcess(command)
            else:
                parser.error('Argument "--mode" need only "start", "restart", "stop".')
    except SystemExit:
        # Raised by CustomArgparse.error after it has printed its message.
        pass
    except Exception:
        # Was a bare ``except:``; Exception keeps KeyboardInterrupt working.
        print("[Error] " + traceback.format_exc().splitlines()[-1], file=sys.stderr)


if __name__ == '__main__':
    main()
| [
"logging.getLogger",
"traceback.format_exc",
"socket.socket",
"subprocess.run",
"time.sleep",
"os.path.dirname",
"sys.exit",
"socket.gethostname"
] | [((178, 205), 'logging.getLogger', 'logging.getLogger', (['"""django"""'], {}), "('django')\n", (195, 205), False, 'import logging\n'), ((264, 313), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (277, 313), False, 'import socket\n'), ((613, 626), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (623, 626), False, 'import time\n'), ((907, 923), 'subprocess.run', 'run', (['killCommand'], {}), '(killCommand)\n', (910, 923), False, 'from subprocess import Popen, PIPE, DEVNULL, run\n'), ((932, 945), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (942, 945), False, 'import time\n'), ((1415, 1426), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1423, 1426), False, 'import sys\n'), ((2885, 2910), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2900, 2910), False, 'import os\n'), ((369, 389), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (387, 389), False, 'import socket\n'), ((3463, 3485), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3483, 3485), False, 'import traceback\n')] |
"""Datatest main program"""
import sys as _sys
from unittest import TestProgram as _TestProgram
from unittest import defaultTestLoader as _defaultTestLoader
try:
from unittest.signals import installHandler
except ImportError:
installHandler = None
from datatest import DataTestRunner
__unittest = True
__datatest = True
class DataTestProgram(_TestProgram):
    """Command-line test program that runs suites with ``DataTestRunner``."""

    def __init__(self, module='__main__', defaultTest=None, argv=None,
                 testRunner=DataTestRunner, testLoader=_defaultTestLoader,
                 exit=True, verbosity=1, failfast=None, catchbreak=None,
                 buffer=None, ignore=False):
        self.ignore = ignore
        _TestProgram.__init__(
            self, module=module, defaultTest=defaultTest, argv=argv,
            testRunner=testRunner, testLoader=testLoader, exit=exit,
            verbosity=verbosity, failfast=failfast, catchbreak=catchbreak,
            buffer=buffer)

    def runTests(self):
        """Instantiate the runner (if needed), execute the suite and exit."""
        try:
            if self.catchbreak and installHandler:
                installHandler()
        except AttributeError:
            pass  # does not have catchbreak attribute
        if self.testRunner is None:
            self.testRunner = DataTestRunner
        if isinstance(self.testRunner, type):
            # Collect only the options this program instance actually carries.
            kwds = dict(
                (attr, getattr(self, attr))
                for attr in ('verbosity', 'failfast', 'buffer', 'warnings', 'ignore')
                if hasattr(self, attr)
            )
            try:
                testRunner = self.testRunner(**kwds)
            except TypeError:
                # Older runner classes reject 'warnings'; retry without it.
                kwds.pop('warnings', None)
                testRunner = self.testRunner(**kwds)
        else:
            # assumed to be a TestRunner instance
            testRunner = self.testRunner
        self.result = testRunner.run(self.test)
        if self.exit:
            _sys.exit(not self.result.wasSuccessful())
# Version-specific monkey patches: very old interpreters lack some of the
# keyword arguments unittest.TestProgram later grew, so a slimmer __init__
# (dropping verbosity/failfast/catchbreak/buffer) is swapped in on those
# versions after the class body above has been created.
if _sys.version_info[:2] == (3, 1):  # Patch methods for Python 3.1.
    def __init__(self, module='__main__', defaultTest=None, argv=None,
                 testRunner=DataTestRunner, testLoader=_defaultTestLoader,
                 exit=True, ignore=False):
        self.ignore = ignore
        _TestProgram.__init__(self,
                              module=module,
                              defaultTest=defaultTest,
                              argv=argv,
                              testRunner=testRunner,
                              testLoader=testLoader,
                              exit=exit)
    DataTestProgram.__init__ = __init__
elif _sys.version_info[:2] == (2, 6):  # Patch runTests() for Python 2.6.
    def __init__(self, module='__main__', defaultTest=None, argv=None,
                 testRunner=DataTestRunner, testLoader=_defaultTestLoader,
                 exit=True, ignore=False):
        self.exit = exit  # <- 2.6 does not handle exit argument.
        self.ignore = ignore
        _TestProgram.__init__(self,
                              module=module,
                              defaultTest=defaultTest,
                              argv=argv,
                              testRunner=testRunner,
                              testLoader=testLoader)
    DataTestProgram.__init__ = __init__

# Conventional alias so callers can invoke ``datatest.main(...)``.
main = DataTestProgram
| [
"unittest.TestProgram.__init__",
"unittest.signals.installHandler"
] | [((677, 904), 'unittest.TestProgram.__init__', '_TestProgram.__init__', (['self'], {'module': 'module', 'defaultTest': 'defaultTest', 'argv': 'argv', 'testRunner': 'testRunner', 'testLoader': 'testLoader', 'exit': 'exit', 'verbosity': 'verbosity', 'failfast': 'failfast', 'catchbreak': 'catchbreak', 'buffer': 'buffer'}), '(self, module=module, defaultTest=defaultTest, argv=\n argv, testRunner=testRunner, testLoader=testLoader, exit=exit,\n verbosity=verbosity, failfast=failfast, catchbreak=catchbreak, buffer=\n buffer)\n', (698, 904), True, 'from unittest import TestProgram as _TestProgram\n'), ((2518, 2658), 'unittest.TestProgram.__init__', '_TestProgram.__init__', (['self'], {'module': 'module', 'defaultTest': 'defaultTest', 'argv': 'argv', 'testRunner': 'testRunner', 'testLoader': 'testLoader', 'exit': 'exit'}), '(self, module=module, defaultTest=defaultTest, argv=\n argv, testRunner=testRunner, testLoader=testLoader, exit=exit)\n', (2539, 2658), True, 'from unittest import TestProgram as _TestProgram\n'), ((3245, 3374), 'unittest.TestProgram.__init__', '_TestProgram.__init__', (['self'], {'module': 'module', 'defaultTest': 'defaultTest', 'argv': 'argv', 'testRunner': 'testRunner', 'testLoader': 'testLoader'}), '(self, module=module, defaultTest=defaultTest, argv=\n argv, testRunner=testRunner, testLoader=testLoader)\n', (3266, 3374), True, 'from unittest import TestProgram as _TestProgram\n'), ((1296, 1312), 'unittest.signals.installHandler', 'installHandler', ([], {}), '()\n', (1310, 1312), False, 'from unittest.signals import installHandler\n')] |
import numpy as np
from uncertainties import umath as um
def getTeqpl(Teffst, aR, ecc, A=0, f=1/4.):
    """Return the planet equilibrium temperature.

    Adapted from eq. 4 of http://www.mpia.de/homes/ppvi/chapter/madhusudhan.pdf
    and the Stefan-Boltzmann law, with the eccentricity correction to the
    orbit-averaged star-planet distance of Laughlin & Lissauer 2015
    (1501.05685, eq. 5 p. 25) and the redistribution factor of Seager,
    "Exoplanet Atmospheres" (eq. 3.9, p. 30).

    :param float/np.ndarray Teffst: stellar effective temperature
    :param float/np.ndarray aR: orbital semi-major axis over stellar radius
        (dimensionless)
    :param float/np.ndarray ecc: orbital eccentricity
    :param float/np.ndarray A: Bond albedo (between 0 and 1)
    :param float/np.ndarray f: redistribution factor; 1/4 means uniform
        redistribution over the planet, 2/3 means immediate re-radiation
        with no advection
    :return float/np.ndarray: planet equilibrium temperature
    """
    absorbed = (f * (1 - A))**(1 / 4.)
    dilution = np.sqrt(1 / aR)
    ecc_correction = (1 - ecc**2)**(1 / 8.)
    return Teffst * absorbed * dilution / ecc_correction
def getTeqpl_error(Teffst, aR, ecc, A=0, f=1/4.):
    """Return the planet equilibrium temperature with error propagation.

    Same relation as :func:`getTeqpl` (Madhusudhan PPVI eq. 4,
    Stefan-Boltzmann, Laughlin & Lissauer 2015 eq. 5, Seager eq. 3.9) but
    evaluated with ``uncertainties.umath`` so ufloat inputs propagate their
    uncertainties.

    :param Teffst: stellar effective temperature
    :param aR: orbital semi-major axis over stellar radius (dimensionless)
    :param ecc: orbital eccentricity
    :param A: Bond albedo (between 0 and 1)
    :param f: redistribution factor; 1/4 = uniform redistribution, 2/3 = no
        redistribution (immediate re-radiation)
    :return: planet equilibrium temperature
    """
    absorbed = (f * (1 - A))**(1 / 4.)
    dilution = um.sqrt(1 / aR)
    ecc_correction = (1 - ecc**2)**(1 / 8.)
    return Teffst * absorbed * dilution / ecc_correction
def getHtidal(Ms, Rp, a, e):
    """Tidal heating rate of a planet (Jackson 2008 formulation).

    :param Ms: stellar mass
    :param Rp: planetary radius
    :param a: orbital semi-major axis
    :param e: orbital eccentricity
    :return: tidal heating rate H_tidal

    NOTE(review): G is in SI units while the original comments quote a in AU
    and masses in Jupiter masses — confirm the unit convention with callers.
    """
    G = 6.67408 * 10**(-11)  # gravitational constant, m3 kg-1 s-2
    # Modified tidal dissipation factor Qp' = (3*Qp) / (2*k); 500 corresponds
    # to a Love number of 0.3 for terrestrial planets.
    Qp = 500
    prefactor = 63 / 16 * np.pi
    body_term = ((G * Ms)**(3 / 2) * Ms * Rp**3) / Qp
    return prefactor * body_term * a**(-15 / 2) * e**2
def safronov_nb(Mp, Ms, Rp, a):
    """Safronov number (Mp/Ms) * (a/Rp) — Ozturk 2018, Safronov 1972."""
    mass_ratio = Mp / Ms
    distance_ratio = a / Rp
    return mass_ratio * distance_ratio
| [
"numpy.sqrt",
"uncertainties.umath.sqrt"
] | [((1240, 1255), 'numpy.sqrt', 'np.sqrt', (['(1 / aR)'], {}), '(1 / aR)\n', (1247, 1255), True, 'import numpy as np\n'), ((2469, 2484), 'uncertainties.umath.sqrt', 'um.sqrt', (['(1 / aR)'], {}), '(1 / aR)\n', (2476, 2484), True, 'from uncertainties import umath as um\n')] |
import pandas as pd
import re
import os
from tqdm import tqdm
## Cleaning train raw dataset
# Raw-string pattern (the old '\s+' literal raises a DeprecationWarning);
# compiled once and reused for both splits.
_WS = re.compile(r'\s+')

with open('./data/raw/train.crash') as f:  # was leaked via bare open()
    train = f.readlines()

train_ids = []
train_texts = []
train_labels = []
for idx, line in tqdm(enumerate(train)):  # idx: don't shadow builtin ``id``
    line = line.strip()
    if line.startswith("train_"):
        train_ids.append(idx)
    elif line == "0" or line == "1":
        train_labels.append(idx)

# NOTE(review): the 'id'/'label' columns store the *line numbers* of the id
# and label rows, not the id strings / label values themselves (the original
# computed those into unused locals) — confirm this is what downstream
# consumers expect.
for idx, lb in tqdm(zip(train_ids, train_labels)):
    # Record body: lines between the id line and its label line; the outer
    # [1:-1] strips the surrounding quote characters.
    text = ' '.join(train[idx + 1: lb])
    text = _WS.sub(' ', text).strip()[1: -1].strip()
    train_texts.append(text)

train_df = pd.DataFrame({
    'id': train_ids,
    'text': train_texts,
    'label': train_labels
})
os.makedirs('./data', exist_ok=True)
train_df.to_csv('./data/train.csv', encoding='utf-8', index=False)

## Clean test raw dataset
with open("./data/raw/test.crash") as f:
    test = f.readlines()

test_ids = []
test_texts = []
for idx, line in tqdm(enumerate(test)):
    line = line.strip()
    if line.startswith("test_"):
        test_ids.append(idx)

for i, idx in tqdm(enumerate(test_ids)):
    # Each record runs from the line after its id up to the next id (or EOF).
    if i >= len(test_ids) - 1:
        end = len(test)
    else:
        end = test_ids[i + 1]
    text = _WS.sub(' ', ' '.join(test[idx + 1: end])).strip()[1:-1].strip()
    test_texts.append(text)

test_df = pd.DataFrame({
    'id': test_ids,
    'text': test_texts
})
submission = pd.read_csv('./data/raw/sample_submission.csv', encoding='utf-8')
result = pd.concat([test_df, submission], axis=1, sort=False)
result.to_csv('./data/test.csv', encoding='utf-8', index=False)
"os.path.exists",
"pandas.read_csv",
"os.makedirs",
"pandas.DataFrame",
"re.sub",
"pandas.concat"
] | [((646, 721), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': train_ids, 'text': train_texts, 'label': train_labels}"], {}), "({'id': train_ids, 'text': train_texts, 'label': train_labels})\n", (658, 721), True, 'import pandas as pd\n'), ((1388, 1438), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': test_ids, 'text': test_texts}"], {}), "({'id': test_ids, 'text': test_texts})\n", (1400, 1438), True, 'import pandas as pd\n'), ((1464, 1529), 'pandas.read_csv', 'pd.read_csv', (['"""./data/raw/sample_submission.csv"""'], {'encoding': '"""utf-8"""'}), "('./data/raw/sample_submission.csv', encoding='utf-8')\n", (1475, 1529), True, 'import pandas as pd\n'), ((1539, 1591), 'pandas.concat', 'pd.concat', (['[test_df, submission]'], {'axis': '(1)', 'sort': '(False)'}), '([test_df, submission], axis=1, sort=False)\n', (1548, 1591), True, 'import pandas as pd\n'), ((746, 770), 'os.path.exists', 'os.path.exists', (['"""./data"""'], {}), "('./data')\n", (760, 770), False, 'import os\n'), ((776, 797), 'os.makedirs', 'os.makedirs', (['"""./data"""'], {}), "('./data')\n", (787, 797), False, 'import os\n'), ((557, 582), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'text'], {}), "('\\\\s+', ' ', text)\n", (563, 582), False, 'import re\n')] |
import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent

# The PyPI long description is the project README.
README = (HERE / "README.md").read_text()

setup(name="google-ads-api-report-fetcher",
      version="0.1",
      description="Library for fetching reports from Google Ads API and saving them locally / BigQuery.",
      long_description=README,
      long_description_content_type="text/markdown",
      url="https://github.com/google/ads-api-reports-fetcher",
      author="Google Inc. (gTech gPS CSE team)",
      author_email="<EMAIL>",
      license="Apache 2.0",
      classifiers=[
          "Programming Language :: Python :: 3",
          "Intended Audience :: Developers",
          "Topic :: Software Development :: Libraries :: Python Modules",
          "Operating System :: OS Independent",
          "License :: OSI Approved :: Apache Software License"
      ],
      packages=find_packages(include=["runner", "runner.*"]),
      install_requires=[
          "google-ads==14.1.0", "google-cloud-bigquery==2.26.0",
          "pandas==1.3.4", "pyarrow==6.0.1", "tabulate"
      ],
      setup_requires=["pytest-runner"],
      # Fixed: the setuptools keyword is ``tests_require`` — the misspelled
      # ``tests_requires`` used before was silently ignored.
      tests_require=["pytest"],
      entry_points={
          "console_scripts": [
              "fetch-reports=runner.fetcher:main",
              "post-process-queries=runner.post_processor:main",
          ]
      })
| [
"setuptools.find_packages",
"pathlib.Path"
] | [((67, 89), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (79, 89), False, 'import pathlib\n'), ((889, 934), 'setuptools.find_packages', 'find_packages', ([], {'include': "['runner', 'runner.*']"}), "(include=['runner', 'runner.*'])\n", (902, 934), False, 'from setuptools import setup, find_packages\n')] |
import glob
import os
from io import StringIO
from threading import Thread
import logging
from logger import TimeHandler
from costants import THREADS, INFERENCE_GRAPH
from pipeline import pipeline
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(TimeHandler().handler)
class MyThread(Thread):
    """Worker thread that feeds a list of PDF files through ``pipeline``."""

    def __init__(self, name, file_path):
        super().__init__()
        self.name = name
        self.path = file_path

    def run(self):
        """Process every queued PDF sequentially on this thread."""
        for pdf in self.path:
            pdf = os.path.join(pdf)
            buffer = StringIO()
            pipeline(
                pdf_path=pdf,
                inference_graph_path=INFERENCE_GRAPH,
                thread_name=self.name
            )
            # The buffer is logged as captured output (nothing currently
            # writes into it).
            logger.info(buffer.getvalue())
            buffer.close()
if __name__ == '__main__':
    # Collect every PDF below ..\Polizze and spread the files over THREADS
    # worker threads.
    path_list = []
    for path in glob.iglob("..\\Polizze\\" + '/**/*.pdf', recursive=True):
        path_list.append(path)
    el_per_list = int(len(path_list) / THREADS)  # files per thread (floored)
    thread_list = []
    i = 0
    path_list_per_thread = []
    if len(path_list) == 1:
        # Single file: one worker handles the whole list synchronously.
        new_thread = MyThread('Thread_{}'.format(0), path_list)
        new_thread.start()
        new_thread.join()
    else:
        for i in range(0, THREADS):
            # NOTE(review): both slice bounds below stop one element early
            # (the trailing ``- 1``), so the last file of each chunk — and of
            # the whole list — is never processed, and the last two threads
            # receive overlapping tail slices.  Confirm whether intentional.
            if i < THREADS - 2:
                path_list_per_thread = path_list[el_per_list * i:el_per_list * (i + 1) - 1]
            else:
                path_list_per_thread = path_list[
                    el_per_list * i:len(path_list) - 1]  # empty list if there is only one element
            new_thread = MyThread('Thread_{}'.format(i), path_list_per_thread)
            new_thread.start()
            thread_list.append(new_thread)
        for new_thread in thread_list:
            new_thread.join()
| [
"logging.getLogger",
"threading.Thread.__init__",
"glob.iglob",
"os.path.join",
"logger.TimeHandler",
"io.StringIO",
"pipeline.pipeline"
] | [((216, 243), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (233, 243), False, 'import logging\n'), ((917, 974), 'glob.iglob', 'glob.iglob', (["('..\\\\Polizze\\\\' + '/**/*.pdf')"], {'recursive': '(True)'}), "('..\\\\Polizze\\\\' + '/**/*.pdf', recursive=True)\n", (927, 974), False, 'import glob\n'), ((296, 309), 'logger.TimeHandler', 'TimeHandler', ([], {}), '()\n', (307, 309), False, 'from logger import TimeHandler\n'), ((399, 420), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (414, 420), False, 'from threading import Thread\n'), ((562, 585), 'os.path.join', 'os.path.join', (['file_path'], {}), '(file_path)\n', (574, 585), False, 'import os\n'), ((604, 614), 'io.StringIO', 'StringIO', ([], {}), '()\n', (612, 614), False, 'from io import StringIO\n'), ((628, 721), 'pipeline.pipeline', 'pipeline', ([], {'pdf_path': 'file_path', 'inference_graph_path': 'INFERENCE_GRAPH', 'thread_name': 'self.name'}), '(pdf_path=file_path, inference_graph_path=INFERENCE_GRAPH,\n thread_name=self.name)\n', (636, 721), False, 'from pipeline import pipeline\n')] |
import time
import sys
import json
import argparse
from tqdm import trange
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import numpy as np
from scipy.spatial.distance import jensenshannon
import gym
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.ticker import MaxNLocator
from matplotlib.lines import Line2D
import pandemic_simulator as ps
from pandemic_simulator.environment.reward import RewardFunction, SumReward, RewardFunctionFactory, RewardFunctionType
from pandemic_simulator.environment.interfaces import InfectionSummary
from pandemic_simulator.viz import PandemicViz
from pandemic_simulator.environment import PandemicSimOpts
from stable_baselines3.common import base_class
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
def hellinger(p, q):
    """Row-wise Hellinger distance between two batches of distributions.

    ``p`` and ``q`` are numpy arrays whose rows are probability vectors;
    one distance per row is returned.
    """
    diff = np.sqrt(p) - np.sqrt(q)
    scale = 1.0 / np.sqrt(2.0)
    return scale * np.sqrt(np.sum(np.square(diff), axis=1))
def evaluate_policy(
    name: str,
    model: "base_class.BaseAlgorithm",
    base_model: "base_class.BaseAlgorithm",
    env: Union[gym.Env, VecEnv],
    n_eval_episodes: int = 32,
    deterministic: bool = True,
    render: bool = False,
    viz: Optional[PandemicViz] = None,
    reward_threshold: Optional[float] = None,
    return_episode_rewards: bool = False,
    warn: bool = True,
) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:
    """
    Runs policy for ``n_eval_episodes`` episodes and returns average reward.
    If a vector env is passed in, this divides the episodes to evaluate onto the
    different elements of the vector env. This static division of work is done to
    remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more
    details and discussion.
    .. note::
        If environment has not been wrapped with ``Monitor`` wrapper, reward and
        episode lengths are counted as it appears with ``env.step`` calls. If
        the environment contains wrappers that modify rewards or episode lengths
        (e.g. reward scaling, early episode reset), these will affect the evaluation
        results as well. You can avoid this by wrapping environment with ``Monitor``
        wrapper before anything else.
    :param model: The RL agent you want to evaluate.
    :param env: The gym environment or ``VecEnv`` environment.
    :param n_eval_episodes: Number of episode to evaluate the agent
    :param deterministic: Whether to use deterministic or stochastic actions
    :param render: Whether to render the environment or not
    :param reward_threshold: Minimum expected reward per episode,
        this will raise an error if the performance is not met
    :param return_episode_rewards: If True, a list of rewards and episode lengths
        per episode will be returned instead of the mean.
    :param warn: If True (default), warns user about lack of a Monitor wrapper in the
        evaluation environment.
    :return: Mean reward per episode, std of reward per episode.
        Returns ([float], [int]) when ``return_episode_rewards`` is True, first
        list containing per-episode rewards and second containing per-episode lengths
        (in number of steps).
    """
    # NOTE(review): this body rolls out a fixed 200 steps and always calls
    # predict with deterministic=True; n_eval_episodes, deterministic,
    # render, reward_threshold, return_episode_rewards and warn are not
    # consulted here — confirm the docstring still matches intent.
    if not isinstance(env, VecEnv):
        env = DummyVecEnv([lambda: env])
    episode_rewards = []
    reward_std = []
    episode_true_rewards = []
    true_reward_std = []
    episode_true_rewards2 = []
    true_reward_std2 = []
    vfs = []
    log_probs = []
    ents = []
    base_vfs = []
    base_log_probs = []
    base_ents = []
    kls = []
    js = []
    h = []
    numpy_obs = env.reset()
    states = None
    for t in range(200):
        actions, states = model.predict(numpy_obs, state=states, deterministic=True)
        # Per-step policy statistics (value, log-prob, entropy) for both the
        # evaluated model and the reference base_model on the same actions.
        vf, logp, ent = model.policy.evaluate_actions(torch.as_tensor(numpy_obs), torch.as_tensor(actions))
        base_vf, base_logp, base_ent = base_model.policy.evaluate_actions(torch.as_tensor(numpy_obs), torch.as_tensor(actions))
        vfs.append(torch.mean(vf).detach().item())
        log_probs.append(torch.mean(logp).detach().item())
        ents.append(torch.mean(ent).detach().item())
        base_vfs.append(torch.mean(base_vf).detach().item())
        base_log_probs.append(torch.mean(base_logp).detach().item())
        base_ents.append(torch.mean(base_ent).detach().item())
        # Distances
        log_ratio = logp - base_logp
        # Estimator of KL from http://joschu.net/blog/kl-approx.html
        kls.append(torch.mean(torch.exp(log_ratio) - 1 - log_ratio).item())
        # Full categorical action distributions of both policies (uses the
        # private SB3 _get_latent/_get_action_dist_from_latent API).
        latent_pi, _, latent_sde = model.policy._get_latent(torch.as_tensor(numpy_obs))
        model_dist = model.policy._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde).distribution.probs.detach().numpy()
        latent_pi, _, latent_sde = base_model.policy._get_latent(torch.as_tensor(numpy_obs))
        base_dist = base_model.policy._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde).distribution.probs.detach().numpy()
        js.append(np.mean(jensenshannon(model_dist, base_dist, axis=1)).item())
        h.append(np.mean(hellinger(model_dist, base_dist)).item())
        numpy_obs, _, done, info = env.step(actions)
        # Per-env reward signals exposed as attributes by the pandemic env.
        rew = env.get_attr("last_reward")
        true_rew = env.get_attr("get_true_reward")
        true_rew2 = env.get_attr("get_true_reward2")
        episode_rewards.append(np.mean(rew))
        reward_std.append(rew)
        episode_true_rewards.append(np.mean(true_rew))
        true_reward_std.append(true_rew)
        episode_true_rewards2.append(np.mean(true_rew2))
        true_reward_std2.append(true_rew2)
        obs = env.get_attr("observation")
        # NOTE(review): infection_data/threshold_data are accumulated but
        # never used below.
        infection_data = np.zeros((1, 5))
        threshold_data = np.zeros(len(obs))
        for o in obs:
            infection_data += o.global_infection_summary[-1]
        gis = np.array([o.global_infection_summary[-1] for o in obs]).squeeze(1)
        gts = np.array([o.global_testing_summary[-1] for o in obs]).squeeze(1)
        stage = np.array([o.stage[-1].item() for o in obs])
        if viz:
            viz.record_list(obs[0], gis, gts, stage, rew, true_rew, true_rew2=true_rew2)
    # Totals over the 200-step rollout; std is taken over the per-env sums.
    reward = np.sum(episode_rewards).item()
    true_reward = np.sum(episode_true_rewards).item()
    true_reward2 = np.sum(episode_true_rewards2).item()
    #if viz:
    #    viz.plot(name=name, evaluate=True, plots_to_show=['critical_summary', 'stages', 'cumulative_reward', 'cumulative_true_reward2'])
    #    viz.reset()
    return reward, np.std(np.sum(np.array(reward_std), axis=0)).item(), \
        true_reward, np.std(np.sum(np.array(true_reward_std), axis=0)).item(), \
        true_reward2, np.std(np.sum(np.array(true_reward_std2), axis=0)).item(), \
        kls, js, h, log_probs, base_log_probs, vfs, base_vfs
def plot_critical_summary(ax, viz, color, sty, m):
gis = np.vstack(viz._gis).squeeze()
gis_std = np.vstack(viz._gis_std).squeeze()
ax.plot(viz._num_persons * gis[:, viz._critical_index], color='black', linestyle=sty, linewidth=1, label='_nolegend_')
#ax.fill_between(np.arange(len(gis)), viz._num_persons * (gis-gis_std)[:, viz._critical_index], viz._num_persons * (gis+gis_std)[:, viz._critical_index], alpha=0.1, color=color)
ax.plot(np.arange(gis.shape[0]), np.ones(gis.shape[0]) * viz._max_hospital_capacity, 'y')
ax.legend(['Max hospital capacity'], loc='upper left')
ax.set_ylim(-0.1, viz._max_hospital_capacity * 3)
ax.set_title('ICU Usage', fontsize=16)
ax.set_xlabel('time (days)', fontsize=16)
ax.set_ylabel('persons', fontsize=16)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
height = viz._num_persons * gis[m, viz._critical_index]
ax.plot([m, m], [-0.1, height], color=color, linestyle=sty, linewidth=2)
ax.plot([0, m], [height, height], color=color, linestyle=sty, linewidth=2)
def plot_stages(ax, viz, color, sty):
days = np.arange(len(viz._stages))
stages = np.array(viz._stages)
stages_std = np.array(viz._stages_std)
ax.plot(days, stages, color='black', linestyle=sty, linewidth=1)
#ax.fill_between(days, stages - stages_std, stages + stages_std, alpha=0.1, color=color)
ax.set_ylim(-0.1, 5) # This assumes at most 5 stages!!
ax.set_title('Regulation Stage', fontsize=16)
ax.set_xlabel('time (days)', fontsize=16)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
m = np.argmax(stages[50:]) + 50
ax.plot([m, m], [-0.1, stages[m]], color=color, linestyle=sty, linewidth=2)
p1 = Line2D([0,1],[0,1],linestyle='-', color='black')
p2 = Line2D([0,1],[0,1],linestyle='--', color='black')
ax.legend([p1, p2], ['smaller policy', 'larger policy'], loc='upper right')
return m
def plot(v1, v2):
fig, (ax1, ax2) = plt.subplots(1, 2)
c1 = 'red'
c2 = 'blue'
s1 = '-'
s2 = '--'
m1 = plot_stages(ax2, v1, c1, s1)
plot_critical_summary(ax1, v1, c1, s1, m1)
m2 = plot_stages(ax2, v2, c2, s2)
plot_critical_summary(ax1, v2, c2, s2, m2)
ax1.figure.set_size_inches(4, 3)
ax2.figure.set_size_inches(4, 3)
fig.set_size_inches(8, 3)
plt.savefig('test.svg',dpi=120, bbox_inches='tight', pad_inches = 0, format='svg')
def make_cfg():
# cfg = ps.sh.small_town_config
# cfg.delta_start_lo = int(sys.argv[6])
# cfg.delta_start_hi = int(sys.argv[7])
# return cfg
sim_config = ps.env.PandemicSimConfig(
num_persons=500,
location_configs=[
ps.env.LocationConfig(ps.env.Home, num=150),
ps.env.LocationConfig(ps.env.GroceryStore, num=2, num_assignees=5, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.Office, num=2, num_assignees=150, state_opts=dict(visitor_capacity=0)),
ps.env.LocationConfig(ps.env.School, num=10, num_assignees=2, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.Hospital, num=1, num_assignees=15, state_opts=dict(patient_capacity=5)),
ps.env.LocationConfig(ps.env.RetailStore, num=2, num_assignees=5, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.HairSalon, num=2, num_assignees=3, state_opts=dict(visitor_capacity=5)),
ps.env.LocationConfig(ps.env.Restaurant, num=1, num_assignees=6, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.Bar, num=1, num_assignees=3, state_opts=dict(visitor_capacity=30))
],
person_routine_assignment=ps.sh.DefaultPersonRoutineAssignment(),
delta_start_lo = 95,
delta_start_hi = 105
)
sim_config_med = ps.env.PandemicSimConfig(
num_persons=2000,
location_configs=[
ps.env.LocationConfig(ps.env.Home, num=600),
ps.env.LocationConfig(ps.env.GroceryStore, num=4, num_assignees=10, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.Office, num=4, num_assignees=300, state_opts=dict(visitor_capacity=0)),
ps.env.LocationConfig(ps.env.School, num=20, num_assignees=4, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.Hospital, num=2, num_assignees=30, state_opts=dict(patient_capacity=5)),
ps.env.LocationConfig(ps.env.RetailStore, num=4, num_assignees=10, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.HairSalon, num=4, num_assignees=6, state_opts=dict(visitor_capacity=5)),
ps.env.LocationConfig(ps.env.Restaurant, num=2, num_assignees=12, state_opts=dict(visitor_capacity=30)),
ps.env.LocationConfig(ps.env.Bar, num=2, num_assignees=6, state_opts=dict(visitor_capacity=30))
],
person_routine_assignment=ps.sh.DefaultPersonRoutineAssignment(),
delta_start_lo = 95,
delta_start_hi = 105
)
return sim_config
def make_reg():
return ps.sh.austin_regulations
def make_sim(sim_config, noise):
sim_opt = PandemicSimOpts()
sim_opt.spontaneous_testing_rate = noise
return ps.env.PandemicSim.from_config(sim_config=sim_config, sim_opts=sim_opt)
def make_viz(sim_config):
return ps.viz.GymViz.from_config(sim_config=sim_config)
def load_model(env, model_path, width, depth):
agent = ps.model.StageModel(env = env)
d_model = width
n_layers = depth
net_arch = [d_model] * n_layers if n_layers != 0 else []
policy_kwargs = {
"net_arch": [dict(pi=net_arch, vf=net_arch)],
}
model = agent.get_model("ppo", policy_kwargs = policy_kwargs, verbose = 0)
return model.load(model_path)
def init(args, noise):
n_cpus = args.n_cpus
ps.init_globals(seed=args.seed)
sim_config = make_cfg()
regulations = make_reg()
viz = make_viz(sim_config)
done_fn = ps.env.DoneFunctionFactory.default(ps.env.DoneFunctionType.TIME_LIMIT, horizon=200)
reward_fn = SumReward(
reward_fns=[
RewardFunctionFactory.default(RewardFunctionType.INFECTION_SUMMARY_ABOVE_THRESHOLD,
summary_type=InfectionSummary.CRITICAL,
threshold=sim_config.max_hospital_capacity / sim_config.num_persons),
RewardFunctionFactory.default(RewardFunctionType.INFECTION_SUMMARY_ABSOLUTE,
summary_type=InfectionSummary.CRITICAL),
RewardFunctionFactory.default(RewardFunctionType.LOWER_STAGE,
num_stages=len(regulations)),
RewardFunctionFactory.default(RewardFunctionType.SMOOTH_STAGE_CHANGES,
num_stages=len(regulations))
],
weights=[0, 10, 0.1, 0.01]
)
gym = ps.env.PandemicPolicyGymEnv.from_config(
sim_config=sim_config,
sim_opts = PandemicSimOpts(spontaneous_testing_rate=noise),
pandemic_regulations=regulations,
done_fn=done_fn,
reward_fn=reward_fn,
constrain=True,
four_start=False,
obs_history_size=3,
num_days_in_obs=8
)
env = gym.get_multi_env(n=n_cpus) if n_cpus > 1 else gym.get_single_env()
return env, viz
def evaluate(env, model_path, width, depth, base_model, viz):
model = load_model(env, model_path, width, depth)
model_parameters = filter(lambda p: p.requires_grad, model.policy.mlp_extractor.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
params = int(params)
print(f"Evaluating {model_path+str(width)}...")
reward, rstd, true_reward, trstd, true_reward2, tr2std, kl, js, h, log_probs, base_log_probs, vfs, base_vfs = evaluate_policy(model_path, model, base_model, env, viz=viz)
env.close()
print(f"Model: {model_path}. Proxy: {reward}. Objective: {true_reward}.")
return params, reward, rstd, true_reward, trstd, true_reward2, tr2std, kl, js, h, log_probs, base_log_probs, vfs, base_vfs
def main():
parser = argparse.ArgumentParser()
parser.add_argument('model_path')
parser.add_argument('base_model_path')
parser.add_argument('base_width', type=int)
parser.add_argument('base_depth', type=int)
parser.add_argument('--seed', type=int, default=17)
parser.add_argument('--n_cpus', type=int, default=32)
parser.add_argument('--n_episodes', type=int, default=32)
parser.add_argument('--epoch', type=int, default=0)
parser.add_argument('--width', type=int, default=0)
#parser.add_argument('--noise', type=str, default="")
args = parser.parse_known_args(sys.argv[1:])[0]
vs = []
for w in [16, 112]:
env, viz = init(args, 0.02)
base_model = load_model(env, args.base_model_path, args.base_width, args.base_depth)
evaluate(env, args.model_path+str(w), w, 2, base_model, viz)
vs.append(viz)
plot(vs[0], vs[1])
# params, reward, reward_std, true_reward, true_reward_std, true_reward2, true_reward2_std, kls, js, h, log_probs, base_log_probs, vfs, base_vfs, e, noises = \
# [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []
# #widths = [4, 8, 12, 16, 20, 24, 28, 32] if args.width == 0 else [40, 48, 56, 64, 80, 96, 112, 128]
# for w in [args.width]:
# for noise in ['01', '02', '003', '005', '03', '04', '05', '06', '07', '08', '09', '095', '1']:
# n2n = {'01':0.1, '02':0.2, '003':0.03, '005':0.05, '03':0.3, '04':0.4, '05':0.5, '06':0.6, '07':0.7, '08':0.8, '09':0.9, '095':0.95, '1':1}
# env, viz = init(args, n2n[noise])
# base_model = load_model(env, args.base_model_path, args.base_width, args.base_depth)
# p, r, rs, tr, trs, tr2, tr2s, kl, j_s, h_, logp, blogp, vf, bvf = evaluate(env, args.model_path+noise+"_"+str(w), w, 2, base_model, viz)
# noises.append(n2n[noise])
# params.append(p)
# reward.append(r)
# reward_std.append(rs)
# true_reward.append(tr)
# true_reward_std.append(trs)
# true_reward2.append(tr2)
# true_reward2_std.append(tr2s)
# kls.append(kl)
# js.append(j_s)
# h.append(h_)
# log_probs.append(logp)
# base_log_probs.append(blogp)
# vfs.append(vf)
# base_vfs.append(bvf)
# e.append(args.epoch)
# f = open(f"pandemic_{args.epoch}_{args.width}_noise.json", "w")
# json.dump({'params':params, 'noise':noises, 'rew': reward, 'rew_std': reward_std, 'true_rew': true_reward, 'true_rew_std': true_reward_std, 'true_rew2': true_reward2,
# 'true_rew2_std': true_reward2_std, 'kls': kls, 'js': js, 'h': h, 'log_probs': log_probs, 'base_log_probs': base_log_probs, 'vfs': vfs, 'base_vfs': base_vfs, 'e': e}, f)
# f.close()
if __name__ == '__main__':
main()
| [
"pandemic_simulator.sh.DefaultPersonRoutineAssignment",
"torch.as_tensor",
"numpy.sqrt",
"pandemic_simulator.model.StageModel",
"torch.exp",
"numpy.array",
"pandemic_simulator.init_globals",
"matplotlib.ticker.MaxNLocator",
"pandemic_simulator.viz.GymViz.from_config",
"pandemic_simulator.env.Locat... | [((8355, 8376), 'numpy.array', 'np.array', (['viz._stages'], {}), '(viz._stages)\n', (8363, 8376), True, 'import numpy as np\n'), ((8395, 8420), 'numpy.array', 'np.array', (['viz._stages_std'], {}), '(viz._stages_std)\n', (8403, 8420), True, 'import numpy as np\n'), ((8930, 8982), 'matplotlib.lines.Line2D', 'Line2D', (['[0, 1]', '[0, 1]'], {'linestyle': '"""-"""', 'color': '"""black"""'}), "([0, 1], [0, 1], linestyle='-', color='black')\n", (8936, 8982), False, 'from matplotlib.lines import Line2D\n'), ((8989, 9042), 'matplotlib.lines.Line2D', 'Line2D', (['[0, 1]', '[0, 1]'], {'linestyle': '"""--"""', 'color': '"""black"""'}), "([0, 1], [0, 1], linestyle='--', color='black')\n", (8995, 9042), False, 'from matplotlib.lines import Line2D\n'), ((9178, 9196), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (9190, 9196), True, 'import matplotlib.pyplot as plt\n'), ((9553, 9639), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test.svg"""'], {'dpi': '(120)', 'bbox_inches': '"""tight"""', 'pad_inches': '(0)', 'format': '"""svg"""'}), "('test.svg', dpi=120, bbox_inches='tight', pad_inches=0, format=\n 'svg')\n", (9564, 9639), True, 'import matplotlib.pyplot as plt\n'), ((12413, 12430), 'pandemic_simulator.environment.PandemicSimOpts', 'PandemicSimOpts', ([], {}), '()\n', (12428, 12430), False, 'from pandemic_simulator.environment import PandemicSimOpts\n'), ((12489, 12560), 'pandemic_simulator.env.PandemicSim.from_config', 'ps.env.PandemicSim.from_config', ([], {'sim_config': 'sim_config', 'sim_opts': 'sim_opt'}), '(sim_config=sim_config, sim_opts=sim_opt)\n', (12519, 12560), True, 'import pandemic_simulator as ps\n'), ((12602, 12650), 'pandemic_simulator.viz.GymViz.from_config', 'ps.viz.GymViz.from_config', ([], {'sim_config': 'sim_config'}), '(sim_config=sim_config)\n', (12627, 12650), True, 'import pandemic_simulator as ps\n'), ((12714, 12742), 'pandemic_simulator.model.StageModel', 
'ps.model.StageModel', ([], {'env': 'env'}), '(env=env)\n', (12733, 12742), True, 'import pandemic_simulator as ps\n'), ((13112, 13143), 'pandemic_simulator.init_globals', 'ps.init_globals', ([], {'seed': 'args.seed'}), '(seed=args.seed)\n', (13127, 13143), True, 'import pandemic_simulator as ps\n'), ((13250, 13337), 'pandemic_simulator.env.DoneFunctionFactory.default', 'ps.env.DoneFunctionFactory.default', (['ps.env.DoneFunctionType.TIME_LIMIT'], {'horizon': '(200)'}), '(ps.env.DoneFunctionType.TIME_LIMIT,\n horizon=200)\n', (13284, 13337), True, 'import pandemic_simulator as ps\n'), ((15576, 15601), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15599, 15601), False, 'import argparse\n'), ((3555, 3582), 'stable_baselines3.common.vec_env.DummyVecEnv', 'DummyVecEnv', (['[lambda : env]'], {}), '([lambda : env])\n', (3566, 3582), False, 'from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv\n'), ((6049, 6065), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {}), '((1, 5))\n', (6057, 6065), True, 'import numpy as np\n'), ((7649, 7672), 'numpy.arange', 'np.arange', (['gis.shape[0]'], {}), '(gis.shape[0])\n', (7658, 7672), True, 'import numpy as np\n'), ((8012, 8037), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (8023, 8037), False, 'from matplotlib.ticker import MaxNLocator\n'), ((8775, 8800), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (8786, 8800), False, 'from matplotlib.ticker import MaxNLocator\n'), ((8811, 8833), 'numpy.argmax', 'np.argmax', (['stages[50:]'], {}), '(stages[50:])\n', (8820, 8833), True, 'import numpy as np\n'), ((14686, 14713), 'gym.get_multi_env', 'gym.get_multi_env', ([], {'n': 'n_cpus'}), '(n=n_cpus)\n', (14703, 14713), False, 'import gym\n'), ((14733, 14753), 'gym.get_single_env', 'gym.get_single_env', ([], {}), '()\n', (14751, 14753), False, 'import gym\n'), ((974, 986), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], 
{}), '(2.0)\n', (981, 986), True, 'import numpy as np\n'), ((4124, 4150), 'torch.as_tensor', 'torch.as_tensor', (['numpy_obs'], {}), '(numpy_obs)\n', (4139, 4150), False, 'import torch\n'), ((4152, 4176), 'torch.as_tensor', 'torch.as_tensor', (['actions'], {}), '(actions)\n', (4167, 4176), False, 'import torch\n'), ((4253, 4279), 'torch.as_tensor', 'torch.as_tensor', (['numpy_obs'], {}), '(numpy_obs)\n', (4268, 4279), False, 'import torch\n'), ((4281, 4305), 'torch.as_tensor', 'torch.as_tensor', (['actions'], {}), '(actions)\n', (4296, 4305), False, 'import torch\n'), ((4942, 4968), 'torch.as_tensor', 'torch.as_tensor', (['numpy_obs'], {}), '(numpy_obs)\n', (4957, 4968), False, 'import torch\n'), ((5170, 5196), 'torch.as_tensor', 'torch.as_tensor', (['numpy_obs'], {}), '(numpy_obs)\n', (5185, 5196), False, 'import torch\n'), ((5732, 5744), 'numpy.mean', 'np.mean', (['rew'], {}), '(rew)\n', (5739, 5744), True, 'import numpy as np\n'), ((5815, 5832), 'numpy.mean', 'np.mean', (['true_rew'], {}), '(true_rew)\n', (5822, 5832), True, 'import numpy as np\n'), ((5914, 5932), 'numpy.mean', 'np.mean', (['true_rew2'], {}), '(true_rew2)\n', (5921, 5932), True, 'import numpy as np\n'), ((6544, 6567), 'numpy.sum', 'np.sum', (['episode_rewards'], {}), '(episode_rewards)\n', (6550, 6567), True, 'import numpy as np\n'), ((6594, 6622), 'numpy.sum', 'np.sum', (['episode_true_rewards'], {}), '(episode_true_rewards)\n', (6600, 6622), True, 'import numpy as np\n'), ((6650, 6679), 'numpy.sum', 'np.sum', (['episode_true_rewards2'], {}), '(episode_true_rewards2)\n', (6656, 6679), True, 'import numpy as np\n'), ((7250, 7269), 'numpy.vstack', 'np.vstack', (['viz._gis'], {}), '(viz._gis)\n', (7259, 7269), True, 'import numpy as np\n'), ((7295, 7318), 'numpy.vstack', 'np.vstack', (['viz._gis_std'], {}), '(viz._gis_std)\n', (7304, 7318), True, 'import numpy as np\n'), ((7674, 7695), 'numpy.ones', 'np.ones', (['gis.shape[0]'], {}), '(gis.shape[0])\n', (7681, 7695), True, 'import numpy as np\n'), 
((10942, 10980), 'pandemic_simulator.sh.DefaultPersonRoutineAssignment', 'ps.sh.DefaultPersonRoutineAssignment', ([], {}), '()\n', (10978, 10980), True, 'import pandemic_simulator as ps\n'), ((12182, 12220), 'pandemic_simulator.sh.DefaultPersonRoutineAssignment', 'ps.sh.DefaultPersonRoutineAssignment', ([], {}), '()\n', (12218, 12220), True, 'import pandemic_simulator as ps\n'), ((14379, 14426), 'pandemic_simulator.environment.PandemicSimOpts', 'PandemicSimOpts', ([], {'spontaneous_testing_rate': 'noise'}), '(spontaneous_testing_rate=noise)\n', (14394, 14426), False, 'from pandemic_simulator.environment import PandemicSimOpts\n'), ((6213, 6268), 'numpy.array', 'np.array', (['[o.global_infection_summary[-1] for o in obs]'], {}), '([o.global_infection_summary[-1] for o in obs])\n', (6221, 6268), True, 'import numpy as np\n'), ((6295, 6348), 'numpy.array', 'np.array', (['[o.global_testing_summary[-1] for o in obs]'], {}), '([o.global_testing_summary[-1] for o in obs])\n', (6303, 6348), True, 'import numpy as np\n'), ((9919, 9962), 'pandemic_simulator.env.LocationConfig', 'ps.env.LocationConfig', (['ps.env.Home'], {'num': '(150)'}), '(ps.env.Home, num=150)\n', (9940, 9962), True, 'import pandemic_simulator as ps\n'), ((11156, 11199), 'pandemic_simulator.env.LocationConfig', 'ps.env.LocationConfig', (['ps.env.Home'], {'num': '(600)'}), '(ps.env.Home, num=600)\n', (11177, 11199), True, 'import pandemic_simulator as ps\n'), ((13407, 13614), 'pandemic_simulator.environment.reward.RewardFunctionFactory.default', 'RewardFunctionFactory.default', (['RewardFunctionType.INFECTION_SUMMARY_ABOVE_THRESHOLD'], {'summary_type': 'InfectionSummary.CRITICAL', 'threshold': '(sim_config.max_hospital_capacity / sim_config.num_persons)'}), '(RewardFunctionType.\n INFECTION_SUMMARY_ABOVE_THRESHOLD, summary_type=InfectionSummary.\n CRITICAL, threshold=sim_config.max_hospital_capacity / sim_config.\n num_persons)\n', (13436, 13614), False, 'from pandemic_simulator.environment.reward import 
RewardFunction, SumReward, RewardFunctionFactory, RewardFunctionType\n'), ((13712, 13832), 'pandemic_simulator.environment.reward.RewardFunctionFactory.default', 'RewardFunctionFactory.default', (['RewardFunctionType.INFECTION_SUMMARY_ABSOLUTE'], {'summary_type': 'InfectionSummary.CRITICAL'}), '(RewardFunctionType.INFECTION_SUMMARY_ABSOLUTE,\n summary_type=InfectionSummary.CRITICAL)\n', (13741, 13832), False, 'from pandemic_simulator.environment.reward import RewardFunction, SumReward, RewardFunctionFactory, RewardFunctionType\n'), ((1015, 1025), 'numpy.sqrt', 'np.sqrt', (['p'], {}), '(p)\n', (1022, 1025), True, 'import numpy as np\n'), ((1028, 1038), 'numpy.sqrt', 'np.sqrt', (['q'], {}), '(q)\n', (1035, 1038), True, 'import numpy as np\n'), ((5363, 5407), 'scipy.spatial.distance.jensenshannon', 'jensenshannon', (['model_dist', 'base_dist'], {'axis': '(1)'}), '(model_dist, base_dist, axis=1)\n', (5376, 5407), False, 'from scipy.spatial.distance import jensenshannon\n'), ((6904, 6924), 'numpy.array', 'np.array', (['reward_std'], {}), '(reward_std)\n', (6912, 6924), True, 'import numpy as np\n'), ((6985, 7010), 'numpy.array', 'np.array', (['true_reward_std'], {}), '(true_reward_std)\n', (6993, 7010), True, 'import numpy as np\n'), ((7072, 7098), 'numpy.array', 'np.array', (['true_reward_std2'], {}), '(true_reward_std2)\n', (7080, 7098), True, 'import numpy as np\n'), ((4329, 4343), 'torch.mean', 'torch.mean', (['vf'], {}), '(vf)\n', (4339, 4343), False, 'import torch\n'), ((4387, 4403), 'torch.mean', 'torch.mean', (['logp'], {}), '(logp)\n', (4397, 4403), False, 'import torch\n'), ((4442, 4457), 'torch.mean', 'torch.mean', (['ent'], {}), '(ent)\n', (4452, 4457), False, 'import torch\n'), ((4500, 4519), 'torch.mean', 'torch.mean', (['base_vf'], {}), '(base_vf)\n', (4510, 4519), False, 'import torch\n'), ((4568, 4589), 'torch.mean', 'torch.mean', (['base_logp'], {}), '(base_logp)\n', (4578, 4589), False, 'import torch\n'), ((4633, 4653), 'torch.mean', 'torch.mean', 
(['base_ent'], {}), '(base_ent)\n', (4643, 4653), False, 'import torch\n'), ((4833, 4853), 'torch.exp', 'torch.exp', (['log_ratio'], {}), '(log_ratio)\n', (4842, 4853), False, 'import torch\n')] |
from setuptools import setup
setup(
name="codewars_unittest",
version="0.1.0",
packages=["codewars_unittest"],
license="MIT",
description="unittest runner with Codewars output",
install_requires=[],
url="https://github.com/Codewars/python-unittest",
)
| [
"setuptools.setup"
] | [((30, 264), 'setuptools.setup', 'setup', ([], {'name': '"""codewars_unittest"""', 'version': '"""0.1.0"""', 'packages': "['codewars_unittest']", 'license': '"""MIT"""', 'description': '"""unittest runner with Codewars output"""', 'install_requires': '[]', 'url': '"""https://github.com/Codewars/python-unittest"""'}), "(name='codewars_unittest', version='0.1.0', packages=[\n 'codewars_unittest'], license='MIT', description=\n 'unittest runner with Codewars output', install_requires=[], url=\n 'https://github.com/Codewars/python-unittest')\n", (35, 264), False, 'from setuptools import setup\n')] |
import torch
CONFIG = {"device": torch.device("cuda" if torch.cuda.is_available() else "cpu")}
def get(): return CONFIG["device"]
def set_device(device): CONFIG["device"] = device
| [
"torch.cuda.is_available"
] | [((57, 82), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (80, 82), False, 'import torch\n')] |
import logging
import random
import os
import json
from typing import Tuple, List
import requests
def predict(player_name: str) -> str:
next_move = _predict_next_move(*_get_player_games(player_name))
return _convert_game_to_json(next_move)
R_rock, P_paper, S_scissors, V_spock, L_lizard = ('R', 'P', 'S', 'V', 'L')
INTERNAL_MOVES_ENCODING = [R_rock, P_paper, S_scissors, V_spock, L_lizard]
def _get_player_games(player_name: str) -> Tuple[str, str]:
game_manager_uri = os.getenv("GAME_MANAGER_URI", None)
url = f'{game_manager_uri}/game-manager/api/games?player={player_name}'
logging.info(f'requesting human moves: {url}')
req = requests.get(url)
data = req.json()
return _convert_games_to_str(data["challengerGames"]), _convert_games_to_str(data["humanGames"])
def _convert_games_to_str(games) -> str:
SOURCE_MOVES_ENCODING = [R_rock, P_paper, S_scissors, L_lizard, V_spock]
return "".join([SOURCE_MOVES_ENCODING[game] for game in games])
def _convert_game_to_json(game: str) -> str:
JSON_MOVES_ENCODING = {R_rock: "rock", P_paper: "paper",
S_scissors: "scissors", L_lizard: "lizard", V_spock: "spock"}
return json.dumps({"prediction": JSON_MOVES_ENCODING[game]})
def _zip_moves(challenger_moves: List[str], human_moves: List[str]) -> List[Tuple[str, str]]:
move_encoding_dict = {value: index for index, value in enumerate(INTERNAL_MOVES_ENCODING)}
history = [(move_encoding_dict[i], move_encoding_dict[j])
for i, j in zip(challenger_moves, human_moves)]
return history
def _predict_next_move(challenger_moves: str, human_moves: str) -> str:
history = _zip_moves(challenger_moves, human_moves)
# what would have been predicted in the last rounds?
pred_hist = [_best_next_moves_for_game(
history[:i]) for i in range(2, len(history)+1)]
# if no history prediction, then returns random
if not pred_hist:
return random.choice(INTERNAL_MOVES_ENCODING)
# how would the different predictions have scored?
# we have the pred_hist from moves i=2 to len(history) so we can check
# check https://i.stack.imgur.com/jILea.png for game rules
n_pred = len(pred_hist[0])
scores = [[0]*5 for i in range(n_pred)]
for pred, real in zip(pred_hist[:-1], history[2:]):
for i in range(n_pred):
# %5: When an int is negative it returns the count to the move
# to beat another, in (reverse order) counterclockwise
# i.e -1%5=4, -2%5=3
scores[i][(real[1]-pred[i]+1) % 5] += 1
scores[i][(real[1]-pred[i]+3) % 5] += 1
# 1 & 3 move to the other "moves" that beat another
# for example Rock is beaten with Paper and Spock,
# which are 1 & 3 positions away
scores[i][(real[1]-pred[i]+2) % 5] -= 1
scores[i][(real[1]-pred[i]+4) % 5] -= 1
# depending in predicted strategies, select best one with less risks
# return best counter move
best_scores = [list(max(enumerate(s), key=lambda x: x[1])) for s in scores]
best_scores[-1][1] *= 1.001 # bias towards the simplest strategy
if best_scores[-1][1] < 0.4*len(history):
best_scores[-1][1] *= 1.4
strat, (shift, _) = max(enumerate(best_scores), key=lambda x: x[1][1])
return INTERNAL_MOVES_ENCODING[(pred_hist[-1][strat]+shift) % 5]
def _best_next_moves_for_game(hist: List[str]) -> List[List[str]]:
N = len(hist)
# find longest match of the preceding moves in the earlier history
cand_m = cand_o = cand_b = range(N-1)
for l in range(1, min(N, 20)):
ref = hist[N-l]
# l = 1
# Looks for previous occurrences of the last move in my_moves, since hist[N-l] == hist[-1]
# l = 2
# it checks which of the possible candidates was preceded by the move previous to the last
# and so on... i.e loos for longest chain matching last moves to use the next move
cand_m_tmp = []
for c in cand_m:
if c >= l and hist[c-l+1][0] == ref[0]:
cand_m_tmp.append(c)
if not cand_m_tmp:
cand_m = cand_m[-1:]
else:
cand_m = cand_m_tmp[:]
# same for op_moves
cand_o_tmp = []
for c in cand_o:
if c >= l and hist[c-l+1][1] == ref[1]:
cand_o_tmp.append(c)
if not cand_o_tmp:
cand_o = cand_o[-1:]
else:
cand_o = cand_o_tmp[:]
# same for both_moves i.e directly the zipped tuples
cand_b_tmp = []
for c in cand_b:
if c >= l and hist[c-l+1] == ref:
cand_b_tmp.append(c)
if not cand_b_tmp:
cand_b = cand_b[-1:]
else:
cand_b = cand_b_tmp[:]
# analyze which moves were used how often, i.e a np.bincount
freq_m, freq_o = [0]*5, [0]*5
for m in hist:
freq_m[m[0]] += 1
freq_o[m[1]] += 1
# return predictions (or possible "good" strategies)
last_2_moves = [j for i in hist[:-3:-1] for j in i]
return (last_2_moves + # repeat last moves
[hist[cand_m[-1]+1][0], # history matching of my own moves
# history matching of opponent's moves
hist[cand_o[-1]+1][1],
hist[cand_b[-1]+1][0], # history matching of both
hist[cand_b[-1]+1][1],
freq_m.index(max(freq_m)), # my most frequent move
freq_o.index(max(freq_o)), # opponent's most frequent move
0])
| [
"random.choice",
"os.getenv",
"json.dumps",
"requests.get",
"logging.info"
] | [((505, 540), 'os.getenv', 'os.getenv', (['"""GAME_MANAGER_URI"""', 'None'], {}), "('GAME_MANAGER_URI', None)\n", (514, 540), False, 'import os\n'), ((625, 671), 'logging.info', 'logging.info', (['f"""requesting human moves: {url}"""'], {}), "(f'requesting human moves: {url}')\n", (637, 671), False, 'import logging\n'), ((683, 700), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (695, 700), False, 'import requests\n'), ((1233, 1286), 'json.dumps', 'json.dumps', (["{'prediction': JSON_MOVES_ENCODING[game]}"], {}), "({'prediction': JSON_MOVES_ENCODING[game]})\n", (1243, 1286), False, 'import json\n'), ((2019, 2057), 'random.choice', 'random.choice', (['INTERNAL_MOVES_ENCODING'], {}), '(INTERNAL_MOVES_ENCODING)\n', (2032, 2057), False, 'import random\n')] |
# -*- coding: utf-8 -*-
import io
import collections
from stella.core.utils import RewindableIterator
from stella.core.interpreter.productions import Token
__all__ = ['Tokenizer', 'Lexer']
################################################################################
### Tokenizer
################################################################################
class Tokenizer(object):
def __init__(self, tokens):
self.tokens = tokens
def get_token(self, value):
return next((x for x in self.tokens if x.match(value)), None)
################################################################################
### Lexer
################################################################################
class Lexer(object):
def __init__(self, stream, tokenizer):
iterator = iter(stream)
self.iterator = RewindableIterator(iterator)
self.tokenizer = tokenizer
def __iter__(self):
return RewindableIterator(self)
def __next__(self):
token = None
tmp_value = next(self.iterator)
tmp_token = self.tokenizer.get_token(tmp_value)
token_found = False
while tmp_token or not token_found:
if tmp_token:
token_found = True
value = tmp_value
token = tmp_token
try:
char = self.iterator.peek()
tmp_token = self.tokenizer.get_token(tmp_value + char)
if not token and not tmp_token and self.tokenizer.get_token(char):
token_found = True
value = tmp_value
if tmp_token or not token_found:
tmp_value = tmp_value + char
next(self.iterator)
except StopIteration:
value = tmp_value
token = tmp_token
break
self.iterator.commit()
return Token(token, value)
| [
"stella.core.interpreter.productions.Token",
"stella.core.utils.RewindableIterator"
] | [((853, 881), 'stella.core.utils.RewindableIterator', 'RewindableIterator', (['iterator'], {}), '(iterator)\n', (871, 881), False, 'from stella.core.utils import RewindableIterator\n'), ((957, 981), 'stella.core.utils.RewindableIterator', 'RewindableIterator', (['self'], {}), '(self)\n', (975, 981), False, 'from stella.core.utils import RewindableIterator\n'), ((1932, 1951), 'stella.core.interpreter.productions.Token', 'Token', (['token', 'value'], {}), '(token, value)\n', (1937, 1951), False, 'from stella.core.interpreter.productions import Token\n')] |
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# Silence TensorFlow logging below ERROR level.
tf.logging.set_verbosity(tf.logging.ERROR)
# Hyperparameters (deliberately tiny: this is a toy seq2seq-with-attention demo).
max_epochs = 6000             # number of training epochs (presumably consumed by a training loop later in the file — not used in this chunk)
init_stddev = 0.0001          # stddev for random-normal weight initialisation
source_embedding_size = 2     # dimensionality of source-token embedding vectors
target_embedding_size = 2     # dimensionality of target-token embedding vectors
source_state_size = 2         # hidden-state size of each direction of the bidirectional source GRU
preattention_size = 2         # hidden-layer size of the attention scoring MLP
target_state_size = 2         # hidden-state size of the target/decoder GRU
max_seq_len = 10              # maximum sequence length (not referenced in this chunk — TODO confirm use further down)
# Toy parallel corpus: each source sentence is paired (by position) with the
# target sentence the model should learn to produce.
_source_sents = [
    'i like it',
    'i hate it',
    "i don't hate it",
    "i don't like it",
]
_target_sents = [
    "i don't like it",
    "i don't hate it",
    'i hate it',
    'i like it',
]
source_tokens = [sent.split(' ') for sent in _source_sents]
target_tokens = [sent.split(' ') for sent in _target_sents]

# Source-side vocabulary: index 0 is reserved for the 'EDGE' marker (which also
# doubles as padding); the remaining entries are the corpus tokens in sorted order.
source_vocab = ['EDGE'] + sorted({tok for sent in source_tokens for tok in sent})
source_token2index = dict((tok, i) for (i, tok) in enumerate(source_vocab))
source_index2token = dict(enumerate(source_vocab))
source_max_len = max(len(sent) for sent in source_tokens)

# Encode every source sentence as a zero-padded index list, remembering its
# true (unpadded) length so the RNN can stop at the right position.
index_source_indexes = []
index_source_lens = []
for sent in source_tokens:
    sent_len = len(sent)
    padded = [source_token2index[tok] for tok in sent]
    padded += [0] * (source_max_len - sent_len)
    index_source_lens.append(sent_len)
    index_source_indexes.append(padded)

# Target-side vocabulary, built the same way ('EDGE' again at index 0).
target_vocab = ['EDGE'] + sorted({tok for sent in target_tokens for tok in sent})
target_token2index = dict((tok, i) for (i, tok) in enumerate(target_vocab))
target_index2token = dict(enumerate(target_vocab))
# Every target sequence gains exactly one EDGE token (start marker on the
# prefix side, end marker on the prediction side), hence the +1.
target_max_len = max(len(sent) for sent in target_tokens) + 1

# For teacher forcing each sentence yields:
#   prefix = EDGE + tokens   (decoder input)
#   target = tokens + EDGE   (expected decoder output)
# both zero-padded out to target_max_len.
index_target_prefixes = []
index_target_lens = []
index_target_targets = []
_edge = target_token2index['EDGE']
for sent in target_tokens:
    body = [target_token2index[tok] for tok in sent]
    true_len = len(body) + 1
    pad = [0] * (target_max_len - true_len)
    index_target_prefixes.append([_edge] + body + pad)
    index_target_lens.append(true_len)
    index_target_targets.append(body + [_edge] + pad)
# Build the computation graph for the attention-based encoder-decoder.
g = tf.Graph()
with g.as_default():
    # Run-time inputs. The leading None dimension is the batch size; the second
    # None is the (padded) sequence width, matching the padded lists built above.
    source_indexes = tf.placeholder(tf.int32, [None, None], 'source_indexes')    # [batch, src_width] padded source token indexes
    source_lens = tf.placeholder(tf.int32, [None], 'source_lens')                # [batch] true (unpadded) source lengths
    target_prefixes = tf.placeholder(tf.int32, [None, None], 'target_prefixes')  # [batch, tgt_width] EDGE-prefixed decoder inputs
    target_lens = tf.placeholder(tf.int32, [None], 'target_lens')                # [batch] true target lengths (including the EDGE token)
    target_targets = tf.placeholder(tf.int32, [None, None], 'target_targets')    # [batch, tgt_width] expected decoder outputs
    # Dynamic sizes recovered from the fed tensors (scalar int32 tensors).
    batch_size = tf.shape(source_indexes)[0]
    source_seq_width = tf.shape(source_indexes)[1]
    target_seq_width = tf.shape(target_prefixes)[1]
    # Source-sentence encoder: embed the tokens, run a bidirectional GRU over
    # them, then project each position's concatenated fw/bw output back down to
    # source_state_size so the decoder's attention sees one vector per word.
    with tf.variable_scope('source'):
        with tf.variable_scope('embedding'):
            # [vocab, emb] lookup table mapping token index -> dense vector.
            embedding_matrix = tf.get_variable('embedding_matrix', [len(source_vocab), source_embedding_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
            embedded = tf.nn.embedding_lookup(embedding_matrix, source_indexes)
        with tf.variable_scope('init_state'):
            # Learned initial states for the forward and backward GRUs, each
            # tiled so every batch row starts from the same (trainable) state.
            init_state_fw = tf.get_variable('init_state_fw', [source_state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
            batch_init_fw = tf.tile(tf.reshape(init_state_fw, [1, source_state_size]), [batch_size, 1])
            init_state_bw = tf.get_variable('init_state_bw', [source_state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
            batch_init_bw = tf.tile(tf.reshape(init_state_bw, [1, source_state_size]), [batch_size, 1])
        with tf.variable_scope('rnn'):
            cell_fw = tf.contrib.rnn.GRUCell(source_state_size)
            cell_bw = tf.contrib.rnn.GRUCell(source_state_size)
            # sequence_length lets the RNN skip the zero-padding past each
            # sentence's true length.
            ((outputs_fw, outputs_bw), _) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, embedded, sequence_length=source_lens, initial_state_fw=batch_init_fw, initial_state_bw=batch_init_bw)
        # Concatenate the two directions ([batch, width, 2*state]) and apply a
        # shared linear layer position-wise by flattening to 2-D for the matmul.
        outputs_ = tf.concat([ outputs_fw, outputs_bw ], axis=2)
        outputs_2d_ = tf.reshape(outputs_, [batch_size*source_seq_width, 2*source_state_size])
        W = tf.get_variable('W', [2*source_state_size, source_state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
        b = tf.get_variable('b', [source_state_size], tf.float32, tf.zeros_initializer())
        source_outputs_2d = tf.matmul(outputs_2d_, W) + b
        source_outputs = tf.reshape(source_outputs_2d, [batch_size, source_seq_width, source_state_size])
    # Decoder ("targets") side: embeds the EDGE-prefixed target tokens and sets
    # up a learned initial decoder state. (The attention RNN follows below.)
    with tf.variable_scope('targets'):
        with tf.variable_scope('embedding'):
            # Separate embedding table for the target vocabulary. Note this
            # rebinds the module-level names 'embedding_matrix'/'embedded'
            # previously used for the source side.
            embedding_matrix = tf.get_variable('embedding_matrix', [len(target_vocab), target_embedding_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
            embedded = tf.nn.embedding_lookup(embedding_matrix, target_prefixes)
        with tf.variable_scope('init_state'):
            # Learned initial decoder state, tiled to one copy per batch row.
            init_state = tf.get_variable('init_state', [target_state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
            batch_init = tf.tile(tf.reshape(init_state, [1, target_state_size]), [batch_size, 1])
with tf.variable_scope('rnn'):
#Custom RNN cell for producing attention vectors that condition the language model via par-inject
class CellAttention(tf.nn.rnn_cell.RNNCell):
def __init__(self):
super(CellAttention, self).__init__()
self.W1 = None
self.b1 = None
self.W2 = None
self.b2 = None
self.inner_cell = tf.contrib.rnn.GRUCell(target_state_size) #The inner RNN cell that actually tranforms the input and previous state into the next state
@property
def state_size(self):
return source_state_size
@property
def output_size(self):
return (source_seq_width, source_state_size) #Return the attention vector apart from the next state (to be able to inspect it later)
def build(self, inputs_shape):
self.W1 = self.add_variable('W1', [source_state_size + target_state_size, preattention_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
self.b1 = tf.get_variable('b1', [preattention_size], tf.float32, tf.zeros_initializer())
self.W2 = self.add_variable('W2', [preattention_size, 1], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
self.b2 = tf.get_variable('b2', [1], tf.float32, tf.zeros_initializer())
self.built = True
def call(self, next_inputs, curr_states):
with tf.variable_scope('attention'):
#Replicate the current state for each source sentence word in order to concatenate it with each source sentence word vector
expanded_curr_state = tf.tile(tf.reshape(curr_states, [batch_size, 1, target_state_size]), [1, source_seq_width, 1])
pre_attention_input = tf.concat([ source_outputs, expanded_curr_state ], axis=2)
pre_attention_input_2d = tf.reshape(pre_attention_input, [batch_size*source_seq_width, source_state_size + target_state_size])
pre_attention_2d = tf.tanh(tf.matmul(pre_attention_input_2d, self.W1) + self.b1)
attention_logits = tf.reshape(tf.matmul(pre_attention_2d, self.W2) + self.b2, [batch_size, source_seq_width])
mask = tf.sequence_mask(source_lens, source_seq_width, tf.float32)
attention = tf.nn.softmax(attention_logits*mask + -1e10*(1 - mask))
expanded_attention = tf.tile(tf.reshape(attention, [batch_size, source_seq_width, 1]), [1, 1, source_state_size])
attended_sources = tf.reduce_sum(source_outputs*expanded_attention, axis=1)
#Pass the input and state to the inner cell to produce the next state (input consists of word embedding and attended source)
(new_output, new_state) = self.inner_cell(tf.concat([ attended_sources, next_inputs ], axis=1), curr_states)
return ((attention, new_state), new_state)
cell = CellAttention()
((attentions, outputs), _) = tf.nn.dynamic_rnn(cell, embedded, sequence_length=target_lens, initial_state=batch_init)
with tf.variable_scope('output'):
W = tf.get_variable('W', [target_state_size, len(target_vocab)], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
b = tf.get_variable('b', [len(target_vocab)], tf.float32, tf.zeros_initializer())
outputs_2d = tf.reshape(outputs, [batch_size*target_seq_width, target_state_size])
logits_2d = tf.matmul(outputs_2d, W) + b
logits = tf.reshape(logits_2d, [batch_size, target_seq_width, len(target_vocab)])
probs = tf.nn.softmax(logits)
next_word_probs = probs[:, -1, :]
mask = tf.sequence_mask(target_lens, target_seq_width, tf.float32)
error = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target_targets, logits=logits)*mask)/tf.cast(tf.reduce_sum(target_lens), tf.float32)
step = tf.train.AdamOptimizer().minimize(error)
init = tf.global_variables_initializer()
g.finalize()
with tf.Session() as s:
s.run([ init ], { })
(fig, ax) = plt.subplots(1, 1)
plt.ion()
train_errors = list()
print('epoch', 'train error', sep='\t')
for epoch in range(1, max_epochs+1):
s.run([ step ], { source_indexes: index_source_indexes, source_lens: index_source_lens, target_prefixes: index_target_prefixes, target_lens: index_target_lens, target_targets: index_target_targets })
[ train_error ] = s.run([ error ], { source_indexes: index_source_indexes, source_lens: index_source_lens, target_prefixes: index_target_prefixes, target_lens: index_target_lens, target_targets: index_target_targets })
train_errors.append(train_error)
if epoch%100 == 0:
print(epoch, train_error, sep='\t')
ax.cla()
ax.plot(np.arange(len(train_errors)), train_errors, color='red', linestyle='-', label='train')
ax.set_xlim(0, max_epochs)
ax.set_xlabel('epoch')
ax.set_ylim(0.0, 2.0)
ax.set_ylabel('XE') #Cross entropy
ax.grid(True)
ax.set_title('Error progress')
ax.legend()
fig.tight_layout()
plt.draw()
plt.pause(0.0001)
print()
for sent in source_tokens:
source = [ source_token2index[token] for token in sent ]
prefix_prob = 1.0
index_prefix = [ target_token2index['EDGE'] ]
for _ in range(max_seq_len):
[ curr_probs ] = s.run([ next_word_probs ], { source_indexes: [ source ], source_lens: [ len(source) ], target_prefixes: [ index_prefix ], target_lens: [ len(index_prefix) ] })
selected_index = np.argmax(curr_probs[0, :])
prefix_prob = prefix_prob*curr_probs[0, selected_index]
index_prefix.append(selected_index)
if selected_index == target_token2index['EDGE']:
break
index_generated = index_prefix[1:]
generated = [ target_index2token[i] for i in index_generated ]
[ curr_attentions ] = s.run([ attentions ], { source_indexes: [ source ], source_lens: [ len(source) ], target_prefixes: [ index_generated ], target_lens: [ len(index_generated) ] })
print('Input sentence: ', ' '.join(sent))
print('Generated sentence:', ' '.join(generated))
print('Sentence probability:', prefix_prob)
print('Attention:')
print('', '\t', *sent)
for i in range(len(generated)):
print('', generated[i]+'\t', np.round(curr_attentions[0, i, :], 2))
print()
fig.show() | [
"tensorflow.shape",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.reduce_sum",
"tensorflow.logging.set_verbosity",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.contrib.rnn.GRUCell",
"tensorflow.nn.softmax",
"tensorflow.zeros_initializer",
"tensorflow.Graph",
"tens... | [((76, 118), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (100, 118), True, 'import tensorflow as tf\n'), ((2201, 2211), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2209, 2211), True, 'import tensorflow as tf\n'), ((2254, 2310), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]', '"""source_indexes"""'], {}), "(tf.int32, [None, None], 'source_indexes')\n", (2268, 2310), True, 'import tensorflow as tf\n'), ((2329, 2376), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]', '"""source_lens"""'], {}), "(tf.int32, [None], 'source_lens')\n", (2343, 2376), True, 'import tensorflow as tf\n'), ((2399, 2456), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]', '"""target_prefixes"""'], {}), "(tf.int32, [None, None], 'target_prefixes')\n", (2413, 2456), True, 'import tensorflow as tf\n'), ((2475, 2522), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]', '"""target_lens"""'], {}), "(tf.int32, [None], 'target_lens')\n", (2489, 2522), True, 'import tensorflow as tf\n'), ((2544, 2600), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]', '"""target_targets"""'], {}), "(tf.int32, [None, None], 'target_targets')\n", (2558, 2600), True, 'import tensorflow as tf\n'), ((9404, 9463), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['target_lens', 'target_seq_width', 'tf.float32'], {}), '(target_lens, target_seq_width, tf.float32)\n', (9420, 9463), True, 'import tensorflow as tf\n'), ((9698, 9731), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9729, 9731), True, 'import tensorflow as tf\n'), ((2623, 2647), 'tensorflow.shape', 'tf.shape', (['source_indexes'], {}), '(source_indexes)\n', (2631, 2647), True, 'import tensorflow as tf\n'), ((2674, 2698), 'tensorflow.shape', 'tf.shape', (['source_indexes'], {}), '(source_indexes)\n', (2682, 2698), True, 
'import tensorflow as tf\n'), ((2725, 2750), 'tensorflow.shape', 'tf.shape', (['target_prefixes'], {}), '(target_prefixes)\n', (2733, 2750), True, 'import tensorflow as tf\n'), ((2768, 2795), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""source"""'], {}), "('source')\n", (2785, 2795), True, 'import tensorflow as tf\n'), ((4634, 4662), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""targets"""'], {}), "('targets')\n", (4651, 4662), True, 'import tensorflow as tf\n'), ((9760, 9772), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9770, 9772), True, 'import tensorflow as tf\n'), ((9829, 9847), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (9841, 9847), True, 'import matplotlib.pyplot as plt\n'), ((9856, 9865), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (9863, 9865), True, 'import matplotlib.pyplot as plt\n'), ((2810, 2840), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""embedding"""'], {}), "('embedding')\n", (2827, 2840), True, 'import tensorflow as tf\n'), ((3038, 3094), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding_matrix', 'source_indexes'], {}), '(embedding_matrix, source_indexes)\n', (3060, 3094), True, 'import tensorflow as tf\n'), ((3117, 3148), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""init_state"""'], {}), "('init_state')\n", (3134, 3148), True, 'import tensorflow as tf\n'), ((3681, 3705), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rnn"""'], {}), "('rnn')\n", (3698, 3705), True, 'import tensorflow as tf\n'), ((3729, 3770), 'tensorflow.contrib.rnn.GRUCell', 'tf.contrib.rnn.GRUCell', (['source_state_size'], {}), '(source_state_size)\n', (3751, 3770), True, 'import tensorflow as tf\n'), ((3793, 3834), 'tensorflow.contrib.rnn.GRUCell', 'tf.contrib.rnn.GRUCell', (['source_state_size'], {}), '(source_state_size)\n', (3815, 3834), True, 'import tensorflow as tf\n'), ((3879, 4041), 'tensorflow.nn.bidirectional_dynamic_rnn', 
'tf.nn.bidirectional_dynamic_rnn', (['cell_fw', 'cell_bw', 'embedded'], {'sequence_length': 'source_lens', 'initial_state_fw': 'batch_init_fw', 'initial_state_bw': 'batch_init_bw'}), '(cell_fw, cell_bw, embedded, sequence_length\n =source_lens, initial_state_fw=batch_init_fw, initial_state_bw=\n batch_init_bw)\n', (3910, 4041), True, 'import tensorflow as tf\n'), ((4055, 4098), 'tensorflow.concat', 'tf.concat', (['[outputs_fw, outputs_bw]'], {'axis': '(2)'}), '([outputs_fw, outputs_bw], axis=2)\n', (4064, 4098), True, 'import tensorflow as tf\n'), ((4127, 4203), 'tensorflow.reshape', 'tf.reshape', (['outputs_', '[batch_size * source_seq_width, 2 * source_state_size]'], {}), '(outputs_, [batch_size * source_seq_width, 2 * source_state_size])\n', (4137, 4203), True, 'import tensorflow as tf\n'), ((4539, 4624), 'tensorflow.reshape', 'tf.reshape', (['source_outputs_2d', '[batch_size, source_seq_width, source_state_size]'], {}), '(source_outputs_2d, [batch_size, source_seq_width, source_state_size]\n )\n', (4549, 4624), True, 'import tensorflow as tf\n'), ((4677, 4707), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""embedding"""'], {}), "('embedding')\n", (4694, 4707), True, 'import tensorflow as tf\n'), ((4905, 4962), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding_matrix', 'target_prefixes'], {}), '(embedding_matrix, target_prefixes)\n', (4927, 4962), True, 'import tensorflow as tf\n'), ((4985, 5016), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""init_state"""'], {}), "('init_state')\n", (5002, 5016), True, 'import tensorflow as tf\n'), ((5276, 5300), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rnn"""'], {}), "('rnn')\n", (5293, 5300), True, 'import tensorflow as tf\n'), ((8688, 8780), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'embedded'], {'sequence_length': 'target_lens', 'initial_state': 'batch_init'}), '(cell, embedded, sequence_length=target_lens,\n initial_state=batch_init)\n', (8705, 
8780), True, 'import tensorflow as tf\n'), ((8791, 8818), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output"""'], {}), "('output')\n", (8808, 8818), True, 'import tensorflow as tf\n'), ((9078, 9149), 'tensorflow.reshape', 'tf.reshape', (['outputs', '[batch_size * target_seq_width, target_state_size]'], {}), '(outputs, [batch_size * target_seq_width, target_state_size])\n', (9088, 9149), True, 'import tensorflow as tf\n'), ((9315, 9336), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (9328, 9336), True, 'import tensorflow as tf\n'), ((9589, 9615), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['target_lens'], {}), '(target_lens)\n', (9602, 9615), True, 'import tensorflow as tf\n'), ((9645, 9669), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (9667, 9669), True, 'import tensorflow as tf\n'), ((2965, 3013), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (2993, 3013), True, 'import tensorflow as tf\n'), ((3244, 3292), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (3272, 3292), True, 'import tensorflow as tf\n'), ((3330, 3379), 'tensorflow.reshape', 'tf.reshape', (['init_state_fw', '[1, source_state_size]'], {}), '(init_state_fw, [1, source_state_size])\n', (3340, 3379), True, 'import tensorflow as tf\n'), ((3505, 3553), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (3533, 3553), True, 'import tensorflow as tf\n'), ((3591, 3640), 'tensorflow.reshape', 'tf.reshape', (['init_state_bw', '[1, source_state_size]'], {}), '(init_state_bw, [1, source_state_size])\n', (3601, 3640), True, 'import tensorflow as tf\n'), ((4304, 4352), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', 
(4332, 4352), True, 'import tensorflow as tf\n'), ((4424, 4446), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (4444, 4446), True, 'import tensorflow as tf\n'), ((4480, 4505), 'tensorflow.matmul', 'tf.matmul', (['outputs_2d_', 'W'], {}), '(outputs_2d_, W)\n', (4489, 4505), True, 'import tensorflow as tf\n'), ((4832, 4880), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (4860, 4880), True, 'import tensorflow as tf\n'), ((5106, 5154), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (5134, 5154), True, 'import tensorflow as tf\n'), ((5189, 5235), 'tensorflow.reshape', 'tf.reshape', (['init_state', '[1, target_state_size]'], {}), '(init_state, [1, target_state_size])\n', (5199, 5235), True, 'import tensorflow as tf\n'), ((8909, 8957), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (8937, 8957), True, 'import tensorflow as tf\n'), ((9029, 9051), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (9049, 9051), True, 'import tensorflow as tf\n'), ((9172, 9196), 'tensorflow.matmul', 'tf.matmul', (['outputs_2d', 'W'], {}), '(outputs_2d, W)\n', (9181, 9196), True, 'import tensorflow as tf\n'), ((9490, 9578), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'target_targets', 'logits': 'logits'}), '(labels=target_targets,\n logits=logits)\n', (9536, 9578), True, 'import tensorflow as tf\n'), ((11068, 11078), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (11076, 11078), True, 'import matplotlib.pyplot as plt\n'), ((11095, 11112), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (11104, 11112), True, 'import matplotlib.pyplot as plt\n'), ((11603, 11630), 'numpy.argmax', 
'np.argmax', (['curr_probs[0, :]'], {}), '(curr_probs[0, :])\n', (11612, 11630), True, 'import numpy as np\n'), ((5771, 5812), 'tensorflow.contrib.rnn.GRUCell', 'tf.contrib.rnn.GRUCell', (['target_state_size'], {}), '(target_state_size)\n', (5793, 5812), True, 'import tensorflow as tf\n'), ((12542, 12579), 'numpy.round', 'np.round', (['curr_attentions[0, i, :]', '(2)'], {}), '(curr_attentions[0, i, :], 2)\n', (12550, 12579), True, 'import numpy as np\n'), ((6409, 6457), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (6437, 6457), True, 'import tensorflow as tf\n'), ((6544, 6566), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (6564, 6566), True, 'import tensorflow as tf\n'), ((6658, 6706), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (6686, 6706), True, 'import tensorflow as tf\n'), ((6777, 6799), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (6797, 6799), True, 'import tensorflow as tf\n'), ((6943, 6973), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention"""'], {}), "('attention')\n", (6960, 6973), True, 'import tensorflow as tf\n'), ((7335, 7391), 'tensorflow.concat', 'tf.concat', (['[source_outputs, expanded_curr_state]'], {'axis': '(2)'}), '([source_outputs, expanded_curr_state], axis=2)\n', (7344, 7391), True, 'import tensorflow as tf\n'), ((7443, 7551), 'tensorflow.reshape', 'tf.reshape', (['pre_attention_input', '[batch_size * source_seq_width, source_state_size + target_state_size]'], {}), '(pre_attention_input, [batch_size * source_seq_width, \n source_state_size + target_state_size])\n', (7453, 7551), True, 'import tensorflow as tf\n'), ((7842, 7901), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['source_lens', 'source_seq_width', 'tf.float32'], {}), '(source_lens, source_seq_width, tf.float32)\n', (7858, 7901), 
True, 'import tensorflow as tf\n'), ((7938, 8006), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(attention_logits * mask + -10000000000.0 * (1 - mask))'], {}), '(attention_logits * mask + -10000000000.0 * (1 - mask))\n', (7951, 8006), True, 'import tensorflow as tf\n'), ((8176, 8234), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(source_outputs * expanded_attention)'], {'axis': '(1)'}), '(source_outputs * expanded_attention, axis=1)\n', (8189, 8234), True, 'import tensorflow as tf\n'), ((8461, 8511), 'tensorflow.concat', 'tf.concat', (['[attended_sources, next_inputs]'], {'axis': '(1)'}), '([attended_sources, next_inputs], axis=1)\n', (8470, 8511), True, 'import tensorflow as tf\n'), ((7177, 7236), 'tensorflow.reshape', 'tf.reshape', (['curr_states', '[batch_size, 1, target_state_size]'], {}), '(curr_states, [batch_size, 1, target_state_size])\n', (7187, 7236), True, 'import tensorflow as tf\n'), ((8048, 8104), 'tensorflow.reshape', 'tf.reshape', (['attention', '[batch_size, source_seq_width, 1]'], {}), '(attention, [batch_size, source_seq_width, 1])\n', (8058, 8104), True, 'import tensorflow as tf\n'), ((7621, 7663), 'tensorflow.matmul', 'tf.matmul', (['pre_attention_input_2d', 'self.W1'], {}), '(pre_attention_input_2d, self.W1)\n', (7630, 7663), True, 'import tensorflow as tf\n'), ((7730, 7766), 'tensorflow.matmul', 'tf.matmul', (['pre_attention_2d', 'self.W2'], {}), '(pre_attention_2d, self.W2)\n', (7739, 7766), True, 'import tensorflow as tf\n')] |
import codecs
def encode_macaroon(macaroon):
encoded_macaroon = codecs.encode(macaroon, 'hex')
return encoded_macaroon
def read_file(file_path):
opened_file = open(file_path, 'rb').read()
return opened_file | [
"codecs.encode"
] | [((69, 99), 'codecs.encode', 'codecs.encode', (['macaroon', '"""hex"""'], {}), "(macaroon, 'hex')\n", (82, 99), False, 'import codecs\n')] |
import numpy as np
from ctapipe.core import Component
from ctapipe.containers import MuonRingContainer
from .fitting import kundu_chaudhuri_circle_fit, taubin_circle_fit
import traitlets as traits
# the fit methods do not expose the same interface, so we
# force the same interface onto them, here.
# we also modify their names slightly, since the names are
# exposed to the user via the string traitlet `fit_method`
def kundu_chaudhuri(x, y, weights, mask):
"""kundu_chaudhuri_circle_fit with x, y, weights, mask interface"""
return kundu_chaudhuri_circle_fit(x[mask], y[mask], weights[mask])
def taubin(x, y, weights, mask):
"""taubin_circle_fit with x, y, weights, mask interface"""
return taubin_circle_fit(x, y, mask)
FIT_METHOD_BY_NAME = {m.__name__: m for m in [kundu_chaudhuri, taubin]}
__all__ = ["MuonRingFitter"]
class MuonRingFitter(Component):
"""Different ring fit algorithms for muon rings"""
fit_method = traits.CaselessStrEnum(
list(FIT_METHOD_BY_NAME.keys()),
default_value=list(FIT_METHOD_BY_NAME.keys())[0],
).tag(config=True)
def __call__(self, x, y, img, mask):
"""allows any fit to be called in form of
MuonRingFitter(fit_method = "name of the fit")
"""
fit_function = FIT_METHOD_BY_NAME[self.fit_method]
radius, center_x, center_y = fit_function(x, y, img, mask)
return MuonRingContainer(
center_x=center_x,
center_y=center_y,
radius=radius,
center_phi=np.arctan2(center_y, center_x),
center_distance=np.sqrt(center_x ** 2 + center_y ** 2),
)
| [
"numpy.sqrt",
"numpy.arctan2"
] | [((1539, 1569), 'numpy.arctan2', 'np.arctan2', (['center_y', 'center_x'], {}), '(center_y, center_x)\n', (1549, 1569), True, 'import numpy as np\n'), ((1599, 1637), 'numpy.sqrt', 'np.sqrt', (['(center_x ** 2 + center_y ** 2)'], {}), '(center_x ** 2 + center_y ** 2)\n', (1606, 1637), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*-coding: utf-8 -*-
__author__ = 'dracarysX'
from django.db import models
class Publisher(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
class Meta:
db_table = 'Publisher'
def __str__(self):
return 'Publisher: {}'.format(self.name)
class School(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
class Meta:
db_table = 'School'
def __str__(self):
return 'School: {}'.format(self.name)
class Author(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=50)
age = models.IntegerField()
school = models.ForeignKey(School)
class Meta:
db_table = 'Author'
def __str__(self):
return 'Author: {}'.format(self.name)
class Book(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=50)
author = models.ForeignKey(Author)
publisher = models.ForeignKey(Publisher)
class Meta:
db_table = 'Book'
def __str__(self):
return 'Book: {}'.format(self.name)
| [
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((143, 177), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (159, 177), False, 'from django.db import models\n'), ((189, 221), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (205, 221), False, 'from django.db import models\n'), ((382, 416), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (398, 416), False, 'from django.db import models\n'), ((428, 460), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (444, 460), False, 'from django.db import models\n'), ((615, 649), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (631, 649), False, 'from django.db import models\n'), ((661, 692), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (677, 692), False, 'from django.db import models\n'), ((703, 724), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (722, 724), False, 'from django.db import models\n'), ((738, 763), 'django.db.models.ForeignKey', 'models.ForeignKey', (['School'], {}), '(School)\n', (755, 763), False, 'from django.db import models\n'), ((916, 950), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (932, 950), False, 'from django.db import models\n'), ((962, 993), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (978, 993), False, 'from django.db import models\n'), ((1007, 1032), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Author'], {}), '(Author)\n', (1024, 1032), False, 'from django.db import models\n'), ((1049, 1077), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Publisher'], {}), '(Publisher)\n', (1066, 1077), False, 'from django.db import models\n')] |
from django.conf import settings
from django.conf.urls import *
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from ajax_select import urls as ajax_select_urls
from .views import (
HomeView, CustomSearchView, autocomplete, ErrorView, BibliographieView,
RssFeed, GlobalSitemap,
)
admin.autodiscover()
urlpatterns = [
url(r'^$', HomeView.as_view(), name='home'),
url(r'^', include('libretto.urls')),
url(r'^examens/', include('examens.urls')),
url(r'^presentation$',
TemplateView.as_view(template_name='pages/presentation.html'),
name='presentation'),
url(r'^contribuer$',
TemplateView.as_view(template_name='pages/contribute.html'),
name='contribuer'),
url(r'^bibliographie$', BibliographieView.as_view(), name='bibliographie'),
url(r'^', include('accounts.urls')),
url(r'^dossiers/', include('dossiers.urls')),
url(r'^admin/lookups/', include(ajax_select_urls)),
url(r'^admin/', admin.site.urls),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^recherche/', CustomSearchView(), name='haystack_search'),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
url(r'^autocomplete$', autocomplete, name='autocomplete'),
url(r'^rss\.xml$', RssFeed(), name='rss_feed'),
url(r'^sitemap.xml$', cache_page(24*60*60)(sitemap),
{'sitemaps': {'global': GlobalSitemap}},
name='django.contrib.sitemaps.views.sitemap'),
url(r'^404$', ErrorView.as_view(status=404)),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
url(r'^403$', ErrorView.as_view(status=403)),
url(r'^500$', ErrorView.as_view(status=500)),
url(r'^503$', ErrorView.as_view(status=503)),
]
| [
"django.views.generic.TemplateView.as_view",
"django.conf.urls.static.static",
"django.views.decorators.cache.cache_page",
"django.contrib.admin.autodiscover"
] | [((467, 487), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (485, 487), False, 'from django.contrib import admin\n'), ((1863, 1926), 'django.conf.urls.static.static', 'static', (['settings.STATIC_URL'], {'document_root': 'settings.STATIC_ROOT'}), '(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n', (1869, 1926), False, 'from django.conf.urls.static import static\n'), ((1972, 2033), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (1978, 2033), False, 'from django.conf.urls.static import static\n'), ((678, 739), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""pages/presentation.html"""'}), "(template_name='pages/presentation.html')\n", (698, 739), False, 'from django.views.generic import TemplateView\n'), ((804, 863), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""pages/contribute.html"""'}), "(template_name='pages/contribute.html')\n", (824, 863), False, 'from django.views.generic import TemplateView\n'), ((1637, 1661), 'django.views.decorators.cache.cache_page', 'cache_page', (['(24 * 60 * 60)'], {}), '(24 * 60 * 60)\n', (1647, 1661), False, 'from django.views.decorators.cache import cache_page\n')] |
'''
This script makes an image very similar to Figure 2 of Hutchison et al. 2019 (https://arxiv.org/pdf/1905.08812.pdf). Undoubtedly, there are likely simpler ways to make this figure -- this is how I chose to code it up.
Because the figure in the paper uses some proprietary data, the code below will generate fake data to be plotted.
Credit: <NAME>
<EMAIL>
Texas A&M University
'''
# fixed: was the misspelled single-underscore `_author_`
__author__ = '<NAME>'
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import matplotlib.gridspec as gridspec
from matplotlib.patches import Polygon
import matplotlib.patheffects as PathEffects
# fixed: mpl_toolkits.axes_grid was deprecated and removed from matplotlib;
# axes_grid1 is the maintained module path (import is currently unused but kept)
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.lines import Line2D
from matplotlib import patches
# -- Generating fake data -- #
# -------------------------- #
np.random.seed(seed=3) # fixing the random seed so we can get the same result
gauss2d = np.loadtxt('gaussian2D_sig2_kernel7.txt') # fake 2D emission line profile
gauss1d = np.loadtxt('gaussian1D_sig2_kernel7.txt') # fake 1D emission line profile
# 1D & 2D gaussian pulled from here (because it's faster for this exercise):
# http://dev.theomader.com/gaussian-kernel-calculator/
noise1d = np.random.uniform(-1,1,250) # noise for 1D spectrum
noise2d = np.random.uniform(-1,1,(250,70)) # noise for 2D spectrum
shape = noise2d.shape
xcen, ycen = int(shape[0]/2), int(shape[1]/2)
galspec2d_line1 = noise2d.copy()
galspec2d_line1[xcen-3:xcen+4,ycen-3:ycen+4] += gauss2d * 35 # 2D emission line
galspec1d_line1 = noise1d.copy()
galspec1d_line1[xcen-3:xcen+4] += gauss1d * 15 # Lya 1D emission line
galspec2d_line2 = galspec2d_line1.copy()
galspec2d_line2[xcen+17:xcen+24,ycen-3:ycen+4] += gauss2d * 35 # 2D emission line
galspec1d_line2 = galspec1d_line1.copy()
galspec1d_line2[xcen+17:xcen+24] += gauss1d * 10 # CIII] 1D doublet emission line
noisegal = np.random.uniform(-1,1,(50,35)) # noise for photometry of 'galaxy'
galaxy = noisegal.copy()
galaxy[22:29,13:20] += gauss2d * 25 # add signal for galaxy shape
galaxy[24:31,16:23] += gauss2d * 25 # add signal for galaxy shape
wavelength = np.arange(len(galspec1d_line1)) # fake wavelength range
# fake errors
np.random.seed(seed=13) # fixing the random seed so we can get the same result
error1d = np.random.random(len(noise1d)) + 0.4
# ---------------------------#
# -- Initializing the image -- #
# ---------------------------- #
f = plt.figure(figsize=(10.5,9))
gs0 = gridspec.GridSpec(2,1,height_ratios=[1,0.9],hspace=0.1) # the main subplots
# ------------- #
# -- TOP ROW -- #
# ------------- #
gs01 = gridspec.GridSpecFromSubplotSpec(1,2,subplot_spec=gs0[0], # the top panel's subplots
                width_ratios=[1.2,2],wspace=0.22)
# --> RIGHT SIDE: the Lya spectrum
line = 'lya'
band = 'Y'
# The subplot gs001 is made up of 3 subplots where the top and bottom are just used to
# center the middle one more accurately -- they aren't necessary if you don't care THAT much :)
gs001 = gridspec.GridSpecFromSubplotSpec(3,1,subplot_spec=gs01[1],
                height_ratios=[0.05,1,0.12],hspace=0.0)
# This is the real subplot for the data (the middle one from gs001), split into 2 subplots
# so that we can have the 2D spectrum on top and the 1D on the bottom
gs011 = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec=gs001[1],
                height_ratios=[1.25,2],hspace=0.0)
# 2D spectrum
ax01 = plt.Subplot(f, gs011[0])
ax01.imshow(galspec2d_line1[75:175,28:42].T, # zooming in for the sake of the example
        aspect='auto',origin='lower',cmap='gray',clim=(-1.5,2.3))
# removing the tickmarks and labels for the 2D spectrum
ax01.xaxis.set_ticks_position('none')
ax01.yaxis.set_ticks_position('none')
ax01.set_yticklabels([])
ax01.set_xticklabels([])
# white text with black outline
txt = ax01.text(0.023,0.73,'%s-band'%(band), size=20.5, color='w',transform=ax01.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='k')])
f.add_subplot(ax01) # adds the subplot to the image
# 1D spectrum
ax02 = plt.Subplot(f, gs011[1])
ax02.step(wavelength,galspec1d_line1,where='mid',lw=2.3)
ax02.fill_between(wavelength,error1d,error1d*-1,alpha=0.2)
ax02.set_xlim(wavelength[74],wavelength[174])
ax02.set_ylabel(r'F$_{\lambda}$ [10$^{-18}$ erg/s/cm$^2$/$\AA$]',fontsize=16)
ax02.set_xlabel('observed wavelength [microns]',labelpad=5,fontsize=16)
f.add_subplot(ax02) # adds the subplot to the image
# --> LEFT SIDE: F160W STAMP
gs002 = gridspec.GridSpecFromSubplotSpec(1,1,subplot_spec=gs01[0])
ax002 = plt.Subplot(f, gs002[0]) # no need to add extra tiny subplots for padding here!
ax002.imshow(galaxy,aspect='auto',origin='upper',cmap='gray',clim=(-1,2))
# removing the tickmarks and labels for the 2D spectrum
ax002.xaxis.set_ticks_position('none')
ax002.yaxis.set_ticks_position('none')
ax002.set_yticklabels([])
ax002.set_xticklabels([])
# white text with black outline
txt = ax002.text(0.03,0.90,'F160W',ha='left',size=22.5, color='w',transform=ax002.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='k')])
# adding years for the slit layouts, using the set_path_effects to "bold" the text
txt = ax002.text(0.04,0.13,'2016',size=19.5, color='#CF6060',transform=ax002.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=1.18, foreground='#CF6060')])
txt = ax002.text(0.04,0.22,'2014',size=19.5, color='#F4D03F',transform=ax002.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=1.18, foreground='#F4D03F')])
txt = ax002.text(0.04,0.04,'2017',size=19.5, color='#70B5E3',transform=ax002.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=1.18, foreground='#70B5E3')])
# plotting slits over the regions in the image
# loc: 2, 3, 4, 1
ax002.add_patch(Polygon([[7,7],[22,45],[25.5,43],[11,5]], # 2016 slit
            zorder=3,facecolor='none',lw=1.8,edgecolor='#CF6060'))
ax002.add_patch(Polygon([[15,5],[15,45],[20,45],[20,5]], # 2014 slit
            zorder=3,facecolor='none',lw=1.8,edgecolor='#F4D03F'))
ax002.add_patch(Polygon([[5,23],[5,28],[28,28],[28,23]], # 2017 slit
            zorder=3,facecolor='none',lw=1.8,edgecolor='#70B5E3'))
f.add_subplot(ax002) # adds the subplot to the figure
# ------------------------------------------------------------------------- #
# ---------------- #
# -- BOTTOM ROW -- #
# ---------------- #
# --> the CIII] spectrum
line = 'ciii'
band = 'H'
# similar padding process done as with the Lya spectrum (where only the middle one matters)
gs02 = gridspec.GridSpecFromSubplotSpec(1,3,subplot_spec=gs0[1],width_ratios=[0.28,2,0.13],wspace=0.0)
# splitting the middle subplot from above into two, so that we can have 2D on top and 1D on bottom
gs003 = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec=gs02[1],height_ratios=[1.75,2],hspace=0.0)
# 2D spectrum
ax21 = plt.Subplot(f, gs003[0])
ax21.imshow(galspec2d_line2[:,15:55].T,aspect='auto',origin='lower',cmap='gray',clim=(-1.5,2.2))
# removing the tickmarks and labels for the 2D spectrum
ax21.xaxis.set_ticks_position('none')
ax21.yaxis.set_ticks_position('none')
ax21.set_yticklabels([])
ax21.set_xticklabels([])
# white text with black outline
txt = ax21.text(0.02,0.75,'%s-band'%(band), size=16+8.5, color='w',transform=ax21.transAxes)
txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='k')])
f.add_subplot(ax21) # adds subplot to the figure
# 1D spectrum
ax22 = plt.Subplot(f, gs003[1])
ax22.step(wavelength,galspec1d_line2,where='mid',lw=2.7)
ax22.fill_between(wavelength,error1d,error1d*-1,alpha=0.2)
ax22.set_xlim(wavelength[0],wavelength[-1])
ax22.set_ylabel(r'F$_{\lambda}$ [10$^{-19}$ erg/s/cm$^{2}$/$\AA$]',fontsize=16)
ax22.set_xlabel('observed wavelength [microns]',fontsize=16)
f.add_subplot(ax22) # adds subplot to the figure
# saving figure
plt.savefig('figure.pdf')
#plt.show()
plt.close('all')
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.Subplot",
"matplotlib.pyplot.close",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"numpy.random.uniform",
"matplotlib.gridspec.GridSpecFromSubplotSpec",
"numpy.loadtxt",
"matplotlib.patheffects.withStroke",
"matp... | [((808, 830), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(3)'}), '(seed=3)\n', (822, 830), True, 'import numpy as np\n'), ((897, 938), 'numpy.loadtxt', 'np.loadtxt', (['"""gaussian2D_sig2_kernel7.txt"""'], {}), "('gaussian2D_sig2_kernel7.txt')\n", (907, 938), True, 'import numpy as np\n'), ((973, 1014), 'numpy.loadtxt', 'np.loadtxt', (['"""gaussian1D_sig2_kernel7.txt"""'], {}), "('gaussian1D_sig2_kernel7.txt')\n", (983, 1014), True, 'import numpy as np\n'), ((1182, 1211), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(250)'], {}), '(-1, 1, 250)\n', (1199, 1211), True, 'import numpy as np\n'), ((1244, 1279), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(250, 70)'], {}), '(-1, 1, (250, 70))\n', (1261, 1279), True, 'import numpy as np\n'), ((1845, 1879), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(50, 35)'], {}), '(-1, 1, (50, 35))\n', (1862, 1879), True, 'import numpy as np\n'), ((2154, 2177), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(13)'}), '(seed=13)\n', (2168, 2177), True, 'import numpy as np\n'), ((2384, 2413), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10.5, 9)'}), '(figsize=(10.5, 9))\n', (2394, 2413), True, 'import matplotlib.pyplot as plt\n'), ((2419, 2478), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {'height_ratios': '[1, 0.9]', 'hspace': '(0.1)'}), '(2, 1, height_ratios=[1, 0.9], hspace=0.1)\n', (2436, 2478), True, 'import matplotlib.gridspec as gridspec\n'), ((2559, 2659), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(1)', '(2)'], {'subplot_spec': 'gs0[0]', 'width_ratios': '[1.2, 2]', 'wspace': '(0.22)'}), '(1, 2, subplot_spec=gs0[0], width_ratios=[\n 1.2, 2], wspace=0.22)\n', (2591, 2659), True, 'import matplotlib.gridspec as gridspec\n'), ((2938, 3046), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(3)', '(1)'], {'subplot_spec': 'gs01[1]', 
'height_ratios': '[0.05, 1, 0.12]', 'hspace': '(0.0)'}), '(3, 1, subplot_spec=gs01[1], height_ratios=\n [0.05, 1, 0.12], hspace=0.0)\n', (2970, 3046), True, 'import matplotlib.gridspec as gridspec\n'), ((3213, 3316), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(2)', '(1)'], {'subplot_spec': 'gs001[1]', 'height_ratios': '[1.25, 2]', 'hspace': '(0.0)'}), '(2, 1, subplot_spec=gs001[1], height_ratios\n =[1.25, 2], hspace=0.0)\n', (3245, 3316), True, 'import matplotlib.gridspec as gridspec\n'), ((3338, 3362), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['f', 'gs011[0]'], {}), '(f, gs011[0])\n', (3349, 3362), True, 'import matplotlib.pyplot as plt\n'), ((3977, 4001), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['f', 'gs011[1]'], {}), '(f, gs011[1])\n', (3988, 4001), True, 'import matplotlib.pyplot as plt\n'), ((4406, 4466), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(1)', '(1)'], {'subplot_spec': 'gs01[0]'}), '(1, 1, subplot_spec=gs01[0])\n', (4438, 4466), True, 'import matplotlib.gridspec as gridspec\n'), ((4473, 4497), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['f', 'gs002[0]'], {}), '(f, gs002[0])\n', (4484, 4497), True, 'import matplotlib.pyplot as plt\n'), ((6469, 6575), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(1)', '(3)'], {'subplot_spec': 'gs0[1]', 'width_ratios': '[0.28, 2, 0.13]', 'wspace': '(0.0)'}), '(1, 3, subplot_spec=gs0[1], width_ratios=[\n 0.28, 2, 0.13], wspace=0.0)\n', (6501, 6575), True, 'import matplotlib.gridspec as gridspec\n'), ((6673, 6775), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(2)', '(1)'], {'subplot_spec': 'gs02[1]', 'height_ratios': '[1.75, 2]', 'hspace': '(0.0)'}), '(2, 1, subplot_spec=gs02[1], height_ratios=\n [1.75, 2], hspace=0.0)\n', (6705, 6775), True, 'import matplotlib.gridspec as gridspec\n'), ((6788, 6812), 'matplotlib.pyplot.Subplot', 'plt.Subplot', 
(['f', 'gs003[0]'], {}), '(f, gs003[0])\n', (6799, 6812), True, 'import matplotlib.pyplot as plt\n'), ((7367, 7391), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['f', 'gs003[1]'], {}), '(f, gs003[1])\n', (7378, 7391), True, 'import matplotlib.pyplot as plt\n'), ((7760, 7785), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figure.pdf"""'], {}), "('figure.pdf')\n", (7771, 7785), True, 'import matplotlib.pyplot as plt\n'), ((7798, 7814), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7807, 7814), True, 'import matplotlib.pyplot as plt\n'), ((5745, 5854), 'matplotlib.patches.Polygon', 'Polygon', (['[[7, 7], [22, 45], [25.5, 43], [11, 5]]'], {'zorder': '(3)', 'facecolor': '"""none"""', 'lw': '(1.8)', 'edgecolor': '"""#CF6060"""'}), "([[7, 7], [22, 45], [25.5, 43], [11, 5]], zorder=3, facecolor='none',\n lw=1.8, edgecolor='#CF6060')\n", (5752, 5854), False, 'from matplotlib.patches import Polygon\n'), ((5876, 5984), 'matplotlib.patches.Polygon', 'Polygon', (['[[15, 5], [15, 45], [20, 45], [20, 5]]'], {'zorder': '(3)', 'facecolor': '"""none"""', 'lw': '(1.8)', 'edgecolor': '"""#F4D03F"""'}), "([[15, 5], [15, 45], [20, 45], [20, 5]], zorder=3, facecolor='none',\n lw=1.8, edgecolor='#F4D03F')\n", (5883, 5984), False, 'from matplotlib.patches import Polygon\n'), ((6006, 6114), 'matplotlib.patches.Polygon', 'Polygon', (['[[5, 23], [5, 28], [28, 28], [28, 23]]'], {'zorder': '(3)', 'facecolor': '"""none"""', 'lw': '(1.8)', 'edgecolor': '"""#70B5E3"""'}), "([[5, 23], [5, 28], [28, 28], [28, 23]], zorder=3, facecolor='none',\n lw=1.8, edgecolor='#70B5E3')\n", (6013, 6114), False, 'from matplotlib.patches import Polygon\n'), ((3849, 3900), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(3)', 'foreground': '"""k"""'}), "(linewidth=3, foreground='k')\n", (3871, 3900), True, 'import matplotlib.patheffects as PathEffects\n'), ((4962, 5013), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], 
{'linewidth': '(3)', 'foreground': '"""k"""'}), "(linewidth=3, foreground='k')\n", (4984, 5013), True, 'import matplotlib.patheffects as PathEffects\n'), ((5210, 5270), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(1.18)', 'foreground': '"""#CF6060"""'}), "(linewidth=1.18, foreground='#CF6060')\n", (5232, 5270), True, 'import matplotlib.patheffects as PathEffects\n'), ((5383, 5443), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(1.18)', 'foreground': '"""#F4D03F"""'}), "(linewidth=1.18, foreground='#F4D03F')\n", (5405, 5443), True, 'import matplotlib.patheffects as PathEffects\n'), ((5556, 5616), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(1.18)', 'foreground': '"""#70B5E3"""'}), "(linewidth=1.18, foreground='#70B5E3')\n", (5578, 5616), True, 'import matplotlib.patheffects as PathEffects\n'), ((7241, 7292), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(3)', 'foreground': '"""k"""'}), "(linewidth=3, foreground='k')\n", (7263, 7292), True, 'import matplotlib.patheffects as PathEffects\n')] |
import sys
import os


if __name__ == '__main__':
    if len(sys.argv) < 3:
        print("Usage: {} <conf-list> <conf-dir> [white-list-files]".format(sys.argv[0]))
        sys.exit(-1)
    conf_list_file = sys.argv[1]
    conf_dir = sys.argv[2]
    white_list_files = sys.argv[3:]

    # Union of every element named in the white-list files (one per line).
    ele_white_list = set()
    for fn in white_list_files:
        with open(fn, 'r') as f:
            for line in f:
                ele_white_list.add(line.rstrip())

    # Map each configuration (one file name per line of conf_list_file) to the
    # list of elements it requires.
    conf_list = {}
    with open(conf_list_file, 'r') as f:
        for l in f:
            fn = os.path.join(conf_dir, l.rstrip())
            with open(fn, 'r') as conf_f:
                conf_list[l] = [e.rstrip() for e in conf_f]

    # A conf is "supported" when every one of its elements is white-listed;
    # `offensive` counts, per element, how many confs it blocks.
    offensive = {}
    supported = []
    for conf, eles in conf_list.items():
        missing = [e for e in eles if e not in ele_white_list]
        for e in missing:
            offensive[e] = offensive.get(e, 0) + 1
        if not missing:
            supported.append(conf)

    # Guard against an empty conf list (was an unguarded ZeroDivisionError).
    total = len(conf_list)
    ratio = float(len(supported)) / float(total) * 100.0 if total else 0.0
    print("Support {} / {} ({}%) Confs".format(len(supported), total, ratio))
    # Most-offensive elements first (reverse=True replaces sorted(...)[::-1]).
    for ele, count in sorted(offensive.items(), key=lambda x: x[1], reverse=True):
        print(ele, count)
| [
"sys.exit"
] | [((173, 185), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (181, 185), False, 'import sys\n')] |
import torch
from torch.autograd import Variable
from util.util import *
from util.data_util import *
import numpy as np
from PIL import Image
from data.base_dataset import get_transform_params, get_raw_transform_fn, \
get_transform_fn, get_soft_bbox, get_masked_image
from util.data_util import crop_canvas, paste_canvas
class JointInference():
    """Chains a box-to-mask (layout) generator and a mask-to-image generator
    so a sampled bounding box can be turned first into a semantic layout and
    then into a synthesized image patch pasted back onto the original."""

    def __init__(self, joint_opt):
        """Load both sub-models from the option scripts named in `joint_opt`.

        Args:
            joint_opt: options object with attributes `maskgen_script`,
                `imggen_script` and `gpu_ids`.
        """
        ###########################
        # Argument Parsing
        ###########################
        from options.box2mask_test_options import BoxToMaskTestOptions as MaskGenTestOption
        from options.mask2image_test_options import MaskToImageTestOptions as ImgGenTestOption
        self.opt_maskgen = load_script_to_opt(joint_opt.maskgen_script, MaskGenTestOption)
        self.opt_imggen = load_script_to_opt(joint_opt.imggen_script, ImgGenTestOption)
        # TODO(sh): make this part less hacky
        # Both sub-models must run on the same devices as the joint driver.
        self.opt_maskgen.gpu_ids = self.opt_imggen.gpu_ids = joint_opt.gpu_ids

        ###########################
        # Model Initialization
        ###########################
        from .models import create_model
        self.G_box2mask = create_model(self.opt_maskgen)
        self.G_mask2img = create_model(self.opt_imggen)

    def sample_bbox(self, bbox_originals, opt, random=False):
        """Sample one bbox dict from `bbox_originals`.

        Unless `random` is True, only boxes whose longer side is at least
        `opt.min_box_size` are candidates; if none qualify (or random=True)
        a box is drawn uniformly from all of them.

        Args:
            bbox_originals: list of dicts with keys 'cls' and 'bbox'
                ([xmin, ymin, xmax, ymax]).
            opt: options object providing `min_box_size`.
            random: bool, skip the size filter entirely.

        Returns: one element of `bbox_originals`.
        """
        candidate_list = []
        for bbox in bbox_originals:
            xmin = bbox['bbox'][0]
            ymin = bbox['bbox'][1]
            xmax = bbox['bbox'][2]
            ymax = bbox['bbox'][3]
            box_w, box_h = xmax - xmin, ymax - ymin
            # Filter on the longer side only.
            max_axis = max(box_w, box_h)
            if max_axis < opt.min_box_size:
                continue
            candidate_list.append(bbox)
        if not random and len(candidate_list) > 0:
            # Sample from bbox within size limit
            return np.random.choice(candidate_list)
        # Random sample (fallback when nothing passed the filter)
        return np.random.choice(bbox_originals)

    def sample_window(self, img, label, bbox_sampled):
        """Placeholder for sampling a crop window around a bbox (unimplemented)."""
        pass

    def normalize_input(self, img, label, normalize_image=False):
        """Convert a PIL image/label pair into tensors.

        The label transform maps pixel bytes into [0, 1] (presumably via
        ToTensor -- confirm in get_raw_transform_fn), so we rescale by 255
        to recover integer class ids.
        """
        tnfm_image_raw = get_raw_transform_fn(normalize=normalize_image)
        tnfm_label_raw = get_raw_transform_fn(normalize=False)
        return tnfm_image_raw(img), tnfm_label_raw(label) * 255.0

    def gen_layout(self, bbox_sampled, label_original, opt):
        """Generate a semantic layout for `bbox_sampled` and paste it back.

        Returns:
            (label_canvas, input_dict, label_generated): the full-size label
            map with the generated region pasted in, the cropped model inputs,
            and the raw generated layout tensor.
        """
        # crop canvas
        input_dict = crop_canvas(bbox_sampled, label_original, opt)
        # generate layout
        with torch.no_grad():
            label_generated = self.G_box2mask.evaluate({
                'label_map': Variable(input_dict['label']),
                'mask_ctx_in': Variable(input_dict['mask_ctx_in']),
                'mask_out': Variable(input_dict['mask_out']),
                'mask_in': Variable(input_dict['mask_in']),
                'cls': Variable(input_dict['cls']),
                'label_map_orig': Variable(input_dict['label_orig']),
                'mask_ctx_in_orig': Variable(input_dict['mask_ctx_in_orig']),
                'mask_out_orig': Variable(input_dict['mask_out_orig'])
            }, target_size=(input_dict['label_orig'].size()[2:4]))
        # paste canvas
        label_canvas = paste_canvas(label_original, label_generated.data, \
                input_dict, resize=False)
        return label_canvas, input_dict, label_generated.data

    def gen_image(self, bbox_sampled, img_original, label_generated, opt):
        """Synthesize image content for `bbox_sampled` from a generated layout.

        Returns:
            (img_canvas, input_dict, img_generated): the full-size image with
            the generated patch pasted in (generator output rescaled from
            [-1, 1] to [0, 1]), the cropped model inputs, and the raw
            generated image tensor.
        """
        # crop canvas
        input_dict = crop_canvas(bbox_sampled, label_generated, opt, \
                img_original=img_original, transform_img=True)
        # generate layout
        with torch.no_grad():
            img_generated = self.G_mask2img.inference(
                Variable(input_dict['label']),
                Variable(torch.zeros_like(input_dict['label'])),
                Variable(input_dict['image']),
                Variable(input_dict['mask_in']),
                Variable(input_dict['mask_out'])
            )
        # paste canvas
        img_canvas = paste_canvas(img_original, (img_generated.data+1)/2, \
                input_dict, method=Image.BICUBIC, is_img=True)
        return img_canvas, input_dict, img_generated.data
| [
"util.data_util.crop_canvas",
"numpy.random.choice",
"util.data_util.paste_canvas",
"data.base_dataset.get_raw_transform_fn",
"torch.no_grad",
"torch.zeros_like",
"torch.autograd.Variable"
] | [((2324, 2371), 'data.base_dataset.get_raw_transform_fn', 'get_raw_transform_fn', ([], {'normalize': 'normalize_image'}), '(normalize=normalize_image)\n', (2344, 2371), False, 'from data.base_dataset import get_transform_params, get_raw_transform_fn, get_transform_fn, get_soft_bbox, get_masked_image\n'), ((2397, 2434), 'data.base_dataset.get_raw_transform_fn', 'get_raw_transform_fn', ([], {'normalize': '(False)'}), '(normalize=False)\n', (2417, 2434), False, 'from data.base_dataset import get_transform_params, get_raw_transform_fn, get_transform_fn, get_soft_bbox, get_masked_image\n'), ((2606, 2652), 'util.data_util.crop_canvas', 'crop_canvas', (['bbox_sampled', 'label_original', 'opt'], {}), '(bbox_sampled, label_original, opt)\n', (2617, 2652), False, 'from util.data_util import crop_canvas, paste_canvas\n'), ((3401, 3477), 'util.data_util.paste_canvas', 'paste_canvas', (['label_original', 'label_generated.data', 'input_dict'], {'resize': '(False)'}), '(label_original, label_generated.data, input_dict, resize=False)\n', (3413, 3477), False, 'from util.data_util import crop_canvas, paste_canvas\n'), ((3674, 3772), 'util.data_util.crop_canvas', 'crop_canvas', (['bbox_sampled', 'label_generated', 'opt'], {'img_original': 'img_original', 'transform_img': '(True)'}), '(bbox_sampled, label_generated, opt, img_original=img_original,\n transform_img=True)\n', (3685, 3772), False, 'from util.data_util import crop_canvas, paste_canvas\n'), ((4210, 4318), 'util.data_util.paste_canvas', 'paste_canvas', (['img_original', '((img_generated.data + 1) / 2)', 'input_dict'], {'method': 'Image.BICUBIC', 'is_img': '(True)'}), '(img_original, (img_generated.data + 1) / 2, input_dict, method\n =Image.BICUBIC, is_img=True)\n', (4222, 4318), False, 'from util.data_util import crop_canvas, paste_canvas\n'), ((2036, 2068), 'numpy.random.choice', 'np.random.choice', (['candidate_list'], {}), '(candidate_list)\n', (2052, 2068), True, 'import numpy as np\n'), ((2130, 2162), 
'numpy.random.choice', 'np.random.choice', (['bbox_originals'], {}), '(bbox_originals)\n', (2146, 2162), True, 'import numpy as np\n'), ((2693, 2708), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2706, 2708), False, 'import torch\n'), ((3823, 3838), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3836, 3838), False, 'import torch\n'), ((3911, 3940), 'torch.autograd.Variable', 'Variable', (["input_dict['label']"], {}), "(input_dict['label'])\n", (3919, 3940), False, 'from torch.autograd import Variable\n'), ((4023, 4052), 'torch.autograd.Variable', 'Variable', (["input_dict['image']"], {}), "(input_dict['image'])\n", (4031, 4052), False, 'from torch.autograd import Variable\n'), ((4070, 4101), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_in']"], {}), "(input_dict['mask_in'])\n", (4078, 4101), False, 'from torch.autograd import Variable\n'), ((4119, 4151), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_out']"], {}), "(input_dict['mask_out'])\n", (4127, 4151), False, 'from torch.autograd import Variable\n'), ((2796, 2825), 'torch.autograd.Variable', 'Variable', (["input_dict['label']"], {}), "(input_dict['label'])\n", (2804, 2825), False, 'from torch.autograd import Variable\n'), ((2858, 2893), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_ctx_in']"], {}), "(input_dict['mask_ctx_in'])\n", (2866, 2893), False, 'from torch.autograd import Variable\n'), ((2923, 2955), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_out']"], {}), "(input_dict['mask_out'])\n", (2931, 2955), False, 'from torch.autograd import Variable\n'), ((2984, 3015), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_in']"], {}), "(input_dict['mask_in'])\n", (2992, 3015), False, 'from torch.autograd import Variable\n'), ((3040, 3067), 'torch.autograd.Variable', 'Variable', (["input_dict['cls']"], {}), "(input_dict['cls'])\n", (3048, 3067), False, 'from torch.autograd import Variable\n'), ((3103, 3137), 'torch.autograd.Variable', 
'Variable', (["input_dict['label_orig']"], {}), "(input_dict['label_orig'])\n", (3111, 3137), False, 'from torch.autograd import Variable\n'), ((3175, 3215), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_ctx_in_orig']"], {}), "(input_dict['mask_ctx_in_orig'])\n", (3183, 3215), False, 'from torch.autograd import Variable\n'), ((3250, 3287), 'torch.autograd.Variable', 'Variable', (["input_dict['mask_out_orig']"], {}), "(input_dict['mask_out_orig'])\n", (3258, 3287), False, 'from torch.autograd import Variable\n'), ((3967, 4004), 'torch.zeros_like', 'torch.zeros_like', (["input_dict['label']"], {}), "(input_dict['label'])\n", (3983, 4004), False, 'import torch\n')] |
from osbot_aws.apis.IAM import IAM
class IAM_Policy:
    """Incrementally builds an AWS IAM policy document and pushes it to AWS
    through the `IAM` helper.  Statement-adding methods return `self` so
    calls can be chained."""

    def __init__(self, policy_name=None, policy_path=None):
        self.iam = IAM()
        self.policy_name = policy_name
        self.policy_path = policy_path
        self.version = "2012-10-17"
        self.statements = []
        # resolved once so policy_arn() doesn't hit AWS on every call
        self.account_id = self.iam.account_id()

    def add_cloud_watch(self, resource_arn):
        """Allow the standard CloudWatch Logs write actions on `resource_arn`."""
        log_actions = ["logs:CreateLogGroup",
                       "logs:CreateLogStream",
                       "logs:PutLogEvents"]
        return self.add_statement_allow(log_actions, [resource_arn])

    def add_statement(self, effect, actions, resources):
        """Append one policy statement; fluent (returns self)."""
        self.statements.append(dict(Effect=effect,
                                    Action=actions,
                                    Resource=resources))
        return self

    def add_statement_allow(self, actions, resources):
        """Shorthand for an 'Allow' statement."""
        return self.add_statement('Allow', actions, resources)

    def create(self, delete_before_create=False):
        """Create the policy in AWS; error dict if no name was given."""
        if self.policy_name is None:
            return {'status':'error', 'data':'policy name is None'}
        return self.iam.policy_create(self.policy_name, self.statement(),
                                      delete_before_create=delete_before_create)

    def delete(self):
        """Delete this policy from AWS."""
        return self.iam.policy_delete(self.policy_arn())

    def exists(self):
        """True if a policy with this ARN already exists."""
        return self.iam.policy_exists(self.policy_arn())

    def policy_arn(self):
        """ARN derived from name, path and the cached account id."""
        return self.iam.policy_arn(self.policy_name, self.policy_path, self.account_id)

    def statement(self):
        """The full policy document in AWS JSON form."""
        return dict(Version=self.version, Statement=self.statements)
| [
"osbot_aws.apis.IAM.IAM"
] | [((142, 147), 'osbot_aws.apis.IAM.IAM', 'IAM', ([], {}), '()\n', (145, 147), False, 'from osbot_aws.apis.IAM import IAM\n')] |
# copyright (c) 2018 Larz60+
from lxml import html
import ScraperPaths
import CIA_ScanTools
import GetPage
import os
import json
import sys
from bs4 import BeautifulSoup
class CIA_History:
    """Scrapes the CIA World Factbook 'history' page and stores the parsed
    content under self.fact_links['History'] (persisted via CIA_Scan_Tools)."""
    def __init__(self):
        # Helper objects for paths, cached page fetching, and shared scan state.
        self.spath = ScraperPaths.ScraperPaths()
        self.gp = GetPage.GetPage()
        self.getpage = self.gp.get_page
        self.get_filename = self.gp.get_filename
        self.cst = CIA_ScanTools.CIA_Scan_Tools()
        self.fact_links = self.cst.fact_links
        url = 'https://www.cia.gov/library/publications/resources/the-world-factbook/docs/history.html'
        filename = self.get_filename(url)
        self.get_history(url, filename)
        # Persist the (mutated) fact_links structure.
        self.cst.save_fact_links()
    def get_history(self, url, filename):
        """Fetch the history page and parse it into nested dicts:
        {title: {'Description': [...], section_title: {'subtitle': ..., year: text}}}.

        NOTE(review): the parsing below relies on fixed table positions
        (tables[1], tables[3]) and on row 0 preceding rows 1/2 so that c2/c3
        are bound before use -- a page-layout change would break this.
        """
        page = self.getpage(url, filename)
        c1 = self.fact_links['History'] = {}
        soup = BeautifulSoup(page, 'lxml')
        tables = soup.findAll('table')
        # tables[1]: page title (row 0) and description paragraphs (row 1).
        trs = tables[1].find_all('tr')
        for n, tr in enumerate(trs):
            if n == 0:
                item = tr.find('span', {'class': 'h1'})
                title = item.text
                c2 = c1[title] = {}
            elif n == 1:
                allps = tr.find_all('p')
                descr = []
                for p in allps:
                    descr.append(p.text)
                c2['Description'] = descr
        # tables[3]: section heading (row 0), subtitle (row 1), and a nested
        # year -> text table (row 2).
        trs = tables[3].find_all('tr')
        for n, tr in enumerate(trs):
            if n == 0:
                title1 = tr.find('span').text
                c3 = c2[title1] = {}
            elif n == 1:
                subtext = tr.find('p').text
                c3['subtitle'] = subtext
            elif n == 2:
                newtable = tr.find('table')
                newtrs = newtable.find_all('tr')
                for newtr in newtrs:
                    newtds = newtr.find_all('td')
                    year = newtds[0].text
                    year_text = newtds[1].text
                    c3[year] = year_text
if __name__ == '__main__':
    # Running the module directly scrapes and saves in one shot (all work
    # happens in the constructor).
    CIA_History()
| [
"bs4.BeautifulSoup",
"CIA_ScanTools.CIA_Scan_Tools",
"GetPage.GetPage",
"ScraperPaths.ScraperPaths"
] | [((237, 264), 'ScraperPaths.ScraperPaths', 'ScraperPaths.ScraperPaths', ([], {}), '()\n', (262, 264), False, 'import ScraperPaths\n'), ((283, 300), 'GetPage.GetPage', 'GetPage.GetPage', ([], {}), '()\n', (298, 300), False, 'import GetPage\n'), ((409, 439), 'CIA_ScanTools.CIA_Scan_Tools', 'CIA_ScanTools.CIA_Scan_Tools', ([], {}), '()\n', (437, 439), False, 'import CIA_ScanTools\n'), ((864, 891), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page', '"""lxml"""'], {}), "(page, 'lxml')\n", (877, 891), False, 'from bs4 import BeautifulSoup\n')] |
from django.conf.urls import patterns
from django.conf import settings
# Test-only URLconf. NOTE(review): `patterns()` with string view paths is the
# legacy pre-1.8 Django API (removed in 1.10); kept as-is because this project
# is pinned to a Django version that still provides it.
urlpatterns = patterns(
    '',
    # Serve files under MEDIA_ROOT with directory indexes (test fixtures only;
    # never serve media this way in production).
    (r'^media/(?P<path>.+)$', 'django.views.static.serve',
     {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
    (r'^(.*\.html)$', 'thumbnail_tests.views.direct_to_template'),
)
| [
"django.conf.urls.patterns"
] | [((87, 289), 'django.conf.urls.patterns', 'patterns', (['""""""', "('^media/(?P<path>.+)$', 'django.views.static.serve', {'document_root':\n settings.MEDIA_ROOT, 'show_indexes': True})", "('^(.*\\\\.html)$', 'thumbnail_tests.views.direct_to_template')"], {}), "('', ('^media/(?P<path>.+)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT, 'show_indexes': True}), (\n '^(.*\\\\.html)$', 'thumbnail_tests.views.direct_to_template'))\n", (95, 289), False, 'from django.conf.urls import patterns\n')] |
r"""Train an EfficientNet classifier.
Currently, the implementation of multi-label, multi-class classification
is non-functional.
During training, start tensorboard from within the classification/ directory:
tensorboard --logdir run --bind_all --samples_per_plugin scalars=0,images=0
Example usage:
python train_classifier_tf.py run_idfg /ssd/crops_sq \
-m "efficientnet-b0" --pretrained --finetune --label-weighted \
--epochs 50 --batch-size 512 --lr 1e-4 \
--seed 123 \
--logdir run_idfg
"""
from __future__ import annotations
import argparse
from collections import defaultdict
from collections.abc import Callable, Mapping, MutableMapping, Sequence
from datetime import datetime
import json
import os
from typing import Any, Optional
import uuid
import numpy as np
import sklearn.metrics
import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
import tqdm
from classification.train_utils import (
HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img,
imgs_with_confidences, load_dataset_csv, prefix_all_keys)
from visualization import plot_utils
# Let tf.data pick the parallelism level for map/prefetch calls.
AUTOTUNE = tf.data.experimental.AUTOTUNE

# match pytorch EfficientNet model names
# Per model: 'cls' names the builder class, 'img_size' is the input resolution
# in pixels, 'dropout' the classifier dropout rate.
EFFICIENTNET_MODELS: Mapping[str, Mapping[str, Any]] = {
    'efficientnet-b0': dict(cls='EfficientNetB0', img_size=224, dropout=0.2),
    'efficientnet-b1': dict(cls='EfficientNetB1', img_size=240, dropout=0.2),
    'efficientnet-b2': dict(cls='EfficientNetB2', img_size=260, dropout=0.3),
    'efficientnet-b3': dict(cls='EfficientNetB3', img_size=300, dropout=0.3),
    'efficientnet-b4': dict(cls='EfficientNetB4', img_size=380, dropout=0.4),
    'efficientnet-b5': dict(cls='EfficientNetB5', img_size=456, dropout=0.4),
    'efficientnet-b6': dict(cls='EfficientNetB6', img_size=528, dropout=0.5),
    'efficientnet-b7': dict(cls='EfficientNetB7', img_size=600, dropout=0.5)
}
def create_dataset(
        img_files: Sequence[str],
        labels: Sequence[Any],
        sample_weights: Optional[Sequence[float]] = None,
        img_base_dir: str = '',
        transform: Optional[Callable[[tf.Tensor], Any]] = None,
        target_transform: Optional[Callable[[Any], Any]] = None,
        cache: bool | str = False
        ) -> tf.data.Dataset:
    """Create a tf.data.Dataset.

    The dataset returns elements (img, label, img_file, sample_weight) if
    sample_weights is not None, or (img, label, img_file) if
    sample_weights=None.
        img: tf.Tensor, shape [H, W, 3], type uint8
        label: tf.Tensor
        img_file: tf.Tensor, scalar, type str
        sample_weight: tf.Tensor, scalar, type float32

    Possible TODO: oversample the imbalanced classes
    see tf.data.experimental.sample_from_datasets

    Args:
        img_files: list of str, relative paths from img_base_dir
        labels: list of int if multilabel=False
        sample_weights: optional list of float
        img_base_dir: str, base directory for images; if empty, paths in
            img_files are used unchanged
        transform: optional transform to apply to a single uint8 JPEG image
        target_transform: optional transform to apply to a single label
        cache: bool or str, cache images in memory if True, cache images to
            a file on disk if a str

    Returns: tf.data.Dataset
    """
    # Bug fix: the old code computed `img_base_dir + os.sep + p`, which with
    # the default img_base_dir='' prepended a bare separator and turned the
    # relative paths into absolute ones. Normalize the prefix once instead.
    prefix = os.path.join(img_base_dir, '') if img_base_dir else ''

    # images dataset
    img_ds = tf.data.Dataset.from_tensor_slices(img_files)
    img_ds = img_ds.map(lambda p: tf.io.read_file(prefix + p),
                        num_parallel_calls=AUTOTUNE)

    # for smaller disk / memory usage, we cache the raw JPEG bytes instead
    # of the decoded Tensor
    if isinstance(cache, str):
        img_ds = img_ds.cache(cache)
    elif cache:
        img_ds = img_ds.cache()

    # convert JPEG bytes to a 3D uint8 Tensor
    # keras EfficientNet already includes normalization from [0, 255] to [0, 1],
    # so we don't need to do that here
    img_ds = img_ds.map(lambda img: tf.io.decode_jpeg(img, channels=3))

    if transform:
        img_ds = img_ds.map(transform, num_parallel_calls=AUTOTUNE)

    # labels dataset
    labels_ds = tf.data.Dataset.from_tensor_slices(labels)
    if target_transform:
        labels_ds = labels_ds.map(target_transform, num_parallel_calls=AUTOTUNE)

    # img_files dataset
    img_files_ds = tf.data.Dataset.from_tensor_slices(img_files)

    if sample_weights is None:
        return tf.data.Dataset.zip((img_ds, labels_ds, img_files_ds))

    # weights dataset
    weights_ds = tf.data.Dataset.from_tensor_slices(sample_weights)
    return tf.data.Dataset.zip((img_ds, labels_ds, img_files_ds, weights_ds))
def create_dataloaders(
        dataset_csv_path: str,
        label_index_json_path: str,
        splits_json_path: str,
        cropped_images_dir: str,
        img_size: int,
        multilabel: bool,
        label_weighted: bool,
        weight_by_detection_conf: bool | str,
        batch_size: int,
        augment_train: bool,
        cache_splits: Sequence[str]
        ) -> tuple[dict[str, tf.data.Dataset], list[str]]:
    """Creates one batched, prefetched tf.data.Dataset per split.

    Args:
        dataset_csv_path: str, path to CSV file with columns
            ['dataset', 'location', 'label'], where label is a comma-delimited
            list of labels
        label_index_json_path: str, path to label index JSON file
        splits_json_path: str, path to JSON file
        cropped_images_dir: str, directory containing the image crops
        img_size: int, side length (pixels) images are resized/padded to
        multilabel: bool, whether this is multi-label classification
        label_weighted: bool, whether to weight samples to balance labels
        weight_by_detection_conf: bool or str, weight examples by detection
            confidence (a str is a path to a calibration file)
        batch_size: int
        augment_train: bool, whether to shuffle/augment the training set
        cache_splits: list of str, splits to cache
            training set is cached at /mnt/tempds/random_file_name
            validation and test sets are cached in memory
    Returns:
        datasets: dict, maps split to DataLoader
        label_names: list of str, label names in order of label id
    """
    df, label_names, split_to_locs = load_dataset_csv(
        dataset_csv_path, label_index_json_path, splits_json_path,
        multilabel=multilabel, label_weighted=label_weighted,
        weight_by_detection_conf=weight_by_detection_conf)

    # define the transforms
    # efficientnet data preprocessing:
    # - train:
    #     1) random crop: aspect_ratio_range=(0.75, 1.33), area_range=(0.08, 1.0)
    #     2) bicubic resize to img_size
    #     3) random horizontal flip
    # - test:
    #     1) center crop
    #     2) bicubic resize to img_size

    @tf.function
    def train_transform(img: tf.Tensor) -> tf.Tensor:
        """Returns: tf.Tensor, shape [img_size, img_size, C], type float32"""
        img = tf.image.resize_with_pad(img, img_size, img_size,
                                       method=tf.image.ResizeMethod.BICUBIC)
        # random photometric + geometric augmentation, train split only
        img = tf.image.random_flip_left_right(img)
        img = tf.image.random_brightness(img, max_delta=0.25)
        img = tf.image.random_contrast(img, lower=0.75, upper=1.25)
        img = tf.image.random_saturation(img, lower=0.75, upper=1.25)
        return img

    @tf.function
    def test_transform(img: tf.Tensor) -> tf.Tensor:
        """Returns: tf.Tensor, shape [img_size, img_size, C], type float32"""
        img = tf.image.resize_with_pad(img, img_size, img_size,
                                       method=tf.image.ResizeMethod.BICUBIC)
        return img

    dataloaders = {}
    for split, locs in split_to_locs.items():
        # only the train split is shuffled/augmented, and only when requested
        is_train = (split == 'train') and augment_train
        split_df = df[df['dataset_location'].isin(locs)]
        weights = None
        if label_weighted or weight_by_detection_conf:
            # weights sums to:
            # - if weight_by_detection_conf: (# images in split - conf delta)
            # - otherwise: (# images in split)
            weights = split_df['weights'].tolist()
            if not weight_by_detection_conf:
                assert np.isclose(sum(weights), len(split_df))
        # cache is a bool (in-memory cache) except for the train split, which
        # is cached to a random file on local disk to bound RAM usage
        cache: bool | str = (split in cache_splits)
        if split == 'train' and 'train' in cache_splits:
            unique_filename = str(uuid.uuid4())
            os.makedirs('/mnt/tempds/', exist_ok=True)
            cache = f'/mnt/tempds/{unique_filename}'
        ds = create_dataset(
            img_files=split_df['path'].tolist(),
            labels=split_df['label_index'].tolist(),
            sample_weights=weights,
            img_base_dir=cropped_images_dir,
            transform=train_transform if is_train else test_transform,
            target_transform=None,
            cache=cache)
        if is_train:
            ds = ds.shuffle(1000, reshuffle_each_iteration=True)
        ds = ds.batch(batch_size).prefetch(buffer_size=AUTOTUNE)
        dataloaders[split] = ds
    return dataloaders, label_names
def build_model(model_name: str, num_classes: int, img_size: int,
                pretrained: bool, finetune: bool) -> tf.keras.Model:
    """Builds an EfficientNet backbone with a fresh dropout + dense head.

    The returned model exposes its backbone as `model.base_model` so callers
    can later flip `trainable` (e.g. when the fine-tune phase ends).
    """
    cfg = EFFICIENTNET_MODELS[model_name]
    backbone_cls = tf.keras.applications.__dict__[cfg['cls']]

    img_input = tf.keras.layers.Input(shape=(img_size, img_size, 3))
    backbone = backbone_cls(
        input_tensor=img_input,
        weights='imagenet' if pretrained else None,
        include_top=False,
        pooling='avg')
    if finetune:
        # freeze the backbone's weights, including BatchNorm statistics
        # https://www.tensorflow.org/guide/keras/transfer_learning#fine-tuning
        backbone.trainable = False

    # classification head: dropout followed by a dense logits layer,
    # initialized the same way as the official EfficientNet head
    pooled = tf.keras.layers.Dropout(cfg['dropout'], name='top_dropout')(
        backbone.output)
    logits = tf.keras.layers.Dense(
        num_classes,
        kernel_initializer=tf.keras.initializers.VarianceScaling(
            scale=1. / 3., mode='fan_out', distribution='uniform'),
        name='logits')(pooled)

    model = tf.keras.Model(img_input, logits, name='complete_model')
    model.base_model = backbone  # cached so finetune can be turned off later
    return model
def main(dataset_dir: str,
         cropped_images_dir: str,
         multilabel: bool,
         model_name: str,
         pretrained: bool,
         finetune: int,
         label_weighted: bool,
         weight_by_detection_conf: bool | str,
         epochs: int,
         batch_size: int,
         lr: float,
         weight_decay: float,
         seed: Optional[int] = None,
         logdir: str = '',
         cache_splits: Sequence[str] = ()) -> None:
    """Trains and evaluates an EfficientNet classifier.

    Creates a timestamped run directory under <logdir>, saves the run
    parameters to params.json, builds dataloaders and model, then runs
    train/val epochs. Whenever val top-1 accuracy improves, saves a
    checkpoint and runs the test split; training stops early after 8 epochs
    without improvement.

    Args:
        dataset_dir: str, directory with classification_ds.csv,
            label_index.json, and splits.json
        cropped_images_dir: str, directory containing image crops
        multilabel: bool, whether this is multi-label classification
        model_name: str, key into EFFICIENTNET_MODELS
        pretrained: bool, start from ImageNet weights
        finetune: int, number of initial epochs with a frozen backbone
        label_weighted: bool, weight samples to balance labels
        weight_by_detection_conf: bool or str, weight by detection confidence
        epochs: int, 0 for eval-only
        batch_size: int
        lr: float, initial learning rate
        weight_decay: float, L2-regularization constant
        seed: optional int, random seed (drawn randomly if None)
        logdir: str, parent directory for run output
        cache_splits: list of str, splits to cache
    """
    # input validation
    assert os.path.exists(dataset_dir)
    assert os.path.exists(cropped_images_dir)
    if isinstance(weight_by_detection_conf, str):
        assert os.path.exists(weight_by_detection_conf)

    # set seed
    seed = np.random.randint(10_000) if seed is None else seed
    np.random.seed(seed)
    tf.random.set_seed(seed)

    # create logdir and save params
    # NOTE: locals() is captured before any further locals are defined so
    # params.json records exactly the function arguments (+ resolved seed)
    params = dict(locals())  # make a copy
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')  # '20200722_110816'
    logdir = os.path.join(logdir, timestamp)
    os.makedirs(logdir, exist_ok=True)
    print('Created logdir:', logdir)
    with open(os.path.join(logdir, 'params.json'), 'w') as f:
        json.dump(params, f, indent=1)

    # grow GPU memory on demand instead of pre-allocating it all
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)

    img_size = EFFICIENTNET_MODELS[model_name]['img_size']

    # create dataloaders and log the index_to_label mapping
    loaders, label_names = create_dataloaders(
        dataset_csv_path=os.path.join(dataset_dir, 'classification_ds.csv'),
        label_index_json_path=os.path.join(dataset_dir, 'label_index.json'),
        splits_json_path=os.path.join(dataset_dir, 'splits.json'),
        cropped_images_dir=cropped_images_dir,
        img_size=img_size,
        multilabel=multilabel,
        label_weighted=label_weighted,
        weight_by_detection_conf=weight_by_detection_conf,
        batch_size=batch_size,
        augment_train=True,
        cache_splits=cache_splits)

    writer = tf.summary.create_file_writer(logdir)
    writer.set_as_default()

    model = build_model(
        model_name, num_classes=len(label_names), img_size=img_size,
        pretrained=pretrained, finetune=finetune > 0)

    # define loss function and optimizer
    loss_fn: tf.keras.losses.Loss
    if multilabel:
        loss_fn = tf.keras.losses.BinaryCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
    else:
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE)

    # using EfficientNet training defaults
    # - batch norm momentum: 0.99
    # - optimizer: RMSProp, decay 0.9 and momentum 0.9
    # - epochs: 350
    # - learning rate: 0.256, decays by 0.97 every 2.4 epochs
    # - weight decay: 1e-5
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        lr, decay_steps=1, decay_rate=0.97, staircase=True)
    optimizer = tf.keras.optimizers.RMSprop(
        learning_rate=lr, rho=0.9, momentum=0.9)

    best_epoch_metrics: dict[str, float] = {}
    for epoch in range(epochs):
        print(f'Epoch: {epoch}')
        optimizer.learning_rate = lr_schedule(epoch)
        tf.summary.scalar('lr', optimizer.learning_rate, epoch)

        if epoch > 0 and finetune == epoch:
            print('Turning off fine-tune!')
            model.base_model.trainable = True

        print('- train:')
        # TODO: change weighted to False if oversampling minority classes
        train_metrics, train_heaps, train_cm = run_epoch(
            model, loader=loaders['train'], weighted=label_weighted,
            loss_fn=loss_fn, weight_decay=weight_decay, optimizer=optimizer,
            finetune=finetune > epoch, return_extreme_images=True)
        train_metrics = prefix_all_keys(train_metrics, prefix='train/')
        log_run('train', epoch, writer, label_names,
                metrics=train_metrics, heaps=train_heaps, cm=train_cm)

        print('- val:')
        val_metrics, val_heaps, val_cm = run_epoch(
            model, loader=loaders['val'], weighted=label_weighted,
            loss_fn=loss_fn, return_extreme_images=True)
        val_metrics = prefix_all_keys(val_metrics, prefix='val/')
        log_run('val', epoch, writer, label_names,
                metrics=val_metrics, heaps=val_heaps, cm=val_cm)

        if val_metrics['val/acc_top1'] > best_epoch_metrics.get('val/acc_top1', 0):  # pylint: disable=line-too-long
            filename = os.path.join(logdir, f'ckpt_{epoch}.h5')
            # fix: interpolate the checkpoint path (message previously printed
            # a literal placeholder instead of the actual path)
            print(f'New best model! Saving checkpoint to {filename}')
            model.save(filename)
            best_epoch_metrics.update(train_metrics)
            best_epoch_metrics.update(val_metrics)
            best_epoch_metrics['epoch'] = epoch

            print('- test:')
            test_metrics, test_heaps, test_cm = run_epoch(
                model, loader=loaders['test'], weighted=label_weighted,
                loss_fn=loss_fn, return_extreme_images=True)
            test_metrics = prefix_all_keys(test_metrics, prefix='test/')
            log_run('test', epoch, writer, label_names,
                    metrics=test_metrics, heaps=test_heaps, cm=test_cm)

        # stop training after 8 epochs without improvement
        # (.get avoids a KeyError if no epoch ever set a new best)
        if epoch >= best_epoch_metrics.get('epoch', 0) + 8:
            break

    hparams_dict = {
        'model_name': model_name,
        'multilabel': multilabel,
        'finetune': finetune,
        'batch_size': batch_size,
        'epochs': epochs
    }
    hp.hparams(hparams_dict)
    writer.close()
def log_run(split: str, epoch: int, writer: tf.summary.SummaryWriter,
            label_names: Sequence[str], metrics: MutableMapping[str, float],
            heaps: Mapping[str, Mapping[int, list[HeapItem]]], cm: np.ndarray
            ) -> None:
    """Writes one split's epoch results to TensorBoard: scalar metrics,
    a normalized confusion-matrix figure, and tp/fp/fn example images.

    Args:
        metrics: dict, keys already prefixed with {split}/; updated in place
            with per-class recall entries
    """
    # fold per-class recall (derived from the confusion matrix) into metrics
    recalls = recall_from_confusion_matrix(cm, label_names)
    metrics.update(prefix_all_keys(recalls, f'{split}/label_recall/'))

    # scalar metrics
    for name, value in metrics.items():
        tf.summary.scalar(name, value, epoch)

    # confusion matrix rendered as a single image (tf wants a batch dim)
    fig = plot_utils.plot_confusion_matrix(
        cm, classes=label_names, normalize=True)
    tf.summary.image(f'confusion_matrix/{split}',
                     tf.convert_to_tensor(fig_to_img(fig)[np.newaxis, ...]),
                     step=epoch)

    # extreme (tp/fp/fn) example images
    for heap_type, heap_dict in heaps.items():
        log_images_with_confidence(heap_dict, label_names, epoch=epoch,
                                   tag=f'{split}/{heap_type}')
    writer.flush()
def log_images_with_confidence(
        heap_dict: Mapping[int, list[HeapItem]],
        label_names: Sequence[str],
        epoch: int,
        tag: str) -> None:
    """Logs each label's extreme examples to TensorBoard as one figure plus
    a text summary listing the corresponding image files.

    Args:
        heap_dict: dict, maps label_id to list of HeapItem, where each HeapItem
            data is a list [img, target, top3_conf, top3_preds, img_file],
            and img is a tf.Tensor of shape [H, W, 3]
        label_names: list of str, label names in order of label id
        epoch: int
        tag: str
    """
    for label_id, heap in heap_dict.items():
        label_name = label_names[label_id]
        # highest-priority items first
        examples = [item.data for item in sorted(heap, reverse=True)]
        fig, img_files = imgs_with_confidences(examples, label_names)
        # tf.summary.image requires input of shape [N, H, W, C]
        batched = fig_to_img(fig)[np.newaxis, ...]
        tf.summary.image(f'{label_name}/{tag}', tf.convert_to_tensor(batched),
                         step=epoch)
        tf.summary.text(f'{label_name}/{tag}_files', '\n\n'.join(img_files),
                        step=epoch)
def track_extreme_examples(tp_heaps: dict[int, list[HeapItem]],
                           fp_heaps: dict[int, list[HeapItem]],
                           fn_heaps: dict[int, list[HeapItem]],
                           inputs: tf.Tensor,
                           labels: tf.Tensor,
                           img_files: tf.Tensor,
                           logits: tf.Tensor) -> None:
    """Folds this batch into the per-label heaps of the 5 most extreme
    true-positive (tp), false-positive (fp), and false-negative (fn) examples.

    Each HeapItem's data attribute is a tuple with:
    - img: np.ndarray, shape [H, W, 3], type uint8
    - label: int
    - top3_conf: list of float
    - top3_preds: list of float
    - img_file: str

    Args:
        *_heaps: dict, maps label_id (int) to heap of HeapItems
        inputs: tf.Tensor, shape [batch_size, H, W, 3], type float32
        labels: tf.Tensor, shape [batch_size]
        img_files: tf.Tensor, shape [batch_size], type tf.string
        logits: tf.Tensor, shape [batch_size, num_classes]
    """
    target_list = labels.numpy().tolist()
    img_batch = inputs.numpy().astype(np.uint8)
    file_list = img_files.numpy().astype(str).tolist()
    probs = tf.nn.softmax(logits, axis=1)

    for target, img, img_file, p in zip(target_list, img_batch, file_list,
                                        probs):
        target_conf = p[target].numpy().item()
        conf3, pred3 = tf.math.top_k(p, k=3, sorted=True)
        conf3 = conf3.numpy().tolist()
        pred3 = pred3.numpy().tolist()
        payload = (img, target, conf3, pred3, img_file)
        if pred3[0] == target:
            # true positive: priority is the margin over the runner-up class
            hit = HeapItem(priority=target_conf - conf3[1], data=payload)
            add_to_heap(tp_heaps[target], hit, k=5)
        else:
            # wrong top-1: counts as a false positive for the predicted class
            # and a false negative for the true class; priority is how far
            # the winner beat the true class
            miss = HeapItem(priority=conf3[0] - target_conf, data=payload)
            add_to_heap(fp_heaps[pred3[0]], miss, k=5)
            add_to_heap(fn_heaps[target], miss, k=5)
def run_epoch(model: tf.keras.Model,
              loader: tf.data.Dataset,
              weighted: bool,
              top: Sequence[int] = (1, 3),
              loss_fn: Optional[tf.keras.losses.Loss] = None,
              weight_decay: float = 0,
              finetune: bool = False,
              optimizer: Optional[tf.keras.optimizers.Optimizer] = None,
              return_extreme_images: bool = False
              ) -> tuple[
                  dict[str, float],
                  dict[str, dict[int, list[HeapItem]]],
                  np.ndarray
              ]:
    """Runs for 1 epoch. Trains when an optimizer is given, evaluates otherwise.

    Args:
        model: tf.keras.Model
        loader: tf.data.Dataset
        weighted: bool, whether to use sample weights in calculating loss and
            accuracy
        top: tuple of int, list of values of k for calculating top-K accuracy
        loss_fn: optional loss function, calculates the mean loss over a batch
        weight_decay: float, L2-regularization constant
        finetune: bool, if true sets model's dropout and BN layers to eval mode
        optimizer: optional optimizer; must be accompanied by loss_fn

    Returns:
        metrics: dict, metrics from epoch, contains keys:
            'loss': float, mean per-example loss over entire epoch,
                only included if loss_fn is not None
            'acc_top{k}': float, accuracy@k over the entire epoch
        heaps: dict, keys are ['tp', 'fp', 'fn'], values are heap_dicts,
            each heap_dict maps label_id (int) to a heap of <= 5 HeapItems with
            data attribute (img, target, top3_conf, top3_preds, img_file)
            - 'tp': priority is the difference between target confidence and
                2nd highest confidence
            - 'fp': priority is the difference between highest confidence and
                target confidence
            - 'fn': same as 'fp'
        confusion_matrix: np.ndarray, shape [num_classes, num_classes],
            C[i, j] = # of samples with true label i, predicted as label j
    """
    # if evaluating or finetuning, set dropout & BN layers to eval mode
    is_train = False
    train_dropout_and_bn = False
    if optimizer is not None:
        assert loss_fn is not None
        is_train = True
        if not finetune:
            train_dropout_and_bn = True
            # reg_vars is only defined on the (is_train and not finetune)
            # path; its sole use below is guarded by the same condition
            reg_vars = [
                v for v in model.trainable_variables if 'kernel' in v.name]
    if loss_fn is not None:
        losses = tf.keras.metrics.Mean()
    accuracies_topk = {
        k: tf.keras.metrics.SparseTopKCategoricalAccuracy(k) for k in top
    }
    # for each label, track 5 most-confident and least-confident examples
    tp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
    fp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
    fn_heaps: dict[int, list[HeapItem]] = defaultdict(list)
    all_labels = []
    all_preds = []
    tqdm_loader = tqdm.tqdm(loader)
    for batch in tqdm_loader:
        if weighted:
            inputs, labels, img_files, weights = batch
        else:
            # even if batch contains sample weights, don't use them
            inputs, labels, img_files = batch[0:3]
            weights = None
        all_labels.append(labels.numpy())
        desc = []
        # the tape only records gradients when training (is_train=True)
        with tf.GradientTape(watch_accessed_variables=is_train) as tape:
            outputs = model(inputs, training=train_dropout_and_bn)
            if loss_fn is not None:
                loss = loss_fn(labels, outputs)
                if weights is not None:
                    loss *= weights
                # we do not track L2-regularization loss in the loss metric
                losses.update_state(loss, sample_weight=weights)
                desc.append(f'Loss {losses.result().numpy():.4f}')
            if optimizer is not None:
                loss = tf.math.reduce_mean(loss)
                if not finetune:  # only regularize layers before the final FC
                    loss += weight_decay * tf.add_n(
                        tf.nn.l2_loss(v) for v in reg_vars)
        all_preds.append(tf.math.argmax(outputs, axis=1).numpy())
        if optimizer is not None:
            gradients = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        for k, acc in accuracies_topk.items():
            acc.update_state(labels, outputs, sample_weight=weights)
            desc.append(f'Acc@{k} {acc.result().numpy() * 100:.3f}')
        tqdm_loader.set_description(' '.join(desc))
        if return_extreme_images:
            track_extreme_examples(tp_heaps, fp_heaps, fn_heaps, inputs,
                                   labels, img_files, outputs)
    confusion_matrix = sklearn.metrics.confusion_matrix(
        y_true=np.concatenate(all_labels), y_pred=np.concatenate(all_preds))
    metrics = {}
    if loss_fn is not None:
        metrics['loss'] = losses.result().numpy().item()
    for k, acc in accuracies_topk.items():
        metrics[f'acc_top{k}'] = acc.result().numpy().item() * 100
    heaps = {'tp': tp_heaps, 'fp': fp_heaps, 'fn': fn_heaps}
    return metrics, heaps, confusion_matrix
def _parse_args() -> argparse.Namespace:
    """Builds the command-line parser and parses sys.argv."""
    p = argparse.ArgumentParser(
        description='Trains classifier.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # required positional arguments
    p.add_argument(
        'dataset_dir',
        help='path to directory containing: 1) classification dataset CSV, '
             '2) label index JSON, 3) splits JSON')
    p.add_argument(
        'cropped_images_dir',
        help='path to local directory where image crops are saved')

    # model / task configuration
    p.add_argument(
        '--multilabel', action='store_true',
        help='for multi-label, multi-class classification')
    p.add_argument(
        '-m', '--model-name', default='efficientnet-b0',
        choices=list(EFFICIENTNET_MODELS.keys()),
        help='which EfficientNet model')
    p.add_argument(
        '--pretrained', action='store_true',
        help='start with pretrained model')
    p.add_argument(
        '--finetune', type=int, default=0,
        help='only fine tune the final fully-connected layer for the first '
             '<finetune> epochs')

    # sample weighting
    p.add_argument(
        '--label-weighted', action='store_true',
        help='weight training samples to balance labels')
    p.add_argument(
        '--weight-by-detection-conf', nargs='?', const=True, default=False,
        help='weight training examples by detection confidence. '
             'Optionally takes a .npz file for isotonic calibration.')

    # optimization
    p.add_argument(
        '--epochs', type=int, default=0,
        help='number of epochs for training, 0 for eval-only')
    p.add_argument(
        '--batch-size', type=int, default=256,
        help='batch size for both training and eval')
    p.add_argument(
        '--lr', type=float, default=None,
        help='initial learning rate, defaults to (0.016 * batch_size / 256)')
    p.add_argument(
        '--weight-decay', type=float, default=1e-5,
        help='weight decay')

    # miscellaneous
    p.add_argument(
        '--seed', type=int,
        help='random seed')
    p.add_argument(
        '--logdir', default='.',
        help='directory where TensorBoard logs and a params file are saved')
    p.add_argument(
        '--cache', nargs='*', choices=['train', 'val', 'test'], default=(),
        help='which splits of the dataset to cache')
    return p.parse_args()
if __name__ == '__main__':
    # script entry point: parse CLI args, derive the default learning rate,
    # and hand everything to main()
    args = _parse_args()
    if args.lr is None:
        # linear LR scaling with batch size
        args.lr = 0.016 * args.batch_size / 256  # based on TF models repo
    main(dataset_dir=args.dataset_dir,
         cropped_images_dir=args.cropped_images_dir,
         multilabel=args.multilabel,
         model_name=args.model_name,
         pretrained=args.pretrained,
         finetune=args.finetune,
         label_weighted=args.label_weighted,
         weight_by_detection_conf=args.weight_by_detection_conf,
         epochs=args.epochs,
         batch_size=args.batch_size,
         lr=args.lr,
         weight_decay=args.weight_decay,
         seed=args.seed,
         logdir=args.logdir,
         cache_splits=args.cache)
| [
"tensorflow.io.read_file",
"tensorflow.GradientTape",
"tensorflow.nn.softmax",
"tensorflow.image.random_saturation",
"tensorflow.summary.image",
"tensorflow.keras.layers.Input",
"os.path.exists",
"classification.train_utils.imgs_with_confidences",
"argparse.ArgumentParser",
"tensorflow.data.Datase... | [((3300, 3345), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['img_files'], {}), '(img_files)\n', (3334, 3345), True, 'import tensorflow as tf\n'), ((4063, 4105), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['labels'], {}), '(labels)\n', (4097, 4105), True, 'import tensorflow as tf\n'), ((4256, 4301), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['img_files'], {}), '(img_files)\n', (4290, 4301), True, 'import tensorflow as tf\n'), ((4444, 4494), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['sample_weights'], {}), '(sample_weights)\n', (4478, 4494), True, 'import tensorflow as tf\n'), ((4506, 4572), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(img_ds, labels_ds, img_files_ds, weights_ds)'], {}), '((img_ds, labels_ds, img_files_ds, weights_ds))\n', (4525, 4572), True, 'import tensorflow as tf\n'), ((5662, 5850), 'classification.train_utils.load_dataset_csv', 'load_dataset_csv', (['dataset_csv_path', 'label_index_json_path', 'splits_json_path'], {'multilabel': 'multilabel', 'label_weighted': 'label_weighted', 'weight_by_detection_conf': 'weight_by_detection_conf'}), '(dataset_csv_path, label_index_json_path, splits_json_path,\n multilabel=multilabel, label_weighted=label_weighted,\n weight_by_detection_conf=weight_by_detection_conf)\n', (5678, 5850), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((8881, 8933), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(img_size, img_size, 3)'}), '(shape=(img_size, img_size, 3))\n', (8902, 8933), True, 'import tensorflow as tf\n'), ((9584, 9638), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {'name': '"""complete_model"""'}), "(inputs, outputs, 
name='complete_model')\n", (9598, 9638), True, 'import tensorflow as tf\n'), ((10255, 10282), 'os.path.exists', 'os.path.exists', (['dataset_dir'], {}), '(dataset_dir)\n', (10269, 10282), False, 'import os\n'), ((10294, 10328), 'os.path.exists', 'os.path.exists', (['cropped_images_dir'], {}), '(cropped_images_dir)\n', (10308, 10328), False, 'import os\n'), ((10518, 10538), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10532, 10538), True, 'import numpy as np\n'), ((10543, 10567), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (10561, 10567), True, 'import tensorflow as tf\n'), ((10739, 10770), 'os.path.join', 'os.path.join', (['logdir', 'timestamp'], {}), '(logdir, timestamp)\n', (10751, 10770), False, 'import os\n'), ((10775, 10809), 'os.makedirs', 'os.makedirs', (['logdir'], {'exist_ok': '(True)'}), '(logdir, exist_ok=True)\n', (10786, 10809), False, 'import os\n'), ((10960, 11011), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (11004, 11011), True, 'import tensorflow as tf\n'), ((11793, 11830), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (11822, 11830), True, 'import tensorflow as tf\n'), ((12636, 12738), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', (['lr'], {'decay_steps': '(1)', 'decay_rate': '(0.97)', 'staircase': '(True)'}), '(lr, decay_steps=1,\n decay_rate=0.97, staircase=True)\n', (12682, 12738), True, 'import tensorflow as tf\n'), ((12760, 12828), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {'learning_rate': 'lr', 'rho': '(0.9)', 'momentum': '(0.9)'}), '(learning_rate=lr, rho=0.9, momentum=0.9)\n', (12787, 12828), True, 'import tensorflow as tf\n'), ((15333, 15357), 'tensorboard.plugins.hparams.api.hparams', 'hp.hparams', (['hparams_dict'], {}), 
'(hparams_dict)\n', (15343, 15357), True, 'from tensorboard.plugins.hparams import api as hp\n'), ((15841, 15886), 'classification.train_utils.recall_from_confusion_matrix', 'recall_from_confusion_matrix', (['cm', 'label_names'], {}), '(cm, label_names)\n', (15869, 15886), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((16117, 16190), 'visualization.plot_utils.plot_confusion_matrix', 'plot_utils.plot_confusion_matrix', (['cm'], {'classes': 'label_names', 'normalize': '(True)'}), '(cm, classes=label_names, normalize=True)\n', (16149, 16190), False, 'from visualization import plot_utils\n'), ((16316, 16385), 'tensorflow.summary.image', 'tf.summary.image', (['f"""confusion_matrix/{split}"""', 'cm_fig_img'], {'step': 'epoch'}), "(f'confusion_matrix/{split}', cm_fig_img, step=epoch)\n", (16332, 16385), True, 'import tensorflow as tf\n'), ((18945, 18974), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (18958, 18974), True, 'import tensorflow as tf\n'), ((22508, 22525), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (22519, 22525), False, 'from collections import defaultdict\n'), ((22568, 22585), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (22579, 22585), False, 'from collections import defaultdict\n'), ((22628, 22645), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (22639, 22645), False, 'from collections import defaultdict\n'), ((22705, 22722), 'tqdm.tqdm', 'tqdm.tqdm', (['loader'], {}), '(loader)\n', (22714, 22722), False, 'import tqdm\n'), ((25038, 25156), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""Trains classifier."""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description='Trains classifier.')\n", (25061, 
25156), False, 'import argparse\n'), ((4349, 4403), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(img_ds, labels_ds, img_files_ds)'], {}), '((img_ds, labels_ds, img_files_ds))\n', (4368, 4403), True, 'import tensorflow as tf\n'), ((6343, 6435), 'tensorflow.image.resize_with_pad', 'tf.image.resize_with_pad', (['img', 'img_size', 'img_size'], {'method': 'tf.image.ResizeMethod.BICUBIC'}), '(img, img_size, img_size, method=tf.image.\n ResizeMethod.BICUBIC)\n', (6367, 6435), True, 'import tensorflow as tf\n'), ((6484, 6520), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['img'], {}), '(img)\n', (6515, 6520), True, 'import tensorflow as tf\n'), ((6535, 6582), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['img'], {'max_delta': '(0.25)'}), '(img, max_delta=0.25)\n', (6561, 6582), True, 'import tensorflow as tf\n'), ((6597, 6650), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['img'], {'lower': '(0.75)', 'upper': '(1.25)'}), '(img, lower=0.75, upper=1.25)\n', (6621, 6650), True, 'import tensorflow as tf\n'), ((6665, 6720), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['img'], {'lower': '(0.75)', 'upper': '(1.25)'}), '(img, lower=0.75, upper=1.25)\n', (6691, 6720), True, 'import tensorflow as tf\n'), ((6903, 6995), 'tensorflow.image.resize_with_pad', 'tf.image.resize_with_pad', (['img', 'img_size', 'img_size'], {'method': 'tf.image.ResizeMethod.BICUBIC'}), '(img, img_size, img_size, method=tf.image.\n ResizeMethod.BICUBIC)\n', (6927, 6995), True, 'import tensorflow as tf\n'), ((9282, 9334), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout'], {'name': '"""top_dropout"""'}), "(dropout, name='top_dropout')\n", (9305, 9334), True, 'import tensorflow as tf\n'), ((10394, 10434), 'os.path.exists', 'os.path.exists', (['weight_by_detection_conf'], {}), '(weight_by_detection_conf)\n', (10408, 10434), False, 'import os\n'), ((10462, 10486), 
'numpy.random.randint', 'np.random.randint', (['(10000)'], {}), '(10000)\n', (10479, 10486), True, 'import numpy as np\n'), ((10917, 10947), 'json.dump', 'json.dump', (['params', 'f'], {'indent': '(1)'}), '(params, f, indent=1)\n', (10926, 10947), False, 'import json\n'), ((11041, 11092), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (11081, 11092), True, 'import tensorflow as tf\n'), ((12121, 12220), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), '(from_logits=True, reduction=tf.keras.\n losses.Reduction.NONE)\n', (12155, 12220), True, 'import tensorflow as tf\n'), ((12257, 12367), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), '(from_logits=True, reduction=\n tf.keras.losses.Reduction.NONE)\n', (12302, 12367), True, 'import tensorflow as tf\n'), ((13011, 13066), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""lr"""', 'optimizer.learning_rate', 'epoch'], {}), "('lr', optimizer.learning_rate, epoch)\n", (13028, 13066), True, 'import tensorflow as tf\n'), ((13598, 13645), 'classification.train_utils.prefix_all_keys', 'prefix_all_keys', (['train_metrics'], {'prefix': '"""train/"""'}), "(train_metrics, prefix='train/')\n", (13613, 13645), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((13993, 14036), 'classification.train_utils.prefix_all_keys', 'prefix_all_keys', (['val_metrics'], {'prefix': '"""val/"""'}), "(val_metrics, prefix='val/')\n", (14008, 14036), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, 
load_dataset_csv, prefix_all_keys\n'), ((15906, 15965), 'classification.train_utils.prefix_all_keys', 'prefix_all_keys', (['per_class_recall', 'f"""{split}/label_recall/"""'], {}), "(per_class_recall, f'{split}/label_recall/')\n", (15921, 15965), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((16036, 16075), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['metric', 'value', 'epoch'], {}), '(metric, value, epoch)\n', (16053, 16075), True, 'import tensorflow as tf\n'), ((17365, 17410), 'classification.train_utils.imgs_with_confidences', 'imgs_with_confidences', (['imgs_list', 'label_names'], {}), '(imgs_list, label_names)\n', (17386, 17410), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((17557, 17617), 'tensorflow.summary.image', 'tf.summary.image', (['f"""{label_name}/{tag}"""', 'fig_img'], {'step': 'epoch'}), "(f'{label_name}/{tag}', fig_img, step=epoch)\n", (17573, 17617), True, 'import tensorflow as tf\n'), ((19165, 19203), 'tensorflow.math.top_k', 'tf.math.top_k', (['confs'], {'k': '(3)', 'sorted': '(True)'}), '(confs, k=3, sorted=True)\n', (19178, 19203), True, 'import tensorflow as tf\n'), ((22263, 22286), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (22284, 22286), True, 'import tensorflow as tf\n'), ((22322, 22371), 'tensorflow.keras.metrics.SparseTopKCategoricalAccuracy', 'tf.keras.metrics.SparseTopKCategoricalAccuracy', (['k'], {}), '(k)\n', (22368, 22371), True, 'import tensorflow as tf\n'), ((3380, 3422), 'tensorflow.io.read_file', 'tf.io.read_file', (['(img_base_dir + os.sep + p)'], {}), '(img_base_dir + os.sep + p)\n', (3395, 3422), True, 'import tensorflow as tf\n'), ((3902, 3936), 'tensorflow.io.decode_jpeg', 'tf.io.decode_jpeg', (['img'], {'channels': '(3)'}), 
'(img, channels=3)\n', (3919, 3936), True, 'import tensorflow as tf\n'), ((7794, 7836), 'os.makedirs', 'os.makedirs', (['"""/mnt/tempds/"""'], {'exist_ok': '(True)'}), "('/mnt/tempds/', exist_ok=True)\n", (7805, 7836), False, 'import os\n'), ((10664, 10678), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10676, 10678), False, 'from datetime import datetime\n'), ((10861, 10896), 'os.path.join', 'os.path.join', (['logdir', '"""params.json"""'], {}), "(logdir, 'params.json')\n", (10873, 10896), False, 'import os\n'), ((11286, 11336), 'os.path.join', 'os.path.join', (['dataset_dir', '"""classification_ds.csv"""'], {}), "(dataset_dir, 'classification_ds.csv')\n", (11298, 11336), False, 'import os\n'), ((11368, 11413), 'os.path.join', 'os.path.join', (['dataset_dir', '"""label_index.json"""'], {}), "(dataset_dir, 'label_index.json')\n", (11380, 11413), False, 'import os\n'), ((11440, 11480), 'os.path.join', 'os.path.join', (['dataset_dir', '"""splits.json"""'], {}), "(dataset_dir, 'splits.json')\n", (11452, 11480), False, 'import os\n'), ((14294, 14334), 'os.path.join', 'os.path.join', (['logdir', 'f"""ckpt_{epoch}.h5"""'], {}), "(logdir, f'ckpt_{epoch}.h5')\n", (14306, 14334), False, 'import os\n'), ((14839, 14884), 'classification.train_utils.prefix_all_keys', 'prefix_all_keys', (['test_metrics'], {'prefix': '"""test/"""'}), "(test_metrics, prefix='test/')\n", (14854, 14884), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((16275, 16293), 'classification.train_utils.fig_to_img', 'fig_to_img', (['cm_fig'], {}), '(cm_fig)\n', (16285, 16293), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((19433, 19488), 'classification.train_utils.HeapItem', 'HeapItem', ([], {'priority': '(label_conf - top3_conf[1])', 
'data': 'data'}), '(priority=label_conf - top3_conf[1], data=data)\n', (19441, 19488), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((19501, 19540), 'classification.train_utils.add_to_heap', 'add_to_heap', (['tp_heaps[label]', 'item'], {'k': '(5)'}), '(tp_heaps[label], item, k=5)\n', (19512, 19540), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((19659, 19714), 'classification.train_utils.HeapItem', 'HeapItem', ([], {'priority': '(top3_conf[0] - label_conf)', 'data': 'data'}), '(priority=top3_conf[0] - label_conf, data=data)\n', (19667, 19714), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((19727, 19774), 'classification.train_utils.add_to_heap', 'add_to_heap', (['fp_heaps[top3_preds[0]]', 'item'], {'k': '(5)'}), '(fp_heaps[top3_preds[0]], item, k=5)\n', (19738, 19774), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((19787, 19826), 'classification.train_utils.add_to_heap', 'add_to_heap', (['fn_heaps[label]', 'item'], {'k': '(5)'}), '(fn_heaps[label], item, k=5)\n', (19798, 19826), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((23063, 23113), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'watch_accessed_variables': 'is_train'}), '(watch_accessed_variables=is_train)\n', (23078, 23113), True, 'import tensorflow as tf\n'), ((24574, 24600), 'numpy.concatenate', 'np.concatenate', (['all_labels'], {}), '(all_labels)\n', (24588, 24600), 
True, 'import numpy as np\n'), ((24609, 24634), 'numpy.concatenate', 'np.concatenate', (['all_preds'], {}), '(all_preds)\n', (24623, 24634), True, 'import numpy as np\n'), ((7768, 7780), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7778, 7780), False, 'import uuid\n'), ((9439, 9537), 'tensorflow.keras.initializers.VarianceScaling', 'tf.keras.initializers.VarianceScaling', ([], {'scale': '(1.0 / 3.0)', 'mode': '"""fan_out"""', 'distribution': '"""uniform"""'}), "(scale=1.0 / 3.0, mode='fan_out',\n distribution='uniform')\n", (9476, 9537), True, 'import tensorflow as tf\n'), ((17515, 17530), 'classification.train_utils.fig_to_img', 'fig_to_img', (['fig'], {}), '(fig)\n', (17525, 17530), False, 'from classification.train_utils import HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img, imgs_with_confidences, load_dataset_csv, prefix_all_keys\n'), ((23620, 23645), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['loss'], {}), '(loss)\n', (23639, 23645), True, 'import tensorflow as tf\n'), ((23864, 23895), 'tensorflow.math.argmax', 'tf.math.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (23878, 23895), True, 'import tensorflow as tf\n'), ((23802, 23818), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), '(v)\n', (23815, 23818), True, 'import tensorflow as tf\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils.py."""
from typing import Optional
import unittest
from pyreach import constraints
from pyreach.common.python import types_gen
from pyreach.impl import constraints_impl as impl
from pyreach.impl import test_data
class TestConstraintsImpl(unittest.TestCase):
def test_constraints_impl(self) -> None:
constraints_device = impl.ConstraintsDevice()
try:
constraints_device.start()
self.assertIsNone(constraints_device.get())
constraints_device.enqueue_device_data(
types_gen.DeviceData(
device_type="settings-engine",
data_type="key-value",
key="workcell_constraints.json",
value=test_data.get_workcell_constraints_json()))
constraints_device.wait(1)
cs: Optional[impl.ConstraintsImpl] = constraints_device.get()
self.assertIsNotNone(cs)
assert cs
self.assertIsNone(cs.get_joint_limits(""))
interactables = cs.get_interactables()
self.assertEqual(len(interactables), 2)
self.assertEqual(interactables[0].name, "LeftBox")
left_geometry = interactables[0].geometry
self.assertIsInstance(left_geometry, constraints.Box)
assert isinstance(left_geometry, constraints.Box)
self.assertEqual(left_geometry.pose.as_tuple(),
(-0.246944084763527, -0.705296516418457,
-0.168291628360748, 0.0, 0.0, 0.0))
self.assertEqual(
left_geometry.scale.as_tuple(),
(0.379999995231628, 0.259999990463257, 0.200000002980232))
self.assertEqual(interactables[1].name, "RightBox")
right_geometry = interactables[1].geometry
self.assertIsInstance(right_geometry, constraints.Box)
assert isinstance(right_geometry, constraints.Box)
self.assertEqual(right_geometry.pose.as_tuple(),
(0.254177570343018, -0.711709439754486,
-0.174813330173492, -6.585575275907331e-05,
-0.006104793682704136, -0.021574200980967757))
self.assertEqual(
right_geometry.scale.as_tuple(),
(0.370000004768372, 0.300000011920929, 0.200000002980232))
finally:
constraints_device.close()
def test_robot_constraints_impl(self) -> None:
constraints_device = impl.ConstraintsDevice("")
try:
constraints_device.start()
self.assertIsNone(constraints_device.get())
constraints_device.enqueue_device_data(
types_gen.DeviceData(
device_type="settings-engine",
data_type="key-value",
key="workcell_constraints.json",
value=test_data.get_workcell_constraints_json()))
self.assertIsNone(constraints_device.get())
constraints_device.enqueue_device_data(
types_gen.DeviceData(
device_type="robot",
data_type="key-value",
key="robot_constraints.json",
value=test_data.get_robot_constraints_json()))
constraints_device.wait(1)
cs: Optional[impl.ConstraintsImpl] = constraints_device.get()
self.assertIsNotNone(cs)
assert cs
joints = cs.get_joint_limits("")
self.assertIsNotNone(joints)
assert joints is not None
self.assertEqual(len(joints), 6)
self.assertEqual(joints[0].min, -6.335545214359173)
self.assertEqual(joints[0].max, 6.335545187179586)
self.assertEqual(joints[1].min, -6.335545214359173)
self.assertEqual(joints[1].max, 6.335545187179586)
self.assertEqual(joints[2].min, -6.335545214359173)
self.assertEqual(joints[2].max, 6.335545187179586)
self.assertEqual(joints[3].min, -6.335545214359173)
self.assertEqual(joints[3].max, 6.335545187179586)
self.assertEqual(joints[4].min, -6.335545214359173)
self.assertEqual(joints[4].max, 6.335545187179586)
self.assertEqual(joints[5].min, -6.335545214359173)
self.assertEqual(joints[5].max, 6.335545187179586)
self.assertEqual(len(cs.get_interactables()), 2)
finally:
constraints_device.close()
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"pyreach.impl.test_data.get_workcell_constraints_json",
"pyreach.impl.constraints_impl.ConstraintsDevice",
"pyreach.impl.test_data.get_robot_constraints_json"
] | [((4688, 4703), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4701, 4703), False, 'import unittest\n'), ((925, 949), 'pyreach.impl.constraints_impl.ConstraintsDevice', 'impl.ConstraintsDevice', ([], {}), '()\n', (947, 949), True, 'from pyreach.impl import constraints_impl as impl\n'), ((2877, 2903), 'pyreach.impl.constraints_impl.ConstraintsDevice', 'impl.ConstraintsDevice', (['""""""'], {}), "('')\n", (2899, 2903), True, 'from pyreach.impl import constraints_impl as impl\n'), ((1269, 1310), 'pyreach.impl.test_data.get_workcell_constraints_json', 'test_data.get_workcell_constraints_json', ([], {}), '()\n', (1308, 1310), False, 'from pyreach.impl import test_data\n'), ((3223, 3264), 'pyreach.impl.test_data.get_workcell_constraints_json', 'test_data.get_workcell_constraints_json', ([], {}), '()\n', (3262, 3264), False, 'from pyreach.impl import test_data\n'), ((3531, 3569), 'pyreach.impl.test_data.get_robot_constraints_json', 'test_data.get_robot_constraints_json', ([], {}), '()\n', (3567, 3569), False, 'from pyreach.impl import test_data\n')] |
from sys import stdin
from collections import defaultdict, deque
MAX_COLORS = 51
def load_num():
return int(stdin.readline())
def load_pair():
return tuple(map(int, stdin.readline().split()))
def load_case():
nbeads = load_num()
return [load_pair() for b in range(nbeads)]
def build_necklace(beads):
"""Construct an euler circuit in the graph defined by the beads"""
# For a graph to have an euler circuit all vertices must have
# even degree. (Plus 0 or 2 odd vertices) Init and ckeck degree
amatrix = [defaultdict(int) for _ in range(MAX_COLORS)]
degree = defaultdict(int)
for b in beads:
amatrix[b[0]][b[1]] += 1
amatrix[b[1]][b[0]] += 1
degree[b[0]] +=1
degree[b[1]] +=1
for k, v in degree.items():
if v%2 != 0:
return None
# Create necklace using Fleury's algorithm
def get_next_bead(color):
""" """
s_color, s_degree = 0, 0
for col, deg in amatrix[color].items():
if deg > s_degree:
s_color, s_degree = col, deg
if s_degree>0:
amatrix[color][s_color] -= 1
amatrix[s_color][color] -= 1
return (color, s_color)
else:
return None
# Start construction
nxt = get_next_bead(beads[0][1])
necklace = deque([nxt])
while True:
nxt = get_next_bead(necklace[-1][1])
if nxt:
necklace.append(nxt)
elif len(beads) != len(necklace):
# Created a closed cycle.move last segment to the start
prev = necklace.pop()
necklace.appendleft(prev)
else:
break
return necklace
if __name__ == '__main__':
ncases = load_num()
for c in range(ncases):
beads = load_case()
necklace = build_necklace(beads)
# Print result
print("Case #{}".format(c+1))
if necklace:
# Print all necklace beads together for faster IO (damn timelimits)
# Almost a third of the time is wasted on IO
necklace_str = ""
for b in necklace:
necklace_str += "{} {}\n".format(b[0], b[1])
else:
necklace_str = "some beads may be lost\n"
if c+1 == ncases:
print(necklace_str[:-1])
else:
print(necklace_str)
| [
"sys.stdin.readline",
"collections.deque",
"collections.defaultdict"
] | [((601, 617), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (612, 617), False, 'from collections import defaultdict, deque\n'), ((1354, 1366), 'collections.deque', 'deque', (['[nxt]'], {}), '([nxt])\n', (1359, 1366), False, 'from collections import defaultdict, deque\n'), ((115, 131), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (129, 131), False, 'from sys import stdin\n'), ((543, 559), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (554, 559), False, 'from collections import defaultdict, deque\n'), ((177, 193), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (191, 193), False, 'from sys import stdin\n')] |
import spacy
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from string import punctuation
from tqdm import tqdm
from rank_bm25 import BM25Okapi
import time
from collections import defaultdict
from heapq import nlargest
import nltk
nltk.download('punkt')
nltk.download('stopwords')
from operator import itemgetter
from .ProcessFiles import ProcessFiles
from src.Entity.ChatResponse import ChatResponse
from src.Entity.Files import Files
from .Thread import Thread
from .Resume import Resume
from .Tools import Tools
class Process:
def initProcess(database, process):
action = process['action']
print(action)
text = process['request_query']
file = process['file']
user_id = process['user_id']
print(user_id)
hash = Tools.encodeBase64(text)
file = Files.getFiles(database, file, user_id)
if len(file) == 0:
return {"status": "erro", "message": "Não achei nenhum arquivo cadastrado"}
process['type'] = file[0]['type']
process['hash'] = hash
chat_response = []
if action == 'query':
chat_response = ChatResponse.updateChatResponse(database, process)
if len(chat_response) > 0:
# print("chat_response")
# print(chat_response)
response = chat_response[0]
return response
else:
if action == "query":
db = database
Thread(db, process).start()
response = {"status": "learning", "message": "Ainda não sei a resposta, estou aprendendo...Pergunte - me novamente em instantes"}
return response
elif action == "resume":
resume = Resume.resumeFile(process)
# if text:
# resume = json.dumps(resume, indent = 4)
# insert = database.execute('INSERT INTO explain.chat_response (hash, text, response) VALUES (%s,%s, %s)', (hash, text, resume))
# if(insert):
# return resume
# else:
# return "Erro ao inserir texto"
return resume
else:
return "Não reconheço essa ação"
| [
"src.Entity.Files.Files.getFiles",
"src.Entity.ChatResponse.ChatResponse.updateChatResponse",
"nltk.download"
] | [((337, 359), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (350, 359), False, 'import nltk\n'), ((360, 386), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (373, 386), False, 'import nltk\n'), ((922, 961), 'src.Entity.Files.Files.getFiles', 'Files.getFiles', (['database', 'file', 'user_id'], {}), '(database, file, user_id)\n', (936, 961), False, 'from src.Entity.Files import Files\n'), ((1241, 1291), 'src.Entity.ChatResponse.ChatResponse.updateChatResponse', 'ChatResponse.updateChatResponse', (['database', 'process'], {}), '(database, process)\n', (1272, 1291), False, 'from src.Entity.ChatResponse import ChatResponse\n')] |
import csv
from pylab import *
import matplotlib.pyplot as plt
count1=[]
req_data=[]
def get_request (str):
f=open('weblog.txt','r')
pdata=[]
req_data1=[]
data=csv.reader(f,delimiter=' ')
for row in data:
row[3]=row[3][1:]
row[3]=row[3].split(':')
row[3][1:4]=[':'.join(row[3][1:4])]
row[5]=row[5].split('/')
row[5][0]=row[5][0].split(' ')
#print(row[5][0][1])
row[4]=row[4][:5]
row[9]=row[9].split(' ')
row[9][1:15]=[':'.join(row[9][1:15])]
if row[5][0][1][:4].lower() == 'www.':
row[5][0][1]=row[5][0][1][4:]
pdata.append(row)
#for term in pdata:
# print(term)
for row in pdata:
#print(row[6])
item=row[6]
if row[5][0][1]==str:
req_data1.append(item)
if item in req_data:
continue
else:
if (row[5][0][1]==str):
req_data.append(row[6])
#print(ipdata1)
for row in req_data:
count1.append(req_data1.count(row))
print(count1)
f.close()
return count1;
def main():
count=[]
count=get_request('www.kinneryandrajan.com')
'''#this is for non bar plot
plt.ylabel('WWW.TWIBUZZ.COM')
#plt.xlabel("No of Hits by Different IP's")
#plt.xticks(count,ipdata)
plt.plot(count,'g*-',label='Hit Count', linewidth=2)''
#this is bar graph
#plt.xticks(count,ipdata,rotation='vertical')'''
'''import pylab as p
fig = p.figure()
ax = fig.add_subplot(1,1,1)
N=len(count)
ind=range(len(count))
ax.bar(ind, count, facecolor='blue', ecolor='black')
ax.set_ylabel('No of Hits')
ax.set_title("Hit count of Different IP's on www.twibuzz.com",fontstyle='italic')
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
majorLocator = MultipleLocator(1)
ax.xaxis.set_major_locator(majorLocator)
ax.set_xticklabels(req_data,rotation='vertical')
#ax.xaxis.set_linespacing(4)
#fig.autofmt_xdate()
p.show()
plt.bar(range(len(count)),count,align="center",width=0.5,alpha=0.5)
plt.ylabel('WWW.TWIBUZZ.COM')
plt.xlabel('No of Hits')
plt.set_xticklabels(count)
def autolabel(rects):
for rect in rects:
height = rect
plt.text(1.05*height, '%d'%int(height),
ha='center', va='bottom')
plt.show()
'''
figure(1, figsize=(6,6))
ax = axes([0.1, 0.1, 0.8, 0.8])
#explode=(1, 0.05, 1)
pie(count, labels=req_data,autopct='%1.1f%%', shadow=True, startangle=90)
title('Type of Request to www.kinneryandrajan.com', bbox={'facecolor':'0.8', 'pad':5})
show()
pass
if __name__ == '__main__':
main()
| [
"csv.reader"
] | [((205, 233), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""" """'}), "(f, delimiter=' ')\n", (215, 233), False, 'import csv\n')] |
## 2018/08/17 Initial
## 2018/08/18 Add CSV format
## 2018/08/23 Add def get_stockidxname_SeymourExcel(),def get_stockidx_SeymourExcel()
## def get_all_stockidx_SeymourExcel() from test_crawl.py
## 2018/09/06 Add value of column 'PBR' in def readExcel()
## 2018/10/27 Add exception handling in def readExcel(self,dir_execlfile)
## 2019/07/20 Add get_all_stockidxname_SeymourExcel, get_stockname_SeymourExcel and get_all_stockname_SeymourExcel
#################################################################
import xlrd
import xlwt
import xlutils.copy
import csv
import os
from logger import logger
class ExcelRW:
def readExcel(self,dir_execlfile):
try:
data = xlrd.open_workbook(dir_execlfile) # 打開一個Excel表格
table = data.sheets()[0] # 打開Excel表格的第一張表
nrows = table.nrows # 獲取每張表的行數
except FileNotFoundError as fnf_error:
print(fnf_error)
list_rtu_row_values=[]
for row in range(nrows): # 遍歷每一行
#print(table.row_values(row)) # 獲取每行的值
#if table.row_values(row)[11] != "合理價格": # 排除第一行後,獲取每行合理價格的值
if table.row_values(row)[10] != "價值比": # 排除第一行後,獲取每行價格比的值
#print(str(table.row_values(row)[1]).strip('.0'), table.row_values(row)[2], table.row_values(row)[11])
'''
list_row_values=[str(table.row_values(row)[1])[0:4], table.row_values(row)[2],
table.row_values(row)[10],#column "價值比"
table.row_values(row)[4]]#column 'PBR'
'''
#2019/02/16 Add 現金殖利率 by 低波固收操作模式
#2019/02/19 Correct from 現金殖利率 to 現金股利
#list_row_values=[str(table.row_values(row)[1])[0:4], table.row_values(row)[2],
#2019/07/20 Cause 低波固收追蹤股 contnet of '代碼' column excexx 4 digits
list_row_values=[str(table.row_values(row)[1]), table.row_values(row)[2],
table.row_values(row)[10],#column "價值比"
table.row_values(row)[4],#column 'PBR'
#table.row_values(row)[8]]#column '現金殖利率'
table.row_values(row)[7]]#column '現金股利'
list_rtu_row_values.append(list_row_values)
#print(list_rtu_row_values,list_row_values)
return list_rtu_row_values
def writeCSVbyTable(self,dir_csvfile,list_table):
# 開啟輸出的 CSV 檔案
with open(dir_csvfile, 'w', newline='') as csvfile:
# 建立 CSV 檔寫入器
writer = csv.writer(csvfile, delimiter=',')
# 寫入二維表格
writer.writerows(list_table)
def writeCSVbyRow(self,dir_csvfile,list_row):
# 開啟輸出的 CSV 檔案
with open(dir_csvfile, 'w', newline=',') as csvfile:
# 建立 CSV 檔寫入器
writer = csv.writer(csvfile, delimiter=' ')
# 寫入一列資料
writer.writerow(list_row)
def get_stockidxname_SeymourExcel(self,dirnamelog,excelfname):
#print('將讀取Excel file:', excelfname, '的資料')
logger.info('Read Excel file::{0}'.format(excelfname))
# Excel file including path
dirlog_ExcelFile=os.path.join(dirnamelog,excelfname)
list_row_value_price=self.readExcel(dirlog_ExcelFile)
list_rtu_stockidxname=[]
# Get stock idx and company name from Excel files
for list_row_value in list_row_value_price:
list_stockidx_name=[list_row_value[0],list_row_value[1]]
list_rtu_stockidxname.append(list_stockidx_name)
return list_rtu_stockidxname
def get_all_stockidxname_SeymourExcel(self,dir_log,list_excel_files):
list_rtu_all_stockidx_stockidxname=[]
for excel_file in list_excel_files:
list_stockidx_stockidxname = self.get_stockidxname_SeymourExcel(dir_log,excel_file)
list_rtu_all_stockidx_stockidxname.extend(list_stockidx_stockidxname)
return list_rtu_all_stockidx_stockidxname
def get_stockidx_SeymourExcel(self,dirnamelog,excelfname):
print('將讀取Excel file:', excelfname, '的資料')
#logging.error('將讀取Excel file: {}'.format(excelfname))
# Excel file including path
dirlog_ExcelFile=os.path.join(dirnamelog,excelfname)
list_row_value_price=self.readExcel(dirlog_ExcelFile)
list_rtu_stockidx=[]
# Get stock idx from Excel files
for list_row_value in list_row_value_price:
list_stockidx=[list_row_value[0]]
list_rtu_stockidx.append(list_stockidx)
return list_rtu_stockidx
def get_all_stockidx_SeymourExcel(self,dir_log,list_excel_files):
list_rtu_all_stockidx=[]
for excel_file in list_excel_files:
list_stockidx=self.get_stockidx_SeymourExcel(dir_log,excel_file)
list_rtu_all_stockidx.extend(list_stockidx)
return list_rtu_all_stockidx
def get_stockname_SeymourExcel(self,dirnamelog,excelfname):
print('將讀取Excel file:', excelfname, '的資料')
# Excel file including path
dirlog_ExcelFile=os.path.join(dirnamelog,excelfname)
list_row_value_price=self.readExcel(dirlog_ExcelFile)
list_rtu_stockidxname=[]
# Get company name from Excel files
for list_row_value in list_row_value_price:
list_stockidx_name=[list_row_value[1]]
list_rtu_stockidxname.append(list_stockidx_name)
return list_rtu_stockidxname
def get_all_stockname_SeymourExcel(self,dir_log,list_excel_files):
list_rtu_all_stockname=[]
for excel_file in list_excel_files:
list_stockname=self.get_stockname_SeymourExcel(dir_log,excel_file)
list_rtu_all_stockname.extend(list_stockname)
return list_rtu_all_stockname | [
"xlrd.open_workbook",
"csv.writer",
"os.path.join"
] | [((3335, 3371), 'os.path.join', 'os.path.join', (['dirnamelog', 'excelfname'], {}), '(dirnamelog, excelfname)\n', (3347, 3371), False, 'import os\n'), ((4395, 4431), 'os.path.join', 'os.path.join', (['dirnamelog', 'excelfname'], {}), '(dirnamelog, excelfname)\n', (4407, 4431), False, 'import os\n'), ((5257, 5293), 'os.path.join', 'os.path.join', (['dirnamelog', 'excelfname'], {}), '(dirnamelog, excelfname)\n', (5269, 5293), False, 'import os\n'), ((710, 743), 'xlrd.open_workbook', 'xlrd.open_workbook', (['dir_execlfile'], {}), '(dir_execlfile)\n', (728, 743), False, 'import xlrd\n'), ((2717, 2751), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (2727, 2751), False, 'import csv\n'), ((2996, 3030), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""" """'}), "(csvfile, delimiter=' ')\n", (3006, 3030), False, 'import csv\n')] |
#
# Test for the standard lead acid parameters
#
import pybamm
from tests import get_discretisation_for_testing
import unittest
class TestStandardParametersLeadAcid(unittest.TestCase):
def test_scipy_constants(self):
param = pybamm.LeadAcidParameters()
self.assertAlmostEqual(param.R.evaluate(), 8.314, places=3)
self.assertAlmostEqual(param.F.evaluate(), 96485, places=0)
def test_print_parameters(self):
parameters = pybamm.LeadAcidParameters()
parameter_values = pybamm.lead_acid.BaseModel().default_parameter_values
output_file = "lead_acid_parameters.txt"
parameter_values.print_parameters(parameters, output_file)
# test print_parameters with dict and without C-rate
del parameter_values["Nominal cell capacity [A.h]"]
parameters = {"C_e": parameters.C_e, "sigma_n": parameters.sigma_n}
parameter_values.print_parameters(parameters)
def test_parameters_defaults_lead_acid(self):
# Load parameters to be tested
parameters = pybamm.LeadAcidParameters()
parameter_values = pybamm.lead_acid.BaseModel().default_parameter_values
param_eval = parameter_values.print_parameters(parameters)
param_eval = {k: v[0] for k, v in param_eval.items()}
# Diffusional C-rate should be smaller than C-rate
self.assertLess(param_eval["C_e"], param_eval["C_rate"])
# Dimensionless electrode conductivities should be large
self.assertGreater(
parameter_values.evaluate(parameters.sigma_n(parameters.T_ref)), 10
)
self.assertGreater(
parameter_values.evaluate(parameters.sigma_p(parameters.T_ref)), 10
)
# Rescaled dimensionless electrode conductivities should still be large
self.assertGreater(
parameter_values.evaluate(parameters.sigma_n_prime(parameters.T_ref)), 10
)
self.assertGreater(
parameter_values.evaluate(parameters.sigma_p_prime(parameters.T_ref)), 10
)
# Dimensionless double-layer capacity should be small
self.assertLess(param_eval["C_dl_n"], 1e-3)
self.assertLess(param_eval["C_dl_p"], 1e-3)
# Volume change positive in negative electrode and negative in positive
# electrode
self.assertLess(param_eval["DeltaVsurf_n"], 0)
self.assertGreater(param_eval["DeltaVsurf_p"], 0)
def test_concatenated_parameters(self):
# create
param = pybamm.LeadAcidParameters()
s_param = param.s_plus_S
self.assertIsInstance(s_param, pybamm.Concatenation)
self.assertEqual(
s_param.domain, ["negative electrode", "separator", "positive electrode"]
)
# process parameters and discretise
parameter_values = pybamm.ParameterValues(
chemistry=pybamm.parameter_sets.Sulzer2019
)
disc = get_discretisation_for_testing()
processed_s = disc.process_symbol(parameter_values.process_symbol(s_param))
# test output
combined_submeshes = disc.mesh.combine_submeshes(
"negative electrode", "separator", "positive electrode"
)
self.assertEqual(processed_s.shape, (combined_submeshes.npts, 1))
def test_current_functions(self):
# create current functions
param = pybamm.LeadAcidParameters()
dimensional_current_density = param.dimensional_current_density_with_time
dimensionless_current_density = param.current_with_time
# process
parameter_values = pybamm.ParameterValues(
{
"Electrode height [m]": 0.1,
"Electrode width [m]": 0.1,
"Negative electrode thickness [m]": 1,
"Separator thickness [m]": 1,
"Positive electrode thickness [m]": 1,
"Typical electrolyte concentration [mol.m-3]": 1,
"Number of electrodes connected in parallel to make a cell": 8,
"Typical current [A]": 2,
"Current function [A]": 2,
}
)
dimensional_current_density_eval = parameter_values.process_symbol(
dimensional_current_density
)
dimensionless_current_density_eval = parameter_values.process_symbol(
dimensionless_current_density
)
self.assertAlmostEqual(
dimensional_current_density_eval.evaluate(t=3), 2 / (8 * 0.1 * 0.1)
)
self.assertEqual(dimensionless_current_density_eval.evaluate(t=3), 1)
def test_thermal_parameters(self):
values = pybamm.lead_acid.BaseModel().default_parameter_values
param = pybamm.LeadAcidParameters()
T = 1 # dummy temperature as the values are constant
# Density
self.assertAlmostEqual(values.evaluate(param.rho_cn(T)), 0.8810, places=2)
self.assertAlmostEqual(values.evaluate(param.rho_n(T)), 0.8810, places=2)
self.assertAlmostEqual(values.evaluate(param.rho_s(T)), 0.7053, places=2)
self.assertAlmostEqual(values.evaluate(param.rho_p(T)), 1.4393, places=2)
self.assertAlmostEqual(values.evaluate(param.rho_cp(T)), 1.4393, places=2)
self.assertAlmostEqual(values.evaluate(param.rho(T)), 1.7102, places=2)
# Thermal conductivity
self.assertAlmostEqual(values.evaluate(param.lambda_cn(T)), 1.6963, places=2)
self.assertAlmostEqual(values.evaluate(param.lambda_n(T)), 1.6963, places=2)
self.assertAlmostEqual(values.evaluate(param.lambda_s(T)), 0.0019, places=2)
self.assertAlmostEqual(values.evaluate(param.lambda_p(T)), 1.6963, places=2)
self.assertAlmostEqual(values.evaluate(param.lambda_cp(T)), 1.6963, places=2)
def test_functions_lead_acid(self):
# Load parameters to be tested
param = pybamm.LeadAcidParameters()
parameters = {
"D_e_1": param.D_e(pybamm.Scalar(1), pybamm.Scalar(0)),
"kappa_e_0": param.kappa_e(pybamm.Scalar(0), pybamm.Scalar(0)),
"chi_1": param.chi(pybamm.Scalar(1), pybamm.Scalar(0)),
"chi_0.5": param.chi(pybamm.Scalar(0.5), pybamm.Scalar(0)),
"U_n_1": param.U_n(pybamm.Scalar(1), pybamm.Scalar(0)),
"U_n_0.5": param.U_n(pybamm.Scalar(0.5), pybamm.Scalar(0)),
"U_p_1": param.U_p(pybamm.Scalar(1), pybamm.Scalar(0)),
"U_p_0.5": param.U_p(pybamm.Scalar(0.5), pybamm.Scalar(0)),
}
# Process
parameter_values = pybamm.ParameterValues(
chemistry=pybamm.parameter_sets.Sulzer2019
)
param_eval = parameter_values.print_parameters(parameters)
param_eval = {k: v[0] for k, v in param_eval.items()}
# Known values for dimensionless functions
self.assertEqual(param_eval["D_e_1"], 1)
self.assertEqual(param_eval["kappa_e_0"], 0)
# Known monotonicity for dimensionless functions
self.assertGreater(param_eval["chi_1"], param_eval["chi_0.5"])
self.assertLess(param_eval["U_n_1"], param_eval["U_n_0.5"])
self.assertGreater(param_eval["U_p_1"], param_eval["U_p_0.5"])
def test_update_initial_state_of_charge(self):
# Load parameters to be tested
parameters = pybamm.LeadAcidParameters()
parameter_values = pybamm.lead_acid.BaseModel().default_parameter_values
param_eval = parameter_values.print_parameters(parameters)
param_eval = {k: v[0] for k, v in param_eval.items()}
# Update initial state of charge
parameter_values.update({"Initial State of Charge": 0.2})
param_eval_update = parameter_values.print_parameters(parameters)
param_eval_update = {k: v[0] for k, v in param_eval_update.items()}
# Test that relevant parameters have changed as expected
self.assertLess(param_eval_update["q_init"], param_eval["q_init"])
self.assertLess(param_eval_update["c_e_init"], param_eval["c_e_init"])
self.assertLess(
param_eval_update["epsilon_n_init"], param_eval["epsilon_n_init"]
)
self.assertEqual(
param_eval_update["epsilon_s_init"], param_eval["epsilon_s_init"]
)
self.assertLess(
param_eval_update["epsilon_p_init"], param_eval["epsilon_p_init"]
)
self.assertGreater(
param_eval_update["curlyU_n_init"], param_eval["curlyU_n_init"]
)
self.assertGreater(
param_eval_update["curlyU_p_init"], param_eval["curlyU_p_init"]
)
if __name__ == "__main__":
    # Manual runner for this test module.
    print("Add -v for more debug output")
    import sys

    verbose = "-v" in sys.argv
    if verbose:
        debug = True
        pybamm.settings.debug_mode = True
    unittest.main()
| [
"pybamm.Scalar",
"pybamm.LeadAcidParameters",
"unittest.main",
"pybamm.ParameterValues",
"pybamm.lead_acid.BaseModel",
"tests.get_discretisation_for_testing"
] | [((8738, 8753), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8751, 8753), False, 'import unittest\n'), ((240, 267), 'pybamm.LeadAcidParameters', 'pybamm.LeadAcidParameters', ([], {}), '()\n', (265, 267), False, 'import pybamm\n'), ((463, 490), 'pybamm.LeadAcidParameters', 'pybamm.LeadAcidParameters', ([], {}), '()\n', (488, 490), False, 'import pybamm\n'), ((1050, 1077), 'pybamm.LeadAcidParameters', 'pybamm.LeadAcidParameters', ([], {}), '()\n', (1075, 1077), False, 'import pybamm\n'), ((2501, 2528), 'pybamm.LeadAcidParameters', 'pybamm.LeadAcidParameters', ([], {}), '()\n', (2526, 2528), False, 'import pybamm\n'), ((2817, 2883), 'pybamm.ParameterValues', 'pybamm.ParameterValues', ([], {'chemistry': 'pybamm.parameter_sets.Sulzer2019'}), '(chemistry=pybamm.parameter_sets.Sulzer2019)\n', (2839, 2883), False, 'import pybamm\n'), ((2921, 2953), 'tests.get_discretisation_for_testing', 'get_discretisation_for_testing', ([], {}), '()\n', (2951, 2953), False, 'from tests import get_discretisation_for_testing\n'), ((3361, 3388), 'pybamm.LeadAcidParameters', 'pybamm.LeadAcidParameters', ([], {}), '()\n', (3386, 3388), False, 'import pybamm\n'), ((3581, 3958), 'pybamm.ParameterValues', 'pybamm.ParameterValues', (["{'Electrode height [m]': 0.1, 'Electrode width [m]': 0.1,\n 'Negative electrode thickness [m]': 1, 'Separator thickness [m]': 1,\n 'Positive electrode thickness [m]': 1,\n 'Typical electrolyte concentration [mol.m-3]': 1,\n 'Number of electrodes connected in parallel to make a cell': 8,\n 'Typical current [A]': 2, 'Current function [A]': 2}"], {}), "({'Electrode height [m]': 0.1, 'Electrode width [m]':\n 0.1, 'Negative electrode thickness [m]': 1, 'Separator thickness [m]': \n 1, 'Positive electrode thickness [m]': 1,\n 'Typical electrolyte concentration [mol.m-3]': 1,\n 'Number of electrodes connected in parallel to make a cell': 8,\n 'Typical current [A]': 2, 'Current function [A]': 2})\n", (3603, 3958), False, 'import pybamm\n'), ((4702, 4729), 
'pybamm.LeadAcidParameters', 'pybamm.LeadAcidParameters', ([], {}), '()\n', (4727, 4729), False, 'import pybamm\n'), ((5858, 5885), 'pybamm.LeadAcidParameters', 'pybamm.LeadAcidParameters', ([], {}), '()\n', (5883, 5885), False, 'import pybamm\n'), ((6528, 6594), 'pybamm.ParameterValues', 'pybamm.ParameterValues', ([], {'chemistry': 'pybamm.parameter_sets.Sulzer2019'}), '(chemistry=pybamm.parameter_sets.Sulzer2019)\n', (6550, 6594), False, 'import pybamm\n'), ((7279, 7306), 'pybamm.LeadAcidParameters', 'pybamm.LeadAcidParameters', ([], {}), '()\n', (7304, 7306), False, 'import pybamm\n'), ((518, 546), 'pybamm.lead_acid.BaseModel', 'pybamm.lead_acid.BaseModel', ([], {}), '()\n', (544, 546), False, 'import pybamm\n'), ((1105, 1133), 'pybamm.lead_acid.BaseModel', 'pybamm.lead_acid.BaseModel', ([], {}), '()\n', (1131, 1133), False, 'import pybamm\n'), ((4632, 4660), 'pybamm.lead_acid.BaseModel', 'pybamm.lead_acid.BaseModel', ([], {}), '()\n', (4658, 4660), False, 'import pybamm\n'), ((7334, 7362), 'pybamm.lead_acid.BaseModel', 'pybamm.lead_acid.BaseModel', ([], {}), '()\n', (7360, 7362), False, 'import pybamm\n'), ((5940, 5956), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (5953, 5956), False, 'import pybamm\n'), ((5958, 5974), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (5971, 5974), False, 'import pybamm\n'), ((6016, 6032), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (6029, 6032), False, 'import pybamm\n'), ((6034, 6050), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (6047, 6050), False, 'import pybamm\n'), ((6084, 6100), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (6097, 6100), False, 'import pybamm\n'), ((6102, 6118), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (6115, 6118), False, 'import pybamm\n'), ((6154, 6172), 'pybamm.Scalar', 'pybamm.Scalar', (['(0.5)'], {}), '(0.5)\n', (6167, 6172), False, 'import pybamm\n'), ((6174, 6190), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), 
'(0)\n', (6187, 6190), False, 'import pybamm\n'), ((6224, 6240), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (6237, 6240), False, 'import pybamm\n'), ((6242, 6258), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (6255, 6258), False, 'import pybamm\n'), ((6294, 6312), 'pybamm.Scalar', 'pybamm.Scalar', (['(0.5)'], {}), '(0.5)\n', (6307, 6312), False, 'import pybamm\n'), ((6314, 6330), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (6327, 6330), False, 'import pybamm\n'), ((6364, 6380), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (6377, 6380), False, 'import pybamm\n'), ((6382, 6398), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (6395, 6398), False, 'import pybamm\n'), ((6434, 6452), 'pybamm.Scalar', 'pybamm.Scalar', (['(0.5)'], {}), '(0.5)\n', (6447, 6452), False, 'import pybamm\n'), ((6454, 6470), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (6467, 6470), False, 'import pybamm\n')] |
#!/usr/bin/env python
# ======================================================================
# This software is distributed under the MIT license reproduced below:
#
# Copyright (C) 2009-2014 <NAME>' <<EMAIL>>
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Giampaolo Rodola' not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# <NAME>' DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT <NAME>' BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
import sys
try:
from setuptools import Extension, setup
except ImportError:
from distutils.core import Extension, setup
NAME = 'pysendfile'
VERSION = '2.0.1'

# Refuse to build on interpreters older than 2.5.
if sys.version_info < (2, 5):
    sys.exit('python version not supported (< 2.5)')

# Solaris needs an explicit link against libsendfile; other platforms
# provide sendfile(2) via libc.
libraries = ["sendfile"] if 'sunos' in sys.platform else []
def main():
    """Configure and run the setuptools/distutils setup for pysendfile."""
    # Read the long description with a context manager so the file handle is
    # closed deterministically (previously `open(...).read()` leaked it).
    with open('README.rst', 'r') as f:
        long_description = f.read()
    setup(name=NAME,
          url='https://github.com/giampaolo/pysendfile',
          version=VERSION,
          description='A Python interface to sendfile(2)',
          long_description=long_description,
          author='<NAME>',
          author_email='<EMAIL>',
          platforms='UNIX',
          license='MIT',
          keywords=['sendfile', 'python', 'performance', 'ftp'],
          classifiers=[
              'Development Status :: 5 - Production/Stable',
              'Intended Audience :: Developers',
              'Operating System :: POSIX :: Linux',
              'Operating System :: MacOS :: MacOS X',
              'Operating System :: POSIX :: BSD',
              'Operating System :: POSIX :: BSD :: FreeBSD',
              'Operating System :: POSIX :: SunOS/Solaris',
              'Operating System :: POSIX :: AIX',
              'Programming Language :: C',
              'Programming Language :: Python :: 2.5',
              'Programming Language :: Python :: 2.6',
              'Programming Language :: Python :: 2.7',
              'Programming Language :: Python :: 3',
              'Programming Language :: Python :: 3.0',
              'Programming Language :: Python :: 3.1',
              'Programming Language :: Python :: 3.2',
              'Programming Language :: Python :: 3.3',
              'Programming Language :: Python :: 3.4',
              'Programming Language :: Python :: 3.5',
              'Programming Language :: Python :: 3.6',
              'Programming Language :: Python :: 3.7',
              'Programming Language :: Python :: 3.8',
              'Programming Language :: Python :: 3.9',
              'Topic :: System :: Networking',
              'Topic :: System :: Operating System',
              'Topic :: Internet :: File Transfer Protocol (FTP)',
              'Topic :: Internet :: WWW/HTTP',
              'License :: OSI Approved :: MIT License',
          ],
          # The C extension implementing the sendfile(2) wrapper.
          ext_modules=[Extension('sendfile',
                                 sources=['sendfilemodule.c'],
                                 libraries=libraries)])
# Run the build/installation only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"distutils.core.Extension",
"sys.exit"
] | [((1420, 1468), 'sys.exit', 'sys.exit', (['"""python version not supported (< 2.5)"""'], {}), "('python version not supported (< 2.5)')\n", (1428, 1468), False, 'import sys\n'), ((3546, 3618), 'distutils.core.Extension', 'Extension', (['"""sendfile"""'], {'sources': "['sendfilemodule.c']", 'libraries': 'libraries'}), "('sendfile', sources=['sendfilemodule.c'], libraries=libraries)\n", (3555, 3618), False, 'from distutils.core import Extension, setup\n')] |
from Stroke import Stroke
from TactileBrush import TactileBrush
import json
from sortedcontainers import SortedList
EPSILON = 0.001
class Point:
def __init__(self, x : int, y : int):
self.x = int(x)
self.y = int(y)
def __repr__(self):
return "(" + str(self.x) + ", " + str(self.y) + ")"
def __key(self):
return (self.x, self.y)
def __eq__(self, value):
if isinstance(value, Point):
return self.__key() == value.__key()
return NotImplemented
def __hash__(self):
h = hash(self.__key())
return h
class ActuatorValue:
__slots__ = ("pin", "value")
def __init__(self, pin : int, value : float):
self.pin = pin
self.value = value
class Frame:
__slots__ = ("time", "actuators")
def __init__(self, time : float):
self.time = time
self.actuators = set()
class VibrationPattern:
__slots__ = ("isLooped", "duration", "interpolation", "frames")
def __init__(self, duration : float, is_looped : bool, interpolation : int):
self.duration = duration
self.isLooped = is_looped
self.interpolation = interpolation
self.frames = SortedList(key = lambda frame: frame.time) # sort frames by time
def add_frame(self, frame : Frame):
for f in self.frames:
time = abs(f.time - frame.time)
if time < EPSILON:
f.actuators |= frame.actuators
return
self.frames.add(frame)
def to_json(self):
d = dict()
d["isLooped"] = self.isLooped
d["duration"] = self.duration / 1000.0
d["interpolation"] = self.interpolation
d["frames"] = list()
for f in self.frames:
fr = dict()
fr["time"] = f.time / 1000.0
fr["actuators"] = list()
for actuator in f.actuators:
a = dict()
a["pin"] = actuator.pin
a["value"] = actuator.value
fr["actuators"].append(a)
d["frames"].append(fr)
return json.dumps(d, indent=4, sort_keys=True)
class Config:
    """Static configuration loaded once, at import time, from config.json
    in the current working directory."""

    with open('config.json') as json_file:
        config = json.load(json_file)

    # Tactile grid geometry.
    lines = config["grid"]["lines"]
    columns = config["grid"]["columns"]
    spacing = config["grid"]["spacing"]

    # Map grid positions (given as "x,y" keys) to actuator pin numbers.
    mapping = dict()
    for coord_key, pin_number in config["mapping"].items():
        parts = coord_key.split(",")
        mapping[Point(parts[0], parts[1])] = int(pin_number)
def create_pattern(motion: dict, total_duration=None):
    """Convert tactile-brush motion steps into a VibrationPattern.

    Parameters
    ----------
    motion : dict
        Maps an activation time to a list of steps; each step exposes
        ``line``, ``column``, ``intensity`` and ``duration`` attributes.
    total_duration : float, optional
        Total pattern duration. When omitted, falls back to the module-level
        ``duration`` (set in the ``__main__`` section) for backward
        compatibility with the original implicit-global behaviour.

    Returns
    -------
    VibrationPattern
        A non-looped pattern with interpolation mode 0.
    """
    if total_duration is None:
        # Legacy fallback: the original code read the global ``duration``
        # defined only when the module runs as a script.
        total_duration = duration
    pattern = VibrationPattern(total_duration, False, 0)
    for activation_time, steps in motion.items():
        # One starting frame shared by all steps activated at this time.
        start_frame = Frame(activation_time)
        for step in steps:
            # Clamp the step's end time into [0, pattern duration].
            end_time = max(0, min(activation_time + step.duration, pattern.duration))
            point = Point(step.column, step.line)
            # Resolve the hardware pin for this grid position.
            pin = Config.mapping[point]
            # Turn the actuator on at the start ...
            start_frame.actuators.add(ActuatorValue(pin, step.intensity))
            # ... and off again when the step ends.
            end_frame = Frame(end_time)
            end_frame.actuators.add(ActuatorValue(pin, 0.0))
            # add_frame merges frames whose times coincide (within EPSILON).
            pattern.add_frame(start_frame)
            pattern.add_frame(end_frame)
    return pattern
def get_position_from_string(s: str):
    """Parse a position string of the form "x,y" into a (float, float) tuple.

    Whitespace around the string and around each component is tolerated
    (``float`` strips it). Extra comma-separated fields beyond the first two
    are ignored, matching the original lenient behaviour.

    Raises:
        Exception: if the string does not start with two comma-separated
            decimal numbers.
    """
    s = s.strip()  # remove whitespace
    try:
        parts = s.split(',')
        return float(parts[0]), float(parts[1])
    except Exception as e:
        # Chain the underlying error; the message's quoting is fixed
        # (it previously read "Format must be 'x,y." with an unbalanced quote).
        raise Exception("Invalid position was passed. Format must be 'x,y'.") from e
def get_duration_from_string(s: str):
    """Parse a duration string into a float (milliseconds, per the CLI prompt).

    Raises:
        Exception: if the string is not a valid decimal number.
    """
    try:
        return float(s.strip())
    except Exception as e:
        # Chain the underlying parse error for easier debugging.
        raise Exception("Invalid duration was passed. A decimal value must be passed.") from e
if __name__ == "__main__":
    # Interactive CLI: read a stroke, compute its actuation steps and print
    # the resulting vibration pattern as JSON.
    print("Enter stroke start position (x,y):")
    start_str = input()
    start_x, start_y = get_position_from_string(start_str)

    # Bug fix: this prompt previously said "start" again (copy-paste error)
    # even though it reads the END position.
    print("Enter stroke end position (x,y):")
    end_str = input()
    end_x, end_y = get_position_from_string(end_str)

    print("Enter duration of stroke in msec:")
    duration_str = input()
    duration = get_duration_from_string(duration_str)

    t = TactileBrush(Config.lines, Config.columns, Config.spacing)
    s = Stroke(start_x, start_y, end_x, end_y, duration, 1)
    motion = t.compute_stroke_steps(s)
    pattern = create_pattern(motion)
    print("Json Pattern:\n")
    print(pattern.to_json())
"sortedcontainers.SortedList",
"Stroke.Stroke",
"json.dumps",
"TactileBrush.TactileBrush",
"json.load"
] | [((4493, 4551), 'TactileBrush.TactileBrush', 'TactileBrush', (['Config.lines', 'Config.columns', 'Config.spacing'], {}), '(Config.lines, Config.columns, Config.spacing)\n', (4505, 4551), False, 'from TactileBrush import TactileBrush\n'), ((4560, 4611), 'Stroke.Stroke', 'Stroke', (['start_x', 'start_y', 'end_x', 'end_y', 'duration', '(1)'], {}), '(start_x, start_y, end_x, end_y, duration, 1)\n', (4566, 4611), False, 'from Stroke import Stroke\n'), ((1214, 1254), 'sortedcontainers.SortedList', 'SortedList', ([], {'key': '(lambda frame: frame.time)'}), '(key=lambda frame: frame.time)\n', (1224, 1254), False, 'from sortedcontainers import SortedList\n'), ((2122, 2161), 'json.dumps', 'json.dumps', (['d'], {'indent': '(4)', 'sort_keys': '(True)'}), '(d, indent=4, sort_keys=True)\n', (2132, 2161), False, 'import json\n'), ((2237, 2257), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2246, 2257), False, 'import json\n')] |
import sys
import json
from h5model import h5model
# CLI entry: create the named model and report success or failure.
if len(sys.argv) < 2:
    # Usage errors go to stderr, consistent with the error path below.
    print('Usage: ' + sys.argv[0] + ' ' + '<model name>', file=sys.stderr)
    sys.exit(1)

modelName = sys.argv[1]

model = h5model(modelName)
model.createModel()

# HTTP-style status: anything >= 400 indicates failure.
if model.responseStatus >= 400:
    print("Unable to create model '" + modelName + "': " + model.errorMessage,
          file=sys.stderr)
    sys.exit(1)

print(model.responseSuccessPayload)
"h5model.h5model"
] | [((174, 192), 'h5model.h5model', 'h5model', (['modelName'], {}), '(modelName)\n', (181, 192), False, 'from h5model import h5model\n')] |
# -*- coding: utf-8 -*-
import os, sys
from PyQt5.QtWidgets import (QWizard, QMessageBox)
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import pyqtSlot, Qt
# odmltables is an optional dependency; record its availability rather than
# failing at import time.
try:
    import odmltables
    have_odmltables = True
except ImportError:  # narrowed from a bare ``except`` that hid real errors
    have_odmltables = False
from .settings import Settings
class OdmltablesWizard(QWizard):
    """Base class for odMLtables Qt wizards.

    Handles per-wizard persistent settings, logo lookup and the shared
    help-button mechanism; subclasses must implement _createHelpMsgs().
    """
    def __init__(self, wizname, parent=None):
        """Set up settings storage, wizard options, logo and help wiring.

        :param wizname: human-readable wizard name; also used (lowercased,
            spaces removed) to derive the settings file name
        :param parent: optional parent widget forwarded to QWizard
        """
        super(OdmltablesWizard, self).__init__(parent)
        self.wizname = wizname
        # Per-wizard configuration file under ~/.odmltables/<name>.conf
        self.settingsfile = os.path.join(os.path.expanduser("~"),
                                         '.odmltables',
                                         wizname.replace(' ', '').lower() + '.conf')
        # initialize settings
        self.settings = Settings(self.settingsfile)
        # setting starting page of wizard
        # self.setStartId(0)
        self.setOption(self.IndependentPages, False)
        # images won't show in Windows 7 if style not set
        self.setWizardStyle(self.ModernStyle)
        self.setOption(self.HaveHelpButton, True)
        logo_filename = "odMLtables_100x100.png"
        # Candidate logo locations: the source checkout and an installed
        # share/pixmaps directory. NOTE(review): every existing match sets
        # the pixmap (no break), so the last hit wins.
        logo_dirs = [os.path.join(os.path.dirname(__file__), '..', '..', 'logo'),
                     os.path.join(sys.prefix, 'share/pixmaps')]
        for logo_dir in logo_dirs:
            filepath = os.path.join(logo_dir, logo_filename)
            if os.path.exists(filepath):
                self.setPixmap(QWizard.LogoPixmap, QPixmap(filepath))
        # set up help messages
        self._lastHelpMsg = ''
        self._helpMsgs = self._createHelpMsgs()
        self.helpRequested.connect(self._showHelp)
        self.setWindowTitle(self.tr(wizname))
    def _createHelpMsgs(self):
        """Return help messages indexable by wizard page id (abstract)."""
        raise NotImplementedError()
    @pyqtSlot()
    def _showHelp(self):
        """Show a message box with the help text for the current page."""
        # get the help message for the current page
        msg = self._helpMsgs[self.currentId()]
        # # if same as last message, display alternate message
        # if msg == self._lastHelpMsg:
        #     msg = self._helpMsgs[self.NUM_PAGES + 1]
        doc_link = "<p>For detailed information about odMLtables refer to the " \
                   "<a href='http://pythonhosted.org/python-odmltables'>odMLtables " \
                   "documentation</a>.</p>"
        msgBox = QMessageBox()
        msgBox.setWindowTitle("Help")
        msgBox.setTextFormat(Qt.RichText)
        msgBox.setText(msg + doc_link)
        msgBox.exec_()
        # QMessageBox.information(self,
        #                         self.tr(self.wizname),
        #                         msg)
        # self._lastHelpMsg = msg
def get_graphic_path():
    """Return the directory containing odMLtables' bundled GUI graphics.

    Returns None when the odmltables package could not be imported.
    """
    if not have_odmltables:
        return None
    return os.path.join(os.path.dirname(odmltables.__file__),
                        'gui',
                        'graphics')
| [
"os.path.exists",
"PyQt5.QtWidgets.QMessageBox",
"os.path.join",
"PyQt5.QtCore.pyqtSlot",
"os.path.dirname",
"PyQt5.QtGui.QPixmap",
"os.path.expanduser"
] | [((1709, 1719), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (1717, 1719), False, 'from PyQt5.QtCore import pyqtSlot, Qt\n'), ((2233, 2246), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (2244, 2246), False, 'from PyQt5.QtWidgets import QWizard, QMessageBox\n'), ((494, 517), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (512, 517), False, 'import os, sys\n'), ((1176, 1217), 'os.path.join', 'os.path.join', (['sys.prefix', '"""share/pixmaps"""'], {}), "(sys.prefix, 'share/pixmaps')\n", (1188, 1217), False, 'import os, sys\n'), ((1277, 1314), 'os.path.join', 'os.path.join', (['logo_dir', 'logo_filename'], {}), '(logo_dir, logo_filename)\n', (1289, 1314), False, 'import os, sys\n'), ((1330, 1354), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (1344, 1354), False, 'import os, sys\n'), ((2643, 2679), 'os.path.dirname', 'os.path.dirname', (['odmltables.__file__'], {}), '(odmltables.__file__)\n', (2658, 2679), False, 'import os, sys\n'), ((1107, 1132), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1122, 1132), False, 'import os, sys\n'), ((1407, 1424), 'PyQt5.QtGui.QPixmap', 'QPixmap', (['filepath'], {}), '(filepath)\n', (1414, 1424), False, 'from PyQt5.QtGui import QPixmap\n')] |