code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import os
import signal
from os.path import join
from sys import argv
from utils.csv_table import CsvTable
from utils.fasta_map import FastaMap
from utils.hierarchy_tree import HierarchyTree
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
def main():
data_dir = argv[1]
csv_path = join(data_dir, "sequences.csv")
fasta_path = join(data_dir, "sequences.fasta")
print("\nReading and processing files...")
csv_table = CsvTable(csv_path).group_countries_by_median_length()
ids = csv_table.values('Accession')
fasta_map = FastaMap(fasta_path).filter(lambda item: item[0] in ids)
print("Files processing finished!")
labels = csv_table.dict_of('Accession', 'Geo_Location')
print("\nBuilding hierarchy...")
distances_table = fasta_map.compare_all_samples()
tree = HierarchyTree(distances_table, labels)
tree.build_tree()
if __name__ == '__main__':
if len(argv) == 2:
pid_h = os.fork()
if pid_h == 0:
main()
else:
try:
os.wait()
except KeyboardInterrupt:
os.kill(pid_h, signal.SIGKILL)
print("\nshutdown")
else:
print("python sarscovhierarchy.py <data_path>")
| [
"utils.csv_table.CsvTable",
"signal.signal",
"os.kill",
"os.path.join",
"utils.fasta_map.FastaMap",
"utils.hierarchy_tree.HierarchyTree",
"os.wait",
"os.fork"
] | [((194, 239), 'signal.signal', 'signal.signal', (['signal.SIGTSTP', 'signal.SIG_IGN'], {}), '(signal.SIGTSTP, signal.SIG_IGN)\n', (207, 239), False, 'import signal\n'), ((292, 323), 'os.path.join', 'join', (['data_dir', '"""sequences.csv"""'], {}), "(data_dir, 'sequences.csv')\n", (296, 323), False, 'from os.path import join\n'), ((341, 374), 'os.path.join', 'join', (['data_dir', '"""sequences.fasta"""'], {}), "(data_dir, 'sequences.fasta')\n", (345, 374), False, 'from os.path import join\n'), ((807, 845), 'utils.hierarchy_tree.HierarchyTree', 'HierarchyTree', (['distances_table', 'labels'], {}), '(distances_table, labels)\n', (820, 845), False, 'from utils.hierarchy_tree import HierarchyTree\n'), ((936, 945), 'os.fork', 'os.fork', ([], {}), '()\n', (943, 945), False, 'import os\n'), ((438, 456), 'utils.csv_table.CsvTable', 'CsvTable', (['csv_path'], {}), '(csv_path)\n', (446, 456), False, 'from utils.csv_table import CsvTable\n'), ((548, 568), 'utils.fasta_map.FastaMap', 'FastaMap', (['fasta_path'], {}), '(fasta_path)\n', (556, 568), False, 'from utils.fasta_map import FastaMap\n'), ((1035, 1044), 'os.wait', 'os.wait', ([], {}), '()\n', (1042, 1044), False, 'import os\n'), ((1099, 1129), 'os.kill', 'os.kill', (['pid_h', 'signal.SIGKILL'], {}), '(pid_h, signal.SIGKILL)\n', (1106, 1129), False, 'import os\n')] |
import unittest
import k3ut
import k3handy
dd = k3ut.dd
class TestHandyCmd(unittest.TestCase):
def test_parse_flag(self):
cases = [
('', ()),
('x', ('raise', )),
('t', ('tty',)),
('n', ('none',)),
('p', ('pass',)),
('o', ('stdout',)),
('0', ('oneline',)),
('x0', ('raise', 'oneline',)),
]
for c in cases:
flag, want = c
got = k3handy.parse_flag(flag)
self.assertEqual(want, got)
with self.assertRaises(KeyError):
k3handy.parse_flag('q')
def test_cmdf(self):
for f in ('0', ('oneline', )):
got = k3handy.cmdf(
'python', '-c', 'print("a"); print("b")',
flag=f
)
self.assertEqual('a', got)
# no output
got = k3handy.cmdf(
'python', '-c', '',
flag=f
)
self.assertEqual('', got)
# not raise without 'x'
k3handy.cmdf(
'python', '-c',
'import sys; sys.exit(5)',
flag=f,
)
# return None if error
for f in ('n0', ['none', 'oneline']):
got = k3handy.cmdf(
'python', '-c',
'import sys; sys.exit(5)',
flag=f,
)
self.assertEqual(None, got)
# raise with 'x'
for f in ('x0', ['raise', 'oneline']):
self.assertRaises(k3handy.CalledProcessError,
k3handy.cmdf,
'python', '-c',
'import sys; sys.exit(5)',
flag=f,
)
self.assertRaises(k3handy.CalledProcessError,
k3handy.cmdx,
'python', '-c',
'import sys; sys.exit(5)',
)
# out
for f in ('o', ['stdout']):
got = k3handy.cmdf(
'python', '-c', 'print("a"); print("b")',
flag=f,
)
self.assertEqual(['a', 'b'], got)
for f in ('xo', ['raise', 'stdout']):
self.assertRaises(k3handy.CalledProcessError,
k3handy.cmdf,
'python', '-c',
'import sys; sys.exit(5)',
flag=f,
)
# tty
for f in ('t', ['tty']):
returncode, out, err = k3handy.cmdf(
'python', '-c', 'import sys; print(sys.stdout.isatty())',
flag=f,
)
dd('out:', out)
self.assertEqual(['True'], out)
returncode, out, err = k3handy.cmdtty(
'python', '-c', 'import sys; print(sys.stdout.isatty())',
)
dd('out:', out)
self.assertEqual(['True'], out)
# input
read_stdin_in_subproc = '''
import k3handy;
k3handy.cmdf(
'python', '-c', 'import sys; print(sys.stdin.read())',
flag='p'
)
'''
returncode, out, err = k3handy.cmdx(
'python', '-c',
read_stdin_in_subproc,
input="123",
)
dd('out:', out)
self.assertEqual(["123"], out)
def test_cmd0(self):
got = k3handy.cmd0(
'python', '-c', 'print("a"); print("b")',
)
self.assertEqual('a', got)
# no output
got = k3handy.cmd0(
'python', '-c', '',
)
self.assertEqual('', got)
# failure to exception
self.assertRaises(k3handy.CalledProcessError,
k3handy.cmd0,
'python', '-c',
'import sys; sys.exit(5)',
)
def test_cmdout(self):
got = k3handy.cmdout(
'python', '-c', 'print("a"); print("b")',
)
self.assertEqual(['a', 'b'], got)
self.assertRaises(k3handy.CalledProcessError,
k3handy.cmdout,
'python', '-c',
'import sys; sys.exit(5)',
)
def test_cmdx(self):
got = k3handy.cmdx(
'python', '-c', 'print("a"); print("b")',
)
self.assertEqual((0, ['a', 'b'], []), got)
self.assertRaises(k3handy.CalledProcessError,
k3handy.cmdx,
'python', '-c',
'import sys; sys.exit(5)',
)
def test_cmdtty(self):
returncode, out, err = k3handy.cmdtty(
'python', '-c', 'import sys; print(sys.stdout.isatty())',
)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual(['True'], out)
self.assertEqual([], err)
def test_cmdpass(self):
read_stdin_in_subproc = '''
import k3handy;
k3handy.cmdpass(
'python', '-c', 'import sys; print(sys.stdin.read())',
)
'''
returncode, out, err = k3handy.cmdx(
'python', '-c',
read_stdin_in_subproc,
input="123",
)
dd('returncode:', returncode)
dd('out:', out)
dd('err:', err)
self.assertEqual(0, returncode)
self.assertEqual(["123"], out)
| [
"k3handy.cmdf",
"k3handy.parse_flag",
"k3handy.cmdtty",
"k3handy.cmd0",
"k3handy.cmdout",
"k3handy.cmdx"
] | [((2897, 2969), 'k3handy.cmdtty', 'k3handy.cmdtty', (['"""python"""', '"""-c"""', '"""import sys; print(sys.stdout.isatty())"""'], {}), "('python', '-c', 'import sys; print(sys.stdout.isatty())')\n", (2911, 2969), False, 'import k3handy\n'), ((3251, 3315), 'k3handy.cmdx', 'k3handy.cmdx', (['"""python"""', '"""-c"""', 'read_stdin_in_subproc'], {'input': '"""123"""'}), "('python', '-c', read_stdin_in_subproc, input='123')\n", (3263, 3315), False, 'import k3handy\n'), ((3467, 3521), 'k3handy.cmd0', 'k3handy.cmd0', (['"""python"""', '"""-c"""', '"""print("a"); print("b")"""'], {}), '(\'python\', \'-c\', \'print("a"); print("b")\')\n', (3479, 3521), False, 'import k3handy\n'), ((3617, 3649), 'k3handy.cmd0', 'k3handy.cmd0', (['"""python"""', '"""-c"""', '""""""'], {}), "('python', '-c', '')\n", (3629, 3649), False, 'import k3handy\n'), ((4000, 4056), 'k3handy.cmdout', 'k3handy.cmdout', (['"""python"""', '"""-c"""', '"""print("a"); print("b")"""'], {}), '(\'python\', \'-c\', \'print("a"); print("b")\')\n', (4014, 4056), False, 'import k3handy\n'), ((4382, 4436), 'k3handy.cmdx', 'k3handy.cmdx', (['"""python"""', '"""-c"""', '"""print("a"); print("b")"""'], {}), '(\'python\', \'-c\', \'print("a"); print("b")\')\n', (4394, 4436), False, 'import k3handy\n'), ((4788, 4860), 'k3handy.cmdtty', 'k3handy.cmdtty', (['"""python"""', '"""-c"""', '"""import sys; print(sys.stdout.isatty())"""'], {}), "('python', '-c', 'import sys; print(sys.stdout.isatty())')\n", (4802, 4860), False, 'import k3handy\n'), ((5285, 5349), 'k3handy.cmdx', 'k3handy.cmdx', (['"""python"""', '"""-c"""', 'read_stdin_in_subproc'], {'input': '"""123"""'}), "('python', '-c', read_stdin_in_subproc, input='123')\n", (5297, 5349), False, 'import k3handy\n'), ((480, 504), 'k3handy.parse_flag', 'k3handy.parse_flag', (['flag'], {}), '(flag)\n', (498, 504), False, 'import k3handy\n'), ((600, 623), 'k3handy.parse_flag', 'k3handy.parse_flag', (['"""q"""'], {}), "('q')\n", (618, 623), False, 'import k3handy\n'), ((708, 
770), 'k3handy.cmdf', 'k3handy.cmdf', (['"""python"""', '"""-c"""', '"""print("a"); print("b")"""'], {'flag': 'f'}), '(\'python\', \'-c\', \'print("a"); print("b")\', flag=f)\n', (720, 770), False, 'import k3handy\n'), ((901, 941), 'k3handy.cmdf', 'k3handy.cmdf', (['"""python"""', '"""-c"""', '""""""'], {'flag': 'f'}), "('python', '-c', '', flag=f)\n", (913, 941), False, 'import k3handy\n'), ((1075, 1138), 'k3handy.cmdf', 'k3handy.cmdf', (['"""python"""', '"""-c"""', '"""import sys; sys.exit(5)"""'], {'flag': 'f'}), "('python', '-c', 'import sys; sys.exit(5)', flag=f)\n", (1087, 1138), False, 'import k3handy\n'), ((1299, 1362), 'k3handy.cmdf', 'k3handy.cmdf', (['"""python"""', '"""-c"""', '"""import sys; sys.exit(5)"""'], {'flag': 'f'}), "('python', '-c', 'import sys; sys.exit(5)', flag=f)\n", (1311, 1362), False, 'import k3handy\n'), ((2104, 2166), 'k3handy.cmdf', 'k3handy.cmdf', (['"""python"""', '"""-c"""', '"""print("a"); print("b")"""'], {'flag': 'f'}), '(\'python\', \'-c\', \'print("a"); print("b")\', flag=f)\n', (2116, 2166), False, 'import k3handy\n'), ((2666, 2744), 'k3handy.cmdf', 'k3handy.cmdf', (['"""python"""', '"""-c"""', '"""import sys; print(sys.stdout.isatty())"""'], {'flag': 'f'}), "('python', '-c', 'import sys; print(sys.stdout.isatty())', flag=f)\n", (2678, 2744), False, 'import k3handy\n')] |
import os
import numpy as np
import random
import numbers
import skimage
from skimage import io, color
import torch
# read uint8 image from path
def imread_uint8(imgpath, mode='RGB'):
'''
mode: 'RGB', 'gray', 'Y', 'L'.
'Y' and 'L' mean the Y channel of YCbCr.
'''
if mode == 'RGB':
img = io.imread(imgpath)
elif mode == 'gray':
img = io.imread(imgpath, as_gray=True)
img = skimage.img_as_ubyte(img)
elif mode in ['Y','L']:
# Y channel of YCbCr
# Note: The skimage.color.rgb2ycbcr() function is the same with that of matlab,
# PIL.Image.convert('YCbCr') is not.
img = io.imread(imgpath)
if img.ndim == 3:
img = color.rgb2ycbcr(img)[:,:,0]
img = img.round().astype(np.uint8)
return img
def augment_img(img, mode='8'):
'''flip and/or rotate the image randomly'''
if mode == '2':
mode = random.randint(0, 1)
elif mode == '4':
mode = random.randint(0, 3)
elif mode == '8':
mode = random.randint(0, 7)
else:
mode = 0
if mode == 0:
return img
elif mode == 1:
return np.fliplr(img)
elif mode == 2:
return np.rot90(img, k=2)
elif mode == 3:
return np.fliplr(np.rot90(img, k=2))
elif mode == 4:
return np.rot90(img, k=1)
elif mode == 5:
return np.fliplr(np.rot90(img, k=1))
elif mode == 6:
return np.rot90(img, k=3)
elif mode == 7:
return np.fliplr(np.rot90(img, k=3))
def random_crop(img, size):
'''crop image patch randomly'''
if isinstance(size, numbers.Number):
size = (int(size), int(size))
h, w = img.shape[0:2]
ph, pw = size
rnd_h = random.randint(0, h - ph)
rnd_w = random.randint(0, w - pw)
img_patch = img[rnd_h:rnd_h + ph, rnd_w:rnd_w + pw, ...]
return img_patch
def uint2tensor(img, normalized=True):
if img.ndim == 2:
img = img[:, :, np.newaxis]
img = skimage.img_as_float32(img)
if normalized:
img = (img - 0.5) / 0.5
img = torch.from_numpy(np.ascontiguousarray(img.transpose(2, 0, 1))).float()
return img
def tensor2uint(img, normalized=True):
img = img.data.squeeze().cpu().numpy().astype(np.float32)
if img.ndim == 3:
img = img.transpose(1, 2, 0)
elif img.ndim == 4:
img = img.transpose(0, 2, 3, 1)
if normalized:
img = img * 0.5 + 0.5
img = img.clip(0, 1) * 255
img = img.round().astype(np.uint8)
return img
def tensor3to4(tensor):
return tensor.unsqueeze(0)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
| [
"os.path.exists",
"os.makedirs",
"skimage.color.rgb2ycbcr",
"numpy.fliplr",
"skimage.img_as_float32",
"skimage.io.imread",
"numpy.rot90",
"skimage.img_as_ubyte",
"random.randint"
] | [((1733, 1758), 'random.randint', 'random.randint', (['(0)', '(h - ph)'], {}), '(0, h - ph)\n', (1747, 1758), False, 'import random\n'), ((1771, 1796), 'random.randint', 'random.randint', (['(0)', '(w - pw)'], {}), '(0, w - pw)\n', (1785, 1796), False, 'import random\n'), ((1988, 2015), 'skimage.img_as_float32', 'skimage.img_as_float32', (['img'], {}), '(img)\n', (2010, 2015), False, 'import skimage\n'), ((317, 335), 'skimage.io.imread', 'io.imread', (['imgpath'], {}), '(imgpath)\n', (326, 335), False, 'from skimage import io, color\n'), ((922, 942), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (936, 942), False, 'import random\n'), ((2610, 2630), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2624, 2630), False, 'import os\n'), ((2640, 2657), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2651, 2657), False, 'import os\n'), ((375, 407), 'skimage.io.imread', 'io.imread', (['imgpath'], {'as_gray': '(True)'}), '(imgpath, as_gray=True)\n', (384, 407), False, 'from skimage import io, color\n'), ((422, 447), 'skimage.img_as_ubyte', 'skimage.img_as_ubyte', (['img'], {}), '(img)\n', (442, 447), False, 'import skimage\n'), ((980, 1000), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (994, 1000), False, 'import random\n'), ((1159, 1173), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (1168, 1173), True, 'import numpy as np\n'), ((653, 671), 'skimage.io.imread', 'io.imread', (['imgpath'], {}), '(imgpath)\n', (662, 671), False, 'from skimage import io, color\n'), ((1038, 1058), 'random.randint', 'random.randint', (['(0)', '(7)'], {}), '(0, 7)\n', (1052, 1058), False, 'import random\n'), ((1209, 1227), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(2)'}), '(img, k=2)\n', (1217, 1227), True, 'import numpy as np\n'), ((716, 736), 'skimage.color.rgb2ycbcr', 'color.rgb2ycbcr', (['img'], {}), '(img)\n', (731, 736), False, 'from skimage import io, color\n'), ((1273, 1291), 'numpy.rot90', 
'np.rot90', (['img'], {'k': '(2)'}), '(img, k=2)\n', (1281, 1291), True, 'import numpy as np\n'), ((1328, 1346), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(1)'}), '(img, k=1)\n', (1336, 1346), True, 'import numpy as np\n'), ((1392, 1410), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(1)'}), '(img, k=1)\n', (1400, 1410), True, 'import numpy as np\n'), ((1447, 1465), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(3)'}), '(img, k=3)\n', (1455, 1465), True, 'import numpy as np\n'), ((1511, 1529), 'numpy.rot90', 'np.rot90', (['img'], {'k': '(3)'}), '(img, k=3)\n', (1519, 1529), True, 'import numpy as np\n')] |
# Generated by Django 3.2.9 on 2021-12-28 03:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=100)),
('pages', models.IntegerField(default=None, null=True)),
('rows_per_page', models.IntegerField(default=None, null=True)),
('columns_per_row', models.IntegerField(default=None, null=True)),
('created_ts', models.DateTimeField(auto_now_add=True)),
('updated_ts', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Currency',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('page', models.IntegerField(default=None, null=True)),
('row', models.IntegerField(default=None, null=True)),
('column', models.IntegerField(default=None, null=True)),
('currency', models.CharField(blank=True, default='', max_length=100)),
('value', models.DecimalField(decimal_places=2, default=None, max_digits=19, null=True)),
('type', models.CharField(choices=[('Bill', 'Bill'), ('Coin', 'Coin')], max_length=4)),
('country', models.CharField(blank=True, default='', max_length=2)),
('created_ts', models.DateTimeField(auto_now_add=True)),
('updated_ts', models.DateTimeField(auto_now=True)),
('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myapp.book')),
],
),
]
| [
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.BigAutoField",
"django.db.models.DateTimeField",
"django.db.models.DecimalField",
"django.db.models.CharField"
] | [((333, 429), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (352, 429), False, 'from django.db import migrations, models\n'), ((460, 492), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (476, 492), False, 'from django.db import migrations, models\n'), ((521, 565), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (540, 565), False, 'from django.db import migrations, models\n'), ((602, 646), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (621, 646), False, 'from django.db import migrations, models\n'), ((685, 729), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (704, 729), False, 'from django.db import migrations, models\n'), ((763, 802), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (783, 802), False, 'from django.db import migrations, models\n'), ((836, 871), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (856, 871), False, 'from django.db import migrations, models\n'), ((1005, 1101), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1024, 1101), False, 'from django.db import migrations, models\n'), ((1125, 1169), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, 
null=True)\n', (1144, 1169), False, 'from django.db import migrations, models\n'), ((1196, 1240), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (1215, 1240), False, 'from django.db import migrations, models\n'), ((1270, 1314), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (1289, 1314), False, 'from django.db import migrations, models\n'), ((1346, 1402), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(100)'}), "(blank=True, default='', max_length=100)\n", (1362, 1402), False, 'from django.db import migrations, models\n'), ((1431, 1508), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'default': 'None', 'max_digits': '(19)', 'null': '(True)'}), '(decimal_places=2, default=None, max_digits=19, null=True)\n', (1450, 1508), False, 'from django.db import migrations, models\n'), ((1536, 1612), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('Bill', 'Bill'), ('Coin', 'Coin')]", 'max_length': '(4)'}), "(choices=[('Bill', 'Bill'), ('Coin', 'Coin')], max_length=4)\n", (1552, 1612), False, 'from django.db import migrations, models\n'), ((1643, 1697), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(2)'}), "(blank=True, default='', max_length=2)\n", (1659, 1697), False, 'from django.db import migrations, models\n'), ((1731, 1770), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1751, 1770), False, 'from django.db import migrations, models\n'), ((1804, 1839), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1824, 1839), False, 'from django.db import migrations, models\n'), ((1867, 1946), 
'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""myapp.book"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='myapp.book')\n", (1884, 1946), False, 'from django.db import migrations, models\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Routines related to the canonical Chandra ACA dark current model.
The model is based on smoothed twice-broken power-law fits of
dark current histograms from Jan-2007 though Aug-2017. This analysis
was done entirely with dark current maps scaled to -14 C.
See: /proj/sot/ska/analysis/dark_current_model/dark_model.ipynb
and other files in that directory.
Alternatively:
http://nbviewer.ipython.org/url/asc.harvard.edu/mta/ASPECT/analysis/dark_current_model/dark_model.ipynb
"""
import numpy as np
import warnings
from Chandra.Time import DateTime
# Define a common fixed binning of dark current distribution
from . import darkbins
# Global cache (e.g. for initial dark current in synthetic_dark_image
CACHE = {}
# Some constants and globals. Done this way to support sherpa fitting.
# Needs to be re-worked to be nicer.
# Fixed gaussian for smoothing the broken power law
dx = 0.1
sigma = 0.30 # Gaussian sigma in log space
xg = np.arange(-2.5 * sigma, 2.5 * sigma, dx, dtype=float)
yg = np.exp(-0.5 * (xg / sigma) ** 2)
yg /= np.sum(yg)
NPIX = 1024 ** 2
# Fixed
xbins = darkbins.bins
xall = darkbins.bin_centers
imin = 0
imax = len(xall)
# Warm threshold used in fitting acq prob model. This constant is
# not used in any configured code, but leave here just in case.
warm_threshold = 100.
# Increase in dark current per 4 degC increase in T_ccd
DARK_SCALE_4C = 1.0 / 0.70
def dark_temp_scale(t_ccd, t_ccd_ref=-19.0, scale_4c=None):
"""Return the multiplicative scale factor to convert a CCD dark map
or dark current value from temperature ``t_ccd`` to temperature
``t_ccd_ref``::
scale = scale_4c ** ((t_ccd_ref - t_ccd) / 4.0)
In other words, if you have a dark current value that corresponds to ``t_ccd``
and need the value at a different temperature ``t_ccd_ref`` then use the
the following. Do not be misled by the misleading parameter names.
>>> from chandra_aca.dark_scale import dark_temp_scale
>>> scale = dark_temp_scale(t_ccd, t_ccd_ref, scale_4c)
>>> dark_curr_at_t_ccd_ref = scale * dark_curr_at_t_ccd
The default value for ``scale_4c`` is 1.0 / 0.7. It is written this way
because the equation was previously expressed using 1 / scale_4c with a
value of 0.7. This value is based on best global fit for dark current model
in `plot_predicted_warmpix.py`. This represents the multiplicative change
in dark current for each 4 degC increase::
>>> dark_temp_scale(t_ccd=-18, t_ccd_ref=-10, scale_4c=2.0)
4.0
:param t_ccd: actual temperature (degC)
:param t_ccd_ref: reference temperature (degC, default=-19.0)
:param scale_4c: increase in dark current per 4 degC increase (default=1.0 / 0.7)
:returns: scale factor
"""
if scale_4c is None:
scale_4c = DARK_SCALE_4C
return scale_4c ** ((t_ccd_ref - t_ccd) / 4.0)
def get_dark_hist(date, t_ccd):
"""
Return the dark current histogram corresponding to ``date`` and ``t_ccd``.
:param date: date in any DateTime format
:param t_ccd: CCD temperature (deg C)
:returns: bin_centers, bins, darkhist
"""
pars = get_sbp_pars(date)
x = darkbins.bin_centers
y = smooth_twice_broken_pow(pars, x)
# Model params are calibrated using reference temp. -14 C
scale = dark_temp_scale(-14, t_ccd)
xbins = darkbins.bins * scale
x = x * scale
return x, xbins, y
def smooth_broken_pow(pars, x):
"""
Smoothed broken power-law. Pars are same as bpl1d (NOT + gaussian sigma):
1: gamma1
2: gamma2
3: x_b (break point)
4: x_r (normalization reference point)
5: ampl1
"""
(gamma1, gamma2, x_b, x_r, ampl1) = pars
ampl2 = ampl1 * (x_b / x_r) ** (gamma2 - gamma1)
ok = xall > x_b
y = ampl1 * (xall / x_r) ** (-gamma1)
y[ok] = ampl2 * (xall[ok] / x_r) ** (-gamma2)
imin = np.searchsorted(xall, x[0] - 1e-3)
imax = np.searchsorted(xall, x[-1] + 1e-3)
return np.convolve(y, yg, mode='same')[imin:imax]
def smooth_twice_broken_pow(pars, x):
"""
Smoothed broken power-law. Pars are same as bpl1d (NOT + gaussian sigma):
1: gamma1
2: gamma2
3: gamma3
4: x_b (break point)
5: ampl1
"""
x_b2 = 1000
x_r = 50
(gamma1, gamma2, gamma3, x_b, ampl1) = pars
y = ampl1 * (xall / x_r) ** (-gamma1)
i0, i1 = np.searchsorted(xall, [x_b, x_b2])
ampl2 = ampl1 * (x_b / x_r) ** (gamma2 - gamma1)
y[i0:i1] = ampl2 * (xall[i0:i1] / x_r) ** (-gamma2)
i1 = np.searchsorted(xall, x_b2)
ampl3 = ampl2 * (x_b2 / x_r) ** (gamma3 - gamma2)
y[i1:] = ampl3 * (xall[i1:] / x_r) ** (-gamma3)
imin = np.searchsorted(xall, x[0] - 1e-3)
imax = np.searchsorted(xall, x[-1] + 1e-3)
return np.convolve(y, yg, mode='same')[imin:imax]
def temp_scalefac(T_ccd):
"""
Return the multiplicative scale factor to convert a CCD dark map from
the nominal -19C temperature to the temperature T. Based on best global fit for
dark current model in plot_predicted_warmpix.py. Previous value was 0.62 instead
of 0.70.
If attempting to reproduce previous analysis, be aware that this is now calling
chandra_aca.dark_model.dark_temp_scale and the value will be determined using the
module DARK_SCALE_4C value which may differ from previous values of 1.0/0.70 or 1.0/0.62.
"""
warnings.warn("temp_scalefac is deprecated. See chandra_aca.dark_model.dark_temp_scale.")
return dark_temp_scale(-19, T_ccd)
def as_array(vals):
if np.array(vals).ndim == 0:
is_scalar = True
vals = np.array([vals])
else:
is_scalar = False
vals = np.array(vals)
return vals, is_scalar
def get_sbp_pars(dates):
"""
Return smooth broken powerlaw parameters set(s) at ``dates``.
This is based on the sbp fits for the darkhist_zodi_m14 histograms in
/proj/sot/ska/analysis/dark_current_model/dark_model.ipynb.
The actual bi-linear fits (as a function of year) to the g1, g2, g3, x_b, and ampl
parameters are derived from fits and by-hand inspection of fit trending.
This is only accurate for dates > 2007.0.
:param dates: one or a list of date(s) in DateTime compatible format
:returns: one or a list of parameter lists [g1, g2, g3, x_b, ampl]
"""
dates, is_scalar = as_array(dates)
mid_year = 2012.0 # Fixed in dark_model.ipynb notebook
years = DateTime(dates).frac_year
dyears = years - mid_year
# Poly fit parameter for pre-2012 and post-2012. Vals here are:
# y_mid, slope_pre, slope_post
par_fits = ((0.075, -0.00692, -0.0207), # g1
(3.32, 0.0203, 0 * 0.0047), # g2
(2.40, 0.061, 0.061), # g3
(192, 0.1, 0.1), # x_b
(18400, 1.45e3, 742), # ampl
)
pars_list = []
for dyear in dyears:
pars = []
for y_mid, slope_pre, slope_post in par_fits:
slope = slope_pre if dyear < 0 else slope_post
pars.append(y_mid + slope * dyear)
pars_list.append(pars)
if is_scalar:
pars_list = pars_list[0]
return pars_list
def get_warm_fracs(warm_threshold, date='2013:001:12:00:00', T_ccd=-19.0):
"""
Calculate fraction of pixels in modeled dark current distribution
above warm threshold(s).
:param warm_threshold: scalar or list of threshold(s) in e-/sec
:param date: date to use for modeled dark current distribution/histogram
:param T_ccd: temperature (C) of modeled dark current distribution
:returns: list or scalar of warm fractions (depends on warm_threshold type)
"""
x, xbins, y = get_dark_hist(date, T_ccd)
warm_thresholds, is_scalar = as_array(warm_threshold)
warmpixes = []
for warm_threshold in warm_thresholds:
# First get the full bins to right of warm_threshold
ii = np.searchsorted(xbins, warm_threshold)
warmpix = np.sum(y[ii:])
lx = np.log(warm_threshold)
lx0 = np.log(xbins[ii - 1])
lx1 = np.log(xbins[ii])
ly0 = np.log(y[ii - 1])
ly1 = np.log(y[ii])
m = (ly1 - ly0) / (lx1 - lx0)
partial_bin = y[ii] * (lx1 ** m - lx ** m) / (lx1 ** m - lx0 ** m)
warmpix += partial_bin
warmpixes.append(warmpix)
if is_scalar:
out = warmpixes[0]
else:
out = np.array(warmpixes)
return out / (1024.0 ** 2)
def synthetic_dark_image(date, t_ccd_ref=None):
"""
Generate a synthetic dark current image corresponding to the specified
``date`` and ``t_ccd``.
:param date: (DateTime compatible)
:param t_ccd_ref: ACA CCD temperature
"""
from mica.archive.aca_dark import get_dark_cal_image
if 'dark_1999223' not in CACHE:
dark = get_dark_cal_image('1999:223:12:00:00', select='nearest', t_ccd_ref=-14).ravel()
CACHE['dark_1999223'] = dark.copy()
else:
dark = CACHE['dark_1999223'].copy()
# Fill any pixels above 40 e-/sec with a random sampling from a cool
# pixel below 40 e-/sec
warm = dark > 40
warm_idx = np.flatnonzero(warm)
not_warm_idx = np.flatnonzero(~warm)
fill_idx = np.random.randint(0, len(not_warm_idx), len(warm_idx))
dark[warm_idx] = dark[fill_idx]
darkmodel = smooth_twice_broken_pow(get_sbp_pars(date), xall)
darkran = np.random.poisson(darkmodel)
nn = 0
for ii, npix in enumerate(darkran):
# Generate n log-uniform variates within bin
if npix > 0:
logdark = np.random.uniform(np.log(xbins[ii]), np.log(xbins[ii + 1]), npix)
dark[nn:nn + npix] += np.exp(logdark)
nn += npix
np.random.shuffle(dark)
dark.shape = (1024, 1024)
if t_ccd_ref is not None:
dark *= dark_temp_scale(-14, t_ccd_ref)
return dark
| [
"Chandra.Time.DateTime",
"numpy.convolve",
"numpy.random.poisson",
"numpy.searchsorted",
"numpy.flatnonzero",
"numpy.log",
"numpy.exp",
"numpy.sum",
"numpy.array",
"warnings.warn",
"mica.archive.aca_dark.get_dark_cal_image",
"numpy.arange",
"numpy.random.shuffle"
] | [((1034, 1087), 'numpy.arange', 'np.arange', (['(-2.5 * sigma)', '(2.5 * sigma)', 'dx'], {'dtype': 'float'}), '(-2.5 * sigma, 2.5 * sigma, dx, dtype=float)\n', (1043, 1087), True, 'import numpy as np\n'), ((1093, 1125), 'numpy.exp', 'np.exp', (['(-0.5 * (xg / sigma) ** 2)'], {}), '(-0.5 * (xg / sigma) ** 2)\n', (1099, 1125), True, 'import numpy as np\n'), ((1132, 1142), 'numpy.sum', 'np.sum', (['yg'], {}), '(yg)\n', (1138, 1142), True, 'import numpy as np\n'), ((3957, 3992), 'numpy.searchsorted', 'np.searchsorted', (['xall', '(x[0] - 0.001)'], {}), '(xall, x[0] - 0.001)\n', (3972, 3992), True, 'import numpy as np\n'), ((4003, 4039), 'numpy.searchsorted', 'np.searchsorted', (['xall', '(x[-1] + 0.001)'], {}), '(xall, x[-1] + 0.001)\n', (4018, 4039), True, 'import numpy as np\n'), ((4442, 4476), 'numpy.searchsorted', 'np.searchsorted', (['xall', '[x_b, x_b2]'], {}), '(xall, [x_b, x_b2])\n', (4457, 4476), True, 'import numpy as np\n'), ((4596, 4623), 'numpy.searchsorted', 'np.searchsorted', (['xall', 'x_b2'], {}), '(xall, x_b2)\n', (4611, 4623), True, 'import numpy as np\n'), ((4742, 4777), 'numpy.searchsorted', 'np.searchsorted', (['xall', '(x[0] - 0.001)'], {}), '(xall, x[0] - 0.001)\n', (4757, 4777), True, 'import numpy as np\n'), ((4788, 4824), 'numpy.searchsorted', 'np.searchsorted', (['xall', '(x[-1] + 0.001)'], {}), '(xall, x[-1] + 0.001)\n', (4803, 4824), True, 'import numpy as np\n'), ((5449, 5549), 'warnings.warn', 'warnings.warn', (['"""temp_scalefac is deprecated. See chandra_aca.dark_model.dark_temp_scale."""'], {}), "(\n 'temp_scalefac is deprecated. 
See chandra_aca.dark_model.dark_temp_scale.'\n )\n", (5462, 5549), False, 'import warnings\n'), ((5739, 5753), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (5747, 5753), True, 'import numpy as np\n'), ((9179, 9199), 'numpy.flatnonzero', 'np.flatnonzero', (['warm'], {}), '(warm)\n', (9193, 9199), True, 'import numpy as np\n'), ((9219, 9240), 'numpy.flatnonzero', 'np.flatnonzero', (['(~warm)'], {}), '(~warm)\n', (9233, 9240), True, 'import numpy as np\n'), ((9429, 9457), 'numpy.random.poisson', 'np.random.poisson', (['darkmodel'], {}), '(darkmodel)\n', (9446, 9457), True, 'import numpy as np\n'), ((9750, 9773), 'numpy.random.shuffle', 'np.random.shuffle', (['dark'], {}), '(dark)\n', (9767, 9773), True, 'import numpy as np\n'), ((4050, 4081), 'numpy.convolve', 'np.convolve', (['y', 'yg'], {'mode': '"""same"""'}), "(y, yg, mode='same')\n", (4061, 4081), True, 'import numpy as np\n'), ((4835, 4866), 'numpy.convolve', 'np.convolve', (['y', 'yg'], {'mode': '"""same"""'}), "(y, yg, mode='same')\n", (4846, 4866), True, 'import numpy as np\n'), ((5674, 5690), 'numpy.array', 'np.array', (['[vals]'], {}), '([vals])\n', (5682, 5690), True, 'import numpy as np\n'), ((6498, 6513), 'Chandra.Time.DateTime', 'DateTime', (['dates'], {}), '(dates)\n', (6506, 6513), False, 'from Chandra.Time import DateTime\n'), ((7965, 8003), 'numpy.searchsorted', 'np.searchsorted', (['xbins', 'warm_threshold'], {}), '(xbins, warm_threshold)\n', (7980, 8003), True, 'import numpy as np\n'), ((8022, 8036), 'numpy.sum', 'np.sum', (['y[ii:]'], {}), '(y[ii:])\n', (8028, 8036), True, 'import numpy as np\n'), ((8050, 8072), 'numpy.log', 'np.log', (['warm_threshold'], {}), '(warm_threshold)\n', (8056, 8072), True, 'import numpy as np\n'), ((8087, 8108), 'numpy.log', 'np.log', (['xbins[ii - 1]'], {}), '(xbins[ii - 1])\n', (8093, 8108), True, 'import numpy as np\n'), ((8123, 8140), 'numpy.log', 'np.log', (['xbins[ii]'], {}), '(xbins[ii])\n', (8129, 8140), True, 'import numpy as np\n'), ((8155, 8172), 
'numpy.log', 'np.log', (['y[ii - 1]'], {}), '(y[ii - 1])\n', (8161, 8172), True, 'import numpy as np\n'), ((8187, 8200), 'numpy.log', 'np.log', (['y[ii]'], {}), '(y[ii])\n', (8193, 8200), True, 'import numpy as np\n'), ((8449, 8468), 'numpy.array', 'np.array', (['warmpixes'], {}), '(warmpixes)\n', (8457, 8468), True, 'import numpy as np\n'), ((5608, 5622), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (5616, 5622), True, 'import numpy as np\n'), ((9706, 9721), 'numpy.exp', 'np.exp', (['logdark'], {}), '(logdark)\n', (9712, 9721), True, 'import numpy as np\n'), ((8862, 8934), 'mica.archive.aca_dark.get_dark_cal_image', 'get_dark_cal_image', (['"""1999:223:12:00:00"""'], {'select': '"""nearest"""', 't_ccd_ref': '(-14)'}), "('1999:223:12:00:00', select='nearest', t_ccd_ref=-14)\n", (8880, 8934), False, 'from mica.archive.aca_dark import get_dark_cal_image\n'), ((9624, 9641), 'numpy.log', 'np.log', (['xbins[ii]'], {}), '(xbins[ii])\n', (9630, 9641), True, 'import numpy as np\n'), ((9643, 9664), 'numpy.log', 'np.log', (['xbins[ii + 1]'], {}), '(xbins[ii + 1])\n', (9649, 9664), True, 'import numpy as np\n')] |
import unittest
import ipaddress
from net_models.models import (
StaticRouteV4,
StaticRouteV6
)
from tests.BaseTestClass import TestVendorIndependentBase
class TestStaticRouteV4(TestVendorIndependentBase):
    """Validate that StaticRouteV4 normalizes raw route parameters."""

    TEST_CLASS = StaticRouteV4

    def test_valid_routes(self):
        """Each case maps raw input kwargs to the expected serialized dict."""
        default_net = ipaddress.IPv4Network("0.0.0.0/0")
        gateway = ipaddress.IPv4Address("1.2.3.4")
        cases = [
            ("Test-Default-01",
             {"network": "0.0.0.0/0", "next_hop": "1.2.3.4"},
             {"network": default_net, "next_hop": gateway}),
            ("Test-Default-02",
             {"network": "0.0.0.0/0.0.0.0", "next_hop": "1.2.3.4"},
             {"network": default_net, "next_hop": gateway}),
            ("Test-Default-03",
             {"network": "0.0.0.0/0", "interface": "Serial1/2/3",
              "vrf": "TEST-VRF"},
             {"network": default_net, "interface": "Serial1/2/3",
              "vrf": "TEST-VRF"}),
            ("Test-VRF-01",
             {"network": "192.168.0.0/255.255.255.0", "next_hop": "1.2.3.4",
              "vrf": "TEST-VRF"},
             {"network": ipaddress.IPv4Network("192.168.0.0/24"),
              "next_hop": gateway, "vrf": "TEST-VRF"}),
        ]
        for name, raw, expected in cases:
            with self.subTest(msg=name):
                serialized = self.TEST_CLASS(**raw).dict(exclude_none=True)
                self.assertDictEqual(expected, serialized)
class TestStaticRouteV6(TestVendorIndependentBase):
    """Run the shared vendor-independent base checks against StaticRouteV6.

    No extra cases here; coverage comes from TestVendorIndependentBase.
    """
    TEST_CLASS = StaticRouteV6
# Allow running this test module directly (e.g. `python this_file.py`).
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"ipaddress.IPv4Network",
"ipaddress.IPv4Address"
] | [((2417, 2432), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2430, 2432), False, 'import unittest\n'), ((557, 591), 'ipaddress.IPv4Network', 'ipaddress.IPv4Network', (['"""0.0.0.0/0"""'], {}), "('0.0.0.0/0')\n", (578, 591), False, 'import ipaddress\n'), ((625, 657), 'ipaddress.IPv4Address', 'ipaddress.IPv4Address', (['"""1.2.3.4"""'], {}), "('1.2.3.4')\n", (646, 657), False, 'import ipaddress\n'), ((949, 983), 'ipaddress.IPv4Network', 'ipaddress.IPv4Network', (['"""0.0.0.0/0"""'], {}), "('0.0.0.0/0')\n", (970, 983), False, 'import ipaddress\n'), ((1017, 1049), 'ipaddress.IPv4Address', 'ipaddress.IPv4Address', (['"""1.2.3.4"""'], {}), "('1.2.3.4')\n", (1038, 1049), False, 'import ipaddress\n'), ((1379, 1413), 'ipaddress.IPv4Network', 'ipaddress.IPv4Network', (['"""0.0.0.0/0"""'], {}), "('0.0.0.0/0')\n", (1400, 1413), False, 'import ipaddress\n'), ((1837, 1876), 'ipaddress.IPv4Network', 'ipaddress.IPv4Network', (['"""192.168.0.0/24"""'], {}), "('192.168.0.0/24')\n", (1858, 1876), False, 'import ipaddress\n'), ((1910, 1942), 'ipaddress.IPv4Address', 'ipaddress.IPv4Address', (['"""1.2.3.4"""'], {}), "('1.2.3.4')\n", (1931, 1942), False, 'import ipaddress\n')] |
"""
This library provides a set of tools that can be used in chemometrics analysis.
These tools are:
- some pre-processing methods that can be applyed in the spectra.
- an function that make average of spectra in the case of there are more than one spectra by sample (e.g. triplicate or duplicate aquisitions)
"""
# Split the module docstring: line 0 becomes the short description and the
# text after the blank line becomes the long description.
DOCLINES = (__doc__ or '').split("\n")
import setuptools
setuptools.setup(
    name="preprocspectra",
    version="0.0.65",
    license='MIT',
    author="<NAME>",
    author_email="<EMAIL>",
    description=DOCLINES[0],
    long_description = "\n".join(DOCLINES[2:]),
    url="https://github.com/dijsilva/preprocspectra",
    packages=setuptools.find_packages(),
    # NOTE(review): download_url points at tag 0.0.1 while version is 0.0.65
    # -- confirm this is intentional or update the tag.
    download_url = 'https://github.com/dijsilva/preprocspectra/releases/tag/0.0.1',
    keywords = ['SPECTROSCOPY', 'PREPROCESSING', 'SPECTRA'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        'pandas>=1.0.3',
        'numpy>=1.18.3',
        'scipy>=1.4.1',
    ],
    python_requires='>=3.6'
)
| [
"setuptools.find_packages"
] | [((655, 681), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (679, 681), False, 'import setuptools\n')] |
"""Collection of DVC options
Based on ZnTrackOption python descriptors this gives access to them being used
to define e.g. dependencies
Examples
--------
>>> from zntrack import Node, dvc
>>> class HelloWorld(Node)
>>> vars = dvc.params()
"""
import logging
from zntrack import utils
from zntrack.core.zntrackoption import ZnTrackOption
from zntrack.dvc.custom_base import PlotsModifyOption
log = logging.getLogger(__name__)
# All available DVC cmd options + results
# detailed explanations on https://dvc.org/doc/command-reference/run#options
class params(ZnTrackOption):
    """Identify DVC option
    Marks an attribute as a DVC ``params`` entry.
    See https://dvc.org/doc/command-reference/run#options for more information
    on the available options
    """
    zn_type = utils.ZnTypes.DVC
    file = utils.Files.zntrack
class deps(ZnTrackOption):
    """Identify DVC option
    Marks an attribute as a DVC ``deps`` (dependency) entry; values may be
    other Nodes, which are resolved lazily on attribute access.
    See https://dvc.org/doc/command-reference/run#options for more information
    on the available options
    """
    zn_type = utils.ZnTypes.DEPS
    file = utils.Files.zntrack
    def __get__(self, instance, owner=None):
        """Use load_node_dependency before returning the value"""
        if instance is None:
            # Class-level access: return the descriptor itself, as usual.
            return self
        value = super().__get__(instance, owner)
        # Resolve Node dependencies (with a warning when a load is needed).
        value = utils.utils.load_node_dependency(value, log_warning=True)
        # Cache the resolved value on the instance so resolution happens once.
        setattr(instance, self.name, value)
        return value
class outs(ZnTrackOption):
    """Identify DVC option
    Marks an attribute as a DVC ``outs`` entry.
    See https://dvc.org/doc/command-reference/run#options for more information
    on the available options
    """
    zn_type = utils.ZnTypes.DVC
    file = utils.Files.zntrack
class checkpoints(ZnTrackOption):
    """Identify DVC option
    Marks an attribute as a DVC ``checkpoints`` entry.
    See https://dvc.org/doc/command-reference/run#options for more information
    on the available options
    """
    zn_type = utils.ZnTypes.DVC
    file = utils.Files.zntrack
class outs_no_cache(ZnTrackOption):
    """Identify DVC option
    Marks an attribute as a DVC ``outs-no-cache`` entry.
    See https://dvc.org/doc/command-reference/run#options for more information
    on the available options
    """
    zn_type = utils.ZnTypes.DVC
    file = utils.Files.zntrack
class outs_persistent(ZnTrackOption):
    """Identify DVC option
    Marks an attribute as a DVC persistent output entry.
    See https://dvc.org/doc/command-reference/run#options for more information
    on the available options
    """
    zn_type = utils.ZnTypes.DVC
    file = utils.Files.zntrack
class metrics(ZnTrackOption):
    """Identify DVC option
    Marks an attribute as a DVC ``metrics`` entry.
    See https://dvc.org/doc/command-reference/run#options for more information
    on the available options
    """
    zn_type = utils.ZnTypes.DVC
    file = utils.Files.zntrack
class metrics_no_cache(ZnTrackOption):
    """Identify DVC option
    Marks an attribute as a DVC ``metrics-no-cache`` entry.
    See https://dvc.org/doc/command-reference/run#options for more information
    on the available options
    """
    zn_type = utils.ZnTypes.DVC
    file = utils.Files.zntrack
class plots(PlotsModifyOption):
    """Identify DVC option
    Marks an attribute as a DVC ``plots`` entry. Inherits PlotsModifyOption
    (see custom_base), which presumably adds plot-modification keywords --
    confirm against custom_base.
    See https://dvc.org/doc/command-reference/run#options for more information
    on the available options
    """
    zn_type = utils.ZnTypes.DVC
    file = utils.Files.zntrack
class plots_no_cache(ZnTrackOption):
    """Identify DVC option
    Marks an attribute as a DVC ``plots-no-cache`` entry.
    See https://dvc.org/doc/command-reference/run#options for more information
    on the available options
    """
    zn_type = utils.ZnTypes.DVC
    file = utils.Files.zntrack
| [
"logging.getLogger",
"zntrack.utils.utils.load_node_dependency"
] | [((405, 432), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (422, 432), False, 'import logging\n'), ((1263, 1320), 'zntrack.utils.utils.load_node_dependency', 'utils.utils.load_node_dependency', (['value'], {'log_warning': '(True)'}), '(value, log_warning=True)\n', (1295, 1320), False, 'from zntrack import utils\n')] |
# coding: utf-8
"""Common Groups operations."""
from os.path import abspath, join as pjoin
import logging
import json
from commongroups.cmgroup import CMGroup
from commongroups.errors import MissingParamError, NoCredentialsError
from commongroups.googlesheet import SheetManager
from commongroups.hypertext import directory
from commongroups import logconf # pylint: disable=unused-import
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def cmgs_from_googlesheet(env):
    """
    Generate compound group objects from parameters in a Google Sheet.

    The sheet title, worksheet name and key file are taken from the
    environment's configuration.

    Parameters:
        env (:class:`commongroups.env.CommonEnv`): Environment to use for all
            generated groups and for identifying the Google Sheet.

    Returns:
        Generator yielding :class:`commongroups.cmgroup.CMGroup` objects.

    Raises:
        MissingParamError: if a required Google Sheets config key is absent.
        NoCredentialsError: if the service-account credentials are rejected.
    """
    logger.info('Generating compound groups from Google Sheet')
    cfg = env.config
    try:
        sheet = SheetManager(cfg['google_sheet_title'],
                             cfg['google_worksheet'],
                             cfg['google_key_file'])
    except KeyError as missing:
        logger.exception('Google Sheets access is not configured')
        raise MissingParamError(missing.args[0])
    except NoCredentialsError:
        logger.exception('Cannot authenticate Google Service Account')
        raise
    return sheet.get_cmgs(env)
# TODO: Import group definitions from spreadsheet file.
def cmgs_from_file(env, path, filetype=None):
    """
    Generate compound group objects from a file.

    Only the defining parameters and descriptive information for each compound
    group are imported from the file. Importing lists of compounds for already
    populated groups is *not supported.*

    Parameters:
        env (:class:`commongroups.env.CommonEnv`): The project environment.
            Determines the environment used for the :class:`CMGroup` objects.
        path (str): Path to a file containing parameters, and optionally
            other ``info``, for a number of CMGs.
        filetype (str): Type of file; required only if path does not have a
            file extension.

    Yields:
        :class:`commongroups.cmgroup.CMGroup` objects.
    """
    # Infer the file type from the extension unless given explicitly.
    ext = (filetype or path.split('.')[-1]).lower()
    full_path = abspath(path)
    logger.debug('Reading group parameters from %s', full_path)
    if ext != 'json':
        raise NotImplementedError(
            'File type unsupported: {}'.format(ext))
    with open(full_path, 'r') as json_file:
        many_params = json.load(json_file)
    for item in many_params:
        yield CMGroup(env, item['params'], item['info'])
def collect_to_json(cmgs, env, filename=None):
    """
    Write parameters and info for a number of compound groups to a JSON file.

    The output goes to ``cmgroups.json`` (or the given filename) inside the
    environment's ``results`` directory.

    Parameters:
        cmgs (iterable): :class:`commongroups.cmgroup.CMGroup` objects to write.
        env (:class:`commongroups.env.CommonEnv`): Project environment.
        filename (str): Optional alternative filename.
    """
    if not filename:
        filename = 'cmgroups.json'
    out_path = pjoin(env.results_path, filename)
    serialized = [group.to_dict() for group in cmgs]
    logger.info('Writing JSON file: %s', out_path)
    with open(out_path, 'w') as out_file:
        json.dump(serialized, out_file, indent=2, sort_keys=True)
def batch_process(cmgs, env):
    """
    Process compound groups in a given environment and output all results.

    Uses the environment's database connection (connecting first if needed).
    Each group is processed and written out as Excel, JSON and HTML; finally a
    combined JSON file and a browseable HTML directory are produced.

    Parameters:
        cmgs (iterable): :class:`commongroups.cmgroup.CMGroup` objects to
            process.
        env (:class:`commongroups.env.CommonEnv`): Environment.

    Returns:
        List of processed compound groups.
    """
    if not env.database:
        env.connect_database()
    done = []
    for group in cmgs:
        group.process(env.database)
        group.to_excel()
        group.to_json()
        group.to_html(formats=['xlsx', 'json'])
        done.append(group)
    collect_to_json(done, env)
    directory(done, env)
    return done
| [
"logging.getLogger",
"commongroups.cmgroup.CMGroup",
"os.path.join",
"commongroups.errors.MissingParamError",
"commongroups.hypertext.directory",
"json.load",
"commongroups.googlesheet.SheetManager",
"os.path.abspath",
"json.dump"
] | [((402, 429), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (419, 429), False, 'import logging\n'), ((2413, 2426), 'os.path.abspath', 'abspath', (['path'], {}), '(path)\n', (2420, 2426), False, 'from os.path import abspath, join as pjoin\n'), ((3398, 3431), 'os.path.join', 'pjoin', (['env.results_path', 'filename'], {}), '(env.results_path, filename)\n', (3403, 3431), True, 'from os.path import abspath, join as pjoin\n'), ((4472, 4502), 'commongroups.hypertext.directory', 'directory', (['processed_cmgs', 'env'], {}), '(processed_cmgs, env)\n', (4481, 4502), False, 'from commongroups.hypertext import directory\n'), ((1009, 1123), 'commongroups.googlesheet.SheetManager', 'SheetManager', (["env.config['google_sheet_title']", "env.config['google_worksheet']", "env.config['google_key_file']"], {}), "(env.config['google_sheet_title'], env.config[\n 'google_worksheet'], env.config['google_key_file'])\n", (1021, 1123), False, 'from commongroups.googlesheet import SheetManager\n'), ((3526, 3582), 'json.dump', 'json.dump', (['cmg_data', 'json_file'], {'indent': '(2)', 'sort_keys': '(True)'}), '(cmg_data, json_file, indent=2, sort_keys=True)\n', (3535, 3582), False, 'import json\n'), ((1287, 1318), 'commongroups.errors.MissingParamError', 'MissingParamError', (['keyx.args[0]'], {}), '(keyx.args[0])\n', (1304, 1318), False, 'from commongroups.errors import MissingParamError, NoCredentialsError\n'), ((2582, 2602), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2591, 2602), False, 'import json\n'), ((2749, 2791), 'commongroups.cmgroup.CMGroup', 'CMGroup', (['env', "item['params']", "item['info']"], {}), "(env, item['params'], item['info'])\n", (2756, 2791), False, 'from commongroups.cmgroup import CMGroup\n')] |
# -*- coding: utf-8 -*-
# Copyright 2018 <NAME> All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test dashboard browser class.
A credentials file with username + password must be supplied for this to work.
('credentials*' is excluded in .gitignore)
"""
import unittest
import pyotp
from merlink.browsers.dashboard import DashboardBrowser
from merlink.browsers.pages.page_utils import get_input_var_value
from merlink.browsers.pages.page_hunters import get_pagetext_mkiconf
from test.credentials import emails, passwords, totp_base32_secret
class TestLogins(unittest.TestCase):
    """Test various dashboard browser logins.
    Cycle through access types
    Emails list has these emails: [
        # [0] test for all access
        # [1] test for full org + 1 network access in different org
        # [2] test for full network access in multiple orgs
        # [3] test for read only access in one network in one org
        # [4] test for monitor only access in one network in one org
        # [5] test for guest ambassador access in one network in one org
        # [6] test for tfa network admin access in one network in one org
        # [7] test for SAML admin (PLANNED)
    ]
    Coverage for these functions:
        login
        tfa_submit_info
        org_data_setup
        logout
    NOTE: these tests log into the live Meraki Dashboard using the
    credentials imported from test.credentials; they require network access.
    """
    def setUp(self):
        """Set up only a browser."""
        # A fresh, unauthenticated browser per test.
        self.browser = DashboardBrowser()
    def test_login_sms_redirect(self):
        """Test that a tfa user is redirected to the tfa page."""
        self.assertEqual(self.browser.login(emails[6], passwords[6]),
                         'sms_auth')
    def test_login_tfa_fail(self):
        """Verify that bad TFA code returns expected ValueError exception."""
        self.browser.login(emails[6], passwords[6], tfa_code='000000')
        # With a rejected TFA code the browser has no network context, so
        # check_network_access is expected to raise ValueError.
        with self.assertRaises(ValueError):
            self.check_network_access()
    def test_login_tfa(self):
        """Verify that tfa works with python authenticator `pyotp`."""
        # Generate a valid time-based OTP from the shared secret.
        totp = pyotp.TOTP(totp_base32_secret)
        self.browser.login(emails[6], passwords[6], tfa_code=totp.now())
        self.check_network_access()
    def test_login_failure(self):
        """Test whether sending a bad password will result in an auth error."""
        self.assertEqual(self.browser.login(emails[6], 'badpassword'),
                         'auth_error')
    def test_login_success(self):
        """Test whether login works with a user with known good email/pass."""
        self.assertEqual(self.browser.login(emails[0], passwords[0]),
                         'auth_success')
    def test_login_multiorg(self):
        """Test whether a user with 2 orgs can access everything they should.
        * Checks org access in both orgs.
        """
        self.browser.login(emails[0], passwords[0])
        self.check_org_access()
        self.toggle_orgs()
        self.check_org_access()
    def test_login_1org_1network(self):
        """Test access for user with 1 org and 1 network in another org
        * Navigate to organization settings in org with full access
        * Navigate to network settings in other org where user does not have
        org access
        """
        self.browser.login(emails[1], passwords[1])
        # Force first org to be one where user has org access
        this_org = self.browser.active_org_id
        if not self.browser.orgs_dict[this_org]['org_admin_type']:
            # Set org id to the org with full access
            self.toggle_orgs()
        # Check org access in the first org and check network access in 2nd.
        self.check_org_access()
        self.toggle_orgs()
        self.check_network_access()
    def test_login_2networks_diff_orgs(self):
        """Test access for admin with network access in 2 diff orgs."""
        self.browser.login(emails[2], passwords[2])
        self.check_network_access()
        self.toggle_orgs()
        self.check_network_access()
    def test_login_1network_read_only(self):
        """Test access for network read-only admin."""
        self.browser.login(emails[3], passwords[3])
        self.check_network_access()
    def test_login_1network_monitor_only(self):
        """Test access for network monitors in 1 network in 1 org.
        Network monitors only have access to /usage/list, /new_reports
        """
        self.browser.login(emails[4], passwords[4])
        self.check_network_access(route='/new_reports')
    def test_login_1network_guest_ambassador(self):
        """Test access for network ambassadors in 1 network in 1 org.
        Guest ambassadors only have access to /configure/guests
        """
        self.browser.login(emails[5], passwords[5])
        self.check_network_access()
    def get_other_org_id(self):
        """For tests that involve multiple orgs, get the org not active now."""
        # Works for the two-org admins used here: pick the first org id that
        # is not the currently active one.
        return [item for item in list(self.browser.orgs_dict.keys())
                if item not in [self.browser.active_org_id]][0]
    def toggle_orgs(self):
        """Set the org id to that of the other org.
        In many of these tests, there are admins with access to 2 orgs.
        This function allows us to quickly toggle between the two
        """
        self.browser.set_org_id(self.get_other_org_id())
        print('Switching to org:\t', self.browser.get_active_org_name())
    def check_org_access(self):
        """Verify org access by scraping the org name out of settings."""
        # Print name that should only be visible on Organization > Settings
        self.browser.open_route('/organization/edit')
        print('Testing org access...\nOpened org:\t\t\t', get_input_var_value(
            self.browser.get_page(), 'organization_name'))
    def check_network_access(self, route='/configure/guests'):
        """Verify network access by scraping its name from network settings.
        Using /configure/guests as it should be accessible for all user
        types, except monitor only. MkiConf vars will be present on all pages.
        """
        network_eid = self.browser.active_network_id
        self.browser.open_route(route, network_eid=network_eid)
        mkiconf_dict = get_pagetext_mkiconf(self.browser.get_page().text)
        print('Testing network access...\nOpened network:\t\t',
              mkiconf_dict['network_name'])
class TestBrowser(unittest.TestCase):
    """Tests browser functionality given that login has occurred.
    NOTE: logs into the live Meraki Dashboard using credentials from
    test.credentials; requires network access.
    """
    def setUp(self):
        """Set up only a browser."""
        self.browser = DashboardBrowser()
        self.browser.login(emails[0], passwords[0])
    """Test setters and getters.
    Coverage for these attributes:
        get_active_network_name
        get_active_org_name
        get_network_names
        get_org_names
        set_network_id
        set_network_name
        set_org_id
        set_org_name
    Coverage for these key browser components:
        scrape_json
        open_route
        handle_redirects
        combined_network_redirect
    """
    # NOTE(review): the string above is a stray expression statement, not the
    # class docstring (only the first string literal in a class body is).
    # The methods below are unimplemented placeholders for the coverage
    # targets listed in that string.
    def test_get_active_network_name(self):
        pass
    def test_get_active_org_name(self):
        pass
    def test_get_network_names(self):
        pass
    def test_get_org_names(self):
        pass
    def test_set_network_id(self):
        pass
    def test_set_network_name(self):
        pass
    def test_set_org_id(self):
        pass
    def test_set_org_name(self):
        pass
    def test_scrape_json(self):
        pass
    def test_open_route(self):
        pass
    def test_handle_redirects(self):
        pass
    def test_combined_network_redirect(self):
        pass
    def tearDown(self):
        """Logout of browser and close it."""
        self.browser.logout()
        self.browser.mechsoup.close()
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"pyotp.TOTP",
"merlink.browsers.dashboard.DashboardBrowser"
] | [((8352, 8367), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8365, 8367), False, 'import unittest\n'), ((1951, 1969), 'merlink.browsers.dashboard.DashboardBrowser', 'DashboardBrowser', ([], {}), '()\n', (1967, 1969), False, 'from merlink.browsers.dashboard import DashboardBrowser\n'), ((2569, 2599), 'pyotp.TOTP', 'pyotp.TOTP', (['totp_base32_secret'], {}), '(totp_base32_secret)\n', (2579, 2599), False, 'import pyotp\n'), ((7079, 7097), 'merlink.browsers.dashboard.DashboardBrowser', 'DashboardBrowser', ([], {}), '()\n', (7095, 7097), False, 'from merlink.browsers.dashboard import DashboardBrowser\n')] |
"""
mixcoatl.admin.user
-------------------
Implements access to the DCM User API
"""
from mixcoatl.resource import Resource
from mixcoatl.decorators.lazy import lazy_property
from mixcoatl.decorators.validations import required_attrs
from mixcoatl.utils import camelize, camel_keys, uncamel_keys
import json
import time
class User(Resource):
    """A user within the DCM environment.
    Wraps the ``admin/User`` API endpoint. Most attributes are populated
    lazily via ``lazy_property`` from the API response.
    """
    PATH = 'admin/User'
    COLLECTION_NAME = 'users'
    PRIMARY_KEY = 'user_id'
    def __init__(self, user_id=None, endpoint=None, *args, **kwargs):
        # Only the endpoint is forwarded to Resource; *args/**kwargs are
        # accepted for signature compatibility but otherwise unused.
        Resource.__init__(self, endpoint=endpoint)
        self.__user_id = user_id
    @property
    def user_id(self):
        """`int` The DCM unique id of this user across all customer accounts"""
        return self.__user_id
    @lazy_property
    def account(self):
        """`dict` The DCM account"""
        return self.__account
    @account.setter
    def account(self, a):
        # pylint: disable-msg=C0111,W0201
        self.__account = a
    @lazy_property
    def account_user_id(self):
        """`str` or `None` A unique identifier to reference this user's access
        to a specific account
        """
        return self.__account_user_id
    @lazy_property
    def alpha_name(self):
        """`str` The user's full name in the form of `Last name, First name`"""
        return self.__alpha_name
    @lazy_property
    def billing_codes(self):
        """`list` The billing codes against which this user has provisioning rights
        .. note::
            Only present when this user is queried in the context of an `account_id`
        """
        return self.__billing_codes
    @billing_codes.setter
    def billing_codes(self, b):
        # pylint: disable-msg=C0111,W0201
        self.__billing_codes = b
    @lazy_property
    def cloud_console_password(self):
        """`str` The encrypted password that the user can log into the underlying cloud with"""
        return self.__cloud_console_password
    @lazy_property
    def cloud_api_public_key(self):
        """`str` The encrypted public key the user can make API calls to the underlying cloud with"""
        return self.__cloud_api_public_key
    @lazy_property
    def cloud_api_secret_key(self):
        """`str` The encrypted secret key the user can make API calls to the underlying cloud with"""
        return self.__cloud_api_secret_key
    @lazy_property
    def customer(self):
        """`dict` The customer record to which this user belongs"""
        return self.__customer
    @lazy_property
    def editable(self):
        """`bool` Indicates if the core values of this user may be changed"""
        return self.__editable
    @lazy_property
    def email(self):
        """`str` Email is a unique identifier that enables a given user to
        identify themselves to DCM
        """
        return self.__email
    @email.setter
    def email(self, e):
        # pylint: disable-msg=C0111,W0201
        self.__email = e
    @lazy_property
    def family_name(self):
        """`str` The family name of the user"""
        return self.__family_name
    @family_name.setter
    def family_name(self, f):
        # pylint: disable-msg=C0111,W0201
        self.__family_name = f
    @lazy_property
    def given_name(self):
        """`str` The given name of the user"""
        return self.__given_name
    @given_name.setter
    def given_name(self, g):
        # pylint: disable-msg=C0111,W0201
        self.__given_name = g
    @lazy_property
    def groups(self):
        """`list` The group membership of this user idependent of any individual accounts"""
        return self.__groups
    @groups.setter
    def groups(self, g):
        # pylint: disable-msg=C0111,W0201
        self.__groups = g
    @lazy_property
    def has_cloud_api_access(self):
        """`bool` Indicates that the user has access to the underlying cloud API (i.e. AWS IAM)"""
        return self.__has_cloud_api_access
    @lazy_property
    def has_cloud_console_access(self):
        """`bool` Indicates that the user has access to the underlying cloud console (i.e. AWS IAM)"""
        return self.__has_cloud_console_access
    @lazy_property
    def legacy_user_id(self):
        """`int` The legacy user id"""
        return self.__legacy_user_id
    @lazy_property
    def notifications_targets(self):
        """`dict` The various targets configured for delivery of notifications"""
        # NOTE(review): property name is `notifications_targets` but the
        # backing attribute is `__notification_targets` -- verify that
        # lazy_property resolves this name mismatch as intended.
        return self.__notification_targets
    @lazy_property
    def notifications_settings(self):
        """`dict` Notification settings configured for this user"""
        # NOTE(review): same property/attribute name mismatch as above.
        return self.__notification_settings
    @lazy_property
    def status(self):
        """`str` The current status of this user in DCM"""
        return self.__status
    @lazy_property
    def time_zone(self):
        """`str` The timezone id for the user's prefered time zone"""
        return self.__time_zone
    @lazy_property
    def vm_login_id(self):
        """`str` The username the user will use to login to cloud instances
        for shell or remote desktop access
        """
        return self.__vm_login_id
    @lazy_property
    def ssh_public_key(self):
        """`str` The public key to grant the user access to Unix instances"""
        return self.__ssh_public_key
    @ssh_public_key.setter
    def ssh_public_key(self, s):
        """`str` The public key to grant the user access to Unix instances"""
        self.__ssh_public_key = s
    @lazy_property
    def password(self):
        """`str` DCM login password"""
        return self.__password
    @password.setter
    def password(self, p):
        self.__password = p
    @required_attrs(['user_id'])
    def grant(self, account_id, groups, billing_codes):
        """Grants the user access to the specified account. :attr:`reason`
        :param account_id: Account ID of the account to grant access.
        :type account_id: int.
        :param groups: List of group ID the user will belong to.
        :type groups: list.
        :param billing_codes: List of billing code the user will use.
        :type billing_codes: list.
        :returns: bool -- Result of API call
        """
        p = '%s/%s' % (self.PATH, str(self.user_id))
        # Wrap each id in the structure the API expects:
        # {"grant": [{"account": ..., "groups": [...], "billingCodes": [...]}]}
        group_list = []
        billing_code_list = []
        for group in groups:
            group_list.append({"groupId": group})
        for billing_code in billing_codes:
            billing_code_list.append({"billingCodeId": billing_code})
        payload = {"grant": [{"account": {"accountId": account_id},
                              "groups": group_list,
                              "billingCodes": billing_code_list}]}
        return self.put(p, data=json.dumps(payload))
    @required_attrs(['account', 'given_name', 'family_name', 'email', 'groups', 'billing_codes'])
    def create(self):
        """Creates a new user.
        Requires `account`, `given_name`, `family_name`, `email`, `groups`
        and `billing_codes` to be set; `ssh_public_key` and `password` are
        included when set. On success the instance is reloaded and the API
        response returned; on failure UserCreationException is raised.
        """
        billing_code_list = []
        for billing_code in self.billing_codes:
            billing_code_list.append({"billingCodeId": billing_code})
        # NOTE(review): unlike grant(), the whole `groups` value is placed
        # under a single 'groupId' key -- confirm the API expects this shape.
        parms = [{'givenName': self.given_name,
                  'familyName': self.family_name,
                  'email': self.email,
                  'groups': [{'groupId': self.groups}],
                  'account': {'accountId': self.account},
                  'billingCodes': billing_code_list}]
        if self.ssh_public_key is not None:
            parms[0].update({'sshPublicKey': self.ssh_public_key})
        if self.password is not None:
            parms[0].update({'password': self.password})
        payload = {'addUser': camel_keys(parms)}
        response = self.post(data=json.dumps(payload))
        if self.last_error is None:
            self.load()
            return response
        else:
            raise UserCreationException(self.last_error)
    @classmethod
    def all(cls, keys_only=False, endpoint=None, **kwargs):
        """Return all users
        .. note::
            The keys used to make the request determine results visibility
        :param keys_only: Return :attr:`user_id` instead of :class:`User`
        :type keys_only: bool.
        :param detail: str. The level of detail to return - `basic` or `extended`
        :type detail: str.
        :returns: `list` of :class:`User` or :attr:`user_id`
        :raises: :class:`UserException`
        """
        r = Resource(cls.PATH, endpoint=endpoint)
        if 'detail' in kwargs:
            r.request_details = kwargs['detail']
        else:
            r.request_details = 'basic'
        x = r.get()
        if r.last_error is None:
            if keys_only is True:
                # Extract only the camelCase primary key (e.g. 'userId').
                return [i[camelize(cls.PRIMARY_KEY)] for i in x[cls.COLLECTION_NAME]]
            else:
                # Build lightweight ad-hoc objects (not User instances) whose
                # class attributes are the snake_cased response fields.
                return [type(cls.__name__, (object,), i) for i in uncamel_keys(x)[cls.COLLECTION_NAME]]
        else:
            raise UserException(r.last_error)
class UserException(BaseException):
    """Generic error raised by User API operations."""
    # NOTE(review): inherits BaseException rather than Exception, so generic
    # `except Exception` handlers will NOT catch it -- confirm intentional.
    pass
class UserCreationException(UserException):
    """User Creation Exception"""
    pass
| [
"mixcoatl.utils.uncamel_keys",
"mixcoatl.utils.camelize",
"json.dumps",
"mixcoatl.decorators.validations.required_attrs",
"mixcoatl.utils.camel_keys",
"mixcoatl.resource.Resource",
"mixcoatl.resource.Resource.__init__"
] | [((5697, 5724), 'mixcoatl.decorators.validations.required_attrs', 'required_attrs', (["['user_id']"], {}), "(['user_id'])\n", (5711, 5724), False, 'from mixcoatl.decorators.validations import required_attrs\n'), ((6762, 6858), 'mixcoatl.decorators.validations.required_attrs', 'required_attrs', (["['account', 'given_name', 'family_name', 'email', 'groups', 'billing_codes']"], {}), "(['account', 'given_name', 'family_name', 'email', 'groups',\n 'billing_codes'])\n", (6776, 6858), False, 'from mixcoatl.decorators.validations import required_attrs\n'), ((552, 594), 'mixcoatl.resource.Resource.__init__', 'Resource.__init__', (['self'], {'endpoint': 'endpoint'}), '(self, endpoint=endpoint)\n', (569, 594), False, 'from mixcoatl.resource import Resource\n'), ((8381, 8418), 'mixcoatl.resource.Resource', 'Resource', (['cls.PATH'], {'endpoint': 'endpoint'}), '(cls.PATH, endpoint=endpoint)\n', (8389, 8418), False, 'from mixcoatl.resource import Resource\n'), ((7606, 7623), 'mixcoatl.utils.camel_keys', 'camel_keys', (['parms'], {}), '(parms)\n', (7616, 7623), False, 'from mixcoatl.utils import camelize, camel_keys, uncamel_keys\n'), ((6735, 6754), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (6745, 6754), False, 'import json\n'), ((7660, 7679), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (7670, 7679), False, 'import json\n'), ((8667, 8692), 'mixcoatl.utils.camelize', 'camelize', (['cls.PRIMARY_KEY'], {}), '(cls.PRIMARY_KEY)\n', (8675, 8692), False, 'from mixcoatl.utils import camelize, camel_keys, uncamel_keys\n'), ((8811, 8826), 'mixcoatl.utils.uncamel_keys', 'uncamel_keys', (['x'], {}), '(x)\n', (8823, 8826), False, 'from mixcoatl.utils import camelize, camel_keys, uncamel_keys\n')] |
from ldap3 import Server, Connection, ALL, NTLM, ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES
class Simple_AD:
"""
A simple wrapper for ldap3 in an Active Directory Environment.
Examples:
0. Initilaize a connection
sad = Simple_AD(server_name="server1.mydomain.com", username="mydomain.com\\jsmith", password="<PASSWORD>")
1. Get group(s) by samaccountname
groups = sad.get_adgroup(samaccountname="Accounting")
2. Get users from a group fast
users_from_group = sad.expand_users_from_group(groups[0])
3. Get a user by samaccountname
user = sad.get_aduser(samaccountname="jsmith")
"""
    def __init__(self, server_name, username, password, use_ssl=True, default_attributes=ALL_ATTRIBUTES):
        """
        Class initializer. Takes various ldap3 properties and passes them through
        Args:
            server_name (string): The name of the target ldap server
            username (string): Username to authenticate in the format: {domain_name\\user_name}. Ex: [contoso\\jsmith]
            password (string): <PASSWORD>
            use_ssl (bool, optional): Binds to SSL on port 636 and should be used when possible. Defaults to True.
            default_attributes: Sets the default attributes to get for an object during an LDAP query, optional. Defaults to ALL_ATTRIBUTES.
            Can be set to:
                ALL_OPERATIONAL_ATTRIBUTES - ldap3 operation attributes wrapper
                [] - No attributes by default
                ["member","lastLogonDate"] - Custom attributes
        """
        self.server = Server(server_name, use_ssl=use_ssl, get_info=ALL)
        # auto_bind opens and binds the connection immediately (NTLM auth).
        self.conn = Connection(self.server, auto_bind=True, user=username, password=password, authentication=NTLM)
        # Search base: the server's first naming context (presumably the
        # domain root DN -- confirm for multi-partition servers).
        self.base_ou = self.server.info.naming_contexts[0]
        self.default_attributes = default_attributes
def get_adgroup(self, samaccountname="*", distinguishedname="*", attributes=False):
"""
Get an AD Group Object from a filter.
Can return multiple results using a wildcard '*'
Args:
samaccountname (str, optional): LDAP Property samaccountname. Defaults to "*".
distinguishedname (str, optional): LDAP Property distinguishedname.. Defaults to "*".
attributes (bool, optional): Custom attributes if not using defaults. Defaults to False.
Returns:
object: ldap3 attribute object of the group(s)
"""
if(attributes == False):
attributes = self.default_attributes
self.conn.search(
search_base=self.base_ou,
search_filter=f'(&(objectCategory=group)(samaccountname={samaccountname})(distinguishedname={distinguishedname}))',
search_scope='SUBTREE',
attributes = attributes
)
return self.conn.entries
def get_aduser(self, samaccountname="*", distinguishedname="*", attributes=False):
"""
Get an AD User Object from a filter.
Can return multiple results using a wildcard '*'
Args:
samaccountname (str, optional): LDAP Property samaccountname. Defaults to "*".
distinguishedname (str, optional): LDAP Property distinguishedname.. Defaults to "*".
attributes (bool, optional): Custom attributes if not using defaults. Defaults to False.
Returns:
object: ldap3 attribute object of the user(s)
"""
if(attributes == False):
attributes = self.default_attributes
self.conn.search(
search_base=self.base_ou,
search_filter=f'(&(objectCategory=user)(samaccountname={samaccountname})(distinguishedname={distinguishedname}))',
search_scope='SUBTREE',
attributes = attributes
)
return self.conn.entries
def expand_users_from_group(self, group, attributes=False):
"""
A handy function to quickly expand user attributes from a group object with the [member] attribute populated.
This is useful when you need to get additional attributes from a users in a group, but don't want to look them up for each.
This builds a single LDAP query for all users
Args:
group (group object): Group object with the [member] attribute populated
attributes (bool, optional): Custom attributes if not using defaults. Defaults to False.
Raises:
Exception: If [member] is not populated on the [group] argument, exception will be raised.
Returns:
object: List of AD users with their requested attributes
Examples:
1. Expand members from the group "Accounting"
sad = Simple_AD(server_name="server1.mydomain.com", username="mydomain.com\\jsmith", password="<PASSWORD>")
groups = sad.get_adgroup(samaccountname="Accounting")
users_from_group = sad.expand_users_from_group(groups[0])
"""
if not hasattr(group, 'member'):
raise Exception("Group must have attribute [member] populated.")
#Validate attributes to get
if(attributes == False):
attributes = self.default_attributes
#Build fast query in format: (|(distinguishedname=dn1)(distinguishedname=dn2)(...))
members_ldap_query = ""
for distinguishedname in group.member:
members_ldap_query += f"(distinguishedname={distinguishedname})"
members_ldap_query = "(|" + members_ldap_query + ")"
#Get the users
self.conn.search(
search_base=self.base_ou,
search_filter=members_ldap_query,
search_scope='SUBTREE',
attributes = attributes
)
#Return results
return self.conn.entries | [
"ldap3.Connection",
"ldap3.Server"
] | [((1692, 1742), 'ldap3.Server', 'Server', (['server_name'], {'use_ssl': 'use_ssl', 'get_info': 'ALL'}), '(server_name, use_ssl=use_ssl, get_info=ALL)\n', (1698, 1742), False, 'from ldap3 import Server, Connection, ALL, NTLM, ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES\n'), ((1763, 1861), 'ldap3.Connection', 'Connection', (['self.server'], {'auto_bind': '(True)', 'user': 'username', 'password': 'password', 'authentication': 'NTLM'}), '(self.server, auto_bind=True, user=username, password=password,\n authentication=NTLM)\n', (1773, 1861), False, 'from ldap3 import Server, Connection, ALL, NTLM, ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES\n')] |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 1.11.5 on 2017-12-27 07:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: introduces the ShowVersion model and adds
    draft-related fields to Template; the remaining operations widen/normalize
    the ``updator`` field across existing models.

    NOTE: this migration may already be applied in deployments — do not edit
    the operations themselves.
    """

    dependencies = [
        ('configuration', '0014_auto_20171225_1112'),
    ]

    operations = [
        # A named snapshot ("show version") of a template, linked by plain
        # integer IDs (template_id / real_version_id) rather than FKs.
        migrations.CreateModel(
            name='ShowVersion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creator', models.CharField(max_length=32, verbose_name='创建者')),
                ('updator', models.CharField(max_length=32, verbose_name='更新者')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('is_deleted', models.BooleanField(default=False)),
                ('deleted_time', models.DateTimeField(blank=True, null=True)),
                ('template_id', models.IntegerField(verbose_name='关联的模板 ID')),
                ('real_version_id', models.IntegerField(verbose_name='关联的VersionedEntity ID')),
                ('name', models.CharField(max_length=32, verbose_name='版本名称')),
            ],
        ),
        # Draft support on Template: content, last-saved time, and last editor.
        migrations.AddField(
            model_name='template',
            name='draft',
            field=models.TextField(default='', verbose_name='草稿'),
        ),
        migrations.AddField(
            model_name='template',
            name='draft_time',
            field=models.DateTimeField(blank=True, null=True, verbose_name='草稿更新时间'),
        ),
        migrations.AddField(
            model_name='template',
            name='draft_updator',
            field=models.CharField(default='', max_length=32, verbose_name='草稿更新者'),
        ),
        # Align the `updator` field definition across all existing models.
        migrations.AlterField(
            model_name='application',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        migrations.AlterField(
            model_name='configmap',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        # NOTE: 'deplpyment' reproduces the (misspelled) model name as it is
        # defined in the app's models.
        migrations.AlterField(
            model_name='deplpyment',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        migrations.AlterField(
            model_name='secret',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        migrations.AlterField(
            model_name='service',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        migrations.AlterField(
            model_name='template',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        migrations.AlterField(
            model_name='versionedentity',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        # A version name must be unique within its template.
        migrations.AlterUniqueTogether(
            name='showversion',
            unique_together=set([('template_id', 'name')]),
        ),
    ]
| [
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((2048, 2095), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""', 'verbose_name': '"""草稿"""'}), "(default='', verbose_name='草稿')\n", (2064, 2095), False, 'from django.db import migrations, models\n'), ((2221, 2287), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""草稿更新时间"""'}), "(blank=True, null=True, verbose_name='草稿更新时间')\n", (2241, 2287), False, 'from django.db import migrations, models\n'), ((2416, 2481), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(32)', 'verbose_name': '"""草稿更新者"""'}), "(default='', max_length=32, verbose_name='草稿更新者')\n", (2432, 2481), False, 'from django.db import migrations, models\n'), ((2609, 2660), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""更新者"""'}), "(max_length=32, verbose_name='更新者')\n", (2625, 2660), False, 'from django.db import migrations, models\n'), ((2786, 2837), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""更新者"""'}), "(max_length=32, verbose_name='更新者')\n", (2802, 2837), False, 'from django.db import migrations, models\n'), ((2964, 3015), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""更新者"""'}), "(max_length=32, verbose_name='更新者')\n", (2980, 3015), False, 'from django.db import migrations, models\n'), ((3138, 3189), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""更新者"""'}), "(max_length=32, verbose_name='更新者')\n", (3154, 3189), False, 'from django.db import migrations, models\n'), ((3313, 3364), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""更新者"""'}), "(max_length=32, verbose_name='更新者')\n", (3329, 3364), False, 'from django.db import migrations, models\n'), ((3489, 3540), 'django.db.models.CharField', 
'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""更新者"""'}), "(max_length=32, verbose_name='更新者')\n", (3505, 3540), False, 'from django.db import migrations, models\n'), ((3672, 3723), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""更新者"""'}), "(max_length=32, verbose_name='更新者')\n", (3688, 3723), False, 'from django.db import migrations, models\n'), ((1120, 1213), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1136, 1213), False, 'from django.db import migrations, models\n'), ((1240, 1291), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""创建者"""'}), "(max_length=32, verbose_name='创建者')\n", (1256, 1291), False, 'from django.db import migrations, models\n'), ((1322, 1373), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""更新者"""'}), "(max_length=32, verbose_name='更新者')\n", (1338, 1373), False, 'from django.db import migrations, models\n'), ((1404, 1443), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1424, 1443), False, 'from django.db import migrations, models\n'), ((1474, 1509), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1494, 1509), False, 'from django.db import migrations, models\n'), ((1543, 1577), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1562, 1577), False, 'from django.db import migrations, models\n'), ((1613, 1656), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1633, 1656), False, 'from django.db import migrations, 
models\n'), ((1691, 1735), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""关联的模板 ID"""'}), "(verbose_name='关联的模板 ID')\n", (1710, 1735), False, 'from django.db import migrations, models\n'), ((1774, 1831), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""关联的VersionedEntity ID"""'}), "(verbose_name='关联的VersionedEntity ID')\n", (1793, 1831), False, 'from django.db import migrations, models\n'), ((1859, 1911), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'verbose_name': '"""版本名称"""'}), "(max_length=32, verbose_name='版本名称')\n", (1875, 1911), False, 'from django.db import migrations, models\n')] |
# <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import random
import numpy as np
import pytest
from sklearn import exceptions
from sklearn.base import clone
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from mlxtend.classifier import EnsembleVoteClassifier
from mlxtend.data import iris_data
from mlxtend.utils import assert_raises
X, y = iris_data()  # module-level fixture shared by every test below
X = X[:, 1:3]  # restrict to two of the four iris feature columns
def test_EnsembleVoteClassifier():
    """Hard majority voting over three heterogeneous base models reaches
    the expected 5-fold cross-validation accuracy."""
    np.random.seed(123)
    members = [LogisticRegression(solver='liblinear', multi_class='ovr'),
               RandomForestClassifier(n_estimators=10),
               GaussianNB()]
    voter = EnsembleVoteClassifier(clfs=members, voting='hard')
    cv_scores = cross_val_score(voter, X, y, cv=5, scoring='accuracy')
    assert round(cv_scores.mean(), 2) == 0.94
def test_fit_base_estimators_false():
    """fit_base_estimators=False reuses already-fitted estimators as-is."""
    np.random.seed(123)
    members = [LogisticRegression(solver='liblinear', multi_class='ovr'),
               RandomForestClassifier(n_estimators=10),
               GaussianNB()]
    for estimator in members:
        estimator.fit(X, y)

    voter = EnsembleVoteClassifier(clfs=members,
                                  voting='hard',
                                  fit_base_estimators=False)
    voter.fit(X, y)
    assert round(voter.score(X, y), 2) == 0.97
def test_use_clones():
    """use_clones=True leaves the originals unfitted; False fits them in place."""
    np.random.seed(123)
    lr = LogisticRegression(solver='liblinear', multi_class='ovr')
    rf = RandomForestClassifier(n_estimators=10)
    nb = GaussianNB()

    # Cloning: the passed-in estimators must remain untouched (still unfitted).
    EnsembleVoteClassifier(clfs=[lr, rf, nb], use_clones=True).fit(X, y)
    expected_msg = ("This RandomForestClassifier instance is not fitted yet."
                    " Call 'fit' with appropriate arguments"
                    " before using this estimator.")
    assert_raises(exceptions.NotFittedError, expected_msg, rf.predict, X)

    # No cloning: the originals are fitted directly and become usable.
    EnsembleVoteClassifier(clfs=[lr, rf, nb], use_clones=False).fit(X, y)
    rf.predict(X)
def test_sample_weight():
    """No weights and unit weights give the same probabilities; random
    weights change them measurably."""
    def _fit_proba(sample_weight=None):
        # Re-seed so every run consumes the RNG identically.
        np.random.seed(123)
        ensemble = EnsembleVoteClassifier(
            clfs=[LogisticRegression(solver='liblinear', multi_class='ovr'),
                  RandomForestClassifier(n_estimators=10),
                  GaussianNB()],
            voting='hard')
        if sample_weight is None:
            return ensemble.fit(X, y).predict_proba(X)
        return ensemble.fit(X, y, sample_weight=sample_weight).predict_proba(X)

    prob_none = _fit_proba()
    prob_unit = _fit_proba(np.ones(len(y)))

    random.seed(87)
    rand_w = np.array([random.random() for _ in range(len(y))])
    prob_rand = _fit_proba(rand_w)

    diff_unit = np.max(np.abs(prob_none - prob_unit))
    diff_rand = np.max(np.abs(prob_unit - prob_rand))
    assert diff_unit < 1e-3, "max diff is %.4f" % diff_unit
    assert diff_rand > 1e-3, "max diff is %.4f" % diff_rand
def test_no_weight_support():
    """Passing sample_weight raises TypeError when a member (KNN) lacks it."""
    random.seed(87)
    weights = np.array([random.random() for _ in range(len(y))])
    ensemble = EnsembleVoteClassifier(
        clfs=[LogisticRegression(solver='liblinear', multi_class='ovr'),
              RandomForestClassifier(n_estimators=10),
              GaussianNB(),
              KNeighborsClassifier()],
        voting='hard')
    with pytest.raises(TypeError):
        ensemble.fit(X, y, sample_weight=weights)
def test_no_weight_support_with_no_weight():
    """An unweighted fit succeeds even when a member lacks sample_weight."""
    members = [LogisticRegression(solver='liblinear', multi_class='ovr'),
               RandomForestClassifier(n_estimators=10),
               GaussianNB(),
               KNeighborsClassifier()]
    EnsembleVoteClassifier(clfs=members, voting='hard').fit(X, y)
def test_1model_labels():
    """A one-member soft ensemble predicts the same labels as the model alone,
    with weights=None or weights=[1.]."""
    base = LogisticRegression(multi_class='multinomial',
                            solver='newton-cg', random_state=123)
    ens_no_weights = EnsembleVoteClassifier(clfs=[base], voting='soft', weights=None)
    ens_unit_weight = EnsembleVoteClassifier(clfs=[base], voting='soft', weights=[1.])

    pred_none = ens_no_weights.fit(X, y).predict(X)
    pred_unit = ens_unit_weight.fit(X, y).predict(X)
    pred_base = base.fit(X, y).predict(X)

    np.testing.assert_equal(pred_none, pred_unit)
    np.testing.assert_equal(pred_none, pred_base)
def test_1model_probas():
    """A one-member soft ensemble yields the same class probabilities as the
    model alone, with weights=None or weights=[1.]."""
    base = LogisticRegression(multi_class='multinomial',
                            solver='newton-cg', random_state=123)
    ens_no_weights = EnsembleVoteClassifier(clfs=[base], voting='soft', weights=None)
    ens_unit_weight = EnsembleVoteClassifier(clfs=[base], voting='soft', weights=[1.])

    proba_none = ens_no_weights.fit(X, y).predict_proba(X)
    proba_unit = ens_unit_weight.fit(X, y).predict_proba(X)
    proba_base = base.fit(X, y).predict_proba(X)

    np.testing.assert_almost_equal(proba_none, proba_unit, decimal=8)
    np.testing.assert_almost_equal(proba_none, proba_base, decimal=8)
def test_EnsembleVoteClassifier_weights():
    """Soft voting with unequal member weights reaches the expected CV score."""
    np.random.seed(123)
    members = [LogisticRegression(solver='liblinear', multi_class='ovr'),
               RandomForestClassifier(n_estimators=10),
               GaussianNB()]
    voter = EnsembleVoteClassifier(clfs=members,
                                  voting='soft',
                                  weights=[1, 2, 10])
    cv_scores = cross_val_score(voter, X, y, cv=5, scoring='accuracy')
    assert round(cv_scores.mean(), 2) == 0.93
def test_EnsembleVoteClassifier_gridsearch():
    """Grid search tunes nested estimator params via ``<name>__<param>`` keys.

    The deprecated ``iid=False`` argument was dropped: ``iid`` was removed
    from GridSearchCV in scikit-learn 0.24 and would raise a TypeError on
    current releases. With iris and cv=5 all folds are equal-sized, so the
    fold-averaged scores are unchanged.
    """
    clf1 = LogisticRegression(solver='liblinear',
                            multi_class='ovr',
                            random_state=1)
    clf2 = RandomForestClassifier(random_state=1)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], voting='soft')

    params = {'logisticregression__C': [1.0, 100.0],
              'randomforestclassifier__n_estimators': [20, 200]}

    grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
    X, y = iris_data()
    grid.fit(X, y)

    mean_scores = [round(s, 2) for s
                   in grid.cv_results_['mean_test_score']]
    assert mean_scores == [0.95, 0.96, 0.96, 0.95]
def test_EnsembleVoteClassifier_gridsearch_enumerate_names():
    """Duplicate estimator types get ``-1``/``-2`` suffixes in param names.

    The deprecated ``iid=False`` argument was dropped: ``iid`` was removed
    from GridSearchCV in scikit-learn 0.24 and would raise a TypeError on
    current releases.
    """
    clf1 = LogisticRegression(solver='liblinear',
                            multi_class='ovr',
                            random_state=1)
    clf2 = RandomForestClassifier(random_state=1)
    eclf = EnsembleVoteClassifier(clfs=[clf1, clf1, clf2])

    params = {'logisticregression-1__C': [1.0, 100.0],
              'logisticregression-2__C': [1.0, 100.0],
              'randomforestclassifier__n_estimators': [5, 20],
              'voting': ['hard', 'soft']}

    grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
    X, y = iris_data()
    grid = grid.fit(X, y)
def test_get_params():
    """get_params exposes the clfs list plus lower-cased sub-estimator names."""
    ensemble = EnsembleVoteClassifier(
        clfs=[KNeighborsClassifier(n_neighbors=1),
              RandomForestClassifier(random_state=1, n_estimators=10),
              GaussianNB()])
    top_level = sorted({name.split('__')[0] for name in ensemble.get_params()})
    expected = ['clfs',
                'fit_base_estimators',
                'gaussiannb',
                'kneighborsclassifier',
                'randomforestclassifier',
                'use_clones',
                'verbose',
                'voting',
                'weights']
    assert top_level == expected, top_level
def test_classifier_gridsearch():
    """The whole ``clfs`` list is itself grid-searchable as a parameter.

    The deprecated ``iid=False`` argument was dropped: ``iid`` was removed
    from GridSearchCV in scikit-learn 0.24 and would raise a TypeError on
    current releases.
    """
    clf1 = KNeighborsClassifier(n_neighbors=1)
    clf2 = RandomForestClassifier(random_state=1, n_estimators=10)
    clf3 = GaussianNB()
    eclf = EnsembleVoteClassifier(clfs=[clf1])

    params = {'clfs': [[clf1, clf1, clf1], [clf2, clf3]]}

    grid = GridSearchCV(estimator=eclf,
                 param_grid=params,
                 cv=5,
                 refit=True)
    grid.fit(X, y)

    # The two-member candidate list must win the search.
    assert len(grid.best_params_['clfs']) == 2
def test_string_labels_numpy_array():
    """Hard voting works when targets are a numpy array of strings."""
    np.random.seed(123)
    ensemble = EnsembleVoteClassifier(
        clfs=[LogisticRegression(solver='liblinear', multi_class='ovr'),
              RandomForestClassifier(n_estimators=10),
              GaussianNB()],
        voting='hard')
    # astype returns a fresh array, so y itself is never modified.
    y_str = y.astype(str)
    y_str[:50] = 'a'
    y_str[50:100] = 'b'
    y_str[100:150] = 'c'
    cv_scores = cross_val_score(ensemble, X, y_str, cv=5, scoring='accuracy')
    assert round(cv_scores.mean(), 2) == 0.94
def test_string_labels_python_list():
    """Hard voting works when targets are a plain Python list of strings."""
    np.random.seed(123)
    ensemble = EnsembleVoteClassifier(
        clfs=[LogisticRegression(solver='liblinear', multi_class='ovr'),
              RandomForestClassifier(n_estimators=10),
              GaussianNB()],
        voting='hard')
    y_str = ['a'] * 50 + ['b'] * 50 + ['c'] * 50
    cv_scores = cross_val_score(ensemble, X, y_str, cv=5, scoring='accuracy')
    assert round(cv_scores.mean(), 2) == 0.94
def test_clone():
    """sklearn.base.clone must be able to copy the ensemble estimator."""
    members = [LogisticRegression(solver='liblinear', multi_class='ovr'),
               RandomForestClassifier(n_estimators=10),
               GaussianNB()]
    ensemble = EnsembleVoteClassifier(clfs=members,
                                     voting='hard',
                                     fit_base_estimators=False)
    clone(ensemble)
| [
"mlxtend.data.iris_data",
"sklearn.model_selection.GridSearchCV",
"numpy.abs",
"numpy.testing.assert_equal",
"sklearn.base.clone",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.RandomForestClassifier",
"random.seed",
"sklearn.linear_model.LogisticRegression",
"mlxtend.classifier.Ense... | [((631, 642), 'mlxtend.data.iris_data', 'iris_data', ([], {}), '()\n', (640, 642), False, 'from mlxtend.data import iris_data\n'), ((699, 718), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (713, 718), True, 'import numpy as np\n'), ((730, 787), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (748, 787), False, 'from sklearn.linear_model import LogisticRegression\n'), ((799, 838), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (821, 838), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((850, 862), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (860, 862), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((874, 936), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""'}), "(clfs=[clf1, clf2, clf3], voting='hard')\n", (896, 936), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((951, 1004), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['eclf', 'X', 'y'], {'cv': '(5)', 'scoring': '"""accuracy"""'}), "(eclf, X, y, cv=5, scoring='accuracy')\n", (966, 1004), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((1241, 1260), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (1255, 1260), True, 'import numpy as np\n'), ((1272, 1329), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (1290, 1329), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1341, 1380), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', 
(1363, 1380), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1392, 1404), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1402, 1404), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((1475, 1568), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""', 'fit_base_estimators': '(False)'}), "(clfs=[clf1, clf2, clf3], voting='hard',\n fit_base_estimators=False)\n", (1497, 1568), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((1728, 1747), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (1742, 1747), True, 'import numpy as np\n'), ((1759, 1816), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (1777, 1816), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1858, 1897), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (1880, 1897), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1909, 1921), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1919, 1921), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((2033, 2225), 'mlxtend.utils.assert_raises', 'assert_raises', (['exceptions.NotFittedError', '"""This RandomForestClassifier instance is not fitted yet. Call \'fit\' with appropriate arguments before using this estimator."""', 'clf2.predict', 'X'], {}), '(exceptions.NotFittedError,\n "This RandomForestClassifier instance is not fitted yet. 
Call \'fit\' with appropriate arguments before using this estimator."\n , clf2.predict, X)\n', (2046, 2225), False, 'from mlxtend.utils import assert_raises\n'), ((2495, 2514), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (2509, 2514), True, 'import numpy as np\n'), ((2526, 2583), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (2544, 2583), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2595, 2634), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (2617, 2634), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2646, 2658), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (2656, 2658), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((2670, 2732), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""'}), "(clfs=[clf1, clf2, clf3], voting='hard')\n", (2692, 2732), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((2828, 2847), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (2842, 2847), True, 'import numpy as np\n'), ((2859, 2916), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (2877, 2916), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2928, 2967), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (2950, 2967), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2979, 2991), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (2989, 2991), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((3003, 3065), 
'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""'}), "(clfs=[clf1, clf2, clf3], voting='hard')\n", (3025, 3065), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((3157, 3172), 'random.seed', 'random.seed', (['(87)'], {}), '(87)\n', (3168, 3172), False, 'import random\n'), ((3236, 3255), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (3250, 3255), True, 'import numpy as np\n'), ((3267, 3324), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (3285, 3324), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3336, 3375), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (3358, 3375), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3387, 3399), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (3397, 3399), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((3411, 3473), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""'}), "(clfs=[clf1, clf2, clf3], voting='hard')\n", (3433, 3473), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((3766, 3781), 'random.seed', 'random.seed', (['(87)'], {}), '(87)\n', (3777, 3781), False, 'import random\n'), ((3852, 3909), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (3870, 3909), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3919, 3958), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (3941, 3958), False, 'from sklearn.ensemble import 
RandomForestClassifier\n'), ((3969, 3981), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (3979, 3981), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((3992, 4014), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (4012, 4014), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4026, 4090), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[logi, rf, gnb, knn]', 'voting': '"""hard"""'}), "(clfs=[logi, rf, gnb, knn], voting='hard')\n", (4048, 4090), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((4224, 4281), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (4242, 4281), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4291, 4330), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (4313, 4330), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4341, 4353), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (4351, 4353), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((4364, 4386), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (4384, 4386), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4398, 4462), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[logi, rf, gnb, knn]', 'voting': '"""hard"""'}), "(clfs=[logi, rf, gnb, knn], voting='hard')\n", (4420, 4462), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((4520, 4607), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'multi_class': '"""multinomial"""', 'solver': '"""newton-cg"""', 'random_state': '(123)'}), "(multi_class='multinomial', solver='newton-cg',\n random_state=123)\n", (4538, 4607), 
False, 'from sklearn.linear_model import LogisticRegression\n'), ((4649, 4712), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf]', 'voting': '"""soft"""', 'weights': 'None'}), "(clfs=[clf], voting='soft', weights=None)\n", (4671, 4712), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((4729, 4793), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf]', 'voting': '"""soft"""', 'weights': '[1.0]'}), "(clfs=[clf], voting='soft', weights=[1.0])\n", (4751, 4793), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((4928, 4969), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['pred_e1', 'pred_e2'], {}), '(pred_e1, pred_e2)\n', (4951, 4969), True, 'import numpy as np\n'), ((4974, 5015), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['pred_e1', 'pred_e3'], {}), '(pred_e1, pred_e3)\n', (4997, 5015), True, 'import numpy as np\n'), ((5054, 5141), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'multi_class': '"""multinomial"""', 'solver': '"""newton-cg"""', 'random_state': '(123)'}), "(multi_class='multinomial', solver='newton-cg',\n random_state=123)\n", (5072, 5141), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5183, 5246), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf]', 'voting': '"""soft"""', 'weights': 'None'}), "(clfs=[clf], voting='soft', weights=None)\n", (5205, 5246), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((5263, 5327), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf]', 'voting': '"""soft"""', 'weights': '[1.0]'}), "(clfs=[clf], voting='soft', weights=[1.0])\n", (5285, 5327), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((5480, 5539), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['pred_e1', 'pred_e2'], {'decimal': 
'(8)'}), '(pred_e1, pred_e2, decimal=8)\n', (5510, 5539), True, 'import numpy as np\n'), ((5544, 5603), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['pred_e1', 'pred_e3'], {'decimal': '(8)'}), '(pred_e1, pred_e3, decimal=8)\n', (5574, 5603), True, 'import numpy as np\n'), ((5654, 5673), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (5668, 5673), True, 'import numpy as np\n'), ((5685, 5742), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (5703, 5742), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5754, 5793), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (5776, 5793), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5805, 5817), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (5815, 5817), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((5829, 5916), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""soft"""', 'weights': '[1, 2, 10]'}), "(clfs=[clf1, clf2, clf3], voting='soft', weights=[1, \n 2, 10])\n", (5851, 5916), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((5994, 6047), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['eclf', 'X', 'y'], {'cv': '(5)', 'scoring': '"""accuracy"""'}), "(eclf, X, y, cv=5, scoring='accuracy')\n", (6009, 6047), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((6300, 6373), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""', 'random_state': '(1)'}), "(solver='liblinear', multi_class='ovr', random_state=1)\n", (6318, 6373), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6445, 6483), 
'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (6467, 6483), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((6495, 6507), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (6505, 6507), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((6519, 6581), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""soft"""'}), "(clfs=[clf1, clf2, clf3], voting='soft')\n", (6541, 6581), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((6713, 6777), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'eclf', 'param_grid': 'params', 'cv': '(5)', 'iid': '(False)'}), '(estimator=eclf, param_grid=params, cv=5, iid=False)\n', (6725, 6777), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((6790, 6801), 'mlxtend.data.iris_data', 'iris_data', ([], {}), '()\n', (6799, 6801), False, 'from mlxtend.data import iris_data\n'), ((7046, 7119), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""', 'random_state': '(1)'}), "(solver='liblinear', multi_class='ovr', random_state=1)\n", (7064, 7119), False, 'from sklearn.linear_model import LogisticRegression\n'), ((7191, 7229), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (7213, 7229), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7241, 7288), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf1, clf2]'}), '(clfs=[clf1, clf1, clf2])\n', (7263, 7288), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((7517, 7581), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'eclf', 'param_grid': 'params', 'cv': '(5)', 'iid': '(False)'}), '(estimator=eclf, 
param_grid=params, cv=5, iid=False)\n', (7529, 7581), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((7594, 7605), 'mlxtend.data.iris_data', 'iris_data', ([], {}), '()\n', (7603, 7605), False, 'from mlxtend.data import iris_data\n'), ((7668, 7703), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (7688, 7703), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((7715, 7770), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(1)', 'n_estimators': '(10)'}), '(random_state=1, n_estimators=10)\n', (7737, 7770), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7782, 7794), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (7792, 7794), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((7806, 7853), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]'}), '(clfs=[clf1, clf2, clf3])\n', (7828, 7853), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((8276, 8311), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (8296, 8311), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((8323, 8378), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(1)', 'n_estimators': '(10)'}), '(random_state=1, n_estimators=10)\n', (8345, 8378), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((8390, 8402), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (8400, 8402), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((8414, 8449), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1]'}), '(clfs=[clf1])\n', (8436, 8449), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((8521, 8597), 
'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'eclf', 'param_grid': 'params', 'iid': '(False)', 'cv': '(5)', 'refit': '(True)'}), '(estimator=eclf, param_grid=params, iid=False, cv=5, refit=True)\n', (8533, 8597), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((8805, 8824), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (8819, 8824), True, 'import numpy as np\n'), ((8836, 8893), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (8854, 8893), False, 'from sklearn.linear_model import LogisticRegression\n'), ((8905, 8944), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (8927, 8944), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((8956, 8968), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (8966, 8968), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((8980, 9042), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""'}), "(clfs=[clf1, clf2, clf3], voting='hard')\n", (9002, 9042), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((9179, 9236), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['eclf', 'X', 'y_str'], {'cv': '(5)', 'scoring': '"""accuracy"""'}), "(eclf, X, y_str, cv=5, scoring='accuracy')\n", (9194, 9236), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((9473, 9492), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (9487, 9492), True, 'import numpy as np\n'), ((9504, 9561), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (9522, 9561), False, 'from 
sklearn.linear_model import LogisticRegression\n'), ((9573, 9612), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (9595, 9612), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((9624, 9636), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (9634, 9636), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((9648, 9710), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""'}), "(clfs=[clf1, clf2, clf3], voting='hard')\n", (9670, 9710), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((9845, 9902), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['eclf', 'X', 'y_str'], {'cv': '(5)', 'scoring': '"""accuracy"""'}), "(eclf, X, y_str, cv=5, scoring='accuracy')\n", (9860, 9902), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((10127, 10184), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (10145, 10184), False, 'from sklearn.linear_model import LogisticRegression\n'), ((10196, 10235), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (10218, 10235), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((10247, 10259), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (10257, 10259), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((10271, 10364), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'voting': '"""hard"""', 'fit_base_estimators': '(False)'}), "(clfs=[clf1, clf2, clf3], voting='hard',\n fit_base_estimators=False)\n", (10293, 10364), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((10433, 10444), 
'sklearn.base.clone', 'clone', (['eclf'], {}), '(eclf)\n', (10438, 10444), False, 'from sklearn.base import clone\n'), ((3556, 3577), 'numpy.abs', 'np.abs', (['(prob1 - prob2)'], {}), '(prob1 - prob2)\n', (3562, 3577), True, 'import numpy as np\n'), ((3599, 3620), 'numpy.abs', 'np.abs', (['(prob2 - prob3)'], {}), '(prob2 - prob3)\n', (3605, 3620), True, 'import numpy as np\n'), ((4100, 4124), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4113, 4124), False, 'import pytest\n'), ((1926, 1990), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'use_clones': '(True)'}), '(clfs=[clf1, clf2, clf3], use_clones=True)\n', (1948, 1990), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((2318, 2383), 'mlxtend.classifier.EnsembleVoteClassifier', 'EnsembleVoteClassifier', ([], {'clfs': '[clf1, clf2, clf3]', 'use_clones': '(False)'}), '(clfs=[clf1, clf2, clf3], use_clones=False)\n', (2340, 2383), False, 'from mlxtend.classifier import EnsembleVoteClassifier\n'), ((3191, 3206), 'random.random', 'random.random', ([], {}), '()\n', (3204, 3206), False, 'import random\n'), ((3800, 3815), 'random.random', 'random.random', ([], {}), '()\n', (3813, 3815), False, 'import random\n')] |
from django import shortcuts
from django.db import models
# Create your models here.
class Link(models.Model):
url = models.CharField(max_length=1000)
shortUrl = models.CharField(max_length=20)
def __str__(self):
return self.shortUrl | [
"django.db.models.CharField"
] | [((123, 156), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)'}), '(max_length=1000)\n', (139, 156), False, 'from django.db import models\n'), ((172, 203), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (188, 203), False, 'from django.db import models\n')] |
import re
from django.shortcuts import render
from django.utils.http import urlunquote, urlquote
from django.contrib import messages
from sequenceSearch.tasks import blast
from django.http import Http404
from model_utils.managers import InheritanceManager
from django.core.cache import cache
from pks.models import AT, KR, DH, ER, cMT, oMT, TE, Subunit, Domain
import json
from io import StringIO
import pandas as pd
from itertools import chain
import os
from django.conf import settings
def search(request):
timeTaken = 0
if request.method != 'POST':
# if not a post, show the search form
if 'aainput' in request.GET:
# prefill the search form with a referring AA sequence
aainput = urlunquote(request.GET['aainput'])
messages.success(request, 'Imported AA sequence from previous page')
elif 'subunit' in request.GET:
# prefill the search form with a referring PKS subunit id
subunitid = int(request.GET['subunit'])
assert 0 <= subunitid
subunit = Subunit.objects.get(id=subunitid)
aainput = subunit.sequence
messages.success(request,
'Imported sequence for PKS cluster %s subunit %s'
% (subunit.cluster.description, subunit.name))
else:
aainput = ''
context = {'aainput': aainput}
return render(request, 'sequencesearch.html', context)
elif 'aainput' in request.POST:
# if this is a post, validate the input and
# run alignment
try:
# validate all inputs
searchDatabase = str(request.POST['searchDatabase'])
maxHits = int(request.POST['maxHits'])
assert 1 <= maxHits <= 10000
inputs = request.POST['aainput']
evalue = float(request.POST['evalue'])
assert 0.0 <= evalue <= 10.0
showAllDomains = int(request.POST['showAllDomains'])
assert 0 <= showAllDomains <= 1
inputs = inputs.strip()
if len(re.findall('>.*?\n', inputs)) >= 2:
messages.error(request,
'Error: Multiple queries detected, please remove until only one query is present')
return render(request, 'sequencesearch.html')
inputs = re.sub('^>.*?\n', '', inputs)
inputs = re.sub('\s', '', inputs)
if len(inputs) == 0:
messages.error(request, 'Error: Empty Query')
return render(request, 'sequencesearch.html')
if len(inputs) > 50000:
messages.error(request, 'Error: max query size is 50,000 residues')
return render(request, 'sequencesearch.html')
# use alignment cache if it exists
alignments = cache.get((inputs, evalue, maxHits, showAllDomains, searchDatabase))
# run Blast if there is no cached alignment
if alignments is None:
if searchDatabase == "reviewed":
db=os.path.join(
settings.BASE_DIR,
'pipeline', 'data', 'blast', 'clustercad_subunits_reviewed',
)
else:
db=os.path.join(
settings.BASE_DIR,
'pipeline', 'data', 'blast', 'clustercad_subunits_all',
)
results = blast.delay(query=inputs,
evalue=evalue, max_target_seqs=maxHits, sortOutput=True, database=db)
results, timeTaken, queries_found = results.get()
#If no queries found, then no hits
if not queries_found:
messages.error(request, 'No hits - please refine query!')
return render(request, 'sequencesearch.html')
#Read from jsonized pandas dataframe
df = pd.read_json(results)
#Get the subunits and process domains and modules
subjectacc_separater = lambda x: Subunit.objects.get(id=x.split('_')[1], cluster__mibigAccession=x.split('_')[0])
df['subunit'] = df['subject acc.ver'].map(subjectacc_separater)
domain_getter = lambda x: Domain.objects.filter(module__subunit=x['subunit'], stop__gte=x['s. start'], start__lte=x['s. end']).select_subclasses().order_by('start')
df['domains'] = df.apply(domain_getter, axis=1)
module_getter = lambda x: list(set([domain.module for domain in x['domains']]))
df['modules'] = df.apply(module_getter, axis=1)
#User option to show all domains
if showAllDomains:
show_all_domains_func = lambda x: [{'module': module, 'domains': list(Domain.objects.filter(module=module).select_subclasses().order_by('start'))} for module in x['modules']]
df['modules'] = df.apply(show_all_domains_func, axis=1)
#Other option to show only relevant domains
else:
module_dict_getter = lambda x: [{'module': module, 'domains': list(x['domains'].filter(module=module))} for module in x['modules']]
df['modules'] = df.apply(module_dict_getter, axis=1)
#Keep useful columns only
df = df[['subunit','modules','q. start','q. end','s. start','s. end','evalue','bit score','domains']]
df = df.sort_values('bit score', ascending=False)
alignments = df
#Set cache
cache.set((inputs, evalue, maxHits, showAllDomains, searchDatabase), alignments,
60 * 60 * 24 * 7) # cache for one week
except ValueError:
messages.error(request, 'Error: Invalid query!')
return render(request, 'sequencesearch.html')
else:
raise Http404
# get domain options to display in UI
get_all_domains = lambda x: [domain for module in x['modules'] for domain in module['domains']]
domains = alignments.apply(get_all_domains, axis=1).tolist()
#flatten the list
domains = list(chain.from_iterable(domains))
#Extract desired types for html showing
ats = list(filter(lambda d: isinstance(d, AT), domains))
atsubstrates = list(set([at.substrate for at in ats]))
krs = list(filter(lambda d: isinstance(d, KR), domains))
krtypes = list(set([kr.type for kr in krs]))
boolDomains = []
for domain in (DH, ER, cMT, oMT):
thesedomains = list(filter(lambda d: isinstance(d, domain), domains))
typelist = list(set([str(d) for d in thesedomains]))
if len(typelist) > 0:
boolDomains.append((domain.__name__, typelist))
tes = list(filter(lambda d: isinstance(d, TE), domains))
tetypes = list(set([str(te) for te in tes]))
alignments = alignments.values.tolist()
# create context dict of all results to show the user
context = {
'alignments': alignments,
'queryResidues': len(inputs),
'evalue': str(evalue),
'maxHits': str(maxHits),
'timeTaken': timeTaken,
'showAllDomains': showAllDomains,
'atsubstrates': atsubstrates,
'krtypes': krtypes,
'boolDomains': boolDomains,
'tetypes': tetypes,
'searchDatabase': str(searchDatabase),
}
return render(request, 'sequenceresult.html', context)
| [
"django.shortcuts.render",
"sequenceSearch.tasks.blast.delay",
"django.utils.http.urlunquote",
"django.contrib.messages.error",
"os.path.join",
"django.contrib.messages.success",
"itertools.chain.from_iterable",
"pks.models.Subunit.objects.get",
"re.sub",
"re.findall",
"pks.models.Domain.objects... | [((7502, 7549), 'django.shortcuts.render', 'render', (['request', '"""sequenceresult.html"""', 'context'], {}), "(request, 'sequenceresult.html', context)\n", (7508, 7549), False, 'from django.shortcuts import render\n'), ((1404, 1451), 'django.shortcuts.render', 'render', (['request', '"""sequencesearch.html"""', 'context'], {}), "(request, 'sequencesearch.html', context)\n", (1410, 1451), False, 'from django.shortcuts import render\n'), ((6271, 6299), 'itertools.chain.from_iterable', 'chain.from_iterable', (['domains'], {}), '(domains)\n', (6290, 6299), False, 'from itertools import chain\n'), ((735, 769), 'django.utils.http.urlunquote', 'urlunquote', (["request.GET['aainput']"], {}), "(request.GET['aainput'])\n", (745, 769), False, 'from django.utils.http import urlunquote, urlquote\n'), ((782, 850), 'django.contrib.messages.success', 'messages.success', (['request', '"""Imported AA sequence from previous page"""'], {}), "(request, 'Imported AA sequence from previous page')\n", (798, 850), False, 'from django.contrib import messages\n'), ((1069, 1102), 'pks.models.Subunit.objects.get', 'Subunit.objects.get', ([], {'id': 'subunitid'}), '(id=subunitid)\n', (1088, 1102), False, 'from pks.models import AT, KR, DH, ER, cMT, oMT, TE, Subunit, Domain\n'), ((1154, 1280), 'django.contrib.messages.success', 'messages.success', (['request', "('Imported sequence for PKS cluster %s subunit %s' % (subunit.cluster.\n description, subunit.name))"], {}), "(request, 'Imported sequence for PKS cluster %s subunit %s' %\n (subunit.cluster.description, subunit.name))\n", (1170, 1280), False, 'from django.contrib import messages\n'), ((2346, 2375), 're.sub', 're.sub', (['"""^>.*?\n"""', '""""""', 'inputs'], {}), "('^>.*?\\n', '', inputs)\n", (2352, 2375), False, 'import re\n'), ((2397, 2422), 're.sub', 're.sub', (['"""\\\\s"""', '""""""', 'inputs'], {}), "('\\\\s', '', inputs)\n", (2403, 2422), False, 'import re\n'), ((2835, 2903), 
'django.core.cache.cache.get', 'cache.get', (['(inputs, evalue, maxHits, showAllDomains, searchDatabase)'], {}), '((inputs, evalue, maxHits, showAllDomains, searchDatabase))\n', (2844, 2903), False, 'from django.core.cache import cache\n'), ((2135, 2250), 'django.contrib.messages.error', 'messages.error', (['request', '"""Error: Multiple queries detected, please remove until only one query is present"""'], {}), "(request,\n 'Error: Multiple queries detected, please remove until only one query is present'\n )\n", (2149, 2250), False, 'from django.contrib import messages\n'), ((2286, 2324), 'django.shortcuts.render', 'render', (['request', '"""sequencesearch.html"""'], {}), "(request, 'sequencesearch.html')\n", (2292, 2324), False, 'from django.shortcuts import render\n'), ((2471, 2516), 'django.contrib.messages.error', 'messages.error', (['request', '"""Error: Empty Query"""'], {}), "(request, 'Error: Empty Query')\n", (2485, 2516), False, 'from django.contrib import messages\n'), ((2540, 2578), 'django.shortcuts.render', 'render', (['request', '"""sequencesearch.html"""'], {}), "(request, 'sequencesearch.html')\n", (2546, 2578), False, 'from django.shortcuts import render\n'), ((2631, 2698), 'django.contrib.messages.error', 'messages.error', (['request', '"""Error: max query size is 50,000 residues"""'], {}), "(request, 'Error: max query size is 50,000 residues')\n", (2645, 2698), False, 'from django.contrib import messages\n'), ((2722, 2760), 'django.shortcuts.render', 'render', (['request', '"""sequencesearch.html"""'], {}), "(request, 'sequencesearch.html')\n", (2728, 2760), False, 'from django.shortcuts import render\n'), ((3488, 3587), 'sequenceSearch.tasks.blast.delay', 'blast.delay', ([], {'query': 'inputs', 'evalue': 'evalue', 'max_target_seqs': 'maxHits', 'sortOutput': '(True)', 'database': 'db'}), '(query=inputs, evalue=evalue, max_target_seqs=maxHits,\n sortOutput=True, database=db)\n', (3499, 3587), False, 'from sequenceSearch.tasks import blast\n'), 
((3980, 4001), 'pandas.read_json', 'pd.read_json', (['results'], {}), '(results)\n', (3992, 4001), True, 'import pandas as pd\n'), ((5703, 5805), 'django.core.cache.cache.set', 'cache.set', (['(inputs, evalue, maxHits, showAllDomains, searchDatabase)', 'alignments', '(60 * 60 * 24 * 7)'], {}), '((inputs, evalue, maxHits, showAllDomains, searchDatabase),\n alignments, 60 * 60 * 24 * 7)\n', (5712, 5805), False, 'from django.core.cache import cache\n'), ((5882, 5930), 'django.contrib.messages.error', 'messages.error', (['request', '"""Error: Invalid query!"""'], {}), "(request, 'Error: Invalid query!')\n", (5896, 5930), False, 'from django.contrib import messages\n'), ((5950, 5988), 'django.shortcuts.render', 'render', (['request', '"""sequencesearch.html"""'], {}), "(request, 'sequencesearch.html')\n", (5956, 5988), False, 'from django.shortcuts import render\n'), ((2083, 2111), 're.findall', 're.findall', (['""">.*?\n"""', 'inputs'], {}), "('>.*?\\n', inputs)\n", (2093, 2111), False, 'import re\n'), ((3068, 3164), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""pipeline"""', '"""data"""', '"""blast"""', '"""clustercad_subunits_reviewed"""'], {}), "(settings.BASE_DIR, 'pipeline', 'data', 'blast',\n 'clustercad_subunits_reviewed')\n", (3080, 3164), False, 'import os\n'), ((3290, 3381), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""pipeline"""', '"""data"""', '"""blast"""', '"""clustercad_subunits_all"""'], {}), "(settings.BASE_DIR, 'pipeline', 'data', 'blast',\n 'clustercad_subunits_all')\n", (3302, 3381), False, 'import os\n'), ((3781, 3838), 'django.contrib.messages.error', 'messages.error', (['request', '"""No hits - please refine query!"""'], {}), "(request, 'No hits - please refine query!')\n", (3795, 3838), False, 'from django.contrib import messages\n'), ((3866, 3904), 'django.shortcuts.render', 'render', (['request', '"""sequencesearch.html"""'], {}), "(request, 'sequencesearch.html')\n", (3872, 3904), False, 'from django.shortcuts 
import render\n'), ((4338, 4442), 'pks.models.Domain.objects.filter', 'Domain.objects.filter', ([], {'module__subunit': "x['subunit']", 'stop__gte': "x['s. start']", 'start__lte': "x['s. end']"}), "(module__subunit=x['subunit'], stop__gte=x['s. start'],\n start__lte=x['s. end'])\n", (4359, 4442), False, 'from pks.models import AT, KR, DH, ER, cMT, oMT, TE, Subunit, Domain\n'), ((4894, 4930), 'pks.models.Domain.objects.filter', 'Domain.objects.filter', ([], {'module': 'module'}), '(module=module)\n', (4915, 4930), False, 'from pks.models import AT, KR, DH, ER, cMT, oMT, TE, Subunit, Domain\n')] |
import dash_core_components as dcc
import dash_html_components as html
def navbar(*args, **kwargs):
"""
"""
return html.Div(children=[
html.Div(children=[
dcc.Link(href="/", children=[
html.I(className="fab fa-earlybirds mr-3"),
html.Span(children='Tanager', className="font-semibold")
]),
], className='mt-8 text-white space-x-5 text-2xl mx-2'),
html.Div(children=[
dcc.Input(
id="experiment-filter",
name="experiment-filter",
type="text",
placeholder="Filter by name",
className="w-2/3 focus:ring-4 focus:ring-blue-300 py-2 px-4 rounded-full",
),
html.Button(id='dir-refresh', className='text-white active:text-blue-500', title="Refresh expreiment list",
children=[
html.I(className='fas fa-redo-alt')
]),
], className='flex justify-around my-4'),
html.Nav(*args, className="overflow-y-auto h-5/6", **kwargs)
], className='w-52 lg:w-64 bg-gray-900 flex flex-col flex-none text-center h-auto'
)
def navbar_item(*args, **kwargs):
"""
"""
children = kwargs.pop('children', [])
children.append(*args)
children.insert(0, html.I(className='fas fa-chart-bar mx-3'))
return dcc.Link(
className='flex items-center py-2 px-6 text-gray-500 hover:bg-gray-700 hover:bg-opacity-25 hover:text-gray-100',
children=children,
**kwargs
)
def graph_panel(*args, **kwargs):
classname = kwargs.pop('className', '') + ' flex flex-col items-center px-5 py-6 shadow-lg rounded-xl bg-white'
return html.Section(*args, className=classname, style={'min-height': '30rem'}, **kwargs)
def get_default_page(config):
return html.Div(children=[
html.H1(config['title'], className="text-6xl font-bold alert-heading"),
html.H2(
config['description'], # "Tanager allows you to visualize Inspyred. "
className='text-2xl text-gray-400 ml-10'
),
html.Hr(className='border border-black'),
html.P(
"Please select the project from the left navigation to get started",
className="mb-0",
)
], className='mt-40'
)
| [
"dash_html_components.Hr",
"dash_html_components.Nav",
"dash_html_components.Section",
"dash_html_components.I",
"dash_core_components.Link",
"dash_html_components.Span",
"dash_html_components.H2",
"dash_html_components.H1",
"dash_core_components.Input",
"dash_html_components.P"
] | [((1408, 1568), 'dash_core_components.Link', 'dcc.Link', ([], {'className': '"""flex items-center py-2 px-6 text-gray-500 hover:bg-gray-700 hover:bg-opacity-25 hover:text-gray-100"""', 'children': 'children'}), "(className=\n 'flex items-center py-2 px-6 text-gray-500 hover:bg-gray-700 hover:bg-opacity-25 hover:text-gray-100'\n , children=children, **kwargs)\n", (1416, 1568), True, 'import dash_core_components as dcc\n'), ((1752, 1838), 'dash_html_components.Section', 'html.Section', (['*args'], {'className': 'classname', 'style': "{'min-height': '30rem'}"}), "(*args, className=classname, style={'min-height': '30rem'}, **\n kwargs)\n", (1764, 1838), True, 'import dash_html_components as html\n'), ((1353, 1394), 'dash_html_components.I', 'html.I', ([], {'className': '"""fas fa-chart-bar mx-3"""'}), "(className='fas fa-chart-bar mx-3')\n", (1359, 1394), True, 'import dash_html_components as html\n'), ((1054, 1114), 'dash_html_components.Nav', 'html.Nav', (['*args'], {'className': '"""overflow-y-auto h-5/6"""'}), "(*args, className='overflow-y-auto h-5/6', **kwargs)\n", (1062, 1114), True, 'import dash_html_components as html\n'), ((1905, 1975), 'dash_html_components.H1', 'html.H1', (["config['title']"], {'className': '"""text-6xl font-bold alert-heading"""'}), "(config['title'], className='text-6xl font-bold alert-heading')\n", (1912, 1975), True, 'import dash_html_components as html\n'), ((1985, 2057), 'dash_html_components.H2', 'html.H2', (["config['description']"], {'className': '"""text-2xl text-gray-400 ml-10"""'}), "(config['description'], className='text-2xl text-gray-400 ml-10')\n", (1992, 2057), True, 'import dash_html_components as html\n'), ((2149, 2189), 'dash_html_components.Hr', 'html.Hr', ([], {'className': '"""border border-black"""'}), "(className='border border-black')\n", (2156, 2189), True, 'import dash_html_components as html\n'), ((2199, 2296), 'dash_html_components.P', 'html.P', (['"""Please select the project from the left navigation to 
get started"""'], {'className': '"""mb-0"""'}), "('Please select the project from the left navigation to get started',\n className='mb-0')\n", (2205, 2296), True, 'import dash_html_components as html\n'), ((475, 661), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""experiment-filter"""', 'name': '"""experiment-filter"""', 'type': '"""text"""', 'placeholder': '"""Filter by name"""', 'className': '"""w-2/3 focus:ring-4 focus:ring-blue-300 py-2 px-4 rounded-full"""'}), "(id='experiment-filter', name='experiment-filter', type='text',\n placeholder='Filter by name', className=\n 'w-2/3 focus:ring-4 focus:ring-blue-300 py-2 px-4 rounded-full')\n", (484, 661), True, 'import dash_core_components as dcc\n'), ((237, 279), 'dash_html_components.I', 'html.I', ([], {'className': '"""fab fa-earlybirds mr-3"""'}), "(className='fab fa-earlybirds mr-3')\n", (243, 279), True, 'import dash_html_components as html\n'), ((297, 353), 'dash_html_components.Span', 'html.Span', ([], {'children': '"""Tanager"""', 'className': '"""font-semibold"""'}), "(children='Tanager', className='font-semibold')\n", (306, 353), True, 'import dash_html_components as html\n'), ((932, 967), 'dash_html_components.I', 'html.I', ([], {'className': '"""fas fa-redo-alt"""'}), "(className='fas fa-redo-alt')\n", (938, 967), True, 'import dash_html_components as html\n')] |
# Generated by Django 2.1.1 on 2018-10-04 12:35
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('album_name', models.CharField(max_length=30)),
('uploaded_on', models.DateField(default=datetime.datetime(2018, 10, 4, 12, 35, 4, 306720))),
('album_logo', models.FileField(upload_to='')),
('album_genre', models.CharField(max_length=30)),
('album_artist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='albums', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('song_name', models.CharField(max_length=40)),
('song_file', models.FileField(upload_to='')),
('song_album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='songs', to='music_nation.Album')),
],
),
]
| [
"datetime.datetime",
"django.db.models.ForeignKey",
"django.db.models.FileField",
"django.db.models.AutoField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((263, 320), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (294, 320), False, 'from django.db import migrations, models\n'), ((450, 543), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (466, 543), False, 'from django.db import migrations, models\n'), ((573, 604), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (589, 604), False, 'from django.db import migrations, models\n'), ((748, 778), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '""""""'}), "(upload_to='')\n", (764, 778), False, 'from django.db import migrations, models\n'), ((813, 844), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (829, 844), False, 'from django.db import migrations, models\n'), ((880, 999), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""albums"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='albums', to=settings.AUTH_USER_MODEL)\n", (897, 999), False, 'from django.db import migrations, models\n'), ((1124, 1217), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1140, 1217), False, 'from django.db import migrations, models\n'), ((1246, 1277), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (1262, 1277), False, 'from django.db import migrations, models\n'), ((1310, 1340), 
'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '""""""'}), "(upload_to='')\n", (1326, 1340), False, 'from django.db import migrations, models\n'), ((1374, 1488), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""songs"""', 'to': '"""music_nation.Album"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='songs', to='music_nation.Album')\n", (1391, 1488), False, 'from django.db import migrations, models\n'), ((664, 713), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(10)', '(4)', '(12)', '(35)', '(4)', '(306720)'], {}), '(2018, 10, 4, 12, 35, 4, 306720)\n', (681, 713), False, 'import datetime\n')] |
from unittest import TestCase
from yawast.scanner.plugins.dns.caa import _get_cname
from dns import resolver
class TestGetCname(TestCase):
    """Integration test for the CAA plugin's CNAME resolution helper."""

    def test__get_cname(self):
        """Resolve a known CNAME record through public DNS resolvers."""
        dns_resolver = resolver.Resolver()
        dns_resolver.nameservers = ["1.1.1.1", "8.8.8.8"]
        result = _get_cname("cntest.adamcaudill.com", dns_resolver)
        self.assertEqual("www.google.com.", result)
| [
"yawast.scanner.plugins.dns.caa._get_cname",
"dns.resolver.Resolver"
] | [((187, 206), 'dns.resolver.Resolver', 'resolver.Resolver', ([], {}), '()\n', (204, 206), False, 'from dns import resolver\n'), ((273, 315), 'yawast.scanner.plugins.dns.caa._get_cname', '_get_cname', (['"""cntest.adamcaudill.com"""', 'resv'], {}), "('cntest.adamcaudill.com', resv)\n", (283, 315), False, 'from yawast.scanner.plugins.dns.caa import _get_cname\n')] |
from flask import *
import pdfsplitter
app=Flask(__name__)
@app.route("/")
def upload():
    # Landing page: serves the form used to pick a PDF and a start/end page range.
    return render_template("file_upload.html")
@app.route("/success",methods=["POST"])
def success():
    # Handles the upload form POST. The page range and uploaded file name
    # are stored as attributes on this function object, which acts as
    # ad-hoc global state read later by cropper() and download().
    # NOTE(review): this state is process-wide and not request-safe —
    # concurrent users would overwrite each other; confirm single-user use.
    success.start_page=int(request.form['start'])
    success.end_page=int(request.form['end'])
    f=request.files['file']
    success.file_name=f.filename
    # Save the upload under its original, client-supplied filename.
    f.save(success.file_name)
    return render_template("success.html",start=success.start_page,
    end=success.end_page,name=success.file_name)
@app.route("/convert")
def cropper():
    # Performs the actual split using the state stored by success();
    # assumes /success was hit earlier in this process — TODO confirm.
    pdfsplitter.cropper(success.start_page,success.end_page,success.file_name)
    return render_template("download.html")
@app.route("/download")
def download():
    # Presumably pdfsplitter.cropper writes "<basename>cropped.pdf";
    # verify this naming convention against pdfsplitter's implementation.
    filename=success.file_name.split(".")[0]+"cropped.pdf"
    return send_file(filename,as_attachment=True)
if __name__ == "__main__":
app.run(debug=True)
| [
"pdfsplitter.cropper"
] | [((587, 663), 'pdfsplitter.cropper', 'pdfsplitter.cropper', (['success.start_page', 'success.end_page', 'success.file_name'], {}), '(success.start_page, success.end_page, success.file_name)\n', (606, 663), False, 'import pdfsplitter\n')] |
from altair.vegalite.v4 import schema
from altair.vegalite.v4.schema.channels import Tooltip
import pandas as pd
import altair as alt
import numpy as np
from queries import Pomodoro
THEME = 'magma'
# TO DO: Add docstings where needed
def get_current_date():
    """Return the current date as a ``(year, month, day)`` tuple.

    Uses pandas to obtain "now" so the behaviour matches the rest of
    the module's date handling.
    """
    now = pd.to_datetime('now')
    return (now.year, now.month, now.day)
# POMODORO CHARTS
def monthly_chart(year, month, df):
    """Build an Altair view of one month's pomodoros.

    Returns a horizontal concatenation of a stacked-bar chart (daily
    pomodoro counts per project) and a scatter chart (start time of each
    pomodoro, sized by its calification).

    NOTE(review): assumes *df* is indexed by date (``.loc[f'{year}/{month}']``)
    and has full_date / project / category / pomodoro_* columns — confirm
    against the Pomodoro query.
    """
    # Filter the frame down to the requested year/month slice.
    df_copy = df.copy()
    filtered = df_copy.loc[f'{year}/{month}']
    month_name = filtered.full_date.dt.month_name()
    month_name = month_name.iloc[0]
    base = alt.Chart(
        filtered, title=f'Productivity in {month_name}').mark_circle().encode(
            x=alt.X('monthdate(full_date):O',
                  title='Days',
                  axis=alt.Axis(labelAngle=-90)),
            y=alt.Y('hoursminutes(full_date)', title='Daily hours'),
        ).properties(width=400, height=200)
    stack = base.mark_bar().encode(y=alt.Y('count()', title='Daily pomodoros'),
                                   color=alt.Color('project',
                                                   title='Project names'),
                                   tooltip=[
                                       alt.Tooltip('category',
                                                   title='Category'),
                                       alt.Tooltip('project',
                                                   title='Project name'),
                                       alt.Tooltip('count()',
                                                   title='Pomodoros'),
                                       alt.Tooltip(
                                           'sum(pomodoro_length)',
                                           title='Minutes invested this day')
                                   ])
    scatter = base.encode(color=alt.Color('project', title='Project names'),
                          tooltip=[
                              alt.Tooltip('category', title='Category'),
                              alt.Tooltip('project', title='Project name'),
                              alt.Tooltip('yearmonthdate(full_date)',
                                          title='Date'),
                              alt.Tooltip('pomodoro_calification',
                                          title='Satisfaction'),
                              alt.Tooltip('hoursminutes(full_date)',
                                          title='Start')
                          ],
                          size=alt.Size('pomodoro_calification',
                                        sort='descending',
                                        title='Calification'))
    chart = alt.hconcat(stack, scatter)
    return chart
def hourly_chart(df):
    """Build hourly pomodoro charts.

    Returns a vertical concatenation of a grouped-bar chart (good/bad
    pomodoro counts per hour) and a heatmap of bad pomodoros by weekday
    and hour.
    """
    df_copy = df.copy()
    # Get only the bad pomodoros
    bad_condition = df_copy.pomodoro_calification == 'Bad'
    bad_df = df_copy[bad_condition]
    # Filtered pomodoros without calification
    condition = df_copy.pomodoro_calification != 0
    new_df = df_copy[condition]
    grouped_chart = alt.Chart(new_df).mark_bar().encode(
        alt.X('pomodoro_calification:N', title="", axis=None),
        alt.Y('count():Q', title='Pomodoro count'),
        alt.Column('hours(full_date):O',
                   title='Good and Bad pomodoros by hour'),
        alt.Color('pomodoro_calification:N', title='Calification'),
        tooltip=[alt.Tooltip('hours(full_date)'),
                 alt.Tooltip('count()')]).properties(width=20, height=200)
    heatmap = alt.Chart(
        bad_df, title='Bad pomodoros by day and hour').mark_rect().encode(
            alt.X('hours(full_date)',
                  title='Hours',
                  axis=alt.Axis(labelAngle=-90)),
            alt.Y('day(full_date):O', title='Day of the week'),
            alt.Color('count():Q',
                      title='Pomodoro count',
                      scale=alt.Scale(domain=(10, 1), scheme=THEME)),
            tooltip=[
                alt.Tooltip('count()', title='Bad pomodoros'),
                alt.Tooltip('sum(pomodoro_length)', title='Minutes wasted'),
                alt.Tooltip('hours(full_date)', title='Hour')
            ]).properties(width=400, height=200)
    return grouped_chart & heatmap
## PROJECT CHARTS
def create_projects_df(df):
    """Aggregate the per-pomodoro frame into one row per project.

    Adds derived columns: status ('Ended'/'Canceled'/'On'), days elapsed
    since the project start, total hours (as an 'HH:MM' string) and an
    equivalent pomodoro count (minutes / 25).
    """
    df_copy = df.copy()
    date_format = '%Y-%m-%d'
    tmp_projects = df_copy.groupby('project').agg({
        'category':
        'first',
        'project_start':
        'first',
        'project_end':
        'first',
        'project_cancel':
        'first',
        'pomodoro_date':
        'nunique',
        'pomodoro_length':
        'sum',
        'pomodoro_calification':
        'count'
    })
    # Rename the columns resulting from the groupby
    project_columns = {
        'project_start': 'start',
        'project_end': 'end',
        'project_cancel': 'cancel',
        'pomodoro_date': 'working_days',
        'pomodoro_length': 'minutes',
        'pomodoro_calification': 'total_pomodoros'
    }
    tmp_projects.rename(columns=project_columns, inplace=True)
    # Create separate columns for the pomodoro califications
    tmp_projects_2 = df_copy.groupby(
        'project')['pomodoro_calification'].value_counts().unstack().fillna(0)
    # Merge the two resulting groupby dataframes
    projects = pd.merge(tmp_projects,
                        tmp_projects_2,
                        left_index=True,
                        right_index=True)
    # Create the project status column.
    conditions = [projects.end.notnull(), projects.cancel.notnull()]
    choices = ['Ended', 'Canceled']
    projects['status'] = np.select(conditions, choices, default='On')
    # Create the days column. It counts the amount of days since its
    # start until its end/cancel date or current day if still on.
    today = pd.to_datetime("today", format=date_format)
    end_mask = (projects.status == "Ended")
    cancel_mask = (projects.status == 'Canceled')
    on_mask = (projects.status == 'On')
    projects['days'] = 0
    projects.loc[end_mask, 'days'] = (projects.end - projects.start).dt.days
    projects.loc[cancel_mask,
                 'days'] = (projects.cancel - projects.start).dt.days
    projects.loc[on_mask, 'days'] = (today - projects.start).dt.days
    # Convert the minutes count into hours
    # NOTE(review): '%H:%M' wraps around at 24h, so projects with more
    # than a day of work display modulo-24 hours — confirm intent.
    projects['hours'] = pd.to_datetime(projects.minutes,
                                       unit='m').dt.strftime('%H:%M')
    # Convert the minutes column to amount of pomodoros
    projects['pomodoros'] = projects.minutes / 25
    projects.reset_index(inplace=True)
    return projects
def projects_hours_days(df):
    """Scatter plot of projects: start date vs. days elapsed.

    Expects the aggregated frame produced by create_projects_df (uses its
    start/days/status/hours/pomodoros columns). Points are colored by
    status and sized by total invested hours.
    """
    df_copy = df.copy()
    single = alt.selection_single()
    chart = alt.Chart(
        df_copy, title='Projects').mark_point(filled=True).encode(
            alt.X('yearmonthdate(start)', title="Project starting date"),
            alt.Y('days', title='Days since the start'),
            color=alt.Color(
                'status:N',
                title='Project current status',
                sort='descending',
            ),
            size=alt.Size('hours',
                          title='Total hours invested in the project'),
            tooltip=[
                alt.Tooltip('category', title='Category'),
                alt.Tooltip('project', title='Project'),
                alt.Tooltip('start', title='Project starting date'),
                alt.Tooltip('status', title='Status'),
                alt.Tooltip('days', title='Days since the start'),
                alt.Tooltip('working_days',
                            title='Days with at least 1 pomodoro'),
                alt.Tooltip('hours', title='Total hours invested'),
                alt.Tooltip('pomodoros', title='Amount of pomodoros made')
            ]).add_selection(single).properties(width=800).interactive()
    return chart
# Make possible to show various plojects
def plot_project(project, df):
    """Bar chart of daily pomodoro counts for a single *project*.

    NOTE(review): raises IndexError (via .iloc[0]) when *project* is not
    present in *df* — confirm callers guarantee existence.
    """
    df_copy = df.copy()
    # Filter to the requested project
    filtered = df_copy[df_copy.project == project]
    # Get start and end dates
    row = filtered.iloc[0]
    start = row.project_start
    end = row.project_end
    cancel = row.project_cancel
    start = start.date()
    # `last` is only consumed by the commented-out x-scale below; the
    # branches are kept so re-enabling the scale keeps working.
    if end:
        last = end.date()
    elif cancel:
        last = cancel.date()
    else:
        today = pd.to_datetime("today")
        last = today.date()
    line = alt.Chart(filtered).mark_bar().encode(
        alt.X(
            'yearmonthdate(full_date):O',
            # scale=alt.Scale(
            #     domain=[start.isoformat(), last.isoformat()]),
            axis=alt.Axis(labelAngle=-90)),
        alt.Y('count()')).configure_range(category={'scheme': 'dark2'})
    return line
def my_theme():
    """Altair theme config: default view sizing plus the module-wide
    categorical color scheme (THEME)."""
    view_defaults = {
        'continuousHeight': 300,
        'continuousWidth': 400
    }  # from the default theme
    category_range = {'category': {'scheme': THEME}}
    return {'config': {'view': view_defaults, 'range': category_range}}
# Register and activate the custom Altair theme for every chart built here.
alt.themes.register('my_theme', my_theme)
alt.themes.enable('my_theme')
if __name__ == "__main__":
    # Ad-hoc manual check: build a chart for one hard-coded project.
    pomodoro = Pomodoro()
    df = pomodoro.create_df(pomodoro.QUERY)
    project = 'El asesinato de <NAME> - <NAME>'
    filtered = plot_project(project, df)
| [
"altair.selection_single",
"queries.Pomodoro",
"numpy.select",
"altair.Chart",
"altair.Axis",
"pandas.merge",
"altair.Scale",
"altair.themes.register",
"altair.Y",
"altair.X",
"altair.themes.enable",
"altair.Tooltip",
"altair.Column",
"altair.hconcat",
"altair.Size",
"altair.Color",
... | [((9347, 9388), 'altair.themes.register', 'alt.themes.register', (['"""my_theme"""', 'my_theme'], {}), "('my_theme', my_theme)\n", (9366, 9388), True, 'import altair as alt\n'), ((9389, 9418), 'altair.themes.enable', 'alt.themes.enable', (['"""my_theme"""'], {}), "('my_theme')\n", (9406, 9418), True, 'import altair as alt\n'), ((406, 427), 'pandas.to_datetime', 'pd.to_datetime', (['"""now"""'], {}), "('now')\n", (420, 427), True, 'import pandas as pd\n'), ((2896, 2923), 'altair.hconcat', 'alt.hconcat', (['stack', 'scatter'], {}), '(stack, scatter)\n', (2907, 2923), True, 'import altair as alt\n'), ((5571, 5644), 'pandas.merge', 'pd.merge', (['tmp_projects', 'tmp_projects_2'], {'left_index': '(True)', 'right_index': '(True)'}), '(tmp_projects, tmp_projects_2, left_index=True, right_index=True)\n', (5579, 5644), True, 'import pandas as pd\n'), ((5888, 5932), 'numpy.select', 'np.select', (['conditions', 'choices'], {'default': '"""On"""'}), "(conditions, choices, default='On')\n", (5897, 5932), True, 'import numpy as np\n'), ((6081, 6124), 'pandas.to_datetime', 'pd.to_datetime', (['"""today"""'], {'format': 'date_format'}), "('today', format=date_format)\n", (6095, 6124), True, 'import pandas as pd\n'), ((6953, 6975), 'altair.selection_single', 'alt.selection_single', ([], {}), '()\n', (6973, 6975), True, 'import altair as alt\n'), ((9463, 9473), 'queries.Pomodoro', 'Pomodoro', ([], {}), '()\n', (9471, 9473), False, 'from queries import Pomodoro\n'), ((1157, 1198), 'altair.Y', 'alt.Y', (['"""count()"""'], {'title': '"""Daily pomodoros"""'}), "('count()', title='Daily pomodoros')\n", (1162, 1198), True, 'import altair as alt\n'), ((1241, 1284), 'altair.Color', 'alt.Color', (['"""project"""'], {'title': '"""Project names"""'}), "('project', title='Project names')\n", (1250, 1284), True, 'import altair as alt\n'), ((2052, 2095), 'altair.Color', 'alt.Color', (['"""project"""'], {'title': '"""Project names"""'}), "('project', title='Project names')\n", (2061, 2095), 
True, 'import altair as alt\n'), ((2727, 2801), 'altair.Size', 'alt.Size', (['"""pomodoro_calification"""'], {'sort': '"""descending"""', 'title': '"""Calification"""'}), "('pomodoro_calification', sort='descending', title='Calification')\n", (2735, 2801), True, 'import altair as alt\n'), ((8610, 8633), 'pandas.to_datetime', 'pd.to_datetime', (['"""today"""'], {}), "('today')\n", (8624, 8633), True, 'import pandas as pd\n'), ((1421, 1462), 'altair.Tooltip', 'alt.Tooltip', (['"""category"""'], {'title': '"""Category"""'}), "('category', title='Category')\n", (1432, 1462), True, 'import altair as alt\n'), ((1554, 1598), 'altair.Tooltip', 'alt.Tooltip', (['"""project"""'], {'title': '"""Project name"""'}), "('project', title='Project name')\n", (1565, 1598), True, 'import altair as alt\n'), ((1690, 1731), 'altair.Tooltip', 'alt.Tooltip', (['"""count()"""'], {'title': '"""Pomodoros"""'}), "('count()', title='Pomodoros')\n", (1701, 1731), True, 'import altair as alt\n'), ((1823, 1893), 'altair.Tooltip', 'alt.Tooltip', (['"""sum(pomodoro_length)"""'], {'title': '"""Minutes invested this day"""'}), "('sum(pomodoro_length)', title='Minutes invested this day')\n", (1834, 1893), True, 'import altair as alt\n'), ((2163, 2204), 'altair.Tooltip', 'alt.Tooltip', (['"""category"""'], {'title': '"""Category"""'}), "('category', title='Category')\n", (2174, 2204), True, 'import altair as alt\n'), ((2236, 2280), 'altair.Tooltip', 'alt.Tooltip', (['"""project"""'], {'title': '"""Project name"""'}), "('project', title='Project name')\n", (2247, 2280), True, 'import altair as alt\n'), ((2312, 2365), 'altair.Tooltip', 'alt.Tooltip', (['"""yearmonthdate(full_date)"""'], {'title': '"""Date"""'}), "('yearmonthdate(full_date)', title='Date')\n", (2323, 2365), True, 'import altair as alt\n'), ((2439, 2497), 'altair.Tooltip', 'alt.Tooltip', (['"""pomodoro_calification"""'], {'title': '"""Satisfaction"""'}), "('pomodoro_calification', title='Satisfaction')\n", (2450, 2497), True, 'import 
altair as alt\n'), ((2571, 2624), 'altair.Tooltip', 'alt.Tooltip', (['"""hoursminutes(full_date)"""'], {'title': '"""Start"""'}), "('hoursminutes(full_date)', title='Start')\n", (2582, 2624), True, 'import altair as alt\n'), ((3330, 3383), 'altair.X', 'alt.X', (['"""pomodoro_calification:N"""'], {'title': '""""""', 'axis': 'None'}), "('pomodoro_calification:N', title='', axis=None)\n", (3335, 3383), True, 'import altair as alt\n'), ((3393, 3435), 'altair.Y', 'alt.Y', (['"""count():Q"""'], {'title': '"""Pomodoro count"""'}), "('count():Q', title='Pomodoro count')\n", (3398, 3435), True, 'import altair as alt\n'), ((3445, 3517), 'altair.Column', 'alt.Column', (['"""hours(full_date):O"""'], {'title': '"""Good and Bad pomodoros by hour"""'}), "('hours(full_date):O', title='Good and Bad pomodoros by hour')\n", (3455, 3517), True, 'import altair as alt\n'), ((3546, 3604), 'altair.Color', 'alt.Color', (['"""pomodoro_calification:N"""'], {'title': '"""Calification"""'}), "('pomodoro_calification:N', title='Calification')\n", (3555, 3604), True, 'import altair as alt\n'), ((3965, 4015), 'altair.Y', 'alt.Y', (['"""day(full_date):O"""'], {'title': '"""Day of the week"""'}), "('day(full_date):O', title='Day of the week')\n", (3970, 4015), True, 'import altair as alt\n'), ((6598, 6640), 'pandas.to_datetime', 'pd.to_datetime', (['projects.minutes'], {'unit': '"""m"""'}), "(projects.minutes, unit='m')\n", (6612, 6640), True, 'import pandas as pd\n'), ((8918, 8934), 'altair.Y', 'alt.Y', (['"""count()"""'], {}), "('count()')\n", (8923, 8934), True, 'import altair as alt\n'), ((1020, 1073), 'altair.Y', 'alt.Y', (['"""hoursminutes(full_date)"""'], {'title': '"""Daily hours"""'}), "('hoursminutes(full_date)', title='Daily hours')\n", (1025, 1073), True, 'import altair as alt\n'), ((3623, 3654), 'altair.Tooltip', 'alt.Tooltip', (['"""hours(full_date)"""'], {}), "('hours(full_date)')\n", (3634, 3654), True, 'import altair as alt\n'), ((3673, 3695), 'altair.Tooltip', 'alt.Tooltip', 
(['"""count()"""'], {}), "('count()')\n", (3684, 3695), True, 'import altair as alt\n'), ((3926, 3950), 'altair.Axis', 'alt.Axis', ([], {'labelAngle': '(-90)'}), '(labelAngle=-90)\n', (3934, 3950), True, 'import altair as alt\n'), ((4126, 4165), 'altair.Scale', 'alt.Scale', ([], {'domain': '(10, 1)', 'scheme': 'THEME'}), '(domain=(10, 1), scheme=THEME)\n', (4135, 4165), True, 'import altair as alt\n'), ((4206, 4251), 'altair.Tooltip', 'alt.Tooltip', (['"""count()"""'], {'title': '"""Bad pomodoros"""'}), "('count()', title='Bad pomodoros')\n", (4217, 4251), True, 'import altair as alt\n'), ((4269, 4328), 'altair.Tooltip', 'alt.Tooltip', (['"""sum(pomodoro_length)"""'], {'title': '"""Minutes wasted"""'}), "('sum(pomodoro_length)', title='Minutes wasted')\n", (4280, 4328), True, 'import altair as alt\n'), ((4346, 4391), 'altair.Tooltip', 'alt.Tooltip', (['"""hours(full_date)"""'], {'title': '"""Hour"""'}), "('hours(full_date)', title='Hour')\n", (4357, 4391), True, 'import altair as alt\n'), ((8883, 8907), 'altair.Axis', 'alt.Axis', ([], {'labelAngle': '(-90)'}), '(labelAngle=-90)\n', (8891, 8907), True, 'import altair as alt\n'), ((784, 842), 'altair.Chart', 'alt.Chart', (['filtered'], {'title': 'f"""Productivity in {month_name}"""'}), "(filtered, title=f'Productivity in {month_name}')\n", (793, 842), True, 'import altair as alt\n'), ((979, 1003), 'altair.Axis', 'alt.Axis', ([], {'labelAngle': '(-90)'}), '(labelAngle=-90)\n', (987, 1003), True, 'import altair as alt\n'), ((3285, 3302), 'altair.Chart', 'alt.Chart', (['new_df'], {}), '(new_df)\n', (3294, 3302), True, 'import altair as alt\n'), ((3746, 3802), 'altair.Chart', 'alt.Chart', (['bad_df'], {'title': '"""Bad pomodoros by day and hour"""'}), "(bad_df, title='Bad pomodoros by day and hour')\n", (3755, 3802), True, 'import altair as alt\n'), ((8674, 8693), 'altair.Chart', 'alt.Chart', (['filtered'], {}), '(filtered)\n', (8683, 8693), True, 'import altair as alt\n'), ((7079, 7139), 'altair.X', 'alt.X', 
(['"""yearmonthdate(start)"""'], {'title': '"""Project starting date"""'}), "('yearmonthdate(start)', title='Project starting date')\n", (7084, 7139), True, 'import altair as alt\n'), ((7153, 7196), 'altair.Y', 'alt.Y', (['"""days"""'], {'title': '"""Days since the start"""'}), "('days', title='Days since the start')\n", (7158, 7196), True, 'import altair as alt\n'), ((7216, 7288), 'altair.Color', 'alt.Color', (['"""status:N"""'], {'title': '"""Project current status"""', 'sort': '"""descending"""'}), "('status:N', title='Project current status', sort='descending')\n", (7225, 7288), True, 'import altair as alt\n'), ((7370, 7432), 'altair.Size', 'alt.Size', (['"""hours"""'], {'title': '"""Total hours invested in the project"""'}), "('hours', title='Total hours invested in the project')\n", (7378, 7432), True, 'import altair as alt\n'), ((7498, 7539), 'altair.Tooltip', 'alt.Tooltip', (['"""category"""'], {'title': '"""Category"""'}), "('category', title='Category')\n", (7509, 7539), True, 'import altair as alt\n'), ((7557, 7596), 'altair.Tooltip', 'alt.Tooltip', (['"""project"""'], {'title': '"""Project"""'}), "('project', title='Project')\n", (7568, 7596), True, 'import altair as alt\n'), ((7614, 7665), 'altair.Tooltip', 'alt.Tooltip', (['"""start"""'], {'title': '"""Project starting date"""'}), "('start', title='Project starting date')\n", (7625, 7665), True, 'import altair as alt\n'), ((7683, 7720), 'altair.Tooltip', 'alt.Tooltip', (['"""status"""'], {'title': '"""Status"""'}), "('status', title='Status')\n", (7694, 7720), True, 'import altair as alt\n'), ((7738, 7787), 'altair.Tooltip', 'alt.Tooltip', (['"""days"""'], {'title': '"""Days since the start"""'}), "('days', title='Days since the start')\n", (7749, 7787), True, 'import altair as alt\n'), ((7805, 7871), 'altair.Tooltip', 'alt.Tooltip', (['"""working_days"""'], {'title': '"""Days with at least 1 pomodoro"""'}), "('working_days', title='Days with at least 1 pomodoro')\n", (7816, 7871), True, 'import 
altair as alt\n'), ((7917, 7967), 'altair.Tooltip', 'alt.Tooltip', (['"""hours"""'], {'title': '"""Total hours invested"""'}), "('hours', title='Total hours invested')\n", (7928, 7967), True, 'import altair as alt\n'), ((7985, 8043), 'altair.Tooltip', 'alt.Tooltip', (['"""pomodoros"""'], {'title': '"""Amount of pomodoros made"""'}), "('pomodoros', title='Amount of pomodoros made')\n", (7996, 8043), True, 'import altair as alt\n'), ((6989, 7025), 'altair.Chart', 'alt.Chart', (['df_copy'], {'title': '"""Projects"""'}), "(df_copy, title='Projects')\n", (6998, 7025), True, 'import altair as alt\n')] |
# Copyright (C) 2016 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# ==============================================================================
import os
import glob
#import tensorflow as tf
from collections import Counter
def find_data_files_and_labels_deep(
        data_dir,
        types=('.jpg', '.jpeg', '.png'),
        full_file_path=False,
        add_background_label=False):
    """Recursively find data files under *data_dir*, labelling by sub-path.

    Walks the whole tree (bottom-up); every directory that contains at
    least one file with an extension in *types* becomes one label, named
    by its path relative to *data_dir* ('' for the root itself).

    Args:
        data_dir: root directory to scan.
        types: accepted (lower-cased) file extensions.
        full_file_path: if True, returned filenames keep the walked root
            prefix; otherwise they are relative to *data_dir*.
        add_background_label: if True, label ids start at 1, reserving 0
            for an implicit "background" class.

    Returns:
        (filenames, label_texts, label_ids, label_counts): three parallel
        lists plus a Counter keyed by label text.
    """
    label_counts = Counter()
    label_ids = []
    label_texts = []
    filenames = []
    label_index = 1 if add_background_label else 0
    for root, _, files in os.walk(data_dir, topdown=False):
        label = os.path.relpath(root, data_dir) if (root != data_dir) else ''
        matching_files = []
        matching_count = 0
        for f in files:
            if os.path.splitext(f)[1].lower() in types:
                f = os.path.join(root if full_file_path else label, f)
                matching_files.append(f)
                matching_count += 1
        if matching_count:
            # BUG FIX: the original referenced an undefined name
            # `unique_labels` here, raising NameError as soon as 100
            # classes had been collected.
            if label_index and not label_index % 100:
                print('Finished finding files in %d classes.' % label_index)
            label_counts[label] += matching_count
            label_ids.extend([label_index] * matching_count)
            label_texts.extend([label] * matching_count)
            filenames.extend(matching_files)
            label_index += 1
    print('Found %d data files across %d labels inside %s.' % (len(filenames), len(label_counts), data_dir))
    return filenames, label_texts, label_ids, label_counts
def find_data_files_and_labels_shallow(
        data_dir,
        unique_labels=[],
        types=('.jpg', '.jpeg', '.png'),
        full_file_path=False,
        include_empty_labels=False,
        add_background_label=False):
    """Find data files exactly one directory level below *data_dir*.

    Each immediate sub-directory is one label. Returns three parallel
    lists (filenames, label texts, numeric label ids) plus a Counter of
    files per label. Labels without matching files are skipped unless
    *include_empty_labels* is set; *add_background_label* reserves id 0.
    """
    label_counts = Counter()
    label_ids = []
    label_texts = []
    filenames = []
    # Discover the label directories ourselves when none were given.
    if not unique_labels:
        unique_labels = next(os.walk(data_dir))[1]
    label_index = 1 if add_background_label else 0
    for label in unique_labels:
        label_dir = os.path.join(data_dir, label)
        matching_files = []
        for entry in os.listdir(label_dir):
            full_entry = os.path.join(label_dir, entry)
            if os.path.isfile(full_entry) and os.path.splitext(entry)[1].lower() in types:
                matching_files.append(
                    full_entry if full_file_path else os.path.join(label, entry))
        matching_count = len(matching_files)
        if include_empty_labels or matching_count:
            if label_index and not label_index % 100:
                print('Finished finding files in %d of %d classes.' % (label_index, len(unique_labels)))
            label_counts[label] += matching_count
            label_ids.extend([label_index] * matching_count)
            label_texts.extend([label] * matching_count)
            filenames.extend(matching_files)
            label_index += 1
    print('Found %d data files across %d labels inside %s.' % (len(filenames), len(label_counts), data_dir))
    return filenames, label_texts, label_ids, label_counts
def find_data_files_shallow(data_dir, unique_labels=[], types=('.jpg', 'jpeg'), full_file_path=False):
    """Convenience wrapper: return only the filenames found one level
    below *data_dir* (no empty labels, no background class).

    NOTE(review): the default extension 'jpeg' (no leading dot) looks
    like a typo for '.jpeg' — splitext extensions always include the
    dot, so it can never match; confirm before changing.
    """
    results = find_data_files_and_labels_shallow(
        data_dir, unique_labels, types,
        full_file_path=full_file_path,
        include_empty_labels=False,
        add_background_label=False)
    return results[0]
def load_labels(labels_file):
    """Read one label per line from *labels_file*, stripped of whitespace.

    Returns an empty list when the file cannot be opened.
    """
    try:
        # Use a context manager so the handle is always closed
        # (the original `open(...).readlines()` leaked it).
        with open(labels_file, 'r') as f:
            return [line.strip() for line in f]
    except OSError:
        return []
def main():
    """Smoke-test both directory scanners against --data_dir.

    Results are computed but intentionally unused; the scanners print
    their own summaries.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir')
    args = parser.parse_args()
    data_dir = args.data_dir
    files, labels, ids, counts = find_data_files_and_labels_deep(data_dir)
    files2, labels2, ids2, counts2 = find_data_files_and_labels_shallow(data_dir)
if __name__ == '__main__':
main() | [
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"os.path.splitext",
"collections.Counter",
"os.path.isfile",
"os.walk",
"os.path.relpath"
] | [((624, 633), 'collections.Counter', 'Counter', ([], {}), '()\n', (631, 633), False, 'from collections import Counter\n'), ((772, 804), 'os.walk', 'os.walk', (['data_dir'], {'topdown': '(False)'}), '(data_dir, topdown=False)\n', (779, 804), False, 'import os\n'), ((2015, 2024), 'collections.Counter', 'Counter', ([], {}), '()\n', (2022, 2024), False, 'from collections import Counter\n'), ((3934, 3959), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3957, 3959), False, 'import argparse\n'), ((2343, 2372), 'os.path.join', 'os.path.join', (['data_dir', 'label'], {}), '(data_dir, label)\n', (2355, 2372), False, 'import os\n'), ((2389, 2411), 'os.listdir', 'os.listdir', (['label_path'], {}), '(label_path)\n', (2399, 2411), False, 'import os\n'), ((822, 853), 'os.path.relpath', 'os.path.relpath', (['root', 'data_dir'], {}), '(root, data_dir)\n', (837, 853), False, 'import os\n'), ((2513, 2540), 'os.path.join', 'os.path.join', (['label_path', 'f'], {}), '(label_path, f)\n', (2525, 2540), False, 'import os\n'), ((1039, 1089), 'os.path.join', 'os.path.join', (['(root if full_file_path else label)', 'f'], {}), '(root if full_file_path else label, f)\n', (1051, 1089), False, 'import os\n'), ((2215, 2232), 'os.walk', 'os.walk', (['data_dir'], {}), '(data_dir)\n', (2222, 2232), False, 'import os\n'), ((2556, 2578), 'os.path.isfile', 'os.path.isfile', (['full_f'], {}), '(full_f)\n', (2570, 2578), False, 'import os\n'), ((2674, 2696), 'os.path.join', 'os.path.join', (['label', 'f'], {}), '(label, f)\n', (2686, 2696), False, 'import os\n'), ((978, 997), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (994, 997), False, 'import os\n'), ((2583, 2602), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (2599, 2602), False, 'import os\n')] |
import numpy as np
from math_study.numpy_basics.statistics.statistics import random_data
if __name__ == '__main__':
    # Demo script: numpy min/max aggregation, overall and per-axis.
    print('Numpy - Statistic - min & max')
    print('\nrandom multidimensional array:')
    print(random_data)
    print('\nget the min value from the array:')
    print(random_data.min())
    print('\nget the min value from the array along the specified axis:')
    print(random_data.min(axis=0))  # array filled with vertical min values: min of the values in the same column
    print(random_data.min(axis=1))  # array filled with horizontal min values: min of the values in the same rows
    # if you exceed the number of dimensions, you'll get a 'numpy.AxisError'
    try:
        print(random_data.min(axis=3))
    except np.AxisError as axis_error:
        print(axis_error)
    print(str('- ' * 20))
    print('\nget the max value from the array:')
    print(random_data.max())
    # BUG FIX: this header previously said "min" (copy-paste) although
    # the calls below compute the max.
    print('\nget the max value from the array along the specified axis:')
    print(random_data.max(axis=0))  # array filled with vertical max values: max of the values in the same column
    print(random_data.max(axis=1))  # array filled with horizontal max values: max of the values in the same rows
    # if you exceed the number of dimensions, you'll get a 'numpy.AxisError'
    try:
        print(random_data.max(axis=3))
    except np.AxisError as axis_error:
        print(axis_error)
| [
"math_study.numpy_basics.statistics.statistics.random_data.min",
"math_study.numpy_basics.statistics.statistics.random_data.max"
] | [((291, 308), 'math_study.numpy_basics.statistics.statistics.random_data.min', 'random_data.min', ([], {}), '()\n', (306, 308), False, 'from math_study.numpy_basics.statistics.statistics import random_data\n'), ((395, 418), 'math_study.numpy_basics.statistics.statistics.random_data.min', 'random_data.min', ([], {'axis': '(0)'}), '(axis=0)\n', (410, 418), False, 'from math_study.numpy_basics.statistics.statistics import random_data\n'), ((509, 532), 'math_study.numpy_basics.statistics.statistics.random_data.min', 'random_data.min', ([], {'axis': '(1)'}), '(axis=1)\n', (524, 532), False, 'from math_study.numpy_basics.statistics.statistics import random_data\n'), ((891, 908), 'math_study.numpy_basics.statistics.statistics.random_data.max', 'random_data.max', ([], {}), '()\n', (906, 908), False, 'from math_study.numpy_basics.statistics.statistics import random_data\n'), ((995, 1018), 'math_study.numpy_basics.statistics.statistics.random_data.max', 'random_data.max', ([], {'axis': '(0)'}), '(axis=0)\n', (1010, 1018), False, 'from math_study.numpy_basics.statistics.statistics import random_data\n'), ((1109, 1132), 'math_study.numpy_basics.statistics.statistics.random_data.max', 'random_data.max', ([], {'axis': '(1)'}), '(axis=1)\n', (1124, 1132), False, 'from math_study.numpy_basics.statistics.statistics import random_data\n'), ((714, 737), 'math_study.numpy_basics.statistics.statistics.random_data.min', 'random_data.min', ([], {'axis': '(3)'}), '(axis=3)\n', (729, 737), False, 'from math_study.numpy_basics.statistics.statistics import random_data\n'), ((1314, 1337), 'math_study.numpy_basics.statistics.statistics.random_data.max', 'random_data.max', ([], {'axis': '(3)'}), '(axis=3)\n', (1329, 1337), False, 'from math_study.numpy_basics.statistics.statistics import random_data\n')] |
import os
import unittest
import tempfile
import fcntl
import struct
from ffrecord import checkFsAlign
# ioctl request number used by the ffrecord filesystem driver to report
# whether a file descriptor satisfies its alignment requirement.
FS_IOCNUM_CHECK_FS_ALIGN = 2147772004
def checkFsAlign2(fd):
    """Reimplementation of ffrecord.checkFsAlign via a raw ioctl.

    Returns True iff the driver fills the buffer with 1; any OSError
    (e.g. a filesystem that does not implement the ioctl, or a bad fd)
    yields False.
    """
    buf = bytearray(4)
    try:
        fcntl.ioctl(fd, FS_IOCNUM_CHECK_FS_ALIGN, buf)
    except OSError:  # dropped the unused `as err` binding
        return False
    fsAlign = struct.unpack("i", buf)
    return fsAlign[0] == 1
class TestFsAlign(unittest.TestCase):
    """Check that ffrecord.checkFsAlign agrees with the raw-ioctl
    reimplementation checkFsAlign2."""

    def subtest_fsalign(self, fname, is_aligned):
        """Assert both implementations report *is_aligned* for *fname*.

        Skips silently when the file does not exist (e.g. the shared
        dataset is unavailable on this machine).
        """
        if not os.path.exists(fname):
            print(f'{fname} does not exist, skip...')
            return
        fd = os.open(fname, os.O_RDONLY | os.O_DIRECT)
        try:
            assert checkFsAlign(fd) == checkFsAlign2(fd) == is_aligned
        finally:
            os.close(fd)  # the original leaked this descriptor

    def test_fs(self):
        # File on the ffrecord-backed filesystem: must report aligned.
        fname = "/public_dataset/1/ImageNet/train.ffr/PART_00000.ffr"
        self.subtest_fsalign(fname, True)

    def test_tmp(self):
        # A plain temp file must not be reported as fs-aligned.
        with tempfile.NamedTemporaryFile() as tmp:
            self.subtest_fsalign(tmp.name, False)
if __name__ == '__main__':
unittest.main()
| [
"os.path.exists",
"fcntl.ioctl",
"os.open",
"ffrecord.checkFsAlign",
"struct.unpack",
"tempfile.NamedTemporaryFile",
"unittest.main"
] | [((319, 342), 'struct.unpack', 'struct.unpack', (['"""i"""', 'buf'], {}), "('i', buf)\n", (332, 342), False, 'import struct\n'), ((990, 1005), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1003, 1005), False, 'import unittest\n'), ((209, 255), 'fcntl.ioctl', 'fcntl.ioctl', (['fd', 'FS_IOCNUM_CHECK_FS_ALIGN', 'buf'], {}), '(fd, FS_IOCNUM_CHECK_FS_ALIGN, buf)\n', (220, 255), False, 'import fcntl\n'), ((586, 627), 'os.open', 'os.open', (['fname', '(os.O_RDONLY | os.O_DIRECT)'], {}), '(fname, os.O_RDONLY | os.O_DIRECT)\n', (593, 627), False, 'import os\n'), ((476, 497), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (490, 497), False, 'import os\n'), ((643, 659), 'ffrecord.checkFsAlign', 'checkFsAlign', (['fd'], {}), '(fd)\n', (655, 659), False, 'from ffrecord import checkFsAlign\n'), ((869, 898), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (896, 898), False, 'import tempfile\n')] |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os, json
def main():
    """Dump the CLEVR question-vocabulary tokens, minus the first five
    special tokens, to clevr-scene-nouns.txt (one per line)."""
    vocab_path = 'vocab.json'
    with open(vocab_path, 'r') as vocab_file:
        token_to_idx = json.load(vocab_file)['question_token_to_idx']
    tokens = list(token_to_idx.keys())[5:]
    print(tokens)
    with open("clevr-scene-nouns.txt", "w") as out:
        out.writelines(str(token) + '\n' for token in tokens)
if __name__ == '__main__':
main() | [
"json.load"
] | [((161, 173), 'json.load', 'json.load', (['f'], {}), '(f)\n', (170, 173), False, 'import os, json\n')] |
import re
from configparser import ConfigParser
from tadataka.camera.model import CameraModel
def parse_(line):
    """Split one camera-list line into ``(camera_id, CameraModel)``.

    The line format is "<id> <model parameters...>"; raises ValueError
    when the leading camera id is not an integer.
    """
    head, tail = re.split(r"\s+", line, maxsplit=1)
    try:
        head = int(head)
    except ValueError:
        raise ValueError("Camera ID must be integer")
    return head, CameraModel.fromstring(tail)
def load(filename):
    """Read a camera file ("<id> <params>" per line) into a dict mapping
    camera id -> CameraModel."""
    camera_models = {}
    with open(filename, 'r') as f:
        for raw_line in f:
            camera_id, camera_model = parse_(raw_line.strip())
            camera_models[camera_id] = camera_model
    return camera_models
def save(filename, camera_models):
    """Write camera models as "<id> <model>" lines.

    Entries are sorted by camera id so the output is deterministic,
    which makes it easy to test.
    """
    ordered = sorted(camera_models.items(), key=lambda item: item[0])
    with open(filename, 'w') as f:
        for camera_id, camera_model in ordered:
            f.write('{} {}\n'.format(camera_id, camera_model))
| [
"re.split",
"tadataka.camera.model.CameraModel.fromstring"
] | [((144, 178), 're.split', 're.split', (['"""\\\\s+"""', 'line'], {'maxsplit': '(1)'}), "('\\\\s+', line, maxsplit=1)\n", (152, 178), False, 'import re\n'), ((322, 358), 'tadataka.camera.model.CameraModel.fromstring', 'CameraModel.fromstring', (['model_params'], {}), '(model_params)\n', (344, 358), False, 'from tadataka.camera.model import CameraModel\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cpro.models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds four denormalized event-cache
    # columns to the Card model and extends Event.i_kind with 'Caravan'.

    dependencies = [
        ('cpro', '0002_auto_20160916_2110'),
    ]

    operations = [
        migrations.AddField(
            model_name='card',
            name='_cache_event_image',
            # uploadItem(b'e') builds the upload_to target for event images.
            field=models.ImageField(null=True, upload_to=cpro.models.uploadItem(b'e'), blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='card',
            name='_cache_event_last_update',
            # Timestamp of the last cache refresh for the event fields below.
            field=models.DateTimeField(null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='card',
            name='_cache_event_name',
            field=models.CharField(max_length=100, null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='card',
            name='_cache_event_translated_name',
            field=models.CharField(max_length=100, null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='event',
            name='i_kind',
            field=models.PositiveIntegerField(default=0, verbose_name='Kind', choices=[(0, 'Token'), (1, 'Medley'), (2, 'Coop'), (3, 'Caravan')]),
            preserve_default=True,
        ),
    ]
| [
"django.db.models.DateTimeField",
"django.db.models.PositiveIntegerField",
"django.db.models.CharField"
] | [((627, 658), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (647, 658), False, 'from django.db import models, migrations\n'), ((822, 865), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (838, 865), False, 'from django.db import models, migrations\n'), ((1040, 1083), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (1056, 1083), False, 'from django.db import models, migrations\n'), ((1239, 1370), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)', 'verbose_name': '"""Kind"""', 'choices': "[(0, 'Token'), (1, 'Medley'), (2, 'Coop'), (3, 'Caravan')]"}), "(default=0, verbose_name='Kind', choices=[(0,\n 'Token'), (1, 'Medley'), (2, 'Coop'), (3, 'Caravan')])\n", (1266, 1370), False, 'from django.db import models, migrations\n')] |
import random
from typing import Any, Dict, List
import pytorch_lightning as pl
from sklearn.model_selection import train_test_split
from ..dataloaders.sequence_loader import SequenceLoader
from ..datasets.sequence_dataset import SequenceDataset, SequenceSubset
from neural_lifetimes.utils.data import FeatureDictionaryEncoder, Tokenizer, TargetCreator
class SequenceDataModule(pl.LightningDataModule):
    """
    A Pytorch Lightning (pl) module is a wrapper around different dataloaders and datasets.
    The pl automatically selects the correct split of the dataset given the current step.
    Args:
        dataset (SequenceDataset): The dataset for training.
        test_size (float): The proportion of data points that will be part of the validation set. 0 < test_size < 1
        batch_points (int): Batch size (count of events).
        transform (FeatureDictionaryEncoder): The encoder to be used on the batch. Will be passed on to the
            ``SequenceLoader``.
        target_transform (TargetCreator): The transform used to generate targets. Will be passed on to the
            ``SequenceLoader``.
        tokenizer (Tokenizer): The tokenizer appending start tokens for packing the sequences. Will be passed
            on to the ``SequenceLoader``.
        min_points (int): Minimum number of events per sequence, forwarded to the ``SequenceLoader``.
        forecast_dataset (SequenceDataset, Optional): The dataset used for forecasting. Defaults to None.
        forecast_limit (int, Optional): The maximum number of data points on which to perform forecasting.
            This will generate a simple random sample of size ``forecast_limit`` from the indexes of the
            respective split of indices. if ``None``, no sample is taken. Defaults to None.
    Attributes:
        dataset (SequenceDataset): A pytorch Dataset instance
        test_size (float): The proportion of data points that will be part of the validation set. 0 < test_size < 1
        batch_points (int): Batch size
        target_transform (TargetCreator): A class that transforms the targets.
    """

    def __init__(
        self,
        dataset: SequenceDataset,
        test_size: float,
        batch_points: int,
        transform: FeatureDictionaryEncoder,
        target_transform: TargetCreator,
        tokenizer: Tokenizer,
        min_points: int,
        forecast_dataset: SequenceDataset = None,
        forecast_limit: int = None,
    ):
        super().__init__()
        self.dataset = dataset
        self.forecast_dataset = forecast_dataset
        self.test_size = test_size
        self.batch_points = batch_points
        self.transform = transform
        self.target_transform = target_transform
        self.tokenizer = tokenizer
        self.min_points = min_points
        self.forecast_limit = forecast_limit
        self.save_hyperparameters(self.build_parameter_dict())
        # Index lists for each split; filled in by setup().  valid_inds is
        # initialized here too so every *_dataloader() is safe to call.
        self.train_inds: List[int] = []
        self.valid_inds: List[int] = []
        self.predict_inds: List[int] = []
        self.test_inds: List[int] = []

    def setup(self, stage: str):
        """
        Create splits of the dataset.
        This is also the place to add data transformation and augmentations.
        Gets automatically called by pytorch. In case of distributed learning setup
        will be exectuted for each GPU.
        Docs: see https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html#setup
        Args:
            stage (str): one of "fit", "test" or "predict".
        """
        if stage == "fit":
            self.train_inds, self.valid_inds = train_test_split(range(len(self.dataset)), test_size=self.test_size)
        if stage == "test":
            self.test_inds = list(range(len(self.dataset)))
        if stage == "predict":
            self.predict_inds = list(range(len(self.dataset)))

    def build_parameter_dict(self) -> Dict[str, Any]:
        """
        Return a dictionary of datamodule parameters.
        Returns:
            Dict[str, Any]: Parameters of the datamodule
        """
        hparams = {
            "test_size": self.test_size,
            "batch_points": self.batch_points,
            "min_points": self.min_points,
        }
        hparams = {f"datamodule/{k}": v for k, v in hparams.items()}
        # Both transforms/datasets may or may not expose build_parameter_dict;
        # missing implementations are silently skipped.
        try:
            hparams.update(
                {f"target_transform/{k}": v for k, v in self.target_transform.build_parameter_dict().items()}
            )
        except AttributeError:
            pass
        try:
            hparams.update({f"dataset/{k}": v for k, v in self.dataset.build_parameter_dict().items()})
        except AttributeError:
            pass
        return hparams

    def _build_dataloader(self, dataset: SequenceDataset, indices: List[int]) -> SequenceLoader:
        """Build a dataloader over the provided index list.
        Args:
            dataset (SequenceDataset): the dataset to wrap.
            indices (List[int]): indices of the rows which this dataloader will provide.
        Returns:
            SequenceLoader: A SequenceLoader allowing access to data with the provided indices.
        """
        return SequenceLoader(
            SequenceSubset(dataset, indices),
            self.transform,
            self.target_transform,
            self.tokenizer,
            self.batch_points,
            self.min_points,
        )

    def train_dataloader(self):
        """Build a dataloader for training steps.
        Returns:
            SequenceLoader: the dataloader for training.
        """
        return self._build_dataloader(self.dataset, self.train_inds)

    def val_dataloader(self):
        """Build a dataloader for validation steps.
        Returns:
            SequenceLoader: the dataloader for validation.
        """
        return self._build_dataloader(self.dataset, self.valid_inds)

    def test_dataloader(self):
        """Build a dataloader for testing.
        Returns:
            SequenceLoader: the dataloader for testing.
        """
        return self._build_dataloader(self.dataset, self.test_inds)

    def predict_dataloader(self):
        """Build a dataloader for prediction.
        Returns:
            SequenceLoader: the dataloader for prediction.
        """
        # TODO: decide on forecast vs train dataset
        return self._build_dataloader(self.forecast_dataset, self.predict_inds)

    def _forecast_indices(self, indices):
        """Subsample ``indices`` to at most ``forecast_limit`` elements.

        Returns ``indices`` unchanged when it is None, when no limit is
        configured, or when it is already small enough.
        """
        # Bug fix: check forecast_limit is None before comparing -- the
        # original `len(indices) <= None` raised TypeError on Python 3,
        # contradicting the documented "if None, no sample is taken".
        if indices is None or self.forecast_limit is None or len(indices) <= self.forecast_limit:
            return indices
        return random.sample(indices, self.forecast_limit)

    def train_forecast_dataloader(self):
        """Forecast dataloader over (a sample of) the training indices."""
        return self._build_dataloader(self.forecast_dataset, self._forecast_indices(self.train_inds))

    def val_forecast_dataloader(self):
        """Forecast dataloader over (a sample of) the validation indices."""
        return self._build_dataloader(self.forecast_dataset, self._forecast_indices(self.valid_inds))

    def test_forecast_dataloader(self):
        """Forecast dataloader over (a sample of) the test indices."""
        return self._build_dataloader(self.forecast_dataset, self._forecast_indices(self.test_inds))
| [
"random.sample"
] | [((6641, 6684), 'random.sample', 'random.sample', (['indices', 'self.forecast_limit'], {}), '(indices, self.forecast_limit)\n', (6654, 6684), False, 'import random\n')] |
from django import forms
from django.contrib.auth.forms import UserCreationForm
# Current app module.
from .models import Account
class CreateUser(UserCreationForm):
    """Form for creating user account in the system.

    NOTE(review): the field declarations below live inside ``Meta`` rather
    than on the form class itself, so Django does not treat them as declared
    form fields; the fields in ``Meta.fields`` are generated from the
    ``Account`` model instead. Confirm whether this is intentional.
    """
    class Meta:
        model = Account
        first_name = forms.CharField(
            max_length=50,
            widget=forms.TextInput(attrs={'type': 'text',
                                          'name': 'first_name',
                                          'class': 'form-control my-input',
                                          'id': 'first_name',
                                          'placeholder': 'First name'}))
        second_name = forms.CharField(
            max_length=50,
            widget=forms.TextInput(attrs={'type': 'text',
                                          'name': 'second_name',
                                          'class': 'form-control my-input',
                                          'id': 'second_name',
                                          'placeholder': 'Second name'}))
        # NOTE(review): these attrs reuse 'second_name'/'Second name' -- looks
        # like a copy-paste slip; confirm before changing the rendered HTML.
        email = forms.EmailField(
            max_length=300,
            widget=forms.TextInput(attrs={'type': 'email',
                                          'name': 'second_name',
                                          'class': 'form-control my-input',
                                          'id': 'email',
                                          'placeholder': 'Second name'}))
        fields = ['first_name', 'second_name', 'email', 'sex', 'date_of_birth',
                  'country',
                  ]
        help_texts = {
            "first_name": "Enter your real First name",
            "second_name": "Enter your real Second name",
            "email": "Enter your email",
            "sex": "Select your sex",
            "date_of_birth": "Enter your date of birth",
            "country": "Select your country from the list",
        }

    def clean(self, *args, **kwargs):
        """Compare first_name and second_name fields in form, checks that their
        value is not the same."""
        cleaned_data = super().clean()
        first_name = cleaned_data.get('first_name')
        second_name = cleaned_data.get('second_name')
        if first_name == second_name:
            raise forms.ValidationError("First name and Second name must be " +
                                        "different!")
        return cleaned_data

    def clean_first_name(self, *args, **kwargs):
        """Checks that first_name field doesn't contain digits and field's
        length is not over 15 characters."""
        first_name = self.cleaned_data.get('first_name')
        if any(char.isdigit() for char in first_name):
            # Bug fix: the original concatenation was missing a space and
            # rendered as "...cannot containnumbers".
            raise forms.ValidationError("The First name cannot contain numbers")
        if len(first_name) > 15:
            raise forms.ValidationError("This is not a valid First Name!")
        return first_name

    def clean_second_name(self, *args, **kwargs):
        """Checks that second_name field doesn't contain digits and field's
        length is not over 15 characters."""
        second_name = self.cleaned_data.get('second_name')
        if any(char.isdigit() for char in second_name):
            # Bug fix: missing space in the original concatenated message.
            raise forms.ValidationError("The Second name cannot contain numbers")
        if len(second_name) > 15:
            raise forms.ValidationError("This is not a valid Second Name!")
        return second_name
| [
"django.forms.TextInput",
"django.forms.ValidationError"
] | [((2524, 2599), 'django.forms.ValidationError', 'forms.ValidationError', (["('First name and Second name must be ' + 'different!')"], {}), "('First name and Second name must be ' + 'different!')\n", (2545, 2599), False, 'from django import forms\n'), ((3183, 3239), 'django.forms.ValidationError', 'forms.ValidationError', (['"""This is not a valid First Name!"""'], {}), "('This is not a valid First Name!')\n", (3204, 3239), False, 'from django import forms\n'), ((3763, 3820), 'django.forms.ValidationError', 'forms.ValidationError', (['"""This is not a valid Second Name!"""'], {}), "('This is not a valid Second Name!')\n", (3784, 3820), False, 'from django import forms\n'), ((403, 551), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'type': 'text', 'name': 'first_name', 'class': 'form-control my-input',\n 'id': 'first_name', 'placeholder': 'First name'}"}), "(attrs={'type': 'text', 'name': 'first_name', 'class':\n 'form-control my-input', 'id': 'first_name', 'placeholder': 'First name'})\n", (418, 551), False, 'from django import forms\n'), ((864, 1020), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'type': 'text', 'name': 'second_name', 'class': 'form-control my-input',\n 'id': 'second_name', 'placeholder': 'Second name'}"}), "(attrs={'type': 'text', 'name': 'second_name', 'class':\n 'form-control my-input', 'id': 'second_name', 'placeholder': 'Second name'}\n )\n", (879, 1020), False, 'from django import forms\n'), ((1325, 1471), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'type': 'email', 'name': 'second_name', 'class': 'form-control my-input',\n 'id': 'email', 'placeholder': 'Second name'}"}), "(attrs={'type': 'email', 'name': 'second_name', 'class':\n 'form-control my-input', 'id': 'email', 'placeholder': 'Second name'})\n", (1340, 1471), False, 'from django import forms\n'), ((2975, 3041), 'django.forms.ValidationError', 'forms.ValidationError', (["('The First name cannot contain' + 'numbers')"], {}), "('The 
First name cannot contain' + 'numbers')\n", (2996, 3041), False, 'from django import forms\n'), ((3552, 3619), 'django.forms.ValidationError', 'forms.ValidationError', (["('The Second name cannot contain' + 'numbers')"], {}), "('The Second name cannot contain' + 'numbers')\n", (3573, 3619), False, 'from django import forms\n')] |
from django.shortcuts import render
from django.template import RequestContext
from django.shortcuts import render_to_response, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.backends import ModelBackend
def login_user(request):
    """Log out any current session, then try to authenticate from POST data.

    On success for an active account the user is logged in and redirected
    to /main/; otherwise (GET request, bad credentials, or inactive user)
    the login page is rendered again.
    """
    # Always drop the existing session first so a failed attempt cannot
    # leave a stale login behind.
    logout(request)
    if request.POST:
        email = request.POST['email']
        password = request.POST['password']
        # authenticate() returns None on bad credentials.
        user = authenticate(email=email, password=password)
        if user is not None and user.is_active:
            login(request, user)
            return HttpResponseRedirect('/main/')
    # NOTE(review): render_to_response/RequestContext is deprecated; the
    # already-imported render() shortcut is the modern replacement.
    return render_to_response('login.html', context=RequestContext(request))
"django.http.HttpResponseRedirect",
"django.contrib.auth.authenticate",
"django.contrib.auth.login",
"django.template.RequestContext",
"django.contrib.auth.logout"
] | [((343, 358), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (349, 358), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((504, 548), 'django.contrib.auth.authenticate', 'authenticate', ([], {'email': 'email', 'password': 'password'}), '(email=email, password=password)\n', (516, 548), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((752, 775), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (766, 775), False, 'from django.template import RequestContext\n'), ((625, 645), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (630, 645), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((669, 699), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/main/"""'], {}), "('/main/')\n", (689, 699), False, 'from django.http import HttpResponse, HttpResponseRedirect\n')] |
#!/usr/bin/env python3
# Copyright (c) 2020 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
# Test mempool eviction based on transaction fee
# 1. Fill 90% of the mempool with transactions with a high fee
# 2. Fill 10% of the mempool with transactions with a lower fee
# 3. Send a large transaction (15% of mempool) that has a lower fee than
# most of the transactions in the pool
# 4. See what happens...
from test_framework.test_framework import BitcoinTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.cdefs import ONE_MEGABYTE
from test_framework.util import bytes_to_hex_str, create_confirmed_utxos, satoshi_round
from test_framework.util import assert_equal, assert_raises_rpc_error
import decimal
import random
def send_tx_with_data(node, utxo, fee, data_size):
    """Spend *utxo* back to a fresh address of *node*, padding the tx with an
    OP_RETURN data output of *data_size* bytes (24 random bytes followed by
    zero padding). Returns the txid of the submitted transaction."""
    assert data_size > 24
    spend_value = utxo['amount'] - fee
    inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
    outputs = {node.getnewaddress(): satoshi_round(spend_value)}
    payload = bytearray(random.getrandbits(8) for _ in range(24))
    payload += bytearray(data_size - 24)
    outputs["data"] = bytes_to_hex_str(payload)
    raw_txn = node.createrawtransaction(inputs, outputs)
    signed_hex = node.signrawtransaction(raw_txn)["hex"]
    return node.sendrawtransaction(signed_hex)
class MempoolEvictionPriorityTest(BitcoinTestFramework):
    # Mempool capacity in MB and how many equally-sized transactions to
    # split it across; percentages below are derived from these two.
    mempool_size = 300
    total_number_of_transactions = 50
    def set_test_params(self):
        # Single node, clean chain, fixed-size in-memory mempool with the
        # disk overflow disabled so eviction must happen in memory.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [["-maxmempool={}".format(self.mempool_size),
                            "-maxmempoolsizedisk=0",
                            "-spendzeroconfchange=0",
                            "-genesisactivationheight=1",
                            "-maxtxsizepolicy=0",
                            '-maxtxfee=1.0']]
    def run_test(self):
        """Fill the mempool 90% high-fee / 10% low-fee, then submit one large
        mid-fee transaction and verify only the low-fee ones get evicted."""
        # Rough per-transaction overhead subtracted so the padded data output
        # makes each transaction an equal share of the mempool.
        transaction_overhead = 2048
        mempool_size = self.mempool_size
        total_number_of_transactions = self.total_number_of_transactions
        number_of_good_transactions = total_number_of_transactions * 90 // 100
        number_of_cheap_transactions = total_number_of_transactions - number_of_good_transactions
        last_transaction_factor = total_number_of_transactions * 15 // 100
        transaction_size = mempool_size * ONE_MEGABYTE // total_number_of_transactions - transaction_overhead
        relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        # One spare UTXO for the final oversized transaction.
        utxos = create_confirmed_utxos(relayfee, self.nodes[0], total_number_of_transactions + 1)
        # Transactions with higher fee rate
        # size: 6MiB, fee: 10,000,000 satoshi (0.1 BSV) --> fee rate: 1.6 sat/byte
        good_fee = decimal.Decimal('0.1')
        good_txids = []
        for i in range(number_of_good_transactions):
            txid = send_tx_with_data(self.nodes[0], utxos.pop(), good_fee, transaction_size)
            self.log.debug("Inserted good transaction %d %s", i + 1, txid)
            good_txids.append(txid)
        assert_equal(len(self.nodes[0].getrawmempool()), number_of_good_transactions)
        self.log.info("%d transactions successfully arrived to mempool.", number_of_good_transactions)
        # Transactions with lower fee rate
        # size: 6MiB, fee: 2,500,000 satoshi (0.025 BSV) --> fee rate: 0.4 sat/byte
        cheap_fee = good_fee / 4
        cheap_txids = []
        for i in range(number_of_cheap_transactions):
            txid = send_tx_with_data(self.nodes[0], utxos.pop(), cheap_fee, transaction_size)
            self.log.debug("Inserted cheap transaction %d %s", i + 1, txid)
            cheap_txids.append(txid)
        assert_equal(len(self.nodes[0].getrawmempool()), total_number_of_transactions)
        self.log.info("%d transactions successfully arrived to mempool.", total_number_of_transactions)
        # The mempool should now be full. Insert the last, large transaction
        # size: 42MiB, fee: 35,000,000 satoshi (0.35 BSV) --> fee rate: 0.8 sat/byte
        # Its fee rate beats the cheap transactions but not the good ones, and
        # it does not fit, so submission must be rejected with 'mempool full'.
        self.log.info("Inserting last transaction")
        last_fee = last_transaction_factor * good_fee / 2
        last_size = last_transaction_factor * transaction_size
        assert_raises_rpc_error(
            -26, 'mempool full',
            send_tx_with_data, self.nodes[0], utxos.pop(), last_fee, last_size)
        # Now let's see what happens. There should be no cheap transactions in the pool any more.
        mempool = self.nodes[0].getrawmempool()
        assert_equal(len(mempool), number_of_good_transactions)
        self.log.info("%d transactions were evicted.", total_number_of_transactions - len(mempool))
        for txid in cheap_txids:
            assert(txid not in mempool)
        self.log.info("All transactions with insufficient fee were evicted.")
if __name__ == '__main__':
    MempoolEvictionPriorityTest().main()
| [
"test_framework.util.bytes_to_hex_str",
"test_framework.util.satoshi_round",
"test_framework.util.create_confirmed_utxos",
"random.getrandbits",
"decimal.Decimal"
] | [((1086, 1111), 'test_framework.util.satoshi_round', 'satoshi_round', (['send_value'], {}), '(send_value)\n', (1099, 1111), False, 'from test_framework.util import bytes_to_hex_str, create_confirmed_utxos, satoshi_round\n'), ((1225, 1247), 'test_framework.util.bytes_to_hex_str', 'bytes_to_hex_str', (['data'], {}), '(data)\n', (1241, 1247), False, 'from test_framework.util import bytes_to_hex_str, create_confirmed_utxos, satoshi_round\n'), ((2570, 2656), 'test_framework.util.create_confirmed_utxos', 'create_confirmed_utxos', (['relayfee', 'self.nodes[0]', '(total_number_of_transactions + 1)'], {}), '(relayfee, self.nodes[0], \n total_number_of_transactions + 1)\n', (2592, 2656), False, 'from test_framework.util import bytes_to_hex_str, create_confirmed_utxos, satoshi_round\n'), ((2799, 2821), 'decimal.Decimal', 'decimal.Decimal', (['"""0.1"""'], {}), "('0.1')\n", (2814, 2821), False, 'import decimal\n'), ((1133, 1154), 'random.getrandbits', 'random.getrandbits', (['(8)'], {}), '(8)\n', (1151, 1154), False, 'import random\n')] |
import cv2
import os
import csv
import parzen.PARZEN as parzen
def extract_features(image_path, vector_size, label):
    """Load an image, grayscale it, resize to vector_size x vector_size and
    return the flattened pixel values as a list.

    NOTE(review): the last pixel value is *overwritten* with the label
    string (not appended), so one feature is lost -- confirm this is
    intended before relying on the feature vector length.
    """
    image = cv2.imread(image_path)
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    resized = cv2.resize(grayscale, (vector_size, vector_size))
    features = resized.flatten().tolist()
    features[-1] = str(label)
    return features
def write_csv(images_path='persian_number/'):
    """Walk ``images_path`` (one sub-directory per class label), extract
    pixel features from every image and write them to ``file.csv``.

    Each sub-directory's name is used as the class label; the label ends up
    in the last column of every row (see extract_features).
    """
    class_dirs = [os.path.join(images_path, p) for p in sorted(os.listdir(images_path))]
    final_path = {}
    for class_dir in class_dirs:
        image_paths = [os.path.join(class_dir, p) for p in sorted(os.listdir(class_dir))]
        # Use the directory *name* as the label; basename() works for any
        # images_path, unlike the original hard-coded slice f[15:] which was
        # only correct for the default 'persian_number/' prefix.
        label = os.path.basename(os.path.normpath(class_dir))
        final_path[label] = image_paths
    # newline='' is required by the csv module to avoid blank rows on Windows;
    # create the writer once instead of once per label.
    with open('file.csv', "w", newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        for key, value in final_path.items():
            for path in value:
                writer.writerow(extract_features(path, 30, key))
# Build (or rebuild) file.csv from the images on disk when this module runs.
write_csv()
# NOTE: Parzen classification below is disabled -- it doesn't work for these
# raw-pixel features.
# radius = [0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 1, 2, 3, 4]
#
# my_parzen = parzen.ParzenClassifier(csv_file='file.csv', data=None, r=radius, weight=.90)
# radius_accuracy_dict, best_radius = my_parzen.kfold_validation(10)
#
# test, predicted = my_parzen.test(best_radius)
#
# ac, cm, re = my_parzen.report(test, predicted)
#
# print(re, "\n")
# print(cm, "\n")
# print(ac, "\n")
| [
"os.listdir",
"csv.writer",
"os.path.join",
"cv2.cvtColor",
"cv2.resize",
"cv2.imread"
] | [((129, 151), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (139, 151), False, 'import cv2\n'), ((163, 200), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (175, 200), False, 'import cv2\n'), ((213, 257), 'cv2.resize', 'cv2.resize', (['gray', '(vector_size, vector_size)'], {}), '(gray, (vector_size, vector_size))\n', (223, 257), False, 'import cv2\n'), ((431, 459), 'os.path.join', 'os.path.join', (['images_path', 'p'], {}), '(images_path, p)\n', (443, 459), False, 'import os\n'), ((580, 598), 'os.path.join', 'os.path.join', (['f', 'p'], {}), '(f, p)\n', (592, 598), False, 'import os\n'), ((476, 499), 'os.listdir', 'os.listdir', (['images_path'], {}), '(images_path)\n', (486, 499), False, 'import os\n'), ((849, 884), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (859, 884), False, 'import csv\n'), ((615, 628), 'os.listdir', 'os.listdir', (['f'], {}), '(f)\n', (625, 628), False, 'import os\n')] |
from flask import g
import re
'''
This is a helper class to parse
CloudWatch slow query log events
'''
regexPerformance = re.compile(
r"^# Query_time:\s+(\d+\.\d+)\s+Lock_time:\s+(\d+\.\d+)\s+Rows_sent:\s+(\d+)\s+Rows_examined:\s+(\d+)"
)
regexUserInfo = re.compile(
r"^# User@Host:\s+(\S+)\s+@\s+(.*)\s+Id:\s+(\d+)"
)
regexLinesToIgnore = [
re.compile(r"^#"),
re.compile(r"^use ", re.I),
re.compile(r"^set ", re.I)
]
class CWEvent:
    """Parser for a single CloudWatch slow-query-log event.

    Wraps one CloudWatch log event (a dict with at least ``message`` and
    ``timestamp``) and extracts the user/host identity, the performance
    counters and the reconstructed query text from the multi-line message.
    """
    def __init__(self, event):
        # event: CloudWatch log event dict; 'message' holds one raw
        # slow-query-log entry.
        self._event = event
    @staticmethod
    def skip_line(line):
        # Lines matching any pattern in regexLinesToIgnore ('#' comments,
        # 'use ...' / 'set ...' statements) are not part of the query text.
        for regex in regexLinesToIgnore:
            if bool(regex.match(line)):
                g.logger.debug("IGNORE: {}".format(line))
                return True
        return False
    def parse(self):
        """Parse the wrapped event's message into a dict of query metadata.

        Returns a dict with the raw event, query/lock times, rows examined
        ('rows') and rows sent ('sent'), the session id, the tab-joined query
        text and the event timestamp. Fields stay None when their header line
        was missing; that case is logged as PARSE_FAILED but still returned.
        """
        cw_event = self._event
        query = None
        query_time = None
        lock_time = None
        sent = None
        rows = None
        host = None
        user = None
        log_session = None
        for line in cw_event['message'].splitlines():
            g.logger.debug("LINE: {}".format(line))
            # A 'User@Host' header starts a new entry: capture identity and
            # reset the accumulated query text.
            user_info = regexUserInfo.match(line)
            if user_info:
                query = ""
                user = user_info.group(1)
                host = user_info.group(2)
                log_session = user_info.group(3)
                continue
            # A 'Query_time' header carries the performance counters; it also
            # resets the query text even when the counters fail to parse.
            if bool(re.match('^# Query_time:', line)):
                query = ""
                g.logger.debug("QT LIKELY: {}".format(line))
                m = regexPerformance.match(line)
                if m:
                    query_time = float(m.group(1))
                    lock_time = float(m.group(2))
                    sent = int(m.group(3))
                    rows = int(m.group(4))
                    g.logger.debug("QT OK: {} {}".format(query_time, rows))
                else:
                    g.logger.debug("QT ERROR: {}".format(line))
                continue
            if self.skip_line(line):
                continue
            # Everything else is query text; lines are joined with tabs.
            query = query + line + "\t"
        # done with the entry... do we have a pending query to output
        # NOTE(review): 'sent' is absent from this completeness check --
        # confirm whether a missing Rows_sent should also count as a failure.
        if any(x is None for x in [rows, query_time, lock_time, user, host]):
            g.logger.info("PARSE_FAILED: {}".format(cw_event))
            g.logger.info("PARSE_FAILED: {} {} {} {} {}".format(
                rows,
                query_time,
                lock_time,
                user,
                host
            ))
        return {
            'event': cw_event,
            'qtime': query_time,
            'session': log_session,
            'rows': rows,
            'sent': sent,
            'ltime': lock_time,
            'query': query,
            'raw': cw_event['message'],
            'timestamp': cw_event['timestamp']
        }
| [
"re.match",
"re.compile"
] | [((124, 262), 're.compile', 're.compile', (['"""^# Query_time:\\\\s+(\\\\d+\\\\.\\\\d+)\\\\s+Lock_time:\\\\s+(\\\\d+\\\\.\\\\d+)\\\\s+Rows_sent:\\\\s+(\\\\d+)\\\\s+Rows_examined:\\\\s+(\\\\d+)"""'], {}), "(\n '^# Query_time:\\\\s+(\\\\d+\\\\.\\\\d+)\\\\s+Lock_time:\\\\s+(\\\\d+\\\\.\\\\d+)\\\\s+Rows_sent:\\\\s+(\\\\d+)\\\\s+Rows_examined:\\\\s+(\\\\d+)'\n )\n", (134, 262), False, 'import re\n'), ((262, 329), 're.compile', 're.compile', (['"""^# User@Host:\\\\s+(\\\\S+)\\\\s+@\\\\s+(.*)\\\\s+Id:\\\\s+(\\\\d+)"""'], {}), "('^# User@Host:\\\\s+(\\\\S+)\\\\s+@\\\\s+(.*)\\\\s+Id:\\\\s+(\\\\d+)')\n", (272, 329), False, 'import re\n'), ((358, 374), 're.compile', 're.compile', (['"""^#"""'], {}), "('^#')\n", (368, 374), False, 'import re\n'), ((381, 406), 're.compile', 're.compile', (['"""^use """', 're.I'], {}), "('^use ', re.I)\n", (391, 406), False, 'import re\n'), ((413, 438), 're.compile', 're.compile', (['"""^set """', 're.I'], {}), "('^set ', re.I)\n", (423, 438), False, 'import re\n'), ((1450, 1482), 're.match', 're.match', (['"""^# Query_time:"""', 'line'], {}), "('^# Query_time:', line)\n", (1458, 1482), False, 'import re\n')] |
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from const import size_str, dts_time_str, duration_time_str, x_str, y_str, ssim_all_str
from ffmpeg import get_packets_info, get_qp_data, get_ssim_data
from graph import generate_line_graph
def generate_bitrate_qp_graph(file_path):
    """
    Call ffprobe to get packets info and generate bitrate graph
    Call ffmpeg to get qp data and generate the qp graph
    :param file_path: input video file path
    :return: <div> bitrate graph, <div> qp graph, <div> combined graph
    """
    bitrate_data = compute_bitrate(get_packets_info(file_path))
    qp_data = get_qp_data(file_path)
    return (
        generate_line_graph(bitrate_data),
        generate_line_graph(qp_data),
        generate_line_graph(bitrate_data, qp_data),
    )
def generate_ssim_graph(main_file_path, ref_file_path):
    """
    Call ffmpeg to get ssim per frame and generate ssim graph
    :param main_file_path: main video file path
    :param ref_file_path: ref video file path
    :return: <div> ssim graph
    """
    per_frame = get_ssim_data(main_file_path, ref_file_path)
    return generate_line_graph(compute_ssim(per_frame))
def compute_bitrate(packets):
    """
    Calculate the bitrate per frame
    :param packets: packet info from ffprobe
    :return: bitrate data (dict with x/y series)
    """
    xs, ys = [], []
    for packet in packets:
        bitrate = float(packet[size_str]) / float(packet[duration_time_str])
        # TODO: replace with frame index
        xs.append(packet[dts_time_str])
        ys.append(bitrate)
    return {x_str: xs, y_str: ys}
def compute_ssim(frame_ssim):
    """
    Parse SSIM data to an appropriate format
    :param frame_ssim: SSIM per frame dict
    :return: data (dict with x/y series)
    """
    frame_indices = list(frame_ssim.keys())
    ssim_values = [ssim[ssim_all_str] for ssim in frame_ssim.values()]
    return {x_str: frame_indices, y_str: ssim_values}
| [
"ffmpeg.get_ssim_data",
"ffmpeg.get_qp_data",
"graph.generate_line_graph",
"ffmpeg.get_packets_info"
] | [((1066, 1093), 'ffmpeg.get_packets_info', 'get_packets_info', (['file_path'], {}), '(file_path)\n', (1082, 1093), False, 'from ffmpeg import get_packets_info, get_qp_data, get_ssim_data\n'), ((1151, 1173), 'ffmpeg.get_qp_data', 'get_qp_data', (['file_path'], {}), '(file_path)\n', (1162, 1173), False, 'from ffmpeg import get_packets_info, get_qp_data, get_ssim_data\n'), ((1194, 1221), 'graph.generate_line_graph', 'generate_line_graph', (['b_data'], {}), '(b_data)\n', (1213, 1221), False, 'from graph import generate_line_graph\n'), ((1237, 1265), 'graph.generate_line_graph', 'generate_line_graph', (['qp_data'], {}), '(qp_data)\n', (1256, 1265), False, 'from graph import generate_line_graph\n'), ((1280, 1316), 'graph.generate_line_graph', 'generate_line_graph', (['b_data', 'qp_data'], {}), '(b_data, qp_data)\n', (1299, 1316), False, 'from graph import generate_line_graph\n'), ((1637, 1681), 'ffmpeg.get_ssim_data', 'get_ssim_data', (['main_file_path', 'ref_file_path'], {}), '(main_file_path, ref_file_path)\n', (1650, 1681), False, 'from ffmpeg import get_packets_info, get_qp_data, get_ssim_data\n')] |
import pytest
from copy import deepcopy
import time
import os
import sys
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '..')))
from py_conway import Game, GameState, InitError # nopep8
from py_conway.threaded_game import ThreadedGame # nopep8
def create_zeros(x, y):
    """Return a y-row by x-column grid of zeros, with a fresh list per row."""
    return [[0] * x for _ in range(y)]
# Construction tests: Game(height, width[, seed]); seeds are nested lists.
def test_game_init():
    """Board size is stored as an (h, w) tuple."""
    test_game = Game(12, 12)
    assert test_game.board_size == (12, 12)
# Add a seed with a single cell active
def test_test_game_init_seed():
    """A provided seed is kept as given."""
    seed = create_zeros(6, 6)
    seed[1][1] = 1
    test_game = Game(6, 6, seed)
    assert test_game.seed == seed
# Test for defaults
def test_test_game_init_defaults():
    """Without a seed the board defaults to all zeros."""
    test_game = Game(6, 6)
    assert test_game.board_size == (6, 6)
    assert test_game.seed == create_zeros(6, 6)
# Test for default board_size on set seed
def test_test_game_init_board_default_seed():
    """The default seed matches the requested board dimensions."""
    test_game = Game(12, 12)
    assert test_game.seed == create_zeros(12, 12)
# Neighbor-count tests: _num_neighbors(row, col) counts live adjacent cells
# (coordinates appear to be (row, column) -- inferred from the fixtures).
def test_test_game_3_x_3_check_cell_with_one_neighbor():
    """Center cell of a vertical pair has exactly one live neighbor."""
    seed = [[0, 1, 0],
            [0, 1, 0],
            [0, 0, 0]]
    test_game = Game(3, 3, seed)
    assert test_game._num_neighbors(1, 1) == 1
def test_test_game_3_x_3_check_cell_with_two_neighbors():
    """A dead cell next to a vertical pair sees two live neighbors."""
    seed = [[0, 1, 0],
            [0, 1, 0],
            [0, 0, 0]]
    test_game = Game(3, 3, seed)
    assert test_game._num_neighbors(1, 2) == 2
def test_test_game_3_x_3_check_cell_with_three_neighbors():
    """A dead cell adjacent to an L of three live cells sees all three."""
    seed = [[1, 0, 0],
            [1, 1, 0],
            [0, 0, 0]]
    test_game = Game(3, 3, seed)
    assert test_game._num_neighbors(0, 1) == 3
# Run on an array of a single column
def test_single_column_array():
    """Neighbor counting works on a 3x1 (single-column) board."""
    seed = [[0], [0], [1]]
    test_game = Game(3, 1, seed)
    assert test_game._num_neighbors(1, 0) == 1
# Run the test_game for one iteration on a single cell
def test_3_x_3_single_cell_single_run():
    # A lone live cell is under-populated and dies after one generation.
    seed = [[0, 0, 0],
            [0, 1, 0],
            [0, 0, 0]]
    test_game = Game(3, 3, seed)
    test_game.start()
    test_game.run_generation()
    assert test_game.current_board == create_zeros(3, 3)
# Run the test_game for one iteration on three cells
def test_3_x_3_three_neighbors_single_run():
    # Three cells in an L shape stabilise into a 2x2 block (still life).
    seed = [[1, 0, 0],
            [1, 1, 0],
            [0, 0, 0]]
    expected_state = [[1, 1, 0],
                      [1, 1, 0],
                      [0, 0, 0]]
    test_game = Game(3, 3, seed)
    test_game.start()
    test_game.run_generation()
    assert test_game.current_board == expected_state
# Run the test_game for one iteration on four cells
def test_3_x_3_four_neighbors_single_run():
    # Plus-shaped pattern: the centre dies of overpopulation, corners are born.
    seed = [[0, 1, 0],
            [1, 1, 1],
            [0, 1, 0]]
    expected_state = [[1, 1, 1],
                      [1, 0, 1],
                      [1, 1, 1]]
    test_game = Game(3, 3, seed)
    test_game.start()
    test_game.run_generation()
    assert test_game.current_board == expected_state
# Run the test_game for one iteration on four cells
def test_4_x_3_three_neighbors_single_run():
    # Non-square (3 rows x 4 columns) board to catch row/column mix-ups.
    seed = [[1, 0, 1, 0],
            [0, 1, 0, 0],
            [1, 0, 1, 1]]
    expected_state = [[0, 1, 0, 0],
                      [1, 0, 0, 1],
                      [0, 1, 1, 0]]
    test_game = Game(3, 4, seed)
    test_game.start()
    test_game.run_generation()
    assert test_game.current_board == expected_state
# Test the number of live cells on an empty seed
def test_3_x_3_empty_seed_no_live_cells():
    test_game = Game(3, 3)
    assert test_game.live_cells == 0
# Test the live cells count on a seed with 5 live cells
def test_3_x_3_seed_five_live_cells():
    # live_cells should be populated from the seed before any generation runs.
    seed = [[0, 1, 0],
            [1, 1, 1],
            [0, 1, 0]]
    test_game = Game(3, 3, seed)
    assert test_game.live_cells == 5
def test_increment_live_cells_after_update():
    # After one generation the plus pattern grows from 5 to 8 live cells.
    seed = [[0, 1, 0],
            [1, 1, 1],
            [0, 1, 0]]
    test_game = Game(3, 3, seed)
    test_game.start()
    test_game.run_generation()
    assert test_game.live_cells == 8
def test_update_generations_count_after_each_generation():
    # Each run_generation() call should bump the generations counter by one.
    test_game = Game(2, 2, [[1, 0], [0, 1]])
    test_game.start()
    test_game.run_generation()
    test_game.run_generation()
    assert test_game.generations == 2
# Run the test_game for two iterations on four cells
def test_3_x_3_four_neighbors_two_runs():
    seed = [[0, 1, 0],
            [1, 1, 1],
            [0, 1, 0]]
    expected_state = [[1, 0, 1],
                      [0, 0, 0],
                      [1, 0, 1]]
    test_game = Game(3, 3, seed)
    test_game.start()
    test_game.run_generation()
    test_game.run_generation()
    assert test_game.current_board == expected_state
def test_default_game_state_ready():
    # A freshly constructed game has not been started yet.
    test_game = Game(3, 3)
    assert test_game.state == GameState.READY
# NOTE(review): the name says "changes state to running" but the assertions
# check the terminal FINISHED state after _run() completes -- the name looks
# stale; the behaviour actually verified is a full run to extinction.
def test_threaded_start_game_changes_state_to_running():
    seed = [[0, 1, 0],
            [1, 1, 1],
            [0, 1, 0]]
    test_game = ThreadedGame(3, 3, seed)
    test_game.state = GameState.READY
    test_game._thread_active = True
    test_game._run()
    assert test_game.state == GameState.FINISHED
    assert test_game.live_cells == 0
def test_threaded_empty_board_run_game_until_no_living_cells_left():
    # An empty board should finish immediately (nothing can ever be born).
    test_game = ThreadedGame(3, 3)
    test_game.state = GameState.READY
    test_game._run()
    assert test_game.state == GameState.FINISHED
def test_ensure_that_width_height_and_seed_match():
    # When the given dimensions (3x4) disagree with the seed (4x4), the
    # seed's dimensions win.
    seed = [[0, 0, 0, 0],
            [0, 1, 1, 0],
            [0, 1, 1, 0],
            [0, 0, 0, 0]]
    my_game = Game(3, 4, seed)
    assert my_game.board_size == (4, 4)
def test_ensure_that_live_cells_count_is_accurate_before_run():
    # Mutating current_board directly after start() must be reflected in the
    # live_cells count produced by the next generation.
    seed = [[0, 0, 0, 0],
            [0, 1, 1, 0],
            [0, 1, 1, 0],
            [0, 0, 0, 0]]
    test_game = Game(4, 4, seed)
    test_game.start()
    test_game.current_board[0][0] = 1
    test_game.run_generation()
    assert test_game.live_cells == 5
def test_no_seed_ensure_live_cells_count_is_accurate_before_run():
    test_game = Game(4, 4)
    test_game.start()
    test_game.current_board[0][0] = 1
    test_game.current_board[0][1] = 1
    test_game.current_board[0][2] = 1
    test_game.run_generation()
    assert test_game.live_cells == 2
def test_threaded_game_still_life_game_will_continue_to_run():
    # A 2x2 block is a still life: the threaded runner should detect the
    # unchanging board and report STASIS rather than running forever.
    seed = [[0, 0, 0, 0],
            [0, 1, 1, 0],
            [0, 1, 1, 0],
            [0, 0, 0, 0]]
    test_game = ThreadedGame(4, 4, seed)
    test_game.start_thread()
    assert test_game.state == GameState.STASIS
    assert test_game.generations > 1
def test_still_life_game_can_be_stopped():
    seed = [[0, 0, 0, 0],
            [0, 1, 1, 0],
            [0, 1, 1, 0],
            [0, 0, 0, 0]]
    test_game = ThreadedGame(4, 4, seed)
    test_game.start_thread()
    assert test_game.state == GameState.STASIS
    test_game.stop_thread()
    # Give the worker thread a moment to observe the stop request.
    time.sleep(.10)
    assert test_game.state == GameState.FINISHED
def test_ensure_that_seed_includes_valid_data():
    # Seeds may only contain 0s and 1s; strings, values > 1 and negatives
    # must each raise InitError at construction time.
    seed = [['a', 0, 0, 0],
            [0, 2, 1, 0],
            [0, 1, 1, 0],
            [0, 0, 0, -1]]
    with pytest.raises(InitError):
        Game(4, 4, seed)
    seed = [[0, 0, 0, 0],
            [0, 2, 1, 0],
            [0, 1, 1, 0],
            [0, 0, 0, -1]]
    with pytest.raises(InitError):
        Game(4, 4, seed)
    seed = [[0, 0, 0, 0],
            [0, 0, 1, 0],
            [0, 1, 1, 0],
            [0, 0, 0, -1]]
    with pytest.raises(InitError):
        Game(4, 4, seed)
def test_generate_random_seed_generates_random_seed():
    # random=True should produce at least one live cell on a 12x12 board.
    test_game = Game(12, 12, random=True)
    assert test_game.live_cells > 0
def test_enable_boundary_wrapping_on_board():
    # With enforce_boundary=False the board wraps around toroidally, so a
    # column hugging the left edge also neighbours the rightmost column.
    seed = [[1, 0, 0, 0],
            [1, 0, 0, 0],
            [1, 0, 0, 0],
            [0, 0, 0, 0]]
    test_game = Game(4, 4, seed=seed, enforce_boundary=False)
    expected_board = [[0, 0, 0, 0],
                      [1, 1, 0, 1],
                      [0, 0, 0, 0],
                      [0, 0, 0, 0]]
    test_game.start()
    test_game.run_generation()
    assert test_game.current_board == expected_board
def test_dont_run_generation_when_game_not_started():
    # run_generation() is a no-op until start() has been called.
    test_game = Game(6, 6, random=True)
    test_game.run_generation()
    assert test_game.generations == 0
def test_start_can_be_used_to_restart_the_game():
    # start() resets current_board back to the original seed.
    test_game = Game(6, 6, random=True)
    random_seed = test_game.current_board
    test_game.start()
    test_game.run_generation()
    test_game.run_generation()
    assert test_game.current_board != random_seed
    test_game.start()
    assert test_game.current_board == random_seed
def test_reseed_can_be_used_to_create_a_new_random_seed():
    test_game = Game(6, 6, random=True)
    first_random_seed = deepcopy(test_game.current_board)
    test_game.reseed()
    assert first_random_seed != test_game.current_board
def test_cannot_reseed_when_game_is_active():
    # reseed() must be ignored once the game has been started.
    test_game = Game(6, 6, random=True)
    first_random_seed = deepcopy(test_game.current_board)
    test_game.start()
    test_game.reseed()
    assert first_random_seed == test_game.current_board
def test_empty_width_raises_error_when_no_seed_provided():
    # Zero-sized boards cannot be generated randomly.
    with pytest.raises(InitError):
        Game(columns=0, random=True)
def test_empty_height_raises_error_when_no_seed_provided():
    with pytest.raises(InitError):
        Game(rows=0, random=True)
def test_calculate_proper_width_and_height_when_seed_provided():
    # board_size is reported as (columns, rows) derived from the seed.
    seed = [[1, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [0, 0, 0]]
    my_game = Game(seed=seed)
    assert my_game.board_size == (3, 4)
def test_changing_seed_does_not_change_current_board():
    # The game must keep its own copy of the seed, not alias the caller's list.
    seed = [[1, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [0, 0, 0]]
    my_game = Game(seed=seed)
    my_game.seed[0][0] = 0
    assert my_game.seed != my_game.current_board
def test_game_with_differing_dimensions_and_random_seed():
    # Rectangular random boards must run without index errors.
    my_game = Game(57, 37, random=True)
    my_game.start()
    my_game.run_generation()
    assert my_game.generations == 1
def test_game_can_be_stopped_while_running():
    my_game = Game(100, 100, random=True)
    my_game.start()
    my_game.run_generation()
    my_game.stop()
    assert my_game.state == GameState.FINISHED
def test_game_still_life_changes_status_to_stasis():
    # A still life is detected as STASIS after a single generation.
    seed = [[0, 0, 0, 0],
            [0, 1, 1, 0],
            [0, 1, 1, 0],
            [0, 0, 0, 0]]
    my_game = Game(seed=seed)
    my_game.start()
    my_game.run_generation()
    assert my_game.state == GameState.STASIS
def test_game_oscillator_changes_status_to_stasis():
    # A blinker oscillates with period 2: still RUNNING after one step,
    # recognised as STASIS once the board repeats on the second step.
    seed = [[0, 0, 1, 0],
            [0, 0, 1, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 0]]
    my_game = Game(seed=seed)
    my_game.start()
    my_game.run_generation()
    assert my_game.state == GameState.RUNNING
    my_game.run_generation()
    assert my_game.state == GameState.STASIS
| [
"py_conway.Game",
"time.sleep",
"py_conway.threaded_game.ThreadedGame",
"os.path.dirname",
"pytest.raises",
"copy.deepcopy"
] | [((506, 518), 'py_conway.Game', 'Game', (['(12)', '(12)'], {}), '(12, 12)\n', (510, 518), False, 'from py_conway import Game, GameState, InitError\n'), ((701, 717), 'py_conway.Game', 'Game', (['(6)', '(6)', 'seed'], {}), '(6, 6, seed)\n', (705, 717), False, 'from py_conway import Game, GameState, InitError\n'), ((826, 836), 'py_conway.Game', 'Game', (['(6)', '(6)'], {}), '(6, 6)\n', (830, 836), False, 'from py_conway import Game, GameState, InitError\n'), ((1033, 1045), 'py_conway.Game', 'Game', (['(12)', '(12)'], {}), '(12, 12)\n', (1037, 1045), False, 'from py_conway import Game, GameState, InitError\n'), ((1286, 1302), 'py_conway.Game', 'Game', (['(3)', '(3)', 'seed'], {}), '(3, 3, seed)\n', (1290, 1302), False, 'from py_conway import Game, GameState, InitError\n'), ((1496, 1512), 'py_conway.Game', 'Game', (['(3)', '(3)', 'seed'], {}), '(3, 3, seed)\n', (1500, 1512), False, 'from py_conway import Game, GameState, InitError\n'), ((1708, 1724), 'py_conway.Game', 'Game', (['(3)', '(3)', 'seed'], {}), '(3, 3, seed)\n', (1712, 1724), False, 'from py_conway import Game, GameState, InitError\n'), ((1886, 1902), 'py_conway.Game', 'Game', (['(3)', '(1)', 'seed'], {}), '(3, 1, seed)\n', (1890, 1902), False, 'from py_conway import Game, GameState, InitError\n'), ((2134, 2150), 'py_conway.Game', 'Game', (['(3)', '(3)', 'seed'], {}), '(3, 3, seed)\n', (2138, 2150), False, 'from py_conway import Game, GameState, InitError\n'), ((2548, 2564), 'py_conway.Game', 'Game', (['(3)', '(3)', 'seed'], {}), '(3, 3, seed)\n', (2552, 2564), False, 'from py_conway import Game, GameState, InitError\n'), ((2957, 2973), 'py_conway.Game', 'Game', (['(3)', '(3)', 'seed'], {}), '(3, 3, seed)\n', (2961, 2973), False, 'from py_conway import Game, GameState, InitError\n'), ((3385, 3401), 'py_conway.Game', 'Game', (['(3)', '(4)', 'seed'], {}), '(3, 4, seed)\n', (3389, 3401), False, 'from py_conway import Game, GameState, InitError\n'), ((3620, 3630), 'py_conway.Game', 'Game', (['(3)', '(3)'], 
{}), '(3, 3)\n', (3624, 3630), False, 'from py_conway import Game, GameState, InitError\n'), ((3851, 3867), 'py_conway.Game', 'Game', (['(3)', '(3)', 'seed'], {}), '(3, 3, seed)\n', (3855, 3867), False, 'from py_conway import Game, GameState, InitError\n'), ((4040, 4056), 'py_conway.Game', 'Game', (['(3)', '(3)', 'seed'], {}), '(3, 3, seed)\n', (4044, 4056), False, 'from py_conway import Game, GameState, InitError\n'), ((4226, 4254), 'py_conway.Game', 'Game', (['(2)', '(2)', '[[1, 0], [0, 1]]'], {}), '(2, 2, [[1, 0], [0, 1]])\n', (4230, 4254), False, 'from py_conway import Game, GameState, InitError\n'), ((4662, 4678), 'py_conway.Game', 'Game', (['(3)', '(3)', 'seed'], {}), '(3, 3, seed)\n', (4666, 4678), False, 'from py_conway import Game, GameState, InitError\n'), ((4873, 4883), 'py_conway.Game', 'Game', (['(3)', '(3)'], {}), '(3, 3)\n', (4877, 4883), False, 'from py_conway import Game, GameState, InitError\n'), ((5076, 5100), 'py_conway.threaded_game.ThreadedGame', 'ThreadedGame', (['(3)', '(3)', 'seed'], {}), '(3, 3, seed)\n', (5088, 5100), False, 'from py_conway.threaded_game import ThreadedGame\n'), ((5370, 5388), 'py_conway.threaded_game.ThreadedGame', 'ThreadedGame', (['(3)', '(3)'], {}), '(3, 3)\n', (5382, 5388), False, 'from py_conway.threaded_game import ThreadedGame\n'), ((5671, 5687), 'py_conway.Game', 'Game', (['(3)', '(4)', 'seed'], {}), '(3, 4, seed)\n', (5675, 5687), False, 'from py_conway import Game, GameState, InitError\n'), ((5916, 5932), 'py_conway.Game', 'Game', (['(4)', '(4)', 'seed'], {}), '(4, 4, seed)\n', (5920, 5932), False, 'from py_conway import Game, GameState, InitError\n'), ((6149, 6159), 'py_conway.Game', 'Game', (['(4)', '(4)'], {}), '(4, 4)\n', (6153, 6159), False, 'from py_conway import Game, GameState, InitError\n'), ((6553, 6577), 'py_conway.threaded_game.ThreadedGame', 'ThreadedGame', (['(4)', '(4)', 'seed'], {}), '(4, 4, seed)\n', (6565, 6577), False, 'from py_conway.threaded_game import ThreadedGame\n'), ((6858, 6882), 
'py_conway.threaded_game.ThreadedGame', 'ThreadedGame', (['(4)', '(4)', 'seed'], {}), '(4, 4, seed)\n', (6870, 6882), False, 'from py_conway.threaded_game import ThreadedGame\n'), ((6994, 7009), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (7004, 7009), False, 'import time\n'), ((7686, 7711), 'py_conway.Game', 'Game', (['(12)', '(12)'], {'random': '(True)'}), '(12, 12, random=True)\n', (7690, 7711), False, 'from py_conway import Game, GameState, InitError\n'), ((7918, 7963), 'py_conway.Game', 'Game', (['(4)', '(4)'], {'seed': 'seed', 'enforce_boundary': '(False)'}), '(4, 4, seed=seed, enforce_boundary=False)\n', (7922, 7963), False, 'from py_conway import Game, GameState, InitError\n'), ((8289, 8312), 'py_conway.Game', 'Game', (['(6)', '(6)'], {'random': '(True)'}), '(6, 6, random=True)\n', (8293, 8312), False, 'from py_conway import Game, GameState, InitError\n'), ((8452, 8475), 'py_conway.Game', 'Game', (['(6)', '(6)'], {'random': '(True)'}), '(6, 6, random=True)\n', (8456, 8475), False, 'from py_conway import Game, GameState, InitError\n'), ((8806, 8829), 'py_conway.Game', 'Game', (['(6)', '(6)'], {'random': '(True)'}), '(6, 6, random=True)\n', (8810, 8829), False, 'from py_conway import Game, GameState, InitError\n'), ((8855, 8888), 'copy.deepcopy', 'deepcopy', (['test_game.current_board'], {}), '(test_game.current_board)\n', (8863, 8888), False, 'from copy import deepcopy\n'), ((9034, 9057), 'py_conway.Game', 'Game', (['(6)', '(6)'], {'random': '(True)'}), '(6, 6, random=True)\n', (9038, 9057), False, 'from py_conway import Game, GameState, InitError\n'), ((9083, 9116), 'copy.deepcopy', 'deepcopy', (['test_game.current_board'], {}), '(test_game.current_board)\n', (9091, 9116), False, 'from copy import deepcopy\n'), ((9658, 9673), 'py_conway.Game', 'Game', ([], {'seed': 'seed'}), '(seed=seed)\n', (9662, 9673), False, 'from py_conway import Game, GameState, InitError\n'), ((9880, 9895), 'py_conway.Game', 'Game', ([], {'seed': 'seed'}), 
'(seed=seed)\n', (9884, 9895), False, 'from py_conway import Game, GameState, InitError\n'), ((10049, 10074), 'py_conway.Game', 'Game', (['(57)', '(37)'], {'random': '(True)'}), '(57, 37, random=True)\n', (10053, 10074), False, 'from py_conway import Game, GameState, InitError\n'), ((10224, 10251), 'py_conway.Game', 'Game', (['(100)', '(100)'], {'random': '(True)'}), '(100, 100, random=True)\n', (10228, 10251), False, 'from py_conway import Game, GameState, InitError\n'), ((10544, 10559), 'py_conway.Game', 'Game', ([], {'seed': 'seed'}), '(seed=seed)\n', (10548, 10559), False, 'from py_conway import Game, GameState, InitError\n'), ((10830, 10845), 'py_conway.Game', 'Game', ([], {'seed': 'seed'}), '(seed=seed)\n', (10834, 10845), False, 'from py_conway import Game, GameState, InitError\n'), ((7228, 7252), 'pytest.raises', 'pytest.raises', (['InitError'], {}), '(InitError)\n', (7241, 7252), False, 'import pytest\n'), ((7262, 7278), 'py_conway.Game', 'Game', (['(4)', '(4)', 'seed'], {}), '(4, 4, seed)\n', (7266, 7278), False, 'from py_conway import Game, GameState, InitError\n'), ((7395, 7419), 'pytest.raises', 'pytest.raises', (['InitError'], {}), '(InitError)\n', (7408, 7419), False, 'import pytest\n'), ((7429, 7445), 'py_conway.Game', 'Game', (['(4)', '(4)', 'seed'], {}), '(4, 4, seed)\n', (7433, 7445), False, 'from py_conway import Game, GameState, InitError\n'), ((7562, 7586), 'pytest.raises', 'pytest.raises', (['InitError'], {}), '(InitError)\n', (7575, 7586), False, 'import pytest\n'), ((7596, 7612), 'py_conway.Game', 'Game', (['(4)', '(4)', 'seed'], {}), '(4, 4, seed)\n', (7600, 7612), False, 'from py_conway import Game, GameState, InitError\n'), ((9290, 9314), 'pytest.raises', 'pytest.raises', (['InitError'], {}), '(InitError)\n', (9303, 9314), False, 'import pytest\n'), ((9324, 9352), 'py_conway.Game', 'Game', ([], {'columns': '(0)', 'random': '(True)'}), '(columns=0, random=True)\n', (9328, 9352), False, 'from py_conway import Game, GameState, 
InitError\n'), ((9424, 9448), 'pytest.raises', 'pytest.raises', (['InitError'], {}), '(InitError)\n', (9437, 9448), False, 'import pytest\n'), ((9458, 9483), 'py_conway.Game', 'Game', ([], {'rows': '(0)', 'random': '(True)'}), '(rows=0, random=True)\n', (9462, 9483), False, 'from py_conway import Game, GameState, InitError\n'), ((142, 167), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (157, 167), False, 'import os\n')] |
# Create your tests here.
from django.contrib.auth.models import User
from django.test import TestCase
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APIRequestFactory
class TestSignUpCustomerAPI(TestCase):
    """Exercises the customer sign-up endpoint."""

    def setUp(self):
        self.factory = APIRequestFactory()
        self.user = User.objects.create_superuser(username='root', password='<PASSWORD>', email='')

    def test_signup_api(self):
        """POSTing a complete payload to customer-list creates the customer."""
        payload = {
            "username": "sky53674",
            "password": "<PASSWORD>",
            "first_name": "string",
            "last_name": "string",
            "email": "<EMAIL>",
            "phone_num": "string",
            "nickname": "string",
        }
        response = self.client.post(reverse('customer-list'), payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
| [
"rest_framework.test.APIRequestFactory",
"django.contrib.auth.models.User.objects.create_superuser",
"rest_framework.reverse.reverse"
] | [((315, 334), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (332, 334), False, 'from rest_framework.test import APIRequestFactory\n'), ((355, 434), 'django.contrib.auth.models.User.objects.create_superuser', 'User.objects.create_superuser', ([], {'username': '"""root"""', 'password': '"""<PASSWORD>"""', 'email': '""""""'}), "(username='root', password='<PASSWORD>', email='')\n", (384, 434), False, 'from django.contrib.auth.models import User\n'), ((503, 527), 'rest_framework.reverse.reverse', 'reverse', (['"""customer-list"""'], {}), "('customer-list')\n", (510, 527), False, 'from rest_framework.reverse import reverse\n')] |
#!/usr/bin/env python
import unittest
import xml.dom.minidom
from ternip.formats.timex3 import Timex3XmlDocument
from ternip.timex import Timex
class Timex3DocumentTest(unittest.TestCase):
def test_strip_timexes(self):
t = Timex3XmlDocument('<root>This is some <TIMEX3 attr="timex">annotated <TIMEX3>embedded annotated </TIMEX3>text</TIMEX3>.</root>')
t.strip_timexes()
self.assertEquals(str(t), xml.dom.minidom.parseString('<root>This is some annotated embedded annotated text.</root>').toxml())
def test_reconcile_TIMEX(self):
s = Timex3XmlDocument('<root>This is some annotated text.</root>')
t = Timex(type='date')
t1 = Timex(id=1)
t2 = Timex(id=2)
t3 = Timex(id=3)
t.value = "20100710"
t.id = 6
t.mod = "BEFORE"
t.freq = "1M"
t.comment = "Test"
t.quant = 'EVERY'
t.temporal_function = True
t.document_role = 'MODIFICATION_TIME'
t.begin_timex = t1
t.end_timex = t2
t.context = t3
s.reconcile([[('This', 'POS', set()), ('is', 'POS', set()), ('some', 'POS', {t}), ('annotated', 'POS', {t}),
('text', 'POS', {t}), ('.', 'POS', set())]])
self.assertEquals(str(s), xml.dom.minidom.parseString('<root>This is <TIMEX3 tid="t6" beginPoint="t1" endPoint="t2" anchorTimeID="t3" functionInDocument="MODIFICATION_TIME" temporalFunction="true" type="DATE" value="20100710" mod="BEFORE" freq="1M" comment="Test" quant="EVERY">some annotated text</TIMEX3>.</root>').toxml())
def test_timex_to_sents(self):
d = Timex3XmlDocument('<root>This is <TIMEX3 tid="t6" beginPoint="t1" endPoint="t2" anchorTimeID="t3" functionInDocument="MODIFICATION_TIME" temporalFunction="true" type="DATE" value="20100710" mod="BEFORE" freq="1M" comment="Test" quant="EVERY">some annotated text</TIMEX3><TIMEX3 type="date" tid="t1" /><TIMEX3 type="date" tid="t2" /><TIMEX3 type="date" tid="t3" />.</root>')
s = d.get_sents()
t = s[0][2][2].pop()
self.assertEquals(t.value, "20100710")
self.assertEquals(t.id, 6)
self.assertEquals(t.mod, "BEFORE")
self.assertEquals(t.freq, "1M")
self.assertEquals(t.comment, "Test")
self.assertEquals(t.quant, 'EVERY')
self.assertEquals(t.temporal_function, True)
self.assertEquals(t.document_role, 'MODIFICATION_TIME')
self.assertEquals(t.begin_timex.id, 1)
self.assertEquals(t.end_timex.id, 2)
self.assertEquals(t.context.id, 3)
    def test_timex_to_sents_temporalfunction(self):
        # A TIMEX3 without a temporalFunction attribute should default to False.
        d = Timex3XmlDocument('<root>This is <TIMEX3 tid="t6" type="DATE">some annotated text</TIMEX3>.</root>')
        s = d.get_sents()
        t = s[0][2][2].pop()
        self.assertEquals(t.id, 6)
        self.assertEquals(t.type, "DATE")
self.assertEquals(t.temporal_function, False) | [
"ternip.formats.timex3.Timex3XmlDocument",
"ternip.timex.Timex"
] | [((254, 392), 'ternip.formats.timex3.Timex3XmlDocument', 'Timex3XmlDocument', (['"""<root>This is some <TIMEX3 attr="timex">annotated <TIMEX3>embedded annotated </TIMEX3>text</TIMEX3>.</root>"""'], {}), '(\n \'<root>This is some <TIMEX3 attr="timex">annotated <TIMEX3>embedded annotated </TIMEX3>text</TIMEX3>.</root>\'\n )\n', (271, 392), False, 'from ternip.formats.timex3 import Timex3XmlDocument\n'), ((602, 664), 'ternip.formats.timex3.Timex3XmlDocument', 'Timex3XmlDocument', (['"""<root>This is some annotated text.</root>"""'], {}), "('<root>This is some annotated text.</root>')\n", (619, 664), False, 'from ternip.formats.timex3 import Timex3XmlDocument\n'), ((678, 696), 'ternip.timex.Timex', 'Timex', ([], {'type': '"""date"""'}), "(type='date')\n", (683, 696), False, 'from ternip.timex import Timex\n'), ((711, 722), 'ternip.timex.Timex', 'Timex', ([], {'id': '(1)'}), '(id=1)\n', (716, 722), False, 'from ternip.timex import Timex\n'), ((737, 748), 'ternip.timex.Timex', 'Timex', ([], {'id': '(2)'}), '(id=2)\n', (742, 748), False, 'from ternip.timex import Timex\n'), ((763, 774), 'ternip.timex.Timex', 'Timex', ([], {'id': '(3)'}), '(id=3)\n', (768, 774), False, 'from ternip.timex import Timex\n'), ((1664, 2047), 'ternip.formats.timex3.Timex3XmlDocument', 'Timex3XmlDocument', (['"""<root>This is <TIMEX3 tid="t6" beginPoint="t1" endPoint="t2" anchorTimeID="t3" functionInDocument="MODIFICATION_TIME" temporalFunction="true" type="DATE" value="20100710" mod="BEFORE" freq="1M" comment="Test" quant="EVERY">some annotated text</TIMEX3><TIMEX3 type="date" tid="t1" /><TIMEX3 type="date" tid="t2" /><TIMEX3 type="date" tid="t3" />.</root>"""'], {}), '(\n \'<root>This is <TIMEX3 tid="t6" beginPoint="t1" endPoint="t2" anchorTimeID="t3" functionInDocument="MODIFICATION_TIME" temporalFunction="true" type="DATE" value="20100710" mod="BEFORE" freq="1M" comment="Test" quant="EVERY">some annotated text</TIMEX3><TIMEX3 type="date" tid="t1" /><TIMEX3 type="date" tid="t2" /><TIMEX3 
type="date" tid="t3" />.</root>\'\n )\n', (1681, 2047), False, 'from ternip.formats.timex3 import Timex3XmlDocument\n'), ((2684, 2794), 'ternip.formats.timex3.Timex3XmlDocument', 'Timex3XmlDocument', (['"""<root>This is <TIMEX3 tid="t6" type="DATE">some annotated text</TIMEX3>.</root>"""'], {}), '(\n \'<root>This is <TIMEX3 tid="t6" type="DATE">some annotated text</TIMEX3>.</root>\'\n )\n', (2701, 2794), False, 'from ternip.formats.timex3 import Timex3XmlDocument\n')] |
from PIL import Image
import os
import glob
import argparse
from evaluation import QudrilateralFinder
from utils import mesh_imgs, draw_polygon_pil
def args_processor():
    """Build and parse the command-line arguments for the docscanner demo.

    Returns the parsed argparse.Namespace with attributes imagePath,
    outputPath, cornerModel and mesh.
    """
    parser = argparse.ArgumentParser(description='demo for docscanner')
    parser.add_argument("-i", "--imagePath", default="z_ref_doc_scanner/data/self_collected/high-level-camera/stills/", help="Path to the document image")
    parser.add_argument("-o", "--outputPath", default="results/demo/", help="Path to store the result")
    parser.add_argument("-cm", "--cornerModel", help="Model for corner point refinement", default="trained_models/corner/v3_Feb14_08-50-21/v3_resnet.pb")
    parser.add_argument("-m", "--mesh", action='store_true', help="save images in mesh")
    return parser.parse_args()
if __name__ == "__main__":
    args = args_processor()
    os.makedirs(args.outputPath, exist_ok=True)
    # Accept either a single image file or a directory of .jpg/.png stills.
    imgs = [args.imagePath] if os.path.isfile(args.imagePath) else (glob.glob(f'{args.imagePath}*.jpg') + glob.glob(f'{args.imagePath}*.png'))
    imgs_pil = []
    for im_path in imgs:
        im = Image.open(im_path)
        # Detect the document quadrilateral with the corner-refinement model,
        # then draw it onto the image in place.
        qf = QudrilateralFinder(None, args.cornerModel)
        quad_pred = qf.find_quad_model2_only_by_top_left(im)
        draw_polygon_pil(im, quad_pred, outline='red', width=3)
        im.save(f'{args.outputPath}/{os.path.basename(im_path)}')
        print(f'prediced qudrilateral: {os.path.basename(im_path)} -- {quad_pred}')
        imgs_pil.append(im)
    # Also write a single contact-sheet image of all annotated results.
    mesh = mesh_imgs(imgs_pil)
    mesh.save(f'{args.outputPath}/mesh.jpg')
| [
"PIL.Image.open",
"os.makedirs",
"argparse.ArgumentParser",
"evaluation.QudrilateralFinder",
"utils.draw_polygon_pil",
"os.path.isfile",
"utils.mesh_imgs",
"os.path.basename",
"glob.glob"
] | [((184, 242), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""demo for docscanner"""'}), "(description='demo for docscanner')\n", (207, 242), False, 'import argparse\n'), ((1007, 1050), 'os.makedirs', 'os.makedirs', (['args.outputPath'], {'exist_ok': '(True)'}), '(args.outputPath, exist_ok=True)\n', (1018, 1050), False, 'import os\n'), ((1641, 1660), 'utils.mesh_imgs', 'mesh_imgs', (['imgs_pil'], {}), '(imgs_pil)\n', (1650, 1660), False, 'from utils import mesh_imgs, draw_polygon_pil\n'), ((1082, 1112), 'os.path.isfile', 'os.path.isfile', (['args.imagePath'], {}), '(args.imagePath)\n', (1096, 1112), False, 'import os\n'), ((1250, 1269), 'PIL.Image.open', 'Image.open', (['im_path'], {}), '(im_path)\n', (1260, 1269), False, 'from PIL import Image\n'), ((1283, 1325), 'evaluation.QudrilateralFinder', 'QudrilateralFinder', (['None', 'args.cornerModel'], {}), '(None, args.cornerModel)\n', (1301, 1325), False, 'from evaluation import QudrilateralFinder\n'), ((1395, 1450), 'utils.draw_polygon_pil', 'draw_polygon_pil', (['im', 'quad_pred'], {'outline': '"""red"""', 'width': '(3)'}), "(im, quad_pred, outline='red', width=3)\n", (1411, 1450), False, 'from utils import mesh_imgs, draw_polygon_pil\n'), ((1119, 1154), 'glob.glob', 'glob.glob', (['f"""{args.imagePath}*.jpg"""'], {}), "(f'{args.imagePath}*.jpg')\n", (1128, 1154), False, 'import glob\n'), ((1157, 1192), 'glob.glob', 'glob.glob', (['f"""{args.imagePath}*.png"""'], {}), "(f'{args.imagePath}*.png')\n", (1166, 1192), False, 'import glob\n'), ((1489, 1514), 'os.path.basename', 'os.path.basename', (['im_path'], {}), '(im_path)\n', (1505, 1514), False, 'import os\n'), ((1558, 1583), 'os.path.basename', 'os.path.basename', (['im_path'], {}), '(im_path)\n', (1574, 1583), False, 'import os\n')] |
from django.contrib import admin
from .models import Vote, Blog
# Register your models here.
# Expose the blog models in the Django admin with the default ModelAdmin.
admin.site.register(Vote)
admin.site.register(Blog) | [
"django.contrib.admin.site.register"
] | [((93, 118), 'django.contrib.admin.site.register', 'admin.site.register', (['Vote'], {}), '(Vote)\n', (112, 118), False, 'from django.contrib import admin\n'), ((119, 144), 'django.contrib.admin.site.register', 'admin.site.register', (['Blog'], {}), '(Blog)\n', (138, 144), False, 'from django.contrib import admin\n')] |
# ====================================
# > Messaging and reporting utilities
# =================================
import textwrap
from collections import Counter
from datetime import datetime
from typing import Sequence, Tuple, cast
import rich
from rich.markup import escape
from rich.panel import Panel
from rich.progress import BarColumn, Progress, TimeElapsedColumn
from rich.table import Table
from rich.text import Text
from diff_shades.results import (
Analysis,
ProjectResults,
ResultTypes,
calculate_line_changes,
diff_two_results,
filter_results,
)
console = rich.get_console()
def color_diff(contents: str) -> str:
    """Wrap each line of a unified diff in rich console markup.

    File headers are bold, hunk headers cyan, additions green and
    deletions red; all other lines pass through unchanged.
    """
    def colorize(line: str) -> str:
        # File headers first: "+++" / "---" must win over plain "+" / "-".
        if line.startswith(("+++", "---")):
            return "[bold]" + line + "[/]"
        if line.startswith("@@"):
            return "[cyan]" + line + "[/]"
        if line.startswith("+"):
            return "[green]" + line + "[/]"
        if line.startswith("-"):
            return "[red]" + line + "[/]"
        return line

    # Escape first so pre-existing square brackets aren't parsed as markup.
    return "\n".join(colorize(line) for line in escape(contents).split("\n"))
def make_rich_progress() -> Progress:
    """Build the shared rich Progress bar layout used for analysis runs."""
    columns = (
        "[progress.description]{task.description}",
        BarColumn(),
        "[progress.percentage]{task.percentage:>3.0f}%",
        "-",
        "[progress.percentage]{task.completed}/{task.total}",
        "-",
        TimeElapsedColumn(),
    )
    # Render onto the module-wide console so output interleaves correctly.
    return Progress(*columns, console=console)
def readable_int(number: int) -> str:
    """Format *number* using spaces as thousands separators from 10 000 up."""
    if number >= 10000:
        return f"{number:,}".replace(",", " ")
    return str(number)
def make_analysis_summary(analysis: Analysis) -> Panel:
    """Render a rich Panel summarising an analysis.

    Shows per-result-type counts for files and projects, overall line
    statistics, any custom black arguments, and a subtitle with the black
    version and creation time.
    """
    main_table = Table.grid()
    stats_table = Table.grid()
    file_table = Table(title="File breakdown", show_edge=False, box=rich.box.SIMPLE)
    file_table.add_column("Result")
    file_table.add_column("# of files")
    project_table = Table(title="Project breakdown", show_edge=False, box=rich.box.SIMPLE)
    project_table.add_column("Result")
    project_table.add_column("# of projects")
    # One row per result type; each row is styled with the theme style of
    # the same name.  (Renamed the loop variable: `type` shadowed the builtin.)
    for result_type in ("nothing-changed", "reformatted", "failed"):
        count = len(filter_results(analysis.files(), result_type))
        file_table.add_row(result_type, str(count), style=result_type)
    type_counts = Counter(proj.overall_result for proj in analysis)
    for result_type in ("nothing-changed", "reformatted", "failed"):
        count = type_counts.get(cast(ResultTypes, result_type), 0)
        project_table.add_row(result_type, str(count), style=result_type)
    stats_table.add_row(file_table, " ", project_table)
    main_table.add_row(stats_table)
    additions, deletions = analysis.line_changes
    left_stats = f"""
    [bold]# of lines: {readable_int(analysis.line_count)}
    # of files: {len(analysis.files())}
    # of projects: {len(analysis.projects)}\
    """
    right_stats = (
        f"\n\n[bold]{readable_int(additions + deletions)} changes in total[/]"
        f"\n[green]{readable_int(additions)} additions[/]"
        f" - [red]{readable_int(deletions)} deletions"
    )
    stats_table_two = Table.grid(expand=True)
    stats_table_two.add_row(
        textwrap.dedent(left_stats), Text.from_markup(right_stats, justify="right")
    )
    main_table.add_row(stats_table_two)
    extra_args = analysis.metadata.get("black-extra-args")
    if extra_args:
        # BUG FIX: the style name was misspelled "itatic", which rich cannot
        # resolve at render time; "italic" is the intended style.
        pretty_args = Text(" ".join(extra_args), style="italic", justify="center")
        main_table.add_row(Panel(pretty_args, title="\[custom arguments]", border_style="dim"))
    created_at = datetime.fromisoformat(analysis.metadata["created-at"])
    subtitle = (
        f"[dim]black {analysis.metadata['black-version']} -"
        f" {created_at.strftime('%b %d %Y %X')} UTC"
    )
    return Panel(main_table, title="[bold]Summary", subtitle=subtitle, expand=False)
def make_comparison_summary(
    project_pairs: Sequence[Tuple[ProjectResults, ProjectResults]],
) -> Panel:
    """Render a rich Panel summarising the differences between paired runs.

    Counts how many projects and files differ between each (first, second)
    pair and totals the added/deleted lines across all differing files.
    """
    # TODO: clean this up by reusing Analysis objects.
    lines = 0
    files = 0
    differing_projects = 0
    differing_files = 0
    additions = 0
    deletions = 0
    for results_one, results_two in project_pairs:
        any_failed = False
        for file, r1 in results_one.items():
            files += 1
            lines += r1.line_count
            r2 = results_two[file]
            if r1 != r2:
                any_failed = True
                differing_files += 1
                # Line-change stats only make sense when neither side failed.
                if "failed" not in (r1.type, r2.type):
                    diff = diff_two_results(r1, r2, "throwaway")
                    changes = calculate_line_changes(diff)
                    additions += changes[0]
                    deletions += changes[1]
        differing_projects += int(any_failed)
    def fmt_num(number: int) -> str:
        # Cyan-highlight a count for the summary line.
        return "[cyan]" + readable_int(number) + "[/cyan]"
    line = (
        f"{fmt_num(differing_projects)} projects & {fmt_num(differing_files)} files changed /"
    )
    line += f" {fmt_num(additions + deletions)} changes"
    line += f" [[green]+{readable_int(additions)}[/]/[red]-{readable_int(deletions)}[/]]\n\n"
    line += f"... out of {fmt_num(lines)} lines"
    line += f", {fmt_num(files)} files"
    line += f" & {fmt_num(len(project_pairs))} projects"
    return Panel(line, title="[bold]Summary", expand=False)
def make_project_details_table(analysis: Analysis) -> Table:
    """Build a per-project breakdown table: result counts, line changes,
    file count and line count, with each row styled by its overall result.
    """
    project_table = Table(show_edge=False, box=rich.box.SIMPLE)
    project_table.add_column("Name")
    project_table.add_column("Results (n/r/f)")
    project_table.add_column("Line changes (total +/-)")
    project_table.add_column("# files")
    project_table.add_column("# lines")
    for proj, proj_results in analysis.results.items():
        # Build the "n/r/f" cell, colouring each count with its result style.
        results = ""
        for type in ("nothing-changed", "reformatted", "failed"):
            count = len(filter_results(proj_results, type))
            results += f"[{type}]{count}[/]/"
        results = results[:-1]
        additions, deletions = proj_results.line_changes
        if additions or deletions:
            line_changes = (
                f"{readable_int(additions + deletions)}"
                f" [[green]{readable_int(additions)}[/]"
                f"/[red]{readable_int(deletions)}[/]]"
            )
        else:
            line_changes = "n/a"
        file_count = str(len(proj_results))
        line_count = readable_int(proj_results.line_count)
        color = proj_results.overall_result
        project_table.add_row(proj, results, line_changes, file_count, line_count, style=color)
    return project_table
| [
"rich.text.Text.from_markup",
"rich.get_console",
"rich.progress.BarColumn",
"textwrap.dedent",
"rich.table.Table.grid",
"rich.panel.Panel",
"diff_shades.results.filter_results",
"rich.markup.escape",
"rich.table.Table",
"collections.Counter",
"diff_shades.results.diff_two_results",
"datetime.... | [((596, 614), 'rich.get_console', 'rich.get_console', ([], {}), '()\n', (612, 614), False, 'import rich\n'), ((1716, 1728), 'rich.table.Table.grid', 'Table.grid', ([], {}), '()\n', (1726, 1728), False, 'from rich.table import Table\n'), ((1747, 1759), 'rich.table.Table.grid', 'Table.grid', ([], {}), '()\n', (1757, 1759), False, 'from rich.table import Table\n'), ((1778, 1845), 'rich.table.Table', 'Table', ([], {'title': '"""File breakdown"""', 'show_edge': '(False)', 'box': 'rich.box.SIMPLE'}), "(title='File breakdown', show_edge=False, box=rich.box.SIMPLE)\n", (1783, 1845), False, 'from rich.table import Table\n'), ((1942, 2012), 'rich.table.Table', 'Table', ([], {'title': '"""Project breakdown"""', 'show_edge': '(False)', 'box': 'rich.box.SIMPLE'}), "(title='Project breakdown', show_edge=False, box=rich.box.SIMPLE)\n", (1947, 2012), False, 'from rich.table import Table\n'), ((2295, 2344), 'collections.Counter', 'Counter', (['(proj.overall_result for proj in analysis)'], {}), '(proj.overall_result for proj in analysis)\n', (2302, 2344), False, 'from collections import Counter\n'), ((3097, 3120), 'rich.table.Table.grid', 'Table.grid', ([], {'expand': '(True)'}), '(expand=True)\n', (3107, 3120), False, 'from rich.table import Table\n'), ((3554, 3609), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["analysis.metadata['created-at']"], {}), "(analysis.metadata['created-at'])\n", (3576, 3609), False, 'from datetime import datetime\n'), ((3759, 3832), 'rich.panel.Panel', 'Panel', (['main_table'], {'title': '"""[bold]Summary"""', 'subtitle': 'subtitle', 'expand': '(False)'}), "(main_table, title='[bold]Summary', subtitle=subtitle, expand=False)\n", (3764, 3832), False, 'from rich.panel import Panel\n'), ((5259, 5307), 'rich.panel.Panel', 'Panel', (['line'], {'title': '"""[bold]Summary"""', 'expand': '(False)'}), "(line, title='[bold]Summary', expand=False)\n", (5264, 5307), False, 'from rich.panel import Panel\n'), ((5391, 5434), 
'rich.table.Table', 'Table', ([], {'show_edge': '(False)', 'box': 'rich.box.SIMPLE'}), '(show_edge=False, box=rich.box.SIMPLE)\n', (5396, 5434), False, 'from rich.table import Table\n'), ((1289, 1300), 'rich.progress.BarColumn', 'BarColumn', ([], {}), '()\n', (1298, 1300), False, 'from rich.progress import BarColumn, Progress, TimeElapsedColumn\n'), ((1455, 1474), 'rich.progress.TimeElapsedColumn', 'TimeElapsedColumn', ([], {}), '()\n', (1472, 1474), False, 'from rich.progress import BarColumn, Progress, TimeElapsedColumn\n'), ((3158, 3185), 'textwrap.dedent', 'textwrap.dedent', (['left_stats'], {}), '(left_stats)\n', (3173, 3185), False, 'import textwrap\n'), ((3187, 3233), 'rich.text.Text.from_markup', 'Text.from_markup', (['right_stats'], {'justify': '"""right"""'}), "(right_stats, justify='right')\n", (3203, 3233), False, 'from rich.text import Text\n'), ((711, 727), 'rich.markup.escape', 'escape', (['contents'], {}), '(contents)\n', (717, 727), False, 'from rich.markup import escape\n'), ((2439, 2462), 'typing.cast', 'cast', (['ResultTypes', 'type'], {}), '(ResultTypes, type)\n', (2443, 2462), False, 'from typing import Sequence, Tuple, cast\n'), ((3468, 3536), 'rich.panel.Panel', 'Panel', (['pretty_args'], {'title': '"""\\\\[custom arguments]"""', 'border_style': '"""dim"""'}), "(pretty_args, title='\\\\[custom arguments]', border_style='dim')\n", (3473, 3536), False, 'from rich.panel import Panel\n'), ((5824, 5858), 'diff_shades.results.filter_results', 'filter_results', (['proj_results', 'type'], {}), '(proj_results, type)\n', (5838, 5858), False, 'from diff_shades.results import Analysis, ProjectResults, ResultTypes, calculate_line_changes, diff_two_results, filter_results\n'), ((4508, 4545), 'diff_shades.results.diff_two_results', 'diff_two_results', (['r1', 'r2', '"""throwaway"""'], {}), "(r1, r2, 'throwaway')\n", (4524, 4545), False, 'from diff_shades.results import Analysis, ProjectResults, ResultTypes, calculate_line_changes, diff_two_results, 
filter_results\n'), ((4576, 4604), 'diff_shades.results.calculate_line_changes', 'calculate_line_changes', (['diff'], {}), '(diff)\n', (4598, 4604), False, 'from diff_shades.results import Analysis, ProjectResults, ResultTypes, calculate_line_changes, diff_two_results, filter_results\n')] |
#! /usr/bin/python3
"""Forward an email read from stdin to the tempmail HTTP API.

Invoked as: forwarder.py <sender> <recipient> [<recipient> ...]
with the raw RFC 5322 message on stdin.
"""
import os
import sys
import json
import requests
import base64
from email.parser import Parser
from email import policy

# Parse the raw message from stdin with the SMTP-strict policy.
email = Parser(policy=policy.SMTP).parse(sys.stdin)

# Shared secret used to authenticate against the API server.
secret = os.getenv('SECRET')
headers = {
    'User-Agent': 'tempmail/service',
    'Authorization': f'Bearer {secret}',
}

data = {
    'sender': sys.argv[1],
    'recipients': sys.argv[2:],
    # Flatten the message headers into a plain dict (last value wins on duplicates).
    'headers': dict(email.items()),
    'body': email.get_body().as_string(),
}

# Plain string: the URL contains no placeholders, so no f-prefix is needed.
requests.post('http://server/api/email', json=data, headers=headers)
| [
"requests.post",
"email.parser.Parser",
"os.getenv"
] | [((205, 224), 'os.getenv', 'os.getenv', (['"""SECRET"""'], {}), "('SECRET')\n", (214, 224), False, 'import os\n'), ((480, 549), 'requests.post', 'requests.post', (['f"""http://server/api/email"""'], {'json': 'data', 'headers': 'headers'}), "(f'http://server/api/email', json=data, headers=headers)\n", (493, 549), False, 'import requests\n'), ((150, 176), 'email.parser.Parser', 'Parser', ([], {'policy': 'policy.SMTP'}), '(policy=policy.SMTP)\n', (156, 176), False, 'from email.parser import Parser\n')] |
import os
import shutil
def ensure_folder_exists_and_is_clear(folder):
    """Ensure *folder* exists and contains no files or subdirectories.

    Creates the directory (including parents) if missing, then removes
    every entry inside it.  Deletion errors propagate to the caller
    (the original ``try/except: raise`` was a no-op and has been removed).
    """
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(folder, exist_ok=True)
    for entry in os.listdir(folder):
        entry_path = os.path.join(folder, entry)
        if os.path.isfile(entry_path):
            os.unlink(entry_path)
        elif os.path.isdir(entry_path):
            shutil.rmtree(entry_path)
| [
"os.path.exists",
"os.listdir",
"os.makedirs",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"os.unlink",
"shutil.rmtree"
] | [((158, 176), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (168, 176), False, 'import os\n'), ((85, 107), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (99, 107), False, 'import os\n'), ((117, 136), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (128, 136), False, 'import os\n'), ((198, 228), 'os.path.join', 'os.path.join', (['folder', 'the_file'], {}), '(folder, the_file)\n', (210, 228), False, 'import os\n'), ((257, 282), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (271, 282), False, 'import os\n'), ((300, 320), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (309, 320), False, 'import os\n'), ((338, 362), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (351, 362), False, 'import os\n'), ((380, 404), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (393, 404), False, 'import shutil\n')] |
from unittest import TestCase
from src.DiGraph import DiGraph
from src.GraphAlgo import GraphAlgo
class TestDiGraph(TestCase):
    """Unit tests for DiGraph, exercised through graphs loaded from JSON files."""

    def _load(self, path):
        """Build a DiGraph, attach it to a GraphAlgo, load *path*, return the graph."""
        g = DiGraph()
        ga = GraphAlgo()
        ga.graph = g
        ga.load_from_json(path)
        return g

    def test_v_size(self):
        g = self._load('../data/A0.json')
        self.assertEqual(g.v_size(), 11)

    def test_e_size(self):
        g = self._load('../data/A0.json')
        self.assertEqual(g.e_size(), 22)

    def test_get_mc(self):
        # Mode counter starts at 0 after a load and bumps on mutation.
        g = self._load('../data/A0.json')
        self.assertEqual(g.get_mc(), 0)
        g.remove_edge(10, 9)
        self.assertEqual(g.get_mc(), 1)

    def test_add_edge(self):
        g = self._load('../data/A0.json')
        self.assertEqual(g.e_size(), 22)
        g.add_edge(0, 5, 33)
        self.assertEqual(g.e_size(), 23)

    def test_add_node(self):
        g = self._load('../data/A1.json')
        self.assertEqual(g.v_size(), 17)
        g.add_node(17, (1, 1, 0))
        self.assertEqual(g.v_size(), 18)

    def test_remove_node(self):
        g = self._load('../data/A1.json')
        self.assertEqual(g.v_size(), 17)
        g.remove_node(0)
        self.assertEqual(g.v_size(), 16)

    def test_remove_edge(self):
        g = self._load('../data/A0.json')
        self.assertEqual(g.e_size(), 22)
        g.remove_edge(0, 1)
        self.assertEqual(g.e_size(), 21)
| [
"src.GraphAlgo.GraphAlgo",
"src.DiGraph.DiGraph"
] | [((169, 178), 'src.DiGraph.DiGraph', 'DiGraph', ([], {}), '()\n', (176, 178), False, 'from src.DiGraph import DiGraph\n'), ((192, 203), 'src.GraphAlgo.GraphAlgo', 'GraphAlgo', ([], {}), '()\n', (201, 203), False, 'from src.GraphAlgo import GraphAlgo\n'), ((350, 359), 'src.DiGraph.DiGraph', 'DiGraph', ([], {}), '()\n', (357, 359), False, 'from src.DiGraph import DiGraph\n'), ((373, 384), 'src.GraphAlgo.GraphAlgo', 'GraphAlgo', ([], {}), '()\n', (382, 384), False, 'from src.GraphAlgo import GraphAlgo\n'), ((532, 541), 'src.DiGraph.DiGraph', 'DiGraph', ([], {}), '()\n', (539, 541), False, 'from src.DiGraph import DiGraph\n'), ((555, 566), 'src.GraphAlgo.GraphAlgo', 'GraphAlgo', ([], {}), '()\n', (564, 566), False, 'from src.GraphAlgo import GraphAlgo\n'), ((782, 791), 'src.DiGraph.DiGraph', 'DiGraph', ([], {}), '()\n', (789, 791), False, 'from src.DiGraph import DiGraph\n'), ((805, 816), 'src.GraphAlgo.GraphAlgo', 'GraphAlgo', ([], {}), '()\n', (814, 816), False, 'from src.GraphAlgo import GraphAlgo\n'), ((1033, 1042), 'src.DiGraph.DiGraph', 'DiGraph', ([], {}), '()\n', (1040, 1042), False, 'from src.DiGraph import DiGraph\n'), ((1056, 1067), 'src.GraphAlgo.GraphAlgo', 'GraphAlgo', ([], {}), '()\n', (1065, 1067), False, 'from src.GraphAlgo import GraphAlgo\n'), ((1293, 1302), 'src.DiGraph.DiGraph', 'DiGraph', ([], {}), '()\n', (1300, 1302), False, 'from src.DiGraph import DiGraph\n'), ((1316, 1327), 'src.GraphAlgo.GraphAlgo', 'GraphAlgo', ([], {}), '()\n', (1325, 1327), False, 'from src.GraphAlgo import GraphAlgo\n'), ((1548, 1557), 'src.DiGraph.DiGraph', 'DiGraph', ([], {}), '()\n', (1555, 1557), False, 'from src.DiGraph import DiGraph\n'), ((1571, 1582), 'src.GraphAlgo.GraphAlgo', 'GraphAlgo', ([], {}), '()\n', (1580, 1582), False, 'from src.GraphAlgo import GraphAlgo\n')] |
from django.shortcuts import render, redirect
from carts.models import Cart
from .forms import (
ForWhomForm,
EventTypeForm,
GuestNumberForm,
DateInputForm,
)
from .models import (
ForWhom,
EventType,
Guest,
HelloDate
)
def selection(request):
    """Show the "for whom" form; on a valid POST persist the choice and move on."""
    form = ForWhomForm(request.POST or None) if request.method == 'POST' else ForWhomForm()
    if request.method == 'POST' and form.is_valid():
        ForWhom(select=request.POST.get('select')).save()
        return redirect('birthday:eventtype')
    # GET, or invalid POST: render the (possibly bound) form again.
    return render(request, 'birthday/select.html', {'form': form})
def eventtype(request):
    """Render the event-type form; on a valid POST store the chosen type
    together with its price.

    The price is derived from the first letter of the submitted name;
    names with an unknown initial get a price of 0.00 (same as before).
    """
    # Price for each event type, keyed by the first letter of its name
    # (replaces the original 6-branch if/elif chain).
    price_by_initial = {
        'V': 12.00,
        'B': 102.00,
        'C': 1222.00,
        'D': 1112.00,
        'P': 152.00,
        'S': 1332.00,
    }
    if request.method == "POST":
        form = EventTypeForm(request.POST or None)
        if form.is_valid():
            name = request.POST.get('name')
            price = price_by_initial.get(name[:1], 0.00)
            EventType(name=name, dam=price).save()
            return redirect('birthday:guest')
    else:
        form = EventTypeForm()
    return render(request, 'birthday/event_types.html', {'form': form})
def guest_number(request):
    """Render the guest-count form; on a valid POST store the count and its price."""
    if request.method != "POST":
        form = GuestNumberForm()
    else:
        form = GuestNumberForm(request.POST or None)
        if form.is_valid():
            count = request.POST.get('number')
            # Each guest costs 5 per head.
            guest = Guest(number=count, price=int(count) * 5)
            guest.save()
            return redirect('birthday:date')
    return render(request, 'birthday/guest_number.html', {'form': form})
def select_date(request):
    """Render the date-picker form; on a valid POST persist the chosen date."""
    if request.method == 'POST':
        form = DateInputForm(request.POST or None)
        if form.is_valid():
            HelloDate(date=request.POST.get('date')).save()
            return redirect('birthday:detail')
    else:
        form = DateInputForm()
    return render(request, 'birthday/select_date.html', {'form': form})
def selection_detail(request):
    """Summary page: the most recent selection of each kind plus the cart."""
    cart_obj, new_obj = Cart.objects.new_or_get(request)
    # QuerySets are lazy; each [0:1] slice yields the newest row of its model.
    context = {
        'forwhom': ForWhom.objects.all().order_by('-id')[0:1],
        'eventtype': EventType.objects.all().order_by('-id')[0:1],
        'guest': Guest.objects.all().order_by('-id')[0:1],
        'date': HelloDate.objects.all().order_by('-id')[0:1],
        'cart_obj': cart_obj,
    }
    return render(request, 'birthday/detail.html', context)
"django.shortcuts.render",
"carts.models.Cart.objects.new_or_get",
"django.shortcuts.redirect"
] | [((683, 738), 'django.shortcuts.render', 'render', (['request', '"""birthday/select.html"""', "{'form': form}"], {}), "(request, 'birthday/select.html', {'form': form})\n", (689, 738), False, 'from django.shortcuts import render, redirect\n'), ((1632, 1692), 'django.shortcuts.render', 'render', (['request', '"""birthday/event_types.html"""', "{'form': form}"], {}), "(request, 'birthday/event_types.html', {'form': form})\n", (1638, 1692), False, 'from django.shortcuts import render, redirect\n'), ((2205, 2266), 'django.shortcuts.render', 'render', (['request', '"""birthday/guest_number.html"""', "{'form': form}"], {}), "(request, 'birthday/guest_number.html', {'form': form})\n", (2211, 2266), False, 'from django.shortcuts import render, redirect\n'), ((2688, 2735), 'django.shortcuts.render', 'render', (['request', '"""birthday/select_date.html"""', 'c'], {}), "(request, 'birthday/select_date.html', c)\n", (2694, 2735), False, 'from django.shortcuts import render, redirect\n'), ((3038, 3070), 'carts.models.Cart.objects.new_or_get', 'Cart.objects.new_or_get', (['request'], {}), '(request)\n', (3061, 3070), False, 'from carts.models import Cart\n'), ((3259, 3304), 'django.shortcuts.render', 'render', (['request', '"""birthday/detail.html"""', 'args'], {}), "(request, 'birthday/detail.html', args)\n", (3265, 3304), False, 'from django.shortcuts import render, redirect\n'), ((601, 631), 'django.shortcuts.redirect', 'redirect', (['"""birthday:eventtype"""'], {}), "('birthday:eventtype')\n", (609, 631), False, 'from django.shortcuts import render, redirect\n'), ((1552, 1578), 'django.shortcuts.redirect', 'redirect', (['"""birthday:guest"""'], {}), "('birthday:guest')\n", (1560, 1578), False, 'from django.shortcuts import render, redirect\n'), ((2124, 2149), 'django.shortcuts.redirect', 'redirect', (['"""birthday:date"""'], {}), "('birthday:date')\n", (2132, 2149), False, 'from django.shortcuts import render, redirect\n'), ((2566, 2593), 'django.shortcuts.redirect', 
'redirect', (['"""birthday:detail"""'], {}), "('birthday:detail')\n", (2574, 2593), False, 'from django.shortcuts import render, redirect\n')] |
# Calendar module drives the month layout.
import calendar
import sys
# Print my monthly work report as a Markdown document on stdout.
workDay = 0   # count of weekday (working) days seen so far
restDay = 0   # count of weekend (rest) days seen so far
month = 8
# monthcalendar() returns one list per week, Monday-first, 0 for padding days.
weeks = calendar.monthcalendar(2021, month)
jobDays = [[] for i in range(6)]  # working days grouped per calendar week
jobDaysIndex = 0  # index of the week currently being filled (advanced on Saturdays)
print('# ' + str(month) + '月日报')
print()
print('## 一、日历')
print()
print('| 星期一 | 星期二 | 星期三 | 星期四 | 星期五 | <font color=red>星期六</font> | <font color=red>星期日</font> | 项目统计 |')
print('|---|---|---|---|---|---|---|---|')
for week in weeks:
    for i in range(len(week)):
        day = week[i]
        if day == 0:
            # Padding day outside this month: empty table cell.
            week[i] = '| - '
        elif (i == 5 or i == 6):
            # Saturday (i == 5) or Sunday (i == 6): a rest day.
            week[i] = '| ' + str(day) + ' <font color=Red>休息</font> '
            restDay = restDay+1
            if i == 5 :
                # Saturday closes the week: start collecting into the next bucket.
                jobDaysIndex = jobDaysIndex + 1
        else:
            # Monday-Friday: a working day.
            week[i] = '| ' + str(day) + ' <font color=SeaGreen>工作</font> '
            workDay = workDay+1
            jobDays[jobDaysIndex].append(day)
        #print()
        sys.stdout.write(week[i])
    sys.stdout.write('|')
    print()
#print(weeks)
print()
print('## 二、月度统计')
print()
print('| 工时统计 <br/> 实际 / 法定 | 类别统计(天) | 项目统计(天)|')
print('|---|---|---|')
print('| 出勤天数 <br/> 0 / '+str(workDay)+' | <font color=SeaGreen>工作:0</font> | |')
print('| 平均工时 <br/> 0 / 8 | <font color=Red>休息:0</font> | |')
print('| 总工时 <br/> 0 / '+str(workDay*8)+' | <font color=#CC33CC>加班:0</font> | |')
print('| | <font color=DeepSkyBlue>出差:0</font> | |')
print('| 使用年假 <br/> 0 小时 | <font color=IndianRed>年假:0</font> | |')
print('| 使用倒休 <br/> 0 小时 | <font color=#DF0101>倒休:0</font> | |')
print('| 获得倒休 <br/> 0 小时 | | |')
print()
print('## 三、详情')
# One "details" section per week that actually contains working days.
for jobDay in jobDays:
    if len(jobDay) > 0:
        print()
        print('### ' + str(jobDay))
        print()
        for day in jobDay:
            print('- ' + str(day) + ":XXX")
            print('   - XXX')
| [
"calendar.monthcalendar",
"sys.stdout.write"
] | [((92, 127), 'calendar.monthcalendar', 'calendar.monthcalendar', (['(2021)', 'month'], {}), '(2021, month)\n', (114, 127), False, 'import calendar\n'), ((947, 968), 'sys.stdout.write', 'sys.stdout.write', (['"""|"""'], {}), "('|')\n", (963, 968), False, 'import sys\n'), ((917, 942), 'sys.stdout.write', 'sys.stdout.write', (['week[i]'], {}), '(week[i])\n', (933, 942), False, 'import sys\n')] |
import numpy as np
import os
import sklearn.metrics
from scipy.optimize import curve_fit
def slice_lat(ds):
    """Restrict *ds* to the tropical latitude band [-25, 25] degrees."""
    tropics = slice(-25, 25)
    return ds.sel(lat=tropics)
def ensure_dir(file_path):
    """Create directory *file_path* (and any parents) if it does not exist."""
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(file_path, exist_ok=True)
def days_per_month(month, year):
    """Return the number of days in *month* (1-12) of *year*.

    Applies the Gregorian leap rule: divisible by 4, except century years
    not divisible by 400.  Arguments may be ints or numeric strings.
    """
    # Month lengths in a common year, January first.
    # Bug fix: the original list had January as 30 days.
    days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    d = days[int(month) - 1]
    year = int(year)
    if d == 28 and year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        d = 29
    return d
def precip_to_mm(ds):
    """Convert the ``pr`` variable of *ds* to 'mm day-1' in place and return *ds*.

    Raises ValueError when the units are neither 'kg m-2 s-1' nor 'mm day-1'.
    """
    units = ds.pr.attrs['units']
    if units == 'kg m-2 s-1':
        # kg m-2 s-1 equals mm/s: multiply by the number of seconds per day.
        ds['pr'] = ds.pr * 24*60**2
        ds.pr.attrs['units'] = 'mm day-1'
    elif units != 'mm day-1':
        raise ValueError('Unrecognised units')
    return ds
def gaus(x, a, x0, sigma):
    """Unnormalised Gaussian: a * exp(-(x - x0)^2 / (2 * sigma^2))."""
    return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))


def fit_gaussian(y, x):
    """Fit a normal gaussian distribution curve to the data.

    Returns
    -------
    popt_f : ndarray, shape (4,)
        [amplitude, mean, width, r^2 statistic]; all NaN if the fit fails.
    pcov_f : ndarray, shape (4, 4)
        Covariance of the three fitted parameters in the top-left 3x3
        corner; NaN elsewhere and on failure.
    """
    popt_f = np.full(4, np.nan, dtype=np.float64)
    pcov_f = np.full((4, 4), np.nan, dtype=np.float64)
    # Parameter bounds: amplitude [0, 25], mean [-30, 20], width [0, 25].
    bounds = (np.array([0, -30, 0]), np.array([25, 20, 25]))
    try:
        popt, pcov = curve_fit(gaus, x, y, p0=[8, -5, 10], maxfev=8000, bounds=bounds)
        a, x0, sigma = popt
        y_pred = gaus(x, a, x0, sigma)
        # Coefficient of determination computed directly
        # (r^2 = 1 - SS_res / SS_tot), dropping the sklearn.metrics dependency.
        ss_res = float(np.sum((np.asarray(y) - y_pred) ** 2))
        ss_tot = float(np.sum((np.asarray(y) - np.mean(y)) ** 2))
        r = 1.0 - ss_res / ss_tot
        popt_f[:3] = popt
        popt_f[3] = r
        pcov_f[:3, :3] = pcov
    except RuntimeError:
        # curve_fit failed to converge: leave the NaN placeholders.
        pass
    return popt_f, pcov_f
"scipy.optimize.curve_fit",
"os.path.exists",
"os.makedirs",
"numpy.exp",
"numpy.array",
"numpy.full"
] | [((250, 275), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (264, 275), False, 'import os\n'), ((285, 307), 'os.makedirs', 'os.makedirs', (['file_path'], {}), '(file_path)\n', (296, 307), False, 'import os\n'), ((1020, 1061), 'numpy.exp', 'np.exp', (['(-(x - x0) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - x0) ** 2 / (2 * sigma ** 2))\n', (1026, 1061), True, 'import numpy as np\n'), ((1282, 1318), 'numpy.full', 'np.full', (['(4)', 'np.nan'], {'dtype': 'np.float64'}), '(4, np.nan, dtype=np.float64)\n', (1289, 1318), True, 'import numpy as np\n'), ((1320, 1361), 'numpy.full', 'np.full', (['(4, 4)', 'np.nan'], {'dtype': 'np.float64'}), '((4, 4), np.nan, dtype=np.float64)\n', (1327, 1361), True, 'import numpy as np\n'), ((1375, 1396), 'numpy.array', 'np.array', (['[0, -30, 0]'], {}), '([0, -30, 0])\n', (1383, 1396), True, 'import numpy as np\n'), ((1398, 1420), 'numpy.array', 'np.array', (['[25, 20, 25]'], {}), '([25, 20, 25])\n', (1406, 1420), True, 'import numpy as np\n'), ((1452, 1517), 'scipy.optimize.curve_fit', 'curve_fit', (['gaus', 'x', 'y'], {'p0': '[8, -5, 10]', 'maxfev': '(8000)', 'bounds': 'bounds'}), '(gaus, x, y, p0=[8, -5, 10], maxfev=8000, bounds=bounds)\n', (1461, 1517), False, 'from scipy.optimize import curve_fit\n')] |
"""
Library Features:
Name: lib_dryes_downloader_geo
Author(s): <NAME> (<EMAIL>), <NAME> (<EMAIL>)
Date: '20210929'
Version: '1.0.0'
"""
#################################################################################
# Library
import os
import logging
from osgeo import gdal, gdalconst
import numpy as np
import rasterio
import matplotlib.pylab as plt
from lib_dryes_downloader_hsaf_generic import create_darray_2d
#################################################################################
logging.getLogger("rasterio").setLevel(logging.WARNING)
# -------------------------------------------------------------------------------------
# Method to read tiff file
def reproject_file_tiff(file_name_in, file_name_out,
                        file_wide_out, file_high_out, file_geotrans_out, file_proj_out):
    """Reproject *file_name_in* onto the output grid described by
    (file_wide_out, file_high_out, file_geotrans_out, file_proj_out) and
    write the result to *file_name_out* as a single-band Float32 GTiff.

    Returns the (still open) output GDAL dataset.
    """
    dset_tiff_out = gdal.GetDriverByName('GTiff').Create(
        file_name_out, file_wide_out, file_high_out, 1, gdalconst.GDT_Float32)
    dset_tiff_out.SetGeoTransform(file_geotrans_out)
    dset_tiff_out.SetProjection(file_proj_out)

    dset_tiff_in = gdal.Open(file_name_in, gdalconst.GA_ReadOnly)
    dset_proj_in = dset_tiff_in.GetProjection()
    # (The previous version also called GetGeoTransform/ReadAsArray/
    # GetRasterBand here; the results were never used, and ReadAsArray
    # needlessly loaded the whole raster into memory.)

    # Nearest-neighbour resampling from the input grid to the output grid.
    gdal.ReprojectImage(dset_tiff_in, dset_tiff_out, dset_proj_in, file_proj_out,
                        gdalconst.GRA_NearestNeighbour)
    return dset_tiff_out
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get a raster file
def read_file_raster(file_name, file_proj='epsg:4326', var_name='land',
                     coord_name_x='Longitude', coord_name_y='Latitude',
                     dim_name_x='Longitude', dim_name_y='Latitude', no_data_default=-9999.0):
    """Read a raster file (.txt/.asc/.tif) into a 2D DataArray.

    Returns (data_array, wide, high, proj, transform, bounding_box,
    no_data, dim_name_x, dim_name_y), where the geographic coordinates
    are cell centres derived from the raster bounds and resolution.

    Raises NotImplementedError for unsupported extensions and IOError if
    the file does not exist.
    """
    if os.path.exists(file_name):
        if (file_name.endswith('.txt') or file_name.endswith('.asc')) or file_name.endswith('.tif'):
            # Force the declared projection onto the dataset; mode 'r+'
            # allows updating the CRS in place.
            crs = rasterio.crs.CRS({"init": file_proj})
            with rasterio.open(file_name, mode='r+') as dset:
                dset.crs = crs
                bounds = dset.bounds
                no_data = dset.nodata
                res = dset.res
                transform = dset.transform
                data = dset.read()
                proj = dset.crs.wkt
                values = data[0, :, :]
            # Fall back to the default no-data marker when missing/NaN.
            if (no_data is None) or (np.isnan(no_data)):
                no_data = no_data_default
            decimal_round = 7
            # Cell-centre coordinates: shift each edge inwards by half a cell.
            center_right = bounds.right - (res[0] / 2)
            center_left = bounds.left + (res[0] / 2)
            center_top = bounds.top - (res[1] / 2)
            center_bottom = bounds.bottom + (res[1] / 2)
            lon = np.arange(center_left, center_right + np.abs(res[0] / 2), np.abs(res[0]), float)
            lat = np.flip(np.arange(center_bottom, center_top + np.abs(res[0] / 2), np.abs(res[1]), float), axis=0)
            lons, lats = np.meshgrid(lon, lat)
            # If the raster is stored bottom-up, flip values and latitudes
            # so latitude decreases with the row index.
            if center_bottom > center_top:
                center_bottom_tmp = center_top
                center_top_tmp = center_bottom
                center_bottom = center_bottom_tmp
                center_top = center_top_tmp
                values = np.flipud(values)
                lats = np.flipud(lats)
            # Sanity checks: the coordinate grid must match the cell-centre
            # extremes (compared at 7 decimals).
            min_lon_round = round(np.min(lons), decimal_round)
            max_lon_round = round(np.max(lons), decimal_round)
            min_lat_round = round(np.min(lats), decimal_round)
            max_lat_round = round(np.max(lats), decimal_round)
            center_right_round = round(center_right, decimal_round)
            center_left_round = round(center_left, decimal_round)
            center_bottom_round = round(center_bottom, decimal_round)
            center_top_round = round(center_top, decimal_round)
            assert min_lon_round == center_left_round
            assert max_lon_round == center_right_round
            assert min_lat_round == center_bottom_round
            assert max_lat_round == center_top_round
            dims = values.shape
            high = dims[0]  # nrows
            wide = dims[1]  # cols
            # [min_lon, max_lat, max_lon, min_lat] — upper-left / lower-right corners.
            bounding_box = [min_lon_round, max_lat_round, max_lon_round, min_lat_round]
            da = create_darray_2d(values, lons, lats, coord_name_x=coord_name_x, coord_name_y=coord_name_y,
                                  dim_name_x=dim_name_x, dim_name_y=dim_name_y, name=var_name)
        else:
            logging.error(' ===> Geographical file ' + file_name + ' format unknown')
            raise NotImplementedError('File type reader not implemented yet')
    else:
        logging.error(' ===> Geographical file ' + file_name + ' not found')
        raise IOError('Geographical file location or name is wrong')
    return da, wide, high, proj, transform, bounding_box, no_data, dim_name_x, dim_name_y
# # -------------------------------------------------------------------------------------
| [
"osgeo.gdal.Open",
"os.path.exists",
"logging.getLogger",
"rasterio.crs.CRS",
"osgeo.gdal.GetDriverByName",
"lib_dryes_downloader_hsaf_generic.create_darray_2d",
"osgeo.gdal.ReprojectImage",
"numpy.abs",
"numpy.flipud",
"rasterio.open",
"numpy.max",
"numpy.isnan",
"numpy.min",
"numpy.meshg... | [((1101, 1147), 'osgeo.gdal.Open', 'gdal.Open', (['file_name_in', 'gdalconst.GA_ReadOnly'], {}), '(file_name_in, gdalconst.GA_ReadOnly)\n', (1110, 1147), False, 'from osgeo import gdal, gdalconst\n'), ((1422, 1535), 'osgeo.gdal.ReprojectImage', 'gdal.ReprojectImage', (['dset_tiff_in', 'dset_tiff_out', 'dset_proj_in', 'file_proj_out', 'gdalconst.GRA_NearestNeighbour'], {}), '(dset_tiff_in, dset_tiff_out, dset_proj_in,\n file_proj_out, gdalconst.GRA_NearestNeighbour)\n', (1441, 1535), False, 'from osgeo import gdal, gdalconst\n'), ((2034, 2059), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (2048, 2059), False, 'import os\n'), ((530, 559), 'logging.getLogger', 'logging.getLogger', (['"""rasterio"""'], {}), "('rasterio')\n", (547, 559), False, 'import logging\n'), ((5089, 5157), 'logging.error', 'logging.error', (["(' ===> Geographical file ' + file_name + ' not found')"], {}), "(' ===> Geographical file ' + file_name + ' not found')\n", (5102, 5157), False, 'import logging\n'), ((864, 893), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (884, 893), False, 'from osgeo import gdal, gdalconst\n'), ((2181, 2218), 'rasterio.crs.CRS', 'rasterio.crs.CRS', (["{'init': file_proj}"], {}), "({'init': file_proj})\n", (2197, 2218), False, 'import rasterio\n'), ((3160, 3181), 'numpy.meshgrid', 'np.meshgrid', (['lon', 'lat'], {}), '(lon, lat)\n', (3171, 3181), True, 'import numpy as np\n'), ((4706, 4865), 'lib_dryes_downloader_hsaf_generic.create_darray_2d', 'create_darray_2d', (['values', 'lons', 'lats'], {'coord_name_x': 'coord_name_x', 'coord_name_y': 'coord_name_y', 'dim_name_x': 'dim_name_x', 'dim_name_y': 'dim_name_y', 'name': 'var_name'}), '(values, lons, lats, coord_name_x=coord_name_x,\n coord_name_y=coord_name_y, dim_name_x=dim_name_x, dim_name_y=dim_name_y,\n name=var_name)\n', (4722, 4865), False, 'from lib_dryes_downloader_hsaf_generic import create_darray_2d\n'), ((4919, 4992), 
'logging.error', 'logging.error', (["(' ===> Geographical file ' + file_name + ' format unknown')"], {}), "(' ===> Geographical file ' + file_name + ' format unknown')\n", (4932, 4992), False, 'import logging\n'), ((2236, 2271), 'rasterio.open', 'rasterio.open', (['file_name'], {'mode': '"""r+"""'}), "(file_name, mode='r+')\n", (2249, 2271), False, 'import rasterio\n'), ((2609, 2626), 'numpy.isnan', 'np.isnan', (['no_data'], {}), '(no_data)\n', (2617, 2626), True, 'import numpy as np\n'), ((2996, 3010), 'numpy.abs', 'np.abs', (['res[0]'], {}), '(res[0])\n', (3002, 3010), True, 'import numpy as np\n'), ((3439, 3456), 'numpy.flipud', 'np.flipud', (['values'], {}), '(values)\n', (3448, 3456), True, 'import numpy as np\n'), ((3480, 3495), 'numpy.flipud', 'np.flipud', (['lats'], {}), '(lats)\n', (3489, 3495), True, 'import numpy as np\n'), ((3791, 3803), 'numpy.min', 'np.min', (['lons'], {}), '(lons)\n', (3797, 3803), True, 'import numpy as np\n'), ((3854, 3866), 'numpy.max', 'np.max', (['lons'], {}), '(lons)\n', (3860, 3866), True, 'import numpy as np\n'), ((3917, 3929), 'numpy.min', 'np.min', (['lats'], {}), '(lats)\n', (3923, 3929), True, 'import numpy as np\n'), ((3980, 3992), 'numpy.max', 'np.max', (['lats'], {}), '(lats)\n', (3986, 3992), True, 'import numpy as np\n'), ((2976, 2994), 'numpy.abs', 'np.abs', (['(res[0] / 2)'], {}), '(res[0] / 2)\n', (2982, 2994), True, 'import numpy as np\n'), ((3103, 3117), 'numpy.abs', 'np.abs', (['res[1]'], {}), '(res[1])\n', (3109, 3117), True, 'import numpy as np\n'), ((3083, 3101), 'numpy.abs', 'np.abs', (['(res[0] / 2)'], {}), '(res[0] / 2)\n', (3089, 3101), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torchvision
# There is no pretrained model in torch.hub for mnasnet0_75, mnasnet1_3, shufflenetv2_x1.5, and shufflenetv2_x2.0
# Groups encode how Extractor.forward() pulls feature maps out of each model:
#   group1 -> model.features(x); group2 -> model.layers(x);
#   group3 -> hand-written ResNet stage walk; group4 -> ShuffleNet stage walk;
#   group5 -> Inception v3 stage walk; group6 -> GoogLeNet stage walk.
SUPPORTED_MODELS = {
    'group1': [
        'alexnet',
        'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn',
        'squeezenet1_0', 'squeezenet1_1',
        'densenet121', 'densenet169', 'densenet161', 'densenet201',
        'mobilenet_v2',
    ],
    'group2': [
        'mnasnet0_5', 'mnasnet0_75', 'mnasnet1_0', 'mnasnet1_3',
    ],
    'group3': [
        'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
        'resnext50_32x4d', 'resnext101_32x8d',
        'wide_resnet50_2', 'wide_resnet101_2',
    ],
    'group4': [
        'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0',
    ],
    'group5': ['inception_v3'],
    'group6': ['googlenet'],
}
class Extractor(nn.Module):
    """Wrap a torchvision classification model and run only its
    convolutional feature-extraction stages (the forward passes below stop
    before any global pooling / classifier layers).

    The forward pass dispatches on the model's group in SUPPORTED_MODELS,
    because different torchvision backbones expose their stages differently.
    """

    def __init__(self, model_name, pretrained=True):
        """Instantiate the named torchvision backbone.

        Raises ValueError when *model_name* is not in SUPPORTED_MODELS.
        """
        super().__init__()
        self.model_name = model_name
        self.pretrained = pretrained
        self.supported_models = SUPPORTED_MODELS
        if not self.model_name in self.get_supported_models():
            raise ValueError(f'{self.model_name} is unsupported extractor')
        self.__init_model()

    def __init_model(self):
        # inception_v3 / googlenet take an extra init_weights flag.
        if self.model_name in ['inception_v3', 'googlenet']:
            self.model = getattr(torchvision.models, self.model_name)(pretrained=self.pretrained, init_weights=True)
        else:
            self.model = getattr(torchvision.models, self.model_name)(pretrained=self.pretrained)

    def __resnet_forward(self, x):
        # Replays the ResNet stages up to (and including) layer4,
        # skipping avgpool and the fully connected head.
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        return x

    def __shufflenet_forward(self, x):
        # Replays the ShuffleNetV2 stages up to conv5.
        x = self.model.conv1(x)
        x = self.model.maxpool(x)
        x = self.model.stage2(x)
        x = self.model.stage3(x)
        x = self.model.stage4(x)
        x = self.model.conv5(x)
        return x

    def __inception_forward(self, x):
        # Replays the Inception v3 stages up to Mixed_7c
        # (aux classifier and final pooling are skipped).
        x = self.model._transform_input(x)
        x = self.model.Conv2d_1a_3x3(x)
        x = self.model.Conv2d_2a_3x3(x)
        x = self.model.Conv2d_2b_3x3(x)
        x = self.model.maxpool1(x)
        x = self.model.Conv2d_3b_1x1(x)
        x = self.model.Conv2d_4a_3x3(x)
        x = self.model.maxpool2(x)
        x = self.model.Mixed_5b(x)
        x = self.model.Mixed_5c(x)
        x = self.model.Mixed_5d(x)
        x = self.model.Mixed_6a(x)
        x = self.model.Mixed_6b(x)
        x = self.model.Mixed_6c(x)
        x = self.model.Mixed_6d(x)
        x = self.model.Mixed_6e(x)
        x = self.model.Mixed_7a(x)
        x = self.model.Mixed_7b(x)
        x = self.model.Mixed_7c(x)
        return x

    def __googlenet_forward(self, x):
        # Replays the GoogLeNet stages up to inception5b.
        x = self.model._transform_input(x)
        x = self.model.conv1(x)
        x = self.model.maxpool1(x)
        x = self.model.conv2(x)
        x = self.model.conv3(x)
        x = self.model.maxpool2(x)
        x = self.model.inception3a(x)
        x = self.model.inception3b(x)
        x = self.model.maxpool3(x)
        x = self.model.inception4a(x)
        x = self.model.inception4b(x)
        x = self.model.inception4c(x)
        x = self.model.inception4d(x)
        x = self.model.inception4e(x)
        x = self.model.maxpool4(x)
        x = self.model.inception5a(x)
        x = self.model.inception5b(x)
        return x

    def forward(self, x):
        """Return the backbone's feature maps for input batch *x*.

        Single-channel input is replicated to 3 channels first.
        """
        if x.shape[1] == 1:
            x = torch.cat([x, x, x], axis=1)
        if self.model_name in self.supported_models['group1']:
            return self.model.features(x)
        elif self.model_name in self.supported_models['group2']:
            return self.model.layers(x)
        elif self.model_name in self.supported_models['group3']:
            return self.__resnet_forward(x)
        elif self.model_name in self.supported_models['group4']:
            return self.__shufflenet_forward(x)
        elif self.model_name in self.supported_models['group5']:
            return self.__inception_forward(x)
        elif self.model_name in self.supported_models['group6']:
            return self.__googlenet_forward(x)

    def get_supported_models(self):
        """Return all supported model names, flattened across groups."""
        model_list = []
        for v in self.supported_models.values():
            model_list += v
        return model_list
if __name__ == '__main__':
    # Smoke-test every supported backbone on a dummy grayscale batch.
    all_models = [name for names in SUPPORTED_MODELS.values() for name in names]
    dummy = torch.rand(1, 1, 80, 80)
    for backbone in all_models:
        try:
            feats = Extractor(backbone, pretrained=True)(dummy)
            print(f'{backbone.ljust(20)}{feats.shape}')
        except Exception as e:
            print(e)
| [
"torch.cat",
"torch.rand"
] | [((4684, 4708), 'torch.rand', 'torch.rand', (['(1)', '(1)', '(80)', '(80)'], {}), '(1, 1, 80, 80)\n', (4694, 4708), False, 'import torch\n'), ((3703, 3731), 'torch.cat', 'torch.cat', (['[x, x, x]'], {'axis': '(1)'}), '([x, x, x], axis=1)\n', (3712, 3731), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, print_function, unicode_literals
from pymta.test_util import SMTPTestCase
from pythonic_testcase import *
from schwarz.mailqueue import SMTPMailer
from schwarz.mailqueue.compat import IS_PYTHON3
class SMTPMailerFullstackTest(SMTPTestCase):

    def test_can_send_message(self):
        """End-to-end: a message sent via SMTPMailer arrives at the test server."""
        sender = '<EMAIL>'
        recipients = ('<EMAIL>', '<EMAIL>',)
        payload = b'Header: value\n\nbody\n'
        mailer = SMTPMailer(self.hostname, port=self.listen_port)
        assert_true(mailer.send(sender, recipients, payload))

        queue = self.get_received_messages()
        assert_equals(1, queue.qsize())
        msg = queue.get(block=False)
        assert_equals(sender, msg.smtp_from)
        assert_equals(recipients, tuple(msg.smtp_to))
        assert_none(msg.username)
        # pymta converts the raw bytes to a string automatically
        expected = payload.decode('ASCII')
        if not IS_PYTHON3:
            # in Python 2 the received message lacks the final '\n' (unknown reason)
            expected = expected.rstrip('\n')
        assert_equals(expected, msg.msg_data)
| [
"schwarz.mailqueue.SMTPMailer"
] | [((394, 442), 'schwarz.mailqueue.SMTPMailer', 'SMTPMailer', (['self.hostname'], {'port': 'self.listen_port'}), '(self.hostname, port=self.listen_port)\n', (404, 442), False, 'from schwarz.mailqueue import SMTPMailer\n')] |
import os
import yaml
def main():
    """Rewrite each post's ``description`` front-matter line using the
    values listed in scripts/portfolio.yml.

    Keys in the YAML file look like '<post-filename>:description'; each
    post is copied from _posts/ to _new_posts/ with its description
    replaced.
    """
    filename = 'scripts/portfolio.yml'
    # safe_load avoids arbitrary-object construction, and the context
    # manager closes the file (yaml.load(open(...)) leaked the handle).
    with open(filename) as fh:
        new_posts = yaml.safe_load(fh)
    for key in new_posts.keys():
        target_filename = key.replace(':description', '')
        source_path = os.path.join('_posts', target_filename)
        target_path = os.path.join('_new_posts', target_filename)
        with open(source_path) as src, open(target_path, 'w') as out:
            for line in src.readlines():
                if line.startswith('description'):
                    out.write('description: "%s"\n' % new_posts[key])
                else:
                    out.write(line)

if __name__ == "__main__":
    main()
| [
"os.path.join"
] | [((228, 267), 'os.path.join', 'os.path.join', (['"""_posts"""', 'target_filename'], {}), "('_posts', target_filename)\n", (240, 267), False, 'import os\n'), ((290, 333), 'os.path.join', 'os.path.join', (['"""_new_posts"""', 'target_filename'], {}), "('_new_posts', target_filename)\n", (302, 333), False, 'import os\n')] |
# Known tag vocabulary (membership-tested as a set; a literal avoids the
# intermediate list of the original set([...]) form).
tags = {
    'Connected TV', 'Second Screen', 'Multimedia data analysis', 'Speech to Text', 'Social Networks', 'Smart City Services', 'Geo-localization', 'Geographical Information System', 'Point of Interest', 'Recommendation System', 'Open Data', 'App Development Tool', 'Mobile Services', 'User Interface Generator', 'Document Processing', 'Information Mashup', 'Augmented Reality', 'Gaming', 'Pervasive Gaming', 'User interface', '3D User Interface', 'Animated Characters', 'Game development tool', 'Text to Speech',
    'Geospatial', '3D graphics', '3D', 'Text and Speech', 'POI', 'Smart City', 'Recommendation', 'Social',
    'Mobile', 'Android', 'iOS', 'WebUI', 'Unity', 'XML3D',
    'Open software', 'SaaS service', 'Client side', 'Server side',
}
import logging
import urllib.request
def get_url_status(url):
    """Return True iff an HTTP GET of *url* answers with status 200.

    ``None`` and any unreachable or erroring URL yield False; HTTP errors
    are logged at info level, everything else at warning level.
    """
    if url is None:
        return False
    try:
        # Context manager closes the connection (the original leaked it).
        with urllib.request.urlopen(url) as r:
            return r.status == 200
    except urllib.error.HTTPError as e:
        logging.info('Error: %s' % e)
    except Exception as e:
        logging.warning('Error: %s' % e)
    return False
def check_remote_resource(url, msg = None):
    """Log and probe *url*; on failure optionally emit *msg* as a warning."""
    logging.info('Checking remote resource at %s' % url)
    reachable = get_url_status(url)
    if not reachable:
        if msg is not None:
            logging.warning(msg)
        return False
    return True
| [
"logging.warning",
"logging.info"
] | [((1105, 1157), 'logging.info', 'logging.info', (["('Checking remote resource at %s' % url)"], {}), "('Checking remote resource at %s' % url)\n", (1117, 1157), False, 'import logging\n'), ((1220, 1240), 'logging.warning', 'logging.warning', (['msg'], {}), '(msg)\n', (1235, 1240), False, 'import logging\n'), ((956, 985), 'logging.info', 'logging.info', (["('Error: %s' % e)"], {}), "('Error: %s' % e)\n", (968, 985), False, 'import logging\n'), ((1012, 1044), 'logging.warning', 'logging.warning', (["('Error: %s' % e)"], {}), "('Error: %s' % e)\n", (1027, 1044), False, 'import logging\n')] |
import alpaca_trade_api as tradeapi
import os
import config
import sys
class TradeSession:
def __init__(self):
self.api = tradeapi.REST(config.APCA_API_KEY_ID, config.APCA_API_SECRET_KEY,
base_url=config.APCA_API_BASE_URL,api_version='v2')
# Extract apca_api_key and secret key from databse per user
# going to look like this
# parser = argparse.ArgumentParser()
# parser.add_argument('--key-id', help='APCA_API_KEY_ID')
# parser.add_argument('--secret-key', help='APCA_API_SECRET_KEY')
# parser.add_argument('--base-url')
# args = parser.parse_args()
# using mysql
# Account Connectivity Test
def connect_api(self):
account = self.api.get_account()
print(account)
return account
# once user inters api key have them test it
# connect this to front end with descrption "test connect" after user inputs api key and secret key
# Checking for stock testing
def look_up_stock(self):
userInput = input("Enter Stock Name Example Apple(AAPL): ")
aapl = self.api.get_barset(userInput, 'day')
print(aapl.df)
return aapl.df
# have this communicate to front end and let user input what they want to look up
#ACCOUNT
def show_buying_power(self):
account = self.api.get_account()
# get api account from databse
# Check if our account is restricted from trading.
if account.trading_blocked:
print('Account is currently restricted from trading.')
# Check how much money we can use to open new positions.
print('${} is available as buying power.'.format(account.buying_power))
return account.buying_power
def show_gain_loss(self):
account = self.api.get_account()
# get key from databse
# Check our current balance vs. our balance at the last market close
balance_change = float(account.equity) - float(account.last_equity)
print(f'Today\'s portfolio balance change: ${balance_change}')
return balance_change
def list_all_assets(self):
# Get a list of all active assets.
active_assets =self. api.list_assets(status='active')
#Filter the assets down to just those on NASDAQ.
nasdaq_assets = [a for a in active_assets if a.exchange == 'NASDAQ']
print(nasdaq_assets)
# check if stock market is open
# Was getting a error so made its own function for market is open
def market_is_open(self):
api = tradeapi.REST()
# Check if the market is open now.
clock = api.get_clock()
print('The market is {}'.format('open.' if clock.is_open else 'closed.'))
# Check when the market was open on Dec. 1, 2018
date = '2018-12-01'
calendar = api.get_calendar(start=date, end=date)[0]
print('The market opened at {} and closed at {} on {}.'.format(
calendar.open,
calendar.close,
date
))
def is_tradable(self,asset):
my_asset = self.api.get_asset(asset)
if my_asset.tradable:
return True
return False
# CLI that selects user bot options
# It is used to call from the test
# It is connected to the Alpaca API as well
def cli():
print("--------------------------------------------------------------------------")
print(" Welcome to AlgoBot Project ")
print("--------------------------------------------------------------------------")
print(" Please select number opton you would like to do ")
print(" ")
print("1. Account Information")
print("2. Buying Power")
print("3. List Assets")
print("4. Show Gains and Losses")
print("5. Look Up Stock Price")
print("6. Exit AlgoBot Project")
def menu():
cli()
''' Main menu to choose an item '''
chosen_element = 0
chosen_element = input("Enter a selection from 1 to 6: ")
if int(chosen_element) == 1:
print('Account Information')
x.connect_api()
menu()
# Call Account information Method
elif int(chosen_element) == 2:
# Call Stock Price Look up methond
print('Your buying power is: ')
x.show_buying_power()
menu()
elif int(chosen_element) == 3:
# call List of Assets Method
print('List of Assets')
x.list_all_assets()
menu()
elif int(chosen_element) == 4:
# Call Gains and Losses
print('Your Gains and Losses\n')
print("Gain/Loss: ",x.show_gain_loss())
menu() # keeps menu tab open to make next selection and not close
elif int(chosen_element) == 5:
# Look up stock price
# this has user input so the user will have to input stock they would like to look up
# example Tesla = TSLA, Apple = AAPL etc
print('Look Up Stock Price')
x.look_up_stock()
menu()
# exits the menu when 6 is selected.
elif int(chosen_element) == 6:
print('Goodbye!')
sys.exit()
else:
print('Sorry, the value entered must be a number from 1 to 5, then try again!')
if __name__ == '__main__':
x = TradeSession()
menu()
cli()
# for testing purposes
#x.show_buying_power()
#print("Current buying power: ",x.show_buying_power())
#print("Gain/Loss: ",x.show_gain_loss())
#x.list_assets()
#x.list_all_assets(api)
#print(x.is_tradable(api,"AAPL"))
# testing aws pipline | [
"alpaca_trade_api.REST",
"sys.exit"
] | [((144, 267), 'alpaca_trade_api.REST', 'tradeapi.REST', (['config.APCA_API_KEY_ID', 'config.APCA_API_SECRET_KEY'], {'base_url': 'config.APCA_API_BASE_URL', 'api_version': '"""v2"""'}), "(config.APCA_API_KEY_ID, config.APCA_API_SECRET_KEY, base_url=\n config.APCA_API_BASE_URL, api_version='v2')\n", (157, 267), True, 'import alpaca_trade_api as tradeapi\n'), ((2675, 2690), 'alpaca_trade_api.REST', 'tradeapi.REST', ([], {}), '()\n', (2688, 2690), True, 'import alpaca_trade_api as tradeapi\n'), ((5401, 5411), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5409, 5411), False, 'import sys\n')] |
import numpy as np
import cv2
def ransac_align_points(
pA, pB, threshold, diagonal_constraint=0.75, default=np.eye(4)[:3],
):
"""
"""
# sensible requirement of 51 or more spots to compute ransac affine
if len(pA) <= 50 or len(pB) <= 50:
if default is not None:
print("Insufficient spot matches for ransac, returning default identity")
return default
else:
raise ValueError("Insufficient spot matches for ransac, need more than 50")
# compute the affine
r, Aff, inline = cv2.estimateAffine3D(pA, pB, ransacThreshold=threshold, confidence=0.999)
# rarely ransac just doesn't work (depends on data and parameters)
# sensible choices for hard constraints on the affine matrix
if np.any( np.diag(Aff) < diagonal_constraint ):
if default is not None:
print("Degenerate affine produced, returning default identity")
return default
else:
raise ValueError("Degenerate affine produced, ransac failed")
return Aff
| [
"numpy.eye",
"cv2.estimateAffine3D",
"numpy.diag"
] | [((554, 627), 'cv2.estimateAffine3D', 'cv2.estimateAffine3D', (['pA', 'pB'], {'ransacThreshold': 'threshold', 'confidence': '(0.999)'}), '(pA, pB, ransacThreshold=threshold, confidence=0.999)\n', (574, 627), False, 'import cv2\n'), ((114, 123), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (120, 123), True, 'import numpy as np\n'), ((780, 792), 'numpy.diag', 'np.diag', (['Aff'], {}), '(Aff)\n', (787, 792), True, 'import numpy as np\n')] |
# Credit - https://github.com/balajisrinivas/Detect-Face-and-Blur-OpenCV
# For blurface
from asyncio import TimeoutError, sleep
from calendar import timegm
from datetime import datetime, timedelta
from json import JSONDecodeError, dumps
from platform import system
from random import choice
from statistics import mean, median, mode, pstdev, stdev
from string import punctuation
from subprocess import PIPE, Popen, STDOUT
from time import gmtime, mktime, time
from urllib import parse
from asyncpraw import Reddit
from cv2 import GaussianBlur, dnn, imread, imwrite
from deep_translator import GoogleTranslator, constants
from discord import Embed, File, channel
from discord.ext import commands, tasks
from lyricsgenius import Genius
from mendeleev import element
from numpy import array, max, min, sqrt, squeeze, sum
from plotly import graph_objects as go
from PyPDF2 import PdfFileReader
from qrcode import QRCode
import config
from src.utils import funcs
from src.utils.base_cog import BaseCog
from src.utils.page_buttons import PageButtons
# NOTE(review): not referenced anywhere in this part of the file; presumably an
# input cap for a highest-common-factor/LCM command — confirm at the call site.
HCF_LIMIT = 1000000
class Utility(BaseCog, name="Utility", description="Some useful commands for getting data or calculating things."):
    def __init__(self, botInstance, *args, **kwargs):
        # Set up the base cog, then the shared reminder queues consumed by reminderLoop.
        super().__init__(botInstance, *args, **kwargs)
        self.reminderIDsToDelete = set()  # reminder IDs queued for removal (drained by reminderLoop)
        self.remindersToAdd = []  # new reminder dicts queued for persistence (drained by reminderLoop)
        # Ensure the reminders JSON file exists before the background loop starts.
        self.client.loop.create_task(self.__generateFiles())
    async def __generateFiles(self):
        """Create data/reminders.json (empty list) if missing, then start the reminder loop."""
        await funcs.generateJson("reminders", {"list": []})
        self.reminderLoop.start()
def blurFace(self, filename: str):
imgName = f"{time()}.png"
prototxtPath = funcs.PATH + funcs.getResource(self.name, "deploy.prototxt")
modelPath = funcs.PATH + funcs.getResource(self.name, "model.caffemodel")
model = dnn.readNetFromCaffe(prototxtPath, modelPath)
image = imread(filename)
h, w = image.shape[:2]
kernelW = (w // 7) | 1
kernelH = (h // 7) | 1
blob = dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))
model.setInput(blob)
output = squeeze(model.forward())
for i in range(0, output.shape[0]):
confidence = output[i, 2]
if confidence > 0.4:
box = output[i, 3:7] * array([w, h, w, h])
startX, startY, endX, endY = box.astype(int)
face = image[startY:endY, startX:endX]
face = GaussianBlur(face, (kernelW, kernelH), 0)
image[startY:endY, startX:endX] = face
imwrite(f"{funcs.PATH}/temp/{imgName}", image)
return imgName
@tasks.loop(seconds=2.0)
async def reminderLoop(self):
update = False
reminders = await funcs.readJson("data/reminders.json")
for reminder in reminders["list"]:
rtime = reminder["data"]["time"]
if rtime <= int(time()) and await funcs.userIDNotBlacklisted(reminder["data"]["userID"]):
try:
user = self.client.get_user(reminder["data"]["userID"])
e = Embed(title="⚠️ Reminder", description=reminder["data"]["reminder"])
e.set_footer(text=f"Remind time: {str(datetime.utcfromtimestamp(rtime)).split('.')[0]} UTC")
await user.send(embed=e)
except:
pass
self.reminderIDsToDelete.add(reminder["ID"])
if reminder["ID"] in self.reminderIDsToDelete:
self.reminderIDsToDelete.remove(reminder["ID"])
reminders["list"].remove(reminder)
update = True
for reminder in self.remindersToAdd:
reminders["list"].append(reminder)
self.remindersToAdd.remove(reminder)
update = True
if update:
await funcs.dumpJson("data/reminders.json", reminders)
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="reminderdel", description="Removes a reminder.", usage="<reminder ID>",
                      aliases=["reminderdelete", "reminderemove", "removereminder", "deletereminder", "delreminder", "delremind"])
    async def reminderdel(self, ctx, reminderID=None):
        # Deletes one of the invoking user's reminders by its hex ID. The actual
        # removal from disk is deferred: the ID is queued in
        # self.reminderIDsToDelete and drained by reminderLoop.
        if not reminderID:
            return await ctx.send(
                embed=funcs.errorEmbed(
                    None,
                    f"You must specify a reminder ID! See `{self.client.command_prefix}reminders` for a list of your reminders."
                )
            )
        reminders = await funcs.readJson("data/reminders.json")
        toremove = None
        # Only match reminders owned by the invoking user (IDs are lowercase hex).
        for reminder in reminders["list"]:
            if reminder["ID"] == reminderID.casefold() and reminder["data"]["userID"] == ctx.author.id:
                toremove = reminder["ID"]
                break
        if toremove:
            self.reminderIDsToDelete.add(toremove)
            await ctx.reply(f"Removed reminder with ID: `{toremove}`")
        else:
            await ctx.reply(
                embed=funcs.errorEmbed(
                    None,
                    f"Unknown reminder ID. See `{self.client.command_prefix}reminders` for a list of your reminders."
                )
            )
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="reminder", description="Creates a reminder or shows a list of your reminders.",
                      aliases=["remind", "remindme", "reminders"],
                      usage="[Xm/h/d (replace X with number of minutes/hours/days)] <message>")
    async def reminder(self, ctx, minutes=None, *, r=None):
        # With no arguments: lists the caller's pending reminders (paginated).
        # With a duration and message: queues a new reminder for reminderLoop to persist.
        if minutes and not r:
            return await ctx.send(embed=funcs.errorEmbed(None, "Please leave a message!"))
        reminders = await funcs.readJson("data/reminders.json")
        now = int(time())
        if not minutes and not r:
            # Listing mode: one embed page per pending (future) reminder.
            yourreminders = []
            for reminder in reminders["list"]:
                rtime = reminder["data"]["time"]
                if reminder["data"]["userID"] == ctx.author.id and rtime > now:
                    e = Embed(title="Your Reminders", description=reminder["data"]["reminder"])
                    e.add_field(name="ID", value=f"`{reminder['ID']}`")
                    e.add_field(name="Remind Date (UTC)",
                                value=f'`{str(datetime.utcfromtimestamp(rtime)).split(".")[0]}`')
                    e.add_field(name="Will Remind In", value=f'`{funcs.timeDifferenceStr(rtime, now)}`')
                    yourreminders.append(e)
            if not yourreminders:
                yourreminders.append(Embed(title="Your Reminders", description="None"))
            else:
                for i, e in enumerate(yourreminders):
                    e.set_footer(text="Page {:,} of {:,}".format(i + 1, len(yourreminders)))
            m = await ctx.reply(embed=yourreminders[0])
            if len(yourreminders) > 1:
                await m.edit(view=PageButtons(ctx, self.client, m, yourreminders))
        else:
            # Creation mode: accept a bare number (minutes) or a number with an
            # h/d/m suffix (hours/days/minutes).
            try:
                minutes = float(minutes)
            except:
                try:
                    if minutes.casefold().endswith("h"):
                        minutes = float(minutes[:-1]) * 60
                    elif minutes.casefold().endswith("d"):
                        minutes = float(minutes[:-1]) * 1440
                    elif minutes.casefold().endswith("m"):
                        minutes = float(minutes[:-1])
                    else:
                        raise Exception
                except:
                    return await ctx.reply(embed=funcs.errorEmbed(None, f"Invalid input: `{minutes}`"))
            # Sanity caps: duration and message length.
            if minutes > 100000000 or len(r) > 500:
                return await ctx.reply(embed=funcs.errorEmbed(None, "That value is too big or your input is too long."))
            reminder = {
                "ID": funcs.randomHex(16),
                "data": {
                    "userID": ctx.author.id,
                    "time": int(minutes * 60 + now),  # absolute epoch seconds of the remind time
                    "reminder": r
                }
            }
            # Queued rather than written directly; reminderLoop persists it.
            self.remindersToAdd.append(reminder)
            await ctx.reply("Added reminder: {}\n\nID: `{}`\n\nI will remind you in {} ({}). Be sure to have DMs on!".format(
                reminder["data"]["reminder"],
                reminder["ID"],
                funcs.timeDifferenceStr(reminder["data"]["time"], now),
                str(datetime.utcfromtimestamp(reminder["data"]["time"])).split(".")[0]
            ))
    async def gatherLabelsAndValues(self, ctx):
        """Interactively collect up to 25 chart labels, then one value per label.

        Control words (case-insensitive): ``!undo`` drops the previous entry,
        ``!done`` finishes label entry, ``!cancel`` aborts (returns ``(0, 0)``).
        Raises Exception if fewer than 2 labels are given or value entry times out.
        Returns ``(labels, values)`` on success.
        """
        labels, values = [], []
        while len(labels) < 25:
            try:
                await ctx.send(
                    f"Enter name for label **{len(labels) + 1}**, `!undo` to delete previous entry," +
                    " `!done` to move on to values, or `!cancel` to cancel."
                )
                entry = await self.client.wait_for(
                    "message",
                    check=lambda m: m.author == ctx.author and m.channel == ctx.channel and len(m.content) <= 100,
                    timeout=60
                )
            except TimeoutError:
                # Timing out during label entry just ends label collection.
                break
            content = entry.content
            if content.casefold() == "!undo":
                try:
                    labels.pop(-1)
                except:
                    await ctx.send(embed=funcs.errorEmbed(None, "No entries."))
            elif content.casefold() == "!done":
                break
            elif content.casefold() == "!cancel":
                return 0, 0
            else:
                labels.append(content)
        if len(labels) < 2:
            raise Exception("Not enough labels.")
        # Value collection loops until each label has a numeric value.
        while len(values) != len(labels):
            try:
                await ctx.send(
                    f'Enter value (NOT percentage) for label **{labels[len(values)]}**, ' +
                    '`!undo` to delete previous entry, or `!cancel` to cancel.'
                )
                entry = await self.client.wait_for(
                    "message",
                    check=lambda m: m.author == ctx.author and m.channel == ctx.channel and len(m.content) <= 100,
                    timeout=60
                )
            except TimeoutError:
                # Unlike labels, a timeout here is fatal: counts must match.
                raise Exception("Not enough values.")
            content = entry.content
            if content.casefold() == "!undo":
                try:
                    values.pop(-1)
                except:
                    await ctx.send(embed=funcs.errorEmbed(None, "No entries."))
            elif content.casefold() == "!cancel":
                return 0, 0
            else:
                try:
                    values.append(float(content))
                except:
                    await ctx.send(embed=funcs.errorEmbed(None, "Invalid value."))
        return labels, values
async def gatherXtitleAndYtitle(self, ctx):
xtitle, ytitle = "", ""
try:
await ctx.send('Enter your desired x-axis title, or `!na` if you wish to leave it blank.')
entry = await self.client.wait_for(
"message",
check=lambda m: m.author == ctx.author and m.channel == ctx.channel and len(m.content) <= 100,
timeout=60
)
if entry.content.casefold() != "!na":
xtitle = entry.content
except TimeoutError:
pass
try:
await ctx.send('Enter your desired y-axis title, or `!na` if you wish to leave it blank.')
entry = await self.client.wait_for(
"message",
check=lambda m: m.author == ctx.author and m.channel == ctx.channel and len(m.content) <= 100,
timeout=60
)
if entry.content.casefold() != "!na":
ytitle = entry.content
except TimeoutError:
pass
return xtitle, ytitle
@staticmethod
async def makeChartEmbed(ctx, fig, labels, values, imgName, title):
e = Embed(title=title, description=f"Requested by: {ctx.author.mention}")
for i, c in enumerate(labels):
e.add_field(name=c, value=f"`{funcs.removeDotZero(values[i])}`")
await funcs.funcToCoro(fig.write_image, f"{funcs.PATH}/temp/{imgName}")
image = File(f"{funcs.PATH}/temp/{imgName}")
e.set_image(url=f"attachment://{imgName}")
return e, image
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="piechart", description="Generates a pie chart.", aliases=["pie", "piegraph"], usage="[title]")
    async def piechart(self, ctx, *, title: str=""):
        # Interactive pie-chart builder: collects labels/values, renders via
        # plotly, and replies with the image. Temp file is always cleaned up.
        if len(title) > 100:
            return await ctx.reply(embed=funcs.errorEmbed(None, "Title must be 100 characters or less."))
        imgName = f"{time()}.png"
        image = None
        try:
            labels, values = await self.gatherLabelsAndValues(ctx)
            if labels == 0 and values == 0:  # (0, 0) is the "!cancel" sentinel
                return await ctx.send("Cancelled chart generation.")
        except Exception as ex:
            return await ctx.send(embed=funcs.errorEmbed(None, str(ex)))
        try:
            fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
            fig.update_layout(title=title)
            e, image = await self.makeChartEmbed(ctx, fig, labels, values, imgName, title if title else "Pie Chart")
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, "An error occurred, please try again later.")
        await ctx.reply(embed=e, file=image)
        await funcs.deleteTempFile(imgName)
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="linechart", description="Generates a line chart.", aliases=["line", "linegraph"], usage="[title]")
    async def linechart(self, ctx, *, title: str=""):
        # Interactive line-chart builder; same flow as piechart plus axis titles.
        if len(title) > 100:
            return await ctx.reply(embed=funcs.errorEmbed(None, "Title must be 100 characters or less."))
        imgName = f"{time()}.png"
        image = None
        try:
            labels, values = await self.gatherLabelsAndValues(ctx)
            if labels == 0 and values == 0:  # (0, 0) is the "!cancel" sentinel
                return await ctx.send("Cancelled chart generation.")
        except Exception as ex:
            return await ctx.send(embed=funcs.errorEmbed(None, str(ex)))
        try:
            fig = go.Figure(data=[go.Scatter(x=labels, y=values)])
            xtitle, ytitle = await self.gatherXtitleAndYtitle(ctx)
            fig.update_layout(title=title, xaxis_title=xtitle, yaxis_title=ytitle)
            e, image = await self.makeChartEmbed(ctx, fig, labels, values, imgName, title if title else "Line Chart")
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, "An error occurred, please try again later.")
        await ctx.reply(embed=e, file=image)
        await funcs.deleteTempFile(imgName)
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="barchart", description="Generates a bar chart.", aliases=["bar", "bargraph"], usage="[title]")
    async def barchart(self, ctx, *, title: str=""):
        # Interactive bar-chart builder; same flow as linechart with go.Bar.
        if len(title) > 100:
            return await ctx.reply(embed=funcs.errorEmbed(None, "Title must be 100 characters or less."))
        imgName = f"{time()}.png"
        image = None
        try:
            labels, values = await self.gatherLabelsAndValues(ctx)
            if labels == 0 and values == 0:  # (0, 0) is the "!cancel" sentinel
                return await ctx.send("Cancelled chart generation.")
        except Exception as ex:
            return await ctx.send(embed=funcs.errorEmbed(None, str(ex)))
        try:
            fig = go.Figure(data=[go.Bar(x=labels, y=values)])
            xtitle, ytitle = await self.gatherXtitleAndYtitle(ctx)
            fig.update_layout(title=title, xaxis_title=xtitle, yaxis_title=ytitle)
            e, image = await self.makeChartEmbed(ctx, fig, labels, values, imgName, title if title else "Bar Chart")
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, "An error occurred, please try again later.")
        await ctx.reply(embed=e, file=image)
        await funcs.deleteTempFile(imgName)
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="github", description="Returns statistics about a GitHub repository.", usage='[username/repository]',
                      aliases=["loc", "code", "linesofcode", "repository", "repo", "git", "source", "sourcecode"])
    async def repository(self, ctx, *, repo: str=""):
        # Shows lines-of-code stats for a GitHub repo via the codetabs API.
        # Defaults to this bot's own repo when no argument is given.
        await ctx.send("Getting repository statistics. Please wait...")
        try:
            repo = repo.casefold().replace(" ", "") or config.githubRepo
            # Normalize: strip trailing slashes and accept full github.com URLs.
            while repo.endswith("/"):
                repo = repo[:-1]
            repo = repo.split("github.com/")[1] if "github.com/" in repo else repo
            res = await funcs.getRequest("https://api.codetabs.com/v1/loc/?github=" + repo)
            e = Embed(description=f"https://github.com/{repo}")
            e.set_author(name=repo,
                         icon_url="https://media.discordapp.net/attachments/771698457391136798/927918869702647808/github.png")
            # Top 25 languages by line count (Discord's embed field limit).
            for i in sorted(res.json(), reverse=True, key=lambda x: x["linesOfCode"])[:25]:
                e.add_field(name=f"{i['language']} Lines (Files)", value="`{:,} ({:,})`".format(i["linesOfCode"], i["files"]))
            e.set_footer(text="Note: Lines of code do not include comment or blank lines.")
            e.set_image(url=funcs.githubRepoPic(repo))
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, "Unknown repository or server error.")
        await ctx.reply(embed=e)
    @commands.cooldown(1, 3, commands.BucketType.user)
    @commands.command(name="covid", description="Gets COVID-19 data.",
                      aliases=["coronavirus", "corona", "covid19", "cv", "c19", "cv19"], usage="[location]")
    async def covid(self, ctx, *, searchtype: str=""):
        # Shows COVID-19 statistics for a country (or globally with no argument)
        # from the RapidAPI corona-virus-world-and-india-data endpoint.
        headers = {
            "x-rapidapi-host": "corona-virus-world-and-india-data.p.rapidapi.com",
            "x-rapidapi-key": config.rapidApiKey
        }
        try:
            res = await funcs.getRequest("https://corona-virus-world-and-india-data.p.rapidapi.com/api", headers=headers)
            data = res.json()
            total = data["countries_stat"]
            found = False
            if searchtype == "":
                total = data["world_total"]
            else:
                # Map common country aliases to the names the API uses.
                if searchtype.casefold() == "us" or searchtype.casefold().startswith(("united states", "america")):
                    searchtype = "usa"
                elif searchtype.casefold().startswith(("united kingdom", "great britain", "britain", "england")) \
                        or searchtype.casefold() == "gb":
                    searchtype = "uk"
                elif searchtype.casefold().startswith("hk"):
                    searchtype = "hong kong"
                if searchtype.casefold().startswith(("korea", "south korea", "sk")):
                    searchtype = "S. Korea"
                # Case- and dot-insensitive country name match.
                for i in total:
                    if i["country_name"].casefold().replace(".", "") == searchtype.casefold().replace(".", ""):
                        found = True
                        total = i
                        break
                if not found:
                    total = data["world_total"]
            e = Embed(description="Statistics taken at: `" + data["statistic_taken_at"] + " UTC`")
            e.set_author(name=f"COVID-19 Statistics ({total['country_name'] if found else 'Global'})",
                         icon_url="https://upload.wikimedia.org/wikipedia/commons/thumb/8/82/" +
                                  "SARS-CoV-2_without_background.png/220px-SARS-CoV-2_without_background.png")
            # API values are comma-formatted strings (possibly "N/A"), hence the
            # replace(',', '')/replace('N/A', '0') chains before arithmetic.
            if found:
                e.add_field(name="Country", value=f"`{total['country_name']}`")
                e.add_field(name="Total Cases", value=f"`{total['cases']}`")
                e.add_field(name="Total Deaths", value=f"`{total['deaths']}" +
                                                       "\n({}%)`".format(round(int(total['deaths']
                                                       .replace(',', '').replace('N/A', '0')) / int(total['cases']
                                                       .replace(',', '').replace('N/A', '0')) * 100, 2)))
                e.add_field(name="Total Recovered", value=f"`{total['total_recovered']}" +
                                                          "\n({}%)`".format(round(int(total['total_recovered']
                                                          .replace(',', '').replace('N/A', '0')) / int(total['cases']
                                                          .replace(',', '').replace('N/A', '0')) * 100, 2)))
                e.add_field(name="Active Cases", value=f"`{total['active_cases']}" +
                                                       "\n({}%)`".format(round(int(total['active_cases']
                                                       .replace(',', '').replace('N/A', '0')) / int(total['cases']
                                                       .replace(',', '').replace('N/A', '0')) * 100, 2)))
                e.add_field(name="Critical Cases", value=f"`{total['serious_critical']}" +
                                                         "\n({}%)`".format(round(int(total['serious_critical']
                                                         .replace(',', '').replace('N/A', '0')) / int(total['active_cases']
                                                         .replace(',', '').replace('N/A', '0')) * 100, 2)))
                e.add_field(name="Total Tests", value=f"`{total['total_tests']}`")
            else:
                # Global totals use different key names than per-country entries.
                e.add_field(name="Total Cases", value=f"`{total['total_cases']}`")
                e.add_field(name="Total Deaths", value=f"`{total['total_deaths']}" +
                                                       "\n({}%)`".format(round(int(total['total_deaths']
                                                       .replace(',', '').replace('N/A', '0')) / int(total['total_cases']
                                                       .replace(',', '').replace('N/A', '0')) * 100, 2)))
                e.add_field(name="Total Recovered", value=f"`{total['total_recovered']}" +
                                                          "\n({}%)`".format(round(int(total['total_recovered']
                                                          .replace(',', '').replace('N/A', '0')) / int(total['total_cases']
                                                          .replace(',', '').replace('N/A', '0')) * 100, 2)))
                e.add_field(name="Active Cases", value=f"`{total['active_cases']}" +
                                                       "\n({}%)`".format(round(int(total['active_cases']
                                                       .replace(',', '').replace('N/A', '0')) / int(total['total_cases']
                                                       .replace(',', '').replace('N/A', '0')) * 100, 2)))
                e.add_field(name="Critical Cases", value=f"`{total['serious_critical']}`")
            e.add_field(name="New Cases Today", value=f"`{total['new_cases']}`")
            e.add_field(name="New Deaths Today", value=f"`{total['new_deaths']}`")
            e.set_footer(text="Note: The data provided may not be 100% accurate.")
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, "Invalid input or server error.")
        await ctx.reply(embed=e)
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.command(name="flightinfo", description="Gets information about a flight.",
aliases=["flight", "flightradar"], usage="<flight number>")
async def flightinfo(self, ctx, *, flightstr: str=""):
if flightstr == "":
e = funcs.errorEmbed(None, "Empty input.")
else:
ph = "Unknown"
flightstr = flightstr.upper().replace(" ", "")
url = "https://api.flightradar24.com/common/v1/flight/list.json?"
params = {"fetchBy": "flight", "page": "1", "limit": "25", "query": flightstr}
try:
res = await funcs.getRequest(url, headers={"User-agent": "*"}, params=params)
allflights = res.json()
fdd = allflights["result"]["response"]["data"]
dago, eta = "", ""
reg, data, arrive, realarrive, depart, realdepart = ph, ph, ph, ph, ph, ph
ft, duration, originname, originicao, originiata, destname, desticao, destiata = ph, ph, ph, ph, ph, ph, ph, ph
flighturl = f"https://www.flightradar24.com/data/flights/{flightstr.casefold()}"
status, callsign, aircraft, flightdate, airline = ph, ph, ph, ph, ph
for data in fdd:
callsign = data["identification"]["callsign"]
if callsign is None:
callsign = "None"
status = str(data["status"]["text"])
aircraft = f"{str(data['aircraft']['model']['text'])} ({str(data['aircraft']['model']['code'])})"
reg = data["aircraft"]["registration"]
airline = data["airline"]["name"]
originname = data["airport"]["origin"]["name"]
originiata = data["airport"]["origin"]["code"]["iata"]
originicao = data["airport"]["origin"]["code"]["icao"]
destname = data["airport"]["destination"]["name"]
if not originname or not destname:
continue
destiata = data["airport"]["destination"]["code"]["iata"]
desticao = data["airport"]["destination"]["code"]["icao"]
realdepart = data["time"]["real"]["departure"]
depart = "Local Departure Time"
realarrive = data["time"]["real"]["arrival"]
arrive = "Local Arrival Time"
if realarrive is None:
realarrive = data["time"]["estimated"]["arrival"]
if realarrive is None:
continue
arrive = "Estimated Local Arrival Time"
duration = str(datetime.fromtimestamp(realarrive) - datetime.utcnow())[:5]
if duration[1:2] == ":":
duration = "0" + (duration[:4])
eta = "Estimated Flight Time Remaining"
else:
duration = str(datetime.fromtimestamp(realarrive) - datetime.fromtimestamp(realdepart))[:5]
if duration[1:2] == ":":
duration = "0" + (duration[:4])
eta = "Total Flight Duration"
if eta.startswith("\nEstimated"):
ft = str(datetime.utcnow() - datetime.fromtimestamp(realdepart))[:5]
if ft[1:2] == ":":
ft = "0" + (ft[:4])
dago = "Current Flight Time"
realdepart = datetime.fromtimestamp(realdepart + data["airport"]["origin"]["timezone"]["offset"])
realarrive = datetime.fromtimestamp(realarrive + data["airport"]["destination"]["timezone"]["offset"])
flightdate = funcs.dateBirthday(realdepart.day, realdepart.month, realdepart.year, noBD=True)
break
imgl = res.json()["result"]["response"]["aircraftImages"]
thumbnail = "https://images.flightradar24.com/opengraph/fr24_logo_twitter.png"
for image in imgl:
if image["registration"] != reg:
continue
thumbnail = list(
image["images"]["thumbnails"]
)[0]["src"][:-4].replace("_tb", "").replace("com/200/", "com/full/")
e = Embed(title=f"Flight {flightstr}", description=flighturl)
e.set_image(url=thumbnail)
e.add_field(name="Date", value=f"`{flightdate}`")
e.add_field(name="Callsign", value=f"`{callsign}`")
e.add_field(name="Status", value=f"`{status}`")
e.add_field(name="Aircraft", value=f"`{aircraft}`")
e.add_field(name="Registration", value=f"`{reg} ({data['aircraft']['country']['name']})`")
e.add_field(name="Airline",
value=f"`{airline} ({data['airline']['code']['iata']}/{data['airline']['code']['icao']})`")
e.add_field(name="Origin", value=f"`{originname} ({originiata}/{originicao})`")
e.add_field(name="Destination", value=f"`{destname} ({destiata}/{desticao})`")
e.add_field(name=depart, value=f"`{str(realdepart)}`")
if dago:
e.add_field(name=dago, value=f"`{ft}`")
e.add_field(name=arrive, value=f"`{str(realarrive)}`")
if eta:
e.add_field(name=eta, value=f"`{duration}`")
e.set_footer(text="Note: Flight data provided by Flightradar24 may not be 100% accurate.",
icon_url="https://i.pinimg.com/564x/8c/90/8f/8c908ff985364bdba5514129d3d4e799.jpg")
except Exception as ex:
funcs.printError(ctx, ex)
e = funcs.errorEmbed(None, "Unknown flight or server error.")
await ctx.reply(embed=e)
    @commands.cooldown(1, 3, commands.BucketType.user)
    @commands.command(name="weather", description="Finds the current weather of a location.",
                      aliases=["w"], usage="<location>")
    async def weather(self, ctx, *, location: str=""):
        # Current conditions for a location via OpenWeatherMap.
        # presumably funcs.KELVIN == 273.15; "zero" converts API Kelvin readings
        # to Celsius — confirm in utils.funcs.
        zero = -funcs.KELVIN
        url = f"http://api.openweathermap.org/data/2.5/weather?q={location.casefold().replace(' ', '%20')}" + \
              f"&APPID={config.owmKey}"
        try:
            r = await funcs.getRequest(url)
            data = r.json()
            country = data["sys"]["country"]
            temp = data["main"]["temp"] + zero
            # "timezone" is the location's UTC offset in seconds; both stamps are
            # rendered in the location's local time.
            lastupdate = str(datetime.fromtimestamp(int(data["dt"]) + (int(data["timezone"]))))
            timenow = str(datetime.fromtimestamp(int(time()) + int(data["timezone"])))
            temp2 = funcs.celsiusToFahrenheit(temp)
            high = data["main"]["temp_max"] + zero
            low = data["main"]["temp_min"] + zero
            high2 = funcs.celsiusToFahrenheit(high)
            low2 = funcs.celsiusToFahrenheit(low)
            winddegrees = float(data["wind"]["deg"])
            e = Embed(title=f"{data['name']}, {country}", description=f"**{data['weather'][0]['description'].title()}**")
            e.add_field(name="Temperature", value="`{}°F / {}°C`".format(round(temp2, 1), round(temp, 1)))
            e.add_field(name="Temp Range", value="`{}°F - {}°F\n".format(round(low2, 1), round(high2, 1)) +
                                                 "{}°C - {}°C`".format(round(low, 1), round(high, 1)))
            e.add_field(name="Humidity", value="`{}%`".format(data["main"]["humidity"]))
            e.add_field(name="Wind Speed", value="`{} m/s`".format(data["wind"]["speed"]))
            e.add_field(name="Wind Direction",
                        value="`{}° ({})`".format(int(winddegrees), funcs.degreesToDirection(winddegrees)))
            e.add_field(name="Local Time", value=f"`{timenow}`")
            e.add_field(name="Last Updated (Local Time)", value=f"`{lastupdate}`")
            e.set_footer(text="Note: Weather data provided by OpenWeatherMap may not be 100% accurate.",
                         icon_url="https://cdn.discordapp.com/attachments/771404776410972161/931460099296358470/unknown.png")
            e.set_thumbnail(url=f"http://openweathermap.org/img/wn/{data['weather'][0]['icon']}@2x.png")
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, "Unknown location or server error.")
        await ctx.reply(embed=e)
@commands.cooldown(1, 15, commands.BucketType.user)
@commands.command(name="translate", description="Translates text to a different language. " +
"Translation may sometimes fail due to rate limit.",
aliases=["t", "translator", "trans", "tr", "translation"], usage="<language code to translate to> <input>")
async def translate(self, ctx, dest=None, *, text):
try:
dest = dest.casefold()
if dest == "zh-tw":
dest = "zh-TW"
elif dest == "zh-cn":
dest = "zh-CN"
if dest not in constants.GOOGLE_CODES_TO_LANGUAGES.keys():
e = funcs.errorEmbed(
"Invalid language code!",
f"Valid options:\n\n{', '.join(f'`{i}`' for i in sorted(constants.GOOGLE_CODES_TO_LANGUAGES.keys()))}"
)
else:
g = GoogleTranslator(source="auto", target=dest)
output = await funcs.funcToCoro(g.translate, text)
e = Embed(title="Translation", description=funcs.formatting(output))
except Exception as ex:
funcs.printError(ctx, ex)
e = funcs.errorEmbed(None, "An error occurred. Invalid input?")
await ctx.reply(embed=e)
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="currency", description="Converts the price of one currency to another.",
                      aliases=["fiat", "cc", "convertcurrency", "currencyconvert"], usage="<from currency> <to currency> [amount]")
    async def currency(self, ctx, fromC, toC, *, amount: str="1"):
        # Converts an amount between two currencies, pivoting through EUR
        # (exchangeratesapi's base). Unknown fiat codes fall back to CoinGecko
        # crypto tickers via the bare-except branches below.
        try:
            output = [fromC.upper(), toC.upper(), amount]
            res = await funcs.getRequest("http://api.exchangeratesapi.io/v1/latest",
                                         params={"access_key": config.exchangeratesapiKey})
            data = res.json()
            amount = float(output[2].replace(",", "").replace(" ", ""))
            initialamount = amount
            fromCurrency = output[0]
            toCurrency = output[1]
            coingecko = "https://api.coingecko.com/api/v3/coins/markets"
            if fromCurrency != "EUR":
                try:
                    # rates[X] is X-per-EUR, so dividing converts amount into EUR.
                    amount /= data["rates"][fromCurrency]
                except:
                    # Not a known fiat code: treat it as a crypto ticker (EUR price).
                    res = await funcs.getRequest(
                        coingecko, params={"ids": self.client.tickers[fromCurrency.casefold()], "vs_currency": "EUR"}
                    )
                    cgData = res.json()
                    amount *= cgData[0]["current_price"]
            if toCurrency != "EUR":
                try:
                    amount *= data["rates"][toCurrency]
                except:
                    res = await funcs.getRequest(
                        coingecko, params={"ids": self.client.tickers[toCurrency.casefold()], "vs_currency": "EUR"}
                    )
                    cgData = res.json()
                    if fromCurrency.upper() == toCurrency.upper():
                        # Same currency on both sides: skip the round trip entirely.
                        amount = float(initialamount)
                    else:
                        amount /= cgData[0]["current_price"]
            await ctx.reply(
                f"The current price of **{funcs.removeDotZero(initialamount)} {fromCurrency}** in **{toCurrency}**: " +
                f"`{funcs.removeDotZero(amount)}`"
            )
        except Exception as ex:
            funcs.printError(ctx, ex)
            await ctx.reply(embed=funcs.errorEmbed(None, "Invalid input or unknown currency."))
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="wiki", description="Returns a Wikipedia article.",
                      aliases=["wikipedia"], usage="<article title (case-sensitive)>")
    async def wiki(self, ctx, *, page: str=""):
        """Fetch a Wikipedia article's intro extract and reply with an embed (truncated to ~1000 chars)."""
        if page == "":
            e = funcs.errorEmbed(None, "Cannot process empty input.")
        else:
            wikiurl = "https://en.wikipedia.org/w/api.php?format=json&action=query" + \
                      "&prop=extracts&exintro&explaintext&redirects=1&titles="
            try:
                res = await funcs.getRequest(f"{wikiurl}{page.replace(' ', '_')}")
                data = res.json()
                wikipage = data["query"]
                # Page id "-1" means no such article; retry with Title Case.
                if list(wikipage["pages"])[0] == "-1":
                    res = await funcs.getRequest(f"{wikiurl}{page.replace(' ', '_').title()}")
                    data = res.json()
                    wikipage = data["query"]
                    if list(wikipage["pages"])[0] == "-1":
                        return await ctx.reply(embed=funcs.errorEmbed(None, "Invalid article."))
                # Disambiguation page ("X may refer to:"): follow its first listed entry.
                if wikipage["pages"][list(wikipage["pages"])[0]]["extract"].casefold().startswith(f"{page} may refer to:\n\n"):
                    try:
                        splitthing = f"may refer to:\n\n"
                        page = wikipage["pages"][list(wikipage["pages"])[0]]["extract"].split(
                            splitthing, 1
                        )[1].split("\n", 1)[1].split(",", 1)[0]
                        res = await funcs.getRequest(f"{wikiurl}{page.replace(' ', '_')}")
                        data = res.json()
                        wikipage = data["query"]
                        if wikipage["pages"][list(wikipage["pages"])[0]] == "-1":
                            return await ctx.reply(embed=funcs.errorEmbed(None, "Invalid article."))
                    except IndexError:
                        pass
                summary = wikipage["pages"][list(wikipage["pages"])[0]]["extract"]
                # Truncate long extracts so the embed field stays within limits.
                if len(summary) != len(wikipage["pages"][list(wikipage["pages"])[0]]["extract"][:1000]):
                    summary = wikipage["pages"][list(wikipage["pages"])[0]]["extract"][:1000] + "..."
                e = Embed(description="https://en.wikipedia.org/wiki/" +
                                      f"{wikipage['pages'][list(wikipage['pages'])[0]]['title'].replace(' ', '_')}"
                )
                e.set_author(name=wikipage["pages"][list(wikipage["pages"])[0]]["title"],
                             icon_url="https://cdn.discordapp.com/attachments/659771291858894849/" +
                                      "677853982718165001/1122px-Wikipedia-logo-v2.png")
                e.add_field(name="Extract", value=f"```{summary}```")
            except Exception as ex:
                funcs.printError(ctx, ex)
                e = funcs.errorEmbed(None, "Invalid input or server error.")
        await ctx.reply(embed=e)
    @commands.cooldown(1, 45, commands.BucketType.user)
    @commands.command(name="srctop10", aliases=["top10", "src", "speedruncom", "leaderboard", "lb", "sr"], hidden=True,
                      description="Shows the top 10 leaderboard for speedrun.com games.", usage="[speedrun.com game abbreviation]")
    async def srctop10(self, ctx, *, game: str="mc"):
        """Reply with the top-10 leaderboard for the first category of a speedrun.com game."""
        await ctx.send("Getting speedrun.com data. Please wait...")
        try:
            gameres = await funcs.getRequest(f"https://www.speedrun.com/api/v1/games/{game.casefold().replace(' ', '')}")
            game = gameres.json()["data"]
            gameName = game["names"]["international"]
            categories = None
            # Find the game's categories endpoint among its resource links.
            for i in game["links"]:
                if i["rel"] == "categories":
                    categories = i["uri"]
                    break
            if not categories:
                raise Exception
            catres = await funcs.getRequest(categories)
            cat = catres.json()["data"]
            lb = None
            catID, catName, catURL = None, None, None
            # Use the first category that exposes a leaderboard link.
            for i in cat:
                catName = i["name"]
                catURL = i["weblink"]
                for j in i["links"]:
                    if j["rel"] == "leaderboard":
                        lb = j["uri"]
                        break
                if lb:
                    break
            if not lb:
                raise Exception
            output = f"{catURL}\n"
            catres = await funcs.getRequest(lb)
            runs = catres.json()["data"]["runs"][:10]
            count = 0
            for i in runs:
                run = i["run"]
                count += 1
                d, h, m, s, ms = funcs.timeDifferenceStr(run["times"]["primary_t"], 0, noStr=True)
                names = ""
                for p in run["players"]:
                    try:
                        names += p["name"]
                    except:
                        # Registered users carry no inline name; resolve via their URI.
                        pres = await funcs.getRequest(p["uri"])
                        player = pres.json()["data"]
                        names += player["names"]["international"]
                    names += ", "
                names = names.replace("_", "\_")
                output += f"{count}. `{funcs.timeStr(d, h, m, s, ms)}` by [{names[:-2]}]({run['weblink']})\n"
            if not count:
                output += "No runs found."
            top = f"Top {count} - " if count else ""
            e = Embed(description=output)
            e.set_author(name=f"{top}{gameName} - {catName}",
                         icon_url="https://cdn.discordapp.com/attachments/771698457391136798/842103813585240124/src.png")
            if count:
                e.set_footer(text="Please use the link above to view the full leaderboards as well as other categories.")
            await ctx.reply(embed=e)
        except Exception as ex:
            funcs.printError(ctx, ex)
            await ctx.reply(embed=funcs.errorEmbed(None, "Server error or unknown game."))
    @commands.cooldown(1, 45, commands.BucketType.user)
    @commands.command(name="srcqueue", aliases=["queue", "speedrunqueue", "srqueue"], hidden=True,
                      description="Shows the run queue for speedrun.com games.", usage="[speedrun.com game abbreviation]")
    async def srcqueue(self, ctx, *, game: str="mc"):
        """List a speedrun.com game's unverified run queue as paged embeds (15 runs per page)."""
        await ctx.send("Getting speedrun.com data. Please wait...")
        try:
            gameres = await funcs.getRequest(f"https://www.speedrun.com/api/v1/games/{game.casefold().replace(' ', '')}")
            game = gameres.json()["data"]
            gameID = game["id"]
            gameName = game["names"]["international"]
            queue = []
            # Cache of category id -> category name to avoid repeat lookups.
            categories = {}
            queueres = await funcs.getRequest(
                f"https://www.speedrun.com/api/v1/runs?game={gameID}&status=new&embed=players&max=200"
            )
            queuedata = queueres.json()
            for i in queuedata["data"]:
                queue.append(i)
                cat = i["category"]
                if cat not in categories:
                    catres = await funcs.getRequest(f"https://www.speedrun.com/api/v1/categories/{cat}")
                    categories[cat] = catres.json()["data"]["name"]
            # Follow pagination "next" links until the last page is collected.
            if queuedata["pagination"]["links"]:
                while queuedata["pagination"]["links"][-1]["rel"] == "next":
                    queueres = await funcs.getRequest(queuedata["pagination"]["links"][-1]["uri"])
                    queuedata = queueres.json()
                    for i in queuedata["data"]:
                        queue.append(i)
                        cat = i["category"]
                        if cat not in categories:
                            catres = await funcs.getRequest(f"https://www.speedrun.com/api/v1/categories/{cat}")
                            categories[cat] = catres.json()["data"]["name"]
            if queue:
                output = ""
                outputlist = []
                pagecount, count, run = 0, 0, 0
                total = len(queue) / 15
                for i in queue:
                    run += 1
                    d, h, m, s, ms = funcs.timeDifferenceStr(i["times"]["primary_t"], 0, noStr=True)
                    names = ""
                    for player in i["players"]["data"]:
                        try:
                            names += player["names"]["international"]
                        except:
                            names += player["name"]
                        names += ", "
                    names = names.replace("_", "\_")
                    output += f"{'{:,}'.format(run)}. [{categories[i['category']]}]({i['weblink']}) " + \
                              f"in `{funcs.timeStr(d, h, m, s, ms)}` by {names[:-2]}\n"
                    count += 1
                    # Flush an embed page every 15 runs, or at the end of the queue.
                    if count == 15 or run == len(queue):
                        pagecount += 1
                        e = Embed(description=output)
                        e.set_author(
                            name=f"Unverified Runs ({'{:,}'.format(len(queue))}) - {gameName}",
                            icon_url="https://cdn.discordapp.com/attachments/771698457391136798/842103813585240124/src.png"
                        )
                        e.set_footer(text="Page {:,} of {:,}".format(pagecount, funcs.strictRounding(total)))
                        outputlist.append(e)
                        output = ""
                        count = 0
                m = await ctx.reply(embed=outputlist[0])
                await m.edit(view=PageButtons(ctx, self.client, m, outputlist))
            else:
                e = Embed(description="No runs found.")
                e.set_author(name=f"Unverified Runs ({'{:,}'.format(len(queue))}) - {gameName}",
                             icon_url="https://cdn.discordapp.com/attachments/771698457391136798/842103813585240124/src.png")
                await ctx.reply(embed=e)
        except Exception as ex:
            funcs.printError(ctx, ex)
            await ctx.reply(embed=funcs.errorEmbed(None, "Server error or unknown game."))
    @commands.cooldown(1, 3, commands.BucketType.user)
    @commands.command(name="urban", description="Looks up a term on Urban Dictionary.",
                      aliases=["ud", "urbandictionary"], usage="<term>")
    async def urban(self, ctx, *, term=""):
        """Look up *term* on Urban Dictionary and reply with one embed page per definition."""
        if term == "":
            return await ctx.reply(embed=funcs.errorEmbed(None, "Empty input."))
        else:
            try:
                res = await funcs.getRequest("http://api.urbandictionary.com/v0/define", params={"term": term})
                data = res.json()
                terms = data["list"]
                if not terms:
                    return await ctx.reply(embed=funcs.errorEmbed(None, "Unknown term."))
                embeds = []
                pagecount = 0
                for i, c in enumerate(terms):
                    pagecount += 1
                    # Urban Dictionary wraps cross-references in [brackets]; strip them.
                    example = c["example"].replace("[", "").replace("]", "")
                    definition = c["definition"].replace("[", "").replace("]", "")
                    permalink = c["permalink"]
                    word = c["word"]
                    author = c["author"]
                    writtenon = funcs.timeStrToDatetime(c["written_on"])
                    e = Embed(description=permalink)
                    e.set_author(name=f'"{word}"', icon_url="https://cdn.discordapp.com/attachments/659771291858894849/" +
                                                            "669142387330777115/urban-dictionary-android.png")
                    e.add_field(name="Definition", value=funcs.formatting(definition, limit=1000))
                    if example:
                        e.add_field(name="Example", value=funcs.formatting(example, limit=1000))
                    if author:
                        e.add_field(name="Author", value=f"`{author}`")
                    e.add_field(name="Submission Time (UTC)", value=f"`{writtenon}`")
                    # Approval rate from vote counts; entries with zero votes fall back.
                    try:
                        ar = round(c['thumbs_up'] / (c['thumbs_up'] + c['thumbs_down']) * 100, 2)
                        e.set_footer(
                            text="Approval rate: {}% ({:,} 👍 - ".format(ar, c['thumbs_up']) +
                                 "{:,} 👎)\n".format(c['thumbs_down']) +
                                 "Page {:,} of {:,}".format(i + 1, len(terms))
                        )
                    except ZeroDivisionError:
                        e.set_footer(text="Approval rate: n/a (0 👍 - 0 👎)\nPage {:,} of {:,}".format(i + 1, len(terms)))
                    embeds.append(e)
                m = await ctx.reply(embed=embeds[0])
                await m.edit(view=PageButtons(ctx, self.client, m, embeds))
            except Exception as ex:
                funcs.printError(ctx, ex)
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="lyrics", description="Gets the lyrics of a song from Genius.",
                      aliases=["lyric", "song", "genius"], usage="<song keywords>")
    async def lyrics(self, ctx, *, keywords):
        """Search Genius for *keywords* and reply with the top hit's lyrics as paged embeds."""
        try:
            await ctx.send("Getting lyrics. Please wait...")
            try:
                res = await funcs.getRequest("https://api.genius.com/search",
                                             params={"q": keywords, "access_token": config.geniusToken})
                data2 = res.json()["response"]["hits"][0]["result"]
            except:
                return await ctx.send(embed=funcs.errorEmbed(None, "Unknown song."))
            author = data2["artist_names"]
            title = data2["title_with_featured"]
            link = data2["url"]
            thumbnail = data2["song_art_image_thumbnail_url"]
            song = await funcs.funcToCoro(Genius(config.geniusToken).search_song, author, title)
            # Strip the marker Genius appends to scraped lyrics, then split the
            # text into chunks that fit an embed description (2048 chars).
            originallyric = funcs.multiString(song.lyrics.replace("EmbedShare URLCopyEmbedCopy", ""), limit=2048)
            embeds = []
            pagecount = 0
            for p in originallyric:
                pagecount += 1
                e = Embed(description=p, title=f"{author} - {title}"[:256])
                e.set_thumbnail(url=thumbnail)
                e.add_field(name="Genius Link", value=link)
                e.set_footer(text="Page {:,} of {:,}".format(pagecount, len(originallyric)))
                embeds.append(e)
            m = await ctx.reply(embed=embeds[0])
            await m.edit(view=PageButtons(ctx, self.client, m, embeds))
        except Exception as ex:
            funcs.printError(ctx, ex)
            await ctx.reply(embed=funcs.errorEmbed(None, "Server error or song doesn't have lyrics."))
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.command(name="qrgen", description="Generates a QR code.", aliases=["qrg", "genqr", "qr", "qrc"],
usage='<input> ["QRcolour=black"]\n\nNote: Add "QRcolour=black" at the end to make the QR code black.')
async def qrgen(self, ctx, *, text):
black = text.split(" ")[-1] == "QRcolour=black"
if black:
text = text[:-14]
while text.endswith(" "):
text = text[:-1]
imgName = f"{time()}.png"
image = None
try:
e = Embed(title="QR Code")
qr = QRCode()
qr.add_data(text)
qr.make(fit=True)
if black:
img = qr.make_image(fill_color="white", back_color="black")
else:
img = qr.make_image(fill_color="black", back_color="white")
img.save(f"{funcs.PATH}/temp/{imgName}")
image = File(f"{funcs.PATH}/temp/{imgName}")
e.set_image(url=f"attachment://{imgName}")
except Exception as ex:
funcs.printError(ctx, ex)
e = funcs.errorEmbed(None, "Invalid input.")
await ctx.reply(embed=e, file=image)
await funcs.deleteTempFile(imgName)
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="qrread", description="Reads a QR code.", aliases=["qrscan", "qrr", "readqr"],
                      usage="<image URL OR image attachment>")
    async def qrread(self, ctx):
        """Decode a QR code from the message's attachment (or embedded image URL) and reply with its contents."""
        await ctx.send("Reading image. Please wait... " +
                       "(URL embeds take longer to process than image attachments)")
        # Give Discord a moment to resolve a plain URL into an embed thumbnail.
        if not ctx.message.attachments:
            await sleep(3)
        if ctx.message.attachments or ctx.message.embeds:
            try:
                qrlink = ctx.message.attachments[0].url if ctx.message.attachments else ctx.message.embeds[0].thumbnail.url
                qr = await funcs.decodeQR(qrlink)
                e = Embed(title="QR Code Message", description=funcs.formatting(qr)) if qr \
                    else funcs.errorEmbed(None, "Cannot detect QR code. Maybe try making the image clearer?")
            except Exception as ex:
                funcs.printError(ctx, ex)
                e = funcs.errorEmbed(None, str(ex))
        else:
            e = funcs.errorEmbed(None, "No attachment or URL detected, please try again.")
        await ctx.reply(embed=e)
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="compile", description="Compiles code.", aliases=["comp"])
    async def compile(self, ctx):
        """Interactively collect a language, source code, and file name, then run the code via the glot.io API."""
        try:
            res = await funcs.getRequest("https://run.glot.io/languages", verify=False)
            data = res.json()
            languages = [i["name"] for i in data]
            output = ", ".join(f'`{j}`' for j in languages)
            language = ""
            option = None
            await ctx.reply(embed=Embed(title="Please select a language below or input `quit` to quit...",
                                        description=output))
            # Step 1: prompt until a valid language (or "quit") is entered.
            while language not in languages and language != "quit":
                try:
                    option = await self.client.wait_for(
                        "message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel, timeout=120
                    )
                    # Normalise common spellings, e.g. "C#" -> "csharp", "C++" -> "cpp".
                    language = option.content.casefold().replace(" ", "").replace("#", "sharp") \
                        .replace("♯", "sharp").replace("++", "pp")
                    language = "javascript" if language == "js" else language
                    if language not in languages and language != "quit":
                        await option.reply(embed=funcs.errorEmbed(None, "Invalid language."))
                except TimeoutError:
                    return await ctx.send("Cancelling compilation...")
            if language == "quit":
                return await option.reply("Cancelling compilation...")
            versionurl = f"https://run.glot.io/languages/{language}"
            res = await funcs.getRequest(versionurl, verify=False)
            data = res.json()
            url = data["url"]
            # Step 2: collect the code itself (message text or .txt attachment).
            await option.reply("**You have 15 minutes to type out your code. Input `quit` to quit.**")
            code = None
            try:
                option = await self.client.wait_for(
                    "message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel, timeout=900
                )
                content = option.content
                try:
                    if option.attachments:
                        content = await funcs.readTxtAttachment(option)
                    # Strip code fences and smart quotes pasted from rich-text clients.
                    code = content.replace("```", "").replace('“', '"').replace('”', '"').replace("‘", "'").replace("’", "'")
                    if code == "quit":
                        return await option.reply("Cancelling compilation...")
                except:
                    pass
            except TimeoutError:
                return await ctx.send("Cancelling compilation...")
            # Step 3: ask for a file name, then submit the job to glot.io.
            await option.reply("**Please enter your desired file name including the extension.** (e.g. `main.py`)")
            try:
                option = await self.client.wait_for(
                    "message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel, timeout=120
                )
                filename = option.content
            except TimeoutError:
                return await ctx.send("Cancelling compilation...")
            data = {"files": [{"name": filename, "content": code}]}
            headers = {
                "Authorization": f"Token {config.glotIoKey}",
                "Content-type": "application/json"
            }
            res = await funcs.postRequest(url=url, data=dumps(data), headers=headers, verify=False)
            try:
                data = res.json()
                stderr = data["stderr"]
                if stderr == "":
                    await option.reply(embed=Embed(title="Compilation", description=funcs.formatting(data["stdout"] or "None")))
                else:
                    await option.reply(embed=funcs.errorEmbed(data["error"].title(), funcs.formatting(stderr)))
            except AttributeError:
                # res has no .json() when the request layer gave up (e.g. timeout).
                await option.reply(embed=funcs.errorEmbed(None, "Code exceeded the maximum allowed running time."))
        except Exception as ex:
            funcs.printError(ctx, ex)
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.command(name="unix", description="Converts a unix timestamp to a proper date format in GMT.",
aliases=["time", "timestamp", "epoch", "gmt", "utc", "timezone"],
usage="[time zone (-12-14)] [timestamp value]")
async def unix(self, ctx, tz=None, timestamp=None):
mins = 0
if not tz:
tz = 0
else:
try:
tz = float(tz)
if not -12 <= tz <= 14:
raise Exception
if tz != int(tz):
mins = int((tz - int(tz)) * 60)
except:
return await ctx.reply(embed=funcs.errorEmbed(None, "Time zone must be -12-14 inclusive."))
td = timedelta(hours=int(tz), minutes=mins)
if not timestamp:
timestamp = mktime(gmtime())
dt = datetime.fromtimestamp(timestamp) + td
timestamp = timegm((dt - td).timetuple())
else:
try:
timestamp = int(float(timestamp))
dt = datetime.utcfromtimestamp(timestamp) + td
except:
return await ctx.reply(embed=funcs.errorEmbed(None, "Invalid timestamp."))
timezone = "" if not tz and not mins else f"{'+' if tz > 0 else ''}{int(tz)}{f':{abs(mins)}' if mins else ''}"
await ctx.reply(funcs.formatting(str(dt) + f" (GMT{timezone})\n\nTimestamp: {int(timestamp)}"))
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.command(name="scisum", aliases=["science", "sci"], hidden=True,
description="Shows the science summary for the last month.")
async def scisum(self, ctx):
await ctx.reply("https://tiny.cc/sci-sum")
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="dict", description="Returns the definition(s) of a word.",
                      aliases=["dictionary", "def", "definition", "meaning", "define"],
                      usage="<language code> <word>")
    async def dict(self, ctx, langcode, *, word):
        """Look up *word* on dictionaryapi.dev for the given language code and reply with its definitions."""
        codes = ["en", "hi", "es", "fr", "ja", "ru", "de", "it", "ko", "pt-BR", "ar", "tr"]
        languages = [
            "English", "Hindi", "Spanish", "French", "Japanese", "Russian", "German",
            "Italian", "Korean", "Brazilian Portuguese", "Arabic", "Turkish"
        ]
        # "pt-BR" is the only mixed-case code and must be passed through verbatim.
        langcode = langcode.casefold() if langcode != "pt-BR" else langcode
        if langcode not in codes:
            codesList = ", ".join(f"`{code}` ({languages[codes.index(code)]})" for code in codes)
            e = funcs.errorEmbed("Invalid language code!", f"Valid options:\n\n{codesList}")
        else:
            try:
                res = await funcs.getRequest(f"https://api.dictionaryapi.dev/api/v2/entries/{langcode}/{word}")
                data = res.json()
                word = data[0]["word"].title()
                output = ""
                # Flatten every definition of every meaning into one bulleted list.
                for i in data:
                    meanings = i["meanings"]
                    for j in meanings:
                        try:
                            partOfSpeech = f' [{j["partOfSpeech"]}]'
                        except:
                            partOfSpeech = ""
                        definitions = j["definitions"]
                        for k in definitions:
                            definition = k["definition"]
                            output += f"- {definition}{partOfSpeech}\n"
                e = Embed(title=f'"{word}"').add_field(name="Definition(s)", value=funcs.formatting(output[:-1], limit=1000))
            except Exception as ex:
                funcs.printError(ctx, ex)
                e = funcs.errorEmbed(None, "Unknown word.")
        await ctx.reply(embed=e)
    @commands.cooldown(1, 3, commands.BucketType.user)
    @commands.command(name="reddit", description="Looks up a community or user on Reddit.",
                      aliases=["subreddit", "r", "redditor"], usage="<r/subreddit OR u/redditor>")
    async def reddit(self, ctx, *, inp=""):
        """Look up a subreddit (r/...) or redditor (u/...) and reply with an info embed."""
        redditclient = Reddit(client_id=config.redditClientID, client_secret=config.redditClientSecret, user_agent="*")
        # Normalise full URLs and variants like "/r/x/", "r x" down to "r/x".
        inp = inp.casefold().replace(" ", "/")
        inp = inp.split("reddit.com/")[1] if "reddit.com/" in inp else inp
        while inp.startswith("/"):
            inp = inp[1:]
        while inp.endswith("/"):
            inp = inp[:-1]
        try:
            icon_url = "https://www.redditinc.com/assets/images/site/reddit-logo.png"
            if inp.startswith("r") and "/" in inp:
                subreddit = await redditclient.subreddit(inp.split("/")[-1], fetch=True)
                # NSFW communities are only shown in NSFW channels (DMs allowed).
                if subreddit.over18 and not isinstance(ctx.channel, channel.DMChannel) and not ctx.channel.is_nsfw():
                    e = funcs.errorEmbed("NSFW/Over 18!", "Please view this community in an NSFW channel.")
                else:
                    tags = [
                        i for i in [
                            "Link Flairs" if subreddit.can_assign_link_flair else 0,
                            "User Flairs" if subreddit.can_assign_user_flair else 0,
                            "Spoilers Enabled" if subreddit.spoilers_enabled else 0,
                            "NSFW" if subreddit.over18 else 0
                        ] if i
                    ]
                    e = Embed(description=f"https://www.reddit.com/r/{subreddit.display_name}" + " ([Old Reddit](" +
                                          f"https://old.reddit.com/r/{subreddit.display_name}))")
                    e.set_author(icon_url=icon_url, name="r/" + subreddit.display_name)
                    if tags:
                        e.add_field(name="Tags", value=", ".join(f"`{i}`" for i in tags))
                    e.set_footer(text=subreddit.public_description)
                    dt = datetime.utcfromtimestamp(subreddit.created_utc)
                    e.add_field(name="Creation Date", value=funcs.dateBirthday(dt.day, dt.month, dt.year))
                    e.add_field(name="Subscribers", value="`{:,}`".format(subreddit.subscribers))
                    async for submission in subreddit.new(limit=1):
                        sauthor = submission.author or "[deleted]"
                        if sauthor != "[deleted]":
                            sauthor = sauthor.name
                        e.add_field(
                            name="Latest Post ({:,} point{}; from u/{})".format(
                                submission.score, "" if submission.score == 1 else "s", sauthor
                            ),
                            value=f"https://www.reddit.com{submission.permalink}" + " ([Old Reddit](" +
                                  f"https://old.reddit.com{submission.permalink}))",
                            inline=False
                        )
            elif inp.startswith("u") and "/" in inp:
                redditor = await redditclient.redditor(inp.split("/")[-1], fetch=True)
                # Suspended accounts expose is_suspended; on normal accounts the
                # attribute access raises, and the full tag set is built instead.
                try:
                    suspended = redditor.is_suspended
                    tags = ["Suspended"]
                    nickname = ""
                except:
                    suspended = False
                    tags = [
                        i for i in [
                            "Verified" if redditor.has_verified_email else 0,
                            "Reddit Employee" if redditor.is_employee else 0,
                            "Moderator" if redditor.is_mod else 0,
                            "Gold" if redditor.is_gold else 0,
                            "NSFW" if redditor.subreddit["over_18"] else 0
                        ] if i
                    ]
                    nickname = redditor.subreddit["title"]
                if "NSFW" in tags and not isinstance(ctx.channel, channel.DMChannel) and not ctx.channel.is_nsfw():
                    e = funcs.errorEmbed("NSFW/Over 18!", "Please view this profile in an NSFW channel.")
                else:
                    e = Embed(description=f"https://www.reddit.com/user/{redditor.name}" + " ([Old Reddit](" +
                                          f"https://old.reddit.com/user/{redditor.name}))")
                    e.set_author(icon_url=icon_url, name="u/" + redditor.name + (f" ({nickname})" if nickname else ""))
                    if tags:
                        e.add_field(name="Tags", value=", ".join(f"`{i}`" for i in tags))
                    if not suspended:
                        lkarma = redditor.link_karma
                        ckarma = redditor.comment_karma
                        trophies = await redditor.trophies()
                        e.set_thumbnail(url=redditor.icon_img)
                        dt = datetime.utcfromtimestamp(redditor.created_utc)
                        e.add_field(name="Join Date", value=funcs.dateBirthday(dt.day, dt.month, dt.year))
                        e.add_field(name="Total Karma", value="`{:,}`".format(lkarma + ckarma))
                        e.add_field(name="Post Karma", value="`{:,}`".format(lkarma))
                        e.add_field(name="Comment Karma", value="`{:,}`".format(ckarma))
                        if trophies:
                            e.add_field(
                                name="Trophies ({:,})".format(len(trophies)),
                                value=", ".join(f"`{trophy.name}`" for trophy in trophies[:50])
                                      + ("..." if len(trophies) > 50 else ""),
                                inline=False
                            )
                        async for submission in redditor.submissions.new(limit=1):
                            e.add_field(
                                name=f"Latest Post (on r/{submission.subreddit.display_name}; " +
                                     f"{'{:,}'.format(submission.score)} point{'' if submission.score == 1 else 's'})",
                                value=f"https://www.reddit.com{submission.permalink}" + " ([Old Reddit](" +
                                      f"https://old.reddit.com{submission.permalink}))",
                                inline=False
                            )
                        async for comment in redditor.comments.new(limit=1):
                            e.add_field(
                                name=f"Latest Comment (on r/{comment.subreddit.display_name}; " +
                                     f"{'{:,}'.format(comment.score)} point{'' if comment.score == 1 else 's'})",
                                value=funcs.formatting(comment.body, limit=1000),
                                inline=False
                            )
                e.set_footer(text=redditor.subreddit["public_description"])
                e.set_image(url=redditor.subreddit["banner_img"])
            else:
                e = funcs.errorEmbed("Invalid input!", 'Please use `r/"subreddit name"` or `u/"username"`.')
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, "Invalid search.")
        await ctx.reply(embed=e)
    @commands.cooldown(1, 1, commands.BucketType.user)
    @commands.command(name="calc", description="Does simple math.",
                      aliases=["calculate", "calculator", "cal", "math", "maths", "safeeval"], usage="<input>")
    async def calc(self, ctx, *, inp):
        """Evaluate a math expression and reply with the result.

        Division by zero is an easter egg: a random snarky message is sent
        instead of an error.
        """
        try:
            e = Embed(description=funcs.formatting(funcs.removeDotZero(funcs.evalMath(inp))))
        except ZeroDivisionError:
            # Canned responses for the division-by-zero easter egg.
            answer = [
                "Stop right there, that's illegal!",
                "Wait hol up...",
                "FBI OPEN UP!!!",
                "LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO" +
                "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO" +
                "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOL",
                "You madlad...",
                "God damn you.",
                "......................................................",
                "Why the hell do you exist?",
                "Mate I think you've got issues.",
                "Are you okay?",
                "You tell me the answer.",
                "What is wrong with you?",
                "Disgraceful.",
                "Don't you dare.",
                "HOW DARE YOU?!?",
                "You bloody bastard...",
                "Do that again and I will stick that zero down your throat. Egg for breakfast, anyone?",
                "Get a life.",
                "Dio taxista Ronnosessuale dio animale porca di quella madonna vacca in calore rotta in settecento mila pezzi",
                "Naughty naughty naughty, you filthy old soomaka!",
                "Hey that's my yarbles! Give it back!",
                "*magic portal opens...*", "[magic humming]",
                "Go to the den.",
                "EXXXXXCCCCCCUUUUUSEEEE MEEE",
                "what", "wat", "wut", "Negative nothing", "屌", "No.", "no",
                "Der Mann sprach für seine Rechte\ner ist verstört, er ist ein egoistischer Gör!",
                "ENOUGH! Because of you, I almost lost my way! But everycreature here has reminded me of " +
                "the true power of friendship! There will always be darkness in the world, but there will " +
                "also always be those who find the light!",
                "Focusing on our differences keeps us divided! Villains and creatures use that division against us!",
                "SSSSHHHHHAAAAAAAAAAAHHDAAAHHHPPP",
                "YOU! YOU TRIPLE GREASY WALKING SECOND DINING COURSE, YOU'RE JUST A PHONY! YOU'RE A GIANT, MORALIST" +
                " PHONY WHO CAN'T TAKE CARE OF ANYONE, ESPECIALLY HIMSELF! YOU HAVE YOUR OWN DISCIPLINE UP YOUR OWN" +
                " ARSE AND YOU DON'T EVEN SEE IT!"
            ]
            # Best-effort extra response loaded from a resource file; failure
            # to read it only removes this option from the pool.
            try:
                answer.append(
                    (await funcs.readTxt(funcs.getResource(self.name, "copypasta.txt"))).replace("\*", "*")[:1994]
                )
            except Exception as ex:
                funcs.printError(ctx, ex)
                pass
            e = Embed(description=f"```{choice(answer)}```")
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, str(ex))
        await ctx.reply(embed=e)
@commands.cooldown(1, 1, commands.BucketType.user)
@commands.command(name="sqrt", usage="<input>", hidden=True,
aliases=["square", "root"], description="Calculates the square root of a given value or math expession.")
async def sqrt(self, ctx, *, val):
try:
e = Embed(description=funcs.formatting(funcs.removeDotZero(sqrt([funcs.evalMath(val)])[0])))
except Exception as ex:
funcs.printError(ctx, ex)
e = funcs.errorEmbed(None, str(ex))
await ctx.reply(embed=e)
    @commands.cooldown(1, 3, commands.BucketType.user)
    @commands.command(name="wordcount", description="Counts the number of words and characters in an input.",
                      aliases=["lettercount", "countletter", "countchar", "countletters", "char", "chars", "letters",
                               "charcount", "wc", "countword", "word", "words", "countwords", "letter"],
                      usage="<input OR text attachment>")
    async def wordcount(self, ctx, *, inp=""):
        """Count characters, words, and unique words in the input, a text attachment, or (fallback) a PDF attachment."""
        filename = f"{time()}"
        if ctx.message.attachments:
            try:
                inp = await funcs.readTxtAttachment(ctx.message)
                if not inp:
                    # Bare raise with no active exception raises RuntimeError,
                    # deliberately jumping to the PDF fallback below.
                    raise
            except:
                try:
                    attach = ctx.message.attachments[0]
                    filename += f"-{attach.filename}"
                    filepath = f"{funcs.PATH}/temp/(unknown)"
                    await attach.save(filepath)
                    pdf = await funcs.funcToCoro(open, filepath, "rb")
                    reader = PdfFileReader(pdf)
                    inp = ""
                    for page in range(reader.numPages):
                        pageobj = await funcs.funcToCoro(reader.getPage, page - 1)
                        inp += (await funcs.funcToCoro(pageobj.extractText))
                    await funcs.funcToCoro(pdf.close)
                except Exception as ex:
                    funcs.printError(ctx, ex)
                    # No-op: keep whatever text was extracted before the failure.
                    inp = inp
        if not inp:
            return await ctx.reply(embed=funcs.errorEmbed(None, "Cannot process empty input."))
        # Remove punctuation before splitting so it doesn't skew word counts.
        splt = funcs.replaceCharacters(inp, punctuation).split()
        e = Embed(title="Word Count")
        e.add_field(name="Characters", value="`{:,}`".format(len(inp.strip())))
        e.add_field(name="Words", value="`{:,}`".format(len(splt)))
        e.add_field(name="Unique Words", value="`{:,}`".format(len(set(splt))))
        e.set_footer(text="Note: This may not be 100% accurate.")
        await ctx.reply(embed=e)
        await funcs.deleteTempFile(filename)
    @commands.cooldown(1, 3, commands.BucketType.user)
    @commands.command(name="country", description="Shows information about a country.",
                      aliases=["location", "countries", "place", "nation"], usage="<country name OR code>")
    async def country(self, ctx, *, country):
        """Look up a country on restcountries.com (by name, falling back to code) and reply with its details."""
        msg = ctx.message
        try:
            try:
                res = await funcs.getRequest(
                    "https://restcountries.com/v2/name/" + country.casefold().replace("_", ""), verify=False
                )
                data = res.json()
                # Multiple matches: let the user pick by number (defaults to the
                # first match on timeout or invalid input).
                if len(data) > 1:
                    await ctx.reply(
                        "`Please select a number: " +
                        f"{', '.join(str(i) + ' (' + c['name'] + ')' for i, c in enumerate(data))}`"
                    )
                    try:
                        pchoice = await self.client.wait_for(
                            "message", check=lambda m: m.author == ctx.author and m.channel == ctx.channel, timeout=20
                        )
                        msg = pchoice
                        pchoice = int(pchoice.content) if -1 < int(pchoice.content) < len(data) else 0
                    except (TimeoutError, ValueError):
                        pchoice = 0
                else:
                    pchoice = 0
                data = data[pchoice]
            except Exception:
                # Name lookup failed; retry treating the input as a country code.
                res = await funcs.getRequest(
                    "https://restcountries.com/v2/alpha/" + country.casefold().replace("_", ""), verify=False
                )
                data = res.json()
            lat = data['latlng'][0]
            long = data['latlng'][1]
            e = Embed(title=f"{data['name']} ({data['alpha3Code']})")
            e.set_thumbnail(url=data["flags"]["png"])
            e.add_field(name="Native Name", value=f"`{data['nativeName']}`")
            e.add_field(name="Population", value="`{:,}`".format(data["population"]))
            e.add_field(name="Demonym", value=f"`{data['demonym']}`")
            e.add_field(
                name="Local Currency", value=", ".join(f"`{c['name']} ({c['code']} {c['symbol']})`" for c in data["currencies"])
            )
            # Optional fields: some entries omit gini/capital entirely.
            try:
                if data["gini"]:
                    e.add_field(name="Gini Coefficient", value=f"`{round(data['gini'] / 100, 3)}`")
            except:
                pass
            try:
                if data["capital"]:
                    e.add_field(name="Capital", value=f"`{data['capital']}`")
            except:
                pass
            e.add_field(
                name="Coordinates",
                value=f"`{str(round(lat, 2)).replace('-', '')}°{'N' if lat > 0 else 'S'}, " +
                      f"{str(round(long, 2)).replace('-', '')}°{'E' if long > 0 else 'W'}`"
            )
            e.add_field(name="Region", value=f"`{data['region']} ({data['subregion']})`")
            e.add_field(name="Land Area", value="`{:,} km² / {:,} mi²`".format(int(data["area"]), int(data["area"] * 0.386102159)))
            e.add_field(name="Calling Code", value=", ".join(f"`+{code}`" for code in data["callingCodes"]))
            e.add_field(name="Top Level Domain", value=", ".join(f"`{dom}`" for dom in data["topLevelDomain"]))
            e.add_field(name="Time Zones", value=", ".join(f"`{tz}`" for tz in data["timezones"]))
            e.set_footer(text="Note: The data provided may not be 100% accurate.")
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, "Invalid input or server error.")
        await msg.reply(embed=e)
@commands.cooldown(1, 1, commands.BucketType.user)
@commands.command(name="ip", description="Shows information about an IP address.",
aliases=["ipaddress"], hidden=True, usage="<IP address>")
async def ip(self, ctx, ip):
try:
res = await funcs.getRequest(f"http://ip-api.com/json/{ip}")
data = res.json()
e = Embed(title=data["query"])
e.add_field(name="City", value=f"`{data['city']}`")
e.add_field(name="Region", value=f"`{data['regionName']}`")
e.add_field(name="Country", value=f"`{data['country']} ({data['countryCode']})`")
e.add_field(name="Location", value=f"`{data['lat']}, {data['lon']}`")
if data['zip']:
e.add_field(name="Zip", value=f"`{data['zip']}`")
e.add_field(name="Time Zone", value=f"`{data['timezone']}`")
e.add_field(name="ISP", value=f"`{data['isp']}`")
except Exception as ex:
funcs.printError(ctx, ex)
e = funcs.errorEmbed(None, "Invalid input or server error.")
await ctx.reply(embed=e)
    @commands.cooldown(1, 3, commands.BucketType.user)
    @commands.command(name="element", description="Shows information about a chemical element.",
                      aliases=["elem", "chem", "chemical"], hidden=True, usage="<element symbol or name>")
    async def chemical(self, ctx, elementname):
        """Reply with an embed of data for a chemical element looked up by symbol or name.

        Tries the input as given, then retries in title case (e.g. "sodium" -> "Sodium")
        before giving up with an error embed.
        """
        try:
            # assumes element() is the mendeleev-style lookup imported at file top — TODO confirm
            elementobj = element(elementname)
        except:
            try:
                # Retry with normalised capitalisation.
                elementobj = element(elementname.title())
            except:
                return await ctx.reply(embed=funcs.errorEmbed(None, "Invalid element."))
        try:
            name = elementobj.name
            group = elementobj.group
            mp = elementobj.melting_point
            bp = elementobj.boiling_point
            desc = elementobj.description
            ar = elementobj.atomic_radius
            en = elementobj.electronegativity()
            try:
                # First ionisation energy; not available for every element.
                fi = elementobj.ionenergies[1]
            except:
                fi = None
            # Room temperature in the same unit as mp/bp (funcs.KELVIN presumably 273.15 — verify).
            roomtemp = funcs.KELVIN + 25
            # Classify the state at room temperature; missing melting/boiling data is
            # treated as a synthetic ("Artificial") element.
            if not mp or not bp:
                state = "Artificial"
            elif mp > roomtemp:
                state = "Solid"
            elif mp < roomtemp < bp:
                state = "Liquid"
            else:
                state = "Gas"
            e = Embed(title=f"{name} ({elementobj.symbol})", description=desc if desc else "")
            e.set_thumbnail(url=f"https://images-of-elements.com/t/{name.casefold()}.png")
            e.add_field(name="Protons", value=f"`{elementobj.protons}`")
            e.add_field(name="Neutrons", value=f"`{elementobj.neutrons}`")
            e.add_field(name="Electrons", value=f"`{elementobj.electrons}`")
            e.add_field(name="Atomic Mass", value=f"`{funcs.removeDotZero(elementobj.atomic_weight)}`")
            e.add_field(name="Period", value=f"`{elementobj.period}`")
            try:
                # group may be None for some elements; the attribute access then raises.
                gn = group.name
                e.add_field(name="Group", value=f"`{group.symbol}{(' - ' + gn) if gn else ''}`")
            except:
                pass
            # Optional physical properties: only added when the data exists.
            if ar:
                e.add_field(name="Atomic Radius", value=f"`{funcs.removeDotZero(ar)}`")
            if en:
                e.add_field(name="Electronegativity", value=f"`{funcs.removeDotZero(en)}`")
            if fi:
                e.add_field(name="First Ionisation", value=f"`{funcs.removeDotZero(fi)}`")
            if mp:
                e.add_field(name="Melting Point", value=f"`{funcs.removeDotZero(mp)}`")
            if bp:
                e.add_field(name="Boiling Point", value=f"`{funcs.removeDotZero(bp)}`")
            e.add_field(name="State", value=f"`{state}`")
            e.add_field(name="Config", value=f"`{elementobj.econf}`")
            e.add_field(name="Discoverer", value=f"`{elementobj.discoverers}`")
            discoveryear = elementobj.discovery_year
            discoverlocation = elementobj.discovery_location
            if discoveryear or discoverlocation:
                # " in " is only inserted when both the year and the location are known.
                both = bool(discoveryear and discoverlocation)
                e.add_field(name="Discovered In",
                            value=f"`{discoveryear if discoveryear else ''}{' in ' if both else ''}" +
                                  f"{discoverlocation if discoverlocation else ''}`")
            await ctx.reply(embed=e)
        except Exception as ex:
            funcs.printError(ctx, ex)
            await ctx.reply(embed=funcs.errorEmbed(None, "Invalid element."))
    @commands.cooldown(1, 1, commands.BucketType.user)
    @commands.command(name="periodic", description="Shows the periodic table.",
                      aliases=["periotictable", "elements"], hidden=True)
    async def periodic(self, ctx):
        """Reply with a static image of the periodic table."""
        await funcs.sendImage(ctx, "https://media.discordapp.net/attachments/871621453521485864/882103596563431424/table.jpg")
    @commands.cooldown(1, 1, commands.BucketType.user)
    @commands.command(name="sohcahtoa", description="SOH CAH TOA.",
                      aliases=["trigonometry", "triggernometry", "sincostan", "sinecostan", "sine", "cos", "tan"], hidden=True)
    async def sohcahtoa(self, ctx):
        """Reply with a static SOH-CAH-TOA trigonometry reference image."""
        await funcs.sendImage(ctx, "https://media.discordapp.net/attachments/771404776410972161/954017475668885534/unknown.png")
    @commands.cooldown(1, 1, commands.BucketType.user)
    @commands.command(name="osi", description="Shows the OSI Model.",
                      aliases=["osimodel", "7layers"], hidden=True)
    async def osi(self, ctx):
        """Reply with a static image of the seven-layer OSI model."""
        await funcs.sendImage(ctx, "https://cdn.discordapp.com/attachments/771404776410972161/950404988369240104/unknown.png")
    @commands.cooldown(1, 1, commands.BucketType.user)
    @commands.command(name="normalbodytemp", description="Shows the normal body temperature range chart.",
                      aliases=["bodytemp", "nbt"], hidden=True)
    async def normalbodytemp(self, ctx):
        """Reply with a static chart of normal human body temperature ranges."""
        await funcs.sendImage(ctx, "https://cdn.discordapp.com/attachments/771404776410972161/851367517241999380/image0.jpg")
@commands.cooldown(1, 2, commands.BucketType.user)
@commands.command(description="Gets information and generates a citation for an article via DOI number.",
aliases=["reference", "ref", "citation", "doi", "cit", "altmetric", "altmetrics", "cite", "art"],
usage="<DOI number> [citation style]", name="article")
async def article(self, ctx, doi, style="apa"):
await ctx.send("Getting article data. Please wait...")
doi = f'https://doi.org/{funcs.replaceCharacters(doi, ["https://doi.org/", "doi:", "doi.org/"])}'.casefold()
while doi.endswith("."):
doi = doi[:-1]
style = style.casefold()
style = "chicago-author-date" if style.startswith("chig") or style.startswith("chic") else style
style = "multidisciplinary-digital-publishing-institute" if style.startswith("mdpi") else style
cmd = f'curl -LH "Accept: text/x-bibliography; style={style}" "{doi}"'
try:
obj = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=False if system() == "Windows" else True)
res = obj.stdout.read().decode("utf-8").split("\n")
if res[-1]:
res.append("")
res = "".join(i.replace("\n", "") for i in res[4:-1])
if res.startswith(("<", " ")) or '{"status"' in res or not res:
raise Exception("Invalid DOI number or server error.")
while " " in res:
res = res.replace(" ", " ")
if "java.lang.Thread.run" in res:
res = "Invalid citation style!"
doi = doi.replace('"', "")
desc = doi + "\nhttps://sci-hub.mksa.top/" + doi.replace("https://doi.org/", "") + "\n"
e = Embed(title="Article", description=desc + funcs.formatting(res))
obj.kill()
doi = doi.split("doi.org/")[1]
try:
altmetricdata = await funcs.getRequest("https://api.altmetric.com/v1/doi/" + doi, verify=False)
altmetric = altmetricdata.json()
desc += altmetric["details_url"] + "\n"
e.description = desc + funcs.formatting(res)
if len(altmetric["title"]) < 257:
e.title = altmetric["title"]
e.set_thumbnail(url=altmetric["images"]["large"])
try:
e.add_field(name='Authors ({:,})'.format(len(altmetric["authors"])),
value=", ".join(f"`{author}`" for author in altmetric["authors"][:10])
+ ("..." if len(altmetric["authors"]) > 10 else ""))
except:
pass
try:
e.add_field(name="Journal",
value=f"`{altmetric['journal']} (ISSN: {'/'.join(issn for issn in altmetric['issns'])})`")
except:
pass
if altmetric["published_on"] < 0:
pub = (datetime(1970, 1, 1) + timedelta(seconds=altmetric["published_on"])).date()
else:
pub = datetime.utcfromtimestamp(int(altmetric["published_on"])).date()
e.add_field(name="Publish Date", value="`%s %s %s`" % (pub.day, funcs.monthNumberToName(pub.month), pub.year))
try:
e.add_field(name="PMID", value=f"`{altmetric['pmid']}`")
except:
pass
citations = [
{"field": "cited_by_msm_count", "name": "News Outlet"},
{"field": "cited_by_tweeters_count", "name": "Twitter"},
{"field": "cited_by_feeds_count", "name": "Blog"},
{"field": "cited_by_wikipedia_count", "name": "Wikipedia"},
{"field": "cited_by_videos_count", "name": "Video"},
{"field": "cited_by_rdts_count", "name": "Reddit"},
{"field": "cited_by_fbwalls_count", "name": "Facebook"},
{"field": "cited_by_gplus_count", "name": "Google+"},
{"field": "cited_by_qna_count", "name": "Q&A Thread"},
{"field": "cited_by_rh_count", "name": "Research Highlight"},
{"field": "cited_by_policies_count", "name": "Policy Source"},
{"field": "cited_by_book_reviews_count", "name": "Book Review"}
]
for i in citations:
try:
if altmetric[i["field"]]:
e.add_field(name=f"{i['name']} Mentions", value="`{:,}`".format(altmetric[i["field"]]))
except:
pass
e.set_footer(text="Last updated: {} UTC".format(str(datetime.utcfromtimestamp(int(altmetric["last_updated"])))),
icon_url="https://secure.gravatar.com/avatar/97869aff9f24c5d0e1e44b55a274631a")
except JSONDecodeError:
e.set_footer(text="Note: No Altmetric data available for this article.")
try:
dimensionsdata = await funcs.getRequest("https://metrics-api.dimensions.ai/doi/" + doi, verify=False)
dimensions = dimensionsdata.json()
if dimensions["times_cited"]:
e.add_field(name="Citations", value="`{:,}`".format(dimensions["times_cited"]))
if dimensions["recent_citations"]:
e.add_field(name="Citations (2y)", value="`{:,}`".format(dimensions["recent_citations"]))
if dimensions["times_cited"] or dimensions["recent_citations"]:
e.description = f"{desc}https://badge.dimensions.ai/details/doi/{doi}\n{funcs.formatting(res)}"
except:
pass
except Exception as ex:
funcs.printError(ctx, ex)
e = funcs.errorEmbed(None, str(ex))
await ctx.reply(embed=e)
    @commands.cooldown(1, 3, commands.BucketType.user)
    @commands.command(name="quartile", usage='<numbers separated with ;> ["all" to show all points]',
                      aliases=["avg", "average", "mean", "median", "mode", "q1", "q2",
                               "q3", "range", "sd", "iqr", "quartiles", "boxplot", "box", "qir"],
                      description="Computes statistical data from a set of numerical values.",)
    async def quartile(self, ctx, *, items):
        """Compute summary statistics for a `;`-separated list of numbers and attach a box plot.

        A trailing "all" makes the plot show every data point.  The plot is
        rendered to a temp file, attached to the reply, then deleted.
        """
        imgName = f"{time()}.png"
        image = None
        try:
            # Commas act as separators only when no semicolons were used;
            # otherwise they are treated as thousands separators and stripped.
            if ";" not in items:
                items = items.replace(",", ";")
            else:
                items = items.replace(",", "")
            if items.casefold().endswith("all"):
                boxpoints = "all"
                items = items[:-3]
                while items.endswith(" "):
                    items = items[:-1]
            else:
                boxpoints = False
            # Trim stray separators and normalise whitespace/empty entries.
            while items.startswith(";"):
                items = items[1:]
            while items.endswith(";"):
                items = items[:-1]
            # NOTE(review): this loop looks like it should collapse double spaces
            # ("  " -> " "); as rendered here the replace is a no-op — verify the
            # literals were not mangled in transit.
            while " " in items:
                items = items.replace(" ", " ")
            while "; ;" in items:
                items = items.replace("; ;", ";")
            while ";;" in items:
                items = items.replace(";;", ";")
            itemslist = items.split(";")
            if "" in itemslist:
                raise Exception("Invalid input. Please separate the items with `;`.")
            while " " in itemslist:
                itemslist.remove(" ")
            data = array(list(map(float, [i.strip() for i in itemslist])))
            data.sort()
            # Q1/Q3 via medians of the lower/upper halves (exclusive of the middle value).
            halflist = int(len(data) // 2)
            q3 = median(data[-halflist:])
            q1 = median(data[:halflist])
            e = Embed(title="Quartile Calculator",
                      description=f'Requested by: {ctx.author.mention}\n' +
                                  f'{funcs.formatting("; ".join(funcs.removeDotZero(float(i)) for i in data))}')
            e.add_field(name="Total Values", value="`{:,}`".format(len(data)))
            e.add_field(name="Mean", value=f'`{funcs.removeDotZero(mean(data))}`')
            try:
                # statistics.mode raises when the data is multimodal (pre-3.8 behaviour).
                e.add_field(name="Mode", value=f'`{funcs.removeDotZero(mode(data))}`')
            except:
                e.add_field(name="Mode", value="`None`")
            e.add_field(name="Q1", value=f'`{funcs.removeDotZero(q1)}`')
            e.add_field(name="Median (Q2)", value=f'`{funcs.removeDotZero(median(data))}`')
            e.add_field(name="Q3", value=f'`{funcs.removeDotZero(q3)}`')
            e.add_field(name="Interquartile Range", value=f'`{funcs.removeDotZero(q3 - q1)}`')
            e.add_field(name="Range", value=f'`{funcs.removeDotZero(max(data) - min(data))}`')
            e.add_field(name="Population SD", value=f'`{funcs.removeDotZero(pstdev(data))}`')
            e.add_field(name="Sample SD", value=f'`{funcs.removeDotZero(stdev(data))}`')
            e.add_field(name="Minimum Value", value=f'`{funcs.removeDotZero(min(data))}`')
            e.add_field(name="Maximum Value", value=f'`{funcs.removeDotZero(max(data))}`')
            e.add_field(name="Sum", value=f'`{funcs.removeDotZero(sum(data))}`')
            # Three traces so the user can compare plotly's quartile methods side by side.
            fig = go.Figure()
            fig.add_trace(go.Box(y=data, quartilemethod="linear", name="Linear Quartile"))
            fig.add_trace(go.Box(y=data, quartilemethod="inclusive", name="Inclusive Quartile"))
            fig.add_trace(go.Box(y=data, quartilemethod="exclusive", name="Exclusive Quartile"))
            fig.update_traces(boxpoints=boxpoints, jitter=0.3)
            # write_image is blocking, so it is pushed onto a thread via funcToCoro.
            await funcs.funcToCoro(fig.write_image, f"{funcs.PATH}/temp/{imgName}")
            image = File(f"{funcs.PATH}/temp/{imgName}")
            e.set_image(url=f"attachment://{imgName}")
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, str(ex))
        await ctx.reply(embed=e, file=image)
        await funcs.deleteTempFile(imgName)
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.command(name="hcf", usage="<value #1 up to {:,}> <value #2 up to {:,}>".format(HCF_LIMIT, HCF_LIMIT),
aliases=["lcm", "gcf", "gcd", "hcd", "lcf", "hcm"],
description="Calculates the highest common factor and lowest common multiple of two values.")
async def hcf(self, ctx, number1, number2):
try:
a = int(float(number1))
b = int(float(number2))
if a > HCF_LIMIT or b > HCF_LIMIT:
raise ValueError
lst = sorted([a, b])
a, b = lst[0], lst[1]
hcf = 1
for i in range(2, a + 1):
if not a % i and not b % i:
hcf = i
lcm = int((a * b) / hcf)
await ctx.reply(f'The HCF of {funcs.removeDotZero(a)} and ' +
f'{funcs.removeDotZero(b)} is: **{funcs.removeDotZero(hcf)}' +
f'**\nThe LCM of {funcs.removeDotZero(a)} and ' +
f'{funcs.removeDotZero(b)} is: **{funcs.removeDotZero(lcm)}**')
except ValueError:
await ctx.reply(embed=funcs.errorEmbed(None, "Invalid input. Values must be {:,} or below.".format(HCF_LIMIT)))
    @commands.cooldown(1, 3, commands.BucketType.user)
    @commands.command(name="zodiac", description="Converts a date to its zodiac sign.", hidden=True,
                      aliases=["starsign", "horoscope", "zs"], usage="[month] [day]\n\nAlternative usage(s):\n\n- <zodiac sign>")
    async def zodiac(self, ctx, month: str="", day: str=""):
        """Show the zodiac sign for a month/day, or info for a named sign.

        With one argument and no day, the argument is treated as a sign name;
        otherwise missing month/day default to today's.
        """
        try:
            if month and not day:
                # Single argument: look it up as a zodiac sign name rather than a date.
                try:
                    z = funcs.getZodiacInfo(month)
                    e = Embed(title=z[2] + f" :{z[2].casefold().replace('scorpio', 'scorpius')}:")
                    e.add_field(name="Dates", value=f"`{z[1]}`")
                    e.set_image(url=z[0])
                except Exception as ex:
                    e = funcs.errorEmbed("Invalid zodiac!", str(ex))
            else:
                if not month:
                    month = month or datetime.now().month
                if not day:
                    day = day or datetime.now().day
                # Accept either a month number or a month name.
                try:
                    month = funcs.monthNumberToName(int(month))
                except:
                    month = funcs.monthNumberToName(funcs.monthNameToNumber(month))
                monthint = int(funcs.monthNameToNumber(month))
                try:
                    day = int(day)
                except:
                    # Accept ordinal suffixes such as "21st" / "3rd".
                    day = int(day[:-2])
                date = f"{month} {funcs.valueToOrdinal(day)}"
                # Reject impossible days for the month (February capped at 29).
                if day < 1 or day > 31 and monthint in [1, 3, 5, 7, 8, 10, 12] \
                        or day > 30 and monthint in [4, 6, 9, 11] \
                        or day > 29 and monthint == 2:
                    raise Exception
                z = funcs.dateToZodiac(date)
                e = Embed(title=f"{date} Zodiac Sign :{z.casefold().replace('scorpio', 'scorpius')}:")
                e.set_image(url=funcs.getZodiacInfo(z)[0])
                e.set_footer(text=z)
        except Exception:
            e = funcs.errorEmbed(None, "Invalid input.")
        await ctx.reply(embed=e)
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.command(name="chinesezodiac", description="Converts a year to its Chinese zodiac sign.", usage="[year]",
aliases=["cz", "zodiacchinese", "year", "yearofthe", "ly", "leap", "leapyear"], hidden=True)
async def chinesezodiac(self, ctx, year: str=""):
year = year or datetime.now().year
try:
year = int(year)
e = Embed(
title=f"{str(year) if year > 1 else str(year * -1 + 1) + ' B.C.'} Chinese Zodiac Sign",
description=funcs.formatting(funcs.yearToChineseZodiac(year))
)
ly = str(funcs.leapYear(year))
e.add_field(name="Leap Year", value=f"`{ly if ly != 'None' else 'Unknown'}`")
except Exception:
e = funcs.errorEmbed(None, "Invalid input.")
await ctx.reply(embed=e)
    @commands.cooldown(1, 2, commands.BucketType.user)
    @commands.command(description="Shows how far apart two dates are.", aliases=["weekday", "day", "days", "dates", "age", "today"],
                      usage="[date #1 day] [date #1 month] [date #1 year] [date #2 day] [date #2 month] [date #2 year]\n\n" +
                            "Alternative usage(s):\n\n- <days (+/-) from today OR weeks (+/- ending with w) from today>\n\n" +
                            "- <date day> <date month> <date year> <days (+/-) from date OR weeks (+/- ending with w) from date>",
                      name="date")
    async def date(self, ctx, day: str="", month: str="", year: str="", day2: str="", month2: str="", year2: str=""):
        """Compare two dates (or a date against an offset / today) and reply with the difference.

        A single argument is read as a day offset from today ("-7") or a week
        offset ("2w"); unspecified date components default to today's.
        """
        today = datetime.today()
        try:
            if day and not month and not year and not day2 and not month2 and not year2:
                # Single-argument mode: the value is an offset from today.
                try:
                    day1int = int(day)
                except ValueError:
                    # A trailing "w" means weeks, e.g. "3w" -> 21 days.
                    day1int = int(day[:-1]) * 7
                # Negative offsets get an extra day added to the difference below
                # — presumably to make "yesterday" read as 1 day; verify intent.
                neg1 = day1int < 0
                dateobj = datetime.today() + timedelta(days=day1int)
                month2 = month2 or datetime.now().month
                day2 = day2 or datetime.now().day
                year2 = year2 or datetime.now().year
                try:
                    month2 = funcs.monthNumberToName(int(month2))
                except:
                    month2 = funcs.monthNumberToName(funcs.monthNameToNumber(month2))
                dateobj2 = datetime(int(year2), int(funcs.monthNameToNumber(month2)), int(day2))
            else:
                neg1 = False
                # First date: fill missing components from today, accept month name or number.
                month = month or datetime.now().month
                day = day or datetime.now().day
                year = year or datetime.now().year
                try:
                    month = funcs.monthNumberToName(int(month))
                except:
                    month = funcs.monthNumberToName(funcs.monthNameToNumber(month))
                dateobj = datetime(int(year), int(funcs.monthNameToNumber(month)), int(day))
                if day2 and not month2 and not year2:
                    # Second value is an offset relative to the first date.
                    try:
                        day2int = int(day2)
                    except ValueError:
                        day2int = int(day2[:-1]) * 7
                    dateobj2 = dateobj + timedelta(days=day2int)
                else:
                    # Second date given explicitly; missing components default to today.
                    if not month2:
                        month2 = month2 or datetime.now().month
                    if not day2:
                        day2 = day2 or datetime.now().day
                    if not year2:
                        year2 = year2 or datetime.now().year
                    try:
                        month2 = funcs.monthNumberToName(int(month2))
                    except:
                        month2 = funcs.monthNumberToName(funcs.monthNameToNumber(month2))
                    dateobj2 = datetime(int(year2), int(funcs.monthNameToNumber(month2)), int(day2))
            # Order the pair so the difference is always non-negative.
            dateobjs = sorted([dateobj, dateobj2])
            delta = dateobjs[1] - dateobjs[0]
            daysint = delta.days + (1 if neg1 else 0)
            if dateobj.date() != today.date() and dateobj2.date() != today.date():
                # Neither date is today: show both explicitly.
                e = Embed(title="Two Dates")
                e.add_field(
                    name="Date #1",
                    value="`%s, %s %s %s`" % (
                        funcs.weekdayNumberToName(dateobjs[0].weekday()),
                        dateobjs[0].day,
                        funcs.monthNumberToName(dateobjs[0].month),
                        dateobjs[0].year
                    )
                )
                e.add_field(
                    name="Date #2",
                    value="`%s, %s %s %s`" % (
                        funcs.weekdayNumberToName(dateobjs[1].weekday()),
                        dateobjs[1].day,
                        funcs.monthNumberToName(dateobjs[1].month),
                        dateobjs[1].year
                    )
                )
                hastoday = False
            else:
                # One of the dates is today: title the embed with the other date.
                hastoday = True
                if today.date() == dateobj.date():
                    e = Embed(
                        title=f"{funcs.weekdayNumberToName(dateobj2.weekday())}, " +
                              f"{dateobj2.day} {funcs.monthNumberToName(dateobj2.month)} {dateobj2.year}"
                    )
                else:
                    e = Embed(
                        title=f"{funcs.weekdayNumberToName(dateobj.weekday())}, " +
                              f"{dateobj.day} {funcs.monthNumberToName(dateobj.month)} {dateobj.year}"
                    )
            if daysint:
                # Break the difference down into years/months/days, months/days, and plain days.
                years, months, daysfinal, monthsfinal, daysint = funcs.dateDifference(dateobjs[0].date(), dateobjs[1].date())
                res = f"== {'Difference From Today' if hastoday else 'Time Difference'} ==\n\n"
                if years:
                    res += "{:,} year{}, {} month{}, and {} day{}\nor ".format(
                        years, "" if years == 1 else "s", months, "" if months == 1 else "s", daysfinal, "" if daysfinal == 1 else "s"
                    )
                if monthsfinal:
                    res += "{:,} month{} and {} day{}\nor ".format(
                        monthsfinal, "" if monthsfinal == 1 else "s", daysfinal, "" if daysfinal == 1 else "s"
                    )
                res += "{:,} day{}".format(daysint, "" if daysint == 1 else "s")
                e.description = funcs.formatting(res)
            else:
                e.description = funcs.formatting("Today")
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, "Invalid input.")
        await ctx.reply(embed=e)
    @commands.cooldown(1, 1, commands.BucketType.user)
    @commands.command(name="iss", description="Gets information about the International Space Station and all humans in space.",
                      aliases=["space"], hidden=True)
    async def iss(self, ctx):
        """Reply with the ISS's current position and the list of humans in space (open-notify.org)."""
        try:
            issdata = await funcs.getRequest("http://api.open-notify.org/iss-now.json", verify=False)
            iss = issdata.json()["iss_position"]
            hisdata = await funcs.getRequest("http://api.open-notify.org/astros.json", verify=False)
            his = hisdata.json()["people"]
            # ISS launch date (Zarya module), 20 November 1998.
            dt = datetime(1998, 11, 20).date()
            e = Embed(description="https://en.wikipedia.org/wiki/International_Space_Station")
            e.set_author(name="The International Space Station",
                         icon_url="https://upload.wikimedia.org/wikipedia/commons/thumb/1/15/ISS_emblem.png/195px-ISS_emblem.png")
            e.add_field(name="Location", value=f"`{iss['latitude']}, {iss['longitude']}`")
            e.add_field(name="Launch Date", value=funcs.dateBirthday(dt.day, dt.month, dt.year))
            e.add_field(name="Speed", value="`7.66 km/s (27,600 km/h or 17,100 mph)`")
            if his:
                # Sort by craft, truncate to ~800 chars, then rsplit so no entry is cut mid-name.
                e.add_field(name="Humans in Space ({:,})".format(len(his)), inline=False,
                            value=", ".join(
                                f"`{i['name']} ({i['craft']})`" for i in sorted(his, key=lambda x: x["craft"])
                            )[:800].rsplit("`, ", 1)[0] + "`")
            e.set_image(url="https://cdn.discordapp.com/attachments/771698457391136798/926876797759537192/unknown.png")
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, "Server error.")
        await ctx.reply(embed=e)
    @commands.cooldown(1, 3, commands.BucketType.user)
    @commands.command(usage="<note #1 with octave (e.g. F#2)> <note #2 with octave (e.g. G5)>", hidden=True,
                      aliases=["octave", "note", "notes", "semitone", "semitones", "vocalrange", "octaves", "notesrange"],
                      name="noterange", description="Shows the range in octaves and semitones between two given musical notes.")
    async def noterange(self, ctx, *, noterange):
        """Show the interval (octaves + semitones) between two musical notes with octave numbers."""
        try:
            # NOTE(review): this loop looks like it should collapse double spaces
            # ("  " -> " "); as rendered the replace is a no-op — verify the
            # literals were not mangled in transit.
            while " " in noterange:
                noterange = noterange.replace(" ", " ")
            # Normalise separators (dashes/commas) down to a single space, then split the two notes.
            note1, note2 = funcs.replaceCharacters(noterange.strip().replace(",", ""), [" - ", " — ", "—"], " ").split(" ")
            # noteFinder presumably returns (display name, absolute semitone index) — verify.
            notes = sorted([funcs.noteFinder(note1), funcs.noteFinder(note2)], key=lambda x: x[1])
            diff = notes[1][1] - notes[0][1]
            # Identical notes are treated as invalid input.
            if not diff:
                raise Exception
            else:
                octaves = diff // 12
                semitones = diff % 12
                andsemitones = f" and {semitones} semitone{'' if semitones == 1 else 's'}"
                octavestr = f"{'{:,}'.format(octaves)} octave{'' if octaves == 1 else 's'}{andsemitones if semitones else ''}\nor "
                e = Embed(title=f"{notes[0][0]} — {notes[1][0]}",
                          description=funcs.formatting(
                              f"== Note Range ==\n\n{octavestr if octaves else ''}{'{:,}'.format(diff)} semitone" +
                              f"{'' if diff == 1 else 's'}"
                          ))
        except Exception as ex:
            funcs.printError(ctx, ex)
            e = funcs.errorEmbed(None, "Invalid input.")
        # Footer is attached to both the result and the error embed.
        e.set_footer(text="Notes: " + ", ".join(i for i in funcs.MUSICAL_NOTES))
        await ctx.reply(embed=e)
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.command(description="Adds a timestamp to a YouTube video link, " +
"useful for mobile users who cannot copy links with timestamps.", hidden=True,
aliases=["yt", "ytts", "ytt"], usage="<YouTube video link> <timestamp>", name="yttimestamp")
async def yttimestamp(self, ctx, link, timestamp):
if "youtu" not in link.casefold():
return await ctx.reply(embed=funcs.errorEmbed(None, "Not a YouTube link."))
s = 0
try:
for i in range(timestamp.count(":") + 1):
try:
spl = timestamp.rsplit(":", 1)
val = int(spl[1])
timestamp = spl[0]
except IndexError:
val = int(timestamp)
s += val * 60 ** i
except:
return await ctx.reply(embed=funcs.errorEmbed(None, "Invalid input."))
if "youtu.be" in link.casefold():
link = link.split('?')[0] + "?"
else:
link = link.split('&')[0] + "&"
await ctx.reply(f"<{link}t={s}>")
    @commands.cooldown(1, 3, commands.BucketType.user)
    @commands.command(description="Shows the age timeline of a hypothetical person born in a certain year up until adulthood.",
                      aliases=["agelist"], usage="[year (1500-2500)]\n\nAlternative usage(s):\n\n- <age (0-100)>", name="agelist", hidden=True)
    async def agelist(self, ctx, year=""):
        """List the year-by-year ages (birth to 25) of a person born in a given year.

        Input in 0-100 is treated as a current age instead of a birth year;
        with no input the birth year defaults to 18 years ago.
        """
        nowyear = datetime.today().year
        if not year:
            year = str(nowyear - 18)
        try:
            year = funcs.evalMath(year.replace(",", ""))
            # 0-100 reads as an age, anything else must be a year in 1500-2500.
            isage = 0 <= year <= 100
            if not 1500 <= year <= 2500 and not isage:
                return await ctx.reply(
                    embed=funcs.errorEmbed(None, "Year must be 1500-2500 inclusive, and age must be 0-100 inclusive.")
                )
            if isage:
                year = nowyear - year
        except:
            return await ctx.reply(embed=funcs.errorEmbed(None, "Invalid year."))
        # Milestone labels shown next to the matching age row.
        notableyears = {1: "infant", 2: "toddler", 4: "young child", 7: "child", 10: "older child", 13: "teenager", 18: "adult"}
        res = f"Born in {year}:\n"
        isfuture = False
        for age in range(0, 26):
            currentyear = year + age
            # Insert a "Future" divider before the first year past the current one.
            if currentyear > nowyear and not isfuture:
                res += "\n" if age else ""
                res += "\n== Future ==\n"
                isfuture = True
            # Ages are shown as a range ("17-18") since the birthday falls mid-year.
            res += f"\n- {currentyear}: {'baby' if not age else f'{age - 1}-{age}'}"
            if age in notableyears:
                res += f" ({notableyears[age]})"
        await ctx.reply(funcs.formatting(res))
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.command(name="google", description="Generates search URLs for Google, Bing, and DuckDuckGo.",
aliases=["search", "ddg", "duckduckgo", "lookup", "bing"], usage="<keywords>")
async def google(self, ctx, *, inp: str=""):
if not inp:
return await ctx.reply(embed=funcs.errorEmbed(None, "Empty input."))
param = parse.urlencode({"q": inp})
view = funcs.newButtonView(
2, label="Google", url=f"https://www.google.com/search?{param}", emoji=self.client.emoji["google"]
)
view = funcs.newButtonView(
2, label="Bing", url=f"https://www.bing.com/search?{param}", emoji=self.client.emoji["bing"], view=view
)
await ctx.reply(
f"Use the buttons below to search for `{inp}`.",
view=funcs.newButtonView(
2, label="DuckDuckGo", url=f"https://www.duckduckgo.com/?{param}", emoji=self.client.emoji["ddg"], view=view
)
)
    @commands.cooldown(1, 15, commands.BucketType.user)
    @commands.command(name="wolfram", description="Queries things using the Wolfram|Alpha API.",
                      aliases=["wolf", "wa", "wolframalpha", "query"], usage="<input>")
    async def wolfram(self, ctx, *, inp: str=""):
        """Query the Wolfram|Alpha v2 API and reply with a paginated embed of result pods."""
        if not inp:
            return await ctx.reply(embed=funcs.errorEmbed(None, "Empty input."))
        else:
            await ctx.send("Querying. Please wait...")
        try:
            params = {"appid": config.wolframID, "output": "json", "lang": "en", "input": inp}
            res = await funcs.getRequest("http://api.wolframalpha.com/v2/query", params=params)
            data = res.json()["queryresult"]
            e = Embed()
            e.set_author(icon_url="https://media.discordapp.net/attachments/771404776410972161/929386312765669376/wolfram.png",
                         name="Wolfram|Alpha Query")
            if data["success"]:
                imgs = []
                for i, c in enumerate(data["pods"]):
                    # Discord embeds allow at most 25 fields; also skip pods with no plaintext.
                    if c["subpods"][0]["plaintext"] and i < 25:
                        e.add_field(name=c["title"],
                                    value=funcs.formatting(c["subpods"][0]["plaintext"], limit=200),
                                    inline=False)
                    try:
                        imgs.append((c["subpods"][0]["img"]["src"], c["title"]))
                    except:
                        pass
                # One embed page per pod image, sharing the common field set.
                embeds = []
                for i, c in enumerate(imgs):
                    emb = e.copy()
                    emb.set_image(url=c[0])
                    emb.set_footer(text="{}\nPage {:,} of {:,}".format(c[1], i + 1, len(imgs)))
                    embeds.append(emb)
                # NOTE(review): embeds[0] raises IndexError when no pod had an image;
                # that falls through to the outer except and reports a server error.
                m = await ctx.reply(embed=embeds[0])
                return await m.edit(view=PageButtons(ctx, self.client, m, embeds))
            else:
                # Unsuccessful query: surface Wolfram's suggestions if it offered any.
                try:
                    e.add_field(name="Did You Mean", value=", ".join(f"`{i['val']}`" for i in data["didyoumeans"][:20]))
                except:
                    e.add_field(name="Tips", value=funcs.formatting("Check your spelling, and use English"))
                return await ctx.reply(embed=e)
        except Exception as ex:
            funcs.printError(ctx, ex)
            return await ctx.reply(embed=funcs.errorEmbed(None, "Server error or query limit reached."))
    @commands.cooldown(1, 10, commands.BucketType.user)
    @commands.command(name="blurface", description="Detects faces in an image and blurs them.", hidden=True,
                      aliases=["faceblur", "blurfaces", "anonymize", "anonymise", "blur"], usage="<image attachment>")
    async def blurface(self, ctx):
        """Blur detected faces in an attached image (processing is delegated to self.blurFace)."""
        if not ctx.message.attachments:
            return await ctx.reply(embed=funcs.errorEmbed(None, "No attachment detected."))
        await ctx.send("Blurring faces. Please wait...")
        await funcs.useImageFunc(ctx, self.blurFace)
setup = Utility.setup  # module-level alias — presumably the extension entry point discord.py's load_extension looks for; Utility is defined earlier in this file
| [
"urllib.parse.urlencode",
"src.utils.funcs.dateToZodiac",
"cv2.dnn.readNetFromCaffe",
"src.utils.funcs.errorEmbed",
"platform.system",
"asyncio.sleep",
"src.utils.funcs.monthNameToNumber",
"src.utils.funcs.leapYear",
"src.utils.funcs.printError",
"cv2.imread",
"time.gmtime",
"src.utils.funcs.v... | [((2643, 2666), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'seconds': '(2.0)'}), '(seconds=2.0)\n', (2653, 2666), False, 'from discord.ext import commands, tasks\n'), ((3899, 3948), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (3916, 3948), False, 'from discord.ext import commands, tasks\n'), ((3954, 4165), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""reminderdel"""', 'description': '"""Removes a reminder."""', 'usage': '"""<reminder ID>"""', 'aliases': "['reminderdelete', 'reminderemove', 'removereminder', 'deletereminder',\n 'delreminder', 'delremind']"}), "(name='reminderdel', description='Removes a reminder.',\n usage='<reminder ID>', aliases=['reminderdelete', 'reminderemove',\n 'removereminder', 'deletereminder', 'delreminder', 'delremind'])\n", (3970, 4165), False, 'from discord.ext import commands, tasks\n'), ((5231, 5280), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (5248, 5280), False, 'from discord.ext import commands, tasks\n'), ((5286, 5522), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""reminder"""', 'description': '"""Creates a reminder or shows a list of your reminders."""', 'aliases': "['remind', 'remindme', 'reminders']", 'usage': '"""[Xm/h/d (replace X with number of minutes/hours/days)] <message>"""'}), "(name='reminder', description=\n 'Creates a reminder or shows a list of your reminders.', aliases=[\n 'remind', 'remindme', 'reminders'], usage=\n '[Xm/h/d (replace X with number of minutes/hours/days)] <message>')\n", (5302, 5522), False, 'from discord.ext import commands, tasks\n'), ((12434, 12483), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (12451, 12483), False, 'from 
discord.ext import commands, tasks\n'), ((12489, 12610), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""piechart"""', 'description': '"""Generates a pie chart."""', 'aliases': "['pie', 'piegraph']", 'usage': '"""[title]"""'}), "(name='piechart', description='Generates a pie chart.',\n aliases=['pie', 'piegraph'], usage='[title]')\n", (12505, 12610), False, 'from discord.ext import commands, tasks\n'), ((13644, 13693), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (13661, 13693), False, 'from discord.ext import commands, tasks\n'), ((13699, 13824), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""linechart"""', 'description': '"""Generates a line chart."""', 'aliases': "['line', 'linegraph']", 'usage': '"""[title]"""'}), "(name='linechart', description='Generates a line chart.',\n aliases=['line', 'linegraph'], usage='[title]')\n", (13715, 13824), False, 'from discord.ext import commands, tasks\n'), ((14961, 15010), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (14978, 15010), False, 'from discord.ext import commands, tasks\n'), ((15016, 15137), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""barchart"""', 'description': '"""Generates a bar chart."""', 'aliases': "['bar', 'bargraph']", 'usage': '"""[title]"""'}), "(name='barchart', description='Generates a bar chart.',\n aliases=['bar', 'bargraph'], usage='[title]')\n", (15032, 15137), False, 'from discord.ext import commands, tasks\n'), ((16268, 16317), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (16285, 16317), False, 'from discord.ext import commands, tasks\n'), ((16323, 16553), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""github"""', 
'description': '"""Returns statistics about a GitHub repository."""', 'usage': '"""[username/repository]"""', 'aliases': "['loc', 'code', 'linesofcode', 'repository', 'repo', 'git', 'source',\n 'sourcecode']"}), "(name='github', description=\n 'Returns statistics about a GitHub repository.', usage=\n '[username/repository]', aliases=['loc', 'code', 'linesofcode',\n 'repository', 'repo', 'git', 'source', 'sourcecode'])\n", (16339, 16553), False, 'from discord.ext import commands, tasks\n'), ((17800, 17849), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (17817, 17849), False, 'from discord.ext import commands, tasks\n'), ((17855, 18017), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""covid"""', 'description': '"""Gets COVID-19 data."""', 'aliases': "['coronavirus', 'corona', 'covid19', 'cv', 'c19', 'cv19']", 'usage': '"""[location]"""'}), "(name='covid', description='Gets COVID-19 data.', aliases=[\n 'coronavirus', 'corona', 'covid19', 'cv', 'c19', 'cv19'], usage=\n '[location]')\n", (17871, 18017), False, 'from discord.ext import commands, tasks\n'), ((23785, 23834), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (23802, 23834), False, 'from discord.ext import commands, tasks\n'), ((23840, 23992), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""flightinfo"""', 'description': '"""Gets information about a flight."""', 'aliases': "['flight', 'flightradar']", 'usage': '"""<flight number>"""'}), "(name='flightinfo', description=\n 'Gets information about a flight.', aliases=['flight', 'flightradar'],\n usage='<flight number>')\n", (23856, 23992), False, 'from discord.ext import commands, tasks\n'), ((29867, 29916), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, 
commands.BucketType.user)\n', (29884, 29916), False, 'from discord.ext import commands, tasks\n'), ((29922, 30055), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""weather"""', 'description': '"""Finds the current weather of a location."""', 'aliases': "['w']", 'usage': '"""<location>"""'}), "(name='weather', description=\n 'Finds the current weather of a location.', aliases=['w'], usage=\n '<location>')\n", (29938, 30055), False, 'from discord.ext import commands, tasks\n'), ((32416, 32466), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(15)', 'commands.BucketType.user'], {}), '(1, 15, commands.BucketType.user)\n', (32433, 32466), False, 'from discord.ext import commands, tasks\n'), ((32472, 32743), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""translate"""', 'description': "('Translates text to a different language. ' +\n 'Translation may sometimes fail due to rate limit.')", 'aliases': "['t', 'translator', 'trans', 'tr', 'translation']", 'usage': '"""<language code to translate to> <input>"""'}), "(name='translate', description=\n 'Translates text to a different language. 
' +\n 'Translation may sometimes fail due to rate limit.', aliases=['t',\n 'translator', 'trans', 'tr', 'translation'], usage=\n '<language code to translate to> <input>')\n", (32488, 32743), False, 'from discord.ext import commands, tasks\n'), ((33748, 33797), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (33765, 33797), False, 'from discord.ext import commands, tasks\n'), ((33803, 34022), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""currency"""', 'description': '"""Converts the price of one currency to another."""', 'aliases': "['fiat', 'cc', 'convertcurrency', 'currencyconvert']", 'usage': '"""<from currency> <to currency> [amount]"""'}), "(name='currency', description=\n 'Converts the price of one currency to another.', aliases=['fiat', 'cc',\n 'convertcurrency', 'currencyconvert'], usage=\n '<from currency> <to currency> [amount]')\n", (33819, 34022), False, 'from discord.ext import commands, tasks\n'), ((36015, 36064), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (36032, 36064), False, 'from discord.ext import commands, tasks\n'), ((36070, 36212), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""wiki"""', 'description': '"""Returns a Wikipedia article."""', 'aliases': "['wikipedia']", 'usage': '"""<article title (case-sensitive)>"""'}), "(name='wiki', description='Returns a Wikipedia article.',\n aliases=['wikipedia'], usage='<article title (case-sensitive)>')\n", (36086, 36212), False, 'from discord.ext import commands, tasks\n'), ((39003, 39053), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(45)', 'commands.BucketType.user'], {}), '(1, 45, commands.BucketType.user)\n', (39020, 39053), False, 'from discord.ext import commands, tasks\n'), ((39059, 39297), 'discord.ext.commands.command', 'commands.command', 
([], {'name': '"""srctop10"""', 'aliases': "['top10', 'src', 'speedruncom', 'leaderboard', 'lb', 'sr']", 'hidden': '(True)', 'description': '"""Shows the top 10 leaderboard for speedrun.com games."""', 'usage': '"""[speedrun.com game abbreviation]"""'}), "(name='srctop10', aliases=['top10', 'src', 'speedruncom',\n 'leaderboard', 'lb', 'sr'], hidden=True, description=\n 'Shows the top 10 leaderboard for speedrun.com games.', usage=\n '[speedrun.com game abbreviation]')\n", (39075, 39297), False, 'from discord.ext import commands, tasks\n'), ((42011, 42061), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(45)', 'commands.BucketType.user'], {}), '(1, 45, commands.BucketType.user)\n', (42028, 42061), False, 'from discord.ext import commands, tasks\n'), ((42067, 42275), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""srcqueue"""', 'aliases': "['queue', 'speedrunqueue', 'srqueue']", 'hidden': '(True)', 'description': '"""Shows the run queue for speedrun.com games."""', 'usage': '"""[speedrun.com game abbreviation]"""'}), "(name='srcqueue', aliases=['queue', 'speedrunqueue',\n 'srqueue'], hidden=True, description=\n 'Shows the run queue for speedrun.com games.', usage=\n '[speedrun.com game abbreviation]')\n", (42083, 42275), False, 'from discord.ext import commands, tasks\n'), ((46110, 46159), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (46127, 46159), False, 'from discord.ext import commands, tasks\n'), ((46165, 46307), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""urban"""', 'description': '"""Looks up a term on Urban Dictionary."""', 'aliases': "['ud', 'urbandictionary']", 'usage': '"""<term>"""'}), "(name='urban', description=\n 'Looks up a term on Urban Dictionary.', aliases=['ud',\n 'urbandictionary'], usage='<term>')\n", (46181, 46307), False, 'from discord.ext import commands, tasks\n'), ((48853, 48902), 
'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (48870, 48902), False, 'from discord.ext import commands, tasks\n'), ((48908, 49064), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""lyrics"""', 'description': '"""Gets the lyrics of a song from Genius."""', 'aliases': "['lyric', 'song', 'genius']", 'usage': '"""<song keywords>"""'}), "(name='lyrics', description=\n 'Gets the lyrics of a song from Genius.', aliases=['lyric', 'song',\n 'genius'], usage='<song keywords>')\n", (48924, 49064), False, 'from discord.ext import commands, tasks\n'), ((50694, 50743), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (50711, 50743), False, 'from discord.ext import commands, tasks\n'), ((50749, 50975), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""qrgen"""', 'description': '"""Generates a QR code."""', 'aliases': "['qrg', 'genqr', 'qr', 'qrc']", 'usage': '"""<input> ["QRcolour=black"]\n\nNote: Add "QRcolour=black" at the end to make the QR code black."""'}), '(name=\'qrgen\', description=\'Generates a QR code.\', aliases=\n [\'qrg\', \'genqr\', \'qr\', \'qrc\'], usage=\n """<input> ["QRcolour=black"]\n\nNote: Add "QRcolour=black" at the end to make the QR code black."""\n )\n', (50765, 50975), False, 'from discord.ext import commands, tasks\n'), ((51969, 52018), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (51986, 52018), False, 'from discord.ext import commands, tasks\n'), ((52024, 52170), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""qrread"""', 'description': '"""Reads a QR code."""', 'aliases': "['qrscan', 'qrr', 'readqr']", 'usage': '"""<image URL OR image attachment>"""'}), "(name='qrread', description='Reads a QR code.', 
aliases=[\n 'qrscan', 'qrr', 'readqr'], usage='<image URL OR image attachment>')\n", (52040, 52170), False, 'from discord.ext import commands, tasks\n'), ((53157, 53206), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (53174, 53206), False, 'from discord.ext import commands, tasks\n'), ((53212, 53297), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""compile"""', 'description': '"""Compiles code."""', 'aliases': "['comp']"}), "(name='compile', description='Compiles code.', aliases=['comp']\n )\n", (53228, 53297), False, 'from discord.ext import commands, tasks\n'), ((57180, 57229), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (57197, 57229), False, 'from discord.ext import commands, tasks\n'), ((57235, 57466), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""unix"""', 'description': '"""Converts a unix timestamp to a proper date format in GMT."""', 'aliases': "['time', 'timestamp', 'epoch', 'gmt', 'utc', 'timezone']", 'usage': '"""[time zone (-12-14)] [timestamp value]"""'}), "(name='unix', description=\n 'Converts a unix timestamp to a proper date format in GMT.', aliases=[\n 'time', 'timestamp', 'epoch', 'gmt', 'utc', 'timezone'], usage=\n '[time zone (-12-14)] [timestamp value]')\n", (57251, 57466), False, 'from discord.ext import commands, tasks\n'), ((58672, 58721), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (58689, 58721), False, 'from discord.ext import commands, tasks\n'), ((58727, 58864), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""scisum"""', 'aliases': "['science', 'sci']", 'hidden': '(True)', 'description': '"""Shows the science summary for the last month."""'}), "(name='scisum', aliases=['science', 
'sci'], hidden=True,\n description='Shows the science summary for the last month.')\n", (58743, 58864), False, 'from discord.ext import commands, tasks\n'), ((58973, 59022), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(5)', 'commands.BucketType.user'], {}), '(1, 5, commands.BucketType.user)\n', (58990, 59022), False, 'from discord.ext import commands, tasks\n'), ((59028, 59216), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""dict"""', 'description': '"""Returns the definition(s) of a word."""', 'aliases': "['dictionary', 'def', 'definition', 'meaning', 'define']", 'usage': '"""<language code> <word>"""'}), "(name='dict', description=\n 'Returns the definition(s) of a word.', aliases=['dictionary', 'def',\n 'definition', 'meaning', 'define'], usage='<language code> <word>')\n", (59044, 59216), False, 'from discord.ext import commands, tasks\n'), ((60966, 61015), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (60983, 61015), False, 'from discord.ext import commands, tasks\n'), ((61021, 61193), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""reddit"""', 'description': '"""Looks up a community or user on Reddit."""', 'aliases': "['subreddit', 'r', 'redditor']", 'usage': '"""<r/subreddit OR u/redditor>"""'}), "(name='reddit', description=\n 'Looks up a community or user on Reddit.', aliases=['subreddit', 'r',\n 'redditor'], usage='<r/subreddit OR u/redditor>')\n", (61037, 61193), False, 'from discord.ext import commands, tasks\n'), ((68306, 68355), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (68323, 68355), False, 'from discord.ext import commands, tasks\n'), ((68361, 68523), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""calc"""', 'description': '"""Does simple math."""', 'aliases': 
"['calculate', 'calculator', 'cal', 'math', 'maths', 'safeeval']", 'usage': '"""<input>"""'}), "(name='calc', description='Does simple math.', aliases=[\n 'calculate', 'calculator', 'cal', 'math', 'maths', 'safeeval'], usage=\n '<input>')\n", (68377, 68523), False, 'from discord.ext import commands, tasks\n'), ((71611, 71660), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (71628, 71660), False, 'from discord.ext import commands, tasks\n'), ((71666, 71841), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""sqrt"""', 'usage': '"""<input>"""', 'hidden': '(True)', 'aliases': "['square', 'root']", 'description': '"""Calculates the square root of a given value or math expession."""'}), "(name='sqrt', usage='<input>', hidden=True, aliases=[\n 'square', 'root'], description=\n 'Calculates the square root of a given value or math expession.')\n", (71682, 71841), False, 'from discord.ext import commands, tasks\n'), ((72168, 72217), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (72185, 72217), False, 'from discord.ext import commands, tasks\n'), ((72223, 72551), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""wordcount"""', 'description': '"""Counts the number of words and characters in an input."""', 'aliases': "['lettercount', 'countletter', 'countchar', 'countletters', 'char', 'chars',\n 'letters', 'charcount', 'wc', 'countword', 'word', 'words',\n 'countwords', 'letter']", 'usage': '"""<input OR text attachment>"""'}), "(name='wordcount', description=\n 'Counts the number of words and characters in an input.', aliases=[\n 'lettercount', 'countletter', 'countchar', 'countletters', 'char',\n 'chars', 'letters', 'charcount', 'wc', 'countword', 'word', 'words',\n 'countwords', 'letter'], usage='<input OR text attachment>')\n", (72239, 72551), 
False, 'from discord.ext import commands, tasks\n'), ((74252, 74301), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (74269, 74301), False, 'from discord.ext import commands, tasks\n'), ((74307, 74484), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""country"""', 'description': '"""Shows information about a country."""', 'aliases': "['location', 'countries', 'place', 'nation']", 'usage': '"""<country name OR code>"""'}), "(name='country', description=\n 'Shows information about a country.', aliases=['location', 'countries',\n 'place', 'nation'], usage='<country name OR code>')\n", (74323, 74484), False, 'from discord.ext import commands, tasks\n'), ((77877, 77926), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (77894, 77926), False, 'from discord.ext import commands, tasks\n'), ((77932, 78081), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""ip"""', 'description': '"""Shows information about an IP address."""', 'aliases': "['ipaddress']", 'hidden': '(True)', 'usage': '"""<IP address>"""'}), "(name='ip', description=\n 'Shows information about an IP address.', aliases=['ipaddress'], hidden\n =True, usage='<IP address>')\n", (77948, 78081), False, 'from discord.ext import commands, tasks\n'), ((79009, 79058), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (79026, 79058), False, 'from discord.ext import commands, tasks\n'), ((79064, 79249), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""element"""', 'description': '"""Shows information about a chemical element."""', 'aliases': "['elem', 'chem', 'chemical']", 'hidden': '(True)', 'usage': '"""<element symbol or name>"""'}), "(name='element', description=\n 'Shows 
information about a chemical element.', aliases=['elem', 'chem',\n 'chemical'], hidden=True, usage='<element symbol or name>')\n", (79080, 79249), False, 'from discord.ext import commands, tasks\n'), ((82453, 82502), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (82470, 82502), False, 'from discord.ext import commands, tasks\n'), ((82508, 82638), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""periodic"""', 'description': '"""Shows the periodic table."""', 'aliases': "['periotictable', 'elements']", 'hidden': '(True)'}), "(name='periodic', description='Shows the periodic table.',\n aliases=['periotictable', 'elements'], hidden=True)\n", (82524, 82638), False, 'from discord.ext import commands, tasks\n'), ((82825, 82874), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (82842, 82874), False, 'from discord.ext import commands, tasks\n'), ((82880, 83057), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""sohcahtoa"""', 'description': '"""SOH CAH TOA."""', 'aliases': "['trigonometry', 'triggernometry', 'sincostan', 'sinecostan', 'sine', 'cos',\n 'tan']", 'hidden': '(True)'}), "(name='sohcahtoa', description='SOH CAH TOA.', aliases=[\n 'trigonometry', 'triggernometry', 'sincostan', 'sinecostan', 'sine',\n 'cos', 'tan'], hidden=True)\n", (82896, 83057), False, 'from discord.ext import commands, tasks\n'), ((83242, 83291), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (83259, 83291), False, 'from discord.ext import commands, tasks\n'), ((83297, 83412), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""osi"""', 'description': '"""Shows the OSI Model."""', 'aliases': "['osimodel', '7layers']", 'hidden': '(True)'}), "(name='osi', 
description='Shows the OSI Model.', aliases=[\n 'osimodel', '7layers'], hidden=True)\n", (83313, 83412), False, 'from discord.ext import commands, tasks\n'), ((83593, 83642), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (83610, 83642), False, 'from discord.ext import commands, tasks\n'), ((83648, 83800), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""normalbodytemp"""', 'description': '"""Shows the normal body temperature range chart."""', 'aliases': "['bodytemp', 'nbt']", 'hidden': '(True)'}), "(name='normalbodytemp', description=\n 'Shows the normal body temperature range chart.', aliases=['bodytemp',\n 'nbt'], hidden=True)\n", (83664, 83800), False, 'from discord.ext import commands, tasks\n'), ((83987, 84036), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(2)', 'commands.BucketType.user'], {}), '(1, 2, commands.BucketType.user)\n', (84004, 84036), False, 'from discord.ext import commands, tasks\n'), ((84042, 84316), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""Gets information and generates a citation for an article via DOI number."""', 'aliases': "['reference', 'ref', 'citation', 'doi', 'cit', 'altmetric', 'altmetrics',\n 'cite', 'art']", 'usage': '"""<DOI number> [citation style]"""', 'name': '"""article"""'}), "(description=\n 'Gets information and generates a citation for an article via DOI number.',\n aliases=['reference', 'ref', 'citation', 'doi', 'cit', 'altmetric',\n 'altmetrics', 'cite', 'art'], usage='<DOI number> [citation style]',\n name='article')\n", (84058, 84316), False, 'from discord.ext import commands, tasks\n'), ((89991, 90040), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (90008, 90040), False, 'from discord.ext import commands, tasks\n'), ((90046, 90365), 
'discord.ext.commands.command', 'commands.command', ([], {'name': '"""quartile"""', 'usage': '"""<numbers separated with ;> ["all" to show all points]"""', 'aliases': "['avg', 'average', 'mean', 'median', 'mode', 'q1', 'q2', 'q3', 'range',\n 'sd', 'iqr', 'quartiles', 'boxplot', 'box', 'qir']", 'description': '"""Computes statistical data from a set of numerical values."""'}), '(name=\'quartile\', usage=\n \'<numbers separated with ;> ["all" to show all points]\', aliases=[\'avg\',\n \'average\', \'mean\', \'median\', \'mode\', \'q1\', \'q2\', \'q3\', \'range\', \'sd\',\n \'iqr\', \'quartiles\', \'boxplot\', \'box\', \'qir\'], description=\n \'Computes statistical data from a set of numerical values.\')\n', (90062, 90365), False, 'from discord.ext import commands, tasks\n'), ((94033, 94082), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (94050, 94082), False, 'from discord.ext import commands, tasks\n'), ((95328, 95377), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (95345, 95377), False, 'from discord.ext import commands, tasks\n'), ((95383, 95600), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""zodiac"""', 'description': '"""Converts a date to its zodiac sign."""', 'hidden': '(True)', 'aliases': "['starsign', 'horoscope', 'zs']", 'usage': '"""[month] [day]\n\nAlternative usage(s):\n\n- <zodiac sign>"""'}), '(name=\'zodiac\', description=\n \'Converts a date to its zodiac sign.\', hidden=True, aliases=[\'starsign\',\n \'horoscope\', \'zs\'], usage=\n """[month] [day]\n\nAlternative usage(s):\n\n- <zodiac sign>""")\n', (95399, 95600), False, 'from discord.ext import commands, tasks\n'), ((97334, 97383), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (97351, 
97383), False, 'from discord.ext import commands, tasks\n'), ((97389, 97609), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""chinesezodiac"""', 'description': '"""Converts a year to its Chinese zodiac sign."""', 'usage': '"""[year]"""', 'aliases': "['cz', 'zodiacchinese', 'year', 'yearofthe', 'ly', 'leap', 'leapyear']", 'hidden': '(True)'}), "(name='chinesezodiac', description=\n 'Converts a year to its Chinese zodiac sign.', usage='[year]', aliases=\n ['cz', 'zodiacchinese', 'year', 'yearofthe', 'ly', 'leap', 'leapyear'],\n hidden=True)\n", (97405, 97609), False, 'from discord.ext import commands, tasks\n'), ((98231, 98280), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(2)', 'commands.BucketType.user'], {}), '(1, 2, commands.BucketType.user)\n', (98248, 98280), False, 'from discord.ext import commands, tasks\n'), ((98286, 98767), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""Shows how far apart two dates are."""', 'aliases': "['weekday', 'day', 'days', 'dates', 'age', 'today']", 'usage': '(\n \'[date #1 day] [date #1 month] [date #1 year] [date #2 day] [date #2 month] [date #2 year]\\n\\n\'\n +\n """Alternative usage(s):\n\n- <days (+/-) from today OR weeks (+/- ending with w) from today>\n\n"""\n +\n \'- <date day> <date month> <date year> <days (+/-) from date OR weeks (+/- ending with w) from date>\'\n )', 'name': '"""date"""'}), '(description=\'Shows how far apart two dates are.\', aliases=\n [\'weekday\', \'day\', \'days\', \'dates\', \'age\', \'today\'], usage=\n """[date #1 day] [date #1 month] [date #1 year] [date #2 day] [date #2 month] [date #2 year]\n\n"""\n +\n """Alternative usage(s):\n\n- <days (+/-) from today OR weeks (+/- ending with w) from today>\n\n"""\n +\n \'- <date day> <date month> <date year> <days (+/-) from date OR weeks (+/- ending with w) from date>\'\n , name=\'date\')\n', (98302, 98767), False, 'from discord.ext import commands, tasks\n'), ((103963, 104012), 
'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(1)', 'commands.BucketType.user'], {}), '(1, 1, commands.BucketType.user)\n', (103980, 104012), False, 'from discord.ext import commands, tasks\n'), ((104018, 104183), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""iss"""', 'description': '"""Gets information about the International Space Station and all humans in space."""', 'aliases': "['space']", 'hidden': '(True)'}), "(name='iss', description=\n 'Gets information about the International Space Station and all humans in space.'\n , aliases=['space'], hidden=True)\n", (104034, 104183), False, 'from discord.ext import commands, tasks\n'), ((105761, 105810), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (105778, 105810), False, 'from discord.ext import commands, tasks\n'), ((105816, 106154), 'discord.ext.commands.command', 'commands.command', ([], {'usage': '"""<note #1 with octave (e.g. F#2)> <note #2 with octave (e.g. G5)>"""', 'hidden': '(True)', 'aliases': "['octave', 'note', 'notes', 'semitone', 'semitones', 'vocalrange',\n 'octaves', 'notesrange']", 'name': '"""noterange"""', 'description': '"""Shows the range in octaves and semitones between two given musical notes."""'}), "(usage=\n '<note #1 with octave (e.g. F#2)> <note #2 with octave (e.g. 
G5)>',\n hidden=True, aliases=['octave', 'note', 'notes', 'semitone',\n 'semitones', 'vocalrange', 'octaves', 'notesrange'], name='noterange',\n description=\n 'Shows the range in octaves and semitones between two given musical notes.'\n )\n", (105832, 106154), False, 'from discord.ext import commands, tasks\n'), ((107548, 107597), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (107565, 107597), False, 'from discord.ext import commands, tasks\n'), ((107603, 107863), 'discord.ext.commands.command', 'commands.command', ([], {'description': "('Adds a timestamp to a YouTube video link, ' +\n 'useful for mobile users who cannot copy links with timestamps.')", 'hidden': '(True)', 'aliases': "['yt', 'ytts', 'ytt']", 'usage': '"""<YouTube video link> <timestamp>"""', 'name': '"""yttimestamp"""'}), "(description='Adds a timestamp to a YouTube video link, ' +\n 'useful for mobile users who cannot copy links with timestamps.',\n hidden=True, aliases=['yt', 'ytts', 'ytt'], usage=\n '<YouTube video link> <timestamp>', name='yttimestamp')\n", (107619, 107863), False, 'from discord.ext import commands, tasks\n'), ((108725, 108774), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (108742, 108774), False, 'from discord.ext import commands, tasks\n'), ((108780, 109023), 'discord.ext.commands.command', 'commands.command', ([], {'description': '"""Shows the age timeline of a hypothetical person born in a certain year up until adulthood."""', 'usage': '"""[year (1500-2500)]\n\nAlternative usage(s):\n\n- <age (0-100)>"""', 'name': '"""agelist"""', 'hidden': '(True)'}), '(description=\n \'Shows the age timeline of a hypothetical person born in a certain year up until adulthood.\'\n , usage=\n """[year (1500-2500)]\n\nAlternative usage(s):\n\n- <age (0-100)>""", name=\n \'agelist\', hidden=True)\n', 
(108796, 109023), False, 'from discord.ext import commands, tasks\n'), ((110318, 110367), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(3)', 'commands.BucketType.user'], {}), '(1, 3, commands.BucketType.user)\n', (110335, 110367), False, 'from discord.ext import commands, tasks\n'), ((110373, 110564), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""google"""', 'description': '"""Generates search URLs for Google, Bing, and DuckDuckGo."""', 'aliases': "['search', 'ddg', 'duckduckgo', 'lookup', 'bing']", 'usage': '"""<keywords>"""'}), "(name='google', description=\n 'Generates search URLs for Google, Bing, and DuckDuckGo.', aliases=[\n 'search', 'ddg', 'duckduckgo', 'lookup', 'bing'], usage='<keywords>')\n", (110389, 110564), False, 'from discord.ext import commands, tasks\n'), ((111369, 111419), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(15)', 'commands.BucketType.user'], {}), '(1, 15, commands.BucketType.user)\n', (111386, 111419), False, 'from discord.ext import commands, tasks\n'), ((111425, 111591), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""wolfram"""', 'description': '"""Queries things using the Wolfram|Alpha API."""', 'aliases': "['wolf', 'wa', 'wolframalpha', 'query']", 'usage': '"""<input>"""'}), "(name='wolfram', description=\n 'Queries things using the Wolfram|Alpha API.', aliases=['wolf', 'wa',\n 'wolframalpha', 'query'], usage='<input>')\n", (111441, 111591), False, 'from discord.ext import commands, tasks\n'), ((113906, 113956), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(10)', 'commands.BucketType.user'], {}), '(1, 10, commands.BucketType.user)\n', (113923, 113956), False, 'from discord.ext import commands, tasks\n'), ((113962, 114177), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""blurface"""', 'description': '"""Detects faces in an image and blurs them."""', 'hidden': '(True)', 'aliases': "['faceblur', 'blurfaces', 
'anonymize', 'anonymise', 'blur']", 'usage': '"""<image attachment>"""'}), "(name='blurface', description=\n 'Detects faces in an image and blurs them.', hidden=True, aliases=[\n 'faceblur', 'blurfaces', 'anonymize', 'anonymise', 'blur'], usage=\n '<image attachment>')\n", (113978, 114177), False, 'from discord.ext import commands, tasks\n'), ((1818, 1863), 'cv2.dnn.readNetFromCaffe', 'dnn.readNetFromCaffe', (['prototxtPath', 'modelPath'], {}), '(prototxtPath, modelPath)\n', (1838, 1863), False, 'from cv2 import GaussianBlur, dnn, imread, imwrite\n'), ((1880, 1896), 'cv2.imread', 'imread', (['filename'], {}), '(filename)\n', (1886, 1896), False, 'from cv2 import GaussianBlur, dnn, imread, imwrite\n'), ((2005, 2069), 'cv2.dnn.blobFromImage', 'dnn.blobFromImage', (['image', '(1.0)', '(300, 300)', '(104.0, 177.0, 123.0)'], {}), '(image, 1.0, (300, 300), (104.0, 177.0, 123.0))\n', (2022, 2069), False, 'from cv2 import GaussianBlur, dnn, imread, imwrite\n'), ((12034, 12103), 'discord.Embed', 'Embed', ([], {'title': 'title', 'description': 'f"""Requested by: {ctx.author.mention}"""'}), "(title=title, description=f'Requested by: {ctx.author.mention}')\n", (12039, 12103), False, 'from discord import Embed, File, channel\n'), ((12316, 12352), 'discord.File', 'File', (['f"""{funcs.PATH}/temp/{imgName}"""'], {}), "(f'{funcs.PATH}/temp/{imgName}')\n", (12320, 12352), False, 'from discord import Embed, File, channel\n'), ((61274, 61375), 'asyncpraw.Reddit', 'Reddit', ([], {'client_id': 'config.redditClientID', 'client_secret': 'config.redditClientSecret', 'user_agent': '"""*"""'}), "(client_id=config.redditClientID, client_secret=config.\n redditClientSecret, user_agent='*')\n", (61280, 61375), False, 'from asyncpraw import Reddit\n'), ((73848, 73873), 'discord.Embed', 'Embed', ([], {'title': '"""Word Count"""'}), "(title='Word Count')\n", (73853, 73873), False, 'from discord import Embed, File, channel\n'), ((98967, 98983), 'datetime.datetime.today', 'datetime.today', ([], 
{}), '()\n', (98981, 98983), False, 'from datetime import datetime, timedelta\n'), ((110743, 110770), 'urllib.parse.urlencode', 'parse.urlencode', (["{'q': inp}"], {}), "({'q': inp})\n", (110758, 110770), False, 'from urllib import parse\n'), ((110786, 110915), 'src.utils.funcs.newButtonView', 'funcs.newButtonView', (['(2)'], {'label': '"""Google"""', 'url': 'f"""https://www.google.com/search?{param}"""', 'emoji': "self.client.emoji['google']"}), "(2, label='Google', url=\n f'https://www.google.com/search?{param}', emoji=self.client.emoji['google']\n )\n", (110805, 110915), False, 'from src.utils import funcs\n'), ((110943, 111076), 'src.utils.funcs.newButtonView', 'funcs.newButtonView', (['(2)'], {'label': '"""Bing"""', 'url': 'f"""https://www.bing.com/search?{param}"""', 'emoji': "self.client.emoji['bing']", 'view': 'view'}), "(2, label='Bing', url=\n f'https://www.bing.com/search?{param}', emoji=self.client.emoji['bing'],\n view=view)\n", (110962, 111076), False, 'from src.utils import funcs\n'), ((1482, 1527), 'src.utils.funcs.generateJson', 'funcs.generateJson', (['"""reminders"""', "{'list': []}"], {}), "('reminders', {'list': []})\n", (1500, 1527), False, 'from src.utils import funcs\n'), ((1672, 1719), 'src.utils.funcs.getResource', 'funcs.getResource', (['self.name', '"""deploy.prototxt"""'], {}), "(self.name, 'deploy.prototxt')\n", (1689, 1719), False, 'from src.utils import funcs\n'), ((1753, 1801), 'src.utils.funcs.getResource', 'funcs.getResource', (['self.name', '"""model.caffemodel"""'], {}), "(self.name, 'model.caffemodel')\n", (1770, 1801), False, 'from src.utils import funcs\n'), ((2750, 2787), 'src.utils.funcs.readJson', 'funcs.readJson', (['"""data/reminders.json"""'], {}), "('data/reminders.json')\n", (2764, 2787), False, 'from src.utils import funcs\n'), ((4550, 4587), 'src.utils.funcs.readJson', 'funcs.readJson', (['"""data/reminders.json"""'], {}), "('data/reminders.json')\n", (4564, 4587), False, 'from src.utils import funcs\n'), ((5759, 
5796), 'src.utils.funcs.readJson', 'funcs.readJson', (['"""data/reminders.json"""'], {}), "('data/reminders.json')\n", (5773, 5796), False, 'from src.utils import funcs\n'), ((5815, 5821), 'time.time', 'time', ([], {}), '()\n', (5819, 5821), False, 'from time import gmtime, mktime, time\n'), ((12234, 12299), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['fig.write_image', 'f"""{funcs.PATH}/temp/{imgName}"""'], {}), "(fig.write_image, f'{funcs.PATH}/temp/{imgName}')\n", (12250, 12299), False, 'from src.utils import funcs\n'), ((13608, 13637), 'src.utils.funcs.deleteTempFile', 'funcs.deleteTempFile', (['imgName'], {}), '(imgName)\n', (13628, 13637), False, 'from src.utils import funcs\n'), ((14925, 14954), 'src.utils.funcs.deleteTempFile', 'funcs.deleteTempFile', (['imgName'], {}), '(imgName)\n', (14945, 14954), False, 'from src.utils import funcs\n'), ((16232, 16261), 'src.utils.funcs.deleteTempFile', 'funcs.deleteTempFile', (['imgName'], {}), '(imgName)\n', (16252, 16261), False, 'from src.utils import funcs\n'), ((17036, 17083), 'discord.Embed', 'Embed', ([], {'description': 'f"""https://github.com/{repo}"""'}), "(description=f'https://github.com/{repo}')\n", (17041, 17083), False, 'from discord import Embed, File, channel\n'), ((19516, 19602), 'discord.Embed', 'Embed', ([], {'description': "('Statistics taken at: `' + data['statistic_taken_at'] + ' UTC`')"}), "(description='Statistics taken at: `' + data['statistic_taken_at'] +\n ' UTC`')\n", (19521, 19602), False, 'from discord import Embed, File, channel\n'), ((24109, 24147), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Empty input."""'], {}), "(None, 'Empty input.')\n", (24125, 24147), False, 'from src.utils import funcs\n'), ((30684, 30715), 'src.utils.funcs.celsiusToFahrenheit', 'funcs.celsiusToFahrenheit', (['temp'], {}), '(temp)\n', (30709, 30715), False, 'from src.utils import funcs\n'), ((30837, 30868), 'src.utils.funcs.celsiusToFahrenheit', 'funcs.celsiusToFahrenheit', 
(['high'], {}), '(high)\n', (30862, 30868), False, 'from src.utils import funcs\n'), ((30888, 30918), 'src.utils.funcs.celsiusToFahrenheit', 'funcs.celsiusToFahrenheit', (['low'], {}), '(low)\n', (30913, 30918), False, 'from src.utils import funcs\n'), ((36318, 36371), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Cannot process empty input."""'], {}), "(None, 'Cannot process empty input.')\n", (36334, 36371), False, 'from src.utils import funcs\n'), ((41453, 41478), 'discord.Embed', 'Embed', ([], {'description': 'output'}), '(description=output)\n', (41458, 41478), False, 'from discord import Embed, File, channel\n'), ((51281, 51303), 'discord.Embed', 'Embed', ([], {'title': '"""QR Code"""'}), "(title='QR Code')\n", (51286, 51303), False, 'from discord import Embed, File, channel\n'), ((51321, 51329), 'qrcode.QRCode', 'QRCode', ([], {}), '()\n', (51327, 51329), False, 'from qrcode import QRCode\n'), ((51655, 51691), 'discord.File', 'File', (['f"""{funcs.PATH}/temp/{imgName}"""'], {}), "(f'{funcs.PATH}/temp/{imgName}')\n", (51659, 51691), False, 'from discord import Embed, File, channel\n'), ((51933, 51962), 'src.utils.funcs.deleteTempFile', 'funcs.deleteTempFile', (['imgName'], {}), '(imgName)\n', (51953, 51962), False, 'from src.utils import funcs\n'), ((53043, 53117), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""No attachment or URL detected, please try again."""'], {}), "(None, 'No attachment or URL detected, please try again.')\n", (53059, 53117), False, 'from src.utils import funcs\n'), ((59813, 59891), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['"""Invalid language code!"""', 'f"""Valid options:\n\n{codesList}"""'], {}), '(\'Invalid language code!\', f"""Valid options:\n\n{codesList}""")\n', (59829, 59891), False, 'from src.utils import funcs\n'), ((74215, 74245), 'src.utils.funcs.deleteTempFile', 'funcs.deleteTempFile', (['filename'], {}), '(filename)\n', (74235, 74245), False, 'from src.utils import funcs\n'), 
((75937, 75990), 'discord.Embed', 'Embed', ([], {'title': 'f"""{data[\'name\']} ({data[\'alpha3Code\']})"""'}), '(title=f"{data[\'name\']} ({data[\'alpha3Code\']})")\n', (75942, 75990), False, 'from discord import Embed, File, channel\n'), ((78259, 78285), 'discord.Embed', 'Embed', ([], {'title': "data['query']"}), "(title=data['query'])\n", (78264, 78285), False, 'from discord import Embed, File, channel\n'), ((79349, 79369), 'mendeleev.element', 'element', (['elementname'], {}), '(elementname)\n', (79356, 79369), False, 'from mendeleev import element\n'), ((80290, 80368), 'discord.Embed', 'Embed', ([], {'title': 'f"""{name} ({elementobj.symbol})"""', 'description': "(desc if desc else '')"}), "(title=f'{name} ({elementobj.symbol})', description=desc if desc else '')\n", (80295, 80368), False, 'from discord import Embed, File, channel\n'), ((82706, 82827), 'src.utils.funcs.sendImage', 'funcs.sendImage', (['ctx', '"""https://media.discordapp.net/attachments/871621453521485864/882103596563431424/table.jpg"""'], {}), "(ctx,\n 'https://media.discordapp.net/attachments/871621453521485864/882103596563431424/table.jpg'\n )\n", (82721, 82827), False, 'from src.utils import funcs\n'), ((83121, 83244), 'src.utils.funcs.sendImage', 'funcs.sendImage', (['ctx', '"""https://media.discordapp.net/attachments/771404776410972161/954017475668885534/unknown.png"""'], {}), "(ctx,\n 'https://media.discordapp.net/attachments/771404776410972161/954017475668885534/unknown.png'\n )\n", (83136, 83244), False, 'from src.utils import funcs\n'), ((83474, 83595), 'src.utils.funcs.sendImage', 'funcs.sendImage', (['ctx', '"""https://cdn.discordapp.com/attachments/771404776410972161/950404988369240104/unknown.png"""'], {}), "(ctx,\n 'https://cdn.discordapp.com/attachments/771404776410972161/950404988369240104/unknown.png'\n )\n", (83489, 83595), False, 'from src.utils import funcs\n'), ((83869, 83989), 'src.utils.funcs.sendImage', 'funcs.sendImage', (['ctx', 
'"""https://cdn.discordapp.com/attachments/771404776410972161/851367517241999380/image0.jpg"""'], {}), "(ctx,\n 'https://cdn.discordapp.com/attachments/771404776410972161/851367517241999380/image0.jpg'\n )\n", (83884, 83989), False, 'from src.utils import funcs\n'), ((91723, 91747), 'statistics.median', 'median', (['data[-halflist:]'], {}), '(data[-halflist:])\n', (91729, 91747), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((91765, 91788), 'statistics.median', 'median', (['data[:halflist]'], {}), '(data[:halflist])\n', (91771, 91788), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((93264, 93275), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (93273, 93275), True, 'from plotly import graph_objects as go\n'), ((93728, 93764), 'discord.File', 'File', (['f"""{funcs.PATH}/temp/{imgName}"""'], {}), "(f'{funcs.PATH}/temp/{imgName}')\n", (93732, 93764), False, 'from discord import Embed, File, channel\n'), ((93997, 94026), 'src.utils.funcs.deleteTempFile', 'funcs.deleteTempFile', (['imgName'], {}), '(imgName)\n', (94017, 94026), False, 'from src.utils import funcs\n'), ((104597, 104675), 'discord.Embed', 'Embed', ([], {'description': '"""https://en.wikipedia.org/wiki/International_Space_Station"""'}), "(description='https://en.wikipedia.org/wiki/International_Space_Station')\n", (104602, 104675), False, 'from discord import Embed, File, channel\n'), ((109087, 109103), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (109101, 109103), False, 'from datetime import datetime, timedelta\n'), ((114423, 114461), 'src.utils.funcs.useImageFunc', 'funcs.useImageFunc', (['ctx', 'self.blurFace'], {}), '(ctx, self.blurFace)\n', (114441, 114461), False, 'from src.utils import funcs\n'), ((1623, 1629), 'time.time', 'time', ([], {}), '()\n', (1627, 1629), False, 'from time import gmtime, mktime, time\n'), ((2454, 2495), 'cv2.GaussianBlur', 'GaussianBlur', (['face', '(kernelW, kernelH)', '(0)'], {}), '(face, 
(kernelW, kernelH), 0)\n', (2466, 2495), False, 'from cv2 import GaussianBlur, dnn, imread, imwrite\n'), ((2567, 2613), 'cv2.imwrite', 'imwrite', (['f"""{funcs.PATH}/temp/{imgName}"""', 'image'], {}), "(f'{funcs.PATH}/temp/{imgName}', image)\n", (2574, 2613), False, 'from cv2 import GaussianBlur, dnn, imread, imwrite\n'), ((3844, 3892), 'src.utils.funcs.dumpJson', 'funcs.dumpJson', (['"""data/reminders.json"""', 'reminders'], {}), "('data/reminders.json', reminders)\n", (3858, 3892), False, 'from src.utils import funcs\n'), ((7878, 7897), 'src.utils.funcs.randomHex', 'funcs.randomHex', (['(16)'], {}), '(16)\n', (7893, 7897), False, 'from src.utils import funcs\n'), ((12816, 12822), 'time.time', 'time', ([], {}), '()\n', (12820, 12822), False, 'from time import gmtime, mktime, time\n'), ((13438, 13463), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (13454, 13463), False, 'from src.utils import funcs\n'), ((13480, 13548), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""An error occurred, please try again later."""'], {}), "(None, 'An error occurred, please try again later.')\n", (13496, 13548), False, 'from src.utils import funcs\n'), ((14031, 14037), 'time.time', 'time', ([], {}), '()\n', (14035, 14037), False, 'from time import gmtime, mktime, time\n'), ((14755, 14780), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (14771, 14780), False, 'from src.utils import funcs\n'), ((14797, 14865), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""An error occurred, please try again later."""'], {}), "(None, 'An error occurred, please try again later.')\n", (14813, 14865), False, 'from src.utils import funcs\n'), ((15343, 15349), 'time.time', 'time', ([], {}), '()\n', (15347, 15349), False, 'from time import gmtime, mktime, time\n'), ((16062, 16087), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (16078, 16087), False, 'from 
src.utils import funcs\n'), ((16104, 16172), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""An error occurred, please try again later."""'], {}), "(None, 'An error occurred, please try again later.')\n", (16120, 16172), False, 'from src.utils import funcs\n'), ((16952, 17019), 'src.utils.funcs.getRequest', 'funcs.getRequest', (["('https://api.codetabs.com/v1/loc/?github=' + repo)"], {}), "('https://api.codetabs.com/v1/loc/?github=' + repo)\n", (16968, 17019), False, 'from src.utils import funcs\n'), ((17657, 17682), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (17673, 17682), False, 'from src.utils import funcs\n'), ((17699, 17760), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Unknown repository or server error."""'], {}), "(None, 'Unknown repository or server error.')\n", (17715, 17760), False, 'from src.utils import funcs\n'), ((18284, 18386), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""https://corona-virus-world-and-india-data.p.rapidapi.com/api"""'], {'headers': 'headers'}), "('https://corona-virus-world-and-india-data.p.rapidapi.com/api'\n , headers=headers)\n", (18300, 18386), False, 'from src.utils import funcs\n'), ((23647, 23672), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (23663, 23672), False, 'from src.utils import funcs\n'), ((23689, 23745), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input or server error."""'], {}), "(None, 'Invalid input or server error.')\n", (23705, 23745), False, 'from src.utils import funcs\n'), ((28307, 28364), 'discord.Embed', 'Embed', ([], {'title': 'f"""Flight {flightstr}"""', 'description': 'flighturl'}), "(title=f'Flight {flightstr}', description=flighturl)\n", (28312, 28364), False, 'from discord import Embed, File, channel\n'), ((30339, 30360), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['url'], {}), '(url)\n', (30355, 30360), False, 'from src.utils import 
funcs\n'), ((32275, 32300), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (32291, 32300), False, 'from src.utils import funcs\n'), ((32317, 32376), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Unknown location or server error."""'], {}), "(None, 'Unknown location or server error.')\n", (32333, 32376), False, 'from src.utils import funcs\n'), ((33059, 33101), 'deep_translator.constants.GOOGLE_CODES_TO_LANGUAGES.keys', 'constants.GOOGLE_CODES_TO_LANGUAGES.keys', ([], {}), '()\n', (33099, 33101), False, 'from deep_translator import GoogleTranslator, constants\n'), ((33366, 33410), 'deep_translator.GoogleTranslator', 'GoogleTranslator', ([], {'source': '"""auto"""', 'target': 'dest'}), "(source='auto', target=dest)\n", (33382, 33410), False, 'from deep_translator import GoogleTranslator, constants\n'), ((33607, 33632), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (33623, 33632), False, 'from src.utils import funcs\n'), ((33649, 33708), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""An error occurred. Invalid input?"""'], {}), "(None, 'An error occurred. 
Invalid input?')\n", (33665, 33708), False, 'from src.utils import funcs\n'), ((34193, 34309), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""http://api.exchangeratesapi.io/v1/latest"""'], {'params': "{'access_key': config.exchangeratesapiKey}"}), "('http://api.exchangeratesapi.io/v1/latest', params={\n 'access_key': config.exchangeratesapiKey})\n", (34209, 34309), False, 'from src.utils import funcs\n'), ((35887, 35912), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (35903, 35912), False, 'from src.utils import funcs\n'), ((39928, 39956), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['categories'], {}), '(categories)\n', (39944, 39956), False, 'from src.utils import funcs\n'), ((40494, 40514), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['lb'], {}), '(lb)\n', (40510, 40514), False, 'from src.utils import funcs\n'), ((40709, 40774), 'src.utils.funcs.timeDifferenceStr', 'funcs.timeDifferenceStr', (["run['times']['primary_t']", '(0)'], {'noStr': '(True)'}), "(run['times']['primary_t'], 0, noStr=True)\n", (40732, 40774), False, 'from src.utils import funcs\n'), ((41888, 41913), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (41904, 41913), False, 'from src.utils import funcs\n'), ((42749, 42863), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['f"""https://www.speedrun.com/api/v1/runs?game={gameID}&status=new&embed=players&max=200"""'], {}), "(\n f'https://www.speedrun.com/api/v1/runs?game={gameID}&status=new&embed=players&max=200'\n )\n", (42765, 42863), False, 'from src.utils import funcs\n'), ((45643, 45678), 'discord.Embed', 'Embed', ([], {'description': '"""No runs found."""'}), "(description='No runs found.')\n", (45648, 45678), False, 'from discord import Embed, File, channel\n'), ((45987, 46012), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (46003, 46012), False, 'from src.utils import funcs\n'), ((50105, 50160), 
'discord.Embed', 'Embed', ([], {'description': 'p', 'title': 'f"""{author} - {title}"""[:256]'}), "(description=p, title=f'{author} - {title}'[:256])\n", (50110, 50160), False, 'from discord import Embed, File, channel\n'), ((50559, 50584), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (50575, 50584), False, 'from src.utils import funcs\n'), ((51218, 51224), 'time.time', 'time', ([], {}), '()\n', (51222, 51224), False, 'from time import gmtime, mktime, time\n'), ((51791, 51816), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (51807, 51816), False, 'from src.utils import funcs\n'), ((51833, 51873), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input."""'], {}), "(None, 'Invalid input.')\n", (51849, 51873), False, 'from src.utils import funcs\n'), ((52422, 52430), 'asyncio.sleep', 'sleep', (['(3)'], {}), '(3)\n', (52427, 52430), False, 'from asyncio import TimeoutError, sleep\n'), ((53364, 53427), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""https://run.glot.io/languages"""'], {'verify': '(False)'}), "('https://run.glot.io/languages', verify=False)\n", (53380, 53427), False, 'from src.utils import funcs\n'), ((54789, 54831), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['versionurl'], {'verify': '(False)'}), '(versionurl, verify=False)\n', (54805, 54831), False, 'from src.utils import funcs\n'), ((57148, 57173), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (57164, 57173), False, 'from src.utils import funcs\n'), ((58068, 58076), 'time.gmtime', 'gmtime', ([], {}), '()\n', (58074, 58076), False, 'from time import gmtime, mktime, time\n'), ((58095, 58128), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (58117, 58128), False, 'from datetime import datetime, timedelta\n'), ((68183, 68208), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), 
'(ctx, ex)\n', (68199, 68208), False, 'from src.utils import funcs\n'), ((68225, 68266), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid search."""'], {}), "(None, 'Invalid search.')\n", (68241, 68266), False, 'from src.utils import funcs\n'), ((71498, 71523), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (71514, 71523), False, 'from src.utils import funcs\n'), ((72055, 72080), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (72071, 72080), False, 'from src.utils import funcs\n'), ((72678, 72684), 'time.time', 'time', ([], {}), '()\n', (72682, 72684), False, 'from time import gmtime, mktime, time\n'), ((73786, 73827), 'src.utils.funcs.replaceCharacters', 'funcs.replaceCharacters', (['inp', 'punctuation'], {}), '(inp, punctuation)\n', (73809, 73827), False, 'from src.utils import funcs\n'), ((77739, 77764), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (77755, 77764), False, 'from src.utils import funcs\n'), ((77781, 77837), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input or server error."""'], {}), "(None, 'Invalid input or server error.')\n", (77797, 77837), False, 'from src.utils import funcs\n'), ((78164, 78212), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['f"""http://ip-api.com/json/{ip}"""'], {}), "(f'http://ip-api.com/json/{ip}')\n", (78180, 78212), False, 'from src.utils import funcs\n'), ((78871, 78896), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (78887, 78896), False, 'from src.utils import funcs\n'), ((78913, 78969), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input or server error."""'], {}), "(None, 'Invalid input or server error.')\n", (78929, 78969), False, 'from src.utils import funcs\n'), ((82343, 82368), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (82359, 
82368), False, 'from src.utils import funcs\n'), ((89878, 89903), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (89894, 89903), False, 'from src.utils import funcs\n'), ((90489, 90495), 'time.time', 'time', ([], {}), '()\n', (90493, 90495), False, 'from time import gmtime, mktime, time\n'), ((93302, 93365), 'plotly.graph_objects.Box', 'go.Box', ([], {'y': 'data', 'quartilemethod': '"""linear"""', 'name': '"""Linear Quartile"""'}), "(y=data, quartilemethod='linear', name='Linear Quartile')\n", (93308, 93365), True, 'from plotly import graph_objects as go\n'), ((93393, 93462), 'plotly.graph_objects.Box', 'go.Box', ([], {'y': 'data', 'quartilemethod': '"""inclusive"""', 'name': '"""Inclusive Quartile"""'}), "(y=data, quartilemethod='inclusive', name='Inclusive Quartile')\n", (93399, 93462), True, 'from plotly import graph_objects as go\n'), ((93490, 93559), 'plotly.graph_objects.Box', 'go.Box', ([], {'y': 'data', 'quartilemethod': '"""exclusive"""', 'name': '"""Exclusive Quartile"""'}), "(y=data, quartilemethod='exclusive', name='Exclusive Quartile')\n", (93496, 93559), True, 'from plotly import graph_objects as go\n'), ((93642, 93707), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['fig.write_image', 'f"""{funcs.PATH}/temp/{imgName}"""'], {}), "(fig.write_image, f'{funcs.PATH}/temp/{imgName}')\n", (93658, 93707), False, 'from src.utils import funcs\n'), ((93864, 93889), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (93880, 93889), False, 'from src.utils import funcs\n'), ((96988, 97012), 'src.utils.funcs.dateToZodiac', 'funcs.dateToZodiac', (['date'], {}), '(date)\n', (97006, 97012), False, 'from src.utils import funcs\n'), ((97254, 97294), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input."""'], {}), "(None, 'Invalid input.')\n", (97270, 97294), False, 'from src.utils import funcs\n'), ((97695, 97709), 'datetime.datetime.now', 'datetime.now', ([], {}), 
'()\n', (97707, 97709), False, 'from datetime import datetime, timedelta\n'), ((97997, 98017), 'src.utils.funcs.leapYear', 'funcs.leapYear', (['year'], {}), '(year)\n', (98011, 98017), False, 'from src.utils import funcs\n'), ((98151, 98191), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input."""'], {}), "(None, 'Invalid input.')\n", (98167, 98191), False, 'from src.utils import funcs\n'), ((101427, 101451), 'discord.Embed', 'Embed', ([], {'title': '"""Two Dates"""'}), "(title='Two Dates')\n", (101432, 101451), False, 'from discord import Embed, File, channel\n'), ((103699, 103720), 'src.utils.funcs.formatting', 'funcs.formatting', (['res'], {}), '(res)\n', (103715, 103720), False, 'from src.utils import funcs\n'), ((103771, 103796), 'src.utils.funcs.formatting', 'funcs.formatting', (['"""Today"""'], {}), "('Today')\n", (103787, 103796), False, 'from src.utils import funcs\n'), ((103841, 103866), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (103857, 103866), False, 'from src.utils import funcs\n'), ((103883, 103923), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input."""'], {}), "(None, 'Invalid input.')\n", (103899, 103923), False, 'from src.utils import funcs\n'), ((104267, 104340), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""http://api.open-notify.org/iss-now.json"""'], {'verify': '(False)'}), "('http://api.open-notify.org/iss-now.json', verify=False)\n", (104283, 104340), False, 'from src.utils import funcs\n'), ((104418, 104490), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""http://api.open-notify.org/astros.json"""'], {'verify': '(False)'}), "('http://api.open-notify.org/astros.json', verify=False)\n", (104434, 104490), False, 'from src.utils import funcs\n'), ((105640, 105665), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (105656, 105665), False, 'from src.utils import funcs\n'), ((105682, 105721), 
'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Server error."""'], {}), "(None, 'Server error.')\n", (105698, 105721), False, 'from src.utils import funcs\n'), ((107341, 107366), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (107357, 107366), False, 'from src.utils import funcs\n'), ((107383, 107423), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input."""'], {}), "(None, 'Invalid input.')\n", (107399, 107423), False, 'from src.utils import funcs\n'), ((110289, 110310), 'src.utils.funcs.formatting', 'funcs.formatting', (['res'], {}), '(res)\n', (110305, 110310), False, 'from src.utils import funcs\n'), ((112110, 112117), 'discord.Embed', 'Embed', ([], {}), '()\n', (112115, 112117), False, 'from discord import Embed, File, channel\n'), ((2295, 2314), 'numpy.array', 'array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (2300, 2314), False, 'from numpy import array, max, min, sqrt, squeeze, sum\n'), ((2922, 2976), 'src.utils.funcs.userIDNotBlacklisted', 'funcs.userIDNotBlacklisted', (["reminder['data']['userID']"], {}), "(reminder['data']['userID'])\n", (2948, 2976), False, 'from src.utils import funcs\n'), ((3099, 3167), 'discord.Embed', 'Embed', ([], {'title': '"""⚠️ Reminder"""', 'description': "reminder['data']['reminder']"}), "(title='⚠️ Reminder', description=reminder['data']['reminder'])\n", (3104, 3167), False, 'from discord import Embed, File, channel\n'), ((6088, 6159), 'discord.Embed', 'Embed', ([], {'title': '"""Your Reminders"""', 'description': "reminder['data']['reminder']"}), "(title='Your Reminders', description=reminder['data']['reminder'])\n", (6093, 6159), False, 'from discord import Embed, File, channel\n'), ((6608, 6657), 'discord.Embed', 'Embed', ([], {'title': '"""Your Reminders"""', 'description': '"""None"""'}), "(title='Your Reminders', description='None')\n", (6613, 6657), False, 'from discord import Embed, File, channel\n'), ((17586, 17611), 
'src.utils.funcs.githubRepoPic', 'funcs.githubRepoPic', (['repo'], {}), '(repo)\n', (17605, 17611), False, 'from src.utils import funcs\n'), ((24462, 24527), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['url'], {'headers': "{'User-agent': '*'}", 'params': 'params'}), "(url, headers={'User-agent': '*'}, params=params)\n", (24478, 24527), False, 'from src.utils import funcs\n'), ((27468, 27557), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["(realdepart + data['airport']['origin']['timezone']['offset'])"], {}), "(realdepart + data['airport']['origin']['timezone'][\n 'offset'])\n", (27490, 27557), False, 'from datetime import datetime, timedelta\n'), ((27586, 27680), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["(realarrive + data['airport']['destination']['timezone']['offset'])"], {}), "(realarrive + data['airport']['destination'][\n 'timezone']['offset'])\n", (27608, 27680), False, 'from datetime import datetime, timedelta\n'), ((27709, 27794), 'src.utils.funcs.dateBirthday', 'funcs.dateBirthday', (['realdepart.day', 'realdepart.month', 'realdepart.year'], {'noBD': '(True)'}), '(realdepart.day, realdepart.month, realdepart.year, noBD=True\n )\n', (27727, 27794), False, 'from src.utils import funcs\n'), ((29724, 29749), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (29740, 29749), False, 'from src.utils import funcs\n'), ((29770, 29827), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Unknown flight or server error."""'], {}), "(None, 'Unknown flight or server error.')\n", (29786, 29827), False, 'from src.utils import funcs\n'), ((33442, 33477), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['g.translate', 'text'], {}), '(g.translate, text)\n', (33458, 33477), False, 'from src.utils import funcs\n'), ((38861, 38886), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (38877, 38886), False, 'from src.utils import funcs\n'), ((38907, 
38963), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input or server error."""'], {}), "(None, 'Invalid input or server error.')\n", (38923, 38963), False, 'from src.utils import funcs\n'), ((44159, 44222), 'src.utils.funcs.timeDifferenceStr', 'funcs.timeDifferenceStr', (["i['times']['primary_t']", '(0)'], {'noStr': '(True)'}), "(i['times']['primary_t'], 0, noStr=True)\n", (44182, 44222), False, 'from src.utils import funcs\n'), ((46528, 46615), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""http://api.urbandictionary.com/v0/define"""'], {'params': "{'term': term}"}), "('http://api.urbandictionary.com/v0/define', params={'term':\n term})\n", (46544, 46615), False, 'from src.utils import funcs\n'), ((47259, 47299), 'src.utils.funcs.timeStrToDatetime', 'funcs.timeStrToDatetime', (["c['written_on']"], {}), "(c['written_on'])\n", (47282, 47299), False, 'from src.utils import funcs\n'), ((47324, 47352), 'discord.Embed', 'Embed', ([], {'description': 'permalink'}), '(description=permalink)\n', (47329, 47352), False, 'from discord import Embed, File, channel\n'), ((48821, 48846), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (48837, 48846), False, 'from src.utils import funcs\n'), ((49243, 49356), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""https://api.genius.com/search"""'], {'params': "{'q': keywords, 'access_token': config.geniusToken}"}), "('https://api.genius.com/search', params={'q': keywords,\n 'access_token': config.geniusToken})\n", (49259, 49356), False, 'from src.utils import funcs\n'), ((52657, 52679), 'src.utils.funcs.decodeQR', 'funcs.decodeQR', (['qrlink'], {}), '(qrlink)\n', (52671, 52679), False, 'from src.utils import funcs\n'), ((52798, 52886), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Cannot detect QR code. Maybe try making the image clearer?"""'], {}), "(None,\n 'Cannot detect QR code. 
Maybe try making the image clearer?')\n", (52814, 52886), False, 'from src.utils import funcs\n'), ((52935, 52960), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (52951, 52960), False, 'from src.utils import funcs\n'), ((58290, 58326), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['timestamp'], {}), '(timestamp)\n', (58315, 58326), False, 'from datetime import datetime, timedelta\n'), ((59949, 60037), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['f"""https://api.dictionaryapi.dev/api/v2/entries/{langcode}/{word}"""'], {}), "(\n f'https://api.dictionaryapi.dev/api/v2/entries/{langcode}/{word}')\n", (59965, 60037), False, 'from src.utils import funcs\n'), ((60841, 60866), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (60857, 60866), False, 'from src.utils import funcs\n'), ((60887, 60926), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Unknown word."""'], {}), "(None, 'Unknown word.')\n", (60903, 60926), False, 'from src.utils import funcs\n'), ((61995, 62082), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['"""NSFW/Over 18!"""', '"""Please view this community in an NSFW channel."""'], {}), "('NSFW/Over 18!',\n 'Please view this community in an NSFW channel.')\n", (62011, 62082), False, 'from src.utils import funcs\n'), ((62561, 62713), 'discord.Embed', 'Embed', ([], {'description': "(f'https://www.reddit.com/r/{subreddit.display_name}' + ' ([Old Reddit](' +\n f'https://old.reddit.com/r/{subreddit.display_name}))')"}), "(description=f'https://www.reddit.com/r/{subreddit.display_name}' +\n ' ([Old Reddit](' + f'https://old.reddit.com/r/{subreddit.display_name}))')\n", (62566, 62713), False, 'from discord import Embed, File, channel\n'), ((63052, 63100), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['subreddit.created_utc'], {}), '(subreddit.created_utc)\n', (63077, 63100), False, 'from datetime import datetime, 
timedelta\n'), ((68050, 68142), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['"""Invalid input!"""', '"""Please use `r/"subreddit name"` or `u/"username"`."""'], {}), '(\'Invalid input!\',\n \'Please use `r/"subreddit name"` or `u/"username"`.\')\n', (68066, 68142), False, 'from src.utils import funcs\n'), ((72768, 72804), 'src.utils.funcs.readTxtAttachment', 'funcs.readTxtAttachment', (['ctx.message'], {}), '(ctx.message)\n', (72791, 72804), False, 'from src.utils import funcs\n'), ((85946, 86019), 'src.utils.funcs.getRequest', 'funcs.getRequest', (["('https://api.altmetric.com/v1/doi/' + doi)"], {'verify': '(False)'}), "('https://api.altmetric.com/v1/doi/' + doi, verify=False)\n", (85962, 86019), False, 'from src.utils import funcs\n'), ((86164, 86185), 'src.utils.funcs.formatting', 'funcs.formatting', (['res'], {}), '(res)\n', (86180, 86185), False, 'from src.utils import funcs\n'), ((89160, 89238), 'src.utils.funcs.getRequest', 'funcs.getRequest', (["('https://metrics-api.dimensions.ai/doi/' + doi)"], {'verify': '(False)'}), "('https://metrics-api.dimensions.ai/doi/' + doi, verify=False)\n", (89176, 89238), False, 'from src.utils import funcs\n'), ((95762, 95788), 'src.utils.funcs.getZodiacInfo', 'funcs.getZodiacInfo', (['month'], {}), '(month)\n', (95781, 95788), False, 'from src.utils import funcs\n'), ((96514, 96544), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month'], {}), '(month)\n', (96537, 96544), False, 'from src.utils import funcs\n'), ((99290, 99306), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (99304, 99306), False, 'from datetime import datetime, timedelta\n'), ((99309, 99332), 'datetime.timedelta', 'timedelta', ([], {'days': 'day1int'}), '(days=day1int)\n', (99318, 99332), False, 'from datetime import datetime, timedelta\n'), ((104551, 104573), 'datetime.datetime', 'datetime', (['(1998)', '(11)', '(20)'], {}), '(1998, 11, 20)\n', (104559, 104573), False, 'from datetime import datetime, 
timedelta\n'), ((105013, 105058), 'src.utils.funcs.dateBirthday', 'funcs.dateBirthday', (['dt.day', 'dt.month', 'dt.year'], {}), '(dt.day, dt.month, dt.year)\n', (105031, 105058), False, 'from src.utils import funcs\n'), ((106481, 106504), 'src.utils.funcs.noteFinder', 'funcs.noteFinder', (['note1'], {}), '(note1)\n', (106497, 106504), False, 'from src.utils import funcs\n'), ((106506, 106529), 'src.utils.funcs.noteFinder', 'funcs.noteFinder', (['note2'], {}), '(note2)\n', (106522, 106529), False, 'from src.utils import funcs\n'), ((111193, 111331), 'src.utils.funcs.newButtonView', 'funcs.newButtonView', (['(2)'], {'label': '"""DuckDuckGo"""', 'url': 'f"""https://www.duckduckgo.com/?{param}"""', 'emoji': "self.client.emoji['ddg']", 'view': 'view'}), "(2, label='DuckDuckGo', url=\n f'https://www.duckduckgo.com/?{param}', emoji=self.client.emoji['ddg'],\n view=view)\n", (111212, 111331), False, 'from src.utils import funcs\n'), ((111969, 112040), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['"""http://api.wolframalpha.com/v2/query"""'], {'params': 'params'}), "('http://api.wolframalpha.com/v2/query', params=params)\n", (111985, 112040), False, 'from src.utils import funcs\n'), ((113765, 113790), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (113781, 113790), False, 'from src.utils import funcs\n'), ((2904, 2910), 'time.time', 'time', ([], {}), '()\n', (2908, 2910), False, 'from time import gmtime, mktime, time\n'), ((4319, 4460), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', 'f"""You must specify a reminder ID! See `{self.client.command_prefix}reminders` for a list of your reminders."""'], {}), "(None,\n f'You must specify a reminder ID! See `{self.client.command_prefix}reminders` for a list of your reminders.'\n )\n", (4335, 4460), False, 'from src.utils import funcs\n'), ((5031, 5161), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', 'f"""Unknown reminder ID. 
See `{self.client.command_prefix}reminders` for a list of your reminders."""'], {}), "(None,\n f'Unknown reminder ID. See `{self.client.command_prefix}reminders` for a list of your reminders.'\n )\n", (5047, 5161), False, 'from src.utils import funcs\n'), ((5682, 5731), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Please leave a message!"""'], {}), "(None, 'Please leave a message!')\n", (5698, 5731), False, 'from src.utils import funcs\n'), ((8358, 8412), 'src.utils.funcs.timeDifferenceStr', 'funcs.timeDifferenceStr', (["reminder['data']['time']", 'now'], {}), "(reminder['data']['time'], now)\n", (8381, 8412), False, 'from src.utils import funcs\n'), ((12730, 12793), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Title must be 100 characters or less."""'], {}), "(None, 'Title must be 100 characters or less.')\n", (12746, 12793), False, 'from src.utils import funcs\n'), ((13195, 13231), 'plotly.graph_objects.Pie', 'go.Pie', ([], {'labels': 'labels', 'values': 'values'}), '(labels=labels, values=values)\n', (13201, 13231), True, 'from plotly import graph_objects as go\n'), ((13945, 14008), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Title must be 100 characters or less."""'], {}), "(None, 'Title must be 100 characters or less.')\n", (13961, 14008), False, 'from src.utils import funcs\n'), ((14410, 14440), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'labels', 'y': 'values'}), '(x=labels, y=values)\n', (14420, 14440), True, 'from plotly import graph_objects as go\n'), ((15257, 15320), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Title must be 100 characters or less."""'], {}), "(None, 'Title must be 100 characters or less.')\n", (15273, 15320), False, 'from src.utils import funcs\n'), ((15722, 15748), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'labels', 'y': 'values'}), '(x=labels, y=values)\n', (15728, 15748), True, 'from plotly import graph_objects as go\n'), ((31708, 
31745), 'src.utils.funcs.degreesToDirection', 'funcs.degreesToDirection', (['winddegrees'], {}), '(winddegrees)\n', (31732, 31745), False, 'from src.utils import funcs\n'), ((33537, 33561), 'src.utils.funcs.formatting', 'funcs.formatting', (['output'], {}), '(output)\n', (33553, 33561), False, 'from src.utils import funcs\n'), ((41244, 41273), 'src.utils.funcs.timeStr', 'funcs.timeStr', (['d', 'h', 'm', 's', 'ms'], {}), '(d, h, m, s, ms)\n', (41257, 41273), False, 'from src.utils import funcs\n'), ((43109, 43178), 'src.utils.funcs.getRequest', 'funcs.getRequest', (['f"""https://www.speedrun.com/api/v1/categories/{cat}"""'], {}), "(f'https://www.speedrun.com/api/v1/categories/{cat}')\n", (43125, 43178), False, 'from src.utils import funcs\n'), ((43410, 43471), 'src.utils.funcs.getRequest', 'funcs.getRequest', (["queuedata['pagination']['links'][-1]['uri']"], {}), "(queuedata['pagination']['links'][-1]['uri'])\n", (43426, 43471), False, 'from src.utils import funcs\n'), ((44933, 44958), 'discord.Embed', 'Embed', ([], {'description': 'output'}), '(description=output)\n', (44938, 44958), False, 'from discord import Embed, File, channel\n'), ((46429, 46467), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Empty input."""'], {}), "(None, 'Empty input.')\n", (46445, 46467), False, 'from src.utils import funcs\n'), ((49799, 49825), 'lyricsgenius.Genius', 'Genius', (['config.geniusToken'], {}), '(config.geniusToken)\n', (49805, 49825), False, 'from lyricsgenius import Genius\n'), ((50473, 50513), 'src.utils.page_buttons.PageButtons', 'PageButtons', (['ctx', 'self.client', 'm', 'embeds'], {}), '(ctx, self.client, m, embeds)\n', (50484, 50513), False, 'from src.utils.page_buttons import PageButtons\n'), ((53654, 53750), 'discord.Embed', 'Embed', ([], {'title': '"""Please select a language below or input `quit` to quit..."""', 'description': 'output'}), "(title='Please select a language below or input `quit` to quit...',\n description=output)\n", (53659, 
53750), False, 'from discord import Embed, File, channel\n'), ((56522, 56533), 'json.dumps', 'dumps', (['data'], {}), '(data)\n', (56527, 56533), False, 'from json import JSONDecodeError, dumps\n'), ((60683, 60707), 'discord.Embed', 'Embed', ([], {'title': 'f""""{word}\\""""'}), '(title=f\'"{word}"\')\n', (60688, 60707), False, 'from discord import Embed, File, channel\n'), ((60746, 60787), 'src.utils.funcs.formatting', 'funcs.formatting', (['output[:-1]'], {'limit': '(1000)'}), '(output[:-1], limit=1000)\n', (60762, 60787), False, 'from src.utils import funcs\n'), ((65075, 65160), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['"""NSFW/Over 18!"""', '"""Please view this profile in an NSFW channel."""'], {}), "('NSFW/Over 18!',\n 'Please view this profile in an NSFW channel.')\n", (65091, 65160), False, 'from src.utils import funcs\n'), ((65203, 65343), 'discord.Embed', 'Embed', ([], {'description': "(f'https://www.reddit.com/user/{redditor.name}' + ' ([Old Reddit](' +\n f'https://old.reddit.com/user/{redditor.name}))')"}), "(description=f'https://www.reddit.com/user/{redditor.name}' +\n ' ([Old Reddit](' + f'https://old.reddit.com/user/{redditor.name}))')\n", (65208, 65343), False, 'from discord import Embed, File, channel\n'), ((71346, 71371), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (71362, 71371), False, 'from src.utils import funcs\n'), ((73221, 73239), 'PyPDF2.PdfFileReader', 'PdfFileReader', (['pdf'], {}), '(pdf)\n', (73234, 73239), False, 'from PyPDF2 import PdfFileReader\n'), ((73716, 73769), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Cannot process empty input."""'], {}), "(None, 'Cannot process empty input.')\n", (73732, 73769), False, 'from src.utils import funcs\n'), ((84492, 84562), 'src.utils.funcs.replaceCharacters', 'funcs.replaceCharacters', (['doi', "['https://doi.org/', 'doi:', 'doi.org/']"], {}), "(doi, ['https://doi.org/', 'doi:', 'doi.org/'])\n", (84515, 84562), False, 
'from src.utils import funcs\n'), ((85802, 85823), 'src.utils.funcs.formatting', 'funcs.formatting', (['res'], {}), '(res)\n', (85818, 85823), False, 'from src.utils import funcs\n'), ((96700, 96725), 'src.utils.funcs.valueToOrdinal', 'funcs.valueToOrdinal', (['day'], {}), '(day)\n', (96720, 96725), False, 'from src.utils import funcs\n'), ((97929, 97960), 'src.utils.funcs.yearToChineseZodiac', 'funcs.yearToChineseZodiac', (['year'], {}), '(year)\n', (97954, 97960), False, 'from src.utils import funcs\n'), ((99368, 99382), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99380, 99382), False, 'from datetime import datetime, timedelta\n'), ((99420, 99434), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99432, 99434), False, 'from datetime import datetime, timedelta\n'), ((99472, 99486), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99484, 99486), False, 'from datetime import datetime, timedelta\n'), ((99741, 99772), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month2'], {}), '(month2)\n', (99764, 99772), False, 'from src.utils import funcs\n'), ((99866, 99880), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99878, 99880), False, 'from datetime import datetime, timedelta\n'), ((99916, 99930), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99928, 99930), False, 'from datetime import datetime, timedelta\n'), ((99966, 99980), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (99978, 99980), False, 'from datetime import datetime, timedelta\n'), ((100229, 100259), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month'], {}), '(month)\n', (100252, 100259), False, 'from src.utils import funcs\n'), ((100528, 100551), 'datetime.timedelta', 'timedelta', ([], {'days': 'day2int'}), '(days=day2int)\n', (100537, 100551), False, 'from datetime import datetime, timedelta\n'), ((108046, 108091), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Not a YouTube 
link."""'], {}), "(None, 'Not a YouTube link.')\n", (108062, 108091), False, 'from src.utils import funcs\n'), ((110687, 110725), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Empty input."""'], {}), "(None, 'Empty input.')\n", (110703, 110725), False, 'from src.utils import funcs\n'), ((111716, 111754), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Empty input."""'], {}), "(None, 'Empty input.')\n", (111732, 111754), False, 'from src.utils import funcs\n'), ((114301, 114350), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""No attachment detected."""'], {}), "(None, 'No attachment detected.')\n", (114317, 114350), False, 'from src.utils import funcs\n'), ((6953, 7000), 'src.utils.page_buttons.PageButtons', 'PageButtons', (['ctx', 'self.client', 'm', 'yourreminders'], {}), '(ctx, self.client, m, yourreminders)\n', (6964, 7000), False, 'from src.utils.page_buttons import PageButtons\n'), ((7755, 7829), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""That value is too big or your input is too long."""'], {}), "(None, 'That value is too big or your input is too long.')\n", (7771, 7829), False, 'from src.utils import funcs\n'), ((12185, 12215), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['values[i]'], {}), '(values[i])\n', (12204, 12215), False, 'from src.utils import funcs\n'), ((30630, 30636), 'time.time', 'time', ([], {}), '()\n', (30634, 30636), False, 'from time import gmtime, mktime, time\n'), ((35947, 36007), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input or unknown currency."""'], {}), "(None, 'Invalid input or unknown currency.')\n", (35963, 36007), False, 'from src.utils import funcs\n'), ((41948, 42003), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Server error or unknown game."""'], {}), "(None, 'Server error or unknown game.')\n", (41964, 42003), False, 'from src.utils import funcs\n'), ((45559, 45603), 
'src.utils.page_buttons.PageButtons', 'PageButtons', (['ctx', 'self.client', 'm', 'outputlist'], {}), '(ctx, self.client, m, outputlist)\n', (45570, 45603), False, 'from src.utils.page_buttons import PageButtons\n'), ((46047, 46102), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Server error or unknown game."""'], {}), "(None, 'Server error or unknown game.')\n", (46063, 46102), False, 'from src.utils import funcs\n'), ((47644, 47684), 'src.utils.funcs.formatting', 'funcs.formatting', (['definition'], {'limit': '(1000)'}), '(definition, limit=1000)\n', (47660, 47684), False, 'from src.utils import funcs\n'), ((48727, 48767), 'src.utils.page_buttons.PageButtons', 'PageButtons', (['ctx', 'self.client', 'm', 'embeds'], {}), '(ctx, self.client, m, embeds)\n', (48738, 48767), False, 'from src.utils.page_buttons import PageButtons\n'), ((50619, 50686), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Server error or song doesn\'t have lyrics."""'], {}), '(None, "Server error or song doesn\'t have lyrics.")\n', (50635, 50686), False, 'from src.utils import funcs\n'), ((52743, 52763), 'src.utils.funcs.formatting', 'funcs.formatting', (['qr'], {}), '(qr)\n', (52759, 52763), False, 'from src.utils import funcs\n'), ((55364, 55395), 'src.utils.funcs.readTxtAttachment', 'funcs.readTxtAttachment', (['option'], {}), '(option)\n', (55387, 55395), False, 'from src.utils import funcs\n'), ((63161, 63206), 'src.utils.funcs.dateBirthday', 'funcs.dateBirthday', (['dt.day', 'dt.month', 'dt.year'], {}), '(dt.day, dt.month, dt.year)\n', (63179, 63206), False, 'from src.utils import funcs\n'), ((65921, 65968), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['redditor.created_utc'], {}), '(redditor.created_utc)\n', (65946, 65968), False, 'from datetime import datetime, timedelta\n'), ((68659, 68678), 'src.utils.funcs.evalMath', 'funcs.evalMath', (['inp'], {}), '(inp)\n', (68673, 68678), False, 'from src.utils import funcs\n'), ((73153, 
73191), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['open', 'filepath', '"""rb"""'], {}), "(open, filepath, 'rb')\n", (73169, 73191), False, 'from src.utils import funcs\n'), ((73511, 73538), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['pdf.close'], {}), '(pdf.close)\n', (73527, 73538), False, 'from src.utils import funcs\n'), ((73599, 73624), 'src.utils.funcs.printError', 'funcs.printError', (['ctx', 'ex'], {}), '(ctx, ex)\n', (73615, 73624), False, 'from src.utils import funcs\n'), ((80739, 80784), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['elementobj.atomic_weight'], {}), '(elementobj.atomic_weight)\n', (80758, 80784), False, 'from src.utils import funcs\n'), ((82403, 82445), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid element."""'], {}), "(None, 'Invalid element.')\n", (82419, 82445), False, 'from src.utils import funcs\n'), ((85070, 85078), 'platform.system', 'system', ([], {}), '()\n', (85076, 85078), False, 'from platform import system\n'), ((89769, 89790), 'src.utils.funcs.formatting', 'funcs.formatting', (['res'], {}), '(res)\n', (89785, 89790), False, 'from src.utils import funcs\n'), ((92417, 92440), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['q1'], {}), '(q1)\n', (92436, 92440), False, 'from src.utils import funcs\n'), ((92582, 92605), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['q3'], {}), '(q3)\n', (92601, 92605), False, 'from src.utils import funcs\n'), ((92672, 92700), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['(q3 - q1)'], {}), '(q3 - q1)\n', (92691, 92700), False, 'from src.utils import funcs\n'), ((96189, 96203), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (96201, 96203), False, 'from datetime import datetime, timedelta\n'), ((96271, 96285), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (96283, 96285), False, 'from datetime import datetime, timedelta\n'), ((96451, 96481), 'src.utils.funcs.monthNameToNumber', 
'funcs.monthNameToNumber', (['month'], {}), '(month)\n', (96474, 96481), False, 'from src.utils import funcs\n'), ((97148, 97170), 'src.utils.funcs.getZodiacInfo', 'funcs.getZodiacInfo', (['z'], {}), '(z)\n', (97167, 97170), False, 'from src.utils import funcs\n'), ((99656, 99687), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month2'], {}), '(month2)\n', (99679, 99687), False, 'from src.utils import funcs\n'), ((100147, 100177), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month'], {}), '(month)\n', (100170, 100177), False, 'from src.utils import funcs\n'), ((101128, 101159), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month2'], {}), '(month2)\n', (101151, 101159), False, 'from src.utils import funcs\n'), ((108491, 108531), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid input."""'], {}), "(None, 'Invalid input.')\n", (108507, 108531), False, 'from src.utils import funcs\n'), ((109395, 109491), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Year must be 1500-2500 inclusive, and age must be 0-100 inclusive."""'], {}), "(None,\n 'Year must be 1500-2500 inclusive, and age must be 0-100 inclusive.')\n", (109411, 109491), False, 'from src.utils import funcs\n'), ((109623, 109662), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid year."""'], {}), "(None, 'Invalid year.')\n", (109639, 109662), False, 'from src.utils import funcs\n'), ((35700, 35734), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['initialamount'], {}), '(initialamount)\n', (35719, 35734), False, 'from src.utils import funcs\n'), ((35798, 35825), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['amount'], {}), '(amount)\n', (35817, 35825), False, 'from src.utils import funcs\n'), ((40976, 41002), 'src.utils.funcs.getRequest', 'funcs.getRequest', (["p['uri']"], {}), "(p['uri'])\n", (40992, 41002), False, 'from src.utils import funcs\n'), ((43745, 43814), 
'src.utils.funcs.getRequest', 'funcs.getRequest', (['f"""https://www.speedrun.com/api/v1/categories/{cat}"""'], {}), "(f'https://www.speedrun.com/api/v1/categories/{cat}')\n", (43761, 43814), False, 'from src.utils import funcs\n'), ((44727, 44756), 'src.utils.funcs.timeStr', 'funcs.timeStr', (['d', 'h', 'm', 's', 'ms'], {}), '(d, h, m, s, ms)\n', (44740, 44756), False, 'from src.utils import funcs\n'), ((46762, 46801), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Unknown term."""'], {}), "(None, 'Unknown term.')\n", (46778, 46801), False, 'from src.utils import funcs\n'), ((47776, 47813), 'src.utils.funcs.formatting', 'funcs.formatting', (['example'], {'limit': '(1000)'}), '(example, limit=1000)\n', (47792, 47813), False, 'from src.utils import funcs\n'), ((49530, 49569), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Unknown song."""'], {}), "(None, 'Unknown song.')\n", (49546, 49569), False, 'from src.utils import funcs\n'), ((57029, 57102), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Code exceeded the maximum allowed running time."""'], {}), "(None, 'Code exceeded the maximum allowed running time.')\n", (57045, 57102), False, 'from src.utils import funcs\n'), ((57896, 57957), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Time zone must be -12-14 inclusive."""'], {}), "(None, 'Time zone must be -12-14 inclusive.')\n", (57912, 57957), False, 'from src.utils import funcs\n'), ((58397, 58441), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid timestamp."""'], {}), "(None, 'Invalid timestamp.')\n", (58413, 58441), False, 'from src.utils import funcs\n'), ((71433, 71447), 'random.choice', 'choice', (['answer'], {}), '(answer)\n', (71439, 71447), False, 'from random import choice\n'), ((73365, 73407), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['reader.getPage', '(page - 1)'], {}), '(reader.getPage, page - 1)\n', (73381, 73407), False, 'from src.utils import 
funcs\n'), ((73446, 73483), 'src.utils.funcs.funcToCoro', 'funcs.funcToCoro', (['pageobj.extractText'], {}), '(pageobj.extractText)\n', (73462, 73483), False, 'from src.utils import funcs\n'), ((81126, 81149), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['ar'], {}), '(ar)\n', (81145, 81149), False, 'from src.utils import funcs\n'), ((81237, 81260), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['en'], {}), '(en)\n', (81256, 81260), False, 'from src.utils import funcs\n'), ((81347, 81370), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['fi'], {}), '(fi)\n', (81366, 81370), False, 'from src.utils import funcs\n'), ((81454, 81477), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['mp'], {}), '(mp)\n', (81473, 81477), False, 'from src.utils import funcs\n'), ((81561, 81584), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['bp'], {}), '(bp)\n', (81580, 81584), False, 'from src.utils import funcs\n'), ((87022, 87042), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (87030, 87042), False, 'from datetime import datetime, timedelta\n'), ((87045, 87089), 'datetime.timedelta', 'timedelta', ([], {'seconds': "altmetric['published_on']"}), "(seconds=altmetric['published_on'])\n", (87054, 87089), False, 'from datetime import datetime, timedelta\n'), ((87291, 87325), 'src.utils.funcs.monthNumberToName', 'funcs.monthNumberToName', (['pub.month'], {}), '(pub.month)\n', (87314, 87325), False, 'from src.utils import funcs\n'), ((92175, 92185), 'statistics.mean', 'mean', (['data'], {}), '(data)\n', (92179, 92185), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((92519, 92531), 'statistics.median', 'median', (['data'], {}), '(data)\n', (92525, 92531), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((92876, 92888), 'statistics.pstdev', 'pstdev', (['data'], {}), '(data)\n', (92882, 92888), False, 'from statistics import mean, median, mode, pstdev, 
stdev\n'), ((92966, 92977), 'statistics.stdev', 'stdev', (['data'], {}), '(data)\n', (92971, 92977), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((93059, 93068), 'numpy.min', 'min', (['data'], {}), '(data)\n', (93062, 93068), False, 'from numpy import array, max, min, sqrt, squeeze, sum\n'), ((93150, 93159), 'numpy.max', 'max', (['data'], {}), '(data)\n', (93153, 93159), False, 'from numpy import array, max, min, sqrt, squeeze, sum\n'), ((93231, 93240), 'numpy.sum', 'sum', (['data'], {}), '(data)\n', (93234, 93240), False, 'from numpy import array, max, min, sqrt, squeeze, sum\n'), ((95110, 95132), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['b'], {}), '(b)\n', (95129, 95132), False, 'from src.utils import funcs\n'), ((95141, 95165), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['lcm'], {}), '(lcm)\n', (95160, 95165), False, 'from src.utils import funcs\n'), ((100652, 100666), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (100664, 100666), False, 'from datetime import datetime, timedelta\n'), ((100745, 100759), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (100757, 100759), False, 'from datetime import datetime, timedelta\n'), ((100839, 100853), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (100851, 100853), False, 'from datetime import datetime, timedelta\n'), ((101039, 101070), 'src.utils.funcs.monthNameToNumber', 'funcs.monthNameToNumber', (['month2'], {}), '(month2)\n', (101062, 101070), False, 'from src.utils import funcs\n'), ((101703, 101745), 'src.utils.funcs.monthNumberToName', 'funcs.monthNumberToName', (['dateobjs[0].month'], {}), '(dateobjs[0].month)\n', (101726, 101745), False, 'from src.utils import funcs\n'), ((102079, 102121), 'src.utils.funcs.monthNumberToName', 'funcs.monthNumberToName', (['dateobjs[1].month'], {}), '(dateobjs[1].month)\n', (102102, 102121), False, 'from src.utils import funcs\n'), ((113306, 113346), 'src.utils.page_buttons.PageButtons', 
'PageButtons', (['ctx', 'self.client', 'm', 'embeds'], {}), '(ctx, self.client, m, embeds)\n', (113317, 113346), False, 'from src.utils.page_buttons import PageButtons\n'), ((113836, 113898), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Server error or query limit reached."""'], {}), "(None, 'Server error or query limit reached.')\n", (113852, 113898), False, 'from src.utils import funcs\n'), ((6453, 6488), 'src.utils.funcs.timeDifferenceStr', 'funcs.timeDifferenceStr', (['rtime', 'now'], {}), '(rtime, now)\n', (6476, 6488), False, 'from src.utils import funcs\n'), ((9381, 9418), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""No entries."""'], {}), "(None, 'No entries.')\n", (9397, 9418), False, 'from src.utils import funcs\n'), ((10521, 10558), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""No entries."""'], {}), "(None, 'No entries.')\n", (10537, 10558), False, 'from src.utils import funcs\n'), ((26606, 26640), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['realarrive'], {}), '(realarrive)\n', (26628, 26640), False, 'from datetime import datetime, timedelta\n'), ((26643, 26660), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (26658, 26660), False, 'from datetime import datetime, timedelta\n'), ((26904, 26938), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['realarrive'], {}), '(realarrive)\n', (26926, 26938), False, 'from datetime import datetime, timedelta\n'), ((26941, 26975), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['realdepart'], {}), '(realdepart)\n', (26963, 26975), False, 'from datetime import datetime, timedelta\n'), ((27231, 27248), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (27246, 27248), False, 'from datetime import datetime, timedelta\n'), ((27251, 27285), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['realdepart'], {}), '(realdepart)\n', (27273, 27285), False, 'from datetime import 
datetime, timedelta\n'), ((37073, 37115), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid article."""'], {}), "(None, 'Invalid article.')\n", (37089, 37115), False, 'from src.utils import funcs\n'), ((45323, 45350), 'src.utils.funcs.strictRounding', 'funcs.strictRounding', (['total'], {}), '(total)\n', (45343, 45350), False, 'from src.utils import funcs\n'), ((54437, 54480), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid language."""'], {}), "(None, 'Invalid language.')\n", (54453, 54480), False, 'from src.utils import funcs\n'), ((56926, 56950), 'src.utils.funcs.formatting', 'funcs.formatting', (['stderr'], {}), '(stderr)\n', (56942, 56950), False, 'from src.utils import funcs\n'), ((66029, 66074), 'src.utils.funcs.dateBirthday', 'funcs.dateBirthday', (['dt.day', 'dt.month', 'dt.year'], {}), '(dt.day, dt.month, dt.year)\n', (66047, 66074), False, 'from src.utils import funcs\n'), ((79526, 79568), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid element."""'], {}), "(None, 'Invalid element.')\n", (79542, 79568), False, 'from src.utils import funcs\n'), ((92279, 92289), 'statistics.mode', 'mode', (['data'], {}), '(data)\n', (92283, 92289), False, 'from statistics import mean, median, mode, pstdev, stdev\n'), ((92773, 92782), 'numpy.max', 'max', (['data'], {}), '(data)\n', (92776, 92782), False, 'from numpy import array, max, min, sqrt, squeeze, sum\n'), ((92785, 92794), 'numpy.min', 'min', (['data'], {}), '(data)\n', (92788, 92794), False, 'from numpy import array, max, min, sqrt, squeeze, sum\n'), ((95047, 95069), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['a'], {}), '(a)\n', (95066, 95069), False, 'from src.utils import funcs\n'), ((112601, 112658), 'src.utils.funcs.formatting', 'funcs.formatting', (["c['subpods'][0]['plaintext']"], {'limit': '(200)'}), "(c['subpods'][0]['plaintext'], limit=200)\n", (112617, 112658), False, 'from src.utils import funcs\n'), ((113603, 113659), 
'src.utils.funcs.formatting', 'funcs.formatting', (['"""Check your spelling, and use English"""'], {}), "('Check your spelling, and use English')\n", (113619, 113659), False, 'from src.utils import funcs\n'), ((7603, 7656), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', 'f"""Invalid input: `{minutes}`"""'], {}), "(None, f'Invalid input: `{minutes}`')\n", (7619, 7656), False, 'from src.utils import funcs\n'), ((8434, 8485), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (["reminder['data']['time']"], {}), "(reminder['data']['time'])\n", (8459, 8485), False, 'from datetime import datetime, timedelta\n'), ((10792, 10832), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid value."""'], {}), "(None, 'Invalid value.')\n", (10808, 10832), False, 'from src.utils import funcs\n'), ((37850, 37892), 'src.utils.funcs.errorEmbed', 'funcs.errorEmbed', (['None', '"""Invalid article."""'], {}), "(None, 'Invalid article.')\n", (37866, 37892), False, 'from src.utils import funcs\n'), ((56774, 56816), 'src.utils.funcs.formatting', 'funcs.formatting', (["(data['stdout'] or 'None')"], {}), "(data['stdout'] or 'None')\n", (56790, 56816), False, 'from src.utils import funcs\n'), ((67735, 67777), 'src.utils.funcs.formatting', 'funcs.formatting', (['comment.body'], {'limit': '(1000)'}), '(comment.body, limit=1000)\n', (67751, 67777), False, 'from src.utils import funcs\n'), ((71983, 72002), 'src.utils.funcs.evalMath', 'funcs.evalMath', (['val'], {}), '(val)\n', (71997, 72002), False, 'from src.utils import funcs\n'), ((94878, 94900), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['a'], {}), '(a)\n', (94897, 94900), False, 'from src.utils import funcs\n'), ((94941, 94963), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['b'], {}), '(b)\n', (94960, 94963), False, 'from src.utils import funcs\n'), ((94972, 94996), 'src.utils.funcs.removeDotZero', 'funcs.removeDotZero', (['hcf'], {}), '(hcf)\n', (94991, 94996), False, 
'from src.utils import funcs\n'), ((102502, 102541), 'src.utils.funcs.monthNumberToName', 'funcs.monthNumberToName', (['dateobj2.month'], {}), '(dateobj2.month)\n', (102525, 102541), False, 'from src.utils import funcs\n'), ((102766, 102804), 'src.utils.funcs.monthNumberToName', 'funcs.monthNumberToName', (['dateobj.month'], {}), '(dateobj.month)\n', (102789, 102804), False, 'from src.utils import funcs\n'), ((33263, 33305), 'deep_translator.constants.GOOGLE_CODES_TO_LANGUAGES.keys', 'constants.GOOGLE_CODES_TO_LANGUAGES.keys', ([], {}), '()\n', (33303, 33305), False, 'from deep_translator import GoogleTranslator, constants\n'), ((71202, 71247), 'src.utils.funcs.getResource', 'funcs.getResource', (['self.name', '"""copypasta.txt"""'], {}), "(self.name, 'copypasta.txt')\n", (71219, 71247), False, 'from src.utils import funcs\n'), ((3226, 3258), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['rtime'], {}), '(rtime)\n', (3251, 3258), False, 'from datetime import datetime, timedelta\n'), ((6336, 6368), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['rtime'], {}), '(rtime)\n', (6361, 6368), False, 'from datetime import datetime, timedelta\n')] |
#!/usr/bin/env python3
# Ping every host in host_list once and dump the raw (stdout, stderr) tuple.
import subprocess

host_list = ['www.cisco.com', 'www.google.com', '192.168.2.1']


def _ping_once(target):
    """Launch a single-count ping against *target*, capturing stdout."""
    return subprocess.Popen(['ping', '-c', '1', target], stdout=subprocess.PIPE)


for h in host_list:
    print('*' * 10)
    print('host: ' + h)
    proc = _ping_once(h)
    print(proc.communicate())
| [
"subprocess.Popen"
] | [((185, 252), 'subprocess.Popen', 'subprocess.Popen', (["['ping', '-c', '1', host]"], {'stdout': 'subprocess.PIPE'}), "(['ping', '-c', '1', host], stdout=subprocess.PIPE)\n", (201, 252), False, 'import subprocess\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2020/6/2 19:16
# @Author : dorom
# @File : indexPage.py
# @Software: PyCharm
from utils.filePath import FilePath
from utils.logger import MyLogger
from utils.readYaml import ReadYaml
from driverOption.baseApi import BaseApi
from errorExecption.eleNotFound import EleNotFound
class IndexPage(object):
    """Home page object: loads element locators from YAML and exposes
    the bottom-navigation tabs; subclasses override chooseNavigation()."""

    def __init__(self, driver):
        self.driver = driver
        self.base = BaseApi(self.driver)
        self.readyaml = ReadYaml()
        # All element locators come from the Android index-page YAML file.
        self.indexPage = self.readyaml.getStream(FilePath.androidIndexPage)
        self.explore = self.readyaml.getNode(self.indexPage, "explore")
        self.classTab = self.readyaml.getNode(self.indexPage, "class")
        self.reconnetButton = self.readyaml.getNode(self.indexPage, "reconnetButton")
        self.questionBank = self.readyaml.getNode(self.indexPage, "questionBank")
        self.myStudy = self.readyaml.getNode(self.indexPage, "myStudy")
        self.mySelf = self.readyaml.getNode(self.indexPage, "mySelf")
        # Removed leftover debug print of self.mySelf.

    def comeNATIVEAPP(self):
        """Switch to the native app context."""
        self.base.switchContext("NATIVE_APP")

    def chooseNavigation(self):
        """Select this page's bottom-navigation tab; overridden in subclasses."""
        pass
class Explore(IndexPage):
    """Bottom-navigation tab: Explore."""

    def chooseNavigation(self):
        """Tap the Explore tab; raise EleNotFound when the button is missing."""
        if not self.base.checkElement(self.explore):
            raise EleNotFound("探索按钮未找到")
        self.base.click(self.explore)
class ClassIm(IndexPage):
    """Bottom-navigation tab: Class."""

    def chooseNavigation(self):
        """Tap the Class tab; raise EleNotFound when the button is missing."""
        if not self.base.checkElement(self.classTab):
            raise EleNotFound("班级按钮未找到")
        self.base.click(self.classTab)
class Learn(IndexPage):
    """Bottom-navigation tab: Study Center."""

    def chooseNavigation(self):
        """Tap the Study Center tab; raise EleNotFound when the button is missing."""
        if not self.base.checkElement(self.myStudy):
            raise EleNotFound("学习中心按钮未找到")
        self.base.click(self.myStudy)
class QuestionBank(IndexPage):
    """Bottom-navigation tab: Question Bank."""

    def chooseNavigation(self):
        """Tap the Question Bank tab; raise EleNotFound when the button is missing."""
        if not self.base.checkElement(self.questionBank):
            raise EleNotFound("题库按钮未找到")
        self.base.click(self.questionBank)
class MySelf(IndexPage):
    """Bottom-navigation tab: Me (profile)."""

    def chooseNavigation(self):
        """Tap the Me tab (waits up to 180); raise EleNotFound when missing."""
        if not self.base.checkElement(self.mySelf, 180):
            raise EleNotFound("我的按钮未找到")
        self.base.click(self.mySelf)
| [
"driverOption.baseApi.BaseApi",
"utils.readYaml.ReadYaml",
"errorExecption.eleNotFound.EleNotFound"
] | [((439, 459), 'driverOption.baseApi.BaseApi', 'BaseApi', (['self.driver'], {}), '(self.driver)\n', (446, 459), False, 'from driverOption.baseApi import BaseApi\n'), ((484, 494), 'utils.readYaml.ReadYaml', 'ReadYaml', ([], {}), '()\n', (492, 494), False, 'from utils.readYaml import ReadYaml\n'), ((1373, 1395), 'errorExecption.eleNotFound.EleNotFound', 'EleNotFound', (['"""探索按钮未找到"""'], {}), "('探索按钮未找到')\n", (1384, 1395), False, 'from errorExecption.eleNotFound import EleNotFound\n'), ((1582, 1604), 'errorExecption.eleNotFound.EleNotFound', 'EleNotFound', (['"""班级按钮未找到"""'], {}), "('班级按钮未找到')\n", (1593, 1604), False, 'from errorExecption.eleNotFound import EleNotFound\n'), ((1787, 1811), 'errorExecption.eleNotFound.EleNotFound', 'EleNotFound', (['"""学习中心按钮未找到"""'], {}), "('学习中心按钮未找到')\n", (1798, 1811), False, 'from errorExecption.eleNotFound import EleNotFound\n'), ((2012, 2034), 'errorExecption.eleNotFound.EleNotFound', 'EleNotFound', (['"""题库按钮未找到"""'], {}), "('题库按钮未找到')\n", (2023, 2034), False, 'from errorExecption.eleNotFound import EleNotFound\n'), ((2220, 2242), 'errorExecption.eleNotFound.EleNotFound', 'EleNotFound', (['"""我的按钮未找到"""'], {}), "('我的按钮未找到')\n", (2231, 2242), False, 'from errorExecption.eleNotFound import EleNotFound\n')] |
import time
import os
import psycopg2
import psycopg2.extras
from pyinfraboxutils import get_logger
logger = get_logger('infrabox')
def connect_db():
    """Block until a connection to the InfraBox database succeeds.

    Connection parameters are read from the INFRABOX_DATABASE_* environment
    variables; on failure the attempt is logged and retried every 3 seconds.

    Returns:
        An open psycopg2 connection.
    """
    while True:
        try:
            conn = psycopg2.connect(dbname=os.environ['INFRABOX_DATABASE_DB'],
                                    user=os.environ['INFRABOX_DATABASE_USER'],
                                    password=os.environ['<PASSWORD>'],
                                    host=os.environ['INFRABOX_DATABASE_HOST'],
                                    port=os.environ['INFRABOX_DATABASE_PORT'])
            return conn
        except Exception as e:
            # logger.warn() is a deprecated alias; use warning().
            logger.warning("Could not connect to db: %s", e)
            time.sleep(3)
class DB(object):
    """Thin convenience wrapper around a psycopg2 connection.

    Each execute_* helper opens a short-lived cursor per statement; the
    *_dict variants return rows as plain dicts, the others as DictRows
    (both index and key access).
    """

    def __init__(self, conn):
        self.conn = conn

    def execute_one(self, stmt, args=None):
        """Run *stmt* and return the first row, or the empty result set."""
        r = self.execute_many(stmt, args)
        if not r:
            return r
        return r[0]

    def execute_many(self, stmt, args=None):
        """Run *stmt* and return all rows as DictRows."""
        # 'with' closes the cursor even when execute()/fetchall() raises;
        # the previous explicit close() leaked cursors on errors.
        with self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as c:
            c.execute(stmt, args)
            return c.fetchall()

    def execute_one_dict(self, stmt, args=None):
        """Run *stmt* and return the first row as a dict, or the empty result set."""
        r = self.execute_many_dict(stmt, args)
        if not r:
            return r
        return r[0]

    def execute_many_dict(self, stmt, args=None):
        """Run *stmt* and return all rows as dicts."""
        with self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as c:
            c.execute(stmt, args)
            return c.fetchall()

    def execute(self, stmt, args=None):
        """Run *stmt* for its side effects; returns nothing."""
        with self.conn.cursor() as c:
            c.execute(stmt, args)

    def commit(self):
        """Commit the current transaction."""
        self.conn.commit()

    def rollback(self):
        """Roll back the current transaction."""
        self.conn.rollback()

    def close(self):
        """Close the underlying connection."""
        self.conn.close()
| [
"psycopg2.connect",
"time.sleep",
"pyinfraboxutils.get_logger"
] | [((110, 132), 'pyinfraboxutils.get_logger', 'get_logger', (['"""infrabox"""'], {}), "('infrabox')\n", (120, 132), False, 'from pyinfraboxutils import get_logger\n'), ((200, 438), 'psycopg2.connect', 'psycopg2.connect', ([], {'dbname': "os.environ['INFRABOX_DATABASE_DB']", 'user': "os.environ['INFRABOX_DATABASE_USER']", 'password': "os.environ['<PASSWORD>']", 'host': "os.environ['INFRABOX_DATABASE_HOST']", 'port': "os.environ['INFRABOX_DATABASE_PORT']"}), "(dbname=os.environ['INFRABOX_DATABASE_DB'], user=os.environ\n ['INFRABOX_DATABASE_USER'], password=os.environ['<PASSWORD>'], host=os.\n environ['INFRABOX_DATABASE_HOST'], port=os.environ[\n 'INFRABOX_DATABASE_PORT'])\n", (216, 438), False, 'import psycopg2\n'), ((693, 706), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (703, 706), False, 'import time\n')] |
import argparse
import multiprocessing
import os
import pickle
import subprocess
import sys
from random import randint
from time import sleep
import georasters as gr
import numpy as np
from osgeo import gdal, osr
def save_img(data, geotransform, proj, outPath, noDataValue=np.nan, split=False):
    """Write *data* to a GeoTIFF (or an in-memory GDAL dataset).

    Args:
        data: 2-D array (single band) or 3-D array (h, w, bands).
        geotransform: GDAL geotransform (6-element sequence).
        proj: projection as a WKT string.
        outPath: destination path, or "MEM" for an in-memory dataset.
        noDataValue: value recorded as each band's nodata marker.
        split: for 3-D input, write one single-band file per band
            (named "<band>_<basename>") instead of one multi-band file.

    Returns:
        List of the path(s) written.
    """
    # In-memory driver only for the literal path "MEM"; GeoTIFF otherwise.
    driver = gdal.GetDriverByName("MEM" if outPath == "MEM" else "GTiff")
    options = ["COMPRESS=LZW", "NUM_THREADS=ALL_CPUS", "BIGTIFF=YES"]
    shape = data.shape

    def _write(path, bands):
        # bands: list of 2-D arrays, one per output raster band.
        ds = driver.Create(path, shape[1], shape[0], len(bands),
                           gdal.GDT_Float32, options)
        ds.SetProjection(proj)
        ds.SetGeoTransform(geotransform)
        for n, band in enumerate(bands, start=1):
            ds.GetRasterBand(n).WriteArray(band)
            ds.GetRasterBand(n).SetNoDataValue(noDataValue)
        ds.FlushCache()
        ds = None  # force GDAL to close the dataset

    paths = []
    if len(shape) > 2 and split:
        for i in range(shape[2]):
            # Same "<band>_<basename>" naming as apply_band_to_filepath.
            path = apply_band_to_filepath(outPath, i + 1)
            _write(path, [data[:, :, i]])
            paths.append(path)
    elif len(shape) > 2:
        _write(outPath, [data[:, :, i] for i in range(shape[2])])
        paths.append(outPath)
    else:
        _write(outPath, [data])
        paths.append(outPath)
    return paths
def get_raster_info(raster_path):
    """Return (projection WKT, geotransform, nodata, band count) for a raster."""
    info = gdal.Info(raster_path, options="-json")
    bands = info["bands"]
    prj = info["coordinateSystem"]["wkt"]
    geoinformation = info["geoTransform"]
    # Fall back to -9999 when the first band declares no nodata value.
    nodata = bands[0].get("noDataValue", -9999)
    return prj, geoinformation, nodata, len(bands)
def apply_band_to_filepath(path, band_num):
    """Prefix the file name of *path* with "<band_num>_", keeping its directory."""
    directory, filename = os.path.dirname(path), os.path.basename(path)
    return os.path.join(directory, "{}_{}".format(band_num, filename))
def get_band_from_filepath(path):
    """Return the band prefix (text before the first "_") of *path*'s file name."""
    return os.path.basename(path).partition("_")[0]
def get_band_raster(input_raster, split_file, current_band):
    """Extract band *current_band* of *input_raster* into its own GeoTIFF.

    The output path is *split_file* prefixed with the band number.
    """
    # Random stagger so concurrent workers do not all start at once.
    sleep(randint(1, 20))
    out_path = apply_band_to_filepath(split_file, current_band)
    cmd = [
        "gdal_translate",
        "-b", str(current_band),
        "-co", "COMPRESS=LZW",
        "-co", "NUM_THREADS=ALL_CPUS",
        input_raster,
        out_path,
    ]
    subprocess.run(cmd)
    print("band {} is split".format(current_band))
    return out_path
def warp_raster_func(warped_file, split_file_path, kwargs, dest_prj):
    """Reproject one single-band raster to *dest_prj* with gdal.Warp.

    The band number is recovered from *split_file_path*'s name prefix and
    re-applied to *warped_file* so outputs stay matched per band.
    """
    sleep(randint(1, 20))  # stagger concurrent workers
    band_n = get_band_from_filepath(split_file_path)
    out_path = apply_band_to_filepath(warped_file, band_n)
    kwargs["dstSRS"] = osr.SpatialReference(wkt=dest_prj)
    gdal.Warp(out_path, split_file_path, **kwargs)
    print("band {} is warped".format(band_n))
    return out_path
def align_raster_func(path, alignraster, pickle_filename):
    """Align one band raster to *alignraster* and pickle the result.

    The pickle holds [aligned array, source path, geotransform] and is
    written to *pickle_filename* prefixed with the band number.
    """
    sleep(randint(1, 20))  # stagger concurrent workers
    out_filename = apply_band_to_filepath(
        pickle_filename, get_band_from_filepath(path)
    )
    aligned, _, geo_t = gr.align_rasters(path, alignraster, how=np.mean)
    payload = [np.array(aligned), path, geo_t]
    with open(out_filename, "wb") as f:
        pickle.dump(payload, f)
    print("band {} is aligned".format(out_filename))
    return out_filename
def save_raster_func(
    alignraster_prj, dst_filename, alignraster_nodata, pickle_filename
):
    """Load one pickled aligned band and write it back out as a GeoTIFF."""
    sleep(randint(1, 20))  # stagger concurrent workers
    with open(pickle_filename, "rb") as f:
        array, _, geo_t = pickle.load(f)
    dest_name = apply_band_to_filepath(
        dst_filename, get_band_from_filepath(pickle_filename)
    )
    save_img(array, list(geo_t), alignraster_prj, dest_name,
             noDataValue=alignraster_nodata, split=False)
    print("band {} is saved".format(dest_name))
    return dest_name
def align_raster_full(alignraster, input_raster, dst_filename, temp_dir):
    """Split, warp, align and re-save *input_raster* so it matches *alignraster*.

    Band-wise pipeline, parallelised with multiprocessing:
      1. split the input raster into one file per band,
      2. gdal.Warp each band to the alignment raster's CRS / resolution,
      3. georasters-align each warped band and pickle the result,
      4. write every pickled band back out next to *dst_filename*.

    NOTE: the raster band is tracked with an integer prefix before an
    underscore in the file names; this is used for split and warp, while
    align and save use small fixed pool sizes because of RAM limitations.
    """
    # Leave one core free, but never ask Pool for fewer than 1 worker
    # (cpu_count() - 1 is 0 on single-core machines and Pool(0) raises).
    cpu = max(1, multiprocessing.cpu_count() - 1)
    # Gather metadata from both rasters.
    (
        alignraster_prj,
        alignraster_geoinformation,
        alignraster_nodata,
        alignraster_num_bands,
    ) = get_raster_info(alignraster)
    (
        input_raster_prj,
        input_raster_geoinformation,
        input_raster_nodata,
        input_raster_num_bands,
    ) = get_raster_info(input_raster)
    xRes = alignraster_geoinformation[1]
    yRes = alignraster_geoinformation[5]
    # 1) split the input raster into per-band files
    # ('tasks' instead of 'input', which shadowed the builtin)
    split_file = os.path.join(temp_dir, "split_raster.tif")
    tasks = [(input_raster, split_file, n + 1) for n in range(input_raster_num_bands)]
    with multiprocessing.Pool(processes=cpu) as p:
        split_file_paths = p.starmap_async(get_band_raster, tasks).get()
        p.close()
        p.join()
    print("input file is split")
    print(split_file_paths)
    sleep(randint(1, 20))
    # 2) warp each split band onto the alignment raster's grid
    kwargs = {
        "format": "GTiff",
        "xRes": xRes,
        "yRes": yRes,
        "resampleAlg": "lanczos",
        "srcNodata": input_raster_nodata,
        "dstNodata": alignraster_nodata,
        "creationOptions": ["COMPRESS=LZW", "NUM_THREADS=ALL_CPUS"],
    }
    warped_file = os.path.join(temp_dir, "warped_raster.tif")
    tasks = [
        (warped_file, split_file_path, kwargs, alignraster_prj)
        for split_file_path in split_file_paths
    ]
    with multiprocessing.Pool(processes=cpu) as p:
        warped_paths = p.starmap_async(warp_raster_func, tasks).get()
        p.close()
        p.join()
    print("gdal warp has finished")
    print(warped_paths)
    sleep(randint(1, 20))
    # 3) align each warped band (small pool: alignment is RAM hungry)
    aligned_file = os.path.join(temp_dir, "aligned_pickle.pkl")
    tasks = [(warped_path, alignraster, aligned_file) for warped_path in warped_paths]
    with multiprocessing.Pool(processes=3) as p:
        pickle_paths = p.starmap_async(align_raster_func, tasks).get()
        p.close()
        p.join()
    print("align raster has finished")
    print(pickle_paths)
    sleep(randint(1, 20))
    # 4) save every pickled band back out as a GeoTIFF
    tasks = [
        (alignraster_prj, dst_filename, alignraster_nodata, pickle_filename)
        for pickle_filename in pickle_paths
    ]
    with multiprocessing.Pool(processes=4) as p:
        result = p.starmap_async(save_raster_func, tasks).get()
        p.close()
        p.join()
    print("saving rasters has finished")
    print(result)
def get_parser():
    """Build the CLI argument parser for the raster-alignment script."""
    parser = argparse.ArgumentParser(
        description="make the transforms for the icp json in a directory"
    )
    arg_specs = [
        ("--dst_filename", "-d", "Path to save band rasters"),
        ("--raster", "-r", "Path to input raster"),
        ("--alignraster", "-a", "path to raster to align to"),
        ("--temp_dir", "-t", "directory to use for temp files"),
    ]
    for long_flag, short_flag, help_text in arg_specs:
        parser.add_argument(long_flag, short_flag, help=help_text)
    return parser
def _require_path(path, label):
    """Print an error naming *label* and exit(1) when *path* does not exist."""
    if not os.path.exists(path):
        print("Unable to find path to %s: %s" % (label, path))
        sys.exit(1)


def main(rawargs):
    """
    align raster to another raster

    Parameters
    ----------
    dst_filename: str
        Path to save band rasters
    raster: str
        Path to input raster
    alignraster: str
        path to raster to align to
    temp_dir: str
        directory to use for temp files

    Returns
    -------
    None

    Examples
    --------
    python align_raster.py \
        -d "/media/desktop-linux/my_book/testdata/2013_sat.tif" \
        -r "/home/desktop-linux/2013_imagery/clipped_data/clipped_2013_sat.tif" \
        -a "/home/desktop-linux/2013_imagery/clipped_data/clipped_2013_nlcd.tif" \
        -t "/media/desktop-linux/my_book/testdata/working"
    """
    args = get_parser().parse_args(rawargs)
    # Validate all three required paths with a single helper instead of
    # the previous triplicated check/print/exit blocks.
    _require_path(args.raster, "raster")
    _require_path(args.alignraster, "alignraster")
    _require_path(args.temp_dir, "temp_dir")
    align_raster_full(args.alignraster, args.raster, args.dst_filename,
                      args.temp_dir)


if __name__ == "__main__":
    main(sys.argv[1:])
| [
"os.path.exists",
"pickle.dump",
"argparse.ArgumentParser",
"osgeo.gdal.Warp",
"osgeo.osr.SpatialReference",
"os.path.join",
"pickle.load",
"multiprocessing.cpu_count",
"numpy.array",
"os.path.dirname",
"osgeo.gdal.Info",
"multiprocessing.Pool",
"os.path.basename",
"sys.exit",
"osgeo.gda... | [((2477, 2516), 'osgeo.gdal.Info', 'gdal.Info', (['raster_path'], {'options': '"""-json"""'}), "(raster_path, options='-json')\n", (2486, 2516), False, 'from osgeo import gdal, osr\n'), ((3684, 3718), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {'wkt': 'dest_prj'}), '(wkt=dest_prj)\n', (3704, 3718), False, 'from osgeo import gdal, osr\n'), ((3866, 3916), 'osgeo.gdal.Warp', 'gdal.Warp', (['warp_outPath', 'split_file_path'], {}), '(warp_outPath, split_file_path, **kwargs)\n', (3875, 3916), False, 'from osgeo import gdal, osr\n'), ((4228, 4276), 'georasters.align_rasters', 'gr.align_rasters', (['path', 'alignraster'], {'how': 'np.mean'}), '(path, alignraster, how=np.mean)\n', (4244, 4276), True, 'import georasters as gr\n'), ((4303, 4328), 'numpy.array', 'np.array', (['alignedraster_o'], {}), '(alignedraster_o)\n', (4311, 4328), True, 'import numpy as np\n'), ((5828, 5870), 'os.path.join', 'os.path.join', (['temp_dir', '"""split_raster.tif"""'], {}), "(temp_dir, 'split_raster.tif')\n", (5840, 5870), False, 'import os\n'), ((6529, 6572), 'os.path.join', 'os.path.join', (['temp_dir', '"""warped_raster.tif"""'], {}), "(temp_dir, 'warped_raster.tif')\n", (6541, 6572), False, 'import os\n'), ((6997, 7041), 'os.path.join', 'os.path.join', (['temp_dir', '"""aligned_pickle.pkl"""'], {}), "(temp_dir, 'aligned_pickle.pkl')\n", (7009, 7041), False, 'import os\n'), ((7850, 7891), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc'}), '(description=desc)\n', (7873, 7891), False, 'import argparse\n'), ((380, 407), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""MEM"""'], {}), "('MEM')\n", (400, 407), False, 'from osgeo import gdal, osr\n'), ((435, 464), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (455, 464), False, 'from osgeo import gdal, osr\n'), ((2869, 2890), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (2884, 2890), False, 'import 
os\n'), ((3135, 3149), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (3142, 3149), False, 'from random import randint\n'), ((3658, 3672), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (3665, 3672), False, 'from random import randint\n'), ((4058, 4072), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (4065, 4072), False, 'from random import randint\n'), ((4377, 4414), 'pickle.dump', 'pickle.dump', (['[array, path, GeoT_a]', 'f'], {}), '([array, path, GeoT_a], f)\n', (4388, 4414), False, 'import pickle\n'), ((4600, 4614), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (4607, 4614), False, 'from random import randint\n'), ((4674, 4688), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4685, 4688), False, 'import pickle\n'), ((5306, 5333), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (5331, 5333), False, 'import multiprocessing\n'), ((5967, 6002), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'cpu'}), '(processes=cpu)\n', (5987, 6002), False, 'import multiprocessing\n'), ((6189, 6203), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (6196, 6203), False, 'from random import randint\n'), ((6714, 6749), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'cpu'}), '(processes=cpu)\n', (6734, 6749), False, 'import multiprocessing\n'), ((6932, 6946), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (6939, 6946), False, 'from random import randint\n'), ((7138, 7171), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(3)'}), '(processes=3)\n', (7158, 7171), False, 'import multiprocessing\n'), ((7358, 7372), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (7365, 7372), False, 'from random import randint\n'), ((7554, 7587), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(4)'}), '(processes=4)\n', (7574, 7587), False, 'import multiprocessing\n'), 
((9030, 9052), 'os.path.exists', 'os.path.exists', (['raster'], {}), '(raster)\n', (9044, 9052), False, 'import os\n'), ((9127, 9138), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9135, 9138), False, 'import sys\n'), ((9186, 9213), 'os.path.exists', 'os.path.exists', (['alignraster'], {}), '(alignraster)\n', (9200, 9213), False, 'import os\n'), ((9298, 9309), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9306, 9309), False, 'import sys\n'), ((9351, 9375), 'os.path.exists', 'os.path.exists', (['temp_dir'], {}), '(temp_dir)\n', (9365, 9375), False, 'import os\n'), ((9454, 9465), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9462, 9465), False, 'import sys\n'), ((2914, 2936), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2930, 2936), False, 'import os\n'), ((3010, 3032), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3026, 3032), False, 'import os\n'), ((640, 664), 'os.path.dirname', 'os.path.dirname', (['outPath'], {}), '(outPath)\n', (655, 664), False, 'import os\n'), ((705, 730), 'os.path.basename', 'os.path.basename', (['outPath'], {}), '(outPath)\n', (721, 730), False, 'import os\n')] |
#==============================================================================
# ConfigManger_test.py
#==============================================================================
from pymtl import *
from pclib.test import TestVectorSimulator
from onehot import Mux, Demux
#------------------------------------------------------------------------------
# test_Mux
#------------------------------------------------------------------------------
def test_Mux( dump_vcd, test_verilog ):
    """Directed test for the one-hot Mux: a one-hot sel picks one input."""

    def set_inputs(model, tv):
        model.sel.value = tv[0]
        model.in_[0].value = tv[1]
        model.in_[1].value = tv[2]

    def check_outputs(model, tv):
        assert model.out == tv[3]

    # Instantiate the DUT: 2-port mux over 16-bit data.
    model = Mux(2, dtype=16)
    model.vcd_file = dump_vcd
    if test_verilog:
        model = TranslationTool(model)
    model.elaborate()

    # Each row: [sel (one-hot), in[0], in[1], expected out].
    test_vectors = [
        [0b00, 0x1111, 0x2222, 0x0000],
        [0b01, 0x1111, 0x2222, 0x1111],
        [0b10, 0x1111, 0x2222, 0x2222],
        [0b00, 0x1111, 0x2222, 0x0000],
    ]

    TestVectorSimulator(model, test_vectors, set_inputs, check_outputs).run_test()
#------------------------------------------------------------------------------
# test_Demux
#------------------------------------------------------------------------------
def test_Demux( dump_vcd, test_verilog ):
    """Directed test for the one-hot Demux: sel routes in_ to one output."""

    def set_inputs(model, tv):
        model.sel.value = tv[0]
        model.in_.value = tv[1]

    def check_outputs(model, tv):
        assert model.out[0] == tv[2]
        assert model.out[1] == tv[3]

    # Instantiate the DUT: 2-port demux over 16-bit data.
    model = Demux(2, dtype=16)
    model.vcd_file = dump_vcd
    if test_verilog:
        model = TranslationTool(model)
    model.elaborate()

    # Each row: [sel (one-hot), in_, expected out[0], expected out[1]].
    test_vectors = [
        [0b00, 0x3333, 0x0000, 0x0000],
        [0b01, 0x1111, 0x1111, 0x0000],
        [0b10, 0x2222, 0x0000, 0x2222],
        [0b00, 0x1111, 0x0000, 0x0000],
    ]

    TestVectorSimulator(model, test_vectors, set_inputs, check_outputs).run_test()
| [
"onehot.Mux",
"pclib.test.TestVectorSimulator",
"onehot.Demux"
] | [((868, 897), 'onehot.Mux', 'Mux', (['nports'], {'dtype': 'data_nbits'}), '(nports, dtype=data_nbits)\n', (871, 897), False, 'from onehot import Mux, Demux\n'), ((1292, 1347), 'pclib.test.TestVectorSimulator', 'TestVectorSimulator', (['model', 'test_vectors', 'tv_in', 'tv_out'], {}), '(model, test_vectors, tv_in, tv_out)\n', (1311, 1347), False, 'from pclib.test import TestVectorSimulator\n'), ((1975, 2006), 'onehot.Demux', 'Demux', (['nports'], {'dtype': 'data_nbits'}), '(nports, dtype=data_nbits)\n', (1980, 2006), False, 'from onehot import Mux, Demux\n'), ((2405, 2460), 'pclib.test.TestVectorSimulator', 'TestVectorSimulator', (['model', 'test_vectors', 'tv_in', 'tv_out'], {}), '(model, test_vectors, tv_in, tv_out)\n', (2424, 2460), False, 'from pclib.test import TestVectorSimulator\n')] |
# copytrue (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import math
import xml.etree.ElementTree as ET
from PIL import Image
def resize_img(img):
    """Shrink an image so its longer side is at most 580 px.

    Args:
        img: image as an H x W (x C) array.

    Returns:
        Tuple of (resized image, width scale factor that was applied).
    """
    max_side = 580
    h, w = img.shape[:2]
    new_w, new_h = w, h
    if w >= h and w > max_side:
        new_w = max_side
        new_h = max_side * h / w
    elif h >= w and h > max_side:
        new_h = max_side
        new_w = max_side * w / h
    resized = cv2.resize(
        img, (int(new_w), int(new_h)), interpolation=cv2.INTER_CUBIC)
    return resized, new_w / w
def plot_det_label(image, anno, labels):
    """Render detection annotations (Pascal VOC XML) onto the image.

    Args:
        image: path to the image file.
        anno: path to the VOC-style XML annotation file.
        labels: list of class names of the dataset.

    Returns:
        Image array with one colored box + class name per known object.
    """
    catid2color = {}
    img = cv2.imread(image)
    img, scale_value = resize_img(img)
    tree = ET.parse(anno)
    color_map = get_color_map_list(len(labels) + 1)
    for obj in tree.findall('object'):
        cname = obj.find('name').text
        # Skip unknown classes BEFORE calling labels.index(), which would
        # otherwise raise ValueError for a name outside the dataset.
        if cname not in labels:
            continue
        catid = labels.index(cname)
        bndbox = obj.find('bndbox')
        xmin = int(float(bndbox.find('xmin').text) * scale_value)
        ymin = int(float(bndbox.find('ymin').text) * scale_value)
        xmax = int(float(bndbox.find('xmax').text) * scale_value)
        ymax = int(float(bndbox.find('ymax').text) * scale_value)
        if catid not in catid2color:
            catid2color[catid] = color_map[catid + 1]
        color = tuple(catid2color[catid])
        img = draw_rectangle_and_cname(img, xmin, ymin, xmax, ymax, cname,
                                       color)
    return img
def plot_seg_label(anno):
    """Turn a grayscale segmentation label image into a pseudo-color image.

    Args:
        anno: path to the grayscale annotation image.
    """
    return gray2pseudo(pil_imread(anno))
def plot_insseg_label(image, anno, labels, alpha=0.7):
    """Render instance-segmentation annotations (masks + boxes) onto the image.

    Args:
        image: path to the image file.
        anno: path to an .npy annotation dict with keys
            'h', 'w', 'gt_class', 'gt_bbox', 'gt_poly'.
        labels: list of class names of the dataset.
        alpha: mask blending weight in [0, 1].

    Returns:
        Image array with blended instance masks and labeled boxes.
    """
    import pycocotools.mask as mask_util  # hoisted out of the per-mask loop

    anno = np.load(anno, allow_pickle=True).tolist()
    catid2color = dict()
    img = cv2.imread(image)
    img, scale_value = resize_img(img)
    color_map = get_color_map_list(len(labels) + 1)
    img_h = anno['h']
    img_w = anno['w']
    gt_class = anno['gt_class']
    gt_bbox = anno['gt_bbox']
    gt_poly = anno['gt_poly']
    num_bbox = gt_bbox.shape[0]
    num_mask = len(gt_poly)
    # Blend each instance mask into the image.
    img_array = np.array(img).astype('float32')
    for i in range(num_mask):
        cname = gt_class[i]
        # Skip unknown classes BEFORE labels.index(), which would raise
        # ValueError for a name outside the dataset.
        if cname not in labels:
            continue
        catid = labels.index(cname)
        if catid not in catid2color:
            catid2color[catid] = color_map[catid + 1]
        color = np.array(catid2color[catid]).astype('float32')
        # Scale polygon coordinates in place to the resized image.
        for x in range(len(gt_poly[i])):
            for y in range(len(gt_poly[i][x])):
                gt_poly[i][x][y] = int(float(gt_poly[i][x][y]) * scale_value)
        poly = gt_poly[i]
        rles = mask_util.frPyObjects(poly,
                                     int(float(img_h) * scale_value),
                                     int(float(img_w) * scale_value))
        rle = mask_util.merge(rles)
        mask = mask_util.decode(rle) * 255
        idx = np.nonzero(mask)
        img_array[idx[0], idx[1], :] *= 1.0 - alpha
        img_array[idx[0], idx[1], :] += alpha * color
    img = img_array.astype('uint8')
    # Draw the bounding boxes on top of the blended masks.
    for i in range(num_bbox):
        cname = gt_class[i]
        if cname not in labels:
            continue
        catid = labels.index(cname)
        if catid not in catid2color:
            # NOTE(review): the mask loop uses color_map[catid + 1] but this
            # branch uses color_map[catid] — looks inconsistent; confirm.
            catid2color[catid] = color_map[catid]
        color = tuple(catid2color[catid])
        xmin, ymin, xmax, ymax = gt_bbox[i]
        img = draw_rectangle_and_cname(img,
                                       int(float(xmin) * scale_value),
                                       int(float(ymin) * scale_value),
                                       int(float(xmax) * scale_value),
                                       int(float(ymax) * scale_value), cname,
                                       color)
    return img
def draw_rectangle_and_cname(img, xmin, ymin, xmax, ymax, cname, color):
    """Draw one labeled bounding box on *img*.

    Args:
        img: image array to draw on (modified in place and returned).
        xmin, ymin, xmax, ymax: box corners in pixels.
        cname: class name rendered above the box.
        color: BGR color tuple for the box and label background.
    """
    longest_side = max(img.shape[0:2])
    # Box outline, thickness scaled to the image size.
    box_thickness = math.ceil(2 * longest_side / 600)
    cv2.rectangle(
        img,
        pt1=(xmin, ymin),
        pt2=(xmax, ymax),
        color=color,
        thickness=box_thickness)
    # Filled label background, then the class name text on top of it.
    text_thickness = math.ceil(2 * longest_side / 1200)
    fontscale = math.ceil(0.5 * longest_side / 600)
    tw, th = cv2.getTextSize(
        cname, 0, fontScale=fontscale, thickness=text_thickness)[0]
    cv2.rectangle(
        img,
        pt1=(xmin + 1, ymin - th),
        pt2=(xmin + int(0.7 * tw) + 1, ymin),
        color=color,
        thickness=-1)
    cv2.putText(
        img,
        cname, (int(xmin) + 3, int(ymin) - 5),
        0,
        0.6 * fontscale, (255, 255, 255),
        lineType=cv2.LINE_AA,
        thickness=text_thickness)
    return img
def pil_imread(file_path):
    """Read the image at *file_path* into a numpy array (via Pillow)."""
    return np.asarray(Image.open(file_path))
def get_color_map_list(num_classes):
    """Build the standard PASCAL VOC color palette.

    Args:
        num_classes: number of palette entries to generate.

    Returns:
        List of ``num_classes`` [R, G, B] lists (values 0-255).
    """
    palette = []
    for class_id in range(num_classes):
        r = g = b = 0
        remaining = class_id
        shift = 7
        # Spread the class id's bits across the three channels, MSB first.
        while remaining:
            r |= (remaining & 1) << shift
            g |= ((remaining >> 1) & 1) << shift
            b |= ((remaining >> 2) & 1) << shift
            remaining >>= 3
            shift -= 1
        palette.append([r, g, b])
    return palette
def gray2pseudo(gray_image):
    """Map a grayscale label image to pseudo colors via the VOC palette.

    Args:
        gray_image: single-channel uint8 label image.
    """
    palette = np.array(get_color_map_list(256)).astype("uint8")
    # Apply one per-channel lookup table with OpenCV, then stack to BGR.
    channels = [cv2.LUT(gray_image, palette[:, k]) for k in range(3)]
    return np.dstack(channels)
| [
"cv2.rectangle",
"numpy.dstack",
"PIL.Image.open",
"xml.etree.ElementTree.parse",
"pycocotools.mask.decode",
"numpy.asarray",
"cv2.LUT",
"numpy.array",
"pycocotools.mask.merge",
"numpy.nonzero",
"cv2.getTextSize",
"numpy.load",
"cv2.imread"
] | [((1435, 1452), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (1445, 1452), False, 'import cv2\n'), ((1503, 1517), 'xml.etree.ElementTree.parse', 'ET.parse', (['anno'], {}), '(anno)\n', (1511, 1517), True, 'import xml.etree.ElementTree as ET\n'), ((2771, 2788), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (2781, 2788), False, 'import cv2\n'), ((5192, 5285), 'cv2.rectangle', 'cv2.rectangle', (['img'], {'pt1': '(xmin, ymin)', 'pt2': '(xmax, ymax)', 'color': 'color', 'thickness': 'line_width'}), '(img, pt1=(xmin, ymin), pt2=(xmax, ymax), color=color,\n thickness=line_width)\n', (5205, 5285), False, 'import cv2\n'), ((6027, 6048), 'PIL.Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (6037, 6048), False, 'from PIL import Image\n'), ((6060, 6075), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (6070, 6075), True, 'import numpy as np\n'), ((6853, 6889), 'cv2.LUT', 'cv2.LUT', (['gray_image', 'color_map[:, 0]'], {}), '(gray_image, color_map[:, 0])\n', (6860, 6889), False, 'import cv2\n'), ((6899, 6935), 'cv2.LUT', 'cv2.LUT', (['gray_image', 'color_map[:, 1]'], {}), '(gray_image, color_map[:, 1])\n', (6906, 6935), False, 'import cv2\n'), ((6945, 6981), 'cv2.LUT', 'cv2.LUT', (['gray_image', 'color_map[:, 2]'], {}), '(gray_image, color_map[:, 2])\n', (6952, 6981), False, 'import cv2\n'), ((6999, 7022), 'numpy.dstack', 'np.dstack', (['(c1, c2, c3)'], {}), '((c1, c2, c3))\n', (7008, 7022), True, 'import numpy as np\n'), ((3876, 3897), 'pycocotools.mask.merge', 'mask_util.merge', (['rles'], {}), '(rles)\n', (3891, 3897), True, 'import pycocotools.mask as mask_util\n'), ((3955, 3971), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (3965, 3971), True, 'import numpy as np\n'), ((5475, 5547), 'cv2.getTextSize', 'cv2.getTextSize', (['cname', '(0)'], {'fontScale': 'fontscale', 'thickness': 'text_thickness'}), '(cname, 0, fontScale=fontscale, thickness=text_thickness)\n', (5490, 5547), False, 'import cv2\n'), ((2694, 
2726), 'numpy.load', 'np.load', (['anno'], {'allow_pickle': '(True)'}), '(anno, allow_pickle=True)\n', (2701, 2726), True, 'import numpy as np\n'), ((3107, 3120), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3115, 3120), True, 'import numpy as np\n'), ((3913, 3934), 'pycocotools.mask.decode', 'mask_util.decode', (['rle'], {}), '(rle)\n', (3929, 3934), True, 'import pycocotools.mask as mask_util\n'), ((6788, 6807), 'numpy.array', 'np.array', (['color_map'], {}), '(color_map)\n', (6796, 6807), True, 'import numpy as np\n'), ((3393, 3421), 'numpy.array', 'np.array', (['catid2color[catid]'], {}), '(catid2color[catid])\n', (3401, 3421), True, 'import numpy as np\n')] |
"""
@author <NAME>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(PROJECT_ROOT)
import numpy as np
import tensorflow as tf # TF 1.x
class MatrixEstimator(object):
    """ TensorFlow 1.x implementation of estimator proposed in:
    <NAME>., <NAME>., & <NAME>. (2015). Measures of entropy
    from data using infinitely divisible kernels. IEEE Transactions on
    Information Theory, 61(1), 535-548.
    with modifications made by the authors to attenuate scale and dimension
    related artifacts in the gaussian kernel.
    The originally proposed kernel width formula is:
    sigma = gamma * n ^ (-1 / (4+d))
    with gamma being some empirical constant.
    If normalize_scale is set to True, then the variable is first normalized
    to zero mean and unit variance, as:
    x -> (x - mean) / sqrt(var + epsilon)
    which is equivalent to add the standard deviation as a multiplicative
    dependence in sigma, as done in the classical Silverman rule. This is done
    to achieve invariance to changes of scale during the mutual information
    estimation process. Epsilon is a small number to avoid division by zero.
    If normalize_dimension is set to True, then sigma is computed as:
    sigma = gamma * sqrt(d) * n ^ (-1 / (4+d))
    This is done to center the distribution of pair-wise distances to the same
    mean across variables with different dimensions, and as a consequence
    to attenuate dimension related artifacts.
    Note that normalize_scale=False and normalize_dimension=False will give you
    the original version of the estimator.
    The estimator with those modifications was used in:
    <NAME>. & <NAME>., "On the Information Plane of Autoencoders,"
    in 2020 International Joint Conference on Neural Networks (IJCNN).
    Full text available at: https://arxiv.org/abs/2005.07783
    If you find this software useful, please consider citing our work.
    """
    def __init__(
            self,
            gamma=1.0,
            alpha=1.01,
            epsilon=1e-8,
            normalize_scale=True,
            normalize_dimension=True,
            log_base=2,
            use_memory_efficient_gram=False,
    ):
        """
        Args:
            gamma: empirical constant scaling the kernel width sigma.
            alpha: order of the matrix-based Renyi entropy (must be != 1).
            epsilon: small constant avoiding division by zero in the
                scale normalization.
            normalize_scale: if True, standardize the variable before
                computing the Gram matrix (see class docstring).
            normalize_dimension: if True, multiply sigma by sqrt(d)
                (see class docstring).
            log_base: base of the logarithm for the returned entropies
                (2 gives bits).
            use_memory_efficient_gram: if True, use a matmul-based pairwise
                distance computation that stores an (N, N) tensor instead of
                an (N, N, Dim) one, at the cost of possible small rounding
                errors.
        """
        self.gamma = gamma
        self.alpha = alpha
        self.epsilon = epsilon
        self.normalize_scale = normalize_scale
        self.normalize_dimension = normalize_dimension
        self.log_base = log_base
        self.use_memory_efficient_gram = use_memory_efficient_gram
    def _compute_sigma(self, x):
        """Computes the gaussian kernel width for a batch x of shape (n, d)."""
        x_dims = tf.shape(x)
        n = tf.cast(x_dims[0], tf.float32)
        d = tf.cast(x_dims[1], tf.float32)
        # sigma = gamma * n ^ (-1 / (4+d)), optionally scaled by sqrt(d)
        sigma = self.gamma * n ** (-1 / (4 + d))
        if self.normalize_dimension:
            sigma = sigma * tf.sqrt(d)
        return sigma
    def _normalize_variable(self, x, x_is_image):
        """Standardizes x to zero mean and unit variance.
        For images, statistics are aggregated over all dimensions; otherwise
        each dimension is normalized independently (moments over axis 0).
        """
        if x_is_image:
            mean_x = tf.reduce_mean(x)
            var_x = tf.reduce_mean(tf.square(x - mean_x))
        else:
            mean_x, var_x = tf.nn.moments(x, [0])
        std_x = tf.sqrt(var_x + self.epsilon)
        x = (x - mean_x) / std_x
        return x
    def normalized_gram(self, x, sigma_x=None, x_is_image=False):
        """If sigma_x is provided, then that value will be used. Otherwise,
        it will be automatically computed using the formula.
        If x_is_image is True, then the normalization of scale (if applicable)
        is done aggregating all dimensions. If false, each dimension is
        normalized independently.
        """
        if sigma_x is None:
            sigma_x = self._compute_sigma(x)
        if self.normalize_scale:
            x = self._normalize_variable(x, x_is_image)
        # Compute pairwise distances (distance matrix)
        if self.use_memory_efficient_gram:
            # This option stores a smaller tensor in memory, which might be more convenient for you
            # when the dimensionality of the variable is too large, at the cost of introducing some
            # rounding errors due to the intermediate steps
            # (although I expect them to be insignificant in most cases),
            # because it performs
            # (N, Dim) matmul (Dim, N) = (N, N)
            # thanks to an equivalent formulation of the pairwise distances
            pairwise_dot = tf.matmul(x, tf.transpose(x)) # (N, N) = (N, Dim) matmul (Dim, N)
            norms = tf.diag_part(pairwise_dot) # (N,)
            norms = tf.reshape(norms, [-1, 1]) # (N, 1)
            pairwise_distance = norms - 2 * pairwise_dot + tf.transpose(norms) # (N, N) = (N, 1) - (N, N) + (1, N)
            # Avoids small negatives due to possible rounding errors
            pairwise_distance = tf.nn.relu(pairwise_distance) # (N, N)
        else:
            # This option is more robust to rounding errors at the cost of storing a larger tensor
            # in memory, because it performs
            # (N, 1, Dim) - (1, N, Dim) = (N, N, Dim)
            # which is the straightforward difference matrix that is then squared and reduced to (N, N)
            pairwise_difference = x[:, tf.newaxis, :] - x[tf.newaxis, :, :] # (N, N, Dim) = (N, 1, Dim) - (1, N, Dim)
            pairwise_squared_difference = tf.square(pairwise_difference) # (N, N, Dim)
            pairwise_distance = tf.reduce_sum(pairwise_squared_difference, axis=2) # (N, N)
        # We don't bother with the normalization constant of the gaussian kernel
        # since it is canceled out during normalization of the Gram matrix
        den = 2 * (sigma_x ** 2)
        gram = tf.exp(-pairwise_distance / den)
        # Normalize gram
        x_dims = tf.shape(x)
        n = tf.cast(x_dims[0], tf.float32)
        norm_gram = gram / n
        return norm_gram
    def entropy(
            self, x, sigma_x=None, x_is_image=False):
        """See 'normalized_gram' doc."""
        norm_gram = self.normalized_gram(x, sigma_x, x_is_image)
        entropy = self.entropy_with_gram(norm_gram)
        return entropy
    def joint_entropy(
            self, x, y, sigma_x=None, sigma_y=None,
            x_is_image=False, y_is_image=False):
        """See 'normalized_gram' doc."""
        norm_gram_a = self.normalized_gram(x, sigma_x, x_is_image)
        norm_gram_b = self.normalized_gram(y, sigma_y, y_is_image)
        joint_entropy = self.joint_entropy_with_gram(norm_gram_a, norm_gram_b)
        return joint_entropy
    def mutual_information(
            self, x, y, sigma_x=None, sigma_y=None,
            x_is_image=False, y_is_image=False):
        """See 'normalized_gram' doc."""
        norm_gram_a = self.normalized_gram(x, sigma_x, x_is_image)
        norm_gram_b = self.normalized_gram(y, sigma_y, y_is_image)
        mi_xy = self.mutual_information_with_gram(norm_gram_a, norm_gram_b)
        return mi_xy
    def entropy_with_gram(self, norm_gram):
        """Matrix-based Renyi entropy of order alpha from a normalized Gram
        matrix, in units given by log_base."""
        # Eigendecomposition is pinned to CPU (GPU eig support in TF1 is limited)
        with tf.device('/cpu:0'):
            eigvals, _ = tf.self_adjoint_eig(norm_gram)
        # Fix possible numerical instabilities:
        # Remove small negatives
        eigvals = tf.nn.relu(eigvals)
        # Ensure eigenvalues sum 1
        eigvals = eigvals / tf.reduce_sum(eigvals)
        # Compute entropy in the specified base
        sum_term = tf.reduce_sum(eigvals ** self.alpha)
        entropy = tf.log(sum_term) / (1.0 - self.alpha)
        entropy = entropy / np.log(self.log_base)
        return entropy
    def joint_entropy_with_gram(self, norm_gram_a, norm_gram_b):
        """Joint entropy via the normalized Hadamard product of two Gram
        matrices (scaled by n so the product has unit trace)."""
        n = tf.cast(tf.shape(norm_gram_a)[0], tf.float32)
        norm_gram = n * tf.multiply(norm_gram_a, norm_gram_b)
        joint_entropy = self.entropy_with_gram(norm_gram)
        return joint_entropy
    def mutual_information_with_gram(self, norm_gram_a, norm_gram_b):
        """Mutual information I(X;Y) = H(X) + H(Y) - H(X,Y) from Gram matrices."""
        h_x = self.entropy_with_gram(norm_gram_a)
        h_y = self.entropy_with_gram(norm_gram_b)
        h_xy = self.joint_entropy_with_gram(norm_gram_a, norm_gram_b)
        mi_xy = h_x + h_y - h_xy
        return mi_xy
| [
"tensorflow.shape",
"tensorflow.self_adjoint_eig",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.nn.moments",
"numpy.log",
"tensorflow.multiply",
"tensorflow.reduce_mean",
"tensorflow.cast",
"sys.path.append",
"tensorflow.log",
"tensorflow.square",
"tensorflow.device",
"os.p... | [((233, 262), 'sys.path.append', 'sys.path.append', (['PROJECT_ROOT'], {}), '(PROJECT_ROOT)\n', (248, 262), False, 'import sys\n'), ((199, 224), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (214, 224), False, 'import os\n'), ((2690, 2701), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (2698, 2701), True, 'import tensorflow as tf\n'), ((2714, 2744), 'tensorflow.cast', 'tf.cast', (['x_dims[0]', 'tf.float32'], {}), '(x_dims[0], tf.float32)\n', (2721, 2744), True, 'import tensorflow as tf\n'), ((2757, 2787), 'tensorflow.cast', 'tf.cast', (['x_dims[1]', 'tf.float32'], {}), '(x_dims[1], tf.float32)\n', (2764, 2787), True, 'import tensorflow as tf\n'), ((3185, 3214), 'tensorflow.sqrt', 'tf.sqrt', (['(var_x + self.epsilon)'], {}), '(var_x + self.epsilon)\n', (3192, 3214), True, 'import tensorflow as tf\n'), ((5703, 5735), 'tensorflow.exp', 'tf.exp', (['(-pairwise_distance / den)'], {}), '(-pairwise_distance / den)\n', (5709, 5735), True, 'import tensorflow as tf\n'), ((5778, 5789), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (5786, 5789), True, 'import tensorflow as tf\n'), ((5802, 5832), 'tensorflow.cast', 'tf.cast', (['x_dims[0]', 'tf.float32'], {}), '(x_dims[0], tf.float32)\n', (5809, 5832), True, 'import tensorflow as tf\n'), ((3029, 3046), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {}), '(x)\n', (3043, 3046), True, 'import tensorflow as tf\n'), ((3147, 3168), 'tensorflow.nn.moments', 'tf.nn.moments', (['x', '[0]'], {}), '(x, [0])\n', (3160, 3168), True, 'import tensorflow as tf\n'), ((4533, 4559), 'tensorflow.diag_part', 'tf.diag_part', (['pairwise_dot'], {}), '(pairwise_dot)\n', (4545, 4559), True, 'import tensorflow as tf\n'), ((4588, 4614), 'tensorflow.reshape', 'tf.reshape', (['norms', '[-1, 1]'], {}), '(norms, [-1, 1])\n', (4598, 4614), True, 'import tensorflow as tf\n'), ((4842, 4871), 'tensorflow.nn.relu', 'tf.nn.relu', (['pairwise_distance'], {}), '(pairwise_distance)\n', (4852, 4871), True, 
'import tensorflow as tf\n'), ((5359, 5389), 'tensorflow.square', 'tf.square', (['pairwise_difference'], {}), '(pairwise_difference)\n', (5368, 5389), True, 'import tensorflow as tf\n'), ((5437, 5487), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['pairwise_squared_difference'], {'axis': '(2)'}), '(pairwise_squared_difference, axis=2)\n', (5450, 5487), True, 'import tensorflow as tf\n'), ((7008, 7027), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (7017, 7027), True, 'import tensorflow as tf\n'), ((7054, 7084), 'tensorflow.self_adjoint_eig', 'tf.self_adjoint_eig', (['norm_gram'], {}), '(norm_gram)\n', (7073, 7084), True, 'import tensorflow as tf\n'), ((7196, 7215), 'tensorflow.nn.relu', 'tf.nn.relu', (['eigvals'], {}), '(eigvals)\n', (7206, 7215), True, 'import tensorflow as tf\n'), ((7385, 7421), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(eigvals ** self.alpha)'], {}), '(eigvals ** self.alpha)\n', (7398, 7421), True, 'import tensorflow as tf\n'), ((7707, 7744), 'tensorflow.multiply', 'tf.multiply', (['norm_gram_a', 'norm_gram_b'], {}), '(norm_gram_a, norm_gram_b)\n', (7718, 7744), True, 'import tensorflow as tf\n'), ((2902, 2912), 'tensorflow.sqrt', 'tf.sqrt', (['d'], {}), '(d)\n', (2909, 2912), True, 'import tensorflow as tf\n'), ((3082, 3103), 'tensorflow.square', 'tf.square', (['(x - mean_x)'], {}), '(x - mean_x)\n', (3091, 3103), True, 'import tensorflow as tf\n'), ((4459, 4474), 'tensorflow.transpose', 'tf.transpose', (['x'], {}), '(x)\n', (4471, 4474), True, 'import tensorflow as tf\n'), ((4684, 4703), 'tensorflow.transpose', 'tf.transpose', (['norms'], {}), '(norms)\n', (4696, 4703), True, 'import tensorflow as tf\n'), ((7287, 7309), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['eigvals'], {}), '(eigvals)\n', (7300, 7309), True, 'import tensorflow as tf\n'), ((7444, 7460), 'tensorflow.log', 'tf.log', (['sum_term'], {}), '(sum_term)\n', (7450, 7460), True, 'import tensorflow as tf\n'), ((7514, 7535), 'numpy.log', 'np.log', 
(['self.log_base'], {}), '(self.log_base)\n', (7520, 7535), True, 'import numpy as np\n'), ((7645, 7666), 'tensorflow.shape', 'tf.shape', (['norm_gram_a'], {}), '(norm_gram_a)\n', (7653, 7666), True, 'import tensorflow as tf\n')] |
import pandas as pd
# Load the city data, index it by City_ID, and export it as an HTML table.
cities = pd.read_csv("../Resources/cities.csv").set_index("City_ID")
cities.to_html("../Resources/cities.html")
"pandas.read_csv"
] | [((29, 67), 'pandas.read_csv', 'pd.read_csv', (['"""../Resources/cities.csv"""'], {}), "('../Resources/cities.csv')\n", (40, 67), True, 'import pandas as pd\n')] |
import os
import numpy as np
from pwtools.common import is_seq, file_write
from .testenv import testdir
def test_is_seq():
    """is_seq() must accept list/tuple/ndarray and reject strings and file objects."""
    fn = os.path.join(testdir, 'is_seq_test_file')
    file_write(fn, 'lala')
    # Context manager guarantees the file handle is closed even when an
    # assertion fails (the original fd.close() was skipped in that case).
    with open(fn, 'r') as fd:
        for xx in ([1, 2, 3], (1, 2, 3), np.array([1, 2, 3])):
            print(type(xx))
            assert is_seq(xx) is True
        for xx in ('aaa', fd):
            print(type(xx))
            assert is_seq(xx) is False
| [
"pwtools.common.is_seq",
"numpy.array",
"os.path.join",
"pwtools.common.file_write"
] | [((133, 174), 'os.path.join', 'os.path.join', (['testdir', '"""is_seq_test_file"""'], {}), "(testdir, 'is_seq_test_file')\n", (145, 174), False, 'import os\n'), ((179, 201), 'pwtools.common.file_write', 'file_write', (['fn', '"""lala"""'], {}), "(fn, 'lala')\n", (189, 201), False, 'from pwtools.common import is_seq, file_write\n'), ((259, 278), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (267, 278), True, 'import numpy as np\n'), ((318, 328), 'pwtools.common.is_seq', 'is_seq', (['xx'], {}), '(xx)\n', (324, 328), False, 'from pwtools.common import is_seq, file_write\n'), ((403, 413), 'pwtools.common.is_seq', 'is_seq', (['xx'], {}), '(xx)\n', (409, 413), False, 'from pwtools.common import is_seq, file_write\n')] |
# Save to HDF because cPickle fails with very large arrays
# https://github.com/numpy/numpy/issues/2396
import h5py
import numpy as np
import tempfile
import unittest
def dict_to_hdf(fname, d):
    """
    Save a dict-of-dict datastructure where values are numpy arrays
    to a .hdf5 file
    """
    with h5py.File(fname, 'w') as f:
        def _dict_to_group(root, d):
            # .items() works on both Python 2 and 3; .iteritems() raised
            # AttributeError on Python 3.
            for key, val in d.items():
                if isinstance(val, dict):
                    grp = root.create_group(key)
                    _dict_to_group(grp, val)
                else:
                    root.create_dataset(key, data=val)
        _dict_to_group(f, d)
def hdf_to_dict(fname):
    """
    Loads a dataset saved using dict_to_hdf
    """
    with h5py.File(fname, 'r') as f:
        def _load_to_dict(root):
            d = {}
            # .items() works on both Python 2 and 3; .iteritems() raised
            # AttributeError on Python 3.
            for key, val in root.items():
                if isinstance(val, h5py.Group):
                    d[key] = _load_to_dict(val)
                else:
                    # Dataset.value was deprecated and removed in h5py 3.0;
                    # ds[()] reads the full dataset into memory instead.
                    d[key] = val[()]
            return d
        return _load_to_dict(f)
def load(exp_name, ret_d=False, data_fname='data.hdf5'):
    """Load the mosaic dataset and the arrays of one experiment.

    Returns (mosaic, id2label, train_ij, test_ij, y_train, y_test),
    with the full raw dict appended when ret_d is True.
    """
    dataset = hdf_to_dict('../%s' % data_fname)
    experiment = dataset['experiments'][exp_name]
    fields = (
        dataset['mosaic'],
        dataset['id2label'],
        experiment['train_ij'],
        experiment['test_ij'],
        experiment['y_train'],
        experiment['y_test'],
    )
    return fields + (dataset,) if ret_d else fields
# -- Unit tests
class HDFIOTest(unittest.TestCase):
    """Round-trip test: dict_to_hdf followed by hdf_to_dict restores the data."""

    def test_hdfio(self):
        d = {
            'a': np.random.rand(5, 3),
            'b': {
                'c': np.random.randn(1, 2),
                'd': {
                    'e': np.random.randn(10, 5),
                    'f': np.random.randn(10, 5),
                }
            }
        }

        def assert_same(expected, actual):
            # Recursively compare nested dicts and verify the array *contents*.
            # The original assertItemsEqual was Python-2-only (renamed
            # assertCountEqual in Python 3) and only compared top-level keys,
            # so corrupted values would have gone unnoticed.
            self.assertEqual(sorted(expected), sorted(actual))
            for key, val in expected.items():
                if isinstance(val, dict):
                    assert_same(val, actual[key])
                else:
                    np.testing.assert_array_equal(val, actual[key])

        with tempfile.NamedTemporaryFile() as f:
            dict_to_hdf(f.name, d)
            d2 = hdf_to_dict(f.name)
            assert_same(d, d2)
if __name__ == '__main__':
    # Run the unit tests when executed directly as a script.
    unittest.main()
| [
"numpy.random.rand",
"h5py.File",
"tempfile.NamedTemporaryFile",
"unittest.main",
"numpy.random.randn"
] | [((2172, 2187), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2185, 2187), False, 'import unittest\n'), ((308, 329), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (317, 329), False, 'import h5py\n'), ((753, 774), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (762, 774), False, 'import h5py\n'), ((1725, 1745), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)'], {}), '(5, 3)\n', (1739, 1745), True, 'import numpy as np\n'), ((1991, 2020), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2018, 2020), False, 'import tempfile\n'), ((1789, 1810), 'numpy.random.randn', 'np.random.randn', (['(1)', '(2)'], {}), '(1, 2)\n', (1804, 1810), True, 'import numpy as np\n'), ((1862, 1884), 'numpy.random.randn', 'np.random.randn', (['(10)', '(5)'], {}), '(10, 5)\n', (1877, 1884), True, 'import numpy as np\n'), ((1912, 1934), 'numpy.random.randn', 'np.random.randn', (['(10)', '(5)'], {}), '(10, 5)\n', (1927, 1934), True, 'import numpy as np\n')] |
#!/usr/bin/env python
""" pygame.examples.freetype_misc
Miscellaneous (or misc) means:
"consisting of a mixture of various things that are not
usually connected with each other"
Adjective
All those words you read on computers, magazines, books, and such over the years?
Probably a lot of them were constructed with...
The FreeType Project: a free, high-quality and portable Font engine.
https://freetype.org
Next time you're reading something. Think of them.
Herein lies a *BOLD* demo consisting of a mixture of various things.
Not only is it a *BOLD* demo, it's an
italics demo,
a rotated demo,
it's a blend,
and is sized to go nicely with a cup of tea*.
* also goes well with coffee.
Enjoy!
"""
import os
import pygame as pg
import pygame.freetype as freetype
def run():
    """Render the freetype demo scene and wait for a key/mouse/quit event."""
    pg.init()
    # Font shipped next to the examples in data/sans.ttf
    fontdir = os.path.dirname(os.path.abspath(__file__))
    font = freetype.Font(os.path.join(fontdir, "data", "sans.ttf"))
    screen = pg.display.set_mode((800, 600))
    screen.fill("gray")
    # Styled text: underlined+oblique with padding, then plain on green
    font.underline_adjustment = 0.5
    font.pad = True
    font.render_to(
        screen,
        (32, 32),
        "Hello World",
        "red3",
        "dimgray",
        size=64,
        style=freetype.STYLE_UNDERLINE | freetype.STYLE_OBLIQUE,
    )
    font.pad = False
    font.render_to(
        screen,
        (32, 128),
        "abcdefghijklm",
        "dimgray",
        "green3",
        size=64,
    )
    # Vertical layout, rotation and alpha blending examples
    font.vertical = True
    font.render_to(screen, (32, 200), "Vertical?", "blue3", None, size=32)
    font.vertical = False
    font.render_to(screen, (64, 190), "Let's spin!", "red3", None, size=48, rotation=55)
    font.render_to(
        screen, (160, 290), "All around!", "green3", None, size=48, rotation=-55
    )
    font.render_to(screen, (250, 220), "and BLEND", (255, 0, 0, 128), None, size=64)
    font.render_to(screen, (265, 237), "or BLAND!", (0, 0xCC, 28, 128), None, size=64)
    # Some pinwheels
    font.origin = True
    for angle in range(0, 360, 45):
        font.render_to(screen, (150, 420), ")", "black", size=48, rotation=angle)
    font.vertical = True
    for angle in range(15, 375, 30):
        font.render_to(screen, (600, 400), "|^*", "orange", size=48, rotation=angle)
    font.vertical = False
    font.origin = False
    # Unicode rendering (U+2665 is the heart symbol)
    utext = "I \u2665 Unicode"
    font.render_to(screen, (298, 320), utext, (0, 0xCC, 0xDD), None, size=64)
    utext = "\u2665"
    font.render_to(screen, (480, 32), utext, "gray", "red3", size=148)
    font.render_to(
        screen,
        (380, 380),
        "...yes, this is an SDL surface",
        "black",
        None,
        size=24,
        style=freetype.STYLE_STRONG,
    )
    # Non-uniform (width, height) sizes stretch the glyphs
    font.origin = True
    r = font.render_to(
        screen,
        (100, 530),
        "stretch",
        "red3",
        None,
        size=(24, 24),
        style=freetype.STYLE_NORMAL,
    )
    font.render_to(
        screen,
        (100 + r.width, 530),
        " VERTICAL",
        "red3",
        None,
        size=(24, 48),
        style=freetype.STYLE_NORMAL,
    )
    r = font.render_to(
        screen,
        (100, 580),
        "stretch",
        "blue3",
        None,
        size=(24, 24),
        style=freetype.STYLE_NORMAL,
    )
    font.render_to(
        screen,
        (100 + r.width, 580),
        " HORIZONTAL",
        "blue3",
        None,
        size=(48, 24),
        style=freetype.STYLE_NORMAL,
    )
    pg.display.flip()
    # Block until the user quits, presses a key, or clicks
    while 1:
        if pg.event.wait().type in (pg.QUIT, pg.KEYDOWN, pg.MOUSEBUTTONDOWN):
            break
    pg.quit()
if __name__ == "__main__":
    run()
| [
"pygame.init",
"pygame.quit",
"pygame.display.set_mode",
"pygame.display.flip",
"os.path.join",
"pygame.event.wait",
"os.path.abspath"
] | [((847, 856), 'pygame.init', 'pg.init', ([], {}), '()\n', (854, 856), True, 'import pygame as pg\n'), ((997, 1028), 'pygame.display.set_mode', 'pg.display.set_mode', (['(800, 600)'], {}), '((800, 600))\n', (1016, 1028), True, 'import pygame as pg\n'), ((3474, 3491), 'pygame.display.flip', 'pg.display.flip', ([], {}), '()\n', (3489, 3491), True, 'import pygame as pg\n'), ((3607, 3616), 'pygame.quit', 'pg.quit', ([], {}), '()\n', (3614, 3616), True, 'import pygame as pg\n'), ((888, 913), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (903, 913), False, 'import os\n'), ((940, 981), 'os.path.join', 'os.path.join', (['fontdir', '"""data"""', '"""sans.ttf"""'], {}), "(fontdir, 'data', 'sans.ttf')\n", (952, 981), False, 'import os\n'), ((3517, 3532), 'pygame.event.wait', 'pg.event.wait', ([], {}), '()\n', (3530, 3532), True, 'import pygame as pg\n')] |
#!/usr/bin/env python
import camera
# Trigger the camera and report where the captured image was stored.
print("taking a picture")
captured_path = camera.capture()
print(f"captured {captured_path}")
| [
"camera.capture"
] | [((76, 92), 'camera.capture', 'camera.capture', ([], {}), '()\n', (90, 92), False, 'import camera\n')] |
import argparse
from datetime import datetime as dt
from lightgbm import LGBMRegressor
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
from sklearn.model_selection import KFold
import yaml
# from models import lgbm as my_lgbm
from cv import r2_cv
from preprocessing import load_x, load_y
from utils import print_exit, print_float
# Don't define any function in this file,
# thus don't define main function.
# use var `now` in config file and submit file.
# Timestamp used in both the submission filename and the saved config copy.
now = dt.now().strftime('%Y-%m-%d-%H-%M-%S')
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='./configs/default.yml')
options = parser.parse_args()
with open(options.config, 'r') as file:
    config = yaml.safe_load(file)
features = config['extracted_features']
col_id_name = config['col_id_name']
col_target_name = config['col_target_name']
dropped_ids = config['dropped_ids']
lgbm_params = config['lgbm_params']
n_folds = config['cv']['n_folds']
# Feature matrices for train/test plus the target column.
Xs = load_x(features, dropped_ids)
X_train_all = Xs['train']
X_test = Xs['test']
y_train_all = load_y(col_id_name, col_target_name, dropped_ids)
reg_params = lgbm_params['instance']
regressor = LGBMRegressor(
    boosting_type=reg_params['boosting_type'],
    learning_rate=reg_params['learning_rate'],
    reg_alpha=reg_params['reg_alpha'],
    reg_lambda=reg_params['reg_lambda'],
    random_state=reg_params['random_state'],
    silent=reg_params['silent'],
)
# Cross-validation is currently disabled; n_folds is read but unused.
# cv_scores = r2_cv(regressor, X_train_all, y_train_all, n_folds)
# cv_score = cv_scores.mean()
# Train
regressor.fit(X_train_all, y_train_all)
# Predict
y_pred_logarithmic = regressor.predict(X_test)
# Assumes the target was log-transformed upstream, so exp() inverts it
# — TODO confirm against load_y.
y_pred = np.exp(y_pred_logarithmic)
# Evaluate
# NOTE(review): the score is computed on the *training* predictions, not
# on held-out data, so it is an optimistic estimate.
y_pred_from_train = regressor.predict(X_train_all)
score = r2_score(y_train_all, y_pred_from_train)
sub_df = pd.DataFrame(
    pd.read_feather('data/input/test.feather')[col_id_name]
)
sub_df[col_target_name] = y_pred
sub_df.to_csv(
    './data/output/sub_{time}_{score:.5f}.csv'.format(
        time=now,
        score=score,
    ),
    index=False
)
# Persist the exact config used for this run next to its score.
config_file_name = './configs/{time}_{score:.5f}.yml'.format(
    time=now,
    score=score,
)
with open(config_file_name, 'w') as file:
    yaml.dump(config, file)
| [
"pandas.read_feather",
"preprocessing.load_x",
"argparse.ArgumentParser",
"yaml.dump",
"lightgbm.LGBMRegressor",
"preprocessing.load_y",
"numpy.exp",
"yaml.safe_load",
"datetime.datetime.now",
"sklearn.metrics.r2_score"
] | [((543, 568), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (566, 568), False, 'import argparse\n'), ((972, 1001), 'preprocessing.load_x', 'load_x', (['features', 'dropped_ids'], {}), '(features, dropped_ids)\n', (978, 1001), False, 'from preprocessing import load_x, load_y\n'), ((1062, 1111), 'preprocessing.load_y', 'load_y', (['col_id_name', 'col_target_name', 'dropped_ids'], {}), '(col_id_name, col_target_name, dropped_ids)\n', (1068, 1111), False, 'from preprocessing import load_x, load_y\n'), ((1162, 1417), 'lightgbm.LGBMRegressor', 'LGBMRegressor', ([], {'boosting_type': "reg_params['boosting_type']", 'learning_rate': "reg_params['learning_rate']", 'reg_alpha': "reg_params['reg_alpha']", 'reg_lambda': "reg_params['reg_lambda']", 'random_state': "reg_params['random_state']", 'silent': "reg_params['silent']"}), "(boosting_type=reg_params['boosting_type'], learning_rate=\n reg_params['learning_rate'], reg_alpha=reg_params['reg_alpha'],\n reg_lambda=reg_params['reg_lambda'], random_state=reg_params[\n 'random_state'], silent=reg_params['silent'])\n", (1175, 1417), False, 'from lightgbm import LGBMRegressor\n'), ((1643, 1669), 'numpy.exp', 'np.exp', (['y_pred_logarithmic'], {}), '(y_pred_logarithmic)\n', (1649, 1669), True, 'import numpy as np\n'), ((1741, 1781), 'sklearn.metrics.r2_score', 'r2_score', (['y_train_all', 'y_pred_from_train'], {}), '(y_train_all, y_pred_from_train)\n', (1749, 1781), False, 'from sklearn.metrics import r2_score\n'), ((718, 738), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (732, 738), False, 'import yaml\n'), ((2177, 2200), 'yaml.dump', 'yaml.dump', (['config', 'file'], {}), '(config, file)\n', (2186, 2200), False, 'import yaml\n'), ((494, 502), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (500, 502), True, 'from datetime import datetime as dt\n'), ((1810, 1852), 'pandas.read_feather', 'pd.read_feather', (['"""data/input/test.feather"""'], {}), "('data/input/test.feather')\n", 
(1825, 1852), True, 'import pandas as pd\n')] |
"""Module for the interactive cmd prompt"""
from typing import Any, Dict
import attr
# Logger
from loguru import logger
# This is where we will import all sub-component modules
from pyentrez import entrez_scraper as SCRAPE
from pyentrez.db import mongo_entrez as MDB
from pyentrez.utils import string_utils as su
# logger.opt() returns a *new* parametrized logger; the previous bare call
# discarded the result, so color markup was never enabled. Rebind the module
# logger so subsequent log calls actually use colors.
logger = logger.opt(colors=True)
manager_dict = Dict[str, Any]
@attr.s
class CommandEntrez(object):
    """Interactive command-line front end for pyentrez.

    Attributes:
        scrape (Any): the top-level Entrez scraper.
        mdb (Any): the top-level database backend.
        prompt (Any): StringUtils interactive prompt helper.
    """

    scrape = attr.ib(default=None)
    mdb = attr.ib(default=None)
    prompt = attr.ib(default=None)

    def __attrs_post_init__(self):
        # Wire up the collaborators once attrs has finished construction.
        self.scrape = SCRAPE.Scraper(self)
        self.mdb = MDB.DBLoader(self)
        self.prompt = su.InteractivePrompt(self)

    def query(self):
        """Run one search round-trip and print the resulting UIDs.

        Prompts the user for a query, searches Entrez for matching UIDs,
        fetches the corresponding records, stores them in the database,
        and prints the identifiers of the inserted documents.
        """
        search_terms = self.prompt.input('QUERY')
        uid_list = self.scrape.esearch(search_terms)
        records = self.scrape.efetch(uid_list)
        inserted_ids = self.mdb.add_many(records)
        for doc_id in inserted_ids:
            print(f'{doc_id}')

    def start(self):
        """Show the welcome prompt and dispatch on the user's choice.

        Choice "1" triggers the query workflow; any other input falls
        through so the application can exit.
        """
        # TODO: setup functions for other user options
        choice = self.prompt.input('NOTUI_WELCOME')
        if choice.isdigit() and int(choice) == 1:
            self.query()
| [
"pyentrez.db.mongo_entrez.DBLoader",
"pyentrez.utils.string_utils.InteractivePrompt",
"pyentrez.entrez_scraper.Scraper",
"loguru.logger.opt",
"attr.ib"
] | [((316, 339), 'loguru.logger.opt', 'logger.opt', ([], {'colors': '(True)'}), '(colors=True)\n', (326, 339), False, 'from loguru import logger\n'), ((701, 722), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (708, 722), False, 'import attr\n'), ((733, 754), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (740, 754), False, 'import attr\n'), ((768, 789), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (775, 789), False, 'import attr\n'), ((848, 868), 'pyentrez.entrez_scraper.Scraper', 'SCRAPE.Scraper', (['self'], {}), '(self)\n', (862, 868), True, 'from pyentrez import entrez_scraper as SCRAPE\n'), ((888, 906), 'pyentrez.db.mongo_entrez.DBLoader', 'MDB.DBLoader', (['self'], {}), '(self)\n', (900, 906), True, 'from pyentrez.db import mongo_entrez as MDB\n'), ((929, 955), 'pyentrez.utils.string_utils.InteractivePrompt', 'su.InteractivePrompt', (['self'], {}), '(self)\n', (949, 955), True, 'from pyentrez.utils import string_utils as su\n')] |
import click
def split_list(a_list):
    """Split a sequence into two halves; the first half gets the smaller share."""
    midpoint = len(a_list) // 2
    return a_list[:midpoint], a_list[midpoint:]
def generate_string(inputString):
    """Return ASCII llama art with *inputString* as its speech bubble.

    Messages longer than 25 characters are split on spaces across two
    lines so the bubble does not overflow the drawing.
    """
    inputLength = len(inputString)
    if inputLength > 25:
        # Long message: split the words into two roughly equal lines.
        splitStr = inputString.split(" ")
        lineOne, lineTwo = split_list(splitStr)
        output = r""" '  '   '   '  '
 /\,/"`"`"\`\ /\,
|   `    `      |
 `  ⌒     ⌒   `
(   ◉    ❤︎   ◉ )
 (      ⌣     )  ----- %s
  (           )        %s
   (         )
   (         )
   (         )
   (         )"`"``"`(``)
   (                    )
   (                    )
   (                    )
   (                    )
   (   )`(    )((      )
   \, ,/  \, ,/  \ \  /
    ⌣      ⌣      ⌣""" % (
            " ".join(lineOne),
            " ".join(lineTwo),
        )
    else:
        # Short message: single speech-bubble line.
        output = (
            r""" '  '   '   '  '
 /\,/"`"`"\`\ /\,
|   `    `      |
 `  ⌒     ⌒   `
(   ◉    ❤︎   ◉ )
 (      ⌣     )  ----- %s
  (           )
   (         )
   (         )
   (         )
   (         )"`"``"`(``)
   (                    )
   (                    )
   (                    )
   (                    )
   (   )`(    )((      )
   \, ,/  \, ,/  \ \  /
    ⌣      ⌣      ⌣"""
            % inputString
        )
    return output
def print_message(inputString):
    """Print the llama banner for *inputString* to stdout."""
    banner = generate_string(inputString)
    print(banner + "\n", end="")
# Click CLI entry point: echoes the llama banner for MESSAGE
# (defaults to the empty string when no argument is given).
@click.command()
@click.argument("message", default="")
def main(message):
    click.echo(generate_string(message))
if __name__ == "__main__":
    # Allow running the module directly as a script.
    main()
| [
"click.argument",
"click.command"
] | [((1436, 1451), 'click.command', 'click.command', ([], {}), '()\n', (1449, 1451), False, 'import click\n'), ((1454, 1491), 'click.argument', 'click.argument', (['"""message"""'], {'default': '""""""'}), "('message', default='')\n", (1468, 1491), False, 'import click\n')] |
from difflib import SequenceMatcher
from discord import Embed
from discord.ext.commands import CommandNotFound
from src.utils.config import Prefix
async def commandSuggest(bot, message, commands):
similar_commands = []
if message.startswith(Prefix):
for command in commands:
similar_ratio = SequenceMatcher(
None, message, command).ratio()
if similar_ratio >= 0.6:
similar_commands.append([command, similar_ratio])
similar_commands = sorted(
similar_commands, key=lambda l: l[1], reverse=True)
if similar_commands == []:
desc = '\n'.join(commands)
return f"คำสั่งที่เรามีคือ\n {desc}"
else:
return f"คุณกำลังจะพิมพ์ {Prefix}{similar_commands[0][0]} หรือเปล่า"
async def commandSuggestFromError(ctx, bot, error):
print(error)
if isinstance(error, CommandNotFound):
msg = ctx.message.content.split()[0]
em = Embed(title=f"ไม่พบคำสั่งที่ชื่อว่า {msg}",
description=await commandSuggest(bot, msg,
bot.all_commands),
color=ctx.author.color)
await ctx.send(embed=em)
| [
"difflib.SequenceMatcher"
] | [((321, 360), 'difflib.SequenceMatcher', 'SequenceMatcher', (['None', 'message', 'command'], {}), '(None, message, command)\n', (336, 360), False, 'from difflib import SequenceMatcher\n')] |
"""
Description
-----------
Implements variants of the MCC by composing cross-layer models, analysis engines, and steps.
:Authors:
- <NAME>
"""
from mcc.model import *
from mcc.framework import *
from mcc.analyses import *
from mcc.simulation import *
from mcc.complex_analyses import *
from mcc.importexport import *
class BaseModelQuery:
    """ Stores existing component architecture and corresponding inputs.
    """

    def __init__(self):
        # store queries (filenames and graphs) by name
        self._queries    = dict()

        # store component instances and mapping from components to corresponding queries
        self._components = None

    def _merge(self, components, query):
        """Replace the stored component layer with `components`.

        'query' parameter values of already-known component instances are
        carried over; instances new in `components` are attributed to `query`.
        """
        if self._components is not None:
            # copy 'query' param from the old component layer to the new one
            # BUGFIX: this loop previously iterated over the undefined name
            # `_components` (missing `self.`), raising NameError whenever a
            # second query was merged.
            for c in self._components.graph.nodes():
                for cnew in components.graph.nodes():
                    if c.identifier() == cnew.identifier():
                        q = self._components.untracked_get_param_value('query', c)
                        components.untracked_set_param_value('query', cnew, q)

        # replace _components
        self._components = components

        # new components are mapped to query
        for c in self._components.graph.nodes():
            if not self._components.untracked_isset_param_value('query', c):
                self._components.untracked_set_param_value('query', c, query)

    def insert(self, name, query_graph, comp_inst, filename=None):
        """Register a query and merge its (non-empty) component instances."""
        # insert name and query graph
        self._queries[name] = { 'filename' : filename,
                                'graph'    : query_graph }

        # merge comp_inst into current components
        assert len(comp_inst.graph.nodes()) > 0, "inserting empty graph"
        self._merge(comp_inst, name)

    def base_arch(self):
        """Return a set of BaseChild objects (one per subsystem) aggregating
        the existing functions and components."""
        subsystems = dict()

        # aggregate components per subsystem
        for c in self._components.graph.nodes():
            name = self._components.untracked_get_param_value('mapping', c).name()
            if name not in subsystems:
                subsystems[name] = set()
            subsystems[name].add(c)

        arch = set()
        for name, comps in subsystems.items():
            instances = [c.untracked_obj() for c in comps]
            arch.add(BaseChild('base', name, instances, self._components.graph.subgraph(comps, {'mapping'})))

        return arch

    def instances(self, subsystem):
        """Return the component instances of the given subsystem, flagged static."""
        instances = set()
        for c in self._components.graph.nodes():
            if subsystem == self._components.untracked_get_param_value('mapping', c).name():
                inst = c.untracked_obj()
                inst._static = True
                instances.add(inst)

        return instances
class MccBase:
""" MCC base class. Implements helper functions for common transformation steps.
"""
def __init__(self, repo):
"""
Args:
:param repo: component and contract repository
"""
self.repo = repo
def _complete_mapping(self, model, layer, source_param='mapping'):
# inherit mapping from all neighbours (excluding static platform components)
model.add_step(InheritFromBothStep(layer,
param=source_param,
target_param='mapping',
engines={StaticEngine(layer)}))
def _map_functions(self, model, layer):
fa = model.by_name[layer]
me = MappingEngine(fa, model.repo, model.platform, cost_sensitive=True)
cpme = CPMappingEngine(fa, model.repo, model.platform)
pfmap = NodeStep(Map(me, 'map functions'))
pfmap.add_operation(BatchAssign(cpme, 'map functions'))
pfmap.add_operation(BatchCheck(me, 'map functions'))
model.add_step(pfmap)
def _connect_functions(self, model, slayer, dlayer):
fq = model.by_name[slayer]
fa = model.by_name[dlayer]
fe = FunctionEngine(fq, fa, model.repo)
deps = NodeStep(Map(fe, 'dependencies'))
deps.add_operation(Assign(fe, 'dependencies'))
deps.add_operation(BatchTransform(fe, fa, 'dependencies'))
model.add_step(deps)
model.add_step(CopyEdgeStep(fq, fa, {'service'}))
def _map_and_connect_functions(self, model, slayer, dlayer):
fq = model.by_name[slayer]
fa = model.by_name[dlayer]
me = MappingEngine(fq, model.repo, model.platform, cost_sensitive=True)
#cpme = CPMappingEngine(fq, model.repo, model.platform)
fe = FunctionEngine(fq, fa, model.repo)
step = NodeStep( Map(fe, 'dependencies'))
step.add_operation( Map(me, 'map functions'))
step.add_operation( BatchAssign(me, 'map functions'))
step.add_operation( BatchCheck(me, 'map functions'))
step.add_operation( Assign(fe, 'dependencies'))
step.add_operation(BatchTransform(fe, fa, 'dependencies'))
model.add_step(step)
model.add_step(CopyEdgeStep(fq, fa, {'service'}))
def _select_components(self, model, slayer, dlayer, envmodel):
""" Selects components for nodes in source layer and transforms into target layer.
Args:
:param model: cross-layer model
:type model: :class:`mcc.framework.Registry`
:param slayer: source layer
:param dlayer: target layer
"""
fc = model.by_name[slayer]
ca = model.by_name[dlayer]
ce = ComponentEngine(fc, self.repo)
rtee = RteEngine(fc)
spe = SpecEngine(fc)
# Map operation is the first when selecting components
comps = Map(ce, 'component')
comps.register_ae(rtee) # consider rte requirements
comps.register_ae(spe) # consider spec requirements
# check platform compatibility
pf_compat = NodeStep(comps) # get components from repo
assign = pf_compat.add_operation(Assign(ce, 'component')) # choose component
check = pf_compat.add_operation(Check(rtee, name='RTE requirements')) # check rte requirements
check.register_ae(spe) # check spec requirements
model.add_step(pf_compat)
# check dependencies
model.add_step(NodeStep(Check(DependencyEngine(fc), name='dependencies')))
# select pattern (dummy step, nothing happening here)
pe = PatternEngine(fc, ca)
epe = EnvPatternEngine(fc, envmodel)
patterns = Map(pe, 'pattern')
patterns.register_ae(epe)
patstep = NodeStep(patterns)
patstep.add_operation(Assign(pe, 'pattern'))
# sanity check and transform
patstep.add_operation( Check(pe, name='pattern'))
patstep.add_operation(Transform(pe, ca, 'pattern'))
model.add_step(patstep)
# select service connection and fix constraints
# remark: for the moment, we assume there is only one possible connection left
se = ServiceEngine(fc, ca)
connect = EdgeStep(Check(se, name='connect'))
connect.add_operation(Map(se, name='connect'))
connect.add_operation(Assign(se, name='connect'))
connect.add_operation(Transform(se, ca, name='connect'))
model.add_step(connect)
# # check network bandwith
# model.add_step_unsafe(NodeStep(BatchCheck(NetworkEngine(fc), name='network bandwith')))
# copy mapping from slayer to dlayer
self._complete_mapping(model, ca, source_param='mapping')
# check mapping
model.add_step(NodeStep(Check(MappingEngine(ca, model.repo, model.platform), name='platform mapping is complete')))
# TODO (?) check that connections satisfy functional dependencies
def _insert_protocolstacks(self, model, slayer, dlayer):
""" Inserts protocol stacks for edges in source layer and transforms into target layer.
Args:
:param model: cross-layer model
:type model: :class:`mcc.framework.Registry`
:param slayer: source layer
:param dlayer: target layer
"""
slayer = model.by_name[slayer]
dlayer = model.by_name[dlayer]
# select protocol stacks
pse = ProtocolStackEngine(slayer, self.repo)
select = EdgeStep(Map(pse))
select.add_operation(Assign(pse))
# select pattern (if composite)
pe = PatternEngine(slayer, dlayer, source_param='protocolstack')
select.add_operation(Map(pe))
select.add_operation(Assign(pe))
model.add_step(select)
# copy nodes
model.add_step(CopyNodeStep(slayer, dlayer, {'mapping', 'pattern-config'}))
# copy or transform edges
model.add_step(EdgeStep(Transform(pe, dlayer)))
model.add_step(CopyServicesStep(slayer, dlayer))
# derive mapping
self._complete_mapping(model, dlayer)
# check that service dependencies are satisfied and connections are local
model.add_step(EdgeStep(Check(ComponentDependencyEngine(dlayer), name='service dependencies')))
model.add_step(NodeStep(Check(ComponentDependencyEngine(dlayer), name='service dependencies')))
def _insert_muxers(self, model, slayer, dlayer):
""" Inserts multiplexers for edges in source layer and transforms into target layer.
Args:
:param model: cross-layer model
:type model: :class:`mcc.framework.Registry`
:param slayer: source layer
:param dlayer: target layer
"""
slayer = model.by_name[slayer]
dlayer = model.by_name[dlayer]
# select muxers and transform
me = MuxerEngine(slayer, dlayer, self.repo)
select = NodeStep(Map(me))
select.add_operation(Assign(me))
select.add_operation(Transform(me, dlayer))
model.add_step(select)
# adapt edges to inserted muxers and transform
adapt_edges = EdgeStep(Map(me))
adapt_edges.add_operation(Assign(me))
adapt_edges.add_operation(Transform(me, dlayer))
model.add_step(adapt_edges)
# derive mapping
self._complete_mapping(model, dlayer)
# check that service dependencies are satisfied and connections are local
model.add_step(NodeStep(Check(ComponentDependencyEngine(dlayer), name='service dependencies')))
def _insert_proxies(self, model, slayer, dlayer):
""" Inserts proxies for edges in source layer and transforms into target layer.
Args:
:param model: cross-layer model
:type model: :class:`mcc.framework.Registry`
:param slayer: source layer
:param dlayer: target layer
"""
fa = model.by_name[slayer]
fc = model.by_name[dlayer]
re = ReachabilityEngine(fa, fc, model.platform)
# decide on reachability
reachability = EdgeStep(Map(re, 'carrier')) # map edges to carrier
reachability.add_operation(Assign(re, 'carrier')) # choose communication carrier
model.add_step(reachability)
# copy nodes to comm arch
model.add_step(CopyNodeStep(fa, fc, {'mapping'}))
# perform arc split
model.add_step(EdgeStep(BatchTransform(re, fc, 'arc split')))
def _merge_components(self, model, slayer, dlayer, factory, pf_model):
""" Merge components into component instantiations.
Args:
:param model: cross-layer model
:type model: :class:`mcc.framework.Registry`
:param slayer: source layer
:param dlayer: target layer
:param factory: instance factory
:type factory: :class:`mcc.model.InstanceFactory`
"""
ca = model.by_name[slayer]
ci = model.by_name[dlayer]
ie = InstantiationEngine(ca, ci, factory, 'tmp-mapping')
self.ie = ie
instantiate = NodeStep(Map(ie, 'instantiate'))
instantiate.add_operation(Assign(ie, 'instantiate'))
instantiate.add_operation(Transform(ie, ci, 'instantiate'))
model.add_step(instantiate)
connect = EdgeStep(Map(ie, 'copy edges'))
connect.add_operation(Assign(ie, 'copy edges'))
connect.add_operation(Transform(ie, ci, 'copy edges'))
model.add_step(connect)
ce = CoprocEngine(ci, pf_model, 'tmp-mapping')
coproc = NodeStep( Map(ce, 'coproc'))
coproc.add_operation(Assign(ce, 'coproc'))
model.add_step(coproc)
# check singleton (per PfComponent)
se = SingletonEngine(ci, pf_model)
model.add_step(NodeStep(Check(se, 'check singleton and cardinality')))
def _assign_resources(self, model, layer):
layer = model.by_name[layer]
# TODO actually assign resources as specified in repo
# currently, we just take the resources as specified but check
# whether they exceed any threshold
ce = QuantumEngine(layer, name='caps')
resources = NodeStep(BatchCheck(ce, 'caps'))
re = QuantumEngine(layer, name='ram')
resources.add_operation(BatchCheck(re, 'ram'))
model.add_step(resources)
def _reliability_check(self, model, layer, constrmodel):
""" perform reliability checks
"""
layer = model.by_name[layer]
re = ReliabilityEngine(layer, model.by_order[1:], constrmodel)
model.add_step_unsafe(NodeStep(BatchCheck(re, 'check reliability')))
def _assign_affinity(self, model, layer):
layer = model.by_name[layer]
ae = AffinityEngine(layer)
step = NodeStep( Map(ae, 'set affinity'))
step.add_operation(Assign(ae, 'set affinity'))
model.add_step(step)
def _timing_model(self, model, pf_model, slayer, dlayer, constrmodel):
""" Build taskgraph layer and perform timing checks
"""
slayer = model.by_name[slayer]
tg = model.by_name[dlayer]
core = TasksCoreEngine(slayer)
rpc = TasksRPCEngine(slayer)
ae = TaskgraphEngine(slayer, tg)
tasks = NodeStep( Map(core, 'get coretasks'))
tasks.add_operation( Assign(core, 'get coretasks'))
tasks.add_operation( Map(rpc, 'get rpctasks'))
tasks.add_operation( Assign(rpc, 'get rpctasks'))
tasks.add_operation( Map(ae, 'build taskgraph'))
tasks.add_operation( Assign(ae, 'build taskgraph'))
tasks.add_operation( Check(ae, 'build taskgraph'))
tasks.add_operation(Transform(ae, tg, 'build taskgraph'))
model.add_step(tasks)
con = EdgeStep(Map(ae, 'connect tasks'))
con.add_operation(Assign(ae, 'connect tasks'))
con.add_operation(Transform(ae, tg, 'connect tasks'))
model.add_step(con)
# assign event model to interrupt tasks
acte = ActivationEngine(tg)
activation = NodeStep( Map(acte, 'activation pattern'))
activation.add_operation( Assign(acte, 'activation patterns'))
model.add_step(activation)
# assign priorities
pe = PriorityEngine(slayer, taskgraph=tg, platform=pf_model)
prios = NodeStep( BatchMap(pe, 'assign priorities'))
prios.add_operation(BatchAssign(pe, 'assign priorities'))
model.add_step_unsafe(prios)
# assign WCETs
we = WcetEngine(tg)
wcets = NodeStep( Map(we, 'WCETs'))
wcets.add_operation( Assign(we, 'WCETs'))
model.add_step(wcets)
self.wcet_engine = we
def _timing_check(self, model, slayer, dlayer, constrmodel, ae):
slayer = model.by_name[slayer]
tg = model.by_name[dlayer]
# perform CPA
pycpa = CPAEngine(tg, slayer, model.by_order[1:], constrmodel)
check = BatchCheck(pycpa, 'CPA')
if ae:
check.register_ae(ae)
model.add_step(NodeStep(check))
class SimpleMcc(MccBase):
""" Composes MCC for Genode systems. Only considers functional requirements.
"""
def __init__(self, repo, test_backtracking=False,
chronologicaltracking=False,
test_adaptation=False,
from_scratch=False):
assert test_backtracking == False or test_adaptation == False
assert chronologicaltracking == False or test_adaptation == False
MccBase.__init__(self, repo)
self._test_backtracking = test_backtracking
self._test_adaptation = test_adaptation
self._replay_adaptations = test_adaptation if isinstance(test_adaptation, str) else False
self._from_scratch = from_scratch if test_adaptation else False
self._nonchronological = not chronologicaltracking
assert self._replay_adaptations or not self._from_scratch
def search_config(self, pf_model, system, base=None, outpath=None, with_da=False, da_path=None, dot_mcc=False,
dot_ae=False, dot_layer=False, envmodel=None, constrmodel=None):
""" Searches a system configuration for the given query.
Args:
:param base: base model (existing functions/components)
:param base: BaseModelQuery object
:param platform: platform parser
:type platform: parser object
:param system: system configuruation parser
:type system: parser object
:param outpath: output path/prefix
:type outpath: str
"""
# check function/composite/component references, compatibility and routes in system and subsystems
# 2) we create a new system model
model = SystemModel(self.repo, pf_model, dotpath=outpath if dot_layer else None)
# 3) create query model
query_model = FuncArchQuery(system)
# 4a) create system model from query model and base
model.from_query(query_model, 'func_query', base)
# parse constraints
if constrmodel is not None:
constrmodel.parse(model)
self._map_and_connect_functions(model, 'func_query', 'func_arch')
# self._map_functions(model, 'func_query')
# self._connect_functions(model, 'func_query', 'func_arch')
# solve reachability and transform into comm_arch
self._insert_proxies(model, slayer='func_arch', dlayer='comm_arch')
# select components and transform into comp_arch
self._select_components(model, slayer='comm_arch', dlayer='comp_arch-pre1', envmodel=envmodel)
# TODO test case for protocol stack insertion
self._insert_protocolstacks(model, slayer='comp_arch-pre1', dlayer='comp_arch-pre2')
# insert muxers (if connections present and muxer is available)
# TODO test case for replication
self._insert_muxers(model, slayer='comp_arch-pre2', dlayer='comp_arch')
# create instance factory and insert existing instance from base
instance_factory = InstanceFactory()
if base is not None:
# for each subsystem insert existing instances into factory
for pfc in pf_model.platform_graph.nodes():
instance_factory.insert_existing_instances(pfc.name(), base.instances(pfc.name()))
# implement transformation/merge into component instantiation
self._merge_components(model, slayer='comp_arch', dlayer='comp_inst',
factory=instance_factory, pf_model=pf_model)
# assign and check resource consumptions (RAM, caps)
self._assign_resources(model, layer='comp_inst')
# do not do scheduling stuff for base model
if base is not None:
# assign affinity
self._assign_affinity(model, layer='comp_inst')
self._timing_model(model, pf_model, slayer='comp_inst',
dlayer='task_graph',
constrmodel=constrmodel)
sim = None
if self._test_backtracking:
sim = BacktrackingSimulation(model.by_name['task_graph'], model, outpath=outpath)
if constrmodel is not None:
self._reliability_check(model, layer='comp_inst', constrmodel=constrmodel)
self._timing_check(model, slayer='comp_inst', dlayer='task_graph',
constrmodel=constrmodel, ae=sim)
if self._test_adaptation and not self._from_scratch:
sim = AdaptationSimulation(model.by_name['task_graph'], model, wcet_engine=self.wcet_engine,
replayfile=self._test_adaptation if self._replay_adaptations else None,
factor=self._test_adaptation if not self._replay_adaptations else 1.1,
outpath=outpath)
model.add_step(NodeStep(BatchCheck(sim)))
# model.print_steps()
if outpath is not None and dot_mcc:
model.write_dot(outpath+'mcc.dot')
if with_da:
from mcc import extern
if da_path is None:
da_path = outpath
da_engine = extern.DependencyAnalysisEngine(model, model.by_order, outpath+'model.pickle', outpath+'query.xml', da_path+'response.xml')
da_step = NodeStep(BatchMap(da_engine))
da_step.add_operation(BatchAssign(da_engine))
model.add_step_unsafe(da_step)
try:
if base and self._from_scratch:
se = SimulationEngine(None, model)
with open(self._replay_adaptations, 'r') as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
model.clear()
self.ie.factory.reset()
model.from_query(query_model, 'func_query', base)
# parse constraints
if constrmodel is not None:
constrmodel.reset()
constrmodel.parse(model)
model.execute(outpath, nonchronological=self._nonchronological)
se.record_solution()
se._last_iteration = 0
se._last_rolledback = 0
se._last_variables = set()
print("Replaying adaptation of %s to %d" % (row['taskname'],
int(row['wcet'])))
self.wcet_engine.update_wcet(row['taskname'], int(row['wcet']))
se.write_stats(outpath[:outpath.rfind('/')] + '/solutions.csv')
else:
model.execute(outpath, nonchronological=self._nonchronological)
except Exception as e:
if sim:
sim.write_stats(outpath[:outpath.rfind('/')] + '/solutions.csv')
if self._test_adaptation and not self._replay_adaptations:
sim.write_adaptations(outpath[:outpath.rfind('/')] + '/adaptations.csv')
elif self._from_scratch:
se.write_stats(outpath[:outpath.rfind('/')] + '/solutions.csv')
print(e)
export = PickleExporter(model)
export.write(outpath+'model-error.pickle')
raise e
if outpath is not None and dot_ae:
model.write_analysis_engine_dependency_graph(outpath+'ae_dep_graph.dot')
export = PickleExporter(model)
export.write(outpath+'model.pickle')
return (query_model, model)
| [
"mcc.extern.DependencyAnalysisEngine"
] | [((21467, 21600), 'mcc.extern.DependencyAnalysisEngine', 'extern.DependencyAnalysisEngine', (['model', 'model.by_order', "(outpath + 'model.pickle')", "(outpath + 'query.xml')", "(da_path + 'response.xml')"], {}), "(model, model.by_order, outpath +\n 'model.pickle', outpath + 'query.xml', da_path + 'response.xml')\n", (21498, 21600), False, 'from mcc import extern\n')] |
from rest_framework import serializers
allow_blank = {'default': '', 'initial': '', 'allow_blank': True}
allow_null = {'default': None, 'initial': None, 'allow_null': True}
empty_list = {'default': [], 'initial': [], 'many': True}
class PersonSerializer(serializers.Serializer):
birth_date = serializers.DateField(**allow_null)
gender = serializers.CharField(max_length=1, **allow_blank)
height = serializers.IntegerField(**allow_null)
weight = serializers.IntegerField(**allow_null)
residence_type = serializers.CharField(max_length=20, **allow_blank)
months_at_current_residence = serializers.IntegerField(**allow_null)
marital_status = serializers.CharField(max_length=50, **allow_blank)
education = serializers.CharField(max_length=50, **allow_blank)
occupation = serializers.CharField(max_length=50, **allow_blank)
relationship_to_applicant = serializers.CharField(max_length=50, **allow_blank)
student = serializers.NullBooleanField(required=False)
tobacco = serializers.NullBooleanField(required=False)
expectant_parent = serializers.NullBooleanField(required=False)
relative_heart = serializers.NullBooleanField(required=False)
relative_cancer = serializers.NullBooleanField(required=False)
hospitalized = serializers.NullBooleanField(required=False)
ongoing_medical_treatment = serializers.NullBooleanField(required=False)
dui = serializers.NullBooleanField(required=False)
previously_denied = serializers.NullBooleanField(required=False)
hazard_pilot = serializers.NullBooleanField(required=False)
hazard_felony = serializers.NullBooleanField(required=False)
hazard_other = serializers.NullBooleanField(required=False)
hazardous_activity = serializers.NullBooleanField(required=False)
medical_condition = serializers.CharField(max_length=200, **allow_blank)
def to_internal_value(self, data):
if 'DUI' in data:
data['dui'] = data.pop('DUI')
return super(PersonSerializer, self).to_internal_value(data)
class MedicationSerializer(serializers.Serializer):
medication_name = serializers.CharField(max_length=50, **allow_blank)
dosage = serializers.CharField(max_length=30, **allow_blank)
frequency = serializers.CharField(max_length=30, **allow_blank)
comment = serializers.CharField(max_length=30, **allow_blank)
class RequestedPolicySerializer(serializers.Serializer):
coverage_type = serializers.CharField(max_length=30, **allow_blank)
coverage_amount = serializers.CharField(max_length=20, **allow_blank)
coverage_term = serializers.CharField(max_length=20, **allow_blank)
class CurrentPolicySerializer(serializers.Serializer):
insurance_company = serializers.CharField(max_length=50, **allow_blank)
expiration_date = serializers.DateField(**allow_null)
insured_since = serializers.DateField(**allow_null)
coverage_type = serializers.CharField(max_length=30, **allow_blank)
class PingDataSerializer(serializers.Serializer):
persons = PersonSerializer(many=True, allow_empty=False)
medications = MedicationSerializer(**empty_list)
requested_policy = RequestedPolicySerializer(**allow_null)
current_policy = CurrentPolicySerializer(**allow_null)
class PostDataSerializer(serializers.Serializer):
persons = PersonSerializer(many=True, allow_empty=False)
medications = MedicationSerializer(**empty_list)
requested_policy = RequestedPolicySerializer(**allow_null)
current_policy = CurrentPolicySerializer(**allow_null)
| [
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.CharField",
"rest_framework.serializers.NullBooleanField",
"rest_framework.serializers.DateField"
] | [((299, 334), 'rest_framework.serializers.DateField', 'serializers.DateField', ([], {}), '(**allow_null)\n', (320, 334), False, 'from rest_framework import serializers\n'), ((348, 398), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(1)'}), '(max_length=1, **allow_blank)\n', (369, 398), False, 'from rest_framework import serializers\n'), ((412, 450), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '(**allow_null)\n', (436, 450), False, 'from rest_framework import serializers\n'), ((464, 502), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '(**allow_null)\n', (488, 502), False, 'from rest_framework import serializers\n'), ((524, 575), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(20)'}), '(max_length=20, **allow_blank)\n', (545, 575), False, 'from rest_framework import serializers\n'), ((610, 648), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '(**allow_null)\n', (634, 648), False, 'from rest_framework import serializers\n'), ((670, 721), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(50)'}), '(max_length=50, **allow_blank)\n', (691, 721), False, 'from rest_framework import serializers\n'), ((738, 789), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(50)'}), '(max_length=50, **allow_blank)\n', (759, 789), False, 'from rest_framework import serializers\n'), ((807, 858), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(50)'}), '(max_length=50, **allow_blank)\n', (828, 858), False, 'from rest_framework import serializers\n'), ((891, 942), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(50)'}), '(max_length=50, **allow_blank)\n', (912, 942), False, 'from rest_framework import serializers\n'), ((957, 1001), 
'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], {'required': '(False)'}), '(required=False)\n', (985, 1001), False, 'from rest_framework import serializers\n'), ((1016, 1060), 'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], {'required': '(False)'}), '(required=False)\n', (1044, 1060), False, 'from rest_framework import serializers\n'), ((1084, 1128), 'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], {'required': '(False)'}), '(required=False)\n', (1112, 1128), False, 'from rest_framework import serializers\n'), ((1150, 1194), 'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], {'required': '(False)'}), '(required=False)\n', (1178, 1194), False, 'from rest_framework import serializers\n'), ((1217, 1261), 'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], {'required': '(False)'}), '(required=False)\n', (1245, 1261), False, 'from rest_framework import serializers\n'), ((1281, 1325), 'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], {'required': '(False)'}), '(required=False)\n', (1309, 1325), False, 'from rest_framework import serializers\n'), ((1358, 1402), 'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], {'required': '(False)'}), '(required=False)\n', (1386, 1402), False, 'from rest_framework import serializers\n'), ((1413, 1457), 'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], {'required': '(False)'}), '(required=False)\n', (1441, 1457), False, 'from rest_framework import serializers\n'), ((1482, 1526), 'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], {'required': '(False)'}), '(required=False)\n', (1510, 1526), False, 'from rest_framework import serializers\n'), ((1546, 1590), 'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], 
{'required': '(False)'}), '(required=False)\n', (1574, 1590), False, 'from rest_framework import serializers\n'), ((1611, 1655), 'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], {'required': '(False)'}), '(required=False)\n', (1639, 1655), False, 'from rest_framework import serializers\n'), ((1675, 1719), 'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], {'required': '(False)'}), '(required=False)\n', (1703, 1719), False, 'from rest_framework import serializers\n'), ((1745, 1789), 'rest_framework.serializers.NullBooleanField', 'serializers.NullBooleanField', ([], {'required': '(False)'}), '(required=False)\n', (1773, 1789), False, 'from rest_framework import serializers\n'), ((1814, 1866), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(200)'}), '(max_length=200, **allow_blank)\n', (1835, 1866), False, 'from rest_framework import serializers\n'), ((2120, 2171), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(50)'}), '(max_length=50, **allow_blank)\n', (2141, 2171), False, 'from rest_framework import serializers\n'), ((2185, 2236), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(30)'}), '(max_length=30, **allow_blank)\n', (2206, 2236), False, 'from rest_framework import serializers\n'), ((2253, 2304), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(30)'}), '(max_length=30, **allow_blank)\n', (2274, 2304), False, 'from rest_framework import serializers\n'), ((2319, 2370), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(30)'}), '(max_length=30, **allow_blank)\n', (2340, 2370), False, 'from rest_framework import serializers\n'), ((2450, 2501), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(30)'}), '(max_length=30, **allow_blank)\n', (2471, 2501), False, 'from 
rest_framework import serializers\n'), ((2524, 2575), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(20)'}), '(max_length=20, **allow_blank)\n', (2545, 2575), False, 'from rest_framework import serializers\n'), ((2596, 2647), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(20)'}), '(max_length=20, **allow_blank)\n', (2617, 2647), False, 'from rest_framework import serializers\n'), ((2729, 2780), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(50)'}), '(max_length=50, **allow_blank)\n', (2750, 2780), False, 'from rest_framework import serializers\n'), ((2803, 2838), 'rest_framework.serializers.DateField', 'serializers.DateField', ([], {}), '(**allow_null)\n', (2824, 2838), False, 'from rest_framework import serializers\n'), ((2859, 2894), 'rest_framework.serializers.DateField', 'serializers.DateField', ([], {}), '(**allow_null)\n', (2880, 2894), False, 'from rest_framework import serializers\n'), ((2915, 2966), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(30)'}), '(max_length=30, **allow_blank)\n', (2936, 2966), False, 'from rest_framework import serializers\n')] |
"""
Name and sort the pictures uniformly
Example:
rename(Image_dir, Rename_dir, 'img')
Notes:
image_dir: Path to unnamed picture folder
rename_dir: Path to renamed picture folder
parent path:
dirnames: List of folders in this directory
filenames: List of files in this directory
filename[0]: Image_DataLoader'
filename[1]: '.py'
"""
import os
import cv2
from PIL import Image
def RenameImage(image_dir, rename_dir, datasets):
img_num = 1
for parent, dirnames, filenames in os.walk(image_dir):
filenames.sort()
print(filenames)
for filename in filenames:
if os.path.splitext(filename)[1] == '.png':
currentPath = os.path.join(parent, filename)
img = Image.open(currentPath)
if datasets == 'img':
img.save(rename_dir + '/' + 'img' + str(img_num) + '.png')
print(rename_dir + '/' + 'img' + str(img_num) + '.png')
elif datasets == 'label':
img.save(rename_dir + '/' + 'label' + str(img_num) + '.png')
print(rename_dir + '/' + 'label' + str(img_num) + '.png')
else:
print(' ERROR: datasets=img or label')
img_num += 1
elif os.path.splitext(filename)[1] == '.TIF':
currentPath = os.path.join(parent, filename)
img = cv2.imread(currentPath)
if datasets == 'img':
cv2.imwrite(rename_dir + '/' + 'img' + str(img_num) + '.png', img,
[int(cv2.IMWRITE_PNG_COMPRESSION), 6])
print(rename_dir + '/' + 'img' + str(img_num) + '.png')
elif datasets == 'label':
cv2.imwrite(rename_dir + '/' + 'label' + str(img_num) + '.png', img,
[int(cv2.IMWRITE_PNG_COMPRESSION), 6])
print(rename_dir + '/' + 'label' + str(img_num) + '.png')
else:
print(' ERROR: datasets=img or label')
img_num += 1
else:
print('ERROR')
print('Rename Finished')
Image_dir = r'D:\CPP\ThresholdDataset1.0\Gray\train\0.25'
Rename_dir = r'D:\CPP\ThresholdDataset2.0\Gray\train\0.25'
RenameImage(Image_dir, Rename_dir, datasets='label')
| [
"PIL.Image.open",
"os.path.splitext",
"os.path.join",
"cv2.imread",
"os.walk"
] | [((488, 506), 'os.walk', 'os.walk', (['image_dir'], {}), '(image_dir)\n', (495, 506), False, 'import os\n'), ((679, 709), 'os.path.join', 'os.path.join', (['parent', 'filename'], {}), '(parent, filename)\n', (691, 709), False, 'import os\n'), ((732, 755), 'PIL.Image.open', 'Image.open', (['currentPath'], {}), '(currentPath)\n', (742, 755), False, 'from PIL import Image\n'), ((608, 634), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (624, 634), False, 'import os\n'), ((1348, 1378), 'os.path.join', 'os.path.join', (['parent', 'filename'], {}), '(parent, filename)\n', (1360, 1378), False, 'import os\n'), ((1401, 1424), 'cv2.imread', 'cv2.imread', (['currentPath'], {}), '(currentPath)\n', (1411, 1424), False, 'import cv2\n'), ((1277, 1303), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1293, 1303), False, 'import os\n')] |
#! /usr/bin/env python3
"""Disassemble MIPS binary to assembly"""
from inst import instructions as I
import unify as U
import functools as F
def rename(name, value, width):
if name in ['rs', 'rt', 'rd']:
return f'${int(value, 2)}'
if name in ['imm', 'offset']:
if value[0] == '1':
return str(int(value, 2) - (1 << width))
else:
return str(int(value, 2))
if name in ['target']:
return hex(int(value, 2))
return str(int(value, 2))
def parseBin(binary, naming=rename):
names = None
for _, inst in I.items():
names = U.unify_binary(binary, inst.bin)
if names is not None:
break
if names is None:
raise ValueError('no unifying instruction')
names = {k:rename(k, *v) for k, v in names.items()}
return inst.name + ' ' + F.reduce(
lambda s, x: s.replace(*map(str, x)),
names.items(), inst.args
)
if __name__ == '__main__':
import sys
import argparse
parser = argparse.ArgumentParser(
description='Disassemble MIPS machine code into assembly'
)
parser.add_argument(
'-i',
dest='input',
metavar='input',
type=argparse.FileType('r'),
default=sys.stdin,
help='input file, if unspecified read from stdin'
)
parser.add_argument(
'-o',
dest='output',
metavar='output',
type=argparse.FileType('w'),
default=sys.stdout,
help='output file, if unspecified write to stdout'
)
args = parser.parse_args(sys.argv[1:])
with args.input as input:
prog = input.readlines()
prog = list(map(parseBin, prog))
with args.output as output:
for i in prog:
output.write(i + '\n')
| [
"unify.unify_binary",
"argparse.FileType",
"argparse.ArgumentParser",
"inst.instructions.items"
] | [((518, 527), 'inst.instructions.items', 'I.items', ([], {}), '()\n', (525, 527), True, 'from inst import instructions as I\n'), ((897, 984), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Disassemble MIPS machine code into assembly"""'}), "(description=\n 'Disassemble MIPS machine code into assembly')\n", (920, 984), False, 'import argparse\n'), ((539, 571), 'unify.unify_binary', 'U.unify_binary', (['binary', 'inst.bin'], {}), '(binary, inst.bin)\n', (553, 571), True, 'import unify as U\n'), ((1057, 1079), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (1074, 1079), False, 'import argparse\n'), ((1231, 1253), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (1248, 1253), False, 'import argparse\n')] |
# Copyright (c) 2017, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Mayo Clinic nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
from rdflib import Namespace, URIRef
class DottedNamespace(Namespace):
    """
    An RDF namespace that supports the FHIR dotted notation (e.g. fhir:Patient.status)
    """
    def __new__(cls, value):
        # Delegate construction to the rdflib Namespace (a str subclass).
        return Namespace.__new__(cls, value)
    def __getattribute__(self, item: str) -> "DottedURIRef":
        # 'index' must be special-cased: str already defines .index(), so
        # __getattr__ would never fire for it and ns.index would return the
        # str method instead of a URI reference.
        if item == 'index':
            return DottedURIRef(str(self) + item)
        else:
            return super().__getattribute__(item)
    def __getattr__(self, item: str) -> "DottedURIRef":
        # Any unknown attribute becomes a URI in this namespace, e.g. ns.Patient.
        return DottedURIRef(str(self) + item)
    def __eq__(self, other):
        # Explicitly forwarded so that __eq__/__hash__ are defined as a pair.
        return super().__eq__(other)
    def __hash__(self):
        return super().__hash__()
class DottedURIRef(URIRef):
    """
    A URIRef that supports the FHIR dotted notation
    """
    def __new__(cls, value, base=None):
        # Delegate construction to the rdflib URIRef (a str subclass).
        return URIRef.__new__(cls, value, base)
    def __getattr__(self, item: str) -> "DottedURIRef":
        # Attribute access appends a dotted segment: ref.status -> "<ref>.status".
        return DottedURIRef(str(self) + '.' + item)
    def __eq__(self, other):
        # Compare by string value so a DottedURIRef equals a plain URIRef
        # with the same URI, regardless of concrete subclass.
        if isinstance(self, URIRef) and isinstance(other, URIRef):
            return str(self) == str(other)
        else:
            return False
    def __hash__(self):
        # Hash on the URIRef class's qualified name plus the string value, so
        # the hash is independent of the concrete subclass -- consistent with
        # the cross-subclass equality defined in __eq__.
        fqn = URIRef.__module__ + '.' + URIRef.__name__
        return hash(fqn) ^ hash(str(self))
| [
"rdflib.URIRef.__new__",
"rdflib.Namespace.__new__"
] | [((1762, 1791), 'rdflib.Namespace.__new__', 'Namespace.__new__', (['cls', 'value'], {}), '(cls, value)\n', (1779, 1791), False, 'from rdflib import Namespace, URIRef\n'), ((2378, 2410), 'rdflib.URIRef.__new__', 'URIRef.__new__', (['cls', 'value', 'base'], {}), '(cls, value, base)\n', (2392, 2410), False, 'from rdflib import Namespace, URIRef\n')] |
#!/usr/bin/python
# ------------------------------------------------------------------------------
# birdland_bootstrap.py - A program to startup Start-From_PYInstaller.py
# in the src directory, which will invoke the birdland.py program.
# This is used with the PyInstaller installation package.
# WRW 19 Mar 2022 - Don't use __main__.py. Leave that for command line
# invocations only.
# WRW 20 Mar 2022 - Got rid of intermediate startup.py (or subsequently-named file).
# Set python environment (search path and cwd) before starting birdland.
# ------------------------------------------------------------------------------
import sys
import os
from pathlib import Path
from src.birdland import main
# Resolve the 'src' directory that sits next to this bootstrap file.
path = Path( Path( __file__ ).parent, 'src' ).as_posix()
# Make 'src' importable and the working directory before launching birdland,
# so its relative imports and file accesses resolve correctly.
sys.path.append( path )
os.chdir( path )
if __name__ == '__main__':
    # Propagate birdland's return value as the process exit code.
    sys.exit( main() )
| [
"os.chdir",
"sys.path.append",
"src.birdland.main",
"pathlib.Path"
] | [((786, 807), 'sys.path.append', 'sys.path.append', (['path'], {}), '(path)\n', (801, 807), False, 'import sys\n'), ((810, 824), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (818, 824), False, 'import os\n'), ((869, 875), 'src.birdland.main', 'main', ([], {}), '()\n', (873, 875), False, 'from src.birdland import main\n'), ((742, 756), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (746, 756), False, 'from pathlib import Path\n')] |
# Generated by Django 3.2.7 on 2021-09-08 00:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-declare Invite.invitee_email with a DB index."""

    # Must be applied after the initial 'invites' migration.
    dependencies = [
        ('invites', '0001_initial'),
    ]
    operations = [
        # Adds db_index=True to the existing invitee_email field.
        migrations.AlterField(
            model_name='invite',
            name='invitee_email',
            field=models.EmailField(db_index=True, max_length=254, verbose_name='invitee email'),
        ),
    ]
| [
"django.db.models.EmailField"
] | [((332, 410), 'django.db.models.EmailField', 'models.EmailField', ([], {'db_index': '(True)', 'max_length': '(254)', 'verbose_name': '"""invitee email"""'}), "(db_index=True, max_length=254, verbose_name='invitee email')\n", (349, 410), False, 'from django.db import migrations, models\n')] |
from django.urls import reverse, resolve
def test_dashboard():
    """The dashboard URL name and the root path must map to each other."""
    url = reverse('dashboards:dashboard')
    assert url == '/'
    match = resolve('/')
    assert match.view_name == 'dashboards:dashboard'
| [
"django.urls.resolve",
"django.urls.reverse"
] | [((76, 107), 'django.urls.reverse', 'reverse', (['"""dashboards:dashboard"""'], {}), "('dashboards:dashboard')\n", (83, 107), False, 'from django.urls import reverse, resolve\n'), ((126, 138), 'django.urls.resolve', 'resolve', (['"""/"""'], {}), "('/')\n", (133, 138), False, 'from django.urls import reverse, resolve\n')] |
from modules.search.wikipedia import Wiki
class RawGenerator:
    """Collects raw Wikipedia search results for a single sentence."""

    def __init__(self, sentence: str):
        self.sentence = sentence  # the query text to look up
        self.res = {}  # maps query string -> Wikipedia result

    def results(self):
        """Search Wikipedia for the sentence and store a hit in ``self.res``.

        The original looped over the single-element list ``[self.sentence]``
        and used ``dict.update`` for one key; both were redundant and are
        flattened here without changing behavior.
        """
        print(self.sentence)  # NOTE(review): debug output -- consider logging instead
        wiki = Wiki(self.sentence)
        if wiki.search():
            self.res[self.sentence] = wiki.result
| [
"modules.search.wikipedia.Wiki"
] | [((264, 271), 'modules.search.wikipedia.Wiki', 'Wiki', (['i'], {}), '(i)\n', (268, 271), False, 'from modules.search.wikipedia import Wiki\n')] |
from model.contact import Contact
from model.group import Group
import random
def test_add_contact_to_group(app, db_orm):
    """A groupless contact added to a group must appear among that group's members."""
    # Ensure at least one contact without a group exists.
    if len(db_orm.get_contacts_without_group()) == 0:
        app.contact.create(Contact(firstname="Tester"))
    # BUGFIX: the original read `len(db_orm.get_group_list() == 0)` -- a
    # misplaced parenthesis that compared the list to 0 and passed the
    # resulting bool to len(), raising TypeError instead of checking emptiness.
    if len(db_orm.get_group_list()) == 0:
        app.group.create(Group(name="Test Group"))
    contacts_not_in_groups = db_orm.get_contacts_without_group()
    contact = random.choice(contacts_not_in_groups)
    group = random.choice(db_orm.get_group_list())
    app.contact.add_contact_to_group(contact.id, group.id)
    assert (contact in db_orm.get_contacts_in_group(group))
| [
"model.group.Group",
"random.choice",
"model.contact.Contact"
] | [((406, 443), 'random.choice', 'random.choice', (['contacts_not_in_groups'], {}), '(contacts_not_in_groups)\n', (419, 443), False, 'import random\n'), ((205, 232), 'model.contact.Contact', 'Contact', ([], {'firstname': '"""Tester"""'}), "(firstname='Tester')\n", (212, 232), False, 'from model.contact import Contact\n'), ((301, 325), 'model.group.Group', 'Group', ([], {'name': '"""Test Group"""'}), "(name='Test Group')\n", (306, 325), False, 'from model.group import Group\n')] |
import pathlib
from typing import Any, Dict, MutableMapping
import toml
def IsSpecificPythonTool(toml_file_path: str, tool_name: str):
    """Return True if the TOML file declares *tool_name* under its [tool] table."""
    parsed: MutableMapping[str, Any] = toml.load(toml_file_path)
    tool_table: Dict[str, Any] = parsed.get("tool", {})
    return tool_name in tool_table
def IsFileFlit(toml_file_path: str) -> bool:
    """True when the file contains a [tool.flit] section (flit project)."""
    return IsSpecificPythonTool(toml_file_path, "flit")
def IsFilePoetry(toml_file_path: str) -> bool:
    """True when the file contains a [tool.poetry] section (poetry project)."""
    return IsSpecificPythonTool(toml_file_path, "poetry")
def IsFileCargo(toml_file_path: str) -> bool:
    """
    Cargo requires the manifest to be named Cargo.toml,
    so the file name alone identifies it unambiguously.
    """
    file_name = pathlib.Path(toml_file_path).name
    return file_name == "Cargo.toml"
def IsFileJuliaPkg(toml_file_path: str) -> bool:
    """
    Julia's pkg tool uses a Project.toml file,
    so the file name alone identifies it.
    """
    file_name = pathlib.Path(toml_file_path).name
    return file_name == "Project.toml"
"toml.load",
"pathlib.Path"
] | [((176, 201), 'toml.load', 'toml.load', (['toml_file_path'], {}), '(toml_file_path)\n', (185, 201), False, 'import toml\n'), ((782, 810), 'pathlib.Path', 'pathlib.Path', (['toml_file_path'], {}), '(toml_file_path)\n', (794, 810), False, 'import pathlib\n'), ((954, 982), 'pathlib.Path', 'pathlib.Path', (['toml_file_path'], {}), '(toml_file_path)\n', (966, 982), False, 'import pathlib\n')] |
import json
import pathlib
import urllib3
from functools import partial
import dash
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from dash.dash import no_update
from flask import flash, get_flashed_messages
from flask_caching import Cache
from data import dev
import preprocessing
from settings import (
SECRET_KEY, DB_URL, DEBUG, MANAGE_DB, SKIP_TS, SC_FILTERS, USE_DUMMY_DATA, CACHE_CONFIG, MAX_WARNINGS, MAX_INFOS)
import scenario
import graphs
from models import db, get_model_options, Filter, Colors, Labels
# NOTE(review): globally silences urllib3 warnings (e.g. unverified HTTPS) --
# confirm this is intended application-wide.
urllib3.disable_warnings()
# Absolute path of the directory containing this module.
APP_PATH = str(pathlib.Path(__file__).parent.resolve())
# Initialize app
app = dash.Dash(
    __name__,
    meta_tags=[
        {"name": "viewport", "content": "width=device-width, initial-scale=4.0"},
    ],
)
# Configure the underlying Flask server (sessions need secret_key for flash()).
server = app.server
server.secret_key = SECRET_KEY
# Database
server.config["SQLALCHEMY_DATABASE_URI"] = DB_URL
server.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(server)
# Cache
cache = Cache()
cache.init_app(server, config=CACHE_CONFIG)
# Layout
if not MANAGE_DB:
    # Imported lazily so DB-management runs don't need the layout machinery.
    from layout import get_layout, get_graph_options, get_error_and_warnings_div
    # partial defers layout construction; scenarios are loaded once at startup.
    app.layout = partial(get_layout, app, scenarios=scenario.get_scenarios())
@cache.memoize()
def get_scenario_data(scenario_id, table):
    """Load one scenario table, memoized via the Flask cache.

    Uses dummy data when USE_DUMMY_DATA is set, otherwise the real source.
    """
    app.logger.info(f"Loading scenario data #{scenario_id} (not cached)...")
    loader = dev.get_dummy_data if USE_DUMMY_DATA else scenario.get_scenario_data
    return loader(scenario_id, table)
@cache.memoize()
def get_multiple_scenario_data(*scenario_ids, table):
    """Fetch *table* for each scenario and merge the results (memoized)."""
    app.logger.info("Merging scenario data (not cached)...")
    per_scenario = [get_scenario_data(sid, table) for sid in scenario_ids]
    merged = scenario.merge_scenario_data(per_scenario)
    app.logger.info("Merged scenario data")
    return merged
@cache.memoize()
def get_multiple_scenario_filters(*scenario_ids):
    """Fetch filter data for each scenario and merge the results (memoized)."""
    app.logger.info("Merging scenario data (not cached)...")
    per_scenario = [scenario.get_scenario_filters(sid) for sid in scenario_ids]
    merged = scenario.merge_scenario_data(per_scenario)
    app.logger.info("Merged scenario data")
    return merged
@app.callback(
    Output(component_id="dd_scenario", component_property="options"),
    Input('scenario_reload', 'n_clicks'),
)
def reload_scenarios(_):
    """Rebuild the scenario dropdown options from the current scenario list."""
    options = []
    for sc in scenario.get_scenarios():
        label = f"{sc['id']}, {sc['scenario']}, {sc['source']}"
        options.append({"label": label, "value": sc["id"]})
    return options
@app.callback(
    [
        Output(component_id="load_filters", component_property="options"),
        Output(component_id="save_filters_name", component_property="value"),
    ],
    Input('save_filters', 'n_clicks'),
    [
        State(component_id="save_filters_name", component_property="value"),
        State(component_id=f"graph_scalars_options", component_property='children'),
        State(component_id=f"graph_timeseries_options", component_property='children'),
        State(component_id="aggregation_group_by", component_property="value"),
        State(component_id=f"filters", component_property='children')
    ]
)
def save_filters(_, name, graph_scalars_options, graph_timeseries_options, agg_group_by, filter_div):
    """Persist the current filter/graph settings under *name* in the database.

    Returns the refreshed dropdown options for saved filters and clears the
    name input field.
    """
    # No name entered -> nothing to save, keep outputs unchanged.
    if not name:
        raise PreventUpdate
    filters = preprocessing.extract_filters("scalars", filter_div)
    # The aggregation setting is stored alongside the filters.
    filters["agg_group_by"] = agg_group_by
    scalar_graph_options = preprocessing.extract_graph_options(graph_scalars_options)
    ts_graph_options = preprocessing.extract_graph_options(graph_timeseries_options)
    db_filter = Filter(
        name=name,
        filters=filters,
        scalar_graph_options=scalar_graph_options,
        ts_graph_options=ts_graph_options
    )
    db.session.add(db_filter)
    db.session.commit()
    # Empty string resets the name input after a successful save.
    return get_model_options(Filter), ""
@app.callback(
    [
        Output(component_id="load_colors", component_property="options"),
        Output(component_id="save_colors_name", component_property="value"),
        Output(component_id="colors_error", component_property="children"),
    ],
    Input('save_colors', 'n_clicks'),
    [
        State(component_id="save_colors_name", component_property="value"),
        State(component_id="colors", component_property='value')
    ]
)
def save_colors(_, name, str_colors):
    """Persist a JSON color mapping under *name*; reports JSON errors via flash."""
    # No name entered -> nothing to save, keep outputs unchanged.
    if not name:
        raise PreventUpdate
    try:
        colors = json.loads(str_colors)
    except json.JSONDecodeError as je:
        # Invalid JSON: surface the error in the log div, keep options as-is.
        flash(f"Could not read color mapping. Input must be valid JSON. (Error: {je})", "error")
        return get_model_options(Colors), "", show_logs()
    db_colors = Colors(
        name=name,
        colors=colors,
    )
    db.session.add(db_colors)
    db.session.commit()
    # Empty string resets the name input after a successful save.
    return get_model_options(Colors), "", show_logs()
@app.callback(
    [
        Output(component_id="load_labels", component_property="options"),
        Output(component_id="save_labels_name", component_property="value"),
        Output(component_id="labels_error", component_property="children"),
    ],
    Input('save_labels', 'n_clicks'),
    [
        State(component_id="save_labels_name", component_property="value"),
        State(component_id="labels", component_property='value')
    ]
)
def save_labels(_, name, str_labels):
    """Persist a JSON label mapping under *name*; reports JSON errors via flash."""
    # No name entered -> nothing to save, keep outputs unchanged.
    if not name:
        raise PreventUpdate
    try:
        labels = json.loads(str_labels)
    except json.JSONDecodeError as je:
        # Invalid JSON: surface the error in the log div, keep options as-is.
        flash(f"Could not read labels. Input must be valid JSON. (Error: {je})", "error")
        return get_model_options(Labels), "", show_logs()
    db_labels = Labels(
        name=name,
        labels=labels,
    )
    db.session.add(db_labels)
    db.session.commit()
    # Empty string resets the name input after a successful save.
    return get_model_options(Labels), "", show_logs()
@app.callback(
    [
        Output(component_id="graph_scalars_plot_switch", component_property="value"),
        Output(component_id="graph_timeseries_plot_switch", component_property="value"),
        Output(component_id="aggregation_group_by", component_property="value")
    ] +
    [Output(component_id=f"filter-{filter_}", component_property='value') for filter_ in SC_FILTERS] +
    [Output(component_id="save_load_errors", component_property="children")],
    Input('load_filters', "value"),
    State(component_id="dd_scenario", component_property="value"),
    prevent_initial_call=True
)
def load_filters(name, scenarios):
    """Restore a saved filter set: plot switches, aggregation and all filter values.

    Output order is fixed: scalar switch, timeseries switch, aggregation,
    one value per SC_FILTERS entry, then the error/log div.
    """
    if not name:
        raise PreventUpdate
    if not scenarios:
        # Filter options depend on loaded scenarios; without them the
        # restored values would be meaningless, so leave everything as-is.
        flash("No scenario selected - cannot load filters without scenario", "error")
        return (
            no_update,
            no_update,
            no_update,
            *([no_update] * len(SC_FILTERS)),
            show_logs(),
        )
    db_filter = Filter.query.filter_by(name=name).first()
    # Missing filter keys fall back to None so the dropdowns are cleared.
    filters = [db_filter.filters.get(filter_, None) for filter_ in SC_FILTERS]
    flash("Successfully loaded filters", "info")
    return (
        db_filter.scalar_graph_options["type"],
        db_filter.ts_graph_options["type"],
        db_filter.filters["agg_group_by"],
        *filters,
        show_logs(),
    )
@app.callback(
    Output(component_id="colors", component_property="value"),
    Input('load_colors', "value"),
    prevent_initial_call=True
)
def load_colors(name):
    """Load a saved color mapping by name into the colors textarea as JSON."""
    if not name:
        raise PreventUpdate
    record = Colors.query.filter_by(name=name).first()
    return json.dumps(record.colors)
@app.callback(
    Output(component_id="labels", component_property="value"),
    Input('load_labels', "value"),
    prevent_initial_call=True
)
def load_labels(name):
    """Load a saved label mapping by name into the labels textarea as JSON."""
    if not name:
        raise PreventUpdate
    record = Labels.query.filter_by(name=name).first()
    return json.dumps(record.labels)
@app.callback(
    [Output(component_id=f"filter-{filter_}", component_property="options") for filter_ in SC_FILTERS],
    [Input(component_id="dd_scenario", component_property="value")],
)
def load_scenario(scenarios):
    """Populate every filter dropdown's options for the selected scenarios."""
    if scenarios is None:
        raise PreventUpdate
    # A single selection arrives as a scalar; normalize to a list.
    if not isinstance(scenarios, list):
        scenarios = [scenarios]
    filters = get_multiple_scenario_filters(*scenarios)
    app.logger.info("Data successfully loaded")
    return preprocessing.get_filter_options(filters)
@app.callback(
    [Output(component_id=f"graph_scalars_options", component_property="children")],
    [
        Input(component_id="graph_scalars_plot_switch", component_property="value"),
        Input('load_filters', "value"),
    ],
    prevent_initial_call=True
)
def toggle_scalar_graph_options(plot_type, name):
    """Rebuild the scalar graph option widgets, either for a newly chosen plot
    type or from a saved filter set, depending on which input fired."""
    # Have to use "callback_context" as every component can only have one output callback
    ctx = dash.callback_context
    if ctx.triggered[0]["prop_id"] == "graph_scalars_plot_switch.value":
        # Plot type switched manually: fresh options with defaults.
        graph_scalar_options = get_graph_options("scalars", plot_type)
    else:
        # Triggered by loading a saved filter set.
        if not name:
            raise PreventUpdate
        db_filter = Filter.query.filter_by(name=name).first()
        graph_scalar_options = get_graph_options(
            "scalars", db_filter.scalar_graph_options["type"], db_filter.scalar_graph_options["options"])
    # Trailing comma: the callback declares a single-element output list.
    return graph_scalar_options,
@app.callback(
    [Output(component_id=f"graph_timeseries_options", component_property="children")],
    [
        Input(component_id="graph_timeseries_plot_switch", component_property="value"),
        Input('load_filters', "value"),
    ],
    prevent_initial_call=True
)
def toggle_timeseries_graph_options(plot_type, name):
    """Rebuild the timeseries graph option widgets, either for a newly chosen
    plot type or from a saved filter set, depending on which input fired."""
    # Have to use "callback_context" as every component can only have one output callback
    ctx = dash.callback_context
    if ctx.triggered[0]["prop_id"] == "graph_timeseries_plot_switch.value":
        # Plot type switched manually: fresh options with defaults.
        graph_timeseries_options = get_graph_options("timeseries", plot_type)
    else:
        # Triggered by loading a saved filter set.
        if not name:
            raise PreventUpdate
        db_filter = Filter.query.filter_by(name=name).first()
        graph_timeseries_options = get_graph_options(
            "timeseries", db_filter.ts_graph_options["type"], db_filter.ts_graph_options["options"])
    # Trailing comma: the callback declares a single-element output list.
    return graph_timeseries_options,
@app.callback(
    [
        Output(component_id='graph_scalars', component_property='figure'),
        Output(component_id='table_scalars', component_property='data'),
        Output(component_id='table_scalars', component_property='columns'),
        Output(component_id='graph_scalars_error', component_property='children'),
    ],
    [
        Input(component_id="refresh_scalars", component_property="n_clicks"),
        Input(component_id="show_scalars_data", component_property='value'),
    ],
    [
        State(component_id="units", component_property='children'),
        State(component_id=f"graph_scalars_options", component_property='children'),
        State(component_id=f"filters", component_property='children'),
        State(component_id="colors", component_property="value"),
        State(component_id="labels", component_property="value"),
        State(component_id="aggregation_group_by", component_property="value"),
        State(component_id="dd_scenario", component_property="value"),
    ],
    prevent_initial_call=True
)
def scalar_graph(_, show_data, units_div, graph_scalars_options, filter_div, colors, labels, agg_group_by, scenarios):
    """Render the scalar figure (and optional data table) for the selected
    scenarios, applying the current filters, units, colors and labels.

    On any preprocessing/plotting failure an empty figure is returned and the
    problem is reported through the flashed-message log div.
    """
    if scenarios is None:
        raise PreventUpdate
    data = get_multiple_scenario_data(*scenarios, table="oed_scalars")
    filters = preprocessing.extract_filters("scalars", filter_div)
    units = preprocessing.extract_unit_options(units_div)
    graph_options = preprocessing.extract_graph_options(graph_scalars_options)
    colors = preprocessing.extract_colors(colors)
    graph_options["options"]["color_discrete_map"] = colors
    labels = preprocessing.extract_labels(labels)
    graph_options["options"]["labels"] = labels
    try:
        preprocessed_data = preprocessing.prepare_scalars(data, agg_group_by, units, filters)
    except preprocessing.PreprocessingError:
        # Preparation failed: empty figure, empty table, show the error log.
        return graphs.get_empty_fig(), [], [], show_logs()
    if preprocessed_data.empty:
        flash("No data for current filter settings", "warning")
        return graphs.get_empty_fig(), [], [], show_logs()
    try:
        fig = graphs.get_scalar_plot(preprocessed_data, graph_options)
    except graphs.PlottingError:
        return graphs.get_empty_fig(), [], [], show_logs()
    # The "show data" checkbox value arrives as a list of selected strings.
    if show_data and "True" in show_data:
        columns = [{"name": i, "id": i} for i in preprocessed_data.columns]
        # Stringify cells so the Dash DataTable can render any dtype.
        data_table = preprocessed_data.applymap(str).to_dict("records")
    else:
        columns = []
        data_table = []
    return fig, data_table, columns, show_logs()
@app.callback(
    [
        Output(component_id='graph_timeseries', component_property='figure'),
        Output(component_id='table_timeseries', component_property='data'),
        Output(component_id='table_timeseries', component_property='columns'),
        Output(component_id='graph_timeseries_error', component_property='children'),
    ],
    [
        Input(component_id="refresh_timeseries", component_property="n_clicks"),
        Input(component_id="show_timeseries_data", component_property='value'),
    ],
    [
        State(component_id="units", component_property='children'),
        State(component_id="graph_timeseries_options", component_property='children'),
        State(component_id=f"filters", component_property='children'),
        State(component_id="colors", component_property="value"),
        State(component_id="labels", component_property="value"),
        State(component_id="aggregation_group_by", component_property="value"),
        State(component_id="dd_scenario", component_property="value"),
    ],
    prevent_initial_call=True
)
def timeseries_graph(
    _, show_data, units_div, graph_timeseries_options, filter_div, colors, labels, agg_group_by, scenarios
):
    """Render the timeseries figure (and optional data table) for the selected
    scenarios, applying the current filters, units, colors and labels.

    Skipped entirely when SKIP_TS is set; on any preprocessing/plotting
    failure an empty figure is returned and the problem is reported through
    the flashed-message log div.
    """
    if scenarios is None or SKIP_TS:
        raise PreventUpdate
    data = get_multiple_scenario_data(*scenarios, table="oed_timeseries")
    filters = preprocessing.extract_filters(
        "timeseries", filter_div
    )
    units = preprocessing.extract_unit_options(units_div)
    graph_options = preprocessing.extract_graph_options(graph_timeseries_options)
    colors = preprocessing.extract_colors(colors)
    graph_options["options"]["color_discrete_map"] = colors
    labels = preprocessing.extract_labels(labels)
    graph_options["options"]["labels"] = labels
    try:
        preprocessed_data = preprocessing.prepare_timeseries(data, agg_group_by, units, filters)
    except preprocessing.PreprocessingError:
        # Preparation failed: empty figure, empty table, show the error log.
        return graphs.get_empty_fig(), [], [], show_logs()
    if preprocessed_data.empty:
        flash("No data for current filter settings", "warning")
        return graphs.get_empty_fig(), [], [], show_logs()
    try:
        fig = graphs.get_timeseries_plot(preprocessed_data, graph_options)
    except graphs.PlottingError:
        return graphs.get_empty_fig(), [], [], show_logs()
    # The "show data" checkbox value arrives as a list of selected strings.
    if show_data and "True" in show_data:
        columns = [{"name": i, "id": i} for i in preprocessed_data.columns]
        # Stringify cells so the Dash DataTable can render any dtype.
        data_table = preprocessed_data.applymap(str).to_dict("records")
    else:
        columns = []
        data_table = []
    return fig, data_table, columns, show_logs()
def _truncate_messages(messages, limit, kind):
    """Cap *messages* at *limit* entries, appending a skip notice when truncated."""
    if len(messages) > limit:
        messages = messages[:limit]
        messages.append(f"Too many {kind}s (>{limit}) - Skipping further {kind}s...")
    return messages


def show_logs():
    """Collect flashed errors, warnings and infos and render them as a div.

    Warnings and infos are capped at MAX_WARNINGS / MAX_INFOS; the duplicated
    truncation logic of the original is factored into _truncate_messages.
    """
    errors = get_flashed_messages(category_filter=["error"])
    warnings = _truncate_messages(
        get_flashed_messages(category_filter=["warning"]), MAX_WARNINGS, "warning"
    )
    infos = _truncate_messages(
        get_flashed_messages(category_filter=["info"]), MAX_INFOS, "info"
    )
    return get_error_and_warnings_div(errors, warnings, infos)
if __name__ == "__main__":
app.run_server(debug=DEBUG)
| [
"preprocessing.prepare_scalars",
"data.dev.get_dummy_data",
"preprocessing.extract_colors",
"flask.get_flashed_messages",
"dash.dependencies.Input",
"preprocessing.extract_filters",
"graphs.get_empty_fig",
"models.Colors",
"layout.get_error_and_warnings_div",
"models.Labels",
"dash.Dash",
"lay... | [((569, 595), 'urllib3.disable_warnings', 'urllib3.disable_warnings', ([], {}), '()\n', (593, 595), False, 'import urllib3\n'), ((677, 786), 'dash.Dash', 'dash.Dash', (['__name__'], {'meta_tags': "[{'name': 'viewport', 'content': 'width=device-width, initial-scale=4.0'}]"}), "(__name__, meta_tags=[{'name': 'viewport', 'content':\n 'width=device-width, initial-scale=4.0'}])\n", (686, 786), False, 'import dash\n'), ((978, 997), 'models.db.init_app', 'db.init_app', (['server'], {}), '(server)\n', (989, 997), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((1015, 1022), 'flask_caching.Cache', 'Cache', ([], {}), '()\n', (1020, 1022), False, 'from flask_caching import Cache\n'), ((1481, 1527), 'scenario.get_scenario_data', 'scenario.get_scenario_data', (['scenario_id', 'table'], {}), '(scenario_id, table)\n', (1507, 1527), False, 'import scenario\n'), ((1777, 1816), 'scenario.merge_scenario_data', 'scenario.merge_scenario_data', (['scenarios'], {}), '(scenarios)\n', (1805, 1816), False, 'import scenario\n'), ((2129, 2168), 'scenario.merge_scenario_data', 'scenario.merge_scenario_data', (['scenarios'], {}), '(scenarios)\n', (2157, 2168), False, 'import scenario\n'), ((2403, 2427), 'scenario.get_scenarios', 'scenario.get_scenarios', ([], {}), '()\n', (2425, 2427), False, 'import scenario\n'), ((2252, 2316), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""dd_scenario"""', 'component_property': '"""options"""'}), "(component_id='dd_scenario', component_property='options')\n", (2258, 2316), False, 'from dash.dependencies import Input, Output, State\n'), ((2322, 2358), 'dash.dependencies.Input', 'Input', (['"""scenario_reload"""', '"""n_clicks"""'], {}), "('scenario_reload', 'n_clicks')\n", (2327, 2358), False, 'from dash.dependencies import Input, Output, State\n'), ((3394, 3446), 'preprocessing.extract_filters', 'preprocessing.extract_filters', (['"""scalars"""', 'filter_div'], {}), "('scalars', filter_div)\n", (3423, 
3446), False, 'import preprocessing\n'), ((3517, 3575), 'preprocessing.extract_graph_options', 'preprocessing.extract_graph_options', (['graph_scalars_options'], {}), '(graph_scalars_options)\n', (3552, 3575), False, 'import preprocessing\n'), ((3599, 3660), 'preprocessing.extract_graph_options', 'preprocessing.extract_graph_options', (['graph_timeseries_options'], {}), '(graph_timeseries_options)\n', (3634, 3660), False, 'import preprocessing\n'), ((3678, 3795), 'models.Filter', 'Filter', ([], {'name': 'name', 'filters': 'filters', 'scalar_graph_options': 'scalar_graph_options', 'ts_graph_options': 'ts_graph_options'}), '(name=name, filters=filters, scalar_graph_options=\n scalar_graph_options, ts_graph_options=ts_graph_options)\n', (3684, 3795), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((3833, 3858), 'models.db.session.add', 'db.session.add', (['db_filter'], {}), '(db_filter)\n', (3847, 3858), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((3863, 3882), 'models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3880, 3882), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((2783, 2816), 'dash.dependencies.Input', 'Input', (['"""save_filters"""', '"""n_clicks"""'], {}), "('save_filters', 'n_clicks')\n", (2788, 2816), False, 'from dash.dependencies import Input, Output, State\n'), ((4719, 4751), 'models.Colors', 'Colors', ([], {'name': 'name', 'colors': 'colors'}), '(name=name, colors=colors)\n', (4725, 4751), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((4779, 4804), 'models.db.session.add', 'db.session.add', (['db_colors'], {}), '(db_colors)\n', (4793, 4804), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((4809, 4828), 'models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4826, 4828), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((4186, 4218), 
'dash.dependencies.Input', 'Input', (['"""save_colors"""', '"""n_clicks"""'], {}), "('save_colors', 'n_clicks')\n", (4191, 4218), False, 'from dash.dependencies import Input, Output, State\n'), ((5671, 5703), 'models.Labels', 'Labels', ([], {'name': 'name', 'labels': 'labels'}), '(name=name, labels=labels)\n', (5677, 5703), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((5731, 5756), 'models.db.session.add', 'db.session.add', (['db_labels'], {}), '(db_labels)\n', (5745, 5756), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((5761, 5780), 'models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5778, 5780), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((5145, 5177), 'dash.dependencies.Input', 'Input', (['"""save_labels"""', '"""n_clicks"""'], {}), "('save_labels', 'n_clicks')\n", (5150, 5177), False, 'from dash.dependencies import Input, Output, State\n'), ((6934, 6978), 'flask.flash', 'flash', (['"""Successfully loaded filters"""', '"""info"""'], {}), "('Successfully loaded filters', 'info')\n", (6939, 6978), False, 'from flask import flash, get_flashed_messages\n'), ((6307, 6337), 'dash.dependencies.Input', 'Input', (['"""load_filters"""', '"""value"""'], {}), "('load_filters', 'value')\n", (6312, 6337), False, 'from dash.dependencies import Input, Output, State\n'), ((6343, 6404), 'dash.dependencies.State', 'State', ([], {'component_id': '"""dd_scenario"""', 'component_property': '"""value"""'}), "(component_id='dd_scenario', component_property='value')\n", (6348, 6404), False, 'from dash.dependencies import Input, Output, State\n'), ((7457, 7485), 'json.dumps', 'json.dumps', (['db_colors.colors'], {}), '(db_colors.colors)\n', (7467, 7485), False, 'import json\n'), ((7193, 7250), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""colors"""', 'component_property': '"""value"""'}), "(component_id='colors', component_property='value')\n", (7199, 
7250), False, 'from dash.dependencies import Input, Output, State\n'), ((7256, 7285), 'dash.dependencies.Input', 'Input', (['"""load_colors"""', '"""value"""'], {}), "('load_colors', 'value')\n", (7261, 7285), False, 'from dash.dependencies import Input, Output, State\n'), ((7771, 7799), 'json.dumps', 'json.dumps', (['db_labels.labels'], {}), '(db_labels.labels)\n', (7781, 7799), False, 'import json\n'), ((7507, 7564), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""labels"""', 'component_property': '"""value"""'}), "(component_id='labels', component_property='value')\n", (7513, 7564), False, 'from dash.dependencies import Input, Output, State\n'), ((7570, 7599), 'dash.dependencies.Input', 'Input', (['"""load_labels"""', '"""value"""'], {}), "('load_labels', 'value')\n", (7575, 7599), False, 'from dash.dependencies import Input, Output, State\n'), ((8265, 8306), 'preprocessing.get_filter_options', 'preprocessing.get_filter_options', (['filters'], {}), '(filters)\n', (8297, 8306), False, 'import preprocessing\n'), ((11447, 11499), 'preprocessing.extract_filters', 'preprocessing.extract_filters', (['"""scalars"""', 'filter_div'], {}), "('scalars', filter_div)\n", (11476, 11499), False, 'import preprocessing\n'), ((11512, 11557), 'preprocessing.extract_unit_options', 'preprocessing.extract_unit_options', (['units_div'], {}), '(units_div)\n', (11546, 11557), False, 'import preprocessing\n'), ((11578, 11636), 'preprocessing.extract_graph_options', 'preprocessing.extract_graph_options', (['graph_scalars_options'], {}), '(graph_scalars_options)\n', (11613, 11636), False, 'import preprocessing\n'), ((11650, 11686), 'preprocessing.extract_colors', 'preprocessing.extract_colors', (['colors'], {}), '(colors)\n', (11678, 11686), False, 'import preprocessing\n'), ((11760, 11796), 'preprocessing.extract_labels', 'preprocessing.extract_labels', (['labels'], {}), '(labels)\n', (11788, 11796), False, 'import preprocessing\n'), ((14040, 14095), 
'preprocessing.extract_filters', 'preprocessing.extract_filters', (['"""timeseries"""', 'filter_div'], {}), "('timeseries', filter_div)\n", (14069, 14095), False, 'import preprocessing\n'), ((14122, 14167), 'preprocessing.extract_unit_options', 'preprocessing.extract_unit_options', (['units_div'], {}), '(units_div)\n', (14156, 14167), False, 'import preprocessing\n'), ((14188, 14249), 'preprocessing.extract_graph_options', 'preprocessing.extract_graph_options', (['graph_timeseries_options'], {}), '(graph_timeseries_options)\n', (14223, 14249), False, 'import preprocessing\n'), ((14263, 14299), 'preprocessing.extract_colors', 'preprocessing.extract_colors', (['colors'], {}), '(colors)\n', (14291, 14299), False, 'import preprocessing\n'), ((14373, 14409), 'preprocessing.extract_labels', 'preprocessing.extract_labels', (['labels'], {}), '(labels)\n', (14401, 14409), False, 'import preprocessing\n'), ((15327, 15374), 'flask.get_flashed_messages', 'get_flashed_messages', ([], {'category_filter': "['error']"}), "(category_filter=['error'])\n", (15347, 15374), False, 'from flask import flash, get_flashed_messages\n'), ((15390, 15439), 'flask.get_flashed_messages', 'get_flashed_messages', ([], {'category_filter': "['warning']"}), "(category_filter=['warning'])\n", (15410, 15439), False, 'from flask import flash, get_flashed_messages\n'), ((15627, 15673), 'flask.get_flashed_messages', 'get_flashed_messages', ([], {'category_filter': "['info']"}), "(category_filter=['info'])\n", (15647, 15673), False, 'from flask import flash, get_flashed_messages\n'), ((15833, 15884), 'layout.get_error_and_warnings_div', 'get_error_and_warnings_div', (['errors', 'warnings', 'infos'], {}), '(errors, warnings, infos)\n', (15859, 15884), False, 'from layout import get_layout, get_graph_options, get_error_and_warnings_div\n'), ((1431, 1469), 'data.dev.get_dummy_data', 'dev.get_dummy_data', (['scenario_id', 'table'], {}), '(scenario_id, table)\n', (1449, 1469), False, 'from data import dev\n'), 
((2035, 2077), 'scenario.get_scenario_filters', 'scenario.get_scenario_filters', (['scenario_id'], {}), '(scenario_id)\n', (2064, 2077), False, 'import scenario\n'), ((3895, 3920), 'models.get_model_options', 'get_model_options', (['Filter'], {}), '(Filter)\n', (3912, 3920), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((2627, 2692), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""load_filters"""', 'component_property': '"""options"""'}), "(component_id='load_filters', component_property='options')\n", (2633, 2692), False, 'from dash.dependencies import Input, Output, State\n'), ((2702, 2770), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""save_filters_name"""', 'component_property': '"""value"""'}), "(component_id='save_filters_name', component_property='value')\n", (2708, 2770), False, 'from dash.dependencies import Input, Output, State\n'), ((2832, 2899), 'dash.dependencies.State', 'State', ([], {'component_id': '"""save_filters_name"""', 'component_property': '"""value"""'}), "(component_id='save_filters_name', component_property='value')\n", (2837, 2899), False, 'from dash.dependencies import Input, Output, State\n'), ((2909, 2984), 'dash.dependencies.State', 'State', ([], {'component_id': 'f"""graph_scalars_options"""', 'component_property': '"""children"""'}), "(component_id=f'graph_scalars_options', component_property='children')\n", (2914, 2984), False, 'from dash.dependencies import Input, Output, State\n'), ((2994, 3072), 'dash.dependencies.State', 'State', ([], {'component_id': 'f"""graph_timeseries_options"""', 'component_property': '"""children"""'}), "(component_id=f'graph_timeseries_options', component_property='children')\n", (2999, 3072), False, 'from dash.dependencies import Input, Output, State\n'), ((3082, 3152), 'dash.dependencies.State', 'State', ([], {'component_id': '"""aggregation_group_by"""', 'component_property': '"""value"""'}), "(component_id='aggregation_group_by', 
component_property='value')\n", (3087, 3152), False, 'from dash.dependencies import Input, Output, State\n'), ((3162, 3223), 'dash.dependencies.State', 'State', ([], {'component_id': 'f"""filters"""', 'component_property': '"""children"""'}), "(component_id=f'filters', component_property='children')\n", (3167, 3223), False, 'from dash.dependencies import Input, Output, State\n'), ((4485, 4507), 'json.loads', 'json.loads', (['str_colors'], {}), '(str_colors)\n', (4495, 4507), False, 'import json\n'), ((4841, 4866), 'models.get_model_options', 'get_model_options', (['Colors'], {}), '(Colors)\n', (4858, 4866), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((3956, 4020), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""load_colors"""', 'component_property': '"""options"""'}), "(component_id='load_colors', component_property='options')\n", (3962, 4020), False, 'from dash.dependencies import Input, Output, State\n'), ((4030, 4097), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""save_colors_name"""', 'component_property': '"""value"""'}), "(component_id='save_colors_name', component_property='value')\n", (4036, 4097), False, 'from dash.dependencies import Input, Output, State\n'), ((4107, 4173), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""colors_error"""', 'component_property': '"""children"""'}), "(component_id='colors_error', component_property='children')\n", (4113, 4173), False, 'from dash.dependencies import Input, Output, State\n'), ((4234, 4300), 'dash.dependencies.State', 'State', ([], {'component_id': '"""save_colors_name"""', 'component_property': '"""value"""'}), "(component_id='save_colors_name', component_property='value')\n", (4239, 4300), False, 'from dash.dependencies import Input, Output, State\n'), ((4310, 4366), 'dash.dependencies.State', 'State', ([], {'component_id': '"""colors"""', 'component_property': '"""value"""'}), "(component_id='colors', 
component_property='value')\n", (4315, 4366), False, 'from dash.dependencies import Input, Output, State\n'), ((5444, 5466), 'json.loads', 'json.loads', (['str_labels'], {}), '(str_labels)\n', (5454, 5466), False, 'import json\n'), ((5793, 5818), 'models.get_model_options', 'get_model_options', (['Labels'], {}), '(Labels)\n', (5810, 5818), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((4915, 4979), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""load_labels"""', 'component_property': '"""options"""'}), "(component_id='load_labels', component_property='options')\n", (4921, 4979), False, 'from dash.dependencies import Input, Output, State\n'), ((4989, 5056), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""save_labels_name"""', 'component_property': '"""value"""'}), "(component_id='save_labels_name', component_property='value')\n", (4995, 5056), False, 'from dash.dependencies import Input, Output, State\n'), ((5066, 5132), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""labels_error"""', 'component_property': '"""children"""'}), "(component_id='labels_error', component_property='children')\n", (5072, 5132), False, 'from dash.dependencies import Input, Output, State\n'), ((5193, 5259), 'dash.dependencies.State', 'State', ([], {'component_id': '"""save_labels_name"""', 'component_property': '"""value"""'}), "(component_id='save_labels_name', component_property='value')\n", (5198, 5259), False, 'from dash.dependencies import Input, Output, State\n'), ((5269, 5325), 'dash.dependencies.State', 'State', ([], {'component_id': '"""labels"""', 'component_property': '"""value"""'}), "(component_id='labels', component_property='value')\n", (5274, 5325), False, 'from dash.dependencies import Input, Output, State\n'), ((6548, 6625), 'flask.flash', 'flash', (['"""No scenario selected - cannot load filters without scenario"""', '"""error"""'], {}), "('No scenario selected - cannot load filters without 
scenario', 'error')\n", (6553, 6625), False, 'from flask import flash, get_flashed_messages\n'), ((7822, 7892), 'dash.dependencies.Output', 'Output', ([], {'component_id': 'f"""filter-{filter_}"""', 'component_property': '"""options"""'}), "(component_id=f'filter-{filter_}', component_property='options')\n", (7828, 7892), False, 'from dash.dependencies import Input, Output, State\n'), ((7926, 7987), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""dd_scenario"""', 'component_property': '"""value"""'}), "(component_id='dd_scenario', component_property='value')\n", (7931, 7987), False, 'from dash.dependencies import Input, Output, State\n'), ((8854, 8893), 'layout.get_graph_options', 'get_graph_options', (['"""scalars"""', 'plot_type'], {}), "('scalars', plot_type)\n", (8871, 8893), False, 'from layout import get_layout, get_graph_options, get_error_and_warnings_div\n'), ((9050, 9165), 'layout.get_graph_options', 'get_graph_options', (['"""scalars"""', "db_filter.scalar_graph_options['type']", "db_filter.scalar_graph_options['options']"], {}), "('scalars', db_filter.scalar_graph_options['type'],\n db_filter.scalar_graph_options['options'])\n", (9067, 9165), False, 'from layout import get_layout, get_graph_options, get_error_and_warnings_div\n'), ((8329, 8405), 'dash.dependencies.Output', 'Output', ([], {'component_id': 'f"""graph_scalars_options"""', 'component_property': '"""children"""'}), "(component_id=f'graph_scalars_options', component_property='children')\n", (8335, 8405), False, 'from dash.dependencies import Input, Output, State\n'), ((8422, 8497), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""graph_scalars_plot_switch"""', 'component_property': '"""value"""'}), "(component_id='graph_scalars_plot_switch', component_property='value')\n", (8427, 8497), False, 'from dash.dependencies import Input, Output, State\n'), ((8507, 8537), 'dash.dependencies.Input', 'Input', (['"""load_filters"""', '"""value"""'], {}), "('load_filters', 
'value')\n", (8512, 8537), False, 'from dash.dependencies import Input, Output, State\n'), ((9772, 9814), 'layout.get_graph_options', 'get_graph_options', (['"""timeseries"""', 'plot_type'], {}), "('timeseries', plot_type)\n", (9789, 9814), False, 'from layout import get_layout, get_graph_options, get_error_and_warnings_div\n'), ((9975, 10085), 'layout.get_graph_options', 'get_graph_options', (['"""timeseries"""', "db_filter.ts_graph_options['type']", "db_filter.ts_graph_options['options']"], {}), "('timeseries', db_filter.ts_graph_options['type'],\n db_filter.ts_graph_options['options'])\n", (9992, 10085), False, 'from layout import get_layout, get_graph_options, get_error_and_warnings_div\n'), ((9230, 9309), 'dash.dependencies.Output', 'Output', ([], {'component_id': 'f"""graph_timeseries_options"""', 'component_property': '"""children"""'}), "(component_id=f'graph_timeseries_options', component_property='children')\n", (9236, 9309), False, 'from dash.dependencies import Input, Output, State\n'), ((9326, 9404), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""graph_timeseries_plot_switch"""', 'component_property': '"""value"""'}), "(component_id='graph_timeseries_plot_switch', component_property='value')\n", (9331, 9404), False, 'from dash.dependencies import Input, Output, State\n'), ((9414, 9444), 'dash.dependencies.Input', 'Input', (['"""load_filters"""', '"""value"""'], {}), "('load_filters', 'value')\n", (9419, 9444), False, 'from dash.dependencies import Input, Output, State\n'), ((11882, 11947), 'preprocessing.prepare_scalars', 'preprocessing.prepare_scalars', (['data', 'agg_group_by', 'units', 'filters'], {}), '(data, agg_group_by, units, filters)\n', (11911, 11947), False, 'import preprocessing\n'), ((12092, 12147), 'flask.flash', 'flash', (['"""No data for current filter settings"""', '"""warning"""'], {}), "('No data for current filter settings', 'warning')\n", (12097, 12147), False, 'from flask import flash, get_flashed_messages\n'), 
((12230, 12286), 'graphs.get_scalar_plot', 'graphs.get_scalar_plot', (['preprocessed_data', 'graph_options'], {}), '(preprocessed_data, graph_options)\n', (12252, 12286), False, 'import graphs\n'), ((10163, 10228), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""graph_scalars"""', 'component_property': '"""figure"""'}), "(component_id='graph_scalars', component_property='figure')\n", (10169, 10228), False, 'from dash.dependencies import Input, Output, State\n'), ((10238, 10301), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""table_scalars"""', 'component_property': '"""data"""'}), "(component_id='table_scalars', component_property='data')\n", (10244, 10301), False, 'from dash.dependencies import Input, Output, State\n'), ((10311, 10377), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""table_scalars"""', 'component_property': '"""columns"""'}), "(component_id='table_scalars', component_property='columns')\n", (10317, 10377), False, 'from dash.dependencies import Input, Output, State\n'), ((10387, 10460), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""graph_scalars_error"""', 'component_property': '"""children"""'}), "(component_id='graph_scalars_error', component_property='children')\n", (10393, 10460), False, 'from dash.dependencies import Input, Output, State\n'), ((10483, 10551), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""refresh_scalars"""', 'component_property': '"""n_clicks"""'}), "(component_id='refresh_scalars', component_property='n_clicks')\n", (10488, 10551), False, 'from dash.dependencies import Input, Output, State\n'), ((10561, 10628), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""show_scalars_data"""', 'component_property': '"""value"""'}), "(component_id='show_scalars_data', component_property='value')\n", (10566, 10628), False, 'from dash.dependencies import Input, Output, State\n'), ((10651, 10709), 'dash.dependencies.State', 'State', ([], 
{'component_id': '"""units"""', 'component_property': '"""children"""'}), "(component_id='units', component_property='children')\n", (10656, 10709), False, 'from dash.dependencies import Input, Output, State\n'), ((10719, 10794), 'dash.dependencies.State', 'State', ([], {'component_id': 'f"""graph_scalars_options"""', 'component_property': '"""children"""'}), "(component_id=f'graph_scalars_options', component_property='children')\n", (10724, 10794), False, 'from dash.dependencies import Input, Output, State\n'), ((10804, 10865), 'dash.dependencies.State', 'State', ([], {'component_id': 'f"""filters"""', 'component_property': '"""children"""'}), "(component_id=f'filters', component_property='children')\n", (10809, 10865), False, 'from dash.dependencies import Input, Output, State\n'), ((10875, 10931), 'dash.dependencies.State', 'State', ([], {'component_id': '"""colors"""', 'component_property': '"""value"""'}), "(component_id='colors', component_property='value')\n", (10880, 10931), False, 'from dash.dependencies import Input, Output, State\n'), ((10941, 10997), 'dash.dependencies.State', 'State', ([], {'component_id': '"""labels"""', 'component_property': '"""value"""'}), "(component_id='labels', component_property='value')\n", (10946, 10997), False, 'from dash.dependencies import Input, Output, State\n'), ((11007, 11077), 'dash.dependencies.State', 'State', ([], {'component_id': '"""aggregation_group_by"""', 'component_property': '"""value"""'}), "(component_id='aggregation_group_by', component_property='value')\n", (11012, 11077), False, 'from dash.dependencies import Input, Output, State\n'), ((11087, 11148), 'dash.dependencies.State', 'State', ([], {'component_id': '"""dd_scenario"""', 'component_property': '"""value"""'}), "(component_id='dd_scenario', component_property='value')\n", (11092, 11148), False, 'from dash.dependencies import Input, Output, State\n'), ((14495, 14563), 'preprocessing.prepare_timeseries', 'preprocessing.prepare_timeseries', (['data', 
'agg_group_by', 'units', 'filters'], {}), '(data, agg_group_by, units, filters)\n', (14527, 14563), False, 'import preprocessing\n'), ((14708, 14763), 'flask.flash', 'flash', (['"""No data for current filter settings"""', '"""warning"""'], {}), "('No data for current filter settings', 'warning')\n", (14713, 14763), False, 'from flask import flash, get_flashed_messages\n'), ((14846, 14906), 'graphs.get_timeseries_plot', 'graphs.get_timeseries_plot', (['preprocessed_data', 'graph_options'], {}), '(preprocessed_data, graph_options)\n', (14872, 14906), False, 'import graphs\n'), ((12705, 12773), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""graph_timeseries"""', 'component_property': '"""figure"""'}), "(component_id='graph_timeseries', component_property='figure')\n", (12711, 12773), False, 'from dash.dependencies import Input, Output, State\n'), ((12783, 12849), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""table_timeseries"""', 'component_property': '"""data"""'}), "(component_id='table_timeseries', component_property='data')\n", (12789, 12849), False, 'from dash.dependencies import Input, Output, State\n'), ((12859, 12928), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""table_timeseries"""', 'component_property': '"""columns"""'}), "(component_id='table_timeseries', component_property='columns')\n", (12865, 12928), False, 'from dash.dependencies import Input, Output, State\n'), ((12938, 13014), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""graph_timeseries_error"""', 'component_property': '"""children"""'}), "(component_id='graph_timeseries_error', component_property='children')\n", (12944, 13014), False, 'from dash.dependencies import Input, Output, State\n'), ((13037, 13108), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""refresh_timeseries"""', 'component_property': '"""n_clicks"""'}), "(component_id='refresh_timeseries', component_property='n_clicks')\n", (13042, 13108), False, 
'from dash.dependencies import Input, Output, State\n'), ((13118, 13188), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""show_timeseries_data"""', 'component_property': '"""value"""'}), "(component_id='show_timeseries_data', component_property='value')\n", (13123, 13188), False, 'from dash.dependencies import Input, Output, State\n'), ((13211, 13269), 'dash.dependencies.State', 'State', ([], {'component_id': '"""units"""', 'component_property': '"""children"""'}), "(component_id='units', component_property='children')\n", (13216, 13269), False, 'from dash.dependencies import Input, Output, State\n'), ((13279, 13356), 'dash.dependencies.State', 'State', ([], {'component_id': '"""graph_timeseries_options"""', 'component_property': '"""children"""'}), "(component_id='graph_timeseries_options', component_property='children')\n", (13284, 13356), False, 'from dash.dependencies import Input, Output, State\n'), ((13366, 13427), 'dash.dependencies.State', 'State', ([], {'component_id': 'f"""filters"""', 'component_property': '"""children"""'}), "(component_id=f'filters', component_property='children')\n", (13371, 13427), False, 'from dash.dependencies import Input, Output, State\n'), ((13437, 13493), 'dash.dependencies.State', 'State', ([], {'component_id': '"""colors"""', 'component_property': '"""value"""'}), "(component_id='colors', component_property='value')\n", (13442, 13493), False, 'from dash.dependencies import Input, Output, State\n'), ((13503, 13559), 'dash.dependencies.State', 'State', ([], {'component_id': '"""labels"""', 'component_property': '"""value"""'}), "(component_id='labels', component_property='value')\n", (13508, 13559), False, 'from dash.dependencies import Input, Output, State\n'), ((13569, 13639), 'dash.dependencies.State', 'State', ([], {'component_id': '"""aggregation_group_by"""', 'component_property': '"""value"""'}), "(component_id='aggregation_group_by', component_property='value')\n", (13574, 13639), False, 'from 
dash.dependencies import Input, Output, State\n'), ((13649, 13710), 'dash.dependencies.State', 'State', ([], {'component_id': '"""dd_scenario"""', 'component_property': '"""value"""'}), "(component_id='dd_scenario', component_property='value')\n", (13654, 13710), False, 'from dash.dependencies import Input, Output, State\n'), ((1228, 1252), 'scenario.get_scenarios', 'scenario.get_scenarios', ([], {}), '()\n', (1250, 1252), False, 'import scenario\n'), ((4555, 4647), 'flask.flash', 'flash', (['f"""Could not read color mapping. Input must be valid JSON. (Error: {je})"""', '"""error"""'], {}), "(f'Could not read color mapping. Input must be valid JSON. (Error: {je})',\n 'error')\n", (4560, 4647), False, 'from flask import flash, get_flashed_messages\n'), ((5514, 5599), 'flask.flash', 'flash', (['f"""Could not read labels. Input must be valid JSON. (Error: {je})"""', '"""error"""'], {}), "(f'Could not read labels. Input must be valid JSON. (Error: {je})',\n 'error')\n", (5519, 5599), False, 'from flask import flash, get_flashed_messages\n'), ((6809, 6842), 'models.Filter.query.filter_by', 'Filter.query.filter_by', ([], {'name': 'name'}), '(name=name)\n', (6831, 6842), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((6230, 6300), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""save_load_errors"""', 'component_property': '"""children"""'}), "(component_id='save_load_errors', component_property='children')\n", (6236, 6300), False, 'from dash.dependencies import Input, Output, State\n'), ((7404, 7437), 'models.Colors.query.filter_by', 'Colors.query.filter_by', ([], {'name': 'name'}), '(name=name)\n', (7426, 7437), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((7718, 7751), 'models.Labels.query.filter_by', 'Labels.query.filter_by', ([], {'name': 'name'}), '(name=name)\n', (7740, 7751), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((12163, 12185), 
'graphs.get_empty_fig', 'graphs.get_empty_fig', ([], {}), '()\n', (12183, 12185), False, 'import graphs\n'), ((14779, 14801), 'graphs.get_empty_fig', 'graphs.get_empty_fig', ([], {}), '()\n', (14799, 14801), False, 'import graphs\n'), ((612, 634), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (624, 634), False, 'import pathlib\n'), ((4659, 4684), 'models.get_model_options', 'get_model_options', (['Colors'], {}), '(Colors)\n', (4676, 4684), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((5611, 5636), 'models.get_model_options', 'get_model_options', (['Labels'], {}), '(Labels)\n', (5628, 5636), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((5867, 5943), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""graph_scalars_plot_switch"""', 'component_property': '"""value"""'}), "(component_id='graph_scalars_plot_switch', component_property='value')\n", (5873, 5943), False, 'from dash.dependencies import Input, Output, State\n'), ((5953, 6032), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""graph_timeseries_plot_switch"""', 'component_property': '"""value"""'}), "(component_id='graph_timeseries_plot_switch', component_property='value')\n", (5959, 6032), False, 'from dash.dependencies import Input, Output, State\n'), ((6042, 6113), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""aggregation_group_by"""', 'component_property': '"""value"""'}), "(component_id='aggregation_group_by', component_property='value')\n", (6048, 6113), False, 'from dash.dependencies import Input, Output, State\n'), ((6127, 6195), 'dash.dependencies.Output', 'Output', ([], {'component_id': 'f"""filter-{filter_}"""', 'component_property': '"""value"""'}), "(component_id=f'filter-{filter_}', component_property='value')\n", (6133, 6195), False, 'from dash.dependencies import Input, Output, State\n'), ((8977, 9010), 'models.Filter.query.filter_by', 'Filter.query.filter_by', 
([], {'name': 'name'}), '(name=name)\n', (8999, 9010), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((9898, 9931), 'models.Filter.query.filter_by', 'Filter.query.filter_by', ([], {'name': 'name'}), '(name=name)\n', (9920, 9931), False, 'from models import db, get_model_options, Filter, Colors, Labels\n'), ((12008, 12030), 'graphs.get_empty_fig', 'graphs.get_empty_fig', ([], {}), '()\n', (12028, 12030), False, 'import graphs\n'), ((12335, 12357), 'graphs.get_empty_fig', 'graphs.get_empty_fig', ([], {}), '()\n', (12355, 12357), False, 'import graphs\n'), ((14624, 14646), 'graphs.get_empty_fig', 'graphs.get_empty_fig', ([], {}), '()\n', (14644, 14646), False, 'import graphs\n'), ((14955, 14977), 'graphs.get_empty_fig', 'graphs.get_empty_fig', ([], {}), '()\n', (14975, 14977), False, 'import graphs\n')] |
from autotest import AutoTest
from recorder import Recorder
autotest = AutoTest()
def test(args):
print("start test ...")
recorder = Recorder(args)
autotest.test_time_case(recorder, args)
recorder.output(AutoTest.error_log, -1)
print("test finish, cases: {0} errors: {1} accuracy: {2}%".format(
recorder.case_count, recorder.failed_count, recorder.accuracy * 100.0)) | [
"autotest.AutoTest",
"recorder.Recorder"
] | [((73, 83), 'autotest.AutoTest', 'AutoTest', ([], {}), '()\n', (81, 83), False, 'from autotest import AutoTest\n'), ((144, 158), 'recorder.Recorder', 'Recorder', (['args'], {}), '(args)\n', (152, 158), False, 'from recorder import Recorder\n')] |
import CreadorDeMapa
import Mazmorra
import Tesoro
import Textura
import Mapa
class CreadorMapaJungla(CreadorDeMapa):
@property
def mapa(self):
return self.__mapa
def crearMapa(self):
mazmorra = Mazmorra()
tesoro = Tesoro(100, 10000)
textura = Textura("nieve.png", 100, 200)
self.__mapa = Mapa(mazmorra, tesoro, textura)
return self.__mapa
| [
"Mazmorra",
"Tesoro",
"Textura",
"Mapa"
] | [((226, 236), 'Mazmorra', 'Mazmorra', ([], {}), '()\n', (234, 236), False, 'import Mazmorra\n'), ((254, 272), 'Tesoro', 'Tesoro', (['(100)', '(10000)'], {}), '(100, 10000)\n', (260, 272), False, 'import Tesoro\n'), ((291, 321), 'Textura', 'Textura', (['"""nieve.png"""', '(100)', '(200)'], {}), "('nieve.png', 100, 200)\n", (298, 321), False, 'import Textura\n'), ((344, 375), 'Mapa', 'Mapa', (['mazmorra', 'tesoro', 'textura'], {}), '(mazmorra, tesoro, textura)\n', (348, 375), False, 'import Mapa\n')] |
import unittest
import pyxb.binding.datatypes as xsd
class Test_anyType (unittest.TestCase):
def testRange (self):
self.assertFalse("Datatype anyType test not implemented")
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
] | [((218, 233), 'unittest.main', 'unittest.main', ([], {}), '()\n', (231, 233), False, 'import unittest\n')] |
#!/usr/bin/env python3
from aws_cdk import core
from infra.infra_stack import InfraStack
app = core.App()
InfraStack(app, "infra")
app.synth()
| [
"infra.infra_stack.InfraStack",
"aws_cdk.core.App"
] | [((99, 109), 'aws_cdk.core.App', 'core.App', ([], {}), '()\n', (107, 109), False, 'from aws_cdk import core\n'), ((110, 134), 'infra.infra_stack.InfraStack', 'InfraStack', (['app', '"""infra"""'], {}), "(app, 'infra')\n", (120, 134), False, 'from infra.infra_stack import InfraStack\n')] |
import random
import os
files= os.listdir("data/VOC2007/Annotations")
files = [os.path.splitext(file)[0] for file in files]
random.shuffle(files)
train_perc = .8
thres = int(len(files) * train_perc)
train=files[:thres]
test=files[thres+1:]
outfolder = "data/VOC2007/ImageSets/Main"
with open(os.path.join(outfolder, 'test.txt'), "w") as f:
f.write('\n'.join(test))
with open(os.path.join(outfolder, 'trainval.txt'), "w") as f:
f.write('\n'.join(train))
| [
"os.path.join",
"os.listdir",
"os.path.splitext",
"random.shuffle"
] | [((32, 70), 'os.listdir', 'os.listdir', (['"""data/VOC2007/Annotations"""'], {}), "('data/VOC2007/Annotations')\n", (42, 70), False, 'import os\n'), ((126, 147), 'random.shuffle', 'random.shuffle', (['files'], {}), '(files)\n', (140, 147), False, 'import random\n'), ((81, 103), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (97, 103), False, 'import os\n'), ((298, 333), 'os.path.join', 'os.path.join', (['outfolder', '"""test.txt"""'], {}), "(outfolder, 'test.txt')\n", (310, 333), False, 'import os\n'), ((385, 424), 'os.path.join', 'os.path.join', (['outfolder', '"""trainval.txt"""'], {}), "(outfolder, 'trainval.txt')\n", (397, 424), False, 'import os\n')] |
# authors: <NAME>, <NAME>, <NAME>, <NAME>
# date: 2020-11-20
"""Downloads a raw csv dataset from the web to a local filepath.
Usage: download_data.py --url=<url> --out_file=<out_file>
Options:
--url=<url> URL from where to download the data (must be in csv format)
--out_file=<out_file> Path and filename where to locally write the file
"""
import os
import pandas as pd
from docopt import docopt
opt = docopt(__doc__)
# reads an online csv and writes it to local directory
def main(url, out_file):
data = pd.read_csv(url, header=None, low_memory = False)
try:
data.to_csv(out_file, index=False)
except:
os.makedirs(os.path.dirname(out_file))
data.to_csv(out_file, index=False)
if __name__ == "__main__":
main(opt["--url"], opt["--out_file"])
| [
"os.path.dirname",
"docopt.docopt",
"pandas.read_csv"
] | [((432, 447), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (438, 447), False, 'from docopt import docopt\n'), ((540, 587), 'pandas.read_csv', 'pd.read_csv', (['url'], {'header': 'None', 'low_memory': '(False)'}), '(url, header=None, low_memory=False)\n', (551, 587), True, 'import pandas as pd\n'), ((674, 699), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (689, 699), False, 'import os\n')] |
import datetime
import pandas
from evidently.analyzers.utils import process_columns
from evidently.pipeline.column_mapping import ColumnMapping
def test_process_columns() -> None:
dataset = pandas.DataFrame.from_dict([
dict(datetime=datetime.datetime.now(),
target=1,
prediction=1,
feature1=0,
feature2=1,
cat_feature1="o",
cat_feature2="b")])
columns = process_columns(dataset, ColumnMapping())
assert columns.utility_columns.id_column is None
# process_columns has a problem with columns order - it returns not sorted list
# we have to before a fix use sorted for comparing with sorted expected data
assert sorted(columns.num_feature_names) == ['feature1', 'feature2']
assert sorted(columns.cat_feature_names) == ['cat_feature1', 'cat_feature2']
| [
"evidently.pipeline.column_mapping.ColumnMapping",
"datetime.datetime.now"
] | [((478, 493), 'evidently.pipeline.column_mapping.ColumnMapping', 'ColumnMapping', ([], {}), '()\n', (491, 493), False, 'from evidently.pipeline.column_mapping import ColumnMapping\n'), ((249, 272), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (270, 272), False, 'import datetime\n')] |
"""load cmat cmat10.
import matplotlib.pyplit as plt
import seaborn as sns
sns.set()
plt.ion() # interactive plot
plt.clf(); sns.heatmap(cmat, cmap="gist_earth_r").invert_yaxis()
plt.clf(); sns.heatmap(cmat, cmap="viridis_r").invert_yaxis()
"""
import pickle
from pathlib import Path
cdir = Path(__file__).parent.resolve()
cmat = pickle.load(open(cdir / "cos_matrix.pkl", "rb"))
cmat10 = pickle.load(open(cdir / "cos_matrix10.pkl", "rb"))
| [
"pathlib.Path"
] | [((298, 312), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (302, 312), False, 'from pathlib import Path\n')] |
# https://www.jwz.org/doc/threading.html
# https://github.com/akuchling/jwzthreading
import re
from collections import deque
restrip_pat = re.compile("""(
(Re(\[\d+\])?:) | (\[ [^]]+ \])
\s*)+
""", re.IGNORECASE | re.VERBOSE)
class Container:
"""Contains a tree of messages.
Instance attributes:
.message : Message
Message corresponding to this tree node. This can be None,
if a Message-Id is referenced but no message with the ID is
included.
.children : [Container]
Possibly-empty list of child containers.
.parent : Container
Parent container; may be None.
"""
_slots__ = ['message', 'parent', 'children']
def __init__(self):
self.parent = None
self.message = None
self.children = []
def __repr__(self):
return '<%s %x: %r>' % (self.__class__.__name__, id(self), self.message)
def __len__(self):
counter = 0
if self.message:
counter += 1
for child in self.children:
counter += len(child)
return counter
def is_dummy(self):
return self.message is None
def add_child(self, child):
if child.parent:
child.parent.remove_child(child)
self.children.append(child)
child.parent = self
def remove_child(self, child):
self.children.remove(child)
child.parent = None
def has_descendant(self, ctr):
"""(Container): bool
Returns true if 'ctr' is a descendant of this Container.
"""
# To avoid recursing indefinitely, we'll do a depth-first search;
# 'seen' tracks the containers we've already seen, and 'stack'
# is a deque containing containers that we need to look at.
stack = deque()
stack.append(self)
seen = set()
while stack:
node = stack.pop()
if node is ctr:
return True
seen.add(node)
for child in node.children:
if child not in seen:
stack.append(child)
return False
def get_folder_uid(self):
folder_uid = []
if self.message:
folder_uid.append(
(self.message.folder, self.message.uid))
for child in self.children:
folder_uid += child.get_folder_uid()
return folder_uid
class Message (object):
"""Represents a message to be threaded.
Instance attributes:
.subject : str
Subject line of the message.
.message_id : str
Message ID as retrieved from the Message-ID header.
.references : [str]
List of message IDs from the In-Reply-To and References headers.
.message : any
Can contain information for the caller's use (e.g. an RFC-822 message object).
"""
__slots__ = ['folder', 'uid', 'subject', 'message_id', 'references',]
def __init__(self, msg=None):
self.folder = msg.get('folder','')
self.uid = msg.get('uid','')
self.subject = msg.get('subject','')
self.message_id = None
self.references = []
def __repr__(self):
return '<%s: %r>' % (self.__class__.__name__, self.message_id)
def make_message(msg):
"""(msg:rfc822.Message) : Message
Create a Message object for threading purposes from an RFC822
message.
"""
new = Message(msg)
m_id = msg.get('message_id', '')
if m_id is None:
raise ValueError("Message does not contain a Message-ID: header")
new.message_id = m_id
# Get list of unique message IDs from the References: header
new.references = msg.get('references','')
# Get In-Reply-To: header and add it to references
in_reply_to = msg.get('in_reply_to','')
if in_reply_to:
msg_id = in_reply_to[0]
if msg_id not in new.references:
new.references.append(msg_id)
return new
def prune_container(container: Container):
"""(container:Container) : [Container]
Recursively prune a tree of containers, as described in step 4
of the algorithm. Returns a list of the children that should replace
this container.
"""
# Prune children, assembling a new list of children
new_children = []
for ctr in container.children[:]:
L = prune_container(ctr)
new_children.extend(L)
container.remove_child(ctr)
for c in new_children:
container.add_child(c)
if (container.message is None and
len(container.children) == 0):
# 4.A: nuke empty containers
return []
elif (container.message is None and
(len(container.children) == 1 or
container.parent is not None)):
# 4.B: promote children
L = container.children[:]
for c in L:
container.remove_child(c)
return L
else:
# Leave this node in place
return [container]
def thread(msglist):
    """([Message]) : {string:Container}
    The main threading function.  This takes a list of Message
    objects, and returns a dictionary mapping subjects to Containers.
    Containers are trees, with the .children attribute containing a
    list of subtrees, so callers can then sort children by date or
    poster or whatever.

    The numbered comments follow the well-known jwz netnews threading
    steps.
    """
    # 1. Build an id_table mapping message IDs to Containers.
    id_table = {}
    for msg in msglist:
        # 1A: find or create the container for this message
        this_container = id_table.get(msg.message_id, None)
        if this_container is not None:
            this_container.message = msg
        else:
            this_container = Container()
            this_container.message = msg
            id_table[msg.message_id] = this_container
        # 1B: chain the referenced messages together parent -> child
        prev = None
        for ref in msg.references:
            container = id_table.get(ref, None)
            if container is None:
                container = Container()
                container.message_id = ref
                id_table[ref] = container
            if prev is not None:
                # Don't add link if it would create a loop
                if container is this_container:
                    continue
                if container.has_descendant(prev):
                    continue
                prev.add_child(container)
            prev = container
        if prev is not None:
            prev.add_child(this_container)
    # 2. Find root set (containers with no parent)
    root_set = [container for container in id_table.values()
                if container.parent is None]
    # 3. Delete id_table
    del id_table
    # 4. Prune empty containers
    for container in root_set:
        # Idiom fix: identity comparison with None (was `== None`)
        assert container.parent is None
    new_root_set = []
    for container in root_set:
        L = prune_container(container)
        new_root_set.extend(L)
    root_set = new_root_set
    # 5. Group root set by subject, stripped of Re:/[list] prefixes
    subject_table = {}
    for container in root_set:
        if container.message:
            subj = container.message.subject
        else:
            # A dummy root: take the subject of its first child.
            # (Removed an unused local that aliased children[0].)
            subj = container.children[0].message.subject
        subj = restrip_pat.sub('', subj)
        if subj == "":
            continue
        existing = subject_table.get(subj, None)
        # Prefer a dummy container, or the container with the shortest
        # (least-"Re:"-decorated) subject, as the canonical root.
        if (existing is None or
            (existing.message is not None and
             container.message is None) or
            (existing.message is not None and
             container.message is not None and
             len(existing.message.subject) > len(container.message.subject))):
            subject_table[subj] = container
    # 5C. Merge roots that share a subject
    for container in root_set:
        if container.message:
            subj = container.message.subject
        else:
            subj = container.children[0].message.subject
        subj = restrip_pat.sub('', subj)
        ctr = subject_table.get(subj)
        if ctr is None or ctr is container:
            continue
        if ctr.is_dummy() and container.is_dummy():
            for c in ctr.children:
                container.add_child(c)
        elif ctr.is_dummy() or container.is_dummy():
            if ctr.is_dummy():
                ctr.add_child(container)
            else:
                container.add_child(ctr)
        elif len(ctr.message.subject) < len(container.message.subject):
            # ctr has fewer levels of 're:' headers
            ctr.add_child(container)
        elif len(ctr.message.subject) > len(container.message.subject):
            # container has fewer levels of 're:' headers
            container.add_child(ctr)
        else:
            # Same subject depth: create a dummy parent holding both
            new = Container()
            new.add_child(ctr)
            new.add_child(container)
            subject_table[subj] = new
    return subject_table
def print_container(ctr, depth=0, debug=0):
    """Print a container tree to stdout, one node per line.

    :param ctr: root Container (needs .message and .children)
    :param depth: current indentation level (one space per level)
    :param debug: nonzero prints repr(ctr) instead of the message summary
    """
    import sys
    sys.stdout.write(depth*' ')
    if debug:
        # Printing the repr() is more useful for debugging
        sys.stdout.write(repr(ctr))
    else:
        sys.stdout.write(repr(ctr.message and ctr.message.subject + " uid: " + str(
            ctr.message.uid) + " folder:" + str(ctr.message.folder)))
    sys.stdout.write('\n')
    for c in ctr.children:
        # Bug fix: propagate the debug flag (it was silently dropped, so
        # children always printed in non-debug form).
        print_container(c, depth+1, debug)
def conversation_threading(msglist):
    """([Message]) -> iterator of Container
    Thread *msglist* and return an iterator over the root containers
    that hold an actual conversation (len > 1 and a known root message).
    """
    roots_by_subject = thread(msglist)
    return (ctr for ctr in roots_by_subject.values()
            if len(ctr) > 1 and ctr.message)
| [
"sys.stdout.write",
"collections.deque",
"re.compile"
] | [((141, 240), 're.compile', 're.compile', (['"""(\n (Re(\\\\[\\\\d+\\\\])?:) | (\\\\[ [^]]+ \\\\])\n\\\\s*)+\n"""', '(re.IGNORECASE | re.VERBOSE)'], {}), '("""(\n (Re(\\\\[\\\\d+\\\\])?:) | (\\\\[ [^]]+ \\\\])\n\\\\s*)+\n""", re.\n IGNORECASE | re.VERBOSE)\n', (151, 240), False, 'import re\n'), ((8698, 8727), 'sys.stdout.write', 'sys.stdout.write', (["(depth * ' ')"], {}), "(depth * ' ')\n", (8714, 8727), False, 'import sys\n'), ((9004, 9026), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (9020, 9026), False, 'import sys\n'), ((1789, 1796), 'collections.deque', 'deque', ([], {}), '()\n', (1794, 1796), False, 'from collections import deque\n')] |
import os
import shutil
import subprocess
from easygui import *
import time
folderPath = './picsNow'
registerNow = './RegisterNow'
for image in os.listdir(folderPath):
if os.path.exists(registerNow):
shutil.rmtree(registerNow)
os.makedirs(registerNow)
cmd = ["python2", "detect.py", os.path.join(folderPath, image)]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
p.wait()
result = p.communicate()
msgbox(result)
time.sleep(2)
cmd = ["python2", "spreadsheet.py"]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
p.wait()
result = p.communicate()
msgbox(result)
time.sleep(2)
cmd = ["python2", "identify.py"]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
p.wait()
result = p.communicate()
msgbox(result)
time.sleep(2)
| [
"os.path.exists",
"os.listdir",
"os.makedirs",
"subprocess.Popen",
"os.path.join",
"time.sleep",
"shutil.rmtree"
] | [((146, 168), 'os.listdir', 'os.listdir', (['folderPath'], {}), '(folderPath)\n', (156, 168), False, 'import os\n'), ((174, 201), 'os.path.exists', 'os.path.exists', (['registerNow'], {}), '(registerNow)\n', (188, 201), False, 'import os\n'), ((233, 257), 'os.makedirs', 'os.makedirs', (['registerNow'], {}), '(registerNow)\n', (244, 257), False, 'import os\n'), ((329, 374), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE)\n', (345, 374), False, 'import subprocess\n'), ((429, 442), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (439, 442), False, 'import time\n'), ((486, 531), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE)\n', (502, 531), False, 'import subprocess\n'), ((586, 599), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (596, 599), False, 'import time\n'), ((641, 686), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE)\n', (657, 686), False, 'import subprocess\n'), ((741, 754), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (751, 754), False, 'import time\n'), ((205, 231), 'shutil.rmtree', 'shutil.rmtree', (['registerNow'], {}), '(registerNow)\n', (218, 231), False, 'import shutil\n'), ((291, 322), 'os.path.join', 'os.path.join', (['folderPath', 'image'], {}), '(folderPath, image)\n', (303, 322), False, 'import os\n')] |
# math function
import math
# round number
x = 2.9
print(round(x))
# absolute value of number
y = -5
print(abs(y))
# make use of the math library
z = 2.5
print(math.ceil(z)) # round up the number
print(math.floor(z)) # round down the number
# for finding more math module just search "python math module"
| [
"math.ceil",
"math.floor"
] | [((166, 178), 'math.ceil', 'math.ceil', (['z'], {}), '(z)\n', (175, 178), False, 'import math\n'), ((208, 221), 'math.floor', 'math.floor', (['z'], {}), '(z)\n', (218, 221), False, 'import math\n')] |
'''tzinfo timezone information for America/Panama.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Panama(DstTzInfo):
'''America/Panama timezone definition. See datetime.tzinfo for details'''
zone = 'America/Panama'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1908,4,22,5,19,36),
]
_transition_info = [
i(-19200,0,'CMT'),
i(-18000,0,'EST'),
]
Panama = Panama()
| [
"pytz.tzinfo.memorized_ttinfo",
"pytz.tzinfo.memorized_datetime"
] | [((346, 365), 'pytz.tzinfo.memorized_datetime', 'd', (['(1)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(1, 1, 1, 0, 0, 0)\n', (347, 365), True, 'from pytz.tzinfo import memorized_datetime as d\n'), ((362, 387), 'pytz.tzinfo.memorized_datetime', 'd', (['(1908)', '(4)', '(22)', '(5)', '(19)', '(36)'], {}), '(1908, 4, 22, 5, 19, 36)\n', (363, 387), True, 'from pytz.tzinfo import memorized_datetime as d\n'), ((420, 439), 'pytz.tzinfo.memorized_ttinfo', 'i', (['(-19200)', '(0)', '"""CMT"""'], {}), "(-19200, 0, 'CMT')\n", (421, 439), True, 'from pytz.tzinfo import memorized_ttinfo as i\n'), ((439, 458), 'pytz.tzinfo.memorized_ttinfo', 'i', (['(-18000)', '(0)', '"""EST"""'], {}), "(-18000, 0, 'EST')\n", (440, 458), True, 'from pytz.tzinfo import memorized_ttinfo as i\n')] |
from django.conf.urls import url
from . import views
app_name = 'gantt'
urlpatterns = [
# ex: /polls/
#url(r'^$', views.index, name='index'),
url(r'^$', views.index, name='index'),
url(r'^project/$', views.project),
url(r'^modify_project/$', views.modify_project),
#url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
]
| [
"django.conf.urls.url"
] | [((156, 192), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (159, 192), False, 'from django.conf.urls import url\n'), ((199, 231), 'django.conf.urls.url', 'url', (['"""^project/$"""', 'views.project'], {}), "('^project/$', views.project)\n", (202, 231), False, 'from django.conf.urls import url\n'), ((238, 284), 'django.conf.urls.url', 'url', (['"""^modify_project/$"""', 'views.modify_project'], {}), "('^modify_project/$', views.modify_project)\n", (241, 284), False, 'from django.conf.urls import url\n')] |
# <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
from mlxtend.utils import assert_raises
from mlxtend.utils import check_Xy, format_kwarg_dictionaries
import numpy as np
import sys
import os
y = np.array([1, 2, 3, 4])
X = np.array([[1., 2.], [3., 4.], [5., 6.], [7., 8.]])
d_default = {'key1': 1, 'key2': 2}
d_user = {'key3': 3, 'key4': 4}
protected_keys = ['key1', 'key4']
def test_check_Xy_ok():
check_Xy(X, y)
def test_check_Xy_invalid_type_X():
expect = "X must be a NumPy array. Found <class 'list'>"
if (sys.version_info < (3, 0)):
expect = expect.replace('class', 'type')
assert_raises(ValueError,
expect,
check_Xy,
[1, 2, 3, 4],
y)
def test_check_Xy_float16_X():
check_Xy(X.astype(np.float16), y)
def test_check_Xy_float16_y():
check_Xy(X, y.astype(np.int16))
def test_check_Xy_invalid_type_y():
expect = "y must be a NumPy array. Found <class 'list'>"
if (sys.version_info < (3, 0)):
expect = expect.replace('class', 'type')
assert_raises(ValueError,
expect,
check_Xy,
X,
[1, 2, 3, 4])
def test_check_Xy_invalid_dtype_X():
assert_raises(ValueError,
'X must be an integer or float array. Found object.',
check_Xy,
X.astype('object'),
y)
def test_check_Xy_invalid_dtype_y():
if (sys.version_info > (3, 0)):
expect = ('y must be an integer array. Found <U1. '
'Try passing the array as y.astype(np.integer)')
else:
expect = ('y must be an integer array. Found |S1. '
'Try passing the array as y.astype(np.integer)')
assert_raises(ValueError,
expect,
check_Xy,
X,
np.array(['a', 'b', 'c', 'd']))
def test_check_Xy_invalid_dim_y():
if sys.version_info[:2] == (2, 7) and os.name == 'nt':
s = 'y must be a 1D array. Found (4L, 2L)'
else:
s = 'y must be a 1D array. Found (4, 2)'
assert_raises(ValueError,
s,
check_Xy,
X,
X.astype(np.integer))
def test_check_Xy_invalid_dim_X():
if sys.version_info[:2] == (2, 7) and os.name == 'nt':
s = 'X must be a 2D array. Found (4L,)'
else:
s = 'X must be a 2D array. Found (4,)'
assert_raises(ValueError,
s,
check_Xy,
y,
y)
def test_check_Xy_unequal_length_X():
assert_raises(ValueError,
('y and X must contain the same number of samples. '
'Got y: 4, X: 3'),
check_Xy,
X[1:],
y)
def test_check_Xy_unequal_length_y():
assert_raises(ValueError,
('y and X must contain the same number of samples. '
'Got y: 3, X: 4'),
check_Xy,
X,
y[1:])
def test_format_kwarg_dictionaries_defaults_empty():
empty = format_kwarg_dictionaries()
assert isinstance(empty, dict)
assert len(empty) == 0
def test_format_kwarg_dictionaries_protected_keys():
formatted_kwargs = format_kwarg_dictionaries(
default_kwargs=d_default,
user_kwargs=d_user,
protected_keys=protected_keys)
for key in protected_keys:
assert key not in formatted_kwargs
def test_format_kwarg_dictionaries_no_default_kwargs():
formatted_kwargs = format_kwarg_dictionaries(user_kwargs=d_user)
assert formatted_kwargs == d_user
def test_format_kwarg_dictionaries_no_user_kwargs():
formatted_kwargs = format_kwarg_dictionaries(default_kwargs=d_default)
assert formatted_kwargs == d_default
def test_format_kwarg_dictionaries_default_kwargs_invalid_type():
invalid_kwargs = 'not a dictionary'
message = ('d must be of type dict or None, but got '
'{} instead'.format(type(invalid_kwargs)))
assert_raises(TypeError,
message,
format_kwarg_dictionaries,
default_kwargs=invalid_kwargs)
def test_format_kwarg_dictionaries_user_kwargs_invalid_type():
invalid_kwargs = 'not a dictionary'
message = ('d must be of type dict or None, but got '
'{} instead'.format(type(invalid_kwargs)))
assert_raises(TypeError,
message,
format_kwarg_dictionaries,
user_kwargs=invalid_kwargs)
| [
"numpy.array",
"mlxtend.utils.format_kwarg_dictionaries",
"mlxtend.utils.assert_raises",
"mlxtend.utils.check_Xy"
] | [((266, 288), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (274, 288), True, 'import numpy as np\n'), ((293, 351), 'numpy.array', 'np.array', (['[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]'], {}), '([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])\n', (301, 351), True, 'import numpy as np\n'), ((476, 490), 'mlxtend.utils.check_Xy', 'check_Xy', (['X', 'y'], {}), '(X, y)\n', (484, 490), False, 'from mlxtend.utils import check_Xy, format_kwarg_dictionaries\n'), ((679, 739), 'mlxtend.utils.assert_raises', 'assert_raises', (['ValueError', 'expect', 'check_Xy', '[1, 2, 3, 4]', 'y'], {}), '(ValueError, expect, check_Xy, [1, 2, 3, 4], y)\n', (692, 739), False, 'from mlxtend.utils import assert_raises\n'), ((1140, 1200), 'mlxtend.utils.assert_raises', 'assert_raises', (['ValueError', 'expect', 'check_Xy', 'X', '[1, 2, 3, 4]'], {}), '(ValueError, expect, check_Xy, X, [1, 2, 3, 4])\n', (1153, 1200), False, 'from mlxtend.utils import assert_raises\n'), ((2547, 2591), 'mlxtend.utils.assert_raises', 'assert_raises', (['ValueError', 's', 'check_Xy', 'y', 'y'], {}), '(ValueError, s, check_Xy, y, y)\n', (2560, 2591), False, 'from mlxtend.utils import assert_raises\n'), ((2708, 2828), 'mlxtend.utils.assert_raises', 'assert_raises', (['ValueError', '"""y and X must contain the same number of samples. Got y: 4, X: 3"""', 'check_Xy', 'X[1:]', 'y'], {}), "(ValueError,\n 'y and X must contain the same number of samples. Got y: 4, X: 3',\n check_Xy, X[1:], y)\n", (2721, 2828), False, 'from mlxtend.utils import assert_raises\n'), ((2961, 3081), 'mlxtend.utils.assert_raises', 'assert_raises', (['ValueError', '"""y and X must contain the same number of samples. Got y: 3, X: 4"""', 'check_Xy', 'X', 'y[1:]'], {}), "(ValueError,\n 'y and X must contain the same number of samples. 
Got y: 3, X: 4',\n check_Xy, X, y[1:])\n", (2974, 3081), False, 'from mlxtend.utils import assert_raises\n'), ((3237, 3264), 'mlxtend.utils.format_kwarg_dictionaries', 'format_kwarg_dictionaries', ([], {}), '()\n', (3262, 3264), False, 'from mlxtend.utils import check_Xy, format_kwarg_dictionaries\n'), ((3405, 3511), 'mlxtend.utils.format_kwarg_dictionaries', 'format_kwarg_dictionaries', ([], {'default_kwargs': 'd_default', 'user_kwargs': 'd_user', 'protected_keys': 'protected_keys'}), '(default_kwargs=d_default, user_kwargs=d_user,\n protected_keys=protected_keys)\n', (3430, 3511), False, 'from mlxtend.utils import check_Xy, format_kwarg_dictionaries\n'), ((3749, 3794), 'mlxtend.utils.format_kwarg_dictionaries', 'format_kwarg_dictionaries', ([], {'user_kwargs': 'd_user'}), '(user_kwargs=d_user)\n', (3774, 3794), False, 'from mlxtend.utils import check_Xy, format_kwarg_dictionaries\n'), ((3911, 3962), 'mlxtend.utils.format_kwarg_dictionaries', 'format_kwarg_dictionaries', ([], {'default_kwargs': 'd_default'}), '(default_kwargs=d_default)\n', (3936, 3962), False, 'from mlxtend.utils import check_Xy, format_kwarg_dictionaries\n'), ((4232, 4328), 'mlxtend.utils.assert_raises', 'assert_raises', (['TypeError', 'message', 'format_kwarg_dictionaries'], {'default_kwargs': 'invalid_kwargs'}), '(TypeError, message, format_kwarg_dictionaries, default_kwargs\n =invalid_kwargs)\n', (4245, 4328), False, 'from mlxtend.utils import assert_raises\n'), ((4603, 4696), 'mlxtend.utils.assert_raises', 'assert_raises', (['TypeError', 'message', 'format_kwarg_dictionaries'], {'user_kwargs': 'invalid_kwargs'}), '(TypeError, message, format_kwarg_dictionaries, user_kwargs=\n invalid_kwargs)\n', (4616, 4696), False, 'from mlxtend.utils import assert_raises\n'), ((1964, 1994), 'numpy.array', 'np.array', (["['a', 'b', 'c', 'd']"], {}), "(['a', 'b', 'c', 'd'])\n", (1972, 1994), True, 'import numpy as np\n')] |
# Generated by Django 3.2.7 on 2021-10-05 07:21
import json
from django.db import migrations
def _wrap_plain_text(value):
    """Return *value* unchanged if it is empty/None or already valid JSON;
    otherwise wrap it as a JSON document {"files": {}, "text": value}."""
    if value is None or value == "":
        return value
    try:
        json.loads(value)
    except json.decoder.JSONDecodeError:
        return json.dumps(
            {
                "files": {},
                "text": value if isinstance(value, str) else "",
            }
        )
    return value


def update_fields_with_images(apps, schema_editor):
    """Forward migration: wrap every plain-text programme field into the
    JSON {"files": {}, "text": ...} payload expected by the new schema.

    Refactor: the original body copy-pasted the same try/except block for
    all eight fields; it is now one helper applied over a field list.
    """
    # We can't import the Programme model directly as it may be a newer
    # version than this migration expects. We use the historical version.
    Programme = apps.get_model("programmes", "Programme")
    text_fields = (
        "vendeur",
        "acquereur",
        "reference_notaire",
        "reference_publication_acte",
        "acte_de_propriete",
        "acte_notarial",
        "edd_volumetrique",
        "edd_classique",
    )
    for prog in Programme.objects.all():
        for name in text_fields:
            setattr(prog, name, _wrap_plain_text(getattr(prog, name)))
        prog.save()
def _extract_wrapped_text(value):
    """Return the "text" member of a JSON payload ('' if absent); values
    that are not valid JSON are logged and returned unchanged."""
    try:
        payload = json.loads(value)
    except json.decoder.JSONDecodeError:
        print(f"json error : {value}")
        return value
    return payload["text"] if "text" in payload else ""


def rollback_field_with_images(apps, schema_editor):
    """Reverse migration: restore each JSON-wrapped programme field to its
    bare "text" value.

    Refactor: the original body copy-pasted the same try/except block for
    all eight fields; it is now one helper applied over a field list.
    """
    print("rollback")
    Programme = apps.get_model("programmes", "Programme")
    text_fields = (
        "vendeur",
        "acquereur",
        "reference_notaire",
        "reference_publication_acte",
        "acte_de_propriete",
        "acte_notarial",
        "edd_volumetrique",
        "edd_classique",
    )
    for prog in Programme.objects.all():
        for name in text_fields:
            value = getattr(prog, name)
            if value is not None and value != "":
                setattr(prog, name, _extract_wrapped_text(value))
        prog.save()
class Migration(migrations.Migration):
    """Rewrite programme text fields as JSON payloads (reversible)."""

    dependencies = [("programmes", "0021_auto_20210927_0853")]

    operations = [
        migrations.RunPython(
            update_fields_with_images,
            rollback_field_with_images,
        ),
    ]
| [
"json.loads",
"django.db.migrations.RunPython"
] | [((7250, 7325), 'django.db.migrations.RunPython', 'migrations.RunPython', (['update_fields_with_images', 'rollback_field_with_images'], {}), '(update_fields_with_images, rollback_field_with_images)\n', (7270, 7325), False, 'from django.db import migrations\n'), ((519, 536), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (529, 536), False, 'import json\n'), ((932, 949), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (942, 949), False, 'import json\n'), ((1363, 1380), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (1373, 1380), False, 'import json\n'), ((1820, 1837), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (1830, 1837), False, 'import json\n'), ((2268, 2285), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (2278, 2285), False, 'import json\n'), ((2699, 2716), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (2709, 2716), False, 'import json\n'), ((3132, 3149), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (3142, 3149), False, 'import json\n'), ((3562, 3579), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (3572, 3579), False, 'import json\n'), ((4187, 4204), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (4197, 4204), False, 'import json\n'), ((4527, 4544), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (4537, 4544), False, 'import json\n'), ((4885, 4902), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (4895, 4902), False, 'import json\n'), ((5309, 5326), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (5319, 5326), False, 'import json\n'), ((5724, 5741), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (5734, 5741), False, 'import json\n'), ((6122, 6139), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (6132, 6139), False, 'import json\n'), ((6482, 6499), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (6492, 6499), False, 'import json\n'), ((6879, 6896), 'json.loads', 'json.loads', (['field'], {}), 
'(field)\n', (6889, 6896), False, 'import json\n')] |
from _thread import start_new_thread
from hamcrest import assert_that, equal_to, is_in
from hamcrest.core.core.is_ import is_
from pandas.core.frame import DataFrame
from pytest import fail
from tanuki.data_store.column import Column
class TestColumn:
    """Unit tests for tanuki Column: casting, comparisons, indexing, repr."""

    def test_type_casting(self) -> None:
        """Column[T] casts its data to T; bare Column keeps the data as-is."""
        data = [1, 2, 3]
        column = Column("test", data)
        # (removed a stray debug print of the column)
        assert_that(column.tolist(), equal_to([1, 2, 3]))
        column = Column[int]("test", [1, 2, 3])
        assert_that(column.tolist(), equal_to([1, 2, 3]))
        column = Column[int]("test", [1.23, 2.23, 3.23])
        assert_that(column.tolist(), equal_to([1, 2, 3]))
        column = Column[float]("test", [1.23, 2.23, 3.23])
        assert_that(column.tolist(), equal_to([1.23, 2.23, 3.23]))
        column = Column[str]("test", [1, 2, 3])
        assert_that(column.tolist(), equal_to(["1", "2", "3"]))
        column = Column[bool]("test", [0, 1, 2])
        assert_that(column.tolist(), equal_to([False, True, True]))
        # A plain annotation must NOT trigger casting:
        column: Column[bool] = Column("test", [0, 1, 2])
        assert_that(column.tolist(), equal_to([0, 1, 2]))
        try:
            Column[float]("test", ["a", "b", "c"])
            fail("Expected cast exception")
        except Exception as e:
            assert_that("Failed to cast 'String' to 'Float64'", is_in(str(e)))

    def test_multi_threaded(self) -> None:
        """Concurrent Column[bool] construction must not corrupt other Columns."""
        data = [1, 2, 3]
        thread_running = True

        def assign_types():
            while thread_running:
                Column[bool]("test", data)

        # Bug fix: pass an *empty* args tuple.  The old ((),) called
        # assign_types(()) with a spurious argument, so the worker thread
        # died with a TypeError immediately and the test ran single-threaded.
        start_new_thread(assign_types, ())
        for _ in range(1000):
            column = Column("test", data)
            assert_that(column.tolist(), equal_to(data))
        thread_running = False

    def test_first(self) -> None:
        column = Column[int]("test", [1, 2, 3])
        assert_that(column.first().tolist(), equal_to([1]))

    def test_equals(self) -> None:
        column1 = Column[int]("test", [1, 2, 3])
        column2 = Column[int]("test", [4, 2, 5])
        assert_that(column1.equals(column1), equal_to(True))
        assert_that(column1.equals(column2), equal_to(False))
        assert_that(column1.equals(1), equal_to(False))

    def test_eq(self) -> None:
        column1 = Column[int]("test", [1, 2, 3])
        column2 = Column[int]("test", [4, 2, 5])
        assert_that(
            DataFrame({"test": [True, True, True]}).equals(column1 == column1), is_(True)
        )
        assert_that(
            DataFrame({"test": [False, True, False]}).equals(column1 == column2), is_(True)
        )
        assert_that(DataFrame({"test": [False, True, False]}).equals(column1 == 2), is_(True))

    def test_ne(self) -> None:
        column1 = Column[int]("test", [1, 2, 3])
        column2 = Column[int]("test", [4, 2, 5])
        assert_that(
            DataFrame({"test": [False, False, False]}).equals(column1 != column1), is_(True)
        )
        assert_that(
            DataFrame({"test": [True, False, True]}).equals(column1 != column2), is_(True)
        )
        assert_that(DataFrame({"test": [True, True, False]}).equals(column1 != 3), is_(True))

    def test_gt(self) -> None:
        column1 = Column[int]("test", [1, 2, 3])
        column2 = Column[int]("test", [4, 2, 5])
        assert_that(
            DataFrame({"test": [False, False, False]}).equals(column1 > column1), is_(True)
        )
        assert_that(
            DataFrame({"test": [True, False, True]}).equals(column2 > column1), is_(True)
        )
        assert_that(DataFrame({"test": [True, False, True]}).equals(column2 > 3), is_(True))

    def test_ge(self) -> None:
        column1 = Column[int]("test", [1, 2, 3])
        column2 = Column[int]("test", [4, 2, 5])
        assert_that(
            DataFrame({"test": [True, True, True]}).equals(column1 >= column1), is_(True)
        )
        assert_that(
            DataFrame({"test": [False, True, False]}).equals(column1 >= column2), is_(True)
        )
        assert_that(DataFrame({"test": [False, False, True]}).equals(column1 >= 3), is_(True))

    def test_lt(self) -> None:
        column1 = Column[int]("test", [1, 2, 3])
        column2 = Column[int]("test", [4, 2, 5])
        assert_that(
            DataFrame({"test": [False, False, False]}).equals(column1 < column1), is_(True)
        )
        assert_that(
            DataFrame({"test": [True, False, True]}).equals(column1 < column2), is_(True)
        )
        assert_that(DataFrame({"test": [False, True, False]}).equals(column2 < 3), is_(True))

    def test_le(self) -> None:
        column1 = Column[int]("test", [1, 2, 3])
        column2 = Column[int]("test", [4, 2, 5])
        assert_that(
            DataFrame({"test": [True, True, True]}).equals(column1 <= column1), is_(True)
        )
        assert_that(
            DataFrame({"test": [True, True, True]}).equals(column1 <= column2), is_(True)
        )
        assert_that(DataFrame({"test": [False, True, False]}).equals(column2 <= 3), is_(True))

    def test_getitem(self) -> None:
        column1 = Column[int]("test", [1, 2, 3])
        assert_that(column1[1].item(), equal_to(2))
        # With an explicit index, lookup is by label rather than position.
        column2 = Column[int]("test", [1, 2, 3], index=[1, 2, 3])
        assert_that(column2[1].item(), equal_to(1))

    def test_iter(self) -> None:
        column = Column[int]("test", [1, 2, 3])
        expected = [1, 2, 3]
        for (_, a), e in zip(column, expected):
            assert_that(a, equal_to(e))

    def test_str(self) -> None:
        column = Column[int]("test", [1, 2, 3])
        assert_that(str(column), equal_to("       test\nindex      \n0         1\n1         2\n2         3"))

    def test_repr(self) -> None:
        column = Column[int]("test", [1, 2, 3])
        assert_that(repr(column), equal_to("       test\nindex      \n0         1\n1         2\n2         3"))
| [
"tanuki.data_store.column.Column",
"hamcrest.core.core.is_.is_",
"pytest.fail",
"pandas.core.frame.DataFrame",
"hamcrest.equal_to",
"_thread.start_new_thread"
] | [((339, 359), 'tanuki.data_store.column.Column', 'Column', (['"""test"""', 'data'], {}), "('test', data)\n", (345, 359), False, 'from tanuki.data_store.column import Column\n'), ((1053, 1078), 'tanuki.data_store.column.Column', 'Column', (['"""test"""', '[0, 1, 2]'], {}), "('test', [0, 1, 2])\n", (1059, 1078), False, 'from tanuki.data_store.column import Column\n'), ((1570, 1607), '_thread.start_new_thread', 'start_new_thread', (['assign_types', '((),)'], {}), '(assign_types, ((),))\n', (1586, 1607), False, 'from _thread import start_new_thread\n'), ((419, 438), 'hamcrest.equal_to', 'equal_to', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (427, 438), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((526, 545), 'hamcrest.equal_to', 'equal_to', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (534, 545), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((642, 661), 'hamcrest.equal_to', 'equal_to', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (650, 661), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((760, 788), 'hamcrest.equal_to', 'equal_to', (['[1.23, 2.23, 3.23]'], {}), '([1.23, 2.23, 3.23])\n', (768, 788), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((876, 901), 'hamcrest.equal_to', 'equal_to', (["['1', '2', '3']"], {}), "(['1', '2', '3'])\n", (884, 901), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((990, 1019), 'hamcrest.equal_to', 'equal_to', (['[False, True, True]'], {}), '([False, True, True])\n', (998, 1019), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((1116, 1135), 'hamcrest.equal_to', 'equal_to', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (1124, 1135), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((1214, 1245), 'pytest.fail', 'fail', (['"""Expected cast exception"""'], {}), "('Expected cast exception')\n", (1218, 1245), False, 'from pytest import fail\n'), ((1659, 1679), 'tanuki.data_store.column.Column', 'Column', (['"""test"""', 'data'], {}), "('test', 
data)\n", (1665, 1679), False, 'from tanuki.data_store.column import Column\n'), ((1896, 1909), 'hamcrest.equal_to', 'equal_to', (['[1]'], {}), '([1])\n', (1904, 1909), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((2090, 2104), 'hamcrest.equal_to', 'equal_to', (['(True)'], {}), '(True)\n', (2098, 2104), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((2151, 2166), 'hamcrest.equal_to', 'equal_to', (['(False)'], {}), '(False)\n', (2159, 2166), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((2207, 2222), 'hamcrest.equal_to', 'equal_to', (['(False)'], {}), '(False)\n', (2215, 2222), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((2455, 2464), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (2458, 2464), False, 'from hamcrest.core.core.is_ import is_\n'), ((2578, 2587), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (2581, 2587), False, 'from hamcrest.core.core.is_ import is_\n'), ((2682, 2691), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (2685, 2691), False, 'from hamcrest.core.core.is_ import is_\n'), ((2927, 2936), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (2930, 2936), False, 'from hamcrest.core.core.is_ import is_\n'), ((3049, 3058), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (3052, 3058), False, 'from hamcrest.core.core.is_ import is_\n'), ((3152, 3161), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (3155, 3161), False, 'from hamcrest.core.core.is_ import is_\n'), ((3396, 3405), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (3399, 3405), False, 'from hamcrest.core.core.is_ import is_\n'), ((3517, 3526), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (3520, 3526), False, 'from hamcrest.core.core.is_ import is_\n'), ((3619, 3628), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (3622, 3628), False, 'from 
hamcrest.core.core.is_ import is_\n'), ((3861, 3870), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (3864, 3870), False, 'from hamcrest.core.core.is_ import is_\n'), ((3984, 3993), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (3987, 3993), False, 'from hamcrest.core.core.is_ import is_\n'), ((4088, 4097), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (4091, 4097), False, 'from hamcrest.core.core.is_ import is_\n'), ((4332, 4341), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (4335, 4341), False, 'from hamcrest.core.core.is_ import is_\n'), ((4453, 4462), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (4456, 4462), False, 'from hamcrest.core.core.is_ import is_\n'), ((4556, 4565), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (4559, 4565), False, 'from hamcrest.core.core.is_ import is_\n'), ((4798, 4807), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (4801, 4807), False, 'from hamcrest.core.core.is_ import is_\n'), ((4919, 4928), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (4922, 4928), False, 'from hamcrest.core.core.is_ import is_\n'), ((5023, 5032), 'hamcrest.core.core.is_.is_', 'is_', (['(True)'], {}), '(True)\n', (5026, 5032), False, 'from hamcrest.core.core.is_ import is_\n'), ((5159, 5170), 'hamcrest.equal_to', 'equal_to', (['(2)'], {}), '(2)\n', (5167, 5170), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((5277, 5288), 'hamcrest.equal_to', 'equal_to', (['(1)'], {}), '(1)\n', (5285, 5288), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((5602, 5677), 'hamcrest.equal_to', 'equal_to', (['""" test\nindex \n0 1\n1 2\n2 3"""'], {}), '(""" test\nindex \n0 1\n1 2\n2 3""")\n', (5610, 5677), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((5795, 5870), 'hamcrest.equal_to', 'equal_to', (['""" test\nindex \n0 1\n1 2\n2 3"""'], {}), '(""" test\nindex \n0 
1\n1 2\n2 3""")\n', (5803, 5870), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((1721, 1735), 'hamcrest.equal_to', 'equal_to', (['data'], {}), '(data)\n', (1729, 1735), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((5475, 5486), 'hamcrest.equal_to', 'equal_to', (['e'], {}), '(e)\n', (5483, 5486), False, 'from hamcrest import assert_that, equal_to, is_in\n'), ((2387, 2426), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [True, True, True]}"], {}), "({'test': [True, True, True]})\n", (2396, 2426), False, 'from pandas.core.frame import DataFrame\n'), ((2508, 2549), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [False, True, False]}"], {}), "({'test': [False, True, False]})\n", (2517, 2549), False, 'from pandas.core.frame import DataFrame\n'), ((2618, 2659), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [False, True, False]}"], {}), "({'test': [False, True, False]})\n", (2627, 2659), False, 'from pandas.core.frame import DataFrame\n'), ((2856, 2898), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [False, False, False]}"], {}), "({'test': [False, False, False]})\n", (2865, 2898), False, 'from pandas.core.frame import DataFrame\n'), ((2980, 3020), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [True, False, True]}"], {}), "({'test': [True, False, True]})\n", (2989, 3020), False, 'from pandas.core.frame import DataFrame\n'), ((3089, 3129), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [True, True, False]}"], {}), "({'test': [True, True, False]})\n", (3098, 3129), False, 'from pandas.core.frame import DataFrame\n'), ((3326, 3368), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [False, False, False]}"], {}), "({'test': [False, False, False]})\n", (3335, 3368), False, 'from pandas.core.frame import DataFrame\n'), ((3449, 3489), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [True, False, True]}"], {}), "({'test': [True, False, True]})\n", (3458, 3489), False, 'from 
pandas.core.frame import DataFrame\n'), ((3557, 3597), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [True, False, True]}"], {}), "({'test': [True, False, True]})\n", (3566, 3597), False, 'from pandas.core.frame import DataFrame\n'), ((3793, 3832), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [True, True, True]}"], {}), "({'test': [True, True, True]})\n", (3802, 3832), False, 'from pandas.core.frame import DataFrame\n'), ((3914, 3955), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [False, True, False]}"], {}), "({'test': [False, True, False]})\n", (3923, 3955), False, 'from pandas.core.frame import DataFrame\n'), ((4024, 4065), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [False, False, True]}"], {}), "({'test': [False, False, True]})\n", (4033, 4065), False, 'from pandas.core.frame import DataFrame\n'), ((4262, 4304), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [False, False, False]}"], {}), "({'test': [False, False, False]})\n", (4271, 4304), False, 'from pandas.core.frame import DataFrame\n'), ((4385, 4425), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [True, False, True]}"], {}), "({'test': [True, False, True]})\n", (4394, 4425), False, 'from pandas.core.frame import DataFrame\n'), ((4493, 4534), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [False, True, False]}"], {}), "({'test': [False, True, False]})\n", (4502, 4534), False, 'from pandas.core.frame import DataFrame\n'), ((4730, 4769), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [True, True, True]}"], {}), "({'test': [True, True, True]})\n", (4739, 4769), False, 'from pandas.core.frame import DataFrame\n'), ((4851, 4890), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [True, True, True]}"], {}), "({'test': [True, True, True]})\n", (4860, 4890), False, 'from pandas.core.frame import DataFrame\n'), ((4959, 5000), 'pandas.core.frame.DataFrame', 'DataFrame', (["{'test': [False, True, False]}"], {}), "({'test': [False, 
True, False]})\n", (4968, 5000), False, 'from pandas.core.frame import DataFrame\n')] |
import unittest
import hail as hl
from lib.model.seqr_mt_schema import SeqrVariantSchema
from tests.data.sample_vep import VEP_DATA, DERIVED_DATA
class TestSeqrModel(unittest.TestCase):
    """Checks SeqrVariantSchema's derived annotations against a 30-variant 1kg VCF fixture.

    Each test loads ``tests/data/1kg_30variants.vcf.bgz``, narrows it to a single
    variant, runs the schema's annotation pipeline, and compares the collected
    row against precomputed expected values.
    """

    def _get_filtered_mt(self, rsid='rs35471880'):
        """Return the fixture matrix table restricted to *rsid*, with multi-allelics split."""
        mt = hl.import_vcf('tests/data/1kg_30variants.vcf.bgz')
        mt = hl.split_multi(mt.filter_rows(mt.rsid == rsid))
        return mt

    def _assert_bucketed_samples(self, row, prefix, non_empty, start, end, step):
        """Assert every ``<prefix>.<lo>_to_<hi>`` bucket field in *row*.

        Buckets listed in *non_empty* must equal the given sample sets; every
        other bucket generated by ``range(start, end, step)`` must be empty.
        """
        for name, samples in non_empty.items():
            self.assertEqual(row[name], samples)
        for lo in range(start, end, step):
            name = f'{prefix}.{lo}_to_{lo + step}'
            if name not in non_empty:
                self.assertEqual(row[name], set())

    def test_variant_derived_fields(self):
        """All derived per-variant fields must match the precomputed DERIVED_DATA fixture."""
        rsid = 'rs35471880'
        mt = self._get_filtered_mt(rsid).annotate_rows(**VEP_DATA[rsid])
        seqr_schema = SeqrVariantSchema(mt)
        # NOTE(review): .pos() appears twice in this chain (before .start() and
        # again before .xstart()); confirm whether the second registration is
        # intentional or a harmless duplicate.
        seqr_schema.sorted_transcript_consequences().doc_id(length=512).variant_id().contig().pos().start().end().ref().alt() \
            .pos().xstart().xstop().xpos().transcript_consequence_terms().transcript_ids().main_transcript().gene_ids() \
            .coding_gene_ids().domains().ac().af().an().annotate_all()
        mt = seqr_schema.select_annotated_mt()

        obj = mt.rows().collect()[0]
        # Cannot do a nested compare because of nested hail objects, so compare field by field.
        fields = ['AC', 'AF', 'AN', 'codingGeneIds', 'docId', 'domains', 'end', 'geneIds', 'ref', 'alt', 'start',
                  'variantId', 'transcriptIds', 'xpos', 'xstart', 'xstop', 'contig']
        for field in fields:
            self.assertEqual(obj[field], DERIVED_DATA[rsid][field])
        self.assertEqual(obj['mainTranscript']['transcript_id'], DERIVED_DATA[rsid]['mainTranscript']['transcript_id'])

    def test_variant_genotypes(self):
        """Per-sample genotype annotations (num_alt, gq, ab, dp) must match expectations."""
        mt = self._get_filtered_mt()
        seqr_schema = SeqrVariantSchema(mt)
        mt = seqr_schema.genotypes().select_annotated_mt()

        genotypes = mt.rows().collect()[0].genotypes
        # Index by sample_id so the comparison is order-independent.
        actual = {gen['sample_id']: dict(gen) for gen in genotypes}
        expected = {'HG00731': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 73.0, 'sample_id': 'HG00731'},
                    'HG00732': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 70.0, 'sample_id': 'HG00732'},
                    'HG00733': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 66.0, 'sample_id': 'HG00733'},
                    'NA19675': {'num_alt': 1, 'gq': 99, 'ab': 0.6000000238418579, 'dp': 29.0,
                                'sample_id': 'NA19675'},
                    'NA19678': {'num_alt': 0, 'gq': 78, 'ab': 0.0, 'dp': 28.0, 'sample_id': 'NA19678'},
                    'NA19679': {'num_alt': 1, 'gq': 99, 'ab': 0.3571428656578064, 'dp': 27.0,
                                'sample_id': 'NA19679'},
                    'NA20870': {'num_alt': 1, 'gq': 99, 'ab': 0.5142857432365417, 'dp': 67.0,
                                'sample_id': 'NA20870'},
                    'NA20872': {'num_alt': 1, 'gq': 99, 'ab': 0.5066666603088379, 'dp': 74.0,
                                'sample_id': 'NA20872'},
                    'NA20874': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 69.0, 'sample_id': 'NA20874'},
                    'NA20875': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 93.0, 'sample_id': 'NA20875'},
                    'NA20876': {'num_alt': 1, 'gq': 99, 'ab': 0.4383561611175537, 'dp': 70.0,
                                'sample_id': 'NA20876'},
                    'NA20877': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 76.0, 'sample_id': 'NA20877'},
                    'NA20878': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 73.0, 'sample_id': 'NA20878'},
                    'NA20881': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 69.0, 'sample_id': 'NA20881'},
                    'NA20885': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 82.0, 'sample_id': 'NA20885'},
                    'NA20888': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 74.0, 'sample_id': 'NA20888'}}
        self.assertEqual(actual, expected)

    def test_samples_num_alt(self):
        """samples_num_alt buckets: five het samples, no hom-alts, no no-calls."""
        mt = self._get_filtered_mt()
        seqr_schema = SeqrVariantSchema(mt)
        mt = seqr_schema.samples_no_call().samples_num_alt().select_annotated_mt()

        row = mt.rows().flatten().collect()[0]
        self.assertEqual(row.samples_no_call, set())
        self.assertEqual(row['samples_num_alt.1'], {'NA19679', 'NA19675', 'NA20870', 'NA20876', 'NA20872'})
        self.assertEqual(row['samples_num_alt.2'], set())

    def test_samples_gq(self):
        """Genotype-quality buckets: only the 75-80 bin should be populated."""
        non_empty = {
            'samples_gq.75_to_80': {'NA19678'}
        }
        start = 0
        end = 95
        step = 5

        mt = self._get_filtered_mt()
        seqr_schema = SeqrVariantSchema(mt)
        mt = seqr_schema.samples_gq(start, end, step).select_annotated_mt()

        row = mt.rows().flatten().collect()[0]
        self._assert_bucketed_samples(row, 'samples_gq', non_empty, start, end, step)

    def test_samples_ab(self):
        """Allele-balance buckets: only the 35-40 and 40-45 bins should be populated."""
        non_empty = {
            'samples_ab.35_to_40': {'NA19679'},
            'samples_ab.40_to_45': {'NA20876'},
        }
        start = 0
        end = 45
        step = 5

        mt = self._get_filtered_mt()
        seqr_schema = SeqrVariantSchema(mt)
        mt = seqr_schema.samples_ab(start, end, step).select_annotated_mt()

        row = mt.rows().flatten().collect()[0]
        self._assert_bucketed_samples(row, 'samples_ab', non_empty, start, end, step)
| [
"lib.model.seqr_mt_schema.SeqrVariantSchema",
"hail.import_vcf"
] | [((254, 304), 'hail.import_vcf', 'hl.import_vcf', (['"""tests/data/1kg_30variants.vcf.bgz"""'], {}), "('tests/data/1kg_30variants.vcf.bgz')\n", (267, 304), True, 'import hail as hl\n'), ((552, 573), 'lib.model.seqr_mt_schema.SeqrVariantSchema', 'SeqrVariantSchema', (['mt'], {}), '(mt)\n', (569, 573), False, 'from lib.model.seqr_mt_schema import SeqrVariantSchema\n'), ((1583, 1604), 'lib.model.seqr_mt_schema.SeqrVariantSchema', 'SeqrVariantSchema', (['mt'], {}), '(mt)\n', (1600, 1604), False, 'from lib.model.seqr_mt_schema import SeqrVariantSchema\n'), ((3825, 3846), 'lib.model.seqr_mt_schema.SeqrVariantSchema', 'SeqrVariantSchema', (['mt'], {}), '(mt)\n', (3842, 3846), False, 'from lib.model.seqr_mt_schema import SeqrVariantSchema\n'), ((4420, 4441), 'lib.model.seqr_mt_schema.SeqrVariantSchema', 'SeqrVariantSchema', (['mt'], {}), '(mt)\n', (4437, 4441), False, 'from lib.model.seqr_mt_schema import SeqrVariantSchema\n'), ((5123, 5144), 'lib.model.seqr_mt_schema.SeqrVariantSchema', 'SeqrVariantSchema', (['mt'], {}), '(mt)\n', (5140, 5144), False, 'from lib.model.seqr_mt_schema import SeqrVariantSchema\n')] |
from flask_restplus import Api
from flask import Blueprint
from app.main.controller.user_controller import api as user_ns
from app.main.controller.auth_controller import api as auth_ns
# Root blueprint under which every API namespace is mounted; registered on
# the Flask app elsewhere (outside this file's view).
blueprint = Blueprint('api', __name__)

# Flask-RESTPlus Api wired to the blueprint; title/description feed the
# auto-generated Swagger UI.
api = Api(blueprint,
          title='IT Jobs La Rioja',
          version='1.0',
          description='IT jobs La Rioja'
          )

api.add_namespace(user_ns, path='/user')
# No explicit path: flask-restplus mounts the namespace at the default path
# derived from the namespace's own name — presumably '/auth'; verify against
# the namespace definition in auth_controller.
api.add_namespace(auth_ns)
| [
"flask.Blueprint",
"flask_restplus.Api"
] | [((199, 225), 'flask.Blueprint', 'Blueprint', (['"""api"""', '__name__'], {}), "('api', __name__)\n", (208, 225), False, 'from flask import Blueprint\n'), ((233, 325), 'flask_restplus.Api', 'Api', (['blueprint'], {'title': '"""IT Jobs La Rioja"""', 'version': '"""1.0"""', 'description': '"""IT jobs La Rioja"""'}), "(blueprint, title='IT Jobs La Rioja', version='1.0', description=\n 'IT jobs La Rioja')\n", (236, 325), False, 'from flask_restplus import Api\n')] |