code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
from pymatting.util.util import (
grid_coordinates,
sparse_conv_matrix,
weights_to_laplacian,
)
import numpy as np
def uniform_laplacian(image, radius=1):
"""This function returns a Laplacian matrix with all weights equal to one.
Parameters
------------
image: numpy.ndarray
Image with shape :math:`h\\times w \\times 3`
radius: int
Local window size, defaults to 1
Returns
-------
L: scipy.sparse.spmatrix
Matting Laplacian
"""
height, width = image.shape[:2]
window_size = 2 * radius + 1
W = sparse_conv_matrix(width, height, np.ones((window_size, window_size)))
return weights_to_laplacian(W)
|
[
"numpy.ones",
"pymatting.util.util.weights_to_laplacian"
] |
[((666, 689), 'pymatting.util.util.weights_to_laplacian', 'weights_to_laplacian', (['W'], {}), '(W)\n', (686, 689), False, 'from pymatting.util.util import grid_coordinates, sparse_conv_matrix, weights_to_laplacian\n'), ((617, 652), 'numpy.ones', 'np.ones', (['(window_size, window_size)'], {}), '((window_size, window_size))\n', (624, 652), True, 'import numpy as np\n')]
|
from model import *
from collections import defaultdict
def sort_urls(clientip):
urlcnt = defaultdict(int)
for request in Request.where(clientip=clientip).select():
urlcnt[request.host + request.url] += 1
return zip(sorted(urlcnt.keys(), key=lambda k: urlcnt[k])[::-1], sorted(urlcnt.values())[::-1])
if __name__ == "__main__":
for kv in sort_urls("172.16.31.10")[:10]:
print(kv[0], kv[1])
|
[
"collections.defaultdict"
] |
[((96, 112), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (107, 112), False, 'from collections import defaultdict\n')]
|
import unittest
from tkinter import messagebox
from moneymanager import MoneyManager, item_types
class TestMoneyManager(unittest.TestCase):
def setUp(self):
self.user = MoneyManager()
self.user.balance = 1000.0
def test_legal_deposit_works(self):
'''Tests that depositing money using the account's deposit funds function
adds the amount to the balance'''
self.user.deposit_funds(1000)
self.assertEqual(2000, self.user.balance)
def test_illegal_deposit_raises_exception(self):
'''Tests that that depositing a value that is not a float results into
an exception being raised'''
self.assertEqual(self.user.deposit_funds("4dssjd"), False)
def test_legal_entry(self):
'''Tests that adding a new entry with a legal amount subtracts the funds from
the balance'''
self.user.add_entry(999, item_types[0])
self.assertEqual(self.user.balance, 1)
def test_illegal_entry_amount(self):
'''Tets that withdrawing an illegal amount raises an exception'''
valid_input, valid_type, correct_amount = self.user.add_entry(
'banana', item_types[1])
self.assertEqual(
(valid_input, valid_type, correct_amount), (False, True, True))
def test_illegal_entry_type(self):
'''Tests that adding illegal entry type raises an exception'''
valid_input, valid_type, correct_amount = self.user.add_entry(
90, 'banana')
self.assertEqual(
(valid_input, valid_type, correct_amount), (True, False, True))
def test_insufficient_funds_entry(self):
'''Tests when a user tries to spend more than the account balance'''
valid_input, valid_type, correct_amount = self.user.add_entry(
1001, item_types[0])
self.assertEqual(
(valid_input, valid_type, correct_amount), (True, True, False))
unittest.main()
|
[
"unittest.main",
"moneymanager.MoneyManager"
] |
[((1978, 1993), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1991, 1993), False, 'import unittest\n'), ((192, 206), 'moneymanager.MoneyManager', 'MoneyManager', ([], {}), '()\n', (204, 206), False, 'from moneymanager import MoneyManager, item_types\n')]
|
from ufora.FORA.python.PurePython.testModules.same_line_number.B import B
class A(object):
def __init__(self, m):
self.m = m
def foo(self):
return B(self.m)
|
[
"ufora.FORA.python.PurePython.testModules.same_line_number.B.B"
] |
[((174, 183), 'ufora.FORA.python.PurePython.testModules.same_line_number.B.B', 'B', (['self.m'], {}), '(self.m)\n', (175, 183), False, 'from ufora.FORA.python.PurePython.testModules.same_line_number.B import B\n')]
|
# -*- coding: utf-8 -*-
"""
clikraken.api.private.get_balance
This module queries the Balance method of Kraken's API
and outputs the results in a tabular format.
Licensed under the Apache License, Version 2.0. See the LICENSE file.
"""
import argparse
from collections import OrderedDict
from decimal import Decimal
from clikraken.api.api_utils import query_api
from clikraken.clikraken_utils import _tabulate as tabulate
from clikraken.clikraken_utils import csv
from clikraken.clikraken_utils import process_options
def get_balance():
"""Get user balance function to use in python scripts."""
args = process_options({}, {})
res = get_balance_api(args)
copy = {}
for asset in res:
val = Decimal(res[asset])
if len(asset) == 4 and asset[0] in ["Z", "X"]:
copy[asset[1:]] = val
else:
copy[asset] = val
return copy
def get_balance_api(args):
"""Get user balance API call."""
res = query_api("private", "Balance", {}, args)
return res
def get_balance_cmd(args=None):
"""Get user balance CLI cmd."""
res = get_balance_api(args)
bal_list = []
for asset in res:
# Initialize an OrderedDict to garantee the column order
# for later use with the tabulate function
asset_dict = OrderedDict()
# Remove leading Z or X from asset pair if it is of length 4
asset_dict["asset"] = (
asset[1:] if len(asset) == 4 and asset[0] in ["Z", "X"] else asset
)
asset_dict["balance"] = res[asset]
bal_list.append(asset_dict)
if not bal_list:
return
# Sort alphabetically
bal_list = sorted(bal_list, key=lambda asset_dict: asset_dict["asset"])
if args.csv:
print(csv(bal_list, headers="keys"))
else:
print(tabulate(bal_list, headers="keys"))
def init(subparsers):
parser_balance = subparsers.add_parser(
"balance",
aliases=["bal"],
help="[private] Get your current balance",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_balance.set_defaults(sub_func=get_balance_cmd)
|
[
"clikraken.clikraken_utils.csv",
"clikraken.clikraken_utils._tabulate",
"decimal.Decimal",
"clikraken.api.api_utils.query_api",
"collections.OrderedDict",
"clikraken.clikraken_utils.process_options"
] |
[((617, 640), 'clikraken.clikraken_utils.process_options', 'process_options', (['{}', '{}'], {}), '({}, {})\n', (632, 640), False, 'from clikraken.clikraken_utils import process_options\n'), ((968, 1009), 'clikraken.api.api_utils.query_api', 'query_api', (['"""private"""', '"""Balance"""', '{}', 'args'], {}), "('private', 'Balance', {}, args)\n", (977, 1009), False, 'from clikraken.api.api_utils import query_api\n'), ((723, 742), 'decimal.Decimal', 'Decimal', (['res[asset]'], {}), '(res[asset])\n', (730, 742), False, 'from decimal import Decimal\n'), ((1306, 1319), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1317, 1319), False, 'from collections import OrderedDict\n'), ((1761, 1790), 'clikraken.clikraken_utils.csv', 'csv', (['bal_list'], {'headers': '"""keys"""'}), "(bal_list, headers='keys')\n", (1764, 1790), False, 'from clikraken.clikraken_utils import csv\n'), ((1816, 1850), 'clikraken.clikraken_utils._tabulate', 'tabulate', (['bal_list'], {'headers': '"""keys"""'}), "(bal_list, headers='keys')\n", (1824, 1850), True, 'from clikraken.clikraken_utils import _tabulate as tabulate\n')]
|
import os
import sys
cmd1 = "python train_scratch.py --save_path='experiments/CIFAR10/baseline/mobilenetv2/'"
os.system(cmd1)
|
[
"os.system"
] |
[((111, 126), 'os.system', 'os.system', (['cmd1'], {}), '(cmd1)\n', (120, 126), False, 'import os\n')]
|
"""
★ Task: Write an algorithm that can find whether a word is present in a sentence. ★
"""
import string
from datastruct.tree import Trie
def math_word(word: str, sentence: str) -> bool:
table = str.maketrans({key: None for key in string.punctuation})
sentence = sentence.translate(table).lower()
words = sentence.split()
repertoire = Trie()
for w in words:
repertoire.insert(w)
return repertoire.search(word)
if __name__ == '__main__':
s = 'How to match a word in a string? Use Trie.'
print(math_word('how', s))
|
[
"datastruct.tree.Trie"
] |
[((355, 361), 'datastruct.tree.Trie', 'Trie', ([], {}), '()\n', (359, 361), False, 'from datastruct.tree import Trie\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gzip
import json
import os
import time
from tqdm import tqdm
from clinicgen.data.image2text import _CaptioningData
class Flickr30kData(_CaptioningData):
IMAGE_NUM = 158915
DIR_IMAGES = 'flickr30k-images'
FILE_CAPTIONS = os.path.join('flickr30k', 'results_20130124.token')
def __init__(self, root, meta=None, split=None, target_transform=None, cache_image=False, img_mode='center',
img_augment=False, cache_text=False, dump_dir=None):
if not cache_text:
raise ValueError('Flickr30k data only supports cached texts')
super().__init__(root, split=split, cache_image=cache_image, cache_text=cache_text, dump_dir=dump_dir)
pre_transform, self.transform = Flickr30kData.get_transform(cache_image, img_mode, img_augment)
self.target_transform = target_transform
self.multi_instance = True
if dump_dir is not None:
t = time.time()
if self.load():
print('Loaded data dump from %s (%.2fs)' % (dump_dir, time.time() - t))
self.pre_processes()
return
splits = None
if meta is not None:
splits = {}
with open(meta, encoding='utf-8') as f:
meta_data = json.load(f)
for entry in meta_data['images']:
splits[entry['filename']] = entry['split']
captions = os.path.join(root, self.FILE_CAPTIONS)
with open(captions, encoding='utf-8') as f:
with tqdm(total=self.IMAGE_NUM) as pbar:
pbar.set_description('Data ({0})'.format(split))
count = 0
interval = 1000
prev_image, buffer = None, []
for line in f:
entry = line.rstrip().split('\t')
image = entry[0].split('#')[0]
if split is None or (image in splits and splits[image] == split):
report = gzip.compress(entry[1].encode('utf-8'))
if prev_image is not None and image != prev_image:
count = self._append_image(prev_image, buffer, count, pre_transform)
buffer = []
prev_image = image
buffer.append((entry[0], report))
else:
count += 1
if count >= interval:
pbar.update(count)
count = 0
if len(buffer) > 0:
count = self._append_image(prev_image, buffer, count, pre_transform)
if count > 0:
pbar.update(count)
if dump_dir is not None:
self.dump()
self.pre_processes()
def _append_image(self, image_id, buffer, count, pre_transform):
if len(buffer) > 0:
image = os.path.join(self.root, self.DIR_IMAGES, image_id)
if self.cache_image:
image = self.bytes_image(image, pre_transform)
if self.split == 'test' or self.split == 'val':
buffer = [e[1] for e in buffer]
self.ids.append(image_id)
self.samples.append((image, buffer))
self.targets.append(buffer)
count += len(buffer)
else:
for entry_id, report in buffer:
self.ids.append(entry_id)
self.samples.append((image, report))
self.targets.append(report)
count += 1
return count
def pre_processes(self):
self.pre_transform_texts(self.split)
|
[
"tqdm.tqdm",
"json.load",
"os.path.join",
"time.time"
] |
[((288, 339), 'os.path.join', 'os.path.join', (['"""flickr30k"""', '"""results_20130124.token"""'], {}), "('flickr30k', 'results_20130124.token')\n", (300, 339), False, 'import os\n'), ((1464, 1502), 'os.path.join', 'os.path.join', (['root', 'self.FILE_CAPTIONS'], {}), '(root, self.FILE_CAPTIONS)\n', (1476, 1502), False, 'import os\n'), ((974, 985), 'time.time', 'time.time', ([], {}), '()\n', (983, 985), False, 'import time\n'), ((2967, 3017), 'os.path.join', 'os.path.join', (['self.root', 'self.DIR_IMAGES', 'image_id'], {}), '(self.root, self.DIR_IMAGES, image_id)\n', (2979, 3017), False, 'import os\n'), ((1318, 1330), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1327, 1330), False, 'import json\n'), ((1572, 1598), 'tqdm.tqdm', 'tqdm', ([], {'total': 'self.IMAGE_NUM'}), '(total=self.IMAGE_NUM)\n', (1576, 1598), False, 'from tqdm import tqdm\n'), ((1084, 1095), 'time.time', 'time.time', ([], {}), '()\n', (1093, 1095), False, 'import time\n')]
|
import matplotlib.pyplot as plt
import csv
import pandas as pd
import numpy as np
# room d137b #
file = open("data_raw/_d137b.csv")
read = csv.reader(file, delimiter=';')
_read = pd.DataFrame(list(read)).to_numpy()
data_start = 1
data_end = 511
e2_q = _read[:, 1][data_start:data_end].astype(np.float) * -1
e2_ti = _read[:, 2][data_start:data_end].astype(np.float)
e2_te = _read[:, 3][data_start:data_end].astype(np.float)
file.close()
|
[
"csv.reader"
] |
[((141, 172), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '""";"""'}), "(file, delimiter=';')\n", (151, 172), False, 'import csv\n')]
|
import functools
def foo(x, y):
print(x, " * ", y)
return x * y
r = range(5, 10)
res = functools.reduce(foo, r)
print(res)
|
[
"functools.reduce"
] |
[((100, 124), 'functools.reduce', 'functools.reduce', (['foo', 'r'], {}), '(foo, r)\n', (116, 124), False, 'import functools\n')]
|
import json
from tempfile import NamedTemporaryFile
from datetime import datetime
import zzlog
def test_loginfo():
with NamedTemporaryFile() as f:
logger = zzlog.setup(
logger_root='.',
filename=f.name,
)
message = 'Hello World!'
logger.error(message)
with open(f.name) as f:
lines = f.readlines()
logs = [json.loads(l) for l in lines]
assert len(logs) == 1
log = logs[0]
assert 'level' in log
assert log['level'] == 'ERROR'
assert 'name' in log
assert log['name'] == '.'
assert 'timestamp' in log
t = datetime.strptime(log['timestamp'], '%Y-%m-%d %H:%M:%S.%f')
assert (t - datetime.now()).total_seconds() < 1
assert 'message' in log
assert log['message'] == 'Hello World!'
|
[
"tempfile.NamedTemporaryFile",
"json.loads",
"zzlog.setup",
"datetime.datetime.strptime",
"datetime.datetime.now"
] |
[((128, 148), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {}), '()\n', (146, 148), False, 'from tempfile import NamedTemporaryFile\n'), ((172, 217), 'zzlog.setup', 'zzlog.setup', ([], {'logger_root': '"""."""', 'filename': 'f.name'}), "(logger_root='.', filename=f.name)\n", (183, 217), False, 'import zzlog\n'), ((700, 759), 'datetime.datetime.strptime', 'datetime.strptime', (["log['timestamp']", '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(log['timestamp'], '%Y-%m-%d %H:%M:%S.%f')\n", (717, 759), False, 'from datetime import datetime\n'), ((404, 417), 'json.loads', 'json.loads', (['l'], {}), '(l)\n', (414, 417), False, 'import json\n'), ((784, 798), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (796, 798), False, 'from datetime import datetime\n')]
|
from copy import deepcopy
from numpy import zeros
from pyNastran.converters.cart3d.cart3d import Cart3D
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
class Cart3d_Mesher(Cart3D):
def __init__(self, log=None, debug=False):
Cart3D.__init__(self, log=log, debug=debug)
def read_cart3d(self, cart3d_filename, result_names=None):
Cart3D.read_cart3d(self, cart3d_filename, result_names=result_names)
#self.nodes = nodes
#self.elements = elements - 1
def _get_segment(self, a, eid, segments):
if a in segments:
a_elems = segments[a]
i = a_elems.index(eid)
a_elems.pop(i)
eid_a = a_elems[0]
return eid_a
return None
def _get_segments(self, nodes, elements):
segments = {} # key=eid,
lengths = {}
for eid, e in elements.items():
a = tuple(sorted([e[0], e[1]])) # segments of e
b = tuple(sorted([e[1], e[2]]))
c = tuple(sorted([e[2], e[0]]))
if a in segments:
segments[a].append(eid)
else:
segments[a] = [eid]
lengths[a] = nodes[a[1]] - nodes[a[0]]
if b in segments:
segments[b].append(eid)
else:
segments[b] = [eid]
lengths[b] = nodes[b[1]] - nodes[b[0]]
if c in segments:
segments[c].append(eid)
else:
segments[c] = [eid]
lengths[c] = nodes[c[1]] - nodes[c[0]]
return segments, lengths
def make_quads(self, nodes, elements):
raise NotImplementedError()
segments, lengths = self._get_segments(nodes, elements)
for eid, e in elements.items():
a = tuple(sorted([e[0], e[1]])) # segments of e
b = tuple(sorted([e[1], e[2]]))
c = tuple(sorted([e[2], e[0]]))
#a.sort()
#b.sort()
#c.sort()
print(eid, e)
print(segments[a])
print(lengths[a])
print(len(segments[a]))
eidA = self._get_segment(a, eid, segments)
eidB = self._get_segment(b, eid, segments)
eidC = self._get_segment(c, eid, segments)
print("eidA=%s eidB=%s eidC=%s" % (eidA, eidB, eidC))
if eidA:
i = 0
e2 = elements[eidA]
self.check_quad(nodes, eid, eidA, e, e2, a, b, c, i)
del segments[a]
if eidB:
i = 1
e2 = elements[eidB]
self.check_quad(nodes, eid, eidB, e, e2, a, b, c, i)
del segments[b]
if eidC:
i = 2
e2 = elements[eidC]
self.check_quad(nodes, eid, eidC, e, e2, a, b, c, i)
del segments[c]
print("------")
#break
#for segment in segments:
asdf
def _check_quad(self, nodes, eid, eidA, e, e2, a, b, c, i):
r"""
::
A----B
| \ e|
|e2 \|
C----D
two tests
1. folding angle A-B x A-C
2a. abs(A-C) - abs(B-D) = 0 (abs to prevent 2L)
2b. abs(A-B) - abs(C-D) = 0
"""
iplus1 = i + 1
iplus2 = i + 2
if iplus1 > 2:
iplus1 -= 3
if iplus2 > 2:
iplus2 -= 3
print(i, iplus1)
print(iplus1, iplus2)
print(iplus2, i)
AD = nodes[e[i]] - nodes[e[iplus1]]
AB = nodes[e[iplus1]] - nodes[e[iplus2]]
BD = nodes[e[iplus2]] - nodes[e[i]]
print(AD)
print(AB)
print(BD)
print(e2)
j = e2.index(e[i])
jplus1 = j + 1
jplus2 = j + 2
if jplus1 > 2:
jplus1 -= 3
if jplus2 > 2:
jplus2 -= 3
print("DA = ", e[j], e[jplus1])
DA = nodes[e[j]] - nodes[e[jplus1]]
print(DA)
asdf
def project(self, bdf_filename, x0, growth_rate=1.3, nlayers=10):
x = zeros(nlayers, dtype='float64')
for i in range(nlayers):
x[i] = x0 * growth_rate ** i
#print(self.nodes.shape)
#print(self.elements.shape)
nnodes = self.nodes.shape[0]
nelements = self.elements.shape[0]
nnodes2 = nnodes * (nlayers + 1)
npents = nelements * nlayers
cnormals = self.get_normals(self.nodes, self.elements)
nnormals = self.get_normals_at_nodes(self.nodes, self.elements, cnormals)
nodes = zeros((nnodes2, 3), dtype='float64')
pents = zeros((npents, 6), dtype='int32')
ih1 = 0
in1 = 0
in2 = nnodes
nodes[in1:in2, :] = self.nodes
in1 += nnodes
in2 += nnodes
ih1 = 0
ih2 = nelements
print('x = %s' % x)
for i in range(nlayers):
nodes_old = self.nodes
dx = nnormals * x[i]
nodes_new = self.nodes + dx
dn0 = nnodes * i
dn1 = nnodes * (i + 1)
elements_old = self.elements + nnodes * i
elements_new = self.elements + nnodes * (i + 1)
pents[ih1:ih2, 0:3] = deepcopy(elements_old)
pents[ih1:ih2, 3:6] = deepcopy(elements_new)
nodes[in1:in2, :] = deepcopy(nodes_new)
in1 += nnodes
in2 += nnodes
ih1 += nelements
ih2 += nelements
with open(bdf_filename, 'wb') as f:
f.write('CEND\n')
f.write('BEGIN BULK\n')
pents += 1
cid = None
for nid, grid in enumerate(nodes):
if nid % 5000 == 0:
print('writing nid=%s' % (nid + 1))
card = ['GRID', nid + 1, cid, ] + list(grid)
f.write(print_card_16(card))
pid = 0
mid = 1
for eid, penta in enumerate(pents):
if (eid + 1) % nelements == 1:
pid += 1
card = ['PSOLID', pid, mid]
f.write(print_card_8(card))
print('bumping pid -> %s' % pid)
if eid % 5000 == 0:
print('writing eid=%s' % (eid + 1))
card = ['CPENTA', eid + 1, pid, ] + list(penta)
f.write(print_card_8(card))
card = ['MAT1', mid, 1.0e7, None, 0.3]
f.write(print_card_8(card))
f.write('ENDDATA\n')
def main():
cart3d_filename = 'threePlugs_bin.tri'
cart3d = Cart3d_Mesher(log=None, debug=False)
cart3d.read_cart3d(cart3d_filename)
x0 = 10.
nlayers = 5
bdf_filename = 'threePlugs_volume.bdf'
cart3d.project(bdf_filename, x0, growth_rate=1.3, nlayers=nlayers)
#cart3d.write_bdf(bdf_filename)
if __name__ == '__main__':
main()
|
[
"copy.deepcopy",
"pyNastran.converters.cart3d.cart3d.Cart3D.read_cart3d",
"pyNastran.bdf.field_writer_8.print_card_8",
"numpy.zeros",
"pyNastran.converters.cart3d.cart3d.Cart3D.__init__",
"pyNastran.bdf.field_writer_16.print_card_16"
] |
[((301, 344), 'pyNastran.converters.cart3d.cart3d.Cart3D.__init__', 'Cart3D.__init__', (['self'], {'log': 'log', 'debug': 'debug'}), '(self, log=log, debug=debug)\n', (316, 344), False, 'from pyNastran.converters.cart3d.cart3d import Cart3D\n'), ((417, 485), 'pyNastran.converters.cart3d.cart3d.Cart3D.read_cart3d', 'Cart3D.read_cart3d', (['self', 'cart3d_filename'], {'result_names': 'result_names'}), '(self, cart3d_filename, result_names=result_names)\n', (435, 485), False, 'from pyNastran.converters.cart3d.cart3d import Cart3D\n'), ((4178, 4209), 'numpy.zeros', 'zeros', (['nlayers'], {'dtype': '"""float64"""'}), "(nlayers, dtype='float64')\n", (4183, 4209), False, 'from numpy import zeros\n'), ((4677, 4713), 'numpy.zeros', 'zeros', (['(nnodes2, 3)'], {'dtype': '"""float64"""'}), "((nnodes2, 3), dtype='float64')\n", (4682, 4713), False, 'from numpy import zeros\n'), ((4730, 4763), 'numpy.zeros', 'zeros', (['(npents, 6)'], {'dtype': '"""int32"""'}), "((npents, 6), dtype='int32')\n", (4735, 4763), False, 'from numpy import zeros\n'), ((5324, 5346), 'copy.deepcopy', 'deepcopy', (['elements_old'], {}), '(elements_old)\n', (5332, 5346), False, 'from copy import deepcopy\n'), ((5381, 5403), 'copy.deepcopy', 'deepcopy', (['elements_new'], {}), '(elements_new)\n', (5389, 5403), False, 'from copy import deepcopy\n'), ((5436, 5455), 'copy.deepcopy', 'deepcopy', (['nodes_new'], {}), '(nodes_new)\n', (5444, 5455), False, 'from copy import deepcopy\n'), ((6557, 6575), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['card'], {}), '(card)\n', (6569, 6575), False, 'from pyNastran.bdf.field_writer_8 import print_card_8\n'), ((5950, 5969), 'pyNastran.bdf.field_writer_16.print_card_16', 'print_card_16', (['card'], {}), '(card)\n', (5963, 5969), False, 'from pyNastran.bdf.field_writer_16 import print_card_16\n'), ((6465, 6483), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['card'], {}), '(card)\n', (6477, 6483), False, 'from pyNastran.bdf.field_writer_8 import print_card_8\n'), ((6212, 6230), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['card'], {}), '(card)\n', (6224, 6230), False, 'from pyNastran.bdf.field_writer_8 import print_card_8\n')]
|
from textwrap import dedent
import pytest
from tests.cli.run_cli import run_cli
from tests.helpers.common_test_tables import customers_test_table
from tests.helpers.data_source_fixture import DataSourceFixture
from tests.helpers.fixtures import test_data_source
from tests.helpers.mock_file_system import MockFileSystem
@pytest.mark.skipif(
test_data_source != "postgres",
reason="Run for postgres only as nothing data source specific is tested.",
)
def test_cli_update_distribution_file(data_source_fixture: DataSourceFixture, mock_file_system: MockFileSystem):
table_name = data_source_fixture.ensure_test_table(customers_test_table)
user_home_dir = mock_file_system.user_home_dir()
mock_file_system.files = {
f"{user_home_dir}/configuration.yml": data_source_fixture.create_test_configuration_yaml_str(),
f"{user_home_dir}/customers_distribution_reference.yml": dedent(
f"""
table: {table_name}
column: size
method: continuous
"""
),
}
run_cli(
[
"update",
"-c",
"configuration.yml",
"-d",
data_source_fixture.data_source.data_source_name,
f"{user_home_dir}/customers_distribution_reference.yml",
]
)
print(mock_file_system.files[f"{user_home_dir}/customers_distribution_reference.yml"])
|
[
"textwrap.dedent",
"pytest.mark.skipif",
"tests.cli.run_cli.run_cli"
] |
[((324, 454), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(test_data_source != 'postgres')"], {'reason': '"""Run for postgres only as nothing data source specific is tested."""'}), "(test_data_source != 'postgres', reason=\n 'Run for postgres only as nothing data source specific is tested.')\n", (342, 454), False, 'import pytest\n'), ((1069, 1237), 'tests.cli.run_cli.run_cli', 'run_cli', (["['update', '-c', 'configuration.yml', '-d', data_source_fixture.data_source\n .data_source_name, f'{user_home_dir}/customers_distribution_reference.yml']"], {}), "(['update', '-c', 'configuration.yml', '-d', data_source_fixture.\n data_source.data_source_name,\n f'{user_home_dir}/customers_distribution_reference.yml'])\n", (1076, 1237), False, 'from tests.cli.run_cli import run_cli\n'), ((906, 1044), 'textwrap.dedent', 'dedent', (['f"""\n table: {table_name}\n column: size\n method: continuous\n """'], {}), '(\n f"""\n table: {table_name}\n column: size\n method: continuous\n """\n )\n', (912, 1044), False, 'from textwrap import dedent\n')]
|
import sys
import numpy as np
import matplotlib.pyplot as plt
import population
import world_data
countries, provinces = world_data.get_countries_provinces()
countryPopulation = population.get_all_population_data()
countries.extend(['Hubei'])
# todo: single loop, cleanup
countryDeaths = []
for country in countries:
try:
if countryPopulation[country] < 1000000:
continue
province = 'all'
country2 = country
if country == 'Hubei':
country2 = 'China'
province = 'Hubei'
XCDR_data = np.array(world_data.get_country_xcdr(country2, province=province,
returnDates=True))
cases = int(XCDR_data[-1, 1]) # last row, third column
deaths = int(XCDR_data[-1, 2]) # last row, third column
deathDelta = int(XCDR_data[-1, 2] - XCDR_data[-8, 2])
if deaths < 10:
continue
recovered = int(XCDR_data[-1, 3]) # last row, third column
date = XCDR_data[-1, 0]
countryDeaths.append((country, cases, deaths, recovered, date, deathDelta))
except Exception as e:
print("fail: ", country, sys.exc_info()[0], e)
countryDeathsPC = []
countryDeathsDeltaPC = []
for ccdrd in countryDeaths:
country, cases, deaths, recovered, date, deathDelta = ccdrd
try:
pop = population.get_population(country)
countryDeathsPC.append((country, deaths * 1.0e6 / pop, deaths, pop, date))
countryDeathsDeltaPC.append((country, deathDelta * 1.0e6 / pop, deathDelta, pop, date))
#countryDeathrate.append((country, 100.0 * deaths / cases, deaths, pop))
except KeyError:
print("fail: ", country)
print()
countryDeathsPC = sorted(countryDeathsPC, key = lambda x: x[1]) # sort by second subitem
countryDeathsPC.reverse() # in place
countryDeathsDeltaPC = sorted(countryDeathsDeltaPC, key = lambda x: x[1]) # sort by second subitem
countryDeathsDeltaPC.reverse() # in place
dCountryDeathsPCXY = {}
for country, trash, trash, trash, trash in countryDeathsPC[0:20]:
province = 'all'
country2 = country
if country == 'Hubei':
country2 = 'China'
province = 'Hubei'
XCDR_data = np.array(world_data.get_country_xcdr(country2, province=province, returnDates=True))
pop = population.get_population(country)
#Y = 100.0 * XCDR_data[:,2] / XCDR_data[:,1]
Y = XCDR_data[:,2] / pop * 1.0e6
dCountryDeathsPCXY[country] = (XCDR_data[:,0], Y)
fig = plt.figure(dpi=75, figsize=(20,16))
ax = fig.add_subplot(111)
#ax.set_yscale("log", nonposy='clip')
for country in dCountryDeathsPCXY:
ax.plot(dCountryDeathsPCXY[country][0], dCountryDeathsPCXY[country][1],
alpha=0.5, lw=2, label=country)
legend = ax.legend(title='deaths per 1M capita (beta)')
print()
print('beta, there might be bugs')
print('current deaths per capita')
for country, deathsPC, deaths, pop, date in countryDeathsPC[0:20]:
print("%-15s" % country, ': %10.1f %5d %10d %s' % (deathsPC, deaths, pop, date.strftime("%Y-%m-%d")))
print()
print('new deaths per capita per week')
for country, deathsDeltaPC, deathsDelta, pop, date in countryDeathsDeltaPC[0:20]:
print("%-15s" % country, ': %10.1f %5d %10d %s' % (deathsDeltaPC, deathsDelta, pop, date.strftime("%Y-%m-%d")))
plt.show()
|
[
"world_data.get_countries_provinces",
"matplotlib.pyplot.show",
"world_data.get_country_xcdr",
"population.get_all_population_data",
"population.get_population",
"matplotlib.pyplot.figure",
"sys.exc_info"
] |
[((123, 159), 'world_data.get_countries_provinces', 'world_data.get_countries_provinces', ([], {}), '()\n', (157, 159), False, 'import world_data\n'), ((180, 216), 'population.get_all_population_data', 'population.get_all_population_data', ([], {}), '()\n', (214, 216), False, 'import population\n'), ((2514, 2550), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(75)', 'figsize': '(20, 16)'}), '(dpi=75, figsize=(20, 16))\n', (2524, 2550), True, 'import matplotlib.pyplot as plt\n'), ((3327, 3337), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3335, 3337), True, 'import matplotlib.pyplot as plt\n'), ((2331, 2365), 'population.get_population', 'population.get_population', (['country'], {}), '(country)\n', (2356, 2365), False, 'import population\n'), ((1373, 1407), 'population.get_population', 'population.get_population', (['country'], {}), '(country)\n', (1398, 1407), False, 'import population\n'), ((2245, 2319), 'world_data.get_country_xcdr', 'world_data.get_country_xcdr', (['country2'], {'province': 'province', 'returnDates': '(True)'}), '(country2, province=province, returnDates=True)\n', (2272, 2319), False, 'import world_data\n'), ((574, 648), 'world_data.get_country_xcdr', 'world_data.get_country_xcdr', (['country2'], {'province': 'province', 'returnDates': '(True)'}), '(country2, province=province, returnDates=True)\n', (601, 648), False, 'import world_data\n'), ((1188, 1202), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1200, 1202), False, 'import sys\n')]
|
import asyncio
from dataclasses import dataclass
from typing import Dict, List
import aiohttp
from bs4 import BeautifulSoup
from products.models import Product
@dataclass
class CrawlerResponse:
url: str
image: str
content: str
name: str
class Crawler(object):
async def _fetch_url(self, url: str) -> str:
"""
This function returns given url HTML response.
"""
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
html = await response.text()
return html
async def _fetch_category_links(self, url: str) -> List[str]:
content: str = await self._fetch_url(url)
soup = BeautifulSoup(content, 'html.parser')
urls = []
product_markups = soup.find_all('div', class_='gallery-item')
for product_markup in product_markups:
link = product_markup.find('a')
if 'product-link-to-pdp' not in link.get('class'):
# Remove promotion banners
continue
if link and link.get('href'):
urls.append(
f"https:{link.get('href')}"
)
return urls
async def _fetch_product(self, url: str) -> CrawlerResponse:
content: str = await self._fetch_url(url)
soup = BeautifulSoup(content, 'html.parser')
image = (soup.find('div', class_='product-img')
.find('img').get('data-src-desktop'))
name = soup.find('h1').get_text()
return CrawlerResponse(
name=name,
url=url,
content=content,
image=f'https:{image}'
)
def execute(self, category_url: str) -> List[CrawlerResponse]:
loop = asyncio.get_event_loop()
# fetch product links
detail_urls: List[str] = loop.run_until_complete(
self._fetch_category_links(category_url)
)
if not detail_urls:
return []
# fetch product contents
products, _ = loop.run_until_complete(asyncio.wait([
self._fetch_product(url)
for url in detail_urls
]))
loop.close()
return [
product.result()
for product in products
]
class CrawlerDataImport(object):
def process_item(self, item: CrawlerResponse) -> Product:
instance = Product.objects.filter(remote_url=item.url).first()
if not instance:
instance = Product()
instance.remote_url = item.url
instance.html_content = item.content
instance.image = item.image
instance.name = item.name
return instance
def execute(self, products: List[CrawlerResponse]) -> Dict[str, int]:
create, update = [], []
for item in products:
instance = self.process_item(item)
if instance.id:
update.append(instance)
else:
create.append(instance)
Product.objects.bulk_create(create)
for instance in update:
instance.save()
return {
'created': len(create),
'updated': len(update)
}
|
[
"asyncio.get_event_loop",
"products.models.Product.objects.filter",
"products.models.Product.objects.bulk_create",
"aiohttp.ClientSession",
"products.models.Product",
"bs4.BeautifulSoup"
] |
[((724, 761), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""html.parser"""'], {}), "(content, 'html.parser')\n", (737, 761), False, 'from bs4 import BeautifulSoup\n'), ((1364, 1401), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""html.parser"""'], {}), "(content, 'html.parser')\n", (1377, 1401), False, 'from bs4 import BeautifulSoup\n'), ((1790, 1814), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1812, 1814), False, 'import asyncio\n'), ((3039, 3074), 'products.models.Product.objects.bulk_create', 'Product.objects.bulk_create', (['create'], {}), '(create)\n', (3066, 3074), False, 'from products.models import Product\n'), ((430, 453), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (451, 453), False, 'import aiohttp\n'), ((2529, 2538), 'products.models.Product', 'Product', ([], {}), '()\n', (2536, 2538), False, 'from products.models import Product\n'), ((2429, 2472), 'products.models.Product.objects.filter', 'Product.objects.filter', ([], {'remote_url': 'item.url'}), '(remote_url=item.url)\n', (2451, 2472), False, 'from products.models import Product\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.urls import reverse_lazy
from django.views.generic import ListView
from portfolio.blog.models import Blog
from portfolio.blog.forms import BlogForm
from portfolio.views import BaseSudoView
class BlogFormView(BaseSudoView):
model = Blog
form_class = BlogForm
template_name = 'post_add.html'
success_url = reverse_lazy('portfolio:blog:blog_index')
class BlogView(ListView):
template_name = 'post_index.html'
def get_queryset(self):
if self.request.user.is_authenticated:
return Blog.objects.filter(user=self.request.user)
return
def get_context_data(self, **kwargs):
context = super(BlogView, self).get_context_data(**kwargs)
context['objects'] = Blog.objects.all()
return context
|
[
"django.urls.reverse_lazy",
"portfolio.blog.models.Blog.objects.filter",
"portfolio.blog.models.Blog.objects.all"
] |
[((400, 441), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""portfolio:blog:blog_index"""'], {}), "('portfolio:blog:blog_index')\n", (412, 441), False, 'from django.urls import reverse_lazy\n'), ((801, 819), 'portfolio.blog.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (817, 819), False, 'from portfolio.blog.models import Blog\n'), ((603, 646), 'portfolio.blog.models.Blog.objects.filter', 'Blog.objects.filter', ([], {'user': 'self.request.user'}), '(user=self.request.user)\n', (622, 646), False, 'from portfolio.blog.models import Blog\n')]
|
#
from QAPUBSUB.producer import publisher_routing
from QAPUBSUB.consumer import subscriber_routing
from QUANTAXIS.QAEngine import QA_Thread
from QA_OTGBroker import on_pong, on_message, on_error, subscribe_quote, on_close, login, peek
import websocket
import threading
import click
import time
import json
import pymongo
from QARealtimeCollector.util import fix_dict
from QARealtimeCollector.setting import mongo_ip, eventmq_ip
class QARTC_WsCollector(QA_Thread):
def __init__(self):
super().__init__()
self.ws = websocket.WebSocketApp('wss://openmd.shinnytech.com/t/md/front/mobile',
on_pong=on_pong,
on_message=self.on_message,
on_error=on_error,
on_close=on_close)
def _onopen(ws):
def run():
ws.send(peek())
threading.Thread(target=run, daemon=False).start()
self.quoteclient = pymongo.MongoClient(host=mongo_ip).QAREALTIME.realtimeQuote
self.ws.on_open = _onopen
self.data = {}
self.subscribe_list = ['SHFE.rb1910', 'DCE.j1909']
self.sub = subscriber_routing(host=eventmq_ip, exchange='QARealtime_Market', routing_key='future')
self.sub.callback = self.callback
threading.Thread(target=self.ws.run_forever,
name='market_websock', daemon=False).start()
threading.Thread(target=self.sub.start,
name='market_subscriber', daemon=True).start()
def on_message(self, message):
print(message)
message = json.loads(message)
if 'data' in message.keys():
data = message['data'][0]
if 'quotes' in data.keys():
data = data['quotes']
for items in data.keys():
try:
item = items.replace('.', '_')
if item not in self.data.keys():
self.data[item] = data[items]
else:
for keys in data[items].keys():
self.data[item][keys] = data[items][keys]
self.data[item]['instrument_id'] = item
self.quoteclient.update_one({'instrument_id': item},
{'$set': self.data[item]}, upsert=True)
except Exception as e:
print(e)
self.ws.send(peek())
def callback(self, a, b, c, data):
data = json.loads(data)
if data['topic'] == 'subscribe':
new_ins = data['code'].replace('_', '.').split(',')
import copy
old = len(self.subscribe_list)
self.subscribe_list.extend(new_ins)
self.subscribe_list = list(
set(self.subscribe_list))
if old < len(self.subscribe_list):
self.ws.send(subscribe_quote(','.join(self.subscribe_list)))
def run(self):
time.sleep(2)
self.ws.send(subscribe_quote('SHFE.rb1910,DCE.j1909'))
while True:
time.sleep(1)
if __name__ == "__main__":
QARTC_WsCollector().start()
|
[
"websocket.WebSocketApp",
"pymongo.MongoClient",
"threading.Thread",
"QAPUBSUB.consumer.subscriber_routing",
"json.loads",
"time.sleep",
"QA_OTGBroker.subscribe_quote",
"QA_OTGBroker.peek"
] |
[((536, 698), 'websocket.WebSocketApp', 'websocket.WebSocketApp', (['"""wss://openmd.shinnytech.com/t/md/front/mobile"""'], {'on_pong': 'on_pong', 'on_message': 'self.on_message', 'on_error': 'on_error', 'on_close': 'on_close'}), "('wss://openmd.shinnytech.com/t/md/front/mobile',\n on_pong=on_pong, on_message=self.on_message, on_error=on_error,\n on_close=on_close)\n", (558, 698), False, 'import websocket\n'), ((1222, 1313), 'QAPUBSUB.consumer.subscriber_routing', 'subscriber_routing', ([], {'host': 'eventmq_ip', 'exchange': '"""QARealtime_Market"""', 'routing_key': '"""future"""'}), "(host=eventmq_ip, exchange='QARealtime_Market',\n routing_key='future')\n", (1240, 1313), False, 'from QAPUBSUB.consumer import subscriber_routing\n'), ((1672, 1691), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (1682, 1691), False, 'import json\n'), ((2640, 2656), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2650, 2656), False, 'import json\n'), ((3112, 3125), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3122, 3125), False, 'import time\n'), ((2577, 2583), 'QA_OTGBroker.peek', 'peek', ([], {}), '()\n', (2581, 2583), False, 'from QA_OTGBroker import on_pong, on_message, on_error, subscribe_quote, on_close, login, peek\n'), ((3147, 3187), 'QA_OTGBroker.subscribe_quote', 'subscribe_quote', (['"""SHFE.rb1910,DCE.j1909"""'], {}), "('SHFE.rb1910,DCE.j1909')\n", (3162, 3187), False, 'from QA_OTGBroker import on_pong, on_message, on_error, subscribe_quote, on_close, login, peek\n'), ((3221, 3234), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3231, 3234), False, 'import time\n'), ((1027, 1061), 'pymongo.MongoClient', 'pymongo.MongoClient', ([], {'host': 'mongo_ip'}), '(host=mongo_ip)\n', (1046, 1061), False, 'import pymongo\n'), ((1360, 1446), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.ws.run_forever', 'name': '"""market_websock"""', 'daemon': '(False)'}), "(target=self.ws.run_forever, name='market_websock', daemon=\n False)\n", (1376, 1446), False, 'import threading\n'), ((1483, 1561), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.sub.start', 'name': '"""market_subscriber"""', 'daemon': '(True)'}), "(target=self.sub.start, name='market_subscriber', daemon=True)\n", (1499, 1561), False, 'import threading\n'), ((928, 934), 'QA_OTGBroker.peek', 'peek', ([], {}), '()\n', (932, 934), False, 'from QA_OTGBroker import on_pong, on_message, on_error, subscribe_quote, on_close, login, peek\n'), ((948, 990), 'threading.Thread', 'threading.Thread', ([], {'target': 'run', 'daemon': '(False)'}), '(target=run, daemon=False)\n', (964, 990), False, 'import threading\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import subprocess
from flask import Flask, render_template
from sh import git
app = Flask(__name__)
version = git("rev-parse", "--short", "HEAD").strip()
command = os.getenv("HEATLAMP_SCRIPT")
def validate():
"""
Validate the application configuration before launching.
"""
missing = []
if not command:
missing.append((
"HEATLAMP_SCRIPT",
"The shell script to execute when a webhook is triggered."
))
if missing:
print("Missing required configuration values:\n", file=sys.stderr)
for envvar, purpose in missing:
print(" {}: {}".format(envvar, purpose), file=sys.stderr)
print(file=sys.stderr)
sys.exit(1)
validate()
@app.route("/", methods=["GET"])
def status():
"""
Status check. Display the current version of heatlamp, some basic
diagnostics, and a simple form that may be used to manually trigger
a deployment.
"""
return render_template("status.html", version=version)
@app.route("/", methods=["POST", "PUT"])
def refresh():
"""
Webhook accepted. Perform the configured action.
"""
status = subprocess.call(["/bin/bash", command])
if status == 0:
return "success", 200
else:
return "failure", 500
if __name__ == "__main__":
app.run(host='0.0.0.0', port=10100)
|
[
"flask.Flask",
"subprocess.call",
"flask.render_template",
"sh.git",
"os.getenv",
"sys.exit"
] |
[((192, 207), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (197, 207), False, 'from flask import Flask, render_template\n'), ((273, 301), 'os.getenv', 'os.getenv', (['"""HEATLAMP_SCRIPT"""'], {}), "('HEATLAMP_SCRIPT')\n", (282, 301), False, 'import os\n'), ((1075, 1122), 'flask.render_template', 'render_template', (['"""status.html"""'], {'version': 'version'}), "('status.html', version=version)\n", (1090, 1122), False, 'from flask import Flask, render_template\n'), ((1264, 1303), 'subprocess.call', 'subprocess.call', (["['/bin/bash', command]"], {}), "(['/bin/bash', command])\n", (1279, 1303), False, 'import subprocess\n'), ((219, 254), 'sh.git', 'git', (['"""rev-parse"""', '"""--short"""', '"""HEAD"""'], {}), "('rev-parse', '--short', 'HEAD')\n", (222, 254), False, 'from sh import git\n'), ((814, 825), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (822, 825), False, 'import sys\n')]
|
"""
This module provides distance helper functions.
"""
import numpy as np
import diversipy
def distance_to_boundary(points, cuboid=None):
"""Calculate the distance of each point to the boundary of some cuboid.
This distance is simply the minimum of all differences between
a point and the lower and upper bounds. This function also checks if all
calculated distances are larger than zero. If not, some points must be
located outside the cuboid.
Parameters
----------
points : array_like
2-D array of `n` points.
cuboid : tuple of array_like, optional
Contains the min and max bounds of the considered cuboid. If
omitted, the unit hypercube is assumed.
Returns
-------
distances : numpy array
1-D array of `n` distances
"""
if cuboid is None:
cuboid = diversipy.cube.unitcube(points.shape[1])
min_bounds, max_bounds = cuboid
dists_to_min_bounds = (points - np.asarray(min_bounds)).min(axis=1)
dists_to_max_bounds = (np.asarray(max_bounds) - points).min(axis=1)
distances = np.minimum(dists_to_min_bounds, dists_to_max_bounds)
assert np.all(distances >= 0.0) # are all points contained in cuboid?
return distances
def distance_matrix(points1, points2, norm=2, max_dist=None):
"""Calculate the distance between each combination of points in two sets.
Parameters
----------
points1 : array_like
2-D array of `n1` points.
points2 : array_like
2-D array of `n2` points.
norm : int, optional
Norm to use for the distance, by default 2 (euclidean norm).
max_dist : array_like, optional
1-D array of largest possible distance in each dimension.
Providing these values has the consequence of treating the cuboid as a torus.
This is useful for eliminating edge effects induced by the lack of neighbor
points outside the bounds of the cuboid.d
Returns
-------
distances : numpy array
(`n1` x `n2`) array of distances
"""
points1 = np.atleast_2d(points1)
points2 = np.atleast_2d(points2)
assert points1.shape[1] == points2.shape[1]
diff = np.expand_dims(points1, 1) - np.expand_dims(points2, 0)
if max_dist is not None:
diff = np.abs(diff)
diff = np.minimum(diff, max_dist - diff)
assert (diff >= 0).all() # are all points inside the cuboid?
return np.linalg.norm(diff, axis=-1, ord=norm)
|
[
"numpy.minimum",
"numpy.abs",
"diversipy.cube.unitcube",
"numpy.asarray",
"numpy.expand_dims",
"numpy.linalg.norm",
"numpy.all",
"numpy.atleast_2d"
] |
[((1091, 1143), 'numpy.minimum', 'np.minimum', (['dists_to_min_bounds', 'dists_to_max_bounds'], {}), '(dists_to_min_bounds, dists_to_max_bounds)\n', (1101, 1143), True, 'import numpy as np\n'), ((1155, 1179), 'numpy.all', 'np.all', (['(distances >= 0.0)'], {}), '(distances >= 0.0)\n', (1161, 1179), True, 'import numpy as np\n'), ((2063, 2085), 'numpy.atleast_2d', 'np.atleast_2d', (['points1'], {}), '(points1)\n', (2076, 2085), True, 'import numpy as np\n'), ((2100, 2122), 'numpy.atleast_2d', 'np.atleast_2d', (['points2'], {}), '(points2)\n', (2113, 2122), True, 'import numpy as np\n'), ((2425, 2464), 'numpy.linalg.norm', 'np.linalg.norm', (['diff'], {'axis': '(-1)', 'ord': 'norm'}), '(diff, axis=-1, ord=norm)\n', (2439, 2464), True, 'import numpy as np\n'), ((854, 894), 'diversipy.cube.unitcube', 'diversipy.cube.unitcube', (['points.shape[1]'], {}), '(points.shape[1])\n', (877, 894), False, 'import diversipy\n'), ((2182, 2208), 'numpy.expand_dims', 'np.expand_dims', (['points1', '(1)'], {}), '(points1, 1)\n', (2196, 2208), True, 'import numpy as np\n'), ((2211, 2237), 'numpy.expand_dims', 'np.expand_dims', (['points2', '(0)'], {}), '(points2, 0)\n', (2225, 2237), True, 'import numpy as np\n'), ((2282, 2294), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (2288, 2294), True, 'import numpy as np\n'), ((2310, 2343), 'numpy.minimum', 'np.minimum', (['diff', '(max_dist - diff)'], {}), '(diff, max_dist - diff)\n', (2320, 2343), True, 'import numpy as np\n'), ((967, 989), 'numpy.asarray', 'np.asarray', (['min_bounds'], {}), '(min_bounds)\n', (977, 989), True, 'import numpy as np\n'), ((1030, 1052), 'numpy.asarray', 'np.asarray', (['max_bounds'], {}), '(max_bounds)\n', (1040, 1052), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
from brownie import accounts, network, project, config
OPENSEA_FORMAT = "https://testnets.opensea.io/assets/{}/{}"
sample_token_uri = "https://ipfs.io/ipfs/QmPwpUKU1KAxbuTCNBCLfE6N4EPWvyY7g2oBCjfLvtHvof?filename=picture.json"
def main():
print("testing...")
proj = project.load('./', name='NFT')
proj.load_config()
from brownie.project.NFT import ArtistPicture
minter_wallet = accounts.add(config['wallets']['from_key'])
network.connect('rinkeby')
if len(ArtistPicture) == 0:
picture_nft = ArtistPicture.deploy({'from': minter_wallet})
else:
picture_nft = ArtistPicture[-1]
token_id = picture_nft.tokenCounter()
transaction = picture_nft.createCollectible(sample_token_uri, {'from': minter_wallet})
transaction.wait(1)
print("Minting complete: {}".format(
OPENSEA_FORMAT.format(picture_nft.address, token_id)
))
if __name__ == "__main__":
main()
|
[
"brownie.project.NFT.ArtistPicture.deploy",
"brownie.network.connect",
"brownie.project.load",
"brownie.accounts.add"
] |
[((299, 329), 'brownie.project.load', 'project.load', (['"""./"""'], {'name': '"""NFT"""'}), "('./', name='NFT')\n", (311, 329), False, 'from brownie import accounts, network, project, config\n'), ((424, 467), 'brownie.accounts.add', 'accounts.add', (["config['wallets']['from_key']"], {}), "(config['wallets']['from_key'])\n", (436, 467), False, 'from brownie import accounts, network, project, config\n'), ((473, 499), 'brownie.network.connect', 'network.connect', (['"""rinkeby"""'], {}), "('rinkeby')\n", (488, 499), False, 'from brownie import accounts, network, project, config\n'), ((555, 600), 'brownie.project.NFT.ArtistPicture.deploy', 'ArtistPicture.deploy', (["{'from': minter_wallet}"], {}), "({'from': minter_wallet})\n", (575, 600), False, 'from brownie.project.NFT import ArtistPicture\n')]
|
from fabric.api import *
import re
env.hosts=['nimbus-gateway.eng.vmware.com']
env.user=''# Active Directory username
env.password=''# Active Directory password
vm_name=""
def find_vcenter(output):
for x in reversed(output.splitlines()):
if re.search('"done"',x):
pass
elif re.search('".*: %s"'%vm_name,x):
return "Found......"+x.strip('"')
break;
elif re.search('".*:.*"',x):
pass
else:
break;
return False
def check():
global vm_name
print("checking Sc datacenter........")
with hide('output','running'),settings(prompts={'\n> ':'/this.children.each{|k,v| begin; if v.is_a?(VIM); '+
'v.children.first[1].children["vms"].children["CoreOS"].children.each{|vm,_| p "#{k}: #{vm}";}; end; rescue; next; end;};p"done";exit;'
,'/dinesha> ': 'cd /'}):
output=run('NIMBUS_LOCATION=sc NIMBUS_CONTEXTS=cat,general /mts/git/bin/nimbus-rvc')
if output.return_code !=0:
return "Error checking sc2"
result=find_vcenter(output)
if result:
return result
print("Checking Wdc datacenter.....")
with hide('output','running'),settings(prompts={'\n> ':'/this.children.each{|k,v| begin; if v.is_a?(VIM); '+
'v.children.first[1].children["vms"].children["CoreOS"].children.each{|vm,_| p "#{k}: #{vm}";}; end; rescue; next; end;};p"done";exit;'
,'/dinesha> ': 'cd /'}):
output=run('NIMBUS_LOCATION=wdc NIMBUS_CONTEXTS=general /mts/git/bin/nimbus-rvc')
if output.return_code != 0:
return "Error checking wdc"
result = find_vcenter(output)
if result:
return result
return "No Matches"
def search():
global vm_name
vm_name=raw_input("Enter the minion name.....")
vm_name.strip("\n")
if len(vm_name)>0:
answer=execute(check)
answer=answer['nimbus-gateway.eng.vmware.com']
print(answer)
if __name__=='__main__':
search()
|
[
"re.search"
] |
[((252, 274), 're.search', 're.search', (['""""done\\""""', 'x'], {}), '(\'"done"\', x)\n', (261, 274), False, 'import re\n'), ((301, 335), 're.search', 're.search', (['(\'".*: %s"\' % vm_name)', 'x'], {}), '(\'".*: %s"\' % vm_name, x)\n', (310, 335), False, 'import re\n'), ((406, 429), 're.search', 're.search', (['"""".*:.*\\""""', 'x'], {}), '(\'".*:.*"\', x)\n', (415, 429), False, 'import re\n')]
|
"""Map from manufacturer to standard clusters for thermostatic valves."""
import logging
from zigpy.profiles import zha
import zigpy.types as t
from zigpy.zcl.clusters.general import Basic, Groups, Identify, Ota, Scenes, Time
from . import (
TuyaManufClusterAttributes,
TuyaPowerConfigurationCluster,
TuyaThermostat,
TuyaThermostatCluster,
TuyaUserInterfaceCluster,
)
from ..const import (
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
MODELS_INFO,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
# info from https://github.com/Koenkk/zigbee-herdsman-converters/blob/master/converters/common.js#L113
# and https://github.com/Koenkk/zigbee-herdsman-converters/blob/master/converters/fromZigbee.js#L362
SITERWELL_CHILD_LOCK_ATTR = 0x0107 # [0] unlocked [1] child-locked
SITERWELL_WINDOW_DETECT_ATTR = 0x0112 # [0] inactive [1] active
SITERWELL_VALVE_DETECT_ATTR = 0x0114 # [0] do not report [1] report
SITERWELL_VALVE_STATE_ATTR = 0x026D # [0,0,0,55] opening percentage
SITERWELL_TARGET_TEMP_ATTR = 0x0202 # [0,0,0,210] target room temp (decidegree)
SITERWELL_TEMPERATURE_ATTR = 0x0203 # [0,0,0,200] current room temp (decidegree)
SITERWELL_BATTERY_ATTR = 0x0215 # [0,0,0,98] battery charge
SITERWELL_MODE_ATTR = 0x0404 # [0] off [1] scheduled [2] manual
_LOGGER = logging.getLogger(__name__)
class SiterwellManufCluster(TuyaManufClusterAttributes):
"""Manufacturer Specific Cluster of some thermostatic valves."""
manufacturer_attributes = {
SITERWELL_CHILD_LOCK_ATTR: ("child_lock", t.uint8_t),
SITERWELL_WINDOW_DETECT_ATTR: ("window_detection", t.uint8_t),
SITERWELL_VALVE_DETECT_ATTR: ("valve_detect", t.uint8_t),
SITERWELL_VALVE_STATE_ATTR: ("valve_state", t.uint32_t),
SITERWELL_TARGET_TEMP_ATTR: ("target_temperature", t.uint32_t),
SITERWELL_TEMPERATURE_ATTR: ("temperature", t.uint32_t),
SITERWELL_BATTERY_ATTR: ("battery", t.uint32_t),
SITERWELL_MODE_ATTR: ("mode", t.uint8_t),
}
TEMPERATURE_ATTRS = {
SITERWELL_TEMPERATURE_ATTR: "local_temp",
SITERWELL_TARGET_TEMP_ATTR: "occupied_heating_setpoint",
}
def _update_attribute(self, attrid, value):
super()._update_attribute(attrid, value)
if attrid in self.TEMPERATURE_ATTRS:
self.endpoint.device.thermostat_bus.listener_event(
"temperature_change",
self.TEMPERATURE_ATTRS[attrid],
value * 10, # decidegree to centidegree
)
elif attrid == SITERWELL_MODE_ATTR:
self.endpoint.device.thermostat_bus.listener_event("mode_change", value)
self.endpoint.device.thermostat_bus.listener_event(
"state_change", value > 0
)
elif attrid == SITERWELL_VALVE_STATE_ATTR:
self.endpoint.device.thermostat_bus.listener_event("state_change", value)
elif attrid == SITERWELL_CHILD_LOCK_ATTR:
mode = 1 if value else 0
self.endpoint.device.ui_bus.listener_event("child_lock_change", mode)
elif attrid == SITERWELL_BATTERY_ATTR:
self.endpoint.device.battery_bus.listener_event("battery_change", value)
class SiterwellThermostat(TuyaThermostatCluster):
"""Thermostat cluster for some thermostatic valves."""
def map_attribute(self, attribute, value):
"""Map standardized attribute value to dict of manufacturer values."""
if attribute == "occupied_heating_setpoint":
# centidegree to decidegree
return {SITERWELL_TARGET_TEMP_ATTR: round(value / 10)}
if attribute in ("system_mode", "programing_oper_mode"):
if attribute == "system_mode":
system_mode = value
oper_mode = self._attr_cache.get(
self.attridx["programing_oper_mode"],
self.ProgrammingOperationMode.Simple,
)
else:
system_mode = self._attr_cache.get(
self.attridx["system_mode"], self.SystemMode.Heat
)
oper_mode = value
if system_mode == self.SystemMode.Off:
return {SITERWELL_MODE_ATTR: 0}
if system_mode == self.SystemMode.Heat:
if oper_mode == self.ProgrammingOperationMode.Schedule_programming_mode:
return {SITERWELL_MODE_ATTR: 1}
if oper_mode == self.ProgrammingOperationMode.Simple:
return {SITERWELL_MODE_ATTR: 2}
self.error("Unsupported value for ProgrammingOperationMode")
else:
self.error("Unsupported value for SystemMode")
def mode_change(self, value):
"""System Mode change."""
if value == 0:
self._update_attribute(self.attridx["system_mode"], self.SystemMode.Off)
return
if value == 1:
mode = self.ProgrammingOperationMode.Schedule_programming_mode
else:
mode = self.ProgrammingOperationMode.Simple
self._update_attribute(self.attridx["system_mode"], self.SystemMode.Heat)
self._update_attribute(self.attridx["programing_oper_mode"], mode)
class SiterwellUserInterface(TuyaUserInterfaceCluster):
"""HVAC User interface cluster for tuya electric heating thermostats."""
_CHILD_LOCK_ATTR = SITERWELL_CHILD_LOCK_ATTR
class SiterwellGS361(TuyaThermostat):
"""SiterwellGS361 Thermostatic radiator valve and clones."""
signature = {
# endpoint=1 profile=260 device_type=0 device_version=0 input_clusters=[0, 3]
# output_clusters=[3, 25]>
MODELS_INFO: [
("_TYST11_jeaxp72v", "eaxp72v"),
("_TYST11_kfvq6avy", "fvq6avy"),
],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.ON_OFF_SWITCH,
INPUT_CLUSTERS: [Basic.cluster_id, Identify.cluster_id],
OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],
}
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.THERMOSTAT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Identify.cluster_id,
SiterwellManufCluster,
SiterwellThermostat,
SiterwellUserInterface,
TuyaPowerConfigurationCluster,
],
OUTPUT_CLUSTERS: [Identify.cluster_id, Ota.cluster_id],
}
}
}
class MoesHY368(TuyaThermostat):
"""MoesHY368 Thermostatic radiator valve."""
signature = {
# endpoint=1 profile=260 device_type=81 device_version=0 input_clusters=[0, 4, 5, 61184]
# output_clusters=[10, 25]>
MODELS_INFO: [
("_TZE200_ckud7u2l", "TS0601"),
("_TZE200_kfvq6avy", "TS0601"),
],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
TuyaManufClusterAttributes.cluster_id,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
}
},
}
replacement = {
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
DEVICE_TYPE: zha.DeviceType.THERMOSTAT,
INPUT_CLUSTERS: [
Basic.cluster_id,
Groups.cluster_id,
Scenes.cluster_id,
SiterwellManufCluster,
SiterwellThermostat,
SiterwellUserInterface,
TuyaPowerConfigurationCluster,
],
OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
}
}
}
|
[
"logging.getLogger"
] |
[((1296, 1323), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1313, 1323), False, 'import logging\n')]
|
from panther_base_helpers import gsuite_details_lookup as details_lookup
from panther_base_helpers import gsuite_parameter_lookup as param_lookup
USER_SUSPENDED_EVENTS = {
'account_disabled_generic',
'account_disabled_spamming_through_relay',
'account_disabled_spamming',
'account_disabled_hijacked',
}
def rule(event):
if event['id'].get('applicationName') != 'login':
return False
return bool(details_lookup('account_warning', USER_SUSPENDED_EVENTS, event))
def title(event):
details = details_lookup('account_warning', USER_SUSPENDED_EVENTS, event)
user = param_lookup(details.get('parameters', {}), 'affected_email_address')
if not user:
user = '<UNKNOWN_USER>'
return 'User [{}]\'s account was disabled'.format(user)
|
[
"panther_base_helpers.gsuite_details_lookup"
] |
[((530, 593), 'panther_base_helpers.gsuite_details_lookup', 'details_lookup', (['"""account_warning"""', 'USER_SUSPENDED_EVENTS', 'event'], {}), "('account_warning', USER_SUSPENDED_EVENTS, event)\n", (544, 593), True, 'from panther_base_helpers import gsuite_details_lookup as details_lookup\n'), ((431, 494), 'panther_base_helpers.gsuite_details_lookup', 'details_lookup', (['"""account_warning"""', 'USER_SUSPENDED_EVENTS', 'event'], {}), "('account_warning', USER_SUSPENDED_EVENTS, event)\n", (445, 494), True, 'from panther_base_helpers import gsuite_details_lookup as details_lookup\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def load_initial_data(apps, schema_editor):
Locale = apps.get_model('base', 'Locale')
for locale_kwargs in LOCALES:
Locale.objects.create(**locale_kwargs)
Project = apps.get_model('base', 'Project')
project = Project.objects.create(
name='Pontoon Intro',
slug='pontoon-intro',
url=settings.SITE_URL + '/intro/',
links=True,
repository_type='git',
repository_url='https://github.com/mozilla/pontoon-intro.git',
info_brief=('This is a demo website, used for demonstration purposes '
'only. You can translate on the website itself by double '
'clicking on page elements. Access to advanced features '
'like translation memory and machine translation is '
'available by clicking on the menu icon in the top-left '
'corner.')
)
# The "historical" version of the Project model that this migration
# uses has trouble working with the ManyToManyField on the Project
# model. Our workaround is to use the auto-generated intermediate
# model directly to create the relation between project and locales.
locale = Locale.objects.get(code='en-GB')
Project.locales.through.objects.create(project_id=project.id, locale_id=locale.id)
class Migration(migrations.Migration):
dependencies = [
('base', '0001_initial'),
]
operations = [
migrations.RunPython(load_initial_data),
]
LOCALES = [
{
u'code': u'af',
u'name': u'Afrikaans',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'af-ZA', u'name': u'Afrikaans'
},
{
u'code': u'ak',
u'name': u'Akan',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'sq',
u'name': u'Albanian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'sq-AL', u'name': u'Albanian'
},
{
u'code': u'aln', u'name': u'Albanian Gheg'
},
{
u'code': u'am',
u'name': u'Amharic',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'am-ET', u'name': u'Amharic'
},
{
u'code': u'ar',
u'name': u'Arabic',
u'nplurals': u'6',
u'plural_rule': u'(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 && n%100<=10 ? 3 : n%100>=11 ? 4 : 5)'
},
{
u'code': u'ar-SA', u'name': u'Arabic'
},
{
u'code': u'an',
u'name': u'Aragonese',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'hy',
u'name': u'Armenian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'hy-AM', u'name': u'Armenian'
},
{
u'code': u'as', u'name': u'Assamese'
},
{
u'code': u'as-IN', u'name': u'Assamese'
},
{
u'code': u'ast',
u'name': u'Asturian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'az',
u'name': u'Azerbaijani',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'az-AZ', u'name': u'Azerbaijani'
},
{
u'code': u'bal', u'name': u'Balochi'
},
{
u'code': u'eu',
u'name': u'Basque',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'eu-ES', u'name': u'Basque'
},
{
u'code': u'be',
u'name': u'Belarusian',
u'nplurals': u'3',
u'plural_rule': u'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
},
{
u'code': u'be-BY', u'name': u'Belarusian'
},
{
u'code': u'be@tarask', u'name': u'Belarusian'
},
{
u'code': u'bn',
u'name': u'Bengali',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'bn-BD',
u'name': u'Bengali',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'bn-IN',
u'name': u'Bengali',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'bs',
u'name': u'Bosnian',
u'nplurals': u'3',
u'plural_rule': u'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
},
{
u'code': u'bs-BA', u'name': u'Bosnian'
},
{
u'code': u'br',
u'name': u'Breton',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'bg',
u'name': u'Bulgarian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'bg-BG', u'name': u'Bulgarian'
},
{
u'code': u'my',
u'name': u'Burmese',
u'nplurals': u'1',
u'plural_rule': u'0'
},
{
u'code': u'my-MM', u'name': u'Burmese'
},
{
u'code': u'ca',
u'name': u'Catalan',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ca-ES', u'name': u'Catalan'
},
{
u'code': u'ca@valencia', u'name': u'Catalan'
},
{
u'code': u'hne',
u'name': u'Chhattisgarhi',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'zh',
u'name': u'Chinese',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'zh-CN',
u'name': u'Chinese',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'zh-CN.GB2312', u'name': u'Chinese'
},
{
u'code': u'zh-HK',
u'name': u'Chinese',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'zh-TW',
u'name': u'Chinese',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'zh-TW.Big5', u'name': u'Chinese'
},
{
u'code': u'kw',
u'name': u'Cornish',
u'nplurals': u'4',
u'plural_rule': u'(n==1) ? 0 : (n==2) ? 1 : (n == 3) ? 2 : 3'
},
{
u'code': u'crh', u'name': u'Crimean Turkish'
},
{
u'code': u'hr',
u'name': u'Croatian',
u'nplurals': u'3',
u'plural_rule': u'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
},
{
u'code': u'hr-HR', u'name': u'Croatian'
},
{
u'code': u'cs',
u'name': u'Czech',
u'nplurals': u'3',
u'plural_rule': u'(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2'
},
{
u'code': u'cs-CZ', u'name': u'Czech'
},
{
u'code': u'da',
u'name': u'Danish',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'da-DK', u'name': u'Danish'
},
{
u'code': u'nl',
u'name': u'Dutch',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'nl-BE', u'name': u'Dutch'
},
{
u'code': u'nl-NL', u'name': u'Dutch'
},
{
u'code': u'dz',
u'name': u'Dzongkha',
u'nplurals': u'1',
u'plural_rule': u'0'
},
{
u'code': u'dz-BT', u'name': u'Dzongkha'
},
{
u'code': u'en',
u'name': u'English',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'en-AU',
u'name': u'English',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'en-CA',
u'name': u'English',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'en-IE',
u'name': u'English',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'en-ZA',
u'name': u'English',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'en-GB',
u'name': u'English',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'en-US',
u'name': u'English',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'eo',
u'name': u'Esperanto',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'et',
u'name': u'Estonian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'et-EE', u'name': u'Estonian'
},
{
u'code': u'fo',
u'name': u'Faroese',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'fo-FO', u'name': u'Faroese'
},
{
u'code': u'fil',
u'name': u'Filipino',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'fi',
u'name': u'Finnish',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'fi-FI', u'name': u'Finnish'
},
{
u'code': u'frp', u'name': u'Franco-Proven\xe7al'
},
{
u'code': u'fr',
u'name': u'French',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'fr-CA', u'name': u'French'
},
{
u'code': u'fr-FR', u'name': u'French'
},
{
u'code': u'fr-CH', u'name': u'French'
},
{
u'code': u'fur',
u'name': u'Friulian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ff',
u'name': u'Fulah',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'gd',
u'name': u'<NAME>',
u'nplurals': u'4',
u'plural_rule': u'(n==1 || n==11) ? 0 : (n==2 || n==12) ? 1 : (n > 2 && n < 20) ? 2 : 3'
},
{
u'code': u'gl',
u'name': u'Galician',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'gl-ES', u'name': u'Galician'
},
{
u'code': u'ka',
u'name': u'Georgian',
u'nplurals': u'1',
u'plural_rule': u'0'
},
{
u'code': u'ka-GE', u'name': u'Georgian'
},
{
u'code': u'de',
u'name': u'German',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'de-DE', u'name': u'German'
},
{
u'code': u'de-CH', u'name': u'German'
},
{
u'code': u'el',
u'name': u'Greek',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'el-GR', u'name': u'Greek'
},
{
u'code': u'gu',
u'name': u'Gujarati',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'gu-IN',
u'name': u'Gujarati',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'gun',
u'name': u'Gun',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'ht',
u'name': u'Haitian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ht-HT', u'name': u'Haitian'
},
{
u'code': u'ha',
u'name': u'Hausa',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'he',
u'name': u'Hebrew',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'he-IL', u'name': u'Hebrew'
},
{
u'code': u'hi',
u'name': u'Hindi',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'hi-IN',
u'name': u'Hindi',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'hu',
u'name': u'Hungarian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'hu-HU', u'name': u'Hungarian'
},
{
u'code': u'is',
u'name': u'Icelandic',
u'nplurals': u'2',
u'plural_rule': u'(n%10!=1 || n%100==11)'
},
{
u'code': u'is-IS', u'name': u'Icelandic'
},
{
u'code': u'ig', u'name': u'Igbo'
},
{
u'code': u'ilo', u'name': u'Iloko'
},
{
u'code': u'id',
u'name': u'Indonesian',
u'nplurals': u'1',
u'plural_rule': u'0'
},
{
u'code': u'id-ID', u'name': u'Indonesian'
},
{
u'code': u'ia',
u'name': u'Interlingua',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ga',
u'name': u'Irish',
u'nplurals': u'5',
u'plural_rule': u'n==1 ? 0 : n==2 ? 1 : n<7 ? 2 : n<11 ? 3 : 4'
},
{
u'code': u'ga-IE', u'name': u'Irish'
},
{
u'code': u'it',
u'name': u'Italian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'it-IT', u'name': u'Italian'
},
{
u'code': u'ja',
u'name': u'Japanese',
u'nplurals': u'1',
u'plural_rule': u'0'
},
{
u'code': u'ja-JP', u'name': u'Japanese'
},
{
u'code': u'jv',
u'name': u'Javanese',
u'nplurals': u'2',
u'plural_rule': u'(n != 0)'
},
{
u'code': u'kn',
u'name': u'Kannada',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'kn-IN', u'name': u'Kannada'
},
{
u'code': u'ks', u'name': u'Kashmiri'
},
{
u'code': u'ks-IN', u'name': u'Kashmiri'
},
{
u'code': u'csb',
u'name': u'Kashubian',
u'nplurals': u'3',
u'plural_rule': u'(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
},
{
u'code': u'kk', u'name': u'Kazakh', u'nplurals': u'1', u'plural_rule': u'0'
},
{
u'code': u'kk-KZ', u'name': u'Kazakh'
},
{
u'code': u'km', u'name': u'Khmer', u'nplurals': u'1', u'plural_rule': u'0'
},
{
u'code': u'km-KH', u'name': u'Khmer'
},
{
u'code': u'rw',
u'name': u'Kinyarwanda',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ky', u'name': u'Kirgyz', u'nplurals': u'1', u'plural_rule': u'0'
},
{
u'code': u'tlh', u'name': u'Klingon'
},
{
u'code': u'ko', u'name': u'Korean', u'nplurals': u'1', u'plural_rule': u'0'
},
{
u'code': u'ko-KR', u'name': u'Korean'
},
{
u'code': u'ku',
u'name': u'Kurdish',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ku-IQ', u'name': u'Kurdish'
},
{
u'code': u'lo', u'name': u'Lao', u'nplurals': u'1', u'plural_rule': u'0'
},
{
u'code': u'lo-LA', u'name': u'Lao'
},
{
u'code': u'la', u'name': u'Latin'
},
{
u'code': u'lv',
u'name': u'Latvian',
u'nplurals': u'3',
u'plural_rule': u'(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'
},
{
u'code': u'lv-LV', u'name': u'Latvian'
},
{
u'code': u'lij',
u'name': u'Ligurian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'li', u'name': u'Limburgian'
},
{
u'code': u'ln',
u'name': u'Lingala',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'lt',
u'name': u'Lithuanian',
u'nplurals': u'3',
u'plural_rule': u'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 or n%100>=20) ? 1 : 2)'
},
{
u'code': u'lt-LT', u'name': u'Lithuanian'
},
{
u'code': u'nds', u'name': u'Low German'
},
{
u'code': u'lb',
u'name': u'Luxembourgish',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'mk',
u'name': u'Macedonian',
u'nplurals': u'2',
u'plural_rule': u'(n==1 || n%10==1 ? 0 : 1)'
},
{
u'code': u'mk-MK', u'name': u'Macedonian'
},
{
u'code': u'mai',
u'name': u'Maithili',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'mg',
u'name': u'Malagasy',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'ms', u'name': u'Malay', u'nplurals': u'1', u'plural_rule': u'0'
},
{
u'code': u'ml',
u'name': u'Malayalam',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ml-IN', u'name': u'Malayalam'
},
{
u'code': u'ms-MY', u'name': u'Malay'
},
{
u'code': u'mt',
u'name': u'Maltese',
u'nplurals': u'4',
u'plural_rule': u'(n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'
},
{
u'code': u'mt-MT', u'name': u'Maltese'
},
{
u'code': u'mi',
u'name': u'Maori',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'arn',
u'name': u'Mapudungun',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'mr',
u'name': u'Marathi',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'mr-IN', u'name': u'Marathi'
},
{
u'code': u'mn',
u'name': u'Mongolian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'mn-MN', u'name': u'Mongolian'
},
{
u'code': u'nah',
u'name': u'Nahuatl',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'nr', u'name': u'Ndebele, South'
},
{
u'code': u'nap',
u'name': u'Neapolitan',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ne',
u'name': u'Nepali',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ne-NP', u'name': u'Nepali'
},
{
u'code': u'se',
u'name': u'Northern Sami',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'nso',
u'name': u'Northern Sotho',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'no',
u'name': u'Norwegian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'nb',
u'name': u'Norwegian Bokm\xe5l',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'nb-NO', u'name': u'Norwegian Bokm\xe5l'
},
{
u'code': u'no-NO', u'name': u'Norwegian'
},
{
u'code': u'nn',
u'name': u'Norwegian Nynorsk',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'nn-NO', u'name': u'Norwegian Nynorsk'
},
{
u'code': u'oc',
u'name': u'Occitan',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'or',
u'name': u'Oriya',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'or-IN', u'name': u'Oriya'
},
{
u'code': u'pa',
u'name': u'Panjabi',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'pa-IN', u'name': u'Panjabi'
},
{
u'code': u'pap',
u'name': u'Papiamento',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'fa',
u'name': u'Persian',
u'nplurals': u'1',
u'plural_rule': u'0'
},
{
u'code': u'fa-IR', u'name': u'Persian'
},
{
u'code': u'pms',
u'name': u'Piemontese',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'pl',
u'name': u'Polish',
u'nplurals': u'3',
u'plural_rule': u'(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
},
{
u'code': u'pl-PL', u'name': u'Polish'
},
{
u'code': u'pt',
u'name': u'Portuguese',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'pt-BR',
u'name': u'Portuguese',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'pt-PT', u'name': u'Portuguese'
},
{
u'code': u'ps',
u'name': u'Pushto',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ro',
u'name': u'Romanian',
u'nplurals': u'3',
u'plural_rule': u'(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2)'
},
{
u'code': u'ro-RO', u'name': u'Romanian'
},
{
u'code': u'rm',
u'name': u'Romansh',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ru',
u'name': u'Russian',
u'nplurals': u'3',
u'plural_rule': u'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
},
{
u'code': u'ru-RU', u'name': u'Russian'
},
{
u'code': u'sm', u'name': u'Samoan'
},
{
u'code': u'sc', u'name': u'Sardinian'
},
{
u'code': u'sco',
u'name': u'Scots',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'sr',
u'name': u'Serbian',
u'nplurals': u'3',
u'plural_rule': u'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
},
{
u'code': u'sr@latin',
u'name': u'Serbian',
u'nplurals': u'3',
u'plural_rule': u'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
},
{
u'code': u'sr-RS@latin', u'name': u'Serbian'
},
{
u'code': u'sr-RS', u'name': u'Serbian'
},
{
u'code': u'sn', u'name': u'Shona'
},
{
u'code': u'si',
u'name': u'Sinhala',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'si-LK', u'name': u'Sinhala'
},
{
u'code': u'sk',
u'name': u'Slovak',
u'nplurals': u'3',
u'plural_rule': u'(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2'
},
{
u'code': u'sk-SK', u'name': u'Slovak'
},
{
u'code': u'sl',
u'name': u'Slovenian',
u'nplurals': u'4',
u'plural_rule': u'(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'
},
{
u'code': u'sl-SI',
u'name': u'Slovenian',
u'nplurals': u'4',
u'plural_rule': u'(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'
},
{
u'code': u'so',
u'name': u'Somali',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'son',
u'name': u'Songhay',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'st',
u'name': u'Sotho, Southern',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'st-ZA', u'name': u'Sotho, Southern'
},
{
u'code': u'es-AR',
u'name': u'Spanish',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'es-BO', u'name': u'Spanish'
},
{
u'code': u'es',
u'name': u'Spanish',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'es-CL',
u'name': u'Spanish',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'es-CO', u'name': u'Spanish'
},
{
u'code': u'es-CR', u'name': u'Spanish'
},
{
u'code': u'es-DO', u'name': u'Spanish'
},
{
u'code': u'es-EC', u'name': u'Spanish'
},
{
u'code': u'es-SV', u'name': u'Spanish'
},
{
u'code': u'es-MX',
u'name': u'Spanish',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'es-NI', u'name': u'Spanish'
},
{
u'code': u'es-PA', u'name': u'Spanish'
},
{
u'code': u'es-PY', u'name': u'Spanish'
},
{
u'code': u'es-PE', u'name': u'Spanish'
},
{
u'code': u'es-PR', u'name': u'Spanish'
},
{
u'code': u'es-ES', u'name': u'Spanish'
},
{
u'code': u'es-UY', u'name': u'Spanish'
},
{
u'code': u'es-VE', u'name': u'Spanish'
},
{
u'code': u'su',
u'name': u'Sundanese',
u'nplurals': u'1',
u'plural_rule': u'0'
},
{
u'code': u'sw',
u'name': u'Swahili',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'sw-KE', u'name': u'Swahili'
},
{
u'code': u'sv',
u'name': u'Swedish',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'sv-FI', u'name': u'Swedish'
},
{
u'code': u'sv-SE',
u'name': u'Swedish',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'tl', u'name': u'Tagalog'
},
{
u'code': u'tl-PH', u'name': u'Tagalog'
},
{
u'code': u'tg',
u'name': u'Tajik',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'tg-TJ', u'name': u'Tajik'
},
{
u'code': u'ta',
u'name': u'Tamil',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ta-IN',
u'name': u'Tamil',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ta-LK', u'name': u'Tamil'
},
{
u'code': u'tt', u'name': u'Tatar', u'nplurals': u'1', u'plural_rule': u'0'
},
{
u'code': u'te',
u'name': u'Telugu',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'te-IN', u'name': u'Telugu'
},
{
u'code': u'th', u'name': u'Thai', u'nplurals': u'1', u'plural_rule': u'0'
},
{
u'code': u'th-TH', u'name': u'Thai'
},
{
u'code': u'bo',
u'name': u'Tibetan',
u'nplurals': u'1',
u'plural_rule': u'0'
},
{
u'code': u'bo-CN', u'name': u'Tibetan'
},
{
u'code': u'ti',
u'name': u'Tigrinya',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'to', u'name': u'Tongan'
},
{
u'code': u'tr',
u'name': u'Turkish',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'tr-TR', u'name': u'Turkish'
},
{
u'code': u'tk',
u'name': u'Turkmen',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ug', u'name': u'Uighur', u'nplurals': u'1', u'plural_rule': u'0'
},
{
u'code': u'uk',
u'name': u'Ukrainian',
u'nplurals': u'3',
u'plural_rule': u'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'
},
{
u'code': u'uk-UA', u'name': u'Ukrainian'
},
{
u'code': u'hsb',
u'name': u'Upper Sorbian',
u'nplurals': u'4',
u'plural_rule': u'(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'
},
{
u'code': u'ur',
u'name': u'Urdu',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'ur-PK', u'name': u'Urdu'
},
{
u'code': u'uz',
u'name': u'Uzbek',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u've', u'name': u'Venda'
},
{
u'code': u'vi',
u'name': u'Vietnamese',
u'nplurals': u'1',
u'plural_rule': u'0'
},
{
u'code': u'vi-VN', u'name': u'Vietnamese'
},
{
u'code': u'vls', u'name': u'Vlaams'
},
{
u'code': u'wa',
u'name': u'Walloon',
u'nplurals': u'2',
u'plural_rule': u'(n > 1)'
},
{
u'code': u'cy',
u'name': u'Welsh',
u'nplurals': u'4',
u'plural_rule': u'(n==1) ? 0 : (n==2) ? 1 : (n != 8 && n != 11) ? 2 : 3'
},
{
u'code': u'cy-GB', u'name': u'Welsh'
},
{
u'code': u'fy',
u'name': u'Western Frisian',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'fy-NL', u'name': u'Western Frisian'
},
{
u'code': u'wo',
u'name': u'Wolof',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'wo-SN', u'name': u'Wolof'
},
{
u'code': u'xh',
u'name': u'Xhosa',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'yi', u'name': u'Yiddish'
},
{
u'code': u'yo',
u'name': u'Yoruba',
u'nplurals': u'2',
u'plural_rule': u'(n != 1)'
},
{
u'code': u'zu', u'name': u'Zulu'
},
{
u'code': u'zu-ZA', u'name': u'Zulu'
},
{
u'code': u'dsb',
u'name': u'Lower Sorbian',
u'nplurals': u'4',
u'plural_rule': u'(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'
}]
|
[
"django.db.migrations.RunPython"
] |
[((1592, 1631), 'django.db.migrations.RunPython', 'migrations.RunPython', (['load_initial_data'], {}), '(load_initial_data)\n', (1612, 1631), False, 'from django.db import migrations\n')]
|
"""
@Project : DuReader
@Module : punctuation_sub.py
@Author : Deco [<EMAIL>]
@Created : 5/16/18 1:36 PM
@Desc :
"""
import string
import re
def clean_sentence(st):
intab = string.punctuation + '。,“”‘’():;?·—《》、'
outtab = ' '
table = str.maketrans(dict.fromkeys(intab, outtab))
st1 = st.translate(table)
return st1
def clean_sentence2(st):
"""
数据预处理
:param st: string
:return: string
"""
in_tab = r'''[{}]'''
out_tab = ' '
# out_tab = 'p'
clean = re.sub(in_tab, out_tab, st)
return clean
def clean_sentence3(st):
"""
数据预处理
:param st: string
:return: string
"""
in_tab = '[' + string.punctuation + '。,“”‘’():;?·—《》、' + ']'
out_tab = ''
clean = re.sub(in_tab, out_tab, st)
return clean
def clean_sentence4(st):
"""
数据预处理
:param st: string
:return: string
"""
in_tab = string.punctuation + '。,“”‘’():;?·—《》、'
clean = ''.join([c for c in st if c not in in_tab])
# string search, time complexity m*O(n)
return clean
def clean_sentence5(st):
"""
数据预处理
:param st: string
:return: string
"""
in_tab = string.punctuation + '。,“”‘’():;?·—《》、'
pt = set(p for p in in_tab)
clean = ''.join([c for c in st if c not in pt])
# hash search, time complexity m*O(1)
return clean
if __name__ == "__main__":
print(string.punctuation)
print(clean_sentence4('The period will be removed.'))
print(clean_sentence5('The period will be removed.'))
|
[
"re.sub"
] |
[((523, 550), 're.sub', 're.sub', (['in_tab', 'out_tab', 'st'], {}), '(in_tab, out_tab, st)\n', (529, 550), False, 'import re\n'), ((757, 784), 're.sub', 're.sub', (['in_tab', 'out_tab', 'st'], {}), '(in_tab, out_tab, st)\n', (763, 784), False, 'import re\n')]
|
# Author: Hologram <<EMAIL>>
#
# Copyright 2016 - Hologram (Konekt, Inc.)
#
# LICENSE: Distributed under the terms of the MIT License
#
# test_Modem.py - This file implements unit tests for the Modem class.
import pytest
import sys
sys.path.append(".")
sys.path.append("..")
sys.path.append("../..")
from Exceptions.HologramError import SerialError
from Hologram.Network.Modem import Modem
from UtilClasses import ModemResult
def mock_write(modem, message):
return True
def mock_read(modem):
return True
def mock_readline(modem, timeout=None, hide=False):
return ''
def mock_open_serial_port(modem, device_name=None):
return True
def mock_close_serial_port(modem):
return True
def mock_detect_usable_serial_port(modem, stop_on_first=True):
return '/dev/ttyUSB0'
@pytest.fixture
def no_serial_port(monkeypatch):
monkeypatch.setattr(Modem, '_read_from_serial_port', mock_read)
monkeypatch.setattr(Modem, '_readline_from_serial_port', mock_readline)
monkeypatch.setattr(Modem, '_write_to_serial_port_and_flush', mock_write)
monkeypatch.setattr(Modem, 'openSerialPort', mock_open_serial_port)
monkeypatch.setattr(Modem, 'closeSerialPort', mock_close_serial_port)
monkeypatch.setattr(Modem, 'detect_usable_serial_port', mock_detect_usable_serial_port)
# CONSTRUCTOR
def test_init_modem_no_args(no_serial_port):
modem = Modem()
assert(modem.timeout == 1)
assert(modem.socket_identifier == 0)
assert(modem.chatscript_file.endswith('/chatscripts/default-script'))
assert(modem._at_sockets_available == False)
assert(modem.description == 'Modem')
def test_init_modem_chatscriptfileoverride(no_serial_port):
modem = Modem(chatscript_file='test-chatscript')
assert(modem.timeout == 1)
assert(modem.socket_identifier == 0)
assert(modem.chatscript_file == 'test-chatscript')
def test_get_result_string(no_serial_port):
modem = Modem()
assert(modem.getResultString(0) == 'Modem returned OK')
assert(modem.getResultString(-1) == 'Modem timeout')
assert(modem.getResultString(-2) == 'Modem error')
assert(modem.getResultString(-3) == 'Modem response doesn\'t match expected return value')
assert(modem.getResultString(-99) == 'Unknown response code')
# PROPERTIES
def test_get_location(no_serial_port):
modem = Modem()
with pytest.raises(NotImplementedError) as e:
assert(modem.location == 'test location')
assert('This modem does not support this property' in str(e))
# DEBUGWRITE
def test_debugwrite(no_serial_port):
modem = Modem()
assert(modem.debug_out == '')
modem.debugwrite('test')
assert(modem.debug_out == 'test')
modem.debugwrite('test222', hide=True)
assert(modem.debug_out == 'test') # debug_out shouldn't change since hide is enabled.
# MODEMWRITE
def test_modemwrite(no_serial_port):
modem = Modem()
assert(modem.debug_out == '')
# use all method arg default values.
modem.modemwrite('test-cmd')
assert(modem.debug_out == 'test-cmd')
modem.modemwrite('test2', start=True)
assert(modem.debug_out == '[test2')
modem.modemwrite('test3', start=True, hide=True)
# This should be the same as the previous debug_out because hide is enabled.
assert(modem.debug_out == '[test2')
modem.modemwrite('test4', start=True, end=True)
assert(modem.debug_out == '[test4]')
modem.modemwrite('test5', start=True, at=True, seteq=True, read=True, end=True)
assert(modem.debug_out == '[ATtest5=?]')
# COMMAND_RESULT
def test_command_result(no_serial_port):
modem = Modem()
# OK with an empty response list.
assert(modem.result == ModemResult.OK)
result, resp = modem._command_result()
assert(result == ModemResult.OK)
assert(resp == [])
# OK with a response list of one element.
modem.result = ModemResult.OK
modem.response = ['test1']
result, resp = modem._command_result()
assert(result == ModemResult.OK)
assert(resp == 'test1') # should return just a string
# INVALID
modem.result = ModemResult.Invalid
modem.response = ['test1', 'test2', 'test3']
result, resp = modem._command_result()
assert(result == ModemResult.Invalid)
assert(resp == ['test1', 'test2', 'test3'])
# NOMATCH
modem.result = ModemResult.NoMatch
# This should still be a list since it's not ModemResult.OK.
modem.response = ['test1']
result, resp = modem._command_result()
assert(result == ModemResult.NoMatch)
assert(resp == ['test1'])
# ERROR
modem.result = ModemResult.Error
modem.response = []
result, resp = modem._command_result()
assert(result == ModemResult.Error)
assert(resp == [])
# TIMEOUT
modem.result = ModemResult.Timeout
result, resp = modem._command_result()
assert(result == ModemResult.Timeout)
assert(resp == [])
# HANDLEURC
# These are static methods that can be tested independently.
# We decided to wrap it all here under this test object
class TestModemProtectedStaticMethods():
def test_check_registered_string(self):
result = '+CREG: 2,5,"5585","404C790",6'
registered = Modem._check_registered_helper('+CREG', result)
assert(registered)
def test_registered_basic_unregistered_string(self):
# This should force strips left and right, but the return value will
# still be false since 3 is elem 1 in [2, 3, 2]
result = '2, 3, 2'
registered = Modem._check_registered_helper('+CREG', result)
assert(registered == False)
def test_registered_empty_string(self):
result = ''
with pytest.raises(SerialError) as e:
registered = Modem._check_registered_helper('+CREG', result)
def test_check_registered_short_list(self):
result = ['+CREG: 5,"5585","404C78A",6',
'+CREG: 5,"5585","404C790",6',
'+CREG: 2,5,"5585","404C790",6']
registered = Modem._check_registered_helper('+CREG', result)
assert(registered)
def test_registered_empty_list(self):
result = []
with pytest.raises(SerialError) as e:
registered = Modem._check_registered_helper('+CREG', result)
def test_check_registered_long_list(self):
result = ['+CREG: 5,"5585","404EF4D",6',
'+CREG: 5,"5585","404C816",6',
'+CREG: 5,"5585","404C790",6',
'+CREG: 5,"5585","404C816",6',
'+CREG: 5,"5585","404EF4D",6',
'+CREG: 5,"5585","404C78A",6',
'+CREG: 5,"5585","404C790",6',
'+CREG: 5,"5585","404C816",6',
'+CREG: 2',
'+CREG: 5,"5585","404C790",6',
'+CREG: 2,5,"5585","404C790",6']
registered = Modem._check_registered_helper('+CREG', result)
assert(registered)
|
[
"sys.path.append",
"pytest.raises",
"Hologram.Network.Modem.Modem._check_registered_helper",
"Hologram.Network.Modem.Modem"
] |
[((234, 254), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (249, 254), False, 'import sys\n'), ((255, 276), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (270, 276), False, 'import sys\n'), ((277, 301), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (292, 301), False, 'import sys\n'), ((1379, 1386), 'Hologram.Network.Modem.Modem', 'Modem', ([], {}), '()\n', (1384, 1386), False, 'from Hologram.Network.Modem import Modem\n'), ((1696, 1736), 'Hologram.Network.Modem.Modem', 'Modem', ([], {'chatscript_file': '"""test-chatscript"""'}), "(chatscript_file='test-chatscript')\n", (1701, 1736), False, 'from Hologram.Network.Modem import Modem\n'), ((1921, 1928), 'Hologram.Network.Modem.Modem', 'Modem', ([], {}), '()\n', (1926, 1928), False, 'from Hologram.Network.Modem import Modem\n'), ((2330, 2337), 'Hologram.Network.Modem.Modem', 'Modem', ([], {}), '()\n', (2335, 2337), False, 'from Hologram.Network.Modem import Modem\n'), ((2574, 2581), 'Hologram.Network.Modem.Modem', 'Modem', ([], {}), '()\n', (2579, 2581), False, 'from Hologram.Network.Modem import Modem\n'), ((2883, 2890), 'Hologram.Network.Modem.Modem', 'Modem', ([], {}), '()\n', (2888, 2890), False, 'from Hologram.Network.Modem import Modem\n'), ((3598, 3605), 'Hologram.Network.Modem.Modem', 'Modem', ([], {}), '()\n', (3603, 3605), False, 'from Hologram.Network.Modem import Modem\n'), ((2347, 2381), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (2360, 2381), False, 'import pytest\n'), ((5179, 5226), 'Hologram.Network.Modem.Modem._check_registered_helper', 'Modem._check_registered_helper', (['"""+CREG"""', 'result'], {}), "('+CREG', result)\n", (5209, 5226), False, 'from Hologram.Network.Modem import Modem\n'), ((5493, 5540), 'Hologram.Network.Modem.Modem._check_registered_helper', 'Modem._check_registered_helper', (['"""+CREG"""', 'result'], {}), "('+CREG', result)\n", (5523, 5540), False, 'from Hologram.Network.Modem import Modem\n'), ((5980, 6027), 'Hologram.Network.Modem.Modem._check_registered_helper', 'Modem._check_registered_helper', (['"""+CREG"""', 'result'], {}), "('+CREG', result)\n", (6010, 6027), False, 'from Hologram.Network.Modem import Modem\n'), ((6828, 6875), 'Hologram.Network.Modem.Modem._check_registered_helper', 'Modem._check_registered_helper', (['"""+CREG"""', 'result'], {}), "('+CREG', result)\n", (6858, 6875), False, 'from Hologram.Network.Modem import Modem\n'), ((5655, 5681), 'pytest.raises', 'pytest.raises', (['SerialError'], {}), '(SerialError)\n', (5668, 5681), False, 'import pytest\n'), ((5713, 5760), 'Hologram.Network.Modem.Modem._check_registered_helper', 'Modem._check_registered_helper', (['"""+CREG"""', 'result'], {}), "('+CREG', result)\n", (5743, 5760), False, 'from Hologram.Network.Modem import Modem\n'), ((6131, 6157), 'pytest.raises', 'pytest.raises', (['SerialError'], {}), '(SerialError)\n', (6144, 6157), False, 'import pytest\n'), ((6189, 6236), 'Hologram.Network.Modem.Modem._check_registered_helper', 'Modem._check_registered_helper', (['"""+CREG"""', 'result'], {}), "('+CREG', result)\n", (6219, 6236), False, 'from Hologram.Network.Modem import Modem\n')]
|
from app.Empleados import Empleados #en from ya declaro el modulo
from app.Ejecutivo import Ejecutivo
import time
if __name__== "__main__":
# este es el main
print(__name__)
# Paso los parametros... 1 Modulo, 2 Clase, 3 Parametro
e1 = Empleados("Juan", 2500)
# Creo el segundo objeto
e2 = Empleados("Maria", 4250)
#print("Empleados Totales: " + str(Empleados.__conteo))
e1.mostrar_conteo()
print("Informacion de Empleados")
e1.mostrar_empleado()
e2.mostrar_empleado()
# creo OBJETO de clase EJECUTIVO
ex1 = Ejecutivo("Petra", 15000)
# creo OBJETO de clase EJECUTIVO, Y OVERLOAD le paso el parametro SALUDO para visualizar el atributo xq es privado
ex1.mostrar_empleado("Sr(a)")
ex1.mostrar_conteo()
ex1.mandar()
ex1_salario_neto = ex1.calcular_salario_neto()
#modo mejor: {}metodo nuevo ,{:.3f}metodo viejo, aqui le doy 3 dec por ejemplo
#print("El Sr(a). {} tiene salario neto de ${:.2f}".format(ex1.__nombre, ex1_salario_neto))
e2_salario_neto = e2.calcular_salario_neto()
# modo mejor: {}metodo nuevo ,{:.3f}metodo viejo, aqui le doy 3 dec por ejemplo
#print("El Sr(a). {} tiene salario neto de ${:.2f}".format(e2.nombre,e2_salario_neto))
# print("El Sr(a)." + ex1.nombre +" tiene salario neto" + str(ex1_salario_neto)) #modo 1
# print("El Sr(a). %s tiene salario neto de $%2f." % (ex1.nombre, ex1_salario_neto)) #modo 2
#time.sleep(10) #este es una mala practica porque mato el objeto
#del e2
#e2.mostrar_empleado()
|
[
"app.Ejecutivo.Ejecutivo",
"app.Empleados.Empleados"
] |
[((261, 284), 'app.Empleados.Empleados', 'Empleados', (['"""Juan"""', '(2500)'], {}), "('Juan', 2500)\n", (270, 284), False, 'from app.Empleados import Empleados\n'), ((325, 349), 'app.Empleados.Empleados', 'Empleados', (['"""Maria"""', '(4250)'], {}), "('Maria', 4250)\n", (334, 349), False, 'from app.Empleados import Empleados\n'), ((582, 607), 'app.Ejecutivo.Ejecutivo', 'Ejecutivo', (['"""Petra"""', '(15000)'], {}), "('Petra', 15000)\n", (591, 607), False, 'from app.Ejecutivo import Ejecutivo\n')]
|
import os.path as osp
from unittest import TestCase
import matplotlib.pyplot as plt
from pylinac import CatPhan503, CatPhan504, CatPhan600
from pylinac.core.geometry import Point
from tests.utils import save_file, LoadingTestBase, LocationMixin
TEST_DIR = osp.join(osp.dirname(__file__), 'test_files', 'CBCT')
plt.close('all')
class CBCTLoading(LoadingTestBase, TestCase):
klass = CatPhan504
constructor_input = osp.join(TEST_DIR, 'Pelvis')
demo_load_method = 'from_demo_images'
url = 'CatPhan504.zip'
zip = osp.join(TEST_DIR, 'CBCT_4.zip')
class GeneralTests(TestCase):
"""Test general things when using cbct module."""
def setUp(self):
self.cbct = CatPhan504.from_demo_images()
def test_demo(self):
"""Run the demo to make sure it works."""
self.cbct.run_demo()
def test_helpers(self):
"""Test the various helper methods."""
self.cbct.analyze()
self.cbct._return_results()
def test_phan_center(self):
"""Test locations of the phantom center."""
known_phan_center = Point(257, 255)
self.cbct.analyze()
self.assertAlmostEqual(self.cbct.ctp404.phan_center.x, known_phan_center.x, delta=0.7)
self.assertAlmostEqual(self.cbct.ctp404.phan_center.y, known_phan_center.y, delta=0.7)
class PlottingSaving(TestCase):
@classmethod
def setUpClass(cls):
cls.cbct = CatPhan504.from_demo_images()
cls.cbct.analyze()
@classmethod
def tearDownClass(cls):
plt.close('all')
def test_save_image(self):
"""Test that saving an image does something."""
for method in ['save_analyzed_image', 'save_analyzed_subimage']:
methodcall = getattr(self.cbct, method)
save_file(methodcall)
def test_plot_images(self):
"""Test the various plotting functions."""
self.cbct.plot_analyzed_image()
for item in ['hu', 'un', 'mtf', 'sp', 'prof', 'lin', 'lc']:
self.cbct.plot_analyzed_subimage(item)
self.cbct.plot_analyzed_subimage('lin', delta=False)
with self.assertRaises(ValueError):
self.cbct.plot_analyzed_subimage('sr')
class CBCTMixin(LocationMixin):
"""A mixin to use for testing Varian CBCT scans; does not inherit from TestCase as it would be run
otherwise."""
catphan = CatPhan504
check_uid = True
origin_slice = 0
file_path = []
dir_location = TEST_DIR
hu_tolerance = 40
scaling_tolerance = 1
zip = True
expected_roll = 0
hu_values = {}
unif_values = {}
mtf_values = {}
avg_line_length = 50
slice_thickness = 2
lowcon_visible = 0
@classmethod
def setUpClass(cls):
filename = cls.get_filename()
if cls.zip:
cls.cbct = cls.catphan.from_zip(filename)
else:
cls.cbct = cls.catphan(filename)
cls.cbct.analyze(cls.hu_tolerance, cls.scaling_tolerance)
print("Num of CBCT images: {}".format(len(cls.cbct.dicom_stack)))
@classmethod
def tearDownClass(cls):
# somewhere there is a memory leak if ``cbct`` isn't deleted.
delattr(cls, 'cbct')
def test_slice_thickness(self):
"""Test the slice thickness."""
self.assertAlmostEqual(self.cbct.ctp404.meas_slice_thickness, float(self.cbct.dicom_stack.metadata.SliceThickness), delta=0.3)
def test_lowcontrast_bubbles(self):
"""Test the number of low contrast bubbles visible."""
if not isinstance(self.cbct, CatPhan503):
self.assertAlmostEqual(self.cbct.ctp515.rois_visible, self.lowcon_visible, delta=1)
def test_slice_locations(self):
"""Test the locations of the slices of interest."""
self.assertAlmostEqual(self.cbct.origin_slice, self.origin_slice, delta=1)
def test_phantom_roll(self):
"""Test the roll of the phantom."""
self.assertAlmostEqual(self.cbct.catphan_roll, self.expected_roll, delta=0.3)
def test_HU_values(self):
"""Test HU values."""
for key, roi in self.cbct.ctp404.hu_rois.items():
exp_val = self.hu_values[key]
meas_val = roi.pixel_value
self.assertAlmostEqual(exp_val, meas_val, delta=5)
def test_uniformity_values(self):
"""Test Uniformity HU values."""
for key, exp_val in self.unif_values.items():
meas_val = self.cbct.ctp486.rois[key].pixel_value
self.assertAlmostEqual(exp_val, meas_val, delta=5)
def test_geometry_line_length(self):
"""Test the geometry distances."""
self.assertAlmostEqual(self.avg_line_length, self.cbct.ctp404.avg_line_length, delta=0.1)
def test_MTF_values(self):
"""Test MTF values."""
for key, exp_mtf in self.mtf_values.items():
meas_mtf = self.cbct.ctp528.mtf(key)
self.assertAlmostEqual(exp_mtf, meas_mtf, delta=0.1)
def test_pdf(self):
save_file(self.cbct.publish_pdf, 'temp')
class CBCTDemo(CBCTMixin, TestCase):
"""Test the CBCT demo (Varian high quality head protocol)."""
expected_roll = -0.3
origin_slice = 32
hu_values = {'Poly': -45, 'Acrylic': 117, 'Delrin': 341, 'Air': -998, 'Teflon': 997, 'PMP': -200, 'LDPE': -103}
unif_values = {'Center': 17, 'Left': 10, 'Right': 0, 'Top': 6, 'Bottom': 6}
mtf_values = {80: 0.64, 90: 0.61, 60: 0.85, 70: 0.74, 95: 0.45}
avg_line_length = 49.92
lowcon_visible = 3
@classmethod
def setUpClass(cls):
cls.cbct = CatPhan504.from_demo_images()
cls.cbct.analyze()
class CBCT4(CBCTMixin, TestCase):
"""A Varian CBCT dataset"""
file_path = ['CBCT_4.zip']
expected_roll = -2.57
origin_slice = 31
hu_values = {'Poly': -33, 'Acrylic': 119, 'Delrin': 335, 'Air': -979, 'Teflon': 970, 'PMP': -185, 'LDPE': -94}
unif_values = {'Center': 17, 'Left': 10, 'Right': 22, 'Top': 18, 'Bottom': 13}
mtf_values = {80: 0.47, 90: 0.39, 60: 0.63, 70: 0.55, 95: 0.3}
lowcon_visible = 3
class Elekta2(CBCTMixin, TestCase):
"""An Elekta CBCT dataset"""
catphan = CatPhan503
file_path = ['Elekta_2.zip']
origin_slice = 162
hu_values = {'Poly': -319, 'Acrylic': -224, 'Delrin': -91, 'Air': -863, 'Teflon': 253, 'PMP': -399, 'LDPE': -350}
unif_values = {'Center': -285, 'Left': -279, 'Right': -278, 'Top': -279, 'Bottom': -279}
mtf_values = {80: 0.53, 90: 0.44, 60: 0.74, 70: 0.63, 95: 0.36}
class CatPhan600_2(CBCTMixin, TestCase):
"""An Elekta CBCT dataset"""
catphan = CatPhan600
file_path = ['zzCAT201602.zip']
expected_roll = -0.64
origin_slice = 34
hu_values = {'Poly': -29, 'Acrylic': 123, 'Delrin': 336, 'Air': -932, 'Teflon': 897, 'PMP': -164, 'LDPE': -80}
hu_passed = False
unif_values = {'Center': 14, 'Left': 15, 'Right': 15, 'Top': 16, 'Bottom': 13}
mtf_values = {80: 0.55, 90: 0.45, 60: 0.7, 70: 0.63, 95: 0.46}
avg_line_length = 50.02
|
[
"tests.utils.save_file",
"os.path.join",
"matplotlib.pyplot.close",
"os.path.dirname",
"pylinac.CatPhan504.from_demo_images",
"pylinac.core.geometry.Point"
] |
[((313, 329), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (322, 329), True, 'import matplotlib.pyplot as plt\n'), ((268, 289), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (279, 289), True, 'import os.path as osp\n'), ((425, 453), 'os.path.join', 'osp.join', (['TEST_DIR', '"""Pelvis"""'], {}), "(TEST_DIR, 'Pelvis')\n", (433, 453), True, 'import os.path as osp\n'), ((533, 565), 'os.path.join', 'osp.join', (['TEST_DIR', '"""CBCT_4.zip"""'], {}), "(TEST_DIR, 'CBCT_4.zip')\n", (541, 565), True, 'import os.path as osp\n'), ((694, 723), 'pylinac.CatPhan504.from_demo_images', 'CatPhan504.from_demo_images', ([], {}), '()\n', (721, 723), False, 'from pylinac import CatPhan503, CatPhan504, CatPhan600\n'), ((1082, 1097), 'pylinac.core.geometry.Point', 'Point', (['(257)', '(255)'], {}), '(257, 255)\n', (1087, 1097), False, 'from pylinac.core.geometry import Point\n'), ((1412, 1441), 'pylinac.CatPhan504.from_demo_images', 'CatPhan504.from_demo_images', ([], {}), '()\n', (1439, 1441), False, 'from pylinac import CatPhan503, CatPhan504, CatPhan600\n'), ((1523, 1539), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1532, 1539), True, 'import matplotlib.pyplot as plt\n'), ((4951, 4991), 'tests.utils.save_file', 'save_file', (['self.cbct.publish_pdf', '"""temp"""'], {}), "(self.cbct.publish_pdf, 'temp')\n", (4960, 4991), False, 'from tests.utils import save_file, LoadingTestBase, LocationMixin\n'), ((5521, 5550), 'pylinac.CatPhan504.from_demo_images', 'CatPhan504.from_demo_images', ([], {}), '()\n', (5548, 5550), False, 'from pylinac import CatPhan503, CatPhan504, CatPhan600\n'), ((1765, 1786), 'tests.utils.save_file', 'save_file', (['methodcall'], {}), '(methodcall)\n', (1774, 1786), False, 'from tests.utils import save_file, LoadingTestBase, LocationMixin\n')]
|
# -*- coding: utf-8 -*-
from collections import defaultdict
class SystemMetricsGrabber:
def __init__(self):
self.epoch = -1
self.metrics = {}
def update(self, **kwargs):
self.metrics[self.epoch].update(**kwargs)
def update_epoch(self):
self.epoch += 1
self.metrics[self.epoch] = MetricsMeter()
def state_dict(self):
metrics_state_dict = {}
for e in range(self.epoch + 1):
metrics_state_dict[e] = self.metrics[e].state_dict()
return {
'epoch': self.epoch,
'metrics': metrics_state_dict,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict['epoch']
for e, metrics_state_dict in state_dict['metrics'].items():
self.metrics[e] = MetricsMeter()
self.metrics[e].load_state_dict(metrics_state_dict)
class MetricsGrabber:
def __init__(self):
self.epoch = -1
self.is_train = True
self.train_metrics = {}
self.valid_metrics = {}
def get_best_epoch(self, key, mode):
best_metric_value, best_epoch = None, None
for e in range(self.epoch + 1):
metric_value = self.valid_metrics[e].avg[key]
if e == 0:
best_metric_value, best_epoch = metric_value, e
elif mode == 'min':
if metric_value < best_metric_value:
best_metric_value, best_epoch = metric_value, e
elif mode == 'max':
if metric_value > best_metric_value:
best_metric_value, best_epoch = metric_value, e
else:
raise ValueError('Incorrect mode')
return {'key': key, 'mode': mode, 'epoch': best_epoch, 'metric_value': best_metric_value}
def get_last_epoch(self, key):
return {'key': key, 'epoch': self.epoch, 'metric_value': self.valid_metrics[self.epoch].avg[key]}
def update(self, **kwargs):
if self.is_train:
self.train_metrics[self.epoch].update(**kwargs)
else:
self.valid_metrics[self.epoch].update(**kwargs)
def update_epoch(self):
self.epoch += 1
self.train_metrics[self.epoch] = MetricsMeter()
self.valid_metrics[self.epoch] = MetricsMeter()
def state_dict(self):
train_metrics_state_dict, valid_metrics_state_dict = {}, {}
for e in range(self.epoch + 1):
train_metrics_state_dict[e] = self.train_metrics[e].state_dict()
valid_metrics_state_dict[e] = self.valid_metrics[e].state_dict()
return {
'epoch': self.epoch,
'is_train': self.is_train,
'train_metrics': train_metrics_state_dict,
'valid_metrics': valid_metrics_state_dict,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict['epoch']
self.is_train = state_dict['is_train']
for e, metrics_state_dict in state_dict['train_metrics'].items():
self.train_metrics[e] = MetricsMeter()
self.train_metrics[e].load_state_dict(metrics_state_dict)
for e, metrics_state_dict in state_dict['valid_metrics'].items():
self.valid_metrics[e] = MetricsMeter()
self.valid_metrics[e].load_state_dict(metrics_state_dict)
class MetricsMeter:
def __init__(self):
self.avg = defaultdict(float)
self.sum = defaultdict(float)
self.history = defaultdict(list)
def update(self, **kwargs):
for key, value in kwargs.items():
self.history[key].append(value)
self.sum[key] += value
self.avg[key] = self.sum[key] / len(self.history[key])
def state_dict(self):
history_state_dict = {}
avg_state_dict = {}
sum_state_dict = {}
for key, value in self.history.items():
history_state_dict[key] = value
for key, value in self.avg.items():
avg_state_dict[key] = value
for key, value in self.sum.items():
sum_state_dict[key] = value
return {
'history': history_state_dict,
'avg': avg_state_dict,
'sum': sum_state_dict,
}
def load_state_dict(self, state_dict):
for key, value in state_dict['history'].items():
self.history[key] = value
for key, value in state_dict['avg'].items():
self.avg[key] = value
for key, value in state_dict['sum'].items():
self.sum[key] = value
|
[
"collections.defaultdict"
] |
[((3383, 3401), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (3394, 3401), False, 'from collections import defaultdict\n'), ((3421, 3439), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (3432, 3439), False, 'from collections import defaultdict\n'), ((3463, 3480), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3474, 3480), False, 'from collections import defaultdict\n')]
|
import pytest
import brightwind as bw
DATA = bw.load_csv(bw.demo_datasets.demo_data)
DATA = bw.apply_cleaning(DATA, bw.demo_datasets.demo_cleaning_file)
WSPD_COLS = ['Spd80mN', 'Spd80mS', 'Spd60mN', 'Spd60mS', 'Spd40mN', 'Spd40mS']
WDIR_COLS = ['Dir78mS', 'Dir58mS', 'Dir38mS']
def test_average():
# Specify columns in data which contain the anemometer measurements from which to calculate shear
anemometers = DATA[['Spd80mN', 'Spd60mN', 'Spd40mN']]
# Specify the heights of these anemometers
heights = [80, 60, 40]
# Test initialisation
shear_avg_power_law = bw.Shear.Average(anemometers, heights)
shear_avg_log_law = bw.Shear.Average(anemometers, heights, calc_method='log_law')
# Test attributes
assert round(shear_avg_power_law.alpha, 4) == 0.1434
assert round(shear_avg_log_law.roughness, 4) == 0.0549
# Test apply
shear_avg_power_law.apply(DATA['Spd80mN'], 40, 60)
shear_avg_log_law.apply(DATA['Spd80mN'], 40, 60)
assert True
# Test specific values
wspds = [7.74, 8.2, 8.57]
heights = [60, 80, 100]
specific_test = bw.Shear.Average(wspds, heights)
assert round(specific_test.alpha, 9) == 0.199474297
wspds = [8, 8.365116]
heights = [80, 100]
specific_test = bw.Shear.Average(wspds, heights)
assert round(specific_test.alpha, 1) == 0.2
specific_test_log = bw.Shear.Average(wspds, heights, calc_method='log_law')
assert round(specific_test_log.roughness, 9) == 0.602156994
def test_by_sector():
# Specify columns in data which contain the anemometer measurements from which to calculate shear
anemometers = DATA[['Spd80mN', 'Spd60mN', 'Spd40mN']]
# Specify the heights of these anemometers
heights = [80, 60, 40]
# Specify directions
directions = DATA['Dir78mS']
# custom bins
custom_bins = [0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360]
# Test initialisation
shear_by_sector_power_law = bw.Shear.BySector(anemometers, heights, directions)
shear_by_sector_log_law = bw.Shear.BySector(anemometers, heights, directions, calc_method='log_law')
shear_by_sector_custom_bins = bw.Shear.BySector(anemometers, heights, directions,
direction_bin_array=custom_bins)
# test attributes
shear_by_sector_power_law.plot
assert round(shear_by_sector_power_law.alpha.mean(), 4) == 0.1235
shear_by_sector_custom_bins.plot
assert round(shear_by_sector_custom_bins.alpha.mean(), 4) == 0.1265
# Test apply
shear_by_sector_power_law.apply(DATA['Spd80mN'], directions, 40, 60)
shear_by_sector_log_law.apply(DATA['Spd80mN'], directions, 40, 60)
shear_by_sector_custom_bins.apply(DATA['Spd80mN'], directions, 40, 60)
assert True
def test_time_of_day():
# Specify columns in data which contain the anemometer measurements from which to calculate shear
anemometers = DATA[['Spd80mN', 'Spd60mN', 'Spd40mN']]
# Specify the heights of these anemometers
heights = [80, 60, 40]
# Test initialisation
shear_by_tod_power_law = bw.Shear.TimeOfDay(anemometers, heights)
shear_by_tod_power_law = bw.Shear.TimeOfDay(anemometers, heights, by_month=False)
shear_by_tod_log_law = bw.Shear.TimeOfDay(anemometers, heights, calc_method='log_law')
shear_by_tod_log_law = bw.Shear.TimeOfDay(anemometers, heights, by_month=False, calc_method='log_law')
# Test attributes
assert round(shear_by_tod_power_law.alpha.mean()[0], 4) == 0.1473
shear_by_tod_log_law.roughness
# Test apply
shear_by_tod_power_law.apply(DATA['Spd80mN'], 40, 60)
shear_by_tod_log_law.apply(DATA['Spd80mN'], 40, 60)
assert True
def test_time_series():
# Specify columns in data which contain the anemometer measurements from which to calculate shear
anemometers = DATA[['Spd80mN', 'Spd60mN', 'Spd40mN']]
# Specify the heights of these anemometers
heights = [80, 60, 40]
anemometers = anemometers[:100]
# Test initialisation
shear_by_ts_power_law = bw.Shear.TimeSeries(anemometers, heights)
shear_by_ts_power_law = bw.Shear.TimeSeries(anemometers, heights, maximise_data=True)
shear_by_ts_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law')
shear_by_ts_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law',
maximise_data=True)
# Test attributes
assert round(shear_by_ts_power_law.alpha.mean(), 4) == 0.1786
assert shear_by_ts_log_law.roughness.mean() == 4.306534305567819e+68
# Test apply
shear_by_ts_power_law.apply(DATA['Spd80mN'], 40, 60)
shear_by_ts_log_law.apply(DATA['Spd80mN'], 40, 60)
assert True
def test_scale():
# Specify columns in data which contain the anemometer measurements from which to calculate shear
bw.Shear.scale(DATA['Spd40mN'], 40, 60, alpha=.2)
bw.Shear.scale(DATA['Spd40mN'], 40, 60, calc_method='log_law', roughness=.03)
assert True
|
[
"brightwind.Shear.BySector",
"brightwind.apply_cleaning",
"brightwind.Shear.TimeSeries",
"brightwind.Shear.scale",
"brightwind.Shear.Average",
"brightwind.load_csv",
"brightwind.Shear.TimeOfDay"
] |
[((46, 85), 'brightwind.load_csv', 'bw.load_csv', (['bw.demo_datasets.demo_data'], {}), '(bw.demo_datasets.demo_data)\n', (57, 85), True, 'import brightwind as bw\n'), ((93, 153), 'brightwind.apply_cleaning', 'bw.apply_cleaning', (['DATA', 'bw.demo_datasets.demo_cleaning_file'], {}), '(DATA, bw.demo_datasets.demo_cleaning_file)\n', (110, 153), True, 'import brightwind as bw\n'), ((588, 626), 'brightwind.Shear.Average', 'bw.Shear.Average', (['anemometers', 'heights'], {}), '(anemometers, heights)\n', (604, 626), True, 'import brightwind as bw\n'), ((651, 712), 'brightwind.Shear.Average', 'bw.Shear.Average', (['anemometers', 'heights'], {'calc_method': '"""log_law"""'}), "(anemometers, heights, calc_method='log_law')\n", (667, 712), True, 'import brightwind as bw\n'), ((1100, 1132), 'brightwind.Shear.Average', 'bw.Shear.Average', (['wspds', 'heights'], {}), '(wspds, heights)\n', (1116, 1132), True, 'import brightwind as bw\n'), ((1260, 1292), 'brightwind.Shear.Average', 'bw.Shear.Average', (['wspds', 'heights'], {}), '(wspds, heights)\n', (1276, 1292), True, 'import brightwind as bw\n'), ((1365, 1420), 'brightwind.Shear.Average', 'bw.Shear.Average', (['wspds', 'heights'], {'calc_method': '"""log_law"""'}), "(wspds, heights, calc_method='log_law')\n", (1381, 1420), True, 'import brightwind as bw\n'), ((1957, 2008), 'brightwind.Shear.BySector', 'bw.Shear.BySector', (['anemometers', 'heights', 'directions'], {}), '(anemometers, heights, directions)\n', (1974, 2008), True, 'import brightwind as bw\n'), ((2039, 2113), 'brightwind.Shear.BySector', 'bw.Shear.BySector', (['anemometers', 'heights', 'directions'], {'calc_method': '"""log_law"""'}), "(anemometers, heights, directions, calc_method='log_law')\n", (2056, 2113), True, 'import brightwind as bw\n'), ((2148, 2237), 'brightwind.Shear.BySector', 'bw.Shear.BySector', (['anemometers', 'heights', 'directions'], {'direction_bin_array': 'custom_bins'}), '(anemometers, heights, directions, direction_bin_array=\n custom_bins)\n', (2165, 2237), True, 'import brightwind as bw\n'), ((3091, 3131), 'brightwind.Shear.TimeOfDay', 'bw.Shear.TimeOfDay', (['anemometers', 'heights'], {}), '(anemometers, heights)\n', (3109, 3131), True, 'import brightwind as bw\n'), ((3161, 3217), 'brightwind.Shear.TimeOfDay', 'bw.Shear.TimeOfDay', (['anemometers', 'heights'], {'by_month': '(False)'}), '(anemometers, heights, by_month=False)\n', (3179, 3217), True, 'import brightwind as bw\n'), ((3245, 3308), 'brightwind.Shear.TimeOfDay', 'bw.Shear.TimeOfDay', (['anemometers', 'heights'], {'calc_method': '"""log_law"""'}), "(anemometers, heights, calc_method='log_law')\n", (3263, 3308), True, 'import brightwind as bw\n'), ((3336, 3415), 'brightwind.Shear.TimeOfDay', 'bw.Shear.TimeOfDay', (['anemometers', 'heights'], {'by_month': '(False)', 'calc_method': '"""log_law"""'}), "(anemometers, heights, by_month=False, calc_method='log_law')\n", (3354, 3415), True, 'import brightwind as bw\n'), ((4042, 4083), 'brightwind.Shear.TimeSeries', 'bw.Shear.TimeSeries', (['anemometers', 'heights'], {}), '(anemometers, heights)\n', (4061, 4083), True, 'import brightwind as bw\n'), ((4112, 4173), 'brightwind.Shear.TimeSeries', 'bw.Shear.TimeSeries', (['anemometers', 'heights'], {'maximise_data': '(True)'}), '(anemometers, heights, maximise_data=True)\n', (4131, 4173), True, 'import brightwind as bw\n'), ((4201, 4265), 'brightwind.Shear.TimeSeries', 'bw.Shear.TimeSeries', (['anemometers', 'heights'], {'calc_method': '"""log_law"""'}), "(anemometers, heights, calc_method='log_law')\n", (4220, 4265), True, 'import brightwind as bw\n'), ((4292, 4380), 'brightwind.Shear.TimeSeries', 'bw.Shear.TimeSeries', (['anemometers', 'heights'], {'calc_method': '"""log_law"""', 'maximise_data': '(True)'}), "(anemometers, heights, calc_method='log_law',\n maximise_data=True)\n", (4311, 4380), True, 'import brightwind as bw\n'), ((4857, 4907), 'brightwind.Shear.scale', 'bw.Shear.scale', (["DATA['Spd40mN']", '(40)', '(60)'], {'alpha': '(0.2)'}), "(DATA['Spd40mN'], 40, 60, alpha=0.2)\n", (4871, 4907), True, 'import brightwind as bw\n'), ((4911, 4989), 'brightwind.Shear.scale', 'bw.Shear.scale', (["DATA['Spd40mN']", '(40)', '(60)'], {'calc_method': '"""log_law"""', 'roughness': '(0.03)'}), "(DATA['Spd40mN'], 40, 60, calc_method='log_law', roughness=0.03)\n", (4925, 4989), True, 'import brightwind as bw\n')]
|
import importlib
import os
import re
import shlex
import subprocess
import sys
import warnings
from importlib.metadata import PackageNotFoundError, distribution
from importlib.util import find_spec
from pathlib import Path
from shutil import rmtree
from unittest import skip
import pytest
import requests
from .unit_test import PyPI_Release, Version, log, reuse
not_local = "GITHUB_REF" in os.environ
is_win = sys.platform.lower().startswith("win")
not_win = not is_win
# Add in-progress tests here
def test_template(reuse):
pass
def test_is_platform_compatible_macos(reuse):
platform_tags = reuse.use.get_supported()
platform_tag = next(iter(platform_tags))
info = {
"comment_text": "",
"digests": {
"md5": "2651049b70d2ec07d8afd7637f198807",
"sha256": "cc6bd4fd593cb261332568485e20a0712883cf631f6f5e8e86a52caa8b2b50ff",
},
"downloads": -1,
"filename": f"numpy-1.19.5-cp3{sys.version_info[1]}-cp3{sys.version_info[1]}m-{platform_tag}.whl",
"has_sig": False,
"md5_digest": "2651049b70d2ec07d8afd7637f198807",
"packagetype": "bdist_wheel",
"python_version": "source",
"requires_python": ">=3.6",
"size": 15599590,
"upload_time": "2021-01-05T17:19:38",
"upload_time_iso_8601": "2021-01-05T17:19:38.152665Z",
"url": f"https://files.pythonhosted.org/packages/6a/9d/984f87a8d5b28b1d4afc042d8f436a76d6210fb582214f35a0ea1db3be66/numpy-1.19.5-cp3{sys.version_info[1]}-cp3{sys.version_info[1]}m-{platform_tag}.whl",
"yanked": False,
"yanked_reason": None,
"version": "1.19.5",
}
assert reuse._is_platform_compatible(PyPI_Release(**info), platform_tags)
def test_is_platform_compatible_win(reuse):
platform_tags = reuse.use.get_supported()
platform_tag = next(iter(platform_tags))
info = {
"comment_text": "",
"digests": {
"md5": "baf1bd7e3a8c19367103483d1fd61cfc",
"sha256": "dbd18bcf4889b720ba13a27ec2f2aac1981bd41203b3a3b27ba7a33f88ae4827",
},
"downloads": -1,
"filename": f"numpy-1.19.5-cp3{sys.version_info[1]}-cp3{sys.version_info[1]}m-{platform_tag}.whl",
"has_sig": False,
"md5_digest": "baf1bd7e3a8c19367103483d1fd61cfc",
"packagetype": "bdist_wheel",
"python_version": f"cp3{sys.version_info[1]}",
"requires_python": f">=3.{sys.version_info[1]}",
"size": 13227547,
"upload_time": "2021-01-05T17:24:53",
"upload_time_iso_8601": "2021-01-05T17:24:53.052845Z",
"url": f"https://files.pythonhosted.org/packages/ea/bc/da526221bc111857c7ef39c3af670bbcf5e69c247b0d22e51986f6d0c5c2/numpy-1.19.5-cp3{sys.version_info[1]}-cp3{sys.version_info[1]}m-{platform_tag}.whl",
"yanked": False,
"yanked_reason": None,
"version": "1.19.5",
}
assert reuse._is_platform_compatible(
PyPI_Release(**info), platform_tags, include_sdist=False
)
def test_pure_python_package(reuse):
# https://pypi.org/project/example-pypi-package/
file = (
reuse.Path.home()
/ ".justuse-python/packages/example_pypi_package-0.1.0-py3-none-any.whl"
)
venv_dir = reuse.Path.home() / ".justuse-python/venv/example-pypi-package/0.1.0"
file.unlink(missing_ok=True)
if venv_dir.exists():
rmtree(venv_dir)
test = reuse(
"example-pypi-package/examplepy",
version="0.1.0",
hashes={
"3c1b4ddf718d85bde796a20cf3fdea254a33a4dc89129dff5bfc5b7cd760c86b",
"ce89b1fe92abc55b4349bc58462ba255c42132598df6fe3a416a75b39b872a77",
},
modes=reuse.auto_install,
)
assert (
venv_dir.exists() == False
), "Should not have created venv for example-pypi-package"
assert str(test.Number(2)) == "2"
if file.exists():
file.unlink()
def test_db_setup(reuse):
assert reuse.registry
@pytest.mark.skipif(True, reason="broken")
def test_no_isolation(reuse):
assert test_load_multi_version(reuse, "numpy", "1.19.0", 1)
assert test_load_multi_version(reuse, "numpy", "1.19.0", 1)
def installed_or_skip(reuse, name, version=None):
if not (spec := find_spec(name)):
pytest.skip(f"{name} not installed")
return False
try:
dist = distribution(spec.name)
except PackageNotFoundError as pnfe:
pytest.skip(f"{name} partially installed: {spec=}, {pnfe}")
if not (
(ver := dist.metadata["version"])
and (not version or reuse.Version(version)) == (not ver or reuse.Version(ver))
):
pytest.skip(f"found '{name}' v{ver}, but require v{version}")
return False
return True
@pytest.mark.skipif(not_local, reason="requires matplotlib")
def test_use_str(reuse):
if not installed_or_skip(reuse, "matplotlib"):
return
mod = reuse("matplotlib/matplotlib.pyplot")
assert mod
@pytest.mark.skipif(not_local, reason="requires matplotlib")
def test_use_tuple(reuse):
if not installed_or_skip(reuse, "matplotlib"):
return
mod = reuse(("matplotlib", "matplotlib.pyplot"))
assert mod
@pytest.mark.skipif(not_local, reason="requires matplotlib")
def test_use_kwargs(reuse):
if not installed_or_skip(reuse, "matplotlib"):
return
mod = reuse(package_name="matplotlib", module_name="matplotlib.pyplot")
assert mod
|
[
"importlib.util.find_spec",
"pytest.skip",
"sys.platform.lower",
"pytest.mark.skipif",
"shutil.rmtree",
"importlib.metadata.distribution"
] |
[((3957, 3998), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(True)'], {'reason': '"""broken"""'}), "(True, reason='broken')\n", (3975, 3998), False, 'import pytest\n'), ((4730, 4789), 'pytest.mark.skipif', 'pytest.mark.skipif', (['not_local'], {'reason': '"""requires matplotlib"""'}), "(not_local, reason='requires matplotlib')\n", (4748, 4789), False, 'import pytest\n'), ((4947, 5006), 'pytest.mark.skipif', 'pytest.mark.skipif', (['not_local'], {'reason': '"""requires matplotlib"""'}), "(not_local, reason='requires matplotlib')\n", (4965, 5006), False, 'import pytest\n'), ((5171, 5230), 'pytest.mark.skipif', 'pytest.mark.skipif', (['not_local'], {'reason': '"""requires matplotlib"""'}), "(not_local, reason='requires matplotlib')\n", (5189, 5230), False, 'import pytest\n'), ((413, 433), 'sys.platform.lower', 'sys.platform.lower', ([], {}), '()\n', (431, 433), False, 'import sys\n'), ((3375, 3391), 'shutil.rmtree', 'rmtree', (['venv_dir'], {}), '(venv_dir)\n', (3381, 3391), False, 'from shutil import rmtree\n'), ((4255, 4291), 'pytest.skip', 'pytest.skip', (['f"""{name} not installed"""'], {}), "(f'{name} not installed')\n", (4266, 4291), False, 'import pytest\n'), ((4337, 4360), 'importlib.metadata.distribution', 'distribution', (['spec.name'], {}), '(spec.name)\n', (4349, 4360), False, 'from importlib.metadata import PackageNotFoundError, distribution\n'), ((4628, 4689), 'pytest.skip', 'pytest.skip', (['f"""found \'{name}\' v{ver}, but require v{version}"""'], {}), '(f"found \'{name}\' v{ver}, but require v{version}")\n', (4639, 4689), False, 'import pytest\n'), ((4229, 4244), 'importlib.util.find_spec', 'find_spec', (['name'], {}), '(name)\n', (4238, 4244), False, 'from importlib.util import find_spec\n'), ((4410, 4475), 'pytest.skip', 'pytest.skip', (['f"""{name} partially installed: spec={spec!r}, {pnfe}"""'], {}), "(f'{name} partially installed: spec={spec!r}, {pnfe}')\n", (4421, 4475), False, 'import pytest\n')]
|
import numpy as np
a = np.arange(6)
print(a)
# [0 1 2 3 4 5]
print(a.reshape(2, 3))
# [[0 1 2]
# [3 4 5]]
print(a.reshape(-1, 3))
# [[0 1 2]
# [3 4 5]]
print(a.reshape(2, -1))
# [[0 1 2]
# [3 4 5]]
# print(a.reshape(3, 4))
# ValueError: cannot reshape array of size 6 into shape (3,4)
# print(a.reshape(-1, 4))
# ValueError: cannot reshape array of size 6 into shape (4)
l = [0, 1, 2, 3, 4, 5]
print(np.array(l).reshape(-1, 3).tolist())
# [[0, 1, 2], [3, 4, 5]]
print(np.array(l).reshape(3, -1).tolist())
# [[0, 1], [2, 3], [4, 5]]
|
[
"numpy.array",
"numpy.arange"
] |
[((24, 36), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (33, 36), True, 'import numpy as np\n'), ((411, 422), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (419, 422), True, 'import numpy as np\n'), ((480, 491), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (488, 491), True, 'import numpy as np\n')]
|
import random
import time
def random_sleep_time():
x = random.randint(5,15)
x = x*0.1
time.sleep(x)
|
[
"random.randint",
"time.sleep"
] |
[((64, 85), 'random.randint', 'random.randint', (['(5)', '(15)'], {}), '(5, 15)\n', (78, 85), False, 'import random\n'), ((105, 118), 'time.sleep', 'time.sleep', (['x'], {}), '(x)\n', (115, 118), False, 'import time\n')]
|
"""
The interface for data preprocessing.
Authors:
<NAME>
"""
import numpy as np
import pandas as pd
from collections import Counter
class FeatureExtractor(object):
def __init__(self):
self.idf_vec = None
self.mean_vec = None
self.events = None
def df_fit_transform(self, X_seq):
"""
Fit and transform the data matrix.
Variant of a similar to "fit_transform" function,
but with using a pandas lib for more convenient debugging.
Args:
X_seq: ndarray, log sequences matrix
Returns:
X_new: The transformed data matrix
"""
print('====== Transformed train data summary ======')
x_counts = []
for i in range(X_seq.shape[0]):
event_counts = Counter(X_seq[i])
x_counts.append(event_counts)
X_df = pd.DataFrame(x_counts)
X_df = X_df.fillna(0)
self.events = X_df.columns
num_instance, num_event = X_df.shape
#tf - idf term weighting
df_vec = np.sum(X_df > 0, axis=0)
self.idf_vec = np.log(num_instance / (df_vec + 1e-8))
idf_matrix = X_df * np.tile(self.idf_vec, (num_instance, 1))
X = idf_matrix
#zero-mean normalization
mean_vec = X.mean(axis=0)
self.mean_vec = mean_vec.values.reshape(1, num_event)
X = X - np.tile(self.mean_vec, (num_instance, 1))
X_new = X
print('Train data shape: {}-by-{}\n'.format(X_new.shape[0], X_new.shape[1]))
return X_new
def fit_transform(self, X_seq):
"""
Fit and transform the data matrix
Args:
X_seq: ndarray, log sequences matrix
Returns:
X_new: The transformed data matrix
"""
print('====== Transformed train data summary ======')
x_counts = []
for i in range(X_seq.shape[0]):
event_counts = Counter(X_seq[i])
x_counts.append(event_counts)
X_df = pd.DataFrame(x_counts)
X_df = X_df.fillna(0)
self.events = X_df.columns
X = X_df.values
num_instance, num_event = X.shape
#tf - idf term weighting
df_vec = np.sum(X > 0, axis=0)
self.idf_vec = np.log(num_instance / (df_vec + 1e-8))
idf_matrix = X * np.tile(self.idf_vec, (num_instance, 1))
X = idf_matrix
#zero-mean normalization
mean_vec = X.mean(axis=0)
self.mean_vec = mean_vec.reshape(1, num_event)
X = X - np.tile(self.mean_vec, (num_instance, 1))
X_new = X
print('Train data shape: {}-by-{}\n'.format(X_new.shape[0], X_new.shape[1]))
return X_new
def transform(self, X_seq):
"""
Transform the data matrix with trained parameters
Args:
X_seq: log sequences matrix
Returns:
X_new: The transformed data matrix
"""
print('====== Transformed test data summary ======')
X_counts = []
for i in range(X_seq.shape[0]):
event_counts = Counter(X_seq[i])
X_counts.append(event_counts)
X_df = pd.DataFrame(X_counts)
X_df = X_df.fillna(0)
empty_events = set(self.events) - set(X_df.columns)
for event in empty_events:
X_df[event] = [0] * len(X_df)
# only those events (keys) that were in the training data set are taken into account
X = X_df[self.events].values
num_instance, num_event = X.shape
# tf - idf term weighting
idf_matrix = X * np.tile(self.idf_vec, (num_instance, 1))
X = idf_matrix
# zero-mean normalization
X = X - np.tile(self.mean_vec, (num_instance, 1))
X_new = X
print('Test data shape: {}-by-{}\n'.format(X_new.shape[0], X_new.shape[1]))
return X_new
|
[
"pandas.DataFrame",
"numpy.sum",
"numpy.log",
"numpy.tile",
"collections.Counter"
] |
[((900, 922), 'pandas.DataFrame', 'pd.DataFrame', (['x_counts'], {}), '(x_counts)\n', (912, 922), True, 'import pandas as pd\n'), ((1085, 1109), 'numpy.sum', 'np.sum', (['(X_df > 0)'], {'axis': '(0)'}), '(X_df > 0, axis=0)\n', (1091, 1109), True, 'import numpy as np\n'), ((1133, 1172), 'numpy.log', 'np.log', (['(num_instance / (df_vec + 1e-08))'], {}), '(num_instance / (df_vec + 1e-08))\n', (1139, 1172), True, 'import numpy as np\n'), ((2059, 2081), 'pandas.DataFrame', 'pd.DataFrame', (['x_counts'], {}), '(x_counts)\n', (2071, 2081), True, 'import pandas as pd\n'), ((2265, 2286), 'numpy.sum', 'np.sum', (['(X > 0)'], {'axis': '(0)'}), '(X > 0, axis=0)\n', (2271, 2286), True, 'import numpy as np\n'), ((2310, 2349), 'numpy.log', 'np.log', (['(num_instance / (df_vec + 1e-08))'], {}), '(num_instance / (df_vec + 1e-08))\n', (2316, 2349), True, 'import numpy as np\n'), ((3226, 3248), 'pandas.DataFrame', 'pd.DataFrame', (['X_counts'], {}), '(X_counts)\n', (3238, 3248), True, 'import pandas as pd\n'), ((825, 842), 'collections.Counter', 'Counter', (['X_seq[i]'], {}), '(X_seq[i])\n', (832, 842), False, 'from collections import Counter\n'), ((1201, 1241), 'numpy.tile', 'np.tile', (['self.idf_vec', '(num_instance, 1)'], {}), '(self.idf_vec, (num_instance, 1))\n', (1208, 1241), True, 'import numpy as np\n'), ((1411, 1452), 'numpy.tile', 'np.tile', (['self.mean_vec', '(num_instance, 1)'], {}), '(self.mean_vec, (num_instance, 1))\n', (1418, 1452), True, 'import numpy as np\n'), ((1984, 2001), 'collections.Counter', 'Counter', (['X_seq[i]'], {}), '(X_seq[i])\n', (1991, 2001), False, 'from collections import Counter\n'), ((2374, 2414), 'numpy.tile', 'np.tile', (['self.idf_vec', '(num_instance, 1)'], {}), '(self.idf_vec, (num_instance, 1))\n', (2381, 2414), True, 'import numpy as np\n'), ((2577, 2618), 'numpy.tile', 'np.tile', (['self.mean_vec', '(num_instance, 1)'], {}), '(self.mean_vec, (num_instance, 1))\n', (2584, 2618), True, 'import numpy as np\n'), ((3151, 3168), 'collections.Counter', 'Counter', (['X_seq[i]'], {}), '(X_seq[i])\n', (3158, 3168), False, 'from collections import Counter\n'), ((3648, 3688), 'numpy.tile', 'np.tile', (['self.idf_vec', '(num_instance, 1)'], {}), '(self.idf_vec, (num_instance, 1))\n', (3655, 3688), True, 'import numpy as np\n'), ((3762, 3803), 'numpy.tile', 'np.tile', (['self.mean_vec', '(num_instance, 1)'], {}), '(self.mean_vec, (num_instance, 1))\n', (3769, 3803), True, 'import numpy as np\n')]
|
from django.contrib import admin
# Register your models here.
from app1.models import Release
from app1.models import Comments
from app1.models import User
from app1.models import Collections
admin.site.register(Release )
admin.site.register(Comments)
admin.site.register(User)
admin.site.register( Collections)
|
[
"django.contrib.admin.site.register"
] |
[((200, 228), 'django.contrib.admin.site.register', 'admin.site.register', (['Release'], {}), '(Release)\n', (219, 228), False, 'from django.contrib import admin\n'), ((231, 260), 'django.contrib.admin.site.register', 'admin.site.register', (['Comments'], {}), '(Comments)\n', (250, 260), False, 'from django.contrib import admin\n'), ((262, 287), 'django.contrib.admin.site.register', 'admin.site.register', (['User'], {}), '(User)\n', (281, 287), False, 'from django.contrib import admin\n'), ((289, 321), 'django.contrib.admin.site.register', 'admin.site.register', (['Collections'], {}), '(Collections)\n', (308, 321), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python implementation of <NAME>'s MST code
# <NAME>
# October 11, 2016
# MIT License
import sys
import toml
import parameters
import model
import controller
import reference
import output
from utils import time
def run(config):
"""Run a fixed length simulation based on the inputs provided in the config."""
# Init lookup table of parameters
params = parameters.LookupTable(config)
# Init MST model
mst = model.MST(params, config)
# Init LQG controller
ctrl = controller.LQG(config["control"])
# Init reference signal
ref = reference.Translator(params, config["control"])
# Init output plotter (will plot on exit)
with output.Plotter(config) as out:
# Main loop
y = [mst.lmbda0, mst.flux]
for t in time(config["time"])[0][1:]:
r = ref(t)
u = ctrl.control(r, y)
y = mst.step(t, u)
out.save(t, r, u, y, mst, ctrl)
if __name__ == "__main__":
# Parse config file
if len(sys.argv) != 2:
print("Usage: {0} <config.toml>".format(sys.argv[0]))
sys.exit(1)
with open(sys.argv[1]) as config_file:
config = toml.loads(config_file.read())
# Run fixed length simulation
try:
run(config)
except UserWarning as warning:
print(warning)
except StopIteration as info:
print(info)
|
[
"model.MST",
"controller.LQG",
"output.Plotter",
"parameters.LookupTable",
"utils.time",
"reference.Translator",
"sys.exit"
] |
[((408, 438), 'parameters.LookupTable', 'parameters.LookupTable', (['config'], {}), '(config)\n', (430, 438), False, 'import parameters\n'), ((465, 490), 'model.MST', 'model.MST', (['params', 'config'], {}), '(params, config)\n', (474, 490), False, 'import model\n'), ((523, 556), 'controller.LQG', 'controller.LQG', (["config['control']"], {}), "(config['control'])\n", (537, 556), False, 'import controller\n'), ((590, 637), 'reference.Translator', 'reference.Translator', (['params', "config['control']"], {}), "(params, config['control'])\n", (610, 637), False, 'import reference\n'), ((688, 710), 'output.Plotter', 'output.Plotter', (['config'], {}), '(config)\n', (702, 710), False, 'import output\n'), ((1031, 1042), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1039, 1042), False, 'import sys\n'), ((773, 793), 'utils.time', 'time', (["config['time']"], {}), "(config['time'])\n", (777, 793), False, 'from utils import time\n')]
|
from django.contrib import admin
from django.urls import path, include
from allez.views import index
urlpatterns = [
path('admin/', admin.site.urls),
path('', index, name='index'),
path('carnival/', include('carnival.urls')),
path('accounts/', include('accounts.urls')),
path('accounts/', include('accounts.urls')),
path('riders/', include('riders.urls')),
]
|
[
"django.urls.path",
"django.urls.include"
] |
[((123, 154), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (127, 154), False, 'from django.urls import path, include\n'), ((160, 189), 'django.urls.path', 'path', (['""""""', 'index'], {'name': '"""index"""'}), "('', index, name='index')\n", (164, 189), False, 'from django.urls import path, include\n'), ((213, 237), 'django.urls.include', 'include', (['"""carnival.urls"""'], {}), "('carnival.urls')\n", (220, 237), False, 'from django.urls import path, include\n'), ((262, 286), 'django.urls.include', 'include', (['"""accounts.urls"""'], {}), "('accounts.urls')\n", (269, 286), False, 'from django.urls import path, include\n'), ((311, 335), 'django.urls.include', 'include', (['"""accounts.urls"""'], {}), "('accounts.urls')\n", (318, 335), False, 'from django.urls import path, include\n'), ((358, 380), 'django.urls.include', 'include', (['"""riders.urls"""'], {}), "('riders.urls')\n", (365, 380), False, 'from django.urls import path, include\n')]
|
#---------------------------------------------------------------------------------------------
# Create an application called "Online Auction" with id "online_auc"
#
# Create a device to represent the app server called "OA-AppServer-1" with id "oa-appserver-1"
#
# Create a type of entity to represent transactions on an app server called "Transaction" with
# type id of "TRANSACTION" with two metrics "request_response_time" and "number_of_requests"
#
# Create two transactions to represent activity on the appserver
# "Bid Transaction" with id oa-appserver-1.bid-tx
# "Browse Catalog" with id oa-appserver-1.browse_catalog
#
# Make the metrcs number_of_requests and request_response time KPI's for the application.
#---------------------------------------------------------------------------------------------
import json
import pycurl
import os
#-------------------------------------------------------
# Specify api key
#-------------------------------------------------------
apikey = os.environ["TSI_API_KEY"]
#-------------------------------------------------------
# Set up headers
#-------------------------------------------------------
headers = ['Expect:', 'Content-Type: application/json' , 'X-API-KEY: ' + apikey]
#---------------------------------------------------------------------------------------------
# Create the application
#---------------------------------------------------------------------------------------------
newEntity = {
"entity_type_id": "APPLICATION",
"name": "Random",
"tags": [
"app_id:random"
],
"cfg_attr_values": {},
"entity_id": "random",
"source_id": "random",
"cfg_attr_values":
{
"kpis":[
{"entity_type_id":"TRANSACTION",
"entity_type_name":"Transaction",
"entity_id":"oa-appserver-1.bid_tx",
"title":"Number of Requests",
"application_id":"random",
"application_name":"Random",
"metric_name":"Number of Requests",
"metric_uom":"#",
"metric_id":"random"}
]
}
}
#-------------------------------------------------------
# Specify the uri
#-------------------------------------------------------
url = "https://truesight.bmc.com/api/v1/entities"
#-------------------------------------------------------
# Issue the request
#-------------------------------------------------------
c= pycurl.Curl()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.HTTPHEADER,headers )
c.setopt(pycurl.CUSTOMREQUEST, "POST")
data = json.dumps(newEntity)
c.setopt(pycurl.POSTFIELDS,data)
c.perform()
print ("status code:=" + str(c.getinfo(pycurl.HTTP_CODE)))
c.close()
#-------------------------------------------------------
# Create a device
#-------------------------------------------------------
newEntity = {
"entity_type_id": "DEVICE",
"name": "OA-AppServer-1",
"tags": [
"app_id:online_auc"
],
"cfg_attr_values": {},
"entity_id": "oa-appserver-1",
"source_id": "sample",
}
#-------------------------------------------------------
# Issue the request
#-------------------------------------------------------
c= pycurl.Curl()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.HTTPHEADER,headers )
c.setopt(pycurl.CUSTOMREQUEST, "POST")
data = json.dumps(newEntity)
c.setopt(pycurl.POSTFIELDS,data)
c.perform()
print ("status code:=" + str(c.getinfo(pycurl.HTTP_CODE)))
c.close()
#-------------------------------------------------------
# Create a monitored instance
#-------------------------------------------------------
newEntity = {
"entity_type_id": "TRANSACTION",
"name": "Bid Transaction",
"tags": [
"app_id:online_auc"
],
"cfg_attr_values": {},
"entity_id": "oa-appserver-1.bid_tx",
"source_id": "sample",
"parent_entity_type_id":"DEVICE",
"parent_entity_id":"oa-appserver-1"
}
#-------------------------------------------------------
# Issue the request
#-------------------------------------------------------
c= pycurl.Curl()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.HTTPHEADER,headers )
c.setopt(pycurl.CUSTOMREQUEST, "POST")
data = json.dumps(newEntity)
c.setopt(pycurl.POSTFIELDS,data)
c.perform()
print ("status code:=" + str(c.getinfo(pycurl.HTTP_CODE)))
c.close()
#-------------------------------------------------------
# Create a monitored instance
#-------------------------------------------------------
newEntity = {
"entity_type_id": "TRANSACTION",
"name": "Browse Catalog",
"tags": [
"app_id:online_auc"
],
"cfg_attr_values": {},
"entity_id": "oa-appserver-1.browse_catalog",
"source_id": "sample",
"parent_entity_type_id":"DEVICE",
"parent_entity_id":"oa-appserver-1"
}
#-------------------------------------------------------
# Issue the request
#-------------------------------------------------------
c= pycurl.Curl()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.HTTPHEADER,headers )
c.setopt(pycurl.CUSTOMREQUEST, "POST")
data = json.dumps(newEntity)
c.setopt(pycurl.POSTFIELDS,data)
c.perform()
print ("status code:=" + str(c.getinfo(pycurl.HTTP_CODE)))
c.close()
#-------------------------------------------------------
# Create app level metrics
#-------------------------------------------------------
myMetaData = {
"id": "random",
"name": "Random",
"metrics": [
{
"id": "random",
"name": "Random",
"data_type": "number",
"uom": "#",
"kpi": "True",
"key": "True",
}
]
}
url = "https://truesight.bmc.com/api/v1/meta"
#-------------------------------------------------------
# Issue the request
#-------------------------------------------------------
c= pycurl.Curl()
c.setopt(pycurl.URL, url)
c.setopt(pycurl.HTTPHEADER,headers )
c.setopt(pycurl.CUSTOMREQUEST, "POST")
data = json.dumps(myMetaData)
c.setopt(pycurl.POSTFIELDS,data)
c.perform()
print ("status code:=" + str(c.getinfo(pycurl.HTTP_CODE)))
c.close()
|
[
"pycurl.Curl",
"json.dumps"
] |
[((2614, 2627), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (2625, 2627), False, 'import pycurl\n'), ((2741, 2762), 'json.dumps', 'json.dumps', (['newEntity'], {}), '(newEntity)\n', (2751, 2762), False, 'import json\n'), ((3432, 3445), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (3443, 3445), False, 'import pycurl\n'), ((3559, 3580), 'json.dumps', 'json.dumps', (['newEntity'], {}), '(newEntity)\n', (3569, 3580), False, 'import json\n'), ((4363, 4376), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (4374, 4376), False, 'import pycurl\n'), ((4490, 4511), 'json.dumps', 'json.dumps', (['newEntity'], {}), '(newEntity)\n', (4500, 4511), False, 'import json\n'), ((5303, 5316), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (5314, 5316), False, 'import pycurl\n'), ((5430, 5451), 'json.dumps', 'json.dumps', (['newEntity'], {}), '(newEntity)\n', (5440, 5451), False, 'import json\n'), ((6180, 6193), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (6191, 6193), False, 'import pycurl\n'), ((6307, 6329), 'json.dumps', 'json.dumps', (['myMetaData'], {}), '(myMetaData)\n', (6317, 6329), False, 'import json\n')]
|
"""
A group of spiking neurons with noise `~U(0, potential_noise_scale)` is added
to `n_neurons * prob_rand_fire` neurons at each step.
Each spiking neuron has an internal membrane potential that
increases with each incoming spike. The potential persists but slowly
decreases over time. Each neuron fires when its potential surpasses
some firing threshold and does not fire again for the duration
of its refractory period.
"""
import numpy as np
from spikey.module import Key
from spikey.snn.neuron.template import Neuron
class RandPotential(Neuron):
"""
A group of spiking neurons with noise `~U(0, potential_noise_scale)` is added
to `n_neurons * prob_rand_fire` neurons at each step.
Each spiking neuron has an internal membrane potential that
increases with each incoming spike. The potential persists but slowly
decreases over time. Each neuron fires when its potential surpasses
some firing threshold and does not fire again for the duration
of its refractory period.
Parameters
----------
kwargs: dict
Dictionary with values for each key in NECESSARY_KEYS.
Examples
--------
.. code-block:: python
config = {
"magnitude": 2,
"n_neurons": 100,
"neuron_pct_inhibitory": .2,
"potential_decay": .2,
"prob_rand_fire": .08,
"refractory_period": 1,
"resting_mv": 0,
"spike_delay": 0,
"potential_noise_scale": .1,
}
neurons = Neuron(**config)
neurons.reset()
weights = np.random.uniform(0, 2, size=(config['n_neurons'], config['n_neurons]))
for i in range(100):
spikes = self.neurons()
neurons += np.sum(
weights * spikes.reshape((-1, 1)), axis=0
)
.. code-block:: python
class network_template(Network):
keys = {
"magnitude": 2,
"n_neurons": 100,
"neuron_pct_inhibitory": .2,
"potential_decay": .2,
"prob_rand_fire": .08,
"refractory_period": 1,
"potential_noise_scale": .1,
}
parts = {
"neurons": Neuron
}
"""
NECESSARY_KEYS = Neuron.extend_keys(
[Key("potential_noise_scale", "Multiplier of leak to add to potential.", float)]
)
def __call__(self) -> np.bool:
"""
Add noise `~U(0, potential_noise_scale)` to `n_neurons * prob_rand_fire` neurons
then determine whether each neuron will fire or not according to threshold.
Called once per network step.
Parameters
----------
threshold: float
Spiking threshold, neurons schedule spikes if potentials >= threshold.
Returns
-------
ndarray[n_neurons, dtype=bool] Spike output from each neuron at the current timestep.
Examples
--------
.. code-block:: python
config = {
"magnitude": 2,
"n_neurons": 100,
"neuron_pct_inhibitory": .2,
"potential_decay": .2,
"prob_rand_fire": .08,
"refractory_period": 1,
"potential_noise_scale": .1,
"firing_threshold": 16,
}
neurons = Neuron(**config)
neurons.reset()
weights = np.random.uniform(0, 2, size=(config['n_neurons'], config['n_neurons]))
for i in range(100):
spikes = self.neurons()
neurons += np.sum(
weights * spikes.reshape((-1, 1)), axis=0
)
"""
noise = np.random.uniform(0, self._potential_noise_scale, size=self._n_neurons)
noise[
~(np.random.uniform(0, 1, size=self._n_neurons) <= self._prob_rand_fire)
] = 0
self.potentials += noise
spike_occurences = self.potentials >= self._firing_threshold
self.refractory_timers[spike_occurences] = self._refractory_period + 1
self.schedule += self.spike_shape * np.int_(spike_occurences)
output = self.schedule[0] * self.polarities * self._magnitude
return output
|
[
"numpy.int_",
"numpy.random.uniform",
"spikey.module.Key"
] |
[((3757, 3828), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self._potential_noise_scale'], {'size': 'self._n_neurons'}), '(0, self._potential_noise_scale, size=self._n_neurons)\n', (3774, 3828), True, 'import numpy as np\n'), ((2339, 2417), 'spikey.module.Key', 'Key', (['"""potential_noise_scale"""', '"""Multiplier of leak to add to potential."""', 'float'], {}), "('potential_noise_scale', 'Multiplier of leak to add to potential.', float)\n", (2342, 2417), False, 'from spikey.module import Key\n'), ((4171, 4196), 'numpy.int_', 'np.int_', (['spike_occurences'], {}), '(spike_occurences)\n', (4178, 4196), True, 'import numpy as np\n'), ((3858, 3903), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'self._n_neurons'}), '(0, 1, size=self._n_neurons)\n', (3875, 3903), True, 'import numpy as np\n')]
|
from snovault import (
CONNECTION,
upgrade_step,
)
@upgrade_step('hotspot_quality_metric', '3', '4')
def hotspot_quality_metric_3_4(value, system):
return
@upgrade_step('hotspot_quality_metric', '4', '5')
def hotspot_quality_metric_4_5(value, system):
# http://redmine.encodedcc.org/issues/2491
if 'assay_term_id' in value:
del value['assay_term_id']
if 'notes' in value:
if value['notes']:
value['notes'] = value['notes'].strip()
else:
del value['notes']
@upgrade_step('hotspot_quality_metric', '5', '6')
def hotspot_quality_metric_5_6(value, system):
# http://redmine.encodedcc.org/issues/4845
if 'SPOT score' in value:
value['SPOT2 score'] = value['SPOT score']
del value['SPOT score']
# http://redmine.encodedcc.org/issues/4748
aliases = []
if 'aliases' in value and value['aliases']:
aliases = value['aliases']
else:
return
aliases_to_remove = []
for i in range(0, len(aliases)):
new_alias = ''
if 'roadmap-epigenomics' in aliases[i]:
if '||' in aliases[i]:
scrub_parts = aliases[i].split('||')
date_split = scrub_parts[1].split(' ')
date = "-".join([date_split[1].strip(), date_split[2].strip(), date_split[5].strip()])
scrubbed_list = [scrub_parts[0].strip(), date.strip(), scrub_parts[2].strip()]
if len(scrub_parts) == 4:
scrubbed_list.append(scrub_parts[3].strip())
new_alias = '_'.join(scrubbed_list)
parts = aliases[i].split(':') if not new_alias else new_alias.split(':')
namespace = parts[0]
if namespace in ['ucsc_encode_db', 'UCSC_encode_db', 'versionof']:
# Remove the alias with the bad namespace
aliases_to_remove.append(aliases[i])
namespace = 'encode'
if namespace in ['CGC']:
namespace = namespace.lower()
rest = '_'.join(parts[1:]).strip()
# Remove or substitute bad characters and multiple whitespaces
import re
if '"' or '#' or '@' or '!' or '$' or '^' or '&' or '|' or '~' or ';' or '`' in rest:
rest = re.sub(r'[\"#@!$^&|~;`\/\\]', '', rest)
rest = ' '.join(rest.split())
if '%' in rest:
rest = re.sub(r'%', 'pct', rest)
if '[' or '{' in rest:
rest = re.sub('[\[{]', '(', rest)
if ']' or '}' in rest:
rest = re.sub('[\]}]', ')', rest)
new_alias = ':'.join([namespace, rest])
if new_alias not in aliases:
aliases[i] = new_alias
if aliases_to_remove and aliases:
for a in aliases_to_remove:
if a in aliases:
aliases.remove(a)
|
[
"re.sub",
"snovault.upgrade_step"
] |
[((62, 110), 'snovault.upgrade_step', 'upgrade_step', (['"""hotspot_quality_metric"""', '"""3"""', '"""4"""'], {}), "('hotspot_quality_metric', '3', '4')\n", (74, 110), False, 'from snovault import CONNECTION, upgrade_step\n'), ((172, 220), 'snovault.upgrade_step', 'upgrade_step', (['"""hotspot_quality_metric"""', '"""4"""', '"""5"""'], {}), "('hotspot_quality_metric', '4', '5')\n", (184, 220), False, 'from snovault import CONNECTION, upgrade_step\n'), ((535, 583), 'snovault.upgrade_step', 'upgrade_step', (['"""hotspot_quality_metric"""', '"""5"""', '"""6"""'], {}), "('hotspot_quality_metric', '5', '6')\n", (547, 583), False, 'from snovault import CONNECTION, upgrade_step\n'), ((2253, 2295), 're.sub', 're.sub', (['"""[\\\\"#@!$^&|~;`\\\\/\\\\\\\\]"""', '""""""', 'rest'], {}), '(\'[\\\\"#@!$^&|~;`\\\\/\\\\\\\\]\', \'\', rest)\n', (2259, 2295), False, 'import re\n'), ((2378, 2402), 're.sub', 're.sub', (['"""%"""', '"""pct"""', 'rest'], {}), "('%', 'pct', rest)\n", (2384, 2402), False, 'import re\n'), ((2454, 2481), 're.sub', 're.sub', (['"""[\\\\[{]"""', '"""("""', 'rest'], {}), "('[\\\\[{]', '(', rest)\n", (2460, 2481), False, 'import re\n'), ((2531, 2558), 're.sub', 're.sub', (['"""[\\\\]}]"""', '""")"""', 'rest'], {}), "('[\\\\]}]', ')', rest)\n", (2537, 2558), False, 'import re\n')]
|
"""
숫자 5를 6으로 볼수도, 6을 5로 볼수도 있다.
"""
from sys import stdin
a, b = map(str, stdin.readline().split())
max_val = int(a.replace('5', '6')) + int(b.replace('5', '6'))
min_val = int(a.replace('6', '5')) + int(b.replace('6', '5'))
print(min_val, max_val)
|
[
"sys.stdin.readline"
] |
[((77, 93), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (91, 93), False, 'from sys import stdin\n')]
|
import torch
import bert.tokenization as tokenization
from bert.modeling import BertConfig, BertModel
from sqlova.utils.utils_wikisql import *
from sqlova.model.nl2sql.wikisql_models import *
from sqlnet.dbengine import DBEngine
from train import test
import random
from decimal import Decimal
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
config = {}
config["batch_size"] = 8
config["data_path"] = "../data/"
config["num_target_layers"] = 2
config["dropout"] = 0.3
config["max_seq_length"] = 222
config["toy_model"] = False
config["toy_size"] = 12
config["accumulate_gradients"] = 2
config["EG"] = False
def get_opt(model, model_bert):
opt = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
lr=1e-3, weight_decay=0)
opt_bert = torch.optim.Adam(filter(lambda p: p.requires_grad, model_bert.parameters()),
lr=1e-5, weight_decay=0)
return opt, opt_bert
def get_bert(BERT_PATH):
bert_config_file = BERT_PATH + "/bert_config_uncased_L-12_H-768_A-12.json"
vocab_file = BERT_PATH + "/vocab_uncased_L-12_H-768_A-12.txt"
init_checkpoint = BERT_PATH + "/pytorch_model_uncased_L-12_H-768_A-12.bin"
bert_config = BertConfig.from_json_file(bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=True)
bert_config.print_status()
model_bert = BertModel(bert_config)
model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu'))
print("Load pre-trained BERT parameters.")
model_bert.to(device)
return model_bert, tokenizer, bert_config
def train(train_loader, train_table, model, model_bert, opt, bert_config, tokenizer,
max_seq_length, num_target_layers, accumulate_gradients=1, check_grad=True,
st_pos=0, opt_bert=None, path_db=None, dset_name='train'):
ave_loss = 0
count = 0 # count the # of examples
count_sc = 0 # count the # of correct predictions of select column
count_sa = 0 # of selectd aggregation
count_wn = 0 # of where number
count_wc = 0 # of where column
count_wo = 0 # of where operator
count_wv = 0 # of where-value
count_wvi = 0 # of where-value index (on question tokens)
count_logic_form_acc = 0 # of logical form acc
count_execute_acc = 0 # of execution acc
# Engine for SQL querying.
engine = DBEngine(os.path.join(path_db, f"{dset_name}.db"))
explored_data_list = []
for batch_index, batch_data in enumerate(train_loader):
count += len(batch_data)
if count < st_pos:
continue
# Get fields
question, question_token, sql, sql_text, sql_t, table, header_token, header \
= get_fields(batch_data, train_table, no_hs_t=True, no_sql_t=True)
len_question_bert, len_header_token, number_header, \
question_token_bert, token_to_berttoken_index, berttoken_to_token_index \
= get_wemb_bert_v2(bert_config, model_bert, tokenizer, question_token, header, max_seq_length,
num_out_layers_n=num_target_layers, num_out_layers_h=num_target_layers)
# select column
def equal_in(cell_, pred_answer_column):
for cell in pred_answer_column:
if cell == cell_:
return True
return False
# RL
# where number
def list_in_list(small,big):
for cell in big:
try:
cell_ = int(cell)
if cell_ in small:
return True
cell_ = float(cell)
if cell_ in small:
return True
except:
cell_ = str(cell)
if cell_.lower() in small:
return True
for cell in small:
try:
cell_ = int(cell)
if cell_ in big:
return True
cell_ = float(cell)
if cell_ in big:
return True
except:
cell_ = str(cell)
if cell_.lower() in big:
return True
return False
def list_exact_match(input1,input2):
tmp1 = [str(item) for item in input1]
tmp2 = [str(item) for item in input2]
if sorted(tmp1)==sorted(tmp2):
return True
return False
def contains(big_list,small_list):
return set(small_list).issubset(set(big_list))
for i in range(len(batch_data)):
print(sql[i])
explored_data = {}
explore_count = 0
breakall = False
reward_where = False
reward_where_cond1 = 0
reward_where_cond2 = 0
reward_where_cond3 = 0
reward_where_cond4 = 0
len_question = len(question_token[i])
gt_answer_list = batch_data[i]["answer"]
where_number_random = 0
select_column_random = -1
select_agg_random = 0
col = 0
op = 0
start = 0
end = 0
col2 = 0
op2 = 0
start2 = 0
end2 = 0
col3 = 0
op3 = 0
start3 = 0
end3 = 0
col4 = 0
op4 = 0
start4 = 0
end4 = 0
saved_where_number = -1
saved_col1 = -1
saved_op1 = -1
saved_start1 = -1
saved_end1 = -1
saved = False
# select_column_random = 3
# where_number_random = 1
while True:
final_select_agg = None
final_select_column = None
final_conds = []
tmp_conds = []
if where_number_random == 0:
select_column_random += 1
if select_column_random==len(header[i]):
select_agg_random+=1
select_column_random=0
if select_agg_random==6:
where_number_random+=1
select_agg_random=0
if where_number_random == 1:
end+=1
if end>=len_question+1:
start+=1
end = start + 1
if start>=len_question:
op+=1
start=0
if op==3:
col+=1
op=0
if col==len(header[i]):
select_column_random+=1
col=0
if select_column_random==len(header[i]):
select_agg_random+=1
select_column_random=0
if select_agg_random==6:
where_number_random+=1
select_agg_random=0
if saved == True and where_number_random==2:
col = saved_col1
start = saved_start1
end = saved_end1
op = saved_op1
end2 += 1
if end2 >= len_question+1:
start2 += 1
end2 = start2 + 1
if start2 >= len_question:
op2 += 1
start2 = 0
if op2 == 3:
col2 += 1
op2 = 0
if col2 == len(header[i]):
select_column_random += 1
col2 = 0
if select_column_random == len(header[i]):
select_agg_random += 1
select_column_random = 0
if select_agg_random == 6:
where_number_random += 1
select_agg_random = 0
if saved==False and where_number_random==2:
end += 1
if end >= len_question+1:
start += 1
end = start + 1
if start >= len_question:
op += 1
start = 0
if op == 3:
col += 1
op = 0
if col == len(header[i]):
end2 += 1
col = 0
if end2 >= len_question+1:
start2 += 1
end2 = start2 + 1
if start2 >= len_question:
op2 += 1
start2 = 0
if op2 == 3:
col2 += 1
op2 = 0
if col2 == len(header[i]):
select_column_random += 1
col2 = 0
if select_column_random==len(header[i]):
select_agg_random+=1
select_column_random=0
if select_agg_random==6:
where_number_random+=1
select_agg_random=0
if where_number_random==3:
# break #TODO
end += 1
if end >= len_question+1:
start += 1
end = start + 1
if start >= len_question:
op += 1
start = 0
if op == 3:
col += 1
op = 0
if col == len(header[i]):
end2 += 1
col = 0
if end2 >= len_question+1:
start2 += 1
end2 = start2 + 1
if start2 >= len_question:
op2 += 1
start2 = 0
if op2 == 3:
col2 += 1
op2 = 0
if col2 == len(header[i]):
end3 += 1
col2 = 0
if end3 >= len_question+1:
start3 += 1
end3 = start3 + 1
if start3 >= len_question:
op3 += 1
start3 = 0
if op3 == 3:
col3 += 1
op3 = 0
if col3 == len(header[i]) :
select_column_random += 1
col3 = 0
if select_column_random==len(header[i]):
select_agg_random+=1
select_column_random=0
if select_agg_random==6:
where_number_random+=1
select_agg_random=0
if where_number_random == 4:
end += 1
if end >= len_question+1:
start += 1
end = start + 1
if start >= len_question:
op += 1
start = 0
if op == 3:
col += 1
op = 0
if col == len(header[i]) :
end2 += 1
col = 0
if end2 >= len_question+1:
start2 += 1
end2 = start2 + 1
if start2 >= len_question:
op2 += 1
start2 = 0
if op2 == 3:
col2 += 1
op2 = 0
if col2 == len(header[i]):
end3 += 1
col2 = 0
if end3 >= len_question+1:
start3 += 1
end3 = start3 + 1
if start3 >= len_question:
op3 += 1
start3 = 0
if op3 == 3:
col3 += 1
op3 = 0
if col3 == len(header[i]):
end4 += 1
col3 = 0
if end4 >= len_question+1:
start4 += 1
end4 = start4+1
if start4 >= len_question:
op4 += 1
start4 = 0
if op4 == 3:
col4 += 1
op4 = 0
if col4 == len(header[i]):
select_column_random += 1
col4 = 0
if select_column_random == len(header[i]):
select_agg_random += 1
select_column_random = 0
if select_agg_random == 6:
where_number_random += 1
select_agg_random = 0
if where_number_random == 1:
cond = []
cond.append(col)
cond.append(op)
pr_wv_str = question_token[i][start:end]
cond_value = merge_wv_t1_eng(pr_wv_str, question[i])
try:
cond_value_ = float(cond_value)
except:
cond_value_ = cond_value
# if type(cond_value_) == str: # and random.randint(1,2)==1:
# op = 0
cond.append(cond_value_)
tmp_conds.append(cond)
if where_number_random == 2:
cond = []
cond.append(col)
cond.append(op)
pr_wv_str = question_token[i][start:end]
cond_value = merge_wv_t1_eng(pr_wv_str, question[i])
try:
cond_value_ = float(cond_value)
except:
cond_value_ = cond_value
cond.append(cond_value_)
tmp_conds.append(cond)
cond = []
cond.append(col2)
cond.append(op2)
pr_wv_str = question_token[i][start2:end2]
cond_value = merge_wv_t1_eng(pr_wv_str, question[i])
try:
cond_value_ = float(cond_value)
except:
cond_value_ = cond_value
cond.append(cond_value_)
tmp_conds.append(cond)
if where_number_random == 3:
cond = []
cond.append(col)
cond.append(op)
pr_wv_str = question_token[i][start:end]
cond_value = merge_wv_t1_eng(pr_wv_str, question[i])
try:
cond_value_ = float(cond_value)
except:
cond_value_ = cond_value
cond.append(cond_value_)
tmp_conds.append(cond)
cond = []
cond.append(col2)
cond.append(op2)
pr_wv_str = question_token[i][start2:end2]
cond_value = merge_wv_t1_eng(pr_wv_str, question[i])
try:
cond_value_ = float(cond_value)
except:
cond_value_ = cond_value
cond.append(cond_value_)
tmp_conds.append(cond)
cond = []
cond.append(col3)
cond.append(op3)
pr_wv_str = question_token[i][start3:end3]
cond_value = merge_wv_t1_eng(pr_wv_str, question[i])
try:
cond_value_ = float(cond_value)
except:
cond_value_ = cond_value
cond.append(cond_value_)
tmp_conds.append(cond)
if where_number_random == 4:
cond = []
cond.append(col)
cond.append(op)
pr_wv_str = question_token[i][start:end]
cond_value = merge_wv_t1_eng(pr_wv_str, question[i])
try:
cond_value_ = float(cond_value)
except:
cond_value_ = cond_value
cond.append(cond_value_)
tmp_conds.append(cond)
cond = []
cond.append(col2)
cond.append(op2)
pr_wv_str = question_token[i][start2:end2]
cond_value = merge_wv_t1_eng(pr_wv_str, question[i])
try:
cond_value_ = float(cond_value)
except:
cond_value_ = cond_value
cond.append(cond_value_)
tmp_conds.append(cond)
cond = []
cond.append(col3)
cond.append(op3)
pr_wv_str = question_token[i][start3:end3]
cond_value = merge_wv_t1_eng(pr_wv_str, question[i])
try:
cond_value_ = float(cond_value)
except:
cond_value_ = cond_value
cond.append(cond_value_)
tmp_conds.append(cond)
cond = []
cond.append(col4)
cond.append(op4)
pr_wv_str = question_token[i][start4:end4]
cond_value = merge_wv_t1_eng(pr_wv_str, question[i])
try:
cond_value_ = float(cond_value)
except:
cond_value_ = cond_value
cond.append(cond_value_)
tmp_conds.append(cond)
# print(select_column_random, select_agg_random, tmp_conds)
pred_answer_column = engine.execute(table[i]['id'], select_column_random, select_agg_random, tmp_conds)
explore_count += 1
if explore_count % 100000 == 0:
print(explore_count)
if explore_count > 500000:
break
exact_match = list_exact_match(gt_answer_list, pred_answer_column)
if where_number_random==1 and not exact_match and contains(pred_answer_column,gt_answer_list)\
and saved_where_number==-1 and op==0: # 可能会导致1condition的错过
# where_number_random = 2 # 可能会导致1condition的错过
saved_start1 = start
saved_end1 = end
saved_col1 = col
saved_op1 = op
saved = True
# answer in
if exact_match:
if pred_answer_column==[None]:
break
print("explore sql", select_column_random, select_agg_random, tmp_conds)
if type(gt_answer_list[0]) == str and select_agg_random!=0:
print("fake sql")
elif where_number_random == 1 and type(tmp_conds[0][2])==str and tmp_conds[0][1]!=0 or\
where_number_random == 2 and type(tmp_conds[1][2]) == str and tmp_conds[1][1] != 0 or\
where_number_random == 3 and type(tmp_conds[2][2]) == str and tmp_conds[2][1] != 0 or \
where_number_random == 4 and type(tmp_conds[3][2]) == str and tmp_conds[3][1] != 0:
print("fake sql")
else:
# print("explore answer", pred_answer_column)
if type(pred_answer_column[0])==int or type(pred_answer_column[0])==float:
final_select_agg = select_agg_random
else:
final_select_agg = 0
if final_select_agg == 0:
pred_answer_column2 = engine.execute(table[i]['id'], select_column_random, 0, [])
for cell in gt_answer_list:
if cell in pred_answer_column2 or equal_in(cell, pred_answer_column2):
final_select_column = select_column_random
break
else:
final_select_column = select_column_random
if final_select_agg == 0:
pred_answer_column3 = engine.execute(table[i]['id'], "*", 0, tmp_conds)
# answer in
for cell in gt_answer_list:
if cell in pred_answer_column3 or equal_in(cell, pred_answer_column3):
reward_where = True
break
else:
reward_where = True
# same column: word in question and where column
if where_number_random >= 1:
pred_answer_column4 = engine.execute(table[i]['id'], tmp_conds[0][0], 0, [])
for cell in pred_answer_column4:
try:
cell_ = str(float(cell))
if cell_ in question[i].lower():
reward_where_cond1 += 0.1
break
cell_ = str(int(cell))
if cell_ in question[i].lower():
reward_where_cond1 += 0.1
break
except:
cell = str(cell)
if cell in question[i].lower():
reward_where_cond1 += 0.1
break
# same column: where value and where column
value = tmp_conds[0][2]
if value in pred_answer_column4:
reward_where_cond1 += 0.1
try:
value = float(tmp_conds[0][2])
if value in pred_answer_column4:
reward_where_cond1 += 0.1
except:
pass
try:
value = int(tmp_conds[0][2])
if value in pred_answer_column4:
reward_where_cond1 += 0.1
except:
pass
try:
value = str(int(tmp_conds[0][2]))
if value in pred_answer_column4:
reward_where_cond1 += 0.1
except:
pass
try:
value = str(float(tmp_conds[0][2]))
if value in pred_answer_column4:
reward_where_cond1 += 0.1
except:
pass
# same column: word in question and where column
if where_number_random >= 2:
pred_answer_column4 = engine.execute(table[i]['id'], tmp_conds[1][0], 0, [])
for cell in pred_answer_column4:
try:
cell_ = str(float(cell))
if cell_ in question[i].lower():
reward_where_cond2 += 0.1
break
cell_ = str(int(cell))
if cell_ in question[i].lower():
reward_where_cond2 += 0.1
break
except:
cell = str(cell)
if cell in question[i].lower():
reward_where_cond2 += 0.1
break
# same column: where value and where column
value = tmp_conds[1][2]
if value in pred_answer_column4:
reward_where_cond2 += 0.1
try:
value = float(tmp_conds[1][2])
if value in pred_answer_column4:
reward_where_cond2 += 0.1
except:
pass
try:
value = int(tmp_conds[1][2])
if value in pred_answer_column4:
reward_where_cond2 += 0.1
except:
pass
try:
value = str(int(tmp_conds[1][2]))
if value in pred_answer_column4:
reward_where_cond2 += 0.1
except:
pass
try:
value = str(float(tmp_conds[1][2]))
if value in pred_answer_column4:
reward_where_cond2 += 0.1
except:
pass
# same column: word in question and where column
if where_number_random >= 3:
pred_answer_column4 = engine.execute(table[i]['id'], tmp_conds[2][0], 0, [])
for cell in pred_answer_column4:
try:
cell_ = str(float(cell))
if cell_ in question[i].lower():
reward_where_cond3 += 0.1
break
cell_ = str(int(cell))
if cell_ in question[i].lower():
reward_where_cond3 += 0.1
break
except:
cell = str(cell)
if cell in question[i].lower():
reward_where_cond3 += 0.1
break
# same column: where value and where column
value = tmp_conds[2][2]
if value in pred_answer_column4:
reward_where_cond3 += 0.1
try:
value = float(tmp_conds[2][2])
if value in pred_answer_column4:
reward_where_cond3 += 0.1
except:
pass
try:
value = int(tmp_conds[2][2])
if value in pred_answer_column4:
reward_where_cond3 += 0.1
except:
pass
try:
value = str(int(tmp_conds[2][2]))
if value in pred_answer_column4:
reward_where_cond3 += 0.1
except:
pass
try:
value = str(float(tmp_conds[2][2]))
if value in pred_answer_column4:
reward_where_cond3 += 0.1
except:
pass
# same column: word in question and where column
if where_number_random >= 4:
pred_answer_column4 = engine.execute(table[i]['id'], tmp_conds[3][0], 0, [])
for cell in pred_answer_column4:
try:
cell_ = str(float(cell))
if cell_ in question[i].lower():
reward_where_cond4 += 0.1
break
cell_ = str(int(cell))
if cell_ in question[i].lower():
reward_where_cond4 += 0.1
break
except:
cell = str(cell)
if cell in question[i].lower():
reward_where_cond4 += 0.1
break
# same column: where value and where column
value = tmp_conds[3][2]
if value in pred_answer_column4:
reward_where_cond4 += 0.1
try:
value = float(tmp_conds[3][2])
if value in pred_answer_column4:
reward_where_cond4 += 0.1
except:
pass
try:
value = int(tmp_conds[3][2])
if value in pred_answer_column4:
reward_where_cond4 += 0.1
except:
pass
try:
value = str(int(tmp_conds[3][2]))
if value in pred_answer_column4:
reward_where_cond4 += 0.1
except:
pass
try:
value = str(float(tmp_conds[3][2]))
if value in pred_answer_column4:
reward_where_cond4 += 0.1
except:
pass
""" 有问题,cond op 只能强制为 = 因为 > 或 < 不在一行
if where_number_random >= 1 and final_select_agg==0:
tmp_conds2 = tmp_conds
tmp_conds2[0][1] = 0 # EQUAL
pred_answer_column5 = engine.execute(table[i]['id'], tmp_conds2[0][0], 0, tmp_conds2)
# same row: the answer and this cell
for row in table[i]["rows"]:
if list_in_list(pred_answer_column5, row) and list_in_list(gt_answer_list, row):
reward_where_cond1 += 0.1
break
if where_number_random >= 2 and final_select_agg==0:
tmp_conds2 = tmp_conds
tmp_conds2[0][1] = 0 # EQUAL
tmp_conds2[1][1] = 0 # EQUAL
pred_answer_column5 = engine.execute(table[i]['id'], tmp_conds2[1][0], 0, tmp_conds2)
# same row: the answer and this cell
for row in table[i]["rows"]:
if list_in_list(pred_answer_column5, row) and list_in_list(gt_answer_list, row):
reward_where_cond2 += 0.1
break
if where_number_random >= 3 and final_select_agg==0:
tmp_conds2 = tmp_conds
tmp_conds2[0][1] = 0 # EQUAL
tmp_conds2[1][1] = 0 # EQUAL
tmp_conds2[2][1] = 0 # EQUAL
pred_answer_column5 = engine.execute(table[i]['id'], tmp_conds2[2][0], 0, tmp_conds2)
# same row: the answer and this cell
for row in table[i]["rows"]:
if list_in_list(pred_answer_column5, row) and list_in_list(gt_answer_list, row):
reward_where_cond3 += 0.1
break
if where_number_random >= 4 and final_select_agg==0:
tmp_conds2 = tmp_conds
tmp_conds2[0][1] = 0 # EQUAL
tmp_conds2[1][1] = 0 # EQUAL
tmp_conds2[2][1] = 0 # EQUAL
tmp_conds2[3][1] = 0 # EQUAL
pred_answer_column5 = engine.execute(table[i]['id'], tmp_conds2[3][0], 0, tmp_conds2)
# same row: the answer and this cell
for row in table[i]["rows"]:
if list_in_list(pred_answer_column5, row) and list_in_list(gt_answer_list, row):
reward_where_cond4 += 0.1
break
"""
if reward_where_cond1>=0.2 and reward_where==True and where_number_random>=1:
final_conds.append(tmp_conds[0])
if reward_where_cond2 >= 0.2 and reward_where == True and where_number_random >= 2:
final_conds.append(tmp_conds[1])
if reward_where_cond3 >= 0.2 and reward_where == True and where_number_random >= 3:
final_conds.append(tmp_conds[2])
if reward_where_cond4 >= 0.2 and reward_where == True and where_number_random >= 4:
final_conds.append(tmp_conds[3])
if final_select_agg!=None and final_select_column!=None and (
where_number_random == 1 and len(final_conds) == 1 or
where_number_random == 2 and len(final_conds) == 2 or
where_number_random == 3 and len(final_conds) == 3 or
where_number_random == 4 and len(final_conds) == 4):
break
if final_select_agg!=None and final_select_column!=None and where_number_random==0:
break
if final_select_column!=None:
explored_data["sel"] = final_select_column
explored_data["agg"] = final_select_agg
explored_data["conds"] = final_conds
explored_data_list.append(explored_data)
print(len(explored_data_list))
one_data = batch_data[i]
one_data["sql"] = explored_data
one_data["query"] = explored_data
f = open("gen_data.jsonl", mode="a", encoding="utf-8")
json.dump(one_data, f)
f.write('\n')
f.close()
print("Done")
ave_loss /= count
acc_sc = count_sc / count
acc_sa = count_sa / count
acc_wn = count_wn / count
acc_wc = count_wc / count
acc_wo = count_wo / count
acc_wvi = count_wv / count
acc_wv = count_wv / count
acc_lx = count_logic_form_acc / count
acc_x = count_execute_acc / count
acc = [ave_loss, acc_sc, acc_sa, acc_wn, acc_wc, acc_wo, acc_wvi, acc_wv, acc_lx, acc_x]
aux_out = 1
return acc, aux_out
def get_data(path_wikisql, args):
train_data, train_table, dev_data, dev_table, _, _ = load_wikisql_v2(path_wikisql,
args["toy_model"],
args["toy_size"],
no_w2i=True,
no_hs_tok=True)
train_loader, dev_loader = get_loader_wikisql(train_data, dev_data, args["batch_size"], shuffle_train=True)
return train_data, train_table, dev_data, dev_table, train_loader, dev_loader
def get_models(config, BERT_PATH, trained=False, path_model_bert=None, path_model=None):
# some constants
agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']
cond_ops = ['=', '>', '<', 'OP'] # do not know why 'OP' required. Hence,
# Get BERT
model_bert, tokenizer, bert_config = get_bert(BERT_PATH)
input_size = bert_config.hidden_size * config["num_target_layers"] # Seq-to-SQL input vector dimenstion
return tokenizer, bert_config
def print_result(epoch, acc, dname):
ave_loss, acc_sc, acc_sa, acc_wn, acc_wc, acc_wo, acc_wvi, acc_wv, acc_lx, acc_x = acc
print(f'{dname} results ------------')
print(
f" Epoch: {epoch}, ave loss: {ave_loss}, acc_sc: {acc_sc:.3f}, acc_sa: {acc_sa:.3f}, acc_wn: {acc_wn:.3f}, \
acc_wc: {acc_wc:.3f}, acc_wo: {acc_wo:.3f}, acc_wvi: {acc_wvi:.3f}, acc_wv: {acc_wv:.3f}, acc_lx: {acc_lx:.3f}, acc_x: {acc_x:.3f}"
)
if __name__ == '__main__':
## 2. Paths
path_h = '..'
path_wikisql = os.path.join(path_h, 'data')
BERT_PATH = path_wikisql
path_save_for_evaluation = './'
## 3. Load data
train_data, train_table, dev_data, dev_table, train_loader, dev_loader = get_data(path_wikisql, config)
tokenizer, bert_config = get_models(config, BERT_PATH)
# opt, opt_bert = get_opt(model, model_bert)
## 6. Train
acc_lx_t_best = -1
epoch_best = -1
epoch = 1
for epoch in range(epoch):
# train
acc_train, aux_out_train = train(train_loader,
train_table,
None,
None,
None,
None,
tokenizer,
config["max_seq_length"],
config["num_target_layers"],
config["accumulate_gradients"],
opt_bert=None,
st_pos=0,
path_db=path_wikisql,
dset_name='train')
print_result(epoch, acc_train, 'train')
|
[
"bert.modeling.BertConfig.from_json_file",
"torch.load",
"torch.cuda.is_available",
"bert.tokenization.FullTokenizer",
"bert.modeling.BertModel"
] |
[((1245, 1288), 'bert.modeling.BertConfig.from_json_file', 'BertConfig.from_json_file', (['bert_config_file'], {}), '(bert_config_file)\n', (1270, 1288), False, 'from bert.modeling import BertConfig, BertModel\n'), ((1305, 1374), 'bert.tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'vocab_file', 'do_lower_case': '(True)'}), '(vocab_file=vocab_file, do_lower_case=True)\n', (1331, 1374), True, 'import bert.tokenization as tokenization\n'), ((1433, 1455), 'bert.modeling.BertModel', 'BertModel', (['bert_config'], {}), '(bert_config)\n', (1442, 1455), False, 'from bert.modeling import BertConfig, BertModel\n'), ((327, 352), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (350, 352), False, 'import torch\n'), ((1487, 1534), 'torch.load', 'torch.load', (['init_checkpoint'], {'map_location': '"""cpu"""'}), "(init_checkpoint, map_location='cpu')\n", (1497, 1534), False, 'import torch\n')]
|
from skimage import color
import numpy as np
from matplotlib import pyplot as plt
def plot_cielab(l):
min_range = -110
max_range = 110
ab_range = np.linspace(min_range, max_range, 500)
b, a = np.meshgrid(ab_range, ab_range)
color_cielab = np.array([np.ones(a.shape) * l, a, b]).T
color_rgb = color.lab2rgb(color_cielab)
# Cut out saturated colors
saturated = np.any((color_rgb == 1.) | (color_rgb == 0.), axis=2)
color_rgb[saturated] = (1., 1., 1.)
fig, ax = plt.subplots()
ax.imshow(color_rgb[::-1], extent=[min_range, max_range, min_range, max_range])
def cielab_circle(l, r_a, r_b, count=8):
theta = np.arange(0, 2*np.pi, 2*np.pi / count)
# ab_range = np.linspace(np.ones(100), np.sin(theta), 100)
a = np.cos(theta) * r_a
b = np.sin(theta) * r_b
color_cielab = np.array([np.ones(a.shape) * l, a, b]).T
color_rgb = color.lab2rgb([color_cielab])[0]
fig, ax = plt.subplots()
# ax.scatter(a, b, s=200, c=color_rgb)
# ax.set_facecolor(color.lab2rgb([[[10, 0, -5]]])[0][0])
ax.imshow(color_rgb[None, :, :])
int_color_rgb = (color_rgb * 255).astype(int)
for r, g, b in int_color_rgb:
test_str = '███ abcdefghijklmnopqrstuvwxyz'
print(f"\033[38;2;{r};{g};{b}m{test_str} {r:3d} {g:3d} {b:3d}\033[0m")
def plot_cielab_line(l_start, a_start, b_start, l_end, a_end, b_end, count=1000):
l = np.linspace(l_start, l_end, count)
a = np.linspace(a_start, a_end, count)
b = np.linspace(b_start, b_end, count)
color_cielab = np.array([l, a, b]).T
color_rgb = color.lab2rgb([color_cielab])[0]
fig, ax = plt.subplots()
# ax.scatter(a, b, s=200, c=color_rgb)
# ax.set_facecolor(color.lab2rgb([[[10, 0, -5]]])[0][0])
ax.imshow(color_rgb[None, :, ::-1], aspect='auto')
|
[
"numpy.meshgrid",
"numpy.ones",
"numpy.any",
"skimage.color.lab2rgb",
"numpy.sin",
"numpy.arange",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.subplots"
] |
[((159, 197), 'numpy.linspace', 'np.linspace', (['min_range', 'max_range', '(500)'], {}), '(min_range, max_range, 500)\n', (170, 197), True, 'import numpy as np\n'), ((210, 241), 'numpy.meshgrid', 'np.meshgrid', (['ab_range', 'ab_range'], {}), '(ab_range, ab_range)\n', (221, 241), True, 'import numpy as np\n'), ((318, 345), 'skimage.color.lab2rgb', 'color.lab2rgb', (['color_cielab'], {}), '(color_cielab)\n', (331, 345), False, 'from skimage import color\n'), ((394, 449), 'numpy.any', 'np.any', (['((color_rgb == 1.0) | (color_rgb == 0.0))'], {'axis': '(2)'}), '((color_rgb == 1.0) | (color_rgb == 0.0), axis=2)\n', (400, 449), True, 'import numpy as np\n'), ((503, 517), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (515, 517), True, 'from matplotlib import pyplot as plt\n'), ((656, 698), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(2 * np.pi / count)'], {}), '(0, 2 * np.pi, 2 * np.pi / count)\n', (665, 698), True, 'import numpy as np\n'), ((940, 954), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (952, 954), True, 'from matplotlib import pyplot as plt\n'), ((1404, 1438), 'numpy.linspace', 'np.linspace', (['l_start', 'l_end', 'count'], {}), '(l_start, l_end, count)\n', (1415, 1438), True, 'import numpy as np\n'), ((1447, 1481), 'numpy.linspace', 'np.linspace', (['a_start', 'a_end', 'count'], {}), '(a_start, a_end, count)\n', (1458, 1481), True, 'import numpy as np\n'), ((1490, 1524), 'numpy.linspace', 'np.linspace', (['b_start', 'b_end', 'count'], {}), '(b_start, b_end, count)\n', (1501, 1524), True, 'import numpy as np\n'), ((1631, 1645), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1643, 1645), True, 'from matplotlib import pyplot as plt\n'), ((767, 780), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (773, 780), True, 'import numpy as np\n'), ((795, 808), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (801, 808), True, 'import numpy as np\n'), ((892, 921), 'skimage.color.lab2rgb', 'color.lab2rgb', (['[color_cielab]'], {}), '([color_cielab])\n', (905, 921), False, 'from skimage import color\n'), ((1545, 1564), 'numpy.array', 'np.array', (['[l, a, b]'], {}), '([l, a, b])\n', (1553, 1564), True, 'import numpy as np\n'), ((1583, 1612), 'skimage.color.lab2rgb', 'color.lab2rgb', (['[color_cielab]'], {}), '([color_cielab])\n', (1596, 1612), False, 'from skimage import color\n'), ((271, 287), 'numpy.ones', 'np.ones', (['a.shape'], {}), '(a.shape)\n', (278, 287), True, 'import numpy as np\n'), ((845, 861), 'numpy.ones', 'np.ones', (['a.shape'], {}), '(a.shape)\n', (852, 861), True, 'import numpy as np\n')]
|
from setuptools import setup
VERSION = '6.10.0'
setup(
name='Marvin',
version=VERSION,
description='Marvin - Python client for Cosmic',
author='<NAME>',
author_email='<EMAIL>',
maintainer='Mission Critical Cloud',
maintainer_email='<EMAIL>',
long_description='Marvin is the Cosmic python client written around the unittest framework',
url='https://github.com/MissionCriticalCloud/marvin',
packages=[
'marvin',
'marvin.cloudstackAPI',
'marvin.lib',
'marvin.config',
'marvin.utils',
],
package_dir={'marvin': 'marvin'},
package_data={
'marvin': [
'utils/marvin_logging.yaml'
]
},
license='LICENSE.txt',
install_requires=[
'mysql-connector-python >= 1.1.6',
'requests >= 2.2.1',
'paramiko >= 1.13.0',
'nose >= 1.3.3',
'ddt >= 0.4.0',
'pyvmomi >= 5.5.0',
'netaddr >= 0.7.14',
'pyyaml >= 3.11'
],
py_modules=[
'marvin.marvinPlugin'
],
zip_safe=False,
entry_points={
'nose.plugins': [
'marvinPlugin = marvin.marvinPlugin:MarvinPlugin'
]
}
)
|
[
"setuptools.setup"
] |
[((50, 983), 'setuptools.setup', 'setup', ([], {'name': '"""Marvin"""', 'version': 'VERSION', 'description': '"""Marvin - Python client for Cosmic"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'maintainer': '"""Mission Critical Cloud"""', 'maintainer_email': '"""<EMAIL>"""', 'long_description': '"""Marvin is the Cosmic python client written around the unittest framework"""', 'url': '"""https://github.com/MissionCriticalCloud/marvin"""', 'packages': "['marvin', 'marvin.cloudstackAPI', 'marvin.lib', 'marvin.config',\n 'marvin.utils']", 'package_dir': "{'marvin': 'marvin'}", 'package_data': "{'marvin': ['utils/marvin_logging.yaml']}", 'license': '"""LICENSE.txt"""', 'install_requires': "['mysql-connector-python >= 1.1.6', 'requests >= 2.2.1',\n 'paramiko >= 1.13.0', 'nose >= 1.3.3', 'ddt >= 0.4.0',\n 'pyvmomi >= 5.5.0', 'netaddr >= 0.7.14', 'pyyaml >= 3.11']", 'py_modules': "['marvin.marvinPlugin']", 'zip_safe': '(False)', 'entry_points': "{'nose.plugins': ['marvinPlugin = marvin.marvinPlugin:MarvinPlugin']}"}), "(name='Marvin', version=VERSION, description=\n 'Marvin - Python client for Cosmic', author='<NAME>', author_email=\n '<EMAIL>', maintainer='Mission Critical Cloud', maintainer_email=\n '<EMAIL>', long_description=\n 'Marvin is the Cosmic python client written around the unittest framework',\n url='https://github.com/MissionCriticalCloud/marvin', packages=[\n 'marvin', 'marvin.cloudstackAPI', 'marvin.lib', 'marvin.config',\n 'marvin.utils'], package_dir={'marvin': 'marvin'}, package_data={\n 'marvin': ['utils/marvin_logging.yaml']}, license='LICENSE.txt',\n install_requires=['mysql-connector-python >= 1.1.6',\n 'requests >= 2.2.1', 'paramiko >= 1.13.0', 'nose >= 1.3.3',\n 'ddt >= 0.4.0', 'pyvmomi >= 5.5.0', 'netaddr >= 0.7.14',\n 'pyyaml >= 3.11'], py_modules=['marvin.marvinPlugin'], zip_safe=False,\n entry_points={'nose.plugins': [\n 'marvinPlugin = marvin.marvinPlugin:MarvinPlugin']})\n", (55, 983), False, 'from setuptools import setup\n')]
|
# Generated by Django 3.0.10 on 2021-06-22 12:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bypass', '0010_auto_20210622_1509'),
]
operations = [
migrations.AlterModelOptions(
name='vdgoobject',
options={'ordering': ['address'], 'verbose_name': 'Объект ВДГО', 'verbose_name_plural': 'Объекты ВДГО'},
),
]
|
[
"django.db.migrations.AlterModelOptions"
] |
[((235, 396), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""vdgoobject"""', 'options': "{'ordering': ['address'], 'verbose_name': 'Объект ВДГО',\n 'verbose_name_plural': 'Объекты ВДГО'}"}), "(name='vdgoobject', options={'ordering': [\n 'address'], 'verbose_name': 'Объект ВДГО', 'verbose_name_plural':\n 'Объекты ВДГО'})\n", (263, 396), False, 'from django.db import migrations, models\n')]
|
import re
from typing import List
DCOS_MIGRATE_NAMESPACE = "migration.dcos.d2iq.com"
_invalid_label = re.compile('[^-a-zA-Z0-9]')
def make_label(name: str) -> str:
# An alphanumeric (a-z, and 0-9) string, with a maximum length of 63
# characters, with the '-' character allowed anywhere except the first or
# last character, suitable for use as a hostname or segment in a domain
# name.
if not name:
name = 'x'
# Recode the name as ASCII, ignoring any non-ascii characters
name = name.encode().decode('ascii', errors='ignore')
# Replace any non-alphanumeric values with `-`
name = _invalid_label.sub('-', name)
# Name cannot be longer than 63 characters
name = name[:63]
# First and last character cannot be `-`
if name[0] == '-':
name = 'x' + name[1:]
if name[-1] == '-':
name = name[:-1] + '0'
return name
# Labels can have upper and lower case, and if the name is > 63 characters we have no choice but to truncate.
# Subdomains are labels but must be lowercase and we can fit longer names into multiple segments.
# Hence `make_segment` is similar to `make_label` without lowercasing and using dots to keep more data.
def make_segment(name: str) -> str:
parts = []
names = name.split('.')
for name in names:
if not name:
name = 'x'
# Recode the name as ASCII, ignoring any non-ascii characters
name = name.encode().decode('ascii', errors='ignore')
# Replace any non-alphanumeric values with `-`
name = _invalid_label.sub('-', name)
while name:
# First character must be alphabetic
if not name[0].isalpha():
name = 'x' + name
part = name[:63]
name = name[63:]
if part[-1] == '-':
part = part[:62] + '0'
parts.append(part)
return '.'.join(parts)
def make_subdomain(names: List[str]) -> str:
# One or more lowercase rfc1035/rfc1123 labels separated by '.' with a
# maximum length of 253 characters.
# `filter` removes any empty names
# `make_segment` breaks names into valid name segments
name = ".".join([make_segment(n) for n in filter(None, names)])
# Truncate to maximum size. If this puts a dot or dash at the end, then
# keep truncating.
name = name[:253]
while name and name[-1] in '.-':
name = name[:-1]
return name.lower()
def dnsify(name: str) -> str:
"""
Replace DC/OS folders with dots and
replace other invalid characters with `_`
>>> dnsify("folder/sec!ret")
"folder.sec_ret"
"""
_invalid_secret_key = re.compile('[^-._a-zA-Z0-9]')
# Replace DC/OS folders with dots
name = ".".join(list(filter(None, name.split("/"))))
# Replace other invalid characters with `_`
# `folder/sec!ret` becomes `folder.sec_ret`
return _invalid_secret_key.sub('_', name)
def namespace_path(name: str) -> str:
"""
Uses namespace constant. Adds every argument as path part
>>> namespace_path("cluster-id")
"migration.dcos.d2iq.com/cluster-id"
"""
return "/".join([DCOS_MIGRATE_NAMESPACE, name])
|
[
"re.compile"
] |
[((104, 131), 're.compile', 're.compile', (['"""[^-a-zA-Z0-9]"""'], {}), "('[^-a-zA-Z0-9]')\n", (114, 131), False, 'import re\n'), ((2670, 2699), 're.compile', 're.compile', (['"""[^-._a-zA-Z0-9]"""'], {}), "('[^-._a-zA-Z0-9]')\n", (2680, 2699), False, 'import re\n')]
|
#
# Copyright 2019 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Oauth2 callback handler
"""
from odahuflow.jupyterlab.handlers.base import build_redirect_url, build_oauth_url
from odahuflow.jupyterlab.handlers.cloud import BaseCloudOdahuflowHandler
from odahuflow.jupyterlab.handlers.helper import decorate_handler_for_exception, ODAHUFLOW_OAUTH_STATE_COOKIE_NAME, \
ODAHUFLOW_OAUTH_TOKEN_COOKIE_NAME, url_join
from odahuflow.sdk import config
from odahuflow.sdk.clients.oauth_handler import get_oauth_token_issuer_url, get_id_token
OAUTH_STATE_ARGUMENT = 'state'
JUPYTERLAB_MAIN_PAGE = '/lab?'
# pylint: disable=W0223
class OAuth2Callback(BaseCloudOdahuflowHandler):
"""
Oauth2 callback handler
"""
@decorate_handler_for_exception
def get(self):
"""
Retrieve and save the id token
:return: None
"""
# pylint: disable=E1120
if self.get_argument(OAUTH_STATE_ARGUMENT) != self.get_cookie(ODAHUFLOW_OAUTH_STATE_COOKIE_NAME):
raise ValueError('State parameters from application and authorization server are different.')
code = self.get_argument('code', strip=True)
target_url, _ = build_oauth_url()
issue_token_url = get_oauth_token_issuer_url(target_url)
if not issue_token_url:
raise Exception(f'Can not get URL for issuing long-life token from {target_url}')
login_result = get_id_token(code, issue_token_url, build_redirect_url())
if not login_result:
raise Exception(f'Failed to get long-life token from {issue_token_url}')
self.set_cookie(ODAHUFLOW_OAUTH_TOKEN_COOKIE_NAME, login_result.id_token)
self.redirect(url_join(config.JUPYTER_REDIRECT_URL, JUPYTERLAB_MAIN_PAGE))
# pylint: disable=W0223
class OAuth2Info(BaseCloudOdahuflowHandler):
"""
Oauth2 info handler
"""
@decorate_handler_for_exception
def get(self):
"""
Builds and returns oauth url of authorization server
:return: None
"""
oauth_url, state = build_oauth_url()
self.set_cookie(ODAHUFLOW_OAUTH_STATE_COOKIE_NAME, state)
self.finish(oauth_url)
|
[
"odahuflow.jupyterlab.handlers.base.build_redirect_url",
"odahuflow.jupyterlab.handlers.helper.url_join",
"odahuflow.sdk.clients.oauth_handler.get_oauth_token_issuer_url",
"odahuflow.jupyterlab.handlers.base.build_oauth_url"
] |
[((1733, 1750), 'odahuflow.jupyterlab.handlers.base.build_oauth_url', 'build_oauth_url', ([], {}), '()\n', (1748, 1750), False, 'from odahuflow.jupyterlab.handlers.base import build_redirect_url, build_oauth_url\n'), ((1778, 1816), 'odahuflow.sdk.clients.oauth_handler.get_oauth_token_issuer_url', 'get_oauth_token_issuer_url', (['target_url'], {}), '(target_url)\n', (1804, 1816), False, 'from odahuflow.sdk.clients.oauth_handler import get_oauth_token_issuer_url, get_id_token\n'), ((2608, 2625), 'odahuflow.jupyterlab.handlers.base.build_oauth_url', 'build_oauth_url', ([], {}), '()\n', (2623, 2625), False, 'from odahuflow.jupyterlab.handlers.base import build_redirect_url, build_oauth_url\n'), ((2003, 2023), 'odahuflow.jupyterlab.handlers.base.build_redirect_url', 'build_redirect_url', ([], {}), '()\n', (2021, 2023), False, 'from odahuflow.jupyterlab.handlers.base import build_redirect_url, build_oauth_url\n'), ((2245, 2304), 'odahuflow.jupyterlab.handlers.helper.url_join', 'url_join', (['config.JUPYTER_REDIRECT_URL', 'JUPYTERLAB_MAIN_PAGE'], {}), '(config.JUPYTER_REDIRECT_URL, JUPYTERLAB_MAIN_PAGE)\n', (2253, 2304), False, 'from odahuflow.jupyterlab.handlers.helper import decorate_handler_for_exception, ODAHUFLOW_OAUTH_STATE_COOKIE_NAME, ODAHUFLOW_OAUTH_TOKEN_COOKIE_NAME, url_join\n')]
|
from Statistics.Mean import mean
from numpy import absolute, asarray
def var(data):
x = (absolute(asarray(data) - mean(data)))
y = x **2
z = mean(y)
return round(z, 13)
# variance is the square of mean deviation
|
[
"Statistics.Mean.mean",
"numpy.asarray"
] |
[((155, 162), 'Statistics.Mean.mean', 'mean', (['y'], {}), '(y)\n', (159, 162), False, 'from Statistics.Mean import mean\n'), ((104, 117), 'numpy.asarray', 'asarray', (['data'], {}), '(data)\n', (111, 117), False, 'from numpy import absolute, asarray\n'), ((120, 130), 'Statistics.Mean.mean', 'mean', (['data'], {}), '(data)\n', (124, 130), False, 'from Statistics.Mean import mean\n')]
|
"""
Matrix related utility functions
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import functools as _functools
import itertools as _itertools
import warnings as _warnings
import numpy as _np
import scipy.linalg as _spl
import scipy.optimize as _spo
import scipy.sparse as _sps
import scipy.sparse.linalg as _spsl
from pygsti.tools.basistools import change_basis
try:
from . import fastcalc as _fastcalc
except ImportError:
_fastcalc = None
#EXPM_DEFAULT_TOL = 1e-7
EXPM_DEFAULT_TOL = 2**-53 # Scipy default
def trace(m): # memory leak in numpy causes repeated trace calls to eat up all memory --TODO: Cython this
"""
The trace of a matrix, sum_i m[i,i].
A memory leak in some version of numpy can cause repeated calls to numpy's
trace function to eat up all available system memory, and this function
does not have this problem.
Parameters
----------
m : numpy array
the matrix (any object that can be double-indexed)
Returns
-------
element type of m
The trace of m.
"""
return sum([m[i, i] for i in range(m.shape[0])])
# with warnings.catch_warnings():
# warnings.filterwarnings('error')
# try:
# ret =
# except Warning:
# print "BAD trace from:\n"
# for i in range(M.shape[0]):
# print M[i,i]
# raise ValueError("STOP")
# return ret
def is_hermitian(mx, tol=1e-9):
"""
Test whether mx is a hermitian matrix.
Parameters
----------
mx : numpy array
Matrix to test.
tol : float, optional
Tolerance on absolute magitude of elements.
Returns
-------
bool
True if mx is hermitian, otherwise False.
"""
(m, n) = mx.shape
for i in range(m):
if abs(mx[i, i].imag) > tol: return False
for j in range(i + 1, n):
if abs(mx[i, j] - mx[j, i].conjugate()) > tol: return False
return True
def is_pos_def(mx, tol=1e-9):
"""
Test whether mx is a positive-definite matrix.
Parameters
----------
mx : numpy array
Matrix to test.
tol : float, optional
Tolerance on absolute magitude of elements.
Returns
-------
bool
True if mx is positive-semidefinite, otherwise False.
"""
evals = _np.linalg.eigvals(mx)
return all([ev > -tol for ev in evals])
def is_valid_density_mx(mx, tol=1e-9):
"""
Test whether mx is a valid density matrix (hermitian, positive-definite, and unit trace).
Parameters
----------
mx : numpy array
Matrix to test.
tol : float, optional
Tolerance on absolute magitude of elements.
Returns
-------
bool
True if mx is a valid density matrix, otherwise False.
"""
return is_hermitian(mx, tol) and is_pos_def(mx, tol) and abs(trace(mx) - 1.0) < tol
def frobeniusnorm(ar):
"""
Compute the frobenius norm of an array (or matrix),
sqrt( sum( each_element_of_a^2 ) )
Parameters
----------
ar : numpy array
What to compute the frobenius norm of. Note that ar can be any shape
or number of dimenions.
Returns
-------
float or complex
depending on the element type of ar.
"""
return _np.sqrt(_np.sum(ar**2))
def frobeniusnorm_squared(ar):
"""
Compute the squared frobenius norm of an array (or matrix),
sum( each_element_of_a^2 ) )
Parameters
----------
ar : numpy array
What to compute the squared frobenius norm of. Note that ar can be any
shape or number of dimenions.
Returns
-------
float or complex
depending on the element type of ar.
"""
return _np.sum(ar**2)
def nullspace(m, tol=1e-7):
"""
Compute the nullspace of a matrix.
Parameters
----------
m : numpy array
An matrix of shape (M,N) whose nullspace to compute.
tol : float , optional
Nullspace tolerance, used when comparing singular values with zero.
Returns
-------
An matrix of shape (M,K) whose columns contain nullspace basis vectors.
"""
_, s, vh = _np.linalg.svd(m)
rank = (s > tol).sum()
return vh[rank:].T.copy()
def nullspace_qr(m, tol=1e-7):
"""
Compute the nullspace of a matrix using the QR decomposition.
The QR decomposition is faster but less accurate than the SVD
used by :func:`nullspace`.
Parameters
----------
m : numpy array
An matrix of shape (M,N) whose nullspace to compute.
tol : float , optional
Nullspace tolerance, used when comparing diagonal values of R with zero.
Returns
-------
An matrix of shape (M,K) whose columns contain nullspace basis vectors.
"""
#if M,N = m.shape, and q,r,p = _spl.qr(...)
# q.shape == (N,N), r.shape = (N,M), p.shape = (M,)
q, r, _ = _spl.qr(m.T, mode='full', pivoting=True)
rank = (_np.abs(_np.diagonal(r)) > tol).sum()
#DEBUG: requires q,r,p = _sql.qr(...) above
#assert( _np.linalg.norm(_np.dot(q,r) - m.T[:,p]) < 1e-8) #check QR decomp
#print("Rank QR = ",rank)
#print('\n'.join(map(str,_np.abs(_np.diagonal(r)))))
#print("Ret = ", q[:,rank:].shape, " Q = ",q.shape, " R = ",r.shape)
return q[:, rank:]
def nice_nullspace(m, tol=1e-7):
"""
Computes the nullspace of a matrix, and tries to return a "nice" basis for it.
Columns of the returned value (a basis for the nullspace) each have a maximum
absolute value of 1.0 and are chosen so as to align with the the original
matrix's basis as much as possible (the basis is found by projecting each
original basis vector onto an arbitrariliy-found nullspace and keeping only
a set of linearly independent projections).
Parameters
----------
m : numpy array
An matrix of shape (M,N) whose nullspace to compute.
tol : float , optional
Nullspace tolerance, used when comparing diagonal values of R with zero.
Returns
-------
An matrix of shape (M,K) whose columns contain nullspace basis vectors.
"""
nullsp = nullspace(m, tol)
nullsp_projector = _np.dot(nullsp, nullsp.conj().T)
keepers = []; current_rank = 0
for i in range(nullsp_projector.shape[1]): # same as mx.shape[1]
rank = _np.linalg.matrix_rank(nullsp_projector[:, 0:i + 1], tol=tol)
if rank > current_rank:
keepers.append(i)
current_rank = rank
ret = _np.take(nullsp_projector, keepers, axis=1)
for j in range(ret.shape[1]): # normalize columns so largest element is +1.0
mx = abs(max(ret[:, j]))
if mx > 1e-6: ret[:, j] /= mx
return ret
def normalize_columns(m, return_norms=False, ord=None):
"""
Normalizes the columns of a matrix.
Parameters
----------
m : numpy.ndarray or scipy sparse matrix
The matrix.
return_norms : bool, optional
If `True`, also return a 1D array containing the norms
of the columns (before they were normalized).
ord : int, optional
The order of the norm. See :function:`numpy.linalg.norm`.
Returns
-------
normalized_m : numpy.ndarray
The matrix after columns are normalized
column_norms : numpy.ndarray
Only returned when `return_norms=True`, a 1-dimensional array
of the pre-normalization norm of each column.
"""
norms = column_norms(m, ord)
norms[norms == 0.0] = 1.0 # avoid division of zero-column by zero
normalized_m = scale_columns(m, 1 / norms)
return (normalized_m, norms) if return_norms else normalized_m
def column_norms(m, ord=None):
"""
Compute the norms of the columns of a matrix.
Parameters
----------
m : numpy.ndarray or scipy sparse matrix
The matrix.
ord : int, optional
The order of the norm. See :function:`numpy.linalg.norm`.
Returns
-------
numpy.ndarray
A 1-dimensional array of the column norms (length is number of columns of `m`).
"""
if _sps.issparse(m):
#this could be done more efficiently, e.g. by converting to csc and taking column norms directly
norms = _np.array([_np.linalg.norm(m[:, j].todense(), ord=ord) for j in range(m.shape[1])])
else:
norms = _np.array([_np.linalg.norm(m[:, j], ord=ord) for j in range(m.shape[1])])
return norms
def scale_columns(m, scale_values):
"""
Scale each column of a matrix by a given value.
Usually used for normalization purposes, when the
matrix columns represent vectors.
Parameters
----------
m : numpy.ndarray or scipy sparse matrix
The matrix.
scale_values : numpy.ndarray
A 1-dimensional array of scale values, one per
column of `m`.
Returns
-------
numpy.ndarray or scipy sparse matrix
A copy of `m` with scaled columns, possibly with different sparsity structure.
"""
if _sps.issparse(m):
assert(len(scale_values) == m.shape[1])
m_csc = _sps.csc_matrix(m)
for j, scale in enumerate(scale_values):
m_csc.data[m_csc.indptr[j]:m_csc.indptr[j + 1]] *= scale
return m_csc
else:
return m * scale_values[None, :]
def columns_are_orthogonal(m, tol=1e-7):
"""
Checks whether a matrix contains orthogonal columns.
The columns do not need to be normalized. In the
complex case, two vectors v and w are considered orthogonal
if `dot(v.conj(), w) == 0`.
Parameters
----------
m : numpy.ndarray
The matrix to check.
tol : float, optional
Tolerance for checking whether dot products are zero.
Returns
-------
bool
"""
if m.size == 0: return True # boundary case
check = _np.dot(m.conj().T, m)
check[_np.diag_indices_from(check)] = 0.0
return bool(_np.linalg.norm(check) / check.size < tol)
def columns_are_orthonormal(m, tol=1e-7):
"""
Checks whether a matrix contains orthogonal columns.
The columns do not need to be normalized. In the
complex case, two vectors v and w are considered orthogonal
if `dot(v.conj(), w) == 0`.
Parameters
----------
m : numpy.ndarray
The matrix to check.
tol : float, optional
Tolerance for checking whether dot products are zero.
Returns
-------
bool
"""
if m.size == 0: return True # boundary case
check = _np.dot(m.conj().T, m)
return bool(_np.allclose(check, _np.identity(check.shape[0], 'd'), atol=tol))
def independent_columns(m, initial_independent_cols=None, tol=1e-7):
"""
Computes the indices of the linearly-independent columns in a matrix.
Optionally starts with a "base" matrix of independent columns, so that
the returned indices indicate the columns of `m` that are independent
of all the base columns and the other independent columns of `m`.
Parameters
----------
m : numpy.ndarray or scipy sparse matrix
The matrix.
initial_independent_cols : numpy.ndarray or scipy sparse matrix, optional
If not `None`, a matrix of known-to-be independent columns so to test the
columns of `m` with respect to (in addition to the already chosen independent
columns of `m`.
tol : float, optional
Tolerance threshold used to decide whether a singular value is nonzero
(it is if it's is greater than `tol`).
Returns
-------
list
A list of the independent-column indices of `m`.
"""
indep_cols = []
if not _sps.issparse(m):
running_indep_cols = initial_independent_cols.copy() \
if (initial_independent_cols is not None) else _np.empty((m.shape[0], 0), m.dtype)
num_indep_cols = running_indep_cols.shape[0]
for j in range(m.shape[1]):
trial = _np.concatenate((running_indep_cols, m[:, j]), axis=1)
if _np.linalg.matrix_rank(trial, tol=tol) == num_indep_cols + 1:
running_indep_cols = trial
indep_cols.append(j)
num_indep_cols += 1
else: # sparse case
from scipy.sparse.linalg.eigen.arpack.arpack import ArpackNoConvergence as _ArpackNoConvergence
running_indep_cols = initial_independent_cols.copy() \
if (initial_independent_cols is not None) else _sps.csc_matrix((m.shape[0], 0), dtype=m.dtype)
num_indep_cols = running_indep_cols.shape[0]
for j in range(m.shape[1]):
trial = _sps.hstack((running_indep_cols, m[:, j]))
try:
lowest_sval = _spsl.svds(trial, k=1, which="SM", return_singular_vectors=False)
except _ArpackNoConvergence:
lowest_sval = 0 # assume lack of convergence means smallest singular value was too small (?)
if lowest_sval > tol: # trial fogi dirs still linearly independent (full rank)
running_indep_cols = trial
indep_cols.append(j)
# else trial column made fogi dirs linearly dependent and so don't tally indep column
return indep_cols
def pinv_of_matrix_with_orthogonal_columns(m):
""" TODO: docstring """
col_scaling = _np.sum(_np.abs(m)**2, axis=0)
m_with_scaled_cols = m.conj() * col_scaling[None, :]
return m_with_scaled_cols.T
def matrix_sign(m):
"""
The "sign" matrix of `m`
Parameters
----------
m : numpy.ndarray
the matrix.
Returns
-------
numpy.ndarray
"""
#Notes: sign(m) defined s.t. eigvecs of sign(m) are evecs of m
# and evals of sign(m) are +/-1 or 0 based on sign of eigenvalues of m
#Using the extremely numerically stable (but expensive) Schur method
# see http://www.maths.manchester.ac.uk/~higham/fm/OT104HighamChapter5.pdf
N = m.shape[0]; assert(m.shape == (N, N)), "m must be square!"
T, Z = _spl.schur(m, 'complex') # m = Z T Z^H where Z is unitary and T is upper-triangular
U = _np.zeros(T.shape, 'complex') # will be sign(T), which is easy to compute
# (U is also upper triangular), and then sign(m) = Z U Z^H
# diagonals are easy
U[_np.diag_indices_from(U)] = _np.sign(_np.diagonal(T))
#Off diagonals: use U^2 = I or TU = UT
# Note: Tij = Uij = 0 when i > j and i==j easy so just consider i<j case
# 0 = sum_k Uik Ukj = (i!=j b/c off-diag)
# FUTURE: speed this up by using np.dot instead of sums below
for j in range(1, N):
for i in range(j - 1, -1, -1):
S = U[i, i] + U[j, j]
if _np.isclose(S, 0): # then use TU = UT
if _np.isclose(T[i, i] - T[j, j], 0): # then just set to zero
U[i, j] = 0.0 # TODO: check correctness of this case
else:
U[i, j] = T[i, j] * (U[i, i] - U[j, j]) / (T[i, i] - T[j, j]) + \
sum([U[i, k] * T[k, j] - T[i, k] * U[k, j] for k in range(i + 1, j)]) \
/ (T[i, i] - T[j, j])
else: # use U^2 = I
U[i, j] = - sum([U[i, k] * U[k, j] for k in range(i + 1, j)]) / S
return _np.dot(Z, _np.dot(U, _np.conjugate(Z.T)))
#Quick & dirty - not always stable:
#U,_,Vt = _np.linalg.svd(M)
#return _np.dot(U,Vt)
def print_mx(mx, width=9, prec=4, withbrackets=False):
"""
Print matrix in pretty format.
Will print real or complex matrices with a desired precision and
"cell" width.
Parameters
----------
mx : numpy array
the matrix (2-D array) to print.
width : int, opitonal
the width (in characters) of each printed element
prec : int optional
the precision (in characters) of each printed element
withbrackets : bool, optional
whether to print brackets and commas to make the result
something that Python can read back in.
Returns
-------
None
"""
print(mx_to_string(mx, width, prec, withbrackets))
def mx_to_string(m, width=9, prec=4, withbrackets=False):
"""
Generate a "pretty-format" string for a matrix.
Will generate strings for real or complex matrices with a desired
precision and "cell" width.
Parameters
----------
m : numpy.ndarray
array to print.
width : int, opitonal
the width (in characters) of each converted element
prec : int optional
the precision (in characters) of each converted element
withbrackets : bool, optional
whether to print brackets and commas to make the result
something that Python can read back in.
Returns
-------
string
matrix m as a pretty formated string.
"""
if m.size == 0: return ""
s = ""; tol = 10**(-prec)
if _np.max(abs(_np.imag(m))) > tol:
return mx_to_string_complex(m, width, width, prec)
if len(m.shape) == 1: m = _np.array(m)[None, :] # so it works w/vectors too
if withbrackets: s += "["
for i in range(m.shape[0]):
if withbrackets: s += " [" if i > 0 else "["
for j in range(m.shape[1]):
if abs(m[i, j]) < tol: s += '{0: {w}.0f}'.format(0, w=width)
else: s += '{0: {w}.{p}f}'.format(m[i, j].real, w=width, p=prec)
if withbrackets and j + 1 < m.shape[1]: s += ","
if withbrackets: s += "]," if i + 1 < m.shape[0] else "]]"
s += "\n"
return s
def mx_to_string_complex(m, real_width=9, im_width=9, prec=4):
"""
Generate a "pretty-format" string for a complex-valued matrix.
Parameters
----------
m : numpy array
array to format.
real_width : int, opitonal
the width (in characters) of the real part of each element.
im_width : int, opitonal
the width (in characters) of the imaginary part of each element.
prec : int optional
the precision (in characters) of each element's real and imaginary parts.
Returns
-------
string
matrix m as a pretty formated string.
"""
if len(m.shape) == 1: m = m[None, :] # so it works w/vectors too
s = ""; tol = 10**(-prec)
for i in range(m.shape[0]):
for j in range(m.shape[1]):
if abs(m[i, j].real) < tol: s += "{0: {w}.0f}".format(0, w=real_width)
else: s += "{0: {w}.{p}f}".format(m[i, j].real, w=real_width, p=prec)
if abs(m[i, j].imag) < tol: s += "{0: >+{w}.0f}j".format(0, w=im_width)
else: s += "{0: >+{w}.{p}f}j".format(m[i, j].imag, w=im_width, p=prec)
s += "\n"
return s
def unitary_superoperator_matrix_log(m, mx_basis):
"""
Construct the logarithm of superoperator matrix `m`.
This function assumes that `m` acts as a unitary on density-matrix space,
(`m: rho -> U rho Udagger`) so that log(m) can be written as the action
by Hamiltonian `H`:
`log(m): rho -> -i[H,rho]`.
Parameters
----------
m : numpy array
The superoperator matrix whose logarithm is taken
mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
Returns
-------
numpy array
A matrix `logM`, of the same shape as `m`, such that `m = exp(logM)`
and `logM` can be written as the action `rho -> -i[H,rho]`.
"""
from . import lindbladtools as _lt # (would create circular imports if at top)
from . import optools as _ot # (would create circular imports if at top)
M_std = change_basis(m, mx_basis, "std")
evals = _np.linalg.eigvals(M_std)
assert(_np.allclose(_np.abs(evals), 1.0)) # simple but technically incomplete check for a unitary superop
# (e.g. could be anti-unitary: diag(1, -1, -1, -1))
U = _ot.process_mx_to_unitary(M_std)
H = _spl.logm(U) / -1j # U = exp(-iH)
logM_std = _lt.hamiltonian_to_lindbladian(H) # rho --> -i[H, rho] * sqrt(d)/2
logM = change_basis(logM_std * (2.0 / _np.sqrt(H.shape[0])), "std", mx_basis)
assert(_np.linalg.norm(_spl.expm(logM) - m) < 1e-8) # expensive b/c of expm - could comment for performance
return logM
def near_identity_matrix_log(m, tol=1e-8):
"""
Construct the logarithm of superoperator matrix `m` that is near the identity.
If `m` is real, the resulting logarithm will be real.
Parameters
----------
m : numpy array
The superoperator matrix whose logarithm is taken
tol : float, optional
The tolerance used when testing for zero imaginary parts.
Returns
-------
numpy array
An matrix `logM`, of the same shape as `m`, such that `m = exp(logM)`
and `logM` is real when `m` is real.
"""
# A near-identity matrix should have a unique logarithm, and it should be
# real if the original matrix is real
M_is_real = bool(_np.linalg.norm(m.imag) < tol)
logM = _spl.logm(m)
if M_is_real:
assert(_np.linalg.norm(logM.imag) < tol), \
"Failed to construct a real logarithm! " \
+ "This is probably because m is not near the identity.\n" \
+ "Its eigenvalues are: " + str(_np.linalg.eigvals(m))
logM = logM.real
return logM
def approximate_matrix_log(m, target_logm, target_weight=10.0, tol=1e-6):
"""
Construct an approximate logarithm of superoperator matrix `m` that is real and near the `target_logm`.
The equation `m = exp( logM )` is allowed to become inexact in order to make
`logM` close to `target_logm`. In particular, the objective function that is
minimized is (where `||` indicates the 2-norm):
`|exp(logM) - m|_1 + target_weight * ||logM - target_logm||^2`
Parameters
----------
m : numpy array
The superoperator matrix whose logarithm is taken
target_logm : numpy array
The target logarithm
target_weight : float
A weighting factor used to blance the exactness-of-log term
with the closeness-to-target term in the optimized objective
function. This value multiplies the latter term.
tol : float, optional
Optimzer tolerance.
Returns
-------
logM : numpy array
An matrix of the same shape as `m`.
"""
assert(_np.linalg.norm(m.imag) < 1e-8), "Argument `m` must be a *real* matrix!"
mx_shape = m.shape
def _objective(flat_logm):
logM = flat_logm.reshape(mx_shape)
testM = _spl.expm(logM)
ret = target_weight * _np.linalg.norm(logM - target_logm)**2 + \
_np.linalg.norm(testM.flatten() - m.flatten(), 1)
#print("DEBUG: ",ret)
return ret
#Alt objective1: puts L1 on target term
#return _np.linalg.norm(testM-m)**2 + target_weight*_np.linalg.norm(
# logM.flatten() - target_logm.flatten(), 1)
#Alt objective2: all L2 terms (ridge regression)
#return target_weight*_np.linalg.norm(logM-target_logm)**2 + \
# _np.linalg.norm(testM - m)**2
#from .. import optimize as _opt
#print_obj_func = _opt.create_obj_func_printer(_objective) #only ever prints to stdout!
print_obj_func = None
logM = _np.real(real_matrix_log(m, action_if_imaginary="ignore")) # just drop any imaginary part
initial_flat_logM = logM.flatten() # + 0.1*target_logm.flatten()
# Note: adding some of target_logm doesn't seem to help; and hurts in easy cases
if _objective(initial_flat_logM) > 1e-16: # otherwise initial logM is fine!
#print("Initial objective fn val = ",_objective(initial_flat_logM))
#print("Initial inexactness = ",_np.linalg.norm(_spl.expm(logM)-m),
# _np.linalg.norm(_spl.expm(logM).flatten()-m.flatten(), 1),
# _np.linalg.norm(logM-target_logm)**2)
solution = _spo.minimize(_objective, initial_flat_logM, options={'maxiter': 1000},
method='L-BFGS-B', callback=print_obj_func, tol=tol)
logM = solution.x.reshape(mx_shape)
#print("Final objective fn val = ",_objective(solution.x))
#print("Final inexactness = ",_np.linalg.norm(_spl.expm(logM)-m),
# _np.linalg.norm(_spl.expm(logM).flatten()-m.flatten(), 1),
# _np.linalg.norm(logM-target_logm)**2)
return logM
def real_matrix_log(m, action_if_imaginary="raise", tol=1e-8):
"""
Construct a *real* logarithm of real matrix `m`.
This is possible when negative eigenvalues of `m` come in pairs, so
that they can be viewed as complex conjugate pairs.
Parameters
----------
m : numpy array
The matrix to take the logarithm of
action_if_imaginary : {"raise","warn","ignore"}, optional
What action should be taken if a real-valued logarithm cannot be found.
"raise" raises a ValueError, "warn" issues a warning, and "ignore"
ignores the condition and simply returns the complex-valued result.
tol : float, optional
An internal tolerance used when testing for equivalence and zero
imaginary parts (real-ness).
Returns
-------
logM : numpy array
An matrix `logM`, of the same shape as `m`, such that `m = exp(logM)`
"""
assert(_np.linalg.norm(_np.imag(m)) < tol), "real_matrix_log must be passed a *real* matrix!"
evals, U = _np.linalg.eig(m)
U = U.astype("complex")
used_indices = set()
neg_real_pairs_real_evecs = []
neg_real_pairs_conj_evecs = []
unpaired_indices = []
for i, ev in enumerate(evals):
if i in used_indices: continue
used_indices.add(i)
if abs(_np.imag(ev)) < tol and _np.real(ev) < 0:
evec1 = U[:, i]
if _np.linalg.norm(_np.imag(evec1)) < tol:
# evec1 is real, so look for ev2 corresponding to another real evec
for j, ev2 in enumerate(evals[i + 1:], start=i + 1):
if abs(ev - ev2) < tol and _np.linalg.norm(_np.imag(U[:, j])) < tol:
used_indices.add(j)
neg_real_pairs_real_evecs.append((i, j)); break
else: unpaired_indices.append(i)
else:
# evec1 is complex, so look for ev2 corresponding to the conjugate of evec1
evec1C = evec1.conjugate()
for j, ev2 in enumerate(evals[i + 1:], start=i + 1):
if abs(ev - ev2) < tol and _np.linalg.norm(evec1C - U[:, j]) < tol:
used_indices.add(j)
neg_real_pairs_conj_evecs.append((i, j)); break
else: unpaired_indices.append(i)
log_evals = _np.log(evals.astype("complex"))
# astype guards against case all evals are real but some are negative
#DEBUG
#print("DB: evals = ",evals)
#print("DB: log_evals:",log_evals)
#for i,ev in enumerate(log_evals):
# print(i,": ",ev, ",".join([str(j) for j in range(U.shape[0]) if abs(U[j,i]) > 0.05]))
#print("DB: neg_real_pairs_real_evecs = ",neg_real_pairs_real_evecs)
#print("DB: neg_real_pairs_conj_evecs = ",neg_real_pairs_conj_evecs)
#print("DB: evec[5] = ",mx_to_string(U[:,5]))
#print("DB: evec[6] = ",mx_to_string(U[:,6]))
for (i, j) in neg_real_pairs_real_evecs: # need to adjust evecs as well
log_evals[i] = _np.log(-evals[i]) + 1j * _np.pi
log_evals[j] = log_evals[i].conjugate()
U[:, i] = (U[:, i] + 1j * U[:, j]) / _np.sqrt(2)
U[:, j] = U[:, i].conjugate()
for (i, j) in neg_real_pairs_conj_evecs: # evecs already conjugates of each other
log_evals[i] = _np.log(-evals[i].real) + 1j * _np.pi
log_evals[j] = log_evals[i].conjugate()
#Note: if *don't* conjugate j-th, then this picks *consistent* branch cut (what scipy would do), which
# results, in general, in a complex logarithm BUT one which seems more intuitive (?) - at least permits
# expected angle extraction, etc.
logM = _np.dot(U, _np.dot(_np.diag(log_evals), _np.linalg.inv(U)))
#if there are unpaired negative real eigenvalues, the logarithm might be imaginary
mayBeImaginary = bool(len(unpaired_indices) > 0)
imMag = _np.linalg.norm(_np.imag(logM))
if mayBeImaginary and imMag > tol:
if action_if_imaginary == "raise":
raise ValueError("Cannot construct a real log: unpaired negative"
+ " real eigenvalues: %s" % [evals[i] for i in unpaired_indices])
#+ "\nDEBUG m = \n%s" % m + "\nDEBUG evals = %s" % evals)
elif action_if_imaginary == "warn":
_warnings.warn("Cannot construct a real log: unpaired negative"
+ " real eigenvalues: %s" % [evals[i] for i in unpaired_indices])
elif action_if_imaginary == "ignore":
pass
else:
assert(False), "Invalid 'action_if_imaginary' argument: %s" % action_if_imaginary
else:
assert(imMag <= tol), "real_matrix_log failed to construct a real logarithm!"
logM = _np.real(logM)
return logM
## ------------------------ Erik : Matrix tools that Tim has moved here -----------
from scipy.linalg import sqrtm as _sqrtm
import itertools as _ittls
def column_basis_vector(i, dim):
"""
Returns the ith standard basis vector in dimension dim.
Parameters
----------
i : int
Basis vector index.
dim : int
Vector dimension.
Returns
-------
numpy.ndarray
An array of shape `(dim, 1)` that is all zeros except for
its `i`-th element, which equals 1.
"""
output = _np.zeros([dim, 1], float)
output[i] = 1.
return output
def vec(matrix_in):
"""
Stacks the columns of a matrix to return a vector
Parameters
----------
matrix_in : numpy.ndarray
Returns
-------
numpy.ndarray
"""
return [b for a in _np.transpose(matrix_in) for b in a]
def unvec(vector_in):
"""
Slices a vector into the columns of a matrix.
Parameters
----------
vector_in : numpy.ndarray
Returns
-------
numpy.ndarray
"""
dim = int(_np.sqrt(len(vector_in)))
return _np.transpose(_np.array(list(
zip(*[_ittls.chain(vector_in,
_ittls.repeat(None, dim - 1))] * dim))))
def norm1(m):
"""
Returns the 1 norm of a matrix
Parameters
----------
m : numpy.ndarray
The matrix.
Returns
-------
numpy.ndarray
"""
return float(_np.real(_np.trace(_sqrtm(_np.dot(m.conj().T, m)))))
def random_hermitian(dim):
"""
Generates a random Hermitian matrix
Parameters
----------
dim : int
the matrix dimensinon.
Returns
-------
numpy.ndarray
"""
my_norm = 0.
while my_norm < 0.5:
dim = int(dim)
a = _np.random.random(size=[dim, dim])
b = _np.random.random(size=[dim, dim])
c = a + 1.j * b + (a + 1.j * b).conj().T
my_norm = norm1(c)
return c / my_norm
def norm1to1(operator, num_samples=10000, mx_basis="gm", return_list=False):
"""
The Hermitian 1-to-1 norm of a superoperator represented in the standard basis.
This is calculated via Monte-Carlo sampling. The definition of Hermitian 1-to-1
norm can be found in arxiv:1109.6887.
Parameters
----------
operator : numpy.ndarray
The operator matrix to take the norm of.
num_samples : int, optional
Number of Monte-Carlo samples.
mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis
The basis of `operator`.
return_list : bool, optional
Whether the entire list of sampled values is returned or just the maximum.
Returns
-------
float or list
Depends on the value of `return_list`.
"""
std_operator = change_basis(operator, mx_basis, 'std')
rand_dim = int(_np.sqrt(float(len(std_operator))))
vals = [norm1(unvec(_np.dot(std_operator, vec(random_hermitian(rand_dim)))))
for n in range(num_samples)]
if return_list:
return vals
else:
return max(vals)
## ------------------------ General utility fns -----------------------------------
def complex_compare(a, b):
"""
Comparison function for complex numbers that compares real part, then imaginary part.
Parameters
----------
a : complex
b : complex
Returns
-------
-1 if a < b
0 if a == b
+1 if a > b
"""
if a.real < b.real: return -1
elif a.real > b.real: return 1
elif a.imag < b.imag: return -1
elif a.imag > b.imag: return 1
else: return 0
def prime_factors(n):
"""
GCD algorithm to produce prime factors of `n`
Parameters
----------
n : int
The number to factorize.
Returns
-------
list
The prime factors of `n`.
"""
i = 2; factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
def minweight_match(a, b, metricfn=None, return_pairs=True,
pass_indices_to_metricfn=False):
"""
Matches the elements of two vectors, `a` and `b` by minimizing the weight between them.
The weight is defined as the sum of `metricfn(x,y)` over all `(x,y)` pairs
(`x` in `a` and `y` in `b`).
Parameters
----------
a : list or numpy.ndarray
First 1D array to match elements between.
b : list or numpy.ndarray
Second 1D array to match elements between.
metricfn : function, optional
A function of two float parameters, `x` and `y`,which defines the cost
associated with matching `x` with `y`. If None, `abs(x-y)` is used.
return_pairs : bool, optional
If True, the matching is also returned.
pass_indices_to_metricfn : bool, optional
If True, the metric function is passed two *indices* into the `a` and
`b` arrays, respectively, instead of the values.
Returns
-------
weight_array : numpy.ndarray
The array of weights corresponding to the min-weight matching. The sum
of this array's elements is the minimized total weight.
pairs : list
Only returned when `return_pairs == True`, a list of 2-tuple pairs of
indices `(ix,iy)` giving the indices into `a` and `b` respectively of
each matched pair. The first (ix) indices will be in continuous
ascending order starting at zero.
"""
assert(len(a) == len(b))
if metricfn is None:
def metricfn(x, y): return abs(x - y)
D = len(a)
weightMx = _np.empty((D, D), 'd')
if pass_indices_to_metricfn:
for i, x in enumerate(a):
weightMx[i, :] = [metricfn(i, j) for j, y in enumerate(b)]
else:
for i, x in enumerate(a):
weightMx[i, :] = [metricfn(x, y) for j, y in enumerate(b)]
a_inds, b_inds = _spo.linear_sum_assignment(weightMx)
assert(_np.allclose(a_inds, range(D))), "linear_sum_assignment returned unexpected row indices!"
matched_pairs = list(zip(a_inds, b_inds))
min_weights = weightMx[a_inds, b_inds]
if return_pairs:
return min_weights, matched_pairs
else:
return min_weights
def minweight_match_realmxeigs(a, b, metricfn=None,
pass_indices_to_metricfn=False, eps=1e-9):
"""
Matches the elements of `a` and `b`, whose elements are assumed to either real or one-half of a conjugate pair.
Matching is performed by minimizing the weight between elements,
defined as the sum of `metricfn(x,y)` over all `(x,y)` pairs
(`x` in `a` and `y` in `b`). If straightforward matching fails
to preserve eigenvalue conjugacy relations, then real and conjugate-
pair eigenvalues are matched *separately* to ensure relations are
preserved (but this can result in a sub-optimal matching). A
ValueError is raised when the elements of `a` and `b` have incompatible
conjugacy structures (#'s of conjugate vs. real pairs).
Parameters
----------
a : numpy.ndarray
First 1D array to match.
b : numpy.ndarray
Second 1D array to match.
metricfn : function, optional
A function of two float parameters, `x` and `y`,which defines the cost
associated with matching `x` with `y`. If None, `abs(x-y)` is used.
pass_indices_to_metricfn : bool, optional
If True, the metric function is passed two *indices* into the `a` and
`b` arrays, respectively, instead of the values.
eps : float, optional
Tolerance when checking if eigenvalues are equal to each other.
Returns
-------
pairs : list
A list of 2-tuple pairs of indices `(ix,iy)` giving the indices into
`a` and `b` respectively of each matched pair.
"""
def check(pairs):
for i, (p0, p1) in enumerate(pairs):
for q0, q1 in pairs[i + 1:]:
a_conj = _np.isclose(a[p0], _np.conjugate(a[q0]))
b_conj = _np.isclose(b[p1], _np.conjugate(b[q1]))
if (abs(a[p0].imag) > 1e-6 and a_conj and not b_conj) or \
(abs(b[p1].imag) > 1e-6 and b_conj and not a_conj):
#print("DB: FALSE at: ",(p0,p1),(q0,q1),(a[p0],b[p1]),(a[q0],b[q1]),a_conj,b_conj)
return False
return True
#First attempt:
# See if matching everything at once satisfies conjugacy relations
# (if this works, this is the best, since it considers everything)
_, pairs = minweight_match(a, b, metricfn, True,
pass_indices_to_metricfn)
if check(pairs):
return pairs # we're done! that was easy
#Otherwise we fall back to considering real values and conj pairs separately
#identify real values and conjugate pairs
def split_real_conj(ar):
real_inds = []; conj_inds = []
for i, v in enumerate(ar):
if abs(v.imag) < eps: real_inds.append(i)
else:
for pair in conj_inds:
if i in pair: break # ok, we've already found v's pair
else:
for j, v2 in enumerate(ar[i + 1:], start=i + 1):
if _np.isclose(_np.conj(v), v2) and all([(j not in cpair) for cpair in conj_inds]):
conj_inds.append((i, j)); break
else:
raise ValueError("No conjugate pair found for %s" % str(v))
# choose 'a+ib' to be representative of pair
conj_rep_inds = [p0 if (ar[p0].imag > ar[p1].imag) else p1
for (p0, p1) in conj_inds]
return real_inds, conj_inds, conj_rep_inds
def add_conjpair(ar, conj_inds, conj_rep_inds, real_inds):
for ii, i in enumerate(real_inds):
for jj, j in enumerate(real_inds[ii + 1:], start=ii + 1):
if _np.isclose(ar[i], ar[j]):
conj_inds.append((i, j))
conj_rep_inds.append(i)
del real_inds[jj]; del real_inds[ii] # note: we know jj > ii
return True
return False
a_real, a_conj, a_reps = split_real_conj(a) # hold indices to a & b arrays
b_real, b_conj, b_reps = split_real_conj(b) # hold indices to a & b arrays
while len(a_conj) > len(b_conj): # try to add real-pair(s) to b_conj
if not add_conjpair(b, b_conj, b_reps, b_real):
raise ValueError(("Vectors `a` and `b` don't have the same conjugate-pair structure, "
" and so they cannot be matched in a way the preserves this structure."))
while len(b_conj) > len(a_conj): # try to add real-pair(s) to a_conj
if not add_conjpair(a, a_conj, a_reps, a_real):
raise ValueError(("Vectors `a` and `b` don't have the same conjugate-pair structure, "
" and so they cannot be matched in a way the preserves this structure."))
#Note: problem with this approach is that we might convert a
# real-pair -> conj-pair sub-optimally (i.e. there might be muliple
# such conversions and we just choose one at random).
_, pairs1 = minweight_match(a[a_real], b[b_real], metricfn, True,
pass_indices_to_metricfn)
_, pairs2 = minweight_match(a[a_reps], b[b_reps], metricfn, True,
pass_indices_to_metricfn)
#pair1 gives matching of real values, pairs2 gives that of conj pairs.
# Now just need to assemble a master pairs list to return.
pairs = []
for p0, p1 in pairs1: # p0 & p1 are indices into a_real & b_real
pairs.append((a_real[p0], b_real[p1]))
for p0, p1 in pairs2: # p0 & p1 are indices into a_reps & b_reps
pairs.append((a_reps[p0], b_reps[p1]))
a_other = a_conj[p0][0] if (a_conj[p0][0] != a_reps[p0]) else a_conj[p0][1]
b_other = b_conj[p1][0] if (b_conj[p1][0] != b_reps[p1]) else b_conj[p1][1]
pairs.append((a_other, b_other))
return sorted(pairs, key=lambda x: x[0]) # sort by a's index
def _fas(a, inds, rhs, add=False):
"""
Fancy Assignment, equivalent to `a[*inds] = rhs` but with
the elements of inds (allowed to be integers, slices, or
integer arrays) always specifing a generalize-slice along
the given dimension. This avoids some weird numpy indexing
rules that make using square brackets a pain.
"""
inds = tuple([slice(None) if (i is None) else i for i in inds])
#Mixes of ints and tuples are fine, and a single
# index-list index is fine too. The case we need to
# deal with is indexing a multi-dimensional array with
# one or more index-lists
if all([isinstance(i, (int, slice)) for i in inds]) or len(inds) == 1:
if add:
a[inds] += rhs # all integers or slices behave nicely
else:
a[inds] = rhs # all integers or slices behave nicely
else:
#convert each dimension's index to a list, take a product of
# these lists, and flatten the right hand side to get the
# proper assignment:
b = []
single_int_inds = [] # for Cython, a and rhs must have the same
# number of dims. This keeps track of single-ints
for ii, i in enumerate(inds):
if isinstance(i, (int, _np.int64)):
b.append(_np.array([i], _np.int64))
single_int_inds.append(ii)
elif isinstance(i, slice):
b.append(_np.array(list(range(*i.indices(a.shape[ii]))), _np.int64))
else:
b.append(_np.array(i, _np.int64))
nDims = len(b)
if nDims > 0 and all([len(x) > 0 for x in b]): # b/c a[()] just returns the entire array!
if _fastcalc is not None and not add:
#Note: we rarely/never use add=True, so don't bother implementing in Cython yet...
if len(single_int_inds) > 0:
remove_single_int_dims = [b[i][0] if (i in single_int_inds) else slice(None)
for i in range(nDims)] # e.g. [:,2,:] if index 1 is a single int
for ii in reversed(single_int_inds): del b[ii] # remove single-int els of b
av = a[tuple(remove_single_int_dims)] # a view into a
nDims -= len(single_int_inds) # for cython routines below
else:
av = a
#Note: we do not require these arrays to be contiguous
if nDims == 1:
_fastcalc.fast_fas_helper_1d(av, rhs, b[0])
elif nDims == 2:
_fastcalc.fast_fas_helper_2d(av, rhs, b[0], b[1])
elif nDims == 3:
_fastcalc.fast_fas_helper_3d(av, rhs, b[0], b[1], b[2])
else:
raise NotImplementedError("No fas helper for nDims=%d" % nDims)
else:
indx_tups = list(_itertools.product(*b))
inds = tuple(zip(*indx_tups)) # un-zips to one list per dim
if add:
a[inds] += rhs.flatten()
else:
a[inds] = rhs.flatten()
#OLD DEBUG: just a reference for building the C-implementation (this is very slow in python!)
##Alt: C-able impl
#indsPerDim = b # list of indices per dimension
#nDims = len(inds)
#b = [0]*nDims
#a_strides = []; stride = 1
#for s in reversed(a.shape):
# a_strides.insert(0,stride)
# stride *= s
#rhs_dims = rhs.shape
#
#a_indx = 0
#for i in range(nDims):
# a_indx += indsPerDim[i][0] * a_strides[i]
#rhs_indx = 0
#
#while(True):
#
# #a.flat[a_indx] = rhs.flat[rhs_indx]
# assert(_np.isclose(a.flat[a_indx],rhs.flat[rhs_indx]))
# rhs_indx += 1 # always increments by 1
#
# #increment b ~ itertools.product & update vec_index_noop = _np.dot(self.multipliers, b)
# for i in range(nDims-1,-1,-1):
# if b[i]+1 < rhs_dims[i]:
# a_indx -= indsPerDim[i][b[i]] * a_strides[i]
# b[i] += 1
# a_indx += indsPerDim[i][b[i]] * a_strides[i]
# break
# else:
# a_indx -= indsPerDim[i][b[i]] * a_strides[i]
# b[i] = 0
# a_indx += indsPerDim[i][b[i]] * a_strides[i]
# else:
# break # can't increment anything - break while(True) loop
return a
def _findx_shape(a, inds):
""" Returns the shape of a fancy-indexed array (`a[*inds].shape`) """
shape = []
for ii, N in enumerate(a.shape):
indx = inds[ii] if ii < len(inds) else None
if indx is None: shape.append(N)
elif isinstance(indx, slice):
shape.append(len(range(*indx.indices(N))))
else: # assume indx is an index list or array
shape.append(len(indx))
return shape
def _findx(a, inds, always_copy=False):
"""
Fancy Indexing, equivalent to `a[*inds].copy()` but with
the elements of inds (allowed to be integers, slices, or
integer arrays) always specifing a generalize-slice along
the given dimension. This avoids some weird numpy indexing
rules that make using square brackets a pain.
"""
inds = tuple([slice(None) if (i is None) else i for i in inds])
#Mixes of ints and tuples are fine, and a single
# index-list index is fine too. The case we need to
# deal with is indexing a multi-dimensional array with
# one or more index-lists
if all([isinstance(i, (int, slice)) for i in inds]) or len(inds) == 1:
return a[inds].copy() if always_copy else a[inds] # all integers or slices behave nicely
else:
#Need to copy to a new array
b = []; squeeze = []
for ii, i in enumerate(inds):
if isinstance(i, int):
b.append([i]); squeeze.append(ii) # squeeze ii-th dimension at end
elif isinstance(i, slice):
b.append(list(range(*i.indices(a.shape[ii]))))
else:
b.append(list(i))
a_inds_shape = [len(x) for x in b]
indx_tups = list(_itertools.product(*b))
if len(indx_tups) > 0: # b/c a[()] just returns the entire array!
inds = tuple(zip(*indx_tups)) # un-zips to one list per dim
a_inds = a[inds].copy() # a 1D array of flattened "fancy" a[inds]
a_inds.shape = a_inds_shape # reshape
else:
a_inds = _np.zeros(a_inds_shape, a.dtype) # has zero elements
assert(a_inds.size == 0)
a_inds = a_inds.squeeze(axis=tuple(squeeze))
return a_inds
def safe_dot(a, b):
"""
Performs dot(a,b) correctly when neither, either, or both arguments are sparse matrices.
Parameters
----------
a : numpy.ndarray or scipy.sparse matrix.
First matrix.
b : numpy.ndarray or scipy.sparse matrix.
Second matrix.
Returns
-------
numpy.ndarray or scipy.sparse matrix
"""
if _sps.issparse(a):
return a.dot(b) # sparseMx.dot works for both sparse and dense args
elif _sps.issparse(b):
# to return a sparse mx even when a is dense (asymmetric behavior):
# --> return _sps.csr_matrix(a).dot(b) # numpyMx.dot can't handle sparse argument
return _np.dot(a, b.toarray())
else:
return _np.dot(a, b)
def safe_real(a, inplace=False, check=False):
"""
Get the real-part of `a`, where `a` can be either a dense array or a sparse matrix.
Parameters
----------
a : numpy.ndarray or scipy.sparse matrix.
Array to take real part of.
inplace : bool, optional
Whether this operation should be done in-place.
check : bool, optional
If True, raise a `ValueError` if `a` has a nonzero imaginary part.
Returns
-------
numpy.ndarray or scipy.sparse matrix
"""
if check:
assert(safe_norm(a, 'imag') < 1e-6), "Check failed: taking real-part of matrix w/nonzero imaginary part"
if _sps.issparse(a):
if _sps.isspmatrix_csr(a):
if inplace:
ret = _sps.csr_matrix((_np.real(a.data), a.indices, a.indptr), shape=a.shape, dtype='d')
else: # copy
ret = _sps.csr_matrix((_np.real(a.data).copy(), a.indices.copy(),
a.indptr.copy()), shape=a.shape, dtype='d')
ret.eliminate_zeros()
return ret
else:
raise NotImplementedError("safe_real() doesn't work with %s matrices yet" % str(type(a)))
else:
return _np.real(a)
def safe_imag(a, inplace=False, check=False):
"""
Get the imaginary-part of `a`, where `a` can be either a dense array or a sparse matrix.
Parameters
----------
a : numpy.ndarray or scipy.sparse matrix.
Array to take imaginary part of.
inplace : bool, optional
Whether this operation should be done in-place.
check : bool, optional
If True, raise a `ValueError` if `a` has a nonzero real part.
Returns
-------
numpy.ndarray or scipy.sparse matrix
"""
if check:
assert(safe_norm(a, 'real') < 1e-6), "Check failed: taking imag-part of matrix w/nonzero real part"
if _sps.issparse(a):
if _sps.isspmatrix_csr(a):
if inplace:
ret = _sps.csr_matrix((_np.imag(a.data), a.indices, a.indptr), shape=a.shape, dtype='d')
else: # copy
ret = _sps.csr_matrix((_np.imag(a.data).copy(), a.indices.copy(),
a.indptr.copy()), shape=a.shape, dtype='d')
ret.eliminate_zeros()
return ret
else:
raise NotImplementedError("safe_real() doesn't work with %s matrices yet" % str(type(a)))
else:
return _np.imag(a)
def safe_norm(a, part=None):
"""
Get the frobenius norm of a matrix or vector, `a`, when it is either a dense array or a sparse matrix.
Parameters
----------
a : ndarray or scipy.sparse matrix
The matrix or vector to take the norm of.
part : {None,'real','imag'}
If not None, return the norm of the real or imaginary
part of `a`.
Returns
-------
float
"""
if part == 'real': takepart = _np.real
elif part == 'imag': takepart = _np.imag
else: takepart = lambda x: x
if _sps.issparse(a):
assert(_sps.isspmatrix_csr(a)), "Non-CSR sparse formats not implemented"
return _np.linalg.norm(takepart(a.data))
else:
return _np.linalg.norm(takepart(a))
# could also use _spsl.norm(A)
def safe_onenorm(a):
"""
Computes the 1-norm of the dense or sparse matrix `a`.
Parameters
----------
a : ndarray or sparse matrix
The matrix or vector to take the norm of.
Returns
-------
float
"""
if _sps.isspmatrix(a):
return sparse_onenorm(a)
else:
return _np.linalg.norm(a, 1)
def csr_sum_indices(csr_matrices):
"""
Precomputes the indices needed to sum a set of CSR sparse matrices.
Computes the index-arrays needed for use in :method:`csr_sum`,
along with the index pointer and column-indices arrays for constructing
a "template" CSR matrix to be the destination of `csr_sum`.
Parameters
----------
csr_matrices : list
The SciPy CSR matrices to be summed.
Returns
-------
ind_arrays : list
A list of numpy arrays giving the destination data-array indices
of each element of `csr_matrices`.
indptr, indices : numpy.ndarray
The row-pointer and column-indices arrays specifying the sparsity
structure of a the destination CSR matrix.
N : int
The dimension of the destination matrix (and of each member of
`csr_matrices`)
"""
if len(csr_matrices) == 0: return [], _np.empty(0, int), _np.empty(0, int), 0
N = csr_matrices[0].shape[0]
for mx in csr_matrices:
assert(mx.shape == (N, N)), "Matrices must have the same square shape!"
indptr = [0]
indices = []
csr_sum_array = [list() for mx in csr_matrices]
#FUTURE sort column indices
for iRow in range(N):
dataInds = {} # keys = column indices, values = data indices (for data in current row)
for iMx, mx in enumerate(csr_matrices):
for i in range(mx.indptr[iRow], mx.indptr[iRow + 1]):
iCol = mx.indices[i]
if iCol not in dataInds: # add a new element to final mx
indices.append(iCol)
dataInds[iCol] = len(indices) - 1 # marks the final data index for this column
csr_sum_array[iMx].append(dataInds[iCol])
indptr.append(len(indices))
#convert lists -> arrays
csr_sum_array = [_np.array(lst, _np.int64) for lst in csr_sum_array]
indptr = _np.array(indptr)
indices = _np.array(indices)
return csr_sum_array, indptr, indices, N
def csr_sum(data, coeffs, csr_mxs, csr_sum_indices):
"""
Accelerated summation of several CSR-format sparse matrices.
:method:`csr_sum_indices` precomputes the necessary indices for
summing directly into the data-array of a destination CSR sparse matrix.
If `data` is the data-array of matrix `D` (for "destination"), then this
method performs:
`D += sum_i( coeff[i] * csr_mxs[i] )`
Note that `D` is not returned; the sum is done internally into D's
data-array.
Parameters
----------
data : numpy.ndarray
The data-array of the destination CSR-matrix.
coeffs : iterable
The weight coefficients which multiply each summed matrix.
csr_mxs : iterable
A list of CSR matrix objects whose data-array is given by
`obj.data` (e.g. a SciPy CSR sparse matrix).
csr_sum_indices : list
A list of precomputed index arrays as returned by
:method:`csr_sum_indices`.
Returns
-------
None
"""
for coeff, mx, inds in zip(coeffs, csr_mxs, csr_sum_indices):
data[inds] += coeff * mx.data
def csr_sum_flat_indices(csr_matrices):
"""
Precomputes quantities allowing fast computation of linear combinations of CSR sparse matrices.
The returned quantities can later be used to quickly compute a linear
combination of the CSR sparse matrices `csr_matrices`.
Computes the index and data arrays needed for use in :method:`csr_sum_flat`,
along with the index pointer and column-indices arrays for constructing
a "template" CSR matrix to be the destination of `csr_sum_flat`.
Parameters
----------
csr_matrices : list
The SciPy CSR matrices to be summed.
Returns
-------
flat_dest_index_array : numpy array
A 1D array of one element per nonzero element in any of
`csr_matrices`, giving the destination-index of that element.
flat_csr_mx_data : numpy array
A 1D array of the same length as `flat_dest_index_array`, which
simply concatenates the data arrays of `csr_matrices`.
mx_nnz_indptr : numpy array
A 1D array of length `len(csr_matrices)+1` such that the data
for the i-th element of `csr_matrices` lie in the index-range of
mx_nnz_indptr[i] to mx_nnz_indptr[i+1]-1 of the flat arrays.
indptr, indices : numpy.ndarray
The row-pointer and column-indices arrays specifying the sparsity
structure of a the destination CSR matrix.
N : int
The dimension of the destination matrix (and of each member of
`csr_matrices`)
"""
csr_sum_array, indptr, indices, N = csr_sum_indices(csr_matrices)
if len(csr_sum_array) == 0:
return (_np.empty(0, int), _np.empty(0, 'd'), _np.zeros(1, int), indptr, indices, N)
flat_dest_index_array = _np.ascontiguousarray(_np.concatenate(csr_sum_array, axis=0), dtype=int)
flat_csr_mx_data = _np.ascontiguousarray(_np.concatenate([mx.data for mx in csr_matrices], axis=0), dtype=complex)
mx_nnz_indptr = _np.cumsum([0] + [mx.nnz for mx in csr_matrices], dtype=int)
return flat_dest_index_array, flat_csr_mx_data, mx_nnz_indptr, indptr, indices, N
if _fastcalc is None:
def csr_sum_flat(data, coeffs, flat_dest_index_array, flat_csr_mx_data, mx_nnz_indptr):
"""
Computation of the summation of several CSR-format sparse matrices.
:method:`csr_sum_flat_indices` precomputes the necessary indices for
summing directly into the data-array of a destination CSR sparse matrix.
If `data` is the data-array of matrix `D` (for "destination"), then this
method performs:
`D += sum_i( coeff[i] * csr_mxs[i] )`
Note that `D` is not returned; the sum is done internally into D's
data-array.
Parameters
----------
data : numpy.ndarray
The data-array of the destination CSR-matrix.
coeffs : ndarray
The weight coefficients which multiply each summed matrix.
flat_dest_index_array : ndarray
The index array generated by :function:`csr_sum_flat_indices`.
flat_csr_mx_data : ndarray
The data array generated by :function:`csr_sum_flat_indices`.
mx_nnz_indptr : ndarray
The number-of-nonzero-elements pointer array generated by
:function:`csr_sum_flat_indices`.
Returns
-------
None
"""
Nmxs = len(mx_nnz_indptr) - 1 # the number of CSR matrices
for iMx in range(Nmxs):
coeff = coeffs[iMx]
for i in range(mx_nnz_indptr[iMx], mx_nnz_indptr[iMx + 1]):
data[flat_dest_index_array[i]] += coeff * flat_csr_mx_data[i]
else:
def csr_sum_flat(data, coeffs, flat_dest_index_array, flat_csr_mx_data, mx_nnz_indptr):
"""
Computes the summation of several CSR-format sparse matrices.
:method:`csr_sum_flat_indices` precomputes the necessary indices for
summing directly into the data-array of a destination CSR sparse matrix.
If `data` is the data-array of matrix `D` (for "destination"), then this
method performs:
`D += sum_i( coeff[i] * csr_mxs[i] )`
Note that `D` is not returned; the sum is done internally into D's
data-array.
Parameters
----------
data : numpy.ndarray
The data-array of the destination CSR-matrix.
coeffs : ndarray
The weight coefficients which multiply each summed matrix.
flat_dest_index_array : ndarray
The index array generated by :function:`csr_sum_flat_indices`.
flat_csr_mx_data : ndarray
The data array generated by :function:`csr_sum_flat_indices`.
mx_nnz_indptr : ndarray
The number-of-nonzero-elements pointer array generated by
:function:`csr_sum_flat_indices`.
"""
coeffs_complex = _np.ascontiguousarray(coeffs, dtype=complex)
return _fastcalc.fast_csr_sum_flat(data, coeffs_complex, flat_dest_index_array, flat_csr_mx_data, mx_nnz_indptr)
def expm_multiply_prep(a, tol=EXPM_DEFAULT_TOL):
"""
Computes "prepared" meta-info about matrix `a`, to be used in `expm_multiply_fast`.
This includes a shifted version of `a`.
Parameters
----------
a : numpy.ndarray
the matrix that will belater exponentiated.
tol : float, optional
Tolerance used to within matrix exponentiation routines.
Returns
-------
tuple
A tuple of values to pass to `expm_multiply_fast`.
"""
if len(a.shape) != 2 or a.shape[0] != a.shape[1]:
raise ValueError('expected a to be like a square matrix')
assert(_sps.isspmatrix_csr(a)) # assuming this allows faster computations
n = a.shape[0]
n0 = 1 # always act exp(a) on *single* vectors
mu = _spsl._expm_multiply._trace(a) / float(n)
#ident = _spsl._expm_multiply._ident_like(a) #general case
if _fastcalc is None:
ident = _sps.identity(a.shape[0], dtype=a.dtype, format='csr') # CSR specific
a = a - mu * ident # SLOW!
else:
indptr = _np.empty(n + 1, _np.int64)
indices = _np.empty(a.data.shape[0] + n, _np.int64) # pessimistic (assume no diags exist)
data = _np.empty(a.data.shape[0] + n, a.dtype) # pessimistic (assume no diags exist)
nxt = _fastcalc.csr_subtract_identity(a.data,
_np.ascontiguousarray(a.indptr, _np.int64),
_np.ascontiguousarray(a.indices, _np.int64),
data, indptr, indices, -mu, n)
a = _sps.csr_matrix((data[0:nxt], indices[0:nxt], indptr), shape=(n, n))
#DB: CHECK: assert(_spsl.norm(A1 - A2) < 1e-6); a = A1
#exact_1_norm specific for CSR
A_1_norm = max(_np.sum(_np.abs(a.data[_np.where(a.indices == iCol)])) for iCol in range(n))
#A_1_norm = _spsl._expm_multiply._exact_1_norm(a) # general case
t = 1.0 # always
if t * A_1_norm == 0:
m_star, s = 0, 1
else:
ell = 2
norm_info = _spsl._expm_multiply.LazyOperatorNormInfo(t * a, A_1_norm=t * A_1_norm, ell=ell)
m_star, s = _spsl._expm_multiply._fragment_3_1(norm_info, n0, tol, ell=ell)
eta = _np.exp(t * mu / float(s))
assert(_sps.isspmatrix_csr(a))
return a, mu, m_star, s, eta
if _fastcalc is None:
def expm_multiply_fast(prep_a, v, tol=EXPM_DEFAULT_TOL):
"""
Multiplies `v` by an exponentiated matrix.
Parameters
----------
prep_a : tuple
A tuple of values from :function:`expm_multiply_prep` that
defines the matrix to be exponentiated and holds other pre-computed
quantities.
v : numpy.ndarray
Vector to multiply (take dot product with).
tol : float, optional
Tolerance used to within matrix exponentiation routines.
Returns
-------
numpy.ndarray
"""
A, mu, m_star, s, eta = prep_a
return _custom_expm_multiply_simple_core(
A, v, mu, m_star, s, tol, eta) # t == 1.0 always, `balance` not implemented so removed
else:
def expm_multiply_fast(prep_a, v, tol=EXPM_DEFAULT_TOL):
"""
Multiplies `v` by an exponentiated matrix.
Parameters
----------
prep_a : tuple
A tuple of values from :function:`expm_multiply_prep` that
defines the matrix to be exponentiated and holds other pre-computed
quantities.
v : numpy.ndarray
Vector to multiply (take dot product with).
tol : float, optional
Tolerance used to within matrix exponentiation routines.
Returns
-------
numpy.ndarray
"""
#Note: copy v for now since it's modified by simple_core fn
A, mu, m_star, s, eta = prep_a
indices = _np.array(A.indices, dtype=int) # convert to 64-bit ints if needed
indptr = _np.array(A.indptr, dtype=int)
return _fastcalc.custom_expm_multiply_simple_core(A.data, indptr, indices,
v.copy(), mu, m_star, s, tol, eta)
def _custom_expm_multiply_simple_core(a, b, mu, m_star, s, tol, eta): # t == 1.0 replaced below
"""
a helper function. Note that this (python) version works when a is a LinearOperator
as well as a SciPy CSR sparse matrix.
"""
#if balance:
# raise NotImplementedError
F = b
for i in range(s):
#if m_star > 0: #added
# c1 = _np.linalg.norm(b, _np.inf) #_exact_inf_norm(b)
for j in range(m_star):
coeff = 1.0 / float(s * (j + 1)) # t == 1.0
b = coeff * a.dot(b)
F = F + b
# if j % 3 == 0: #every == 3 #TODO: work on this
# c2 = _np.linalg.norm(b, _np.inf) #_exact_inf_norm(b)
# if c1 + c2 <= tol * _np.linalg.norm(F, _np.inf): #_exact_inf_norm(F)
# break
# c1 = c2
F = eta * F
b = F
return F
#From SciPy source, as a reference - above we assume A is a sparse csr matrix
# and B is a dense vector
#def _exact_inf_norm(A):
# # A compatibility function which should eventually disappear.
# if scipy.sparse.isspmatrix(A):
# return max(abs(A).sum(axis=1).flat)
# else:
# return np.linalg.norm(A, np.inf)
#
#
#def _exact_1_norm(A):
# # A compatibility function which should eventually disappear.
# if scipy.sparse.isspmatrix(A):
# return max(abs(A).sum(axis=0).flat)
# else:
# return np.linalg.norm(A, 1)
def expop_multiply_prep(op, a_1_norm=None, tol=EXPM_DEFAULT_TOL):
"""
Returns "prepared" meta-info about operation op, which is assumed to be traceless (so no shift is needed).
Used as input for use with `_custom_expm_multiply_simple_core` or fast C-reps.
Parameters
----------
op : scipy.sparse.linalg.LinearOperator
The operator to exponentiate.
a_1_norm : float, optional
The 1-norm (if computed separately) of `op`.
tol : float, optional
Tolerance used to within matrix exponentiation routines.
Returns
-------
tuple
A tuple of values to pass to `expm_multiply_fast`.
"""
assert(isinstance(op, _spsl.LinearOperator))
if len(op.shape) != 2 or op.shape[0] != op.shape[1]:
raise ValueError('expected op to have equal input and output dimensions')
# n = op.shape[0]
n0 = 1 # always act exp(op) on *single* vectors
mu = 0 # _spsl._expm_multiply._trace(A) / float(n)
#ASSUME op is *traceless*
#FUTURE: get exact_1_norm specific for our ops - now just use approximate
if a_1_norm is None:
a_1_norm = _spsl.onenormest(op)
#t = 1.0 # always, so t*<X> => just <X> below
if a_1_norm == 0:
m_star, s = 0, 1
else:
ell = 2
norm_info = _spsl._expm_multiply.LazyOperatorNormInfo(op, A_1_norm=a_1_norm, ell=ell)
m_star, s = _spsl._expm_multiply._fragment_3_1(norm_info, n0, tol, ell=ell)
eta = 1.0 # _np.exp(t*mu / float(s)) # b/c mu always == 0 (traceless assumption)
return mu, m_star, s, eta
def sparse_equal(a, b, atol=1e-8):
"""
Checks whether two Scipy sparse matrices are (almost) equal.
Parameters
----------
a : scipy.sparse matrix
First matrix.
b : scipy.sparse matrix
Second matrix.
atol : float, optional
The tolerance to use, passed to `numpy.allclose`, when comparing
the elements of `a` and `b`.
Returns
-------
bool
"""
if _np.array_equal(a.shape, b.shape) == 0:
return False
r1, c1 = a.nonzero()
r2, c2 = b.nonzero()
lidx1 = _np.ravel_multi_index((r1, c1), a.shape)
lidx2 = _np.ravel_multi_index((r2, c2), b.shape)
sidx1 = lidx1.argsort()
sidx2 = lidx2.argsort()
index_match = _np.array_equal(lidx1[sidx1], lidx2[sidx2])
if index_match == 0:
return False
else:
v1 = a.data
v2 = b.data
V1 = v1[sidx1]
V2 = v2[sidx2]
return _np.allclose(V1, V2, atol=atol)
def sparse_onenorm(a):
"""
Computes the 1-norm of the scipy sparse matrix `a`.
Parameters
----------
a : scipy sparse matrix
The matrix or vector to take the norm of.
Returns
-------
float
"""
return max(abs(a).sum(axis=0).flat)
# also == return _spsl.norm(a, ord=1) (comparable speed)
def ndarray_base(a, verbosity=0):
"""
Get the base memory object for numpy array `a`.
This is found by following `.base` until it comes up None.
Parameters
----------
a : numpy.ndarray
Array to get base of.
verbosity : int, optional
Print additional debugging information if this is > 0.
Returns
-------
numpy.ndarray
"""
if verbosity: print("ndarray_base debug:")
while a.base is not None:
if verbosity: print(" -> base = ", id(a.base))
a = a.base
if verbosity: print(" ==> ", id(a))
return a
def to_unitary(scaled_unitary):
"""
Compute the scaling factor required to turn a scalar multiple of a unitary matrix to a unitary matrix.
Parameters
----------
scaled_unitary : ndarray
A scaled unitary matrix
Returns
-------
scale : float
unitary : ndarray
Such that `scale * unitary == scaled_unitary`.
"""
scaled_identity = _np.dot(scaled_unitary, _np.conjugate(scaled_unitary.T))
scale = _np.sqrt(scaled_identity[0, 0])
assert(_np.allclose(scaled_identity / (scale**2), _np.identity(scaled_identity.shape[0], 'd'))), \
"Given `scaled_unitary` does not appear to be a scaled unitary matrix!"
return scale, (scaled_unitary / scale)
def sorted_eig(mx):
"""
Similar to `numpy.eig`, but returns sorted output.
In particular, the eigenvalues and vectors sorted by eigenvalue,
where sorting is done according to (real_part, imaginary_part) tuple.
Parameters
----------
mx : numpy.ndarray
Matrix to act on.
Returns
-------
eigenvalues : numpy.ndarray
eigenvectors : numpy.ndarray
"""
ev, U = _np.linalg.eig(mx)
sorted_evals = sorted(list(enumerate(ev)), key=lambda x: (x[1].real, x[1].imag))
sorted_ev = ev.copy()
sorted_U = U.copy()
for idest, (isrc, _) in enumerate(sorted_evals):
sorted_ev[idest] = ev[isrc]
sorted_U[:, idest] = U[:, isrc]
return sorted_ev, sorted_U
def compute_kite(eigenvalues):
"""
Computes the "kite" corresponding to a list of eigenvalues.
The kite is defined as a list of integers, each indicating that
there is a degnenerate block of that many eigenvalues within
`eigenvalues`. Thus the sum of the list values equals `len(eigenvalues)`.
Parameters
----------
eigenvalues : numpy.ndarray
A *sorted* array of eigenvalues.
Returns
-------
list
A list giving the multiplicity structure of `evals`.
"""
kite = []
blk = 0; last_ev = eigenvalues[0]
for ev in eigenvalues:
if _np.isclose(ev, last_ev):
blk += 1
else:
kite.append(blk)
blk = 1; last_ev = ev
kite.append(blk)
return kite
def find_zero_communtant_connection(u, u_inv, u0, u0_inv, kite):
"""
Find a matrix `R` such that u_inv R u0 is diagonal AND log(R) has no projection onto the commutant of G0.
More specifically, find a matrix `R` such that u_inv R u0 is diagonal
(so G = R G0 Rinv if G and G0 share the same eigenvalues and have eigenvectors u
and u0 respectively) AND log(R) has no (zero) projection onto the commutant of
G0 = u0 diag(evals) u0_inv.
Parameters
----------
u : numpy.ndarray
Usually the eigenvector matrix of a gate (G).
u_inv : numpy.ndarray
Inverse of `u`.
u0 : numpy.ndarray
Usually the eigenvector matrix of the corresponding target gate (G0).
u0_inv : numpy.ndarray
Inverse of `u0`.
kite : list
The kite structure of `u0`.
Returns
-------
numpy.ndarray
"""
#0. Let R be a matrix that maps G0 -> Gp, where Gp has evecs of G and evals of G0.
#1. Does R vanish on the commutant of G0? If so, we’re done.
#2. Define x = PROJ_COMMUTANT[ log(R) ], and X = exp(-x).
#3. Redefine R = X.R.
#4. GOTO 1.
# G0 = u0 * diag * u0_inv, G = u * diag * u_inv
D = project_onto_kite(_np.dot(u_inv, u0), kite)
R = _np.dot(u, _np.dot(D, u0_inv)) # Include D so R is as close to identity as possible
assert(_np.linalg.norm(R.imag) < 1e-8)
def project_onto_commutant(x):
a = _np.dot(u0_inv, _np.dot(x, u0))
a = project_onto_kite(a, kite)
return _np.dot(u0, _np.dot(a, u0_inv))
iter = 0; lastR = R
while iter < 100:
#Starting condition = u_inv * R * u0 is diagonal, so
# G' = R G0 Rinv where G' has the same spectrum as G0 but different eigenvecs (u vs u0)
assert(_np.linalg.norm(R.imag) < 1e-8)
test = _np.dot(u_inv, _np.dot(R, u0))
assert(_np.linalg.norm(project_onto_antikite(test, kite)) < 1e-8)
r = real_matrix_log(R)
assert(_np.linalg.norm(r.imag) < 1e-8), "log of real matrix should be real!"
r_on_comm = project_onto_commutant(r)
assert(_np.linalg.norm(r_on_comm.imag) < 1e-8), "projection to commutant should not make complex!"
oncomm_norm = _np.linalg.norm(r_on_comm)
#print("Iter %d: onkite-norm = %g lastdiff = %g" % (iter, oncomm_norm, _np.linalg.norm(R-lastR)))
# if r has desired form or we didn't really update R
if oncomm_norm < 1e-12 or (iter > 0 and _np.linalg.norm(R - lastR) < 1e-8):
break # STOP - converged!
X = _spl.expm(-r_on_comm)
assert(_np.linalg.norm(X.imag) < 1e-8)
lastR = R
R = _np.dot(R, X)
iter += 1
assert(_np.linalg.norm(R.imag) < 1e-8), "R should always be real!"
return R.real
def project_onto_kite(mx, kite):
"""
Project `mx` onto `kite`, so `mx` is zero everywhere except on the kite.
Parameters
----------
mx : numpy.ndarray
Matrix to project.
kite : list
A kite structure.
Returns
-------
numpy.ndarray
"""
#Kite is a list of block sizes, such that sum(kite) == dimension of `mx`
mx = mx.copy()
dim = mx.shape[0]
assert(dim == mx.shape[1]), "`mx` must be square!"
k0 = 0
for k in kite:
mx[k0:k0 + k, k0 + k:] = 0
mx[k0 + k:, k0:k0 + k] = 0
k0 += k
assert(k0 == dim), "Invalid kite %d-dimensional matrix: %s" % (dim, str(kite))
return mx
def project_onto_antikite(mx, kite):
"""
Project `mx` onto the complement of `kite`, so `mx` is zero everywhere *on* the kite.
Parameters
----------
mx : numpy.ndarray
Matrix to project.
kite : list
A kite structure.
Returns
-------
numpy.ndarray
"""
#Kite is a list of block sizes, such that sum(kite) == dimension of `mx`
mx = mx.copy()
dim = mx.shape[0]
assert(dim == mx.shape[1]), "`mx` must be square!"
k0 = 0
for k in kite:
mx[k0:k0 + k, k0:k0 + k] = 0
k0 += k
assert(k0 == dim), "Invalid kite %d-dimensional matrix: %s" % (dim, str(kite))
return mx
def remove_dependent_cols(mx, tol=1e-7):
"""
Removes the linearly dependent columns of a matrix.
Parameters
----------
mx : numpy.ndarray
The input matrix
Returns
-------
A linearly independent subset of the columns of `mx`.
"""
last_rank = 0; cols_to_remove = []
for j in range(mx.shape[1]):
rnk = _np.linalg.matrix_rank(mx[:, 0:j + 1], tol)
if rnk == last_rank:
cols_to_remove.append(j)
else:
last_rank = rnk
#print("Removing %d cols" % len(cols_to_remove))
return _np.delete(mx, cols_to_remove, axis=1)
def intersection_space(space1, space2, tol=1e-7, use_nice_nullspace=False):
"""
TODO: docstring
"""
VW = _np.concatenate((space1, -space2), axis=1)
nullsp = nice_nullspace(VW, tol) if use_nice_nullspace else nullspace(VW, tol)
#nullsp = _spl.null_space(VW, rcond=1e-3) # alternative
return _np.dot(space1, nullsp[0:space1.shape[1], :])
def union_space(space1, space2, tol=1e-7):
"""
TODO: docstring
"""
VW = _np.concatenate((space1, space2), axis=1)
return remove_dependent_cols(VW, tol)
#UNUSED
#def spectral_radius(x):
# if hasattr(x, 'ndim') and x.ndim == 2: # then interpret as a numpy array and take norm
# evals = _np.sort(_np.linalg.eigvals(x))
# return abs(evals[-1] - evals[0])
# else:
# return x
def jamiolkowski_angle(hamiltonian_mx):
"""
TODO: docstring
"""
Hmx = hamiltonian_mx
d = Hmx.shape[0]
I = _np.identity(d)
errmap = _np.kron(I, _spl.expm(1j * Hmx))
psi = _np.zeros(d**2) # will be a maximally entangled state
for i in range(d):
x = _np.zeros(d); x[i] = 1.0
xx = _np.kron(x, x)
psi += xx / _np.sqrt(d)
assert(_np.isclose(_np.dot(psi, psi), 1.0))
cos_theta = abs(_np.dot(psi.conj(), _np.dot(errmap, psi)))
return _np.real_if_close(_np.arccos(cos_theta))
#cos_squared_theta = entanglement_infidelity(expm(1j * Hmx), identity)
#return _np.arccos(_np.sqrt(cos_squared_theta))
def zvals_to_dense(self, zvals, superket=True):
"""
Construct the dense operator or superoperator representation of a computational basis state.
Parameters
----------
zvals : list or numpy.ndarray
The z-values, each 0 or 1, defining the computational basis state.
superket : bool, optional
If `True`, the super-ket representation of the state is returned. If `False`,
then the complex ket representation is returned.
Returns
-------
numpy.ndarray
"""
if superket:
factor_dim = 4
v0 = 1.0 / _np.sqrt(2) * _np.array((1, 0, 0, 1), 'd') # '0' qubit state as Pauli dmvec
v1 = 1.0 / _np.sqrt(2) * _np.array((1, 0, 0, -1), 'd') # '1' qubit state as Pauli dmvec
else:
factor_dim = 2
v0 = _np.array((1, 0), complex) # '0' qubit state as complex state vec
v1 = _np.array((0, 1), complex) # '1' qubit state as complex state vec
v = (v0, v1)
if _fastcalc is None: # do it the slow way using numpy
return _functools.reduce(_np.kron, [v[i] for i in zvals])
else:
fast_kron_array = _np.ascontiguousarray(
_np.empty((len(self._zvals), factor_dim), v0.dtype))
fast_kron_factordims = _np.ascontiguousarray(_np.array([factor_dim] * len(self._zvals), _np.int64))
for i, zi in enumerate(self._zvals):
fast_kron_array[i, :] = v[zi]
ret = _np.ascontiguousarray(_np.empty(factor_dim**len(self._zvals), v0.dtype))
if superket:
_fastcalc.fast_kron(ret, fast_kron_array, fast_kron_factordims)
else:
_fastcalc.fast_kron_complex(ret, fast_kron_array, fast_kron_factordims)
return ret
def int64_parity(x):
"""
Compute the partity of x.
Recursively divide a (64-bit) integer (x) into two equal
halves and take their XOR until only 1 bit is left.
Parameters
----------
x : int64
Returns
-------
int64
"""
x = (x & 0x00000000FFFFFFFF) ^ (x >> 32)
x = (x & 0x000000000000FFFF) ^ (x >> 16)
x = (x & 0x00000000000000FF) ^ (x >> 8)
x = (x & 0x000000000000000F) ^ (x >> 4)
x = (x & 0x0000000000000003) ^ (x >> 2)
x = (x & 0x0000000000000001) ^ (x >> 1)
return x & 1 # return the last bit (0 or 1)
def zvals_int64_to_dense(zvals_int, nqubits, outvec=None, trust_outvec_sparsity=False, abs_elval=None):
"""
Fills a dense array with the super-ket representation of a computational basis state.
Parameters
----------
zvals_int : int64
The array of (up to 64) z-values, encoded as the 0s and 1s in the binary representation
of this integer.
nqubits : int
The number of z-values (up to 64)
outvec : numpy.ndarray, optional
The output array, which must be a 1D array of length 4**nqubits or `None`, in
which case a new array is allocated.
trust_outvec_sparsity : bool, optional
When `True`, it is assumed that the provided `outvec` starts as all zeros
and so only non-zero elements of outvec need to be set.
abs_elval : float
the value `1 / (sqrt(2)**nqubits)`, which can be passed here so that
it doesn't need to be recomputed on every call to this function. If
`None`, then we just compute the value.
Returns
-------
numpy.ndarray
"""
if outvec is None:
outvec = _np.zeros(4**nqubits, 'd')
if abs_elval is None:
abs_elval = 1 / (_np.sqrt(2)**nqubits)
# when trust_outvec_sparsity is True, assume we only need to fill in the
# non-zero elements of outvec (i.e. that outvec is already zero wherever
# this vector is zero).
if not trust_outvec_sparsity:
outvec[:] = 0 # reset everything to zero
N = nqubits
# there are nQubits factors
# each factor (4-element, 1Q dmvec) has 2 zero elements and 2 nonzero ones
# loop is over all non-zero elements of the final outvec by looping over
# all the sets of *entirely* nonzero elements from the factors.
# Let the two possible nonzero elements of the k-th factor be represented
# by the k-th bit of `finds` below, which ranges from 0 to 2^nFactors-1
for finds in range(2**N):
#Create the final index (within outvec) corresponding to finds
# assume, like tensorprod, that factor ordering == kron ordering
# so outvec = kron( factor[0], factor[1], ... factor[N-1] ).
# Let factorDim[k] == 4**(N-1-k) be the stride associated with the k-th index
# Whenever finds[bit k] == 0 => finalIndx += 0*factorDim[k]
# finds[bit k] == 1 => finalIndx += 3*factorDim[k] (3 b/c factor's 2nd nonzero el is at index 3)
finalIndx = sum([3 * (4**(N - 1 - k)) for k in range(N) if bool(finds & (1 << k))])
#Determine the sign of this element (the element is either +/- (1/sqrt(2))^N )
# A minus sign is picked up whenever finds[bit k] == 1 (which means we're looking
# at the index=3 element of the factor vec) AND zvals_int[bit k] == 1
# (which means it's a [1 0 0 -1] state rather than a [1 0 0 1] state).
# Since we only care whether the number of minus signs is even or odd, we can
# BITWISE-AND finds with zvals_int (giving an integer whose binary-expansion's
# number of 1's == the number of minus signs) and compute the parity of this.
minus_sign = int64_parity(finds & zvals_int)
outvec[finalIndx] = -abs_elval if minus_sign else abs_elval
return outvec
|
[
"numpy.linalg.eigvals",
"numpy.sum",
"numpy.diag_indices_from",
"numpy.abs",
"scipy.sparse.issparse",
"numpy.empty",
"numpy.allclose",
"pygsti.tools.basistools.change_basis",
"numpy.imag",
"numpy.linalg.svd",
"numpy.linalg.norm",
"numpy.isclose",
"scipy.sparse.linalg._expm_multiply.LazyOperatorNormInfo",
"scipy.linalg.schur",
"numpy.diag",
"numpy.conjugate",
"scipy.sparse.isspmatrix",
"scipy.sparse.isspmatrix_csr",
"scipy.optimize.minimize",
"scipy.sparse.linalg.svds",
"numpy.transpose",
"numpy.identity",
"numpy.linalg.eig",
"numpy.cumsum",
"numpy.linalg.matrix_rank",
"numpy.ravel_multi_index",
"scipy.sparse.identity",
"numpy.real",
"scipy.sparse.linalg.onenormest",
"numpy.kron",
"itertools.product",
"numpy.arccos",
"numpy.diagonal",
"scipy.linalg.logm",
"numpy.conj",
"functools.reduce",
"scipy.linalg.qr",
"scipy.sparse.csr_matrix",
"numpy.linalg.inv",
"numpy.dot",
"numpy.delete",
"numpy.concatenate",
"scipy.optimize.linear_sum_assignment",
"itertools.repeat",
"scipy.linalg.expm",
"numpy.log",
"scipy.sparse.linalg._expm_multiply._trace",
"numpy.zeros",
"scipy.sparse.csc_matrix",
"numpy.random.random",
"numpy.take",
"numpy.array",
"scipy.sparse.hstack",
"scipy.sparse.linalg._expm_multiply._fragment_3_1",
"numpy.array_equal",
"warnings.warn",
"numpy.where",
"numpy.ascontiguousarray",
"numpy.sqrt"
] |
[((2965, 2987), 'numpy.linalg.eigvals', '_np.linalg.eigvals', (['mx'], {}), '(mx)\n', (2983, 2987), True, 'import numpy as _np\n'), ((4368, 4384), 'numpy.sum', '_np.sum', (['(ar ** 2)'], {}), '(ar ** 2)\n', (4375, 4384), True, 'import numpy as _np\n'), ((4800, 4817), 'numpy.linalg.svd', '_np.linalg.svd', (['m'], {}), '(m)\n', (4814, 4817), True, 'import numpy as _np\n'), ((5528, 5568), 'scipy.linalg.qr', '_spl.qr', (['m.T'], {'mode': '"""full"""', 'pivoting': '(True)'}), "(m.T, mode='full', pivoting=True)\n", (5535, 5568), True, 'import scipy.linalg as _spl\n'), ((7127, 7170), 'numpy.take', '_np.take', (['nullsp_projector', 'keepers'], {'axis': '(1)'}), '(nullsp_projector, keepers, axis=1)\n', (7135, 7170), True, 'import numpy as _np\n'), ((8701, 8717), 'scipy.sparse.issparse', '_sps.issparse', (['m'], {}), '(m)\n', (8714, 8717), True, 'import scipy.sparse as _sps\n'), ((9608, 9624), 'scipy.sparse.issparse', '_sps.issparse', (['m'], {}), '(m)\n', (9621, 9624), True, 'import scipy.sparse as _sps\n'), ((14543, 14567), 'scipy.linalg.schur', '_spl.schur', (['m', '"""complex"""'], {}), "(m, 'complex')\n", (14553, 14567), True, 'import scipy.linalg as _spl\n'), ((14636, 14665), 'numpy.zeros', '_np.zeros', (['T.shape', '"""complex"""'], {}), "(T.shape, 'complex')\n", (14645, 14665), True, 'import numpy as _np\n'), ((20233, 20265), 'pygsti.tools.basistools.change_basis', 'change_basis', (['m', 'mx_basis', '"""std"""'], {}), "(m, mx_basis, 'std')\n", (20245, 20265), False, 'from pygsti.tools.basistools import change_basis\n'), ((20278, 20303), 'numpy.linalg.eigvals', '_np.linalg.eigvals', (['M_std'], {}), '(M_std)\n', (20296, 20303), True, 'import numpy as _np\n'), ((21601, 21613), 'scipy.linalg.logm', '_spl.logm', (['m'], {}), '(m)\n', (21610, 21613), True, 'import scipy.linalg as _spl\n'), ((26029, 26046), 'numpy.linalg.eig', '_np.linalg.eig', (['m'], {}), '(m)\n', (26043, 26046), True, 'import numpy as _np\n'), ((30301, 30327), 'numpy.zeros', '_np.zeros', (['[dim, 1]', 'float'], {}), '([dim, 1], float)\n', (30310, 30327), True, 'import numpy as _np\n'), ((32523, 32562), 'pygsti.tools.basistools.change_basis', 'change_basis', (['operator', 'mx_basis', '"""std"""'], {}), "(operator, mx_basis, 'std')\n", (32535, 32562), False, 'from pygsti.tools.basistools import change_basis\n'), ((35381, 35403), 'numpy.empty', '_np.empty', (['(D, D)', '"""d"""'], {}), "((D, D), 'd')\n", (35390, 35403), True, 'import numpy as _np\n'), ((35680, 35716), 'scipy.optimize.linear_sum_assignment', '_spo.linear_sum_assignment', (['weightMx'], {}), '(weightMx)\n', (35706, 35716), True, 'import scipy.optimize as _spo\n'), ((49205, 49221), 'scipy.sparse.issparse', '_sps.issparse', (['a'], {}), '(a)\n', (49218, 49221), True, 'import scipy.sparse as _sps\n'), ((50225, 50241), 'scipy.sparse.issparse', '_sps.issparse', (['a'], {}), '(a)\n', (50238, 50241), True, 'import scipy.sparse as _sps\n'), ((51462, 51478), 'scipy.sparse.issparse', '_sps.issparse', (['a'], {}), '(a)\n', (51475, 51478), True, 'import scipy.sparse as _sps\n'), ((52598, 52614), 'scipy.sparse.issparse', '_sps.issparse', (['a'], {}), '(a)\n', (52611, 52614), True, 'import scipy.sparse as _sps\n'), ((53089, 53107), 'scipy.sparse.isspmatrix', '_sps.isspmatrix', (['a'], {}), '(a)\n', (53104, 53107), True, 'import scipy.sparse as _sps\n'), ((55097, 55114), 'numpy.array', '_np.array', (['indptr'], {}), '(indptr)\n', (55106, 55114), True, 'import numpy as _np\n'), ((55129, 55147), 'numpy.array', '_np.array', (['indices'], {}), '(indices)\n', (55138, 55147), True, 'import numpy as _np\n'), ((58243, 58303), 'numpy.cumsum', '_np.cumsum', (['([0] + [mx.nnz for mx in csr_matrices])'], {'dtype': 'int'}), '([0] + [mx.nnz for mx in csr_matrices], dtype=int)\n', (58253, 58303), True, 'import numpy as _np\n'), ((61952, 61974), 'scipy.sparse.isspmatrix_csr', '_sps.isspmatrix_csr', (['a'], {}), '(a)\n', (61971, 61974), True, 'import scipy.sparse as _sps\n'), ((63592, 63614), 'scipy.sparse.isspmatrix_csr', '_sps.isspmatrix_csr', (['a'], {}), '(a)\n', (63611, 63614), True, 'import scipy.sparse as _sps\n'), ((69080, 69120), 'numpy.ravel_multi_index', '_np.ravel_multi_index', (['(r1, c1)', 'a.shape'], {}), '((r1, c1), a.shape)\n', (69101, 69120), True, 'import numpy as _np\n'), ((69133, 69173), 'numpy.ravel_multi_index', '_np.ravel_multi_index', (['(r2, c2)', 'b.shape'], {}), '((r2, c2), b.shape)\n', (69154, 69173), True, 'import numpy as _np\n'), ((69249, 69292), 'numpy.array_equal', '_np.array_equal', (['lidx1[sidx1]', 'lidx2[sidx2]'], {}), '(lidx1[sidx1], lidx2[sidx2])\n', (69264, 69292), True, 'import numpy as _np\n'), ((69446, 69477), 'numpy.allclose', '_np.allclose', (['V1', 'V2'], {'atol': 'atol'}), '(V1, V2, atol=atol)\n', (69458, 69477), True, 'import numpy as _np\n'), ((70872, 70903), 'numpy.sqrt', '_np.sqrt', (['scaled_identity[0, 0]'], {}), '(scaled_identity[0, 0])\n', (70880, 70903), True, 'import numpy as _np\n'), ((71549, 71567), 'numpy.linalg.eig', '_np.linalg.eig', (['mx'], {}), '(mx)\n', (71563, 71567), True, 'import numpy as _np\n'), ((77330, 77368), 'numpy.delete', '_np.delete', (['mx', 'cols_to_remove'], {'axis': '(1)'}), '(mx, cols_to_remove, axis=1)\n', (77340, 77368), True, 'import numpy as _np\n'), ((77492, 77534), 'numpy.concatenate', '_np.concatenate', (['(space1, -space2)'], {'axis': '(1)'}), '((space1, -space2), axis=1)\n', (77507, 77534), True, 'import numpy as _np\n'), ((77690, 77735), 'numpy.dot', '_np.dot', (['space1', 'nullsp[0:space1.shape[1], :]'], {}), '(space1, nullsp[0:space1.shape[1], :])\n', (77697, 77735), True, 'import numpy as _np\n'), ((77826, 77867), 'numpy.concatenate', '_np.concatenate', (['(space1, space2)'], {'axis': '(1)'}), '((space1, space2), axis=1)\n', (77841, 77867), True, 'import numpy as _np\n'), ((78290, 78305), 'numpy.identity', '_np.identity', (['d'], {}), '(d)\n', (78302, 78305), True, 'import numpy as _np\n'), ((78362, 78379), 'numpy.zeros', '_np.zeros', (['(d ** 2)'], {}), '(d ** 2)\n', (78371, 78379), True, 'import numpy as _np\n'), ((3933, 3949), 'numpy.sum', '_np.sum', (['(ar ** 2)'], {}), '(ar ** 2)\n', (3940, 3949), True, 'import numpy as _np\n'), ((6961, 7022), 'numpy.linalg.matrix_rank', '_np.linalg.matrix_rank', (['nullsp_projector[:, 0:i + 1]'], {'tol': 'tol'}), '(nullsp_projector[:, 0:i + 1], tol=tol)\n', (6983, 7022), True, 'import numpy as _np\n'), ((9691, 9709), 'scipy.sparse.csc_matrix', '_sps.csc_matrix', (['m'], {}), '(m)\n', (9706, 9709), True, 'import scipy.sparse as _sps\n'), ((10466, 10494), 'numpy.diag_indices_from', '_np.diag_indices_from', (['check'], {}), '(check)\n', (10487, 10494), True, 'import numpy as _np\n'), ((12225, 12241), 'scipy.sparse.issparse', '_sps.issparse', (['m'], {}), '(m)\n', (12238, 12241), True, 'import scipy.sparse as _sps\n'), ((14806, 14830), 'numpy.diag_indices_from', '_np.diag_indices_from', (['U'], {}), '(U)\n', (14827, 14830), True, 'import numpy as _np\n'), ((14843, 14858), 'numpy.diagonal', '_np.diagonal', (['T'], {}), '(T)\n', (14855, 14858), True, 'import numpy as _np\n'), ((20328, 20342), 'numpy.abs', '_np.abs', (['evals'], {}), '(evals)\n', (20335, 20342), True, 'import numpy as _np\n'), ((20520, 20532), 'scipy.linalg.logm', '_spl.logm', (['U'], {}), '(U)\n', (20529, 20532), True, 'import scipy.linalg as _spl\n'), ((22955, 22978), 'numpy.linalg.norm', '_np.linalg.norm', (['m.imag'], {}), '(m.imag)\n', (22970, 22978), True, 'import numpy as _np\n'), ((23142, 23157), 'scipy.linalg.expm', '_spl.expm', (['logM'], {}), '(logM)\n', (23151, 23157), True, 'import scipy.linalg as _spl\n'), ((24514, 24642), 'scipy.optimize.minimize', '_spo.minimize', (['_objective', 'initial_flat_logM'], {'options': "{'maxiter': 1000}", 'method': '"""L-BFGS-B"""', 'callback': 'print_obj_func', 'tol': 'tol'}), "(_objective, initial_flat_logM, options={'maxiter': 1000},\n method='L-BFGS-B', callback=print_obj_func, tol=tol)\n", (24527, 24642), True, 'import scipy.optimize as _spo\n'), ((28889, 28903), 'numpy.imag', '_np.imag', (['logM'], {}), '(logM)\n', (28897, 28903), True, 'import numpy as _np\n'), ((29726, 29740), 'numpy.real', '_np.real', (['logM'], {}), '(logM)\n', (29734, 29740), True, 'import numpy as _np\n'), ((31544, 31578), 'numpy.random.random', '_np.random.random', ([], {'size': '[dim, dim]'}), '(size=[dim, dim])\n', (31561, 31578), True, 'import numpy as _np\n'), ((31591, 31625), 'numpy.random.random', '_np.random.random', ([], {'size': '[dim, dim]'}), '(size=[dim, dim])\n', (31608, 31625), True, 'import numpy as _np\n'), ((49309, 49325), 'scipy.sparse.issparse', '_sps.issparse', (['b'], {}), '(b)\n', (49322, 49325), True, 'import scipy.sparse as _sps\n'), ((50254, 50276), 'scipy.sparse.isspmatrix_csr', '_sps.isspmatrix_csr', (['a'], {}), '(a)\n', (50273, 50276), True, 'import scipy.sparse as _sps\n'), ((50796, 50807), 'numpy.real', '_np.real', (['a'], {}), '(a)\n', (50804, 50807), True, 'import numpy as _np\n'), ((51491, 51513), 'scipy.sparse.isspmatrix_csr', '_sps.isspmatrix_csr', (['a'], {}), '(a)\n', (51510, 51513), True, 'import scipy.sparse as _sps\n'), ((52033, 52044), 'numpy.imag', '_np.imag', (['a'], {}), '(a)\n', (52041, 52044), True, 'import numpy as _np\n'), ((52631, 52653), 'scipy.sparse.isspmatrix_csr', '_sps.isspmatrix_csr', (['a'], {}), '(a)\n', (52650, 52653), True, 'import scipy.sparse as _sps\n'), ((53167, 53188), 'numpy.linalg.norm', '_np.linalg.norm', (['a', '(1)'], {}), '(a, 1)\n', (53182, 53188), True, 'import numpy as _np\n'), ((55032, 55057), 'numpy.array', '_np.array', (['lst', '_np.int64'], {}), '(lst, _np.int64)\n', (55041, 55057), True, 'import numpy as _np\n'), ((58053, 58091), 'numpy.concatenate', '_np.concatenate', (['csr_sum_array'], {'axis': '(0)'}), '(csr_sum_array, axis=0)\n', (58068, 58091), True, 'import numpy as _np\n'), ((58149, 58206), 'numpy.concatenate', '_np.concatenate', (['[mx.data for mx in csr_matrices]'], {'axis': '(0)'}), '([mx.data for mx in csr_matrices], axis=0)\n', (58164, 58206), True, 'import numpy as _np\n'), ((61164, 61208), 'numpy.ascontiguousarray', '_np.ascontiguousarray', (['coeffs'], {'dtype': 'complex'}), '(coeffs, dtype=complex)\n', (61185, 61208), True, 'import numpy as _np\n'), ((62101, 62131), 'scipy.sparse.linalg._expm_multiply._trace', '_spsl._expm_multiply._trace', (['a'], {}), '(a)\n', (62128, 62131), True, 'import scipy.sparse.linalg as _spsl\n'), ((62250, 62304), 'scipy.sparse.identity', '_sps.identity', (['a.shape[0]'], {'dtype': 'a.dtype', 'format': '"""csr"""'}), "(a.shape[0], dtype=a.dtype, format='csr')\n", (62263, 62304), True, 'import scipy.sparse as _sps\n'), ((62384, 62411), 'numpy.empty', '_np.empty', (['(n + 1)', '_np.int64'], {}), '(n + 1, _np.int64)\n', (62393, 62411), True, 'import numpy as _np\n'), ((62430, 62471), 'numpy.empty', '_np.empty', (['(a.data.shape[0] + n)', '_np.int64'], {}), '(a.data.shape[0] + n, _np.int64)\n', (62439, 62471), True, 'import numpy as _np\n'), ((62526, 62565), 'numpy.empty', '_np.empty', (['(a.data.shape[0] + n)', 'a.dtype'], {}), '(a.data.shape[0] + n, a.dtype)\n', (62535, 62565), True, 'import numpy as _np\n'), ((62929, 62997), 'scipy.sparse.csr_matrix', '_sps.csr_matrix', (['(data[0:nxt], indices[0:nxt], indptr)'], {'shape': '(n, n)'}), '((data[0:nxt], indices[0:nxt], indptr), shape=(n, n))\n', (62944, 62997), True, 'import scipy.sparse as _sps\n'), ((63378, 63463), 'scipy.sparse.linalg._expm_multiply.LazyOperatorNormInfo', '_spsl._expm_multiply.LazyOperatorNormInfo', (['(t * a)'], {'A_1_norm': '(t * A_1_norm)', 'ell': 'ell'}), '(t * a, A_1_norm=t * A_1_norm, ell=ell\n )\n', (63419, 63463), True, 'import scipy.sparse.linalg as _spsl\n'), ((63479, 63542), 'scipy.sparse.linalg._expm_multiply._fragment_3_1', '_spsl._expm_multiply._fragment_3_1', (['norm_info', 'n0', 'tol'], {'ell': 'ell'}), '(norm_info, n0, tol, ell=ell)\n', (63513, 63542), True, 'import scipy.sparse.linalg as _spsl\n'), ((65216, 65247), 'numpy.array', '_np.array', (['A.indices'], {'dtype': 'int'}), '(A.indices, dtype=int)\n', (65225, 65247), True, 'import numpy as _np\n'), ((65301, 65331), 'numpy.array', '_np.array', (['A.indptr'], {'dtype': 'int'}), '(A.indptr, dtype=int)\n', (65310, 65331), True, 'import numpy as _np\n'), ((68085, 68105), 'scipy.sparse.linalg.onenormest', '_spsl.onenormest', (['op'], {}), '(op)\n', (68101, 68105), True, 'import scipy.sparse.linalg as _spsl\n'), ((68250, 68323), 'scipy.sparse.linalg._expm_multiply.LazyOperatorNormInfo', '_spsl._expm_multiply.LazyOperatorNormInfo', (['op'], {'A_1_norm': 'a_1_norm', 'ell': 'ell'}), '(op, A_1_norm=a_1_norm, ell=ell)\n', (68291, 68323), True, 'import scipy.sparse.linalg as _spsl\n'), ((68344, 68407), 'scipy.sparse.linalg._expm_multiply._fragment_3_1', '_spsl._expm_multiply._fragment_3_1', (['norm_info', 'n0', 'tol'], {'ell': 'ell'}), '(norm_info, n0, tol, ell=ell)\n', (68378, 68407), True, 'import scipy.sparse.linalg as _spsl\n'), ((68955, 68988), 'numpy.array_equal', '_np.array_equal', (['a.shape', 'b.shape'], {}), '(a.shape, b.shape)\n', (68970, 68988), True, 'import numpy as _np\n'), ((70827, 70858), 'numpy.conjugate', '_np.conjugate', (['scaled_unitary.T'], {}), '(scaled_unitary.T)\n', (70840, 70858), True, 'import numpy as _np\n'), ((70958, 71001), 'numpy.identity', '_np.identity', (['scaled_identity.shape[0]', '"""d"""'], {}), "(scaled_identity.shape[0], 'd')\n", (70970, 71001), True, 'import numpy as _np\n'), ((72478, 72502), 'numpy.isclose', '_np.isclose', (['ev', 'last_ev'], {}), '(ev, last_ev)\n', (72489, 72502), True, 'import numpy as _np\n'), ((73858, 73876), 'numpy.dot', '_np.dot', (['u_inv', 'u0'], {}), '(u_inv, u0)\n', (73865, 73876), True, 'import numpy as _np\n'), ((73903, 73921), 'numpy.dot', '_np.dot', (['D', 'u0_inv'], {}), '(D, u0_inv)\n', (73910, 73921), True, 'import numpy as _np\n'), ((73988, 74011), 'numpy.linalg.norm', '_np.linalg.norm', (['R.imag'], {}), '(R.imag)\n', (74003, 74011), True, 'import numpy as _np\n'), ((74850, 74876), 'numpy.linalg.norm', '_np.linalg.norm', (['r_on_comm'], {}), '(r_on_comm)\n', (74865, 74876), True, 'import numpy as _np\n'), ((75181, 75202), 'scipy.linalg.expm', '_spl.expm', (['(-r_on_comm)'], {}), '(-r_on_comm)\n', (75190, 75202), True, 'import scipy.linalg as _spl\n'), ((75281, 75294), 'numpy.dot', '_np.dot', (['R', 'X'], {}), '(R, X)\n', (75288, 75294), True, 'import numpy as _np\n'), ((75325, 75348), 'numpy.linalg.norm', '_np.linalg.norm', (['R.imag'], {}), '(R.imag)\n', (75340, 75348), True, 'import numpy as _np\n'), ((77114, 77157), 'numpy.linalg.matrix_rank', '_np.linalg.matrix_rank', (['mx[:, 0:j + 1]', 'tol'], {}), '(mx[:, 0:j + 1], tol)\n', (77136, 77157), True, 'import numpy as _np\n'), ((78331, 78352), 'scipy.linalg.expm', '_spl.expm', (['(1.0j * Hmx)'], {}), '(1.0j * Hmx)\n', (78340, 78352), True, 'import scipy.linalg as _spl\n'), ((78452, 78464), 'numpy.zeros', '_np.zeros', (['d'], {}), '(d)\n', (78461, 78464), True, 'import numpy as _np\n'), ((78490, 78504), 'numpy.kron', '_np.kron', (['x', 'x'], {}), '(x, x)\n', (78498, 78504), True, 'import numpy as _np\n'), ((78560, 78577), 'numpy.dot', '_np.dot', (['psi', 'psi'], {}), '(psi, psi)\n', (78567, 78577), True, 'import numpy as _np\n'), ((78677, 78698), 'numpy.arccos', '_np.arccos', (['cos_theta'], {}), '(cos_theta)\n', (78687, 78698), True, 'import numpy as _np\n'), ((79627, 79653), 'numpy.array', '_np.array', (['(1, 0)', 'complex'], {}), '((1, 0), complex)\n', (79636, 79653), True, 'import numpy as _np\n'), ((79707, 79733), 'numpy.array', '_np.array', (['(0, 1)', 'complex'], {}), '((0, 1), complex)\n', (79716, 79733), True, 'import numpy as _np\n'), ((79867, 79917), 'functools.reduce', '_functools.reduce', (['_np.kron', '[v[i] for i in zvals]'], {}), '(_np.kron, [v[i] for i in zvals])\n', (79884, 79917), True, 'import functools as _functools\n'), ((82235, 82263), 'numpy.zeros', '_np.zeros', (['(4 ** nqubits)', '"""d"""'], {}), "(4 ** nqubits, 'd')\n", (82244, 82263), True, 'import numpy as _np\n'), ((11155, 11188), 'numpy.identity', '_np.identity', (['check.shape[0]', '"""d"""'], {}), "(check.shape[0], 'd')\n", (11167, 11188), True, 'import numpy as _np\n'), ((12365, 12400), 'numpy.empty', '_np.empty', (['(m.shape[0], 0)', 'm.dtype'], {}), '((m.shape[0], 0), m.dtype)\n', (12374, 12400), True, 'import numpy as _np\n'), ((12511, 12565), 'numpy.concatenate', '_np.concatenate', (['(running_indep_cols, m[:, j])'], {'axis': '(1)'}), '((running_indep_cols, m[:, j]), axis=1)\n', (12526, 12565), True, 'import numpy as _np\n'), ((13012, 13059), 'scipy.sparse.csc_matrix', '_sps.csc_matrix', (['(m.shape[0], 0)'], {'dtype': 'm.dtype'}), '((m.shape[0], 0), dtype=m.dtype)\n', (13027, 13059), True, 'import scipy.sparse as _sps\n'), ((13170, 13212), 'scipy.sparse.hstack', '_sps.hstack', (['(running_indep_cols, m[:, j])'], {}), '((running_indep_cols, m[:, j]))\n', (13181, 13212), True, 'import scipy.sparse as _sps\n'), ((13875, 13885), 'numpy.abs', '_np.abs', (['m'], {}), '(m)\n', (13882, 13885), True, 'import numpy as _np\n'), ((15208, 15225), 'numpy.isclose', '_np.isclose', (['S', '(0)'], {}), '(S, 0)\n', (15219, 15225), True, 'import numpy as _np\n'), ((15798, 15816), 'numpy.conjugate', '_np.conjugate', (['Z.T'], {}), '(Z.T)\n', (15811, 15816), True, 'import numpy as _np\n'), ((17518, 17530), 'numpy.array', '_np.array', (['m'], {}), '(m)\n', (17527, 17530), True, 'import numpy as _np\n'), ((21559, 21582), 'numpy.linalg.norm', '_np.linalg.norm', (['m.imag'], {}), '(m.imag)\n', (21574, 21582), True, 'import numpy as _np\n'), ((21647, 21673), 'numpy.linalg.norm', '_np.linalg.norm', (['logM.imag'], {}), '(logM.imag)\n', (21662, 21673), True, 'import numpy as _np\n'), ((25943, 25954), 'numpy.imag', '_np.imag', (['m'], {}), '(m)\n', (25951, 25954), True, 'import numpy as _np\n'), ((28010, 28028), 'numpy.log', '_np.log', (['(-evals[i])'], {}), '(-evals[i])\n', (28017, 28028), True, 'import numpy as _np\n'), ((28136, 28147), 'numpy.sqrt', '_np.sqrt', (['(2)'], {}), '(2)\n', (28144, 28147), True, 'import numpy as _np\n'), ((28297, 28320), 'numpy.log', '_np.log', (['(-evals[i].real)'], {}), '(-evals[i].real)\n', (28304, 28320), True, 'import numpy as _np\n'), ((28679, 28698), 'numpy.diag', '_np.diag', (['log_evals'], {}), '(log_evals)\n', (28687, 28698), True, 'import numpy as _np\n'), ((28700, 28717), 'numpy.linalg.inv', '_np.linalg.inv', (['U'], {}), '(U)\n', (28714, 28717), True, 'import numpy as _np\n'), ((30584, 30608), 'numpy.transpose', '_np.transpose', (['matrix_in'], {}), '(matrix_in)\n', (30597, 30608), True, 'import numpy as _np\n'), ((48328, 48350), 'itertools.product', '_itertools.product', (['*b'], {}), '(*b)\n', (48346, 48350), True, 'import itertools as _itertools\n'), ((48665, 48697), 'numpy.zeros', '_np.zeros', (['a_inds_shape', 'a.dtype'], {}), '(a_inds_shape, a.dtype)\n', (48674, 48697), True, 'import numpy as _np\n'), ((49557, 49570), 'numpy.dot', '_np.dot', (['a', 'b'], {}), '(a, b)\n', (49564, 49570), True, 'import numpy as _np\n'), ((54095, 54112), 'numpy.empty', '_np.empty', (['(0)', 'int'], {}), '(0, int)\n', (54104, 54112), True, 'import numpy as _np\n'), ((54114, 54131), 'numpy.empty', '_np.empty', (['(0)', 'int'], {}), '(0, int)\n', (54123, 54131), True, 'import numpy as _np\n'), ((57925, 57942), 'numpy.empty', '_np.empty', (['(0)', 'int'], {}), '(0, int)\n', (57934, 57942), True, 'import numpy as _np\n'), ((57944, 57961), 'numpy.empty', '_np.empty', (['(0)', '"""d"""'], {}), "(0, 'd')\n", (57953, 57961), True, 'import numpy as _np\n'), ((57963, 57980), 'numpy.zeros', '_np.zeros', (['(1)', 'int'], {}), '(1, int)\n', (57972, 57980), True, 'import numpy as _np\n'), ((62705, 62747), 'numpy.ascontiguousarray', '_np.ascontiguousarray', (['a.indptr', '_np.int64'], {}), '(a.indptr, _np.int64)\n', (62726, 62747), True, 'import numpy as _np\n'), ((62795, 62838), 'numpy.ascontiguousarray', '_np.ascontiguousarray', (['a.indices', '_np.int64'], {}), '(a.indices, _np.int64)\n', (62816, 62838), True, 'import numpy as _np\n'), ((74084, 74098), 'numpy.dot', '_np.dot', (['x', 'u0'], {}), '(x, u0)\n', (74091, 74098), True, 'import numpy as _np\n'), ((74166, 74184), 'numpy.dot', '_np.dot', (['a', 'u0_inv'], {}), '(a, u0_inv)\n', (74173, 74184), True, 'import numpy as _np\n'), ((74405, 74428), 'numpy.linalg.norm', '_np.linalg.norm', (['R.imag'], {}), '(R.imag)\n', (74420, 74428), True, 'import numpy as _np\n'), ((74467, 74481), 'numpy.dot', '_np.dot', (['R', 'u0'], {}), '(R, u0)\n', (74474, 74481), True, 'import numpy as _np\n'), ((74604, 74627), 'numpy.linalg.norm', '_np.linalg.norm', (['r.imag'], {}), '(r.imag)\n', (74619, 74627), True, 'import numpy as _np\n'), ((74735, 74766), 'numpy.linalg.norm', '_np.linalg.norm', (['r_on_comm.imag'], {}), '(r_on_comm.imag)\n', (74750, 74766), True, 'import numpy as _np\n'), ((75218, 75241), 'numpy.linalg.norm', '_np.linalg.norm', (['X.imag'], {}), '(X.imag)\n', (75233, 75241), True, 'import numpy as _np\n'), ((78525, 78536), 'numpy.sqrt', '_np.sqrt', (['d'], {}), '(d)\n', (78533, 78536), True, 'import numpy as _np\n'), ((78625, 78645), 'numpy.dot', '_np.dot', (['errmap', 'psi'], {}), '(errmap, psi)\n', (78632, 78645), True, 'import numpy as _np\n'), ((79421, 79449), 'numpy.array', '_np.array', (['(1, 0, 0, 1)', '"""d"""'], {}), "((1, 0, 0, 1), 'd')\n", (79430, 79449), True, 'import numpy as _np\n'), ((79517, 79546), 'numpy.array', '_np.array', (['(1, 0, 0, -1)', '"""d"""'], {}), "((1, 0, 0, -1), 'd')\n", (79526, 79546), True, 'import numpy as _np\n'), ((8961, 8994), 'numpy.linalg.norm', '_np.linalg.norm', (['m[:, j]'], {'ord': 'ord'}), '(m[:, j], ord=ord)\n', (8976, 8994), True, 'import numpy as _np\n'), ((10519, 10541), 'numpy.linalg.norm', '_np.linalg.norm', (['check'], {}), '(check)\n', (10534, 10541), True, 'import numpy as _np\n'), ((12581, 12619), 'numpy.linalg.matrix_rank', '_np.linalg.matrix_rank', (['trial'], {'tol': 'tol'}), '(trial, tol=tol)\n', (12603, 12619), True, 'import numpy as _np\n'), ((13261, 13326), 'scipy.sparse.linalg.svds', '_spsl.svds', (['trial'], {'k': '(1)', 'which': '"""SM"""', 'return_singular_vectors': '(False)'}), "(trial, k=1, which='SM', return_singular_vectors=False)\n", (13271, 13326), True, 'import scipy.sparse.linalg as _spsl\n'), ((15266, 15299), 'numpy.isclose', '_np.isclose', (['(T[i, i] - T[j, j])', '(0)'], {}), '(T[i, i] - T[j, j], 0)\n', (15277, 15299), True, 'import numpy as _np\n'), ((17407, 17418), 'numpy.imag', '_np.imag', (['m'], {}), '(m)\n', (17415, 17418), True, 'import numpy as _np\n'), ((20680, 20700), 'numpy.sqrt', '_np.sqrt', (['H.shape[0]'], {}), '(H.shape[0])\n', (20688, 20700), True, 'import numpy as _np\n'), ((20747, 20762), 'scipy.linalg.expm', '_spl.expm', (['logM'], {}), '(logM)\n', (20756, 20762), True, 'import scipy.linalg as _spl\n'), ((21856, 21877), 'numpy.linalg.eigvals', '_np.linalg.eigvals', (['m'], {}), '(m)\n', (21874, 21877), True, 'import numpy as _np\n'), ((26338, 26350), 'numpy.real', '_np.real', (['ev'], {}), '(ev)\n', (26346, 26350), True, 'import numpy as _np\n'), ((29287, 29421), 'warnings.warn', '_warnings.warn', (["('Cannot construct a real log: unpaired negative' + ' real eigenvalues: %s' %\n [evals[i] for i in unpaired_indices])"], {}), "('Cannot construct a real log: unpaired negative' + \n ' real eigenvalues: %s' % [evals[i] for i in unpaired_indices])\n", (29301, 29421), True, 'import warnings as _warnings\n'), ((39703, 39728), 'numpy.isclose', '_np.isclose', (['ar[i]', 'ar[j]'], {}), '(ar[i], ar[j])\n', (39714, 39728), True, 'import numpy as _np\n'), ((79407, 79418), 'numpy.sqrt', '_np.sqrt', (['(2)'], {}), '(2)\n', (79415, 79418), True, 'import numpy as _np\n'), ((79503, 79514), 'numpy.sqrt', '_np.sqrt', (['(2)'], {}), '(2)\n', (79511, 79514), True, 'import numpy as _np\n'), ((82313, 82324), 'numpy.sqrt', '_np.sqrt', (['(2)'], {}), '(2)\n', (82321, 82324), True, 'import numpy as _np\n'), ((5589, 5604), 'numpy.diagonal', '_np.diagonal', (['r'], {}), '(r)\n', (5601, 5604), True, 'import numpy as _np\n'), ((23188, 23223), 'numpy.linalg.norm', '_np.linalg.norm', (['(logM - target_logm)'], {}), '(logM - target_logm)\n', (23203, 23223), True, 'import numpy as _np\n'), ((26314, 26326), 'numpy.imag', '_np.imag', (['ev'], {}), '(ev)\n', (26322, 26326), True, 'import numpy as _np\n'), ((26415, 26430), 'numpy.imag', '_np.imag', (['evec1'], {}), '(evec1)\n', (26423, 26430), True, 'import numpy as _np\n'), ((37759, 37779), 'numpy.conjugate', '_np.conjugate', (['a[q0]'], {}), '(a[q0])\n', (37772, 37779), True, 'import numpy as _np\n'), ((37825, 37845), 'numpy.conjugate', '_np.conjugate', (['b[q1]'], {}), '(b[q1])\n', (37838, 37845), True, 'import numpy as _np\n'), ((43188, 43213), 'numpy.array', '_np.array', (['[i]', '_np.int64'], {}), '([i], _np.int64)\n', (43197, 43213), True, 'import numpy as _np\n'), ((44814, 44836), 'itertools.product', '_itertools.product', (['*b'], {}), '(*b)\n', (44832, 44836), True, 'import itertools as _itertools\n'), ((75093, 75119), 'numpy.linalg.norm', '_np.linalg.norm', (['(R - lastR)'], {}), '(R - lastR)\n', (75108, 75119), True, 'import numpy as _np\n'), ((43425, 43448), 'numpy.array', '_np.array', (['i', '_np.int64'], {}), '(i, _np.int64)\n', (43434, 43448), True, 'import numpy as _np\n'), ((50341, 50357), 'numpy.real', '_np.real', (['a.data'], {}), '(a.data)\n', (50349, 50357), True, 'import numpy as _np\n'), ((51578, 51594), 'numpy.imag', '_np.imag', (['a.data'], {}), '(a.data)\n', (51586, 51594), True, 'import numpy as _np\n'), ((63135, 63163), 'numpy.where', '_np.where', (['(a.indices == iCol)'], {}), '(a.indices == iCol)\n', (63144, 63163), True, 'import numpy as _np\n'), ((27115, 27148), 'numpy.linalg.norm', '_np.linalg.norm', (['(evec1C - U[:, j])'], {}), '(evec1C - U[:, j])\n', (27130, 27148), True, 'import numpy as _np\n'), ((26655, 26672), 'numpy.imag', '_np.imag', (['U[:, j]'], {}), '(U[:, j])\n', (26663, 26672), True, 'import numpy as _np\n'), ((39043, 39054), 'numpy.conj', '_np.conj', (['v'], {}), '(v)\n', (39051, 39054), True, 'import numpy as _np\n'), ((50472, 50488), 'numpy.real', '_np.real', (['a.data'], {}), '(a.data)\n', (50480, 50488), True, 'import numpy as _np\n'), ((51709, 51725), 'numpy.imag', '_np.imag', (['a.data'], {}), '(a.data)\n', (51717, 51725), True, 'import numpy as _np\n'), ((30969, 30997), 'itertools.repeat', '_ittls.repeat', (['None', '(dim - 1)'], {}), '(None, dim - 1)\n', (30982, 30997), True, 'import itertools as _ittls\n')]
|
from etl.jobs.transformation.treatment_component_transformer_job import transform_treatment_component
from tests.etl.workflow.treatment_component.expected_outputs import expected_treatments_components
from tests.etl.workflow.treatment_component.input_data import treatment_and_component_helper, treatment
from tests.util import convert_to_dataframe, assert_df_are_equal_ignore_id
def test_treatment_component(spark_session):
treatment_and_component_helper_df = convert_to_dataframe(spark_session, treatment_and_component_helper)
treatment_df = convert_to_dataframe(spark_session, treatment)
treatment_component_df = transform_treatment_component(treatment_and_component_helper_df, treatment_df)
expected_df = convert_to_dataframe(spark_session, expected_treatments_components)
assert_df_are_equal_ignore_id(treatment_component_df, expected_df)
|
[
"etl.jobs.transformation.treatment_component_transformer_job.transform_treatment_component",
"tests.util.assert_df_are_equal_ignore_id",
"tests.util.convert_to_dataframe"
] |
[((467, 534), 'tests.util.convert_to_dataframe', 'convert_to_dataframe', (['spark_session', 'treatment_and_component_helper'], {}), '(spark_session, treatment_and_component_helper)\n', (487, 534), False, 'from tests.util import convert_to_dataframe, assert_df_are_equal_ignore_id\n'), ((554, 600), 'tests.util.convert_to_dataframe', 'convert_to_dataframe', (['spark_session', 'treatment'], {}), '(spark_session, treatment)\n', (574, 600), False, 'from tests.util import convert_to_dataframe, assert_df_are_equal_ignore_id\n'), ((630, 708), 'etl.jobs.transformation.treatment_component_transformer_job.transform_treatment_component', 'transform_treatment_component', (['treatment_and_component_helper_df', 'treatment_df'], {}), '(treatment_and_component_helper_df, treatment_df)\n', (659, 708), False, 'from etl.jobs.transformation.treatment_component_transformer_job import transform_treatment_component\n'), ((727, 794), 'tests.util.convert_to_dataframe', 'convert_to_dataframe', (['spark_session', 'expected_treatments_components'], {}), '(spark_session, expected_treatments_components)\n', (747, 794), False, 'from tests.util import convert_to_dataframe, assert_df_are_equal_ignore_id\n'), ((800, 866), 'tests.util.assert_df_are_equal_ignore_id', 'assert_df_are_equal_ignore_id', (['treatment_component_df', 'expected_df'], {}), '(treatment_component_df, expected_df)\n', (829, 866), False, 'from tests.util import convert_to_dataframe, assert_df_are_equal_ignore_id\n')]
|
import sys
sys.path.append("..")
from engineering_tool.temperatures import *
def Gas_Temperature():
pressure = 0.22 # atm
volume = 10 # Litre or 1 Litre = 1000 cm^3
n_Mole_H2O = 0.056 # mol H2O 1 g / 18 g.mol^-1
R_H2O = 0.08206 # L.atm.mol^-1K^-1
temp = Temperature.Gas(pressure,volume,n_Mole_H2O,R_H2O)
print("Temperature : %f K"%temp)
print("Temperature : %f C"%Temperature.KelvinToCelsius(temp))
print("Temperature : %f F"%Temperature.KelvinToFahrenheit(temp))
Gas_Temperature()
|
[
"sys.path.append"
] |
[((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n')]
|
'''
Created on Jun 27, 2016
@author: rajajosh
'''
import numpy
from scipy.spatial.distance import euclidean
class KNNClassifier(object):
"K-Nearest Neighbors classifier class"
len=0
x_train=[]
y_train=[]
kVal=1
clusters = set()
def __init__(self):
'''
Constructor
'''
pass
def fit(self, x_train, y_train, kVal=3):
"fir the training data. "
self.len=len(x_train)
if self.len>len(y_train): self.len=len(y_train)
if kVal>self.len: kVal=self.len
self.x_train=x_train
self.y_train=y_train
self.clusters = set(y_train)
self.kVal=kVal
def predict(self, x_test):
retArr = []
for testData in x_test:
distArray =[]
for i in range(0,self.len):
distArray.append([euclidean(testData, self.x_train[i]), self.y_train[i]])
distArray.sort()
counts = [0] * len(self.clusters)
for i in range(0,self.kVal):
index=distArray[i][1]
counts[index]=counts[index]+1
largest=0
indexOfLargest=0
for i in range(0,len(counts)):
if counts[i]>largest:
largest=counts[i]
indexOfLargest=i
retArr.append(indexOfLargest)
return numpy.asarray(retArr)
print("done")
|
[
"numpy.asarray",
"scipy.spatial.distance.euclidean"
] |
[((1417, 1438), 'numpy.asarray', 'numpy.asarray', (['retArr'], {}), '(retArr)\n', (1430, 1438), False, 'import numpy\n'), ((883, 919), 'scipy.spatial.distance.euclidean', 'euclidean', (['testData', 'self.x_train[i]'], {}), '(testData, self.x_train[i])\n', (892, 919), False, 'from scipy.spatial.distance import euclidean\n')]
|
# Generated by Django 4.0 on 2022-02-26 16:03
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contatos', '0021_alter_chat_data'),
]
operations = [
migrations.AlterField(
model_name='chat',
name='data',
field=models.DateTimeField(blank=True, default=datetime.datetime(2022, 2, 26, 13, 3, 54, 155992), null=True),
),
]
|
[
"datetime.datetime"
] |
[((385, 434), 'datetime.datetime', 'datetime.datetime', (['(2022)', '(2)', '(26)', '(13)', '(3)', '(54)', '(155992)'], {}), '(2022, 2, 26, 13, 3, 54, 155992)\n', (402, 434), False, 'import datetime\n')]
|
import os
import random
import time
import requests
random.seed(time.time())
ITERATIONS = int(os.getenv('ITERATIONS', '1000000'))
def generate_random_int():
return random.randint(0, 1024)
def generate_random_float():
return random.random()
def request(url):
ri = str(generate_random_int())
response = requests.get(f'{url}?q={ri}')
def main():
url = os.environ.get('HTTP_REQUEST')
print(f"PYTHON START {ITERATIONS} -> {url}")
for i in range(ITERATIONS):
i1 = generate_random_int()
i2 = generate_random_int()
f1 = generate_random_float()
f2 = generate_random_float()
ints = i1+i2
floats = f1*f2
s = "A"*1024
del(s)
request(url)
print("PYTHON END")
if __name__ == '__main__':
main()
|
[
"random.randint",
"time.time",
"os.environ.get",
"random.random",
"requests.get",
"os.getenv"
] |
[((65, 76), 'time.time', 'time.time', ([], {}), '()\n', (74, 76), False, 'import time\n'), ((96, 130), 'os.getenv', 'os.getenv', (['"""ITERATIONS"""', '"""1000000"""'], {}), "('ITERATIONS', '1000000')\n", (105, 130), False, 'import os\n'), ((172, 195), 'random.randint', 'random.randint', (['(0)', '(1024)'], {}), '(0, 1024)\n', (186, 195), False, 'import random\n'), ((238, 253), 'random.random', 'random.random', ([], {}), '()\n', (251, 253), False, 'import random\n'), ((325, 354), 'requests.get', 'requests.get', (['f"""{url}?q={ri}"""'], {}), "(f'{url}?q={ri}')\n", (337, 354), False, 'import requests\n'), ((379, 409), 'os.environ.get', 'os.environ.get', (['"""HTTP_REQUEST"""'], {}), "('HTTP_REQUEST')\n", (393, 409), False, 'import os\n')]
|
import argparse
import os
import sys
import pwnlib
from pwnlib.context import context
choices = map(str, [16,32,64])
choices += list(context.oses)
choices += list(context.architectures)
choices += list(context.endiannesses)
def context_arg(arg):
try: context.arch = arg
except Exception: pass
try: context.os = arg
except Exception: pass
try: context.bits = int(arg)
except Exception: arg
try: context.endian = arg
except Exception: pass
return arg
parser = argparse.ArgumentParser(description='Pwntools Command-line Interface',
prog='pwn')
parser_commands = parser.add_subparsers(dest='command')
def main(file=sys.argv[0]):
import pwnlib.commandline.main
name = os.path.splitext(os.path.basename(file))[0]
sys.argv.insert(1, name)
pwnlib.commandline.main.main()
|
[
"pwnlib.commandline.main.main",
"argparse.ArgumentParser",
"sys.argv.insert",
"os.path.basename"
] |
[((498, 585), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Pwntools Command-line Interface"""', 'prog': '"""pwn"""'}), "(description='Pwntools Command-line Interface', prog\n ='pwn')\n", (521, 585), False, 'import argparse\n'), ((793, 817), 'sys.argv.insert', 'sys.argv.insert', (['(1)', 'name'], {}), '(1, name)\n', (808, 817), False, 'import sys\n'), ((822, 852), 'pwnlib.commandline.main.main', 'pwnlib.commandline.main.main', ([], {}), '()\n', (850, 852), False, 'import pwnlib\n'), ((762, 784), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (778, 784), False, 'import os\n')]
|
# Ultroid - UserBot
# Copyright (C) 2020 TeamUltroid
#
# This file is a part of < https://github.com/TeamUltroid/Ultroid/ >
# PLease read the GNU Affero General Public License in
# <https://www.github.com/TeamUltroid/Ultroid/blob/main/LICENSE/>.
"""
✘ Commands Available -
•`{i}calc` - Inline Calculator
"""
import re
from . import *
@ultroid_cmd(pattern="calc")
async def icalc(e):
udB.delete("calc")
results = await ultroid_bot.inline_query(asst.me.username, "calc")
await results[0].click(e.chat_id, silent=True, hide_via=True)
await e.delete()
@in_pattern("calc")
@in_owner
async def _(e):
m = [
"AC",
"C",
"⌫",
"%",
"7",
"8",
"9",
"+",
"4",
"5",
"6",
"-",
"1",
"2",
"3",
"x",
"00",
"0",
".",
"÷",
]
tultd = [Button.inline(f"{x}", data=f"calc{x}") for x in m]
lst = list(zip(tultd[::4], tultd[1::4], tultd[2::4], tultd[3::4]))
lst.append([Button.inline("=", data="calc=")])
calc = e.builder.article("Calc", text="• Ultroid Inline Calculator •", buttons=lst)
await e.answer([calc])
@callback(re.compile("calc(.*)"))
@owner
async def _(e):
x = (e.data_match.group(1)).decode()
if x == "AC":
udB.delete("calc")
return await e.edit(
"• Ultroid Inline Calculator •",
buttons=[Button.inline("Open Calculator Again", data="recalc")],
)
elif x == "C":
udB.delete("calc")
return await e.answer("cleared")
elif x == "⌫":
get = udB.get("calc")
if get:
udB.set("calc", get[:-1])
return await e.answer(str(get[:-1]))
elif x == "%":
get = udB.get("calc")
if get:
udB.set("calc", get + "/100")
return await e.answer(str(get + "/100"))
elif x == "÷":
get = udB.get("calc")
if get:
udB.set("calc", get + "/")
return await e.answer(str(get + "/"))
elif x == "x":
get = udB.get("calc")
if get:
udB.set("calc", get + "*")
return await e.answer(str(get + "*"))
elif x == "=":
get = udB.get("calc")
if get:
if get.endswith(("*", ".", "/", "-", "+")):
get = get[:-1]
out = await calcc(get, e)
try:
num = float(out)
return await e.answer(f"Answer : {num}", cache_time=0, alert=True)
except BaseException:
udB.delete("calc")
return await e.answer("Error", cache_time=0, alert=True)
return await e.answer("None")
else:
get = udB.get("calc")
if get:
udB.set("calc", get + x)
return await e.answer(str(get + x))
udB.set("calc", x)
return await e.answer(str(x))
@callback("recalc")
@owner
async def _(e):
m = [
"AC",
"C",
"⌫",
"%",
"7",
"8",
"9",
"+",
"4",
"5",
"6",
"-",
"1",
"2",
"3",
"x",
"00",
"0",
".",
"÷",
]
tultd = [Button.inline(f"{x}", data=f"calc{x}") for x in m]
lst = list(zip(tultd[::4], tultd[1::4], tultd[2::4], tultd[3::4]))
lst.append([Button.inline("=", data="calc=")])
await e.edit("Noice Inline Calculator", buttons=lst)
HELP.update({f"{__name__.split('.')[1]}": f"{__doc__.format(i=HNDLR)}"})
|
[
"re.compile"
] |
[((1210, 1232), 're.compile', 're.compile', (['"""calc(.*)"""'], {}), "('calc(.*)')\n", (1220, 1232), False, 'import re\n')]
|
import os
from codecs import open
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
# load about information
about = {}
with open(os.path.join(here, 'seq_interval', '__version__.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
requires = []
setup(
name=about['__title__'],
version=about['__version__'],
keywords=about['__keywords__'],
description=about['__description__'],
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
packages=['seq_interval'],
package_data={'':['LICENSE'], 'seq_interval': []},
package_dir={'seq_interval': 'seq_interval'},
install_requires=requires,
license=about['__license__'],
classifiers=[
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
]
)
|
[
"os.path.dirname",
"os.path.join",
"setuptools.setup"
] |
[((277, 1124), 'setuptools.setup', 'setup', ([], {'name': "about['__title__']", 'version': "about['__version__']", 'keywords': "about['__keywords__']", 'description': "about['__description__']", 'author': "about['__author__']", 'author_email': "about['__author_email__']", 'url': "about['__url__']", 'packages': "['seq_interval']", 'package_data': "{'': ['LICENSE'], 'seq_interval': []}", 'package_dir': "{'seq_interval': 'seq_interval'}", 'install_requires': 'requires', 'license': "about['__license__']", 'classifiers': "['Natural Language :: English', 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython']"}), "(name=about['__title__'], version=about['__version__'], keywords=about\n ['__keywords__'], description=about['__description__'], author=about[\n '__author__'], author_email=about['__author_email__'], url=about[\n '__url__'], packages=['seq_interval'], package_data={'': ['LICENSE'],\n 'seq_interval': []}, package_dir={'seq_interval': 'seq_interval'},\n install_requires=requires, license=about['__license__'], classifiers=[\n 'Natural Language :: English', 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython'])\n", (282, 1124), False, 'from setuptools import setup\n'), ((87, 112), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (102, 112), False, 'import os\n'), ((161, 213), 'os.path.join', 'os.path.join', (['here', '"""seq_interval"""', '"""__version__.py"""'], {}), "(here, 'seq_interval', '__version__.py')\n", (173, 213), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Azure Resource Manager (ARM) Redis Operations State Module
.. versionadded:: 2.0.0
.. versionchanged:: 4.0.0
:maintainer: <<EMAIL>>
:configuration: This module requires Azure Resource Manager credentials to be passed via acct. Note that the
authentication parameters are case sensitive.
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud.
Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
Example acct setup for Azure Resource Manager authentication:
.. code-block:: yaml
azurerm:
default:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
tenant: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
client_id: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
secret: XXXXXXXXXXXXXXXXXXXXXXXX
cloud_environment: AZURE_PUBLIC_CLOUD
user_pass_auth:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
username: fletch
password: <PASSWORD>
The authentication parameters can also be passed as a dictionary of keyword arguments to the ``connection_auth``
parameter of each state, but this is not preferred and could be deprecated in the future.
"""
# Python libs
from __future__ import absolute_import
from dict_tools import differ
import time
import logging
log = logging.getLogger(__name__)
TREQ = {"present": {"require": ["states.azurerm.resource.group.present",]}}
async def present(
hub,
ctx,
name,
resource_group,
location,
sku,
redis_configuration=None,
enable_non_ssl_port=None,
tenant_settings=None,
shard_count=None,
minimum_tls_version=None,
subnet_id=None,
static_ip=None,
zones=None,
polling=True,
poller_interval=60,
poller_timeout=60,
tags=None,
connection_auth=None,
**kwargs,
):
"""
.. versionadded:: 2.0.0
.. versionchanged:: 4.0.0
Ensure a redis cache exists in the resource group.
:param name: The name of the Redis cache.
:param resource_group: The name of the resource group.
:param location: The geo-location where the resource lives.
:param sku: A dictionary representing the SKU of the Redis cache to deploy. Required parameters include:
- ``name``: The type of Redis cache to deploy. Possible values include: 'Basic', 'Standard', and 'Premium'.
- ``family``: The SKU family to use. Possible values include 'C' for Basic/Standard and 'P' for Premium.
- ``capacity``: The size of the Redis cache to deploy. Possible values include 0, 1, 2, 3, 4, 5, and 6 for the
C (Basic/Standard) family and 1, 2, 3, and 4 for the P (Premium) family.
:param redis_configuration: A dictionary of string key-value pairs that represent all Redis Settings.
Some possible keys include: rdb-backup-enabled, rdb-storage-connection-string, rdb-backup-frequency,
maxmemory-delta, maxmemory-policy, notify-keyspace-events, maxmemory-samples, slowlog-log-slower-than,
slowlog-max-len, list-max-ziplist-entries, list-max-ziplist-value, hash-max-ziplist-entries,
hash-max-ziplist-value, set-max-intset-entries, zset-max-ziplist-entries, zset-max-ziplist-value, and more.
:param enable_non_ssl_port: Specifies whether the non-ssl Redis server port (6379) is enabled.
Defaults to False.
:param tenant_settings: A dictionary of tenant settings.
:param shard_count: The number of shards to be created on a Premium Cluster Cache.
:param minimum_tls_version: The specified TLS version (or higher) that clients are required to use.
Possible values include: '1.0', '1.1', and '1.2'.
:param subnet_id: The full resource ID of a subnet in a virtual network to deploy the Redis cache in. Example
format: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1.
:param static_ip: Static IP address. Required when deploying a Redis cache inside an existing Azure Virtual Network.
:param zones: A list of availability zones denoting where the resource needs to come from.
:param tags: A dictionary of strings can be passed as tag metadata to the Redis cache object.
:param polling: An optional boolean flag representing whether a Poller will be used during the creation of the
Redis Cache. If set to True, a Poller will be used by this operation and the module will not return until the
Redis Cache has completed its creation process and has been successfully provisioned. If set to False, the
module will return once the Redis Cache has successfully begun its creation process. Due to an issue with
polling within the most recent packages of ``azure-mgmt-redis``, the ``poller_interval`` and ``poller_timeout``
parameters are used to assist with polling if this parameter is set to True. Defaults to True.
:param poller_interval: The number of seconds between every attempt of the state module to poll Azure and check if
the Redis Cache has been successfully provisioned. This parameter must be an integer between 30 and 300.
Defaults to 60.
:param poller_timeout: The number of minutes that the state module should attempt to poll Azure about the
provisioning status of the Redis Cache before just returning the result of the execution. This parameter must
be an integer between 30 and 120. Defaults to 45.
:param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure redis cache exists:
azurerm.redis.operations.present:
- name: my_cache
- resource_group: my_group
- sku:
name: 'Premium'
family: 'P'
capacity: 3
- location: 'eastus'
- tags:
contact_name: <NAME>
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
action = "create"
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
cache = await hub.exec.azurerm.redis.operations.get(
ctx, name, resource_group, azurerm_log_level="info", **connection_auth
)
if "error" not in cache:
action = "update"
if tags:
tag_changes = differ.deep_diff(cache.get("tags", {}), tags)
if tag_changes:
ret["changes"]["tags"] = tag_changes
sku_changes = differ.deep_diff(cache.get("sku"), sku)
if sku_changes:
ret["changes"]["sku"] = sku_changes
if tenant_settings:
tenant_changes = differ.deep_diff(
cache.get("tenant_settings", {}), tenant_settings
)
if tenant_changes:
ret["changes"]["tenant_settings"] = tenant_changes
if redis_configuration:
config_changes = differ.deep_diff(
cache.get("redis_configuration", {}), redis_configuration
)
if config_changes:
ret["changes"]["redis_configuration"] = config_changes
if enable_non_ssl_port is not None:
if enable_non_ssl_port != cache.get("enable_non_ssl_port"):
ret["changes"]["enable_non_ssl_port"] = {
"old": cache.get("enable_non_ssl_port"),
"new": enable_non_ssl_port,
}
if shard_count is not None:
if shard_count != cache.get("shard_count", 0):
ret["changes"]["shard_count"] = {
"old": cache.get("shard_count"),
"new": shard_count,
}
if minimum_tls_version:
if minimum_tls_version != cache.get("minimum_tls_version"):
ret["changes"]["minimum_tls_version"] = {
"old": cache.get("minimum_tls_version"),
"new": minimum_tls_version,
}
if not ret["changes"]:
ret["result"] = True
ret["comment"] = "Redis cache {0} is already present.".format(name)
return ret
if ctx["test"]:
ret["result"] = None
ret["comment"] = "Redis cache {0} would be updated.".format(name)
return ret
if ctx["test"]:
ret["comment"] = "Redis cache {0} would be created.".format(name)
ret["result"] = None
return ret
cache_kwargs = kwargs.copy()
cache_kwargs.update(connection_auth)
if action == "create":
cache = await hub.exec.azurerm.redis.operations.create(
ctx=ctx,
name=name,
resource_group=resource_group,
sku=sku,
location=location,
redis_configuration=redis_configuration,
enable_non_ssl_port=enable_non_ssl_port,
tenant_settings=tenant_settings,
shard_count=shard_count,
minimum_tls_version=minimum_tls_version,
subnet_id=subnet_id,
static_ip=static_ip,
zones=zones,
tags=tags,
polling=polling,
**cache_kwargs,
)
else:
cache = await hub.exec.azurerm.redis.operations.update(
ctx=ctx,
name=name,
resource_group=resource_group,
sku=sku,
redis_configuration=redis_configuration,
enable_non_ssl_port=enable_non_ssl_port,
tenant_settings=tenant_settings,
shard_count=shard_count,
minimum_tls_version=minimum_tls_version,
tags=tags,
**cache_kwargs,
)
if polling:
if poller_interval < 30 or poller_interval > 300:
log.error(
"An invalid value was specified within the poller_interval parameter. The default value of 60 (seconds) will be used."
)
poller_interval = 60
if poller_timeout < 30 or poller_timeout > 120:
log.error(
"An invalid value was specified within the poller_timeout parameter. The default value of 60 (minutes) will be used."
)
poller_timeout = 60
# Convert poller_timeout from minutes to seconds
poller_timeout = poller_timeout * 60
polled_time = 0
while polled_time < poller_timeout:
time.sleep(poller_interval)
polled_time += poller_interval
status = await hub.exec.azurerm.redis.operations.get(
ctx=ctx, name=name, resource_group=resource_group
)
if status.get("provisioning_state").lower() in ["succeeded", "failed"]:
break
if action == "create":
ret["changes"] = {"old": {}, "new": cache}
if "error" not in cache:
ret["result"] = True
ret["comment"] = f"Redis cache {name} has been {action}d."
return ret
ret["comment"] = "Failed to {0} Redis cache {1}! ({2})".format(
action, name, cache.get("error")
)
if not ret["result"]:
ret["changes"] = {}
return ret
async def absent(hub, ctx, name, resource_group, connection_auth=None, **kwargs):
"""
.. versionadded:: 2.0.0
Ensure a Redis cache does not exist in the specified resource group.
:param name: The name of the Redis cache.
:param resource_group: The name of the resource group.
:param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure redis cache does not exist:
azurerm.redis.operations.absent:
- name: my_redis_cache
- resource_group: my_rg
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
cache = await hub.exec.azurerm.redis.operations.get(
ctx, name, resource_group, **connection_auth
)
if "error" in cache:
ret["result"] = True
ret["comment"] = "Redis cache {0} was not found.".format(name)
return ret
if ctx["test"]:
ret["comment"] = "Redis cache {0} would be deleted.".format(name)
ret["result"] = None
ret["changes"] = {
"old": cache,
"new": {},
}
return ret
deleted = await hub.exec.azurerm.redis.operations.delete(
ctx, name, resource_group, **connection_auth
)
if deleted:
ret["result"] = True
ret["comment"] = "Redis cache {0} has been deleted.".format(name)
ret["changes"] = {"old": cache, "new": {}}
return ret
ret["comment"] = "Failed to delete Redis cache {0}!".format(name)
return ret
|
[
"logging.getLogger",
"time.sleep"
] |
[((1819, 1846), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1836, 1846), False, 'import logging\n'), ((11230, 11257), 'time.sleep', 'time.sleep', (['poller_interval'], {}), '(poller_interval)\n', (11240, 11257), False, 'import time\n')]
|
# Name: main
# Author: Reacubeth
# Time: 2020/5/28 12:27
# Mail: <EMAIL>
# Site: www.omegaxyz.com
# *_*coding:utf-8 *_*
from paper2XML import PaperXML
from predictNER import ModelPredict
import time
import warnings
warnings.filterwarnings("ignore")
if __name__ == '__main__':
model = ModelPredict('save')
while True:
print('*******************************')
file_path = input('input file path: ').strip()
try:
start = time.time()
paper = PaperXML(file_path)
# print(paper.section_text)
print(model.predict(paper.section_text, clear=True))
print(time.time() - start)
except Exception as e:
print('Error: ', e)
print('Please input PDF file path!!!')
|
[
"paper2XML.PaperXML",
"time.time",
"predictNER.ModelPredict",
"warnings.filterwarnings"
] |
[((217, 250), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (240, 250), False, 'import warnings\n'), ((292, 312), 'predictNER.ModelPredict', 'ModelPredict', (['"""save"""'], {}), "('save')\n", (304, 312), False, 'from predictNER import ModelPredict\n'), ((466, 477), 'time.time', 'time.time', ([], {}), '()\n', (475, 477), False, 'import time\n'), ((498, 517), 'paper2XML.PaperXML', 'PaperXML', (['file_path'], {}), '(file_path)\n', (506, 517), False, 'from paper2XML import PaperXML\n'), ((641, 652), 'time.time', 'time.time', ([], {}), '()\n', (650, 652), False, 'import time\n')]
|
# Notebooks-contrib test
import os
import sys
def bsql_start():
"""
set up users to use SQL in the RAPIDS AI ecosystem
> try to import BlazingContext from BlazingSQL
> offer to install BlazingSQL if module not found
BlazingSQL prereqs:
> Conda: https://docs.blazingdb.com/docs/install-via-conda#section-conda-prerequisites
> Docker: https://docs.blazingdb.com/docs/install-via-docker#section-docker-hub-prerequisites
latest install scripts:
> Conda: https://docs.blazingdb.com/docs/install-via-conda
> Docker: https://docs.blazingdb.com/docs/install-via-docker
> Source: https://docs.blazingdb.com/docs/build-from-source
"""
# is BlazingSQL installed?
try:
from blazingsql import BlazingContext
# yes, indicate success
return "You've got BlazingSQL set up perfectly! Let's get started with SQL in RAPIDS AI!"
# BlazingSQL not found
except ModuleNotFoundError:
# do we want to install BlazingSQL?
print("Unable to locate BlazingSQL. We'll install it now")
# Install JRE first
os.system("apt-get update")
os.system("apt-get -y install default-jre")
# tag BlazingSQL conda install script
b = "conda install -c blazingsql/label/cuda10.0 -c blazingsql"
b += ' -c rapidsai -c nvidia -c conda-forge -c defaults '
b += "blazingsql python=3.7 cudatoolkit=10.0" # CUDA 10, Python 3.7 (BlazingSQL also supports CUDA 9.2)
# tag python version
py = sys.version.split('.') # e.g. output: ['3', '6', '7 | packaged by cond...
if py[0] == '3': # make sure we're in 3
py = py[1] # focus mid version (3.?)
# are we on python 3.6?
if py == '6':
# adjust to 3.6 install script
b = b.replace('python=3.7', 'python=3.6')
# lmk what's going on?
print('Installing BlazingSQL, this should take some time. This is only need to be done once')
# install BlazingSQL
os.system(b)
# indicate completion
return f"Let's get started with SQL in RAPIDS AI!"
if __name__=='__main__':
# check environment for BlazingSQL
check = bsql_start()
print(check)
|
[
"sys.version.split",
"os.system"
] |
[((1147, 1174), 'os.system', 'os.system', (['"""apt-get update"""'], {}), "('apt-get update')\n", (1156, 1174), False, 'import os\n'), ((1187, 1230), 'os.system', 'os.system', (['"""apt-get -y install default-jre"""'], {}), "('apt-get -y install default-jre')\n", (1196, 1230), False, 'import os\n'), ((1594, 1616), 'sys.version.split', 'sys.version.split', (['"""."""'], {}), "('.')\n", (1611, 1616), False, 'import sys\n'), ((2131, 2143), 'os.system', 'os.system', (['b'], {}), '(b)\n', (2140, 2143), False, 'import os\n')]
|
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from streamlit.errors import StreamlitAPIException
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.report_thread import ReportContext
from streamlit.state.session_state import SessionState
from streamlit.uploaded_file_manager import UploadedFileManager
class ReportContextTest(unittest.TestCase):
def test_set_page_config_immutable(self):
"""st.set_page_config must be called at most once"""
fake_enqueue = lambda msg: None
ctx = ReportContext(
"TestSessionID",
fake_enqueue,
"",
SessionState(),
UploadedFileManager(),
)
msg = ForwardMsg()
msg.page_config_changed.title = "foo"
ctx.enqueue(msg)
with self.assertRaises(StreamlitAPIException):
ctx.enqueue(msg)
def test_set_page_config_first(self):
"""st.set_page_config must be called before other st commands
when the script has been marked as started"""
fake_enqueue = lambda msg: None
ctx = ReportContext(
"TestSessionID",
fake_enqueue,
"",
SessionState(),
UploadedFileManager(),
)
ctx.on_script_start()
markdown_msg = ForwardMsg()
markdown_msg.delta.new_element.markdown.body = "foo"
msg = ForwardMsg()
msg.page_config_changed.title = "foo"
ctx.enqueue(markdown_msg)
with self.assertRaises(StreamlitAPIException):
ctx.enqueue(msg)
def test_disallow_set_page_config_twice(self):
"""st.set_page_config cannot be called twice"""
fake_enqueue = lambda msg: None
ctx = ReportContext(
"TestSessionID",
fake_enqueue,
"",
SessionState(),
UploadedFileManager(),
)
ctx.on_script_start()
msg = ForwardMsg()
msg.page_config_changed.title = "foo"
ctx.enqueue(msg)
with self.assertRaises(StreamlitAPIException):
same_msg = ForwardMsg()
same_msg.page_config_changed.title = "bar"
ctx.enqueue(same_msg)
def test_set_page_config_reset(self):
"""st.set_page_config should be allowed after a rerun"""
fake_enqueue = lambda msg: None
ctx = ReportContext(
"TestSessionID",
fake_enqueue,
"",
SessionState(),
UploadedFileManager(),
)
ctx.on_script_start()
msg = ForwardMsg()
msg.page_config_changed.title = "foo"
ctx.enqueue(msg)
ctx.reset()
try:
ctx.on_script_start()
ctx.enqueue(msg)
except StreamlitAPIException:
self.fail("set_page_config should have succeeded after reset!")
|
[
"streamlit.uploaded_file_manager.UploadedFileManager",
"streamlit.state.session_state.SessionState",
"streamlit.proto.ForwardMsg_pb2.ForwardMsg"
] |
[((1256, 1268), 'streamlit.proto.ForwardMsg_pb2.ForwardMsg', 'ForwardMsg', ([], {}), '()\n', (1266, 1268), False, 'from streamlit.proto.ForwardMsg_pb2 import ForwardMsg\n'), ((1861, 1873), 'streamlit.proto.ForwardMsg_pb2.ForwardMsg', 'ForwardMsg', ([], {}), '()\n', (1871, 1873), False, 'from streamlit.proto.ForwardMsg_pb2 import ForwardMsg\n'), ((1950, 1962), 'streamlit.proto.ForwardMsg_pb2.ForwardMsg', 'ForwardMsg', ([], {}), '()\n', (1960, 1962), False, 'from streamlit.proto.ForwardMsg_pb2 import ForwardMsg\n'), ((2496, 2508), 'streamlit.proto.ForwardMsg_pb2.ForwardMsg', 'ForwardMsg', ([], {}), '()\n', (2506, 2508), False, 'from streamlit.proto.ForwardMsg_pb2 import ForwardMsg\n'), ((3129, 3141), 'streamlit.proto.ForwardMsg_pb2.ForwardMsg', 'ForwardMsg', ([], {}), '()\n', (3139, 3141), False, 'from streamlit.proto.ForwardMsg_pb2 import ForwardMsg\n'), ((1180, 1194), 'streamlit.state.session_state.SessionState', 'SessionState', ([], {}), '()\n', (1192, 1194), False, 'from streamlit.state.session_state import SessionState\n'), ((1208, 1229), 'streamlit.uploaded_file_manager.UploadedFileManager', 'UploadedFileManager', ([], {}), '()\n', (1227, 1229), False, 'from streamlit.uploaded_file_manager import UploadedFileManager\n'), ((1745, 1759), 'streamlit.state.session_state.SessionState', 'SessionState', ([], {}), '()\n', (1757, 1759), False, 'from streamlit.state.session_state import SessionState\n'), ((1773, 1794), 'streamlit.uploaded_file_manager.UploadedFileManager', 'UploadedFileManager', ([], {}), '()\n', (1792, 1794), False, 'from streamlit.uploaded_file_manager import UploadedFileManager\n'), ((2389, 2403), 'streamlit.state.session_state.SessionState', 'SessionState', ([], {}), '()\n', (2401, 2403), False, 'from streamlit.state.session_state import SessionState\n'), ((2417, 2438), 'streamlit.uploaded_file_manager.UploadedFileManager', 'UploadedFileManager', ([], {}), '()\n', (2436, 2438), False, 'from streamlit.uploaded_file_manager import UploadedFileManager\n'), ((2659, 2671), 'streamlit.proto.ForwardMsg_pb2.ForwardMsg', 'ForwardMsg', ([], {}), '()\n', (2669, 2671), False, 'from streamlit.proto.ForwardMsg_pb2 import ForwardMsg\n'), ((3022, 3036), 'streamlit.state.session_state.SessionState', 'SessionState', ([], {}), '()\n', (3034, 3036), False, 'from streamlit.state.session_state import SessionState\n'), ((3050, 3071), 'streamlit.uploaded_file_manager.UploadedFileManager', 'UploadedFileManager', ([], {}), '()\n', (3069, 3071), False, 'from streamlit.uploaded_file_manager import UploadedFileManager\n')]
|
#t16.py
#nltk matplotlib
#python 2.7+
import nltk, matplotlib
def vocab_growth(text):
vocabulary = set()
for text in texts:
for word in text:
vocabulary.add(word)
yield len(vocabulary)
#def speeches():
# presidents = []
# texts = nltk.defaultdict(list)
##sen = nltk.corpus.state_union.sents()
##for speech in nltk.corpus.state_union.sents():
##for speechlist in sen:nltk.corpus.state_union.words()
# for speech in nltk.corpus.state_union.words():
##print(speechlist)
# print(speech)
##for s in speechlist: speech.append
# president = speech.split('-')[1]
# if president not in texts:
# presidents.append(president)
# texts[president].append(nltk.corpus.state_union.words(speech))
# return [(president, texts[president]) for president in presidents]
import matplotlib
def president():
for president, texts in speeches()[-7:]:
growth = list(vocab_growth(texts))[:10000]
matplotlib.plot(growth, label=president, linewidth=2)
matplotlib.title('Vocabulary Growth in State-of-the-Union Addresses')
matplotlib.legend(loc='lower right')
matplotlib.show()
#president()
def drawtest():
g = list(range(0, 100))
#print(dir(matplotlib))
s = dir(matplotlib)
for s1 in s: print(s1)
#matplotlib.plot(g, label="pre", linewidth=2)
#matplotlib.title('Vocabulary Growth in State-of-the-Union Addresses')
#matplotlib.legend(loc='lower right')
#matplotlib.show()
drawtest()
|
[
"matplotlib.plot",
"matplotlib.show",
"matplotlib.legend",
"matplotlib.title"
] |
[((1058, 1111), 'matplotlib.plot', 'matplotlib.plot', (['growth'], {'label': 'president', 'linewidth': '(2)'}), '(growth, label=president, linewidth=2)\n', (1073, 1111), False, 'import matplotlib\n'), ((1119, 1188), 'matplotlib.title', 'matplotlib.title', (['"""Vocabulary Growth in State-of-the-Union Addresses"""'], {}), "('Vocabulary Growth in State-of-the-Union Addresses')\n", (1135, 1188), False, 'import matplotlib\n'), ((1196, 1232), 'matplotlib.legend', 'matplotlib.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (1213, 1232), False, 'import matplotlib\n'), ((1240, 1257), 'matplotlib.show', 'matplotlib.show', ([], {}), '()\n', (1255, 1257), False, 'import matplotlib\n')]
|
import os
import argparse
import base64
import warnings
from multiprocessing import Pool
import shutil
import zlib
import numpy as np
import cv2
import h5py
from tqdm import tqdm
def encode_single(info):
source_path, target_path, video, video_index, num_videos, delete = info
print('Encoding {} / {} file.'.format(video_index, num_videos))
if os.path.exists(os.path.join(target_path, f'{video}.h5')):
return
file = h5py.File(os.path.join(target_path, f'{video}.h5'), 'w',
driver='core')
for frame in os.listdir(os.path.join(source_path, video)):
with open(os.path.join(source_path, video, frame), 'rb') as frame_file:
string_image = frame_file.read()
string_image = np.void(string_image)
file.create_dataset(frame, data=string_image)
file.close()
if delete:
shutil.rmtree(os.path.join(source_path, video))
return
def encode(opt):
assert os.path.exists(opt.source_path)
if not os.path.exists(opt.target_path):
os.mkdir(opt.target_path)
videos = os.listdir(opt.source_path)
videos = list(filter(lambda x: os.path.isdir(os.path.join(opt.source_path,
x)), videos))
num_videos = len(videos)
if opt.num_worker == 1:
for video in tqdm(videos):
encode_single((opt.source_path, opt.target_path, video))
else:
pool = Pool(opt.num_worker)
pool.map(encode_single, zip([opt.source_path] * num_videos,
[opt.target_path] * num_videos,
videos, range(num_videos),
[num_videos] * num_videos,
[opt.delete_original] * num_videos))
def decode_single(info):
source_path, target_path, video_file, video_index, num_videos, delete = info
print('Decoding {} / {} file.'.format(video_index, num_videos))
video_name = video_file.split('.')[0]
if not os.path.exists(os.path.join(target_path, video_name)):
os.mkdir(os.path.join(target_path, video_name))
file = h5py.File(os.path.join(source_path, video_file), 'r', driver='core')
for key in file.keys():
frame = open(os.path.join(target_path, video_name, key), 'wb')
frame.write(file[key][()].tobytes())
frame.close()
file.close()
if delete:
shutil.rmtree(os.path.join(source_path, video_file))
def decode(opt):
assert os.path.exists(opt.source_path)
if not os.path.exists(opt.target_path):
os.mkdir(opt.target_path)
video_files = os.listdir(opt.source_path)
num_videos = len(video_files)
if opt.num_worker == 1:
for video_file in tqdm(video_files):
decode_single(opt.source_path, opt.target_path, video_file)
else:
pool = Pool(opt.num_worker)
pool.map(decode_single, zip([opt.source_path] * num_videos,
[opt.target_path] * num_videos,
video_files, range(num_videos),
[num_videos] * num_videos,
[opt.delete_original] * num_videos))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-sp', '--source_path', type=str,
default='/home/yhzhai/Downloads/tmp-frames')
parser.add_argument('-tp', '--target_path', type=str,
default='/home/yhzhai/Downloads/tmp-hdf5')
parser.add_argument('--num_worker', type=int, default=4)
parser.add_argument('--single_video', action='store_true', default=False)
parser.add_argument('--decode', action='store_true', default=False)
parser.add_argument('-d', '--delete_original', action='store_true',
default=False)
opt = parser.parse_args()
if not opt.single_video:
if opt.decode:
decode(opt)
else:
encode(opt)
else:
if opt.decode:
source_path, video_file = os.path.split(opt.source_path)
decode_single((source_path, opt.target_path, video_file, 1, 1,
opt.delete_original))
else:
source_path, video_name = os.path.split(opt.source_path)
encode_single((source_path, opt.target_path, video_name, 1, 1,
opt.delete_original))
|
[
"os.mkdir",
"tqdm.tqdm",
"numpy.void",
"argparse.ArgumentParser",
"os.path.exists",
"multiprocessing.Pool",
"os.path.split",
"os.path.join",
"os.listdir"
] |
[((942, 973), 'os.path.exists', 'os.path.exists', (['opt.source_path'], {}), '(opt.source_path)\n', (956, 973), False, 'import os\n'), ((1066, 1093), 'os.listdir', 'os.listdir', (['opt.source_path'], {}), '(opt.source_path)\n', (1076, 1093), False, 'import os\n'), ((2466, 2497), 'os.path.exists', 'os.path.exists', (['opt.source_path'], {}), '(opt.source_path)\n', (2480, 2497), False, 'import os\n'), ((2603, 2630), 'os.listdir', 'os.listdir', (['opt.source_path'], {}), '(opt.source_path)\n', (2613, 2630), False, 'import os\n'), ((3242, 3267), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3265, 3267), False, 'import argparse\n'), ((373, 413), 'os.path.join', 'os.path.join', (['target_path', 'f"""{video}.h5"""'], {}), "(target_path, f'{video}.h5')\n", (385, 413), False, 'import os\n'), ((452, 492), 'os.path.join', 'os.path.join', (['target_path', 'f"""{video}.h5"""'], {}), "(target_path, f'{video}.h5')\n", (464, 492), False, 'import os\n'), ((550, 582), 'os.path.join', 'os.path.join', (['source_path', 'video'], {}), '(source_path, video)\n', (562, 582), False, 'import os\n'), ((985, 1016), 'os.path.exists', 'os.path.exists', (['opt.target_path'], {}), '(opt.target_path)\n', (999, 1016), False, 'import os\n'), ((1026, 1051), 'os.mkdir', 'os.mkdir', (['opt.target_path'], {}), '(opt.target_path)\n', (1034, 1051), False, 'import os\n'), ((1273, 1285), 'tqdm.tqdm', 'tqdm', (['videos'], {}), '(videos)\n', (1277, 1285), False, 'from tqdm import tqdm\n'), ((1381, 1401), 'multiprocessing.Pool', 'Pool', (['opt.num_worker'], {}), '(opt.num_worker)\n', (1385, 1401), False, 'from multiprocessing import Pool\n'), ((2118, 2155), 'os.path.join', 'os.path.join', (['source_path', 'video_file'], {}), '(source_path, video_file)\n', (2130, 2155), False, 'import os\n'), ((2509, 2540), 'os.path.exists', 'os.path.exists', (['opt.target_path'], {}), '(opt.target_path)\n', (2523, 2540), False, 'import os\n'), ((2550, 2575), 'os.mkdir', 'os.mkdir', (['opt.target_path'], {}), '(opt.target_path)\n', (2558, 2575), False, 'import os\n'), ((2719, 2736), 'tqdm.tqdm', 'tqdm', (['video_files'], {}), '(video_files)\n', (2723, 2736), False, 'from tqdm import tqdm\n'), ((2835, 2855), 'multiprocessing.Pool', 'Pool', (['opt.num_worker'], {}), '(opt.num_worker)\n', (2839, 2855), False, 'from multiprocessing import Pool\n'), ((737, 758), 'numpy.void', 'np.void', (['string_image'], {}), '(string_image)\n', (744, 758), True, 'import numpy as np\n'), ((867, 899), 'os.path.join', 'os.path.join', (['source_path', 'video'], {}), '(source_path, video)\n', (879, 899), False, 'import os\n'), ((2001, 2038), 'os.path.join', 'os.path.join', (['target_path', 'video_name'], {}), '(target_path, video_name)\n', (2013, 2038), False, 'import os\n'), ((2058, 2095), 'os.path.join', 'os.path.join', (['target_path', 'video_name'], {}), '(target_path, video_name)\n', (2070, 2095), False, 'import os\n'), ((2226, 2268), 'os.path.join', 'os.path.join', (['target_path', 'video_name', 'key'], {}), '(target_path, video_name, key)\n', (2238, 2268), False, 'import os\n'), ((2397, 2434), 'os.path.join', 'os.path.join', (['source_path', 'video_file'], {}), '(source_path, video_file)\n', (2409, 2434), False, 'import os\n'), ((4051, 4081), 'os.path.split', 'os.path.split', (['opt.source_path'], {}), '(opt.source_path)\n', (4064, 4081), False, 'import os\n'), ((4247, 4277), 'os.path.split', 'os.path.split', (['opt.source_path'], {}), '(opt.source_path)\n', (4260, 4277), False, 'import os\n'), ((603, 642), 'os.path.join', 'os.path.join', (['source_path', 'video', 'frame'], {}), '(source_path, video, frame)\n', (615, 642), False, 'import os\n'), ((1143, 1175), 'os.path.join', 'os.path.join', (['opt.source_path', 'x'], {}), '(opt.source_path, x)\n', (1155, 1175), False, 'import os\n')]
|
# testutils.py
# -*- coding: utf8 -*-
# vim:fileencoding=utf8 ai ts=4 sts=4 et sw=4
# Copyright 2016 National Research Foundation (South African Radio Astronomy Observatory)
# BSD license - see LICENSE for details
from __future__ import absolute_import, division, print_function
from future import standard_library
standard_library.install_aliases()
import sys
import logging
import time
import mock
from builtins import object
LOGGER = logging.getLogger(__name__)
def set_attributes_polling(test_case, device_proxy, device_server, poll_periods):
"""Set attribute polling and restore after test
Parameters
----------
test_case : unittest.TestCase instance
device_proxy : tango.DeviceProxy instance
device_server : tango.Device instance
The instance of the device class `device_proxy` is talking to
poll_periods : dict {"attribute_name" : poll_period}
`poll_poriod` in milliseconds as per Tango APIs, 0 or falsy to disable
polling.
Return value
------------
restore_polling : function
This function can be used to restore polling if it is to happen before the end of
the test. Should be idempotent if only one set_attributes_polling() is called per
test.
"""
# TODO (NM 2016-04-11) check if this is still needed after upgrade to Tango 9.x For
# some reason it only works if the device_proxy is used to set polling, but the
# device_server is used to clear the polling. If polling is cleared using device_proxy
# it seem to be impossible to restore the polling afterwards.
attributes = poll_periods.keys()
initial_polling = {
attr: device_proxy.get_attribute_poll_period(attr) for attr in attributes
}
retry_time = 0.5
for attr in attributes:
initial_period = initial_polling[attr]
new_period = poll_periods[attr]
# Disable polling for attributes with poll_period of zero / falsy
# zero initial_period implies no polling currently configed
if not new_period and initial_period != 0:
LOGGER.debug("not setting polling for {}".format(attr))
device_server.stop_poll_attribute(attr)
else:
# Set the polling
LOGGER.debug("setting polling for {}".format(attr))
try:
device_proxy.poll_attribute(attr, new_period)
# TODO See (NM 2016-04-11) comment below about back-to-back calls
time.sleep(0.05)
except Exception:
retry = True
LOGGER.warning(
"Setting polling of attribute {} in {} due to unhandled"
"exception in poll_attribute command".format(attr, retry_time),
exc_info=True,
)
else:
retry = False
if retry:
time.sleep(retry_time)
device_proxy.poll_attribute(attr, new_period)
def restore_polling():
"""Restore initial polling, for use during cleanup / teardown"""
for attr, period in initial_polling.items():
if period == 0:
continue # zero period implies no polling, nothing to do
try:
device_proxy.poll_attribute(attr, period)
# TODO (NM 2016-04-11) For some reason Tango doesn't seem to handle
# back-to-back calls, and even with the sleep it sometimes goes bad. Need
# to check if this is fixed (and core dumps) when we upgrade to Tango 9.x
time.sleep(0.05)
except Exception:
retry = True
LOGGER.warning(
"retrying restore of attribute {} in {} due to unhandled"
"exception in poll_attribute command".format(attr, retry_time),
exc_info=True,
)
else:
retry = False
if retry:
time.sleep(retry_time)
device_proxy.poll_attribute(attr, period)
test_case.addCleanup(restore_polling)
return restore_polling
def disable_attributes_polling(test_case, device_proxy, device_server, attributes):
"""Disable polling for a tango device server, en re-eable at end of test"""
new_periods = {attr: 0 for attr in attributes}
return set_attributes_polling(test_case, device_proxy, device_server, new_periods)
class ClassCleanupUnittestMixin(object):
"""Implement class-level setup/deardown semantics that emulate addCleanup()
Subclasses can define a setUpClassWithCleanup() method that wraps addCleanup
such that cls.addCleanup() can be used to add cleanup methods that will be
called at class tear-down time.
"""
_class_cleanups = []
@classmethod
def setUpClassWithCleanup(cls):
"""Do class-level setup and ensure that cleanup functions are called
It is inteded that subclasses override this class method
In this method calls to `cls.addCleanup` is forwarded to
`cls.addCleanupClass`, which means callables registered with
`cls.addCleanup()` is added to the class-level cleanup function stack.
"""
super(ClassCleanupUnittestMixin, cls).setUpClassWithCleanup()
@classmethod
def addCleanupClass(cls, function, *args, **kwargs):
"""Add a cleanup that will be called at class tear-down time"""
cls._class_cleanups.append((function, args, kwargs))
@classmethod
def doCleanupsClass(cls):
"""Run class-level cleanups registered with `cls.addCleanupClass()`"""
results = []
while cls._class_cleanups:
function, args, kwargs = cls._class_cleanups.pop()
try:
function(*args, **kwargs)
except Exception:
LOGGER.exception("Exception calling class cleanup function")
results.append(sys.exc_info())
if results:
LOGGER.error("Exception(s) raised during class cleanup")
@classmethod
def setUpClass(cls):
"""Call `setUpClassWithCleanup` with `cls.addCleanup` for class-level cleanup
Any exceptions raised during `cls.setUpClassWithCleanup` will result in
the cleanups registered up to that point being called before logging
the exception with traceback.
"""
try:
with mock.patch.object(cls, "addCleanup") as cls_addCleanup:
cls_addCleanup.side_effect = cls.addCleanupClass
cls.setUpClassWithCleanup()
except Exception:
LOGGER.exception("Exception during setUpClass")
cls.doCleanupsClass()
@classmethod
def tearDownClass(cls):
cls.doCleanupsClass()
|
[
"mock.patch.object",
"future.standard_library.install_aliases",
"time.sleep",
"sys.exc_info",
"logging.getLogger"
] |
[((317, 351), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (349, 351), False, 'from future import standard_library\n'), ((442, 469), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (459, 469), False, 'import logging\n'), ((2474, 2490), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (2484, 2490), False, 'import time\n'), ((2883, 2905), 'time.sleep', 'time.sleep', (['retry_time'], {}), '(retry_time)\n', (2893, 2905), False, 'import time\n'), ((3579, 3595), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (3589, 3595), False, 'import time\n'), ((3989, 4011), 'time.sleep', 'time.sleep', (['retry_time'], {}), '(retry_time)\n', (3999, 4011), False, 'import time\n'), ((6419, 6455), 'mock.patch.object', 'mock.patch.object', (['cls', '"""addCleanup"""'], {}), "(cls, 'addCleanup')\n", (6436, 6455), False, 'import mock\n'), ((5945, 5959), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5957, 5959), False, 'import sys\n')]
|
import math
import operator
import numpy as np
def _convert_to_float(fl):
""" This method converts ONLY the numeric values of a string into floats """
try:
return float(fl)
except (ValueError, TypeError):
return fl
def _wrap_to_pi(angle):
""" This method wrap the input angle to 360 [deg]
angle : [deg] """
ang2pi = angle - (angle // (2 * np.pi)) * 2 * np.pi
if ang2pi > np.pi or ang2pi < - np.pi:
ang = ang2pi - np.sign(ang2pi) * 2 * np.pi
return ang
def _quaternion_multiply(quat1, quat0):
""" This method performs a standard quaternion multiplication
quat0 : [qR0, qV0], with qR0 being the real part of the quaternion
quat1 : [qR1, qV1], with qR1 being the real part of the quaternion """
w0, x0, y0, z0 = quat0
w1, x1, y1, z1 = quat1
return np.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0,
x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0,
-x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0,
x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=np.float64)
def _solve_fk(eva, joints):
""" This method solves the forward kinematics problem and extract the results directly as an array
joints : joint angles in [rad]
pos : cartesian position, with respect to robot's origin [m]
orient : orientation quaternion of the end effector """
fk_results = eva.calc_forward_kinematics(joints)
pos_json = fk_results['position']
orient_json = fk_results['orientation']
pos = [pos_json['x'], pos_json['y'], pos_json['z']]
orient = [orient_json['w'], orient_json['x'], orient_json['y'], orient_json['z']]
return pos, orient
def solve_ik_head_down(eva, guess, theta, xyz_absolute):
""" This method solves the inverse kinematics problem for the special case of the end-effector
pointing downwards, perpendicular to the ground.
guess : is the IK guess, a 1x6 array of joint angles in [rad]
theta : angular rotation of axis 6 [deg]
xyz_absolute : cartesian position, with respect to robot's origin [m] """
pos = [xyz_absolute[0], xyz_absolute[1], xyz_absolute[2]] # [m]
pos_json = {'x': (pos[0]), 'y': (pos[1]), 'z': (pos[2])} # [m]
orient_rel = [math.cos(np.deg2rad(theta) / 2), 0, 0, math.sin(np.deg2rad(theta) / 2)]
orient_abs = _quaternion_multiply([0, 0, 1, 0], orient_rel)
orient_json = {'w': (orient_abs[0]), 'x': (orient_abs[1]), 'y': (orient_abs[2]), 'z': (orient_abs[3])}
# Compute IK
result_ik = eva.calc_inverse_kinematics(guess, pos_json, orient_json)
success_ik = result_ik['ik']['result']
joints_ik = result_ik['ik']['joints']
return success_ik, joints_ik
def read_tcp_ip(sock, objects):
""" This method reads and decodes the string sent from the camera """
result = sock.recv(4000)
string_read = result.decode('utf-8')
string_split = string_read.split(",")
camera_string_raw = list(string_split)
passed = False
camera_string = ['']
if len(camera_string_raw) is not 0:
if camera_string_raw[0] == 'start' and camera_string_raw[19] == 'end' and len(camera_string_raw) == 20:
camera_string_raw = [_convert_to_float(fl) for fl in camera_string_raw]
passes = [camera_string_raw[6], camera_string_raw[12], camera_string_raw[18]]
scores = [camera_string_raw[5], camera_string_raw[11], camera_string_raw[17]]
passed_score = [passes[0] * scores[0], passes[1] * scores[1], passes[2] * scores[2]]
max_index, max_value = max(enumerate(passed_score), key=operator.itemgetter(1))
select_obj = objects[max_index]
if max_value > 0:
passed = True
# Extract the best matching object from the string
camera_string = _extract_camera_serial(objects, select_obj, camera_string_raw)
# String format = ['start', 'object_name', float x_mm, float y_mm, float angle]
return passed, camera_string
def _extract_camera_serial(objects, index, camera_string_raw):
""" This method extracts only the best matching object data from the entire string """
camera_string = ['', 0, 0, 0, 0]
if index not in objects:
print('Wrong object in the list')
elif index is 'C':
camera_string[0] = 'start'
camera_string[1] = camera_string_raw[1]
camera_string[2] = camera_string_raw[2]
camera_string[3] = camera_string_raw[3]
camera_string[4] = camera_string_raw[4]
elif index is 'M':
camera_string[0] = 'start'
camera_string[1] = camera_string_raw[7]
camera_string[2] = camera_string_raw[8]
camera_string[3] = camera_string_raw[9]
camera_string[4] = camera_string_raw[10]
elif index is 'R':
camera_string[0] = 'start'
camera_string[1] = camera_string_raw[13]
camera_string[2] = camera_string_raw[14]
camera_string[3] = camera_string_raw[15]
camera_string[4] = camera_string_raw[16]
return camera_string
class EvaVision:
""" This class performs the machine vision operations in order to obtain the object position in Eva's frame """
def __init__(self, eva, string, cal_zero, obj_height=0.0, surf_height=0.0, ee_length=0.0):
self.eva = eva
self.string = string
self.cal = cal_zero
self.obj = obj_height
self.surf = surf_height
self.ee = ee_length
def locate_object(self):
print('Pattern identified is: ', self.string[1])
# Relative object position in camera frame:
x_obj_rel_cam = 0.001*self.string[2] # transform X value from [mm] into [m]
y_obj_rel_cam = 0.001*self.string[3] # transform Y value from [mm] into [m]
# Compute relative object position in Eva's frame:
# Need to known Eva's frame rotation wrt to camera frame
# Convention: start from camera frame and rotate of ang [deg] to get to Eva's frame
ang_cam = 180 # [deg]
x_obj_rel = np.cos(np.deg2rad(ang_cam)) * x_obj_rel_cam + np.sin(np.deg2rad(ang_cam)) * y_obj_rel_cam # [m]
y_obj_rel = -np.sin(np.deg2rad(ang_cam)) * x_obj_rel_cam + np.cos(np.deg2rad(ang_cam)) * y_obj_rel_cam # [m]
# Compute absolute object position of calibration board origin in Eva's frame:
pos_cal = self.eva.calc_forward_kinematics(self.cal)['position']
# Compute absolute object position by summing the calibration board origin to the relative object position
x_obj_abs = x_obj_rel + pos_cal['x'] # [m]
y_obj_abs = y_obj_rel + pos_cal['y'] # [m]
# Compute absolute value of Z
z_obj_abs = abs(self.obj) + self.surf + abs(self.ee)
pos_abs = [x_obj_abs, y_obj_abs, z_obj_abs]
return pos_abs
|
[
"operator.itemgetter",
"numpy.array",
"numpy.sign",
"numpy.deg2rad"
] |
[((835, 1029), 'numpy.array', 'np.array', (['[-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, x1 * w0 + y1 * z0 - z1 * y0 + w1 *\n x0, -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, x1 * y0 - y1 * x0 + z1 * w0 +\n w1 * z0]'], {'dtype': 'np.float64'}), '([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, x1 * w0 + y1 * z0 - z1 *\n y0 + w1 * x0, -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, x1 * y0 - y1 * x0 +\n z1 * w0 + w1 * z0], dtype=np.float64)\n', (843, 1029), True, 'import numpy as np\n'), ((2242, 2259), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (2252, 2259), True, 'import numpy as np\n'), ((2281, 2298), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (2291, 2298), True, 'import numpy as np\n'), ((469, 484), 'numpy.sign', 'np.sign', (['ang2pi'], {}), '(ang2pi)\n', (476, 484), True, 'import numpy as np\n'), ((3573, 3595), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (3592, 3595), False, 'import operator\n'), ((6007, 6026), 'numpy.deg2rad', 'np.deg2rad', (['ang_cam'], {}), '(ang_cam)\n', (6017, 6026), True, 'import numpy as np\n'), ((6053, 6072), 'numpy.deg2rad', 'np.deg2rad', (['ang_cam'], {}), '(ang_cam)\n', (6063, 6072), True, 'import numpy as np\n'), ((6171, 6190), 'numpy.deg2rad', 'np.deg2rad', (['ang_cam'], {}), '(ang_cam)\n', (6181, 6190), True, 'import numpy as np\n'), ((6125, 6144), 'numpy.deg2rad', 'np.deg2rad', (['ang_cam'], {}), '(ang_cam)\n', (6135, 6144), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
from collections import OrderedDict
from brainstorm.layers.base_layer import Layer
from brainstorm.structure.buffer_structure import (BufferStructure,
StructureTemplate)
from brainstorm.structure.construction import ConstructionWrapper
from brainstorm.utils import LayerValidationError, flatten_time, \
flatten_time_and_features
def Recurrent(size, activation='tanh', name=None):
"""Create a Simple Recurrent layer."""
return ConstructionWrapper.create(RecurrentLayerImpl, size=size,
name=name, activation=activation)
class RecurrentLayerImpl(Layer):
expected_inputs = {'default': StructureTemplate('T', 'B', '...')}
expected_kwargs = {'size', 'activation'}
def setup(self, kwargs, in_shapes):
self.activation = kwargs.get('activation', 'tanh')
self.size = kwargs.get('size', self.in_shapes['default'].feature_size)
if not isinstance(self.size, int):
raise LayerValidationError('size must be int but was {}'.
format(self.size))
in_size = self.in_shapes['default'].feature_size
outputs = OrderedDict()
outputs['default'] = BufferStructure('T', 'B', self.size,
context_size=1)
parameters = OrderedDict()
parameters['W'] = BufferStructure(self.size, in_size)
parameters['R'] = BufferStructure(self.size, self.size)
parameters['bias'] = BufferStructure(self.size)
internals = OrderedDict()
internals['Ha'] = BufferStructure('T', 'B', self.size, context_size=1)
internals['dHa'] = BufferStructure('T', 'B', self.size, context_size=1,
is_backward_only=True)
internals['dHb'] = BufferStructure('T', 'B', self.size, context_size=1,
is_backward_only=True)
return outputs, parameters, internals
def forward_pass(self, buffers, training_pass=True):
# prepare
_h = self.handler
W, R, bias = buffers.parameters
inputs = buffers.inputs.default
outputs = buffers.outputs.default
Ha = buffers.internals.Ha
flat_inputs = flatten_time_and_features(inputs)
flat_H = flatten_time(Ha[:-1])
_h.dot_mm(flat_inputs, W, flat_H, transb=True)
_h.add_mv(flat_H, bias.reshape((1, self.size)), flat_H)
for t in range(inputs.shape[0]):
_h.dot_add_mm(outputs[t - 1], R, Ha[t], transb=True)
_h.act_func[self.activation](Ha[t], outputs[t])
def backward_pass(self, buffers):
# prepare
_h = self.handler
W, R, bias = buffers.parameters
dW, dR, dbias = buffers.gradients
inputs = buffers.inputs.default
outputs = buffers.outputs.default
dinputs = buffers.input_deltas.default
doutputs = buffers.output_deltas.default
Ha, dHa, dHb = buffers.internals
_h.copy_to(doutputs, dHb)
T = inputs.shape[0] - 1
_h.act_func_deriv[self.activation](Ha[T], outputs[T], dHb[T], dHa[T])
for t in range(T - 1, -1, -1):
_h.dot_add_mm(dHa[t + 1], R, dHb[t])
_h.act_func_deriv[self.activation](Ha[t], outputs[t],
dHb[t], dHa[t])
flat_inputs = flatten_time_and_features(inputs)
flat_dinputs = flatten_time_and_features(dinputs)
flat_dHa = flatten_time(dHa[:-1])
# calculate in_deltas and gradients
_h.dot_add_mm(flat_dHa, W, flat_dinputs)
_h.dot_add_mm(flat_dHa, flat_inputs, dW, transa=True)
dbias_tmp = _h.allocate(dbias.shape)
_h.sum_t(flat_dHa, axis=0, out=dbias_tmp)
_h.add_tt(dbias, dbias_tmp, dbias)
flat_outputs = flatten_time(outputs[:-2])
flat_dHa = flatten_time(dHa[1:-1])
_h.dot_add_mm(flat_dHa, flat_outputs, dR, transa=True)
_h.dot_add_mm(dHa[0], outputs[-1], dR, transa=True)
|
[
"brainstorm.structure.buffer_structure.BufferStructure",
"brainstorm.structure.buffer_structure.StructureTemplate",
"brainstorm.utils.flatten_time_and_features",
"brainstorm.utils.flatten_time",
"collections.OrderedDict",
"brainstorm.structure.construction.ConstructionWrapper.create"
] |
[((596, 691), 'brainstorm.structure.construction.ConstructionWrapper.create', 'ConstructionWrapper.create', (['RecurrentLayerImpl'], {'size': 'size', 'name': 'name', 'activation': 'activation'}), '(RecurrentLayerImpl, size=size, name=name,\n activation=activation)\n', (622, 691), False, 'from brainstorm.structure.construction import ConstructionWrapper\n'), ((796, 830), 'brainstorm.structure.buffer_structure.StructureTemplate', 'StructureTemplate', (['"""T"""', '"""B"""', '"""..."""'], {}), "('T', 'B', '...')\n", (813, 830), False, 'from brainstorm.structure.buffer_structure import BufferStructure, StructureTemplate\n'), ((1304, 1317), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1315, 1317), False, 'from collections import OrderedDict\n'), ((1347, 1399), 'brainstorm.structure.buffer_structure.BufferStructure', 'BufferStructure', (['"""T"""', '"""B"""', 'self.size'], {'context_size': '(1)'}), "('T', 'B', self.size, context_size=1)\n", (1362, 1399), False, 'from brainstorm.structure.buffer_structure import BufferStructure, StructureTemplate\n'), ((1466, 1479), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1477, 1479), False, 'from collections import OrderedDict\n'), ((1506, 1541), 'brainstorm.structure.buffer_structure.BufferStructure', 'BufferStructure', (['self.size', 'in_size'], {}), '(self.size, in_size)\n', (1521, 1541), False, 'from brainstorm.structure.buffer_structure import BufferStructure, StructureTemplate\n'), ((1568, 1605), 'brainstorm.structure.buffer_structure.BufferStructure', 'BufferStructure', (['self.size', 'self.size'], {}), '(self.size, self.size)\n', (1583, 1605), False, 'from brainstorm.structure.buffer_structure import BufferStructure, StructureTemplate\n'), ((1635, 1661), 'brainstorm.structure.buffer_structure.BufferStructure', 'BufferStructure', (['self.size'], {}), '(self.size)\n', (1650, 1661), False, 'from brainstorm.structure.buffer_structure import BufferStructure, StructureTemplate\n'), ((1683, 1696), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1694, 1696), False, 'from collections import OrderedDict\n'), ((1723, 1775), 'brainstorm.structure.buffer_structure.BufferStructure', 'BufferStructure', (['"""T"""', '"""B"""', 'self.size'], {'context_size': '(1)'}), "('T', 'B', self.size, context_size=1)\n", (1738, 1775), False, 'from brainstorm.structure.buffer_structure import BufferStructure, StructureTemplate\n'), ((1803, 1878), 'brainstorm.structure.buffer_structure.BufferStructure', 'BufferStructure', (['"""T"""', '"""B"""', 'self.size'], {'context_size': '(1)', 'is_backward_only': '(True)'}), "('T', 'B', self.size, context_size=1, is_backward_only=True)\n", (1818, 1878), False, 'from brainstorm.structure.buffer_structure import BufferStructure, StructureTemplate\n'), ((1949, 2024), 'brainstorm.structure.buffer_structure.BufferStructure', 'BufferStructure', (['"""T"""', '"""B"""', 'self.size'], {'context_size': '(1)', 'is_backward_only': '(True)'}), "('T', 'B', self.size, context_size=1, is_backward_only=True)\n", (1964, 2024), False, 'from brainstorm.structure.buffer_structure import BufferStructure, StructureTemplate\n'), ((2395, 2428), 'brainstorm.utils.flatten_time_and_features', 'flatten_time_and_features', (['inputs'], {}), '(inputs)\n', (2420, 2428), False, 'from brainstorm.utils import LayerValidationError, flatten_time, flatten_time_and_features\n'), ((2446, 2467), 'brainstorm.utils.flatten_time', 'flatten_time', (['Ha[:-1]'], {}), '(Ha[:-1])\n', (2458, 2467), False, 'from brainstorm.utils import LayerValidationError, flatten_time, flatten_time_and_features\n'), ((3524, 3557), 'brainstorm.utils.flatten_time_and_features', 'flatten_time_and_features', (['inputs'], {}), '(inputs)\n', (3549, 3557), False, 'from brainstorm.utils import LayerValidationError, flatten_time, flatten_time_and_features\n'), ((3581, 3615), 'brainstorm.utils.flatten_time_and_features', 'flatten_time_and_features', (['dinputs'], {}), '(dinputs)\n', (3606, 3615), False, 'from brainstorm.utils import LayerValidationError, flatten_time, flatten_time_and_features\n'), ((3635, 3657), 'brainstorm.utils.flatten_time', 'flatten_time', (['dHa[:-1]'], {}), '(dHa[:-1])\n', (3647, 3657), False, 'from brainstorm.utils import LayerValidationError, flatten_time, flatten_time_and_features\n'), ((3976, 4002), 'brainstorm.utils.flatten_time', 'flatten_time', (['outputs[:-2]'], {}), '(outputs[:-2])\n', (3988, 4002), False, 'from brainstorm.utils import LayerValidationError, flatten_time, flatten_time_and_features\n'), ((4022, 4045), 'brainstorm.utils.flatten_time', 'flatten_time', (['dHa[1:-1]'], {}), '(dHa[1:-1])\n', (4034, 4045), False, 'from brainstorm.utils import LayerValidationError, flatten_time, flatten_time_and_features\n')]
|
# -*- coding: utf-8 -*-
from pyramid.view import view_config
from pyramid.response import Response
from intranet3 import helpers as h
from intranet3.utils.views import BaseView
from intranet3.utils import google_calendar as cal
from intranet3.forms.employees import (
LateJustificationForm,
WrongTimeJustificationForm,
AbsenceCreateForm
)
from intranet3.lib.employee import user_leave
from intranet3.models import Late, WrongTime, Absence, DBSession
from intranet3.log import INFO_LOG
LOG = INFO_LOG(__name__)
#if popup was opened on page with justification button we change button to text
CHANGE_STATUS = '<script>$(".justification-info").html(\'<span class="justification-info label">%s</span>\');</script>'
RELOAD_PAGE = '<script>window.location.reload()</script>'
@view_config(route_name='employee_form_late_justification', permission='can_justify_late')
class LateJustification(BaseView):
def get(self):
form = LateJustificationForm(self.request.GET, user=self.request.user)
return dict(form=form)
def post(self):
form = LateJustificationForm(self.request.POST, user=self.request.user)
if form.validate():
late = Late(
user_id=self.request.user.id,
date=form.popup_date.data,
explanation=form.popup_explanation.data,
)
DBSession.add(late)
LOG(u"Late added")
return Response(self._(u'Explanation added') + CHANGE_STATUS % self._('Waits for verification'))
return dict(form=form)
@view_config(route_name='employee_form_wrong_time_justification', permission='can_justify_wrongtime')
class WrongTimeJustification(BaseView):
def get(self):
form = WrongTimeJustificationForm(self.request.GET, user=self.request.user)
return dict(form=form)
def post(self):
form = WrongTimeJustificationForm(self.request.POST, user=self.request.user)
if form.validate():
wrongtime = WrongTime(
user_id=self.request.user.id,
date=form.popup_date.data,
explanation=form.popup_explanation.data,
)
DBSession.add(wrongtime)
LOG(u"WrongTime added")
response = '%s %s' % (self._(u'Explanation added'), CHANGE_STATUS % self._('Waits for verification'))
return Response(response)
return dict(form=form)
@view_config(route_name='employee_form_create_absence', permission='hr_stuff')
class CreateAbsence(BaseView):
def dispatch(self):
form = AbsenceCreateForm(self.request.POST, request=self.request)
days, mandated, used, left = 0, 0, 0, 0
if form.popup_date_start.data:
mandated, used, left = user_leave(self.request, form.popup_date_start.data.year)
if form.popup_date_end.data:
days = h.get_working_days(form.popup_date_start.data, form.popup_date_end.data)
left -= days
if self.request.method == 'POST' and form.validate():
date_start = form.popup_date_start.data
date_end = form.popup_date_end.data
type = form.popup_type.data
remarks = form.popup_remarks.data
user_id = form.popup_user_id.data
absence = Absence(
user_id=user_id,
date_start=date_start,
date_end=date_end,
days=days,
type=type,
remarks=remarks,
)
DBSession.add(absence)
return Response(self._('Done') + RELOAD_PAGE)
return dict(
form=form,
days=days,
mandated=mandated,
used=used,
left=left
)
|
[
"intranet3.models.DBSession.add",
"pyramid.response.Response",
"intranet3.models.Absence",
"intranet3.log.INFO_LOG",
"intranet3.forms.employees.AbsenceCreateForm",
"intranet3.models.Late",
"intranet3.helpers.get_working_days",
"intranet3.lib.employee.user_leave",
"intranet3.forms.employees.LateJustificationForm",
"pyramid.view.view_config",
"intranet3.forms.employees.WrongTimeJustificationForm",
"intranet3.models.WrongTime"
] |
[((505, 523), 'intranet3.log.INFO_LOG', 'INFO_LOG', (['__name__'], {}), '(__name__)\n', (513, 523), False, 'from intranet3.log import INFO_LOG\n'), ((785, 879), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""employee_form_late_justification"""', 'permission': '"""can_justify_late"""'}), "(route_name='employee_form_late_justification', permission=\n 'can_justify_late')\n", (796, 879), False, 'from pyramid.view import view_config\n'), ((1561, 1666), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""employee_form_wrong_time_justification"""', 'permission': '"""can_justify_wrongtime"""'}), "(route_name='employee_form_wrong_time_justification', permission\n ='can_justify_wrongtime')\n", (1572, 1666), False, 'from pyramid.view import view_config\n'), ((2424, 2501), 'pyramid.view.view_config', 'view_config', ([], {'route_name': '"""employee_form_create_absence"""', 'permission': '"""hr_stuff"""'}), "(route_name='employee_form_create_absence', permission='hr_stuff')\n", (2435, 2501), False, 'from pyramid.view import view_config\n'), ((944, 1007), 'intranet3.forms.employees.LateJustificationForm', 'LateJustificationForm', (['self.request.GET'], {'user': 'self.request.user'}), '(self.request.GET, user=self.request.user)\n', (965, 1007), False, 'from intranet3.forms.employees import LateJustificationForm, WrongTimeJustificationForm, AbsenceCreateForm\n'), ((1075, 1139), 'intranet3.forms.employees.LateJustificationForm', 'LateJustificationForm', (['self.request.POST'], {'user': 'self.request.user'}), '(self.request.POST, user=self.request.user)\n', (1096, 1139), False, 'from intranet3.forms.employees import LateJustificationForm, WrongTimeJustificationForm, AbsenceCreateForm\n'), ((1736, 1804), 'intranet3.forms.employees.WrongTimeJustificationForm', 'WrongTimeJustificationForm', (['self.request.GET'], {'user': 'self.request.user'}), '(self.request.GET, user=self.request.user)\n', (1762, 1804), False, 'from intranet3.forms.employees import LateJustificationForm, WrongTimeJustificationForm, AbsenceCreateForm\n'), ((1872, 1941), 'intranet3.forms.employees.WrongTimeJustificationForm', 'WrongTimeJustificationForm', (['self.request.POST'], {'user': 'self.request.user'}), '(self.request.POST, user=self.request.user)\n', (1898, 1941), False, 'from intranet3.forms.employees import LateJustificationForm, WrongTimeJustificationForm, AbsenceCreateForm\n'), ((2572, 2630), 'intranet3.forms.employees.AbsenceCreateForm', 'AbsenceCreateForm', (['self.request.POST'], {'request': 'self.request'}), '(self.request.POST, request=self.request)\n', (2589, 2630), False, 'from intranet3.forms.employees import LateJustificationForm, WrongTimeJustificationForm, AbsenceCreateForm\n'), ((1188, 1295), 'intranet3.models.Late', 'Late', ([], {'user_id': 'self.request.user.id', 'date': 'form.popup_date.data', 'explanation': 'form.popup_explanation.data'}), '(user_id=self.request.user.id, date=form.popup_date.data, explanation=\n form.popup_explanation.data)\n', (1192, 1295), False, 'from intranet3.models import Late, WrongTime, Absence, DBSession\n'), ((1366, 1385), 'intranet3.models.DBSession.add', 'DBSession.add', (['late'], {}), '(late)\n', (1379, 1385), False, 'from intranet3.models import Late, WrongTime, Absence, DBSession\n'), ((1994, 2105), 'intranet3.models.WrongTime', 'WrongTime', ([], {'user_id': 'self.request.user.id', 'date': 'form.popup_date.data', 'explanation': 'form.popup_explanation.data'}), '(user_id=self.request.user.id, date=form.popup_date.data,\n explanation=form.popup_explanation.data)\n', (2003, 2105), False, 'from intranet3.models import Late, WrongTime, Absence, DBSession\n'), ((2177, 2201), 'intranet3.models.DBSession.add', 'DBSession.add', (['wrongtime'], {}), '(wrongtime)\n', (2190, 2201), False, 'from intranet3.models import Late, WrongTime, Absence, DBSession\n'), ((2371, 2389), 'pyramid.response.Response', 'Response', (['response'], {}), '(response)\n', (2379, 2389), False, 'from pyramid.response import Response\n'), ((2753, 2810), 'intranet3.lib.employee.user_leave', 'user_leave', (['self.request', 'form.popup_date_start.data.year'], {}), '(self.request, form.popup_date_start.data.year)\n', (2763, 2810), False, 'from intranet3.lib.employee import user_leave\n'), ((3293, 3403), 'intranet3.models.Absence', 'Absence', ([], {'user_id': 'user_id', 'date_start': 'date_start', 'date_end': 'date_end', 'days': 'days', 'type': 'type', 'remarks': 'remarks'}), '(user_id=user_id, date_start=date_start, date_end=date_end, days=\n days, type=type, remarks=remarks)\n', (3300, 3403), False, 'from intranet3.models import Late, WrongTime, Absence, DBSession\n'), ((3522, 3544), 'intranet3.models.DBSession.add', 'DBSession.add', (['absence'], {}), '(absence)\n', (3535, 3544), False, 'from intranet3.models import Late, WrongTime, Absence, DBSession\n'), ((2875, 2947), 'intranet3.helpers.get_working_days', 'h.get_working_days', (['form.popup_date_start.data', 'form.popup_date_end.data'], {}), '(form.popup_date_start.data, form.popup_date_end.data)\n', (2893, 2947), True, 'from intranet3 import helpers as h\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide some reward processors.
It processes the rewards before returning them; this can be useful to standardize, normalize, center them for instance.
"""
import numpy as np
from pyrobolearn.rewards.reward import Reward
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["<NAME>"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class RewardProcessor(Reward):
r"""Reward Processor
Wraps the reward and process it. It also acts as a memory of the last received reward signal, which can be
accessed via the `value` attribute.
Examples:
reward = Reward1() + Reward2()
reward = RewardProcessor(reward, <args>)
"""
def __init__(self, reward, range=None):
"""
Initialize the reward processor.
Args:
reward (Reward): reward to process.
range (tuple of float/int, None): range of the reward processor.
"""
super(RewardProcessor, self).__init__()
# set the reward to process
if not isinstance(reward, Reward):
raise TypeError("Expecting the given 'reward' to be an instance of `Reward`, instead got: "
"{}".format(type(reward)))
self.reward = reward
# set the range
self.range = self.reward.range if range is None else range
# set the initial value (randomly)
self.value = np.random.uniform(low=self.range[0], high=self.range[1])
def _compute(self):
"""Compute the reward and cache its value."""
self.value = self.reward._compute()
return self.value
class ShiftRewardProcessor(RewardProcessor):
r"""Shift Reward Processor
Shift the reward by the given amount; that is, it returned: :math:`\hat{r} = r + x` where :math:`x` is the
specified amount to shift the original reward.
"""
def __init__(self, reward, x):
"""
Initialize the shift reward processor.
Args:
reward (Reward): Reward instance to shift.
x (int, float): amount to be shifted.
"""
if not isinstance(x, (int, float)):
raise TypeError("Expecting the given 'x' (=the amount to be shifted) to be an int or float, instead got: "
"{}".format(type(x)))
self.x = x
super(ShiftRewardProcessor, self).__init__(reward, range=self.reward.range + x)
def _compute(self):
reward = self.reward._compute()
self.value = reward + self.x
return self.value
class ClipRewardProcessor(RewardProcessor):
r"""Clip Reward Processor
Processor that clips the given reward to be between [low, high], where `low` and `high` are respectively the
specified lower and higher bound.
"""
def __init__(self, reward, low=-10, high=10):
"""
Initialize the Clip processor.
Args:
reward (Reward): Reward instance to clip.
low (int, float): lower bound
high (int, float): higher bound
"""
super(ClipRewardProcessor, self).__init__(reward)
self.low = low
self.high = high
def _compute(self):
reward = self.reward._compute()
self.value = np.clip(reward, self.low, self.high)
return self.value
class CenterRewardProcessor(RewardProcessor):
r"""Center Reward Processor
Center the reward using the running mean.
"""
def __init__(self, reward):
"""
Initialize the center reward processor.
Args:
reward (Reward): Reward instance to center.
"""
super(CenterRewardProcessor, self).__init__(reward)
self.mean = 0
self.N = 0
def reset(self):
self.mean = 0
self.N = 0
self.reward.reset()
def _compute(self):
reward = self.reward._compute()
# update the mean
self.mean = self.N / (self.N + 1.) * self.mean + 1. / (self.N + 1) * reward
self.N += 1
# center reward
self.value = self.value - self.mean
return self.value
class NormalizeRewardProcessor(RewardProcessor):
r"""Normalize Reward Processor
Normalize the reward such that it is between 0 and 1. That is, it returned
:math:`\hat{r} = \frac{r - r_{min}}{r_{max} - r_{min}}`, where :math:`r \in [r_{min}, r_{max}]`.
Warnings: the first returned reward will be 0.
"""
def __init__(self, reward):
"""
Initialize the normalizer reward processor.
Args:
reward (Reward): Reward instance to normalize.
"""
super(NormalizeRewardProcessor, self).__init__(reward)
self.min = np.infty
self.max = -np.infty
def reset(self):
self.min = np.infty
self.max = -np.infty
self.reward.reset()
def _compute(self):
reward = self.reward._compute()
self.min = np.minimum(reward, self.min)
self.max = np.maximum(reward, self.max)
den = self.max - self.min
if den == 0:
den = 1.
self.value = (reward - self.min) / den
return self.value
class StandardizeRewardProcessor(RewardProcessor):
r"""Standardize Reward Processor
Standardize the reward such that it returns :math:`\hat{r} = \frac{r - \mu}{\sigma}` where :math:`\mu` is the
running mean, and :math:`\sigma` is the running standard deviation. The returned reward will have a mean of 0
and standard deviation of 1.
"""
def __init__(self, reward, epsilon=1.e-4, center=True):
"""
Initialize the standardizer reward processor.
Args:
reward (Reward): Reward instance to standardize.
epsilon (float): threshold to be added to the standard deviation in order to avoid a division by 0.
center (bool): if we should center the data.
"""
super(StandardizeRewardProcessor, self).__init__(reward)
self.eps = epsilon
self.mean = 0
self.var = 1
self.N = 0
self.center = center
def reset(self):
self.mean = 0
self.var = 1
self.N = 0
self.reward.reset()
def _compute(self):
reward = self.reward._compute()
# update the mean
old_mean = self.mean
self.mean = self.N / (self.N + 1.) * self.mean + 1. / (self.N + 1) * reward
# update the var / stddev
frac = 1. / (self.N + 1)
self.var = self.N * frac * self.var + frac * (self.value - old_mean) * (self.value - self.mean)
std = np.sqrt(self.var)
# update total number of data points
self.N += 1
# standardize the reward
if self.center:
self.value = (reward - self.mean) / (std + self.eps)
else:
self.value = reward / (std + self.eps)
return self.value
class GammaAccumulatedRewardProcessor(RewardProcessor):
r"""Gamma reward processor
It will return the accumulated reward until now: :math:`R = \sum_{t'=0}^t \gamma^{t'} r_{t'}`.
"""
def __init__(self, reward, gamma=0.99):
"""
Initialize the gamma accumulator reward processor.
Args:
reward (Reward): Reward instance to process.
gamma (float): discount factor.
"""
super(GammaAccumulatedRewardProcessor, self).__init__(reward)
self.gamma = gamma
self.value = 0. # return value
def reset(self):
self.value = 0.
self.reward.reset()
def _compute(self):
reward = self.reward._compute()
self.value = reward + self.gamma * self.value
return self.value
class GammaStandardizeRewardProcessor(RewardProcessor):
r"""Gamma Standardize Reward Processor
References:
[1] https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_normalize.py
"""
def __init__(self, reward, gamma=0.99, epsilon=1.e-4):
"""
Initialize the gamma standardizer reward processor.
Args:
reward (Reward): Reward instance to process.
gamma (float): discount factor.
epsilon (float): threshold to be added to the standard deviation in order to avoid a division by 0.
"""
super(GammaStandardizeRewardProcessor, self).__init__(reward)
self.gamma = gamma
self.eps = epsilon
self.ret = 0
self.mean = 0
self.var = 1
self.N = 0
def reset(self):
self.ret = 0
self.mean = 0
self.var = 1
self.N = 0
self.reward.reset()
def _compute(self):
reward = self.reward._compute()
# update return
self.ret = reward + self.gamma * self.ret
# update the return mean
old_mean = self.mean
self.mean = self.N / (self.N + 1.) * self.mean + 1. / (self.N + 1) * self.ret
# update the return variance
self.var = self.N / (self.N + 1) * self.var + 1. / (self.N + 1) * (self.ret - old_mean) * (self.ret - self.mean)
std = np.sqrt(self.var)
# update total number of data points
self.N += 1
self.value = reward / (std + self.eps)
return self.value
class ScaleRewardProcessor(RewardProcessor):
r"""Scale Reward Processor
Processor that scales the reward x which is between [x1, x2] to the output y which is between [y1, y2].
"""
def __init__(self, reward, x1, x2, y1, y2):
"""
Initialize the scale reward processor
Args:
reward (Reward): reward function to scale.
x1 (int, float): lower bound of the original reward
x2 (int, float): upper bound of the original reward
y1 (int, float): lower bound of the final reward
y2 (int, float): upper bound of the final reward
"""
super(ScaleRewardProcessor, self).__init__(reward)
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
self.ratio = (self.y2 - self.y1) / (self.x2 - self.x1)
self.range = (self.y1, self.y2)
def _compute(self):
reward = self.reward._compute()
self.value = self.y1 + (reward - self.x1) * self.ratio
return self.value
|
[
"numpy.random.uniform",
"numpy.minimum",
"numpy.maximum",
"numpy.clip",
"numpy.sqrt"
] |
[((1534, 1590), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.range[0]', 'high': 'self.range[1]'}), '(low=self.range[0], high=self.range[1])\n', (1551, 1590), True, 'import numpy as np\n'), ((3361, 3397), 'numpy.clip', 'np.clip', (['reward', 'self.low', 'self.high'], {}), '(reward, self.low, self.high)\n', (3368, 3397), True, 'import numpy as np\n'), ((5037, 5065), 'numpy.minimum', 'np.minimum', (['reward', 'self.min'], {}), '(reward, self.min)\n', (5047, 5065), True, 'import numpy as np\n'), ((5085, 5113), 'numpy.maximum', 'np.maximum', (['reward', 'self.max'], {}), '(reward, self.max)\n', (5095, 5113), True, 'import numpy as np\n'), ((6694, 6711), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (6701, 6711), True, 'import numpy as np\n'), ((9191, 9208), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (9198, 9208), True, 'import numpy as np\n')]
|
import logging
import ray
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.memory import ray_get_and_free
logger = logging.getLogger(__name__)
def collect_samples(agents, sample_batch_size, num_envs_per_worker,
train_batch_size):
"""Collects at least train_batch_size samples, never discarding any."""
num_timesteps_so_far = 0
trajectories = []
agent_dict = {}
for agent in agents:
fut_sample = agent.sample.remote()
agent_dict[fut_sample] = agent
while agent_dict:
[fut_sample], _ = ray.wait(list(agent_dict))
agent = agent_dict.pop(fut_sample)
next_sample = ray_get_and_free(fut_sample)
num_timesteps_so_far += next_sample.count
trajectories.append(next_sample)
# Only launch more tasks if we don't already have enough pending
pending = len(agent_dict) * sample_batch_size * num_envs_per_worker
if num_timesteps_so_far + pending < train_batch_size:
fut_sample2 = agent.sample.remote()
agent_dict[fut_sample2] = agent
return SampleBatch.concat_samples(trajectories)
|
[
"ray.rllib.policy.sample_batch.SampleBatch.concat_samples",
"ray.rllib.utils.memory.ray_get_and_free",
"logging.getLogger"
] |
[((143, 170), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (160, 170), False, 'import logging\n'), ((1113, 1153), 'ray.rllib.policy.sample_batch.SampleBatch.concat_samples', 'SampleBatch.concat_samples', (['trajectories'], {}), '(trajectories)\n', (1139, 1153), False, 'from ray.rllib.policy.sample_batch import SampleBatch\n'), ((677, 705), 'ray.rllib.utils.memory.ray_get_and_free', 'ray_get_and_free', (['fut_sample'], {}), '(fut_sample)\n', (693, 705), False, 'from ray.rllib.utils.memory import ray_get_and_free\n')]
|
import yaml
_required = ["servers"]
class Config(object):
def __init__(self, configFile):
self.configFile = configFile
self._configData = {}
def readConfig(self):
try:
with open(self.configFile, "r") as config:
configData = yaml.safe_load(config)
except Exception as e:
raise ConfigError(self.configFile, e)
self._validateConfigData(configData)
self._configData = configData
def _validateConfigData(self, configData):
for item in _required:
if item not in configData:
raise ConfigError(self.configFile, "Required item \"{}\" was not found in the config.".format(item))
def __len__(self):
return len(self._configData)
def __iter__(self):
return iter(self._configData)
def __getitem__(self, item):
return self._configData[item]
def itemWithDefault(self, item, default):
if item in self._configData:
return self._configData[item]
return default
def serverItemWithDefault(self, server, item, default):
if item in self._configData["servers"][server]:
return self._configData["servers"][server][item]
if item in self._configData:
return self._configData[item]
return default
class ConfigError(Exception):
def __init(self, configFile, message):
self.configFile = configFile
self.message = message
def __str(self):
return "An error occurred while reading config file {}: {}".format(self.configFile, self.message)
|
[
"yaml.safe_load"
] |
[((288, 310), 'yaml.safe_load', 'yaml.safe_load', (['config'], {}), '(config)\n', (302, 310), False, 'import yaml\n')]
|
#!/usr/bin/env python
from __future__ import print_function
from mmstructlib.cli import ArgParse
from mmstructlib.tools.preprocess import strip_water, strip_hydrogen
from mmstructlib.radii import add_radii
from mmstructlib.tools.summarize import from_atom
import sys
mirror = "/var/data/pdb"
def window_gen(half_window_size, model):
for mol in model.molecules:
if mol.is_protein():
chain = [
mol[index] if index is not None else None
for aa, index in mol.seq_res
]
else:
chain = mol.monomers
l = len(chain)-1
for i in range(l+1):
if chain[i] is not None:
s = max(0, i - half_window_size)
e = min(l, i + half_window_size)
yield chain[i], filter(lambda x: x is not None, chain[s:e+1])
argparse = ArgParse("Calculate residue-wise surface area", mirror_default_path=mirror)
argparse.struct_arg()
argparse.report_chain_arg(nargs='+')
argparse.add_argument("-a", action='store_true', help="Caluclate analytic surface area")
argparse.add_argument("-n", action='store_true', help="Caluclate numerical surface area [default]")
argparse.add_argument("--n_win", action='store_true', help="Caluclate numerical, windowed surface area")
argparse.add_argument("--n_w_win", action='store_true', help="Caluclate numerical, windowed surface area using weighting method")
argparse.add_argument("--half_window", nargs=1, metavar='length', type=int, help="Half window length for windowed calculations %(default)s", default=[5])
argparse.add_argument("--dots", nargs=1, metavar='dots', type=int, help="Number of mesh dots for numerical surface %(default)s", default=[362])
args = argparse()
struct = args.struct
chains = args.chain
half_window = args.half_window[0]
num_dots = args.dots[0]
print(half_window, num_dots)
strip_water(struct[0])
strip_hydrogen(struct[0])
add_radii(struct[0])
area_strs = []
if args.a:
try:
import mmstructlib.sas.alpha_shapes as alpha_shapes
except:
print("analytical sasa not supported in this mmsl installation", file=sys.stderr)
sys.exit(1)
area_strs.append('a_area')
alpha_shapes.area_atoms(struct[0].atoms, out_attr='a_area')
from_atom(struct[0].monomers, 'a_area')
if (args.n or args.n_win or args.n_w_win) or not (args.n or args.a or args.n_win or args.n_w_win):
try:
import mmstructlib.sas.nsc as nsc
except:
print("numerical sasa not supported in this mmsl installation", file=sys.stderr)
sys.exit(1)
if args.n or not (args.n or args.a or args.n_win or args.n_w_win):
area_strs.append('n_area')
nsc.area_atoms(struct[0].atoms, out_attr='n_area', num_dots=num_dots)
from_atom(struct[0].monomers, 'n_area')
if args.n_win:
attr_str = 'n_win_area_{0}'.format(args.n_win)
area_strs.append(attr_str)
for c_res, window in window_gen(half_window, struct[0]):
nsc.area_atoms(
[a for r in window for a in r.atoms],
out_attr=attr_str,
num_dots=num_dots
)
from_atom([c_res], attr_str)
if args.n_w_win:
attr_str = 'n_w_win_area_{0}'.format(args.n_win)
area_strs.append(attr_str)
for mon in struct[0].monomers:
mon.weight = 1.0
nsc.atoms_area_weighted(
struct[0],
half_window,
out_attr=attr_str,
num_dots=num_dots
)
from_atom(struct[0].monomers, attr_str)
print("chain\tid\ttype\t" + "\t".join(area_strs))
for chain in chains:
for mon in chain:
# for i, atom in enumerate(mon):
# print(i, round(atom.n_area, 1), round(atom.a_area, 1))
print("{0}\t{1}\t{2}\t{3}".format(
chain.pdb_id,
mon.id,
mon.type,
"\t".join([ str(round(getattr(mon, area_str),3)) for area_str in area_strs])
))
|
[
"mmstructlib.sas.nsc.area_atoms",
"mmstructlib.radii.add_radii",
"mmstructlib.sas.nsc.atoms_area_weighted",
"mmstructlib.tools.preprocess.strip_hydrogen",
"sys.exit",
"mmstructlib.tools.summarize.from_atom",
"mmstructlib.cli.ArgParse",
"mmstructlib.sas.alpha_shapes.area_atoms",
"mmstructlib.tools.preprocess.strip_water"
] |
[((866, 941), 'mmstructlib.cli.ArgParse', 'ArgParse', (['"""Calculate residue-wise surface area"""'], {'mirror_default_path': 'mirror'}), "('Calculate residue-wise surface area', mirror_default_path=mirror)\n", (874, 941), False, 'from mmstructlib.cli import ArgParse\n'), ((1873, 1895), 'mmstructlib.tools.preprocess.strip_water', 'strip_water', (['struct[0]'], {}), '(struct[0])\n', (1884, 1895), False, 'from mmstructlib.tools.preprocess import strip_water, strip_hydrogen\n'), ((1896, 1921), 'mmstructlib.tools.preprocess.strip_hydrogen', 'strip_hydrogen', (['struct[0]'], {}), '(struct[0])\n', (1910, 1921), False, 'from mmstructlib.tools.preprocess import strip_water, strip_hydrogen\n'), ((1923, 1943), 'mmstructlib.radii.add_radii', 'add_radii', (['struct[0]'], {}), '(struct[0])\n', (1932, 1943), False, 'from mmstructlib.radii import add_radii\n'), ((2197, 2256), 'mmstructlib.sas.alpha_shapes.area_atoms', 'alpha_shapes.area_atoms', (['struct[0].atoms'], {'out_attr': '"""a_area"""'}), "(struct[0].atoms, out_attr='a_area')\n", (2220, 2256), True, 'import mmstructlib.sas.alpha_shapes as alpha_shapes\n'), ((2261, 2300), 'mmstructlib.tools.summarize.from_atom', 'from_atom', (['struct[0].monomers', '"""a_area"""'], {}), "(struct[0].monomers, 'a_area')\n", (2270, 2300), False, 'from mmstructlib.tools.summarize import from_atom\n'), ((2686, 2755), 'mmstructlib.sas.nsc.area_atoms', 'nsc.area_atoms', (['struct[0].atoms'], {'out_attr': '"""n_area"""', 'num_dots': 'num_dots'}), "(struct[0].atoms, out_attr='n_area', num_dots=num_dots)\n", (2700, 2755), True, 'import mmstructlib.sas.nsc as nsc\n'), ((2764, 2803), 'mmstructlib.tools.summarize.from_atom', 'from_atom', (['struct[0].monomers', '"""n_area"""'], {}), "(struct[0].monomers, 'n_area')\n", (2773, 2803), False, 'from mmstructlib.tools.summarize import from_atom\n'), ((3375, 3465), 'mmstructlib.sas.nsc.atoms_area_weighted', 'nsc.atoms_area_weighted', (['struct[0]', 'half_window'], {'out_attr': 'attr_str', 'num_dots': 'num_dots'}), '(struct[0], half_window, out_attr=attr_str, num_dots\n =num_dots)\n', (3398, 3465), True, 'import mmstructlib.sas.nsc as nsc\n'), ((3530, 3569), 'mmstructlib.tools.summarize.from_atom', 'from_atom', (['struct[0].monomers', 'attr_str'], {}), '(struct[0].monomers, attr_str)\n', (3539, 3569), False, 'from mmstructlib.tools.summarize import from_atom\n'), ((2150, 2161), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2158, 2161), False, 'import sys\n'), ((2560, 2571), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2568, 2571), False, 'import sys\n'), ((2990, 3084), 'mmstructlib.sas.nsc.area_atoms', 'nsc.area_atoms', (['[a for r in window for a in r.atoms]'], {'out_attr': 'attr_str', 'num_dots': 'num_dots'}), '([a for r in window for a in r.atoms], out_attr=attr_str,\n num_dots=num_dots)\n', (3004, 3084), True, 'import mmstructlib.sas.nsc as nsc\n'), ((3157, 3185), 'mmstructlib.tools.summarize.from_atom', 'from_atom', (['[c_res]', 'attr_str'], {}), '([c_res], attr_str)\n', (3166, 3185), False, 'from mmstructlib.tools.summarize import from_atom\n')]
|
# Copyright 2021 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent a Equinix Virtual Machine object (BareMetal).
"""
from perfkitbenchmarker import errors
from perfkitbenchmarker import providers
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.equinix import util
from six.moves import range
from perfkitbenchmarker import linux_virtual_machine as linux_vm
CLOUD_CONFIG_TEMPLATE = '''#cloud-config
users:
- name: {0}
ssh-authorized-keys:
- {1}
sudo: ['ALL=(ALL) NOPASSWD:ALL']
groups: sudo
shell: /bin/bash
runcmd:
- [ passwd, -l, root ]
- [ chage, -d, -1, -I, -1, -E, -1, -M, 999999, root ]
'''
class MetalVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing a Baremetal Virtual Machine ."""
CLOUD = providers.EQUINIX
# Subclasses should override the default image.
DEFAULT_IMAGE = None
def __init__(self, vm_spec):
"""Initialize a BareMetal virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
"""
super(MetalVirtualMachine, self).__init__(vm_spec)
self.device_id = None
self.max_local_disks = 1
self.local_disk_counter = 0
self.image = self.image or self.DEFAULT_IMAGE
def _Create(self):
"""Create a BareMetal instance ."""
with open(self.ssh_public_key) as f:
public_key = f.read().rstrip('\n')
response, retcode = util.MetalAndParse(
['device', 'create',
'--hostname', self.name,
'--metro', self.zone, #metro
'--plan', self.machine_type, #plan
'--operating-system', self.image, #OS
'--userdata', CLOUD_CONFIG_TEMPLATE.format(
self.user_name, public_key)
])
if retcode:
raise errors.Resource.RetryableCreationError('Creation failed: %s' %
(response,))
self.device_id = response['id']
@vm_util.Retry()
def _PostCreate(self):
"""Get the instance's data."""
response, retcode = util.MetalAndParse(
['device', 'get', '-i', self.device_id])
for interface in response['ip_addresses']:
if interface['address_family'] != 4:
continue
if interface['public'] == True:
self.ip_address = interface['address']
else:
self.internal_ip = interface['address']
def _Delete(self):
"""Delete a BareMetal VM Device."""
response, retcode = util.MetalAndParse(
['device', 'delete', '-i', self.device_id, '--force'])
# The command doesn't return the HTTP status code, and the error
# format is very difficult to parse, so we string
# search. TODO 404 is a standard error and is not in metal json output
if retcode and '404' in response['errors'][0]['detail']:
return
elif retcode:
raise errors.Resource.RetryableDeletionError('Deletion failed: %s' %
(response,))
def _Exists(self):
"""Returns true if the VM exists."""
response, retcode = util.MetalAndParse(
['device', 'get', self.device_id])
return retcode == 0
#Disk creation needs to be Finished, therefore FIO testing errors will occur unless changed
class Ubuntu1804BasedEquinixVirtualMachine(
MetalVirtualMachine, linux_vm.Ubuntu1804Mixin):
"""
Equinix Metal
"""
|
[
"perfkitbenchmarker.errors.Resource.RetryableDeletionError",
"perfkitbenchmarker.errors.Resource.RetryableCreationError",
"perfkitbenchmarker.providers.equinix.util.MetalAndParse",
"perfkitbenchmarker.vm_util.Retry"
] |
[((2539, 2554), 'perfkitbenchmarker.vm_util.Retry', 'vm_util.Retry', ([], {}), '()\n', (2552, 2554), False, 'from perfkitbenchmarker import vm_util\n'), ((2639, 2698), 'perfkitbenchmarker.providers.equinix.util.MetalAndParse', 'util.MetalAndParse', (["['device', 'get', '-i', self.device_id]"], {}), "(['device', 'get', '-i', self.device_id])\n", (2657, 2698), False, 'from perfkitbenchmarker.providers.equinix import util\n'), ((3047, 3120), 'perfkitbenchmarker.providers.equinix.util.MetalAndParse', 'util.MetalAndParse', (["['device', 'delete', '-i', self.device_id, '--force']"], {}), "(['device', 'delete', '-i', self.device_id, '--force'])\n", (3065, 3120), False, 'from perfkitbenchmarker.providers.equinix import util\n'), ((3647, 3700), 'perfkitbenchmarker.providers.equinix.util.MetalAndParse', 'util.MetalAndParse', (["['device', 'get', self.device_id]"], {}), "(['device', 'get', self.device_id])\n", (3665, 3700), False, 'from perfkitbenchmarker.providers.equinix import util\n'), ((2372, 2447), 'perfkitbenchmarker.errors.Resource.RetryableCreationError', 'errors.Resource.RetryableCreationError', (["('Creation failed: %s' % (response,))"], {}), "('Creation failed: %s' % (response,))\n", (2410, 2447), False, 'from perfkitbenchmarker import errors\n'), ((3432, 3507), 'perfkitbenchmarker.errors.Resource.RetryableDeletionError', 'errors.Resource.RetryableDeletionError', (["('Deletion failed: %s' % (response,))"], {}), "('Deletion failed: %s' % (response,))\n", (3470, 3507), False, 'from perfkitbenchmarker import errors\n')]
|
import time
from typing import Optional
from mitmproxy import ctx
from mitmproxy import flowfilter
from mitmproxy.script import concurrent
from mitmproxy.exceptions import OptionsError
matchall = flowfilter.parse(".")
class Sleeper:
def __init__(self):
self.filter: Optional[flowfilter.TFilter] = matchall
def load(self, loader):
loader.add_option(
"sleep", Optional[int], None,
"Delay client requests (milliseconds)",
)
loader.add_option(
"sleep_filter", Optional[str], None,
"Apply delay to flows which match the filter"
)
def configure(self, updates):
if "sleep" in updates:
sleep = ctx.options.sleep
if sleep and sleep < 0:
raise OptionsError("'sleep' must be >= 0")
if "sleep_filter" in updates:
filt_str = ctx.options.sleep_filter
filt = matchall if not filt_str else flowfilter.parse(filt_str)
if not filt:
raise OptionsError("Invalid filter expression: %s" % filt_str)
self.filter = filt
@concurrent
def request(self, flow):
delay = ctx.options.sleep
if delay and delay > 0 and flowfilter.match(self.filter, flow):
time.sleep(delay / 1000)
addons = [
Sleeper()
]
|
[
"mitmproxy.flowfilter.parse",
"mitmproxy.exceptions.OptionsError",
"mitmproxy.flowfilter.match",
"time.sleep"
] |
[((199, 220), 'mitmproxy.flowfilter.parse', 'flowfilter.parse', (['"""."""'], {}), "('.')\n", (215, 220), False, 'from mitmproxy import flowfilter\n'), ((1237, 1272), 'mitmproxy.flowfilter.match', 'flowfilter.match', (['self.filter', 'flow'], {}), '(self.filter, flow)\n', (1253, 1272), False, 'from mitmproxy import flowfilter\n'), ((1286, 1310), 'time.sleep', 'time.sleep', (['(delay / 1000)'], {}), '(delay / 1000)\n', (1296, 1310), False, 'import time\n'), ((788, 824), 'mitmproxy.exceptions.OptionsError', 'OptionsError', (['"""\'sleep\' must be >= 0"""'], {}), '("\'sleep\' must be >= 0")\n', (800, 824), False, 'from mitmproxy.exceptions import OptionsError\n'), ((960, 986), 'mitmproxy.flowfilter.parse', 'flowfilter.parse', (['filt_str'], {}), '(filt_str)\n', (976, 986), False, 'from mitmproxy import flowfilter\n'), ((1034, 1090), 'mitmproxy.exceptions.OptionsError', 'OptionsError', (["('Invalid filter expression: %s' % filt_str)"], {}), "('Invalid filter expression: %s' % filt_str)\n", (1046, 1090), False, 'from mitmproxy.exceptions import OptionsError\n')]
|
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import argparse
from modeling import BertForPreTraining, BertConfig
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--bert_model", default="bert-large-uncased", type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument('--tf_checkpoint',
type=str,
default="/google_bert_data",
help="Path to directory containing TF checkpoint")
parser.add_argument('--bert_config_path',
type=str,
default="/workspace/phase1",
help="Path bert_config.json is located in")
parser.add_argument('--output_checkpoint', type=str,
default='./checkpoint.pt',
help="Path to output PyT checkpoint")
return parser.parse_args()
def prepare_model(args, device):
# Prepare model
config = BertConfig.from_json_file(args.bert_config_path)
# Padding for divisibility by 8
if config.vocab_size % 8 != 0:
config.vocab_size += 8 - (config.vocab_size % 8)
print('padded vocab size to: {}'.format(config.vocab_size))
# Set some options that the config file is expected to have (but don't need to be set properly
# at this point)
config.pad = False
config.unpad = False
config.dense_seq_output = False
config.fused_mha = False
config.fused_gelu_bias = False
config.fuse_qkv = False
config.fuse_scale = False
config.fuse_mask = False
config.fuse_dropout = False
config.apex_softmax = False
config.enable_stream = False
if config.fuse_mask == True: config.apex_softmax = True
if config.pad == False: config.enable_stream = True
if config.unpad == True: config.fused_mha = False
#Load from TF checkpoint
model = BertForPreTraining.from_pretrained(args.tf_checkpoint, from_tf=True, config=config)
return model
def main():
args = parse_arguments()
device = torch.device("cuda")
model = prepare_model(args, device)
torch.save({'model' : model.state_dict() }, args.output_checkpoint)
if __name__ == "__main__":
main()
|
[
"modeling.BertForPreTraining.from_pretrained",
"argparse.ArgumentParser",
"modeling.BertConfig.from_json_file",
"torch.device"
] |
[((726, 751), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (749, 751), False, 'import argparse\n'), ((1717, 1765), 'modeling.BertConfig.from_json_file', 'BertConfig.from_json_file', (['args.bert_config_path'], {}), '(args.bert_config_path)\n', (1742, 1765), False, 'from modeling import BertForPreTraining, BertConfig\n'), ((2628, 2716), 'modeling.BertForPreTraining.from_pretrained', 'BertForPreTraining.from_pretrained', (['args.tf_checkpoint'], {'from_tf': '(True)', 'config': 'config'}), '(args.tf_checkpoint, from_tf=True, config\n =config)\n', (2662, 2716), False, 'from modeling import BertForPreTraining, BertConfig\n'), ((2785, 2805), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2797, 2805), False, 'import torch\n')]
|
"""Schema classes for test Reports."""
import functools
import json
from copy import deepcopy
import six
from six.moves import range
# pylint: disable=no-name-in-module,import-error
if six.PY2:
from collections import MutableMapping, MutableSequence
else:
from collections.abc import MutableMapping, MutableSequence
# pylint: enable=no-name-in-module,import-error
from marshmallow import Schema, fields, post_load
from testplan.common.serialization.schemas import load_tree_data
from testplan.common.report.schemas import ReportSchema
from testplan.common.serialization import fields as custom_fields
from testplan.common.utils import timing
from .base import TestCaseReport, TestGroupReport, TestReport
__all__ = ["TestCaseReportSchema", "TestGroupReportSchema", "TestReportSchema"]
class IntervalSchema(Schema):
"""Schema for ``timer.Interval``"""
start = custom_fields.UTCDateTime()
end = custom_fields.UTCDateTime(allow_none=True)
@post_load
def make_interval(self, data): # pylint: disable=no-self-use
"""Create an Interal object."""
return timing.Interval(**data)
class TagField(fields.Field):
"""Field for serializing tag data, which is a ``dict`` of ``set``."""
def _serialize(self, value, attr, obj):
return {
tag_name: list(tag_values)
for tag_name, tag_values in value.items()
}
def _deserialize(self, value, attr, data):
return {
tag_name: set(tag_values) for tag_name, tag_values in value.items()
}
class TimerField(fields.Field):
"""
Field for serializing ``timer.Timer`` objects, which is a ``dict``
of ``timer.Interval``.
"""
def _serialize(self, value, attr, obj):
return {
k: IntervalSchema(strict=True).dump(v).data
for k, v in value.items()
}
def _deserialize(self, value, attr, data):
return timing.Timer(
{
k: IntervalSchema(strict=True).load(v).data
for k, v in value.items()
}
)
class EntriesField(fields.Field):
"""
Handle encoding problems gracefully
"""
_BYTES_KEY = "_BYTES_KEY"
@staticmethod
def _binary_to_hex_list(binary_obj):
# make sure the hex repr is capitalized and leftpad'd with a zero
# because '0x0C' is better than '0xc'.
return [
"0x{}".format(hex(b)[2:].upper().zfill(2))
for b in bytearray(binary_obj)
]
@staticmethod
def _hex_list_to_binary(hex_list):
return bytes(bytearray([int(x, 16) for x in hex_list]))
def _render_unencodable_bytes_by_callable(
self, data, binary_serializer, recurse_lvl=0
):
"""
Find the lowest level at which encoding fails - if at all - and
serialize the byte-representation of that with the
``binary_serializer`` function.
:param data: Any data that's meant to be serialized
:type data: Any
:param binary_serializer: A callable that takes a binary object and
returns its serialized representation
:type binary_serializer: Callable[[bytes], Any]
:returns: Serialized representation of ``data``
:rtype: Any
"""
if recurse_lvl == 0:
datacp = deepcopy(data)
else:
datacp = data
try:
json.dumps(datacp, ensure_ascii=True)
return datacp
except (UnicodeDecodeError, TypeError):
if isinstance(datacp, MutableMapping):
for key in six.iterkeys(datacp):
datacp[key] = self._render_unencodable_bytes_by_callable(
data=datacp[key],
binary_serializer=binary_serializer,
recurse_lvl=(recurse_lvl + 1),
)
return datacp
if isinstance(datacp, MutableSequence):
for i in range(len(datacp)):
datacp[i] = self._render_unencodable_bytes_by_callable(
data=datacp[i],
binary_serializer=binary_serializer,
recurse_lvl=(recurse_lvl + 1),
)
return datacp
return {self._BYTES_KEY: binary_serializer(datacp)}
def _serialize(self, value, attr, obj):
super_serialize = lambda v: (
super(EntriesField, self)._serialize(v, attr, obj)
)
try:
json.dumps(value, ensure_ascii=True)
return super_serialize(value)
except (UnicodeDecodeError, TypeError):
value_new = self._render_unencodable_bytes_by_callable(
data=value, binary_serializer=self._binary_to_hex_list
)
return super_serialize(value_new)
def _deserialize(self, value, attr, obj, recurse_lvl=0):
"""
Check deeply to see if there is a {'bytes': [...]} dict and if so
convert it to a bytes object
"""
if recurse_lvl == 0:
valued = super(EntriesField, self)._deserialize(value, attr, obj)
else:
valued = value
if isinstance(valued, MutableMapping):
for key in six.iterkeys(valued):
if key == self._BYTES_KEY:
return self._hex_list_to_binary(valued[key])
valued[key] = self._deserialize(
value=valued[key],
attr=attr,
obj=obj,
recurse_lvl=(recurse_lvl + 1),
)
return valued
if isinstance(valued, MutableSequence):
for i in range(len(valued)):
valued[i] = self._deserialize(
value=valued[i],
attr=attr,
obj=obj,
recurse_lvl=(recurse_lvl + 1),
)
return valued
return valued
class TestCaseReportSchema(ReportSchema):
"""Schema for ``testing.TestCaseReport``"""
source_class = TestCaseReport
status_override = fields.String(allow_none=True)
entries = fields.List(EntriesField())
status = fields.String(dump_only=True)
runtime_status = fields.String(dump_only=True)
counter = fields.Dict(dump_only=True)
suite_related = fields.Bool()
timer = TimerField(required=True)
tags = TagField()
category = fields.String(dump_only=True)
status_reason = fields.String(allow_none=True)
@post_load
def make_report(self, data):
"""
Create the report object, assign ``timer`` &
``status_override`` attributes explicitly
"""
status_override = data.pop("status_override", None)
timer = data.pop("timer")
# We can discard the type field since we know what kind of report we
# are making.
if "type" in data:
data.pop("type")
rep = super(TestCaseReportSchema, self).make_report(data)
rep.status_override = status_override
rep.timer = timer
return rep
class TestGroupReportSchema(TestCaseReportSchema):
"""
Schema for ``testing.TestGroupReportSchema``, supports tree serialization.
"""
source_class = TestGroupReport
# category = fields.String()
part = fields.List(fields.Integer, allow_none=True)
extra_attributes = fields.Dict(allow_none=True)
fix_spec_path = fields.String(allow_none=True)
env_status = fields.String(allow_none=True)
# status_reason = fields.String(allow_none=True)
# runtime_status = fields.String(dump_only=True)
# counter = fields.Dict(dump_only=True)
entries = custom_fields.GenericNested(
schema_context={
TestCaseReport: TestCaseReportSchema,
TestGroupReport: "self",
},
many=True,
)
@post_load
def make_report(self, data):
"""
Propagate tag indices after deserialization
"""
rep = super(TestGroupReportSchema, self).make_report(data)
rep.propagate_tag_indices()
return rep
class TestReportSchema(Schema):
"""Schema for test report root, ``testing.TestReport``."""
timer = TimerField()
name = fields.String()
uid = fields.String()
meta = fields.Dict()
status = fields.String(dump_only=True)
runtime_status = fields.String(dump_only=True)
tags_index = TagField(dump_only=True)
status_override = fields.String(allow_none=True)
information = fields.List(fields.List(fields.String()))
counter = fields.Dict(dump_only=True)
attachments = fields.Dict()
entries = custom_fields.GenericNested(
schema_context={TestGroupReport: TestGroupReportSchema}, many=True
)
category = fields.String(dump_only=True)
@post_load
def make_test_report(self, data): # pylint: disable=no-self-use
"""Create report object & deserialize sub trees."""
load_tree = functools.partial(
load_tree_data,
node_schema=TestGroupReportSchema,
leaf_schema=TestCaseReportSchema,
)
entry_data = data.pop("entries")
status_override = data.pop("status_override")
timer = data.pop("timer")
test_plan_report = TestReport(**data)
test_plan_report.entries = [load_tree(c_data) for c_data in entry_data]
test_plan_report.propagate_tag_indices()
test_plan_report.status_override = status_override
test_plan_report.timer = timer
return test_plan_report
class ShallowTestReportSchema(Schema):
"""Schema for shallow serialization of ``TestReport``."""
name = fields.String(required=True)
uid = fields.String(required=True)
timer = TimerField(required=True)
meta = fields.Dict()
status = fields.String(dump_only=True)
runtime_status = fields.String(dump_only=True)
tags_index = TagField(dump_only=True)
status_override = fields.String(allow_none=True)
counter = fields.Dict(dump_only=True)
attachments = fields.Dict()
entry_uids = fields.List(fields.Str(), dump_only=True)
parent_uids = fields.List(fields.Str())
hash = fields.Integer(dump_only=True)
category = fields.String(dump_only=True)
@post_load
def make_test_report(self, data):
status_override = data.pop("status_override", None)
timer = data.pop("timer")
test_plan_report = TestReport(**data)
test_plan_report.propagate_tag_indices()
test_plan_report.status_override = status_override
test_plan_report.timer = timer
return test_plan_report
class ShallowTestGroupReportSchema(Schema):
"""
Schema for shallow serialization of ``TestGroupReport``.
"""
name = fields.String(required=True)
uid = fields.String(required=True)
timer = TimerField(required=True)
description = fields.String(allow_none=True)
part = fields.List(fields.Integer, allow_none=True)
fix_spec_path = fields.String(allow_none=True)
status_override = fields.String(allow_none=True)
status = fields.String(dump_only=True)
runtime_status = fields.String(dump_only=True)
counter = fields.Dict(dump_only=True)
suite_related = fields.Bool()
tags = TagField()
entry_uids = fields.List(fields.Str(), dump_only=True)
parent_uids = fields.List(fields.Str())
hash = fields.Integer(dump_only=True)
category = fields.String()
env_status = fields.String(allow_none=True)
@post_load
def make_testgroup_report(self, data):
status_override = data.pop("status_override", None)
timer = data.pop("timer")
group_report = TestGroupReport(**data)
group_report.status_override = status_override
group_report.timer = timer
group_report.propagate_tag_indices()
return group_report
|
[
"functools.partial",
"copy.deepcopy",
"testplan.common.utils.timing.Interval",
"marshmallow.fields.Dict",
"marshmallow.fields.Integer",
"six.iterkeys",
"marshmallow.fields.List",
"marshmallow.fields.Bool",
"json.dumps",
"marshmallow.fields.String",
"marshmallow.fields.Str",
"testplan.common.serialization.fields.GenericNested",
"testplan.common.serialization.fields.UTCDateTime"
] |
[((884, 911), 'testplan.common.serialization.fields.UTCDateTime', 'custom_fields.UTCDateTime', ([], {}), '()\n', (909, 911), True, 'from testplan.common.serialization import fields as custom_fields\n'), ((922, 964), 'testplan.common.serialization.fields.UTCDateTime', 'custom_fields.UTCDateTime', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (947, 964), True, 'from testplan.common.serialization import fields as custom_fields\n'), ((6173, 6203), 'marshmallow.fields.String', 'fields.String', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (6186, 6203), False, 'from marshmallow import Schema, fields, post_load\n'), ((6261, 6290), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (6274, 6290), False, 'from marshmallow import Schema, fields, post_load\n'), ((6312, 6341), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (6325, 6341), False, 'from marshmallow import Schema, fields, post_load\n'), ((6356, 6383), 'marshmallow.fields.Dict', 'fields.Dict', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (6367, 6383), False, 'from marshmallow import Schema, fields, post_load\n'), ((6404, 6417), 'marshmallow.fields.Bool', 'fields.Bool', ([], {}), '()\n', (6415, 6417), False, 'from marshmallow import Schema, fields, post_load\n'), ((6493, 6522), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (6506, 6522), False, 'from marshmallow import Schema, fields, post_load\n'), ((6544, 6574), 'marshmallow.fields.String', 'fields.String', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (6557, 6574), False, 'from marshmallow import Schema, fields, post_load\n'), ((7387, 7431), 'marshmallow.fields.List', 'fields.List', (['fields.Integer'], {'allow_none': '(True)'}), '(fields.Integer, allow_none=True)\n', (7398, 7431), False, 'from marshmallow import Schema, fields, post_load\n'), ((7455, 7483), 'marshmallow.fields.Dict', 'fields.Dict', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (7466, 7483), False, 'from marshmallow import Schema, fields, post_load\n'), ((7504, 7534), 'marshmallow.fields.String', 'fields.String', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (7517, 7534), False, 'from marshmallow import Schema, fields, post_load\n'), ((7552, 7582), 'marshmallow.fields.String', 'fields.String', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (7565, 7582), False, 'from marshmallow import Schema, fields, post_load\n'), ((7749, 7871), 'testplan.common.serialization.fields.GenericNested', 'custom_fields.GenericNested', ([], {'schema_context': "{TestCaseReport: TestCaseReportSchema, TestGroupReport: 'self'}", 'many': '(True)'}), "(schema_context={TestCaseReport:\n TestCaseReportSchema, TestGroupReport: 'self'}, many=True)\n", (7776, 7871), True, 'from testplan.common.serialization import fields as custom_fields\n'), ((8307, 8322), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (8320, 8322), False, 'from marshmallow import Schema, fields, post_load\n'), ((8333, 8348), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (8346, 8348), False, 'from marshmallow import Schema, fields, post_load\n'), ((8360, 8373), 'marshmallow.fields.Dict', 'fields.Dict', ([], {}), '()\n', (8371, 8373), False, 'from marshmallow import Schema, fields, post_load\n'), ((8388, 8417), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (8401, 8417), False, 'from marshmallow import Schema, fields, post_load\n'), ((8439, 8468), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (8452, 8468), False, 'from marshmallow import Schema, fields, post_load\n'), ((8533, 8563), 'marshmallow.fields.String', 'fields.String', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (8546, 8563), False, 'from marshmallow import Schema, fields, post_load\n'), ((8638, 8665), 'marshmallow.fields.Dict', 'fields.Dict', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (8649, 8665), False, 'from marshmallow import Schema, fields, post_load\n'), ((8685, 8698), 'marshmallow.fields.Dict', 'fields.Dict', ([], {}), '()\n', (8696, 8698), False, 'from marshmallow import Schema, fields, post_load\n'), ((8714, 8813), 'testplan.common.serialization.fields.GenericNested', 'custom_fields.GenericNested', ([], {'schema_context': '{TestGroupReport: TestGroupReportSchema}', 'many': '(True)'}), '(schema_context={TestGroupReport:\n TestGroupReportSchema}, many=True)\n', (8741, 8813), True, 'from testplan.common.serialization import fields as custom_fields\n'), ((8839, 8868), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (8852, 8868), False, 'from marshmallow import Schema, fields, post_load\n'), ((9736, 9764), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (9749, 9764), False, 'from marshmallow import Schema, fields, post_load\n'), ((9775, 9803), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (9788, 9803), False, 'from marshmallow import Schema, fields, post_load\n'), ((9853, 9866), 'marshmallow.fields.Dict', 'fields.Dict', ([], {}), '()\n', (9864, 9866), False, 'from marshmallow import Schema, fields, post_load\n'), ((9880, 9909), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (9893, 9909), False, 'from marshmallow import Schema, fields, post_load\n'), ((9931, 9960), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (9944, 9960), False, 'from marshmallow import Schema, fields, post_load\n'), ((10025, 10055), 'marshmallow.fields.String', 'fields.String', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (10038, 10055), False, 'from marshmallow import Schema, fields, post_load\n'), ((10070, 10097), 'marshmallow.fields.Dict', 'fields.Dict', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (10081, 10097), False, 'from marshmallow import Schema, fields, post_load\n'), ((10116, 10129), 'marshmallow.fields.Dict', 'fields.Dict', ([], {}), '()\n', (10127, 10129), False, 'from marshmallow import Schema, fields, post_load\n'), ((10244, 10274), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (10258, 10274), False, 'from marshmallow import Schema, fields, post_load\n'), ((10290, 10319), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (10303, 10319), False, 'from marshmallow import Schema, fields, post_load\n'), ((10830, 10858), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (10843, 10858), False, 'from marshmallow import Schema, fields, post_load\n'), ((10869, 10897), 'marshmallow.fields.String', 'fields.String', ([], {'required': '(True)'}), '(required=True)\n', (10882, 10897), False, 'from marshmallow import Schema, fields, post_load\n'), ((10954, 10984), 'marshmallow.fields.String', 'fields.String', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (10967, 10984), False, 'from marshmallow import Schema, fields, post_load\n'), ((10996, 11040), 'marshmallow.fields.List', 'fields.List', (['fields.Integer'], {'allow_none': '(True)'}), '(fields.Integer, allow_none=True)\n', (11007, 11040), False, 'from marshmallow import Schema, fields, post_load\n'), ((11061, 11091), 'marshmallow.fields.String', 'fields.String', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (11074, 11091), False, 'from marshmallow import Schema, fields, post_load\n'), ((11114, 11144), 'marshmallow.fields.String', 'fields.String', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (11127, 11144), False, 'from marshmallow import Schema, fields, post_load\n'), ((11158, 11187), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (11171, 11187), False, 'from marshmallow import Schema, fields, post_load\n'), ((11209, 11238), 'marshmallow.fields.String', 'fields.String', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (11222, 11238), False, 'from marshmallow import Schema, fields, post_load\n'), ((11253, 11280), 'marshmallow.fields.Dict', 'fields.Dict', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (11264, 11280), False, 'from marshmallow import Schema, fields, post_load\n'), ((11301, 11314), 'marshmallow.fields.Bool', 'fields.Bool', ([], {}), '()\n', (11312, 11314), False, 'from marshmallow import Schema, fields, post_load\n'), ((11451, 11481), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'dump_only': '(True)'}), '(dump_only=True)\n', (11465, 11481), False, 'from marshmallow import Schema, fields, post_load\n'), ((11497, 11512), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (11510, 11512), False, 'from marshmallow import Schema, fields, post_load\n'), ((11530, 11560), 'marshmallow.fields.String', 'fields.String', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (11543, 11560), False, 'from marshmallow import Schema, fields, post_load\n'), ((1102, 1125), 'testplan.common.utils.timing.Interval', 'timing.Interval', ([], {}), '(**data)\n', (1117, 1125), False, 'from testplan.common.utils import timing\n'), ((9034, 9140), 'functools.partial', 'functools.partial', (['load_tree_data'], {'node_schema': 'TestGroupReportSchema', 'leaf_schema': 'TestCaseReportSchema'}), '(load_tree_data, node_schema=TestGroupReportSchema,\n leaf_schema=TestCaseReportSchema)\n', (9051, 9140), False, 'import functools\n'), ((10159, 10171), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (10169, 10171), False, 'from marshmallow import Schema, fields, post_load\n'), ((10219, 10231), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (10229, 10231), False, 'from marshmallow import Schema, fields, post_load\n'), ((11366, 11378), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (11376, 11378), False, 'from marshmallow import Schema, fields, post_load\n'), ((11426, 11438), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (11436, 11438), False, 'from marshmallow import Schema, fields, post_load\n'), ((3353, 3367), 'copy.deepcopy', 'deepcopy', (['data'], {}), '(data)\n', (3361, 3367), False, 'from copy import deepcopy\n'), ((3433, 3470), 'json.dumps', 'json.dumps', (['datacp'], {'ensure_ascii': '(True)'}), '(datacp, ensure_ascii=True)\n', (3443, 3470), False, 'import json\n'), ((4559, 4595), 'json.dumps', 'json.dumps', (['value'], {'ensure_ascii': '(True)'}), '(value, ensure_ascii=True)\n', (4569, 4595), False, 'import json\n'), ((5300, 5320), 'six.iterkeys', 'six.iterkeys', (['valued'], {}), '(valued)\n', (5312, 5320), False, 'import six\n'), ((8606, 8621), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (8619, 8621), False, 'from marshmallow import Schema, fields, post_load\n'), ((3623, 3643), 'six.iterkeys', 'six.iterkeys', (['datacp'], {}), '(datacp)\n', (3635, 3643), False, 'import six\n')]
|
"""
Classes for running a Gretel Job as a local container
"""
from __future__ import annotations
import atexit
import io
import signal
import tarfile
import uuid
from dataclasses import dataclass
from pathlib import Path
from time import sleep
from typing import Dict, Iterator, List, Optional, Tuple, TYPE_CHECKING, Union
from urllib.parse import urlparse
import docker
import docker.errors
import smart_open
from docker.models.containers import Container
from docker.models.volumes import Volume
from docker.types.containers import DeviceRequest
from tqdm.asyncio import tqdm as asyncio_tqdm
from tqdm.auto import tqdm
from gretel_client.config import get_logger, get_session_config
from gretel_client.projects.exceptions import ContainerRunError, DockerEnvironmentError
from gretel_client.projects.jobs import ACTIVE_STATES, Job
from gretel_client.rest.api.opt_api import OptApi
if TYPE_CHECKING:
from gretel_client.projects.models import Model
else:
Model = None
DEFAULT_ARTIFACT_DIR = "/workspace"
DEFAULT_GPU_CONFIG = DeviceRequest(count=-1, capabilities=[["gpu"]])
class DataVolume:
volume: Optional[Volume] = None
volume_container: Optional[Container] = None
def __init__(self, host_dir: str, docker_client: docker.DockerClient):
self.name = f"gretel-{uuid.uuid4().hex[:5]}"
self.host_dir = host_dir
self.docker_client = docker_client
self.local_files = []
self.volume_image = "busybox:latest"
atexit.register(self.cleanup)
def add_file(self, local_file: Union[Path, str]) -> str:
if not isinstance(local_file, str):
local_file = str(local_file)
self.local_files.append(local_file)
return f"{self.host_dir}/{self._extract_file_name(local_file)}"
def _extract_file_name(self, path: str) -> str:
return Path(urlparse(path).path).name
def copy_files(self, files: List[str], volume_container: Container):
copy_stream = io.BytesIO()
with tarfile.open(fileobj=copy_stream, mode="w") as tar_archive:
for file in files:
with smart_open.open(file, "rb", ignore_ext=True) as src: # type: ignore
src.seek(0, 2)
info = tarfile.TarInfo(name=self._extract_file_name(file))
info.size = src.tell()
src.seek(0)
tar_archive.addfile(fileobj=src, tarinfo=info)
copy_stream.seek(0)
volume_container.put_archive(data=copy_stream, path=self.host_dir)
def prepare_volume(self) -> dict:
self.volume = self.docker_client.volumes.create(name=self.name) # type:ignore
self.docker_client.images.pull(self.volume_image)
self.volume_container = self.docker_client.containers.create( # type:ignore
image=self.volume_image,
volumes=[f"{self.volume.name}:{self.host_dir}"], # type: ignore
)
self.copy_files(self.local_files, self.volume_container) # type:ignore
return {self.name: {"bind": self.host_dir, "mode": "rw"}}
def cleanup(self):
if self.volume_container:
try:
self.volume_container.remove(force=True)
except Exception:
pass
if self.volume:
try:
self.volume.remove(force=True)
except Exception:
pass
class ContainerRun:
"""Runs a Gretel Job from a local container.
Args:
job: Job to run as docker container.
"""
image: str
"""The container image used for running the job"""
model: Model
"""The model associated with the container run"""
output_dir: Optional[Path]
"""Local file path to save artifacts to."""
container_output_dir: Optional[str]
"""Output directory on the container where artifacts placed."""
_docker_client: docker.DockerClient
"""Docker SDK instance"""
_container: Optional[Container] = None
"""Reference to a running or completed container run."""
def __init__(self, job: Job):
check_docker_env()
self._docker_client = docker.from_env()
self.image = job.container_image
self.input_volume = DataVolume("/in", self._docker_client)
self.device_requests = []
self.run_params = ["--disable-cloud-upload"]
self.job = job
self.configure_worker_token(job.worker_key)
self.logger = get_logger(__name__)
self.debug = False
@classmethod
def from_job(cls, job: Job) -> ContainerRun:
job._poll_job_endpoint()
return cls(job)
def start(self):
"""Run job via a local container. This method
is async and will return after the job has started.
If you wish to block until the container has finished, the
``wait`` method may be used.
"""
self._run(remove=self.debug)
def extract_output_dir(self, dest: str):
if not self.container_output_dir:
return
extract_container_path(self._container, self.container_output_dir, dest)
def enable_debug(self):
self.debug = True
def configure_worker_token(self, worker_token: str):
self.run_params.extend(["--worker-token", worker_token])
def configure_output_dir(
self, host_dir: str, container_dir: str = DEFAULT_ARTIFACT_DIR
):
self.host_dir = host_dir
self.container_output_dir = container_dir
self.run_params.extend(["--artifact-dir", container_dir])
def configure_model(self, model_path: Union[str, Path]):
if not isinstance(model_path, str):
model_path = str(model_path)
in_model_path = self.input_volume.add_file(model_path)
self.run_params.extend(["--model-path", in_model_path])
def configure_input_data(self, input_data: Union[str, Path]):
if not isinstance(input_data, str):
input_data = str(input_data)
in_data_path = self.input_volume.add_file(input_data)
self.run_params.extend(["--data-source", in_data_path])
def enable_cloud_uploads(self):
self.run_params.remove("--disable-cloud-upload")
def configure_gpu(self):
try:
self._check_gpu()
except Exception as ex:
raise ContainerRunError("GPU could not be configured") from ex
self.device_requests.append(DEFAULT_GPU_CONFIG)
def _check_gpu(self):
if "synthetics" not in self.image:
raise ContainerRunError("This image does not require a GPU")
image = self._pull()
self._docker_client.containers.run(
image,
entrypoint="bash",
command=["-c", "nvidia-smi"],
detach=False,
remove=True,
device_requests=[DEFAULT_GPU_CONFIG],
)
def stop(self, force: bool = False):
"""If there is a running container this command will stop that
container.
Args:
force: If force is ``True``, ``SIGKILL`` will be sent to the
container, otherwise ``SIGTERM``.
"""
sig = signal.SIGKILL if force else signal.SIGTERM
try:
self._container.kill(int(sig))
except Exception:
pass
def delete(self):
"""Remove the docker container"""
if self.debug:
return
try:
self._container.remove()
except Exception:
pass
def _pull(self):
self.logger.debug("Authenticating image pull")
auth, _ = _get_container_auth()
self.logger.info(f"Pulling container image {self.image}")
try:
pull = self._docker_client.api.pull(
self.image, auth_config=auth, stream=True, decode=True
)
progress_printer = _PullProgressPrinter(pull)
progress_printer.start()
except Exception as ex:
raise ContainerRunError(f"Could not pull image {self.image}") from ex
return self.image
def _run(self, remove: bool = True):
image = self._pull()
self.logger.debug("Preparing input data volume")
volume_config = self.input_volume.prepare_volume()
self._container = self._docker_client.containers.run( # type:ignore
image,
self.run_params,
detach=True,
volumes=volume_config,
device_requests=self.device_requests,
)
# ensure that the detached container stops when the process is closed
atexit.register(self._cleanup)
def get_logs(self) -> str:
try:
return self._container.logs().decode("utf-8")
except Exception as ex:
raise ContainerRunError(
"Cannot get logs. Please re-run the job with debugging enabled."
) from ex
@property
def active(self) -> bool:
"""Returns ``True`` if the container is running. ``False`` otherwise."""
return self.container_status not in {"exited", "dead", "unknown"}
@property
def container_status(self) -> Optional[str]:
"""Status from the running docker container.
Valid statuses include:
created, restarting, running, removing, paused, exited, or dead
If the container isn't running, an "unknown" status will be
returned.
"""
if self._container:
try:
self._container.reload()
return self._container.status
except docker.errors.NotFound:
pass
return "unknown"
def is_ok(self):
"""Checks to see if the container is ok.
Raises:
``ContainerRunError`` if there is a problem with the container.
"""
if self.job.status in ACTIVE_STATES and not self.active:
try:
self.logger.debug(self.get_logs())
except Exception:
pass
if not self.debug:
self.logger.warn("Re-run with debugging enabled for more details.")
raise ContainerRunError(
("Could not launch container. Please check the logs for more details.")
)
def wait(self, timeout: int = 30):
"""Blocks until a running container has completed. If the
container hasn't started yet, we wait until a ``timeout``
interval is reached.
Args:
timeout: The time in seconds to wait for a container
to start. If the timeout is reached, the function will
return.
"""
cur = 0
while self.active or cur < timeout:
cur += 1
sleep(1)
def _cleanup(self):
self.stop(force=True)
self.delete()
def graceful_shutdown(self):
"""Attempts to gracefully shutdown the container run."""
try:
self.job.cancel()
except Exception:
pass
self.wait(15)
def _get_container_auth() -> Tuple[dict, str]:
"""Exchanges a Gretel Api Key for container registry credentials.
Returns:
An authentication object and registry endpoint. The authentication
object may be passed into the docker sdk.
"""
config = get_session_config()
opt_api = config.get_api(OptApi)
cred_resp = opt_api.get_container_login()
return cred_resp.get("data").get("auth"), cred_resp.get("data").get("registry")
def extract_container_path(container: Container, container_path: str, host_path: str):
"""Writes all files from a container path to a host path"""
stream = io.BytesIO()
archive, _ = container.get_archive(container_path)
for b in archive:
stream.write(b)
stream.seek(0)
with tarfile.open(fileobj=stream, mode="r") as tar:
dest_path = Path(host_path)
dest_path.mkdir(exist_ok=True, parents=True)
members_to_extact = []
for member in tar.getmembers():
if member.isfile():
member.name = Path(member.name).name
members_to_extact.append(member)
tar.extractall(path=dest_path, members=members_to_extact)
def check_docker_env():
"""Checks that the local docker env is configured.
Raises:
``DockerEnvironmentError`` if the docker environment isn't
configured correctly.
"""
try:
client = docker.from_env()
client.ping()
except (docker.errors.APIError, docker.errors.DockerException) as ex:
raise DockerEnvironmentError(
"Can't connect to docker. Please check that docker is installed and running."
) from ex
@dataclass
class _PullUpdate:
"""The Docker daemon emits pull progress as a JSON
schema. This dataclass is responsible for deserializing
each JSON progress update from Docker.
"""
id: str
"""Update id"""
status: str
"""Update status"""
current: Optional[int]
"""Units in mb"""
total: Optional[int]
"""Units in mb"""
def __post_init__(self):
self.current = round(self.current / 2 ** 20) if self.current else None
self.total = round(self.total / 2 ** 20) if self.total else None
@classmethod
def from_dict(cls, source: dict) -> _PullUpdate:
return cls(
id=source.get("id", source.get("status")),
status=source["status"],
current=source.get("progressDetail", {}).get("current"),
total=source.get("progressDetail", {}).get("total"),
)
@property
def units(self) -> str:
return "mb"
def build_indicator(self) -> tqdm:
params = {"total": self.total, "unit": self.units}
# if we're in a notebook environment, ncols shouldn't
# be configured. in a terminal environment tqdm
# should be an instance of asyncio_tqdm.
if tqdm == asyncio_tqdm:
params["ncols"] = 80
t = tqdm(**params)
t.set_description(self.status)
return t
class _PullProgressPrinter:
"""Print docker pull progress"""
def __init__(self, pull: Iterator):
self._pull = pull
self._bars: Dict[str, tqdm] = {}
def start(self):
"""Begin iterating and printing pull updates
from the docker daemon.
"""
for update in self._iter_updates():
if update.current:
self._update_progress(update)
self._close_bars()
def _close_bars(self):
for bar in self._bars.values():
bar.close()
def _update_progress(self, update: _PullUpdate):
bar = self._get_or_create_bar(update)
self._update_bar_total(bar, update)
def _get_or_create_bar(self, update: _PullUpdate) -> tqdm:
if update.id in self._bars:
return self._bars[update.id]
else:
self._bars[update.id] = update.build_indicator()
return self._bars[update.id]
def _update_bar_total(self, bar: tqdm, update: _PullUpdate):
if bar.desc != update.status:
bar.set_description(update.status)
if update.current:
bar.update(update.current - bar.n)
def _iter_updates(self) -> Iterator[_PullUpdate]:
for raw_update in self._pull:
yield _PullUpdate.from_dict(raw_update)
|
[
"docker.from_env",
"atexit.register",
"io.BytesIO",
"uuid.uuid4",
"docker.types.containers.DeviceRequest",
"gretel_client.projects.exceptions.DockerEnvironmentError",
"smart_open.open",
"time.sleep",
"tqdm.auto.tqdm",
"pathlib.Path",
"gretel_client.projects.exceptions.ContainerRunError",
"gretel_client.config.get_logger",
"tarfile.open",
"gretel_client.config.get_session_config",
"urllib.parse.urlparse"
] |
[((1042, 1089), 'docker.types.containers.DeviceRequest', 'DeviceRequest', ([], {'count': '(-1)', 'capabilities': "[['gpu']]"}), "(count=-1, capabilities=[['gpu']])\n", (1055, 1089), False, 'from docker.types.containers import DeviceRequest\n'), ((11283, 11303), 'gretel_client.config.get_session_config', 'get_session_config', ([], {}), '()\n', (11301, 11303), False, 'from gretel_client.config import get_logger, get_session_config\n'), ((11637, 11649), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (11647, 11649), False, 'import io\n'), ((1485, 1514), 'atexit.register', 'atexit.register', (['self.cleanup'], {}), '(self.cleanup)\n', (1500, 1514), False, 'import atexit\n'), ((1973, 1985), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1983, 1985), False, 'import io\n'), ((4144, 4161), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (4159, 4161), False, 'import docker\n'), ((4454, 4474), 'gretel_client.config.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (4464, 4474), False, 'from gretel_client.config import get_logger, get_session_config\n'), ((8564, 8594), 'atexit.register', 'atexit.register', (['self._cleanup'], {}), '(self._cleanup)\n', (8579, 8594), False, 'import atexit\n'), ((11779, 11817), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'stream', 'mode': '"""r"""'}), "(fileobj=stream, mode='r')\n", (11791, 11817), False, 'import tarfile\n'), ((11846, 11861), 'pathlib.Path', 'Path', (['host_path'], {}), '(host_path)\n', (11850, 11861), False, 'from pathlib import Path\n'), ((12411, 12428), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (12426, 12428), False, 'import docker\n'), ((13957, 13971), 'tqdm.auto.tqdm', 'tqdm', ([], {}), '(**params)\n', (13961, 13971), False, 'from tqdm.auto import tqdm\n'), ((1999, 2042), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'copy_stream', 'mode': '"""w"""'}), "(fileobj=copy_stream, mode='w')\n", (2011, 2042), False, 'import tarfile\n'), ((6510, 6564), 'gretel_client.projects.exceptions.ContainerRunError', 'ContainerRunError', (['"""This image does not require a GPU"""'], {}), "('This image does not require a GPU')\n", (6527, 6564), False, 'from gretel_client.projects.exceptions import ContainerRunError, DockerEnvironmentError\n'), ((10109, 10202), 'gretel_client.projects.exceptions.ContainerRunError', 'ContainerRunError', (['"""Could not launch container. Please check the logs for more details."""'], {}), "(\n 'Could not launch container. Please check the logs for more details.')\n", (10126, 10202), False, 'from gretel_client.projects.exceptions import ContainerRunError, DockerEnvironmentError\n'), ((10711, 10719), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (10716, 10719), False, 'from time import sleep\n'), ((12539, 12650), 'gretel_client.projects.exceptions.DockerEnvironmentError', 'DockerEnvironmentError', (['"""Can\'t connect to docker. Please check that docker is installed and running."""'], {}), '(\n "Can\'t connect to docker. Please check that docker is installed and running."\n )\n', (12561, 12650), False, 'from gretel_client.projects.exceptions import ContainerRunError, DockerEnvironmentError\n'), ((6309, 6357), 'gretel_client.projects.exceptions.ContainerRunError', 'ContainerRunError', (['"""GPU could not be configured"""'], {}), "('GPU could not be configured')\n", (6326, 6357), False, 'from gretel_client.projects.exceptions import ContainerRunError, DockerEnvironmentError\n'), ((7956, 8011), 'gretel_client.projects.exceptions.ContainerRunError', 'ContainerRunError', (['f"""Could not pull image {self.image}"""'], {}), "(f'Could not pull image {self.image}')\n", (7973, 8011), False, 'from gretel_client.projects.exceptions import ContainerRunError, DockerEnvironmentError\n'), ((8748, 8836), 'gretel_client.projects.exceptions.ContainerRunError', 'ContainerRunError', (['"""Cannot get logs. Please re-run the job with debugging enabled."""'], {}), "(\n 'Cannot get logs. Please re-run the job with debugging enabled.')\n", (8765, 8836), False, 'from gretel_client.projects.exceptions import ContainerRunError, DockerEnvironmentError\n'), ((1851, 1865), 'urllib.parse.urlparse', 'urlparse', (['path'], {}), '(path)\n', (1859, 1865), False, 'from urllib.parse import urlparse\n'), ((2111, 2155), 'smart_open.open', 'smart_open.open', (['file', '"""rb"""'], {'ignore_ext': '(True)'}), "(file, 'rb', ignore_ext=True)\n", (2126, 2155), False, 'import smart_open\n'), ((12048, 12065), 'pathlib.Path', 'Path', (['member.name'], {}), '(member.name)\n', (12052, 12065), False, 'from pathlib import Path\n'), ((1303, 1315), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1313, 1315), False, 'import uuid\n')]
|
import logging
logging.basicConfig(filename='ews.log', level=logging.INFO, format='%(asctime)s - %(message)s')
default_app_config = 'ews.apps.EwsConfig'
|
[
"logging.basicConfig"
] |
[((16, 116), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""ews.log"""', 'level': 'logging.INFO', 'format': '"""%(asctime)s - %(message)s"""'}), "(filename='ews.log', level=logging.INFO, format=\n '%(asctime)s - %(message)s')\n", (35, 116), False, 'import logging\n')]
|
from __future__ import print_function
import threading
import math
from numpy import sign, clip
import rospy
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle
# Mode enum
MODE_FORWARD = 0
MODE_OBSTACLE_PLAN = 1
MODE_OBSTACLE_TURN = 2
class PilotNode:
def __init__(self, loophz, turn_radians, turn_radians_tolerance, cmd_vel_pub, pcontroller):
"""
Parameters:
:param int loophz:
:param float turn_radians:
:param float turn_radians_tolerance:
:param rospy.Publisher cmd_vel_pub:
:param PContoller pcontroller:
"""
self._loophz = loophz
self._turn_radians = turn_radians
self._turn_radians_tolerance = turn_radians_tolerance
self._cmd_vel_pub = cmd_vel_pub
self._pcontroller = pcontroller
self._prox_sensor = False
self._odom = Odometry()
self._state_lock = threading.RLock()
self._current_heading = 0.0 # radians
self._mode = MODE_OBSTACLE_PLAN # MODE_XXX enum
self._heading_goal = 0.0 # radians
self._obstacle_forward = None # True/False/None
self._obstacle_right = None # True/False/None
self._obstacle_left = None # True/False/None
self._reverse_plan = False # True/False
def run(self):
looprate = rospy.Rate(self._loophz)
try:
while not rospy.is_shutdown():
# Update the heading state
with self._state_lock:
self._current_heading = heading_from_odometry(self._odom)
rospy.logdebug(
"Current heading: {} deg (goal: {} deg)".format(
round(math.degrees(normalize_theta(self._current_heading)), 2),
round(math.degrees(self._heading_goal)), 2))
self._decide()
looprate.sleep()
except rospy.ROSInterruptException:
rospy.logwarn("ROSInterruptException received in main loop")
# ~~~~~~~~~~~~~~~~~~~~~~~~
# Subscription callbacks
# ~~~~~~~~~~~~~~~~~~~~~~~~
def prox_callback(self, msg):
"""
:param Proximity msg: The Proximity message
"""
with self._state_lock:
self._prox_sensor = msg.sensors[0]
def odom_callback(self, msg):
"""
:param Odometry msg: The Odometry message
"""
with self._state_lock:
self._odom = msg
# ~~~~~~~~~~~~~~~~~~~~
# Non-public methods
# ~~~~~~~~~~~~~~~~~~~~
def _send_drive_cmd(self, speed):
""" Sends the Twist command for linear.x speed in meters/sec
:param float speed: Speed in meters/sec for linear.x
"""
cmd = Twist()
cmd.linear.x = speed
self._cmd_vel_pub.publish(cmd)
def _send_turn_cmd(self, radians_sec):
""" Sends the Twist command for angular.z speed in radians/sec
:param float radians_sec: Angular speed in radians/sec for angular.z
"""
cmd = Twist()
cmd.angular.z = radians_sec
self._cmd_vel_pub.publish(cmd)
rospy.logdebug("sent cmd_vel: {}".format(cmd.angular.z))
def _set_forward_mode(self):
self._obstacle_forward = None
self._obstacle_right = None
self._obstacle_left = None
self._reverse_plan = False
self._mode = MODE_FORWARD
def _decide(self):
if self._mode == MODE_FORWARD:
if self._prox_sensor is True:
# If driving forward, and center sensor detects obstacle
# --> stop and enter obstacle mode
self._send_drive_cmd(0)
self._mode = MODE_OBSTACLE_PLAN
rospy.logdebug("Obstacle detected while moving forward")
else:
# No obstacle, so command base forward some more
linear_v = self._pcontroller.linear_velocity()
self._send_drive_cmd(linear_v)
rospy.logdebug("Forward is clear, proceeding to drive forward")
else: # Mode is either _PLAN or _TURN
# Need to calculate the heading to which to turn next
if self._mode == MODE_OBSTACLE_PLAN:
rospy.logdebug("Planning next movement")
self._process_obstacle_plan()
# Execute the turn to the target heading
if self._mode == MODE_OBSTACLE_TURN:
rospy.logdebug("Turning base")
self._process_obstacle_turn()
def _process_obstacle_plan(self):
"""
Note, the logic here assumes that if self._obstacle_XXX is None
then we haven't yet been in position to test it. Once we test that
position, we set the value to either True (obstacle) or False (clear)
then calculate the turn and switch into TURN mode.
Therefore, if we are in PLAN mode, we can determine which sides we need
to test still by examiming the self._obstacle_XXX state.
Example:
If in PLAN mode and self._obstacle_forward is NONE, we need to
test the front position, and if TRUE, turn to the right side.
If in PLAN mode and self._obstacle_forward is TRUE,
and self._obstacle_right is NONE: we have turned to the right
but have not yet tested the right side for an obstacle. So test
the position and if TRUE, we need to turn to the left side.
"""
if self._obstacle_forward in (None, False):
if self._prox_sensor is True:
# Calculate the turn to check the right side
self._obstacle_forward = True
rospy.logdebug("(Planner) Forward is blocked")
self._heading_goal = normalize_theta(
self._current_heading - self._turn_radians)
rospy.logdebug(
"(Planner) Turning to check right side. New heading: {}".format(
math.degrees(self._heading_goal)))
self._mode = MODE_OBSTACLE_TURN
else:
self._set_forward_mode()
elif self._obstacle_right is None:
if self._prox_sensor is True:
# Calculate the turn to check the left side
# We've already turned to the right, so we need to turn 180 to test
# the left side
self._obstacle_right = True
rospy.logdebug("(Planner) Right side is blocked")
self._heading_goal = normalize_theta(
self._current_heading + self._turn_radians * 2)
rospy.logdebug("(Planner) Turning to check left side. New heading: {}".format(
math.degrees(self._heading_goal)))
self._mode = MODE_OBSTACLE_TURN
else:
self._set_forward_mode()
elif self._obstacle_left is None:
if self._prox_sensor is True:
# All three of fwd, right, left are blocked
self._obstacle_left = True
rospy.logdebug("(Planner) left is blocked")
self._heading_goal = normalize_theta(
self._current_heading + self._turn_radians)
rospy.logdebug("(Planner) Turning to rear to backtrack. New heading: {}".format(
math.degrees(self._heading_goal)))
self._mode = MODE_OBSTACLE_TURN
self._reverse_plan = True
else:
self._set_forward_mode()
elif self._reverse_plan is True:
# We were performing a turn to reverse. Since we're in plan mode
# again, this means the turn is complete
rospy.logdebug("(Planner) Turn to rear complete, moving forward")
self._set_forward_mode()
else:
# This should not be possible
message = "Obstacle plan logic reached else block that should not be possible"
rospy.logerr(message)
raise RuntimeError(message)
def _process_obstacle_turn(self):
steering_angle = calc_steering_angle(self._current_heading, self._heading_goal)
rospy.logdebug("Steering angle: {} radians".format(round(steering_angle, 2)))
if abs(steering_angle) > self._turn_radians_tolerance:
# We still need to turn some more
angular_v = self._pcontroller.angular_velocity(
self._current_heading, self._heading_goal)
self._send_turn_cmd(angular_v)
else:
# We are done turning, back to obstacle planning
self._mode = MODE_OBSTACLE_PLAN
rospy.logdebug(
"Turn is complete (delta {} < turn radians tolerance {})".format(
steering_angle, self._turn_radians_tolerance))
class PVelocityController:
def __init__(self, min_linear_v, max_linear_v,
min_angular_v, max_angular_v, linear_k=1, angular_k=1):
self.min_linear_v = min_linear_v
self.max_linear_v = max_linear_v
self.max_angular_v = max_angular_v
self.min_angular_v = min_angular_v
self.linear_k = linear_k
self.angular_k = angular_k
def linear_velocity(self, distance_m=1):
"""Calculat the linear velocity using a Proportional (P) method,
clamped to within the min and max linear speeds.
Parameters:
:param float distance_m: Distance to drive
Returns:
The linear velocity in m/sec
:rtype: float
"""
linear_v = self.linear_k * distance_m
_sign = sign(linear_v)
return clip(linear_v, self.min_linear_v, self.max_linear_v) * _sign
def angular_velocity(self, current_angle, target_angle):
"""Calculate the angular velocity using a Proportional (P) method,
clamped to within the min and max angular speeds.
Parameters:
:param float current_angle: The current heading of the robot in radians
:param float target_angle: The goal heading of the robot in radians
Returns:
The angular velocity in radians/sec
:rtype: float
"""
angular_v = self.angular_k * calc_steering_angle(current_angle, target_angle)
_sign = sign(angular_v)
return clip(abs(angular_v), self.min_angular_v, self.max_angular_v) * _sign
|
[
"rospy.logwarn",
"b2_logic.odometry_helpers.heading_from_odometry",
"rospy.logerr",
"nav_msgs.msg.Odometry",
"b2_logic.odometry_helpers.calc_steering_angle",
"b2_logic.odometry_helpers.normalize_theta",
"threading.RLock",
"geometry_msgs.msg.Twist",
"rospy.Rate",
"numpy.clip",
"rospy.is_shutdown",
"rospy.logdebug",
"numpy.sign",
"math.degrees"
] |
[((994, 1004), 'nav_msgs.msg.Odometry', 'Odometry', ([], {}), '()\n', (1002, 1004), False, 'from nav_msgs.msg import Odometry\n'), ((1032, 1049), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1047, 1049), False, 'import threading\n'), ((1480, 1504), 'rospy.Rate', 'rospy.Rate', (['self._loophz'], {}), '(self._loophz)\n', (1490, 1504), False, 'import rospy\n'), ((2916, 2923), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (2921, 2923), False, 'from geometry_msgs.msg import Twist\n'), ((3210, 3217), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (3215, 3217), False, 'from geometry_msgs.msg import Twist\n'), ((8316, 8378), 'b2_logic.odometry_helpers.calc_steering_angle', 'calc_steering_angle', (['self._current_heading', 'self._heading_goal'], {}), '(self._current_heading, self._heading_goal)\n', (8335, 8378), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n'), ((9831, 9845), 'numpy.sign', 'sign', (['linear_v'], {}), '(linear_v)\n', (9835, 9845), False, 'from numpy import sign, clip\n'), ((10506, 10521), 'numpy.sign', 'sign', (['angular_v'], {}), '(angular_v)\n', (10510, 10521), False, 'from numpy import sign, clip\n'), ((9861, 9913), 'numpy.clip', 'clip', (['linear_v', 'self.min_linear_v', 'self.max_linear_v'], {}), '(linear_v, self.min_linear_v, self.max_linear_v)\n', (9865, 9913), False, 'from numpy import sign, clip\n'), ((10441, 10489), 'b2_logic.odometry_helpers.calc_steering_angle', 'calc_steering_angle', (['current_angle', 'target_angle'], {}), '(current_angle, target_angle)\n', (10460, 10489), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n'), ((1540, 1559), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1557, 1559), False, 'import rospy\n'), ((2118, 2178), 'rospy.logwarn', 'rospy.logwarn', (['"""ROSInterruptException received in main loop"""'], {}), "('ROSInterruptException received in main loop')\n", (2131, 2178), False, 'import rospy\n'), ((3903, 3959), 'rospy.logdebug', 'rospy.logdebug', (['"""Obstacle detected while moving forward"""'], {}), "('Obstacle detected while moving forward')\n", (3917, 3959), False, 'import rospy\n'), ((4169, 4232), 'rospy.logdebug', 'rospy.logdebug', (['"""Forward is clear, proceeding to drive forward"""'], {}), "('Forward is clear, proceeding to drive forward')\n", (4183, 4232), False, 'import rospy\n'), ((4413, 4453), 'rospy.logdebug', 'rospy.logdebug', (['"""Planning next movement"""'], {}), "('Planning next movement')\n", (4427, 4453), False, 'import rospy\n'), ((4619, 4649), 'rospy.logdebug', 'rospy.logdebug', (['"""Turning base"""'], {}), "('Turning base')\n", (4633, 4649), False, 'import rospy\n'), ((5867, 5913), 'rospy.logdebug', 'rospy.logdebug', (['"""(Planner) Forward is blocked"""'], {}), "('(Planner) Forward is blocked')\n", (5881, 5913), False, 'import rospy\n'), ((5952, 6011), 'b2_logic.odometry_helpers.normalize_theta', 'normalize_theta', (['(self._current_heading - self._turn_radians)'], {}), '(self._current_heading - self._turn_radians)\n', (5967, 6011), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n'), ((1688, 1721), 'b2_logic.odometry_helpers.heading_from_odometry', 'heading_from_odometry', (['self._odom'], {}), '(self._odom)\n', (1709, 1721), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n'), ((6639, 6688), 'rospy.logdebug', 'rospy.logdebug', (['"""(Planner) Right side is blocked"""'], {}), "('(Planner) Right side is blocked')\n", (6653, 6688), False, 'import rospy\n'), ((6726, 6789), 'b2_logic.odometry_helpers.normalize_theta', 'normalize_theta', (['(self._current_heading + self._turn_radians * 2)'], {}), '(self._current_heading + self._turn_radians * 2)\n', (6741, 6789), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n'), ((6175, 6207), 'math.degrees', 'math.degrees', (['self._heading_goal'], {}), '(self._heading_goal)\n', (6187, 6207), False, 'import math\n'), ((7276, 7319), 'rospy.logdebug', 'rospy.logdebug', (['"""(Planner) left is blocked"""'], {}), "('(Planner) left is blocked')\n", (7290, 7319), False, 'import rospy\n'), ((7357, 7416), 'b2_logic.odometry_helpers.normalize_theta', 'normalize_theta', (['(self._current_heading + self._turn_radians)'], {}), '(self._current_heading + self._turn_radians)\n', (7372, 7416), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n'), ((7927, 7992), 'rospy.logdebug', 'rospy.logdebug', (['"""(Planner) Turn to rear complete, moving forward"""'], {}), "('(Planner) Turn to rear complete, moving forward')\n", (7941, 7992), False, 'import rospy\n'), ((8190, 8211), 'rospy.logerr', 'rospy.logerr', (['message'], {}), '(message)\n', (8202, 8211), False, 'import rospy\n'), ((6930, 6962), 'math.degrees', 'math.degrees', (['self._heading_goal'], {}), '(self._heading_goal)\n', (6942, 6962), False, 'import math\n'), ((1957, 1989), 'math.degrees', 'math.degrees', (['self._heading_goal'], {}), '(self._heading_goal)\n', (1969, 1989), False, 'import math\n'), ((7559, 7591), 'math.degrees', 'math.degrees', (['self._heading_goal'], {}), '(self._heading_goal)\n', (7571, 7591), False, 'import math\n'), ((1878, 1916), 'b2_logic.odometry_helpers.normalize_theta', 'normalize_theta', (['self._current_heading'], {}), '(self._current_heading)\n', (1893, 1916), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n')]
|
# for Coverage
from mock import patch, MagicMock
class TestAll:
def test_channels(self):
from pyEX import DeepChannels
DeepChannels.options()
def test_tops(self):
from pyEX import topsWS
with patch('pyEX.marketdata.ws._stream'):
topsWS()
topsWS('test')
topsWS(['test'])
def test_last(self):
from pyEX import lastWS
with patch('pyEX.marketdata.ws._stream'):
lastWS()
lastWS('test')
def test_deep(self):
from pyEX import deepWS, PyEXception
from pyEX.marketdata.ws import DeepChannels
with patch('pyEX.marketdata.ws._stream'):
deepWS()
deepWS('test')
deepWS('test', 'ssr')
deepWS('test', DeepChannels.SSR)
try:
deepWS('test', 'test')
assert False
except PyEXception:
pass
try:
deepWS('test', ['test'])
assert False
except PyEXception:
pass
try:
deepWS('test', [DeepChannels.SSR, 'test'])
assert False
except PyEXception:
pass
def test_book(self):
from pyEX import bookWS
with patch('pyEX.marketdata.ws._stream'):
bookWS()
bookWS('test')
def test_trades(self):
from pyEX import tradesWS
with patch('pyEX.marketdata.ws._stream'):
tradesWS()
tradesWS('test')
def test_systemEvent(self):
from pyEX import systemEventWS
with patch('pyEX.marketdata.ws._stream'):
systemEventWS()
def test_tradingStatus(self):
from pyEX import tradingStatusWS
with patch('pyEX.marketdata.ws._stream'):
tradingStatusWS()
tradingStatusWS('test')
def test_opHaltStatus(self):
from pyEX import opHaltStatusWS
with patch('pyEX.marketdata.ws._stream'):
opHaltStatusWS()
opHaltStatusWS('test')
def test_ssrStatus(self):
from pyEX import ssrStatusWS
with patch('pyEX.marketdata.ws._stream'):
ssrStatusWS()
ssrStatusWS('test')
def test_securityEvent(self):
from pyEX import securityEventWS
with patch('pyEX.marketdata.ws._stream'):
securityEventWS()
securityEventWS('test')
def test_tradeBreak(self):
from pyEX import tradeBreakWS
with patch('pyEX.marketdata.ws._stream'):
tradeBreakWS()
tradeBreakWS('test')
def test_auction(self):
from pyEX import auctionWS
with patch('pyEX.marketdata.ws._stream'):
auctionWS()
auctionWS('test')
def test_officialPrice(self):
from pyEX import officialPriceWS
with patch('pyEX.marketdata.ws._stream'):
officialPriceWS()
officialPriceWS('test')
|
[
"pyEX.marketdata.ws.DeepChannels.options",
"pyEX.tradingStatusWS",
"pyEX.tradeBreakWS",
"pyEX.tradesWS",
"pyEX.opHaltStatusWS",
"pyEX.systemEventWS",
"mock.patch",
"pyEX.lastWS",
"pyEX.topsWS",
"pyEX.bookWS",
"pyEX.officialPriceWS",
"pyEX.ssrStatusWS",
"pyEX.deepWS",
"pyEX.securityEventWS",
"pyEX.auctionWS"
] |
[((141, 163), 'pyEX.marketdata.ws.DeepChannels.options', 'DeepChannels.options', ([], {}), '()\n', (161, 163), False, 'from pyEX.marketdata.ws import DeepChannels\n'), ((235, 270), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (240, 270), False, 'from mock import patch, MagicMock\n'), ((284, 292), 'pyEX.topsWS', 'topsWS', ([], {}), '()\n', (290, 292), False, 'from pyEX import topsWS\n'), ((305, 319), 'pyEX.topsWS', 'topsWS', (['"""test"""'], {}), "('test')\n", (311, 319), False, 'from pyEX import topsWS\n'), ((332, 348), 'pyEX.topsWS', 'topsWS', (["['test']"], {}), "(['test'])\n", (338, 348), False, 'from pyEX import topsWS\n'), ((420, 455), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (425, 455), False, 'from mock import patch, MagicMock\n'), ((469, 477), 'pyEX.lastWS', 'lastWS', ([], {}), '()\n', (475, 477), False, 'from pyEX import lastWS\n'), ((490, 504), 'pyEX.lastWS', 'lastWS', (['"""test"""'], {}), "('test')\n", (496, 504), False, 'from pyEX import lastWS\n'), ((641, 676), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (646, 676), False, 'from mock import patch, MagicMock\n'), ((690, 698), 'pyEX.deepWS', 'deepWS', ([], {}), '()\n', (696, 698), False, 'from pyEX import deepWS, PyEXception\n'), ((711, 725), 'pyEX.deepWS', 'deepWS', (['"""test"""'], {}), "('test')\n", (717, 725), False, 'from pyEX import deepWS, PyEXception\n'), ((738, 759), 'pyEX.deepWS', 'deepWS', (['"""test"""', '"""ssr"""'], {}), "('test', 'ssr')\n", (744, 759), False, 'from pyEX import deepWS, PyEXception\n'), ((772, 804), 'pyEX.deepWS', 'deepWS', (['"""test"""', 'DeepChannels.SSR'], {}), "('test', DeepChannels.SSR)\n", (778, 804), False, 'from pyEX import deepWS, PyEXception\n'), ((1312, 1347), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (1317, 1347), False, 'from mock import patch, MagicMock\n'), ((1361, 1369), 'pyEX.bookWS', 'bookWS', ([], {}), '()\n', (1367, 1369), False, 'from pyEX import bookWS\n'), ((1382, 1396), 'pyEX.bookWS', 'bookWS', (['"""test"""'], {}), "('test')\n", (1388, 1396), False, 'from pyEX import bookWS\n'), ((1472, 1507), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (1477, 1507), False, 'from mock import patch, MagicMock\n'), ((1521, 1531), 'pyEX.tradesWS', 'tradesWS', ([], {}), '()\n', (1529, 1531), False, 'from pyEX import tradesWS\n'), ((1544, 1560), 'pyEX.tradesWS', 'tradesWS', (['"""test"""'], {}), "('test')\n", (1552, 1560), False, 'from pyEX import tradesWS\n'), ((1646, 1681), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (1651, 1681), False, 'from mock import patch, MagicMock\n'), ((1695, 1710), 'pyEX.systemEventWS', 'systemEventWS', ([], {}), '()\n', (1708, 1710), False, 'from pyEX import systemEventWS\n'), ((1800, 1835), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (1805, 1835), False, 'from mock import patch, MagicMock\n'), ((1849, 1866), 'pyEX.tradingStatusWS', 'tradingStatusWS', ([], {}), '()\n', (1864, 1866), False, 'from pyEX import tradingStatusWS\n'), ((1879, 1902), 'pyEX.tradingStatusWS', 'tradingStatusWS', (['"""test"""'], {}), "('test')\n", (1894, 1902), False, 'from pyEX import tradingStatusWS\n'), ((1990, 2025), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (1995, 2025), False, 'from mock import patch, MagicMock\n'), ((2039, 2055), 'pyEX.opHaltStatusWS', 'opHaltStatusWS', ([], {}), '()\n', (2053, 2055), False, 'from pyEX import opHaltStatusWS\n'), ((2068, 2090), 'pyEX.opHaltStatusWS', 'opHaltStatusWS', (['"""test"""'], {}), "('test')\n", (2082, 2090), False, 'from pyEX import opHaltStatusWS\n'), ((2172, 2207), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (2177, 2207), False, 'from mock import patch, MagicMock\n'), ((2221, 2234), 'pyEX.ssrStatusWS', 'ssrStatusWS', ([], {}), '()\n', (2232, 2234), False, 'from pyEX import ssrStatusWS\n'), ((2247, 2266), 'pyEX.ssrStatusWS', 'ssrStatusWS', (['"""test"""'], {}), "('test')\n", (2258, 2266), False, 'from pyEX import ssrStatusWS\n'), ((2356, 2391), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (2361, 2391), False, 'from mock import patch, MagicMock\n'), ((2405, 2422), 'pyEX.securityEventWS', 'securityEventWS', ([], {}), '()\n', (2420, 2422), False, 'from pyEX import securityEventWS\n'), ((2435, 2458), 'pyEX.securityEventWS', 'securityEventWS', (['"""test"""'], {}), "('test')\n", (2450, 2458), False, 'from pyEX import securityEventWS\n'), ((2542, 2577), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (2547, 2577), False, 'from mock import patch, MagicMock\n'), ((2591, 2605), 'pyEX.tradeBreakWS', 'tradeBreakWS', ([], {}), '()\n', (2603, 2605), False, 'from pyEX import tradeBreakWS\n'), ((2618, 2638), 'pyEX.tradeBreakWS', 'tradeBreakWS', (['"""test"""'], {}), "('test')\n", (2630, 2638), False, 'from pyEX import tradeBreakWS\n'), ((2716, 2751), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (2721, 2751), False, 'from mock import patch, MagicMock\n'), ((2765, 2776), 'pyEX.auctionWS', 'auctionWS', ([], {}), '()\n', (2774, 2776), False, 'from pyEX import auctionWS\n'), ((2789, 2806), 'pyEX.auctionWS', 'auctionWS', (['"""test"""'], {}), "('test')\n", (2798, 2806), False, 'from pyEX import auctionWS\n'), ((2896, 2931), 'mock.patch', 'patch', (['"""pyEX.marketdata.ws._stream"""'], {}), "('pyEX.marketdata.ws._stream')\n", (2901, 2931), False, 'from mock import patch, MagicMock\n'), ((2945, 2962), 'pyEX.officialPriceWS', 'officialPriceWS', ([], {}), '()\n', (2960, 2962), False, 'from pyEX import officialPriceWS\n'), ((2975, 2998), 'pyEX.officialPriceWS', 'officialPriceWS', (['"""test"""'], {}), "('test')\n", (2990, 2998), False, 'from pyEX import officialPriceWS\n'), ((838, 860), 'pyEX.deepWS', 'deepWS', (['"""test"""', '"""test"""'], {}), "('test', 'test')\n", (844, 860), False, 'from pyEX import deepWS, PyEXception\n'), ((976, 1000), 'pyEX.deepWS', 'deepWS', (['"""test"""', "['test']"], {}), "('test', ['test'])\n", (982, 1000), False, 'from pyEX import deepWS, PyEXception\n'), ((1116, 1158), 'pyEX.deepWS', 'deepWS', (['"""test"""', "[DeepChannels.SSR, 'test']"], {}), "('test', [DeepChannels.SSR, 'test'])\n", (1122, 1158), False, 'from pyEX import deepWS, PyEXception\n')]
|
from solution import Item, Cart
def test_empty_cart():
cart = Cart()
cart.cart_list == []
assert f'{cart:short}' == ''
assert f'{cart:long}' == ''
def test_cart_1_item():
cart = Cart()
assert len(cart.cart_list) == 0
item = Item(2, 'grain', 'rice', 1)
cart.add(item)
assert len(cart.cart_list) == 1
assert cart.cart_list[0] == item
assert f'{cart:short}' == 'rice'
assert f'{cart:long}' == ' 2 grain rice @ $1.00...$2.00\n'
def test_cart_2_item2():
cart = Cart()
rice = Item(2, 'grain', 'rice', 1)
bread = Item(1, 'loaf', 'bread', 1)
cart.add(rice)
cart.add(bread)
assert len(cart.cart_list) == 2
assert cart.cart_list[0] == rice
assert cart.cart_list[1] == bread
assert f'{cart:short}' == 'rice, bread'
assert f'{cart:long}' == ' 2 grain rice @ $1.00...$2.00\n 1 loaf bread @ $1.00...$1.00\n'
|
[
"solution.Cart",
"solution.Item"
] |
[((67, 73), 'solution.Cart', 'Cart', ([], {}), '()\n', (71, 73), False, 'from solution import Item, Cart\n'), ((200, 206), 'solution.Cart', 'Cart', ([], {}), '()\n', (204, 206), False, 'from solution import Item, Cart\n'), ((255, 282), 'solution.Item', 'Item', (['(2)', '"""grain"""', '"""rice"""', '(1)'], {}), "(2, 'grain', 'rice', 1)\n", (259, 282), False, 'from solution import Item, Cart\n'), ((520, 526), 'solution.Cart', 'Cart', ([], {}), '()\n', (524, 526), False, 'from solution import Item, Cart\n'), ((538, 565), 'solution.Item', 'Item', (['(2)', '"""grain"""', '"""rice"""', '(1)'], {}), "(2, 'grain', 'rice', 1)\n", (542, 565), False, 'from solution import Item, Cart\n'), ((578, 605), 'solution.Item', 'Item', (['(1)', '"""loaf"""', '"""bread"""', '(1)'], {}), "(1, 'loaf', 'bread', 1)\n", (582, 605), False, 'from solution import Item, Cart\n')]
|
from django.contrib import admin
from .models import Supplier
# Register your models here.
admin.site.register(Supplier)
|
[
"django.contrib.admin.site.register"
] |
[((94, 123), 'django.contrib.admin.site.register', 'admin.site.register', (['Supplier'], {}), '(Supplier)\n', (113, 123), False, 'from django.contrib import admin\n')]
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 <NAME> (The Compiler) <<EMAIL>>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Our own fork of shlex.split with some added and removed features."""
import re
from qutebrowser.utils import log, utils
class ShellLexer:
"""A lexical analyzer class for simple shell-like syntaxes.
Based on Python's shlex, but cleaned up, removed some features, and added
some features useful for qutebrowser.
Attributes:
FIXME
"""
def __init__(self, s):
self.string = s
self.whitespace = ' \t\r'
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.keep = False
self.quoted = False
self.escapedstate = ' '
self.token = ''
self.state = ' '
def reset(self):
"""Reset the state machine state to the defaults."""
self.quoted = False
self.escapedstate = ' '
self.token = ''
self.state = ' '
def __iter__(self): # noqa: C901 pragma: no mccabe
"""Read a raw token from the input stream."""
self.reset()
for nextchar in self.string:
if self.state == ' ':
if self.keep:
self.token += nextchar
if nextchar in self.whitespace:
if self.token or self.quoted:
yield self.token
self.reset()
elif nextchar in self.escape:
self.escapedstate = 'a'
self.state = nextchar
elif nextchar in self.quotes:
self.state = nextchar
else:
self.token = nextchar
self.state = 'a'
elif self.state in self.quotes:
self.quoted = True
if nextchar == self.state:
if self.keep:
self.token += nextchar
self.state = 'a'
elif (nextchar in self.escape and
self.state in self.escapedquotes):
if self.keep:
self.token += nextchar
self.escapedstate = self.state
self.state = nextchar
else:
self.token += nextchar
elif self.state in self.escape:
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if (self.escapedstate in self.quotes and
nextchar != self.state and
nextchar != self.escapedstate and not self.keep):
self.token += self.state
self.token += nextchar
self.state = self.escapedstate
elif self.state == 'a':
if nextchar in self.whitespace:
self.state = ' '
assert self.token or self.quoted
yield self.token
self.reset()
if self.keep:
yield nextchar
elif nextchar in self.quotes:
if self.keep:
self.token += nextchar
self.state = nextchar
elif nextchar in self.escape:
if self.keep:
self.token += nextchar
self.escapedstate = 'a'
self.state = nextchar
else:
self.token += nextchar
else:
raise utils.Unreachable(
"Invalid state {!r}!".format(self.state))
if self.state in self.escape and not self.keep:
self.token += self.state
if self.token or self.quoted:
yield self.token
def split(s, keep=False):
"""Split a string via ShellLexer.
Args:
keep: Whether to keep special chars in the split output.
"""
lexer = ShellLexer(s)
lexer.keep = keep
tokens = list(lexer)
if not tokens:
return []
out = []
spaces = ""
log.shlexer.vdebug( # type: ignore[attr-defined]
"{!r} -> {!r}".format(s, tokens))
for t in tokens:
if t.isspace():
spaces += t
else:
out.append(spaces + t)
spaces = ""
if spaces:
out.append(spaces)
return out
def _combine_ws(parts, whitespace):
"""Combine whitespace in a list with the element following it.
Args:
parts: A list of strings.
whitespace: A string containing what's considered whitespace.
Return:
The modified list.
"""
out = []
ws = ''
for part in parts:
if not part:
continue
elif part in whitespace:
ws += part
else:
out.append(ws + part)
ws = ''
if ws:
out.append(ws)
return out
def simple_split(s, keep=False, maxsplit=None):
"""Split a string on whitespace, optionally keeping the whitespace.
Args:
s: The string to split.
keep: Whether to keep whitespace.
maxsplit: The maximum count of splits.
Return:
A list of split strings.
"""
whitespace = '\n\t '
if maxsplit == 0:
# re.split with maxsplit=0 splits everything, while str.split splits
# nothing (which is the behavior we want).
if keep:
return [s]
else:
return [s.strip(whitespace)]
elif maxsplit is None:
maxsplit = 0
if keep:
pattern = '([' + whitespace + '])'
parts = re.split(pattern, s, maxsplit)
return _combine_ws(parts, whitespace)
else:
pattern = '[' + whitespace + ']'
parts = re.split(pattern, s, maxsplit)
parts[-1] = parts[-1].rstrip()
return [p for p in parts if p]
|
[
"re.split"
] |
[((6357, 6387), 're.split', 're.split', (['pattern', 's', 'maxsplit'], {}), '(pattern, s, maxsplit)\n', (6365, 6387), False, 'import re\n'), ((6501, 6531), 're.split', 're.split', (['pattern', 's', 'maxsplit'], {}), '(pattern, s, maxsplit)\n', (6509, 6531), False, 'import re\n')]
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorlayer.layers.core import Layer
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
__all__ = [
'Stack',
'UnStack',
]
class Stack(Layer):
"""
The :class:`Stack` class is a layer for stacking a list of rank-R tensors into one rank-(R+1) tensor, see `tf.stack() <https://www.tensorflow.org/api_docs/python/tf/stack>`__.
Parameters
----------
axis : int
Dimension along which to concatenate.
name : str
A unique layer name.
Examples
---------
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.placeholder(tf.float32, shape=[None, 30])
>>> net = tl.layers.Input(x, name='input')
>>> net1 = tl.layers.Dense(net, 10, name='dense1')
>>> net2 = tl.layers.Dense(net, 10, name='dense2')
>>> net3 = tl.layers.Dense(net, 10, name='dense3')
>>> net = tl.layers.Stack([net1, net2, net3], axis=1, name='stack')
(?, 3, 10)
"""
def __init__(
self,
axis=1,
name=None, #'stack',
):
# super(Stack, self).__init__(prev_layer=layers, name=name)
super().__init__(name)
self.axis = axis
logging.info("Stack %s: axis: %d" % (self.name, self.axis))
def build(self, inputs):
pass
def forward(self, inputs):
outputs = tf.stack(inputs, axis=self.axis, name=self.name)
return outputs
class UnStack(Layer):
"""
The :class:`UnStack` class is a layer for unstacking the given dimension of a rank-R tensor into rank-(R-1) tensors., see `tf.unstack() <https://www.tensorflow.org/api_docs/python/tf/unstack>`__.
Parameters
----------
num : int or None
The length of the dimension axis. Automatically inferred if None (the default).
axis : int
Dimension along which axis to concatenate.
name : str
A unique layer name.
Returns
-------
list of :class:`Layer`
The list of layer objects unstacked from the input.
"""
def __init__(self, num=None, axis=0, name=None):#'unstack'):
# super(UnStack, self).__init__(prev_layer=prev_layer, name=name)
super().__init__(name)
self.num = num
self.axis = axis
logging.info("UnStack %s: num: %s axis: %d" % (self.name, self.num, self.axis))
def build(self, inputs):
pass
def forward(self, inputs):
outputs = tf.unstack(inputs, num=self.num, axis=self.axis, name=self.name)
return outputs
|
[
"tensorflow.stack",
"tensorlayer.logging.info",
"tensorflow.unstack"
] |
[((1273, 1332), 'tensorlayer.logging.info', 'logging.info', (["('Stack %s: axis: %d' % (self.name, self.axis))"], {}), "('Stack %s: axis: %d' % (self.name, self.axis))\n", (1285, 1332), False, 'from tensorlayer import logging\n'), ((1426, 1474), 'tensorflow.stack', 'tf.stack', (['inputs'], {'axis': 'self.axis', 'name': 'self.name'}), '(inputs, axis=self.axis, name=self.name)\n', (1434, 1474), True, 'import tensorflow as tf\n'), ((2329, 2408), 'tensorlayer.logging.info', 'logging.info', (["('UnStack %s: num: %s axis: %d' % (self.name, self.num, self.axis))"], {}), "('UnStack %s: num: %s axis: %d' % (self.name, self.num, self.axis))\n", (2341, 2408), False, 'from tensorlayer import logging\n'), ((2502, 2566), 'tensorflow.unstack', 'tf.unstack', (['inputs'], {'num': 'self.num', 'axis': 'self.axis', 'name': 'self.name'}), '(inputs, num=self.num, axis=self.axis, name=self.name)\n', (2512, 2566), True, 'import tensorflow as tf\n')]
|
# Copyright 2021 AI Redefined Inc. <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import queue
import torch
from cogment_verse_torch_agents.muzero.schedule import LinearScheduleWithWarmup
from cogment_verse_torch_agents.muzero.utils import MuZeroWorker, flush_queue
def get_from_queue(q, device): # pylint: disable=invalid-name
batch = q.get(timeout=1.0)
for item in batch:
item.to(device)
return batch
class TrainWorker(MuZeroWorker):
def __init__(self, agent, config, manager):
super().__init__(config, manager)
self.agent = agent
# limit to small size so that training and sample generation don't get out of sync
max_prefetch_batch = 128
self.batch_queue = manager.Queue(max_prefetch_batch)
self.results_queue = manager.Queue(max_prefetch_batch)
self.steps_per_update = config.training.model_publication_interval
async def main(self):
# original agent sent from another process, we want to work with a copy
agent = copy.deepcopy(self.agent)
agent.set_device(self.config.train_device)
step = 0
lr_schedule = LinearScheduleWithWarmup(
self.config.training.optimizer.learning_rate,
self.config.training.optimizer.min_learning_rate,
self.config.training.optimizer.lr_decay_steps,
self.config.training.optimizer.lr_warmup_steps,
)
epsilon_schedule = LinearScheduleWithWarmup(
self.config.mcts.exploration_epsilon,
self.config.mcts.epsilon_min,
self.config.mcts.epsilon_decay_steps,
0,
)
temperature_schedule = LinearScheduleWithWarmup(
self.config.mcts.temperature,
self.config.mcts.min_temperature,
self.config.mcts.temperature_decay_steps,
0,
)
while not self.done.value:
try:
batch = get_from_queue(self.batch_queue, self.config.train_device)
except queue.Empty:
continue
lr = lr_schedule.update() # pylint: disable=invalid-name
epsilon = epsilon_schedule.update()
temperature = temperature_schedule.update()
agent.params.training.optimizer.learning_rate = lr
agent.params.mcts.exploration_epsilon = epsilon
agent.params.mcts.temperature = temperature
info = agent.learn(batch)
del batch
info = dict(
lr=lr,
epsilon=epsilon,
temperature=temperature,
**info,
)
for key, val in info.items():
if isinstance(val, torch.Tensor):
info[key] = val.detach().cpu().numpy().item()
step += 1
if step % self.steps_per_update == 0:
self.results_queue.put((info, agent.serialize_to_buffer()))
else:
self.results_queue.put((info, None))
def cleanup(self):
flush_queue(self.results_queue)
|
[
"copy.deepcopy",
"cogment_verse_torch_agents.muzero.utils.flush_queue",
"cogment_verse_torch_agents.muzero.schedule.LinearScheduleWithWarmup"
] |
[((1545, 1570), 'copy.deepcopy', 'copy.deepcopy', (['self.agent'], {}), '(self.agent)\n', (1558, 1570), False, 'import copy\n'), ((1662, 1887), 'cogment_verse_torch_agents.muzero.schedule.LinearScheduleWithWarmup', 'LinearScheduleWithWarmup', (['self.config.training.optimizer.learning_rate', 'self.config.training.optimizer.min_learning_rate', 'self.config.training.optimizer.lr_decay_steps', 'self.config.training.optimizer.lr_warmup_steps'], {}), '(self.config.training.optimizer.learning_rate, self\n .config.training.optimizer.min_learning_rate, self.config.training.\n optimizer.lr_decay_steps, self.config.training.optimizer.lr_warmup_steps)\n', (1686, 1887), False, 'from cogment_verse_torch_agents.muzero.schedule import LinearScheduleWithWarmup\n'), ((1965, 2103), 'cogment_verse_torch_agents.muzero.schedule.LinearScheduleWithWarmup', 'LinearScheduleWithWarmup', (['self.config.mcts.exploration_epsilon', 'self.config.mcts.epsilon_min', 'self.config.mcts.epsilon_decay_steps', '(0)'], {}), '(self.config.mcts.exploration_epsilon, self.config.\n mcts.epsilon_min, self.config.mcts.epsilon_decay_steps, 0)\n', (1989, 2103), False, 'from cogment_verse_torch_agents.muzero.schedule import LinearScheduleWithWarmup\n'), ((2190, 2328), 'cogment_verse_torch_agents.muzero.schedule.LinearScheduleWithWarmup', 'LinearScheduleWithWarmup', (['self.config.mcts.temperature', 'self.config.mcts.min_temperature', 'self.config.mcts.temperature_decay_steps', '(0)'], {}), '(self.config.mcts.temperature, self.config.mcts.\n min_temperature, self.config.mcts.temperature_decay_steps, 0)\n', (2214, 2328), False, 'from cogment_verse_torch_agents.muzero.schedule import LinearScheduleWithWarmup\n'), ((3563, 3594), 'cogment_verse_torch_agents.muzero.utils.flush_queue', 'flush_queue', (['self.results_queue'], {}), '(self.results_queue)\n', (3574, 3594), False, 'from cogment_verse_torch_agents.muzero.utils import MuZeroWorker, flush_queue\n')]
|
from sslcommerz_python_api import SSLCSession
from decimal import Decimal
def response():
mypayment = SSLCSession(sslc_is_sandbox=True, sslc_store_id='your_sslc_store_id', sslc_store_pass='<PASSWORD>c_store_<PASSWORD>')
mypayment.set_urls(success_url='example.com/success', fail_url='example.com/failed', cancel_url='example.com/cancel', ipn_url='example.com/payment_notification')
mypayment.set_product_integration(total_amount=Decimal('20.20'), currency='BDT', product_category='clothing', product_name='demo-product', num_of_item=2, shipping_method='YES', product_profile='None')
mypayment.set_customer_info(name='<NAME>', email='<EMAIL>', address1='demo address', address2='demo address 2', city='Dhaka', postcode='1207', country='Bangladesh', phone='01711111111')
mypayment.set_shipping_info(shipping_to='demo customer', address='demo address', city='Dhaka', postcode='1209', country='Bangladesh')
# If you want to post some additional values
mypayment.set_additional_values(value_a='<EMAIL>', value_b='portalcustomerid', value_c='1234', value_d='uuid')
return mypayment.init_payment()
def test_response():
assert test_response is not None
|
[
"sslcommerz_python_api.SSLCSession",
"decimal.Decimal"
] |
[((105, 226), 'sslcommerz_python_api.SSLCSession', 'SSLCSession', ([], {'sslc_is_sandbox': '(True)', 'sslc_store_id': '"""your_sslc_store_id"""', 'sslc_store_pass': '"""<PASSWORD>c_store_<PASSWORD>"""'}), "(sslc_is_sandbox=True, sslc_store_id='your_sslc_store_id',\n sslc_store_pass='<PASSWORD>c_store_<PASSWORD>')\n", (116, 226), False, 'from sslcommerz_python_api import SSLCSession\n'), ((436, 452), 'decimal.Decimal', 'Decimal', (['"""20.20"""'], {}), "('20.20')\n", (443, 452), False, 'from decimal import Decimal\n')]
|
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.gridspec as gridspec
import numpy as np
def isSampleFree(sample, obs, dimW):
for o in range(0, obs.shape[0] // (2 * dimW)):
isFree = 0
for d in range(0, sample.shape[0]):
if (sample[d] < obs[2 * dimW * o + d] or sample[d] > obs[2 * dimW * o + d + dimW]):
isFree = 1
break
if isFree == 0:
return 0
return 1
def gap2obs(condition):
dw = 0.1
dimW = 3
gap1 = condition[0:3]
gap2 = condition[3:6]
gap3 = condition[6:9]
obs1 = [0, gap1[1] - dw, -0.5, gap1[0], gap1[1], 1.5]
obs2 = [gap2[0] - dw, 0, -0.5, gap2[0], gap2[1], 1.5]
obs3 = [gap2[0] - dw, gap2[1] + dw, -0.5, gap2[0], 1, 1.5]
obs4 = [gap1[0] + dw, gap1[1] - dw, -0.5, gap3[0], gap1[1], 1.5]
obs5 = [gap3[0] + dw, gap1[1] - dw, -0.5, 1, gap1[1], 1.5]
obsBounds = [-0.1, -0.1, -0.5, 0, 1.1, 1.5,
-0.1, -0.1, -0.5, 1.1, 0, 1.5,
-0.1, 1, -0.5, 1.1, 1.1, 1.5,
1, -0.1, -0.5, 1.1, 1.1, 1.5, ]
obs = np.concatenate((obs1, obs2, obs3, obs4, obs5, obsBounds), axis=0)
return obs, dimW
def getOccGrid(gridSize):
gridPointsRange = np.linspace(0, 1, num=gridSize)
occGridSamples = np.zeros([gridSize * gridSize, 2])
idx = 0
for i in gridPointsRange:
for j in gridPointsRange:
occGridSamples[idx, 0] = i
occGridSamples[idx, 1] = j
idx += 1
return occGridSamples
def gap2occ(conditions, gridSize):
obs, dimW = gap2obs(conditions)
occGridSamples = getOccGrid(gridSize)
occGrid = np.zeros(gridSize * gridSize)
for i in range(0, gridSize * gridSize):
occGrid[i] = isSampleFree(occGridSamples[i, :], obs, dimW)
return occGrid
def plotCondition(condition):
fig1 = plt.figure(figsize=(10, 6), dpi=80)
ax1 = fig1.add_subplot(111, aspect='equal')
obs, dimW = gap2obs(condition)
for i in range(0, obs.shape[0] // (2 * dimW)): # plot obstacle patches
ax1.add_patch(
patches.Rectangle(
(obs[i * 2 * dimW], obs[i * 2 * dimW + 1]), # (x,y)
obs[i * 2 * dimW + dimW] - obs[i * 2 * dimW], # width
obs[i * 2 * dimW + dimW + 1] - obs[i * 2 * dimW + 1], # height
alpha=0.6
))
gridSize = 11
occGrid = gap2occ(condition, gridSize)
occGridSamples = getOccGrid(gridSize)
for i in range(0, gridSize * gridSize): # plot occupancy grid
if occGrid[i] == 0:
plt.scatter(occGridSamples[i, 0], occGridSamples[i, 1], color="red", s=70, alpha=0.8)
else:
plt.scatter(occGridSamples[i, 0], occGridSamples[i, 1], color="green", s=70, alpha=0.8)
init = condition[9:15]
goal = condition[15:21]
plt.scatter(init[0], init[1], color="red", s=250, edgecolors='black') # init
plt.scatter(goal[0], goal[1], color="blue", s=250, edgecolors='black') # goal
plt.show()
def plotSample(s, condition):
fig1 = plt.figure(figsize=(10, 6), dpi=80)
ax1 = fig1.add_subplot(111, aspect='equal')
plt.scatter(s[:, 0], s[:, 1], color="green", s=70, alpha=0.1)
obs, dimW = gap2obs(condition)
for i in range(0, obs.shape[0] // (2 * dimW)): # plot obstacle patches
ax1.add_patch(
patches.Rectangle(
(obs[i * 2 * dimW], obs[i * 2 * dimW + 1]), # (x,y)
obs[i * 2 * dimW + dimW] - obs[i * 2 * dimW], # width
obs[i * 2 * dimW + dimW + 1] - obs[i * 2 * dimW + 1], # height
alpha=0.6
))
gridSize = 11
occGrid = gap2occ(condition, gridSize)
occGridSamples = getOccGrid(gridSize)
for i in range(0, gridSize * gridSize): # plot occupancy grid
if occGrid[i] == 0:
plt.scatter(occGridSamples[i, 0], occGridSamples[i, 1], color="red", s=70, alpha=0.8)
else:
plt.scatter(occGridSamples[i, 0], occGridSamples[i, 1], color="green", s=70, alpha=0.8)
init = condition[9:15]
goal = condition[15:21]
plt.scatter(init[0], init[1], color="red", s=250, edgecolors='black') # init
plt.scatter(goal[0], goal[1], color="blue", s=250, edgecolors='black') # goal
plt.show()
def plotSpeed(s, c):
plt.figure(figsize=(10, 6), dpi=80)
viz1 = 1
viz2 = 4
dim = 6
plt.scatter(s[:, viz1], s[:, viz2], color="green", s=70, alpha=0.1)
plt.scatter(c[viz1 + 9], c[viz2 + 9], color="red", s=250, edgecolors='black') # init
plt.scatter(c[viz1 + 9 + dim], c[viz2 + 9 + dim], color="blue", s=500, edgecolors='black') # goal
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.concatenate"
] |
[((1127, 1192), 'numpy.concatenate', 'np.concatenate', (['(obs1, obs2, obs3, obs4, obs5, obsBounds)'], {'axis': '(0)'}), '((obs1, obs2, obs3, obs4, obs5, obsBounds), axis=0)\n', (1141, 1192), True, 'import numpy as np\n'), ((1264, 1295), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': 'gridSize'}), '(0, 1, num=gridSize)\n', (1275, 1295), True, 'import numpy as np\n'), ((1317, 1351), 'numpy.zeros', 'np.zeros', (['[gridSize * gridSize, 2]'], {}), '([gridSize * gridSize, 2])\n', (1325, 1351), True, 'import numpy as np\n'), ((1683, 1712), 'numpy.zeros', 'np.zeros', (['(gridSize * gridSize)'], {}), '(gridSize * gridSize)\n', (1691, 1712), True, 'import numpy as np\n'), ((1886, 1921), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)', 'dpi': '(80)'}), '(figsize=(10, 6), dpi=80)\n', (1896, 1921), True, 'import matplotlib.pyplot as plt\n'), ((2866, 2935), 'matplotlib.pyplot.scatter', 'plt.scatter', (['init[0]', 'init[1]'], {'color': '"""red"""', 's': '(250)', 'edgecolors': '"""black"""'}), "(init[0], init[1], color='red', s=250, edgecolors='black')\n", (2877, 2935), True, 'import matplotlib.pyplot as plt\n'), ((2948, 3018), 'matplotlib.pyplot.scatter', 'plt.scatter', (['goal[0]', 'goal[1]'], {'color': '"""blue"""', 's': '(250)', 'edgecolors': '"""black"""'}), "(goal[0], goal[1], color='blue', s=250, edgecolors='black')\n", (2959, 3018), True, 'import matplotlib.pyplot as plt\n'), ((3031, 3041), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3039, 3041), True, 'import matplotlib.pyplot as plt\n'), ((3085, 3120), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)', 'dpi': '(80)'}), '(figsize=(10, 6), dpi=80)\n', (3095, 3120), True, 'import matplotlib.pyplot as plt\n'), ((3174, 3235), 'matplotlib.pyplot.scatter', 'plt.scatter', (['s[:, 0]', 's[:, 1]'], {'color': '"""green"""', 's': '(70)', 'alpha': '(0.1)'}), "(s[:, 0], s[:, 1], color='green', s=70, alpha=0.1)\n", (3185, 3235), True, 'import matplotlib.pyplot as plt\n'), ((4133, 4202), 'matplotlib.pyplot.scatter', 'plt.scatter', (['init[0]', 'init[1]'], {'color': '"""red"""', 's': '(250)', 'edgecolors': '"""black"""'}), "(init[0], init[1], color='red', s=250, edgecolors='black')\n", (4144, 4202), True, 'import matplotlib.pyplot as plt\n'), ((4215, 4285), 'matplotlib.pyplot.scatter', 'plt.scatter', (['goal[0]', 'goal[1]'], {'color': '"""blue"""', 's': '(250)', 'edgecolors': '"""black"""'}), "(goal[0], goal[1], color='blue', s=250, edgecolors='black')\n", (4226, 4285), True, 'import matplotlib.pyplot as plt\n'), ((4298, 4308), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4306, 4308), True, 'import matplotlib.pyplot as plt\n'), ((4336, 4371), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)', 'dpi': '(80)'}), '(figsize=(10, 6), dpi=80)\n', (4346, 4371), True, 'import matplotlib.pyplot as plt\n'), ((4414, 4481), 'matplotlib.pyplot.scatter', 'plt.scatter', (['s[:, viz1]', 's[:, viz2]'], {'color': '"""green"""', 's': '(70)', 'alpha': '(0.1)'}), "(s[:, viz1], s[:, viz2], color='green', s=70, alpha=0.1)\n", (4425, 4481), True, 'import matplotlib.pyplot as plt\n'), ((4486, 4563), 'matplotlib.pyplot.scatter', 'plt.scatter', (['c[viz1 + 9]', 'c[viz2 + 9]'], {'color': '"""red"""', 's': '(250)', 'edgecolors': '"""black"""'}), "(c[viz1 + 9], c[viz2 + 9], color='red', s=250, edgecolors='black')\n", (4497, 4563), True, 'import matplotlib.pyplot as plt\n'), ((4576, 4670), 'matplotlib.pyplot.scatter', 'plt.scatter', (['c[viz1 + 9 + dim]', 'c[viz2 + 9 + dim]'], {'color': '"""blue"""', 's': '(500)', 'edgecolors': '"""black"""'}), "(c[viz1 + 9 + dim], c[viz2 + 9 + dim], color='blue', s=500,\n edgecolors='black')\n", (4587, 4670), True, 'import matplotlib.pyplot as plt\n'), ((4679, 4689), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4687, 4689), True, 'import matplotlib.pyplot as plt\n'), ((2116, 2296), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(obs[i * 2 * dimW], obs[i * 2 * dimW + 1])', '(obs[i * 2 * dimW + dimW] - obs[i * 2 * dimW])', '(obs[i * 2 * dimW + dimW + 1] - obs[i * 2 * dimW + 1])'], {'alpha': '(0.6)'}), '((obs[i * 2 * dimW], obs[i * 2 * dimW + 1]), obs[i * 2 *\n dimW + dimW] - obs[i * 2 * dimW], obs[i * 2 * dimW + dimW + 1] - obs[i *\n 2 * dimW + 1], alpha=0.6)\n', (2133, 2296), True, 'import matplotlib.patches as patches\n'), ((2607, 2696), 'matplotlib.pyplot.scatter', 'plt.scatter', (['occGridSamples[i, 0]', 'occGridSamples[i, 1]'], {'color': '"""red"""', 's': '(70)', 'alpha': '(0.8)'}), "(occGridSamples[i, 0], occGridSamples[i, 1], color='red', s=70,\n alpha=0.8)\n", (2618, 2696), True, 'import matplotlib.pyplot as plt\n'), ((2719, 2810), 'matplotlib.pyplot.scatter', 'plt.scatter', (['occGridSamples[i, 0]', 'occGridSamples[i, 1]'], {'color': '"""green"""', 's': '(70)', 'alpha': '(0.8)'}), "(occGridSamples[i, 0], occGridSamples[i, 1], color='green', s=70,\n alpha=0.8)\n", (2730, 2810), True, 'import matplotlib.pyplot as plt\n'), ((3383, 3563), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(obs[i * 2 * dimW], obs[i * 2 * dimW + 1])', '(obs[i * 2 * dimW + dimW] - obs[i * 2 * dimW])', '(obs[i * 2 * dimW + dimW + 1] - obs[i * 2 * dimW + 1])'], {'alpha': '(0.6)'}), '((obs[i * 2 * dimW], obs[i * 2 * dimW + 1]), obs[i * 2 *\n dimW + dimW] - obs[i * 2 * dimW], obs[i * 2 * dimW + dimW + 1] - obs[i *\n 2 * dimW + 1], alpha=0.6)\n', (3400, 3563), True, 'import matplotlib.patches as patches\n'), ((3873, 3962), 'matplotlib.pyplot.scatter', 'plt.scatter', (['occGridSamples[i, 0]', 'occGridSamples[i, 1]'], {'color': '"""red"""', 's': '(70)', 'alpha': '(0.8)'}), "(occGridSamples[i, 0], occGridSamples[i, 1], color='red', s=70,\n alpha=0.8)\n", (3884, 3962), True, 'import matplotlib.pyplot as plt\n'), ((3985, 4076), 'matplotlib.pyplot.scatter', 'plt.scatter', (['occGridSamples[i, 0]', 'occGridSamples[i, 1]'], {'color': '"""green"""', 's': '(70)', 'alpha': '(0.8)'}), "(occGridSamples[i, 0], occGridSamples[i, 1], color='green', s=70,\n alpha=0.8)\n", (3996, 4076), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/python
# coding: utf-8
import requests
import json
import smtplib
import datetime
import config
def send_mail(recipients, subject, body):
headers = [
"From: " + config.MAIL_SENDER,
"Subject: " + subject,
"To: " + (', '.join(recipients) if isinstance(recipients, list) else recipients),
"MIME-Version: 1.0",
"Content-Type: text/html"
]
headers = "\r\n".join(headers)
session = smtplib.SMTP(config.MAIL_SERVER, config.MAIL_PORT)
session.ehlo()
session.starttls()
session.login(config.MAIL_USER, config.MAIL_PASSWORD)
session.sendmail(config.MAIL_SENDER, recipients, headers + "\r\n\r\n" + body)
session.quit()
# https://developer.yahoo.com/weather/
def query_weather(woeid):
payload = {
'q': 'select * from weather.forecast where woeid ="{}" and u="c"'.format(woeid),
'format': 'json'
}
response = requests.get('https://query.yahooapis.com/v1/public/yql', params=payload)
return json.loads(response.text)
def setup_mail_data(weather_data):
recipients = config.MAIL_RECIPIENTS
if weather_data['query']['count'] == 0 or weather_data['query']['results'] is None:
raise ValueError("No results found")
item = weather_data['query']['results']['channel']['item']
item['description'] = item['description'].replace('<![CDATA[', '').replace(']]>', '')
subject = "Madrid weather forecast - {}".format(datetime.date.today())
body = "{} {}".format(item['title'], item['description'])
return recipients, subject, body
def main():
# Selecting place: select woeid from geo.places(1) where text="madrid"
madrid_woeid = '766273'
data = query_weather(woeid=madrid_woeid)
recipients, subject, body = setup_mail_data(data)
send_mail(recipients=recipients, subject=subject, body=body)
if __name__ == "__main__":
main()
|
[
"requests.get",
"datetime.date.today",
"json.loads",
"smtplib.SMTP"
] |
[((447, 497), 'smtplib.SMTP', 'smtplib.SMTP', (['config.MAIL_SERVER', 'config.MAIL_PORT'], {}), '(config.MAIL_SERVER, config.MAIL_PORT)\n', (459, 497), False, 'import smtplib\n'), ((918, 991), 'requests.get', 'requests.get', (['"""https://query.yahooapis.com/v1/public/yql"""'], {'params': 'payload'}), "('https://query.yahooapis.com/v1/public/yql', params=payload)\n", (930, 991), False, 'import requests\n'), ((1003, 1028), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1013, 1028), False, 'import json\n'), ((1445, 1466), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1464, 1466), False, 'import datetime\n')]
|
#!/usr/bin/env python3
"""Run precommit checks on the repository."""
import argparse
import os
import pathlib
import re
import subprocess
import sys
def main() -> int:
"""Execute the main routine."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--overwrite",
help="Overwrites the unformatted source files with the "
"well-formatted code in place. If not set, "
"an exception is raised if any of the files do not conform "
"to the style guide.",
action='store_true')
args = parser.parse_args()
overwrite = bool(args.overwrite)
repo_root = pathlib.Path(__file__).parent
# yapf: disable
source_files = (
sorted((repo_root / "temppathlib").glob("**/*.py")) +
sorted((repo_root / "tests").glob("**/*.py")))
# yapf: enable
if overwrite:
print('Removing trailing whitespace...')
for pth in source_files:
pth.write_text(re.sub(r'[ \t]+$', '', pth.read_text(), flags=re.MULTILINE))
print("YAPF'ing...")
yapf_targets = ["tests", "temppathlib", "setup.py", "precommit.py"]
if overwrite:
# yapf: disable
subprocess.check_call(
["yapf", "--in-place", "--style=style.yapf", "--recursive"] +
yapf_targets,
cwd=str(repo_root))
# yapf: enable
else:
# yapf: disable
subprocess.check_call(
["yapf", "--diff", "--style=style.yapf", "--recursive"] +
yapf_targets,
cwd=str(repo_root))
# yapf: enable
print("Mypy'ing...")
subprocess.check_call(["mypy", "--strict", "temppathlib", "tests"], cwd=str(repo_root))
print("Isort'ing...")
# yapf: disable
isort_files = map(str, source_files)
# yapf: enable
# yapf: disable
subprocess.check_call(
["isort", "--project", "temppathlib", '--line-width', '120'] +
([] if overwrite else ['--check-only']) +
[str(pth) for pth in source_files])
# yapf: enable
print("Pydocstyle'ing...")
subprocess.check_call(["pydocstyle", "temppathlib"], cwd=str(repo_root))
print("Pylint'ing...")
subprocess.check_call(["pylint", "--rcfile=pylint.rc", "tests", "temppathlib"], cwd=str(repo_root))
print("Testing...")
env = os.environ.copy()
env['ICONTRACT_SLOW'] = 'true'
# yapf: disable
subprocess.check_call(
["coverage", "run",
"--source", "temppathlib",
"-m", "unittest", "discover", "tests"],
cwd=str(repo_root),
env=env)
# yapf: enable
subprocess.check_call(["coverage", "report"])
print("Doctesting...")
doctest_files = ([repo_root / "README.rst"] + sorted((repo_root / "temppathlib").glob("**/*.py")))
for pth in doctest_files:
subprocess.check_call([sys.executable, "-m", "doctest", str(pth)])
print("Checking setup.py sdist ...")
subprocess.check_call([sys.executable, "setup.py", "sdist"], cwd=str(repo_root))
print("Checking with twine...")
subprocess.check_call(["twine", "check", "dist/*"], cwd=str(repo_root))
return 0
if __name__ == "__main__":
sys.exit(main())
|
[
"os.environ.copy",
"argparse.ArgumentParser",
"subprocess.check_call",
"pathlib.Path"
] |
[((219, 244), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (242, 244), False, 'import argparse\n'), ((2315, 2332), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (2330, 2332), False, 'import os\n'), ((2598, 2643), 'subprocess.check_call', 'subprocess.check_call', (["['coverage', 'report']"], {}), "(['coverage', 'report'])\n", (2619, 2643), False, 'import subprocess\n'), ((627, 649), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (639, 649), False, 'import pathlib\n')]
|
import os
import sys
one_level_up = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(1, one_level_up)
from app import config
from app.tests import test_utils
from relational.scripts import generator
from relational.scripts import pred_builder
from analysis import analysis
from analysis import connections
from analysis import evaluation
from analysis import interpretability
from analysis import label
from analysis import purity
from analysis import util
|
[
"os.path.dirname",
"sys.path.insert"
] |
[((99, 131), 'sys.path.insert', 'sys.path.insert', (['(1)', 'one_level_up'], {}), '(1, one_level_up)\n', (114, 131), False, 'import sys\n'), ((65, 90), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (80, 90), False, 'import os\n')]
|
# -*- coding:utf8 -*-
from flask import Flask
from flask import jsonify
from flask import render_template
from flask import request
app = Flask(__name__)
from data_adapter import *
import psycopg2
from models.datasource import DataSource
@app.route('/config')
def config():
return render_template('react_test.html')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/table')
def table():
return render_template('table.html')
@app.route('/layout')
def layout():
return render_template('layout.html')
@app.route('/datasource_config')
def form():
return render_template('datasource_config.html')
from models.meta_data import MetaData
# 保有存配置信息
@app.route('/config_list')
def config_list():
datasources = MetaData().get_all_datasource()
return render_template('datasource_config_list.html', datasources=datasources)
@app.route('/json')
def json():
pass
@app.route('/semantic/menu')
def menu():
return render_template('semantic/menu.html')
@app.route('/datasource')
def datasource():
return render_template('datasource.html')
@app.route('/chart/config')
def chart_config():
datasources = MetaData().get_all_datasource()
print(datasources)
return render_template('chart/two-dimension.html', datasources=datasources)
@app.route('/monitor')
def monitor():
return render_template('monitor/index.html')
@app.route('/test_query', methods=['POST'])
def test_query():
query = request.form['query']
datasource = request.form['datasource']
conn = psycopg2.connect(host='192.168.232.11', port=5433, database='xiaoya_crm', user='trace', password='<PASSWORD>')
cursor = conn.cursor()
cursor.execute(query)
data = cursor.fetchall()
return jsonify({'result': query, 'from': 'server',
'columns': [{}],
'description': [row[0] for row in cursor.description],
'example_data': data
})
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
# python -m SimpleHTTPServer 8080
|
[
"flask.Flask",
"flask.jsonify",
"flask.render_template",
"models.meta_data.MetaData",
"psycopg2.connect"
] |
[((139, 154), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (144, 154), False, 'from flask import Flask\n'), ((288, 322), 'flask.render_template', 'render_template', (['"""react_test.html"""'], {}), "('react_test.html')\n", (303, 322), False, 'from flask import render_template\n'), ((365, 394), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (380, 394), False, 'from flask import render_template\n'), ((442, 471), 'flask.render_template', 'render_template', (['"""table.html"""'], {}), "('table.html')\n", (457, 471), False, 'from flask import render_template\n'), ((521, 551), 'flask.render_template', 'render_template', (['"""layout.html"""'], {}), "('layout.html')\n", (536, 551), False, 'from flask import render_template\n'), ((610, 651), 'flask.render_template', 'render_template', (['"""datasource_config.html"""'], {}), "('datasource_config.html')\n", (625, 651), False, 'from flask import render_template\n'), ((809, 880), 'flask.render_template', 'render_template', (['"""datasource_config_list.html"""'], {'datasources': 'datasources'}), "('datasource_config_list.html', datasources=datasources)\n", (824, 880), False, 'from flask import render_template\n'), ((978, 1015), 'flask.render_template', 'render_template', (['"""semantic/menu.html"""'], {}), "('semantic/menu.html')\n", (993, 1015), False, 'from flask import render_template\n'), ((1073, 1107), 'flask.render_template', 'render_template', (['"""datasource.html"""'], {}), "('datasource.html')\n", (1088, 1107), False, 'from flask import render_template\n'), ((1242, 1310), 'flask.render_template', 'render_template', (['"""chart/two-dimension.html"""'], {'datasources': 'datasources'}), "('chart/two-dimension.html', datasources=datasources)\n", (1257, 1310), False, 'from flask import render_template\n'), ((1362, 1399), 'flask.render_template', 'render_template', (['"""monitor/index.html"""'], {}), "('monitor/index.html')\n", (1377, 1399), False, 'from flask import render_template\n'), ((1553, 1667), 'psycopg2.connect', 'psycopg2.connect', ([], {'host': '"""192.168.232.11"""', 'port': '(5433)', 'database': '"""xiaoya_crm"""', 'user': '"""trace"""', 'password': '"""<PASSWORD>"""'}), "(host='192.168.232.11', port=5433, database='xiaoya_crm',\n user='trace', password='<PASSWORD>')\n", (1569, 1667), False, 'import psycopg2\n'), ((1758, 1900), 'flask.jsonify', 'jsonify', (["{'result': query, 'from': 'server', 'columns': [{}], 'description': [row[0] for\n row in cursor.description], 'example_data': data}"], {}), "({'result': query, 'from': 'server', 'columns': [{}], 'description':\n [row[0] for row in cursor.description], 'example_data': data})\n", (1765, 1900), False, 'from flask import jsonify\n'), ((766, 776), 'models.meta_data.MetaData', 'MetaData', ([], {}), '()\n', (774, 776), False, 'from models.meta_data import MetaData\n'), ((1176, 1186), 'models.meta_data.MetaData', 'MetaData', ([], {}), '()\n', (1184, 1186), False, 'from models.meta_data import MetaData\n')]
|
import sys
import inspect
import functools
import re
from codetiming import Timer
sys.path.insert(0, 'D:\\projects\\aoc2020\\')
from helper import loadingUtils, pretty
DAY = 4
def get_path():
return "day{:02d}".format(DAY)
def string_to_dict(line: str):
out = {}
for field in line.split(" "):
k, v = field.split(":")
out[k] = v
return out
def complete_passport(passport: str):
required_fields = ["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"]
optional_fields = ["cid"]
valid = False
# str to dict also removes duplicate fields
pass_dict = string_to_dict(passport)
# print(passport)
# print(pass_dict)
number_of_required = functools.reduce(lambda x,y: x + (1 if y in pass_dict else 0), required_fields, 0)
# print(number_of_required)
if number_of_required == len(required_fields):
valid = True
else:
return False
# check optional fields
return valid
def is_year(in_str: str):
return bool(re.search("^\\d{4}$", in_str))
def validate_passport(passport: str):
"""
Allowed Values:
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
"""
if not complete_passport(passport):
print("passport not complete")
return False
pass_dict = string_to_dict(passport)
# validate byr
byr_s = pass_dict["byr"]
if not is_year(byr_s):
print("byr is no valid year")
return False
byr = int(byr_s)
#print("byr = {}".format(byr))
if byr < 1920 or byr > 2002:
print("byr not in range")
return False
# validate iyr
iyr_s = pass_dict["iyr"]
if not is_year(iyr_s):
print("iyr is no valid year")
return False
iyr = int(iyr_s)
#print("iyr = {}".format(iyr))
if iyr < 2010 or iyr > 2020:
print("byr not in range")
return False
# validate eyr
eyr_s = pass_dict["eyr"]
if not is_year(eyr_s):
print("eyr is no valid year")
return False
eyr = int(eyr_s)
#print("eyr = {}".format(eyr))
if eyr < 2020 or eyr > 2030:
print("eyr not in range")
return False
# validate hgt
hgt_s = pass_dict["hgt"]
match = re.search("^(\d{2,3})(cm|in)$", hgt_s)
if not bool(match):
print("hgt no valid format")
return False
size = int(match.group(1))
unit = match.group(2)
#print("hgt = {} {}".format(size, unit))
if unit == "cm":
if size < 150 or size > 193:
print("Size not in Range")
return False
if unit == "in":
if size < 59 or size > 76:
print("Size not in Range")
return False
# validate hcl
hcl_s = pass_dict["hcl"]
#print("hcl = {}".format(hcl_s))
if not bool(re.search("^#[0-9a-f]{6}$",hcl_s )):
print("hcl no valid format")
return False
# validate ecl
ecl_s = pass_dict["ecl"]
#print("ecl = {}".format(ecl_s))
if ecl_s not in [ "amb", "blu", "brn", "gry", "grn", "hzl", "oth"] :
print("ecl no valid value")
return False
# validate pid
pid_s = pass_dict["pid"]
#print("pid = {}".format(pid_s))
if not bool(re.search("^[0-9]{9}$", pid_s)):
print("pid no valid format")
return False
return True
@Timer()
def run_part_1(in_file: str, debug: bool = False) -> int:
pretty.printHeader(DAY, 1, inspect.stack()[0].function, in_file)
result = 0
passports = loadingUtils.import_multiline(in_file)
if debug: pretty.print2DMap(passports)
count = 0
for passport in passports:
if complete_passport(passport):
count += 1
# code here
result = count
print("Result = {}".format(result))
return result
@Timer()
def run_part_2(in_file: str, debug: bool = False) -> int:
pretty.printHeader(DAY, 2, inspect.stack()[0].function, in_file)
result = 0
passports = loadingUtils.import_multiline(in_file)
if debug: pretty.print2DMap(passports)
count = 0
for passport in passports:
if validate_passport(passport):
count += 1
# code here
result = count
# code here
print("Result = {}".format(result))
return result
if __name__ == "__main__":
run_part_1(get_path() + "/test1", True)
run_part_1(get_path() + "/input1")
run_part_2(get_path() + "/test1", True)
run_part_2(get_path() + "/input1")
|
[
"inspect.stack",
"codetiming.Timer",
"helper.pretty.print2DMap",
"helper.loadingUtils.import_multiline",
"sys.path.insert",
"functools.reduce",
"re.search"
] |
[((82, 127), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""D:\\\\projects\\\\aoc2020\\\\"""'], {}), "(0, 'D:\\\\projects\\\\aoc2020\\\\')\n", (97, 127), False, 'import sys\n'), ((3893, 3900), 'codetiming.Timer', 'Timer', ([], {}), '()\n', (3898, 3900), False, 'from codetiming import Timer\n'), ((4345, 4352), 'codetiming.Timer', 'Timer', ([], {}), '()\n', (4350, 4352), False, 'from codetiming import Timer\n'), ((692, 779), 'functools.reduce', 'functools.reduce', (['(lambda x, y: x + (1 if y in pass_dict else 0))', 'required_fields', '(0)'], {}), '(lambda x, y: x + (1 if y in pass_dict else 0),\n required_fields, 0)\n', (708, 779), False, 'import functools\n'), ((2801, 2840), 're.search', 're.search', (['"""^(\\\\d{2,3})(cm|in)$"""', 'hgt_s'], {}), "('^(\\\\d{2,3})(cm|in)$', hgt_s)\n", (2810, 2840), False, 'import re\n'), ((4059, 4097), 'helper.loadingUtils.import_multiline', 'loadingUtils.import_multiline', (['in_file'], {}), '(in_file)\n', (4088, 4097), False, 'from helper import loadingUtils, pretty\n'), ((4511, 4549), 'helper.loadingUtils.import_multiline', 'loadingUtils.import_multiline', (['in_file'], {}), '(in_file)\n', (4540, 4549), False, 'from helper import loadingUtils, pretty\n'), ((1001, 1030), 're.search', 're.search', (['"""^\\\\d{4}$"""', 'in_str'], {}), "('^\\\\d{4}$', in_str)\n", (1010, 1030), False, 'import re\n'), ((4112, 4140), 'helper.pretty.print2DMap', 'pretty.print2DMap', (['passports'], {}), '(passports)\n', (4129, 4140), False, 'from helper import loadingUtils, pretty\n'), ((4564, 4592), 'helper.pretty.print2DMap', 'pretty.print2DMap', (['passports'], {}), '(passports)\n', (4581, 4592), False, 'from helper import loadingUtils, pretty\n'), ((3370, 3404), 're.search', 're.search', (['"""^#[0-9a-f]{6}$"""', 'hcl_s'], {}), "('^#[0-9a-f]{6}$', hcl_s)\n", (3379, 3404), False, 'import re\n'), ((3783, 3813), 're.search', 're.search', (['"""^[0-9]{9}$"""', 'pid_s'], {}), "('^[0-9]{9}$', pid_s)\n", (3792, 3813), False, 'import re\n'), ((3990, 4005), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (4003, 4005), False, 'import inspect\n'), ((4442, 4457), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (4455, 4457), False, 'import inspect\n')]
|
import numpy as np
import pandas as pd
import os
from psrqpy import QueryATNF
from utmost_psr import utils, plot
def UTMOST_NS_module_params():
"""
System parameters for a single UTMOST-2D North-South module.
output:
-------
UTMOST_NS_module: dict
Dictionary containing module parameters (Gain [K/Jy], Bandwidth [MHz],
Freq [MHz], T_sys [K], N_pol, Latitude [deg])
"""
UTMOST_NS_module = {
"Gain": 0.0028,
"Bandwidth": 45.0,
"Freq": "843 MHz",
"T_sys": 70.0,
"N_pol": 2.0,
"Latitude": -35.3707088333
}
return UTMOST_NS_module
def radiometer_signal_to_noise(obs_params, flux_density, period, width,
psr_Tsky, t_int=300.0):
"""
Predicted signal to noise ratio from the radiometer equation: see Equation
A1.21 in Kramer & Lorimer (2004).
input:
------
obs_params: dict
Dictionary containing observatory parameters (Gain [K/Jy],
Bandwidth [MHz], Freq [MHz], T_sys [K], N_pol)
flux_density: list, floats
Pulsar flux_density [Jy]
period: list, floats
Pulsar period [s]
width: list, floats
Pulsar width -- W50 [s]
psr_Tsky: list, floats
Sky temperature at pulsar positions (K)
t_int: float, optional
Observation length in seconds (default = 300 seconds)
output:
-------
snr: float
Radiometer signal to noise ratio
"""
# System Equivalent Flux Density: Gain / T_sys
sefd = obs_params["Gain"] / (obs_params["T_sys"] + psr_Tsky)
# Pulsar duty cycle
duty_cycle = np.sqrt((period - width)/width)
# Signal to noise ratio
snr = flux_density * sefd * np.sqrt(obs_params["N_pol"] *
t_int * obs_params["Bandwidth"]*1e6) * duty_cycle
return snr
def Zenith_angle_correction(psr_DECJ, Latitude):
"""
Corrects the detected pulsar S/N based on the pulsar distance from zenith.
input:
------
psr_DECJ: float
Declination of the pulsar in fractional degrees.
Latitude: float
Latitude of the telescope in fractional degrees.
output:
------
zenith_corrected_snr: float
S/N correction for distance from zenith.
"""
zenith_corrected_snr = np.cos((psr_DECJ - Latitude) * np.pi/180.)
return zenith_corrected_snr
def ddmmss_to_deg(position):
"""
Converts positions in deg:min:sec format to fractional degrees.
input:
------
position: str
Position in deg:min:sec format.
output:
-------
position_deg: float
Position in fractional degrees.
"""
split_position = position.split(":")
# Check if positive or negative:
if float(split_position[0]) <= 0:
if len(split_position) == 3:
position_deg = float(split_position[0]) - (
float(split_position[1])/60. + float(split_position[2])/3600.)
else:
position_deg = float(split_position[0]) - (
float(split_position[1])/60.)
else:
if len(split_position) == 3:
position_deg = float(split_position[0]) + (
float(split_position[1])/60. + float(split_position[2])/3600.)
else:
position_deg = float(split_position[0]) + (
float(split_position[1])/60.)
return position_deg
def arrival_time_uncertainty(obs_params, flux_density, period, width, psr_DECJ,
n_cassette, t_int=300.):
"""
Predicted pulse time of arrival (ToA) uncertainty: see see Equation 8.2 in
Kramer & Lorimer (2004).
input:
------
obs_params: dict
Dictionary containing observatory parameters (Gain [K/Jy],
Bandwidth [MHz], Freq [MHz], T_sys [K], N_pol)
flux_density: list, floats
Pulsar flux_density [Jy]
period: list, floats
Pulsar period [s]
width: list, floats
Pulsar width -- W50 [s]
psr_DECJ: list, floats
Pulsar declination [deg]
n_cassette: scalar, optional
Number of UTMOST-NS cassettes (default = 1)
t_int: float, optional
Observation length in seconds (default = 300 seconds)
output:
-------
sigma_toa: list, floats
Estimated ToA uncertainty (us)
"""
# System Equivalent Flux Density: Gain / T_sys
sefd = obs_params["Gain"] / obs_params["T_sys"] * n_cassette
# Pulsar duty cycle
duty_cycle = np.sqrt((period - width)/width)
snr_corr = Zenith_angle_correction(psr_DECJ, obs_params["Latitude"])
sigma_toa = (width/flux_density) * (1/(sefd)*snr_corr) * (1/np.sqrt(
obs_params["N_pol"] * t_int * obs_params["Bandwidth"]*1e6)) * (
1/duty_cycle)
return sigma_toa
def get_extrapolated_flux(flux_ref, freq_ref, spectral_index):
"""
Computes the flux density at 843 MHz extrapolated from a higher/lower flux
density measurement & some assumed spectral index.
input:
------
flux_ref: float
Reference flux density, usually S400 or S1400 [mJy].
freq_ref: float
Refrence frequency, usually 400 or 1400 MHz.
output:
-------
S843: float
Extrapolated flux density at 843 MHz [mJy]
"""
S843 = flux_ref * (843.0 / freq_ref)**(spectral_index)
return S843
|
[
"numpy.cos",
"numpy.sqrt"
] |
[((1611, 1644), 'numpy.sqrt', 'np.sqrt', (['((period - width) / width)'], {}), '((period - width) / width)\n', (1618, 1644), True, 'import numpy as np\n'), ((2264, 2309), 'numpy.cos', 'np.cos', (['((psr_DECJ - Latitude) * np.pi / 180.0)'], {}), '((psr_DECJ - Latitude) * np.pi / 180.0)\n', (2270, 2309), True, 'import numpy as np\n'), ((4411, 4444), 'numpy.sqrt', 'np.sqrt', (['((period - width) / width)'], {}), '((period - width) / width)\n', (4418, 4444), True, 'import numpy as np\n'), ((1704, 1778), 'numpy.sqrt', 'np.sqrt', (["(obs_params['N_pol'] * t_int * obs_params['Bandwidth'] * 1000000.0)"], {}), "(obs_params['N_pol'] * t_int * obs_params['Bandwidth'] * 1000000.0)\n", (1711, 1778), True, 'import numpy as np\n'), ((4582, 4656), 'numpy.sqrt', 'np.sqrt', (["(obs_params['N_pol'] * t_int * obs_params['Bandwidth'] * 1000000.0)"], {}), "(obs_params['N_pol'] * t_int * obs_params['Bandwidth'] * 1000000.0)\n", (4589, 4656), True, 'import numpy as np\n')]
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.bounding_shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorBoundingShapeOp(test_util.TensorFlowTestCase):
def testDocStringExample(self):
# This is the example from ragged.bounding_shape.__doc__.
rt = ragged_factory_ops.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9],
[10]])
self.assertAllEqual(rt.bounding_shape(), [5, 4])
def test2DRaggedTensorWithOneRaggedDimension(self):
values = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
rt1 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 2, 5, 6, 6, 7])
rt2 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 7])
rt3 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 0, 7, 7])
self.assertAllEqual(rt1.bounding_shape(), [5, 3])
self.assertAllEqual(rt2.bounding_shape(), [1, 7])
self.assertAllEqual(rt3.bounding_shape(), [3, 7])
def test3DRaggedTensorWithOneRaggedDimension(self):
values = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]]
rt1 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 2, 5, 6, 6, 7])
rt2 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 7])
rt3 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 0, 7, 7])
self.assertAllEqual(rt1.bounding_shape(), [5, 3, 2])
self.assertAllEqual(rt2.bounding_shape(), [1, 7, 2])
self.assertAllEqual(rt3.bounding_shape(), [3, 7, 2])
def testExplicitAxisOptimizations(self):
rt = ragged_tensor.RaggedTensor.from_row_splits(b'a b c d e f g'.split(),
[0, 2, 5, 6, 6, 7])
self.assertAllEqual(rt.bounding_shape(0), 5)
self.assertAllEqual(rt.bounding_shape(1), 3)
self.assertAllEqual(rt.bounding_shape([1, 0]), [3, 5])
if __name__ == '__main__':
googletest.main()
|
[
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits",
"tensorflow.python.platform.googletest.main"
] |
[((2895, 2912), 'tensorflow.python.platform.googletest.main', 'googletest.main', ([], {}), '()\n', (2910, 2912), False, 'from tensorflow.python.platform import googletest\n'), ((1302, 1374), 'tensorflow.python.ops.ragged.ragged_factory_ops.constant', 'ragged_factory_ops.constant', (['[[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]]'], {}), '([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]])\n', (1329, 1374), False, 'from tensorflow.python.ops.ragged import ragged_factory_ops\n'), ((1586, 1656), 'tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits', 'ragged_tensor.RaggedTensor.from_row_splits', (['values', '[0, 2, 5, 6, 6, 7]'], {}), '(values, [0, 2, 5, 6, 6, 7])\n', (1628, 1656), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((1668, 1726), 'tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits', 'ragged_tensor.RaggedTensor.from_row_splits', (['values', '[0, 7]'], {}), '(values, [0, 7])\n', (1710, 1726), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((1738, 1802), 'tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits', 'ragged_tensor.RaggedTensor.from_row_splits', (['values', '[0, 0, 7, 7]'], {}), '(values, [0, 0, 7, 7])\n', (1780, 1802), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((2111, 2181), 'tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits', 'ragged_tensor.RaggedTensor.from_row_splits', (['values', '[0, 2, 5, 6, 6, 7]'], {}), '(values, [0, 2, 5, 6, 6, 7])\n', (2153, 2181), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((2193, 2251), 'tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits', 'ragged_tensor.RaggedTensor.from_row_splits', (['values', '[0, 7]'], {}), '(values, [0, 7])\n', (2235, 2251), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((2263, 2327), 'tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits', 'ragged_tensor.RaggedTensor.from_row_splits', (['values', '[0, 0, 7, 7]'], {}), '(values, [0, 0, 7, 7])\n', (2305, 2327), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n')]
|
# Generated by Django 3.2.7 on 2021-10-27 11:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Bake_bot', '0004_alter_order_deliivery_address'),
]
operations = [
migrations.AddField(
model_name='order',
name='delivery_date',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='order',
name='delivery_time',
field=models.TimeField(auto_now_add=True, null=True),
),
]
|
[
"django.db.models.DateTimeField",
"django.db.models.TimeField"
] |
[((352, 402), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (372, 402), False, 'from django.db import migrations, models\n'), ((528, 574), 'django.db.models.TimeField', 'models.TimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (544, 574), False, 'from django.db import migrations, models\n')]
|
#! /usr/bin/env python
# $Id: test_get_parser_class.py 7504 2012-08-27 07:55:20Z grubert $
# Author: <NAME>
# Maintainer: <EMAIL>
# Copyright: This module has been placed in the public domain.
"""
test get_parser_class
"""
from __init__ import DocutilsTestSupport
from docutils.parsers import get_parser_class
class GetParserClassTestCase(DocutilsTestSupport.StandardTestCase):
def test_registered_parser(self):
rdr = get_parser_class('rst')
# raises ImportError on failure
def test_bogus_parser(self):
self.assertRaises(ImportError,
get_parser_class, 'nope')
def test_local_parser(self):
# requires local-parser.py in test directory (testroot)
wr = get_parser_class('local-parser')
if __name__ == '__main__':
import unittest
unittest.main()
|
[
"unittest.main",
"docutils.parsers.get_parser_class"
] |
[((820, 835), 'unittest.main', 'unittest.main', ([], {}), '()\n', (833, 835), False, 'import unittest\n'), ((435, 458), 'docutils.parsers.get_parser_class', 'get_parser_class', (['"""rst"""'], {}), "('rst')\n", (451, 458), False, 'from docutils.parsers import get_parser_class\n'), ((735, 767), 'docutils.parsers.get_parser_class', 'get_parser_class', (['"""local-parser"""'], {}), "('local-parser')\n", (751, 767), False, 'from docutils.parsers import get_parser_class\n')]
|
#! python3
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import tkinter
from tkinter import ttk, messagebox
from PIL import Image, ImageDraw, ImageFont # pip install Pillow
import piexif # pip install piexif
префикс_имён_файлов = 'D_'
имя_папки_с_датой = 'Дата'
имя_папки_без_даты = 'Оригиналы'
размер_шрифта = 100
отступ_слева = 20
отступ_снизу = 20
цвет_шрифта = (255, 255, 255)
прозрачность_подложки = 30 # Процентов
цвет_подложки = (0, 0, 0, round(прозрачность_подложки / 100 * 255))
class Приложение(ttk.Frame):
def __init__(self, пути_к_картинкам, система):
super().__init__(система)
if not пути_к_картинкам:
система.withdraw()
messagebox.showerror('Нет картинок :(', 'Не нашёл изображений. '
'Брось мышкой нужные картинки на иконку программы. '
'Запускать двойным кликом не нужно, это так не работает...')
система.destroy()
return
система.title("Проставлялка дат")
self.система = система
self.даты = {}
self.exif = {}
for картинка in пути_к_картинкам:
self.строка = ttk.Frame(self)
self.exif[картинка] = прочитать_EXIF(картинка)
дата = self.exif[картинка]["дата"]
self.даты[картинка] = tkinter.StringVar(self.строка, value=дата)
self.надпись_файл = tkinter.Label(self.строка, text=картинка)
self.надпись_файл.pack(side='left')
self.поле_даты = ttk.Entry(self.строка, textvariable=self.даты[картинка])
self.поле_даты.pack(side='right', fill='x', expand=True)
self.строка.pack(side='top', fill="x")
if len(пути_к_картинкам) > 3:
self.проставить_всем()
self.кнопка_проставить = ttk.Button(self,
text="Проставить всем даты",
command=self.проставить_всем)
self.кнопка_проставить.pack(fill='both', expand=True)
self.pack(fill='both', expand=True)
def проставить_всем(self):
for картинка, поле_даты in self.даты.items():
дата = поле_даты.get()
проставить_дату(картинка, дата, self.exif[картинка]["ориентация"])
self.система.destroy()
return
def прочитать_EXIF(путь_к_файлу):
картинка = Image.open(путь_к_файлу).convert('RGB')
try:
exif = piexif.load(картинка.info['exif'])
дата_съемки = exif['Exif'][piexif.ExifIFD.DateTimeOriginal].decode('utf-8')
дата, время = дата_съемки.split(' ', 1)
год, месяц, число = дата.split(':', 3)
ориентация = exif["0th"][piexif.ImageIFD.Orientation] \
if piexif.ImageIFD.Orientation in exif["0th"] \
else 1
return {
"дата": f"{число}.{месяц}.{год[2:]} {время}",
"ориентация": ориентация
}
except KeyError:
return {
"дата": "пусто",
"ориентация": 1
}
def проставить_дату(путь_к_файлу, дата_съемки, ориентация):
картинка = Image.open(путь_к_файлу).convert('RGB')
if ориентация == 2:
картинка = картинка.transpose(Image.FLIP_LEFT_RIGHT)
elif ориентация == 3:
картинка = картинка.rotate(180)
elif ориентация == 4:
картинка = картинка.rotate(180).transpose(Image.FLIP_LEFT_RIGHT)
elif ориентация == 5:
картинка = картинка.rotate(-90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
elif ориентация == 6:
картинка = картинка.rotate(-90, expand=True)
elif ориентация == 7:
картинка = картинка.rotate(90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
elif ориентация == 8:
картинка = картинка.rotate(90, expand=True)
ширина, высота = картинка.size
разрешение = (ширина**2 + высота**2)**0.5
относительный_размер_шрифта = int(размер_шрифта * разрешение / 5000)
шрифт = ImageFont.truetype('%WINDIR%\\Fonts\\arial.ttf', относительный_размер_шрифта)
относительный_отступ_снизу = int(отступ_снизу * разрешение / 5000)
относительный_отступ_слева = int(отступ_слева * разрешение / 5000)
draw = ImageDraw.Draw(картинка, 'RGBA')
ширина_текста, высота_текста = draw.textsize(дата_съемки, шрифт)
draw.rectangle([относительный_отступ_снизу,
высота - высота_текста - относительный_отступ_снизу,
ширина_текста + относительный_отступ_слева,
высота - относительный_отступ_снизу - 1],
цвет_подложки)
draw.text((относительный_отступ_слева,
высота - высота_текста - round(относительный_размер_шрифта / 10) - относительный_отступ_снизу),
дата_съемки,
цвет_шрифта,
шрифт)
папка_файла, имя_файла = os.path.split(путь_к_файлу)
путь_к_папке_с_датой = os.path.join(папка_файла, имя_папки_с_датой)
путь_к_папке_без_даты = os.path.join(папка_файла, имя_папки_без_даты)
if not os.path.isdir(путь_к_папке_с_датой): os.mkdir(путь_к_папке_с_датой)
if not os.path.isdir(путь_к_папке_без_даты): os.mkdir(путь_к_папке_без_даты)
картинка.save(os.path.join(путь_к_папке_с_датой, префикс_имён_файлов + имя_файла))
shutil.move(путь_к_файлу, os.path.join(путь_к_папке_без_даты, имя_файла))
# Здесь начинается выполнение программы.
система = tkinter.Tk() # Создаем управлялку оконной подсистемой
приложение = Приложение(sys.argv[1:], система) # Создаем приложение, описанное выше
приложение.mainloop() # Запускаем приложение
|
[
"tkinter.StringVar",
"os.mkdir",
"tkinter.Label",
"tkinter.ttk.Entry",
"os.path.isdir",
"tkinter.messagebox.showerror",
"tkinter.ttk.Frame",
"PIL.ImageFont.truetype",
"PIL.Image.open",
"piexif.load",
"tkinter.ttk.Button",
"PIL.ImageDraw.Draw",
"os.path.split",
"os.path.join",
"tkinter.Tk"
] |
[((5431, 5443), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (5441, 5443), False, 'import tkinter\n'), ((3991, 4068), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""%WINDIR%\\\\Fonts\\\\arial.ttf"""', 'относительный_размер_шрифта'], {}), "('%WINDIR%\\\\Fonts\\\\arial.ttf', относительный_размер_шрифта)\n", (4009, 4068), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((4223, 4255), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['картинка', '"""RGBA"""'], {}), "(картинка, 'RGBA')\n", (4237, 4255), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((4885, 4912), 'os.path.split', 'os.path.split', (['путь_к_файлу'], {}), '(путь_к_файлу)\n', (4898, 4912), False, 'import os\n'), ((4943, 4987), 'os.path.join', 'os.path.join', (['папка_файла', 'имя_папки_с_датой'], {}), '(папка_файла, имя_папки_с_датой)\n', (4955, 4987), False, 'import os\n'), ((5017, 5062), 'os.path.join', 'os.path.join', (['папка_файла', 'имя_папки_без_даты'], {}), '(папка_файла, имя_папки_без_даты)\n', (5029, 5062), False, 'import os\n'), ((1839, 1914), 'tkinter.ttk.Button', 'ttk.Button', (['self'], {'text': '"""Проставить всем даты"""', 'command': 'self.проставить_всем'}), "(self, text='Проставить всем даты', command=self.проставить_всем)\n", (1849, 1914), False, 'from tkinter import ttk, messagebox\n'), ((2463, 2497), 'piexif.load', 'piexif.load', (["картинка.info['exif']"], {}), "(картинка.info['exif'])\n", (2474, 2497), False, 'import piexif\n'), ((5057, 5092), 'os.path.isdir', 'os.path.isdir', (['путь_к_папке_с_датой'], {}), '(путь_к_папке_с_датой)\n', (5070, 5092), False, 'import os\n'), ((5110, 5140), 'os.mkdir', 'os.mkdir', (['путь_к_папке_с_датой'], {}), '(путь_к_папке_с_датой)\n', (5118, 5140), False, 'import os\n'), ((5136, 5172), 'os.path.isdir', 'os.path.isdir', (['путь_к_папке_без_даты'], {}), '(путь_к_папке_без_даты)\n', (5149, 5172), False, 'import os\n'), ((5191, 5222), 'os.mkdir', 'os.mkdir', (['путь_к_папке_без_даты'], {}), '(путь_к_папке_без_даты)\n', (5199, 5222), False, 'import os\n'), ((5232, 5299), 'os.path.join', 'os.path.join', (['путь_к_папке_с_датой', '(префикс_имён_файлов + имя_файла)'], {}), '(путь_к_папке_с_датой, префикс_имён_файлов + имя_файла)\n', (5244, 5299), False, 'import os\n'), ((5333, 5379), 'os.path.join', 'os.path.join', (['путь_к_папке_без_даты', 'имя_файла'], {}), '(путь_к_папке_без_даты, имя_файла)\n', (5345, 5379), False, 'import os\n'), ((696, 877), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Нет картинок :("""', '"""Не нашёл изображений. Брось мышкой нужные картинки на иконку программы. Запускать двойным кликом не нужно, это так не работает..."""'], {}), "('Нет картинок :(',\n 'Не нашёл изображений. Брось мышкой нужные картинки на иконку программы. Запускать двойным кликом не нужно, это так не работает...'\n )\n", (716, 877), False, 'from tkinter import ttk, messagebox\n'), ((1184, 1199), 'tkinter.ttk.Frame', 'ttk.Frame', (['self'], {}), '(self)\n', (1193, 1199), False, 'from tkinter import ttk, messagebox\n'), ((1346, 1388), 'tkinter.StringVar', 'tkinter.StringVar', (['self.строка'], {'value': 'дата'}), '(self.строка, value=дата)\n', (1363, 1388), False, 'import tkinter\n'), ((1421, 1462), 'tkinter.Label', 'tkinter.Label', (['self.строка'], {'text': 'картинка'}), '(self.строка, text=картинка)\n', (1434, 1462), False, 'import tkinter\n'), ((1537, 1593), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.строка'], {'textvariable': 'self.даты[картинка]'}), '(self.строка, textvariable=self.даты[картинка])\n', (1546, 1593), False, 'from tkinter import ttk, messagebox\n'), ((2407, 2431), 'PIL.Image.open', 'Image.open', (['путь_к_файлу'], {}), '(путь_к_файлу)\n', (2417, 2431), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3156, 3180), 'PIL.Image.open', 'Image.open', (['путь_к_файлу'], {}), '(путь_к_файлу)\n', (3166, 3180), False, 'from PIL import Image, ImageDraw, ImageFont\n')]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: peter.s
@project: EmptyModel
@time: 2019/11/6 16:42
@desc:
"""
import pickle
from aehn.preprocess.data_source import DataSource
def get_static_data_callback(data):
"""
callback generator for getting data online
:param data:
:return:
"""
def data_callback():
yield data
return data_callback
class DataLoader(object):
def __init__(self, data_config):
self._data_name = data_config['data_name']
self._data_filename = data_config['data_filename']
self._cache_dir = data_config['cache_dir']
self._use_cache = data_config['use_cache']
self._process_dim = data_config['process_dim']
def get_three_datasource(self):
""" Load the raw data, and then return three data sources containing train data, validation and test
data separately.
:return: train, validation and test DataSource.
"""
# load data
if self._use_cache:
# read data from cache
train_ds = DataSource(self._data_name + '_train', cache_dir=self._cache_dir)
valid_ds = DataSource(self._data_name + '_valid', cache_dir=self._cache_dir)
test_ds = DataSource(self._data_name + '_test', cache_dir=self._cache_dir)
else:
# keys(types, timesteps) format:n_seqs * [seq_len]
with open(self._data_filename.format('train'), 'rb') as f:
train_records = pickle.load(f)
with open(self._data_filename.format('valid'), 'rb') as f:
valid_records = pickle.load(f)
with open(self._data_filename.format('test'), 'rb') as f:
test_records = pickle.load(f)
# wrapping data into DataSource
train_ds = DataSource(self._data_name + '_train', cache_dir=self._cache_dir,
retrieve_data_callback=get_static_data_callback(train_records))
valid_ds = DataSource(self._data_name + '_valid', cache_dir=self._cache_dir,
retrieve_data_callback=get_static_data_callback(valid_records))
test_ds = DataSource(self._data_name + '_test', cache_dir=self._cache_dir,
retrieve_data_callback=get_static_data_callback(test_records))
return train_ds, valid_ds, test_ds
|
[
"pickle.load",
"aehn.preprocess.data_source.DataSource"
] |
[((1066, 1131), 'aehn.preprocess.data_source.DataSource', 'DataSource', (["(self._data_name + '_train')"], {'cache_dir': 'self._cache_dir'}), "(self._data_name + '_train', cache_dir=self._cache_dir)\n", (1076, 1131), False, 'from aehn.preprocess.data_source import DataSource\n'), ((1155, 1220), 'aehn.preprocess.data_source.DataSource', 'DataSource', (["(self._data_name + '_valid')"], {'cache_dir': 'self._cache_dir'}), "(self._data_name + '_valid', cache_dir=self._cache_dir)\n", (1165, 1220), False, 'from aehn.preprocess.data_source import DataSource\n'), ((1243, 1307), 'aehn.preprocess.data_source.DataSource', 'DataSource', (["(self._data_name + '_test')"], {'cache_dir': 'self._cache_dir'}), "(self._data_name + '_test', cache_dir=self._cache_dir)\n", (1253, 1307), False, 'from aehn.preprocess.data_source import DataSource\n'), ((1490, 1504), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1501, 1504), False, 'import pickle\n'), ((1609, 1623), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1620, 1623), False, 'import pickle\n'), ((1726, 1740), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1737, 1740), False, 'import pickle\n')]
|
from v_process import *
import config
import os
def data_process():
for folder_name in os.listdir(config.video_folder):
print(folder_name)
if 'train_without' in folder_name :
pro = Process(config.video_folder + folder_name + '/')
pro.rename()
# if 'gray' in config.process_color:
print('process train_without')
pro.cut_gray_img()
pro.cut_rgb_img()
pro.cut_black_img()
if 'train_with' in folder_name and 'train_without' not in folder_name:
print('process train_with')
pro = Process(config.video_folder + folder_name + '/')
pro.rename()
pro.cut_gray_img()
pro.cut_rgb_img()
if 'test_without' in folder_name:
print('process test_without')
pro = Process(config.video_folder + folder_name + '/')
pro.rename()
pro.cut_gray_img()
pro.cut_rgb_img()
pro.cut_black_img()
if 'test_with' in folder_name and 'test_without' not in folder_name:
print('process test_with')
pro = Process(config.video_folder + folder_name + '/')
pro.rename()
pro.cut_gray_img()
pro.cut_rgb_img()
# if 'black' in config.process_color:
# print('process black_imgs')
# pro.cut_black_img()
# if 'rgb' in config.process_color:
# print('process rgb_imgs')
# pro.cut_rgb_img()
data_process()
|
[
"os.listdir"
] |
[((92, 123), 'os.listdir', 'os.listdir', (['config.video_folder'], {}), '(config.video_folder)\n', (102, 123), False, 'import os\n')]
|
#!/usr/bin/env python
"""
Created on 2015-04-04T11:28:18
"""
from __future__ import division, print_function
import sys
import subprocess
from sqlalchemy import create_engine
try:
import pymysql
except ImportError:
print('You need pymysql installed')
sys.exit(1)
__author__ = "<NAME> (github: @mattgiguere)"
__license__ = "MIT"
__version__ = '0.0.1'
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def get_credentials_dir():
"""
PURPOSE: A routine for pointing to the credentials directory.
"""
cmd = 'echo $CredDir'
#read in the CredDir string
cdir = subprocess.check_output(cmd, shell=True)
#chop off the newline character at the end
cdir = cdir[0:len(cdir)-1]
#and return it
return cdir
def connect_aws_db(legacy=False, write_unicode=False):
"""PURPOSE:
A function for connecting to the doglodge.io AWS RDS MySQL database.
:param legacy: [optional]
If legacy is set, a PyMySQL connection will be returned.
Otherwise, a SQLAlchemy engine will be returned. This is
to handle the deprecated MySQL connections in pandas.
:param write_unicode: [optional]
If set, text will be written to the MySQL DB as unicode.
"""
#retrieve credentials:
cdir = get_credentials_dir()
credsf = open(cdir+'.credentials/SQL/cawsi', 'r')
creds = credsf.read().split('\n')
if legacy:
conn = pymysql.connect(host=creds[0],
port=int(creds[1]),
user=creds[2],
passwd=creds[3],
db=creds[4])
#cur = conn.cursor()
return conn
else:
#example:
#mysql+pymysql://<username>:<password>@<host>/<dbname>[?<options>]
cmd = "mysql+pymysql://"
cmd += creds[2]+':'
cmd += creds[3]+'@'
cmd += creds[0]+'/'
cmd += creds[4]
if write_unicode:
cmd += '?charset=utf8'
engine = create_engine(cmd, pool_recycle=600)
return engine
|
[
"sqlalchemy.create_engine",
"subprocess.check_output",
"sys.exit"
] |
[((620, 660), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (643, 660), False, 'import subprocess\n'), ((266, 277), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (274, 277), False, 'import sys\n'), ((2030, 2066), 'sqlalchemy.create_engine', 'create_engine', (['cmd'], {'pool_recycle': '(600)'}), '(cmd, pool_recycle=600)\n', (2043, 2066), False, 'from sqlalchemy import create_engine\n')]
|
from pymdc.rest import REST
from pymdc.api.files import Files
# A list of bucket objects for a user account.
class Buckets():
def __init__(self, key_pair):
self.key_pair = key_pair
def create(self, name, storage, transfer, key_pairs, callback=None):
# Get pub keys.
pub_keys = []
for key_pair in key_pairs:
pub_keys.append(key_pair.get_public_key("hex"))
params = {
"name": name,
"storage": int(storage),
"transfer": int(transfer),
"pubkeys": pub_keys
}
ret = REST(
"POST",
"/buckets",
params,
callback=callback,
auth=self.key_pair
)
return ret
def list(self, callback=None):
resource = "/buckets"
ret = REST(
"GET",
resource,
callback=callback,
auth=self.key_pair
)
return ret
# A bucket is also an object that can store + download files.
class Bucket():
def __init__(self, bucket_id, key_pair):
self.bucket_id = bucket_id
self.key_pair = key_pair
self.files = Files(self.bucket_id, self.key_pair)
def list(self, callback=None):
resource = "/buckets/%s" % self.bucket_id
ret = REST(
"GET",
resource,
callback=callback,
auth=self.key_pair
)
return ret
def delete(self, callback=None):
resource = "/buckets/%s" % self.bucket_id
ret = REST(
"DELETE",
resource,
callback=callback,
auth=self.key_pair
)
return ret
def patch(self, params, callback=None):
resource = "/buckets/%s" % self.bucket_id
ret = REST(
"PATCH",
resource,
params=params,
callback=callback,
auth=self.key_pair
)
return ret
|
[
"pymdc.api.files.Files",
"pymdc.rest.REST"
] |
[((588, 659), 'pymdc.rest.REST', 'REST', (['"""POST"""', '"""/buckets"""', 'params'], {'callback': 'callback', 'auth': 'self.key_pair'}), "('POST', '/buckets', params, callback=callback, auth=self.key_pair)\n", (592, 659), False, 'from pymdc.rest import REST\n'), ((830, 890), 'pymdc.rest.REST', 'REST', (['"""GET"""', 'resource'], {'callback': 'callback', 'auth': 'self.key_pair'}), "('GET', resource, callback=callback, auth=self.key_pair)\n", (834, 890), False, 'from pymdc.rest import REST\n'), ((1182, 1218), 'pymdc.api.files.Files', 'Files', (['self.bucket_id', 'self.key_pair'], {}), '(self.bucket_id, self.key_pair)\n', (1187, 1218), False, 'from pymdc.api.files import Files\n'), ((1323, 1383), 'pymdc.rest.REST', 'REST', (['"""GET"""', 'resource'], {'callback': 'callback', 'auth': 'self.key_pair'}), "('GET', resource, callback=callback, auth=self.key_pair)\n", (1327, 1383), False, 'from pymdc.rest import REST\n'), ((1564, 1627), 'pymdc.rest.REST', 'REST', (['"""DELETE"""', 'resource'], {'callback': 'callback', 'auth': 'self.key_pair'}), "('DELETE', resource, callback=callback, auth=self.key_pair)\n", (1568, 1627), False, 'from pymdc.rest import REST\n'), ((1815, 1892), 'pymdc.rest.REST', 'REST', (['"""PATCH"""', 'resource'], {'params': 'params', 'callback': 'callback', 'auth': 'self.key_pair'}), "('PATCH', resource, params=params, callback=callback, auth=self.key_pair)\n", (1819, 1892), False, 'from pymdc.rest import REST\n')]
|