code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from Experiments import test_knapsack_weighted, test_knapsack_unit
"""
Example Dnl experiments on weighted and unit knapsack problems.
Test boolean (boolean array): determines the variations of dnl used. The order is [Exhaustive, Exhaustive_max, Dnl, dnl_max, dnl_greedy]. exhaustive, dnl and dnl_greedy are used in the paper.
for dnl_greedy choose test boolean = [0,0,0,0,1]
Dependencies:
gcc/8.3.0
openmpi/3.1.4
python/3.7.4
scikit-learn/0.23.1-python-3.7.4
gurobi/9.0.0
numpy/1.17.3-python-3.7.4
matplotlib/3.2.1-python-3.7.4
"""
capacities = [12,24,48,72,96,120,144,172,196,220]
kfolds = [0,1,2,3,4]
test_knapsack_weighted(max_step_size_magnitude=0, min_step_size_magnitude=-1, capacities=capacities, epoch_limit=3,
kfolds=kfolds,
test_boolean=[0, 0, 0, 0, 1], core_number=8, is_shuffle=False, learning_rate=0.1,
mini_batch_size=32, n_iter=5, noise_level=0)
capacities = [5,10,15,20,25,30,35,40]
test_knapsack_unit(max_step_size_magnitude=0, min_step_size_magnitude=-1, capacities=capacities, epoch_limit=3,
kfolds=kfolds,
test_boolean=[0, 0, 0, 0, 1], core_number=8, is_shuffle=False, learning_rate=0.1,
mini_batch_size=32, n_iter=5, noise_level=0) | [
"Experiments.test_knapsack_weighted",
"Experiments.test_knapsack_unit"
] | [((613, 883), 'Experiments.test_knapsack_weighted', 'test_knapsack_weighted', ([], {'max_step_size_magnitude': '(0)', 'min_step_size_magnitude': '(-1)', 'capacities': 'capacities', 'epoch_limit': '(3)', 'kfolds': 'kfolds', 'test_boolean': '[0, 0, 0, 0, 1]', 'core_number': '(8)', 'is_shuffle': '(False)', 'learning_rate': '(0.1)', 'mini_batch_size': '(32)', 'n_iter': '(5)', 'noise_level': '(0)'}), '(max_step_size_magnitude=0, min_step_size_magnitude=-\n 1, capacities=capacities, epoch_limit=3, kfolds=kfolds, test_boolean=[0,\n 0, 0, 0, 1], core_number=8, is_shuffle=False, learning_rate=0.1,\n mini_batch_size=32, n_iter=5, noise_level=0)\n', (635, 883), False, 'from Experiments import test_knapsack_weighted, test_knapsack_unit\n'), ((980, 1245), 'Experiments.test_knapsack_unit', 'test_knapsack_unit', ([], {'max_step_size_magnitude': '(0)', 'min_step_size_magnitude': '(-1)', 'capacities': 'capacities', 'epoch_limit': '(3)', 'kfolds': 'kfolds', 'test_boolean': '[0, 0, 0, 0, 1]', 'core_number': '(8)', 'is_shuffle': '(False)', 'learning_rate': '(0.1)', 'mini_batch_size': '(32)', 'n_iter': '(5)', 'noise_level': '(0)'}), '(max_step_size_magnitude=0, min_step_size_magnitude=-1,\n capacities=capacities, epoch_limit=3, kfolds=kfolds, test_boolean=[0, 0,\n 0, 0, 1], core_number=8, is_shuffle=False, learning_rate=0.1,\n mini_batch_size=32, n_iter=5, noise_level=0)\n', (998, 1245), False, 'from Experiments import test_knapsack_weighted, test_knapsack_unit\n')] |
from io import StringIO
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from flags.state import flag_enabled
class EnableFlagTestCase(TestCase):
def test_enable_flag(self):
out = StringIO()
self.assertFalse(flag_enabled("DB_FLAG"))
call_command("enable_flag", "DB_FLAG", stdout=out)
self.assertTrue(flag_enabled("DB_FLAG"))
self.assertIn("Successfully enabled", out.getvalue())
def test_enable_flag_non_existent_flag(self):
with self.assertRaises(CommandError):
call_command("enable_flag", "FLAG_DOES_NOT_EXIST")
| [
"io.StringIO",
"flags.state.flag_enabled",
"django.core.management.call_command"
] | [((281, 291), 'io.StringIO', 'StringIO', ([], {}), '()\n', (289, 291), False, 'from io import StringIO\n'), ((350, 400), 'django.core.management.call_command', 'call_command', (['"""enable_flag"""', '"""DB_FLAG"""'], {'stdout': 'out'}), "('enable_flag', 'DB_FLAG', stdout=out)\n", (362, 400), False, 'from django.core.management import call_command\n'), ((317, 340), 'flags.state.flag_enabled', 'flag_enabled', (['"""DB_FLAG"""'], {}), "('DB_FLAG')\n", (329, 340), False, 'from flags.state import flag_enabled\n'), ((425, 448), 'flags.state.flag_enabled', 'flag_enabled', (['"""DB_FLAG"""'], {}), "('DB_FLAG')\n", (437, 448), False, 'from flags.state import flag_enabled\n'), ((621, 671), 'django.core.management.call_command', 'call_command', (['"""enable_flag"""', '"""FLAG_DOES_NOT_EXIST"""'], {}), "('enable_flag', 'FLAG_DOES_NOT_EXIST')\n", (633, 671), False, 'from django.core.management import call_command\n')] |
from PDA import PDA
import math
EMPTY_STRING = ''
def int_to_bin(x):
return x.__format__('b')
def s_2(x):
return int_to_bin(x).count('1')
def base_b(n,b):
s = EMPTY_STRING
while n > 0:
s += str(n % b)
n //= b
return s[::-1]
def s_b(n,b):
count = 0
while n > 0:
count += n % b
n //= b
return count
# def is_k_flimsy(x, k):
# return s_2(x) > s_2(k*x)
# def find_first_k_flimsy_numbers (k, limit): # Finds the k-flimsy integers in [1..limit]
# output = []
# for i in range (1, limit):
# if (is_k_flimsy(i,k)):
# output.append(i)
# return output
def create_palindrome_PDA():
states = {'S', 'END'}
alphabet = {'', 'a', 'b'}
stack_alphabet = {'Z', 'a', 'b'}
start_state = 'S'
start_stack = 'Z'
transitions = {
('S', 'a', 'Z'): [('S', 'aZ'), ('END', 'Z')],
('S', 'a', 'a'): [('S', 'aa'), ('END', 'a')],
('S', 'a', 'b'): [('S', 'ab'), ('END', 'b')],
('S', 'b', 'Z'): [('S', 'bZ'), ('END', 'Z')],
('S', 'b', 'a'): [('S', 'ba'), ('END', 'a')],
('S', 'b', 'b'): [('S', 'bb'), ('END', 'b')],
('S', '', 'Z'): [('END', 'Z')],
('S', '', 'a'): [('END', 'a')],
('S', '', 'b'): [('END', 'b')],
('END', '', 'Z'): [('END', '')],
('END', 'a', 'a'): [('END', '')],
('END', 'b', 'b'): [('END', '')]
}
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_even_palindrome_PDA():
states = {'S', 'END'}
alphabet = {'', 'a', 'b'}
stack_alphabet = {'Z', 'a', 'b'}
start_state = 'S'
start_stack = 'Z'
transitions = {
('S', 'a', 'Z'): [('S', 'aZ')],
('S', 'a', 'a'): [('S', 'aa')],
('S', 'a', 'b'): [('S', 'ab')],
('S', 'b', 'Z'): [('S', 'bZ')],
('S', 'b', 'a'): [('S', 'ba')],
('S', 'b', 'b'): [('S', 'bb')],
('S', '', 'Z'): [('END', 'Z')],
('S', '', 'a'): [('END', 'a')],
('S', '', 'b'): [('END', 'b')],
('END', '', 'Z'): [('END', '')],
('END', 'a', 'a'): [('END', '')],
('END', 'b', 'b'): [('END', '')]
}
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_even_palindrome_PDA_alternate():
states = {'q_0', 'q_1', 'q_2'}
alphabet = {'a', 'b', ''}
stack_alphabet = {'Z', 'a', 'b'}
start_state = 'q_0'
start_stack = 'Z'
transitions = {
('q_0', 'a', 'Z'): [('q_0', 'aZ')],
('q_0', 'a', 'a'): [('q_0', 'aa')],
('q_0', 'a', 'b'): [('q_0', 'ab')],
('q_0', 'b', 'Z'): [('q_0', 'bZ')],
('q_0', 'b', 'a'): [('q_0', 'ba')],
('q_0', 'b', 'b'): [('q_0', 'bb')],
('q_0', '', 'Z'): [('q_1', 'Z')],
('q_0', '', 'a'): [('q_1', 'a')],
('q_0', '', 'b'): [('q_1', 'b')],
('q_1', 'a', 'a'): [('q_1', '')],
('q_1', 'b', 'b'): [('q_1', '')],
('q_1', '', 'Z'): [('q_2', '')]
}
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_an_bn_PDA():
states = {'S', 'END'}
alphabet = {'', 'a', 'b'}
stack_alphabet = {'Z', 'a'}
start_state = 'S'
start_stack = 'Z'
transitions = {
('S', '', 'Z'): [('END', '')],
('S', 'a', 'Z'): [('S', 'aZ')],
('S', 'a', 'a'): [('S', 'aa')],
('S', 'b', 'a'): [('END', '')],
('END', '', 'Z'): [('END', '')],
('END', 'b', 'a'): [('END', '')]
}
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_equal_as_bs_PDA():
states = {'S', 'END'}
alphabet = {'', 'a', 'b'}
stack_alphabet = {'Z', 'a', 'b'}
start_state = 'S'
start_stack = 'Z'
transitions = {
('S', '', 'Z'): [('END', '')],
('S', 'a', 'Z'): [('S', 'aZ')],
('S', 'a', 'a'): [('S', 'aa')],
('S', 'a', 'b'): [('S', '')],
('S', 'b', 'Z'): [('S', 'bZ')],
('S', 'b', 'a'): [('S', '')],
('S', 'b', 'b'): [('S', 'bb')]
}
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_dyck_PDA(): # TODO: Test this
states = {'S', 'END'}
alphabet = {'', '(', ')'}
stack_alphabet = {'Z', '('}
start_state = 'S'
start_stack = 'Z'
transitions = {
('S', '(', 'Z'): [('S', '(Z')],
('S', '(', '('): [('S', '((')],
('S', ')', '('): [('S', '')],
('S', '', 'Z'): [('END', '')]
}
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
# For the language a^m b^n c^m
def create_am_bn_cm_PDA():
states = {'q_0', 'q_1', 'q_2'}
alphabet = {'', 'a', 'b', 'c'}
stack_alphabet = {'Z', 'a'}
start_state = 'q_0'
start_stack = 'Z'
transitions = {
('q_0', 'a', 'Z'): [('q_0', 'aZ')],
('q_0', 'a', 'a'): [('q_0', 'aa')],
('q_0', '', 'Z'): [('q_0', '')],
('q_0', 'b', 'Z'): [('q_1', 'Z')],
('q_0', 'b', 'a'): [('q_1', 'a')],
('q_0', 'c', 'a'): [('q_2', '')],
('q_1', 'b', 'Z'): [('q_1', 'Z')],
('q_1', 'b', 'a'): [('q_1', 'a')],
('q_1', '', 'Z'): [('q_1', '')],
('q_1', 'c', 'a'): [('q_2', '')],
('q_2', 'c', 'a'): [('q_2', '')],
('q_2', '', 'Z'): [('q_2', '')],
}
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
# Create a PDA to accept all 3-flimsy binary numbers
def create_3flimsy_PDA():
states = {'-0', '-1', '-2', '+2', '+1', '+0', 'END'}
alphabet = {'0', '1', ''}
stack_alphabet = {'Z', 'X'}
start_state = '-0'
start_stack = 'Z'
transitions = {
('-0', '0', 'Z'): [('-0', 'Z')],
('-0', '0', 'X'): [('-0', 'X')],
('-0', '1', 'Z'): [('-1', 'Z')],
('-0', '1', 'X'): [('-1', 'X')],
('-1', '0', 'Z'): [('-0', 'XZ')],
('-1', '0', 'X'): [('-0', 'XX')],
('-1', '1', 'Z'): [('+2', 'Z')],
('-1', '1', 'X'): [('-2', '')],
('-2', '0', 'Z'): [('-1', 'Z')],
('-2', '0', 'X'): [('-1', 'X')],
('-2', '1', 'Z'): [('-2', 'Z')],
('-2', '1', 'X'): [('-2', 'X')],
('+2', '0', 'Z'): [('+1', 'Z')],
('+2', '0', 'X'): [('+1', 'X')],
('+2', '1', 'Z'): [('+2', 'Z')],
('+2', '1', 'X'): [('+2', 'X'), ('END', '')],
('+1', '0', 'Z'): [('-0', 'Z')],
('+1', '0', 'X'): [('+0', '')],
('+1', '1', 'Z'): [('+2', 'XZ'), ('END', '')],
('+1', '1', 'X'): [('+2', 'XX'), ('END', '')],
('+0', '0', 'Z'): [('+0', 'Z')],
('+0', '0', 'X'): [('+0', 'X')],
('+0', '1', 'Z'): [('+1', 'Z')],
('+0', '1', 'X'): [('+1', 'X'), ('END', '')],
('END', '', 'Z'): [('END', '')],
('END', '', 'X'): [('END', '')]
}
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_3flimsy_PDA_alternate():
states = {'-0', '-1', '-2', '+2', '+1', '+0', 'END_0', 'END_1'}
alphabet = {'0', '1', ''}
stack_alphabet = {'Z', 'X'}
start_state = '-0'
start_stack = 'Z'
transitions = {
('-0', '0', 'Z'): [('-0', 'Z')],
('-0', '0', 'X'): [('-0', 'X')],
('-0', '1', 'Z'): [('-1', 'Z')],
('-0', '1', 'X'): [('-1', 'X')],
('-1', '0', 'Z'): [('-0', 'XZ')],
('-1', '0', 'X'): [('-0', 'XX')],
('-1', '1', 'Z'): [('+2', 'Z')],
('-1', '1', 'X'): [('-2', '')],
('-2', '0', 'Z'): [('-1', 'Z')],
('-2', '0', 'X'): [('-1', 'X')],
('-2', '1', 'Z'): [('-2', 'Z')],
('-2', '1', 'X'): [('-2', 'X')],
('+2', '0', 'Z'): [('+1', 'Z')],
('+2', '0', 'X'): [('+1', 'X')],
('+2', '1', 'Z'): [('+2', 'Z')],
('+2', '1', 'X'): [('+2', 'X'), ('END_1', 'X')],
('+1', '0', 'Z'): [('-0', 'Z')],
('+1', '0', 'X'): [('+0', '')],
('+1', '1', 'Z'): [('+2', 'XZ'), ('END_0', 'Z')],
('+1', '1', 'X'): [('+2', 'XX'), ('END_0', 'X')],
('+0', '0', 'Z'): [('+0', 'Z')],
('+0', '0', 'X'): [('+0', 'X')],
('+0', '1', 'Z'): [('+1', 'Z')],
('+0', '1', 'X'): [('+1', 'X'), ('END_1', 'X')],
('END_0', '', 'Z'): [('END_0', '')],
('END_0', '', 'X'): [('END_0', '')],
('END_1', '', 'X'): [('END_0', '')]
}
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_3equal_PDA():
states = {'-0', '-1', '-2', '+2', '+1', '+0', 'END_0'}
alphabet = {'0', '1', ''}
stack_alphabet = {'Z', 'X'}
start_state = '-0'
start_stack = 'Z'
transitions = {
('-0', '0', 'Z'): [('-0', 'Z')],
('-0', '0', 'X'): [('-0', 'X')],
('-0', '1', 'Z'): [('-1', 'Z')],
('-0', '1', 'X'): [('-1', 'X')],
('-1', '0', 'Z'): [('-0', 'XZ')],
('-1', '0', 'X'): [('-0', 'XX')],
('-1', '1', 'Z'): [('+2', 'Z'), ('END_0', '')],
('-1', '1', 'X'): [('-2', '')],
('-2', '0', 'Z'): [('-1', 'Z')],
('-2', '0', 'X'): [('-1', 'X')],
('-2', '1', 'Z'): [('-2', 'Z')],
('-2', '1', 'X'): [('-2', 'X')],
('+2', '0', 'Z'): [('+1', 'Z')],
('+2', '0', 'X'): [('+1', 'X')],
('+2', '1', 'Z'): [('+2', 'Z'), ('END_0', '')],
('+2', '1', 'X'): [('+2', 'X')],
('+1', '0', 'Z'): [('-0', 'Z')],
('+1', '0', 'X'): [('+0', '')],
('+1', '1', 'Z'): [('+2', 'XZ')],
('+1', '1', 'X'): [('+2', 'XX')],
('+0', '0', 'Z'): [('+0', 'Z')],
('+0', '0', 'X'): [('+0', 'X')],
('+0', '1', 'Z'): [('+1', 'Z'), ('END_0', '')],
('+0', '1', 'X'): [('+1', 'X')]
}
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def create_5equal_PDA():
states = {'-0', '-1', '-2', '-3', '-4', '+4', '+3', '+2', '+1', '+0', 'END_0', 'END_1'}
alphabet = {'0', '1', ''}
stack_alphabet = {'Z', 'X'}
start_state = '-0'
start_stack = 'Z'
transitions = {
('+0', '0', 'X'): [('+0', 'X')],
('+0', '0', 'Z'): [('+0', 'Z')],
('+0', '1', 'X'): [('+2', 'X')],
('+0', '1', 'Z'): [('+2', 'Z'), ('END_0', 'Z')],
('+1', '0', 'X'): [('+0', '')],
('+1', '0', 'Z'): [('-0', 'Z')],
('+1', '1', 'X'): [('+3', 'XX')],
('+1', '1', 'Z'): [('+3', 'XZ'), ('END_0', 'Z')],
('+2', '0', 'X'): [('+1', 'X')],
('+2', '0', 'Z'): [('+1', 'Z')],
('+2', '1', 'X'): [('+3', 'X'), ('END_1', 'X')],
('+2', '1', 'Z'): [('+3', 'Z')],
('+3', '0', 'X'): [('+1', '')],
('+3', '0', 'Z'): [('-1', 'Z')],
('+3', '1', 'X'): [('+4', 'XX')],
('+3', '1', 'Z'): [('+4', 'XZ')],
('+4', '0', 'X'): [('+2', 'X')],
('+4', '0', 'Z'): [('+2', 'Z')],
('+4', '1', 'X'): [('+4', 'X')],
('+4', '1', 'Z'): [('+4', 'Z'), ('END_0', 'Z')],
('-0', '0', 'X'): [('-0', 'X')],
('-0', '0', 'Z'): [('-0', 'Z')],
('-0', '1', 'X'): [('-2', 'X')],
('-0', '1', 'Z'): [('-2', 'Z')],
('-1', '0', 'X'): [('-0', 'XX')],
('-1', '0', 'Z'): [('-0', 'XZ')],
('-1', '1', 'X'): [('-3', '')],
('-1', '1', 'Z'): [('+3', 'Z')],
('-2', '0', 'X'): [('-1', 'X')],
('-2', '0', 'Z'): [('-1', 'Z')],
('-2', '1', 'X'): [('-3', 'X')],
('-2', '1', 'Z'): [('-3', 'Z')],
('-3', '0', 'X'): [('-1', 'XX')],
('-3', '0', 'Z'): [('-1', 'XZ')],
('-3', '1', 'X'): [('-4', '')],
('-3', '1', 'Z'): [('+4', 'Z'), ('END_0', 'Z')],
('-4', '0', 'X'): [('-2', 'X')],
('-4', '0', 'Z'): [('-2', 'Z')],
('-4', '1', 'X'): [('-4', 'X')],
('-4', '1', 'Z'): [('-4', 'Z')],
('END_0', '', 'Z'): [('END_0', '')],
('END_1', '', 'X'): [('END_0', '')]
}
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
# Create a PDA to accept all k-flimsy binary numbers
def create_flimsy_PDA(k):
assert (type(k) == int) and (k > 1) and (k % 2 == 1)
states = {'END_0'}
alphabet = {'0', '1', EMPTY_STRING}
stack_alphabet = {'Z', 'X'}
start_state = '-0'
start_stack = 'Z'
transitions = {('END_0', EMPTY_STRING, 'Z'): [('END_0', EMPTY_STRING)],
('END_0', EMPTY_STRING, 'X'): [('END_0', EMPTY_STRING)]}
for carry in range(k):
s = str(carry)
states.add('-' + s)
states.add('+' + s)
for si in alphabet:
if si != EMPTY_STRING:
i = int(si)
for z in stack_alphabet:
added = i * k + carry
new_kn_digit = added % 2
new_carry = str(added // 2)
if new_kn_digit % 2 == i:
transitions[('-' + s, si, z)] = [('-' + new_carry, z)]
transitions[('+' + s, si, z)] = [('+' + new_carry, z)]
elif new_kn_digit % 2 == 1:
assert (i == 0) # n goes up by 0, kn goes up by 1
transitions[('-' + s, si, z)] = [('-' + new_carry, 'X' + z)]
if z == 'X':
transitions[('+' + s, si, z)] = [('+' + new_carry, EMPTY_STRING)]
else:
transitions[('+' + s, si, z)] = [('-' + new_carry, z)]
else:
assert (new_kn_digit % 2 == 0)
assert (i == 1) # n goes up by 1, kn goes up by 0
transitions[('+' + s, si, z)] = [('+' + new_carry, 'X' + z)]
if z == 'X':
transitions[('-' + s, si, z)] = [('-' + new_carry, EMPTY_STRING)]
else:
transitions[('-' + s, si, z)] = [('+' + new_carry, z)]
# Add new end states
# Transitions from END_{i+1} to END_{i} that read nothing but pop an X
for i in range(int(math.log2(k))):
new_state = 'END_' + str(i + 1)
states.add(new_state)
one_less = 'END_' + str(i)
transitions[(new_state, EMPTY_STRING, 'X')] = [(one_less, EMPTY_STRING)]
# 1-transitions that pop nothing from final states to END_x for some x?
for carry in range(k):
current_state = '+' + str(carry)
required_pops = s_2(k + carry) - 1
transitions[(current_state, '1', 'X')].append(('END_' + str(required_pops), 'X'))
if required_pops == 0:
transitions[(current_state, '1', 'Z')].append(('END_' + str(required_pops), 'Z'))
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
# Create a PDA to accept all n where b(n) = b(kn)
def create_k_equal_PDA(k):
assert (type(k) == int) and (k > 1) and (k % 2 == 1)
states = {'END_0'}
alphabet = {'0', '1', EMPTY_STRING}
stack_alphabet = {'Z', 'X'}
start_state = '-0'
start_stack = 'Z'
transitions = {('END_0', EMPTY_STRING, 'Z'): [('END_0', EMPTY_STRING)]}
for carry in range(k):
s = str(carry)
states.add('-' + s)
states.add('+' + s)
for si in alphabet:
if si != EMPTY_STRING:
i = int(si)
for z in stack_alphabet:
added = i * k + carry
new_kn_digit = added % 2
new_carry = str(added // 2)
if new_kn_digit % 2 == i:
transitions[('-' + s, si, z)] = [('-' + new_carry, z)]
transitions[('+' + s, si, z)] = [('+' + new_carry, z)]
elif new_kn_digit % 2 == 1:
assert (i == 0) # n goes up by 0, kn goes up by 1
transitions[('-' + s, si, z)] = [('-' + new_carry, 'X' + z)]
if z == 'X':
transitions[('+' + s, si, z)] = [('+' + new_carry, EMPTY_STRING)]
else:
transitions[('+' + s, si, z)] = [('-' + new_carry, z)]
else:
assert (new_kn_digit % 2 == 0)
assert (i == 1) # n goes up by 1, kn goes up by 0
transitions[('+' + s, si, z)] = [('+' + new_carry, 'X' + z)]
if z == 'X':
transitions[('-' + s, si, z)] = [('-' + new_carry, EMPTY_STRING)]
else:
transitions[('-' + s, si, z)] = [('+' + new_carry, z)]
# Add new end states
# Transitions from END_{i+1} to END_{i} that read nothing but pop an X
for i in range(int(math.log2(k))):
new_state = 'END_' + str(i + 1)
states.add(new_state)
one_less = 'END_' + str(i)
transitions[(new_state, EMPTY_STRING, 'X')] = [(one_less, EMPTY_STRING)]
# 1-transitions that pop Z (stack bottom) from stack iff reading 100000... would leave PDA at -0 with empty stack
b = math.floor(math.log2(k)) + 1
pda_states = {(start_state,
start_stack)} # working backwards from the state we want to get to, simulating reading last 1 plus leading zeros
for letter in ('0' * b + '1'):
temp = set()
for (state, stack) in pda_states:
# for all (q, S) such that ((state, stack_top) in transitions[(q, letter, S)])
# temp.add((q, S))
assert (len(stack) > 0)
for (q, let, S) in transitions:
if let == letter:
destinations = transitions[(q, letter, S)]
if (state, stack[-1]) in destinations: # no push or pop
new_stack = stack[:-1] + S
temp.add((q, new_stack))
if (state, EMPTY_STRING) in destinations: # pop
new_stack = stack + S
temp.add((q, new_stack))
if (len(stack) > 1) and ((state, stack[-2] + 'X') in destinations): # push
new_stack = stack[:-2] + S
temp.add((q, new_stack))
pda_states = temp
for (state, stack) in pda_states:
assert (len(stack) > 0)
stack_top = stack[-1]
required_pops = len(stack) - 1
# Add transition (to transitions) from $state to END by popping $stack_height
transitions[(state, '1', stack_top)].append(('END_' + str(required_pops), stack_top))
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
# Create a PDA to accept all 3-flimsy binary numbers
def create_2_flimsy_ternary_PDA():
states = {'-0', '-1', '+1', '+0', 'END_0', 'END_1'}
alphabet = {'0', '1', '2', ''}
stack_alphabet = {'Z', 'X'}
start_state = '-0'
start_stack = 'Z'
transitions = {
('-0', '0', 'Z'): [('-0', 'Z')],
('-0', '0', 'X'): [('-0', 'X')],
('-0', '1', 'Z'): [('-0', 'XZ')],
('-0', '1', 'X'): [('-0', 'XX')],
('-0', '2', 'Z'): [('+1', 'Z')],
('-0', '2', 'X'): [('-1', '')],
('-1', '0', 'Z'): [('-0', 'XZ')],
('-1', '0', 'X'): [('-0', 'XX')],
('-1', '1', 'Z'): [('+1', 'Z')],
('-1', '1', 'X'): [('-1', '')],
('-1', '2', 'Z'): [('-1', 'Z')],
('-1', '2', 'X'): [('-1', 'X')],
('+0', '0', 'Z'): [('+0', 'Z')],
('+0', '0', 'X'): [('+0', 'X')],
('+0', '1', 'Z'): [('-0', 'Z')],
('+0', '1', 'X'): [('+0', ''), ('END_1', 'X')],
('+0', '2', 'Z'): [('+1', 'XZ'), ('END_0', 'Z')],
('+0', '2', 'X'): [('+1', 'XX'), ('END_0', 'X')],
('+1', '0', 'Z'): [('-0', 'Z')],
('+1', '0', 'X'): [('+0', '')],
('+1', '1', 'Z'): [('+1', 'XZ'), ('END_0', 'Z')],
('+1', '1', 'X'): [('+1', 'XX'), ('END_0', 'X')],
('+1', '2', 'Z'): [('+1', 'Z')],
('+1', '2', 'X'): [('+1', 'X'), ('END_1', 'X')],
('END_1', '', 'X'): [('END_0', '')],
('END_0', '', 'Z'): [('END_0', '')],
('END_0', '', 'X'): [('END_0', '')]
}
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
def _char_to_int(c): # Get integer from generalized ASCII number
return ord(c) - ord('0')
def _int_to_char(i): # Get ASCII character for given number
return chr(ord('0')+i)
def _create_flimsy_transitions(states: set, transitions: dict, stack_change: int, old_carry: str, new_carry: str, read_char: str):
if stack_change == 0:
transitions[('-' + old_carry, read_char, 'Z')] = [('-' + new_carry, 'Z')]
transitions[('-' + old_carry, read_char, 'X')] = [('-' + new_carry, 'X')]
transitions[('+' + old_carry, read_char, 'Z')] = [('+' + new_carry, 'Z')]
transitions[('+' + old_carry, read_char, 'X')] = [('+' + new_carry, 'X')]
elif stack_change == 1:
transitions[('+' + old_carry, read_char, 'Z')] = [('+' + new_carry, 'XZ')]
transitions[('+' + old_carry, read_char, 'X')] = [('+' + new_carry, 'XX')]
transitions[('-' + old_carry, read_char, 'Z')] = [('+' + new_carry, 'Z')]
transitions[('-' + old_carry, read_char, 'X')] = [('-' + new_carry, EMPTY_STRING)]
elif stack_change == -1:
transitions[('-' + old_carry, read_char, 'Z')] = [('-' + new_carry, 'XZ')]
transitions[('-' + old_carry, read_char, 'X')] = [('-' + new_carry, 'XX')]
transitions[('+' + old_carry, read_char, 'Z')] = [('-' + new_carry, 'Z')]
transitions[('+' + old_carry, read_char, 'X')] = [('+' + new_carry, EMPTY_STRING)]
elif stack_change > 1:
current_state_plus = '+'+old_carry
current_state_minus = '-'+old_carry
while stack_change > 1:
stack_change -= 1
intermediate_state_plus = 'push_'+str(stack_change)+'_to_+'+new_carry
intermediate_state_minus = 'pop_'+str(stack_change)+'_to_-'+new_carry
transitions[(current_state_plus, read_char, 'Z')] = [(intermediate_state_plus, 'XZ')]
transitions[(current_state_plus, read_char, 'X')] = [(intermediate_state_plus, 'XX')]
transitions[(current_state_minus, read_char, 'Z')] = [(intermediate_state_plus, 'Z')]
transitions[(current_state_minus, read_char, 'X')] = [(intermediate_state_minus, EMPTY_STRING)]
if intermediate_state_plus in states and intermediate_state_minus in states:
return
states.add(intermediate_state_plus)
states.add(intermediate_state_minus)
current_state_plus = intermediate_state_plus
current_state_minus = intermediate_state_minus
read_char = EMPTY_STRING
final_state_plus = '+'+new_carry
final_state_minus = '-'+new_carry
transitions[(current_state_plus, read_char, 'Z')] = [(final_state_plus, 'XZ')]
transitions[(current_state_plus, read_char, 'X')] = [(final_state_plus, 'XX')]
transitions[(current_state_minus, read_char, 'Z')] = [(final_state_plus, 'Z')]
transitions[(current_state_minus, read_char, 'X')] = [(final_state_minus, EMPTY_STRING)]
elif stack_change < -1:
current_state_plus = '+' + old_carry
current_state_minus = '-' + old_carry
while stack_change < -1:
stack_change += 1
intermediate_state_plus = 'pop_' + str(-stack_change) + '_to_+' + new_carry
intermediate_state_minus = 'push_' + str(-stack_change) + '_to_-' + new_carry
transitions[(current_state_minus, read_char, 'Z')] = [(intermediate_state_minus, 'XZ')]
transitions[(current_state_minus, read_char, 'X')] = [(intermediate_state_minus, 'XX')]
transitions[(current_state_plus, read_char, 'Z')] = [(intermediate_state_minus, 'Z')]
transitions[(current_state_plus, read_char, 'X')] = [(intermediate_state_plus, EMPTY_STRING)]
if intermediate_state_plus in states and intermediate_state_minus in states:
return
states.add(intermediate_state_plus)
states.add(intermediate_state_minus)
current_state_plus = intermediate_state_plus
current_state_minus = intermediate_state_minus
read_char = EMPTY_STRING
final_state_plus = '+'+new_carry
final_state_minus = '-'+new_carry
transitions[(current_state_minus, read_char, 'Z')] = [(final_state_minus, 'XZ')]
transitions[(current_state_minus, read_char, 'X')] = [(final_state_minus, 'XX')]
transitions[(current_state_plus, read_char, 'Z')] = [(final_state_minus, 'Z')]
transitions[(current_state_plus, read_char, 'X')] = [(final_state_plus, EMPTY_STRING)]
# Create a PDA to accept all k-flimsy binary numbers
def create_base_b_k_flimsy_PDA(b, k):
assert (type(k) == int) and (type(b) == int) and (k >= 1) and (b > 1)
while k % b == 0:
k //= b
states = {'END'}
alphabet = {EMPTY_STRING}
for i in range(b):
alphabet.add(_int_to_char(i))
stack_alphabet = {'Z', 'X'}
start_state = '-0'
start_stack = 'Z'
transitions = {('END', EMPTY_STRING, 'Z'): [('END', EMPTY_STRING)],
('END', EMPTY_STRING, 'X'): [('END', EMPTY_STRING)]}
# Add END states, to pop at least/most i X's off the stack before reaching END
# Transitions from END_{i+1} to END_{i} that read nothing but pop an X
states.add('pop_at_most_0_to_END')
transitions[('pop_at_most_0_to_END', EMPTY_STRING, 'Z')] = [('END', EMPTY_STRING)]
for i in range(int(2*(b-1)*math.log(k+1, b) + 0.01)+1): # TODO: confirm this
new_state = 'pop_at_least_' + str(i + 1) + '_to_END'
one_less = 'END' if i == 0 else ('pop_at_least_' + str(i) + '_to_END')
states.add(new_state)
transitions[(new_state, EMPTY_STRING, 'X')] = [(one_less, EMPTY_STRING)]
new_state = 'pop_at_most_' + str(i + 1) + '_to_END'
one_less = 'pop_at_most_' + str(i) + '_to_END'
states.add(new_state)
transitions[(new_state, EMPTY_STRING, 'Z')] = [('END', EMPTY_STRING)]
transitions[(new_state, EMPTY_STRING, 'X')] = [(one_less, EMPTY_STRING)]
# Add main states (+/- carry)
# Transitions between those states based on reading non-final input chars
for carry in range(k):
s = _int_to_char(carry)
states.add('-' + s)
states.add('+' + s)
for si in alphabet:
if si != EMPTY_STRING:
i = _char_to_int(si)
added = i * k + carry
new_kn_digit = added % b
new_carry = _int_to_char(added // b)
stack_change = i - new_kn_digit # if positive, push on + state and pop on - state; else vice versa
_create_flimsy_transitions(states, transitions, stack_change, s, new_carry, si)
# nonzero-transitions that pop nothing from final (sign, carry) states to END_i state
for carry in range(k):
for read_char in alphabet:
if read_char != EMPTY_STRING and read_char != '0':
read_digit = _char_to_int(read_char)
plus_state = '+' + _int_to_char(carry)
min_required_pops = s_b(k*read_digit + carry, b) - read_digit
if min_required_pops > 0:
transitions[(plus_state, read_char, 'X')].append(('pop_at_least_' + str(min_required_pops) + '_to_END', 'X'))
if not ('pop_at_least_' + str(min_required_pops) + '_to_END') in states:
print("MISSING POP AT LEAST", min_required_pops, "state")
assert False
else:
transitions[(plus_state, read_char, 'Z')].append(('END', 'Z'))
transitions[(plus_state, read_char, 'X')].append(('END', 'X'))
minus_state = '-' + _int_to_char(carry)
max_required_pops = read_digit - s_b(k*read_digit + carry, b) - 1
if max_required_pops >= 0:
transitions[(minus_state, read_char, 'Z')].append(('END', 'Z'))
for i in range(1, max_required_pops + 1):
transitions[(minus_state, read_char, 'X')].append(('pop_at_most_' + str(i) + '_to_END', 'X'))
if not ('pop_at_most_' + str(i) + '_to_END') in states:
print("MISSING POP AT MOST", min_required_pops, "state")
assert False
return PDA(states, alphabet, stack_alphabet, start_state, start_stack, transitions)
| [
"PDA.PDA",
"math.log2",
"math.log"
] | [((1425, 1501), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (1428, 1501), False, 'from PDA import PDA\n'), ((2196, 2272), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (2199, 2272), False, 'from PDA import PDA\n'), ((3018, 3094), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (3021, 3094), False, 'from PDA import PDA\n'), ((3531, 3607), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (3534, 3607), False, 'from PDA import PDA\n'), ((4088, 4164), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (4091, 4164), False, 'from PDA import PDA\n'), ((4534, 4610), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (4537, 4610), False, 'from PDA import PDA\n'), ((5364, 5440), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (5367, 5440), False, 'from PDA import PDA\n'), ((6842, 6918), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (6845, 6918), False, 
'from PDA import PDA\n'), ((8353, 8429), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (8356, 8429), False, 'from PDA import PDA\n'), ((9690, 9766), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (9693, 9766), False, 'from PDA import PDA\n'), ((11843, 11919), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (11846, 11919), False, 'from PDA import PDA\n'), ((14606, 14682), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (14609, 14682), False, 'from PDA import PDA\n'), ((18494, 18570), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (18497, 18570), False, 'from PDA import PDA\n'), ((20084, 20160), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (20087, 20160), False, 'from PDA import PDA\n'), ((28447, 28523), 'PDA.PDA', 'PDA', (['states', 'alphabet', 'stack_alphabet', 'start_state', 'start_stack', 'transitions'], {}), '(states, alphabet, stack_alphabet, start_state, start_stack, transitions)\n', (28450, 28523), False, 'from PDA import PDA\n'), ((13989, 14001), 'math.log2', 'math.log2', (['k'], {}), '(k)\n', (13998, 14001), False, 'import math\n'), ((16674, 16686), 'math.log2', 'math.log2', (['k'], {}), '(k)\n', (16683, 16686), 
False, 'import math\n'), ((17014, 17026), 'math.log2', 'math.log2', (['k'], {}), '(k)\n', (17023, 17026), False, 'import math\n'), ((25550, 25568), 'math.log', 'math.log', (['(k + 1)', 'b'], {}), '(k + 1, b)\n', (25558, 25568), False, 'import math\n')] |
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
class CustomUser(AbstractUser):
date_of_birth = models.DateField(blank=True, null=True) | [
"django.db.models.DateField"
] | [((161, 200), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (177, 200), False, 'from django.db import models\n')] |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the accounts end-point.
Test-Suite to ensure that the /accounts endpoint is working as expected.
"""
import json
from datetime import timedelta
from pay_api.models import BcolPaymentAccount
from pay_api.models.payment import Payment
from pay_api.models.payment_account import PaymentAccount
from pay_api.utils.enums import StatementFrequency
from pay_api.utils.util import current_local_time, get_first_and_last_dates_of_month, get_week_start_and_end_date
from tests.utilities.base_test import (
get_claims, token_header, get_payment_request)
def test_get_default_statement_settings_weekly(session, client, jwt, app):
"""Assert that the default statement setting is weekly."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
rv = client.post('/api/v1/payment-requests', data=json.dumps(get_payment_request(business_identifier='CP0002000')),
headers=headers)
payment: Payment = Payment.find_by_id(rv.json.get('id'))
bcol_account: BcolPaymentAccount = BcolPaymentAccount.find_by_id(payment.invoices[0].bcol_account_id)
pay_account: PaymentAccount = PaymentAccount.find_by_id(bcol_account.account_id)
rv = client.get(f'/api/v1/accounts/{pay_account.auth_account_id}/statements/settings',
headers=headers)
assert rv.status_code == 200
assert rv.json.get('frequency') == StatementFrequency.WEEKLY.value
def test_post_default_statement_settings_daily(session, client, jwt, app):
"""Assert that the post endpoint works."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
rv = client.post('/api/v1/payment-requests', data=json.dumps(get_payment_request(business_identifier='CP0002000')),
headers=headers)
payment: Payment = Payment.find_by_id(rv.json.get('id'))
bcol_account: BcolPaymentAccount = BcolPaymentAccount.find_by_id(payment.invoices[0].bcol_account_id)
pay_account: PaymentAccount = PaymentAccount.find_by_id(bcol_account.account_id)
rv = client.get(f'/api/v1/accounts/{pay_account.auth_account_id}/statements/settings', data=json.dumps({}),
headers=headers)
assert rv.status_code == 200
assert rv.json.get('frequency') == StatementFrequency.WEEKLY.value
# Set the frequency to Daily and assert
daily_frequency = {'frequency': 'DAILY'}
rv = client.post(f'/api/v1/accounts/{pay_account.auth_account_id}/statements/settings',
data=json.dumps(daily_frequency),
headers=headers)
assert rv.json.get('frequency') == StatementFrequency.DAILY.value
end_date = get_week_start_and_end_date()[1]
assert rv.json.get('fromDate') == (end_date + timedelta(days=1)).strftime('%Y-%m-%d')
# Set the frequency to Monthly and assert
daily_frequency = {'frequency': 'MONTHLY'}
rv = client.post(f'/api/v1/accounts/{pay_account.auth_account_id}/statements/settings',
data=json.dumps(daily_frequency),
headers=headers)
end_date = get_first_and_last_dates_of_month(current_local_time().month, current_local_time().year)[1]
assert rv.json.get('frequency') == StatementFrequency.MONTHLY.value
assert rv.json.get('fromDate') == (end_date + timedelta(days=1)).strftime('%Y-%m-%d')
# Get the latest frequency
rv = client.get(f'/api/v1/accounts/{pay_account.auth_account_id}/statements/settings', data=json.dumps({}),
headers=headers)
assert rv.status_code == 200
assert rv.json.get('frequency') == StatementFrequency.MONTHLY.value
| [
"tests.utilities.base_test.get_payment_request",
"json.dumps",
"pay_api.utils.util.current_local_time",
"tests.utilities.base_test.get_claims",
"pay_api.utils.util.get_week_start_and_end_date",
"datetime.timedelta",
"pay_api.models.BcolPaymentAccount.find_by_id",
"pay_api.models.payment_account.Paymen... | [((1706, 1772), 'pay_api.models.BcolPaymentAccount.find_by_id', 'BcolPaymentAccount.find_by_id', (['payment.invoices[0].bcol_account_id'], {}), '(payment.invoices[0].bcol_account_id)\n', (1735, 1772), False, 'from pay_api.models import BcolPaymentAccount\n'), ((1807, 1857), 'pay_api.models.payment_account.PaymentAccount.find_by_id', 'PaymentAccount.find_by_id', (['bcol_account.account_id'], {}), '(bcol_account.account_id)\n', (1832, 1857), False, 'from pay_api.models.payment_account import PaymentAccount\n'), ((2616, 2682), 'pay_api.models.BcolPaymentAccount.find_by_id', 'BcolPaymentAccount.find_by_id', (['payment.invoices[0].bcol_account_id'], {}), '(payment.invoices[0].bcol_account_id)\n', (2645, 2682), False, 'from pay_api.models import BcolPaymentAccount\n'), ((2717, 2767), 'pay_api.models.payment_account.PaymentAccount.find_by_id', 'PaymentAccount.find_by_id', (['bcol_account.account_id'], {}), '(bcol_account.account_id)\n', (2742, 2767), False, 'from pay_api.models.payment_account import PaymentAccount\n'), ((1332, 1344), 'tests.utilities.base_test.get_claims', 'get_claims', ([], {}), '()\n', (1342, 1344), False, 'from tests.utilities.base_test import get_claims, token_header, get_payment_request\n'), ((2241, 2253), 'tests.utilities.base_test.get_claims', 'get_claims', ([], {}), '()\n', (2251, 2253), False, 'from tests.utilities.base_test import get_claims, token_header, get_payment_request\n'), ((3382, 3411), 'pay_api.utils.util.get_week_start_and_end_date', 'get_week_start_and_end_date', ([], {}), '()\n', (3409, 3411), False, 'from pay_api.utils.util import current_local_time, get_first_and_last_dates_of_month, get_week_start_and_end_date\n'), ((2865, 2879), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (2875, 2879), False, 'import json\n'), ((3230, 3257), 'json.dumps', 'json.dumps', (['daily_frequency'], {}), '(daily_frequency)\n', (3240, 3257), False, 'import json\n'), ((3717, 3744), 'json.dumps', 
'json.dumps', (['daily_frequency'], {}), '(daily_frequency)\n', (3727, 3744), False, 'import json\n'), ((4181, 4195), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (4191, 4195), False, 'import json\n'), ((1512, 1564), 'tests.utilities.base_test.get_payment_request', 'get_payment_request', ([], {'business_identifier': '"""CP0002000"""'}), "(business_identifier='CP0002000')\n", (1531, 1564), False, 'from tests.utilities.base_test import get_claims, token_header, get_payment_request\n'), ((2422, 2474), 'tests.utilities.base_test.get_payment_request', 'get_payment_request', ([], {'business_identifier': '"""CP0002000"""'}), "(business_identifier='CP0002000')\n", (2441, 2474), False, 'from tests.utilities.base_test import get_claims, token_header, get_payment_request\n'), ((3833, 3853), 'pay_api.utils.util.current_local_time', 'current_local_time', ([], {}), '()\n', (3851, 3853), False, 'from pay_api.utils.util import current_local_time, get_first_and_last_dates_of_month, get_week_start_and_end_date\n'), ((3861, 3881), 'pay_api.utils.util.current_local_time', 'current_local_time', ([], {}), '()\n', (3879, 3881), False, 'from pay_api.utils.util import current_local_time, get_first_and_last_dates_of_month, get_week_start_and_end_date\n'), ((3465, 3482), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3474, 3482), False, 'from datetime import timedelta\n'), ((4013, 4030), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4022, 4030), False, 'from datetime import timedelta\n')] |
# -*- coding: utf-8 -*-
import os
import pandas as pd
from progressbar import Bar, ETA, FileTransferSpeed, ProgressBar, Percentage, RotatingMarker
from six.moves.urllib.request import urlretrieve
def load_datasets(path=os.path.join(os.path.dirname(__file__), 'datasets.csv')):
datasets = pd.read_csv(path)
return datasets
def download(number=-1, name="", save_dir='./'):
"""Download pre-trained word vector
:param number: integer, default ``None``
:param save_dir: str, default './'
:return: file path for downloaded file
"""
df = load_datasets()
if number > -1:
row = df.iloc[[number]]
elif name:
row = df.loc[df["Name"] == name]
url = ''.join(row.URL)
if not url:
print('The word vector you specified was not found. Please specify correct name.')
widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()), ' ', ETA(), ' ', FileTransferSpeed()]
pbar = ProgressBar(widgets=widgets)
def dlProgress(count, blockSize, totalSize):
if pbar.max_value is None:
pbar.max_value = totalSize
pbar.start()
pbar.update(min(count * blockSize, totalSize))
file_name = url.split('/')[-1]
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_path = os.path.join(save_dir, file_name)
path, _ = urlretrieve(url, save_path, reporthook=dlProgress)
pbar.finish()
return path
def search(lang=''):
"""Search pre-trained word vectors by their language
:param lang: str, default ''
:return: None
print search result as pandas DataFrame
"""
df = load_datasets()
if lang == '':
print(df[['Name', 'Dimension', 'Corpus', 'VocabularySize', 'Method', 'Language', 'Author']])
else:
rows = df[df.Language==lang]
print(rows[['Name', 'Dimension', 'Corpus', 'VocabularySize', 'Method', 'Language', 'Author']])
| [
"os.path.exists",
"pandas.read_csv",
"os.makedirs",
"os.path.join",
"progressbar.RotatingMarker",
"os.path.dirname",
"progressbar.Percentage",
"six.moves.urllib.request.urlretrieve",
"progressbar.ETA",
"progressbar.FileTransferSpeed",
"progressbar.ProgressBar"
] | [((295, 312), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (306, 312), True, 'import pandas as pd\n'), ((952, 980), 'progressbar.ProgressBar', 'ProgressBar', ([], {'widgets': 'widgets'}), '(widgets=widgets)\n', (963, 980), False, 'from progressbar import Bar, ETA, FileTransferSpeed, ProgressBar, Percentage, RotatingMarker\n'), ((1305, 1338), 'os.path.join', 'os.path.join', (['save_dir', 'file_name'], {}), '(save_dir, file_name)\n', (1317, 1338), False, 'import os\n'), ((1353, 1403), 'six.moves.urllib.request.urlretrieve', 'urlretrieve', (['url', 'save_path'], {'reporthook': 'dlProgress'}), '(url, save_path, reporthook=dlProgress)\n', (1364, 1403), False, 'from six.moves.urllib.request import urlretrieve\n'), ((235, 260), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (250, 260), False, 'import os\n'), ((854, 866), 'progressbar.Percentage', 'Percentage', ([], {}), '()\n', (864, 866), False, 'from progressbar import Bar, ETA, FileTransferSpeed, ProgressBar, Percentage, RotatingMarker\n'), ((908, 913), 'progressbar.ETA', 'ETA', ([], {}), '()\n', (911, 913), False, 'from progressbar import Bar, ETA, FileTransferSpeed, ProgressBar, Percentage, RotatingMarker\n'), ((920, 939), 'progressbar.FileTransferSpeed', 'FileTransferSpeed', ([], {}), '()\n', (937, 939), False, 'from progressbar import Bar, ETA, FileTransferSpeed, ProgressBar, Percentage, RotatingMarker\n'), ((1233, 1257), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1247, 1257), False, 'import os\n'), ((1267, 1288), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1278, 1288), False, 'import os\n'), ((884, 900), 'progressbar.RotatingMarker', 'RotatingMarker', ([], {}), '()\n', (898, 900), False, 'from progressbar import Bar, ETA, FileTransferSpeed, ProgressBar, Percentage, RotatingMarker\n')] |
import requests
from flask import (
Blueprint,
session,
current_app,
abort,
jsonify,
make_response,
redirect,
request,
url_for,
render_template,
g,
)
import urllib.parse as urlparse
bp = Blueprint("auth", __name__)
from app.helpers import req_helper
from app.helpers import graph_api_helper
from app.helpers import session_helper
from app.helpers import oauth_helper
from app.helpers import google_api_helper
from app.models.util import user as user_util
from app.models.util import token as token_util
@bp.route("/login-notice")
def login_notice():
return render_template(
"card.html",
title="Login Notice!",
cards=[
{
"title": "You must be logged in to do this!",
"text": [
"Please Log In and try again.",
"By logging in you agree to have cookies stored in your computer and for us to store basic personal information.",
],
"icon": '<i class="fas fa-sign-in-alt"></i>',
"link": {"text": "Click Here to Log In", "href": url_for("auth.login")},
}
],
)
@bp.route("/login")
@session_helper.load_user_if_logged_in
def login():
if g.user:
return redirect("/")
else:
return render_template("login.html", title="Login", script="login.js")
@bp.route("/logout", methods=["GET", "POST"])
@session_helper.enforce_validate_token
def logout():
token = session_helper.retirieve_token()
session_helper.destroy_session()
token_util.destroy_token(token)
if request.method == "GET":
return redirect("/")
elif request.method == "POST":
return jsonify(error=0, message="You logged out!")
@bp.route("/msft/callback")
def msft_callback():
code = request.args.get("code")
error = request.args.get("error")
if error:
# TODO: render something here?
return "Login Error"
oauth_client = oauth_helper.get_oauth_client_msft()
if not code:
# Make a Msft oauth client and redirect to auth url
return redirect(oauth_client.get_authorization_url())
else:
try:
access_token = oauth_client.request_token(code=code)
except:
abort(make_response(jsonify(message="Invalid code!"), 401))
# Retrieve user data from graph API
user_data = graph_api_helper.get_user_data(access_token)
# TODO: revoke token
# Makes or gets user with received data
user = user_util.get_or_create_user(
name=user_data["name"], email=user_data["email"]
)
# Create session tied to this user
session_helper.make_session(user=user)
return redirect("/")
@bp.route("/google/callback")
def google_callback():
code = request.args.get("code")
error = request.args.get("error")
if error:
# TODO: render something here?
return "Login Error"
oauth_client = oauth_helper.get_oauth_client_google()
if not code:
# redirect to auth url
return redirect(oauth_client.get_authorization_url())
else:
try:
access_token = oauth_client.request_token(code=code)
# Retrieve user data from graph API
except:
abort(make_response(jsonify(message="Invalid code!"), 401))
user_data = google_api_helper.get_user_data(access_token)
# TODO: revoke token
# Makes or gets user with received data
user = user_util.get_or_create_user(
name=user_data["name"], email=user_data["email"]
)
# Create session tied to this user
session_helper.make_session(user=user)
return redirect("/")
| [
"flask.render_template",
"flask.request.args.get",
"app.helpers.google_api_helper.get_user_data",
"app.helpers.oauth_helper.get_oauth_client_msft",
"app.models.util.token.destroy_token",
"app.models.util.user.get_or_create_user",
"flask.redirect",
"app.helpers.session_helper.retirieve_token",
"app.h... | [((233, 260), 'flask.Blueprint', 'Blueprint', (['"""auth"""', '__name__'], {}), "('auth', __name__)\n", (242, 260), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n'), ((1506, 1538), 'app.helpers.session_helper.retirieve_token', 'session_helper.retirieve_token', ([], {}), '()\n', (1536, 1538), False, 'from app.helpers import session_helper\n'), ((1543, 1575), 'app.helpers.session_helper.destroy_session', 'session_helper.destroy_session', ([], {}), '()\n', (1573, 1575), False, 'from app.helpers import session_helper\n'), ((1580, 1611), 'app.models.util.token.destroy_token', 'token_util.destroy_token', (['token'], {}), '(token)\n', (1604, 1611), True, 'from app.models.util import token as token_util\n'), ((1829, 1853), 'flask.request.args.get', 'request.args.get', (['"""code"""'], {}), "('code')\n", (1845, 1853), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n'), ((1866, 1891), 'flask.request.args.get', 'request.args.get', (['"""error"""'], {}), "('error')\n", (1882, 1891), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n'), ((1993, 2029), 'app.helpers.oauth_helper.get_oauth_client_msft', 'oauth_helper.get_oauth_client_msft', ([], {}), '()\n', (2027, 2029), False, 'from app.helpers import oauth_helper\n'), ((2835, 2859), 'flask.request.args.get', 'request.args.get', (['"""code"""'], {}), "('code')\n", (2851, 2859), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n'), ((2872, 2897), 'flask.request.args.get', 'request.args.get', (['"""error"""'], {}), "('error')\n", (2888, 2897), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, 
render_template, g\n'), ((2999, 3037), 'app.helpers.oauth_helper.get_oauth_client_google', 'oauth_helper.get_oauth_client_google', ([], {}), '()\n', (3035, 3037), False, 'from app.helpers import oauth_helper\n'), ((1290, 1303), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (1298, 1303), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n'), ((1329, 1392), 'flask.render_template', 'render_template', (['"""login.html"""'], {'title': '"""Login"""', 'script': '"""login.js"""'}), "('login.html', title='Login', script='login.js')\n", (1344, 1392), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n'), ((1659, 1672), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (1667, 1672), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n'), ((2410, 2454), 'app.helpers.graph_api_helper.get_user_data', 'graph_api_helper.get_user_data', (['access_token'], {}), '(access_token)\n', (2440, 2454), False, 'from app.helpers import graph_api_helper\n'), ((2548, 2626), 'app.models.util.user.get_or_create_user', 'user_util.get_or_create_user', ([], {'name': "user_data['name']", 'email': "user_data['email']"}), "(name=user_data['name'], email=user_data['email'])\n", (2576, 2626), True, 'from app.models.util import user as user_util\n'), ((2701, 2739), 'app.helpers.session_helper.make_session', 'session_helper.make_session', ([], {'user': 'user'}), '(user=user)\n', (2728, 2739), False, 'from app.helpers import session_helper\n'), ((2755, 2768), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (2763, 2768), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n'), ((3393, 3438), 
'app.helpers.google_api_helper.get_user_data', 'google_api_helper.get_user_data', (['access_token'], {}), '(access_token)\n', (3424, 3438), False, 'from app.helpers import google_api_helper\n'), ((3532, 3610), 'app.models.util.user.get_or_create_user', 'user_util.get_or_create_user', ([], {'name': "user_data['name']", 'email': "user_data['email']"}), "(name=user_data['name'], email=user_data['email'])\n", (3560, 3610), True, 'from app.models.util import user as user_util\n'), ((3685, 3723), 'app.helpers.session_helper.make_session', 'session_helper.make_session', ([], {'user': 'user'}), '(user=user)\n', (3712, 3723), False, 'from app.helpers import session_helper\n'), ((3739, 3752), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (3747, 3752), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n'), ((1723, 1766), 'flask.jsonify', 'jsonify', ([], {'error': '(0)', 'message': '"""You logged out!"""'}), "(error=0, message='You logged out!')\n", (1730, 1766), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n'), ((1131, 1152), 'flask.url_for', 'url_for', (['"""auth.login"""'], {}), "('auth.login')\n", (1138, 1152), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n'), ((2305, 2337), 'flask.jsonify', 'jsonify', ([], {'message': '"""Invalid code!"""'}), "(message='Invalid code!')\n", (2312, 2337), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n'), ((3332, 3364), 'flask.jsonify', 'jsonify', ([], {'message': '"""Invalid code!"""'}), "(message='Invalid code!')\n", (3339, 3364), False, 'from flask import Blueprint, session, current_app, abort, jsonify, make_response, redirect, request, url_for, render_template, g\n')] 
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import math
from common.python.utils import log_utils
from kernel.components.binning.vertfeaturebinning.base_feature_binning import BaseVertFeatureBinning
from kernel.components.featurecalculation.base import calculation_info_sync
from kernel.components.featurecalculation.base.filter_base import BaseFilterMethod
from kernel.components.featurecalculation.base.iv_value_calculate_filter import fit_iv_values
from kernel.components.featurecalculation.param import IVPercentileCalculationParam
from kernel.protobuf.generated import feature_calculation_meta_pb2
from kernel.utils import consts
LOGGER = log_utils.get_logger()
class IVPercentileFilter(BaseFilterMethod, metaclass=abc.ABCMeta):
"""
filter the columns if iv value is less than a percentile threshold
"""
def __init__(self, filter_param):
super().__init__(filter_param)
self.transfer_variable = None
self.binning_obj: BaseVertFeatureBinning = None
self.local_only = False
self.sync_obj = None
def set_transfer_variable(self, transfer_variable):
self.transfer_variable = transfer_variable
self.sync_obj.register_calculation_trans_vars(transfer_variable)
def _parse_filter_param(self, filter_param):
self.percentile_threshold = filter_param.percentile_threshold
self.local_only = filter_param.local_only
def set_binning_obj(self, binning_model):
if binning_model is None:
raise ValueError("To use iv filter, binning module should be called and setup in 'isomatric_model'"
" input for feature calculation.")
self.binning_obj = binning_model
class Promoter(IVPercentileFilter):
def __init__(self, filter_param: IVPercentileCalculationParam):
super().__init__(filter_param)
self.provider_calculation_properties = []
self.sync_obj = calculation_info_sync.Promoter()
def fit(self, data_instances, suffix):
if not self.local_only:
self.provider_calculation_properties = self.sync_obj.sync_calculate_cols(suffix=suffix)
value_threshold = self.get_value_threshold()
self.calculation_properties = fit_iv_values(self.binning_obj.binning_obj,
value_threshold,
self.calculation_properties)
if not self.local_only:
for provider_id, provider_binning_obj in enumerate(self.binning_obj.provider_results):
fit_iv_values(provider_binning_obj,
value_threshold,
self.provider_calculation_properties[provider_id])
self.sync_obj.sync_calculate_results(self.provider_calculation_properties, suffix=suffix)
return self
def get_value_threshold(self):
total_values = []
for col_name, col_results in self.binning_obj.binning_obj.bin_results.all_cols_results.items():
if col_name in self.calculation_properties.calculate_col_names:
total_values.append(col_results.iv)
if not self.local_only:
LOGGER.debug("provider_results: {}, provider_calculation_properties: {}".format(
self.binning_obj.provider_results, self.provider_calculation_properties
))
for provider_id, provider_binning_obj in enumerate(self.binning_obj.provider_results):
provider_calculate_param = self.provider_calculation_properties[provider_id]
for col_name, col_results in provider_binning_obj.bin_results.all_cols_results.items():
if col_name in provider_calculate_param.calculate_col_names:
total_values.append(col_results.iv)
sorted_value = sorted(total_values, reverse=True)
thres_idx = int(math.floor(self.percentile_threshold * len(sorted_value) - consts.FLOAT_ZERO))
return sorted_value[thres_idx]
def get_meta_obj(self, meta_dicts):
result = feature_calculation_meta_pb2.IVPercentileCalculationMeta(
percentile_threshold=self.percentile_threshold,
local_only=self.local_only)
meta_dicts['iv_percentile_meta'] = result
return meta_dicts
class Provider(IVPercentileFilter):
def __init__(self, filter_param: IVPercentileCalculationParam):
super().__init__(filter_param)
self.sync_obj = calculation_info_sync.Provider()
def _parse_filter_param(self, filter_param):
self.local_only = False
def fit(self, data_instances, suffix):
encoded_names = self.binning_obj.bin_inner_param.encode_col_name_list(
self.calculation_properties.calculate_col_names)
self.sync_obj.sync_calculate_cols(encoded_names, suffix=suffix)
self.sync_obj.sync_calculate_results(self.calculation_properties,
decode_func=self.binning_obj.bin_inner_param.decode_col_name,
suffix=suffix)
return self
def get_meta_obj(self, meta_dicts):
result = feature_calculation_meta_pb2.IVPercentileCalculationMeta(local_only=self.local_only)
meta_dicts['iv_percentile_meta'] = result
return meta_dicts
| [
"kernel.components.featurecalculation.base.calculation_info_sync.Promoter",
"kernel.protobuf.generated.feature_calculation_meta_pb2.IVPercentileCalculationMeta",
"common.python.utils.log_utils.get_logger",
"kernel.components.featurecalculation.base.calculation_info_sync.Provider",
"kernel.components.feature... | [((1871, 1893), 'common.python.utils.log_utils.get_logger', 'log_utils.get_logger', ([], {}), '()\n', (1891, 1893), False, 'from common.python.utils import log_utils\n'), ((3151, 3183), 'kernel.components.featurecalculation.base.calculation_info_sync.Promoter', 'calculation_info_sync.Promoter', ([], {}), '()\n', (3181, 3183), False, 'from kernel.components.featurecalculation.base import calculation_info_sync\n'), ((3452, 3546), 'kernel.components.featurecalculation.base.iv_value_calculate_filter.fit_iv_values', 'fit_iv_values', (['self.binning_obj.binning_obj', 'value_threshold', 'self.calculation_properties'], {}), '(self.binning_obj.binning_obj, value_threshold, self.\n calculation_properties)\n', (3465, 3546), False, 'from kernel.components.featurecalculation.base.iv_value_calculate_filter import fit_iv_values\n'), ((5299, 5436), 'kernel.protobuf.generated.feature_calculation_meta_pb2.IVPercentileCalculationMeta', 'feature_calculation_meta_pb2.IVPercentileCalculationMeta', ([], {'percentile_threshold': 'self.percentile_threshold', 'local_only': 'self.local_only'}), '(percentile_threshold\n =self.percentile_threshold, local_only=self.local_only)\n', (5355, 5436), False, 'from kernel.protobuf.generated import feature_calculation_meta_pb2\n'), ((5702, 5734), 'kernel.components.featurecalculation.base.calculation_info_sync.Provider', 'calculation_info_sync.Provider', ([], {}), '()\n', (5732, 5734), False, 'from kernel.components.featurecalculation.base import calculation_info_sync\n'), ((6392, 6481), 'kernel.protobuf.generated.feature_calculation_meta_pb2.IVPercentileCalculationMeta', 'feature_calculation_meta_pb2.IVPercentileCalculationMeta', ([], {'local_only': 'self.local_only'}), '(local_only=self.\n local_only)\n', (6448, 6481), False, 'from kernel.protobuf.generated import feature_calculation_meta_pb2\n'), ((3794, 3902), 'kernel.components.featurecalculation.base.iv_value_calculate_filter.fit_iv_values', 'fit_iv_values', 
(['provider_binning_obj', 'value_threshold', 'self.provider_calculation_properties[provider_id]'], {}), '(provider_binning_obj, value_threshold, self.\n provider_calculation_properties[provider_id])\n', (3807, 3902), False, 'from kernel.components.featurecalculation.base.iv_value_calculate_filter import fit_iv_values\n')] |
from tkinter import *
import tkinter.messagebox
from random import *
import datetime
now = datetime.datetime.now()
def dayandnight():
if now.strftime("%X") > '16:45:39':
return 'black'
elif now.strftime("%X") < '15:40:00':
return 'white'
def dayand():
if now.strftime("%X") > '16:45:39':
return 'white'
elif now.strftime("%X") < '15:40:00':
return 'black'
def newgui():
src = Tk()
src.title('')
src.geometry('400x360+100+200')
isi = open("times.txt","a")
isi.write("%s %s\n" % (now.strftime("%X"), str(hello.get())))
stms = open("dailytask","a")
time = "%b-%d/%m/%Y"
stms.write("%s %s\n" % (now.strftime(time), str(hello.get())))
Label(src, text = str(hello.get())).pack()
src.mainloop()
def shutdown():
try:
if tkinter.messagebox.askokcancel(title = "info",message = "are you sure to quit"):
testy.destroy()
except Exception as ex:
tkinter.messagebox.showinfo("Error", "cant find %s" % ex)
# create a app
testy = Tk()
# app title name
testy.title('Sticky Notes')
# label name
Label(testy, text = "hello").pack()
hello = Entry(testy)
hello.pack()
# button
Button(testy, text = "noteit", command = newgui).pack()
source = open("dailytask","r")
for each_item in source:
som = Tk()
(a,sdd) = each_item.split(' ',1)
som.title(str(a))
source = str(randint(700,1000))
soe = str(randint(400,600))
som.geometry(str('250x250+' + source + '+'+ soe))
#dayandnight.pack(side = 'left', padx = 10, pady = 10)
soap = Label(som, text = str(sdd),width = "10", height = "6", bg = dayandnight(), fg = dayand())
soap.pack(side = 'left', padx = 10, pady = 10)
x = datetime.datetime.now()
if x.strftime("%X") > '16:30:39':
som['bg']='black'
elif x.strftime("%X") < '15:40:00':
som['bg']='snow'
def kk():
source = str(randint(700,1000))
soe = str(randint(400,600))
som.geometry(str('250x250+' + source + '+'+ soe))
def ioto(file):
popss = open(file)
for kk in popss:
(s,sm) = kk.split(' ',1)
#print(kk)
#print(s)
x = str("%b-%d/%m/%Y")
smtsss = str(now.strftime(x))
#print(s.find(str(smtsss)))
y = s.find(smtsss)
if y == 0:
return ("you are already written some task")
else:
return ("write some task & be active")
#load for particular date
Label(testy, text = ioto("dailytask")).pack()
sou = StringVar()
sou.set(None)
testy.protocol("WM_DELETE_WINDOW",shutdown)
# full app
som.mainloop()
testy.mainloop()
| [
"datetime.datetime.now"
] | [((93, 116), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (114, 116), False, 'import datetime\n'), ((1612, 1635), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1633, 1635), False, 'import datetime\n')] |
import esphome.codegen as cg
from esphome.components import text_sensor
import esphome.config_validation as cv
from esphome.const import CONF_ICON, CONF_ID, ICON_TIMELAPSE
from . import ATORCH_DL24_COMPONENT_SCHEMA, CONF_ATORCH_DL24_ID
# This platform can only be used together with the atorch_dl24 hub component.
DEPENDENCIES = ["atorch_dl24"]
CODEOWNERS = ["@syssi"]
# Configuration key for the formatted-runtime text sensor.
CONF_RUNTIME_FORMATTED = "runtime_formatted"
# Every text-sensor key this platform knows how to register (see to_code).
TEXT_SENSORS = [
    CONF_RUNTIME_FORMATTED,
]
# Extend the shared hub schema with one optional text_sensor entry per key.
CONFIG_SCHEMA = ATORCH_DL24_COMPONENT_SCHEMA.extend(
    {
        cv.Optional(CONF_RUNTIME_FORMATTED): text_sensor.TEXT_SENSOR_SCHEMA.extend(
            {
                cv.GenerateID(): cv.declare_id(text_sensor.TextSensor),
                # Default icon; overridable from YAML.
                cv.Optional(CONF_ICON, default=ICON_TIMELAPSE): cv.icon,
            }
        ),
    }
)
async def to_code(config):
    """Register each configured text sensor with its hub component."""
    hub = await cg.get_variable(config[CONF_ATORCH_DL24_ID])
    for key in TEXT_SENSORS:
        if key not in config:
            continue
        sensor_conf = config[key]
        var = cg.new_Pvariable(sensor_conf[CONF_ID])
        await text_sensor.register_text_sensor(var, sensor_conf)
        # Hand the sensor to the hub via its generated setter.
        cg.add(getattr(hub, f"set_{key}_text_sensor")(var))
| [
"esphome.codegen.new_Pvariable",
"esphome.codegen.get_variable",
"esphome.config_validation.Optional",
"esphome.components.text_sensor.register_text_sensor",
"esphome.config_validation.declare_id",
"esphome.config_validation.GenerateID"
] | [((456, 491), 'esphome.config_validation.Optional', 'cv.Optional', (['CONF_RUNTIME_FORMATTED'], {}), '(CONF_RUNTIME_FORMATTED)\n', (467, 491), True, 'import esphome.config_validation as cv\n'), ((769, 813), 'esphome.codegen.get_variable', 'cg.get_variable', (['config[CONF_ATORCH_DL24_ID]'], {}), '(config[CONF_ATORCH_DL24_ID])\n', (784, 813), True, 'import esphome.codegen as cg\n'), ((919, 950), 'esphome.codegen.new_Pvariable', 'cg.new_Pvariable', (['conf[CONF_ID]'], {}), '(conf[CONF_ID])\n', (935, 950), True, 'import esphome.codegen as cg\n'), ((562, 577), 'esphome.config_validation.GenerateID', 'cv.GenerateID', ([], {}), '()\n', (575, 577), True, 'import esphome.config_validation as cv\n'), ((634, 680), 'esphome.config_validation.Optional', 'cv.Optional', (['CONF_ICON'], {'default': 'ICON_TIMELAPSE'}), '(CONF_ICON, default=ICON_TIMELAPSE)\n', (645, 680), True, 'import esphome.config_validation as cv\n'), ((579, 616), 'esphome.config_validation.declare_id', 'cv.declare_id', (['text_sensor.TextSensor'], {}), '(text_sensor.TextSensor)\n', (592, 616), True, 'import esphome.config_validation as cv\n'), ((969, 1013), 'esphome.components.text_sensor.register_text_sensor', 'text_sensor.register_text_sensor', (['sens', 'conf'], {}), '(sens, conf)\n', (1001, 1013), False, 'from esphome.components import text_sensor\n')] |
# -*- coding: utf-8 -*-
"""
This module contains a method for determining the highest concentration recorded
by passed dataframes within the testing period (including sensor and/or
reference data).
================================================================================
@Author:
| <NAME>, NSSC Contractor (ORAU)
| U.S. EPA / ORD / CEMM / AMCD / SFSB
Created:
Wed Sep 8 12:11:43 2021
Last Updated:
Wed Sep 8 12:11:43 2021
"""
import numpy as np
def get_max_conc(param, df_list=None, ref_df=None, bdate=None, edate=None):
    """Determine the maximum concentration measured across passed dataframes.

    If sensor dataframes are passed to ``df_list`` and a reference dataframe
    is passed to ``ref_df``, the maximum is computed across both sensor and
    reference concentrations.

    Args:
        param (str): The name of the evaluation parameter.
        df_list (list of pandas dataframes, optional): A list of sensor
            dataframes. Defaults to None.
        ref_df (pandas dataframe, optional): Reference dataframe. Defaults
            to None. If a dataframe is passed, it is considered in the
            calculation of the maximum concentration.
        bdate (str, optional): The starting timestamp to begin search.
            Defaults to None (use the earliest timestamp in the datasets).
        edate (str, optional): The ending timestamp to end search.
            Defaults to None (use the latest timestamp in the datasets).

    Returns:
        max_conc (float): The maximum concentration indicated by the
        dataframes passed to the function for the specified parameter.

    Raises:
        TypeError: If ``df_list`` and ``ref_df`` are both ``None``.
        ValueError: If every candidate maximum is NaN in the window.
    """
    if df_list is None and ref_df is None:
        # Fix: error message now names this function (was "Get_Max()").
        raise TypeError('get_max_conc() missing required dataframe objects: '
                        '"df_list" and/or "ref_df"')
    col = param + '_Value'
    # Fix: tolerate df_list=None when only a reference dataframe is passed
    # (previously this crashed trying to iterate None).
    max_list = [df.loc[bdate:edate, col].max() for df in (df_list or [])]
    if ref_df is not None:
        max_list.append(ref_df.loc[bdate:edate, col].max())
    # Remove nans before taking the overall maximum
    max_list = [i for i in max_list if not np.isnan(i)]
    max_conc = max(max_list)
    return max_conc
| [
"numpy.isnan"
] | [((2235, 2246), 'numpy.isnan', 'np.isnan', (['i'], {}), '(i)\n', (2243, 2246), True, 'import numpy as np\n')] |
from __future__ import print_function
import os.path
import tempfile
import shutil
from pype9.cmd import convert
import ninemlcatalog
from nineml import read
from lxml import etree
import yaml
if __name__ == '__main__':
from pype9.utils.testing import DummyTestCase as TestCase # @UnusedImport
else:
from unittest import TestCase # @Reimport
class TestConvert(TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_convert_version(self):
in_path = './' + os.path.join(os.path.relpath(ninemlcatalog.root),
'neuron', 'Izhikevich.xml')
out_path = os.path.join(self.tmpdir, 'Izhikevich.xml')
args = '--nineml_version 2 {} {}'.format(in_path, out_path)
convert.run(args.split())
# Check the document has been written in version 2 format
with open(out_path) as f:
xml = etree.parse(f)
root = xml.getroot()
self.assertEqual(root.tag, '{http://nineml.net/9ML/2.0}NineML')
# Check the converted document is equivalent
in_doc = read(in_path)
out_doc = read(out_path)
in_doc._url = None
out_doc._url = None
self.assertEqual(in_doc, out_doc)
def test_convert_format(self):
in_path = './' + os.path.join(os.path.relpath(ninemlcatalog.root),
'neuron', 'Izhikevich.xml')
out_path = os.path.join(self.tmpdir, 'Izhikevich.yml')
print(out_path)
args = '{} {}'.format(in_path, out_path)
convert.run(args.split())
# Check the output file is yaml
with open(out_path) as f:
contents = yaml.load(f)
self.assertEqual(list(contents.keys()), [b'NineML'])
# Check the converted document is equivalent
in_doc = read(in_path)
out_doc = read(out_path)
in_doc._url = None
out_doc._url = None
self.assertEqual(in_doc, out_doc)
| [
"nineml.read",
"lxml.etree.parse",
"yaml.load",
"tempfile.mkdtemp",
"shutil.rmtree"
] | [((427, 445), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (443, 445), False, 'import tempfile\n'), ((479, 505), 'shutil.rmtree', 'shutil.rmtree', (['self.tmpdir'], {}), '(self.tmpdir)\n', (492, 505), False, 'import shutil\n'), ((1157, 1170), 'nineml.read', 'read', (['in_path'], {}), '(in_path)\n', (1161, 1170), False, 'from nineml import read\n'), ((1189, 1203), 'nineml.read', 'read', (['out_path'], {}), '(out_path)\n', (1193, 1203), False, 'from nineml import read\n'), ((1889, 1902), 'nineml.read', 'read', (['in_path'], {}), '(in_path)\n', (1893, 1902), False, 'from nineml import read\n'), ((1921, 1935), 'nineml.read', 'read', (['out_path'], {}), '(out_path)\n', (1925, 1935), False, 'from nineml import read\n'), ((967, 981), 'lxml.etree.parse', 'etree.parse', (['f'], {}), '(f)\n', (978, 981), False, 'from lxml import etree\n'), ((1745, 1757), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1754, 1757), False, 'import yaml\n')] |
import torch
import misc.utils as utils
from misc.rewards import init_scorer, get_self_critical_reward
def to_contiguous(tensor):
    """Return ``tensor`` itself when already contiguous, else a contiguous copy."""
    return tensor if tensor.is_contiguous() else tensor.contiguous()
class LossWrapper(torch.nn.Module):
    """Wraps a captioning model together with its training criterion."""

    def __init__(self, model, opt):
        super(LossWrapper, self).__init__()
        self.opt = opt
        self.model = model
        # Label smoothing is optional; otherwise use the plain LM criterion.
        if opt.label_smoothing > 0:
            self.crit = utils.LabelSmoothing(smoothing=opt.label_smoothing)
        else:
            self.crit = utils.LanguageModelCriterion()

    def entropy(self, input, seq):
        inp = to_contiguous(input)
        token_mask = (seq > 0).float()
        # Shift the mask right by one column and force the first column on,
        # so the first time step is always counted.
        lead = token_mask.new(token_mask.size(0), 1).fill_(1)
        mask = to_contiguous(torch.cat([lead, token_mask[:, :-1]], 1))
        return torch.sum(-inp * mask) / torch.sum(mask)

    def forward(self, fc_feats, att_feats, densecap, labels, masks, att_masks, personality, gts, gt_indices, sc_flag):
        if sc_flag:
            # Matches the original control flow: nothing is returned when
            # the self-critical flag is set.
            return None
        logits = self.model(fc_feats, att_feats, densecap, labels, att_masks, personality)
        out = {'loss': self.crit(logits, labels[:, 1:], masks[:, 1:])}
        return out
| [
"misc.utils.LabelSmoothing",
"torch.sum",
"misc.utils.LanguageModelCriterion"
] | [((454, 505), 'misc.utils.LabelSmoothing', 'utils.LabelSmoothing', ([], {'smoothing': 'opt.label_smoothing'}), '(smoothing=opt.label_smoothing)\n', (474, 505), True, 'import misc.utils as utils\n'), ((544, 574), 'misc.utils.LanguageModelCriterion', 'utils.LanguageModelCriterion', ([], {}), '()\n', (572, 574), True, 'import misc.utils as utils\n'), ((843, 860), 'torch.sum', 'torch.sum', (['output'], {}), '(output)\n', (852, 860), False, 'import torch\n'), ((863, 881), 'torch.sum', 'torch.sum', (['mask_en'], {}), '(mask_en)\n', (872, 881), False, 'import torch\n')] |
import random
import sys
class Card:
    """A single playing card.

    ``face`` is False for number cards, otherwise the face name; ``ace``
    flags the ace (valued 11 by the deck builder).
    """

    def __init__(self, suit, value, face, ace):
        for attr, val in (("suit", suit), ("value", value),
                          ("face", face), ("ace", ace)):
            setattr(self, attr, val)
class Deck:
    """A standard 52-card deck of `Card` objects.

    Call :meth:`shuffle` before :meth:`deal`; shuffling also initializes
    the deal pointer (``top_card``), exactly as the original code did.
    """

    SUITS = ("Spades", "Hearts", "Clubs", "Diamonds")

    def __init__(self):
        # Build the 52 cards with loops instead of 52 hand-written appends;
        # the resulting order matches the original construction exactly.
        self.card = []
        # Number cards 2-10 for every suit.
        for suit in self.SUITS:
            for value in range(2, 11):
                self.card.append(Card(suit, value, False, False))
        # Face cards (worth 10) and the ace (worth 11) for every suit.
        for suit in self.SUITS:
            for face in ("King", "Queen", "Jack"):
                self.card.append(Card(suit, 10, face, False))
            self.card.append(Card(suit, 11, False, True))

    def shuffle(self):
        """Randomize the card order and reset the deal pointer."""
        self.top_card = 0
        # random.shuffle performs a uniform Fisher-Yates shuffle; the old
        # hand-rolled 1000-random-swap loop is unnecessary.
        random.shuffle(self.card)

    def deal(self):
        """Return the next undealt card and advance the deal pointer.

        Bug fix: the original returned ``self.card[self.top_card - 1]`` and
        left the pointer increment *after* the ``return`` (dead code), so
        every call dealt the very same card.
        """
        dealt = self.card[self.top_card]
        self.top_card += 1
        return dealt
class PokerPlayer:
    """A participant in the game; ``dealer`` flags the dealer seat."""

    def __init__(self, dealer):
        self.dealer = dealer  # True when this player acts as the dealer
        self.hand = []  # cards dealt to this player so far
        self.number_of_cards_held = 0  # running count of cards in hand
# Set up one regular player and the dealer.
player = PokerPlayer(False)
dealer = PokerPlayer(True)
# Build and shuffle a fresh 52-card deck.
deck = Deck()
deck.shuffle()
# Deal two cards each, alternating player/dealer.
# NOTE(review): confirm Deck.deal() actually advances through the deck
# between calls rather than handing out the same card repeatedly.
player.hand.append(deck.deal())
dealer.hand.append(deck.deal())
player.hand.append(deck.deal())
dealer.hand.append(deck.deal())
# Show the suits of the dealt cards.
print("Dealers First Card: " + dealer.hand[0].suit)
print("Dealers Second Card: " + dealer.hand[1].suit)
print("Players First Card: " + player.hand[0].suit)
print("Players Second Card: " + player.hand[1].suit)
| [
"random.randint"
] | [((3851, 3872), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (3865, 3872), False, 'import random\n'), ((3908, 3929), 'random.randint', 'random.randint', (['(0)', '(51)'], {}), '(0, 51)\n', (3922, 3929), False, 'import random\n')] |
import unittest
import json
from typing import Any
from src.shapeandshare.dicebox.config.dicebox_config import DiceboxConfig
from src.shapeandshare.dicebox.factories.network_factory import NetworkFactory
class DiceboxNetworkTest(unittest.TestCase):
"""
The basic class that inherits unittest.TestCase
"""
TEST_DATA_BASE = "test/fixtures"
local_config_file = "%s/dicebox.config" % TEST_DATA_BASE
local_lonestar_model_file = "%s/dicebox.lonestar.json" % TEST_DATA_BASE
# local_create_fcs = True
# local_disable_data_indexing = True
# ACTIVATION = ["softmax", "elu", "softplus", "softsign", "relu", "tanh", "sigmoid", "hard_sigmoid", "linear"]
# OPTIMIZER = ["rmsprop", "adam", "sgd", "adagrad", "adadelta", "adamax", "nadam"]
def setUp(self):
self.maxDiff = None
# def test_create_random(self):
# nf: NetworkFactory = NetworkFactory(config=DiceboxConfig(config_file=self.local_config_file))
# nf.create_random_network()
#
# # dn: DiceboxNetwork = DiceboxNetwork(dc,
# # create_fsc=True,
# # disable_data_indexing=True)
# # dn.generate_random_network()
#
# # self.assertEqual(dn.__network, {})
# # dn.__network = dn.__network_factory.create_random_network()
# # self.assertIsNotNone(dn.__network)
# # logging.debug(dn.__network)
# # self.assertIsNot(dn.__network, {})
# # logging.debug(dn.__network)
# # dn = None
def test_load_network(self):
dc = DiceboxConfig(config_file=self.local_config_file)
with open(self.local_lonestar_model_file, "r") as json_file:
expected_dicebox_serialized_model = json.load(json_file)
expected_compiled_model: Any = None
with open("%s/lonestar.model.json" % self.TEST_DATA_BASE) as json_file:
expected_compiled_model = json.load(json_file)
local_input_size = 784
local_output_size = 10
local_optimizer = "adamax"
local_network_definition = {
"optimizer": local_optimizer,
"input_shape": [
local_input_size,
],
"output_size": local_output_size,
"layers": [
{"type": "dense", "size": 987, "activation": "elu"},
{"type": "dropout", "rate": 0.2},
{"type": "dense", "size": 89, "activation": "elu"},
{"type": "dropout", "rate": 0.2},
{"type": "dense", "size": 987, "activation": "elu"},
{"type": "dropout", "rate": 0.2},
{"type": "dense", "size": 987, "activation": "elu"},
{"type": "dropout", "rate": 0.2},
{"type": "dense", "size": 987, "activation": "elu"},
{"type": "dropout", "rate": 0.2},
],
}
nf = NetworkFactory(config=dc)
dn = nf.create_network(network_definition=local_network_definition)
# dn.__network_factory.create_network(network_definition=)
# dn.create_lonestar(create_model=local_create_model, weights_filename=local_weights_file)
# returned_model = dn.__model
# self.assertIsNotNone(returned_model)
# generate a sample..
# with open('%s/lonestar.__model.out.json' % self.TEST_DATA_BASE, 'w') as json_file:
# json_file.write(json.dumps(json.loads(returned_model.to_json()), indent=4))
# self.assertEqual(json.loads(returned_model.to_json()), expected_compiled_model)
# dn = None
# def test_compile_model(self):
# expected_compiled_model = None
# with open('%s/__model.json' % self.TEST_DATA_BASE) as json_file:
# expected_compiled_model = json.load(json_file)
# self.assertIsNotNone(expected_compiled_model)
#
# local_input_size = 784
# local_output_size = 10
# local_optimizer = 'adamax'
# local_dicebox_model_definition = {
# 'optimizer': local_optimizer,
# 'input_shape': [local_input_size, ],
# 'output_size': local_output_size,
# 'layers': [
# {
# 'type': 'normal',
# 'size': 987,
# 'activation': 'elu'
# },
# {
# 'type': 'dropout',
# 'rate': 0.2
# },
# {
# 'type': 'normal',
# 'size': 89,
# 'activation': 'elu'
# },
# {
# 'type': 'dropout',
# 'rate': 0.2
# },
# {
# 'type': 'normal',
# 'size': 987,
# 'activation': 'elu'
# },
# {
# 'type': 'dropout',
# 'rate': 0.2
# },
# {
# 'type': 'normal',
# 'size': 987,
# 'activation': 'elu'
# },
# {
# 'type': 'dropout',
# 'rate': 0.2
# },
# {
# 'type': 'normal',
# 'size': 987,
# 'activation': 'elu'
# },
# {
# 'type': 'dropout',
# 'rate': 0.2
# }
# ]
# }
#
# dn = DiceboxNetwork(create_fcs=self.local_create_fcs,
# disable_data_indexing=self.local_disable_data_indexing,
# config_file=self.local_config_file,
# lonestar_model_file=self.local_lonestar_model_file)
#
# local_network: Network = dn.__network_factory.create_network(local_dicebox_model_definition)
# returned_compiled_model = dn.__network_factory.compile_network(dicebox_network=local_network)
#
# serialized_result = returned_compiled_model.to_json()
#
# # # generate a sample ..
# # with open('%s/__model.out.json' % self.TEST_DATA_BASE, 'w') as json_file:
# # json_file.write(json.dumps(json.loads(serialized_result), indent=4))
#
# self.assertEqual(json.loads(serialized_result), expected_compiled_model)
# dn = None
if __name__ == "__main__":
    # Bug fix: the original did
    #     unittest.TextTestRunner().run(DiceboxNetworkTest())
    # but instantiating a TestCase without a method name targets the
    # non-existent "runTest" method, so the test_* methods never ran.
    # unittest.main() discovers and runs every test method instead.
    unittest.main()
| [
"src.shapeandshare.dicebox.config.dicebox_config.DiceboxConfig",
"json.load",
"src.shapeandshare.dicebox.factories.network_factory.NetworkFactory",
"unittest.TextTestRunner"
] | [((6522, 6547), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (6545, 6547), False, 'import unittest\n'), ((1573, 1622), 'src.shapeandshare.dicebox.config.dicebox_config.DiceboxConfig', 'DiceboxConfig', ([], {'config_file': 'self.local_config_file'}), '(config_file=self.local_config_file)\n', (1586, 1622), False, 'from src.shapeandshare.dicebox.config.dicebox_config import DiceboxConfig\n'), ((2904, 2929), 'src.shapeandshare.dicebox.factories.network_factory.NetworkFactory', 'NetworkFactory', ([], {'config': 'dc'}), '(config=dc)\n', (2918, 2929), False, 'from src.shapeandshare.dicebox.factories.network_factory import NetworkFactory\n'), ((1741, 1761), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1750, 1761), False, 'import json\n'), ((1925, 1945), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1934, 1945), False, 'import json\n')] |
"""Off-loaded private stuff from `vtec.py`."""
# pylint: disable=too-many-arguments
from datetime import timedelta
import itertools
import pandas as pd
from pyiem.util import LOG
# When a VTEC product has an infinity time 000000T0000Z, we need some value
# for the database to make things logically work. We arbitrarily pick 21
# days, which seems to be enough time for a WFO to issue a followup.
DEFAULT_EXPIRE_DELTA = timedelta(days=21)
def which_year(txn, prod, segment, vtec):
    """Figure out which per-year ``warnings_<year>`` table this event is in.

    Args:
        txn: database cursor used for the lookups.
        prod: the VTEC product being processed.
        segment: the product segment (its H-VTEC NWSLI breaks ties).
        vtec: the VTEC entry whose table year is wanted.

    Returns:
        int: the year suffix of the table to use (falls back to
        ``prod.db_year`` when nothing can be found).
    """
    if vtec.action in ["NEW"]:
        # Lets piggyback a check to see if this ETN has been reused?
        # Can this reliably be done?
        txn.execute(
            f"SELECT max(updated) from warnings_{prod.db_year} WHERE wfo = %s "
            "and eventid = %s and significance = %s and phenomena = %s",
            (vtec.office, vtec.etn, vtec.significance, vtec.phenomena),
        )
        row = txn.fetchone()
        if row["max"] is not None:
            # More than 21 days since the last update for this ETN suggests
            # the event number was recycled.
            if (prod.valid - row["max"]).total_seconds() > (21 * 86400):
                prod.warnings.append(
                    "Possible Duplicated ETN\n"
                    f" max(updated) is {row['max']}, "
                    f"prod.valid is {prod.valid}\n year is {prod.db_year}\n"
                    f" VTEC: {str(vtec)}\n "
                    f"product_id: {prod.get_product_id()}"
                )
        return prod.db_year
    # Query the database for any matching entries within the past
    # 3, 10, 31 days, to find when the product_issue was, which guides
    # the table that the data is stored within.
    for offset in [3, 10, 31]:
        txn.execute(
            "SELECT tableoid::regclass as tablename, hvtec_nwsli, "
            "min(product_issue at time zone 'UTC'), "
            "max(product_issue at time zone 'UTC') from warnings "
            "WHERE wfo = %s and eventid = %s and significance = %s and "
            "phenomena = %s and ((updated > %s and updated <= %s) "
            "or expire > %s) and status not in ('UPG', 'CAN') "
            "GROUP by tablename, hvtec_nwsli ORDER by tablename DESC ",
            (
                vtec.office,
                vtec.etn,
                vtec.significance,
                vtec.phenomena,
                prod.valid - timedelta(days=offset),
                prod.valid,
                prod.valid,
            ),
        )
        rows = txn.fetchall()
        if not rows:
            continue
        if len(rows) > 1:
            # We likely have a flood warning and can use the HVTEC NWSLI
            # to resolve ambiguity
            hvtec_nwsli = segment.get_hvtec_nwsli()
            if hvtec_nwsli:
                for row in rows:
                    if hvtec_nwsli == row["hvtec_nwsli"]:
                        return int(row["tablename"].replace("warnings_", ""))
            prod.warnings.append(
                (
                    "VTEC %s product: %s returned %s rows when "
                    "searching for current table"
                )
                % (str(vtec), prod.get_product_id(), txn.rowcount)
            )
        row = rows[0]
        if row["min"] is not None:
            year = row["min"].year
            # Informational only: the event straddles a UTC year boundary.
            if row["max"].year != year:
                LOG.info(
                    "VTEC Product appears to cross 1 Jan UTC "
                    "minyear: %s maxyear: %s VTEC: %s productid: %s",
                    year,
                    row["max"].year,
                    str(vtec),
                    prod.get_product_id(),
                )
            return int(row["tablename"].replace("warnings_", ""))
    # Give up
    if not prod.is_correction():
        table = f"warnings_{prod.db_year}"
        prod.warnings.append(
            "Failed to find year of product issuance:\n"
            f" VTEC:{str(vtec)}\n PRODUCT: {prod.get_product_id()}\n"
            f" defaulting to use year: {prod.db_year}\n"
            f" {list_rows(txn, table, vtec)}"
        )
    return prod.db_year
def _associate_vtec_year(prod, txn):
"""Figure out to which year each VTEC in the product belongs.
Modifies the prod.segment.vtec objects."""
for seg, _ugcs, vtec in prod.suv_iter():
if vtec.year is None:
vtec.year = which_year(txn, prod, seg, vtec)
def _load_database_status(txn, prod):
    """Build a pandas DataFrame of the database's current view of events.

    For every non-NEW VTEC in the product (deduplicated by the
    office.phenomena.significance.etn key), the still-active rows are
    pulled from that event's per-year warnings table.

    Args:
        txn: database cursor used for the SELECTs.
        prod: the VTEC product being processed.

    Returns:
        pandas.DataFrame: one row per active UGC entry (empty when no
        rows match).
    """
    rows = []
    done = []
    for _seg, _ugcs, vtec in prod.suv_iter():
        if vtec.status == "NEW" or vtec.year is None:
            continue
        # Query each event only once, even if it spans several segments.
        key = f"{vtec.office}.{vtec.phenomena}.{vtec.significance}.{vtec.etn}"
        if key in done:
            continue
        done.append(key)
        txn.execute(
            "SELECT ugc, status, updated at time zone 'UTC' as utc_updated, "
            "expire at time zone 'UTC' as utc_expire "
            f"from warnings_{vtec.year} WHERE wfo = %s and "
            "phenomena = %s and significance = %s and eventid = %s and "
            "status not in ('CAN', 'UPG', 'EXP') and expire >= %s",
            (
                vtec.office,
                vtec.phenomena,
                vtec.significance,
                vtec.etn,
                prod.valid,
            ),
        )
        for row in txn.fetchall():
            # NOTE(review): rows are indexed positionally here but by column
            # name elsewhere in this module — confirm the cursor supports both.
            entry = {
                "ugc": row[0],
                "status": row[1],
                "year": vtec.year,
                "phenomena": vtec.phenomena,
                "significance": vtec.significance,
                "etn": vtec.etn,
                "updated": row[2],
                "expire": row[3],
            }
            rows.append(entry)
    return pd.DataFrame(rows)
def check_dup_ps(segment):
    """Does this TextProductSegment have duplicated VTEC?

    NWS AWIPS developers asked for alerts when a VTEC segment reuses a
    phenomena+significance pair.  In practice this is only an error when
    the two events overlap in time; the same pair for a current event and
    a future one is common and fine.

    Returns:
        bool: True when two events with the same phenomena.significance
        overlap in time.
    """
    intervals_by_key = {}
    for vtec in segment.vtec:
        if vtec.begints is None or vtec.endts is None:
            # Open-ended events are skipped; the overlap logic is too
            # difficult without both timestamps.
            continue
        # For terminated events the product's valid time stands in for
        # the (no longer meaningful) VTEC end time.
        if vtec.status in ["UPG", "CAN"]:
            ends = segment.tp.valid
        else:
            ends = vtec.endts
        intervals_by_key.setdefault(vtec.s2(), []).append(
            [vtec.begints, ends]
        )
    for spans in intervals_by_key.values():
        if len(spans) < 2:
            continue
        for one, two in itertools.permutations(spans, 2):
            # One interval starts inside the other -> overlap.
            if two[0] <= one[0] < two[1]:
                return True
    return False
def do_sql_hvtec(txn, segment):
    """Insert the segment's H-VTEC riverpro details into the database.

    Scans the segment's bullets for stage / flood-stage / forecast /
    impact text and stores them alongside the H-VTEC severity.

    Args:
        txn: database cursor to execute the INSERT with.
        segment: TextProductSegment carrying ``hvtec`` and ``bullets``.
    """
    nwsli = segment.hvtec[0].nwsli.id
    # The all-zeros NWSLI is a placeholder; no point in saving these events.
    if nwsli == "00000":
        return
    if len(segment.bullets) < 4:
        return
    stage_text = ""
    flood_text = ""
    forecast_text = ""
    impact_text = ""
    # Idiom fixes: the unused enumerate() is gone and the
    # ``find(...) == 0`` checks are now explicit startswith() calls.
    for bullet in segment.bullets:
        bsu = bullet.strip().upper()
        if bsu.startswith("FLOOD STAGE"):
            flood_text = bullet
        if bsu.startswith("FORECAST"):
            forecast_text = bullet
        # Only the first "AT ..." bullet is kept as the stage text.
        if bsu.startswith("AT ") and stage_text == "":
            stage_text = bullet
        if bsu.startswith("IMPACT..."):
            impact_text = bullet.strip()[9:]
    txn.execute(
        "INSERT into riverpro(nwsli, stage_text, flood_text, forecast_text, "
        "impact_text, severity) VALUES (%s,%s,%s,%s,%s,%s)",
        (
            nwsli,
            stage_text,
            flood_text,
            forecast_text,
            impact_text,
            segment.hvtec[0].severity,
        ),
    )
def list_rows(txn, table, vtec):
    """Return a plain-text listing of the rows matching ``vtec`` in ``table``."""
    txn.execute(
        (
            "SELECT ugc, issue at time zone 'UTC' as ui, status, "
            f"updated at time zone 'UTC' as uu from {table} "
            "WHERE wfo = %s and phenomena = %s and significance = %s and "
            "eventid = %s ORDER by ugc"
        ),
        (vtec.office, vtec.phenomena, vtec.significance, vtec.etn),
    )
    # Assemble the listing with join() instead of repeated concatenation.
    lines = [
        f"Entries for VTEC within {table}\n",
        " UGC STA ISSUED UPDATED\n",
    ]
    for row in txn.fetchall():
        lines.append(f" {row['ugc']} {row['status']} {row['ui']} {row['uu']}\n")
    return "".join(lines)
def _debug_warning(prod, txn, warning_table, vtec, segment, ets):
    """Build a detailed warning message for a row-count mismatch.

    Called when an UPDATE touched a different number of rows than the
    segment has UGCs; dumps the matching database rows for debugging.

    Args:
        prod: the product being processed (for valid time).
        txn: database cursor (``rowcount`` is read before re-querying).
        warning_table: name of the per-year warnings table.
        vtec: the VTEC entry involved.
        segment: the product segment (for its UGC listing).
        ets: the expiration timestamp that was being applied.

    Returns:
        str: a multi-line diagnostic message.
    """
    # Capture the rowcount of the failed statement before executing more SQL.
    cnt = txn.rowcount
    txn.execute(
        "SELECT ugc, issue at time zone 'UTC' as utc_issue, "
        "expire at time zone 'UTC' as utc_expire, "
        "updated at time zone 'UTC' as utc_updated, "
        f"status from {warning_table} WHERE wfo = %s and eventid = %s and "
        "ugc in %s and significance = %s and phenomena = %s "
        "ORDER by ugc ASC, issue ASC",
        (
            vtec.office,
            vtec.etn,
            segment.get_ugcs_tuple(),
            vtec.significance,
            vtec.phenomena,
        ),
    )
    debugmsg = "UGC STA ISSUE EXPIRE UPDATED\n"
    def myfmt(val):
        """Format a timestamp, tolerating NULL values."""
        default = "%-16s" % ("((NULL))",)
        return default if val is None else val.strftime("%Y-%m-%d %H:%M")
    for row in txn.fetchall():
        debugmsg += (
            f"{row['ugc']} {row['status']} {myfmt(row['utc_issue'])} "
            f"{myfmt(row['utc_expire'])} {myfmt(row['utc_updated'])}\n"
        )
    return (
        f"Warning: {vtec.s3()} do_sql_vtec {warning_table} {vtec.action} "
        f"updated {cnt} row, should {len(segment.ugcs)} rows\n"
        f"UGCS: {segment.ugcs}\n"
        f"valid: {prod.valid} expire: {ets}\n{debugmsg}"
    )
def _resent_match(prod, txn, warning_table, vtec):
"""Check if this is a resent match."""
txn.execute(
f"SELECT max(updated) as maxtime from {warning_table} "
"WHERE eventid = %s and significance = %s and wfo = %s and "
"phenomena = %s",
(vtec.etn, vtec.significance, vtec.office, vtec.phenomena),
)
maxtime = txn.fetchone()["maxtime"]
if maxtime is not None and maxtime == prod.valid:
LOG.info("RESENT Match, skipping SQL for %s!", prod.get_product_id())
return True
return False
def _do_sql_vtec_new(prod, txn, warning_table, segment, vtec):
    """Insert database entries for a NEW VTEC action.

    One row is inserted per UGC in the segment; pre-existing unterminated
    rows are either deleted (product correction) or flagged as duplicates.

    Args:
        prod: the product being processed.
        txn: database cursor.
        warning_table: name of the per-year warnings table.
        segment: the product segment carrying the UGCs and H-VTEC info.
        vtec: the NEW-action VTEC entry.
    """
    # The VTEC begin time may be open; fall back to the product valid time.
    bts = prod.valid if vtec.begints is None else vtec.begints
    # If this product has no expiration time, but db needs a value
    ets = vtec.endts
    if vtec.endts is None:
        ets = bts + DEFAULT_EXPIRE_DELTA
    # Forecaster signature is truncated to fit the database column.
    fcster = prod.get_signature()
    if fcster is not None:
        fcster = fcster[:24]
    # For each UGC code in this segment, we create a database entry
    for ugc in segment.ugcs:
        # Check to see if we have entries already for this UGC
        # Some previous entries may not be in a terminated state, so
        # also check the expiration time
        txn.execute(
            f"SELECT issue, expire, updated from {warning_table} "
            "WHERE ugc = %s and eventid = %s and significance = %s "
            "and wfo = %s and phenomena = %s and "
            "status not in ('CAN', 'UPG') and expire > %s",
            (
                str(ugc),
                vtec.etn,
                vtec.significance,
                vtec.office,
                vtec.phenomena,
                prod.valid,
            ),
        )
        if txn.rowcount > 0:
            if prod.is_correction():
                # We'll delete old entries, gulp
                txn.execute(
                    f"DELETE from {warning_table} WHERE ugc = %s "
                    "and eventid = %s and significance = %s and "
                    "wfo = %s and phenomena = %s and "
                    "status in ('NEW', 'EXB', 'EXA') ",
                    (
                        str(ugc),
                        vtec.etn,
                        vtec.significance,
                        vtec.office,
                        vtec.phenomena,
                    ),
                )
                if txn.rowcount != 1:
                    prod.warnings.append(
                        f"{vtec.s3()} {str(ugc)} duplicated via "
                        f"product correction, deleted {txn.rowcount} "
                        "old rows instead of 1"
                    )
            else:
                prod.warnings.append(
                    "Duplicate(s) WWA found, "
                    f"rowcount: {txn.rowcount} for UGC: {ugc}"
                )
        txn.execute(
            f"INSERT into {warning_table} (issue, expire, updated, "
            "wfo, eventid, status, fcster, report, ugc, phenomena, "
            "significance, gid, init_expire, product_issue, "
            "hvtec_nwsli, hvtec_severity, hvtec_cause, hvtec_record, "
            "is_emergency, is_pds) "
            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, "
            "get_gid(%s, %s, %s), %s, %s, %s, %s, %s, %s, %s, %s) "
            "RETURNING gid",
            (
                bts,
                ets,
                prod.valid,
                vtec.office,
                vtec.etn,
                vtec.action,
                fcster,
                prod.unixtext,
                str(ugc),
                vtec.phenomena,
                vtec.significance,
                str(ugc),
                prod.valid,
                vtec.phenomena == "FW",
                ets,
                prod.valid,
                segment.get_hvtec_nwsli(),
                segment.get_hvtec_severity(),
                segment.get_hvtec_cause(),
                segment.get_hvtec_record(),
                segment.is_emergency,
                segment.is_pds,
            ),
        )
        # For unit tests, these mostly get filtered out
        if txn.fetchone().get("gid") is None:
            prod.warnings.append(
                f"get_gid({str(ugc)}, {prod.valid}, {vtec.phenomena == 'FW'}) "
                "was null"
            )
def _do_sql_vtec_cor(prod, txn, warning_table, segment, vtec):
    """Handle a product correction (COR).

    Only the SVS text and the updated timestamp are changed; nothing
    about the event status or times is touched.

    Args:
        prod: the product being processed.
        txn: database cursor.
        warning_table: name of the per-year warnings table.
        segment: the product segment carrying the UGCs.
        vtec: the VTEC entry being corrected.
    """
    # For corrections, we only update the SVS and updated
    txn.execute(
        f"UPDATE {warning_table} SET "
        "svs = (CASE WHEN (svs IS NULL) THEN '__' ELSE svs END) "
        "|| %s || '__', updated = %s WHERE wfo = %s and "
        f"eventid = %s and ugc in %s and significance = %s "
        "and phenomena = %s and (expire + '1 hour'::interval) >= %s ",
        (
            prod.unixtext,
            prod.valid,
            vtec.office,
            vtec.etn,
            segment.get_ugcs_tuple(),
            vtec.significance,
            vtec.phenomena,
            prod.valid,
        ),
    )
    # A mismatch between rows updated and UGCs present is worth a warning.
    if txn.rowcount != len(segment.ugcs):
        prod.warnings.append(
            _debug_warning(prod, txn, warning_table, vtec, segment, vtec.endts)
        )
def _do_sql_vtec_can(prod, txn, warning_table, segment, vtec):
    """Handle terminate/extend actions (CAN, UPG, EXT and friends).

    Updates status, expiration (and for EXT possibly the issue time) on
    the matching rows.

    Args:
        prod: the product being processed.
        txn: database cursor.
        warning_table: name of the per-year warnings table.
        segment: the product segment carrying the UGCs.
        vtec: the VTEC entry being applied.
    """
    ets = vtec.endts
    # These are terminate actions, so we act accordingly
    if vtec.action in ["CAN", "UPG"]:
        ets = prod.valid
    # If we are extending into infinity, but need a value
    if vtec.action == "EXT" and vtec.endts is None:
        ets = prod.valid + DEFAULT_EXPIRE_DELTA
    # An EXT action could change the issuance time, gasp
    # NOTE(review): the issue time is interpolated into the SQL rather than
    # parameterized; it comes from VTEC parsing, but confirm it is safe.
    issuesql = ""
    if vtec.action == "EXT" and vtec.begints is not None:
        issuesql = " issue = '%s', " % (vtec.begints,)
    txn.execute(
        f"UPDATE {warning_table} SET {issuesql} expire = %s, "
        "status = %s, updated = %s, "
        "svs = (CASE WHEN (svs IS NULL) THEN '__' ELSE svs END) "
        "|| %s || '__' WHERE wfo = %s and eventid = %s and ugc in "
        f"%s and significance = %s and phenomena = %s "
        "and status not in ('CAN', 'UPG') and "
        "(expire + '1 hour'::interval) >= %s",
        (
            ets,
            vtec.action,
            prod.valid,
            prod.unixtext,
            vtec.office,
            vtec.etn,
            segment.get_ugcs_tuple(),
            vtec.significance,
            vtec.phenomena,
            prod.valid,
        ),
    )
    # Row-count mismatches are only warned about for non-corrections.
    if txn.rowcount != len(segment.ugcs):
        if not prod.is_correction():
            prod.warnings.append(
                _debug_warning(prod, txn, warning_table, vtec, segment, ets)
            )
def _do_sql_vtec_con(prod, txn, warning_table, segment, vtec):
"""Continue."""
# These are no-ops, just updates
ets = vtec.endts
if vtec.endts is None:
ets = prod.valid + DEFAULT_EXPIRE_DELTA
# Offices have 1 hour to expire something :), actually 30 minutes
txn.execute(
f"UPDATE {warning_table} SET status = %s, updated = %s, "
"svs = (CASE WHEN (svs IS NULL) THEN '__' ELSE svs END) "
"|| %s || '__' , expire = %s, "
"is_emergency = (case when %s then true else is_emergency end), "
"is_pds = (case when %s then true else is_pds end) "
f"WHERE wfo = %s and eventid = %s and ugc in %s "
"and significance = %s and phenomena = %s and "
"status not in ('CAN', 'UPG') and "
"(expire + '1 hour'::interval) >= %s",
(
vtec.action,
prod.valid,
prod.unixtext,
ets,
segment.is_emergency,
segment.is_pds,
vtec.office,
vtec.etn,
segment.get_ugcs_tuple(),
vtec.significance,
vtec.phenomena,
prod.valid,
),
)
if txn.rowcount != len(segment.ugcs):
prod.warnings.append(
_debug_warning(prod, txn, warning_table, vtec, segment, ets)
)
| [
"pandas.DataFrame",
"datetime.timedelta",
"itertools.permutations"
] | [((432, 456), 'datetime.timedelta', 'timedelta', ([], {'hours': '(21 * 24)'}), '(hours=21 * 24)\n', (441, 456), False, 'from datetime import timedelta\n'), ((5712, 5730), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (5724, 5730), True, 'import pandas as pd\n'), ((6793, 6831), 'itertools.permutations', 'itertools.permutations', (['combos[key]', '(2)'], {}), '(combos[key], 2)\n', (6815, 6831), False, 'import itertools\n'), ((2341, 2363), 'datetime.timedelta', 'timedelta', ([], {'days': 'offset'}), '(days=offset)\n', (2350, 2363), False, 'from datetime import timedelta\n')] |
from flask import render_template
from app.web.blue_print import web
# run code in different file
from app.web import littlered_bbc
from app.web import index
@web.app_errorhandler(404)
def not_found(e):
# AOP 思想
return render_template('404.html'), 404
| [
"flask.render_template",
"app.web.blue_print.web.app_errorhandler"
] | [((162, 187), 'app.web.blue_print.web.app_errorhandler', 'web.app_errorhandler', (['(404)'], {}), '(404)\n', (182, 187), False, 'from app.web.blue_print import web\n'), ((230, 257), 'flask.render_template', 'render_template', (['"""404.html"""'], {}), "('404.html')\n", (245, 257), False, 'from flask import render_template\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetReplicationPoliciesResult',
'AwaitableGetReplicationPoliciesResult',
'get_replication_policies',
]
@pulumi.output_type
class GetReplicationPoliciesResult:
"""
A collection of values returned by getReplicationPolicies.
"""
def __init__(__self__, bucket=None, filters=None, id=None, namespace=None, replication_policies=None):
if bucket and not isinstance(bucket, str):
raise TypeError("Expected argument 'bucket' to be a str")
pulumi.set(__self__, "bucket", bucket)
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if namespace and not isinstance(namespace, str):
raise TypeError("Expected argument 'namespace' to be a str")
pulumi.set(__self__, "namespace", namespace)
if replication_policies and not isinstance(replication_policies, list):
raise TypeError("Expected argument 'replication_policies' to be a list")
pulumi.set(__self__, "replication_policies", replication_policies)
@property
@pulumi.getter
def bucket(self) -> str:
return pulumi.get(self, "bucket")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetReplicationPoliciesFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def namespace(self) -> str:
return pulumi.get(self, "namespace")
@property
@pulumi.getter(name="replicationPolicies")
def replication_policies(self) -> Sequence['outputs.GetReplicationPoliciesReplicationPolicyResult']:
"""
The list of replication_policies.
"""
return pulumi.get(self, "replication_policies")
class AwaitableGetReplicationPoliciesResult(GetReplicationPoliciesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetReplicationPoliciesResult(
bucket=self.bucket,
filters=self.filters,
id=self.id,
namespace=self.namespace,
replication_policies=self.replication_policies)
def get_replication_policies(bucket: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetReplicationPoliciesFilterArgs']]] = None,
namespace: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetReplicationPoliciesResult:
"""
This data source provides the list of Replication Policies in Oracle Cloud Infrastructure Object Storage service.
List the replication policies associated with a bucket.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_replication_policies = oci.objectstorage.get_replication_policies(bucket=var["replication_policy_bucket"],
namespace=var["replication_policy_namespace"])
```
:param str bucket: The name of the bucket. Avoid entering confidential information. Example: `my-new-bucket1`
:param str namespace: The Object Storage namespace used for the request.
"""
__args__ = dict()
__args__['bucket'] = bucket
__args__['filters'] = filters
__args__['namespace'] = namespace
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:objectstorage/getReplicationPolicies:getReplicationPolicies', __args__, opts=opts, typ=GetReplicationPoliciesResult).value
return AwaitableGetReplicationPoliciesResult(
bucket=__ret__.bucket,
filters=__ret__.filters,
id=__ret__.id,
namespace=__ret__.namespace,
replication_policies=__ret__.replication_policies)
| [
"pulumi.get",
"pulumi.getter",
"pulumi.set",
"pulumi.InvokeOptions",
"pulumi.runtime.invoke"
] | [((2254, 2295), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""replicationPolicies"""'}), "(name='replicationPolicies')\n", (2267, 2295), False, 'import pulumi\n'), ((877, 915), 'pulumi.set', 'pulumi.set', (['__self__', '"""bucket"""', 'bucket'], {}), "(__self__, 'bucket', bucket)\n", (887, 915), False, 'import pulumi\n'), ((1050, 1090), 'pulumi.set', 'pulumi.set', (['__self__', '"""filters"""', 'filters'], {}), "(__self__, 'filters', filters)\n", (1060, 1090), False, 'import pulumi\n'), ((1208, 1238), 'pulumi.set', 'pulumi.set', (['__self__', '"""id"""', 'id'], {}), "(__self__, 'id', id)\n", (1218, 1238), False, 'import pulumi\n'), ((1377, 1421), 'pulumi.set', 'pulumi.set', (['__self__', '"""namespace"""', 'namespace'], {}), "(__self__, 'namespace', namespace)\n", (1387, 1421), False, 'import pulumi\n'), ((1595, 1661), 'pulumi.set', 'pulumi.set', (['__self__', '"""replication_policies"""', 'replication_policies'], {}), "(__self__, 'replication_policies', replication_policies)\n", (1605, 1661), False, 'import pulumi\n'), ((1740, 1766), 'pulumi.get', 'pulumi.get', (['self', '"""bucket"""'], {}), "(self, 'bucket')\n", (1750, 1766), False, 'import pulumi\n'), ((1907, 1934), 'pulumi.get', 'pulumi.get', (['self', '"""filters"""'], {}), "(self, 'filters')\n", (1917, 1934), False, 'import pulumi\n'), ((2100, 2122), 'pulumi.get', 'pulumi.get', (['self', '"""id"""'], {}), "(self, 'id')\n", (2110, 2122), False, 'import pulumi\n'), ((2204, 2233), 'pulumi.get', 'pulumi.get', (['self', '"""namespace"""'], {}), "(self, 'namespace')\n", (2214, 2233), False, 'import pulumi\n'), ((2482, 2522), 'pulumi.get', 'pulumi.get', (['self', '"""replication_policies"""'], {}), "(self, 'replication_policies')\n", (2492, 2522), False, 'import pulumi\n'), ((4113, 4135), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ([], {}), '()\n', (4133, 4135), False, 'import pulumi\n'), ((4227, 4379), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', 
(['"""oci:objectstorage/getReplicationPolicies:getReplicationPolicies"""', '__args__'], {'opts': 'opts', 'typ': 'GetReplicationPoliciesResult'}), "(\n 'oci:objectstorage/getReplicationPolicies:getReplicationPolicies',\n __args__, opts=opts, typ=GetReplicationPoliciesResult)\n", (4248, 4379), False, 'import pulumi\n')] |
# Copyright 2019-2020 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asreview.models.deprecated import _moved_warning
from asreview.models.query.max import MaxQuery as _MaxQuery
from asreview.models.query.mixed import MixedQuery as _MixedQuery
from asreview.models.query.uncertainty import UncertaintyQuery as _UncertaintyQuery
from asreview.models.query.random import RandomQuery as _RandomQuery
from asreview.models.query.cluster import ClusterQuery as _ClusterQuery
from asreview.models.query.utils import get_query_model as _get_query_model
from asreview.models.query.utils import get_query_class as _get_query_class
from asreview.models.query.utils import list_query_strategies as _list_query_strategies
"""Deprecated, will be removed in version 1.0"""
MaxQuery = _moved_warning(
_MaxQuery, "asreview.models.query.MaxQuery",
"asreview.query_strategies.MaxQuery")
MixedQuery = _moved_warning(
_MixedQuery, "asreview.models.query.MixedQuery",
"asreview.query_strategies.MixedQuery")
UncertaintyQuery = _moved_warning(
_UncertaintyQuery, "asreview.models.query.UncertaintyQuery",
"asreview.query_strategies.UncertaintyQuery")
RandomQuery = _moved_warning(
_RandomQuery, "asreview.models.query.RandomQuery",
"asreview.query_strategies.RandomQuery")
ClusterQuery = _moved_warning(
_ClusterQuery, "asreview.models.query.ClusterQuery",
"asreview.query_strategies.ClusterQuery")
get_query_model = _moved_warning(
_get_query_model, "asreview.models.query.get_query_model",
"asreview.query_strategies.get_query_model")
get_query_class = _moved_warning(
_get_query_class, "asreview.models.query.get_query_class",
"asreview.query_strategies.get_query_class")
list_query_strategies = _moved_warning(
_list_query_strategies, "asreview.models.query.list_query_strategies",
"asreview.query_strategies.list_query_strategies")
| [
"asreview.models.deprecated._moved_warning"
] | [((1320, 1421), 'asreview.models.deprecated._moved_warning', '_moved_warning', (['_MaxQuery', '"""asreview.models.query.MaxQuery"""', '"""asreview.query_strategies.MaxQuery"""'], {}), "(_MaxQuery, 'asreview.models.query.MaxQuery',\n 'asreview.query_strategies.MaxQuery')\n", (1334, 1421), False, 'from asreview.models.deprecated import _moved_warning\n'), ((1440, 1547), 'asreview.models.deprecated._moved_warning', '_moved_warning', (['_MixedQuery', '"""asreview.models.query.MixedQuery"""', '"""asreview.query_strategies.MixedQuery"""'], {}), "(_MixedQuery, 'asreview.models.query.MixedQuery',\n 'asreview.query_strategies.MixedQuery')\n", (1454, 1547), False, 'from asreview.models.deprecated import _moved_warning\n'), ((1572, 1697), 'asreview.models.deprecated._moved_warning', '_moved_warning', (['_UncertaintyQuery', '"""asreview.models.query.UncertaintyQuery"""', '"""asreview.query_strategies.UncertaintyQuery"""'], {}), "(_UncertaintyQuery, 'asreview.models.query.UncertaintyQuery',\n 'asreview.query_strategies.UncertaintyQuery')\n", (1586, 1697), False, 'from asreview.models.deprecated import _moved_warning\n'), ((1717, 1827), 'asreview.models.deprecated._moved_warning', '_moved_warning', (['_RandomQuery', '"""asreview.models.query.RandomQuery"""', '"""asreview.query_strategies.RandomQuery"""'], {}), "(_RandomQuery, 'asreview.models.query.RandomQuery',\n 'asreview.query_strategies.RandomQuery')\n", (1731, 1827), False, 'from asreview.models.deprecated import _moved_warning\n'), ((1848, 1961), 'asreview.models.deprecated._moved_warning', '_moved_warning', (['_ClusterQuery', '"""asreview.models.query.ClusterQuery"""', '"""asreview.query_strategies.ClusterQuery"""'], {}), "(_ClusterQuery, 'asreview.models.query.ClusterQuery',\n 'asreview.query_strategies.ClusterQuery')\n", (1862, 1961), False, 'from asreview.models.deprecated import _moved_warning\n'), ((1985, 2107), 'asreview.models.deprecated._moved_warning', '_moved_warning', (['_get_query_model', 
'"""asreview.models.query.get_query_model"""', '"""asreview.query_strategies.get_query_model"""'], {}), "(_get_query_model, 'asreview.models.query.get_query_model',\n 'asreview.query_strategies.get_query_model')\n", (1999, 2107), False, 'from asreview.models.deprecated import _moved_warning\n'), ((2131, 2253), 'asreview.models.deprecated._moved_warning', '_moved_warning', (['_get_query_class', '"""asreview.models.query.get_query_class"""', '"""asreview.query_strategies.get_query_class"""'], {}), "(_get_query_class, 'asreview.models.query.get_query_class',\n 'asreview.query_strategies.get_query_class')\n", (2145, 2253), False, 'from asreview.models.deprecated import _moved_warning\n'), ((2283, 2427), 'asreview.models.deprecated._moved_warning', '_moved_warning', (['_list_query_strategies', '"""asreview.models.query.list_query_strategies"""', '"""asreview.query_strategies.list_query_strategies"""'], {}), "(_list_query_strategies,\n 'asreview.models.query.list_query_strategies',\n 'asreview.query_strategies.list_query_strategies')\n", (2297, 2427), False, 'from asreview.models.deprecated import _moved_warning\n')] |
# coding:utf-8
import csv
from bs4 import BeautifulSoup
import requests
if __name__ == '__main__':
user_agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1'
headers = {'User-Agent': user_agent}
r = requests.get('http://seputu.com/', headers=headers)
soup = BeautifulSoup(r.text, 'html.parser')
lisit = []
for mulu in soup.find_all(class_='mulu'):
h2 = mulu.find('h2')
if h2 != None:
h2_title = h2.string
for a in mulu.find(class_='box').find_all('a'):
href = a.get('href')
box_title = a.string
lisit.append((h2_title, box_title, href))
headers_ = {'标题', '章节名', '链接'}
with open('qiye.csv', 'w', newline='') as fp:
# csv需要指定newline,否则每行数据之间都有空行
f_csv = csv.writer(fp)
f_csv.writerow(headers_)
f_csv.writerows(lisit)
| [
"bs4.BeautifulSoup",
"csv.writer",
"requests.get"
] | [((304, 355), 'requests.get', 'requests.get', (['"""http://seputu.com/"""'], {'headers': 'headers'}), "('http://seputu.com/', headers=headers)\n", (316, 355), False, 'import requests\n'), ((367, 403), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""html.parser"""'], {}), "(r.text, 'html.parser')\n", (380, 403), False, 'from bs4 import BeautifulSoup\n'), ((882, 896), 'csv.writer', 'csv.writer', (['fp'], {}), '(fp)\n', (892, 896), False, 'import csv\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
File: insert_probe_names_to_sqlite.py
Author: <NAME>
Created by <NAME> on 19 July 2012 09:07 PDT (-0700)
Copyright (c) 2012 <NAME>. All rights reserved.
Description:
"""
import re
import sqlite3
import argparse
from collections import defaultdict
from Bio import SeqIO
import pdb
def get_args():
"""Get arguments from CLI"""
parser = argparse.ArgumentParser(
description="""Program description""")
parser.add_argument(
"db",
help="""The database to which to add the probe names"""
)
parser.add_argument(
"probes",
help="""The probe fasta file to enter to the database"""
)
return parser.parse_args()
def get_all_probes(probes):
all_probes = defaultdict(lambda: defaultdict(list))
for record in SeqIO.parse(open(probes, 'rU'), 'fasta'):
rs = record.description.split('|')
name, span = rs[0], rs[1]
all_probes[name][span].append(record)
return all_probes
def main():
args = get_args()
conn = sqlite3.connect(args.db)
cur = conn.cursor()
all_probes = get_all_probes(args.probes)
cur.execute("PRAGMA foreign_keys = ON")
query = '''CREATE TABLE probes (
id INTEGER PRIMARY KEY AUTOINCREMENT,
locus int,
probe int,
source text,
oldlocus text,
oldprobe text,
sequence text
)'''
cur.execute(query)
query = '''CREATE TABLE probeset (
id int REFERENCES probes,
probes500 int DEFAULT 0,
probes1k int DEFAULT 0
)'''
cur.execute(query)
for lpos, locus in enumerate(all_probes):
spans = sorted(all_probes[locus].keys())
for spos, span in enumerate(spans):
probe = all_probes[locus][span][0]
ns = probe.description.split('|')
query = '''INSERT INTO probes (locus, probe, source, oldlocus, oldprobe, sequence)
values ({0},{1},'{2}','{3}','{4}','{5}')'''.format(
lpos + 1,
spos + 1,
ns[2],
ns[0],
probe.description,
str(probe.seq)
)
cur.execute(query)
if ns[2] == '500':
cur.execute('''INSERT INTO probeset (id, probes500, probes1k) VALUES (?, 1, 0)''', (cur.lastrowid,))
elif ns[2] == '1000':
cur.execute('''INSERT INTO probeset (id, probes500, probes1k) VALUES (?, 0, 1)''', (cur.lastrowid,))
elif ns[2] == 'both':
cur.execute('''INSERT INTO probeset (id, probes500, probes1k) VALUES (?, 1, 1)''', (cur.lastrowid,))
conn.commit()
cur.close()
conn.close()
if __name__ == '__main__':
main()
| [
"collections.defaultdict",
"sqlite3.connect",
"argparse.ArgumentParser"
] | [((394, 452), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Program description"""'}), "(description='Program description')\n", (417, 452), False, 'import argparse\n'), ((1086, 1110), 'sqlite3.connect', 'sqlite3.connect', (['args.db'], {}), '(args.db)\n', (1101, 1110), False, 'import sqlite3\n'), ((814, 831), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (825, 831), False, 'from collections import defaultdict\n')] |
# This script takes as an argument the path to the folder which
# contains folders of images.
# It is assumed that name of each folder with images is
# the label for the images, that is all the images in each folder belong
# to the the same class, and the name of that class is the name of the folder.
# Images are assumed to be in the .png format. It is also assumed that
# each folder has the same number of images. It is NOT assumed that all images
# have the same dimensionality, but all the images will be rescaled to 32x32
# before being saved into the dataset file.
# The total number of images is assumed to be divisible by 10.
# The script will produce a file named "characters_dataset" which will
# contain the train/validation/test datasets and labels in numpy arrays.
# The file will also contain the names of all the
# image folders in alphabetic order.
import os
import sys
from scipy import misc
import numpy as np
# The path that you have your image folders in
path = sys.argv[1]
# We rescale each image to be of size "SHAPE"
SHAPE = (32, 32)
# folder_names is a sorted list containing names of all the folders with images
folder_names = []
for name in sorted(os.listdir(path)):
if os.path.isdir(os.path.join(path, name)):
folder_names.append(name)
# Each element of folder_files is a sorted list of file names
# that are contained within a folder from folder_names
folder_files = []
for folder_name in folder_names:
folder_files.append(sorted(os.listdir(os.path.join(path, folder_name))))
number_of_classes = len(folder_names)
# we assume that all classes have the same number of elements
number_of_examples_per_class = len(folder_files[0])
# the data samples X and the labels y
X = []
y = []
# Load the images and labels into numpy arrays
for i in range(number_of_classes):
for j in range(number_of_examples_per_class):
image_location = os.path.join(
path, folder_names[i], folder_files[i][j])
image = misc.imread(image_location)
image = misc.imresize(image, size=SHAPE, interp='bilinear', mode=None)
X.append(image)
y.append(i)
# Turn the samples into proper numpy array of type
# float32 (for use with GPU) rescaled in [0,1] interval.
X = np.float32(np.array(X)/255.0)
y = np.int32(np.array(y))
hex_codes = np.array(folder_names)
# Make so that each batch of size "number_of_classes" samples is
# balanced with respect to classes.
# That is, each batch of size "number_of_classes" samples
# will contain exactly one sample of each class.
# In this way, when we split the data into train, validation, and test
# datasets, all of them will be balanced with respect to classes
# as long as the sizes of all of them are divisible by "number_of_classes".
X = np.concatenate(
[X[i::number_of_examples_per_class]
for i in range(number_of_examples_per_class)])
y = np.concatenate(
[y[i::number_of_examples_per_class]
for i in range(number_of_examples_per_class)])
dataset_size = number_of_classes * number_of_examples_per_class
# train - validation - test split is 80% - 10% - 10%
# We also assume that the dataset_size is divisible by 10.
X_train = X[:(dataset_size*8)//10]
y_train = y[:(dataset_size*8)//10]
X_val = X[(dataset_size*8)//10:(dataset_size*9)//10]
y_val = y[(dataset_size*8)//10:(dataset_size*9)//10]
X_test = X[(dataset_size*9)//10:]
y_test = y[(dataset_size*9)//10:]
f = open("characters_dataset", "wb")
np.save(f, X_train)
np.save(f, y_train)
np.save(f, X_val)
np.save(f, y_val)
np.save(f, X_test)
np.save(f, y_test)
np.save(f, hex_codes) # hex codes of each class (same as folder names)
f.close()
| [
"os.listdir",
"os.path.join",
"numpy.array",
"scipy.misc.imread",
"scipy.misc.imresize",
"numpy.save"
] | [((2311, 2333), 'numpy.array', 'np.array', (['folder_names'], {}), '(folder_names)\n', (2319, 2333), True, 'import numpy as np\n'), ((3448, 3467), 'numpy.save', 'np.save', (['f', 'X_train'], {}), '(f, X_train)\n', (3455, 3467), True, 'import numpy as np\n'), ((3468, 3487), 'numpy.save', 'np.save', (['f', 'y_train'], {}), '(f, y_train)\n', (3475, 3487), True, 'import numpy as np\n'), ((3488, 3505), 'numpy.save', 'np.save', (['f', 'X_val'], {}), '(f, X_val)\n', (3495, 3505), True, 'import numpy as np\n'), ((3506, 3523), 'numpy.save', 'np.save', (['f', 'y_val'], {}), '(f, y_val)\n', (3513, 3523), True, 'import numpy as np\n'), ((3524, 3542), 'numpy.save', 'np.save', (['f', 'X_test'], {}), '(f, X_test)\n', (3531, 3542), True, 'import numpy as np\n'), ((3543, 3561), 'numpy.save', 'np.save', (['f', 'y_test'], {}), '(f, y_test)\n', (3550, 3561), True, 'import numpy as np\n'), ((3562, 3583), 'numpy.save', 'np.save', (['f', 'hex_codes'], {}), '(f, hex_codes)\n', (3569, 3583), True, 'import numpy as np\n'), ((1181, 1197), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1191, 1197), False, 'import os\n'), ((2286, 2297), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2294, 2297), True, 'import numpy as np\n'), ((1221, 1245), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (1233, 1245), False, 'import os\n'), ((1894, 1949), 'os.path.join', 'os.path.join', (['path', 'folder_names[i]', 'folder_files[i][j]'], {}), '(path, folder_names[i], folder_files[i][j])\n', (1906, 1949), False, 'import os\n'), ((1979, 2006), 'scipy.misc.imread', 'misc.imread', (['image_location'], {}), '(image_location)\n', (1990, 2006), False, 'from scipy import misc\n'), ((2023, 2085), 'scipy.misc.imresize', 'misc.imresize', (['image'], {'size': 'SHAPE', 'interp': '"""bilinear"""', 'mode': 'None'}), "(image, size=SHAPE, interp='bilinear', mode=None)\n", (2036, 2085), False, 'from scipy import misc\n'), ((2254, 2265), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', 
(2262, 2265), True, 'import numpy as np\n'), ((1493, 1524), 'os.path.join', 'os.path.join', (['path', 'folder_name'], {}), '(path, folder_name)\n', (1505, 1524), False, 'import os\n')] |
from keras.models import *
from keras.layers import *
from keras.optimizers import *
#from keras import backend as keras
import logging
import tensorflow.compat.v1.logging as tf_logging # to stop tensorflow from displaying depracetion messages
tf_logging.set_verbosity(tf_logging.ERROR)
logger = logging.getLogger(__name__)
def class_model(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=('accuracy'),
pretrained_weights=None, input_size=(256, 256, 1)):
"""
an impelemntation of the unet model, taken from https://github.com/zhixuhao/unet
:param optimizer:keras optimizer to use in the model
:param loss: keras loss function
:param metrics: metrics list for
:param pretrained_weights: path to possible pretrained weights which can be loaded into the model
:param input_size: the dimensions of the input images, defualt is (256,256,1) images
:return:
"""
logger.debug(f"<- unet model with input_size={input_size} andpretraind_weights={pretrained_weights} ")
inputs = Input(input_size)
conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
dense1 = Dense(128, activation='relu',kernel_initializer='he_normal')(drop5)
flat = Flatten()(dense1)
dense2 = Dense(4,activation='softmax',kernel_initializer='he_normal')(flat)
model = Model(inputs=inputs, outputs=dense2)
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
if pretrained_weights:
model.load_weights(pretrained_weights)
return model | [
"logging.getLogger",
"tensorflow.compat.v1.logging.set_verbosity"
] | [((244, 286), 'tensorflow.compat.v1.logging.set_verbosity', 'tf_logging.set_verbosity', (['tf_logging.ERROR'], {}), '(tf_logging.ERROR)\n', (268, 286), True, 'import tensorflow.compat.v1.logging as tf_logging\n'), ((297, 324), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (314, 324), False, 'import logging\n')] |
''' Helper class and functions for loading SUN RGB-D objects
Author: <NAME>
Date: October 2017
Modified by <NAME>
'''
import os
import sys
import numpy as np
import pickle
import argparse
from PIL import Image
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import sunrgbd_utils as utils
from sunrgbd_object import sunrgbd_object
from sunrgbd_utils import random_shift_box2d, extract_pc_in_box3d
def ravel_hash(coord):
assert coord.ndim == 2
coord -= coord.min(0)
coord_max = coord.max(0) + 1
keys = np.zeros(coord.shape[0], dtype=np.int64)
for i in range(coord.shape[1] - 1):
keys += coord[:, i]
keys *= coord_max[i + 1]
keys += coord[:, -1]
return keys
def down_sample(x, voxel_size=(0.05, )):
if isinstance(voxel_size, float):
voxel_size = (voxel_size, )
if len(voxel_size) == 1:
voxel_size = voxel_size * 3
voxel_size = np.array(voxel_size, dtype=np.float32)
voxel_index = np.floor(x / voxel_size).astype(np.int64, copy=False)
hash_keys = ravel_hash(voxel_index)
_, idx = np.unique(hash_keys, return_index=True)
return idx
def get_box3d_dim_statistics(my_sunrgbd_dir, idx_filename, type_whitelist):
dataset = sunrgbd_object(my_sunrgbd_dir)
dimension_list = []
type_list = []
data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]
for data_idx in data_idx_list:
print('------------- ', data_idx)
objects = dataset.get_label_objects(data_idx)
for obj_idx in range(len(objects)):
obj = objects[obj_idx]
if obj.classname not in type_whitelist:
continue
dimension_list.append(np.array([obj.l, obj.w, obj.h]))
type_list.append(obj.classname)
print("number of objects: {} ".format(len(type_list)))
print("categories:", list(sorted(type_whitelist)))
# Get average box size for different categories
for class_type in sorted(set(type_list)):
cnt = 0
box3d_list = []
for i in range(len(dimension_list)):
if type_list[i] == class_type:
cnt += 1
box3d_list.append(dimension_list[i])
median_box3d = np.median(box3d_list, 0)
print("\'%s\': np.array([%f,%f,%f])," %
(class_type, median_box3d[0] * 2, median_box3d[1] * 2, median_box3d[2] * 2))
def read_det_file(det_file):
id_list = []
type_list = []
prob_list = []
box2d_list = []
# data_idx, type_list, prob, box2d
with open(det_file, 'rt') as f:
for line in f:
t = line.rstrip().split(" ")
id_list.append(int(t[0]))
type_list.append(t[1])
prob_list.append(float(t[2]))
box2d_list.append(np.array([float(t[i]) for i in range(3, 7)]))
return id_list, type_list, box2d_list, prob_list
def read_det_pkl_file(det_file):
classes = [
'__background__', 'bathtub', 'bed', 'bookshelf', 'box', 'chair', 'counter', 'desk', 'door', 'dresser',
'garbage_bin', 'lamp', 'monitor', 'night_stand', 'pillow', 'sink', 'sofa', 'table', 'tv', 'toilet'
]
with open(det_file, 'rb') as f:
dets = pickle.load(f)
num_classes = len(dets)
num_images = len(dets[0])
id_list = []
type_list = []
prob_list = []
box2d_list = []
for i in range(num_images):
for c in range(1, num_classes):
det = dets[c][i]
for j in range(len(det)):
id_list.append((i + 1))
type_list.append(classes[c])
prob_list.append(det[j][4])
box2d_list.append(det[j][:4])
return id_list, type_list, box2d_list, prob_list
def extract_frustum_data(sunrgbd_dir,
                         idx_filename,
                         split,
                         output_filename,
                         type_whitelist,
                         perturb_box2d=False,
                         augmentX=1,
                         with_down_sample=False):
    """Extract frustum point clouds for ground-truth 2D boxes and pickle them.

    For every whitelisted object in every frame listed in ``idx_filename``,
    the points falling inside the (optionally perturbed) 2D box frustum are
    collected, labelled by membership in the 3D box, subsampled to at most
    2048 points, and appended to per-field lists that are finally written to
    ``output_filename`` as one pickled dict.

    Args:
        sunrgbd_dir: root of the extracted SUN RGB-D dataset.
        idx_filename: text file with one frame index per line.
        split: dataset split name passed to sunrgbd_object (e.g. 'training').
        output_filename: path of the output pickle file.
        type_whitelist: collection of class names to keep.
        perturb_box2d: if True, randomly shift/scale each 2D box.
        augmentX: number of (perturbed) samples generated per object.
        with_down_sample: if True, voxel-downsample the point cloud first.
    """
    dataset = sunrgbd_object(sunrgbd_dir, split)
    data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]
    id_list = []  # int number
    box2d_list = []  # [xmin,ymin,xmax,ymax]
    box3d_list = []  # (8,3) array in upright depth coord
    input_list = []  # channel number = 6, xyz,rgb in upright depth coord
    label_list = []  # 1 for roi object, 0 for clutter
    type_list = []  # string e.g. bed
    heading_list = []  # face of object angle, radius of clockwise angle from positive x axis in upright camera coord
    box3d_size_list = []  # array of l,w,h
    frustum_angle_list = []  # angle of 2d box center from pos x-axis (clockwise)
    img_coord_list = []
    calib_K_list = []
    calib_R_list = []
    pos_cnt = 0
    all_cnt = 0
    for data_idx in data_idx_list:
        print('------------- ', data_idx)
        calib = dataset.get_calibration(data_idx)
        objects = dataset.get_label_objects(data_idx)
        pc_upright_depth = dataset.get_pointcloud(data_idx)
        pc_upright_camera = np.zeros_like(pc_upright_depth)
        pc_upright_camera[:, 0:3] = calib.project_upright_depth_to_upright_camera(pc_upright_depth[:, 0:3])
        pc_upright_camera[:, 3:] = pc_upright_depth[:, 3:]
        if with_down_sample:
            idx = down_sample(pc_upright_camera[:, :3], 0.01)
            # print(len(idx), len(pc_upright_camera))
            pc_upright_camera = pc_upright_camera[idx]
            pc_upright_depth = pc_upright_depth[idx]
        # img = dataset.get_image(data_idx)
        # img_height, img_width, img_channel = img.shape
        pc_image_coord, _ = calib.project_upright_depth_to_image(pc_upright_depth)
        for obj_idx in range(len(objects)):
            obj = objects[obj_idx]
            if obj.classname not in type_whitelist:
                continue
            # 2D BOX: Get pts rect backprojected
            box2d = obj.box2d
            for _ in range(augmentX):
                if perturb_box2d:
                    xmin, ymin, xmax, ymax = random_shift_box2d(box2d)
                    # print(xmin,ymin,xmax,ymax)
                else:
                    xmin, ymin, xmax, ymax = box2d
                box_fov_inds = (pc_image_coord[:, 0] < xmax) & (pc_image_coord[:, 0] >= xmin) & (
                    pc_image_coord[:, 1] < ymax) & (pc_image_coord[:, 1] >= ymin)
                coord_in_box_fov = pc_image_coord[box_fov_inds, :]
                pc_in_box_fov = pc_upright_camera[box_fov_inds, :]
                # Get frustum angle (according to center pixel in 2D BOX)
                box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
                uvdepth = np.zeros((1, 3))
                uvdepth[0, 0:2] = box2d_center
                uvdepth[0, 2] = 20  # some random depth
                box2d_center_upright_camera = calib.project_image_to_upright_camera(uvdepth)
                # print('UVdepth, center in upright camera: ', uvdepth, box2d_center_upright_camera)
                frustum_angle = -1 * np.arctan2(
                    box2d_center_upright_camera[0, 2],
                    box2d_center_upright_camera[0, 0])  # angle as to positive x-axis as in the Zoox paper
                # print('Frustum angle: ', frustum_angle)
                # 3D BOX: Get pts velo in 3d box
                box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib)
                box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)
                try:
                    _, inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)
                except Exception as e:
                    print(e)
                    continue
                label = np.zeros((pc_in_box_fov.shape[0]))
                label[inds] = 1
                box3d_size = np.array([2 * obj.l, 2 * obj.w, 2 * obj.h])
                # Subsample points..
                num_point = pc_in_box_fov.shape[0]
                if num_point > 2048:
                    choice = np.random.choice(pc_in_box_fov.shape[0], 2048, replace=False)
                    coord_in_box_fov = coord_in_box_fov[choice, :]
                    pc_in_box_fov = pc_in_box_fov[choice, :]
                    label = label[choice]
                # Reject object with too few points
                if np.sum(label) < 5:
                    continue
                id_list.append(data_idx)
                box2d_list.append(np.array([xmin, ymin, xmax, ymax], dtype=np.float32))
                box3d_list.append(box3d_pts_3d)
                input_list.append(pc_in_box_fov.astype(np.float32))
                # np.bool was removed in NumPy 1.24; the builtin bool is the
                # documented drop-in replacement.
                label_list.append(label.astype(bool))
                type_list.append(obj.classname)
                heading_list.append(obj.heading_angle)
                box3d_size_list.append(box3d_size)
                frustum_angle_list.append(frustum_angle)
                img_coord_list.append(coord_in_box_fov.astype(np.float32))
                calib_K_list.append(calib.K)
                calib_R_list.append(calib.Rtilt)
                # collect statistics
                pos_cnt += np.sum(label)
                all_cnt += pc_in_box_fov.shape[0]
    print('Average pos ratio: ', pos_cnt / float(all_cnt))
    print('Average npoints: ', float(all_cnt) / len(id_list))
    data_dict = {
        'id': id_list,
        'box2d': box2d_list,
        'box3d': box3d_list,
        'box3d_size': box3d_size_list,
        'box3d_heading': heading_list,
        'type': type_list,
        'input': input_list,
        'frustum_angle': frustum_angle_list,
        'label': label_list,
        'calib_K': calib_K_list,
        'calib_R': calib_R_list,
        # 'image_coord': img_coord_list,
    }
    with open(output_filename, 'wb') as f:
        pickle.dump(data_dict, f, -1)
    print("save in {}".format(output_filename))
def extract_frustum_data_from_rgb_detection(sunrgbd_dir,
                                            det_file,
                                            split,
                                            output_filename,
                                            type_whitelist,
                                            valid_id_list=None,
                                            with_down_sample=False):
    """Extract frustum point clouds for 2D boxes produced by an RGB detector.

    Like extract_frustum_data(), but the 2D boxes and their scores come from
    ``det_file`` (plain-text for a ``.txt`` extension, otherwise a pickle),
    so no 3D boxes or point-level labels are produced.  Results are written
    to ``output_filename`` as one pickled dict.

    Args:
        sunrgbd_dir: root of the extracted SUN RGB-D dataset.
        det_file: detector output; parsed by read_det_file or read_det_pkl_file.
        split: dataset split name passed to sunrgbd_object (e.g. 'training').
        output_filename: path of the output pickle file.
        type_whitelist: collection of class names to keep.
        valid_id_list: if given, only frames with these ids are processed.
        with_down_sample: if True, voxel-downsample the point cloud first.
    """
    dataset = sunrgbd_object(sunrgbd_dir, split)
    if det_file.split('.')[-1] == 'txt':
        det_id_list, det_type_list, det_box2d_list, det_prob_list = read_det_file(det_file)
    else:
        det_id_list, det_type_list, det_box2d_list, det_prob_list = read_det_pkl_file(det_file)
    # Detections are grouped by frame, so the expensive per-frame point-cloud
    # projection is cached and reused while consecutive detections share a frame.
    cache_id = -1
    cache = None
    id_list = []
    type_list = []
    box2d_list = []
    prob_list = []
    input_list = []  # channel number = 4, xyz,intensity in rect camera coord
    frustum_angle_list = []  # angle of 2d box center from pos x-axis
    img_coord_list = []
    calib_K_list = []
    calib_R_list = []
    for det_idx in range(len(det_id_list)):
        data_idx = det_id_list[det_idx]
        if valid_id_list is not None and data_idx not in valid_id_list:
            continue
        if det_type_list[det_idx] not in type_whitelist:
            continue
        print('det idx: %d/%d, data idx: %d' % (det_idx, len(det_id_list), data_idx))
        if cache_id != data_idx:
            calib = dataset.get_calibration(data_idx)
            pc_upright_depth = dataset.get_pointcloud(data_idx)
            pc_upright_camera = np.zeros_like(pc_upright_depth)
            pc_upright_camera[:, 0:3] = calib.project_upright_depth_to_upright_camera(pc_upright_depth[:, 0:3])
            pc_upright_camera[:, 3:] = pc_upright_depth[:, 3:]
            if with_down_sample:
                idx = down_sample(pc_upright_camera[:, :3], 0.01)
                # print(len(idx), len(pc_upright_camera))
                pc_upright_camera = pc_upright_camera[idx]
                pc_upright_depth = pc_upright_depth[idx]
            # img = dataset.get_image(data_idx)
            # img_height, img_width, img_channel = img.shape
            pc_image_coord, _ = calib.project_upright_depth_to_image(pc_upright_depth)
            cache = [calib, pc_upright_camera, pc_image_coord]
            cache_id = data_idx
        else:
            calib, pc_upright_camera, pc_image_coord = cache
        # 2D BOX: Get pts rect backprojected
        xmin, ymin, xmax, ymax = det_box2d_list[det_idx]
        box_fov_inds = (pc_image_coord[:, 0] < xmax) & (pc_image_coord[:, 0] >= xmin) & (
            pc_image_coord[:, 1] < ymax) & (pc_image_coord[:, 1] >= ymin)
        coord_in_box_fov = pc_image_coord[box_fov_inds, :]
        pc_in_box_fov = pc_upright_camera[box_fov_inds, :]
        # Get frustum angle (according to center pixel in 2D BOX)
        box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
        uvdepth = np.zeros((1, 3))
        uvdepth[0, 0:2] = box2d_center
        uvdepth[0, 2] = 20  # some random depth
        box2d_center_upright_camera = calib.project_image_to_upright_camera(uvdepth)
        frustum_angle = -1 * np.arctan2(
            box2d_center_upright_camera[0, 2],
            box2d_center_upright_camera[0, 0])  # angle as to positive x-axis as in the Zoox paper
        # Subsample points..
        num_point = pc_in_box_fov.shape[0]
        if num_point > 2048:
            choice = np.random.choice(pc_in_box_fov.shape[0], 2048, replace=False)
            coord_in_box_fov = coord_in_box_fov[choice, :]
            pc_in_box_fov = pc_in_box_fov[choice, :]
        # Pass objects that are too small
        if len(pc_in_box_fov) < 5:
            continue
        id_list.append(data_idx)
        type_list.append(det_type_list[det_idx])
        box2d_list.append(det_box2d_list[det_idx])
        prob_list.append(det_prob_list[det_idx])
        input_list.append(pc_in_box_fov.astype(np.float32))
        frustum_angle_list.append(frustum_angle)
        img_coord_list.append(coord_in_box_fov.astype(np.float32))
        calib_K_list.append(calib.K)
        calib_R_list.append(calib.Rtilt)
    data_dict = {
        'id': id_list,
        'type': type_list,
        'box2d': box2d_list,
        'box2d_prob': prob_list,
        'input': input_list,
        'frustum_angle': frustum_angle_list,
        'calib_K': calib_K_list,
        'calib_R': calib_R_list,
        # 'image_coord': img_coord_list,
    }
    with open(output_filename, 'wb') as f:
        pickle.dump(data_dict, f, -1)
    print("save in {}".format(output_filename))
if __name__ == '__main__':
    # CLI driver: choose which frustum pickles to generate and for which
    # category set (10 or 19 classes).
    parser = argparse.ArgumentParser()
    parser.add_argument('--gen_train',
                        action='store_true',
                        help='Generate train split frustum data with perturbed GT 2D boxes')
    parser.add_argument('--gen_val', action='store_true', help='Generate val split frustum data with GT 2D boxes')
    parser.add_argument('--gen_val_rgb_detection',
                        action='store_true',
                        help='Generate val split frustum data with RGB detection 2D boxes')
    parser.add_argument('--num_classes', default=10, type=int, help='19 or 10 categories, default 10')
    parser.add_argument('--save_dir',
                        default='sunrgbd/data/pickle_data',
                        type=str,
                        help='directory to save data, default[sunrgbd/data/pickle_data]')
    parser.add_argument('--gen_avg_dim', action='store_true', help='get average dimension of each class')
    args = parser.parse_args()
    my_sunrgbd_dir = 'sunrgbd/mysunrgbd'  # change if you do not set default path
    if args.num_classes == 10:
        type_whitelist = [
            'bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', 'night_stand', 'bookshelf', 'bathtub'
        ]
    elif args.num_classes == 19:
        type_whitelist = [
            'bathtub', 'bed', 'bookshelf', 'box', 'chair', 'counter', 'desk', 'door', 'dresser', 'garbage_bin', 'lamp',
            'monitor', 'night_stand', 'pillow', 'sink', 'sofa', 'table', 'tv', 'toilet'
        ]
    else:
        # parser.error() prints usage and exits; the previous ``assert False``
        # would be silently skipped when running under ``python -O``.
        parser.error('--num_classes must be 10 or 19')
    type_whitelist = set(type_whitelist)
    if args.gen_avg_dim:
        get_box3d_dim_statistics(my_sunrgbd_dir, 'sunrgbd/image_sets/train.txt', type_whitelist)
    save_dir = args.save_dir
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    if args.gen_train:
        extract_frustum_data(my_sunrgbd_dir,
                             'sunrgbd/image_sets/train.txt',
                             'training',
                             output_filename=os.path.join(save_dir, 'sunrgbd_train_aug5x.pickle'),
                             type_whitelist=type_whitelist,
                             perturb_box2d=True,
                             augmentX=5,
                             with_down_sample=False)
    if args.gen_val:
        extract_frustum_data(my_sunrgbd_dir,
                             'sunrgbd/image_sets/val.txt',
                             'training',
                             output_filename=os.path.join(save_dir, 'sunrgbd_val.pickle'),
                             type_whitelist=type_whitelist,
                             perturb_box2d=False,
                             augmentX=1,
                             with_down_sample=False)
    if args.gen_val_rgb_detection:
        extract_frustum_data_from_rgb_detection(my_sunrgbd_dir,
                                                './sunrgbd/rgb_detections/sunrgbd_rgb_det_val_classes19_mAP50.2.txt',
                                                'training',
                                                os.path.join(save_dir, 'sunrgbd_rgb_det_val.pickle'),
                                                type_whitelist=type_whitelist)
| [
"numpy.array",
"numpy.arctan2",
"sys.path.append",
"sunrgbd_utils.random_shift_box2d",
"os.path.exists",
"argparse.ArgumentParser",
"sunrgbd_utils.compute_box_3d",
"sunrgbd_object.sunrgbd_object",
"numpy.random.choice",
"pickle.load",
"numpy.floor",
"numpy.median",
"pickle.dump",
"numpy.un... | [((268, 293), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (283, 293), False, 'import sys\n'), ((241, 266), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (256, 266), False, 'import os\n'), ((557, 597), 'numpy.zeros', 'np.zeros', (['coord.shape[0]'], {'dtype': 'np.int64'}), '(coord.shape[0], dtype=np.int64)\n', (565, 597), True, 'import numpy as np\n'), ((944, 982), 'numpy.array', 'np.array', (['voxel_size'], {'dtype': 'np.float32'}), '(voxel_size, dtype=np.float32)\n', (952, 982), True, 'import numpy as np\n'), ((1108, 1147), 'numpy.unique', 'np.unique', (['hash_keys'], {'return_index': '(True)'}), '(hash_keys, return_index=True)\n', (1117, 1147), True, 'import numpy as np\n'), ((1256, 1286), 'sunrgbd_object.sunrgbd_object', 'sunrgbd_object', (['my_sunrgbd_dir'], {}), '(my_sunrgbd_dir)\n', (1270, 1286), False, 'from sunrgbd_object import sunrgbd_object\n'), ((4083, 4117), 'sunrgbd_object.sunrgbd_object', 'sunrgbd_object', (['sunrgbd_dir', 'split'], {}), '(sunrgbd_dir, split)\n', (4097, 4117), False, 'from sunrgbd_object import sunrgbd_object\n'), ((10331, 10365), 'sunrgbd_object.sunrgbd_object', 'sunrgbd_object', (['sunrgbd_dir', 'split'], {}), '(sunrgbd_dir, split)\n', (10345, 10365), False, 'from sunrgbd_object import sunrgbd_object\n'), ((14556, 14581), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14579, 14581), False, 'import argparse\n'), ((2244, 2268), 'numpy.median', 'np.median', (['box3d_list', '(0)'], {}), '(box3d_list, 0)\n', (2253, 2268), True, 'import numpy as np\n'), ((3224, 3238), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3235, 3238), False, 'import pickle\n'), ((5107, 5138), 'numpy.zeros_like', 'np.zeros_like', (['pc_upright_depth'], {}), '(pc_upright_depth)\n', (5120, 5138), True, 'import numpy as np\n'), ((9819, 9848), 'pickle.dump', 'pickle.dump', (['data_dict', 'f', '(-1)'], {}), '(data_dict, f, -1)\n', (9830, 9848), False, 'import 
pickle\n'), ((12784, 12836), 'numpy.array', 'np.array', (['[(xmin + xmax) / 2.0, (ymin + ymax) / 2.0]'], {}), '([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])\n', (12792, 12836), True, 'import numpy as np\n'), ((12855, 12871), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (12863, 12871), True, 'import numpy as np\n'), ((14434, 14463), 'pickle.dump', 'pickle.dump', (['data_dict', 'f', '(-1)'], {}), '(data_dict, f, -1)\n', (14445, 14463), False, 'import pickle\n'), ((16339, 16363), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (16353, 16363), False, 'import os\n'), ((16373, 16394), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (16384, 16394), False, 'import os\n'), ((1001, 1025), 'numpy.floor', 'np.floor', (['(x / voxel_size)'], {}), '(x / voxel_size)\n', (1009, 1025), True, 'import numpy as np\n'), ((11461, 11492), 'numpy.zeros_like', 'np.zeros_like', (['pc_upright_depth'], {}), '(pc_upright_depth)\n', (11474, 11492), True, 'import numpy as np\n'), ((13073, 13158), 'numpy.arctan2', 'np.arctan2', (['box2d_center_upright_camera[0, 2]', 'box2d_center_upright_camera[0, 0]'], {}), '(box2d_center_upright_camera[0, 2], box2d_center_upright_camera[0, 0]\n )\n', (13083, 13158), True, 'import numpy as np\n'), ((13353, 13414), 'numpy.random.choice', 'np.random.choice', (['pc_in_box_fov.shape[0]', '(2048)'], {'replace': '(False)'}), '(pc_in_box_fov.shape[0], 2048, replace=False)\n', (13369, 13414), True, 'import numpy as np\n'), ((17656, 17708), 'os.path.join', 'os.path.join', (['save_dir', '"""sunrgbd_rgb_det_val.pickle"""'], {}), "(save_dir, 'sunrgbd_rgb_det_val.pickle')\n", (17668, 17708), False, 'import os\n'), ((1723, 1754), 'numpy.array', 'np.array', (['[obj.l, obj.w, obj.h]'], {}), '([obj.l, obj.w, obj.h])\n', (1731, 1754), True, 'import numpy as np\n'), ((6665, 6717), 'numpy.array', 'np.array', (['[(xmin + xmax) / 2.0, (ymin + ymax) / 2.0]'], {}), '([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])\n', (6673, 6717), 
True, 'import numpy as np\n'), ((6744, 6760), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (6752, 6760), True, 'import numpy as np\n'), ((7421, 7453), 'sunrgbd_utils.compute_box_3d', 'utils.compute_box_3d', (['obj', 'calib'], {}), '(obj, calib)\n', (7441, 7453), True, 'import sunrgbd_utils as utils\n'), ((7767, 7799), 'numpy.zeros', 'np.zeros', (['pc_in_box_fov.shape[0]'], {}), '(pc_in_box_fov.shape[0])\n', (7775, 7799), True, 'import numpy as np\n'), ((7863, 7906), 'numpy.array', 'np.array', (['[2 * obj.l, 2 * obj.w, 2 * obj.h]'], {}), '([2 * obj.l, 2 * obj.w, 2 * obj.h])\n', (7871, 7906), True, 'import numpy as np\n'), ((9160, 9173), 'numpy.sum', 'np.sum', (['label'], {}), '(label)\n', (9166, 9173), True, 'import numpy as np\n'), ((16611, 16663), 'os.path.join', 'os.path.join', (['save_dir', '"""sunrgbd_train_aug5x.pickle"""'], {}), "(save_dir, 'sunrgbd_train_aug5x.pickle')\n", (16623, 16663), False, 'import os\n'), ((17080, 17124), 'os.path.join', 'os.path.join', (['save_dir', '"""sunrgbd_val.pickle"""'], {}), "(save_dir, 'sunrgbd_val.pickle')\n", (17092, 17124), False, 'import os\n'), ((6098, 6123), 'sunrgbd_utils.random_shift_box2d', 'random_shift_box2d', (['box2d'], {}), '(box2d)\n', (6116, 6123), False, 'from sunrgbd_utils import random_shift_box2d, extract_pc_in_box3d\n'), ((7095, 7180), 'numpy.arctan2', 'np.arctan2', (['box2d_center_upright_camera[0, 2]', 'box2d_center_upright_camera[0, 0]'], {}), '(box2d_center_upright_camera[0, 2], box2d_center_upright_camera[0, 0]\n )\n', (7105, 7180), True, 'import numpy as np\n'), ((7596, 7644), 'sunrgbd_utils.extract_pc_in_box3d', 'extract_pc_in_box3d', (['pc_in_box_fov', 'box3d_pts_3d'], {}), '(pc_in_box_fov, box3d_pts_3d)\n', (7615, 7644), False, 'from sunrgbd_utils import random_shift_box2d, extract_pc_in_box3d\n'), ((8061, 8122), 'numpy.random.choice', 'np.random.choice', (['pc_in_box_fov.shape[0]', '(2048)'], {'replace': '(False)'}), '(pc_in_box_fov.shape[0], 2048, replace=False)\n', (8077, 8122), 
True, 'import numpy as np\n'), ((8364, 8377), 'numpy.sum', 'np.sum', (['label'], {}), '(label)\n', (8370, 8377), True, 'import numpy as np\n'), ((8488, 8540), 'numpy.array', 'np.array', (['[xmin, ymin, xmax, ymax]'], {'dtype': 'np.float32'}), '([xmin, ymin, xmax, ymax], dtype=np.float32)\n', (8496, 8540), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common Python library imports
# Pip package imports
from marshmallow import pre_load, post_dump, post_load, validates_schema, ValidationError
# Internal package imports
from backend.extensions.api import api
from backend.api import OneOfSchema, validates, ValidationError, GeometryModelConverter, GeometryField
from ..models import ReferenceParcel, ReferenceParcelTypes
from .reference_parcel_base import REFERENCE_PARCEL_BASE_DATA_FIELDS
from .agricultural_parcel import AgriculturalParcelSerializer
from .physical_block import PhysicalBlockSerializer
class ReferenceParcelSerializer(OneOfSchema):
model_type_field = 'parcel_type'
type_field = 'referenceParcelType'
type_field_remove = True
type_schemas = {
ReferenceParcelTypes.AgriculturalParcel.value: AgriculturalParcelSerializer,
ReferenceParcelTypes.PhysicalBlock.value: PhysicalBlockSerializer,
}
class Meta:
model = ReferenceParcel
load_instance = False
#fields = REFERENCE_PARCEL_BASE_DATA_FIELDS
model_converter = GeometryModelConverter
# load_instance = False
@api.serializer(many=True)
class ReferenceParcelListSerializer(ReferenceParcelSerializer):
class Meta:
model = ReferenceParcel
load_instance = False
#fields = REFERENCE_PARCEL_BASE_DATA_FIELDS
model_converter = GeometryModelConverter
# load_instance = False
| [
"backend.extensions.api.api.serializer"
] | [((1162, 1187), 'backend.extensions.api.api.serializer', 'api.serializer', ([], {'many': '(True)'}), '(many=True)\n', (1176, 1187), False, 'from backend.extensions.api import api\n')] |
import copy
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
class DataFrame(object):
    """Minimal pd.DataFrame analog for handling n-dimensional numpy matrices with additional
    support for shuffling, batching, and train/test splitting.

    Args:
        columns: List of names corresponding to the matrices in data.
        data: List of n-dimensional data matrices ordered in correspondence with columns.
            All matrices must have the same leading dimension.  Data can also be fed a list of
            instances of np.memmap, in which case RAM usage can be limited to the size of a
            single batch.
    """

    def __init__(self, columns, data):
        assert len(columns) == len(data), 'columns length does not match data length'
        lengths = [mat.shape[0] for mat in data]
        assert len(set(lengths)) == 1, 'all matrices in data must have same first dimension'
        self.length = lengths[0]
        self.columns = columns
        self.data = data
        self.dict = dict(zip(self.columns, self.data))
        # Permutation of row positions; shuffling permutes this index array
        # instead of moving the (possibly memory-mapped) data itself.
        self.idx = np.arange(self.length)

    def shapes(self):
        """Return a pd.Series mapping column name -> matrix shape."""
        return pd.Series(dict(zip(self.columns, [mat.shape for mat in self.data])))

    def dtypes(self):
        """Return a pd.Series mapping column name -> matrix dtype."""
        return pd.Series(dict(zip(self.columns, [mat.dtype for mat in self.data])))

    def shuffle(self):
        """Shuffle row order in place (only the index array is permuted)."""
        np.random.shuffle(self.idx)

    def train_test_split(self, train_size, random_state=None, stratify=None):
        """Split into (train_df, test_df) DataFrames.

        Args:
            train_size: fraction or absolute number of training rows.
            random_state: seed for the split; drawn at random per call when
                None.  (The previous default ``np.random.randint(1000)`` was
                evaluated once at class-definition time, silently fixing the
                same seed for every call.)
            stratify: optional labels for stratified splitting.
        """
        if random_state is None:
            random_state = np.random.randint(1000)
        train_idx, test_idx = train_test_split(
            self.idx,
            train_size=train_size,
            random_state=random_state,
            stratify=stratify
        )
        train_df = DataFrame(copy.copy(self.columns), [mat[train_idx] for mat in self.data])
        test_df = DataFrame(copy.copy(self.columns), [mat[test_idx] for mat in self.data])
        return train_df, test_df

    def batch_generator(self, batch_size, shuffle=True, num_epochs=10000, allow_smaller_final_batch=False):
        """Yield DataFrame batches of up to ``batch_size`` rows for ``num_epochs`` epochs."""
        epoch_num = 0
        while epoch_num < num_epochs:
            if shuffle:
                self.shuffle()
            # Stop at self.length: the former ``self.length + 1`` bound could
            # yield a zero-row batch when allow_smaller_final_batch is True
            # and the length is an exact multiple of batch_size.
            for i in range(0, self.length, batch_size):
                batch_idx = self.idx[i: i + batch_size]
                if not allow_smaller_final_batch and len(batch_idx) != batch_size:
                    break
                yield DataFrame(
                    columns=copy.copy(self.columns),
                    data=[mat[batch_idx].copy() for mat in self.data]
                )
            epoch_num += 1

    def iterrows(self):
        """Yield rows (as pd.Series) in the current shuffled order."""
        # __getitem__ already maps a position through self.idx, so iterate
        # positions 0..length-1; the previous ``for i in self.idx:
        # yield self[i]`` applied the permutation twice.
        for pos in range(self.length):
            yield self[pos]

    def mask(self, mask):
        """Return a new DataFrame with rows selected by a boolean mask."""
        return DataFrame(copy.copy(self.columns), [mat[mask] for mat in self.data])

    def concat(self, other_df):
        """Return a new DataFrame with other_df's rows appended to this one's."""
        mats = []
        for column in self.columns:
            mats.append(np.concatenate([self[column], other_df[column]], axis=0))
        return DataFrame(copy.copy(self.columns), mats)

    def items(self):
        return self.dict.items()

    def __iter__(self):
        return self.dict.items().__iter__()

    def __len__(self):
        return self.length

    def __getitem__(self, key):
        """Column access by name (str key) or row access by position (int key)."""
        if isinstance(key, str):
            return self.dict[key]
        elif isinstance(key, int):
            return pd.Series(dict(zip(self.columns, [mat[self.idx[key]] for mat in self.data])))

    def __setitem__(self, key, value):
        assert value.shape[0] == len(self), 'matrix first dimension does not match'
        if key not in self.columns:
            self.columns.append(key)
            self.data.append(value)
        self.dict[key] = value
| [
"sklearn.model_selection.train_test_split",
"numpy.random.randint",
"numpy.concatenate",
"copy.copy",
"numpy.arange",
"numpy.random.shuffle"
] | [((1114, 1136), 'numpy.arange', 'np.arange', (['self.length'], {}), '(self.length)\n', (1123, 1136), True, 'import numpy as np\n'), ((1383, 1410), 'numpy.random.shuffle', 'np.random.shuffle', (['self.idx'], {}), '(self.idx)\n', (1400, 1410), True, 'import numpy as np\n'), ((1468, 1491), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {}), '(1000)\n', (1485, 1491), True, 'import numpy as np\n'), ((1539, 1638), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.idx'], {'train_size': 'train_size', 'random_state': 'random_state', 'stratify': 'stratify'}), '(self.idx, train_size=train_size, random_state=random_state,\n stratify=stratify)\n', (1555, 1638), False, 'from sklearn.model_selection import train_test_split\n'), ((1722, 1745), 'copy.copy', 'copy.copy', (['self.columns'], {}), '(self.columns)\n', (1731, 1745), False, 'import copy\n'), ((1814, 1837), 'copy.copy', 'copy.copy', (['self.columns'], {}), '(self.columns)\n', (1823, 1837), False, 'import copy\n'), ((2692, 2715), 'copy.copy', 'copy.copy', (['self.columns'], {}), '(self.columns)\n', (2701, 2715), False, 'import copy\n'), ((2945, 2968), 'copy.copy', 'copy.copy', (['self.columns'], {}), '(self.columns)\n', (2954, 2968), False, 'import copy\n'), ((2862, 2918), 'numpy.concatenate', 'np.concatenate', (['[self[column], other_df[column]]'], {'axis': '(0)'}), '([self[column], other_df[column]], axis=0)\n', (2876, 2918), True, 'import numpy as np\n'), ((2421, 2444), 'copy.copy', 'copy.copy', (['self.columns'], {}), '(self.columns)\n', (2430, 2444), False, 'import copy\n')] |
from program_synthesis.karel.dataset import executor
from program_synthesis.karel.dataset import parser_for_synthesis
# Trace-event types that open a conditional block (branch coverage) versus
# primitive Karel actions (statement coverage).
branch_types = {'if', 'ifElse', 'while'}
stmt_types = {'move', 'turnLeft', 'turnRight', 'putMarker', 'pickMarker'}


class CoverageMeasurer(object):
    """Accumulates statement and branch coverage of a Karel program over a
    set of executed inputs."""

    def __init__(self, code):
        self.parser = parser_for_synthesis.KarelForSynthesisParser(
            build_tree=True)
        self.executor = executor.KarelExecutor()
        self.code = code
        # Parse for its side effect: it appears to populate the parser's
        # action_spans / cond_block_spans used below (the returned tree was
        # previously bound to an unused local).
        self.parser.parse(code)
        # Statement coverage: actions
        self.stmt_coverage = {span: 0 for span in self.parser.action_spans}
        # Branch coverage: if, ifElse, while -- each (span, cond_value) pair
        # counts how often that condition evaluated to that value.
        self.branch_coverage = {(span, cond_value): 0
                                for span in self.parser.cond_block_spans
                                for cond_value in (True, False)}

    def add(self, inp):
        """Execute the program on ``inp`` and accumulate coverage counts.

        Returns:
            False when execution produced no output (no coverage recorded),
            True otherwise.
        """
        out, trace = self.executor.execute(
            self.code, None, inp, record_trace=True)
        if not out:
            return False
        for event in trace.events:
            if event.type in branch_types:
                self.branch_coverage[event.span, event.cond_value] += 1
            elif event.type in stmt_types:
                self.stmt_coverage[event.span] += 1
        return True

    def uncovered(self):
        """Return (uncovered statement spans, uncovered (span, cond) branches)."""
        # dict.items() works on both Python 2 and 3; the previous
        # .iteritems() is Python-2-only and raises AttributeError on 3.
        return (tuple(k for k, v in self.stmt_coverage.items() if v == 0),
                tuple(k for k, v in self.branch_coverage.items() if v == 0))
| [
"program_synthesis.karel.dataset.executor.KarelExecutor",
"program_synthesis.karel.dataset.parser_for_synthesis.KarelForSynthesisParser"
] | [((319, 380), 'program_synthesis.karel.dataset.parser_for_synthesis.KarelForSynthesisParser', 'parser_for_synthesis.KarelForSynthesisParser', ([], {'build_tree': '(True)'}), '(build_tree=True)\n', (363, 380), False, 'from program_synthesis.karel.dataset import parser_for_synthesis\n'), ((422, 446), 'program_synthesis.karel.dataset.executor.KarelExecutor', 'executor.KarelExecutor', ([], {}), '()\n', (444, 446), False, 'from program_synthesis.karel.dataset import executor\n')] |
#! /usr/bin/env python3
# coding=utf-8
'''
Loads a saved pytorch model checkpoint and an image and prints the most likely
image class and it's associated probability. If provided, uses a category to
name json file to map categories to names and print the names as well.
SPECS:
- Allows users to print out the top K classes along with associated
probabilities.
- Allows users to use the GPU to calculate the predictions.
- Allows users to load a JSON file that maps the class values to other category
names.
TODO:
- args validation,
- complete docstrings,
- write unit tests
'''
import os
import argparse
import json
from PIL import Image
import numpy as np
import torch
from torch.autograd import Variable
from torchvision import models
def main():
    """Entry point: parse CLI args, restore the checkpointed model, and print
    the top-K predicted classes for the supplied image."""
    args = get_input_args()
    # Load model from checkpoint
    model = load_checkpoint(args)
    # Predict and print top K classes along with their probabilities
    predict(model, args)
def get_input_args() -> argparse.Namespace:
    """Parse and return the command-line arguments.

    Positional: checkpoint path and image path.  Optional: --gpu flag,
    --topk count, and a --cat_to_name JSON mapping of class ids to names.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('checkpoint_path', metavar='CHKPT_PATH',
                        help='path to chekpoint')
    parser.add_argument('image_path', metavar='IMG_PATH',
                        help='path to image')
    parser.add_argument('--gpu', dest='gpu', default=False,
                        action='store_true', help='use gpu for the prediction')
    parser.add_argument('-k', '--topk', dest='topk', default=1,
                        type=int,
                        help='number of top K classes to print (default: 1)')
    parser.add_argument('-ctn', '--cat_to_name', dest='cat_to_name',
                        default=None,
                        type=str,
                        help="""
                        The path to an alternative JSON file that maps the class
                        values to category names (default:None)
                        """)
    return parser.parse_args()
def load_checkpoint(args):
    """Rebuild a torchvision model from a saved checkpoint.

    The checkpoint is expected to contain 'architecture', 'classifier',
    'class_to_idx' and 'state_dict' entries.

    Args:
        args: parsed CLI namespace with a ``checkpoint_path`` attribute.

    Returns:
        The restored model.
    """
    checkpoint_path = os.path.relpath(args.checkpoint_path)
    # map_location='cpu' lets a checkpoint saved on a GPU machine load on a
    # CPU-only host; predict() moves the model to CUDA later when --gpu is set.
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    model = models.__dict__[checkpoint['architecture']](pretrained=True)
    model.classifier = checkpoint['classifier']
    model.class_to_idx = checkpoint['class_to_idx']
    model.load_state_dict(checkpoint['state_dict'])
    return model
def process_image(image):
    """Scale, crop, and normalize a PIL image for a PyTorch model.

    The image is resized so its shorter side is 256 px (aspect ratio kept),
    center-cropped to 224x224, scaled to [0, 1], normalized with the
    ImageNet mean/std, and transposed to channel-first layout.

    Args:
        image: a PIL.Image (any object exposing PIL's size/resize/crop API).

    Returns:
        np.ndarray of shape (3, 224, 224).
    """
    width, height = image.size
    # Resize so the shorter side becomes 256 px, keeping the aspect ratio.
    # (The previous list.index(min/max) approach crashed on square images,
    # where min and max point at the same element and one side stayed None.)
    if width <= height:
        new_size = [256, int(256 * height / width)]
    else:
        new_size = [int(256 * width / height), 256]
    image = image.resize(new_size)
    # Crop out the center 224x224 portion of the image
    width, height = new_size
    new_width, new_height = (224, 224)
    left = (width - new_width) / 2
    top = (height - new_height) / 2
    right = (width + new_width) / 2
    bottom = (height + new_height) / 2
    image = image.crop((left, top, right, bottom))
    # Convert image color channels from 0-255 to floats 0-1.
    np_image = np.array(image)
    np_image = np_image / 255
    # ImageNet channel statistics used during training.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_image = (np_image - mean) / std
    # PyTorch expects the color channel to be the first dimension but it's the
    # third dimension in the PIL image and Numpy array. Traspose the numpy array
    np_image = np_image.transpose((2, 0, 1))
    return np_image
def predict(model, args):
    """Predict the top-K classes of an image with a trained model.

    Prints the class probabilities and labels; when a category-to-name JSON
    mapping is supplied via --cat_to_name, (class, name) pairs are printed.

    Args:
        model: restored model carrying a ``class_to_idx`` mapping.
        args: parsed CLI namespace (image_path, gpu, topk, cat_to_name).
    """
    print("=> Predicting probabilities..\n")
    model.eval()
    # Invert class->index into index->class to map predictions back to
    # class labels.  (Previously computed twice; once is enough.)
    idx_to_class = {i: k for k, i in model.class_to_idx.items()}
    # Load and process image
    image_path = os.path.relpath(args.image_path)
    image = process_image(Image.open(image_path))
    image = torch.FloatTensor([image])
    # Configure use of gpu
    if args.gpu:
        print('    Using GPU..\n')
        model = model.cuda()
        image = image.cuda()
    # get top K predictions and indexes
    output = model.forward(Variable(image))
    ps = torch.exp(output).data[0]
    cl_index = ps.topk(args.topk)
    # Map to classes and names
    classes = [idx_to_class[idx]
               for idx in cl_index[1].cpu().numpy()]
    probs = cl_index[0].cpu().numpy()
    print('    Probabilities: ', probs)
    if args.cat_to_name:
        ctn_path = os.path.relpath(args.cat_to_name)
        with open(ctn_path, 'r') as f:
            cat_to_name = json.load(f)
        names = [cat_to_name[cl] for cl in classes]
        print('    Classes: ', [(cl, nm) for cl, nm in
                                zip(classes, names)])
    else:
        print('    Classes: ', classes)
# Run prediction only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"PIL.Image.open",
"argparse.ArgumentParser",
"torch.load",
"torch.exp",
"numpy.array",
"json.load",
"torch.autograd.Variable",
"torch.FloatTensor",
"os.path.relpath"
] | [((1022, 1061), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (1045, 1061), False, 'import argparse\n'), ((2041, 2078), 'os.path.relpath', 'os.path.relpath', (['args.checkpoint_path'], {}), '(args.checkpoint_path)\n', (2056, 2078), False, 'import os\n'), ((2096, 2123), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (2106, 2123), False, 'import torch\n'), ((3373, 3388), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3381, 3388), True, 'import numpy as np\n'), ((3431, 3462), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (3439, 3462), True, 'import numpy as np\n'), ((3473, 3504), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (3481, 3504), True, 'import numpy as np\n'), ((4217, 4249), 'os.path.relpath', 'os.path.relpath', (['args.image_path'], {}), '(args.image_path)\n', (4232, 4249), False, 'import os\n'), ((4312, 4338), 'torch.FloatTensor', 'torch.FloatTensor', (['[image]'], {}), '([image])\n', (4329, 4338), False, 'import torch\n'), ((4276, 4298), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4286, 4298), False, 'from PIL import Image\n'), ((4651, 4666), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (4659, 4666), False, 'from torch.autograd import Variable\n'), ((4978, 5011), 'os.path.relpath', 'os.path.relpath', (['args.cat_to_name'], {}), '(args.cat_to_name)\n', (4993, 5011), False, 'import os\n'), ((4677, 4694), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (4686, 4694), False, 'import torch\n'), ((5077, 5089), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5086, 5089), False, 'import json\n')] |
# Minimal QUANTAXIS example script.
import QUANTAXIS as QA
# Manual MongoDB client construction, kept for reference:
#QA.QA_Setting.client=QA.QAUtil.QA_util_sql_mongo_setting(QA.QA_Setting.QA_util_sql_mongo_ip,QA.QA_Setting.QA_util_sql_mongo_port)
# Create a market instance and grab the sample bid object shipped with the library.
market=QA.QA_Market()
bid=QA.QA_QAMarket_bid.bid
# Run one deal-matching pass for the bid -- presumably persisting results via
# the configured MongoDB client (verify against the QUANTAXIS docs).
market.market_make_deal(bid,QA.QA_Setting.client)
| [
"QUANTAXIS.QA_Market"
] | [((161, 175), 'QUANTAXIS.QA_Market', 'QA.QA_Market', ([], {}), '()\n', (173, 175), True, 'import QUANTAXIS as QA\n')] |
from utils import parse_data, extract_gpa_data, merge_gpa_data
from mysql_engine import loadEngine, MySQLEngine
import pathlib
import numpy as np
import json
from tqdm import tqdm
def upload_courses(file_path: str, engine: MySQLEngine):
    """
    Uploads all the courses to the MySQL database.

    Parses the CSV files found under ``file_path``, keeps only the course id
    and credit hours, merges in GPA data and inserts the rows into the
    ``courses`` table.

    :param file_path: the directory where to search the CSV files
    :param engine: a MySQLEngine where the data needs to be uploaded
    """
    df = parse_data(file_path)
    df = df[['courseId', 'creditHours']]
    # default value of Interest- will need to be hardcoded.
    # Every course starts with the JSON-encoded placeholder tag list '["None"]'.
    df['Interest'] = ['["None"]'] * len(df)
    gpa_df = extract_gpa_data(file_path)
    df = merge_gpa_data(df, gpa_df)
    engine.insert_df(df, 'courses')
def tag_courses(file_path: str, engine: MySQLEngine):
    """
    Adds all tags to courses.

    Reads a JSON document mapping tag name -> list of course ids, clears every
    course's ``interest`` JSON array in ``student_info.courses`` and then
    appends each tag to all of its courses.

    :param file_path (string): where to find the JSON document with course info
    :param engine (MySQLEngine): used to connect to RDMS
    """
    # Context manager guarantees the handle is closed even when json.load
    # raises (the original open()/close() pair leaked the file on errors).
    with open(file_path, 'r') as f:
        course_tags = json.load(f)
    # Reset every course's interest list to an empty JSON array.
    engine.raw_operation('SET SQL_SAFE_UPDATES = 0')
    engine.raw_operation('UPDATE student_info.courses SET courses.interest = JSON_ARRAY()')
    engine.raw_operation('SET SQL_SAFE_UPDATES = 1')
    # NOTE(review): the statement is built with str.format, so tag/course
    # values from the JSON file are interpolated unescaped -- acceptable only
    # because the file is a trusted static asset; parameterized queries would
    # be safer.
    exec_command = "UPDATE student_info.courses " \
                   "SET interest = JSON_ARRAY_APPEND(`interest`, '$', '{}') " \
                   "WHERE course_id = '{}'"
    for tag, tag_classes in course_tags.items():
        pbar = tqdm(total=len(tag_classes))
        pbar.set_description("Processing {}".format(tag))
        for tag_class in tag_classes:
            engine.raw_operation(exec_command.format(tag, tag_class))
            pbar.update(1)
        # Close the bar explicitly instead of relying on `del pbar`.
        pbar.close()
if __name__ == '__main__':
    # Entry point: load all course rows into MySQL, then attach interest tags.
    e = loadEngine()
    upload_courses('data', e)
    tag_courses('db/static/track_data.json', e)
    # Drop the engine explicitly so its connection is released promptly.
    del e
| [
"utils.parse_data",
"mysql_engine.loadEngine",
"json.load",
"utils.extract_gpa_data",
"utils.merge_gpa_data"
] | [((468, 489), 'utils.parse_data', 'parse_data', (['file_path'], {}), '(file_path)\n', (478, 489), False, 'from utils import parse_data, extract_gpa_data, merge_gpa_data\n'), ((650, 677), 'utils.extract_gpa_data', 'extract_gpa_data', (['file_path'], {}), '(file_path)\n', (666, 677), False, 'from utils import parse_data, extract_gpa_data, merge_gpa_data\n'), ((687, 713), 'utils.merge_gpa_data', 'merge_gpa_data', (['df', 'gpa_df'], {}), '(df, gpa_df)\n', (701, 713), False, 'from utils import parse_data, extract_gpa_data, merge_gpa_data\n'), ((1037, 1049), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1046, 1049), False, 'import json\n'), ((1801, 1813), 'mysql_engine.loadEngine', 'loadEngine', ([], {}), '()\n', (1811, 1813), False, 'from mysql_engine import loadEngine, MySQLEngine\n')] |
import fnmatch
import os
import re
import time
from datetime import date

import pandas as pd
import xlwings as xw

import file_dir_dialog as fdd
import msgbox
from read_config_functions import *
class New_Excel_WB:
    """Thin wrapper around a fresh ``xlwings`` workbook used for result exports."""

    # Fixed column widths applied by columnformat().
    _COLUMN_WIDTHS = (('A:B', 70), ('C:C', 12), ('D:D', 15), ('E:E', 20), ('F:F', 25))

    def __init__(self):
        self.wb = xw.Book()

    def df_to_excel(self, df, path):
        """Dump *df* into the active sheet, format it and save to *path*."""
        self.wb.sheets.active.range('A1').options(index=False).value = df
        self.enable_autofilter()
        self.columnformat()
        self.save(path=path)

    def enable_autofilter(self):
        """Turn on Excel's AutoFilter for the header row."""
        self.wb.sheets.active.range('A1').api.AutoFilter(1)

    def columnformat(self):
        """Apply fixed column widths, a number format and a bold header row."""
        sheet = self.wb.sheets.active
        for columns, width in self._COLUMN_WIDTHS:
            sheet.api.Columns(columns).ColumnWidth = width
        sheet.api.Columns('D:D').NumberFormat = '#,##0'
        sheet.api.Rows('1:1').Font.Bold = True

    def close(self):
        """Close the underlying workbook."""
        self.wb.close()

    def save(self, path=None):
        """Save the workbook, under *path* when one is given."""
        if not path:
            self.wb.save()
        else:
            self.wb.save(path=path)
def find_files(directory, pattern, dt1=None, dt2=None):
    """Yield files under *directory* whose name matches *pattern*.

    The pattern is first treated as a case-insensitive regular expression;
    if it does not compile, it falls back to an ``fnmatch`` glob.  Only files
    whose last-modified date lies in the inclusive ``[dt1, dt2]`` range
    (``YYYY-MM-DD`` strings, defaulting to 1900-01-01 .. today) are yielded.

    Yields tuples ``(fullpath, directory, basename, extension, size, lastmod)``.
    Side effect: resets and updates the module-global ``num_files_searched``
    counter, which the report writers read afterwards.
    """
    dt1 = '1900-01-01' if dt1 is None else dt1
    dt2 = date.today().strftime('%Y-%m-%d') if dt2 is None else dt2
    global num_files_searched
    num_files_searched = 0
    # Compile the regex once (the original recompiled it for every file);
    # fall back to glob matching when it is not a valid regular expression.
    try:
        re_pattern = re.compile(pattern, re.IGNORECASE)
    except Exception:
        re_pattern = None
    for root, dirs, files in os.walk(directory):
        for basename in files:
            num_files_searched += 1
            if re_pattern is not None:
                matched = re_pattern.match(basename)
            else:
                try:
                    matched = fnmatch.fnmatch(basename, pattern)
                except Exception:
                    matched = False
            # os.path.join of two strings cannot fail; the original wrapped it
            # in try/except TypeError and could leave `file` unbound.
            file = os.path.join(root, basename)
            # Last-modified date; unreadable or vanished files are skipped.
            try:
                dtmod = time.strftime('%Y-%m-%d', time.localtime(os.path.getmtime(file)))
            except OSError:
                dtmod = None
            if dtmod and matched and dt1 <= dtmod <= dt2:
                try:
                    size = os.path.getsize(file)
                    extension = os.path.splitext(file)[1][1:]
                    lastmod = time.strftime('%Y-%m-%d %H:%M', time.localtime(os.path.getmtime(file)))
                except OSError:
                    size = 0
                    extension = ''
                    lastmod = None
                yield file, root, basename, extension, size, lastmod
def find_dirs(directory, pattern):
for root, dirs, files in os.walk(directory):
# root: current directory, type string
# dirs: list of directories within root
# files: list of files within root
lastdir = root.split('\\')[-1] if '\\' in root else ''
try:
re_pattern = re.compile(pattern, re.IGNORECASE)
matched = re_pattern.match(lastdir)
except Exception:
matched = fnmatch.fnmatch(lastdir, pattern)
if matched:
yield root
def search_dir_topdown(pattern, filename, dt1=None, dt2=None):
    """Search a user-selected directory tree for files matching *pattern* and
    write the results (with Excel hyperlinks) to ``<filename>.xlsx``,
    falling back to ``<filename>.csv`` when the Excel export fails.

    :param pattern: regex (or glob fallback) matched against file names
    :param filename: output file stem; any extension is replaced
    :param dt1: optional inclusive lower bound on last-modified date (YYYY-MM-DD)
    :param dt2: optional inclusive upper bound on last-modified date (YYYY-MM-DD)
    """
    try:
        # Remember the last browsed directory in file_search.config.
        fn = os.path.join(os.getcwd(), 'file_search.config')
        last_dir = configsectionmap(fn, 'last path')
        # create dataframe from generator, tweak it, write to excel
        dir_selected = fdd.get_directory(last_dir)
        # be sure directory is not None type, and that it exists before setting last path
        if dir_selected and os.path.isdir(dir_selected):
            update_setting(fn, 'last path', 'last', dir_selected)
        if not dir_selected:
            return
        df = pd.DataFrame(find_files(dir_selected, pattern, dt1, dt2), columns=['file', 'directory', 'filename',
                                                                           'extension', 'file_size',
                                                                           'lastmod'])
        # Turn the path columns into clickable Excel HYPERLINK() formulas.
        df['directory'] = '=HYPERLINK("' + df['directory'] + '")'
        df['filename'] = '=HYPERLINK("' + df['file'] + '", "' + df['filename'] + '")'
        df = df[['directory', 'filename', 'extension', 'file_size', 'lastmod']]
        # Record search statistics in two extra (empty) header columns.
        files_searched_header = 'files searched: {:,.0f}'.format(num_files_searched)
        df.insert(5, files_searched_header, '')
        df.insert(6, pattern, '')
        if df.shape[0] == 0:
            msgbox.show_message('Bummer', 'No files found using that expression')
            return
        filename = os.path.splitext(filename)[0]
        # if using pandas to save to excel:
        # df.to_excel(filename + '.xlsx', index=False)
        # os.startfile(filename + '.xlsx')
        # if using xlwings to save to excel
        try:
            new_excel_wb = New_Excel_WB()
            new_excel_wb.df_to_excel(df=df, path=filename + '.xlsx')
        # NOTE(review): bare except -- treated as "too many rows for Excel",
        # but any export failure lands here and triggers the CSV fallback.
        except:
            # msgbox.show_message('debug', str(len(df.index)) + ': ' + str(df.shape))
            # msgbox.show_error('Error', 'Error creating Excel file')
            new_excel_wb.close()
            msgbox.show_message('Info', 'There are too many rows to write to Excel.\nSwitching to csv.')
            df.to_csv(filename + '.csv', index=False)
            os.startfile(filename + '.csv')
    except PermissionError:
        msgbox.show_error('Permission Error', filename + '.xlsx already open')
    except Exception as e:
        msgbox.show_error('Error', e)
def search_dir_only(pattern, filename):
    """Search a user-selected tree for directories matching *pattern* and
    write the hyperlinked results to ``<filename>.xlsx``.

    :param pattern: regex (or glob fallback) matched against directory names
    :param filename: output file stem; any extension is replaced
    """
    try:
        # Start the folder picker from the last browsed directory.
        fn = os.path.join(os.getcwd(), 'file_search.config')
        last_dir = configsectionmap(fn, 'last path')
        dir_selected = fdd.get_directory(last_dir)
        if not dir_selected:
            return
        df = pd.DataFrame(find_dirs(dir_selected, pattern), columns=['Directory'])
        # Turn each path into a clickable Excel HYPERLINK() formula.
        df['Directory'] = '=HYPERLINK("' + df['Directory'] + '")'
        if df.shape[0] == 0:
            msgbox.show_message('Bummer', 'No directories found using that expression')
            return
        filename = os.path.splitext(filename)[0]
        # if using pandas to save to excel (requires openpyxl)
        # df.to_excel(filename + '.xlsx', index=False)
        # os.startfile(filename + '.xlsx')
        # use xlwings to save to excel
        new_excel_wb = New_Excel_WB()
        new_excel_wb.df_to_excel(df=df, path=filename + '.xlsx')
    except PermissionError:
        msgbox.show_error('Permission Error', filename + '.xlsx already open')
    except Exception as e:
        msgbox.show_error('Error', e)
if __name__ == "__main__":
import sys
pttn = sys.argv[1] if len(sys.argv) > 1 else '.*proj.*'
fn = sys.argv[2] if len(sys.argv) > 2 else 'search_results'
search_dir_topdown(pttn, fn, dt1=None, dt2=None)
| [
"os.path.getsize",
"file_dir_dialog.get_directory",
"os.path.splitext",
"os.path.join",
"os.getcwd",
"os.path.isdir",
"fnmatch.fnmatch",
"msgbox.show_error",
"os.startfile",
"os.path.getmtime",
"datetime.date.today",
"xlwings.Book",
"msgbox.show_message",
"os.walk"
] | [((1544, 1562), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1551, 1562), False, 'import os\n'), ((3476, 3494), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (3483, 3494), False, 'import os\n'), ((261, 270), 'xlwings.Book', 'xw.Book', ([], {}), '()\n', (268, 270), True, 'import xlwings as xw\n'), ((4241, 4268), 'file_dir_dialog.get_directory', 'fdd.get_directory', (['last_dir'], {}), '(last_dir)\n', (4258, 4268), True, 'import file_dir_dialog as fdd\n'), ((6544, 6571), 'file_dir_dialog.get_directory', 'fdd.get_directory', (['last_dir'], {}), '(last_dir)\n', (6561, 6571), True, 'import file_dir_dialog as fdd\n'), ((1327, 1339), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1337, 1339), False, 'from datetime import date\n'), ((4059, 4070), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4068, 4070), False, 'import os\n'), ((4389, 4416), 'os.path.isdir', 'os.path.isdir', (['dir_selected'], {}), '(dir_selected)\n', (4402, 4416), False, 'import os\n'), ((5297, 5366), 'msgbox.show_message', 'msgbox.show_message', (['"""Bummer"""', '"""No files found using that expression"""'], {}), "('Bummer', 'No files found using that expression')\n", (5316, 5366), False, 'import msgbox\n'), ((5407, 5433), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (5423, 5433), False, 'import os\n'), ((6211, 6281), 'msgbox.show_error', 'msgbox.show_error', (['"""Permission Error"""', "(filename + '.xlsx already open')"], {}), "('Permission Error', filename + '.xlsx already open')\n", (6228, 6281), False, 'import msgbox\n'), ((6319, 6348), 'msgbox.show_error', 'msgbox.show_error', (['"""Error"""', 'e'], {}), "('Error', e)\n", (6336, 6348), False, 'import msgbox\n'), ((6431, 6442), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6440, 6442), False, 'import os\n'), ((6816, 6891), 'msgbox.show_message', 'msgbox.show_message', (['"""Bummer"""', '"""No directories found using that expression"""'], {}), "('Bummer', 'No directories found 
using that expression')\n", (6835, 6891), False, 'import msgbox\n'), ((6932, 6958), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (6948, 6958), False, 'import os\n'), ((7309, 7379), 'msgbox.show_error', 'msgbox.show_error', (['"""Permission Error"""', "(filename + '.xlsx already open')"], {}), "('Permission Error', filename + '.xlsx already open')\n", (7326, 7379), False, 'import msgbox\n'), ((7417, 7446), 'msgbox.show_error', 'msgbox.show_error', (['"""Error"""', 'e'], {}), "('Error', e)\n", (7434, 7446), False, 'import msgbox\n'), ((2241, 2269), 'os.path.join', 'os.path.join', (['root', 'basename'], {}), '(root, basename)\n', (2253, 2269), False, 'import os\n'), ((3875, 3908), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['lastdir', 'pattern'], {}), '(lastdir, pattern)\n', (3890, 3908), False, 'import fnmatch\n'), ((5980, 6079), 'msgbox.show_message', 'msgbox.show_message', (['"""Info"""', '"""There are too many rows to write to Excel.\nSwitching to csv."""'], {}), '(\'Info\',\n """There are too many rows to write to Excel.\nSwitching to csv.""")\n', (5999, 6079), False, 'import msgbox\n'), ((6141, 6172), 'os.startfile', 'os.startfile', (["(filename + '.csv')"], {}), "(filename + '.csv')\n", (6153, 6172), False, 'import os\n'), ((2857, 2878), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (2872, 2878), False, 'import os\n'), ((2102, 2136), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['basename', 'pattern'], {}), '(basename, pattern)\n', (2117, 2136), False, 'import fnmatch\n'), ((2534, 2556), 'os.path.getmtime', 'os.path.getmtime', (['file'], {}), '(file)\n', (2550, 2556), False, 'import os\n'), ((2912, 2934), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (2928, 2934), False, 'import os\n'), ((3020, 3042), 'os.path.getmtime', 'os.path.getmtime', (['file'], {}), '(file)\n', (3036, 3042), False, 'import os\n')] |
"""Demo using test environment for grpc testing"""
import logging
from google.protobuf import json_format
from framework.config import settings
from tests.base_test import BaseTestCase
from utils.channel_factory import get_channel
from utils.builders.grpc_builders import build_number_from_file, build_number_from_dict
from services.doubler.doubler_pb2_grpc import DoublerStub
from services.doubler.doubler_pb2 import Number
log = logging.getLogger(__name__)
# Metadata key/value pairs attached to the channel in setUpClass below.
METADATA = (('key1', 'val1'), ('key2', 'val2'),)
# Per-RPC deadline (seconds) passed as ``timeout`` to every Double() call.
TIMEOUT_SEC = 0.15
class ExampleGrpcTestCase(BaseTestCase):
    """Tests use server from grpc-demo/doubler"""

    @classmethod
    def setUpClass(cls):
        """Open one shared channel/stub pair for the whole test class."""
        host = settings["doubler_grpc_host"]
        port = settings["doubler_grpc_port"]
        cls._channel = get_channel(host, port, metadata=METADATA)
        cls._stub = DoublerStub(cls._channel)

    @classmethod
    def tearDownClass(cls):
        """Release the shared channel once all tests have run."""
        cls._channel.close()

    def test_grpc_call1(self):
        """Double a number loaded from a JSON request fixture."""
        request = build_number_from_file('resources/requests/doubler/request1.json')
        # https://grpc.io/grpc/python/grpc.html#multi-callable-interfaces
        response = self._stub.Double(request, timeout=TIMEOUT_SEC)
        log.debug(f'response: {json_format.MessageToJson(response)}')
        self.assertEqual(response.value, 10.0)

    def test_grpc_call2(self):
        """Double a number built from a plain dict."""
        request = build_number_from_dict({'value': -4.0})
        response = self._stub.Double(request, timeout=TIMEOUT_SEC)
        self.assertEqual(response.value, -8.0)

    def test_grpc_call3(self):
        """Double a number built directly from the protobuf message."""
        request = Number(value=3.0)
        response = self._stub.Double(request, timeout=TIMEOUT_SEC)
        self.assertEqual(response.value, 6.0)
| [
"logging.getLogger",
"services.doubler.doubler_pb2.Number",
"services.doubler.doubler_pb2_grpc.DoublerStub",
"utils.builders.grpc_builders.build_number_from_dict",
"utils.builders.grpc_builders.build_number_from_file",
"utils.channel_factory.get_channel",
"google.protobuf.json_format.MessageToJson"
] | [((436, 463), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (453, 463), False, 'import logging\n'), ((721, 817), 'utils.channel_factory.get_channel', 'get_channel', (["settings['doubler_grpc_host']", "settings['doubler_grpc_port']"], {'metadata': 'METADATA'}), "(settings['doubler_grpc_host'], settings['doubler_grpc_port'],\n metadata=METADATA)\n", (732, 817), False, 'from utils.channel_factory import get_channel\n'), ((904, 929), 'services.doubler.doubler_pb2_grpc.DoublerStub', 'DoublerStub', (['cls._channel'], {}), '(cls._channel)\n', (915, 929), False, 'from services.doubler.doubler_pb2_grpc import DoublerStub\n'), ((1130, 1196), 'utils.builders.grpc_builders.build_number_from_file', 'build_number_from_file', (['"""resources/requests/doubler/request1.json"""'], {}), "('resources/requests/doubler/request1.json')\n", (1152, 1196), False, 'from utils.builders.grpc_builders import build_number_from_file, build_number_from_dict\n'), ((1550, 1589), 'utils.builders.grpc_builders.build_number_from_dict', 'build_number_from_dict', (["{'value': -4.0}"], {}), "({'value': -4.0})\n", (1572, 1589), False, 'from utils.builders.grpc_builders import build_number_from_file, build_number_from_dict\n'), ((1799, 1816), 'services.doubler.doubler_pb2.Number', 'Number', ([], {'value': '(3.0)'}), '(value=3.0)\n', (1805, 1816), False, 'from services.doubler.doubler_pb2 import Number\n'), ((1384, 1419), 'google.protobuf.json_format.MessageToJson', 'json_format.MessageToJson', (['response'], {}), '(response)\n', (1409, 1419), False, 'from google.protobuf import json_format\n')] |
import os
import torch
import numpy as np
class IoUAverager:
    """Accumulates per-class intersection-over-union for segmentation outputs."""

    def __init__(self, nCls, eps=1e-5):
        self.nCls = nCls
        self.eps = eps
        # One list of per-sample IoU values for every part/class id.
        self.shape_ious = [[] for _ in range(self.nCls)]

    def clear(self):
        """Drop all accumulated statistics."""
        self.shape_ious = [[] for _ in range(self.nCls)]

    def update(self, outputs, truths):
        """Accumulate IoUs from logits ``outputs`` [B, nCls, ...] and labels ``truths``."""
        predictions = outputs.max(dim=1)[1].detach().cpu().numpy()
        labels = truths.detach().cpu().numpy()
        for pred, label in zip(predictions, labels):
            for part in range(self.nCls):
                inter = np.sum(np.logical_and(pred == part, label == part))
                union = np.sum(np.logical_or(pred == part, label == part))
                # A class absent from both prediction and target is skipped.
                if union != 0:
                    self.shape_ious[part].append(inter / union)

    def measure(self):
        """Mean IoU over the classes that appeared at least once."""
        per_class = [np.mean(ious) for ious in self.shape_ious if ious != []]
        return np.mean(per_class)

    def better(self, A, B):
        """Higher mIoU is better."""
        return A > B

    def write(self, writer, global_step, prefix=""):
        """Log the current mIoU to a tensorboard-style ``writer``."""
        writer.add_scalar(os.path.join(prefix, "mIoU"), self.measure(), global_step)

    def report(self):
        """Human-readable summary of the overall and per-class IoU."""
        text = f"mIoU = {self.measure():.4f}\n"
        for part in range(self.nCls):
            if self.shape_ious[part] != []:
                text += f"\t Class {part}: {np.mean(self.shape_ious[part]):.4f}\n"
            else:
                text += f"\t Class {part}: None\n"
        return text
class ClassificationAverager:
    """ statistics for classification """

    def __init__(self, nCls, eps=1e-5, names=None):
        """
        :param nCls: number of classes
        :param eps: small constant guarding divisions by zero
        :param names: optional list of human-readable class names for report()
        """
        self.nCls = nCls
        self.names = names
        self.eps = eps
        self.N = 0  # total number of samples seen
        # Per-class confusion counts; columns are [TP, TN, FP, FN].
        self.table = np.zeros((self.nCls, 4), dtype=np.int32)
        self.hist_preds = []
        self.hist_truths = []

    def clear(self):
        """Reset all accumulated statistics."""
        self.N = 0
        self.table = np.zeros((self.nCls, 4), dtype=np.int32)
        self.hist_preds = []
        self.hist_truths = []

    def update(self, outputs, truths):
        """Accumulate stats from logits ``outputs`` [B, nCls] and labels ``truths`` [B]."""
        preds = torch.argmax(outputs, dim=1).detach().cpu().numpy()  # [B, ]
        labels = truths.detach().cpu().numpy()  # [B, ]
        self.hist_preds.extend(preds.tolist())
        self.hist_truths.extend(labels.tolist())
        self.N += np.prod(labels.shape)
        for Cls in range(self.nCls):
            true_positive = np.count_nonzero(np.bitwise_and(preds == Cls, labels == Cls))
            true_negative = np.count_nonzero(np.bitwise_and(preds != Cls, labels != Cls))
            false_positive = np.count_nonzero(np.bitwise_and(preds == Cls, labels != Cls))
            false_negative = np.count_nonzero(np.bitwise_and(preds != Cls, labels == Cls))
            self.table[Cls] += [true_positive, true_negative, false_positive, false_negative]

    def measure(self):
        """Overall Accuracy"""
        total_TP = np.sum(self.table[:, 0])  # all true positives
        accuracy = total_TP/self.N
        return accuracy

    def better(self, A, B):
        """Higher accuracy is better."""
        return A > B

    def write(self, writer, global_step, prefix=""):
        """Log overall accuracy to a tensorboard-style ``writer``."""
        writer.add_scalar(os.path.join(prefix, "Accuracy"), self.measure(), global_step)

    def plot_conf_mat(self):
        #mat = confusion_matrix(self.hist_truths, self.hist_preds)
        from .vision import plot_confusion_matrix
        plot_confusion_matrix(self.hist_truths, self.hist_preds)

    def report(self, each_class=False, conf_mat=False):
        """Textual summary; optionally per-class metrics and a confusion-matrix plot.

        Bug fix: precision and recall were swapped. Precision is TP/(TP+FP)
        and recall is TP/(TP+FN); the original computed the opposite.
        """
        precisions = []
        recalls = []
        for Cls in range(self.nCls):
            # table columns: [TP, TN, FP, FN]
            precision = self.table[Cls, 0] / (self.table[Cls, 0] + self.table[Cls, 2] + self.eps)  # TP / (TP + FP)
            recall = self.table[Cls, 0] / (self.table[Cls, 0] + self.table[Cls, 3] + self.eps)  # TP / (TP + FN)
            precisions.append(precision)
            recalls.append(recall)
        total_TP = np.sum(self.table[:, 0])  # all true positives
        accuracy = total_TP/self.N
        # Mean-class accuracy is the mean per-class recall -- numerically the
        # same value the original reported, since its "precisions" list
        # actually held recalls.
        accuracy_mean_class = np.mean(recalls)
        text = f"Overall Accuracy = {accuracy:.4f}({total_TP}/{self.N})\n"
        text += f"\tMean-class Accuracy = {accuracy_mean_class:.4f}\n"
        if each_class:
            for Cls in range(self.nCls):
                if precisions[Cls] != 0 or recalls[Cls] != 0:
                    text += f"\tClass {str(Cls)+'('+self.names[Cls]+')' if self.names is not None else Cls}: precision = {precisions[Cls]:.3f} recall = {recalls[Cls]:.3f}\n"
        if conf_mat:
            self.plot_conf_mat()
        return text
| [
"numpy.mean",
"numpy.prod",
"numpy.logical_and",
"os.path.join",
"numpy.logical_or",
"numpy.bitwise_and",
"numpy.sum",
"numpy.zeros",
"torch.argmax"
] | [((1053, 1065), 'numpy.mean', 'np.mean', (['res'], {}), '(res)\n', (1060, 1065), True, 'import numpy as np\n'), ((1821, 1861), 'numpy.zeros', 'np.zeros', (['(self.nCls, 4)'], {'dtype': 'np.int32'}), '((self.nCls, 4), dtype=np.int32)\n', (1829, 1861), True, 'import numpy as np\n'), ((1983, 2023), 'numpy.zeros', 'np.zeros', (['(self.nCls, 4)'], {'dtype': 'np.int32'}), '((self.nCls, 4), dtype=np.int32)\n', (1991, 2023), True, 'import numpy as np\n'), ((2370, 2391), 'numpy.prod', 'np.prod', (['labels.shape'], {}), '(labels.shape)\n', (2377, 2391), True, 'import numpy as np\n'), ((2959, 2983), 'numpy.sum', 'np.sum', (['self.table[:, 0]'], {}), '(self.table[:, 0])\n', (2965, 2983), True, 'import numpy as np\n'), ((3929, 3953), 'numpy.sum', 'np.sum', (['self.table[:, 0]'], {}), '(self.table[:, 0])\n', (3935, 3953), True, 'import numpy as np\n'), ((4041, 4060), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (4048, 4060), True, 'import numpy as np\n'), ((1196, 1224), 'os.path.join', 'os.path.join', (['prefix', '"""mIoU"""'], {}), "(prefix, 'mIoU')\n", (1208, 1224), False, 'import os\n'), ((3195, 3227), 'os.path.join', 'os.path.join', (['prefix', '"""Accuracy"""'], {}), "(prefix, 'Accuracy')\n", (3207, 3227), False, 'import os\n'), ((2474, 2517), 'numpy.bitwise_and', 'np.bitwise_and', (['(preds == Cls)', '(labels == Cls)'], {}), '(preds == Cls, labels == Cls)\n', (2488, 2517), True, 'import numpy as np\n'), ((2564, 2607), 'numpy.bitwise_and', 'np.bitwise_and', (['(preds != Cls)', '(labels != Cls)'], {}), '(preds != Cls, labels != Cls)\n', (2578, 2607), True, 'import numpy as np\n'), ((2655, 2698), 'numpy.bitwise_and', 'np.bitwise_and', (['(preds == Cls)', '(labels != Cls)'], {}), '(preds == Cls, labels != Cls)\n', (2669, 2698), True, 'import numpy as np\n'), ((2746, 2789), 'numpy.bitwise_and', 'np.bitwise_and', (['(preds != Cls)', '(labels == Cls)'], {}), '(preds != Cls, labels == Cls)\n', (2760, 2789), True, 'import numpy as np\n'), ((608, 671), 
'numpy.logical_and', 'np.logical_and', (['(preds_np[batch] == part)', '(pids_np[batch] == part)'], {}), '(preds_np[batch] == part, pids_np[batch] == part)\n', (622, 671), True, 'import numpy as np\n'), ((700, 762), 'numpy.logical_or', 'np.logical_or', (['(preds_np[batch] == part)', '(pids_np[batch] == part)'], {}), '(preds_np[batch] == part, pids_np[batch] == part)\n', (713, 762), True, 'import numpy as np\n'), ((1006, 1036), 'numpy.mean', 'np.mean', (['self.shape_ious[part]'], {}), '(self.shape_ious[part])\n', (1013, 1036), True, 'import numpy as np\n'), ((1452, 1482), 'numpy.mean', 'np.mean', (['self.shape_ious[part]'], {}), '(self.shape_ious[part])\n', (1459, 1482), True, 'import numpy as np\n'), ((2139, 2167), 'torch.argmax', 'torch.argmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (2151, 2167), False, 'import torch\n')] |
import os
import requests
import sys
import hashlib
import tarfile
def check_version(remote_ver, local_ver):
    """Return True when the server advertises a version different from ours.

    :param remote_ver: URL of the remote version file
    :param local_ver: path of the locally cached version file
    """
    # No cached version file yet -> treat as "update available".
    if not os.path.exists(local_ver):
        return True
    response = requests.get(remote_ver)
    with open(local_ver) as fobj:
        local_text = fobj.read()
    # Any difference between the two version strings means a new release.
    return local_text != response.text
def check_md5(md5_url, local_fname):
    """Return True when the local file's MD5 matches the published checksum.

    :param md5_url: URL serving the expected hex digest
    :param local_fname: path of the downloaded file to verify
    """
    expected = requests.get(md5_url).text.strip()
    digest = hashlib.md5()
    # Hash the file in 4 KiB chunks so large archives do not fill memory.
    with open(local_fname, 'rb') as fobj:
        for chunk in iter(lambda: fobj.read(4096), b''):
            digest.update(chunk)
    return digest.hexdigest() == expected
def download(url, dst_name):
    """Fetch *url* and write its raw body to *dst_name*."""
    response = requests.get(url)
    with open(dst_name, 'wb') as fobj:
        fobj.write(response.content)
def deploy(pkg, ver):
    """Unpack release *pkg* and point /var/www/html/mysite at version *ver*.

    :param pkg: path of the mp-<ver>.tar.gz archive
    :param ver: version string naming the extracted directory
    """
    os.chdir('/var/www/packages/')  # switch to the deployment directory
    # Context manager guarantees the archive is closed even on bad tarballs.
    # NOTE(review): extractall() on an untrusted archive can write outside
    # the target directory; the package source is assumed trusted here.
    with tarfile.open(pkg, 'r:gz') as tar:
        tar.extractall()
    src_fname = '/var/www/packages/mp-%s' % ver
    # lexists() (unlike exists()) also detects a dangling symlink; exists()
    # follows the link and would miss it, making os.symlink() below fail
    # with FileExistsError.
    if os.path.lexists('/var/www/html/mysite'):
        os.unlink('/var/www/html/mysite')
    os.symlink(src_fname, '/var/www/html/mysite')
if __name__ == '__main__':
    remote_ver = 'http://192.168.4.3/deploy/live_version'
    local_ver = '/var/www/packages/live_verion'
    new_ver = check_version(remote_ver, local_ver)
    if not new_ver:  # no newer version on the server: nothing to do, exit
        sys.exit(0)
    r = requests.get(remote_ver)
    ver = r.text.strip()
    soft_url = 'http://192.168.4.3/deploy/packages/mp-%s.tar.gz' % ver
    local_fname = '/var/www/packages/mp-%s.tar.gz' % ver
    download(soft_url, local_fname)  # download the latest release archive
    md5_url = 'http://192.168.4.3/deploy/packages/mp-%s.tar.gz.md5' % ver
    file_ok = check_md5(md5_url, local_fname)
    if not file_ok:  # downloaded file is corrupted: abort
        sys.exit(1)
    download(remote_ver, local_ver)  # cache the version file locally
    deploy(local_fname, ver)  # deploy to the web server
| [
"os.path.exists",
"tarfile.open",
"hashlib.md5",
"os.symlink",
"requests.get",
"os.chdir",
"os.unlink",
"sys.exit"
] | [((241, 265), 'requests.get', 'requests.get', (['remote_ver'], {}), '(remote_ver)\n', (253, 265), False, 'import requests\n'), ((471, 492), 'requests.get', 'requests.get', (['md5_url'], {}), '(md5_url)\n', (483, 492), False, 'import requests\n'), ((501, 514), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (512, 514), False, 'import hashlib\n'), ((826, 843), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (838, 843), False, 'import requests\n'), ((940, 970), 'os.chdir', 'os.chdir', (['"""/var/www/packages/"""'], {}), "('/var/www/packages/')\n", (948, 970), False, 'import os\n'), ((992, 1017), 'tarfile.open', 'tarfile.open', (['pkg', '"""r:gz"""'], {}), "(pkg, 'r:gz')\n", (1004, 1017), False, 'import tarfile\n'), ((1117, 1155), 'os.path.exists', 'os.path.exists', (['"""/var/www/html/mysite"""'], {}), "('/var/www/html/mysite')\n", (1131, 1155), False, 'import os\n'), ((1203, 1248), 'os.symlink', 'os.symlink', (['src_fname', '"""/var/www/html/mysite"""'], {}), "(src_fname, '/var/www/html/mysite')\n", (1213, 1248), False, 'import os\n'), ((1505, 1529), 'requests.get', 'requests.get', (['remote_ver'], {}), '(remote_ver)\n', (1517, 1529), False, 'import requests\n'), ((163, 188), 'os.path.exists', 'os.path.exists', (['local_ver'], {}), '(local_ver)\n', (177, 188), False, 'import os\n'), ((1165, 1198), 'os.unlink', 'os.unlink', (['"""/var/www/html/mysite"""'], {}), "('/var/www/html/mysite')\n", (1174, 1198), False, 'import os\n'), ((1484, 1495), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1492, 1495), False, 'import sys\n'), ((1900, 1911), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1908, 1911), False, 'import sys\n')] |
""" The module provides utility functions to load tree object from files """
import os
import sys
from cached_property import cached_property
from . import source
from .compat.types import basestr
from .tree import Tree, flatten
class Loader(object):
    """
    Configuration tree loader

    :param Walker walk: Walk actor that generates list of files to load
    :param Updater update: Update actor that implements syntactic sugar
    :param PostProcessor postprocess: Result tree post processor
    :param Tree tree: Tree object that should contain result of loading
    """

    def __init__(self, walk=None, update=None, postprocess=None, tree=None):
        # Each collaborator falls back to its default implementation.
        self.walk = walk or Walker()
        self.update = update or Updater()
        self.postprocess = postprocess or PostProcessor()
        # ``or`` would discard an empty (falsy) tree, hence the explicit None test.
        self.tree = tree if tree is not None else Tree()

    @classmethod
    def fromconf(cls, path):
        """
        Creates loader using configuration module ``loaderconf``

        :param str path: Path to a directory that contains ``loaderconf``
        :returns: Ready to use loader object
        :rtype: Loader
        """
        if path not in sys.path:
            sys.path.append(path)
        try:
            import loaderconf
            conf = loaderconf.__dict__
        except ImportError as e:
            # Get module name from exception message:
            # Python 2.x "No module named module_name"
            # Python 3.x "No module named 'module_name'"
            module_name = str(e).split()[-1].strip("'")
            if module_name != "loaderconf":
                # loaderconf exists but imports something missing itself:
                # propagate; a missing config module is the only tolerated case.
                raise
            conf = {}
        # Only these names from loaderconf are forwarded to the constructor.
        keys = ("walk", "update", "postprocess", "tree")
        conf = dict((k, v) for k, v in conf.items() if k in keys)
        return cls(**conf)

    def __call__(self, path):
        """
        Loads configuration

        :param str path: Path to a directory that contains configuration files.
        :returns: Result tree object
        :rtype: Tree
        """
        # Imported here rather than at module level, presumably to avoid a
        # circular import -- confirm before hoisting it to the top of the file.
        from . import logger
        logger.info('Walking over "%s"', path)
        for f in self.walk(path):
            relpath = os.path.relpath(f, path)
            logger.info('Loading "%s"', relpath)
            # The file extension selects the parser from source.map.
            ext = os.path.splitext(f)[1]
            with open(f) as data:
                data = source.map[ext](data)
            if not data:
                continue
            # flatten() yields (key, value) pairs from the parsed data.
            for key, value in flatten(data):
                self.update(self.tree, key, value, f)
        logger.info("Post-processing")
        self.postprocess(self.tree)
        return self.tree
return self.tree
###############################################################################
# Utilities
##
class Pipeline(object):
    """
    Utility class that helps to build pipelines

    .. attribute:: __pipeline__

        List of workers: every method of the class that is marked by the
        :meth:`worker` decorator and is active, ordered by worker priority.
    """

    @cached_property
    def __pipeline__(self):
        candidates = (
            getattr(self, name) for name in dir(self) if not name.startswith("_")
        )
        active = [attr for attr in candidates if getattr(attr, "__worker__", False)]
        # dir() yields names alphabetically, so equal priorities keep a
        # deterministic alphabetical order under the stable sort.
        return sorted(active, key=lambda member: member.__priority__)

    @staticmethod
    def worker(priority, enabled=True):
        """
        Decorator that marks a method as a pipeline worker

        :param int priority: Priority of the worker
        :param bool enabled: Whether worker is active or not
        """
        def decorator(func):
            func.__worker__ = enabled
            func.__priority__ = priority
            return func
        return decorator
###############################################################################
# Walker
##
class Walker(Pipeline):
"""
File walker is used by :class:`Loader` to get list of files to load.
.. attribute:: params
Dictionary that contains all keyword arguments that are passed
into constructor. The dictionary is copied into each :class:`File`
object into :attr:`File.params` attribute. This attribute
can be used by workers from :attr:`__pipeline__` to make decisions
about the file priority.
Only the ``env`` parameter makes sense for :meth:`environment` worker.
All other parameters are simply ignored, but could be used
in extensions.
.. attribute:: __pipeline__
File processing pipeline. Each :class:`File` object is passed
through the following methods until some method returns ``int`` value.
The special value ``-1`` means that the passed file should be ignored.
Other values mean file priority. For instance, regular files
(see :meth:`regular`) have priorities equal to ``30`` or ``31``, and
final ones (see :meth:`final`) have ``100`` or ``101``. That means
that final files will be at the end of result list of files, and
regular files will be at the start of the list.
The list of workers is:
[:meth:`ignored`, :meth:`final`, :meth:`environment`, :meth:`regular`]
"""
    def __init__(self, **params):
        # Keyword arguments are stored verbatim; they are copied onto every
        # traversed File object so pipeline workers can inspect them
        # (e.g. the ``env`` parameter used by the environment worker).
        self.params = params
    def __call__(self, path):
        """
        Walks over the ``path`` and yields files to load

        :param str path: Path to walk over
        """
        # Wrap the root path into a File object and delegate the actual
        # prioritized traversal to walk(); yield plain paths to the caller.
        fileobj = File(os.path.dirname(path), os.path.basename(path), self.params)
        for f in self.walk(fileobj):
            yield f.fullpath
    def walk(self, current):
        """
        Processes current traversing file

        If ``current`` is regular file, it will be yielded as is.
        If it is directory, the list of its files will be prioritized
        using :attr:`__pipeline__`. Then the list will be sorted using
        given priorities and each file will be processed using this method
        recursively.

        The method is low level implementation of :meth:`__call__`
        and should not be used directly.

        :param File current: Current traversing file
        """
        if current.isfile:
            yield current
        elif current.isdir:
            files = []
            for name in os.listdir(current.fullpath):
                fileobj = File(current.fullpath, name, current.params)
                priority = None
                # The first worker returning a non-None value decides the
                # priority; -1 means "ignore this entry".
                for modifier in self.__pipeline__:
                    priority = modifier(fileobj)
                    if priority is not None:
                        break
                # NOTE(review): if every worker returns None, priority stays
                # None and the comparison below raises TypeError on Python 3;
                # presumably the shipped pipeline always assigns one -- confirm.
                if priority < 0:
                    continue
                files.append((priority, fileobj))
            # Sort by priority, then recurse into each surviving entry.
            for _, fileobj in sorted(files):
                for f in self.walk(fileobj):
                    yield f
@Pipeline.worker(10)
def ignored(self, fileobj):
"""
Worker that filters out ignored files and directories
The file will be ignored, if its name starts with dot char
or underscore, or the file format is not supported by loader.
:param File fileobj: Current traversing file
:returns: * ``-1`` when the file is ignored one;
* ``None`` when the file is not ignored one.
.. attribute:: __priority__ = 10
Examples::
.hidden # returns -1 (file name starts with dot char)
_ignored # returns -1 (file name starts with underscore)
unsupported.txt # returns -1 (txt files are not supported)
other.yaml # returns None
"""
if fileobj.name.startswith("_") or fileobj.name.startswith("."):
return -1
if fileobj.isfile and fileobj.ext not in source.map:
return -1
@Pipeline.worker(30)
def final(self, fileobj):
"""
Worker that checks whether current traversing file is final or not.
Final files are processed at the end of current list of files.
If the file name starts with "final", it will be treated as final one.
:param File fileobj: Current traversing file
:returns: * ``100`` when the file is final one and it is directory;
* ``101`` when the file is final one and it is regular file;
* ``None`` when the file is not final one.
.. attribute:: __priority__ = 30
Examples::
final/ # returns 100
final-dir/ # returns 100
other-dir/ # returns None
final.yaml # returns 101
final-file.json # returns 101
other.yaml # returns None
"""
if not fileobj.name.startswith("final"):
return None
return 100 if fileobj.isdir else 101
@Pipeline.worker(50)
def environment(self, fileobj):
"""
Worker that checks whether current traversing file is environment
specific or not.
The file will be treated as environment specific, if its name starts
with "env-" string. The rest part of the name (without extension)
is treated as environment name. If the environment name does not
match to ``env`` parameter (see :attr:`params`), then the file
will be ignored.
:param File fileobj: Current traversing file
:returns: * ``-1`` when the file is environment specific,
but environment name is not match;
* ``50`` when the file is environment specific
and it is regular file;
* ``51`` when the file is environment specific
and it is directory;
* ``None`` when the file is not environment specific one.
.. attribute:: __priority__ = 50
Examples::
# params['env'] == "foo.bar"
env-foo.yaml # returns 50
env-bar.yaml # returns -1 (environment name is not match)
env-foo/ # returns 51
env-bar.yaml # returns 50
env-baz.yaml # returns -1
other.yaml # returns None
"""
if not fileobj.name.startswith("env-"):
return None
env = fileobj.cleanname.split("-", 1)[1]
effective_env = fileobj.params.get("env", "")
if effective_env != env and not effective_env.startswith(env + "."):
return -1
fileobj.params["env"] = effective_env[len(env) + 1 :] # noqa
return 51 if fileobj.isdir else 50
@Pipeline.worker(1000)
def regular(self, fileobj):
"""
Worker that treats any file as a regular one. The worker should be
the last in the :attr:`__pipeline__`, because it does not make
any check.
:param File fileobj: Current traversing file
:returns: * ``30`` when the file is regular file;
* ``31`` when the file is directory.
.. attribute:: __priority__ = 1000
"""
return 31 if fileobj.isdir else 30
class File(object):
"""
Represents current traversing file within :class:`Walker` routine
.. attribute:: path
Path of parent directory containing the file
.. attribute:: name
File name itself
.. attribute:: params
The copy of :attr:`Walker.params` that could be used and transformed
by workers from :attr:`Walker.__pipeline__`.
See :meth:`Walker.environment`.
.. attribute:: fullpath
Full path to the file
.. attribute:: isdir
Boolean value that means whether the file is directory or not
.. attribute:: isfile
Boolean value that means whether the file is regular file or not
.. attribute:: ext
Extension of the file (with leading dot char)
.. attribute:: cleanname
Name of the file without its extension
"""
def __init__(self, path, name, params):
self.path = path
self.name = name
self.params = params.copy()
def __lt__(self, other):
return self.name < other.name
@cached_property
def fullpath(self):
return os.path.join(self.path, self.name)
@cached_property
def isfile(self):
return os.path.isfile(self.fullpath)
@cached_property
def isdir(self):
return os.path.isdir(self.fullpath)
@cached_property
def ext(self):
return os.path.splitext(self.name)[1]
@cached_property
def cleanname(self):
return os.path.splitext(self.name)[0]
###############################################################################
# Updater
##
class Updater(Pipeline):
"""
Updater is used by :class:`Loader` to set up key-value pairs into
updating tree object. The object extends default updating mechanism
adding some syntactic sugar.
.. attribute:: params
Dictionary that contains all keyword arguments that are passed
into constructor. The attribute can be used by workers.
Only the ``namespace`` parameter makes sense for :meth:`eval_value`
worker. All other parameters are simply ignored, but could be used
in extensions.
.. attribute:: __pipeline__
Transforms :class:`UpdateAction` object that created by
:meth:`__call__`.
Each :class:`UpdateAction` object is passed through the following
methods. Each method can transform :attr:`UpdateAction.key`,
:attr:`UpdateAction.value`, or :attr:`UpdateAction.update`, attributes.
So that the default behavior of :class:`UpdateAction` can be changed.
The list of workers is:
[:meth:`set_default`, :meth:`call_method`, :meth:`format_value`,
:meth:`printf_value`, :meth:`eval_value`, :meth:`required_value`]
"""
def __init__(self, **params):
self.params = params
def __call__(self, tree, key, value, source):
"""
Updates tree
It creates :class:`UpdateAction` object. Then pass the object through
the :attr:`__pipeline__`. And finally calls the action.
:param Tree tree: Updating tree object
:param str key: Setting up key
:param value: Setting up value
:param str source: Full path to a source file
"""
action = UpdateAction(tree, key, value, source)
for modifier in self.__pipeline__:
modifier(action)
action()
@Pipeline.worker(20)
def set_default(self, action):
"""
Worker that changes default :attr:`UpdateAction.update` from
``__setitem__`` to ``setdefault`` if key ends with "?" char.
It also transforms key, i.e. strips the last char.
:param UpdateAction action: Current update action object
.. attribute:: __priority__ = 20
Example:
.. code-block:: yaml
x: 1
x?: 2 # x == 1
y?: 3 # y == 3
"""
if not action.key.endswith("?"):
return
action.key = action.key[:-1]
def update(action):
action.tree.setdefault(action.key, action.value)
action.update = update
@Pipeline.worker(30)
def call_method(self, action):
"""
Worker that changes default :attr:`UpdateAction.update` if key contains
"#" char.
It splits :attr:`UpdateAction.key` by the char. The left part is set
up as the key itself. The right part is used as a method name.
It gets value from :attr:`UpdateAction.tree` by the new key and call
its method using :attr:`UpdateAction.value` as an argument.
If any of the values is instance of :class:`Promise`, then it will be
wrapped by another :class:`Promise` object.
See :meth:`PostProcessor.resolve_promise`.
:param UpdateAction action: Current update action object
.. attribute:: __priority__ = 30
Example:
.. code-block:: yaml
foo: [1, 2]
bar: ">>> self['foo'][:]" # Get copy of the latest `foo`
bar#extend: [5, 6] # bar == [1, 2, 3, 4, 5, 6]
foo#extend: [3, 4] # foo == [1, 2, 3, 4]
"""
if "#" not in action.key:
return
action.key, method = action.key.split("#")
def update(action):
old_value = action.tree[action.key]
if isinstance(old_value, Promise) or isinstance(action.value, Promise):
def deferred():
new_value = Promise.resolve(old_value)
getattr(new_value, method)(Promise.resolve(action.value))
return new_value
action.tree[action.key] = action.promise(deferred)
else:
getattr(old_value, method)(action.value)
action.update = update
@Pipeline.worker(50)
def format_value(self, action):
"""
Worker that transforms :attr:`UpdateAction.value` that starts
with ``"$>> "`` (with trailing space char) into formatting expression
and wraps it into :class:`Promise`.
See :meth:`PostProcessor.resolve_promise`.
The expression uses :meth:`str.format`. Current tree and current
branch are passed as ``self`` and ``branch`` names into template.
Both are wrapped by :class:`ResolverProxy`.
:param UpdateAction action: Current update action object
.. attribute:: __priority__ = 50
Example:
.. code-block:: yaml
a: "foo"
b:
x: "bar"
y: "a = {self[a]!r}, b.x = {branch[x]!r}"
# == "a = 'foo', b.x = 'bar'"
"""
if not isinstance(action.value, basestr) or not action.value.startswith("$>> "):
return
value = action.value[4:]
action.value = action.promise(
lambda: value.format(
self=ResolverProxy(action.tree, action.source),
branch=ResolverProxy(action.branch),
)
)
@Pipeline.worker(60)
def printf_value(self, action):
"""
Worker that transform :attr:`UpdateAction.value` that starts
with ``"%>> "`` (with trailing space char) into formatting expression
and wraps it into :class:`Promise`.
See :meth:`PostProcessor.resolve_promise`.
The expression uses printf style, i.e. ``%`` operator.
:attr:`UpdateAction.tree` wrapped by :class:`ResolverProxy`
is used as a formatting value.
:param UpdateAction action: Current update action object
.. attribute:: __priority__ = 60
Example:
.. code-block:: yaml
name: "World"
hello: "%>> Hello %(name)s" # == "Hello World"
"""
if not isinstance(action.value, basestr) or not action.value.startswith("%>> "):
return
value = action.value[4:]
action.value = action.promise(
lambda: value % ResolverProxy(action.tree, action.source)
)
@Pipeline.worker(70)
def eval_value(self, action):
"""
Worker that transform :attr:`UpdateAction.value` that starts with
``">>> "`` (with trailing space char) into expression and wraps it
into :class:`Promise`. See :meth:`PostProcessor.resolve_promise`.
The expression uses built-in function :func:`eval`.
The value of ``namespace`` key from :attr:`params` is passed as
``gloabls`` argument of ``eval``. :attr:`UpdateAction.tree` is passed
as ``self`` and `UpdateAction.branch` is passed as ``branch`` names
via ``locals`` argument of ``eval``. Both are wrapped
by :class:`ResolverProxy`.
:param UpdateAction action: Current update action object
.. attribute:: __priority__ = 70
Example:
.. code-block:: pycon
>>> from math import floor
>>> update = Updater(namespace={'floor': floor})
.. code-block:: yaml
a: ">>> 1 + 2" # == 3
b:
x: 3
y: ">>> self['a'] * branch['x']" # == 9
z: ">>> floor(3.0 / 2)" # == 1
"""
if not isinstance(action.value, basestr) or not action.value.startswith(">>> "):
return
value = action.value[4:]
namespace = self.params.get("namespace", {})
action.value = action.promise(
lambda: eval(
value,
namespace,
{
"self": ResolverProxy(action.tree, action.source),
"branch": ResolverProxy(action.branch),
},
)
)
@Pipeline.worker(80)
def required_value(self, action):
"""
Worker that transform :attr:`UpdateAction.value` that starts with
``"!!!"`` into an instance of :class:`Required`.
See :meth:`PostProcessor.check_required`.
:param UpdateAction action: Current update action object
.. attribute:: __priority__ = 80
Example:
.. code-block:: yaml
foo: "!!!" # without comment
bar: "!!! This should be redefined" # with comment
"""
if not isinstance(action.value, basestr) or not action.value.startswith("!!!"):
return
action.value = Required(action.key, action.value[3:].strip())
class UpdateAction(object):
"""
Helper object that is used within :class:`Updater` routine.
It represents current update context.
.. attribute:: tree
Current updating :class:`configtree.tree.Tree` object
.. attribute:: branch
Property that is used to get current branch from the tree
.. code-block:: pycon
>>> tree = Tree({'a.x': 1, 'a.y': 2})
>>> action = UpdateAction(tree, 'a.z', 3, '/path/to/src.yaml')
>>> action.branch == tree['a']
True
.. attribute:: key
Current setting up key
.. attribute:: value
Current setting up value
.. attribute:: source
Path to a file processing by :class:`Loader`. Is used as a part
of debug information.
.. code-block:: pycon
>>> UpdateAction(Tree(), 'foo', 'bar', '/path/to/src.yaml')
<tree['foo'] = 'bar' from '/path/to/src.yaml'>
.. attribute:: update
Callable object that represent current update action. By default
is equal to :meth:`default_update`.
"""
def __init__(self, tree, key, value, source):
self.tree = tree
self.key = key
self.value = value
self.update = self.default_update
# Debug info
self._key = key
self._value = value
self.source = source
@property
def branch(self):
if self.tree._key_sep not in self.key:
return self.tree
key = self.key.rsplit(self.tree._key_sep, 1)[0]
return self.tree.branch(key)
def __call__(self):
""" Calls :attr:`update`, i.e. performs update action """
self.update(self)
def __repr__(self):
return "<tree[{0._key!r}] = {0._value!r} from {0.source!r}>".format(self)
def promise(self, deferred):
"""
Helper method that wraps ``deferred`` callable by try-except block.
It adds ``self`` as a first argument to any exception that might
be raised from ``deferred``. So that the exception will contain
information of what expression from which file is caused it.
:param callable deferred: Callable object that should be wrapped by
:class:`Promise`
"""
def wrapper():
try:
return deferred()
except Exception as e:
args = e.args + (self,)
raise e.__class__(*args)
return Promise(wrapper)
@staticmethod
def default_update(action):
"""
Default value of :attr:`update`. Literally performs:
.. code-block:: python
action.tree[action.key] = action.value
:param UpdateAction action: Current action object
"""
action.tree[action.key] = action.value
class Promise(object):
"""
Represents deferred expression that should be calculated at the end
of loading process. See :func:`resolve`, :meth:`Updater.eval_value`,
:meth:`Updater.format_value`, :meth:`Updater.printf_value`, and
:meth:`PostProcessor.resolve_promise`.
:param callable deferred: Deferred expression
"""
def __init__(self, deferred):
self.deferred = deferred
def __call__(self):
""" Resolves deferred value, i.e. calls it and returns its result """
return self.deferred()
@staticmethod
def resolve(value):
"""
Helper method that resolves passed promises and returns their results.
Other values are returned as is.
:param value: Value to resolve
:returns: Resolved promise or value as it is.
.. code-block:: pycon
>>> Promise.resolve(Promise(lambda: 1))
1
>>> Promise.resolve(2)
2
"""
if isinstance(value, Promise):
return value()
return value
class ResolverProxy(object):
"""
Helper object that wraps :class:`configtree.tree.Tree` objects.
It pass each extracted value through :func:`resolve`, so that one
deferred expression (see :class:`Promise`) can use another.
If ``source`` argument is not ``None``, there will be ``__file__`` and
``__dir__`` keys available.
:param Tree tree: Tree object to wrap
:param str source: Path to source file
.. code-block:: pycon
>>> tree = Tree()
>>> proxy = ResolverProxy(tree, '/path/to/src.yaml')
>>> tree['foo'] = Promise(lambda: 1)
>>> tree['bar'] = Promise(lambda: proxy['foo'] + 1)
>>> proxy['foo']
1
>>> proxy['bar']
2
>>> proxy['__file__']
'/path/to/src.yaml'
>>> proxy['__dir__']
'/path/to'
"""
def __init__(self, tree, source=None):
self.__tree = tree
self.__source = source
def __getitem__(self, key):
try:
return Promise.resolve(self.__tree[key])
except KeyError:
if self.__source is not None:
if key == "__file__":
return self.__source
elif key == "__dir__":
return os.path.dirname(self.__source)
raise
def __getattr__(self, attr):
return getattr(self.__tree, attr)
class Required(object):
"""
Helper object that indicates undefined required key.
Values of the type are set up by :meth:`Updater.required_value`
and treated as error by :meth:`PostProcessor.check_required`.
"""
def __init__(self, key, comment=""):
self.key = key
self.comment = comment
def __repr__(self):
result = "Undefined required key <%s>" % self.key
if self.comment:
result += ": " + self.comment
return result
###############################################################################
# Post Processor
##
class PostProcessor(Pipeline):
"""
Post processor is used by :class:`Loader` to perform final transformations
of its result tree object after loading process is finished.
Post processor iterates over passed :class:`configtree.tree.Tree` object
and pass its keys and values through :attr:`__pipeline__`. If any worker
of the pipeline returns non ``None`` value, this value will be treated
as an error. Such errors are accumulated and raised within
:class:`ProcessingError` exception at the end of processing.
.. attribute:: __pipeline__
The list of workers is:
[:meth:`resolve_promise`, :meth:`check_required`]
"""
def __call__(self, tree):
"""
Runs post processor
:param Tree tree: A tree object to process
"""
errors = []
for key, value in tree.items():
for modifier in self.__pipeline__:
error = modifier(tree, key, value)
if error is not None:
errors.append(error)
if errors:
errors.sort(key=lambda e: str(e))
raise ProcessingError(*errors)
@Pipeline.worker(30)
def resolve_promise(self, tree, key, value):
"""
Worker that resolves :class:`Promise` objects.
Any exception raised within promise expression will not be caught.
:param Tree tree: Current processing tree
:param str key: Current traversing key
:param value: Current traversing value
.. attribute:: __priority__ = 30
"""
if isinstance(value, Promise):
tree[key] = value()
@Pipeline.worker(50)
def check_required(self, tree, key, value):
"""
Worker that checks tree for raw :class:`Required` values.
:param Tree tree: Current processing tree
:param str key: Current traversing key
:param value: Current traversing value
:returns: * passed ``value``, if it is an an instance of
:class:`Required`;
* ``None`` for other values.
.. attribute:: __priority__ = 50
"""
if isinstance(value, Required):
return value
class ProcessingError(Exception):
""" Exception that will be raised, if post processor gets any error """
| [
"os.listdir",
"os.path.join",
"os.path.splitext",
"os.path.isfile",
"os.path.dirname",
"os.path.isdir",
"os.path.basename",
"sys.path.append",
"os.path.relpath"
] | [((12284, 12318), 'os.path.join', 'os.path.join', (['self.path', 'self.name'], {}), '(self.path, self.name)\n', (12296, 12318), False, 'import os\n'), ((12378, 12407), 'os.path.isfile', 'os.path.isfile', (['self.fullpath'], {}), '(self.fullpath)\n', (12392, 12407), False, 'import os\n'), ((12466, 12494), 'os.path.isdir', 'os.path.isdir', (['self.fullpath'], {}), '(self.fullpath)\n', (12479, 12494), False, 'import os\n'), ((1182, 1203), 'sys.path.append', 'sys.path.append', (['path'], {}), '(path)\n', (1197, 1203), False, 'import sys\n'), ((2141, 2165), 'os.path.relpath', 'os.path.relpath', (['f', 'path'], {}), '(f, path)\n', (2156, 2165), False, 'import os\n'), ((5542, 5563), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (5557, 5563), False, 'import os\n'), ((5565, 5587), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (5581, 5587), False, 'import os\n'), ((12551, 12578), 'os.path.splitext', 'os.path.splitext', (['self.name'], {}), '(self.name)\n', (12567, 12578), False, 'import os\n'), ((12644, 12671), 'os.path.splitext', 'os.path.splitext', (['self.name'], {}), '(self.name)\n', (12660, 12671), False, 'import os\n'), ((2233, 2252), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (2249, 2252), False, 'import os\n'), ((6361, 6389), 'os.listdir', 'os.listdir', (['current.fullpath'], {}), '(current.fullpath)\n', (6371, 6389), False, 'import os\n'), ((26986, 27016), 'os.path.dirname', 'os.path.dirname', (['self.__source'], {}), '(self.__source)\n', (27001, 27016), False, 'import os\n')] |
# Copyright (c) 2021 MobileCoin. All rights reserved.
import attr
import uuid
import logging
from collections import defaultdict
from typing import Optional, Iterator, List
from decimal import Decimal
from django.utils import timezone
from signald.types import Payment as SignalPayment, Message as SignalMessage
from mobot_client.core.context import ChatContext
from mobot_client.models.messages import PaymentStatus, Payment
from signald import Signal
from unittest.mock import create_autospec
from mobot_client.payments import MCClient, Payments
class MockMCC(MCClient):
def __init__(self):
super(MCClient, self).__init__()
self.logger = logging.getLogger("MCClient")
self.receipt_status_responses = {}
self.public_address = "FooAddress"
self.verbose = True
def _get_receipt(self, amount_pmob: int, status: PaymentStatus) -> dict:
"""Create a bogus receipt with an amount"""
full_service_receipt = {
"receipt_transaction_status": status,
"txo": {
"txo_id_hex": str(uuid.uuid4()),
"value_pmob": amount_pmob,
}}
return full_service_receipt
def add_mock_payment(self, amount_pmob: int = int(Decimal("1e12")), status: PaymentStatus = PaymentStatus.TransactionPending) -> str:
"""Add a mock receipt response, returning a mock receipt ID"""
mock_receipt = str(uuid.uuid4())
self.receipt_status_responses[mock_receipt] = self._get_receipt(amount_pmob, status)
return mock_receipt
def get_receipt_status(self, receipt: str) -> dict:
return self.receipt_status_responses[receipt]
@property
def minimum_fee_pmob(self) -> int:
return 400000000
@property
def account_id(self) -> str:
return "foo"
@attr.s
class TestMessage:
text = attr.ib(type=str)
phone_number = attr.ib(type=str)
timestamp = attr.ib(type=Optional[float], default=timezone.now().timestamp())
payment = attr.ib(type=Optional[int], default=None)
def mock_signal_message_with_receipt(test_message: TestMessage, mcc: MockMCC, status: PaymentStatus = PaymentStatus.TransactionSuccess):
"""Generate a mock signal message with payment at a specified state, defaulting to success"""
if test_message.payment:
receipt = mcc.add_mock_payment(test_message.payment, status)
payment = SignalPayment(
note="a payment",
receipt=receipt
)
else:
payment = None
return SignalMessage(
text=test_message.text,
username=str(test_message.phone_number),
source=dict(number=str(test_message.phone_number)),
timestamp=timezone.now().timestamp(),
payment=payment
)
class MockSignal(Signal):
def __init__(self, test_messages: List[SignalMessage] = [], store_number: str = "+14156665666"):
self.received_messages = test_messages
self.sent_messages = defaultdict(list)
self.store_number = store_number
def receive_messages(self) -> Iterator[SignalMessage]:
for message in self.received_messages:
yield message
def send_message(
self,
recipient: str,
text: str,
block: bool = True,
attachments: List[str] = [],
) -> None:
self.sent_messages[recipient].append(TestMessage(text=text, phone_number=self.store_number))
def send_read_receipt(self, recipient, timestamps, block: bool = True) -> None:
pass
class MockPayments(Payments):
def __init__(self, *args, **kwargs):
self.get_payments_address = create_autospec(super().get_payments_address)
| [
"logging.getLogger",
"uuid.uuid4",
"django.utils.timezone.now",
"signald.types.Payment",
"collections.defaultdict",
"decimal.Decimal",
"attr.ib"
] | [((1854, 1871), 'attr.ib', 'attr.ib', ([], {'type': 'str'}), '(type=str)\n', (1861, 1871), False, 'import attr\n'), ((1891, 1908), 'attr.ib', 'attr.ib', ([], {'type': 'str'}), '(type=str)\n', (1898, 1908), False, 'import attr\n'), ((2005, 2046), 'attr.ib', 'attr.ib', ([], {'type': 'Optional[int]', 'default': 'None'}), '(type=Optional[int], default=None)\n', (2012, 2046), False, 'import attr\n'), ((666, 695), 'logging.getLogger', 'logging.getLogger', (['"""MCClient"""'], {}), "('MCClient')\n", (683, 695), False, 'import logging\n'), ((2400, 2448), 'signald.types.Payment', 'SignalPayment', ([], {'note': '"""a payment"""', 'receipt': 'receipt'}), "(note='a payment', receipt=receipt)\n", (2413, 2448), True, 'from signald.types import Payment as SignalPayment, Message as SignalMessage\n'), ((2965, 2982), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2976, 2982), False, 'from collections import defaultdict\n'), ((1238, 1253), 'decimal.Decimal', 'Decimal', (['"""1e12"""'], {}), "('1e12')\n", (1245, 1253), False, 'from decimal import Decimal\n'), ((1420, 1432), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1430, 1432), False, 'import uuid\n'), ((1078, 1090), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1088, 1090), False, 'import uuid\n'), ((1963, 1977), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1975, 1977), False, 'from django.utils import timezone\n'), ((2702, 2716), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2714, 2716), False, 'from django.utils import timezone\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Evaluation script for TensorFlow 2.0 Question Answering,
adapted by yorko from nq_eval, Natural Questions
https://ai.google.com/research/NaturalQuestions
https://www.kaggle.com/c/tensorflow2-question-answering/
------------------------------------------------------------------------------
Example usage:
nq_eval_yorko --gold_path=<path-to-gold-files> --predictions_path=<path_to_json>
This will compute both the official F1 scores as well as recall@precision
tables for both long and short answers. Note that R@P are only meaningful
if your model populates the score fields of the prediction JSON format.
gold_path should point to the five way annotated dev data in the
original download format (gzipped jsonlines).
predictions_path should point to a json file containing the predictions in
the format given below.
------------------------------------------------------------------------------
Prediction format:
{'predictions': [
{
'example_id': -2226525965842375672,
'long_answer': {
'start_byte': 62657, 'end_byte': 64776,
'start_token': 391, 'end_token': 604
},
'long_answer_score': 13.5,
'short_answers': [
{'start_byte': 64206, 'end_byte': 64280,
'start_token': 555, 'end_token': 560}, ...],
'short_answers_score': 26.4,
'yes_no_answer': 'NONE'
}, ... ]
}
"""
from collections import OrderedDict
from absl import app
from absl import flags
import eval_utils as util
flags.DEFINE_string(
'gold_path', None, 'Path to the gzip JSON data. For '
'multiple files, should be a glob '
'pattern (e.g. "/path/to/files-*"')
flags.DEFINE_string('predictions_path', None, 'Path to prediction JSON.')
flags.DEFINE_integer('num_threads', 10, 'Number of threads for reading.')
flags.DEFINE_float('score_thres_long', -100, 'Score threshold for long answers')
flags.DEFINE_float('score_thres_short', -100, 'Score threshold for short answers')
FLAGS = flags.FLAGS
def safe_divide(x, y):
"""Compute x / y, but return 0 if y is zero."""
if y == 0:
return 0
else:
return x / y
def score_long_answer(gold_label_list, pred_label, score_thres):
"""Scores a long answer as correct or not.
1) First decide if there is a gold long answer with LONG_NO_NULL_THRESHOLD.
2) The prediction will get a match if:
a. There is a gold long answer.
b. The prediction span match exactly with *one* of the non-null gold
long answer span.
Args:
gold_label_list: A list of NQLabel, could be None.
pred_label: A single NQLabel, could be None.
score_thres; score threshold
Returns:
gold_has_answer, pred_has_answer, is_correct, score
"""
gold_has_answer = util.gold_has_long_answer(gold_label_list)
is_correct = False
score = pred_label.long_score
pred_has_answer = pred_label and (
not pred_label.long_answer_span.is_null_span()) and score >= score_thres
# Both sides are non-null spans.
if gold_has_answer and pred_has_answer:
for gold_label in gold_label_list:
# while the voting results indicate there is an long answer, each
# annotator might still say there is no long answer.
if gold_label.long_answer_span.is_null_span():
continue
if util.nonnull_span_equal(gold_label.long_answer_span,
pred_label.long_answer_span):
is_correct = True
break
return gold_has_answer, pred_has_answer, is_correct, score
def score_short_answer(gold_label_list, pred_label, score_thres):
"""Scores a short answer as correct or not.
1) First decide if there is a gold short answer with SHORT_NO_NULL_THRESHOLD.
2) The prediction will get a match if:
a. There is a gold short answer.
b. The prediction span *set* match exactly with *one* of the non-null gold
short answer span *set*.
Args:
gold_label_list: A list of NQLabel.
pred_label: A single NQLabel.
score_thres: score threshold
Returns:
gold_has_answer, pred_has_answer, is_correct, score
"""
# There is a gold short answer if gold_label_list not empty and non null
# answers is over the threshold (sum over annotators).
gold_has_answer = util.gold_has_short_answer(gold_label_list)
is_correct = False
score = pred_label.short_score
# There is a pred long answer if pred_label is not empty and short answer
# set is not empty.
pred_has_answer = pred_label and (
(not util.is_null_span_list(pred_label.short_answer_span_list)) or
pred_label.yes_no_answer != 'none') and score >= score_thres
# Both sides have short answers, which contains yes/no questions.
if gold_has_answer and pred_has_answer:
if pred_label.yes_no_answer != 'none': # System thinks its y/n questions.
for gold_label in gold_label_list:
if pred_label.yes_no_answer == gold_label.yes_no_answer:
is_correct = True
break
else:
for gold_label in gold_label_list:
if util.span_set_equal(gold_label.short_answer_span_list,
pred_label.short_answer_span_list):
is_correct = True
break
return gold_has_answer, pred_has_answer, is_correct, score
def score_answers(gold_annotation_dict, pred_dict,
score_thres_long, score_thres_short):
"""Scores all answers for all documents.
Args:
gold_annotation_dict: a dict from example id to list of NQLabels.
pred_dict: a dict from example id to list of NQLabels.
score_thres_long: score threshold for long answers
score_thres_short: score threshold for short answers
Returns:
long_answer_stats: List of scores for long answers.
short_answer_stats: List of scores for short answers.
"""
gold_id_set = set(gold_annotation_dict.keys())
pred_id_set = set(pred_dict.keys())
if gold_id_set.symmetric_difference(pred_id_set):
raise ValueError('ERROR: the example ids in gold annotations and example '
'ids in the prediction are not equal.')
long_answer_stats = []
short_answer_stats = []
for example_id in gold_id_set:
gold = gold_annotation_dict[example_id]
pred = pred_dict[example_id]
long_answer_stats.append(score_long_answer(gold, pred, score_thres_long))
short_answer_stats.append(score_short_answer(gold, pred, score_thres_short))
# use the 'score' column, which is last
long_answer_stats.sort(key=lambda x: x[-1], reverse=True)
short_answer_stats.sort(key=lambda x: x[-1], reverse=True)
return long_answer_stats, short_answer_stats
def compute_f1(answer_stats, prefix=''):
"""Computes F1, precision, recall for a list of answer scores.
Args:
answer_stats: List of per-example scores.
prefix (''): Prefix to prepend to score dictionary.
Returns:
Dictionary mapping string names to scores.
"""
has_gold, has_pred, is_correct, _ = list(zip(*answer_stats))
precision = safe_divide(sum(is_correct), sum(has_pred))
recall = safe_divide(sum(is_correct), sum(has_gold))
f1 = safe_divide(2 * precision * recall, precision + recall)
return OrderedDict({
prefix + 'n': len(answer_stats),
prefix + 'f1': f1,
prefix + 'precision': precision,
prefix + 'recall': recall
})
def compute_final_f1(long_answer_stats, short_answer_stats):
  """Compute overall F1 for long, short and combined answers.

  Note: this assumes that the answers have been thresholded.

  Arguments:
    long_answer_stats: List of long answer scores.
    short_answer_stats: List of short answer scores.

  Returns:
    Dictionary of name (string) -> score.
  """
  scores = compute_f1(long_answer_stats, prefix='long-answer-')
  for stats, name in ((short_answer_stats, 'short-answer-'),
                      (long_answer_stats + short_answer_stats,
                       'all-answer-')):
    scores.update(compute_f1(stats, prefix=name))
  return scores
def main(_):
  """Entry point: read gold and predicted answers, score, print a summary."""
  nq_gold_dict = util.read_annotation(FLAGS.gold_path,
                                      n_threads=FLAGS.num_threads)
  nq_pred_dict = util.read_prediction_json(FLAGS.predictions_path)
  long_answer_stats, short_answer_stats = score_answers(
      nq_gold_dict, nq_pred_dict,
      score_thres_long=FLAGS.score_thres_long,
      score_thres_short=FLAGS.score_thres_short)
  # reporting results
  print('*' * 20)
  scores = compute_final_f1(long_answer_stats, short_answer_stats)
  print('*' * 20)
  print('SCORES (n={}):'.format(scores['long-answer-n']))
  print('              F1     /  P      /  R')
  # Each row shares the same value layout; only the label differs.
  value_fmt = '{: >7.2%} / {: >7.2%} / {: >7.2%}'
  for label, key in (('Long answer  ', 'long-answer-'),
                     ('Short answer ', 'short-answer-'),
                     ('All answers  ', 'all-answer-')):
    print((label + value_fmt).format(
        scores[key + 'f1'], scores[key + 'precision'],
        scores[key + 'recall']))
if __name__ == '__main__':
  # Both input paths are mandatory before the app may run.
  for _required_flag in ('gold_path', 'predictions_path'):
    flags.mark_flag_as_required(_required_flag)
  app.run(main)
| [
"eval_utils.is_null_span_list",
"eval_utils.read_annotation",
"absl.flags.DEFINE_integer",
"eval_utils.gold_has_short_answer",
"absl.app.run",
"absl.flags.mark_flag_as_required",
"eval_utils.span_set_equal",
"eval_utils.gold_has_long_answer",
"eval_utils.nonnull_span_equal",
"eval_utils.read_predi... | [((2123, 2271), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""gold_path"""', 'None', '"""Path to the gzip JSON data. For multiple files, should be a glob pattern (e.g. "/path/to/files-*\\""""'], {}), '(\'gold_path\', None,\n \'Path to the gzip JSON data. For multiple files, should be a glob pattern (e.g. "/path/to/files-*"\'\n )\n', (2142, 2271), False, 'from absl import flags\n'), ((2320, 2393), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""predictions_path"""', 'None', '"""Path to prediction JSON."""'], {}), "('predictions_path', None, 'Path to prediction JSON.')\n", (2339, 2393), False, 'from absl import flags\n'), ((2394, 2467), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_threads"""', '(10)', '"""Number of threads for reading."""'], {}), "('num_threads', 10, 'Number of threads for reading.')\n", (2414, 2467), False, 'from absl import flags\n'), ((2468, 2553), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""score_thres_long"""', '(-100)', '"""Score threshold for long answers"""'], {}), "('score_thres_long', -100, 'Score threshold for long answers'\n )\n", (2486, 2553), False, 'from absl import flags\n'), ((2549, 2635), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""score_thres_short"""', '(-100)', '"""Score threshold for short answers"""'], {}), "('score_thres_short', -100,\n 'Score threshold for short answers')\n", (2567, 2635), False, 'from absl import flags\n'), ((3430, 3472), 'eval_utils.gold_has_long_answer', 'util.gold_has_long_answer', (['gold_label_list'], {}), '(gold_label_list)\n', (3455, 3472), True, 'import eval_utils as util\n'), ((5024, 5067), 'eval_utils.gold_has_short_answer', 'util.gold_has_short_answer', (['gold_label_list'], {}), '(gold_label_list)\n', (5050, 5067), True, 'import eval_utils as util\n'), ((8954, 9020), 'eval_utils.read_annotation', 'util.read_annotation', (['FLAGS.gold_path'], {'n_threads': 'FLAGS.num_threads'}), '(FLAGS.gold_path, 
n_threads=FLAGS.num_threads)\n', (8974, 9020), True, 'import eval_utils as util\n'), ((9041, 9090), 'eval_utils.read_prediction_json', 'util.read_prediction_json', (['FLAGS.predictions_path'], {}), '(FLAGS.predictions_path)\n', (9066, 9090), True, 'import eval_utils as util\n'), ((10241, 10281), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""gold_path"""'], {}), "('gold_path')\n", (10268, 10281), False, 'from absl import flags\n'), ((10286, 10333), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""predictions_path"""'], {}), "('predictions_path')\n", (10313, 10333), False, 'from absl import flags\n'), ((10338, 10351), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (10345, 10351), False, 'from absl import app\n'), ((4020, 4106), 'eval_utils.nonnull_span_equal', 'util.nonnull_span_equal', (['gold_label.long_answer_span', 'pred_label.long_answer_span'], {}), '(gold_label.long_answer_span, pred_label.\n long_answer_span)\n', (4043, 4106), True, 'import eval_utils as util\n'), ((5286, 5343), 'eval_utils.is_null_span_list', 'util.is_null_span_list', (['pred_label.short_answer_span_list'], {}), '(pred_label.short_answer_span_list)\n', (5308, 5343), True, 'import eval_utils as util\n'), ((5883, 5977), 'eval_utils.span_set_equal', 'util.span_set_equal', (['gold_label.short_answer_span_list', 'pred_label.short_answer_span_list'], {}), '(gold_label.short_answer_span_list, pred_label.\n short_answer_span_list)\n', (5902, 5977), True, 'import eval_utils as util\n')] |
import os
import re
import sys
import cffi
from ._compat import PY2
_directive_re = re.compile(r'^\s*#.*?$(?m)')
def make_ffi(module_path, crate_path, cached_header_filename=None):
    """Creates a FFI instance for the given configuration."""
    header = None
    if cached_header_filename is not None and \
            os.path.isfile(cached_header_filename):
        with open(cached_header_filename, 'rb') as f:
            header = f.read()
        if not PY2:
            header = header.decode('utf-8')
    if header is None:
        # No usable cache: generate the header from the crate sources.
        from .bindgen import generate_header
        header = generate_header(crate_path)
    # Strip preprocessor directives, which cffi cannot parse.
    header = _directive_re.sub('', header)
    if os.environ.get('SNAEK_DEBUG_HEADER') == '1':
        for chunk in ('/* generated header for "%s" */\n' % module_path,
                      header, '\n'):
            sys.stderr.write(chunk)
        sys.stderr.flush()
    ffi = cffi.FFI()
    ffi.cdef(header)
    ffi.set_source(module_path, None)
    return ffi
| [
"re.compile",
"cffi.FFI",
"os.environ.get",
"sys.stderr.flush",
"os.path.isfile",
"sys.stderr.write"
] | [((87, 115), 're.compile', 're.compile', (['"""^\\\\s*#.*?$(?m)"""'], {}), "('^\\\\s*#.*?$(?m)')\n", (97, 115), False, 'import re\n'), ((865, 875), 'cffi.FFI', 'cffi.FFI', ([], {}), '()\n', (873, 875), False, 'import cffi\n'), ((303, 341), 'os.path.isfile', 'os.path.isfile', (['cached_header_filename'], {}), '(cached_header_filename)\n', (317, 341), False, 'import os\n'), ((642, 678), 'os.environ.get', 'os.environ.get', (['"""SNAEK_DEBUG_HEADER"""'], {}), "('SNAEK_DEBUG_HEADER')\n", (656, 678), False, 'import os\n'), ((695, 762), 'sys.stderr.write', 'sys.stderr.write', (['(\'/* generated header for "%s" */\\n\' % module_path)'], {}), '(\'/* generated header for "%s" */\\n\' % module_path)\n', (711, 762), False, 'import sys\n'), ((771, 795), 'sys.stderr.write', 'sys.stderr.write', (['header'], {}), '(header)\n', (787, 795), False, 'import sys\n'), ((804, 826), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (820, 826), False, 'import sys\n'), ((835, 853), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (851, 853), False, 'import sys\n')] |
# Based on https://github.com/eklitzke/utxodump
from typing import Tuple
import binascii
import leveldb
import config
import json
import os
def decode_varint(val: bytearray) -> Tuple[int, int]:
    """Decode Bitcoin's chainstate varint encoding.

    Each byte contributes its low seven bits; a set high bit means
    "continue" and additionally bumps the accumulator by one (the format
    stores n-1 for continuation bytes).  Returns (value, bytes_consumed).
    """
    value = 0
    consumed = 0
    for byte in val:
        consumed += 1
        value = (value << 7) | (byte & 0x7f)
        if not (byte & 0x80):
            return value, consumed
        value += 1
    assert False  # not reached: input ended in the middle of a varint
def decode_height(val: bytearray) -> int:
    """Extract the block height from a coin record's varint-coded header."""
    # The low bit of the code is a flag; the height sits in the bits above it.
    code, _consumed = decode_varint(val)
    return code >> 1
def decode_txid(key: bytearray) -> Tuple[str, int]:
    """Decode a chainstate 'C'-prefixed key into a (txid, vout) pair.

    Key layout: 1-byte record type (0x43 == 'C'), the 32-byte txid stored
    in reversed byte order, then a varint-encoded output index.

    Note: the original annotation said ``-> str`` but the function has
    always returned a 2-tuple; the annotation is corrected here.
    """
    assert key[0] == 67  # ord('C'): unspent-coin record
    # The txid is stored little-endian; flip it before hex-encoding.
    txid = binascii.hexlify(key[1:33][::-1]).decode('utf8')
    compressed_vout = key[33:]
    vout, declen = decode_varint(compressed_vout)
    # The varint must consume the entire remainder of the key.
    assert declen == len(compressed_vout)
    return txid, vout
def locate_db(path: str) -> str:
    """Return the chainstate LevelDB directory below a bitcoin datadir."""
    expanded = os.path.expanduser(path)
    return os.path.join(expanded, 'chainstate')
def get_obfuscate_key(conn: leveldb.LevelDB) -> bytearray:
    """Fetch the 8-byte XOR obfuscation key stored in the chainstate DB."""
    raw = conn.Get(bytearray(b'\x0e\x00obfuscate_key'))
    # Stored value: one length byte (always 8) followed by the key itself.
    assert raw[0] == 8 and len(raw) == 9
    return raw[1:]
def decrypt(ciphertext: bytearray, key: bytearray):
    """XOR-deobfuscate *ciphertext* in place with the repeating *key*."""
    key_len = len(key)
    for index in range(len(ciphertext)):
        ciphertext[index] ^= key[index % key_len]
def get_unspent(path: str, snapshot_start: str, snapshot_end: str):
    """Map txid -> list of unspent vouts created strictly between the
    snapshot heights (exclusive on both ends)."""
    conn = leveldb.LevelDB(locate_db(path))
    secret = get_obfuscate_key(conn)
    result = {}
    # Keys starting with 'C' (0x43) are unspent-coin records; iterating the
    # half-open range [b'C', b'D') visits exactly those.
    for record_key, record_value in conn.RangeIter(b'C', b'D',
                                                   include_value=True):
        decrypt(record_value, secret)
        txid, vout = decode_txid(record_key)
        height = decode_height(record_value)
        if snapshot_start < height < snapshot_end:
            result.setdefault(txid, []).append(vout)
    return result
# Build the unspent-output map for the configured height window and write it
# next to the other snapshot artifacts as JSON.
data = get_unspent(config.BLOCKCHAIN_DIR, config.SNAPSHOT_START, config.SNAPSHOT_END)
with open('{}/unspent.json'.format(config.SNAPSHOT_DIR), 'w') as file:
    json.dump(data, file)
| [
"json.dump",
"os.path.join",
"os.path.expanduser",
"binascii.hexlify"
] | [((735, 759), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (753, 759), False, 'import os\n'), ((768, 803), 'os.path.join', 'os.path.join', (['datadir', '"""chainstate"""'], {}), "(datadir, 'chainstate')\n", (780, 803), False, 'import os\n'), ((1732, 1753), 'json.dump', 'json.dump', (['data', 'file'], {}), '(data, file)\n', (1741, 1753), False, 'import json\n'), ((508, 541), 'binascii.hexlify', 'binascii.hexlify', (['key[1:33][::-1]'], {}), '(key[1:33][::-1])\n', (524, 541), False, 'import binascii\n')] |
#! /usr/bin/env python3
import sys
import os
# Run relative to this script's directory so generated job dirs land here.
os.chdir(os.path.dirname(sys.argv[0]))
from mule_local.JobMule import *
from mule.exec_program import *
# Start from a clean slate: remove artifacts of previous benchmark runs.
exec_program('mule.benchmark.cleanup_all', catch_output=False)
# Configure the SWEET job generator for the FFTW-wisdom unit test.
jg = JobGeneration()
jg.compile.unit_test="test_plane_fftw_wisdom_import_export"
jg.compile.plane_spectral_space="enable"
jg.compile.plane_spectral_dealiasing="enable"
jg.compile.mode="debug"
jg.unique_id_filter = ['runtime.benchmark', 'runtime.time']
jg.runtime.space_res_physical = (128, 128)
if True:
    # No parallelization
    jg.compile.threading = 'off'
    pspace = JobParallelizationDimOptions('space')
    pspace.num_cores_per_rank = 1
    pspace.num_threads_per_rank = 1
    pspace.num_ranks = 1
    jg.setup_parallelization(pspace)
    # Create plans
    jg.runtime.reuse_plans = "save"
    jg.gen_jobscript_directory()
    # Reuse plans
    jg.runtime.reuse_plans = "require_load"
    jg.gen_jobscript_directory()
if True:
    # Parallelization
    jg.compile.threading = 'omp'
    # Generate a save/load job pair for every core count on one socket.
    for i in range(1, jg.platform_resources.num_cores_per_socket+1):
        pspace = JobParallelizationDimOptions('space')
        pspace.num_cores_per_rank = i
        pspace.num_ranks = 1
        jg.setup_parallelization(pspace)
        # Create plans
        jg.runtime.reuse_plans = "save"
        jg.gen_jobscript_directory()
        # Reuse plans
        jg.runtime.reuse_plans = "require_load"
        jg.gen_jobscript_directory()
import glob
print("Running jobs...")
# First run the plan-saving jobs so the wisdom files exist ...
for i in glob.glob("job_bench_*_planssave_*"):
    print("Executing: "+i)
    exitcode = exec_program(['mule.benchmark.jobs_run_directly', i], catch_output=False)
    if exitcode != 0:
        sys.exit(exitcode)
# ... then the jobs that require loading the previously saved plans.
for i in glob.glob("job_bench_*_plansrequire_load_*"):
    print("Executing: "+i)
    exitcode = exec_program(['mule.benchmark.jobs_run_directly', i], catch_output=False)
    if exitcode != 0:
        sys.exit(exitcode)
# Final cleanup of all generated job directories.
exec_program('mule.benchmark.cleanup_all', catch_output=False)
| [
"os.path.dirname",
"glob.glob",
"sys.exit"
] | [((1482, 1518), 'glob.glob', 'glob.glob', (['"""job_bench_*_planssave_*"""'], {}), "('job_bench_*_planssave_*')\n", (1491, 1518), False, 'import glob\n'), ((1695, 1739), 'glob.glob', 'glob.glob', (['"""job_bench_*_plansrequire_load_*"""'], {}), "('job_bench_*_plansrequire_load_*')\n", (1704, 1739), False, 'import glob\n'), ((55, 83), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (70, 83), False, 'import os\n'), ((1666, 1684), 'sys.exit', 'sys.exit', (['exitcode'], {}), '(exitcode)\n', (1674, 1684), False, 'import sys\n'), ((1887, 1905), 'sys.exit', 'sys.exit', (['exitcode'], {}), '(exitcode)\n', (1895, 1905), False, 'import sys\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 10 13:54:34 2020
@author: tbarton
"""
# make sure you have tensorflow 1.15
import gpt_2_simple as gpt2
# Fetch the pretrained GPT-2 model weights (downloads on first run).
gpt2.download_gpt2()
# Open a TensorFlow session and fine-tune on the chess-game corpus.
sess = gpt2.start_tf_sess()
gpt2.finetune(sess,
              dataset='big_chess_set.txt',
              run_name='lets_play_chess',
              print_every=1,
              multi_gpu=True,
              save_every=2,
              combine=100,
              steps=10) # steps is max number of training steps
sess.close()
| [
"gpt_2_simple.finetune",
"gpt_2_simple.download_gpt2",
"gpt_2_simple.start_tf_sess"
] | [((174, 194), 'gpt_2_simple.download_gpt2', 'gpt2.download_gpt2', ([], {}), '()\n', (192, 194), True, 'import gpt_2_simple as gpt2\n'), ((203, 223), 'gpt_2_simple.start_tf_sess', 'gpt2.start_tf_sess', ([], {}), '()\n', (221, 223), True, 'import gpt_2_simple as gpt2\n'), ((224, 372), 'gpt_2_simple.finetune', 'gpt2.finetune', (['sess'], {'dataset': '"""big_chess_set.txt"""', 'run_name': '"""lets_play_chess"""', 'print_every': '(1)', 'multi_gpu': '(True)', 'save_every': '(2)', 'combine': '(100)', 'steps': '(10)'}), "(sess, dataset='big_chess_set.txt', run_name='lets_play_chess',\n print_every=1, multi_gpu=True, save_every=2, combine=100, steps=10)\n", (237, 372), True, 'import gpt_2_simple as gpt2\n')] |
#!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cpp_import_manager."""
__author__ = '<EMAIL> (<NAME>)'
from google.apputils import basetest
from googleapis.codegen.cpp_import_manager import CppImportManager
class MockSchema(object):
  """Mock schema used in place of real schema objects."""

  def __init__(self):
    # Backing store mapping template name -> stored value.
    self._template_values_dict = {}

  def SetTemplateValue(self, template_name, template_value):
    """Remember template_value under template_name."""
    self._template_values_dict[template_name] = template_value

  def GetTemplateValue(self, template_name):
    """Return the stored value for template_name, or None if absent."""
    return self._template_values_dict.get(template_name)
class CppImportManagerTest(basetest.TestCase):
  """Exercises AddImport bucketing and the committed import lists."""

  def setUp(self):
    super(CppImportManagerTest, self).setUp()
    self.mock_schema = MockSchema()
    self.import_manager = CppImportManager(self.mock_schema)

  def testAddImportAndCommit(self):
    # A com.google style header populates only the google bucket.
    google_import = '"base/integral_types.h"'
    self.assertTrue(self.import_manager.AddImport(google_import))
    self.assertFalse(self.import_manager.platform_imports)
    self.assertFalse(self.import_manager.other_imports)
    self.assertTrue(self.import_manager.google_imports)
    # Angle-bracket headers count as platform imports for C++.
    platform_import = '<string>'
    self.assertTrue(self.import_manager.AddImport(platform_import))
    self.assertTrue(self.import_manager.platform_imports)
    # Anything else lands in the "other" bucket.
    misc_import = '"Module.h"'
    self.import_manager.AddImport(misc_import)
    # Check the contents of the google, other and platform buckets.
    expected_google_imports = sorted({google_import})
    self.assertEquals(expected_google_imports,
                      list(self.import_manager.google_imports))
    self.assertEquals([misc_import],
                      list(self.import_manager.other_imports))
    self.assertEquals([platform_import],
                      list(self.import_manager.platform_imports))
    # The class-name lookup table maps the import onto itself.
    self.assertEquals(
        google_import,
        self.import_manager._class_name_to_qualified_name[google_import])
    # Commit merges platform and google imports into one list (each
    # segment sorted first) and keeps the "other" imports separate.
    self.assertEquals(
        [sorted([platform_import]) + expected_google_imports, [misc_import]],
        self.mock_schema.GetTemplateValue('importManager').ImportLists())
# Delegate to basetest's runner so flags are parsed before tests execute.
if __name__ == '__main__':
  basetest.main()
| [
"googleapis.codegen.cpp_import_manager.CppImportManager",
"google.apputils.basetest.main"
] | [((3242, 3257), 'google.apputils.basetest.main', 'basetest.main', ([], {}), '()\n', (3255, 3257), False, 'from google.apputils import basetest\n'), ((1351, 1385), 'googleapis.codegen.cpp_import_manager.CppImportManager', 'CppImportManager', (['self.mock_schema'], {}), '(self.mock_schema)\n', (1367, 1385), False, 'from googleapis.codegen.cpp_import_manager import CppImportManager\n')] |
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet, arp, ipv4
from ryu.lib.packet import ether_types
from ryu.lib import mac
from ryu.lib.mac import haddr_to_bin
from ryu.controller import mac_to_port
from ryu.ofproto import inet
import networkx as nx
from ryu.lib.packet import icmp
from ryu.ofproto import ether
from ryu.topology import event, switches
from ryu.topology.api import get_switch, get_link
from ryu.app.wsgi import ControllerBase
import array
from ryu.app.ofctl.api import get_datapath
class SimpleSwitch13(app_manager.RyuApp):
    """Ryu OpenFlow 1.3 app that installs per-path flows picked by Dijkstra.

    NOTE(review): `dijkstra`, `dijkstra_longestpath` and `dpid_hostLookup`
    are declared as plain functions inside the class body (no `self`) and
    made visible through the `global` statement below; path results are
    handed back through the module-level `path2` list rather than return
    values.
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    global dijkstra, receive_arp, dpid_hostLookup,dijkstra_longestpath
    global path2
    # Shared side channel: the path-search helpers overwrite this list.
    path2 = [0]
    def __init__(self, *args, **kwargs):
        """Initialise the MAC table, topology graphs and datapath map."""
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        self.mac_to_port = {}          # dpid -> {mac -> in_port}
        self.net = nx.DiGraph()        # unused here; kept for compatibility
        self.g = nx.DiGraph()          # discovered switch/host topology
        self.switch_map = {}           # dpid -> datapath handle
        #self.handle_arp
        # self.send_packet
        # self.arp_table = {'10.1.0.1': '00:00:00:00:00:01',
        #                   '10.1.0.2': '00:00:00:00:00:02',
        #                   '10.1.0.3': '00:00:00:00:00:03',
        #                   '10.0.0.1': '00:00:00:00:00:11',
        #                   '10.0.0.2': '00:00:00:00:00:12',
        #                   '10.0.0.3': '00:00:00:00:00:13',
        #                   '10.0.0.4': '00:00:00:00:00:14',
        #                   '10.0.1.1': '00:00:00:00:00:21',
        #                   '10.1.1.2': '00:00:00:00:00:22',
        #                   '10.1.1.3': '00:00:00:00:00:23',
        #                   '10.1.1.4': '00:00:00:00:00:24',
        #                   '10.0.2.1': '00:00:00:00:00:31',
        #                   '10.1.2.2': '00:00:00:00:00:32',
        #                   '10.1.2.3': '00:00:00:00:00:33',
        #                   '10.1.2.4': '00:00:00:00:00:34'
        #                   }
        # self.count = 0
    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Register the connecting switch and install its table-miss entry."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        self.switch_map.update({datapath.id: datapath})
        # install table-miss flow entry
        #
        # We specify NO BUFFER to max_len of the output action due to
        # OVS bug. At this moment, if we specify a lesser number, e.g.,
        # 128, OVS will send Packet-In with invalid buffer_id and
        # truncated packet data. In that case, we cannot output packets
        # correctly.  The bug has been fixed in OVS v2.1.0.
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        # NOTE(review): add_flow expects a list of OFPInstruction* in its
        # `inst` parameter, but a raw actions list is passed here — TODO
        # confirm this is intended.
        self.add_flow(datapath, 0, match, actions)
    def add_flow(self, datapath, priority, match,inst=[],table=0):
        """Send an OFPFlowMod adding `match` -> `inst` at `priority`.

        NOTE(review): `inst=[]` is a mutable default argument; it is not
        mutated here, but replacing it with None would be safer.  Some call
        sites in _packet_in_handler pass extra/positional arguments that do
        not match this signature — see notes there.
        """
        ofp = datapath.ofproto
        ofp_parser = datapath.ofproto_parser
        buffer_id = ofp.OFP_NO_BUFFER
        mod = ofp_parser.OFPFlowMod(
            datapath=datapath, table_id=table,
            command=ofp.OFPFC_ADD, priority=priority, buffer_id=buffer_id,
            out_port=ofp.OFPP_ANY, out_group=ofp.OFPG_ANY,
            match=match, instructions=inst
        )
        datapath.send_msg(mod)
    def dijkstra(graph, src, dest, visited=[], distances={}, predecessors={}):
        """Calculates a shortest path tree rooted in src; the found path is
        stored (reversed, dest first) in the module-level `path2`.

        NOTE(review): `visited`/`distances`/`predecessors` are mutable
        default arguments, so their state leaks between top-level calls
        that do not pass them explicitly — TODO confirm / reset per call.
        """
        # a few sanity checks
        if src not in graph:
            raise TypeError('The root of the shortest path tree cannot be found')
        if dest not in graph:
            raise TypeError('The target of the shortest path cannot be found')
        # ending condition
        if src == dest:
            # We build the shortest path and display it
            path = []
            pred = dest
            while pred != None:
                path.append(pred)
                pred = predecessors.get(pred, None)
            print('shortest path: ' + str(path) + " cost= " + str(distances[dest]))
            global path2
            path2=path
        else:
            # if it is the initial run, initializes the cost
            if not visited:
                distances[src] = 0
            # visit the neighbors
            for neighbor in graph[src]:
                if neighbor not in visited:
                    new_distance = distances[src] + graph[src][neighbor]
                    print(new_distance)
                    if new_distance <= distances.get(neighbor, float('inf')):
                        distances[neighbor] = new_distance
                        predecessors[neighbor] = src
            # mark as visited
            visited.append(src)
            # now that all neighbors have been visited: recurse
            # select the non visited node with lowest distance 'x'
            # run Dijskstra with src='x'
            unvisited = {}
            for k in graph:
                if k not in visited:
                    unvisited[k] = distances.get(k, float('inf'))
            x = min(unvisited, key=unvisited.get)
            dijkstra(graph, x, dest, visited, distances, predecessors)
    def dijkstra_longestpath(graph, src, dest, visited=[], distances={}, predecessors={}):
        """Variant of `dijkstra` that expands the *max*-distance unvisited
        node; the resulting path is stored in the module-level `path2`.

        NOTE(review): the recursion at the bottom calls `dijkstra`, not
        `dijkstra_longestpath` — looks like a copy/paste slip, so only the
        first expansion uses the max rule; TODO confirm intended behaviour.
        Same mutable-default-argument caveat as `dijkstra`.
        """
        # a few sanity checks
        if src not in graph:
            raise TypeError('The root of the shortest path tree cannot be found')
        if dest not in graph:
            raise TypeError('The target of the shortest path cannot be found')
        # ending condition
        if src == dest:
            # We build the shortest path and display it
            path = []
            pred = dest
            while pred != None:
                path.append(pred)
                pred = predecessors.get(pred, None)
            print('shortest path: ' + str(path) + " cost= " + str(distances[dest]))
            global path2
            path2=path
        else:
            # if it is the initial run, initializes the cost
            if not visited:
                distances[src] = 0
            # visit the neighbors
            for neighbor in graph[src]:
                if neighbor not in visited:
                    new_distance = distances[src] + graph[src][neighbor]
                    print(new_distance)
                    if new_distance <= distances.get(neighbor, float('inf')):
                        distances[neighbor] = new_distance
                        predecessors[neighbor] = src
            # mark as visited
            visited.append(src)
            # now that all neighbors have been visited: recurse
            # select the non visited node with lowest distance 'x'
            # run Dijskstra with src='x'
            unvisited = {}
            for k in graph:
                if k not in visited:
                    unvisited[k] = distances.get(k, float('inf'))
            x = max(unvisited, key=unvisited.get)
            dijkstra(graph, x, dest, visited, distances, predecessors)
    def dpid_hostLookup(self, lmac):
        """Return the dpid of the switch that hosts MAC address `lmac`.

        NOTE(review): dict.iteritems() exists only on Python 2; this raises
        AttributeError on Python 3 — TODO confirm target interpreter.
        """
        host_locate = {1: {'00:00:00:00:00:11', '00:00:00:00:00:12'}, 2: {'00:00:00:00:00:13', '00:00:00:00:00:14'},
                       5: {'00:00:00:00:00:21', '00:00:00:00:00:22'}, 6: {'00:00:00:00:00:23', '00:00:00:00:00:24'},
                       9: {'00:00:00:00:00:31', '00:00:00:00:00:32'}, 10: {'00:00:00:00:00:33', '00:00:00:00:00:34'},
                       13: {'00:00:00:00:00:01'}, 14: {'00:00:00:00:00:02'}, 16: {'00:00:00:00:00:03'}}
        for dpid, mac in host_locate.iteritems():
            if lmac in mac:
                return dpid
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Learn MACs, rebuild the topology graph, and for selected
        src/dst MAC pairs compute a path over the hard-coded `topo` and
        install forward/backward flows along it; otherwise fall back to
        plain learning-switch behaviour.
        """
        pkt = packet.Packet(ev.msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        arp_pkt = pkt.get_protocol(arp.arp)
        ip4_pkt = pkt.get_protocol(ipv4.ipv4)
        if arp_pkt:
            pak = arp_pkt
        elif ip4_pkt:
            pak = ip4_pkt
        else:
            pak = eth
        self.logger.info(' _packet_in_handler: src_mac -> %s' % eth.src)
        self.logger.info(' _packet_in_handler: dst_mac -> %s' % eth.dst)
        self.logger.info(' _packet_in_handler: %s' % pak)
        self.logger.info(' ------')
        if eth.ethertype == ether_types.ETH_TYPE_LLDP or eth.ethertype == ether_types.ETH_TYPE_IPV6:
            # ignore lldp packet
            return
        # NOTE(review): src/dst are intentionally swapped relative to the
        # Ethernet header here (dst = eth.src, src = eth.dst) — TODO confirm.
        dst = eth.src
        src = eth.dst
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']
        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})
        self.logger.info(">>>>>>> packet in %s %s %s %s", dpid, src, dst, in_port)
        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port
        print(src)
        print(dst)
        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD
        actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]
        # Refresh the discovered topology from Ryu's topology API.
        switch_list = get_switch(self, None)
        switches = [switch.dp.id for switch in switch_list]
        links_list = get_link(self, None)
        link_port={(link.src.dpid,link.dst.dpid):link.src.port_no for link in links_list}
        # g = nx.DiGraph()
        self.g.add_nodes_from(switches)
        links = [(link.src.dpid,link.dst.dpid,{'port':link.src.port_no}) for link in links_list]
        print(links)
        self.g.add_edges_from(links)
        links = [(link.dst.dpid,link.src.dpid,{'port':link.dst.port_no}) for link in links_list]
        self.g.add_edges_from(links)
        #print(links)
        #print(self.g)
        # Static weighted topology used for path computation; keys are
        # dpids as strings, values map neighbour dpid -> link cost.
        topo = {'1': {'3': 50, '4': 100}, '2': {'3': 100, '4': 50}, '3': {'1': 50, '2': 100, '13': 15, '14': 100},
                '4': {'1': 100, '2': 50, '14': 5}, '5': {'7': 50, '8': 100}, '6': {'7': 100, '8': 50},
                '7': {'5': 50, '6': 100, '13': 15, '14': 20, '15': 5}, '8': {'5': 100, '6': 50, '15': 10, '16': 15},
                '9': {'11': 50, '12': 100}, '10': {'11': 100, '12': 50},
                '11': {'9': 50, '10': 100, '14': 10}, '12': {'9': 100, '10': 50, '15': 15, '16': 10},
                '13': {'3': 15, '7': 15}, '14': {'3': 10, '4': 5, '7': 20, '11': 10}, '15': {'7': 5, '8': 10, '12': 15},
                '16': {'8': 15, '12': 10}}
        dst_dpid = dpid_hostLookup(self, dst)
        print("dpid",str(dpid))
        print("dst",dst)
        # if(dst=='ff:ff:ff:ff:ff:ff'):
        #     return()
        path3=[]
        src=str(src)
        dst=str(dst)
        print("dst dpid",str(dst_dpid))
        # Selected host pairs take the shortest path ...
        if ((src == '00:00:00:00:00:01' and dst == '00:00:00:00:00:13') or (src == '00:00:00:00:00:01' and dst == '00:00:00:00:00:23') or (
                src == '00:00:00:00:00:01' and dst == '00:00:00:00:00:33') or(src == '00:00:00:00:00:02' and dst == '00:00:00:00:00:12') or (
                src == '00:00:00:00:00:02' and dst == '00:00:00:00:00:22') or (src == '00:00:00:00:00:02' and dst == '00:00:00:00:00:32') or
                (src == '00:00:00:00:00:03' and dst == '00:00:00:00:00:14') or (src == '00:00:00:00:00:03' and dst == '00:00:00:00:00:24') or (
                src == '00:00:00:00:00:03' and dst == '00:00:00:00:00:34')):
            dijkstra(topo, str(dpid), str(dst_dpid))
            global path2
            path3= list(map(int, path2))
            print(path3)
            path3.reverse()
        # ... all other listed pairs use the "longest path" variant.
        elif ((src == '00:00:00:00:00:01' and (dst == '00:00:00:00:00:11' or dst == '00:00:00:00:00:12' or dst == '00:00:00:00:00:14' or dst == '00:00:00:00:00:21' or dst == '00:00:00:00:00:22' or dst == '00:00:00:00:00:24' or dst == '00:00:00:00:00:31' or dst == '00:00:00:00:00:32' or dst == '00:00:00:00:00:34'))
              or (src == '00:00:00:00:00:02' and (dst == '00:00:00:00:00:11' or dst == '00:00:00:00:00:13' or dst == '00:00:00:00:00:14' or dst == '00:00:00:00:00:21' or dst == '00:00:00:00:00:23' or dst == '00:00:00:00:00:24' or dst == '00:00:00:00:00:31' or dst == '00:00:00:00:00:33' or dst == '00:00:00:00:00:34'))
              or (src == '00:00:00:00:00:03' and (dst == '00:00:00:00:00:11' or dst == '00:00:00:00:00:12' or dst == '00:00:00:00:00:13' or dst == '00:00:00:00:00:21' or dst == '00:00:00:00:00:22' or dst == '00:00:00:00:00:23' or dst == '00:00:00:00:00:31' or dst == '00:00:00:00:00:32' or dst == '00:00:00:00:00:33'))):
            dijkstra_longestpath(topo, str(dpid), str(dst_dpid))
            path3 = list(map(int, path2))
            print(path3)
            path3.reverse()
        # Attach the hosts to the topology graph if not yet present.
        if not self.g.has_node(eth.src):
            print("add %s in self.net" % eth.src)
            self.g.add_node(eth.src)
            self.g.add_edge(eth.src, datapath.id)
            self.g.add_edge(datapath.id, eth.src, {'port': in_port})
            print(self.g.node)
        if not self.g.has_node(eth.dst):
            print("add %s in self.net" % eth.dst)
            self.g.add_node(eth.dst)
            self.g.add_edge(eth.dst, datapath.id)
            self.g.add_edge(datapath.id, eth.dst, {'port': in_port})
            print(self.g.node)
        # path3=[13,3,1]
        print("before loop")
        if(path3!=[]):
            if self.g.has_node(eth.dst):
                next_match = parser.OFPMatch(eth_dst=eth.dst)
                back_match = parser.OFPMatch(eth_dst=eth.src)
                print(path3)
                # Install forward and backward flows on every transit
                # switch of the computed path.
                for on_path_switch in range(1, len(path3) - 1):
                    print("hi in loop")
                    now_switch = path3[on_path_switch]
                    next_switch = path3[on_path_switch + 1]
                    back_switch = path3[on_path_switch - 1]
                    next_port = link_port[(now_switch,next_switch)]
                    back_port = link_port[(now_switch,back_switch)]
                    print("next_port",next_port)
                    print("back_port",back_port)
                    new_dp=get_datapath(self, next_switch)
                    action = parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                                      [parser.OFPActionOutput(next_port)])
                    inst = [action]
                    # NOTE(review): add_flow's required `priority` argument
                    # is omitted in both calls below, and the second call
                    # passes an `actions=` keyword that add_flow does not
                    # accept — these raise TypeError when reached; TODO fix.
                    self.add_flow(datapath=new_dp, match=next_match, inst=inst, table=0)
                    action = parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                                         [parser.OFPActionOutput(back_port)])
                    inst = [action]
                    actions = [parser.OFPActionOutput(next_port)]
                    new_dp = get_datapath(self, back_switch)
                    self.add_flow(datapath=new_dp, match=back_match, inst=inst,actions=action, table=0)
                    print ("now switch:%s",now_switch)
                out = datapath.ofproto_parser.OFPPacketOut(
                    datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port,
                    actions=actions)
                datapath.send_msg(out)
                print("final")
            else:
                return
        else:
            # Learning-switch fallback when no engineered path applies.
            if out_port != ofproto.OFPP_FLOOD:
                # NOTE(review): OF1.3 Packet-In has no `msg.in_port`, and
                # `dst`/`actions` do not match add_flow's signature — this
                # branch raises when executed; TODO confirm and repair.
                self.add_flow(datapath, msg.in_port, dst, actions)
            out = datapath.ofproto_parser.OFPPacketOut(
                datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port,
                actions=actions)
            datapath.send_msg(out)
| [
"ryu.lib.packet.packet.Packet",
"ryu.topology.api.get_switch",
"networkx.DiGraph",
"ryu.app.ofctl.api.get_datapath",
"ryu.controller.handler.set_ev_cls",
"ryu.topology.api.get_link"
] | [((2191, 2254), 'ryu.controller.handler.set_ev_cls', 'set_ev_cls', (['ofp_event.EventOFPSwitchFeatures', 'CONFIG_DISPATCHER'], {}), '(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)\n', (2201, 2254), False, 'from ryu.controller.handler import set_ev_cls\n'), ((7952, 8007), 'ryu.controller.handler.set_ev_cls', 'set_ev_cls', (['ofp_event.EventOFPPacketIn', 'MAIN_DISPATCHER'], {}), '(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n', (7962, 8007), False, 'from ryu.controller.handler import set_ev_cls\n'), ((1092, 1104), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1102, 1104), True, 'import networkx as nx\n'), ((1122, 1134), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1132, 1134), True, 'import networkx as nx\n'), ((8061, 8087), 'ryu.lib.packet.packet.Packet', 'packet.Packet', (['ev.msg.data'], {}), '(ev.msg.data)\n', (8074, 8087), False, 'from ryu.lib.packet import packet\n'), ((9515, 9537), 'ryu.topology.api.get_switch', 'get_switch', (['self', 'None'], {}), '(self, None)\n', (9525, 9537), False, 'from ryu.topology.api import get_switch, get_link\n'), ((9619, 9639), 'ryu.topology.api.get_link', 'get_link', (['self', 'None'], {}), '(self, None)\n', (9627, 9639), False, 'from ryu.topology.api import get_switch, get_link\n'), ((14302, 14333), 'ryu.app.ofctl.api.get_datapath', 'get_datapath', (['self', 'next_switch'], {}), '(self, next_switch)\n', (14314, 14333), False, 'from ryu.app.ofctl.api import get_datapath\n'), ((14927, 14958), 'ryu.app.ofctl.api.get_datapath', 'get_datapath', (['self', 'back_switch'], {}), '(self, back_switch)\n', (14939, 14958), False, 'from ryu.app.ofctl.api import get_datapath\n')] |
import luhn
def test_checksum_len1():
    """Checksum of a single digit."""
    assert 7 == luhn.checksum('7')
def test_checksum_len2():
    """Checksum of a two-digit payload."""
    assert 5 == luhn.checksum('13')
def test_checksum_len3():
    """Checksum of a three-digit payload."""
    assert 3 == luhn.checksum('383')
def test_checksum_len4():
    """Checksum of a four-digit payload."""
    assert 3 == luhn.checksum('2827')
def test_checksum_len13():
    """Checksum of a 13-digit payload."""
    assert 9 == luhn.checksum('4346537657597')
def test_checksum_len14():
    """Checksum of a 14-digit payload."""
    assert 1 == luhn.checksum('27184931073326')
def test_valid():
    """A known-good number passes verification."""
    assert bool(luhn.verify('356938035643809'))
def test_invalid():
    """A number with a bad check digit fails verification."""
    assert not bool(luhn.verify('4222222222222222'))
def test_generate():
    """The generated check digit matches the known value."""
    assert 3 == luhn.generate('7992739871')
def test_append():
    """Appending the check digit yields the full valid number."""
    expected = '534618613411234'
    assert expected == luhn.append('53461861341123')
| [
"luhn.verify",
"luhn.generate",
"luhn.append",
"luhn.checksum"
] | [((447, 477), 'luhn.verify', 'luhn.verify', (['"""356938035643809"""'], {}), "('356938035643809')\n", (458, 477), False, 'import luhn\n'), ((50, 68), 'luhn.checksum', 'luhn.checksum', (['"""7"""'], {}), "('7')\n", (63, 68), False, 'import luhn\n'), ((112, 131), 'luhn.checksum', 'luhn.checksum', (['"""13"""'], {}), "('13')\n", (125, 131), False, 'import luhn\n'), ((175, 195), 'luhn.checksum', 'luhn.checksum', (['"""383"""'], {}), "('383')\n", (188, 195), False, 'import luhn\n'), ((239, 260), 'luhn.checksum', 'luhn.checksum', (['"""2827"""'], {}), "('2827')\n", (252, 260), False, 'import luhn\n'), ((305, 335), 'luhn.checksum', 'luhn.checksum', (['"""4346537657597"""'], {}), "('4346537657597')\n", (318, 335), False, 'import luhn\n'), ((380, 411), 'luhn.checksum', 'luhn.checksum', (['"""27184931073326"""'], {}), "('27184931073326')\n", (393, 411), False, 'import luhn\n'), ((514, 545), 'luhn.verify', 'luhn.verify', (['"""4222222222222222"""'], {}), "('4222222222222222')\n", (525, 545), False, 'import luhn\n'), ((579, 606), 'luhn.generate', 'luhn.generate', (['"""7992739871"""'], {}), "('7992739871')\n", (592, 606), False, 'import luhn\n'), ((643, 672), 'luhn.append', 'luhn.append', (['"""53461861341123"""'], {}), "('53461861341123')\n", (654, 672), False, 'import luhn\n')] |
# Author: <NAME>
import numpy as np
import pickle
class evolutionary_strategies_model(object):
def __init__(
self, n_population, n_params, n_survival,
n_crossover = 2, sigma_init = 1, mu_init = 0, tau = None):
"""
Evolutionary strategies model loosely based on
Beyer and Schwefel, 2002, Evolution strategies - A Comprehensive Introduction
Model type (in the notation from the paper): (mu/ro, lambda) where
mu = n_survival
ro = n_crossover
lambda = n_population
Parameters
----------
n_population : integer
number of instances that are created each generation
n_params : integer
dimension of the parameter space to optimize
n_survival : integer
number of instances to be selected each generation
n_crossover : integer
number of parent instances for each new child usually 2
sigma_init : integer
standard deviation for the normal distribution the
mutation term is sampled from at the start
mu_init : integer
starting value for parameters
tau : float
learning rate like parameter
default (if None): tau = 1/sqrt(2*n_population)
"""
assert sigma_init > 0
assert n_population > n_survival
assert n_population % n_crossover == 0
assert n_population % n_survival == 0
self.n_population = n_population
self.n_survival = n_survival
self.sigma_init = sigma_init
self.n_crossover = n_crossover
if tau == None:
self.tau = 1/((2*n_population)**0.5)
else: self.tau = tau
self.n_params = n_params
self.params = np.random.normal(mu_init, sigma_init, (n_population, n_params))
self.sigmas = np.full((n_population, n_params), sigma_init, dtype = 'float64')
self.fitness = np.zeros(n_population)
self.indices_fittest = None
def mutate(self):
"""
mutate parameters : x = N(x,sigma)
mutate standard deviations : sigma = sigma * exp(N(0,tau))
"""
self.params = np.random.multivariate_normal(
self.params.reshape(self.n_population * self.n_params),
np.diag(self.sigmas.reshape(self.n_population * self.n_params)))\
.reshape((self.n_population, self.n_params))
self.sigmas *= np.exp(np.random.multivariate_normal(
np.zeros(self.n_population * self.n_params),
self.tau * np.eye(self.n_population * self.n_params)))\
.reshape((self.n_population, self.n_params))
def select(self):
"""
retreive the indices of the n_survival best instances
"""
self.indices_fittest = np.argsort(self.fitness)[-self.n_survival:]
def procreate(self):
"""
Create n_population new instances from the fittest instances of
the current generation.
Parent groups are selected randomly.
Parameters and sigmas of n_crossover parents are shuffled to create
n_crossover children per parent group.
"""
n_children = self.n_population // self.n_survival
parent_list = np.tile(self.indices_fittest, n_children)
np.random.shuffle(parent_list)
next_generation_params = self.params[parent_list,:]
next_generation_sigmas = self.sigmas[parent_list,:]
n_groups = self.n_population // self.n_crossover
for group in range(n_groups):
for i in range(self.n_params):
np.random.shuffle(
next_generation_params[
group * self.n_crossover : (group + 1) * self.n_crossover,i])
np.random.shuffle(
next_generation_sigmas[
group * self.n_crossover : (group + 1) * self.n_crossover,i])
self.params = next_generation_params
self.sigmas = next_generation_sigmas
def save(self):
"""
create/replace an object file to store the current model.
"""
filehandler = open("evolutionary_strategies_model", 'wb')
pickle.dump(self, filehandler)
filehandler.close()
print("### saved ###") | [
"numpy.random.normal",
"numpy.tile",
"numpy.eye",
"pickle.dump",
"numpy.argsort",
"numpy.zeros",
"numpy.full",
"numpy.random.shuffle"
] | [((1928, 1991), 'numpy.random.normal', 'np.random.normal', (['mu_init', 'sigma_init', '(n_population, n_params)'], {}), '(mu_init, sigma_init, (n_population, n_params))\n', (1944, 1991), True, 'import numpy as np\n'), ((2014, 2076), 'numpy.full', 'np.full', (['(n_population, n_params)', 'sigma_init'], {'dtype': '"""float64"""'}), "((n_population, n_params), sigma_init, dtype='float64')\n", (2021, 2076), True, 'import numpy as np\n'), ((2102, 2124), 'numpy.zeros', 'np.zeros', (['n_population'], {}), '(n_population)\n', (2110, 2124), True, 'import numpy as np\n'), ((3446, 3487), 'numpy.tile', 'np.tile', (['self.indices_fittest', 'n_children'], {}), '(self.indices_fittest, n_children)\n', (3453, 3487), True, 'import numpy as np\n'), ((3496, 3526), 'numpy.random.shuffle', 'np.random.shuffle', (['parent_list'], {}), '(parent_list)\n', (3513, 3526), True, 'import numpy as np\n'), ((4383, 4413), 'pickle.dump', 'pickle.dump', (['self', 'filehandler'], {}), '(self, filehandler)\n', (4394, 4413), False, 'import pickle\n'), ((2987, 3011), 'numpy.argsort', 'np.argsort', (['self.fitness'], {}), '(self.fitness)\n', (2997, 3011), True, 'import numpy as np\n'), ((3811, 3917), 'numpy.random.shuffle', 'np.random.shuffle', (['next_generation_params[group * self.n_crossover:(group + 1) * self.\n n_crossover, i]'], {}), '(next_generation_params[group * self.n_crossover:(group + \n 1) * self.n_crossover, i])\n', (3828, 3917), True, 'import numpy as np\n'), ((3968, 4074), 'numpy.random.shuffle', 'np.random.shuffle', (['next_generation_sigmas[group * self.n_crossover:(group + 1) * self.\n n_crossover, i]'], {}), '(next_generation_sigmas[group * self.n_crossover:(group + \n 1) * self.n_crossover, i])\n', (3985, 4074), True, 'import numpy as np\n'), ((2665, 2708), 'numpy.zeros', 'np.zeros', (['(self.n_population * self.n_params)'], {}), '(self.n_population * self.n_params)\n', (2673, 2708), True, 'import numpy as np\n'), ((2733, 2774), 'numpy.eye', 'np.eye', (['(self.n_population * 
self.n_params)'], {}), '(self.n_population * self.n_params)\n', (2739, 2774), True, 'import numpy as np\n')] |
from setuptools import setup
def _md(filename):
'''
Load md file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- all badges
'''
content = open(filename).read()
return content
long_description = '\n'.join((
_md('README.md'),
_md('CHANGELOG.md'),
''
))
exec(compile(
open('devpi_semantic_ui/__about__.py').read(),
'devpi_semantic_ui/__about__.py',
'exec'
))
setup(
name="devpi-semantic-ui",
description=__description__,
url="https://github.com/apihackers/devpi-semantic-ui",
version=__version__,
maintainer="API Hackers",
maintainer_email="<EMAIL>",
license="MIT",
entry_points={
'devpi_server': [
"devpi-semantic-ui = devpi_semantic_ui"
]
},
install_requires=['devpi-web'],
include_package_data=True,
zip_safe=False,
packages=['devpi_semantic_ui'],
keywords='devpi semantic-ui',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Environment :: Web Environment',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: System :: Software Distribution',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
],
)
| [
"setuptools.setup"
] | [((455, 1470), 'setuptools.setup', 'setup', ([], {'name': '"""devpi-semantic-ui"""', 'description': '__description__', 'url': '"""https://github.com/apihackers/devpi-semantic-ui"""', 'version': '__version__', 'maintainer': '"""API Hackers"""', 'maintainer_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'entry_points': "{'devpi_server': ['devpi-semantic-ui = devpi_semantic_ui']}", 'install_requires': "['devpi-web']", 'include_package_data': '(True)', 'zip_safe': '(False)', 'packages': "['devpi_semantic_ui']", 'keywords': '"""devpi semantic-ui"""', 'classifiers': "['Development Status :: 3 - Alpha', 'Programming Language :: Python',\n 'Environment :: Web Environment', 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Topic :: System :: Software Distribution',\n 'Programming Language :: Python', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'License :: OSI Approved :: MIT License']"}), "(name='devpi-semantic-ui', description=__description__, url=\n 'https://github.com/apihackers/devpi-semantic-ui', version=__version__,\n maintainer='API Hackers', maintainer_email='<EMAIL>', license='MIT',\n entry_points={'devpi_server': ['devpi-semantic-ui = devpi_semantic_ui']\n }, install_requires=['devpi-web'], include_package_data=True, zip_safe=\n False, packages=['devpi_semantic_ui'], keywords='devpi semantic-ui',\n classifiers=['Development Status :: 3 - Alpha',\n 'Programming Language :: Python', 'Environment :: Web Environment',\n 'Operating System :: OS Independent', 'Intended Audience :: Developers',\n 'Topic :: System :: Software Distribution',\n 'Programming Language :: Python', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic 
:: Software Development :: Libraries :: Python Modules',\n 'License :: OSI Approved :: MIT License'])\n", (460, 1470), False, 'from setuptools import setup\n')] |
import pandas as pd
import matplotlib.pyplot as plt
import os
# uncomment these two lines so you can see all the records when you print or write instead of dot dot dot ...
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
def pretty_print(name, to_print):
print(f'{name} ')
print(f'{to_print}\n\n')
# filename where to print the output for my reference
f = open("data/my_output.txt", "w+")
# read / load the file
df = pd.read_csv(filepath_or_buffer='data/tweets.csv',
sep=',',
header=0) # header starts in first line
# 1. General information
pretty_print("Columns ", df.columns)
pretty_print("index ", df.index)
pretty_print("dtypes ", df.dtypes)
pretty_print("shape ", df.shape)
pretty_print("info ", df.info())
pretty_print("describe ", df.describe())
pretty_print("null check", df.isnull())
pretty_print("length", len(df)) # output total records
# 2 Print 'handle', 'text', 'is_retweet', 'time', 'lang', 'retweet_count', 'favorite_count' columns
# There is no need to clean other columns as I will not be using them. I can drop them however, for now, I will just
# select the columns that I need to use.
# Add ActualDate in the last column
df['actual_date'] = df['time'].str[:10]
# Change data type of actual_date
df['actual_date'] = pd.to_datetime(df['actual_date'], format='%Y/%m/%d')
df2 = df[['handle', 'text', 'is_retweet', 'time',
'actual_date', 'lang', 'retweet_count', 'favorite_count']]
pretty_print("Selected Columns ", df2.to_string())
pretty_print("Checking n unique values for each column", df2.nunique())
pretty_print("index ", df2.index)
pretty_print("dtypes ", df.dtypes)
# Filter dates > 04/01/2019
# pretty_print("Selecting rows by multiple criteria",
# df2[(df2['actual_date'] >= '2016-04-01') & (df2['actual_date'] >= '2016-09-30')])
df2.sort_values(by=['actual_date'])
df2 = df2[(df2['actual_date'] >= '2016-4-1') & (df2['actual_date'] <= '2016-9-30')]
print(df2.to_string())
# pretty_print("Filter Dataframe for date >=04/01/2016", df2[df2['actual_date'] >= '2016-04-01'].to_string())
# Correlation
pretty_print("Correlation ", df2.corr().to_string())
# Correlation output. This shows that the retweet count and favorite_count are positively correlated.
# is_retweet retweet_count favorite_count
# is_retweet 1.000000 -0.077440 -0.141131
# retweet_count -0.077440 1.000000 0.928429
# favorite_count -0.141131 0.928429 1.000000
# Split the text column - this part will handle the splitting of text column to create a new dataframe
# df2 = df[['handle', 'text', 'time', 'lang', 'retweet_count', 'favorite_count']]
#
# new_df = pd.DataFrame(df.text.str.split(' ').tolist(), index=[df.time, df.handle]).stack()
# for i in new_df:
# print(i)
# Advanced homework part
pretty_print("new dataframe", df2.to_string())
pretty_print("selected columns", df2.columns)
os.makedirs('plots', exist_ok=True)
# Plotting line chart
plt.plot(df2['retweet_count'], color='red')
plt.title('Retweet by Index')
plt.xlabel('Index')
plt.ylabel('retweet_count')
plt.savefig(f'plots/retweet_by_candidate.png', format='png')
plt.clf()
# Plotting scatterplot
plt.scatter(df2['handle'], df2['favorite_count'], color='b')
plt.title('Candidate Favorites')
plt.xlabel('handle')
plt.ylabel('favorite_count')
plt.savefig(f'plots/candidate_favorite.png', format='png')
plt.close()
# more plots coming -with group by and sorting.
| [
"matplotlib.pyplot.savefig",
"os.makedirs",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"pandas.to_datetime"
] | [((469, 537), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': '"""data/tweets.csv"""', 'sep': '""","""', 'header': '(0)'}), "(filepath_or_buffer='data/tweets.csv', sep=',', header=0)\n", (480, 537), True, 'import pandas as pd\n'), ((1330, 1382), 'pandas.to_datetime', 'pd.to_datetime', (["df['actual_date']"], {'format': '"""%Y/%m/%d"""'}), "(df['actual_date'], format='%Y/%m/%d')\n", (1344, 1382), True, 'import pandas as pd\n'), ((2982, 3017), 'os.makedirs', 'os.makedirs', (['"""plots"""'], {'exist_ok': '(True)'}), "('plots', exist_ok=True)\n", (2993, 3017), False, 'import os\n'), ((3041, 3084), 'matplotlib.pyplot.plot', 'plt.plot', (["df2['retweet_count']"], {'color': '"""red"""'}), "(df2['retweet_count'], color='red')\n", (3049, 3084), True, 'import matplotlib.pyplot as plt\n'), ((3085, 3114), 'matplotlib.pyplot.title', 'plt.title', (['"""Retweet by Index"""'], {}), "('Retweet by Index')\n", (3094, 3114), True, 'import matplotlib.pyplot as plt\n'), ((3115, 3134), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Index"""'], {}), "('Index')\n", (3125, 3134), True, 'import matplotlib.pyplot as plt\n'), ((3135, 3162), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""retweet_count"""'], {}), "('retweet_count')\n", (3145, 3162), True, 'import matplotlib.pyplot as plt\n'), ((3163, 3223), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""plots/retweet_by_candidate.png"""'], {'format': '"""png"""'}), "(f'plots/retweet_by_candidate.png', format='png')\n", (3174, 3223), True, 'import matplotlib.pyplot as plt\n'), ((3224, 3233), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3231, 3233), True, 'import matplotlib.pyplot as plt\n'), ((3258, 3318), 'matplotlib.pyplot.scatter', 'plt.scatter', (["df2['handle']", "df2['favorite_count']"], {'color': '"""b"""'}), "(df2['handle'], df2['favorite_count'], color='b')\n", (3269, 3318), True, 'import matplotlib.pyplot as plt\n'), ((3319, 3351), 'matplotlib.pyplot.title', 'plt.title', (['"""Candidate Favorites"""'], 
{}), "('Candidate Favorites')\n", (3328, 3351), True, 'import matplotlib.pyplot as plt\n'), ((3352, 3372), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""handle"""'], {}), "('handle')\n", (3362, 3372), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3401), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""favorite_count"""'], {}), "('favorite_count')\n", (3383, 3401), True, 'import matplotlib.pyplot as plt\n'), ((3402, 3460), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""plots/candidate_favorite.png"""'], {'format': '"""png"""'}), "(f'plots/candidate_favorite.png', format='png')\n", (3413, 3460), True, 'import matplotlib.pyplot as plt\n'), ((3462, 3473), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3471, 3473), True, 'import matplotlib.pyplot as plt\n')] |
from typing import Any, Optional, Callable
from enum import IntEnum
import time
import uuid
import math
from commlib.node import Node
from goalee.goal import Goal, GoalState
from goalee.types import Point
class AreaGoalTag(IntEnum):
ENTER = 0
EXIT = 1
AVOID = 2
STEP = 3
class RectangleAreaGoal(Goal):
def __init__(self,
topic: str,
bottom_left_edge: Point,
length_x: float,
length_y: float,
tag: AreaGoalTag = AreaGoalTag.ENTER,
comm_node: Optional[Node] = None,
name: Optional[str] = None,
event_emitter: Optional[Any] = None,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None):
super().__init__(comm_node, event_emitter, name=name,
max_duration=max_duration,
min_duration=min_duration)
self._topic = topic
self._msg = None
self._bottom_left_edge = bottom_left_edge
self._length_x = length_x
self._length_y = length_y
self._tag = tag
@property
def tag(self):
return self._tag
def on_enter(self):
print(f'Starting RectangleAreaGoal <{self._name}> with params:')
print(f'-> topic: {self._topic}')
print(f'-> bottom_left_edge: {self._bottom_left_edge}')
print(f'-> length_x: {self._length_x}')
print(f'-> length_y: {self._length_y}')
self._listener = self._comm_node.create_subscriber(
topic=self._topic, on_message=self._on_message
)
self._listener.run()
def on_exit(self):
self._listener.stop()
def _on_message(self, msg):
pos = msg['position']
x_axis = (pos['x'] < (self._bottom_left_edge.x + self._length_x)
and pos['x'] > self._bottom_left_edge.x)
y_axis = (pos['y'] < (self._bottom_left_edge.y + self._length_y)
and pos['y'] > self._bottom_left_edge.y)
reached = x_axis and y_axis
if reached and self.tag == AreaGoalTag.ENTER:
self.set_state(GoalState.COMPLETED)
elif not reached and self.tag == AreaGoalTag.AVOID:
self.set_state(GoalState.FAILED)
class CircularAreaGoal(Goal):
def __init__(self,
topic: str,
center: Point,
radius: float,
tag: AreaGoalTag = AreaGoalTag.ENTER,
comm_node: Optional[Node] = None,
name: Optional[str] = None,
event_emitter: Optional[Any] = None,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None):
super().__init__(comm_node, event_emitter, name=name,
max_duration=max_duration,
min_duration=min_duration)
self._topic = topic
self._msg = None
self._center = center
self._radius = radius
self._tag = tag
@property
def tag(self):
return self._tag
def on_enter(self):
print(f'Starting CircularAreaGoal <{self._name}> with params:')
print(f'-> topic: {self._topic}')
print(f'-> center: {self._center}')
print(f'-> radius: {self._radius}')
self._listener = self._comm_node.create_subscriber(
topic=self._topic, on_message=self._on_message
)
self._listener.run()
def on_exit(self):
self._listener.stop()
def _on_message(self, msg):
pos = msg['position']
dist = self._calc_distance(pos)
if dist < self._radius and self.tag == AreaGoalTag.ENTER:
# inside the circle
self.set_state(GoalState.COMPLETED)
def _calc_distance(self, pos):
d = math.sqrt(
(pos['x'] - self._center.x)**2 + \
(pos['y'] - self._center.y)**2
)
return d
| [
"math.sqrt"
] | [((3871, 3949), 'math.sqrt', 'math.sqrt', (["((pos['x'] - self._center.x) ** 2 + (pos['y'] - self._center.y) ** 2)"], {}), "((pos['x'] - self._center.x) ** 2 + (pos['y'] - self._center.y) ** 2)\n", (3880, 3949), False, 'import math\n')] |
# Generated by Django 3.2.11 on 2022-01-07 02:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("musics", "0001_initial"),
("instruments", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Activity",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("body", models.TextField()),
],
options={
"verbose_name": "Activity",
"verbose_name_plural": "Activities",
},
),
migrations.CreateModel(
name="ActivityCategory",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
],
options={
"verbose_name": "Activity Category",
"verbose_name_plural": "Activity Categories",
},
),
migrations.CreateModel(
name="Assignment",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deadline", models.DateField(blank=True, null=True)),
(
"activity",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="assignments.activity",
),
),
(
"instrument",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="instruments.instrument",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="ActivityType",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, unique=True)),
(
"category",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="assignments.activitycategory",
),
),
],
options={
"verbose_name": "Activity Type",
"verbose_name_plural": "Activity Types",
},
),
migrations.AddField(
model_name="activity",
name="activity_type",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="assignments.activitytype",
),
),
migrations.AddField(
model_name="activity",
name="part",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="musics.parttransposition",
),
),
]
| [
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BigAutoField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((248, 305), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (279, 305), False, 'from django.db import migrations, models\n'), ((3849, 3947), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""assignments.activitytype"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'assignments.activitytype')\n", (3866, 3947), False, 'from django.db import migrations, models\n'), ((4109, 4207), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""musics.parttransposition"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'musics.parttransposition')\n", (4126, 4207), False, 'from django.db import migrations, models\n'), ((556, 652), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (575, 652), False, 'from django.db import migrations, models\n'), ((813, 831), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (829, 831), False, 'from django.db import migrations, models\n'), ((1148, 1244), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1167, 1244), False, 'from django.db import migrations, models\n'), ((1405, 1437), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1421, 1437), False, 'from django.db import migrations, models\n'), ((1766, 1862), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': 
'(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1785, 1862), False, 'from django.db import migrations, models\n'), ((2027, 2066), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2043, 2066), False, 'from django.db import migrations, models\n'), ((2139, 2233), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""assignments.activity"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'assignments.activity')\n", (2156, 2233), False, 'from django.db import migrations, models\n'), ((2392, 2488), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""instruments.instrument"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'instruments.instrument')\n", (2409, 2488), False, 'from django.db import migrations, models\n'), ((2641, 2737), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.PROTECT, to=settings.\n AUTH_USER_MODEL)\n', (2658, 2737), False, 'from django.db import migrations, models\n'), ((3000, 3096), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3019, 3096), False, 'from django.db import migrations, models\n'), ((3257, 3302), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (3273, 3302), False, 'from django.db import migrations, models\n'), ((3375, 3477), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 
'django.db.models.deletion.PROTECT', 'to': '"""assignments.activitycategory"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'assignments.activitycategory')\n", (3392, 3477), False, 'from django.db import migrations, models\n')] |
import os
def file_len(filename, comment):
line_count = 0
with open(filename) as f:
for line in f:
if line.strip().startswith(comment):
continue
if line.strip():
line_count += 1
return line_count
def walk(path='.'):
loc = 0
for root, dirs, files in os.walk(path):
if '.git' in root or 'venv' in root:
continue
for file in files:
filepath = os.path.join(root, file)
if file.endswith('.py'):
loc += file_len(filepath, '#')
elif file.endswith('.html'):
if file in ('privacy_policy.html', 'tac.html'):
continue
loc += file_len(filepath, '<!--')
elif file.endswith('.css'):
if file == 'jquery.tagsinput-revisited.css':
continue
loc += file_len(filepath, '/*')
elif file.endswith('.js'):
if file == 'jquery.tagsinput-revisited.js':
continue
loc += file_len(filepath, '//')
return loc
| [
"os.path.join",
"os.walk"
] | [((340, 353), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (347, 353), False, 'import os\n'), ((471, 495), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (483, 495), False, 'import os\n')] |
import random
def parse_args():
from argparse import ArgumentParser
from argparse import ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description="Framestick feed-forward f0 generator",
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-a", "--schema", type=str, help="architecture", required=True)
args = parser.parse_args()
return args
def generate_connections(start_index: int, l1_neurons: int, l2_neurons: int):
connections = []
l1_end_neuron = start_index + l1_neurons
for l1 in range(start_index, l1_end_neuron):
for l2 in range(l1_end_neuron, l1_end_neuron + l2_neurons):
connections.append((l1, l2))
return connections
def generate_f0(neurons, connections):
out = "//0\np:\n"
out += "n:d=Nu\n" * neurons
for c in connections:
value = random.randint(1, 25)
out += f"c:{c[0]}, {c[1]}, {value}\n"
return out
if __name__ == '__main__':
args = parse_args()
layers = [int(v) for v in args.schema.split('-')]
layers.reverse()
neurons = sum(layers)
layer_start_index = 0
connections = []
for i in range(len(layers) - 1):
l1_neurons = layers[i]
connections.extend(generate_connections(layer_start_index, l1_neurons, layers[i + 1]))
layer_start_index += l1_neurons
net = generate_f0(neurons, connections)
print(net)
| [
"random.randint",
"argparse.ArgumentParser"
] | [((143, 260), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Framestick feed-forward f0 generator"""', 'formatter_class': 'ArgumentDefaultsHelpFormatter'}), "(description='Framestick feed-forward f0 generator',\n formatter_class=ArgumentDefaultsHelpFormatter)\n", (157, 260), False, 'from argparse import ArgumentParser\n'), ((885, 906), 'random.randint', 'random.randint', (['(1)', '(25)'], {}), '(1, 25)\n', (899, 906), False, 'import random\n')] |
################################################################################
## Imports and configurations
import sys
import os
PROJ_PATH = '.'
#PROJ_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), '../../'))
#sys.path.append(PROJ_PATH)
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# feature selection
from sklearn.feature_selection import SelectFromModel
from rfpimp import importances as permutation_importances, plot_importances
# classifiers
from sklearn.ensemble import RandomForestClassifier
# reporting
from src.reporting.reports import reports
## configs
DATA_PATH = PROJ_PATH+'/data/DGT/central_pt/'
RAW_PATH = DATA_PATH+'raw/'
PROCESSED_PATH = DATA_PATH+'processed/'
TRAIN_DATA = RAW_PATH+'training.csv'
TEST_DATA = RAW_PATH+'testing.csv'
LABELS_PATH = RAW_PATH+'Class_legend.txt'
random_state = 0
################################################################################
## read data and preprocess
# read
df_train = pd.read_csv(TRAIN_DATA).drop(columns='Unnamed: 0')
X = df_train.drop(columns='CLASS')
# NOTE(review): this fragment starts mid-script — `df_train`, `X`, `LABELS_PATH`,
# `PROCESSED_PATH`, `TRAIN_DATA`, `TEST_DATA`, `random_state`,
# `permutation_importances` and `reports` are defined earlier in the file.
y = df_train['CLASS'].astype(int)
# get feature names and labels
feat_labels = list(X.columns)
# LABELS_PATH is a tab-separated, headerless file; column 1 holds the label text.
class_labels = pd.read_csv(LABELS_PATH, sep='\t', header=None,
                           index_col=0)[1].to_dict()
# standardize
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
################################################################################
## feature selection
# Split data into 40% test and 60% training
_X_tr, _X_te, _y_tr, _y_te = train_test_split(X, y, test_size=0.4,
                                              random_state=random_state)
# Create and train a random forest classifier
clf = RandomForestClassifier(n_estimators=100, n_jobs=-1,
                             random_state=random_state)
clf.fit(_X_tr, _y_tr)
# Gini Index Importance Feature Selection Method
# Keep features whose Gini importance exceeds 0.8x the mean importance.
gini_imp_feat_sel = SelectFromModel(clf, prefit=True, threshold='.8*mean')
gini_accepted = gini_imp_feat_sel.get_support()
# Permutation
# Permutation importance is computed on the held-out split to avoid the
# optimistic bias of impurity-based importances.
imp = permutation_importances(
    clf,
    pd.DataFrame(_X_te, columns=feat_labels),
    pd.Series(_y_te, name='CLASS')
)
permutation_accepted = (imp['Importance']>0).loc[feat_labels].values
# Keep the ones accepted with both methods
accepted_feats = (gini_accepted.astype(int)+permutation_accepted.astype(int))==2
# save feature selection results
feat_sel_results = pd.DataFrame(
    np.array([gini_accepted, permutation_accepted, accepted_feats]).T,
    index=feat_labels,
    columns=['Gini', 'Permutation', 'Selected']
)
feat_sel_results.to_csv(PROCESSED_PATH+'feature_selection_results.csv')
################################################################################
## test different methods using test set
df_train = pd.read_csv(TRAIN_DATA).drop(columns='Unnamed: 0')
X_train = df_train.drop(columns='CLASS')
y_train = df_train['CLASS'].astype(int)
df_test = pd.read_csv(TEST_DATA).drop(columns='Unnamed: 0')
X_test = df_test.drop(columns='CLASS')
y_test = df_test['CLASS'].astype(int)
# Reload the selection table written above; one boolean column per method.
features_selected = pd.read_csv(PROCESSED_PATH+'feature_selection_results.csv')\
    .rename(columns={'Unnamed: 0': 'features'}).set_index('features')
features_selected['Original'] = True
#pd.DataFrame(features_selected[features_selected].count(),
#             columns=['# features used'])\
#    .sort_values('# features used', ascending=False)\
#    .to_csv('feature_selection_count.csv')
# get feature names and labels
feat_labels = list(X_train.columns)
class_labels = pd.read_csv(LABELS_PATH, sep='\t', header=None,
                           index_col=0)[1].to_dict()
# standardize
scaler = StandardScaler()
scaler.fit(X_train)
# In-place standardization of both splits with the scaler fit on train only.
scaler.transform(X_train.values, copy=False)
scaler.transform(X_test.values, copy=False)
scores = []
# Retrain and score a fresh forest on each method's feature subset.
for method in features_selected.columns:
    rfc = RandomForestClassifier(100, random_state=0)
    features = features_selected[method]
    _X_tr = X_train[features[features].index]
    _y_tr = y_train.copy()
    rfc.fit(_X_tr, _y_tr)
    _X_te = X_test[features[features].index]
    _y_te = y_test.copy()
    _y_pred = rfc.predict(_X_te)
    scores.append(reports(_y_te, _y_pred)[-1].rename({'Score': method}))
# Combine feature counts with per-method scores and persist the comparison.
pd.DataFrame(features_selected[features_selected].count(),
             columns=['# features used'])\
    .join(pd.concat(scores, 1).T)\
    .sort_values('# features used', ascending=False)\
    .rename(index={'Selected':'Intersect'})\
    .to_csv('feature_selection_results.csv')
################################################################################
## define noise introduction procedure
## define filters
## define classifiers
## setup and run experiment
## save results
## setup and train models using hyperparameters with best scores
## get testing dataset scores
"pandas.Series",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"src.reporting.reports.reports",
"pandas.DataFrame",
"pandas.concat",
"sklearn.feature_selection.SelectFromModel"
] | [((1358, 1374), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1372, 1374), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1589, 1653), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.4)', 'random_state': 'random_state'}), '(X, y, test_size=0.4, random_state=random_state)\n', (1605, 1653), False, 'from sklearn.model_selection import train_test_split\n'), ((1711, 1789), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)', 'n_jobs': '(-1)', 'random_state': 'random_state'}), '(n_estimators=100, n_jobs=-1, random_state=random_state)\n', (1733, 1789), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1886, 1940), 'sklearn.feature_selection.SelectFromModel', 'SelectFromModel', (['clf'], {'prefit': '(True)', 'threshold': '""".8*mean"""'}), "(clf, prefit=True, threshold='.8*mean')\n", (1901, 1940), False, 'from sklearn.feature_selection import SelectFromModel\n'), ((3578, 3594), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3592, 3594), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2048, 2088), 'pandas.DataFrame', 'pd.DataFrame', (['_X_te'], {'columns': 'feat_labels'}), '(_X_te, columns=feat_labels)\n', (2060, 2088), True, 'import pandas as pd\n'), ((2094, 2124), 'pandas.Series', 'pd.Series', (['_y_te'], {'name': '"""CLASS"""'}), "(_y_te, name='CLASS')\n", (2103, 2124), True, 'import pandas as pd\n'), ((3768, 3811), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', (['(100)'], {'random_state': '(0)'}), '(100, random_state=0)\n', (3790, 3811), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1061, 1084), 'pandas.read_csv', 'pd.read_csv', (['TRAIN_DATA'], {}), '(TRAIN_DATA)\n', (1072, 1084), True, 'import pandas as pd\n'), ((2392, 2455), 'numpy.array', 'np.array', (['[gini_accepted, permutation_accepted, accepted_feats]'], {}), 
'([gini_accepted, permutation_accepted, accepted_feats])\n', (2400, 2455), True, 'import numpy as np\n'), ((2738, 2761), 'pandas.read_csv', 'pd.read_csv', (['TRAIN_DATA'], {}), '(TRAIN_DATA)\n', (2749, 2761), True, 'import pandas as pd\n'), ((2882, 2904), 'pandas.read_csv', 'pd.read_csv', (['TEST_DATA'], {}), '(TEST_DATA)\n', (2893, 2904), True, 'import pandas as pd\n'), ((1257, 1317), 'pandas.read_csv', 'pd.read_csv', (['LABELS_PATH'], {'sep': '"""\t"""', 'header': 'None', 'index_col': '(0)'}), "(LABELS_PATH, sep='\\t', header=None, index_col=0)\n", (1268, 1317), True, 'import pandas as pd\n'), ((3476, 3536), 'pandas.read_csv', 'pd.read_csv', (['LABELS_PATH'], {'sep': '"""\t"""', 'header': 'None', 'index_col': '(0)'}), "(LABELS_PATH, sep='\\t', header=None, index_col=0)\n", (3487, 3536), True, 'import pandas as pd\n'), ((3030, 3091), 'pandas.read_csv', 'pd.read_csv', (["(PROCESSED_PATH + 'feature_selection_results.csv')"], {}), "(PROCESSED_PATH + 'feature_selection_results.csv')\n", (3041, 3091), True, 'import pandas as pd\n'), ((4074, 4097), 'src.reporting.reports.reports', 'reports', (['_y_te', '_y_pred'], {}), '(_y_te, _y_pred)\n', (4081, 4097), False, 'from src.reporting.reports import reports\n'), ((4233, 4253), 'pandas.concat', 'pd.concat', (['scores', '(1)'], {}), '(scores, 1)\n', (4242, 4253), True, 'import pandas as pd\n')] |
import re
from functools import wraps
from io import BytesIO

from PIL import Image, ImageOps
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponseForbidden
from django.shortcuts import get_object_or_404

from forum.models import Topic
# Matches "@username" mentions; MULTILINE so mentions at line starts also match.
MENTION_REGEX = re.compile(r'@(\w+)', re.M)
# Thumbnail edge lengths in pixels (square crops).
IMAGE_LARGE = 144
IMAGE_MEDIUM = 96
IMAGE_SMALL = 48
# Topics listed per pagination page.
NUM_PER_PAGE = 20
def _thumbnail(upload, size, fmt):
    """Crop/resize *upload* to a square of *size* and return it as a buffer.

    :param upload: source ``PIL.Image``
    :param size: ``(width, height)`` tuple for the fitted crop
    :param fmt: Pillow format name, e.g. ``"jpeg"``
    :return: ``BytesIO`` positioned at the start of the encoded image
    """
    # Image.LANCZOS replaces the Image.ANTIALIAS alias removed in Pillow 10;
    # it is the same filter and has existed since Pillow 2.7.
    img = ImageOps.fit(upload, size, Image.LANCZOS)
    temp = BytesIO()
    img.save(temp, fmt, quality=95)
    temp.seek(0)
    return temp
def create_thumbnail(src, new_name, ext):
    """Generate large/medium/small square thumbnails from an uploaded image.

    :param src: uploaded file object (has ``read()`` and ``content_type``)
    :param new_name: base name for the generated files
    :param ext: file extension (without dot) for the generated names
    :return: tuple ``(large_file, medium_file, small_file)`` of
        ``SimpleUploadedFile`` instances named ``<new_name>_<l|m|s>.<ext>``
    """
    upload = Image.open(BytesIO(src.read()))
    fmt = src.content_type.split('/')[-1]
    # One pass per target size; suffix encodes the size class in the filename.
    results = []
    for edge, suffix in ((IMAGE_LARGE, 'l'), (IMAGE_MEDIUM, 'm'), (IMAGE_SMALL, 's')):
        buf = _thumbnail(upload, (edge, edge), fmt)
        filename = "%s_%s.%s" % (new_name, suffix, ext)
        results.append(
            SimpleUploadedFile(filename, buf.read(), content_type=src.content_type)
        )
    large_file, medium_file, small_file = results
    return large_file, medium_file, small_file
def get_pagination(current_page, num_pages, count):
    """Return page numbers to display: a window of *count* pages on each
    side of *current_page*, clamped to the range ``1..num_pages``.
    """
    window = 2 * count + 1
    # Few enough pages: show them all.
    if window >= num_pages:
        return list(range(1, num_pages + 1))
    # Near the first page: anchor the window at the left edge.
    if current_page - count < 1:
        return list(range(1, window + 1))
    # Near the last page: anchor the window at the right edge.
    if current_page + count > num_pages:
        return list(range(num_pages + 1 - window, num_pages + 1))
    # Otherwise center the window on the current page.
    return list(range(current_page - count, current_page + count + 1))
def topic_pagination(page, topics):
    """Paginate *topics* and return ``(page_of_topics, page_number_list)``."""
    paginator = Paginator(topics, NUM_PER_PAGE)
    try:
        current = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page request: fall back to the first page.
        current = paginator.page(1)
    except EmptyPage:
        # Out-of-range page request: clamp to the last page.
        current = paginator.page(paginator.num_pages)
    return current, get_pagination(current.number, paginator.num_pages, 2)
def author_required(view_func):
    """Decorator: run the view only when ``request.user`` authored the topic.

    The wrapped view must be called with a ``topic_id`` keyword argument;
    non-authors get a 403 response. Raises ``Http404`` (via
    ``get_object_or_404``) when the topic does not exist.
    """
    # wraps() preserves the view's name/docstring for Django introspection.
    @wraps(view_func)
    def _wrapped_view_func(request, *args, **kwargs):
        topic_id = kwargs.get('topic_id')
        topic = get_object_or_404(Topic, id=topic_id)
        if topic.author == request.user:
            return view_func(request, *args, **kwargs)
        return HttpResponseForbidden()
    return _wrapped_view_func
def get_metioned_user(sender, markdown):
    """Return users @-mentioned in *markdown*, excluding *sender* itself.

    :param sender: the authoring ``User`` (self-mentions are ignored)
    :param markdown: raw post text to scan for ``@username`` tokens
    :return: a ``User`` queryset, or ``None`` when nobody else is mentioned
    """
    # Call findall on the compiled pattern directly; set literal over set([...]).
    mentioned = set(MENTION_REGEX.findall(markdown)) - {sender.username}
    if mentioned:
        return User.objects.filter(username__in=mentioned)
    return None
| [
"re.compile",
"django.shortcuts.get_object_or_404",
"PIL.ImageOps.fit",
"io.BytesIO",
"django.http.HttpResponseForbidden",
"django.contrib.auth.models.User.objects.filter",
"re.findall",
"django.core.paginator.Paginator"
] | [((385, 412), 're.compile', 're.compile', (['"""@(\\\\w+)"""', 're.M'], {}), "('@(\\\\w+)', re.M)\n", (395, 412), False, 'import re\n'), ((530, 573), 'PIL.ImageOps.fit', 'ImageOps.fit', (['upload', 'size', 'Image.ANTIALIAS'], {}), '(upload, size, Image.ANTIALIAS)\n', (542, 573), False, 'from PIL import Image, ImageOps\n'), ((590, 599), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (597, 599), False, 'from io import BytesIO\n'), ((2025, 2056), 'django.core.paginator.Paginator', 'Paginator', (['topics', 'NUM_PER_PAGE'], {}), '(topics, NUM_PER_PAGE)\n', (2034, 2056), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((2517, 2554), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Topic'], {'id': 'topic_id'}), '(Topic, id=topic_id)\n', (2534, 2554), False, 'from django.shortcuts import get_object_or_404\n'), ((2968, 3011), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'username__in': 'mentioned'}), '(username__in=mentioned)\n', (2987, 3011), False, 'from django.contrib.auth.models import User\n'), ((2693, 2716), 'django.http.HttpResponseForbidden', 'HttpResponseForbidden', ([], {}), '()\n', (2714, 2716), False, 'from django.http import HttpResponseForbidden\n'), ((2814, 2849), 're.findall', 're.findall', (['MENTION_REGEX', 'markdown'], {}), '(MENTION_REGEX, markdown)\n', (2824, 2849), False, 'import re\n')] |
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import warnings
from time import time
from numbers import Real
from itertools import starmap, chain
import unittest
import pickle
import numpy as np
from numpy.testing import assert_array_equal
from Orange.data import (
ContinuousVariable,
DiscreteVariable,
StringVariable,
TimeVariable,
Variable,
Domain,
Table,
DomainConversion,
)
from Orange.data.domain import filter_visible
from Orange.preprocess import Continuize, Impute
from Orange.tests.base import create_pickling_tests
from Orange.util import OrangeDeprecationWarning
def create_domain(*ss):
    """Build a Domain from lists of fixture variable names.

    Each positional argument is a list of names (attributes, class vars,
    metas, in Domain order); names are resolved against a fixed pool of
    test variables defined below.
    """
    Variable._clear_all_caches()
    # Named `variables` rather than `vars` to avoid shadowing the builtin.
    variables = dict(
        age=ContinuousVariable(name="AGE"),
        gender=DiscreteVariable(name="Gender", values=["M", "F"]),
        incomeA=ContinuousVariable(name="incomeA"),
        income=ContinuousVariable(name="income"),
        education=DiscreteVariable(name="education", values=["GS", "HS", "C"]),
        ssn=StringVariable(name="SSN"),
        race=DiscreteVariable(
            name="race", values=["White", "Hypsanic", "African", "Other"]
        ),
        arrival=TimeVariable("arrival"),
    )

    def map_vars(s):
        # Translate a list of names into the corresponding Variable objects.
        return [variables[x] for x in s]

    return Domain(*[map_vars(s) for s in ss])
# Auto-generated TestCase verifying that Domains with various
# attribute/class/meta layouts survive a pickle round-trip.
PickleDomain = create_pickling_tests(
    "PickleDomain",
    ("empty_domain", lambda: create_domain([])),
    ("with_continuous_variable", lambda: create_domain(["age"])),
    ("with_discrete_variable", lambda: create_domain(["gender"])),
    ("with_mixed_variables", lambda: create_domain(["age", "gender"])),
    ("with_continuous_class", lambda: create_domain(["age", "gender"], ["incomeA"])),
    ("with_discrete_class", lambda: create_domain(["age", "gender"], ["education"])),
    (
        "with_multiple_classes",
        lambda: create_domain(["age", "gender"], ["incomeA", "education"]),
    ),
    ("with_metas", lambda: create_domain(["age", "gender"], [], ["ssn"])),
    (
        "with_class_and_metas",
        lambda: create_domain(["age", "gender"], ["incomeA", "education"], ["ssn"]),
    ),
)
# Module-level Variable fixtures shared by the test classes below, fetched
# from a throw-away domain that carries all of them as metas.
age, gender, incomeA, income, education, ssn, race, arrival = create_domain(
    [],
    [],
    ["age", "gender", "incomeA", "income", "education", "ssn", "race", "arrival"],
).metas
class TestDomainInit(unittest.TestCase):
    """Tests for Domain construction, lookup, conversion, copying and
    sparsity handling.

    Fixes applied: ``np.int`` (removed in NumPy 1.24) replaced with
    ``np.int_`` to match the rest of this module, and ``np.NaN`` (removed
    in NumPy 2.0) replaced with ``np.nan``.
    """

    def test_init_class(self):
        attributes = (age, gender, income)
        d = Domain(attributes, race)
        self.assertEqual(d.variables, attributes + (race,))
        self.assertEqual(d.attributes, attributes)
        self.assertEqual(d.class_var, race)
        self.assertEqual(d.class_vars, (race,))
        self.assertEqual(d.metas, ())

    def test_init_class_list(self):
        attributes = (age, gender, income)
        d = Domain(attributes, [race])
        self.assertEqual(d.variables, attributes + (race,))
        self.assertEqual(d.attributes, attributes)
        self.assertEqual(d.class_var, race)
        self.assertEqual(d.class_vars, (race,))
        self.assertEqual(d.metas, ())

    def test_init_no_class(self):
        attributes = (age, gender, income)
        d = Domain(attributes)
        self.assertEqual(d.variables, attributes)
        self.assertEqual(d.attributes, attributes)
        self.assertEqual(d.class_var, None)
        self.assertEqual(d.class_vars, ())
        self.assertEqual(d.metas, ())

    def test_init_no_class_false(self):
        attributes = (age, gender, income)
        d = Domain(attributes, None)
        self.assertEqual(d.variables, attributes)
        self.assertEqual(d.attributes, attributes)
        self.assertEqual(d.class_var, None)
        self.assertEqual(d.class_vars, ())
        self.assertEqual(d.metas, ())

    def test_init_multi_class(self):
        attributes = (age, gender, income)
        d = Domain(attributes, (education, race))
        self.assertEqual(d.variables, attributes + (education, race))
        self.assertEqual(d.attributes, attributes)
        # With several class vars there is no single class_var.
        self.assertIsNone(d.class_var)
        self.assertEqual(d.class_vars, (education, race))
        self.assertEqual(d.metas, ())

    def test_init_source(self):
        attributes = (age, gender, income)
        d = Domain(attributes, (education, race))
        # Variables may be specified by name or index relative to `source`.
        d2 = Domain(["Gender", 0, income], source=d)
        self.assertEqual(d2.variables, (gender, age, income))

    def test_init_source_class(self):
        attributes = (age, gender, income)
        d = Domain(attributes, (education, race))
        d2 = Domain(["Gender", 0], "income", source=d)
        self.assertEqual(d2.variables, (gender, age, income))

    def test_init_metas(self):
        attributes = (age, gender, income)
        metas = (ssn, race)
        d = Domain(attributes, race, metas=metas)
        self.assertEqual(d.variables, attributes + (race,))
        self.assertEqual(d.attributes, attributes)
        self.assertEqual(d.class_var, race)
        self.assertEqual(d.class_vars, (race,))
        self.assertEqual(d.metas, metas)

    def test_from_numpy_names(self):
        # Auto-generated names pad the index to the width of the widest one.
        for n_cols, name in [
            (5, "Feature {}"),
            (99, "Feature {:02}"),
            (100, "Feature {:03}"),
        ]:
            d = Domain.from_numpy(np.zeros((1, n_cols)))
            self.assertTrue(d.anonymous)
            self.assertEqual(
                [var.name for var in d.attributes],
                [name.format(i) for i in range(1, n_cols + 1)],
            )
        d = Domain.from_numpy(np.zeros((1, 1)))
        self.assertTrue(d.anonymous)
        self.assertEqual(d.attributes[0].name, "Feature")
        d = Domain.from_numpy(np.zeros((1, 3)), np.zeros((1, 1)), np.zeros((1, 100)))
        self.assertTrue(d.anonymous)
        self.assertEqual(
            [var.name for var in d.attributes],
            ["Feature {}".format(i) for i in range(1, 4)],
        )
        self.assertEqual(d.class_var.name, "Target")
        self.assertEqual(
            [var.name for var in d.metas],
            ["Meta {:03}".format(i) for i in range(1, 101)],
        )

    def test_from_numpy_dimensions(self):
        # Y may be a vector or a single-column matrix.
        for dimension in [[5], [5, 1]]:
            d = Domain.from_numpy(np.zeros((1, 1)), np.zeros(dimension))
            self.assertTrue(d.anonymous)
            self.assertEqual(len(d.class_vars), 1)
        self.assertRaises(ValueError, Domain.from_numpy, np.zeros(2))
        self.assertRaises(ValueError, Domain.from_numpy, np.zeros((2, 2, 2)))
        self.assertRaises(
            ValueError, Domain.from_numpy, np.zeros((2, 2)), np.zeros((2, 2, 2))
        )

    def test_from_numpy_values(self):
        for aran_min, aran_max, vartype in [
            (1, 3, ContinuousVariable),
            (0, 2, DiscreteVariable),
            (18, 23, ContinuousVariable),
        ]:
            n_rows, n_cols = aran_max - aran_min, 1
            d = Domain.from_numpy(
                np.zeros((1, 1)), np.arange(aran_min, aran_max).reshape(n_rows, n_cols)
            )
            self.assertTrue(d.anonymous)
            self.assertIsInstance(d.class_var, vartype)
            # NOTE(review): `vartype` is a class, so isinstance(vartype,
            # DiscreteVariable) is always False and the assertion below never
            # runs; `issubclass` was likely intended — confirm before changing.
            if isinstance(vartype, DiscreteVariable):
                self.assertEqual(
                    d.class_var.values, ["v{}".format(i) for i in range(1, 3)]
                )

    def test_wrong_vartypes(self):
        attributes = (age, gender, income)
        # String variables are not allowed among attributes or class vars.
        for args in ((attributes, ssn), (attributes + (ssn,)), ((ssn,) + attributes)):
            with self.assertRaises(TypeError):
                Domain(*args)

    def test_wrong_vartypes_w_source(self):
        d = Domain((age, gender), metas=(ssn,))
        with self.assertRaises(TypeError):
            Domain(-1, source=d)

    def test_wrong_types(self):
        with self.assertRaises(TypeError):
            Domain((age, []))
        with self.assertRaises(TypeError):
            Domain((age, "income"))
        with self.assertRaises(TypeError):
            Domain(([], age))
        with self.assertRaises(TypeError):
            Domain(("income", age))
        with self.assertRaises(TypeError):
            Domain((age,), self)
        with self.assertRaises(TypeError):
            Domain((age,), metas=("income",))

    def test_get_item(self):
        # Lookup works by variable, by name, and by index; metas use
        # negative indices.
        d = Domain((age, gender, income), metas=(ssn, race))
        for idx, var in [
            (age, age),
            ("AGE", age),
            (0, age),
            (income, income),
            ("income", income),
            (2, income),
            (ssn, ssn),
            ("SSN", ssn),
            (-1, ssn),
            (-2, race),
        ]:
            self.assertEqual(d[idx], var)

    def test_index(self):
        d = Domain((age, gender, income), metas=(ssn, race))
        for idx, var in [
            (age, 0),
            ("AGE", 0),
            (0, 0),
            (np.int_(0), 0),
            (income, 2),
            ("income", 2),
            (2, 2),
            (np.int_(2), 2),
            (ssn, -1),
            ("SSN", -1),
            (-1, -1),
            (np.int_(-1), -1),
            (-2, -2),
            (np.int_(-2), -2),
        ]:
            self.assertEqual(d.index(idx), var)

    def test_get_item_slices(self):
        d = Domain((age, gender, income, race), metas=(ssn, race))
        self.assertEqual(d[:2], (age, gender))
        self.assertEqual(d[1:3], (gender, income))
        self.assertEqual(d[2:], (income, race))

    def test_get_item_error(self):
        d = Domain((age, gender, income), metas=(ssn, race))
        for idx in (3, -3, incomeA, "no_such_thing"):
            with self.assertRaises(KeyError):
                _ = d[idx]
        with self.assertRaises(TypeError):
            _ = d[[2]]

    def test_index_error(self):
        d = Domain((age, gender, income), metas=(ssn, race))
        # np.int_ replaces the np.int alias removed in NumPy 1.24 (and keeps
        # this consistent with test_index/test_contains).
        for idx in (3, np.int_(3), -3, np.int_(-3), incomeA, "no_such_thing"):
            with self.assertRaises(ValueError):
                d.index(idx)
        with self.assertRaises(TypeError):
            d.index([2])

    def test_contains(self):
        d = Domain((age, gender, income), metas=(ssn,))
        for var in [
            "AGE",
            age,
            0,
            np.int_(0),
            "income",
            income,
            2,
            np.int_(2),
            "SSN",
            ssn,
            -1,
            np.int_(-1),
        ]:
            self.assertIn(var, d)
        for var in ["no_such_thing", race, 3, np.int_(3), -2, np.int_(-2)]:
            self.assertNotIn(var, d)
        with self.assertRaises(TypeError):
            {} in d
        with self.assertRaises(TypeError):
            [] in d

    def test_iter(self):
        # Iterating a domain with metas is deprecated; escalate warnings to
        # errors to assert the deprecation fires, then silence and re-check.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("error")
            d = Domain((age, gender, income), metas=(ssn,))
            with self.assertRaises(OrangeDeprecationWarning):
                list(d)
            warnings.simplefilter("ignore")
            self.assertEqual([var for var in d], [age, gender, income])
            d = Domain((age,), metas=(ssn,))
            self.assertEqual([var for var in d], [age])
            d = Domain((), metas=(ssn,))
            self.assertEqual([var for var in d], [])

    def test_str(self):
        cases = (
            (((),), "[]"),
            (((age,),), "[AGE]"),
            (((), age), "[ | AGE]"),
            (((gender,), age), "[Gender | AGE]"),
            (((gender, income), None), "[Gender, income]"),
            (((gender, income), age), "[Gender, income | AGE]"),
            (((gender,), (age, income)), "[Gender | AGE, income]"),
            (((gender,), (age, income), (ssn,)), "[Gender | AGE, income] {SSN}"),
            (
                ((gender,), (age, income), (ssn, race)),
                "[Gender | AGE, income] {SSN, race}",
            ),
            (((), (), (ssn, race)), "[] {SSN, race}"),
        )
        for args, printout in cases:
            self.assertEqual(str(Domain(*args)), printout)

    def test_has_discrete(self):
        self.assertFalse(Domain([]).has_discrete_attributes())
        self.assertFalse(Domain([], [age]).has_discrete_attributes())
        self.assertFalse(Domain([], race).has_discrete_attributes())
        self.assertFalse(Domain([age], None).has_discrete_attributes())
        self.assertTrue(Domain([race], None).has_discrete_attributes())
        self.assertTrue(Domain([age, race], None).has_discrete_attributes())
        self.assertTrue(Domain([race, age], None).has_discrete_attributes())
        self.assertFalse(Domain([], [age]).has_discrete_attributes(True))
        self.assertTrue(Domain([], [race]).has_discrete_attributes(True))
        self.assertFalse(Domain([age], None).has_discrete_attributes(True))
        self.assertTrue(Domain([race], None).has_discrete_attributes(True))
        self.assertTrue(Domain([age], race).has_discrete_attributes(True))
        self.assertTrue(Domain([race], age).has_discrete_attributes(True))
        self.assertTrue(Domain([], [race, age]).has_discrete_attributes(True))
        d = Domain([], None, [gender])
        self.assertTrue(d.has_discrete_attributes(False, True))
        d = Domain([], None, [age])
        self.assertFalse(d.has_discrete_attributes(False, True))
        d = Domain([], [age], [gender])
        self.assertTrue(d.has_discrete_attributes(True, True))
        d = Domain([], [incomeA], [age])
        self.assertFalse(d.has_discrete_attributes(True, True))

    def test_has_continuous(self):
        self.assertFalse(Domain([]).has_continuous_attributes())
        self.assertFalse(Domain([], [age]).has_continuous_attributes())
        self.assertFalse(Domain([], [race]).has_continuous_attributes())
        self.assertTrue(Domain([age], None).has_continuous_attributes())
        self.assertFalse(Domain([race], None).has_continuous_attributes())
        self.assertTrue(Domain([age, race], None).has_continuous_attributes())
        self.assertTrue(Domain([race, age], None).has_continuous_attributes())
        self.assertTrue(Domain([], [age]).has_continuous_attributes(True))
        self.assertFalse(Domain([], [race]).has_continuous_attributes(True))
        self.assertTrue(Domain([age], None).has_continuous_attributes(True))
        self.assertFalse(Domain([race], None).has_continuous_attributes(True))
        self.assertTrue(Domain([age], race).has_continuous_attributes(True))
        self.assertTrue(Domain([race], age).has_continuous_attributes(True))
        self.assertTrue(Domain([], [race, age]).has_continuous_attributes(True))
        d = Domain([], None, [age])
        self.assertTrue(d.has_continuous_attributes(False, True))
        d = Domain([], None, [gender])
        self.assertFalse(d.has_continuous_attributes(False, True))
        d = Domain([], [gender], [age])
        self.assertTrue(d.has_continuous_attributes(True, True))
        d = Domain([], [race], [gender])
        self.assertFalse(d.has_continuous_attributes(True, True))

    def test_has_time(self):
        self.assertFalse(Domain([]).has_time_attributes())
        self.assertFalse(Domain([], [age]).has_time_attributes())
        self.assertFalse(Domain([], [race]).has_time_attributes())
        self.assertFalse(Domain([], [arrival]).has_time_attributes())
        self.assertFalse(Domain([], [], [arrival]).has_time_attributes())
        self.assertTrue(Domain([arrival], []).has_time_attributes())
        self.assertTrue(Domain([], [arrival]).has_time_attributes(include_class=True))
        self.assertTrue(
            Domain([], [], [arrival]).has_time_attributes(include_metas=True)
        )
        self.assertFalse(Domain([arrival], []).has_time_class)
        self.assertTrue(Domain([], [arrival]).has_time_class)
        self.assertFalse(Domain([], [], [arrival]).has_time_class)

    def test_get_conversion(self):
        compute_value = lambda: 42
        new_income = income.copy(compute_value=compute_value)
        d = Domain((age, gender, income), metas=(ssn, race))
        e = Domain((gender, race), None, metas=(age, gender, ssn))
        f = Domain((gender,), (race, income), metas=(age, income, ssn))
        g = Domain((), metas=(age, gender, ssn))
        h = Domain((gender,), (race, new_income), metas=(age, new_income, ssn))
        # Conversion maps are expressed as source indices (negative for
        # metas) or compute_value callables for derived variables.
        for conver, domain, attr, class_vars, metas in (
            (d, e, [1, -2], [], [0, 1, -1]),
            (d, f, [1], [-2, 2], [0, 2, -1]),
            (f, g, [], [], [-1, 0, -3]),
            (g, h, [-2], [None, compute_value], [-1, compute_value, -3]),
        ):
            to_domain = domain.get_conversion(conver)
            self.assertIs(to_domain.source, conver)
            self.assertEqual(to_domain.attributes, attr)
            self.assertEqual(to_domain.class_vars, class_vars)
            self.assertEqual(to_domain.metas, metas)

    def test_conversion(self):
        domain = Domain([age, income], [race], [gender, education, ssn])
        x, y, metas = domain.convert([42, 13, "White"])
        assert_array_equal(x, np.array([42, 13]))
        assert_array_equal(y, np.array([0]))
        metas_exp = [gender.Unknown, education.Unknown, ssn.Unknown]

        def equal(a, b):
            # NaN != NaN, so compare unknowns explicitly.
            if (
                isinstance(a, Real)
                and isinstance(b, Real)
                and np.isnan(a)
                and np.isnan(b)
            ):
                return True
            else:
                return a == b

        self.assertTrue(all(starmap(equal, zip(metas, metas_exp))))
        x, y, metas = domain.convert([42, 13, "White", "M", "HS", "1234567"])
        assert_array_equal(x, np.array([42, 13]))
        assert_array_equal(y, np.array([0]))
        assert_array_equal(metas, np.array([0, 1, "1234567"], dtype=object))

    def test_conversion_size(self):
        # Rows must supply either just the variables or variables + metas.
        domain = Domain([age, gender, income], [race])
        self.assertRaises(ValueError, domain.convert, [0] * 3)
        self.assertRaises(ValueError, domain.convert, [0] * 5)
        domain = Domain([age, income], [race], [gender, education, ssn])
        self.assertRaises(ValueError, domain.convert, [0] * 2)
        self.assertRaises(ValueError, domain.convert, [0] * 4)
        self.assertRaises(ValueError, domain.convert, [0] * 7)
        domain.convert([0] * 3)
        domain.convert([0] * 6)

    def test_preprocessor_chaining(self):
        domain = Domain(
            [DiscreteVariable("a", values="01"), DiscreteVariable("b", values="01")],
            DiscreteVariable("y", values="01"),
        )
        # np.nan replaces the np.NaN alias removed in NumPy 2.0.
        table = Table(domain, [[0, 1], [1, np.nan]], [0, 1])
        pre1 = Continuize()(Impute()(table))
        pre2 = Table(pre1.domain, table)
        np.testing.assert_almost_equal(pre1.X, pre2.X)

    def test_unpickling_recreates_known_domains(self):
        Variable._clear_all_caches()
        domain = Domain([])
        unpickled_domain = pickle.loads(pickle.dumps(domain))
        self.assertTrue(hasattr(unpickled_domain, "_known_domains"))

    def test_different_domains_with_same_attributes_are_equal(self):
        domain1 = Domain([])
        domain2 = Domain([])
        self.assertEqual(domain1, domain2)
        var1 = ContinuousVariable("var1")
        domain1.attributes = (var1,)
        self.assertNotEqual(domain1, domain2)
        domain2.attributes = (var1,)
        self.assertEqual(domain1, domain2)
        domain1.class_vars = (var1,)
        self.assertNotEqual(domain1, domain2)
        domain2.class_vars = (var1,)
        self.assertEqual(domain1, domain2)
        domain1._metas = (var1,)
        self.assertNotEqual(domain1, domain2)
        domain2._metas = (var1,)
        self.assertEqual(domain1, domain2)

    def test_domain_conversion_is_fast_enough(self):
        attrs = [ContinuousVariable("f%i" % i) for i in range(10000)]
        class_vars = [ContinuousVariable("c%i" % i) for i in range(10)]
        metas = [ContinuousVariable("m%i" % i) for i in range(10)]
        source = Domain(attrs, class_vars, metas)
        start = time()
        cases = (
            (
                (attrs[:1000], class_vars, metas),
                list(range(1000)),
                list(range(10000, 10010)),
                list(range(-1, -11, -1)),
            ),
            (
                (metas, attrs[:1000], class_vars),
                list(range(-1, -11, -1)),
                list(range(1000)),
                list(range(10000, 10010)),
            ),
            (
                (class_vars, metas, attrs[:1000]),
                list(range(10000, 10010)),
                list(range(-1, -11, -1)),
                list(range(1000)),
            ),
        )
        for domain_args, attributes, class_vars, metas in cases:
            c1 = DomainConversion(source, Domain(*domain_args))
            self.assertEqual(c1.attributes, attributes)
            self.assertEqual(c1.class_vars, class_vars)
            self.assertEqual(c1.metas, metas)
        # Coarse performance guard: all three conversions within one second.
        self.assertLessEqual(time() - start, 1)

    def test_copy(self):
        age.number_of_decimals = 5
        attributes = (age, gender, income)
        domain = Domain(attributes, [race], [ssn])
        new_domain = domain.copy()
        # A copied domain holds copies of the variables, so mutating one
        # side must not leak into the other.
        new_domain[age].number_of_decimals = 10
        self.assertEqual(domain[age].number_of_decimals, 5)
        self.assertEqual(new_domain[age].number_of_decimals, 10)

    def test_domain_conversion_sparsity(self):
        destination = Domain(
            attributes=[
                ContinuousVariable(name="a"),
                ContinuousVariable(name="b"),
                ContinuousVariable(name="c"),
            ],
            class_vars=[DiscreteVariable("d", values=["e"])],
            metas=[StringVariable("f")],
        )
        # all dense
        source = Domain(attributes=[])
        conversion = DomainConversion(source, destination)
        self.assertFalse(conversion.sparse_X)
        self.assertFalse(conversion.sparse_Y)
        self.assertFalse(conversion.sparse_metas)
        # set destination attributes as sparse
        for a in destination.attributes:
            a.sparse = True
        source = Domain(attributes=[])
        conversion = DomainConversion(source, destination)
        self.assertTrue(conversion.sparse_X)
        self.assertFalse(conversion.sparse_Y)
        self.assertFalse(conversion.sparse_metas)
        # set all destination variable as sparse
        for a in chain(destination.variables, destination.metas):
            a.sparse = True
        source = Domain(attributes=[])
        conversion = DomainConversion(source, destination)
        self.assertTrue(conversion.sparse_X)
        self.assertTrue(conversion.sparse_Y)
        self.assertFalse(conversion.sparse_metas)
class TestDomainFilter(unittest.TestCase):
    """Tests for filter_visible() over a domain's attributes."""

    def setUp(self):
        self.iris = Table("iris")

    def test_filter_visible(self):
        attrs = self.iris.domain.attributes
        total = len(attrs)
        # Hide the first attribute, then verify it is the only one dropped.
        attrs[0].attributes.update({"hidden": True})
        visible = list(filter_visible(attrs))
        self.assertNotIn(attrs[0], visible)
        self.assertEqual(len(visible), total - 1)
if __name__ == "__main__":
unittest.main()
| [
"itertools.chain",
"pickle.dumps",
"Orange.preprocess.Impute",
"Orange.data.DiscreteVariable",
"numpy.array",
"unittest.main",
"numpy.arange",
"Orange.data.domain.filter_visible",
"Orange.data.DomainConversion",
"numpy.testing.assert_almost_equal",
"warnings.simplefilter",
"Orange.data.Domain"... | [((692, 720), 'Orange.data.Variable._clear_all_caches', 'Variable._clear_all_caches', ([], {}), '()\n', (718, 720), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((23186, 23201), 'unittest.main', 'unittest.main', ([], {}), '()\n', (23199, 23201), False, 'import unittest\n'), ((2469, 2493), 'Orange.data.Domain', 'Domain', (['attributes', 'race'], {}), '(attributes, race)\n', (2475, 2493), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((2827, 2853), 'Orange.data.Domain', 'Domain', (['attributes', '[race]'], {}), '(attributes, [race])\n', (2833, 2853), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((3185, 3203), 'Orange.data.Domain', 'Domain', (['attributes'], {}), '(attributes)\n', (3191, 3203), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((3526, 3550), 'Orange.data.Domain', 'Domain', (['attributes', 'None'], {}), '(attributes, None)\n', (3532, 3550), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((3870, 3907), 'Orange.data.Domain', 'Domain', (['attributes', '(education, race)'], {}), '(attributes, (education, race))\n', (3876, 3907), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((4252, 4289), 'Orange.data.Domain', 'Domain', (['attributes', '(education, race)'], {}), '(attributes, (education, race))\n', (4258, 4289), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, 
DomainConversion\n'), ((4303, 4342), 'Orange.data.Domain', 'Domain', (["['Gender', 0, income]"], {'source': 'd'}), "(['Gender', 0, income], source=d)\n", (4309, 4342), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((4499, 4536), 'Orange.data.Domain', 'Domain', (['attributes', '(education, race)'], {}), '(attributes, (education, race))\n', (4505, 4536), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((4550, 4591), 'Orange.data.Domain', 'Domain', (["['Gender', 0]", '"""income"""'], {'source': 'd'}), "(['Gender', 0], 'income', source=d)\n", (4556, 4591), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((4769, 4806), 'Orange.data.Domain', 'Domain', (['attributes', 'race'], {'metas': 'metas'}), '(attributes, race, metas=metas)\n', (4775, 4806), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((7596, 7631), 'Orange.data.Domain', 'Domain', (['(age, gender)'], {'metas': '(ssn,)'}), '((age, gender), metas=(ssn,))\n', (7602, 7631), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((8252, 8300), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn, race)'}), '((age, gender, income), metas=(ssn, race))\n', (8258, 8300), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((8675, 8723), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn, race)'}), '((age, gender, income), metas=(ssn, race))\n', (8681, 8723), False, 'from Orange.data import 
ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((9208, 9262), 'Orange.data.Domain', 'Domain', (['(age, gender, income, race)'], {'metas': '(ssn, race)'}), '((age, gender, income, race), metas=(ssn, race))\n', (9214, 9262), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((9457, 9505), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn, race)'}), '((age, gender, income), metas=(ssn, race))\n', (9463, 9505), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((9745, 9793), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn, race)'}), '((age, gender, income), metas=(ssn, race))\n', (9751, 9793), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((10059, 10102), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn,)'}), '((age, gender, income), metas=(ssn,))\n', (10065, 10102), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13070, 13096), 'Orange.data.Domain', 'Domain', (['[]', 'None', '[gender]'], {}), '([], None, [gender])\n', (13076, 13096), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13173, 13196), 'Orange.data.Domain', 'Domain', (['[]', 'None', '[age]'], {}), '([], None, [age])\n', (13179, 13196), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13274, 13301), 'Orange.data.Domain', 'Domain', (['[]', '[age]', '[gender]'], {}), '([], [age], 
[gender])\n', (13280, 13301), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13377, 13405), 'Orange.data.Domain', 'Domain', (['[]', '[incomeA]', '[age]'], {}), '([], [incomeA], [age])\n', (13383, 13405), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14580, 14603), 'Orange.data.Domain', 'Domain', (['[]', 'None', '[age]'], {}), '([], None, [age])\n', (14586, 14603), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14682, 14708), 'Orange.data.Domain', 'Domain', (['[]', 'None', '[gender]'], {}), '([], None, [gender])\n', (14688, 14708), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14788, 14815), 'Orange.data.Domain', 'Domain', (['[]', '[gender]', '[age]'], {}), '([], [gender], [age])\n', (14794, 14815), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14893, 14921), 'Orange.data.Domain', 'Domain', (['[]', '[race]', '[gender]'], {}), '([], [race], [gender])\n', (14899, 14921), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15963, 16011), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn, race)'}), '((age, gender, income), metas=(ssn, race))\n', (15969, 16011), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((16024, 16078), 'Orange.data.Domain', 'Domain', (['(gender, race)', 'None'], {'metas': '(age, gender, ssn)'}), '((gender, race), None, 
metas=(age, gender, ssn))\n', (16030, 16078), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((16091, 16150), 'Orange.data.Domain', 'Domain', (['(gender,)', '(race, income)'], {'metas': '(age, income, ssn)'}), '((gender,), (race, income), metas=(age, income, ssn))\n', (16097, 16150), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((16163, 16199), 'Orange.data.Domain', 'Domain', (['()'], {'metas': '(age, gender, ssn)'}), '((), metas=(age, gender, ssn))\n', (16169, 16199), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((16212, 16279), 'Orange.data.Domain', 'Domain', (['(gender,)', '(race, new_income)'], {'metas': '(age, new_income, ssn)'}), '((gender,), (race, new_income), metas=(age, new_income, ssn))\n', (16218, 16279), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((16883, 16938), 'Orange.data.Domain', 'Domain', (['[age, income]', '[race]', '[gender, education, ssn]'], {}), '([age, income], [race], [gender, education, ssn])\n', (16889, 16938), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((17808, 17845), 'Orange.data.Domain', 'Domain', (['[age, gender, income]', '[race]'], {}), '([age, gender, income], [race])\n', (17814, 17845), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((17990, 18045), 'Orange.data.Domain', 'Domain', (['[age, income]', '[race]', '[gender, education, ssn]'], {}), '([age, income], [race], [gender, education, ssn])\n', (17996, 18045), False, 'from 
Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18527, 18571), 'Orange.data.Table', 'Table', (['domain', '[[0, 1], [1, np.NaN]]', '[0, 1]'], {}), '(domain, [[0, 1], [1, np.NaN]], [0, 1])\n', (18532, 18571), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18632, 18657), 'Orange.data.Table', 'Table', (['pre1.domain', 'table'], {}), '(pre1.domain, table)\n', (18637, 18657), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18666, 18712), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['pre1.X', 'pre2.X'], {}), '(pre1.X, pre2.X)\n', (18696, 18712), True, 'import numpy as np\n'), ((18777, 18805), 'Orange.data.Variable._clear_all_caches', 'Variable._clear_all_caches', ([], {}), '()\n', (18803, 18805), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18823, 18833), 'Orange.data.Domain', 'Domain', (['[]'], {}), '([])\n', (18829, 18833), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19053, 19063), 'Orange.data.Domain', 'Domain', (['[]'], {}), '([])\n', (19059, 19063), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19082, 19092), 'Orange.data.Domain', 'Domain', (['[]'], {}), '([])\n', (19088, 19092), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19152, 19178), 'Orange.data.ContinuousVariable', 'ContinuousVariable', (['"""var1"""'], {}), "('var1')\n", (19170, 19178), 
False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19945, 19977), 'Orange.data.Domain', 'Domain', (['attrs', 'class_vars', 'metas'], {}), '(attrs, class_vars, metas)\n', (19951, 19977), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19995, 20001), 'time.time', 'time', ([], {}), '()\n', (19999, 20001), False, 'from time import time\n'), ((21089, 21122), 'Orange.data.Domain', 'Domain', (['attributes', '[race]', '[ssn]'], {}), '(attributes, [race], [ssn])\n', (21095, 21122), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21740, 21761), 'Orange.data.Domain', 'Domain', ([], {'attributes': '[]'}), '(attributes=[])\n', (21746, 21761), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21783, 21820), 'Orange.data.DomainConversion', 'DomainConversion', (['source', 'destination'], {}), '(source, destination)\n', (21799, 21820), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((22097, 22118), 'Orange.data.Domain', 'Domain', ([], {'attributes': '[]'}), '(attributes=[])\n', (22103, 22118), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((22140, 22177), 'Orange.data.DomainConversion', 'DomainConversion', (['source', 'destination'], {}), '(source, destination)\n', (22156, 22177), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((22386, 22433), 'itertools.chain', 'chain', 
(['destination.variables', 'destination.metas'], {}), '(destination.variables, destination.metas)\n', (22391, 22433), False, 'from itertools import starmap, chain\n'), ((22480, 22501), 'Orange.data.Domain', 'Domain', ([], {'attributes': '[]'}), '(attributes=[])\n', (22486, 22501), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((22523, 22560), 'Orange.data.DomainConversion', 'DomainConversion', (['source', 'destination'], {}), '(source, destination)\n', (22539, 22560), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((22787, 22800), 'Orange.data.Table', 'Table', (['"""iris"""'], {}), "('iris')\n", (22792, 22800), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((750, 780), 'Orange.data.ContinuousVariable', 'ContinuousVariable', ([], {'name': '"""AGE"""'}), "(name='AGE')\n", (768, 780), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((797, 847), 'Orange.data.DiscreteVariable', 'DiscreteVariable', ([], {'name': '"""Gender"""', 'values': "['M', 'F']"}), "(name='Gender', values=['M', 'F'])\n", (813, 847), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((865, 899), 'Orange.data.ContinuousVariable', 'ContinuousVariable', ([], {'name': '"""incomeA"""'}), "(name='incomeA')\n", (883, 899), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((916, 949), 'Orange.data.ContinuousVariable', 'ContinuousVariable', ([], {'name': '"""income"""'}), "(name='income')\n", (934, 949), False, 'from 
Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((969, 1029), 'Orange.data.DiscreteVariable', 'DiscreteVariable', ([], {'name': '"""education"""', 'values': "['GS', 'HS', 'C']"}), "(name='education', values=['GS', 'HS', 'C'])\n", (985, 1029), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((1043, 1069), 'Orange.data.StringVariable', 'StringVariable', ([], {'name': '"""SSN"""'}), "(name='SSN')\n", (1057, 1069), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((1084, 1163), 'Orange.data.DiscreteVariable', 'DiscreteVariable', ([], {'name': '"""race"""', 'values': "['White', 'Hypsanic', 'African', 'Other']"}), "(name='race', values=['White', 'Hypsanic', 'African', 'Other'])\n", (1100, 1163), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((1203, 1226), 'Orange.data.TimeVariable', 'TimeVariable', (['"""arrival"""'], {}), "('arrival')\n", (1215, 1226), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((5521, 5537), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (5529, 5537), True, 'import numpy as np\n'), ((5665, 5681), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (5673, 5681), True, 'import numpy as np\n'), ((5683, 5699), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (5691, 5699), True, 'import numpy as np\n'), ((5701, 5719), 'numpy.zeros', 'np.zeros', (['(1, 100)'], {}), '((1, 100))\n', (5709, 5719), True, 'import numpy as np\n'), ((6400, 6411), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (6408, 6411), True, 'import numpy as np\n'), ((6470, 6489), 
'numpy.zeros', 'np.zeros', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (6478, 6489), True, 'import numpy as np\n'), ((6561, 6577), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (6569, 6577), True, 'import numpy as np\n'), ((6579, 6598), 'numpy.zeros', 'np.zeros', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (6587, 6598), True, 'import numpy as np\n'), ((7687, 7707), 'Orange.data.Domain', 'Domain', (['(-1)'], {'source': 'd'}), '(-1, source=d)\n', (7693, 7707), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((7796, 7813), 'Orange.data.Domain', 'Domain', (['(age, [])'], {}), '((age, []))\n', (7802, 7813), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((7869, 7892), 'Orange.data.Domain', 'Domain', (["(age, 'income')"], {}), "((age, 'income'))\n", (7875, 7892), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((7948, 7965), 'Orange.data.Domain', 'Domain', (['([], age)'], {}), '(([], age))\n', (7954, 7965), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((8021, 8044), 'Orange.data.Domain', 'Domain', (["('income', age)"], {}), "(('income', age))\n", (8027, 8044), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((8100, 8120), 'Orange.data.Domain', 'Domain', (['(age,)', 'self'], {}), '((age,), self)\n', (8106, 8120), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((8176, 8209), 'Orange.data.Domain', 'Domain', (['(age,)'], {'metas': "('income',)"}), "((age,), metas=('income',))\n", (8182, 
8209), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((9817, 9826), 'numpy.int', 'np.int', (['(3)'], {}), '(3)\n', (9823, 9826), True, 'import numpy as np\n'), ((9832, 9842), 'numpy.int', 'np.int', (['(-3)'], {}), '(-3)\n', (9838, 9842), True, 'import numpy as np\n'), ((10187, 10197), 'numpy.int_', 'np.int_', (['(0)'], {}), '(0)\n', (10194, 10197), True, 'import numpy as np\n'), ((10268, 10278), 'numpy.int_', 'np.int_', (['(2)'], {}), '(2)\n', (10275, 10278), True, 'import numpy as np\n'), ((10344, 10355), 'numpy.int_', 'np.int_', (['(-1)'], {}), '(-1)\n', (10351, 10355), True, 'import numpy as np\n'), ((10449, 10459), 'numpy.int_', 'np.int_', (['(3)'], {}), '(3)\n', (10456, 10459), True, 'import numpy as np\n'), ((10465, 10476), 'numpy.int_', 'np.int_', (['(-2)'], {}), '(-2)\n', (10472, 10476), True, 'import numpy as np\n'), ((10682, 10718), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (10705, 10718), False, 'import warnings\n'), ((10732, 10762), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""'], {}), "('error')\n", (10753, 10762), False, 'import warnings\n'), ((10780, 10823), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn,)'}), '((age, gender, income), metas=(ssn,))\n', (10786, 10823), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((10923, 10954), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (10944, 10954), False, 'import warnings\n'), ((11044, 11072), 'Orange.data.Domain', 'Domain', (['(age,)'], {'metas': '(ssn,)'}), '((age,), metas=(ssn,))\n', (11050, 11072), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((11146, 
11170), 'Orange.data.Domain', 'Domain', (['()'], {'metas': '(ssn,)'}), '((), metas=(ssn,))\n', (11152, 11170), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((17026, 17044), 'numpy.array', 'np.array', (['[42, 13]'], {}), '([42, 13])\n', (17034, 17044), True, 'import numpy as np\n'), ((17076, 17089), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (17084, 17089), True, 'import numpy as np\n'), ((17612, 17630), 'numpy.array', 'np.array', (['[42, 13]'], {}), '([42, 13])\n', (17620, 17630), True, 'import numpy as np\n'), ((17662, 17675), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (17670, 17675), True, 'import numpy as np\n'), ((17711, 17752), 'numpy.array', 'np.array', (["[0, 1, '1234567']"], {'dtype': 'object'}), "([0, 1, '1234567'], dtype=object)\n", (17719, 17752), True, 'import numpy as np\n'), ((18465, 18499), 'Orange.data.DiscreteVariable', 'DiscreteVariable', (['"""y"""'], {'values': '"""01"""'}), "('y', values='01')\n", (18481, 18499), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18587, 18599), 'Orange.preprocess.Continuize', 'Continuize', ([], {}), '()\n', (18597, 18599), False, 'from Orange.preprocess import Continuize, Impute\n'), ((18874, 18894), 'pickle.dumps', 'pickle.dumps', (['domain'], {}), '(domain)\n', (18886, 18894), False, 'import pickle\n'), ((19736, 19765), 'Orange.data.ContinuousVariable', 'ContinuousVariable', (["('f%i' % i)"], {}), "('f%i' % i)\n", (19754, 19765), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19811, 19840), 'Orange.data.ContinuousVariable', 'ContinuousVariable', (["('c%i' % i)"], {}), "('c%i' % i)\n", (19829, 19840), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, 
TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19878, 19907), 'Orange.data.ContinuousVariable', 'ContinuousVariable', (["('m%i' % i)"], {}), "('m%i' % i)\n", (19896, 19907), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((22988, 23031), 'Orange.data.domain.filter_visible', 'filter_visible', (['self.iris.domain.attributes'], {}), '(self.iris.domain.attributes)\n', (23002, 23031), False, 'from Orange.data.domain import filter_visible\n'), ((5266, 5287), 'numpy.zeros', 'np.zeros', (['(1, n_cols)'], {}), '((1, n_cols))\n', (5274, 5287), True, 'import numpy as np\n'), ((6211, 6227), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (6219, 6227), True, 'import numpy as np\n'), ((6229, 6248), 'numpy.zeros', 'np.zeros', (['dimension'], {}), '(dimension)\n', (6237, 6248), True, 'import numpy as np\n'), ((6928, 6944), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (6936, 6944), True, 'import numpy as np\n'), ((7525, 7538), 'Orange.data.Domain', 'Domain', (['*args'], {}), '(*args)\n', (7531, 7538), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((8829, 8839), 'numpy.int_', 'np.int_', (['(0)'], {}), '(0)\n', (8836, 8839), True, 'import numpy as np\n'), ((8930, 8940), 'numpy.int_', 'np.int_', (['(2)'], {}), '(2)\n', (8937, 8940), True, 'import numpy as np\n'), ((9029, 9040), 'numpy.int_', 'np.int_', (['(-1)'], {}), '(-1)\n', (9036, 9040), True, 'import numpy as np\n'), ((9082, 9093), 'numpy.int_', 'np.int_', (['(-2)'], {}), '(-2)\n', (9089, 9093), True, 'import numpy as np\n'), ((15650, 15671), 'Orange.data.Domain', 'Domain', (['[arrival]', '[]'], {}), '([arrival], [])\n', (15656, 15671), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15712, 
15733), 'Orange.data.Domain', 'Domain', (['[]', '[arrival]'], {}), '([], [arrival])\n', (15718, 15733), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15775, 15800), 'Orange.data.Domain', 'Domain', (['[]', '[]', '[arrival]'], {}), '([], [], [arrival])\n', (15781, 15800), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((17299, 17310), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (17307, 17310), True, 'import numpy as np\n'), ((17331, 17342), 'numpy.isnan', 'np.isnan', (['b'], {}), '(b)\n', (17339, 17342), True, 'import numpy as np\n'), ((18380, 18414), 'Orange.data.DiscreteVariable', 'DiscreteVariable', (['"""a"""'], {'values': '"""01"""'}), "('a', values='01')\n", (18396, 18414), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18416, 18450), 'Orange.data.DiscreteVariable', 'DiscreteVariable', (['"""b"""'], {'values': '"""01"""'}), "('b', values='01')\n", (18432, 18450), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18600, 18608), 'Orange.preprocess.Impute', 'Impute', ([], {}), '()\n', (18606, 18608), False, 'from Orange.preprocess import Continuize, Impute\n'), ((20738, 20758), 'Orange.data.Domain', 'Domain', (['*domain_args'], {}), '(*domain_args)\n', (20744, 20758), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((20948, 20954), 'time.time', 'time', ([], {}), '()\n', (20952, 20954), False, 'from time import time\n'), ((11966, 11979), 'Orange.data.Domain', 'Domain', (['*args'], {}), '(*args)\n', (11972, 11979), False, 'from Orange.data import ContinuousVariable, 
DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12051, 12061), 'Orange.data.Domain', 'Domain', (['[]'], {}), '([])\n', (12057, 12061), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12114, 12131), 'Orange.data.Domain', 'Domain', (['[]', '[age]'], {}), '([], [age])\n', (12120, 12131), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12184, 12200), 'Orange.data.Domain', 'Domain', (['[]', 'race'], {}), '([], race)\n', (12190, 12200), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12254, 12273), 'Orange.data.Domain', 'Domain', (['[age]', 'None'], {}), '([age], None)\n', (12260, 12273), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12325, 12345), 'Orange.data.Domain', 'Domain', (['[race]', 'None'], {}), '([race], None)\n', (12331, 12345), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12397, 12422), 'Orange.data.Domain', 'Domain', (['[age, race]', 'None'], {}), '([age, race], None)\n', (12403, 12422), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12474, 12499), 'Orange.data.Domain', 'Domain', (['[race, age]', 'None'], {}), '([race, age], None)\n', (12480, 12499), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12553, 12570), 'Orange.data.Domain', 'Domain', (['[]', '[age]'], {}), '([], [age])\n', (12559, 12570), False, 
'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12626, 12644), 'Orange.data.Domain', 'Domain', (['[]', '[race]'], {}), '([], [race])\n', (12632, 12644), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12701, 12720), 'Orange.data.Domain', 'Domain', (['[age]', 'None'], {}), '([age], None)\n', (12707, 12720), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12776, 12796), 'Orange.data.Domain', 'Domain', (['[race]', 'None'], {}), '([race], None)\n', (12782, 12796), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12852, 12871), 'Orange.data.Domain', 'Domain', (['[age]', 'race'], {}), '([age], race)\n', (12858, 12871), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12927, 12946), 'Orange.data.Domain', 'Domain', (['[race]', 'age'], {}), '([race], age)\n', (12933, 12946), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13002, 13025), 'Orange.data.Domain', 'Domain', (['[]', '[race, age]'], {}), '([], [race, age])\n', (13008, 13025), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13531, 13541), 'Orange.data.Domain', 'Domain', (['[]'], {}), '([])\n', (13537, 13541), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13596, 13613), 'Orange.data.Domain', 'Domain', (['[]', '[age]'], {}), '([], 
[age])\n', (13602, 13613), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13668, 13686), 'Orange.data.Domain', 'Domain', (['[]', '[race]'], {}), '([], [race])\n', (13674, 13686), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13741, 13760), 'Orange.data.Domain', 'Domain', (['[age]', 'None'], {}), '([age], None)\n', (13747, 13760), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13815, 13835), 'Orange.data.Domain', 'Domain', (['[race]', 'None'], {}), '([race], None)\n', (13821, 13835), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13889, 13914), 'Orange.data.Domain', 'Domain', (['[age, race]', 'None'], {}), '([age, race], None)\n', (13895, 13914), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13968, 13993), 'Orange.data.Domain', 'Domain', (['[race, age]', 'None'], {}), '([race, age], None)\n', (13974, 13993), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14048, 14065), 'Orange.data.Domain', 'Domain', (['[]', '[age]'], {}), '([], [age])\n', (14054, 14065), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14124, 14142), 'Orange.data.Domain', 'Domain', (['[]', '[race]'], {}), '([], [race])\n', (14130, 14142), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14200, 
14219), 'Orange.data.Domain', 'Domain', (['[age]', 'None'], {}), '([age], None)\n', (14206, 14219), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14278, 14298), 'Orange.data.Domain', 'Domain', (['[race]', 'None'], {}), '([race], None)\n', (14284, 14298), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14356, 14375), 'Orange.data.Domain', 'Domain', (['[age]', 'race'], {}), '([age], race)\n', (14362, 14375), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14433, 14452), 'Orange.data.Domain', 'Domain', (['[race]', 'age'], {}), '([race], age)\n', (14439, 14452), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14510, 14533), 'Orange.data.Domain', 'Domain', (['[]', '[race, age]'], {}), '([], [race, age])\n', (14516, 14533), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15043, 15053), 'Orange.data.Domain', 'Domain', (['[]'], {}), '([])\n', (15049, 15053), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15102, 15119), 'Orange.data.Domain', 'Domain', (['[]', '[age]'], {}), '([], [age])\n', (15108, 15119), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15168, 15186), 'Orange.data.Domain', 'Domain', (['[]', '[race]'], {}), '([], [race])\n', (15174, 15186), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, 
Table, DomainConversion\n'), ((15235, 15256), 'Orange.data.Domain', 'Domain', (['[]', '[arrival]'], {}), '([], [arrival])\n', (15241, 15256), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15305, 15330), 'Orange.data.Domain', 'Domain', (['[]', '[]', '[arrival]'], {}), '([], [], [arrival])\n', (15311, 15330), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15379, 15400), 'Orange.data.Domain', 'Domain', (['[arrival]', '[]'], {}), '([arrival], [])\n', (15385, 15400), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15448, 15469), 'Orange.data.Domain', 'Domain', (['[]', '[arrival]'], {}), '([], [arrival])\n', (15454, 15469), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15548, 15573), 'Orange.data.Domain', 'Domain', (['[]', '[]', '[arrival]'], {}), '([], [], [arrival])\n', (15554, 15573), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21452, 21480), 'Orange.data.ContinuousVariable', 'ContinuousVariable', ([], {'name': '"""a"""'}), "(name='a')\n", (21470, 21480), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21498, 21526), 'Orange.data.ContinuousVariable', 'ContinuousVariable', ([], {'name': '"""b"""'}), "(name='b')\n", (21516, 21526), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21544, 21572), 'Orange.data.ContinuousVariable', 'ContinuousVariable', ([], {'name': 
'"""c"""'}), "(name='c')\n", (21562, 21572), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21613, 21648), 'Orange.data.DiscreteVariable', 'DiscreteVariable', (['"""d"""'], {'values': "['e']"}), "('d', values=['e'])\n", (21629, 21648), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21670, 21689), 'Orange.data.StringVariable', 'StringVariable', (['"""f"""'], {}), "('f')\n", (21684, 21689), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((6946, 6975), 'numpy.arange', 'np.arange', (['aran_min', 'aran_max'], {}), '(aran_min, aran_max)\n', (6955, 6975), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
from django.core.management import BaseCommand
from django.urls import RegexURLResolver
from djue.factories import ComponentFactory
from djue.utils import log
from djue.vue.core import SingleFileComponent
class ModuleCommand(BaseCommand):
    """Management command that generates Vue components for the named
    Django modules (optionally driven by DRF viewsets via ``--drf``)."""
    # Fix: replace the original placeholder/profane help text with a
    # meaningful description shown by ``manage.py help``.
    help = 'Generate Vue components for the given modules.'

    def add_arguments(self, parser: argparse.ArgumentParser):
        """Register one-or-more positional module names and the optional
        ``--drf`` flag on the command's argument parser."""
        parser.add_argument('modules', nargs='+', type=str)
        parser.add_argument('--drf')
def generate_components(patterns, path):
    """Walk *patterns* and emit a Vue component (plus an optional form)
    for every view callback, recursing into nested URL resolvers.

    Callbacks exposing an ``actions`` mapping (HTTP method -> action name)
    get one component per entry; plain callbacks get a single component.
    """
    for entry in patterns:
        log(f'url: {entry.regex.pattern}')
        if isinstance(entry, RegexURLResolver):
            log('URL Resolver found! Stepping down the rabbit hole...')
            generate_components(entry.url_patterns, path)
            continue

        view = entry.callback
        if hasattr(view, 'actions'):
            for http_method, action_name in view.actions.items():
                comp, form = ComponentFactory.from_junk(view, http_method,
                                                           action_name)
                comp.add_context({'route': entry.name})
                comp.write()
                form and form.write()
            continue

        comp, form = ComponentFactory.from_callback(view)
        if not comp:
            log(f'No Component was generated for: {str(entry)}')
            continue

        comp.add_context({'route': entry.name})
        comp.write()
        form and form.write()
def generate_component(component: SingleFileComponent, path: str):
    """Render *component* and write it beneath *path*, creating any
    missing parent directories first."""
    target = os.path.join(path, component.path)
    parent = os.path.dirname(target)
    os.makedirs(parent, exist_ok=True)
    with open(target, 'w+') as handle:
        log('writing to ' + target)
        handle.write(component.render())
| [
"djue.factories.ComponentFactory.from_callback",
"os.path.join",
"djue.factories.ComponentFactory.from_junk",
"os.path.dirname",
"djue.utils.log"
] | [((1589, 1623), 'os.path.join', 'os.path.join', (['path', 'component.path'], {}), '(path, component.path)\n', (1601, 1623), False, 'import os\n'), ((574, 606), 'djue.utils.log', 'log', (['f"""url: {url.regex.pattern}"""'], {}), "(f'url: {url.regex.pattern}')\n", (577, 606), False, 'from djue.utils import log\n'), ((1244, 1284), 'djue.factories.ComponentFactory.from_callback', 'ComponentFactory.from_callback', (['callback'], {}), '(callback)\n', (1274, 1284), False, 'from djue.factories import ComponentFactory\n'), ((1640, 1666), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (1655, 1666), False, 'import os\n'), ((1732, 1762), 'djue.utils.log', 'log', (["('writing to ' + file_path)"], {}), "('writing to ' + file_path)\n", (1735, 1762), False, 'from djue.utils import log\n'), ((665, 724), 'djue.utils.log', 'log', (['"""URL Resolver found! Stepping down the rabbit hole..."""'], {}), "('URL Resolver found! Stepping down the rabbit hole...')\n", (668, 724), False, 'from djue.utils import log\n'), ((965, 1017), 'djue.factories.ComponentFactory.from_junk', 'ComponentFactory.from_junk', (['callback', 'method', 'action'], {}), '(callback, method, action)\n', (991, 1017), False, 'from djue.factories import ComponentFactory\n')] |
# Generated by Django 3.1.6 on 2021-03-14 08:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for ``app_news``: creates the ``Article`` and
    ``Comment`` tables (auto-generated by Django 3.1.6)."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Article: title/text/image plus like/dislike counters and a
        # cached overall rating value, newest first by default ordering.
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('article_title', models.CharField(max_length=200, verbose_name='название статьи')),
                ('article_text', models.TextField(blank=True, verbose_name='текст статьи')),
                ('article_pub_date', models.DateTimeField(auto_now_add=True)),
                ('article_image', models.ImageField(blank=True, upload_to='news/', verbose_name='картинка статьи')),
                ('article_rating_positive', models.IntegerField(default=0, verbose_name='лайки статьи')),
                ('article_rating_negative', models.IntegerField(default=0, verbose_name='дизлайки статьи')),
                ('article_rating_value', models.IntegerField(default=0, verbose_name='рейтинг статьи')),
            ],
            options={
                'verbose_name': 'Статья',
                'verbose_name_plural': 'Статьи',
                'ordering': ('-id',),
            },
        ),
        # Comment: free-form text attached to an Article; deleting the
        # Article cascades to its comments.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author_name', models.CharField(max_length=50, verbose_name='<NAME>')),
                ('comment_text', models.TextField(blank=True, verbose_name='текст комментария')),
                ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_news.article')),
            ],
            options={
                'verbose_name': 'Комментарий',
                'verbose_name_plural': 'Комментарии',
                'ordering': ('-id',),
            },
        ),
    ]
| [
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.ImageField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((336, 429), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (352, 429), False, 'from django.db import migrations, models\n'), ((462, 526), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""название статьи"""'}), "(max_length=200, verbose_name='название статьи')\n", (478, 526), False, 'from django.db import migrations, models\n'), ((562, 619), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""текст статьи"""'}), "(blank=True, verbose_name='текст статьи')\n", (578, 619), False, 'from django.db import migrations, models\n'), ((659, 698), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (679, 698), False, 'from django.db import migrations, models\n'), ((735, 820), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'upload_to': '"""news/"""', 'verbose_name': '"""картинка статьи"""'}), "(blank=True, upload_to='news/', verbose_name='картинка статьи'\n )\n", (752, 820), False, 'from django.db import migrations, models\n'), ((862, 921), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""лайки статьи"""'}), "(default=0, verbose_name='лайки статьи')\n", (881, 921), False, 'from django.db import migrations, models\n'), ((968, 1030), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""дизлайки статьи"""'}), "(default=0, verbose_name='дизлайки статьи')\n", (987, 1030), False, 'from django.db import migrations, models\n'), ((1074, 1135), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'verbose_name': '"""рейтинг статьи"""'}), "(default=0, verbose_name='рейтинг статьи')\n", 
(1093, 1135), False, 'from django.db import migrations, models\n'), ((1434, 1527), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1450, 1527), False, 'from django.db import migrations, models\n'), ((1558, 1612), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'verbose_name': '"""<NAME>"""'}), "(max_length=50, verbose_name='<NAME>')\n", (1574, 1612), False, 'from django.db import migrations, models\n'), ((1648, 1710), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'verbose_name': '"""текст комментария"""'}), "(blank=True, verbose_name='текст комментария')\n", (1664, 1710), False, 'from django.db import migrations, models\n'), ((1741, 1831), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""app_news.article"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'app_news.article')\n", (1758, 1831), False, 'from django.db import migrations, models\n')] |
import pathlib
import re
from typing import List, Tuple, Dict
def graph_dfs(
    root: str, graph: Dict[str, List[Tuple[str, int]]]
) -> Tuple[List[str], int]:
    """Depth-first walk from *root* over a weighted adjacency list.

    Returns every node reached (with repetition, pre-order) and the total
    weighted count of contained bags: each edge contributes
    ``weight * (1 + subtree_weight)``.
    """
    reached: List[str] = []
    weight_sum = 0
    for child, count in graph[root]:
        sub_nodes, sub_weight = graph_dfs(child, graph)
        reached.append(child)
        reached.extend(sub_nodes)
        weight_sum += count * (1 + sub_weight)
    return reached, weight_sum
def build_weighted_graph(data: List[str]) -> Dict[str, List[Tuple[str, int]]]:
    """Parse luggage rules ("X bags contain N Y bags, ...") into an
    adjacency list mapping each bag colour to (colour, count) edges.

    Rules with no numbered contents ("no other bags") map to an empty list.
    """
    outer_pat = re.compile(r"([\w ]+) bags contain")
    inner_pat = re.compile(r"(?:(?:(\d+) ([\w ]+)) bags?)+")
    graph: Dict[str, List[Tuple[str, int]]] = {}
    for line in data:
        bag = outer_pat.findall(line)[0]
        graph[bag] = [
            (colour, int(count)) for count, colour in inner_pat.findall(line)
        ]
    return graph
def day7_part1(data: List[str]) -> int:
    """Count bag colours whose contents (transitively) include a
    "shiny gold" bag."""
    graph = build_weighted_graph(data)
    return sum(
        "shiny gold" in graph_dfs(colour, graph)[0] for colour in graph
    )
def day7_part2(data: List[str]) -> int:
    """Total number of bags required inside a single "shiny gold" bag."""
    _, weight = graph_dfs("shiny gold", build_weighted_graph(data))
    return weight
def get_input_data(file: str) -> List[str]:
    """Read *file* and return its lines, trailing newlines preserved."""
    with pathlib.Path(file).open() as handle:
        return handle.readlines()
if __name__ == "__main__":
    # Solve both puzzle parts against input.txt in the working directory.
    print(day7_part1(get_input_data("input.txt")))
    print(day7_part2(get_input_data("input.txt")))
| [
"pathlib.Path",
"re.compile"
] | [((612, 648), 're.compile', 're.compile', (['"""([\\\\w ]+) bags contain"""'], {}), "('([\\\\w ]+) bags contain')\n", (622, 648), False, 'import re\n'), ((670, 715), 're.compile', 're.compile', (['"""(?:(?:(\\\\d+) ([\\\\w ]+)) bags?)+"""'], {}), "('(?:(?:(\\\\d+) ([\\\\w ]+)) bags?)+')\n", (680, 715), False, 'import re\n'), ((1446, 1464), 'pathlib.Path', 'pathlib.Path', (['file'], {}), '(file)\n', (1458, 1464), False, 'import pathlib\n')] |
import pandas as pd
import urllib.request
# Linear pathway data
BASE_URL = "https://github.com/sys-bio/network-modeling-summer-school-2021/raw/main/"
BASE_DATA_URL = "%sdata/" % BASE_URL
BASE_MODULE_URL = "%ssrc/" % BASE_URL
BASE_MODEL_URL = "%smodels/" % BASE_URL
LOCAL_FILE = "local_file.txt"
def getData(csvFilename):
    """
    Download a CSV file from the repository and load it as a DataFrame.

    Parameters
    ----------
    csvFilename: str
        Name of the CSV file (w/o ".csv" extension)

    Returns
    -------
    pd.DataFrame
    """
    source_url = "%s%s.csv" % (BASE_DATA_URL, csvFilename)
    urllib.request.urlretrieve(source_url, filename=LOCAL_FILE)
    return pd.read_csv(LOCAL_FILE)
def getModule(moduleName):
    """
    Fetch a shared python module's source text from the github repository.

    Parameters
    ----------
    moduleName: str
        name of the python module in the src directory

    Returns
    -------
    str
    """
    source_url = "%s%s.py" % (BASE_MODULE_URL, moduleName)
    urllib.request.urlretrieve(source_url, filename=LOCAL_FILE)
    with open(LOCAL_FILE, "r") as fd:
        return "".join(fd.readlines())
def getModel(modelName):
    """
    Download an antimony model file and return its text.

    Parameters
    ----------
    modelName: str
        Name of the model w/o ".ant"

    Returns
    -------
    str
    """
    source_url = "%s%s.ant" % (BASE_MODEL_URL, modelName)
    urllib.request.urlretrieve(source_url, filename=LOCAL_FILE)
    with open(LOCAL_FILE, "r") as fd:
        return "".join(fd.readlines())
# Set models
# Module-level download at import time: fetch the wolf and linear-pathway
# models plus their datasets, keeping both DataFrame and ndarray forms.
WOLF_MODEL = getModel("wolf")
WOLF_DF = getData("wolf")
WOLF_ARR = WOLF_DF.to_numpy()
LINEAR_PATHWAY_DF = getData("linear_pathway")
LINEAR_PATHWAY_ARR = LINEAR_PATHWAY_DF.to_numpy()
LINEAR_PATHWAY_MODEL = getModel("linear_pathway")
| [
"pandas.read_csv"
] | [((676, 699), 'pandas.read_csv', 'pd.read_csv', (['LOCAL_FILE'], {}), '(LOCAL_FILE)\n', (687, 699), True, 'import pandas as pd\n')] |
'''
alpine-python-falcon example
In this example, we import one of our own scripts: myfunction.
We then modify Numbers() to return myfunction.myfunction()'s result when we perform a POST request.
'''
import falcon
from middleware import JSONTranslator
from myfunction import myfunction
class Hello_world(object):
    """Resource serving a static greeting at the root route."""

    def on_get(self, req, resp):
        resp.context["response"] = {"response": "Hello world"}
        resp.status = falcon.HTTP_200
class Numbers(object):
    """Expose a fixed list of numbers; POST runs myfunction over it."""

    def on_get(self, req, resp):
        resp.context["response"] = {"numbers": [1, 2, 3, 4]}
        resp.status = falcon.HTTP_200

    def on_post(self, req, resp):
        resp.status = falcon.HTTP_200
        resp.context["response"] = {"result": myfunction([1, 2, 3, 4])}
# Falcon
# WSGI application; JSONTranslator is project-local middleware —
# presumably it serializes resp.context["response"] to the JSON body
# (see middleware.py to confirm).
APP = falcon.API(middleware=[JSONTranslator()])
# Resource class instances
HELLO_WORLD = Hello_world()
NUMBERS = Numbers()
# Falcon routes
APP.add_route("/", HELLO_WORLD)
APP.add_route("/numbers", NUMBERS)
| [
"middleware.JSONTranslator",
"myfunction.myfunction"
] | [((724, 748), 'myfunction.myfunction', 'myfunction', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (734, 748), False, 'from myfunction import myfunction\n'), ((842, 858), 'middleware.JSONTranslator', 'JSONTranslator', ([], {}), '()\n', (856, 858), False, 'from middleware import JSONTranslator\n')] |
"""empty message
Revision ID: ddf3cbc60873
Revises: x
Create Date: 2020-08-22 17:27:08.449495
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``user`` table and the dependent ``address`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "user",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=80), nullable=False),
        sa.Column("email", sa.String(length=120), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_table(
        "address",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("postcode", sa.String(length=80), nullable=False),
        sa.Column("address", sa.String(length=100), nullable=False),
        # Nullable owner reference back to user.id.
        sa.Column("user_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(
            ["user_id"],
            ["user.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables in reverse dependency order (address references
    user, so it must go first)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("address")
    op.drop_table("user")
    # ### end Alembic commands ###
| [
"sqlalchemy.ForeignKeyConstraint",
"alembic.op.drop_table",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Integer",
"sqlalchemy.String"
] | [((1176, 1200), 'alembic.op.drop_table', 'op.drop_table', (['"""address"""'], {}), "('address')\n", (1189, 1200), False, 'from alembic import op\n'), ((1205, 1226), 'alembic.op.drop_table', 'op.drop_table', (['"""user"""'], {}), "('user')\n", (1218, 1226), False, 'from alembic import op\n'), ((584, 613), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (607, 613), True, 'import sqlalchemy as sa\n'), ((921, 970), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['user_id']", "['user.id']"], {}), "(['user_id'], ['user.id'])\n", (944, 970), True, 'import sqlalchemy as sa\n'), ((1015, 1044), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1038, 1044), True, 'import sqlalchemy as sa\n'), ((413, 425), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (423, 425), True, 'import sqlalchemy as sa\n'), ((470, 490), 'sqlalchemy.String', 'sa.String', ([], {'length': '(80)'}), '(length=80)\n', (479, 490), True, 'import sqlalchemy as sa\n'), ((536, 557), 'sqlalchemy.String', 'sa.String', ([], {'length': '(120)'}), '(length=120)\n', (545, 557), True, 'import sqlalchemy as sa\n'), ((685, 697), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (695, 697), True, 'import sqlalchemy as sa\n'), ((746, 766), 'sqlalchemy.String', 'sa.String', ([], {'length': '(80)'}), '(length=80)\n', (755, 766), True, 'import sqlalchemy as sa\n'), ((814, 835), 'sqlalchemy.String', 'sa.String', ([], {'length': '(100)'}), '(length=100)\n', (823, 835), True, 'import sqlalchemy as sa\n'), ((883, 895), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (893, 895), True, 'import sqlalchemy as sa\n')] |
from io import BytesIO
import pytest
from PIL import Image, ImageFile
from pyzint.zint import (
BARCODE_C25INTER,
BARCODE_CODE128,
BARCODE_EANX,
BARCODE_ISBNX,
BARCODE_QRCODE,
BARCODE_UPCA,
BARCODE_UPCE,
Zint,
)
try:
from zbar import Scanner
except ImportError:
Scanner = None
# Cannot decode barcodes:
# (BARCODE_CODE39, "ABC-1234", '', 'ZBAR_CODE39', 'CODE-39'),
# (BARCODE_PDF417, "This is a PDF417", '', 'ZBAR_PDF417', 'PDF-417'),
@pytest.mark.skipif(Scanner is None, reason="zbar is not installed")
@pytest.mark.parametrize(
    "type,value, checksum, zbar_type, exp_type",
    [
        # (zint symbology, payload, check digit zint appends,
        #  zbar config name to enable, decoded type reported by zbar)
        (BARCODE_EANX, "00090311017", "2", "ZBAR_UPCA", "UPC-A"),
        (BARCODE_EANX, "978020137962", "4", "ZBAR_EAN13", "EAN-13"),
        (BARCODE_CODE128, "ABC-abc-1234", "", "ZBAR_CODE128", "CODE-128"),
        (BARCODE_UPCE, "0123456", "5", "ZBAR_UPCE", "UPC-E"),
        (BARCODE_UPCA, "725272730706", "", "ZBAR_UPCA", "UPC-A"),
        (BARCODE_ISBNX, "0123456789", "", "ZBAR_ISBN10", "ISBN-10"),
        (BARCODE_ISBNX, "9781234567897", "", "ZBAR_ISBN13", "ISBN-13"),
        (BARCODE_C25INTER, "1234567890", "", "ZBAR_I25", "I2/5"),
        (BARCODE_QRCODE, "This is a QRCODE", "", "ZBAR_QRCODE", "QR-Code"),
    ],
)
def test_with_zbar(type, value, checksum, zbar_type, exp_type):
    """Round-trip check: render a barcode with pyzint, decode it with
    zbar, and verify the reported symbology and payload (value + check
    digit).

    NOTE(review): parameter ``type`` shadows the builtin; kept to match
    the parametrize signature.
    """
    scanner = Scanner([(zbar_type, "ZBAR_CFG_ENABLE", 1)])
    z = Zint(value, type)
    barcode = z.render_bmp()
    with BytesIO(barcode) as fp:
        # Let Pillow load the BMP stream even if it looks truncated.
        ImageFile.LOAD_TRUNCATED_IMAGES = True
        img = Image.open(fp)
        # zbar expects a single-channel (grayscale) image.
        img = img.convert("L")
        scanned = scanner.scan(img)
    if exp_type:
        assert len(scanned) == 1
        assert scanned[0].type == exp_type
        assert scanned[0].data.decode() == value + checksum
| [
"PIL.Image.open",
"io.BytesIO",
"pytest.mark.parametrize",
"pytest.mark.skipif",
"zbar.Scanner",
"pyzint.zint.Zint"
] | [((484, 551), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(Scanner is None)'], {'reason': '"""zbar is not installed"""'}), "(Scanner is None, reason='zbar is not installed')\n", (502, 551), False, 'import pytest\n'), ((553, 1209), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""type,value, checksum, zbar_type, exp_type"""', "[(BARCODE_EANX, '00090311017', '2', 'ZBAR_UPCA', 'UPC-A'), (BARCODE_EANX,\n '978020137962', '4', 'ZBAR_EAN13', 'EAN-13'), (BARCODE_CODE128,\n 'ABC-abc-1234', '', 'ZBAR_CODE128', 'CODE-128'), (BARCODE_UPCE,\n '0123456', '5', 'ZBAR_UPCE', 'UPC-E'), (BARCODE_UPCA, '725272730706',\n '', 'ZBAR_UPCA', 'UPC-A'), (BARCODE_ISBNX, '0123456789', '',\n 'ZBAR_ISBN10', 'ISBN-10'), (BARCODE_ISBNX, '9781234567897', '',\n 'ZBAR_ISBN13', 'ISBN-13'), (BARCODE_C25INTER, '1234567890', '',\n 'ZBAR_I25', 'I2/5'), (BARCODE_QRCODE, 'This is a QRCODE', '',\n 'ZBAR_QRCODE', 'QR-Code')]"], {}), "('type,value, checksum, zbar_type, exp_type', [(\n BARCODE_EANX, '00090311017', '2', 'ZBAR_UPCA', 'UPC-A'), (BARCODE_EANX,\n '978020137962', '4', 'ZBAR_EAN13', 'EAN-13'), (BARCODE_CODE128,\n 'ABC-abc-1234', '', 'ZBAR_CODE128', 'CODE-128'), (BARCODE_UPCE,\n '0123456', '5', 'ZBAR_UPCE', 'UPC-E'), (BARCODE_UPCA, '725272730706',\n '', 'ZBAR_UPCA', 'UPC-A'), (BARCODE_ISBNX, '0123456789', '',\n 'ZBAR_ISBN10', 'ISBN-10'), (BARCODE_ISBNX, '9781234567897', '',\n 'ZBAR_ISBN13', 'ISBN-13'), (BARCODE_C25INTER, '1234567890', '',\n 'ZBAR_I25', 'I2/5'), (BARCODE_QRCODE, 'This is a QRCODE', '',\n 'ZBAR_QRCODE', 'QR-Code')])\n", (576, 1209), False, 'import pytest\n'), ((1341, 1385), 'zbar.Scanner', 'Scanner', (["[(zbar_type, 'ZBAR_CFG_ENABLE', 1)]"], {}), "([(zbar_type, 'ZBAR_CFG_ENABLE', 1)])\n", (1348, 1385), False, 'from zbar import Scanner\n'), ((1394, 1411), 'pyzint.zint.Zint', 'Zint', (['value', 'type'], {}), '(value, type)\n', (1398, 1411), False, 'from pyzint.zint import BARCODE_C25INTER, BARCODE_CODE128, BARCODE_EANX, BARCODE_ISBNX, BARCODE_QRCODE, BARCODE_UPCA, 
BARCODE_UPCE, Zint\n'), ((1451, 1467), 'io.BytesIO', 'BytesIO', (['barcode'], {}), '(barcode)\n', (1458, 1467), False, 'from io import BytesIO\n'), ((1536, 1550), 'PIL.Image.open', 'Image.open', (['fp'], {}), '(fp)\n', (1546, 1550), False, 'from PIL import Image, ImageFile\n')] |
# Generated by Django 3.0.5 on 2020-04-17 22:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename ``endereco.ruaEndereco`` to ``logradouroEndereco`` (as a
    remove + add, so existing column data is dropped) and tighten field
    metadata. Auto-generated by Django 3.0.5."""

    dependencies = [
        ('cliente', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='endereco',
            name='ruaEndereco',
        ),
        # default=1 backfills existing rows during the migration only
        # (preserve_default=False removes it afterwards).
        migrations.AddField(
            model_name='endereco',
            name='logradouroEndereco',
            field=models.CharField(default=1, max_length=40, verbose_name='Endereço do Cliente'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='endereco',
            name='nomeBairro',
            field=models.CharField(max_length=45, verbose_name='Bairro'),
        ),
        migrations.AlterField(
            model_name='endereco',
            name='nroEndereco',
            field=models.IntegerField(verbose_name='Número do Cliente'),
        ),
    ]
| [
"django.db.migrations.RemoveField",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((224, 289), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""endereco"""', 'name': '"""ruaEndereco"""'}), "(model_name='endereco', name='ruaEndereco')\n", (246, 289), False, 'from django.db import migrations, models\n'), ((447, 525), 'django.db.models.CharField', 'models.CharField', ([], {'default': '(1)', 'max_length': '(40)', 'verbose_name': '"""Endereço do Cliente"""'}), "(default=1, max_length=40, verbose_name='Endereço do Cliente')\n", (463, 525), False, 'from django.db import migrations, models\n'), ((689, 743), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(45)', 'verbose_name': '"""Bairro"""'}), "(max_length=45, verbose_name='Bairro')\n", (705, 743), False, 'from django.db import migrations, models\n'), ((872, 925), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Número do Cliente"""'}), "(verbose_name='Número do Cliente')\n", (891, 925), False, 'from django.db import migrations, models\n')] |
import itertools
import logging
from evm.validation import (
validate_is_bytes,
validate_length,
validate_lte,
validate_uint256,
)
from evm.utils.numeric import (
ceil32,
)
class Memory(object):
    """
    VM Memory

    Flat, byte-addressable scratch memory backed by a ``bytearray`` that
    only ever grows, in 32-byte-aligned steps.
    """
    bytes = None
    logger = logging.getLogger('evm.vm.memory.Memory')

    def __init__(self):
        self.bytes = bytearray()

    def extend(self, start_position, size):
        """
        Grow memory so the range [start_position, start_position + size)
        is addressable, rounding the new length up to a 32-byte boundary.
        A zero ``size`` or an already-covered range is a no-op.
        """
        if size == 0:
            return

        new_size = ceil32(start_position + size)
        if new_size <= len(self):
            return

        size_to_extend = new_size - len(self)
        self.bytes.extend(itertools.repeat(0, size_to_extend))

    def __len__(self):
        return len(self.bytes)

    def write(self, start_position, size, value):
        """
        Write `value` into memory.

        Raises a validation error if the arguments are malformed or the
        target range lies outside already-allocated memory.
        """
        if size:
            validate_uint256(start_position)
            validate_uint256(size)
            validate_is_bytes(value)
            validate_length(value, length=size)
            validate_lte(start_position + size, maximum=len(self))

            if len(self.bytes) < start_position + size:
                # Defensive zero-padding (validate_lte above should make
                # this branch unreachable).  Bug fix: the original had the
                # subtraction reversed, producing a negative count so
                # itertools.repeat() yielded nothing at all.
                self.bytes.extend(itertools.repeat(
                    0,
                    start_position + size - len(self.bytes),
                ))

            for idx, v in enumerate(value):
                self.bytes[start_position + idx] = v

    def read(self, start_position, size):
        """
        Read ``size`` bytes starting at ``start_position``.
        """
        return bytes(self.bytes[start_position:start_position + size])
| [
"logging.getLogger",
"evm.utils.numeric.ceil32",
"evm.validation.validate_uint256",
"evm.validation.validate_is_bytes",
"evm.validation.validate_length",
"itertools.repeat"
] | [((279, 320), 'logging.getLogger', 'logging.getLogger', (['"""evm.vm.memory.Memory"""'], {}), "('evm.vm.memory.Memory')\n", (296, 320), False, 'import logging\n'), ((485, 514), 'evm.utils.numeric.ceil32', 'ceil32', (['(start_position + size)'], {}), '(start_position + size)\n', (491, 514), False, 'from evm.utils.numeric import ceil32\n'), ((641, 676), 'itertools.repeat', 'itertools.repeat', (['(0)', 'size_to_extend'], {}), '(0, size_to_extend)\n', (657, 676), False, 'import itertools\n'), ((872, 904), 'evm.validation.validate_uint256', 'validate_uint256', (['start_position'], {}), '(start_position)\n', (888, 904), False, 'from evm.validation import validate_is_bytes, validate_length, validate_lte, validate_uint256\n'), ((917, 939), 'evm.validation.validate_uint256', 'validate_uint256', (['size'], {}), '(size)\n', (933, 939), False, 'from evm.validation import validate_is_bytes, validate_length, validate_lte, validate_uint256\n'), ((952, 976), 'evm.validation.validate_is_bytes', 'validate_is_bytes', (['value'], {}), '(value)\n', (969, 976), False, 'from evm.validation import validate_is_bytes, validate_length, validate_lte, validate_uint256\n'), ((989, 1024), 'evm.validation.validate_length', 'validate_length', (['value'], {'length': 'size'}), '(value, length=size)\n', (1004, 1024), False, 'from evm.validation import validate_is_bytes, validate_length, validate_lte, validate_uint256\n')] |
import sys
import argparse
import subprocess
import os
import shutil
import glob
irene_list = ["blackscholes", "bodytrack", "canneal", "dedup"]
maxwell_list = ["facesim", "ferret", "fluidanimate", "freqmine"]
jen_list = ["raytrace", "streamcluster", "swaptions", "vips", "x264"]
suite = {
"Maxwell":"intrate",
"Irene":"core4fprate",
"Jen":"fprate",
"Last": "intspeed"
}
suite_benchmarks = {
"intrate" : ["xalancbmk_r", "deepsjeng_r", "leela_r", "xz_r"],
"intspeed" : ["mcf_s", "omnetpp_s","xalancbmk_s", "deepsjeng_s",
"leela_s", "xz_s"],
"fpspeed" : ["cactuBSSN_s", "lbm_s", "wrf_s", "cam4_s", "pop2_s",
"imagick_s", "nab_s"],
"fprate" : ["namd_r", "parest_r", "povray_r",
"lbm_r", "wrf_r"],
"core4fprate" : ["blender_r", "cam4_r", "imagick_r", "nab_r"]
}
parsec_path = "/home/ulsi/18742/parsec-3.0"
spec_dir = "/home/ulsi/18742/spec"
gem5_path="/home/ulsi/18742/InvisiSpec-1.0"
parser = argparse.ArgumentParser(description='Run benchmarks.')
parser.add_argument('--arm', action="store_true",
help="For running an ARM benchmark. Assumes you have ARM set up for GEM5")
parser.add_argument('--output', '-o', action="store_true",
help="Will output a compressed log file named after exe if set")
parser.add_argument('--fs', action="store_true",
help="If you want to use full system instead of syscall emulation");
parser.add_argument('--exe', default="attack_code/spectre_full.out",
help="The program you want to benchmark")
parser.add_argument('--flags', default="",
help="Debug flags you want set - use one string, comma separated")
parser.add_argument('--setupparsec', default="",
help="Usage: '--setup <Jen, <NAME>> (choose your name)'")
parser.add_argument('--setupspec', default="",
help="Usage: '--setup <Jen, Irene, Maxwell> (choose your name)'")
parser.add_argument('--runparsec', default="",
help="""Usage: '--runparsec <<NAME>> (choose your name).
Assumes the correct setup has been run already.'""")
parser.add_argument('--runspec', default="",
help="""Usage: '--runparsec <<NAME>> (choose your name).
Assumes the correct setup has been run already.'""")
parser.add_argument('--cpu', default="DerivO3CPU",
help="The CPU model for GEM5. Default iS Deriv03CPU")
parser.add_argument('--start', default="",
help="CPU ticks to start logging at")
def setup_command_line(args):
    """Build the gem5 invocation string from parsed arguments.

    Supports syscall-emulation (se) and full-system (fs) configs, X86 and
    ARM builds, and optional debug flags / compressed debug output / a
    debug start tick.  Returns the complete shell command string.
    """
    arch = "X86"
    flags = ""
    output = ""
    start = ""
    # Fix: honor the parsed --cpu argument (the original hard-coded
    # "DerivO3CPU" and ignored it); getattr keeps old Namespaces working.
    cpu = getattr(args, "cpu", "DerivO3CPU") or "DerivO3CPU"
    extra = ""
    if args.fs:
        config = "fs"
        exe = "--script="
        # Full-system mode additionally needs a kernel and disk image.
        extra = "--kernel=vmlinux --disk-image=amd64-linux.img"
    else:
        exe = "--cmd="
        config = "se"
    if args.arm:
        arch = "ARM"
    if args.exe:
        exe += args.exe
    else:
        exe += "spectre_full.out"
    if args.output:
        # Name the compressed debug log after the executable's basename.
        output = "--debug-file=" + exe.split("/")[-1].split(".")[0]+".out.gz"
    if args.flags:
        flags = "--debug-flags=%s"%(args.flags)
    if args.start:
        # Fix: the original wrote "--debug-start=%s"(args.start), which
        # calls the string (TypeError); '%' formatting was intended.
        start = "--debug-start=%s" % (args.start)
    s = """build/{arch}/gem5.opt {flags} {output} {start} \
    configs/example/{config}.py \
    {exe} --cpu-type={cpu} --caches --l1d_size=64kB --l1i_size=16kB \
    --needsTSO=0 --scheme=UnsafeBaseline {extra}""".format(
        arch=arch, config=config, exe=exe, flags=flags, output=output,
        cpu=cpu, start=start, extra=extra)
    return s
def setup_parsec():
    """Prepare PARSEC build configs: patch the stock gcc bldconf, derive a
    retpoline ("ret") config from it, and replicate ret.bldconf into every
    package's own config directory so parsecmgmt can find it."""
    os.environ["M5_PATH"] = "/home/ulsi/18742/InvisiSpec-1.0/x86-system"
    gcc_bldconf = os.path.join(parsec_path, "config", "gcc.bldconf")
    ret_bldconf = os.path.join(parsec_path, "config", "ret.bldconf")
    # Add "-case-values-threshold 1" so the case statements don't get optimized
    with open(gcc_bldconf, 'r') as file :
        filedata = file.read()
    # Replace the target string
    filedata = filedata.replace('-fprefetch-loop-arrays ',
        '-fprefetch-loop-arrays -case-values-threshold 1 ')
    # Write the file out again
    with open(gcc_bldconf, 'w') as file:
        file.write(filedata)
    # Create the ret_bldconf by copying gcc bldconf
    shutil.copyfile(gcc_bldconf, ret_bldconf)
    # Add the -mindirect-branch=thunk flag
    with open(ret_bldconf, 'r') as file :
        filedata = file.read()
    # Replace the target string
    filedata = filedata.replace('-fprefetch-loop-arrays ',
        '-fprefetch-loop-arrays \
        -mindirect-branch=thunk \
        --param case-values-threshold 1')
    # Write the file out again
    with open(ret_bldconf, 'w') as file:
        file.write(filedata)
    # Set up the config files
    pkg_dir = os.path.join(parsec_path, "pkgs")
    # For all the apps and dependencies, we need to copy local gcc.bldconf
    # files to ret.bldconf
    for dirs in os.listdir(pkg_dir):
        app_dir = os.path.join(pkg_dir, dirs)
        for apps in os.listdir(app_dir):
            cfg_dir = os.path.join(app_dir, apps)
            current_cfg = os.path.join(cfg_dir, "parsec", "gcc.bldconf")
            new_cfg = os.path.join(cfg_dir, "parsec", "ret.bldconf")
            # Not every package ships a parsec/gcc.bldconf; skip those.
            if os.path.exists(current_cfg):
                shutil.copyfile(current_cfg, new_cfg)
def build_parsec(list):
    """Build every workload twice from the PARSEC root: once with the
    retpoline config ("ret"), then with the baseline gcc config.

    NOTE(review): the parameter shadows the builtin ``list``; kept so
    existing keyword callers are unaffected.
    """
    os.chdir(parsec_path)
    for workload in list:
        for cfg in ("ret", "gcc"):
            subprocess.call(["bin/parsecmgmt", "-a", "build", "-c", cfg,
                             "-p", workload])
def setup_spec(person):
    """Mount the SPEC CPU2017 CD, install gcc-9 and SPEC, generate
    baseline and retpoline build configs, and run a runsetup pass for
    the suite assigned to *person* (see the ``suite`` mapping).

    WARNING(review): a sudo password is hard-coded below and piped to
    ``sudo -S``; acceptable only on a throwaway course VM.
    """
    # doing extremely secure things with sudo password to mount the SPEC CD
    command_line = "echo 18664 | sudo -S \
                    mount -t iso9660 /dev/cdrom /media/cdrom"
    subprocess.call(command_line, shell=True)
    # update gcc while we're doing this
    command_line = "sudo apt -y update && \
            sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && \
            sudo apt -y install gcc-9 g++-9"
    subprocess.call(command_line, shell = True)
    # Install SPEC
    command_line = "sudo /media/cdrom/install.sh -d \
                    {dir} -f".format(dir = spec_dir)
    subprocess.call(command_line, shell = True)
    # Derive two configs from the shipped example: baseline and retpoline.
    orig_bldconf = "{dir}/config/Example-gcc-linux-x86.cfg".format(
        dir = spec_dir)
    gcc_bldconf = "{dir}/config/baseline.cfg".format(dir = spec_dir)
    ret_bldconf = "{dir}/config/retpoline.cfg".format(dir = spec_dir)
    shutil.copyfile(orig_bldconf, gcc_bldconf)
    with open(gcc_bldconf, 'r') as file :
        filedata = file.read()
    # Update label
    filedata = filedata.replace("label mytest ",
        'label baseline')
    # Update number of cores to build with
    filedata = filedata.replace("build_ncpus 8", "build_ncpus 2")
    # Point the toolchain entries at the gcc-9 install.
    filedata = filedata.replace("CC = $(SPECLANG)gcc ",
        "CC = $(SPECLANG)gcc-9 ")
    filedata = filedata.replace("CXX = $(SPECLANG)g++",
        "CXX = $(SPECLANG)g++-9")
    filedata = filedata.replace("FC = $(SPECLANG)fortran",
        "FC = $(SPECLANG)fortran-9")
    filedata = filedata.replace("gcc_dir /opt/rh/devtoolset-7/root/usr",
        "gcc_dir /usr")
    # Add -case-values-threshold 1 to not optimize out indirect jumps
    # (do we want this?)
    filedata = filedata.replace("-O3 -march=native ",
        "-O3 -march=native --param case-values-threshold=1 ")
    # Write the file out again
    with open(gcc_bldconf, 'w') as file:
        file.write(filedata)
    shutil.copyfile(gcc_bldconf, ret_bldconf)
    with open(ret_bldconf, 'r') as file :
        filedata = file.read()
    # Update label and add flags
    filedata = filedata.replace("label baseline ",
        'label ret')
    filedata = filedata.replace("-O3 -march=native ",
        "-O3 -march=native -mindirect-branch=thunk ")
    # Write the file out again
    with open(ret_bldconf, 'w') as file:
        file.write(filedata)
    # Source the shrc and test the build
    subprocess.call("cd {dir} && chmod +x shrc && ./shrc \
                           && runcpu --config=baseline.cfg \
                           --action=runsetup --threads=1 \
                           --size=ref \
                           {suite}".format(suite=suite[person],
                           dir = spec_dir), shell=True)
    # Source the shrc and test the build
    subprocess.call("cd {dir} && ./shrc \
                           && runcpu --config=ret.cfg \
                           --action=runsetup --threads=1 \
                           --size=ref \
                           {suite}".format(suite=suite[person], dir=spec_dir),
                           shell=True)
def run_spec(user):
benchmarks = suite_benchmarks[suite[user]]
rate_speed = "rate"
if "speed" in suite[user]:
rate_speed = "speed"
base_dir = "run_base_ref{rs}_baseline-m64.0000".format(rs=rate_speed)
ret_dir = "run_base_ref{rs}_ret-m64.0000".format(rs=rate_speed)
for benchmark in benchmarks:
bench_top_dir = glob.glob(
"{spec_dir}/benchspec/CPU/*.{benchmark}/run".format(
spec_dir=spec_dir, benchmark=benchmark))
if not bench_top_dir:
print (
"ERROR: Could not locate benchmark top level directory for\
{}".format(benchmark))
continue
bench_top_dir = bench_top_dir[0]
bench_base_dir = os.path.join(bench_top_dir, base_dir)
bench_ret_dir = os.path.join(bench_top_dir, ret_dir)
print("Benchmark baseline: {}".format(bench_base_dir))
print("Benchmark retpoline: {}".format(bench_ret_dir))
specinvoke = subprocess.check_output(
"{spec_dir}/bin/specinvoke -n {bench_dir}/speccmds.cmd | \
grep -v '#'".format(
spec_dir=spec_dir, bench_dir=bench_base_dir), shell=True)
print(specinvoke)
specinvoke = specinvoke.split("\n")[0]
specinvoke = specinvoke.split()
idx1 = specinvoke.index(">") if ">" in specinvoke else len(specinvoke)
idx2 = specinvoke.index("<") if "<" in specinvoke else len(specinvoke)
bench_bin = specinvoke[0]
bench_opts = specinvoke[1:min(idx1, idx2)]
print("\n--- Running simulation: {} {} ---".format(
bench_bin, " ".join(bench_opts)))
# From the exp_script
run_cmd = ("{gem5_path}/build/X86/gem5.opt " +
"{gem5_path}/configs/example/se.py --output=gem5_run.log " +
"--cmd={bench_bin} --options=\'{bench_opts}\' " +
"--num-cpus=1 --mem-size=2GB " +
"--l1d_assoc=8 --l2_assoc=16 --l1i_assoc=4 " +
"--cpu-type=DerivO3CPU --needsTSO=0 --scheme=UnsafeBaseline " +
"--caches --maxinsts=2000000000 ").format(
gem5_path=gem5_path,
bench_bin=bench_bin, bench_opts=" ".join(bench_opts))
print("\n--- GEM5 run_cmd: {} ---".format(run_cmd))
try:
print("\n--- GEM5 running baseline simulation: \
{} > {} ---\n".format(
bench_base_dir, os.path.join(bench_base_dir, "gem5_run.log")))
subprocess.call("cd {} && {}".format(bench_base_dir,
run_cmd), shell=True)
except subprocess.CalledProcessError as e:
print("ERROR: GEM5 baseline simulation returned errcode {}".format(
e.returncode))
continue
# Run retpoline compiled code
specinvoke = subprocess.check_output(
"{spec_dir}/bin/specinvoke -n \
{bench_dir}/speccmds.cmd | grep -v '#'".format(
spec_dir=spec_dir, bench_dir=bench_ret_dir), shell=True)
specinvoke = specinvoke.split("\n")[0]
specinvoke = specinvoke.split()
idx1 = specinvoke.index(">") if ">" in specinvoke else len(specinvoke)
idx2 = specinvoke.index("<") if "<" in specinvoke else len(specinvoke)
bench_bin = specinvoke[0]
bench_opts = specinvoke[1:min(idx1, idx2)]
print("\n--- Running simulation: {} \
{} ---".format(bench_bin, " ".join(bench_opts)))
# From the exp_script
run_cmd = ("{gem5_path}/build/X86/gem5.opt " +
"{gem5_path}/configs/example/se.py --output=gem5_run.log " +
"--cmd={bench_bin} --options=\'{bench_opts}\' " +
"--num-cpus=1 --mem-size=2GB " +
"--l1d_assoc=8 --l2_assoc=16 --l1i_assoc=4 " +
"--cpu-type=DerivO3CPU --needsTSO=0 --scheme=UnsafeBaseline " +
"--caches --maxinsts=2000000000 ").format(
gem5_path=gem5_path,
bench_bin=bench_bin, bench_opts=" ".join(bench_opts))
print("\n--- GEM5 run_cmd: {} ---".format(run_cmd))
try:
print("\n--- GEM5 running ret simulation: {} > {} ---\n".format(
bench_ret_dir, os.path.join(bench_base_dir, "gem5_run.log")))
subprocess.call("cd {} && {}".format(bench_ret_dir,
run_cmd), shell=True)
except subprocess.CalledProcessError as e:
print("ERROR: GEM5 ret simulation returned errcode {}".format(
e.returncode))
continue
def run_parsec(list):
arch = "X86"
flags = ""
output = ""
start = ""
cpu = "DerivO3CPU"
extra = "--kernel=vmlinux --disk-image=amd64-linux.img"
if args.arm:
arch = "ARM"
if args.flags:
flags = "--debug-flags=%s"%(args.flags)
if args.start:
start = "--debug-start=%s"(args.start)
for workload in list:
# Set up and run the normal gcc version
script_name = workload + "_gcc"
if args.output:
output = "--debug-file=" + script_name +".out.gz"
s = """build/{arch}/gem5.opt {flags} {output} {start} \
configs/example/fs.py {extra} \
--script={exe} --cpu-type={cpu} --caches --l1d_size=64kB \
--l1i_size=16kB --needsTSO=0 --scheme=UnsafeBaseline \
""".format(
arch=arch, exe=script_name, flags=flags,
output=output, cpu=cpu, start=start, extra=extra)
subprocess.call(s.split())
print("\nDone running %s \n", script_name)
# Move the stats file so that running other files doesn't clobber it
old_stats_file = "/home/ulsi/18742/InvisiSpec-1.0/m5out/stats.txt"
new_stats_file = "/home/ulsi/18742/InvisiSpec-1.0/m5out/" + \
"{sname}_stats.txt".format(sname = script_name)
shutil.copyfile(old_stats_file, new_stats_file)
# Set up and run the retpoline compiled version
script_name = workload + "_ret"
if args.output:
output = "--debug-file=" + script_name +".out.gz"
s = """build/{arch}/gem5.opt {flags} {output} {start} \
configs/example/fs.py {extra} \
--script=runparsec/{exe} --cpu-type={cpu} --caches \
--l1d_size=64kB \
--l1i_size=16kB --needsTSO=0 --scheme=UnsafeBaseline \
""".format(
arch=arch, exe=script_name, flags=flags,
output=output, cpu=cpu, start=start, extra=extra)
subprocess.call(s.split())
print("\nDone running %s \n", script_name)
# Just used this to copy the gcc shell scripts so a ret version existed too
def copy_gcc_ret():
workloads = jen_list + irene_list + maxwell_list
for workload in workloads:
gcc_file = os.path.join("/home/ulsi/18742/InvisiSpec-1.0/runparsec",
workload + "_gcc")
ret_file = os.path.join("/home/ulsi/18742/InvisiSpec-1.0/runparsec",
workload + "_ret")
if (not os.path.exists(ret_file)):
shutil.copyfile(gcc_file, ret_file)
# Replace the "gcc" with "ret"
with open(ret_file, 'r') as file :
filedata = file.read()
# Replace the target string
filedata = filedata.replace('gcc', 'ret')
# Write the file out again
with open(ret_file, 'w') as file:
file.write(filedata)
#Make it executable
os.chmod(ret_file, 777)
if __name__ == "__main__":
os.environ["M5_PATH"] = "/home/ulsi/18742/InvisiSpec-1.0/x86-system"
args = parser.parse_args()
if (args.setupspec != ""):
setup_spec(args.setupspec)
if (args.runspec != ""):
run_spec(args.runspec)
if (args.setupparsec == "Jen"):
setup_parsec()
build_parsec(jen_list)
if (args.setupparsec == "Irene"):
setup_parsec()
build_parsec(irene_list)
if (args.setupparsec == "Maxwell"):
setup_parsec()
build_parsec(maxwell_list)
if (args.runparsec == "Jen"):
run_parsec(jen_list)
if (args.runparsec == "Irene"):
run_parsec(irene_list)
if (args.runparsec == "Maxwell"):
run_parsec(maxwell_list)
elif (args.runparsec == ""
and args.runspec == ""
and args.setupparsec == ""
and args.setupspec == ""):
command_line = setup_command_line(args).split()
print(command_line)
subprocess.call(command_line)
| [
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"os.chmod",
"os.chdir",
"shutil.copyfile",
"subprocess.call"
] | [((983, 1037), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run benchmarks."""'}), "(description='Run benchmarks.')\n", (1006, 1037), False, 'import argparse\n'), ((3493, 3543), 'os.path.join', 'os.path.join', (['parsec_path', '"""config"""', '"""gcc.bldconf"""'], {}), "(parsec_path, 'config', 'gcc.bldconf')\n", (3505, 3543), False, 'import os\n'), ((3562, 3612), 'os.path.join', 'os.path.join', (['parsec_path', '"""config"""', '"""ret.bldconf"""'], {}), "(parsec_path, 'config', 'ret.bldconf')\n", (3574, 3612), False, 'import os\n'), ((4078, 4119), 'shutil.copyfile', 'shutil.copyfile', (['gcc_bldconf', 'ret_bldconf'], {}), '(gcc_bldconf, ret_bldconf)\n', (4093, 4119), False, 'import shutil\n'), ((4586, 4619), 'os.path.join', 'os.path.join', (['parsec_path', '"""pkgs"""'], {}), "(parsec_path, 'pkgs')\n", (4598, 4619), False, 'import os\n'), ((4738, 4757), 'os.listdir', 'os.listdir', (['pkg_dir'], {}), '(pkg_dir)\n', (4748, 4757), False, 'import os\n'), ((5165, 5186), 'os.chdir', 'os.chdir', (['parsec_path'], {}), '(parsec_path)\n', (5173, 5186), False, 'import os\n'), ((5650, 5691), 'subprocess.call', 'subprocess.call', (['command_line'], {'shell': '(True)'}), '(command_line, shell=True)\n', (5665, 5691), False, 'import subprocess\n'), ((5915, 5956), 'subprocess.call', 'subprocess.call', (['command_line'], {'shell': '(True)'}), '(command_line, shell=True)\n', (5930, 5956), False, 'import subprocess\n'), ((6090, 6131), 'subprocess.call', 'subprocess.call', (['command_line'], {'shell': '(True)'}), '(command_line, shell=True)\n', (6105, 6131), False, 'import subprocess\n'), ((6425, 6467), 'shutil.copyfile', 'shutil.copyfile', (['orig_bldconf', 'gcc_bldconf'], {}), '(orig_bldconf, gcc_bldconf)\n', (6440, 6467), False, 'import shutil\n'), ((7668, 7709), 'shutil.copyfile', 'shutil.copyfile', (['gcc_bldconf', 'ret_bldconf'], {}), '(gcc_bldconf, ret_bldconf)\n', (7683, 7709), False, 'import shutil\n'), ((4777, 4804), 'os.path.join', 
'os.path.join', (['pkg_dir', 'dirs'], {}), '(pkg_dir, dirs)\n', (4789, 4804), False, 'import os\n'), ((4825, 4844), 'os.listdir', 'os.listdir', (['app_dir'], {}), '(app_dir)\n', (4835, 4844), False, 'import os\n'), ((5221, 5300), 'subprocess.call', 'subprocess.call', (["['bin/parsecmgmt', '-a', 'build', '-c', 'ret', '-p', workload]"], {}), "(['bin/parsecmgmt', '-a', 'build', '-c', 'ret', '-p', workload])\n", (5236, 5300), False, 'import subprocess\n'), ((5333, 5412), 'subprocess.call', 'subprocess.call', (["['bin/parsecmgmt', '-a', 'build', '-c', 'gcc', '-p', workload]"], {}), "(['bin/parsecmgmt', '-a', 'build', '-c', 'gcc', '-p', workload])\n", (5348, 5412), False, 'import subprocess\n'), ((9503, 9540), 'os.path.join', 'os.path.join', (['bench_top_dir', 'base_dir'], {}), '(bench_top_dir, base_dir)\n', (9515, 9540), False, 'import os\n'), ((9565, 9601), 'os.path.join', 'os.path.join', (['bench_top_dir', 'ret_dir'], {}), '(bench_top_dir, ret_dir)\n', (9577, 9601), False, 'import os\n'), ((14574, 14621), 'shutil.copyfile', 'shutil.copyfile', (['old_stats_file', 'new_stats_file'], {}), '(old_stats_file, new_stats_file)\n', (14589, 14621), False, 'import shutil\n'), ((15502, 15578), 'os.path.join', 'os.path.join', (['"""/home/ulsi/18742/InvisiSpec-1.0/runparsec"""', "(workload + '_gcc')"], {}), "('/home/ulsi/18742/InvisiSpec-1.0/runparsec', workload + '_gcc')\n", (15514, 15578), False, 'import os\n'), ((15630, 15706), 'os.path.join', 'os.path.join', (['"""/home/ulsi/18742/InvisiSpec-1.0/runparsec"""', "(workload + '_ret')"], {}), "('/home/ulsi/18742/InvisiSpec-1.0/runparsec', workload + '_ret')\n", (15642, 15706), False, 'import os\n'), ((4868, 4895), 'os.path.join', 'os.path.join', (['app_dir', 'apps'], {}), '(app_dir, apps)\n', (4880, 4895), False, 'import os\n'), ((4922, 4968), 'os.path.join', 'os.path.join', (['cfg_dir', '"""parsec"""', '"""gcc.bldconf"""'], {}), "(cfg_dir, 'parsec', 'gcc.bldconf')\n", (4934, 4968), False, 'import os\n'), ((4991, 5037), 
'os.path.join', 'os.path.join', (['cfg_dir', '"""parsec"""', '"""ret.bldconf"""'], {}), "(cfg_dir, 'parsec', 'ret.bldconf')\n", (5003, 5037), False, 'import os\n'), ((5053, 5080), 'os.path.exists', 'os.path.exists', (['current_cfg'], {}), '(current_cfg)\n', (5067, 5080), False, 'import os\n'), ((15755, 15779), 'os.path.exists', 'os.path.exists', (['ret_file'], {}), '(ret_file)\n', (15769, 15779), False, 'import os\n'), ((15794, 15829), 'shutil.copyfile', 'shutil.copyfile', (['gcc_file', 'ret_file'], {}), '(gcc_file, ret_file)\n', (15809, 15829), False, 'import shutil\n'), ((16221, 16244), 'os.chmod', 'os.chmod', (['ret_file', '(777)'], {}), '(ret_file, 777)\n', (16229, 16244), False, 'import os\n'), ((17222, 17251), 'subprocess.call', 'subprocess.call', (['command_line'], {}), '(command_line)\n', (17237, 17251), False, 'import subprocess\n'), ((5098, 5135), 'shutil.copyfile', 'shutil.copyfile', (['current_cfg', 'new_cfg'], {}), '(current_cfg, new_cfg)\n', (5113, 5135), False, 'import shutil\n'), ((11158, 11202), 'os.path.join', 'os.path.join', (['bench_base_dir', '"""gem5_run.log"""'], {}), "(bench_base_dir, 'gem5_run.log')\n", (11170, 11202), False, 'import os\n'), ((12926, 12970), 'os.path.join', 'os.path.join', (['bench_base_dir', '"""gem5_run.log"""'], {}), "(bench_base_dir, 'gem5_run.log')\n", (12938, 12970), False, 'import os\n')] |
from unittest.mock import patch
import dhooks_lite
from django.core.cache import cache
from django.test import TestCase
from django.test.utils import override_settings
from ..exceptions import WebhookTooManyRequests
from ..models import EveKillmail, Tracker, Webhook
from .testdata.helpers import load_killmail, load_eve_killmails, LoadTestDataMixin
from ..tasks import (
delete_stale_killmails,
run_tracker,
send_messages_to_webhook,
run_killtracker,
store_killmail,
send_test_message_to_webhook,
generate_killmail_message,
)
from ..utils import generate_invalid_pk
MODULE_PATH = "killtracker.tasks"
class TestTrackerBase(LoadTestDataMixin, TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.tracker_1 = Tracker.objects.create(
name="Low Sec Only",
exclude_high_sec=True,
exclude_null_sec=True,
exclude_w_space=True,
webhook=cls.webhook_1,
)
cls.tracker_2 = Tracker.objects.create(
name="High Sec Only",
exclude_low_sec=True,
exclude_null_sec=True,
exclude_w_space=True,
webhook=cls.webhook_1,
)
@override_settings(CELERY_ALWAYS_EAGER=True)
@patch(MODULE_PATH + ".is_esi_online")
@patch(MODULE_PATH + ".delete_stale_killmails")
@patch(MODULE_PATH + ".store_killmail")
@patch(MODULE_PATH + ".Killmail.create_from_zkb_redisq")
@patch(MODULE_PATH + ".run_tracker")
class TestRunKilltracker(TestTrackerBase):
def setUp(self) -> None:
self.webhook_1.main_queue.clear()
self.webhook_1.error_queue.clear()
cache.clear()
@staticmethod
def my_fetch_from_zkb():
for killmail_id in [10000001, 10000002, 10000003, None]:
if killmail_id:
yield load_killmail(killmail_id)
else:
yield None
@patch(MODULE_PATH + ".KILLTRACKER_STORING_KILLMAILS_ENABLED", False)
def test_normal(
self,
mock_run_tracker,
mock_create_from_zkb_redisq,
mock_store_killmail,
mock_delete_stale_killmails,
mock_is_esi_online,
):
mock_create_from_zkb_redisq.side_effect = self.my_fetch_from_zkb()
mock_is_esi_online.return_value = True
self.webhook_1.error_queue.enqueue(load_killmail(10000004).asjson())
run_killtracker.delay()
self.assertEqual(mock_run_tracker.delay.call_count, 6)
self.assertEqual(mock_store_killmail.si.call_count, 0)
self.assertFalse(mock_delete_stale_killmails.delay.called)
self.assertEqual(self.webhook_1.main_queue.size(), 1)
self.assertEqual(self.webhook_1.error_queue.size(), 0)
@patch(MODULE_PATH + ".KILLTRACKER_STORING_KILLMAILS_ENABLED", False)
def test_stop_when_esi_is_offline(
self,
mock_run_tracker,
mock_create_from_zkb_redisq,
mock_store_killmail,
mock_delete_stale_killmails,
mock_is_esi_online,
):
mock_create_from_zkb_redisq.side_effect = self.my_fetch_from_zkb()
mock_is_esi_online.return_value = False
run_killtracker.delay()
self.assertEqual(mock_run_tracker.delay.call_count, 0)
self.assertEqual(mock_store_killmail.si.call_count, 0)
self.assertFalse(mock_delete_stale_killmails.delay.called)
@patch(MODULE_PATH + ".KILLTRACKER_PURGE_KILLMAILS_AFTER_DAYS", 30)
@patch(MODULE_PATH + ".KILLTRACKER_STORING_KILLMAILS_ENABLED", True)
def test_can_store_killmails(
self,
mock_run_tracker,
mock_create_from_zkb_redisq,
mock_store_killmail,
mock_delete_stale_killmails,
mock_is_esi_online,
):
mock_create_from_zkb_redisq.side_effect = self.my_fetch_from_zkb()
mock_is_esi_online.return_value = True
run_killtracker.delay()
self.assertEqual(mock_run_tracker.delay.call_count, 6)
self.assertEqual(mock_store_killmail.si.call_count, 3)
self.assertTrue(mock_delete_stale_killmails.delay.called)
@patch(MODULE_PATH + ".send_messages_to_webhook")
@patch(MODULE_PATH + ".generate_killmail_message")
class TestRunTracker(TestTrackerBase):
def setUp(self) -> None:
cache.clear()
def test_call_enqueue_for_matching_killmail(
self, mock_enqueue_killmail_message, mock_send_messages_to_webhook
):
"""when killmail is matching, then generate new message from it"""
killmail_json = load_killmail(10000001).asjson()
run_tracker(self.tracker_1.pk, killmail_json)
self.assertTrue(mock_enqueue_killmail_message.delay.called)
self.assertFalse(mock_send_messages_to_webhook.delay.called)
def test_do_nothing_when_no_matching_killmail(
self, mock_enqueue_killmail_message, mock_send_messages_to_webhook
):
"""when killmail is not matching and webhook queue is empty,
then do nothing
"""
killmail_json = load_killmail(10000003).asjson()
run_tracker(self.tracker_1.pk, killmail_json)
self.assertFalse(mock_enqueue_killmail_message.delay.called)
self.assertFalse(mock_send_messages_to_webhook.delay.called)
def test_start_message_sending_when_queue_non_empty(
self, mock_enqueue_killmail_message, mock_send_messages_to_webhook
):
"""when killmail is not matching and webhook queue is not empty,
then start sending anyway
"""
killmail_json = load_killmail(10000003).asjson()
self.webhook_1.enqueue_message(content="test")
run_tracker(self.tracker_1.pk, killmail_json)
self.assertFalse(mock_enqueue_killmail_message.delay.called)
self.assertTrue(mock_send_messages_to_webhook.delay.called)
@patch(MODULE_PATH + ".generate_killmail_message.retry")
@patch(MODULE_PATH + ".send_messages_to_webhook")
class TestGenerateKillmailMessage(TestTrackerBase):
def setUp(self) -> None:
cache.clear()
self.retries = 0
self.killmail_json = load_killmail(10000001).asjson()
def my_retry(self, *args, **kwargs):
self.retries += 1
if self.retries > kwargs["max_retries"]:
raise kwargs["exc"]
generate_killmail_message(self.tracker_1.pk, self.killmail_json)
def test_normal(self, mock_send_messages_to_webhook, mock_retry):
"""enqueue generated killmail and start sending"""
mock_retry.side_effect = self.my_retry
generate_killmail_message(self.tracker_1.pk, self.killmail_json)
self.assertTrue(mock_send_messages_to_webhook.delay.called)
self.assertEqual(self.webhook_1.main_queue.size(), 1)
self.assertFalse(mock_retry.called)
@patch(MODULE_PATH + ".KILLTRACKER_GENERATE_MESSAGE_MAX_RETRIES", 3)
@patch(MODULE_PATH + ".Tracker.generate_killmail_message")
def test_retry_until_maximum(
self, mock_generate_killmail_message, mock_send_messages_to_webhook, mock_retry
):
"""when message generation fails,then retry until max retries is reached"""
mock_retry.side_effect = self.my_retry
mock_generate_killmail_message.side_effect = RuntimeError
with self.assertRaises(RuntimeError):
generate_killmail_message(self.tracker_1.pk, self.killmail_json)
self.assertFalse(mock_send_messages_to_webhook.delay.called)
self.assertEqual(self.webhook_1.main_queue.size(), 0)
self.assertEqual(mock_retry.call_count, 4)
@patch(MODULE_PATH + ".send_messages_to_webhook.retry")
@patch(MODULE_PATH + ".Webhook.send_message_to_webhook")
@patch(MODULE_PATH + ".logger")
class TestSendMessagesToWebhook(TestTrackerBase):
def setUp(self) -> None:
cache.clear()
def my_retry(self, *args, **kwargs):
send_messages_to_webhook(self.webhook_1.pk)
def test_one_message(self, mock_logger, mock_send_message_to_webhook, mock_retry):
"""when one mesage in queue, then send it and retry with delay"""
mock_retry.side_effect = self.my_retry
mock_send_message_to_webhook.return_value = dhooks_lite.WebhookResponse(
{}, status_code=200
)
self.webhook_1.enqueue_message(content="Test message")
send_messages_to_webhook(self.webhook_1.pk)
self.assertEqual(mock_send_message_to_webhook.call_count, 1)
self.assertEqual(self.webhook_1.main_queue.size(), 0)
self.assertEqual(self.webhook_1.error_queue.size(), 0)
self.assertEqual(mock_retry.call_count, 1)
_, kwargs = mock_retry.call_args
self.assertEqual(kwargs["countdown"], 2)
self.assertFalse(mock_logger.error.called)
self.assertFalse(mock_logger.warning.called)
def test_three_message(self, mock_logger, mock_send_message_to_webhook, mock_retry):
"""when three mesages in queue, then sends them and returns 3"""
mock_retry.side_effect = self.my_retry
mock_send_message_to_webhook.return_value = dhooks_lite.WebhookResponse(
{}, status_code=200
)
self.webhook_1.enqueue_message(content="Test message")
self.webhook_1.enqueue_message(content="Test message")
self.webhook_1.enqueue_message(content="Test message")
send_messages_to_webhook(self.webhook_1.pk)
self.assertEqual(mock_send_message_to_webhook.call_count, 3)
self.assertEqual(self.webhook_1.main_queue.size(), 0)
self.assertEqual(self.webhook_1.error_queue.size(), 0)
self.assertTrue(mock_retry.call_count, 4)
def test_no_messages(self, mock_logger, mock_send_message_to_webhook, mock_retry):
"""when no mesages in queue, then do nothing"""
mock_retry.side_effect = self.my_retry
mock_send_message_to_webhook.return_value = dhooks_lite.WebhookResponse(
{}, status_code=200
)
send_messages_to_webhook(self.webhook_1.pk)
self.assertEqual(mock_send_message_to_webhook.call_count, 0)
self.assertEqual(self.webhook_1.main_queue.size(), 0)
self.assertEqual(self.webhook_1.error_queue.size(), 0)
self.assertEqual(mock_retry.call_count, 0)
self.assertFalse(mock_logger.error.called)
self.assertFalse(mock_logger.warning.called)
def test_failed_message(
self, mock_logger, mock_send_message_to_webhook, mock_retry
):
"""when message sending failed,
then put message in error queue and log warning
"""
mock_retry.side_effect = self.my_retry
mock_send_message_to_webhook.return_value = dhooks_lite.WebhookResponse(
{}, status_code=404
)
self.webhook_1.enqueue_message(content="Test message")
send_messages_to_webhook(self.webhook_1.pk)
self.assertEqual(mock_send_message_to_webhook.call_count, 1)
self.assertEqual(self.webhook_1.main_queue.size(), 0)
self.assertEqual(self.webhook_1.error_queue.size(), 1)
self.assertTrue(mock_logger.warning.called)
def test_abort_on_too_many_requests(
self, mock_logger, mock_send_message_to_webhook, mock_retry
):
"""
when WebhookTooManyRequests exception is raised
then message is re-queued and retry once
"""
mock_retry.side_effect = self.my_retry
mock_send_message_to_webhook.side_effect = WebhookTooManyRequests(10)
self.webhook_1.enqueue_message(content="Test message")
send_messages_to_webhook(self.webhook_1.pk)
self.assertEqual(mock_send_message_to_webhook.call_count, 1)
self.assertEqual(self.webhook_1.main_queue.size(), 1)
self.assertFalse(mock_retry.called)
def test_log_info_if_not_enabled(
self, mock_logger, mock_send_message_to_webhook, mock_retry
):
my_webhook = Webhook.objects.create(
name="disabled", url="dummy-url-2", is_enabled=False
)
send_messages_to_webhook(my_webhook.pk)
self.assertFalse(mock_send_message_to_webhook.called)
self.assertTrue(mock_logger.info.called)
@patch(MODULE_PATH + ".logger")
class TestStoreKillmail(TestTrackerBase):
def test_normal(self, mock_logger):
killmail = load_killmail(10000001)
killmail_json = killmail.asjson()
store_killmail(killmail_json)
self.assertTrue(EveKillmail.objects.filter(id=10000001).exists())
self.assertFalse(mock_logger.warning.called)
def test_already_exists(self, mock_logger):
load_eve_killmails([10000001])
killmail = load_killmail(10000001)
killmail_json = killmail.asjson()
store_killmail(killmail_json)
self.assertTrue(mock_logger.warning.called)
@override_settings(CELERY_ALWAYS_EAGER=True)
@patch("killtracker.models.dhooks_lite.Webhook.execute")
@patch(MODULE_PATH + ".logger")
class TestSendTestKillmailsToWebhook(TestTrackerBase):
def setUp(self) -> None:
self.webhook_1.main_queue.clear()
def test_log_warning_when_pk_is_invalid(self, mock_logger, mock_execute):
mock_execute.return_value = dhooks_lite.WebhookResponse(dict(), status_code=200)
send_test_message_to_webhook(generate_invalid_pk(Webhook))
self.assertFalse(mock_execute.called)
self.assertTrue(mock_logger.error.called)
def test_run_normal(self, mock_logger, mock_execute):
mock_execute.return_value = dhooks_lite.WebhookResponse(dict(), status_code=200)
send_test_message_to_webhook(self.webhook_1.pk)
self.assertTrue(mock_execute.called)
self.assertFalse(mock_logger.error.called)
@patch(MODULE_PATH + ".EveKillmail.objects.delete_stale")
class TestDeleteStaleKillmails(TestTrackerBase):
def test_normal(self, mock_delete_stale):
mock_delete_stale.return_value = (1, {"killtracker.EveKillmail": 1})
delete_stale_killmails()
self.assertTrue(mock_delete_stale.called)
| [
"django.core.cache.cache.clear",
"django.test.utils.override_settings",
"unittest.mock.patch",
"dhooks_lite.WebhookResponse"
] | [((1222, 1265), 'django.test.utils.override_settings', 'override_settings', ([], {'CELERY_ALWAYS_EAGER': '(True)'}), '(CELERY_ALWAYS_EAGER=True)\n', (1239, 1265), False, 'from django.test.utils import override_settings\n'), ((1267, 1304), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.is_esi_online')"], {}), "(MODULE_PATH + '.is_esi_online')\n", (1272, 1304), False, 'from unittest.mock import patch\n'), ((1306, 1352), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.delete_stale_killmails')"], {}), "(MODULE_PATH + '.delete_stale_killmails')\n", (1311, 1352), False, 'from unittest.mock import patch\n'), ((1354, 1392), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.store_killmail')"], {}), "(MODULE_PATH + '.store_killmail')\n", (1359, 1392), False, 'from unittest.mock import patch\n'), ((1394, 1449), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.Killmail.create_from_zkb_redisq')"], {}), "(MODULE_PATH + '.Killmail.create_from_zkb_redisq')\n", (1399, 1449), False, 'from unittest.mock import patch\n'), ((1451, 1486), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.run_tracker')"], {}), "(MODULE_PATH + '.run_tracker')\n", (1456, 1486), False, 'from unittest.mock import patch\n'), ((4074, 4122), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.send_messages_to_webhook')"], {}), "(MODULE_PATH + '.send_messages_to_webhook')\n", (4079, 4122), False, 'from unittest.mock import patch\n'), ((4124, 4173), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.generate_killmail_message')"], {}), "(MODULE_PATH + '.generate_killmail_message')\n", (4129, 4173), False, 'from unittest.mock import patch\n'), ((5772, 5827), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.generate_killmail_message.retry')"], {}), "(MODULE_PATH + '.generate_killmail_message.retry')\n", (5777, 5827), False, 'from unittest.mock import patch\n'), ((5829, 5877), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.send_messages_to_webhook')"], {}), "(MODULE_PATH + 
'.send_messages_to_webhook')\n", (5834, 5877), False, 'from unittest.mock import patch\n'), ((7489, 7543), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.send_messages_to_webhook.retry')"], {}), "(MODULE_PATH + '.send_messages_to_webhook.retry')\n", (7494, 7543), False, 'from unittest.mock import patch\n'), ((7545, 7600), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.Webhook.send_message_to_webhook')"], {}), "(MODULE_PATH + '.Webhook.send_message_to_webhook')\n", (7550, 7600), False, 'from unittest.mock import patch\n'), ((7602, 7632), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.logger')"], {}), "(MODULE_PATH + '.logger')\n", (7607, 7632), False, 'from unittest.mock import patch\n'), ((12059, 12089), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.logger')"], {}), "(MODULE_PATH + '.logger')\n", (12064, 12089), False, 'from unittest.mock import patch\n'), ((12690, 12733), 'django.test.utils.override_settings', 'override_settings', ([], {'CELERY_ALWAYS_EAGER': '(True)'}), '(CELERY_ALWAYS_EAGER=True)\n', (12707, 12733), False, 'from django.test.utils import override_settings\n'), ((12735, 12790), 'unittest.mock.patch', 'patch', (['"""killtracker.models.dhooks_lite.Webhook.execute"""'], {}), "('killtracker.models.dhooks_lite.Webhook.execute')\n", (12740, 12790), False, 'from unittest.mock import patch\n'), ((12792, 12822), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.logger')"], {}), "(MODULE_PATH + '.logger')\n", (12797, 12822), False, 'from unittest.mock import patch\n'), ((13587, 13643), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.EveKillmail.objects.delete_stale')"], {}), "(MODULE_PATH + '.EveKillmail.objects.delete_stale')\n", (13592, 13643), False, 'from unittest.mock import patch\n'), ((1907, 1975), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.KILLTRACKER_STORING_KILLMAILS_ENABLED')", '(False)'], {}), "(MODULE_PATH + '.KILLTRACKER_STORING_KILLMAILS_ENABLED', False)\n", (1912, 1975), False, 'from unittest.mock import 
patch\n'), ((2731, 2799), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.KILLTRACKER_STORING_KILLMAILS_ENABLED')", '(False)'], {}), "(MODULE_PATH + '.KILLTRACKER_STORING_KILLMAILS_ENABLED', False)\n", (2736, 2799), False, 'from unittest.mock import patch\n'), ((3372, 3438), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.KILLTRACKER_PURGE_KILLMAILS_AFTER_DAYS')", '(30)'], {}), "(MODULE_PATH + '.KILLTRACKER_PURGE_KILLMAILS_AFTER_DAYS', 30)\n", (3377, 3438), False, 'from unittest.mock import patch\n'), ((3444, 3511), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.KILLTRACKER_STORING_KILLMAILS_ENABLED')", '(True)'], {}), "(MODULE_PATH + '.KILLTRACKER_STORING_KILLMAILS_ENABLED', True)\n", (3449, 3511), False, 'from unittest.mock import patch\n'), ((6722, 6789), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.KILLTRACKER_GENERATE_MESSAGE_MAX_RETRIES')", '(3)'], {}), "(MODULE_PATH + '.KILLTRACKER_GENERATE_MESSAGE_MAX_RETRIES', 3)\n", (6727, 6789), False, 'from unittest.mock import patch\n'), ((6795, 6852), 'unittest.mock.patch', 'patch', (["(MODULE_PATH + '.Tracker.generate_killmail_message')"], {}), "(MODULE_PATH + '.Tracker.generate_killmail_message')\n", (6800, 6852), False, 'from unittest.mock import patch\n'), ((1652, 1665), 'django.core.cache.cache.clear', 'cache.clear', ([], {}), '()\n', (1663, 1665), False, 'from django.core.cache import cache\n'), ((4250, 4263), 'django.core.cache.cache.clear', 'cache.clear', ([], {}), '()\n', (4261, 4263), False, 'from django.core.cache import cache\n'), ((5967, 5980), 'django.core.cache.cache.clear', 'cache.clear', ([], {}), '()\n', (5978, 5980), False, 'from django.core.cache import cache\n'), ((7720, 7733), 'django.core.cache.cache.clear', 'cache.clear', ([], {}), '()\n', (7731, 7733), False, 'from django.core.cache import cache\n'), ((8089, 8137), 'dhooks_lite.WebhookResponse', 'dhooks_lite.WebhookResponse', (['{}'], {'status_code': '(200)'}), '({}, status_code=200)\n', (8116, 8137), False, 'import 
dhooks_lite\n'), ((8978, 9026), 'dhooks_lite.WebhookResponse', 'dhooks_lite.WebhookResponse', (['{}'], {'status_code': '(200)'}), '({}, status_code=200)\n', (9005, 9026), False, 'import dhooks_lite\n'), ((9779, 9827), 'dhooks_lite.WebhookResponse', 'dhooks_lite.WebhookResponse', (['{}'], {'status_code': '(200)'}), '({}, status_code=200)\n', (9806, 9827), False, 'import dhooks_lite\n'), ((10565, 10613), 'dhooks_lite.WebhookResponse', 'dhooks_lite.WebhookResponse', (['{}'], {'status_code': '(404)'}), '({}, status_code=404)\n', (10592, 10613), False, 'import dhooks_lite\n')] |
#!/usr/bin/env python
# PostgreSQL doesn't allow ADDing columns to a table in a particular position -
# because it doesn't really make sense in SQL -
# but COPY from CSV **requires** the columns in a specific order
# as the fields aren't specified in the source CSV file.
# so specify /ALL/ of the fields to import.
# This code assumes a database with exclusive access to EBatch / Ppatient /
# PpatientRawdata tables, where the latest values from each sequence have been
# committed as entries in the database. It works by trying to precompute the
# next values that will come off each sequence, then doing a direct load of
# the data as a CSV file.
# If the database state doesn't support this, you could workaround with:
# ./fix_prescr_sequences.sh
# Old workaround:
# irb> eb=EBatch.new; eb.save(validate: false)
# irb> pprd=Pseudo::PpatientRawdata.new; pprd.save(validate:false)
# irb> pp=Pseudo::Ppatient.new; pp.save(validate:false)
# $ ./create_prescr.py 2015 04 a
# $ ./load_tables.sh 2015 04 a
# irb> eb.destroy; pprd.destroy; pp.destroy
# We could make this work slightly more expensively but more reliably, by actually
# pulling a single value off each sequence below.
# use Python 3 print
from __future__ import print_function
import sys
import calendar
import psycopg2
import csv
import base64
import hashlib
import getpass
import os.path
import os
# ----------------------------------------------------------------------------------
def to_asciihex(b):
"""
Convert raw binary data to a sequence of ASCII-encoded hex bytes,
suitable for import via COPY .. CSV into a PostgreSQL bytea field.
"""
return '\\x'+''.join('%.2x' % ord(x) for x in b)
# ----------------------------------------------------------------------------------
# Get year and month parameters from command line
if len(sys.argv)!=4:
print('Usage: %s <year> <month> <part>' % sys.argv[0])
print(""" where <part> is a or b - meaning choose rows with
pseudo_id1 starting with 0-7 (a) or 8-f (b).
This is to split the CSV file into two equal (manageable) chunks
due to limited memory on the db1 server""")
exit(1)
try:
year = int(sys.argv[1])
month = int(sys.argv[2])
month2s = '%.2d' % month # string version with leading 0 if needed
part = sys.argv[3]
if part=='a':
partmatch = '01234567'
elif part=='b':
partmatch = '89abcdef'
else:
raise # part must be a or b
except:
print('Parameter error')
sys.exit(1)
DB=os.environ['DB']
DBA=os.environ['DBA']
csvpath = '/home/pgsql_recovery/source_data/static'
# Initialise empty cache for rawdata records - refreshed on per-month basis.
# key = (rawdata,decrypt_key) [i.e. (encrypted_demog,key_bundle)]
# value = ppatient_rawdataid
rawdata_cache = {}
rawdata_cache_size = 0
max_rawdata_cache_size = 30E6
password = os.environ.get('PGPASSWORD') or getpass.getpass('(create_prescr.py) DB password: ')
conn = psycopg2.connect('dbname=%s user=%s password=%s' % (DB,DBA,password))
cur = conn.cursor()
# get last of: ppatients(id), ppatient_rawdata(ppatient_rawdataid), e_batch(e_batchid)
cur.execute('SELECT MAX(id) FROM ppatients')
last_ppatients_id = cur.fetchone()[0] or 0 # return 0 if None (no rows)
cur.execute('SELECT MAX(ppatient_rawdataid) FROM ppatient_rawdata')
last_ppatient_rawdataid = cur.fetchone()[0] or 0
cur.execute('SELECT MAX(e_batchid) FROM e_batch')
last_e_batchid = cur.fetchone()[0] or 0
print('Last: ppatients(id) = %d, rawdataid = %d, e_batchid = %d' % (last_ppatients_id,last_ppatient_rawdataid,last_e_batchid))
# ----------------------------------------------------------------------------------
# Use the last e_batchid value from the e_batch table - this is the value for this month's load.
# Increment in part a only.
e_batchid = last_e_batchid
if part=='a':
e_batchid += 1
ppatients_f = open('ppatients_%d%s%s.csv' % (year,month2s,part), 'a')
ppatients_f.truncate(0)
ppatient_rawdata_f = open('ppatient_rawdata_%d%s%s.csv' % (year,month2s,part), 'a')
ppatient_rawdata_f.truncate(0)
prescription_data_f = open('prescription_data_%d%s%s.csv' % (year,month2s,part), 'a')
prescription_data_f.truncate(0)
csv_filename = os.path.join(csvpath, 'PHE_%d%s_pseudonymised.csv' % (year,month2s))
with open(csv_filename, 'r') as csvfile:
preader = csv.reader(csvfile, delimiter=',', quotechar='"')
# prescription_data_writer = csv.writer(prescription_data_f)
pseudonymisation_keyid = 1 # Hard-coded for PSPRESCRIPTION data
# first N data rows, skipping 2 header rows
rown = 0
for row in preader:
rown += 1
if rown<=2: continue
# if rown>=1000003: break # For testing: only load first 1,000,000 rows
data = row[0].split()
pseudo_id1 = data[0]
if pseudo_id1[0] not in partmatch:
# first character must match corresponding part
continue
key_bundle = to_asciihex(base64.b64decode(data[1][1:-1])) # strip () before decoding
encrypted_demog = to_asciihex(base64.b64decode(data[2]))
# Binary digest = 20 bytes.
# [Python] 20-byte string takes 52 bytes
# 10-byte string takes 47 bytes.
rawdata_key = hashlib.sha1(encrypted_demog+key_bundle).digest()
if rawdata_key in rawdata_cache:
rawdataid = rawdata_cache[rawdata_key]
# print('row %d: using rawdata_cache: %d' % (rown,rawdataid))
else:
last_ppatient_rawdataid += 1
rawdataid = last_ppatient_rawdataid
#print('row %d: not cached, using: %d' % (rown,rawdataid))
# rawdata bytea,decrypt_key bytea
# COPY ppatient_rawdata (rawdata,decrypt_key)
# FROM 'input.csv' CSV;
print('"%s","%s"' % (encrypted_demog,key_bundle), file=ppatient_rawdata_f)
# Update cache, or reset if limit reached.
# Each SHA1'ed key entry uses 160 bits = 20 bytes, but the python object size is 52 bytes.
# int takes 24 bytes, so total for hash entry is 79 bytes.
# So 10 million entries ~= 790Mb.
rawdata_cache_size += 1
if rawdata_cache_size > max_rawdata_cache_size:
print('Cache size limit (%d) reached - resetting cache.' % rawdata_cache_size)
rawdata_cache = {}
rawdata_cache_size = 0
rawdata_cache[rawdata_key] = rawdataid
# -- don't COPY id field and don't return it - use a counter here.
# COPY ppatients (e_batchid,ppatient_rawdata_id,type,pseudo_id1,pseudo_id2,pseudonymisation_keyid)
# FROM 'input.csv' CSV;
print('%d,%d,"Pseudo::Prescription","%s",,%d' % (e_batchid,rawdataid,pseudo_id1,pseudonymisation_keyid), file=ppatients_f)
last_ppatients_id += 1
# Fill in 5 deleted columns, removed in 2018-07 and later extracts:
# PCO_NAME PRACTICE_NAME PRESC_QUANTITY CHEMICAL_SUBSTANCE_BNF CHEMICAL_SUBSTANCE_BNF_DESCR
# Change row to row[0:5] + ['pco_name'] + row[5:6] + ['practice_name'] + row[6:7] + ['presc_quantity'] + row[7:21] + ['chemical_substance_bnf', 'chemical_substance_bnf_descr'] + row[21:]
if len(row) == 24:
row = row[0:5] + [''] + row[5:6] + [''] + row[6:7] + [''] + row[7:21] + ['', ''] + row[21:]
# prescription data -
# basic data cleaning based on errors from PostgreSQL's COPY importer
# - note that "" fields are already implicitly converted to <blank> from csv.reader
# i.e. acceptable for COPY (e.g. for pat_age: integer field)
if '.' in row[12]:
# must be integer pay_quantity - round down
row[12] = int(float(row[12]))
# Add additional dummy columns for PF_ID,AMPP_ID,VMPP_ID (not included in first 4 months' data)
if len(row) == 19: row += ['', '', '']
# add additional dummy columns for SEX,FORM_TYPE,CHEMICAL_SUBSTANCE_BNF,
# CHEMICAL_SUBSTANCE_BNF_DESCR,VMP_ID,VMP_NAME,VTM_NAME (not included in first 11 months' data,
# but included in 2018-07 refresh)
if len(row) == 22: row += ['', '', '', '', '', '', '']
# quote text fields, i.e. not integer
# TODO: Move to using a proper CSV library instead of manual quoting
for f in range(29):
if f not in (10,15,19,20,21,26): # ITEM_NUMBER,PAT_AGE,PF_ID,AMPP_ID,VMPP_ID,VMP_ID
row[f] = '"%s"' % row[f]
# remove DEMOG field - leave till last to avoid index confusion
del row[0]
# remove quotes from PRESC_DATE field (DATE type) - a blank field will be stored as NULL.
row[0] = row[0].replace('"','')
# COPY prescription_data
# (ppatient_id,presc_date,part_month,presc_postcode,pco_code,pco_name,practice_code,practice_name,
# nic,presc_quantity,item_number,unit_of_measure,pay_quantity,drug_paid,bnf_code,
# pat_age,pf_exempt_cat,etp_exempt_cat,etp_indicator,pf_id,ampp_id,vmpp_id,
# sex,form_type,chemical_substance_bnf,chemical_substance_bnf_descr,vmp_id,vmp_name,vtm_name)
# FROM 'input.csv' CSV;
print(','.join(['%d' % last_ppatients_id] + row), file=prescription_data_f)
# prescription_data_writer.writerow(['%d' % last_ppatients_id] + row)
if (rown%1000)==0:
sys.stdout.write('%d: %d, %d\r' % (rown,last_ppatients_id,last_ppatient_rawdataid))
sys.stdout.flush
# end of row loop
ppatients_f.close()
ppatient_rawdata_f.close()
prescription_data_f.close()
# Part a only - create an e_batch record for this month
if part=='a':
e_batch_f = open('e_batch_%d%s.csv' % (year,month2s), 'w')
# COPY e_batch
# (e_type,provider,media,original_filename,cleaned_filename,numberofrecords,
# date_reference1,date_reference2,e_batchid_traced,comments,digest,
# lock_version,inprogress,registryid,on_hold)
month = int(month)
monthend = calendar.monthrange(year,month)[1]
dateref1 = '%d-%.2d-01' % (year,month)
dateref2 = '%d-%.2d-%.2d' % (year,month,monthend)
num_rows = rown-3 # 2 header rows from 0
filename = os.path.basename(csv_filename)
print(\
""""PSPRESCRIPTION","T145Z","Hard Disk","%s","%s",%d,%s,%s,0,"Month %d batch","Not computed",0,"","X25",0""" \
% (filename,filename,num_rows,dateref1,dateref2,month), file=e_batch_f)
e_batch_f.close()
print('\nFinal cache size = %d' % (len(rawdata_cache)))
# ----------------------------------------------------------------------------------
"""
DEMOG,PRESC_DATE,PART_MONTH,PRESC_POSTCODE,PCO_CODE,PCO_NAME,PRACTICE_CODE,PRACTICE_NAME,NIC,PRESC_QUANTITY,ITEM_NUMBER,UNIT_OF_MEASURE,PAY_QUANTITY,DRUG_PAID,BNF_CODE,PAT_AGE,PF_EXEMPT_CAT,ETP_EXEMPT_CAT,ETP_INDICATOR
0 pseudoid text,
1 presc_date text,
2 part_month text,
3 presc_postcode text,
4 pco_code text,
5 pco_name text,
6 practice_code text,
7 practice_name text,
8 nic text,
9 presc_quantity text,
10 item_number integer,
11 unit_of_measure text,
12 pay_quantity integer,
13 drug_paid text,
14 bnf_code text,
15 pat_age integer,
16 pf_exempt_cat text,
17 etp_exempt_cat text,
18 etp_indicator text
# e_batchid | 1 -- autoincrement primary key
# e_type | PSPRESCRIPTION
# provider | T145Z
# media | Hard Disk -- options in era are: 'Email', 'Floppy Disk', 'CD/DVD', 'Others'
# original_filename | PHE_201504_pseudonymised_first10000.csv
# cleaned_filename | PHE_201504_pseudonymised_first10000.csv
# numberofrecords | 10000
# date_reference1 | 2015-04-01 00:00:00 -- beginning of month
# date_reference2 | 2015-04-30 00:00:00 -- end of month
# e_batchid_traced |
# comments | month 4 batch
# digest | not computed
# lock_version | 0
# inprogress |
# registryid | X25
# on_hold | 0
"""
| [
"psycopg2.connect",
"os.path.join",
"os.environ.get",
"getpass.getpass",
"base64.b64decode",
"calendar.monthrange",
"os.path.basename",
"sys.exit",
"hashlib.sha1",
"csv.reader",
"sys.stdout.write"
] | [((2911, 2982), 'psycopg2.connect', 'psycopg2.connect', (["('dbname=%s user=%s password=%s' % (DB, DBA, password))"], {}), "('dbname=%s user=%s password=%s' % (DB, DBA, password))\n", (2927, 2982), False, 'import psycopg2\n'), ((4157, 4226), 'os.path.join', 'os.path.join', (['csvpath', "('PHE_%d%s_pseudonymised.csv' % (year, month2s))"], {}), "(csvpath, 'PHE_%d%s_pseudonymised.csv' % (year, month2s))\n", (4169, 4226), False, 'import os\n'), ((2820, 2848), 'os.environ.get', 'os.environ.get', (['"""PGPASSWORD"""'], {}), "('PGPASSWORD')\n", (2834, 2848), False, 'import os\n'), ((2852, 2903), 'getpass.getpass', 'getpass.getpass', (['"""(create_prescr.py) DB password: """'], {}), "('(create_prescr.py) DB password: ')\n", (2867, 2903), False, 'import getpass\n'), ((4280, 4329), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(csvfile, delimiter=\',\', quotechar=\'"\')\n', (4290, 4329), False, 'import csv\n'), ((9673, 9703), 'os.path.basename', 'os.path.basename', (['csv_filename'], {}), '(csv_filename)\n', (9689, 9703), False, 'import os\n'), ((2454, 2465), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2462, 2465), False, 'import sys\n'), ((9487, 9519), 'calendar.monthrange', 'calendar.monthrange', (['year', 'month'], {}), '(year, month)\n', (9506, 9519), False, 'import calendar\n'), ((4843, 4874), 'base64.b64decode', 'base64.b64decode', (['data[1][1:-1]'], {}), '(data[1][1:-1])\n', (4859, 4874), False, 'import base64\n'), ((4939, 4964), 'base64.b64decode', 'base64.b64decode', (['data[2]'], {}), '(data[2])\n', (4955, 4964), False, 'import base64\n'), ((8898, 8987), 'sys.stdout.write', 'sys.stdout.write', (["('%d: %d, %d\\r' % (rown, last_ppatients_id, last_ppatient_rawdataid))"], {}), "('%d: %d, %d\\r' % (rown, last_ppatients_id,\n last_ppatient_rawdataid))\n", (8914, 8987), False, 'import sys\n'), ((5099, 5141), 'hashlib.sha1', 'hashlib.sha1', (['(encrypted_demog + key_bundle)'], {}), '(encrypted_demog + 
key_bundle)\n', (5111, 5141), False, 'import hashlib\n')] |
# __init__.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Public API for flowsa
For standard dataframe formats, see https://github.com/USEPA/flowsa/tree/master/format%20specs
"""
import os
import pprint
from esupy.processed_data_mgmt import load_preprocessed_output
from flowsa.common import paths, biboutputpath, fbaoutputpath, fbsoutputpath, \
DEFAULT_DOWNLOAD_IF_MISSING, log, sourceconfigpath, flowbysectormethodpath, load_sourceconfig
from flowsa.metadata import set_fb_meta
from flowsa.flowbyfunctions import collapse_fbs_sectors, filter_by_geoscale
from flowsa.validation import check_for_nonetypes_in_sector_col, check_for_negative_flowamounts
import flowsa.flowbyactivity
import flowsa.flowbysector
from flowsa.bibliography import generate_fbs_bibliography
def getFlowByActivity(datasource, year, flowclass=None, geographic_level=None,
download_if_missing=DEFAULT_DOWNLOAD_IF_MISSING):
"""
Retrieves stored data in the FlowByActivity format
:param datasource: str, the code of the datasource.
:param year: int, a year, e.g. 2012
:param flowclass: str, a 'Class' of the flow. Optional. E.g. 'Water'
:param geographic_level: str, a geographic level of the data.
Optional. E.g. 'national', 'state', 'county'.
:param download_if_missing: bool, if True will attempt to load from remote server
prior to generating if file not found locally
:return: a pandas DataFrame in FlowByActivity format
"""
from esupy.processed_data_mgmt import download_from_remote
# Set fba metadata
name = flowsa.flowbyactivity.set_fba_name(datasource, year)
fba_meta = set_fb_meta(name, "FlowByActivity")
# Try to load a local version of fba; generate and load if missing
fba = load_preprocessed_output(fba_meta, paths)
# Remote download
if fba is None and download_if_missing:
log.info('%s %s not found in %s, downloading from remote source',
datasource, str(year), fbaoutputpath)
download_from_remote(fba_meta,paths)
fba = load_preprocessed_output(fba_meta,paths)
if fba is None:
log.info('%s %s not found in %s, running functions to generate FBA',
datasource, str(year), fbaoutputpath)
# Generate the fba
flowsa.flowbyactivity.main(year=year, source=datasource)
# Now load the fba
fba = load_preprocessed_output(fba_meta, paths)
if fba is None:
log.error('getFlowByActivity failed, FBA not found')
else:
log.info('Loaded %s %s from %s',
datasource, str(year), fbaoutputpath)
else:
log.info('Loaded %s %s from %s', datasource, str(year), fbaoutputpath)
# Address optional parameters
if flowclass is not None:
fba = fba[fba['Class'] == flowclass]
# if geographic level specified, only load rows in geo level
if geographic_level is not None:
fba = filter_by_geoscale(fba, geographic_level)
return fba
def getFlowBySector(methodname, download_if_missing=DEFAULT_DOWNLOAD_IF_MISSING):
"""
Loads stored FlowBySector output or generates it if it doesn't exist, then loads
:param methodname: string, Name of an available method for the given class
:param download_if_missing: bool, if True will attempt to load from remote server
prior to generating if file not found locally
:return: dataframe in flow by sector format
"""
from esupy.processed_data_mgmt import download_from_remote
fbs_meta = set_fb_meta(methodname, "FlowBySector")
fbs = load_preprocessed_output(fbs_meta, paths)
# Remote download
if fbs is None and download_if_missing:
log.info('%s not found in %s, downloading from remote source',
methodname, fbsoutputpath)
# download and load the FBS parquet
subdirectory_dict = {'.log': 'Log'}
download_from_remote(fbs_meta, paths, subdirectory_dict=subdirectory_dict)
fbs = load_preprocessed_output(fbs_meta, paths)
# If remote download not specified and no FBS, generate the FBS
if fbs is None:
log.info('%s not found in %s, running functions to generate FBS', methodname, fbsoutputpath)
# Generate the fba
flowsa.flowbysector.main(method=methodname)
# Now load the fba
fbs = load_preprocessed_output(fbs_meta, paths)
if fbs is None:
log.error('getFlowBySector failed, FBS not found')
else:
log.info('Loaded %s from %s', methodname, fbsoutputpath)
else:
log.info('Loaded %s from %s', methodname, fbsoutputpath)
return fbs
def collapse_FlowBySector(methodname):
"""
Returns fbs with one sector column in place of two
:param methodname: string, Name of an available method for the given class
:return: dataframe in flow by sector format
"""
fbs = flowsa.getFlowBySector(methodname)
fbs_collapsed = collapse_fbs_sectors(fbs)
# check data for NoneType in sector column
fbs_collapsed = check_for_nonetypes_in_sector_col(fbs_collapsed)
# check data for negative FlowAmount values
fbs_collapsed = check_for_negative_flowamounts(fbs_collapsed)
return fbs_collapsed
def writeFlowBySectorBibliography(methodname):
"""
Generate bibliography for FlowBySectorMethod in local directory
:param methodname: string, FBS methodname for which to create .bib file
:return: .bib file save to local directory
"""
# Generate a single .bib file for a list of Flow-By-Sector method names
# and save file to local directory
log.info('Write bibliography to %s%s.bib', biboutputpath, methodname)
generate_fbs_bibliography(methodname)
def seeAvailableFlowByModels(flowbytype):
"""
Return available Flow-By-Activity or Flow-By-Sector models
:param flowbytype: 'FBA' or 'FBS'
:return: console printout of available models
"""
# return fba directory path dependent on FBA or FBS
if flowbytype == 'FBA':
fb_directory = sourceconfigpath
else:
fb_directory = flowbysectormethodpath
# empty dictionary
fb_dict = {}
# empty df
fb_df = []
# run through all files and append
for file in os.listdir(fb_directory):
if file.endswith(".yaml"):
# drop file extension
f = os.path.splitext(file)[0]
if flowbytype == 'FBA':
s = load_sourceconfig(f)
years = s['years']
fb_dict.update({f: years})
# else if FBS
else:
fb_df.append(f)
# determine format of data to print
if flowbytype == 'FBA':
data_print = fb_dict
else:
data_print = fb_df
# print data in human-readable format
pprint.pprint(data_print, width=79, compact=True)
| [
"flowsa.bibliography.generate_fbs_bibliography",
"os.listdir",
"flowsa.validation.check_for_negative_flowamounts",
"flowsa.flowbyfunctions.filter_by_geoscale",
"flowsa.common.log.info",
"flowsa.flowbyfunctions.collapse_fbs_sectors",
"os.path.splitext",
"flowsa.metadata.set_fb_meta",
"esupy.processed... | [((1675, 1710), 'flowsa.metadata.set_fb_meta', 'set_fb_meta', (['name', '"""FlowByActivity"""'], {}), "(name, 'FlowByActivity')\n", (1686, 1710), False, 'from flowsa.metadata import set_fb_meta\n'), ((1793, 1834), 'esupy.processed_data_mgmt.load_preprocessed_output', 'load_preprocessed_output', (['fba_meta', 'paths'], {}), '(fba_meta, paths)\n', (1817, 1834), False, 'from esupy.processed_data_mgmt import load_preprocessed_output\n'), ((3568, 3607), 'flowsa.metadata.set_fb_meta', 'set_fb_meta', (['methodname', '"""FlowBySector"""'], {}), "(methodname, 'FlowBySector')\n", (3579, 3607), False, 'from flowsa.metadata import set_fb_meta\n'), ((3618, 3659), 'esupy.processed_data_mgmt.load_preprocessed_output', 'load_preprocessed_output', (['fbs_meta', 'paths'], {}), '(fbs_meta, paths)\n', (3642, 3659), False, 'from esupy.processed_data_mgmt import load_preprocessed_output\n'), ((4985, 5010), 'flowsa.flowbyfunctions.collapse_fbs_sectors', 'collapse_fbs_sectors', (['fbs'], {}), '(fbs)\n', (5005, 5010), False, 'from flowsa.flowbyfunctions import collapse_fbs_sectors, filter_by_geoscale\n'), ((5079, 5127), 'flowsa.validation.check_for_nonetypes_in_sector_col', 'check_for_nonetypes_in_sector_col', (['fbs_collapsed'], {}), '(fbs_collapsed)\n', (5112, 5127), False, 'from flowsa.validation import check_for_nonetypes_in_sector_col, check_for_negative_flowamounts\n'), ((5196, 5241), 'flowsa.validation.check_for_negative_flowamounts', 'check_for_negative_flowamounts', (['fbs_collapsed'], {}), '(fbs_collapsed)\n', (5226, 5241), False, 'from flowsa.validation import check_for_nonetypes_in_sector_col, check_for_negative_flowamounts\n'), ((5643, 5712), 'flowsa.common.log.info', 'log.info', (['"""Write bibliography to %s%s.bib"""', 'biboutputpath', 'methodname'], {}), "('Write bibliography to %s%s.bib', biboutputpath, methodname)\n", (5651, 5712), False, 'from flowsa.common import paths, biboutputpath, fbaoutputpath, fbsoutputpath, DEFAULT_DOWNLOAD_IF_MISSING, log, 
sourceconfigpath, flowbysectormethodpath, load_sourceconfig\n'), ((5717, 5754), 'flowsa.bibliography.generate_fbs_bibliography', 'generate_fbs_bibliography', (['methodname'], {}), '(methodname)\n', (5742, 5754), False, 'from flowsa.bibliography import generate_fbs_bibliography\n'), ((6273, 6297), 'os.listdir', 'os.listdir', (['fb_directory'], {}), '(fb_directory)\n', (6283, 6297), False, 'import os\n'), ((6823, 6872), 'pprint.pprint', 'pprint.pprint', (['data_print'], {'width': '(79)', 'compact': '(True)'}), '(data_print, width=79, compact=True)\n', (6836, 6872), False, 'import pprint\n'), ((2038, 2075), 'esupy.processed_data_mgmt.download_from_remote', 'download_from_remote', (['fba_meta', 'paths'], {}), '(fba_meta, paths)\n', (2058, 2075), False, 'from esupy.processed_data_mgmt import download_from_remote\n'), ((2089, 2130), 'esupy.processed_data_mgmt.load_preprocessed_output', 'load_preprocessed_output', (['fba_meta', 'paths'], {}), '(fba_meta, paths)\n', (2113, 2130), False, 'from esupy.processed_data_mgmt import load_preprocessed_output\n'), ((2416, 2457), 'esupy.processed_data_mgmt.load_preprocessed_output', 'load_preprocessed_output', (['fba_meta', 'paths'], {}), '(fba_meta, paths)\n', (2440, 2457), False, 'from esupy.processed_data_mgmt import load_preprocessed_output\n'), ((2980, 3021), 'flowsa.flowbyfunctions.filter_by_geoscale', 'filter_by_geoscale', (['fba', 'geographic_level'], {}), '(fba, geographic_level)\n', (2998, 3021), False, 'from flowsa.flowbyfunctions import collapse_fbs_sectors, filter_by_geoscale\n'), ((3735, 3828), 'flowsa.common.log.info', 'log.info', (['"""%s not found in %s, downloading from remote source"""', 'methodname', 'fbsoutputpath'], {}), "('%s not found in %s, downloading from remote source', methodname,\n fbsoutputpath)\n", (3743, 3828), False, 'from flowsa.common import paths, biboutputpath, fbaoutputpath, fbsoutputpath, DEFAULT_DOWNLOAD_IF_MISSING, log, sourceconfigpath, flowbysectormethodpath, load_sourceconfig\n'), ((3938, 
4012), 'esupy.processed_data_mgmt.download_from_remote', 'download_from_remote', (['fbs_meta', 'paths'], {'subdirectory_dict': 'subdirectory_dict'}), '(fbs_meta, paths, subdirectory_dict=subdirectory_dict)\n', (3958, 4012), False, 'from esupy.processed_data_mgmt import download_from_remote\n'), ((4027, 4068), 'esupy.processed_data_mgmt.load_preprocessed_output', 'load_preprocessed_output', (['fbs_meta', 'paths'], {}), '(fbs_meta, paths)\n', (4051, 4068), False, 'from esupy.processed_data_mgmt import load_preprocessed_output\n'), ((4166, 4262), 'flowsa.common.log.info', 'log.info', (['"""%s not found in %s, running functions to generate FBS"""', 'methodname', 'fbsoutputpath'], {}), "('%s not found in %s, running functions to generate FBS',\n methodname, fbsoutputpath)\n", (4174, 4262), False, 'from flowsa.common import paths, biboutputpath, fbaoutputpath, fbsoutputpath, DEFAULT_DOWNLOAD_IF_MISSING, log, sourceconfigpath, flowbysectormethodpath, load_sourceconfig\n'), ((4379, 4420), 'esupy.processed_data_mgmt.load_preprocessed_output', 'load_preprocessed_output', (['fbs_meta', 'paths'], {}), '(fbs_meta, paths)\n', (4403, 4420), False, 'from esupy.processed_data_mgmt import load_preprocessed_output\n'), ((4609, 4665), 'flowsa.common.log.info', 'log.info', (['"""Loaded %s from %s"""', 'methodname', 'fbsoutputpath'], {}), "('Loaded %s from %s', methodname, fbsoutputpath)\n", (4617, 4665), False, 'from flowsa.common import paths, biboutputpath, fbaoutputpath, fbsoutputpath, DEFAULT_DOWNLOAD_IF_MISSING, log, sourceconfigpath, flowbysectormethodpath, load_sourceconfig\n'), ((2494, 2546), 'flowsa.common.log.error', 'log.error', (['"""getFlowByActivity failed, FBA not found"""'], {}), "('getFlowByActivity failed, FBA not found')\n", (2503, 2546), False, 'from flowsa.common import paths, biboutputpath, fbaoutputpath, fbsoutputpath, DEFAULT_DOWNLOAD_IF_MISSING, log, sourceconfigpath, flowbysectormethodpath, load_sourceconfig\n'), ((4457, 4507), 'flowsa.common.log.error', 
'log.error', (['"""getFlowBySector failed, FBS not found"""'], {}), "('getFlowBySector failed, FBS not found')\n", (4466, 4507), False, 'from flowsa.common import paths, biboutputpath, fbaoutputpath, fbsoutputpath, DEFAULT_DOWNLOAD_IF_MISSING, log, sourceconfigpath, flowbysectormethodpath, load_sourceconfig\n'), ((4534, 4590), 'flowsa.common.log.info', 'log.info', (['"""Loaded %s from %s"""', 'methodname', 'fbsoutputpath'], {}), "('Loaded %s from %s', methodname, fbsoutputpath)\n", (4542, 4590), False, 'from flowsa.common import paths, biboutputpath, fbaoutputpath, fbsoutputpath, DEFAULT_DOWNLOAD_IF_MISSING, log, sourceconfigpath, flowbysectormethodpath, load_sourceconfig\n'), ((6384, 6406), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (6400, 6406), False, 'import os\n'), ((6466, 6486), 'flowsa.common.load_sourceconfig', 'load_sourceconfig', (['f'], {}), '(f)\n', (6483, 6486), False, 'from flowsa.common import paths, biboutputpath, fbaoutputpath, fbsoutputpath, DEFAULT_DOWNLOAD_IF_MISSING, log, sourceconfigpath, flowbysectormethodpath, load_sourceconfig\n')] |
import sys
def f():
try:
return f()
except RuntimeError:
return sys.exc_info()
def do_check():
f()
assert sys.exc_info() == (None, None, None)
def recurse(n):
if n > 0:
return recurse(n-1)
else:
return do_check()
def test_recursion():
"""
Test that sys.exc_info() is cleared after RecursionError was raised.
The issue only appeared intermittently, depending on the contents of the
call stack, hence the need for the recurse() helper to trigger it reliably.
"""
for i in range(50):
recurse(i)
| [
"sys.exc_info"
] | [((140, 154), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (152, 154), False, 'import sys\n'), ((89, 103), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (101, 103), False, 'import sys\n')] |
from collections import defaultdict
from base.threedi_base.constants import CULVERT_SNAPPED_TABLE_NAME
from base.threedi_base import position
from base.threedi_base.logger import Logger
from base.threedi_base.apps import ThreediBaseConfig as conf
from base.threedi_base.exceptions import InsertError
logger = Logger.get(__name__, conf.LOG_LEVEL)
def get_chunk(l, n=2):
n = max(1, n)
return (l[i:i+n] for i in range(0, len(l), n))
class CulvertChannelLines:
def __init__(self, db, buffer_size, culvert_input_table,
channel_input_table,
culvert_output_table_name,
channel_output_table_name):
"""
:param db: ThreediDatabase instance
:param buffer_size: size to buffer the start and
endpoint of the culvert with
:param culvert_input_table: name of the culvert table
:param channel_input_table: name of the channel table
:param culvert_output_table_name: name of the culvert output table
:param channel_output_table_name: name of the channel output table
"""
self.db = db
self.buffer_size = buffer_size
self.channel_input_table_name = channel_input_table
self.culvert_input_table_name = culvert_input_table
self.corrected_valids_table_name = 'culvert_valid_corrected'
self.buffer_table_name = 'culvert_buff_pnts'
self.valids_table_name = 'culvert_valid'
self.misfits_table_name = 'culvert_misfits'
self.culvert_snapped_table_name = CULVERT_SNAPPED_TABLE_NAME
self.connection_nodes_tables = 'tmp_connection_nodes_structures'
self.channel_output_table_name = channel_output_table_name
self.culvert_channel_mapping = defaultdict(list)
self.channel_culvert_mapping = defaultdict(list)
self.channels_fully_replaced = set()
def analyze_dataset(self):
"""
separate the good from the bad, Create intermediate tables
``self.buffer_table_name`` and ``self.valids_table_name``
"""
self._create_culvert_buffers()
self._identify_misfits()
self._identify_valids()
self._create_channel_culvert_mappings()
def remove_tmp_tables(self):
"""remove intermediate tables"""
for table_name in (self.buffer_table_name,
self.valids_table_name):
self.db.drop_item(name_item=table_name, type_item='TABLE')
def _create_culvert_buffers(self):
"""
creates a buffer around the start- and endpoints of the
culvert line geometry.
Buffer geometry of the startpoint
will by accessible as column name ``start_b``
Buffer geometry of the endpoint
will by accessible as column name ``end_b``
"""
statement = """
DROP TABLE IF EXISTS {schema}.{table_name};
CREATE TABLE {schema}.{table_name} AS
SELECT
*,
ST_Buffer(
ST_Startpoint(a.geom),
{buffer_size}
) AS start_b,
ST_Buffer(
ST_Endpoint(a.geom),
{buffer_size}
) AS end_b
FROM
{schema}.{input_table_name} AS a
;
""".format(schema=self.db.schema,
input_table_name=self.culvert_input_table_name,
table_name=self.buffer_table_name,
buffer_size=self.buffer_size)
self.db.free_form(statement, fetch=False)
self.db.create_index(
self.buffer_table_name, 'idx_startb', 'start_b', gist=True)
self.db.create_index(
self.buffer_table_name, 'idx_endb', 'end_b', gist=True)
def _identify_misfits(self):
"""
identify the culverts the are (partly) too
far away from the channels
"""
self.db.drop_item(self.misfits_table_name, 'TABLE')
misfits_statement = """
-- get misfits
DROP SEQUENCE IF EXISTS misfits_id_seq;
CREATE SEQUENCE misfits_id_seq;
CREATE TABLE {schema}.{misfits_table} AS
SELECT
nextval('misfits_id_seq') AS id,
b.id as channel_id,
a.id as culvert_id,
a.geom as geom
FROM
{schema}.{buffer_table} AS a
LEFT JOIN
{schema}.{channel_input_table} AS b
ON
ST_Intersects(b.geom, a.start_b)
OR
ST_Intersects(b.geom, a.end_b)
WHERE
b.id IS NULL
ORDER by a.id
;""".format(schema=self.db.schema, buffer_table=self.buffer_table_name,
misfits_table=self.misfits_table_name,
channel_input_table=self.channel_input_table_name)
self.db.free_form(misfits_statement, fetch=False)
def get_count_misfits(self):
"""count the culverts that lay too far from a channel"""
return self.db.get_count(table_name=self.misfits_table_name)
def _identify_valids(self):
"""
create a table of the culverts that can be
linked to a channel
"""
valids_statement = """
DROP TABLE IF EXISTS {schema}.{valids_table_name};
CREATE TABLE {schema}.{valids_table_name} AS
SELECT
b.id as channel_id,
a.id as culvert_id,
a.geom as geom,
b.geom as geom_ch,
-- generalize start and endpoints
ST_LineLocatePoint(b.geom, ST_Startpoint(a.geom)) as pal_s_org,
ST_LineLocatePoint(b.geom, ST_Endpoint(a.geom)) as pal_e_org,
ST_Distance(b.geom, ST_Startpoint(a.geom)) as dist_start,
ST_Distance(b.geom, ST_Endpoint(a.geom)) as dist_end,
ST_Length(a.geom) AS l_len,
ST_Length(b.geom) AS ch_len
FROM
{schema}.{buffer_table} AS a
LEFT JOIN
{schema}.{channel_input_table_name} AS b
ON
ST_Intersects(b.geom, a.start_b)
OR
ST_Intersects(b.geom, a.end_b)
WHERE
b.id IS NOT NULL
ORDER by a.id
;
""".format(schema=self.db.schema,
valids_table_name=self.valids_table_name,
buffer_table=self.buffer_table_name,
channel_input_table_name=self.channel_input_table_name)
self.db.free_form(valids_statement, fetch=False)
def get_corrected_valids(self):
"""
select all entries from the database table ``culvert_valid_corrected``
:returns a dictionary with all entries from
database table ``culvert_valid_corrected``
"""
entries = self.db.free_form(
"""
SELECT
a.*,
b.pal_e AS pal_e,
b.pal_s AS pal_s,
ST_AsText(a.geom) AS geom,
ST_AsText(a.geom_ch) AS geom_ch
FROM
{schema}.{valids_table_name} AS a
LEFT JOIN
{schema}.{corrected_valids_table_name} AS b
ON
a.culvert_id=b.culvert_id
AND
a.channel_id=b.channel_id
;""".format(
schema=self.db.schema,
valids_table_name=self.valids_table_name,
corrected_valids_table_name=self.corrected_valids_table_name),
fetch=True, fetch_as='dict')
return entries
def get_valids(self):
"""
select all entries from the database table ``culvert_valid``
:returns a dictionary with all entries from
database table ``culvert_valid``
"""
entries = self.db.free_form(
"""
SELECT
*,
ST_AsText(geom) AS geom,
ST_AsText(geom_ch) AS geom_ch
FROM
{schema}.{valids_table_name}
;""".format(schema=self.db.schema,
valids_table_name=self.valids_table_name),
fetch=True, fetch_as='dict')
return entries
def _create_channel_culvert_mappings(self):
"""
fills the ``culvert_channel_mapping`` and ``channel_culvert_mapping``
dictionaries.
Example item ``self.culvert_channel_mapping``::
(<culvert_id>,
[{'channel_id': 218278,
'culvert_id': 18775,
'geom': 'LINESTRING(130267.399095133 503416.05146975, ...)',
'geom_ch': 'LINESTRING(130442.905 503308.345,130427.1, ...)',
# position along line of end point culvert (generalized)
'pal_e': 0.646281930928462,
# position along line of end point culvert (original)
'pal_e_org': 0.646281930928462,
# position along line of start point culvert (generalized)
'pal_s': 0.614951093082171,
# position along line of start point culvert (original)
'pal_s_org': 0.614951093082171
}])
"""
valids = self.get_valids()
corrected_entries = self._correct_entries(valids)
self.create_corrected_valids_table(
corrected_entries
)
corrected_valids = self.get_corrected_valids()
for c_entry in corrected_valids:
self.culvert_channel_mapping[
c_entry['culvert_id']].append(c_entry)
self.channel_culvert_mapping[
c_entry['channel_id']].append(c_entry)
def _correct_entries(self, valids):
"""
corrects the entries from the culvert_valid table by
* a threshold. That is, the position of the culvert
start- and endpoints along the channel line will
either be snapped to the start- or endpoint of the
channel when they are beyond the given threshold
* the measured distance of the start- and endpoint to
the given channel. That is, if the distance of the
start- and endpoints to the channel in question is
bigger than 2 * the buffer size for the start- and
endpoints, the point along line attribute will be reset
:param valids: list of entries from the ``culvert_valid`` table
:returns a list of corrected entries
"""
_corrected = defaultdict(list)
for entry in valids:
entry_cpos = position.correct_positions_by_threshold(entry)
entry_cd = position.correct_positions_by_distance(entry_cpos)
_corrected[entry['culvert_id']].append(entry_cd)
corrected_entries = []
for culvert_id, entries in _corrected.items():
if len(entries) > 1:
corrected_crossings = position.correct_crossings(
culvert_id, entries
)
corrected_entries.extend(corrected_crossings)
else:
entry = entries[0]
corrected_entries.append(
(entry['channel_id'], entry['culvert_id'],
entry['pal_e'], entry['pal_s'])
)
return corrected_entries
    def create_corrected_valids_table(self, corrected_entries):
        """
        Create and fill the corrected-valids table and add ``culvert_id``
        indexes on both the valids table and the corrected-valids table.

        :param corrected_entries: iterable of
            (channel_id, culvert_id, pal_e, pal_s) tuples
        """
        field_names = 'channel_id, culvert_id, pal_e, pal_s'
        # NOTE(review): split(',') keeps the blanks after the commas, so
        # create_table receives ' culvert_id', ' pal_e', ' pal_s' --
        # verify the db wrapper strips or tolerates the leading spaces.
        self.db.create_table(
            self.corrected_valids_table_name, field_names.split(','),
            ['bigint', 'bigint', 'double precision', 'double precision']
        )
        self.db.commit_values(
            self.corrected_valids_table_name, field_names, corrected_entries
        )
        self.db.create_index(
            table_name=self.valids_table_name,
            index_name='idx_valids_culvert_id', column='culvert_id'
        )
        self.db.create_index(
            table_name=self.corrected_valids_table_name,
            index_name='idx_corrected_valids_culvert_id', column='culvert_id'
        )
def create_tmp_culverts(self):
# create new culverts
self.db.create_table(
self.culvert_snapped_table_name,
["culvert_id", "geom"],
["bigint", "geometry"]
)
for culvert_id, channel_map in \
self.culvert_channel_mapping.items():
self.merge_culvert_subparts(culvert_id, channel_map)
def clip_channels_by_culverts(self):
# clip channels by culverts
self.db.create_table(
table_name=self.channel_output_table_name,
field_names=["channel_id", "part_id", "geom"],
field_types=["bigint", "smallint", "geometry"]
)
channels_to_be_removed = []
for channel_id, items in self.channel_culvert_mapping.items():
ordered_positions = position.get_ordered_positions(items)
# culvert completely on channel line. Mark channel
# for deletion
if position.fully_covered(ordered_positions):
channels_to_be_removed.append(channel_id)
continue
# check for false positives
if position.must_be_skipped(ordered_positions):
continue
cleaned_ordered_positions = position.remove_duplicate_positions(
ordered_positions
)
positions = position.add_start_end_position(
cleaned_ordered_positions
)
flipped_positions = position.flip_start_end_position(positions)
self.create_channel_sub_lines(channel_id, flipped_positions)
self.add_channels_without_culvert()
    def create_channel_sub_lines(self, channel_id, positions):
        """
        Fill the channel output table with one row per channel sub-line:
            channel_id,
            part_id (0-based counter),
            geometry (line)

        :param channel_id: id of the channel object
        :param positions: all the positions that make up the
            channel line geometry
        :raises InsertError: when the positions cannot be chunked into
            consecutive (start, end) pairs
        """
        cnt = 0
        try:
            # get_chunk presumably yields consecutive (start, end)
            # fraction pairs along the channel line -- confirm in helper
            for a, b in get_chunk(positions):
                statement_line_substring = """
                ST_LineSubstring(
                    a.geom_ch, {}, {}
                )
                """.format(a, b)
                insert_statement = """
    INSERT INTO
      {schema}.{output_table_name}(channel_id, part_id, geom)
    SELECT DISTINCT ON(a.channel_id)
        a.channel_id,
        {cnt},
        {st_line_substring}
    FROM
      {schema}.{valids_table_name} AS a
    WHERE
      a.channel_id={channel_id}
    ;
    """.format(
                schema=self.db.schema,
                valids_table_name=self.valids_table_name,
                output_table_name=self.channel_output_table_name,
                channel_id=channel_id, cnt=cnt,
                st_line_substring=statement_line_substring
            )
                self.db.free_form(insert_statement, fetch=False)
                cnt += 1
        except ValueError:
            msg = 'Failed to create line for channel {}. ' \
                  'Has the following positions {}'.\
                format(channel_id, positions)
            logger.exception(msg)
            raise InsertError(msg)
    def add_channels_without_culvert(self):
        """
        Copy every channel that does not appear in the valids table
        (i.e. has no culvert on it) unchanged into the channel output
        table, as a single part (part_id 0).
        """
        statement = """
    INSERT INTO {schema}.{channel_output_table}(channel_id, part_id, geom)
    SELECT
        a.id AS channel_id
        , 0 AS part_id -- has only a single part
        , a.geom AS geom
    FROM
      {schema}.{original_channels} AS a
    LEFT JOIN
      {schema}.{valids_table_name} AS b
    ON
      a.id=b.channel_id
    WHERE
      b.channel_id IS NULL
    ;
    """.format(
            schema=self.db.schema,
            original_channels=self.channel_input_table_name,
            channel_output_table=self.channel_output_table_name,
            valids_table_name=self.valids_table_name,
        )
        self.db.free_form(sql_statement=statement, fetch=False)
def filter_channels(self, entries):
"""
filter channels that do not share any space with culvert
or are fully covered by culverts. Filters the current
selection against a set of channels that are already filled or
replaced by culvert geometries to avoid duplicates.
"""
current_selection = set()
fully_covered_by = set()
for entry in entries:
positions = [entry['pal_e'], entry['pal_s']]
positions.sort()
if position.must_be_skipped(positions):
continue
if position.fully_covered(positions):
fully_covered_by.add(entry['channel_id'])
current_selection.add(entry['channel_id'])
filtered_channel_ids = current_selection.difference(
self.channels_fully_replaced
)
self.channels_fully_replaced.update(fully_covered_by)
return filtered_channel_ids
def merge_culvert_subparts(self, culvert_id, entries):
"""
performs a ST_LineMerge on all culvert sub-linestrings that have
the same channel_id
"""
channel_ids = self.filter_channels(entries)
if not channel_ids:
return
if all([channel_id in self.channels_fully_replaced
for channel_id in channel_ids]):
for channel_id in channel_ids:
self.insert_into_culvert_output_table(
[channel_id], culvert_id
)
else:
self.insert_into_culvert_output_table(channel_ids, culvert_id)
    def insert_into_culvert_output_table(self, channel_ids, culvert_id):
        """
        Build one merged geometry for the given culvert from its
        per-channel sub-linestrings (``ST_LineSubstring`` between the
        corrected pal_s/pal_e, collected and line-merged) and insert it
        into the snapped-culvert table.

        :param channel_ids: iterable of channel ids the culvert crosses
        :param culvert_id: id of the culvert to merge
        """
        ids_str = ','.join([str(x) for x in channel_ids])
        insert_statement = """
    INSERT INTO
      {schema}.{culvert_output_table_name} (culvert_id, geom)
    SELECT
       DISTINCT ON(culvert_id) subq.culvert_id AS culvert_id,
       ST_LineMerge(
            ST_Collect(
                subq.culvert_n
             )
       ) AS geom
    FROM (
        SELECT
          DISTINCT ON(a.channel_id)
            a.*,
            CASE WHEN (
                a.pal_s > a.pal_e)
            THEN
                ST_LineSubstring(b.geom_ch, a.pal_e, a.pal_s)
            ELSE
                ST_LineSubstring(b.geom_ch, a.pal_s, a.pal_e)
            END AS culvert_n
        FROM
          {schema}.{corrected_valids_table_name} AS a
        LEFT JOIN
          {schema}.{valids_table_name} AS b
        ON
          a.culvert_id=b.culvert_id
        AND
          a.channel_id=b.channel_id
        WHERE
           a.channel_id IN ({channel_ids})
        AND
           a.culvert_id={culvert_id}
        ) AS subq
    GROUP BY
        subq.culvert_id
    ;
    """.format(
            channel_ids=ids_str,
            culvert_output_table_name=self.culvert_snapped_table_name,
            schema=self.db.schema,
            valids_table_name=self.valids_table_name,
            corrected_valids_table_name=self.corrected_valids_table_name,
            culvert_id=culvert_id,
        )
        self.db.free_form(insert_statement, fetch=False)
@staticmethod
def get_channel_ids(items, as_string=False):
"""
:param items: list of dicts. dict must contain key channel_id
:param as_string: if True the ids as will be returned as strings,
if False (default) as integers
:returns a list of channel_ids
"""
return [str(item['channel_id'])
if as_string
else item['channel_id']
for item in items]
def get_start_or_end_point(self, geom_as_text, position):
if position == 'start':
st = 'ST_Startpoint'
elif position == 'end':
st = 'ST_Endpoint'
statement = """
SELECT
ST_AsText(
{st}(
ST_GeomFromText(
'{geom}'
)
)
)
;
""".format(st=st, geom=geom_as_text)
return self.db.free_form(statement, fetch=True)[0][0]
    def move_multi_geoms_to_misfits(self):
        """
        Move snapped culverts whose merged geometry is still a
        multi-geometry (``ST_NumGeometries(geom) > 1``, i.e. the parts
        could not be line-merged) into the misfits table, then delete
        them from the snapped-culvert table.
        """
        insert_to_misfits_statement = """
    INSERT INTO
      {schema}.{misfits_table} (id, culvert_id, geom)
    SELECT
        nextval('misfits_id_seq') AS id,
        a.id AS culvert_id,
        a.geom as geom
    FROM
       {schema}.{culverts_snapped_table} AS a
    WHERE
        ST_NumGeometries(geom) > 1
    ;""".format(
            schema=self.db.schema, misfits_table=self.misfits_table_name,
            culverts_snapped_table=self.culvert_snapped_table_name
        )
        self.db.free_form(insert_to_misfits_statement, fetch=False)
        del_from_snapped_table_statement = """
    DELETE FROM
        {schema}.{culverts_snapped_table}
    WHERE
        ST_NumGeometries(geom) > 1
    ;
    """.format(
            schema=self.db.schema,
            culverts_snapped_table=self.culvert_snapped_table_name
        )
        self.db.free_form(del_from_snapped_table_statement, fetch=False)
    def add_missing_culverts_to_misfits(self):
        """
        Add every input culvert that ended up neither in the
        snapped-culvert table nor already in the misfits table to the
        misfits table, keeping its original geometry.
        """
        # NOTE(review): variable name has a typo ("statemet"); harmless,
        # it is local to this method only
        add_missing_statemet = """
    INSERT INTO
      {schema}.{misfits_table} (id, culvert_id, geom)
    SELECT
        nextval('misfits_id_seq') AS id,
        a.id AS culvert_id,
        a.geom AS geom
    FROM
       {schema}.{culvert_input_table_name} AS a
    LEFT JOIN
       {schema}.{misfits_table} AS b
    ON
      a.id=b.culvert_id
    LEFT JOIN
      {schema}.{culverts_snapped_table} AS c
    ON
      a.id=c.culvert_id
    WHERE
      c.culvert_id IS NULL
    AND
      b.culvert_id IS NULL
    ;
    """.format(
            schema=self.db.schema,
            culverts_snapped_table=self.culvert_snapped_table_name,
            culvert_input_table_name=self.culvert_input_table_name,
            misfits_table=self.misfits_table_name
        )
        self.db.free_form(add_missing_statemet, fetch=False)
| [
"base.threedi_base.position.must_be_skipped",
"base.threedi_base.position.get_ordered_positions",
"base.threedi_base.position.flip_start_end_position",
"base.threedi_base.position.correct_positions_by_threshold",
"base.threedi_base.logger.Logger.get",
"base.threedi_base.position.correct_crossings",
"bas... | [((311, 347), 'base.threedi_base.logger.Logger.get', 'Logger.get', (['__name__', 'conf.LOG_LEVEL'], {}), '(__name__, conf.LOG_LEVEL)\n', (321, 347), False, 'from base.threedi_base.logger import Logger\n'), ((1751, 1768), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1762, 1768), False, 'from collections import defaultdict\n'), ((1808, 1825), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1819, 1825), False, 'from collections import defaultdict\n'), ((10255, 10272), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10266, 10272), False, 'from collections import defaultdict\n'), ((10327, 10373), 'base.threedi_base.position.correct_positions_by_threshold', 'position.correct_positions_by_threshold', (['entry'], {}), '(entry)\n', (10366, 10373), False, 'from base.threedi_base import position\n'), ((10397, 10447), 'base.threedi_base.position.correct_positions_by_distance', 'position.correct_positions_by_distance', (['entry_cpos'], {}), '(entry_cpos)\n', (10435, 10447), False, 'from base.threedi_base import position\n'), ((12648, 12685), 'base.threedi_base.position.get_ordered_positions', 'position.get_ordered_positions', (['items'], {}), '(items)\n', (12678, 12685), False, 'from base.threedi_base import position\n'), ((12791, 12832), 'base.threedi_base.position.fully_covered', 'position.fully_covered', (['ordered_positions'], {}), '(ordered_positions)\n', (12813, 12832), False, 'from base.threedi_base import position\n'), ((12972, 13015), 'base.threedi_base.position.must_be_skipped', 'position.must_be_skipped', (['ordered_positions'], {}), '(ordered_positions)\n', (12996, 13015), False, 'from base.threedi_base import position\n'), ((13083, 13137), 'base.threedi_base.position.remove_duplicate_positions', 'position.remove_duplicate_positions', (['ordered_positions'], {}), '(ordered_positions)\n', (13118, 13137), False, 'from base.threedi_base import position\n'), ((13192, 13250), 
'base.threedi_base.position.add_start_end_position', 'position.add_start_end_position', (['cleaned_ordered_positions'], {}), '(cleaned_ordered_positions)\n', (13223, 13250), False, 'from base.threedi_base import position\n'), ((13313, 13356), 'base.threedi_base.position.flip_start_end_position', 'position.flip_start_end_position', (['positions'], {}), '(positions)\n', (13345, 13356), False, 'from base.threedi_base import position\n'), ((16513, 16548), 'base.threedi_base.position.must_be_skipped', 'position.must_be_skipped', (['positions'], {}), '(positions)\n', (16537, 16548), False, 'from base.threedi_base import position\n'), ((16590, 16623), 'base.threedi_base.position.fully_covered', 'position.fully_covered', (['positions'], {}), '(positions)\n', (16612, 16623), False, 'from base.threedi_base import position\n'), ((10667, 10714), 'base.threedi_base.position.correct_crossings', 'position.correct_crossings', (['culvert_id', 'entries'], {}), '(culvert_id, entries)\n', (10693, 10714), False, 'from base.threedi_base import position\n'), ((15181, 15197), 'base.threedi_base.exceptions.InsertError', 'InsertError', (['msg'], {}), '(msg)\n', (15192, 15197), False, 'from base.threedi_base.exceptions import InsertError\n')] |
from __future__ import unicode_literals
import copy
import importlib
import threading
from django.db import models, router
from django.db.models import Q
from django.db.models.fields.proxy import OrderWrt
from django.conf import settings
from django.contrib import admin
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.encoding import smart_text
from django.utils.timezone import now
from django.utils.translation import string_concat
try:
from django.apps import apps
except ImportError: # Django < 1.7
from django.db.models import get_app
try:
from django.db.models.fields.related import ForwardManyToOneDescriptor as ManyToOneDescriptor
except ImportError: # Django < 1.9
from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor as ManyToOneDescriptor
try:
from south.modelsinspector import add_introspection_rules
except ImportError: # south not present
pass
else: # south configuration for CustomForeignKeyField
add_introspection_rules(
[], ["^simple_history.models.CustomForeignKeyField"])
from . import exceptions
from simple_history import register
from .manager import HistoryDescriptor
ALL_M2M_FIELDS = object()  # sentinel: track history for *all* m2m fields
registered_models = {}  # db_table (db_table + class name for proxies) -> model


def not_registered(model):
    """Return True when no history model has been registered for *model*."""
    meta = model._meta
    if meta.proxy:
        key = '%s%s' % (meta.db_table, model.__name__)
    else:
        key = meta.db_table
    return key not in registered_models
class HistoricalRecords(object):
    """
    Model-attribute-style recorder.  Assigned to a model class (or
    attached via ``register``), it creates a ``Historical<Model>``
    companion model at class-preparation time and connects the
    save/delete/m2m signals so that every change is stored as a
    historical row ('+' created, '~' changed, '-' deleted).
    """
    # thread-local storage; middleware may set ``thread.request`` so the
    # acting user can be picked up in get_history_user()
    thread = threading.local()
    def __init__(self, verbose_name=None, bases=(models.Model,),
                 user_related_name='+', table_name=None, inherit=False, m2m_fields=None):
        """
        :param verbose_name: verbose name for the historical model
        :param bases: base classes of the generated historical model
        :param user_related_name: related_name of the history_user FK
        :param table_name: explicit db table for the historical model
        :param inherit: also track subclasses of the registered model
        :param m2m_fields: list/tuple of m2m field names to track, or
            the ``ALL_M2M_FIELDS`` sentinel
        """
        self.user_set_verbose_name = verbose_name
        self.user_related_name = user_related_name
        self.table_name = table_name
        self.inherit = inherit
        self.m2m_fields = m2m_fields
        try:
            # a plain string is iterable and would silently become a
            # tuple of single characters -- reject it explicitly
            if isinstance(bases, six.string_types):
                raise TypeError
            self.bases = tuple(bases)
        except TypeError:
            raise TypeError("The `bases` option must be a list or a tuple.")
    def contribute_to_class(self, cls, name):
        """Django hook: remember the owner and defer the real setup to
        ``finalize`` once the model class is fully prepared."""
        self.manager_name = name
        self.module = cls.__module__
        self.cls = cls
        models.signals.class_prepared.connect(self.finalize, weak=False)
        self.add_extra_methods(cls)
        self.setup_m2m_history(cls)
    def add_extra_methods(self, cls):
        """Attach ``save_without_historical_record`` to the tracked model."""
        def save_without_historical_record(self, *args, **kwargs):
            """
            Save model without saving a historical record

            Make sure you know what you're doing before you use this method.
            """
            # the flag is checked in post_save/m2m handlers and always
            # removed again, even when save() raises
            self.skip_history_when_saving = True
            try:
                ret = self.save(*args, **kwargs)
            finally:
                del self.skip_history_when_saving
            return ret
        setattr(cls, 'save_without_historical_record',
                save_without_historical_record)
    def setup_m2m_history(self, cls):
        """
        Register history tracking on the (auto-created) through models of
        the requested m2m fields -- either all of them (ALL_M2M_FIELDS)
        or an explicit list/tuple of field names.
        """
        m2m_history_fields = self.m2m_fields
        if m2m_history_fields is ALL_M2M_FIELDS:
            for field in cls._meta.many_to_many:
                field = getattr(cls, field.name).field
                assert isinstance(field, models.fields.related.ManyToManyField), \
                    ('%s must be a ManyToManyField' % field.name)
                # only register when the through model is not already tracked
                if not sum([
                    isinstance(item, HistoricalRecords) for item in field.remote_field.through.__dict__.values()
                ]):
                    through_model = field.remote_field.through
                    if through_model._meta.auto_created and not_registered(through_model):
                        through_model.history = HistoricalRecords()
                        register(through_model)
        elif m2m_history_fields:
            assert (isinstance(m2m_history_fields, list) or isinstance(m2m_history_fields, tuple)), \
                'm2m_history_fields must be a list or tuple'
            for field_name in m2m_history_fields:
                field = getattr(cls, field_name).field
                assert isinstance(field, models.fields.related.ManyToManyField), \
                    ('%s must be a ManyToManyField' % field_name)
                if not sum([
                    isinstance(item, HistoricalRecords) for item in field.remote_field.through.__dict__.values()
                ]):
                    through_model = field.remote_field.through
                    if through_model._meta.auto_created and not_registered(through_model):
                        through_model.history = HistoricalRecords()
                        register(through_model)
    def finalize(self, sender, **kwargs):
        """
        ``class_prepared`` handler: create (or, for proxies, reuse) the
        historical model, connect the save/delete/m2m signals and attach
        the history manager descriptor to *sender*.
        """
        try:
            hint_class = self.cls
        except AttributeError:  # called via `register`
            pass
        else:
            if hint_class is not sender:  # set in concrete
                if not (self.inherit and issubclass(sender, hint_class)):
                    return  # set in abstract
        if hasattr(sender._meta, 'simple_history_manager_attribute'):
            raise exceptions.MultipleRegistrationsError(
                '{}.{} registered multiple times for history tracking.'.format(
                    sender._meta.app_label,
                    sender._meta.object_name,
                )
            )
        if sender._meta.proxy:
            original_class = [base_class for base_class in sender.__bases__ if base_class._meta.abstract is False][0]
            # Parent model must be registered before the proxy model is
            if not_registered(original_class):
                # Ignore the `app` kwarg, since the proxy model may be in a different app than the original model
                register_kwargs = {
                    'manager_name': self.manager_name,
                    'records_class': self.__class__,
                    'verbose_name': self.user_set_verbose_name,
                    'bases': self.bases,
                    'user_related_name': self.user_related_name,
                    'm2m_fields': self.m2m_fields,
                }
                register(original_class, **register_kwargs)
            # Proxy models use their parent's history model
            history_model = getattr(sender, self.manager_name).model
        else:
            history_model = self.create_history_model(sender)
            # expose the historical model in the owning module so that
            # migrations and imports can find it by name
            module = importlib.import_module(self.module)
            setattr(module, history_model.__name__, history_model)
        # The HistoricalRecords object will be discarded,
        # so the signal handlers can't use weak references.
        models.signals.post_save.connect(self.post_save, sender=sender,
                                         weak=False)
        models.signals.pre_delete.connect(self.pre_delete, sender=sender,
                                          weak=False)
        models.signals.post_delete.connect(self.post_delete, sender=sender,
                                           weak=False)
        models.signals.m2m_changed.connect(self.m2m_changed, sender=sender, weak=False)
        descriptor = HistoryDescriptor(history_model)
        setattr(sender, self.manager_name, descriptor)
        sender._meta.simple_history_manager_attribute = self.manager_name
    def create_history_model(self, model):
        """
        Creates a historical model to associate with the model provided.

        The generated class is named ``Historical<Model>``, lives in the
        tracked model's module (or app models module) and combines copied
        model fields with the extra history_* bookkeeping fields.
        """
        attrs = {'__module__': self.module}
        app_module = '%s.models' % model._meta.app_label
        if model.__module__ != self.module:
            # registered under different app
            attrs['__module__'] = self.module
        elif app_module != self.module:
            try:
                # Abuse an internal API because the app registry is loading.
                app = apps.app_configs[model._meta.app_label]
            except NameError:  # Django < 1.7
                models_module = get_app(model._meta.app_label).__name__
            else:
                models_module = app.name
            attrs['__module__'] = models_module
        fields = self.copy_fields(model)
        attrs.update(fields)
        attrs.update(self.get_extra_fields(model, fields))
        # type in python2 wants str as a first argument
        attrs.update(Meta=type(str('Meta'), (), self.get_meta_options(model)))
        if self.table_name is not None:
            attrs['Meta'].db_table = self.table_name
        name = 'Historical%s' % model._meta.object_name
        registered_models[model._meta.db_table] = model
        return python_2_unicode_compatible(
            type(str(name), self.bases, attrs))
    def copy_fields(self, model):
        """
        Creates copies of the model's original fields, returning
        a dictionary mapping field name to copied field object.

        Foreign keys are rebuilt as nullable, unconstrained, indexed
        references so historical rows never block deletions; every other
        field is adjusted in place by ``transform_field``.
        """
        fields = {}
        for field in model._meta.fields:
            field = copy.copy(field)
            # NOTE(review): both branches are identical -- this looks
            # like a Django-version compat shim gone wrong (the except
            # branch presumably targeted the pre-1.9 `field.rel`)
            try:
                field.remote_field = copy.copy(field.remote_field)
            except AttributeError:
                field.remote_field = copy.copy(field.remote_field)
            if isinstance(field, OrderWrt):
                # OrderWrt is a proxy field, switch to a plain IntegerField
                field.__class__ = models.IntegerField
            if isinstance(field, models.ForeignKey):
                old_field = field
                field_arguments = {'db_constraint': False}
                # OneToOne fields are downgraded to plain FKs: several
                # historical rows may reference the same target
                if (getattr(old_field, 'one_to_one', False) or
                        isinstance(old_field, models.OneToOneField)):
                    FieldType = models.ForeignKey
                else:
                    FieldType = type(old_field)
                if getattr(old_field, 'to_fields', []):
                    field_arguments['to_field'] = old_field.to_fields[0]
                if getattr(old_field, 'db_column', None):
                    field_arguments['db_column'] = old_field.db_column
                field = FieldType(
                    old_field.remote_field.model,
                    related_name='+',
                    null=True,
                    blank=True,
                    primary_key=False,
                    db_index=True,
                    serialize=True,
                    unique=False,
                    on_delete=models.DO_NOTHING,
                    **field_arguments
                )
                field.name = old_field.name
            else:
                transform_field(field)
            fields[field.name] = field
        return fields
    def get_extra_fields(self, model, fields):
        """Return dict of extra fields added to the historical record model"""
        user_model = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
        @models.permalink
        def revert_url(self):
            """URL for this change in the default admin site."""
            opts = model._meta
            app_label, model_name = opts.app_label, opts.model_name
            return ('%s:%s_%s_simple_history' %
                    (admin.site.name, app_label, model_name),
                    [getattr(self, opts.pk.attname), self.history_id])
        def get_instance(self):
            # rebuild a live model instance from the historical snapshot
            return model(**{
                field.attname: getattr(self, field.attname)
                for field in fields.values()
            })
        return {
            'history_id': models.AutoField(primary_key=True),
            'history_date': models.DateTimeField(),
            'history_user': models.ForeignKey(
                user_model, null=True, related_name=self.user_related_name,
                on_delete=models.SET_NULL),
            'history_type': models.CharField(max_length=1, choices=(
                ('+', 'Created'),
                ('~', 'Changed'),
                ('-', 'Deleted'),
            )),
            'history_object': HistoricalObjectDescriptor(model),
            'instance': property(get_instance),
            'instance_type': model,
            'revert_url': revert_url,
            '__str__': lambda self: '%s as of %s' % (self.history_object,
                                                     self.history_date)
        }
    def get_meta_options(self, model):
        """
        Returns a dictionary of fields that will be added to
        the Meta inner class of the historical record model.
        """
        meta_fields = {
            'ordering': ('-history_date', '-history_id'),
            'get_latest_by': 'history_date',
        }
        if self.user_set_verbose_name:
            name = self.user_set_verbose_name
        else:
            name = string_concat('historical ',
                                 smart_text(model._meta.verbose_name))
        meta_fields['verbose_name'] = name
        return meta_fields
    def post_save(self, instance, created, **kwargs):
        """Record '+' on create, '~' on update; honors the
        skip_history_when_saving flag and skips raw (fixture) saves."""
        if not created and hasattr(instance, 'skip_history_when_saving'):
            return
        if not kwargs.get('raw', False):
            self.create_historical_record(instance, created and '+' or '~')
    def pre_delete(self, instance, **kwargs):
        """
        Creates deletion records for the through model of m2m fields. Also creates change records for objects on the
        other side of the m2m relationship.
        """
        for m2m_field in instance._meta.many_to_many:
            through_model = m2m_field.remote_field.through
            # only through models that are themselves history-tracked
            if hasattr(through_model._meta, 'simple_history_manager_attribute'):
                items = through_model.objects.filter(Q(**{m2m_field.m2m_column_name(): instance.pk}))
                for item in items:
                    self.create_historical_record(item, '-')
                for related in m2m_field.value_from_object(instance):
                    self.create_historical_record(related, '~')
    def post_delete(self, instance, **kwargs):
        """Record a '-' historical row for the deleted instance."""
        self.create_historical_record(instance, '-')
    def m2m_changed(self, action, instance, sender, **kwargs):
        """
        ``m2m_changed`` handler for tracked through models: records
        '+'/'-' rows for the affected through rows and '~' rows for the
        objects on the other side when a relation is cleared/re-added.
        """
        # locate the FK descriptors on the through model that point at
        # the source (instance side) and the target (kwargs['model'] side)
        source_field_name, target_field_name = None, None
        for field_name, field_value in sender.__dict__.items():
            if isinstance(field_value, ManyToOneDescriptor):
                if field_value.field.remote_field.model == kwargs['model']:
                    target_field_name = field_name
                elif isinstance(instance, field_value.field.remote_field.model):
                    source_field_name = field_name
        items = sender.objects.filter(**{source_field_name: instance})
        if kwargs['pk_set']:
            items = items.filter(**{target_field_name + '__id__in': kwargs['pk_set']})
        for item in items:
            if action == 'post_add':
                if hasattr(item, 'skip_history_when_saving'):
                    continue
                self.create_historical_record(item, '+')
            elif action == 'pre_remove':
                self.create_historical_record(item, '-')
            elif action == 'pre_clear':
                self.create_historical_record(item, '-')
        if action == 'pre_clear':
            # remember the cleared rows so a following post_add can tell
            # which targets really changed
            setattr(instance, '__pre_clear_items', items)
        elif action == 'post_add' and hasattr(instance, '__pre_clear_items'):
            other_items = getattr(instance, '__pre_clear_items')
            # '~' for targets that were removed by the clear ...
            for item in other_items:
                target = getattr(item, target_field_name)
                if has_m2m_field(target, sender) and not [i for i in items if target == getattr(i, target_field_name)]:
                    self.create_historical_record(target, '~')
            # ... and for targets that are newly added
            for item in items:
                target = getattr(item, target_field_name)
                if has_m2m_field(target, sender) and not [
                    i for i in other_items if target == getattr(i, target_field_name)
                ]:
                    self.create_historical_record(target, '~')
            delattr(instance, '__pre_clear_items')
    def create_historical_record(self, instance, history_type):
        """Write one historical row for *instance* with the given type
        ('+', '~' or '-'); honors an explicit ``_history_date``."""
        history_date = getattr(instance, '_history_date', now())
        history_user = self.get_history_user(instance)
        manager = getattr(instance, self.manager_name)
        attrs = {}
        for field in instance._meta.fields:
            attrs[field.attname] = getattr(instance, field.attname)
        manager.create(history_date=history_date, history_type=history_type,
                       history_user=history_user, **attrs)
    def get_history_user(self, instance):
        """Get the modifying user from instance or middleware."""
        try:
            return instance._history_user
        except AttributeError:
            try:
                # thread.request is set by middleware; anonymous users
                # yield None
                if self.thread.request.user.is_authenticated:
                    return self.thread.request.user
                return None
            except AttributeError:
                return None
def transform_field(field):
    """Customize field appropriately for use in historical model.

    Mutates *field* in place (returns None): drops uniqueness and
    primary-key status, disables auto timestamps and swaps classes where
    the original type cannot be reused on a historical table.
    """
    field.name = field.attname
    if isinstance(field, models.AutoField):
        field.__class__ = convert_auto_field(field)
    elif isinstance(field, models.FileField):
        # Don't copy file, just path.
        field.__class__ = models.TextField
    # Historical instance shouldn't change create/update timestamps
    field.auto_now = False
    field.auto_now_add = False
    if field.primary_key or field.unique:
        # Unique fields can no longer be guaranteed unique,
        # but they should still be indexed for faster lookups.
        field.primary_key = False
        field._unique = False
        field.db_index = True
    field.serialize = True
def has_m2m_field(instance, through):
    """Return True when *through* is the through model of any m2m field
    declared on *instance*'s model."""
    return any(through is m2m_field.remote_field.through
               for m2m_field in instance._meta.many_to_many)
def convert_auto_field(field):
    """Convert AutoField to a non-incrementing type

    The historical model gets its own AutoField, so any existing one
    must be replaced with an IntegerField.
    """
    connection = router.db_for_write(field.model)
    engine = settings.DATABASES[connection]['ENGINE']
    if engine in ('django_mongodb_engine',):
        # django-non-rel stores AutoField keys as strings
        return models.TextField
    return models.IntegerField
class HistoricalObjectDescriptor(object):
    """Descriptor that rebuilds a live model instance from the field
    values stored on a historical row."""

    def __init__(self, model):
        self.model = model

    def __get__(self, instance, owner):
        field_values = [getattr(instance, field.attname)
                        for field in self.model._meta.fields]
        return self.model(*field_values)
| [
"simple_history.register",
"threading.local",
"importlib.import_module",
"django.db.models.signals.class_prepared.connect",
"django.db.models.signals.post_delete.connect",
"django.db.models.signals.m2m_changed.connect",
"django.db.models.signals.post_save.connect",
"django.db.models.ForeignKey",
"dj... | [((1037, 1114), 'south.modelsinspector.add_introspection_rules', 'add_introspection_rules', (['[]', "['^simple_history.models.CustomForeignKeyField']"], {}), "([], ['^simple_history.models.CustomForeignKeyField'])\n", (1060, 1114), False, 'from south.modelsinspector import add_introspection_rules\n'), ((1524, 1541), 'threading.local', 'threading.local', ([], {}), '()\n', (1539, 1541), False, 'import threading\n'), ((18027, 18059), 'django.db.router.db_for_write', 'router.db_for_write', (['field.model'], {}), '(field.model)\n', (18046, 18059), False, 'from django.db import models, router\n'), ((2290, 2354), 'django.db.models.signals.class_prepared.connect', 'models.signals.class_prepared.connect', (['self.finalize'], {'weak': '(False)'}), '(self.finalize, weak=False)\n', (2327, 2354), False, 'from django.db import models, router\n'), ((6688, 6763), 'django.db.models.signals.post_save.connect', 'models.signals.post_save.connect', (['self.post_save'], {'sender': 'sender', 'weak': '(False)'}), '(self.post_save, sender=sender, weak=False)\n', (6720, 6763), False, 'from django.db import models, router\n'), ((6813, 6890), 'django.db.models.signals.pre_delete.connect', 'models.signals.pre_delete.connect', (['self.pre_delete'], {'sender': 'sender', 'weak': '(False)'}), '(self.pre_delete, sender=sender, weak=False)\n', (6846, 6890), False, 'from django.db import models, router\n'), ((6941, 7020), 'django.db.models.signals.post_delete.connect', 'models.signals.post_delete.connect', (['self.post_delete'], {'sender': 'sender', 'weak': '(False)'}), '(self.post_delete, sender=sender, weak=False)\n', (6975, 7020), False, 'from django.db import models, router\n'), ((7072, 7151), 'django.db.models.signals.m2m_changed.connect', 'models.signals.m2m_changed.connect', (['self.m2m_changed'], {'sender': 'sender', 'weak': '(False)'}), '(self.m2m_changed, sender=sender, weak=False)\n', (7106, 7151), False, 'from django.db import models, router\n'), ((6458, 6494), 
'importlib.import_module', 'importlib.import_module', (['self.module'], {}), '(self.module)\n', (6481, 6494), False, 'import importlib\n'), ((8966, 8982), 'copy.copy', 'copy.copy', (['field'], {}), '(field)\n', (8975, 8982), False, 'import copy\n'), ((11424, 11458), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (11440, 11458), False, 'from django.db import models, router\n'), ((11488, 11510), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (11508, 11510), False, 'from django.db import models, router\n'), ((11540, 11649), 'django.db.models.ForeignKey', 'models.ForeignKey', (['user_model'], {'null': '(True)', 'related_name': 'self.user_related_name', 'on_delete': 'models.SET_NULL'}), '(user_model, null=True, related_name=self.\n user_related_name, on_delete=models.SET_NULL)\n', (11557, 11649), False, 'from django.db import models, router\n'), ((11707, 11805), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': "(('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted'))"}), "(max_length=1, choices=(('+', 'Created'), ('~', 'Changed'),\n ('-', 'Deleted')))\n", (11723, 11805), False, 'from django.db import models, router\n'), ((16047, 16052), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (16050, 16052), False, 'from django.utils.timezone import now\n'), ((6188, 6231), 'simple_history.register', 'register', (['original_class'], {}), '(original_class, **register_kwargs)\n', (6196, 6231), False, 'from simple_history import register\n'), ((9037, 9066), 'copy.copy', 'copy.copy', (['field.remote_field'], {}), '(field.remote_field)\n', (9046, 9066), False, 'import copy\n'), ((12712, 12748), 'django.utils.encoding.smart_text', 'smart_text', (['model._meta.verbose_name'], {}), '(model._meta.verbose_name)\n', (12722, 12748), False, 'from django.utils.encoding import smart_text\n'), ((9139, 9168), 'copy.copy', 'copy.copy', (['field.remote_field'], {}), 
'(field.remote_field)\n', (9148, 9168), False, 'import copy\n'), ((3811, 3834), 'simple_history.register', 'register', (['through_model'], {}), '(through_model)\n', (3819, 3834), False, 'from simple_history import register\n'), ((4697, 4720), 'simple_history.register', 'register', (['through_model'], {}), '(through_model)\n', (4705, 4720), False, 'from simple_history import register\n'), ((7988, 8018), 'django.db.models.get_app', 'get_app', (['model._meta.app_label'], {}), '(model._meta.app_label)\n', (7995, 8018), False, 'from django.db.models import get_app\n')] |
import tkinter as tk
import threading
import os
from pathlib import Path
class PDFExtractionThread(threading.Thread):
    """Background worker that runs a PDF extraction without blocking the UI.

    The thread is a daemon so it does not keep the process alive if the
    GUI is closed mid-extraction.
    """

    def __init__(self, pdf_extractor, output_filename):
        """
        Args:
            pdf_extractor: object exposing ``extract_to_one_pdf(filename)``.
            output_filename: path of the combined PDF to write.
        """
        # FIX: Thread.setDaemon() is deprecated (since Python 3.10);
        # pass daemon=True to the constructor instead.
        super().__init__(daemon=True)
        self.pdf_extractor = pdf_extractor
        self.output_filename = output_filename

    def run(self):
        # Runs on the worker thread once start() is called.
        self.pdf_extractor.extract_to_one_pdf(self.output_filename)
class ProgressSceneController:
    """Drives the progress scene: relays extractor events to the UI widgets."""

    def __init__(self, parent, pdf_extractor, progress_label, button):
        """
        Args:
            parent: toplevel window; destroyed by ``button_action``.
            pdf_extractor: extractor exposing ``set_callback`` and a
                ``pattern`` attribute.
            progress_label: widget with a ``change_text(str)`` method.
            button: widget whose ``'state'`` item is re-enabled when done.
        """
        self.parent = parent
        self.pdf_extractor = pdf_extractor
        self.progress_label = progress_label
        self.button = button
        self.pdf_extractor.set_callback(self.pdf_extractor_callback)
        self.number_of_patterns_found = 0

    def pdf_extractor_callback(self, filename, page_num, event):
        """Handle a progress event emitted by the extractor."""
        if event == 'page iterated':
            # Show only the base name, not the full path.
            filename = Path(filename).name
            # BUG FIX: the basename was computed but never used — the label
            # showed a literal "(unknown)" instead of the file being analyzed.
            self.progress_label.change_text(
                f'Analyzing pages of {filename}:\n{page_num}'
            )
        if event == 'writing pdf' or event == 'closing files':
            self.progress_label.change_text('Saving ...')
        if event == 'pattern found':
            self.number_of_patterns_found += 1
        if event == 'done':
            occurrences = self.number_of_patterns_found
            self.progress_label.change_text(
                'Done. '
                + f'{occurrences} Occurrences of '
                + f'"{self.pdf_extractor.pattern}".'
            )
            # Re-enable the button so the user can dismiss the scene.
            self.button['state'] = 'normal'

    def start_pdf_extraction(self, output_filename):
        """Kick off the extraction on a background daemon thread."""
        self.extraction_thread = PDFExtractionThread(self.pdf_extractor,
                                                    output_filename)
        self.extraction_thread.start()

    def button_action(self):
        """Close the progress window."""
        self.parent.destroy()
| [
"pathlib.Path"
] | [((899, 913), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (903, 913), False, 'from pathlib import Path\n')] |
from transitions.extensions import GraphMachine
class TocMachine(GraphMachine):
    """Finite-state machine for a gas-station chat bot.

    ``is_going_to_*`` guards match the incoming message text against a
    keyword; ``on_enter_*`` callbacks reply with the next prompt.
    """

    def __init__(self, **machine_configs):
        # NOTE(review): super().__init__() is never called; instead a second
        # GraphMachine is built with this object as its model. Confirm that
        # subclassing GraphMachine (rather than plain object) is intentional.
        self.machine = GraphMachine(
            model = self,
            **machine_configs
        )

    # ---- transition guards: True when the user's message matches ----------

    def is_going_to_92(self, update):
        text = update.message.text
        return text.lower() == '92'

    def is_going_to_95(self, update):
        text = update.message.text
        return text.lower() == '95'

    def is_going_to_98(self, update):
        text = update.message.text
        return text.lower() == '98'

    def is_going_to_amo(self, update):
        text = update.message.text
        return text.lower() == 'amount'

    def is_going_to_full(self, update):
        text = update.message.text
        return text.lower() == 'full'

    def is_going_to_cash(self, update):
        text = update.message.text
        return text.lower() == 'cash'

    def is_going_to_credit(self, update):
        text = update.message.text
        return text.lower() == 'credit card'

    # ---- state-entry callbacks: prompt for the next choice ----------------

    def on_enter_92(self, update):
        update.message.reply_text("you need to 92")
        update.message.reply_text("full or amount of oil")

    def on_enter_95(self, update):
        update.message.reply_text("you need to 95")
        update.message.reply_text("full or amount of oil")

    def on_enter_98(self, update):
        update.message.reply_text("you need to 98")
        update.message.reply_text("full or amount of oil")

    def on_enter_full(self, update):
        update.message.reply_text("you need to make it full")
        update.message.reply_text("pay by cash or credit card")

    def on_enter_amo(self, update):
        update.message.reply_text("you need to get some oil")
        update.message.reply_text("pay by cash or credit")

    def on_enter_cash(self, update):
        update.message.reply_text("you need to pay by cash")
        update.message.reply_text("thanks")
        self.go_back(update)

    def on_enter_credit(self, update):
        # BUG FIX: user-facing typo "needto" -> "need to".
        update.message.reply_text("you need to pay by credit card")
        update.message.reply_text("thanks")
        self.go_back(update)
| [
"transitions.extensions.GraphMachine"
] | [((148, 191), 'transitions.extensions.GraphMachine', 'GraphMachine', ([], {'model': 'self'}), '(model=self, **machine_configs)\n', (160, 191), False, 'from transitions.extensions import GraphMachine\n')] |
from ctypes import windll, c_void_p, POINTER, c_size_t, Structure, c_uint64, c_uint32, sizeof, c_wchar, c_wchar_p, byref
from ctypes.wintypes import DWORD
from pprint import pprint
from inc.errors import GWErrors
from inc.system_info import GWSystemInfo
class MEMORY_BASIC_INFORMATION(Structure):
    """https://msdn.microsoft.com/en-us/library/aa366775"""
    # ctypes mirror of the Win32 MEMORY_BASIC_INFORMATION structure.
    # BaseAddress/AllocationBase are declared c_uint64 (instead of c_void_p)
    # so they read back as plain integers for 64-bit address arithmetic.
    _fields_ = (('BaseAddress', c_uint64),
                ('AllocationBase', c_uint64),
                ('AllocationProtect', DWORD),
                ('RegionSize', c_size_t),
                ('State', DWORD),
                ('Protect', DWORD),
                ('Type', DWORD))
# Memory-state constants (winnt.h) — values of MEMORY_BASIC_INFORMATION.State.
MEM_COMMIT = 0x1000
MEM_FREE = 0x10000
MEM_RESERVE = 0x2000

# Region-type constants — values of MEMORY_BASIC_INFORMATION.Type.
MEM_IMAGE = 0x1000000
MEM_MAPPED = 0x40000
MEM_PRIVATE = 0x20000

# Page-protection constants — values of MEMORY_BASIC_INFORMATION.Protect.
PAGE_EXECUTE = 0x10
PAGE_EXECUTE_READ = 0x20
PAGE_EXECUTE_READWRITE = 0x40
PAGE_EXECUTE_WRITECOPY = 0x80
PAGE_NOACCESS = 0x01
PAGE_READONLY = 0x02
PAGE_READWRITE = 0x04
PAGE_WRITECOPY = 0x08
PAGE_TARGETS_INVALID = 0x40000000
PAGE_TARGETS_NO_UPDATE = 0x40000000  # same bit as PAGE_TARGETS_INVALID per winnt.h
PAGE_GUARD = 0x100
PAGE_NOCACHE = 0x200
PAGE_WRITECOMBINE = 0x400

# kernel32!VirtualQueryEx(hProcess, lpAddress, lpBuffer, dwLength)
VirtualQueryEx = windll.kernel32.VirtualQueryEx
VirtualQueryEx.argtypes = [ c_void_p, c_void_p, POINTER(MEMORY_BASIC_INFORMATION), c_size_t ]
# BUG FIX: the ctypes attribute is 'restype', not 'rettype'. Setting
# 'rettype' is a silent no-op and leaves the default c_int return type,
# which truncates the returned size on 64-bit.
VirtualQueryEx.restype = c_size_t

# shlwapi!StrFormatByteSizeW(qdw, pszBuf, cchBuf)
StrFormatByteSize = windll.shlwapi.StrFormatByteSizeW
StrFormatByteSize.argtypes = [ c_uint64, POINTER(c_wchar), c_uint32 ]
StrFormatByteSize.restype = c_wchar_p  # BUG FIX: 'restype', not 'rettype'
class GWVirtualMemory:
    """Enumerates committed, read-write memory regions of a target process
    via VirtualQueryEx and caches them keyed by base address."""

    # NOTE(review): these are *class* attributes shared by all instances;
    # __init__ rebinds most of them per instance, but GWErrors() is
    # constructed once at class-definition time.
    si: GWSystemInfo = None
    memory: dict = dict()
    err: GWErrors = GWErrors()
    handle = None
    count: int = 0
    size: c_uint64 = 0

    def __init__(self, handle: c_void_p = None, si: GWSystemInfo = None):
        """
        :param handle: optional process handle to attach to.
        :param si: optional GWSystemInfo; a fresh one is built if omitted.
        """
        self.clear_memory_list()
        if handle:
            self.handle = handle
        self.si = si if si is not None else GWSystemInfo()

    def clear_memory_list(self):
        """Drop all cached region records."""
        self.memory = dict()

    def handle_set(self, in_handle):
        """Attach a process handle."""
        self.handle = in_handle

    def handle_remove(self):
        """Detach the process handle."""
        self.handle = None

    def get_memory_information_by_address(self, in_address: c_uint64 = 0):
        """Return the MEMORY_BASIC_INFORMATION of the region containing
        ``in_address``, or False when no handle is set / the query fails."""
        if not self.handle:
            return False
        mbi = MEMORY_BASIC_INFORMATION()
        if not VirtualQueryEx(self.handle, in_address, mbi, sizeof(mbi)):
            return False
        return mbi

    def enum_memory_from_to(self, in_from: c_uint64 = 0, in_to: c_uint64 = 0):
        """Walk [in_from, in_to] and cache every committed, read-write
        region in ``self.memory``; update ``count`` and ``size``.
        Returns False on error."""
        self.clear_memory_list()
        if not self.handle:
            return False
        # Clamp the scan window to the application address space reported
        # by the system-info helper.
        addr_max: c_uint64 = in_to
        addr_min: c_uint64 = in_from
        if addr_max < self.si.lpMaximumApplicationAddress:
            addr_max = self.si.lpMaximumApplicationAddress - 1
        if addr_min < self.si.lpMinimumApplicationAddress:
            addr_min = self.si.lpMinimumApplicationAddress + 1
        address = addr_min
        while address < addr_max:
            mbi = self.get_memory_information_by_address(address)
            if mbi is False:
                print("Error: {} Base: 0x{:016X}".format(
                    self.err.get_error_string(),
                    address
                ))
                return False
            # BUG FIX: the original tested the flags with the logical 'and'
            # operator, which is truthy for ANY non-zero State/Protect value;
            # testing flag membership requires bitwise '&'.
            if (mbi.State & MEM_COMMIT) and (mbi.Protect & PAGE_READWRITE):
                self.memory[mbi.BaseAddress] = mbi
            # Jump to the first address past the current region.
            address = c_uint64(mbi.BaseAddress).value + c_uint64(mbi.RegionSize).value + 1
        self.count = len(self.memory)
        self.size = sum(m.RegionSize for m in self.memory.values())

    def get_count(self):
        """Number of regions cached by the last enumeration."""
        return self.count

    def get_size_in_byte(self):
        """Total cached region size formatted as a human-readable string."""
        return self.get_sizeof_fmt(self.size)

    def get_sizeof_fmt(self, num, suffix='B'):
        """Format ``num`` bytes with a binary (Ki/Mi/...) unit prefix."""
        for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
            if abs(num) < 1024.0:
                return "%3.1f%s%s" % (num, unit, suffix)
            num /= 1024.0
        return "%.1f %s%s" % (num, 'Yi', suffix)
| [
"ctypes.POINTER",
"inc.errors.GWErrors",
"ctypes.windll.kernel32.GetProcessId",
"inc.system_info.GWSystemInfo",
"ctypes.c_uint64",
"ctypes.sizeof"
] | [((1493, 1526), 'ctypes.POINTER', 'POINTER', (['MEMORY_BASIC_INFORMATION'], {}), '(MEMORY_BASIC_INFORMATION)\n', (1500, 1526), False, 'from ctypes import windll, c_void_p, POINTER, c_size_t, Structure, c_uint64, c_uint32, sizeof, c_wchar, c_wchar_p, byref\n'), ((1721, 1737), 'ctypes.POINTER', 'POINTER', (['c_wchar'], {}), '(c_wchar)\n', (1728, 1737), False, 'from ctypes import windll, c_void_p, POINTER, c_size_t, Structure, c_uint64, c_uint32, sizeof, c_wchar, c_wchar_p, byref\n'), ((1930, 1940), 'inc.errors.GWErrors', 'GWErrors', ([], {}), '()\n', (1938, 1940), False, 'from inc.errors import GWErrors\n'), ((3651, 3662), 'ctypes.sizeof', 'sizeof', (['mbi'], {}), '(mbi)\n', (3657, 3662), False, 'from ctypes import windll, c_void_p, POINTER, c_size_t, Structure, c_uint64, c_uint32, sizeof, c_wchar, c_wchar_p, byref\n'), ((4552, 4593), 'ctypes.windll.kernel32.GetProcessId', 'windll.kernel32.GetProcessId', (['self.handle'], {}), '(self.handle)\n', (4580, 4593), False, 'from ctypes import windll, c_void_p, POINTER, c_size_t, Structure, c_uint64, c_uint32, sizeof, c_wchar, c_wchar_p, byref\n'), ((2476, 2490), 'inc.system_info.GWSystemInfo', 'GWSystemInfo', ([], {}), '()\n', (2488, 2490), False, 'from inc.system_info import GWSystemInfo\n'), ((4765, 4790), 'ctypes.c_uint64', 'c_uint64', (['mbi.BaseAddress'], {}), '(mbi.BaseAddress)\n', (4773, 4790), False, 'from ctypes import windll, c_void_p, POINTER, c_size_t, Structure, c_uint64, c_uint32, sizeof, c_wchar, c_wchar_p, byref\n'), ((4829, 4853), 'ctypes.c_uint64', 'c_uint64', (['mbi.RegionSize'], {}), '(mbi.RegionSize)\n', (4837, 4853), False, 'from ctypes import windll, c_void_p, POINTER, c_size_t, Structure, c_uint64, c_uint32, sizeof, c_wchar, c_wchar_p, byref\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-13 19:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django 1.10 migration for the ``btc`` app.

    Adds four fields: ``escrowpayoutledger.validated``,
    ``profil.in_escrow``, ``profil.wif`` and ``sysvar.counter``.
    """

    dependencies = [
        ('btc', '0023_auto_20160923_1904'),
    ]

    operations = [
        migrations.AddField(
            model_name='escrowpayoutledger',
            name='validated',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='profil',
            name='in_escrow',
            field=models.FloatField(default=0),
        ),
        migrations.AddField(
            model_name='profil',
            name='wif',
            # NOTE(review): "wif" presumably refers to a Bitcoin
            # Wallet-Import-Format key — confirm against the model code.
            field=models.CharField(default=0, max_length=255),
        ),
        migrations.AddField(
            model_name='sysvar',
            name='counter',
            field=models.IntegerField(default=1),
        ),
    ]
| [
"django.db.models.FloatField",
"django.db.models.IntegerField",
"django.db.models.CharField",
"django.db.models.BooleanField"
] | [((410, 444), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (429, 444), False, 'from django.db import migrations, models\n'), ((567, 595), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (584, 595), False, 'from django.db import migrations, models\n'), ((712, 755), 'django.db.models.CharField', 'models.CharField', ([], {'default': '(0)', 'max_length': '(255)'}), '(default=0, max_length=255)\n', (728, 755), False, 'from django.db import migrations, models\n'), ((876, 906), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (895, 906), False, 'from django.db import migrations, models\n')] |
from dsbattle_api_app.core.database import Base
from sqlalchemy import Column, Integer, Float, VARCHAR, DateTime
class Submit(Base):
    """SQLAlchemy ORM model for a submission row (table ``dsbt_submits``)."""
    __tablename__ = "dsbt_submits"

    id = Column(Integer, primary_key=True, index=True)
    bid = Column(Integer, nullable=False, index=True)  # presumably a battle/benchmark id — confirm against callers
    hid = Column(Integer, nullable=False, index=True)
    submit_dt = Column(DateTime, nullable=False)  # submission timestamp
    uid = Column(Integer, nullable=False, index=True)  # presumably the submitting user's id — confirm against callers
    # NOTE(review): attribute name misspells "precision"; renaming would
    # change the ORM interface, so it is documented rather than fixed here.
    precission = Column(Float, nullable=False, default=0)
    private_score = Column(Float, nullable=False, default=0)
    public_score = Column(Float, nullable=False, default=0)
    recall = Column(Float, nullable=False, default=0)
    # NOTE(review): integer default (0) on a VARCHAR column — confirm intended.
    comment = Column(VARCHAR(100), nullable=False, default=0)
    f_measure = Column(Float, nullable=False, default=0)
| [
"sqlalchemy.VARCHAR",
"sqlalchemy.Column"
] | [((180, 225), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'index': '(True)'}), '(Integer, primary_key=True, index=True)\n', (186, 225), False, 'from sqlalchemy import Column, Integer, Float, VARCHAR, DateTime\n'), ((236, 279), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)', 'index': '(True)'}), '(Integer, nullable=False, index=True)\n', (242, 279), False, 'from sqlalchemy import Column, Integer, Float, VARCHAR, DateTime\n'), ((290, 333), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)', 'index': '(True)'}), '(Integer, nullable=False, index=True)\n', (296, 333), False, 'from sqlalchemy import Column, Integer, Float, VARCHAR, DateTime\n'), ((350, 382), 'sqlalchemy.Column', 'Column', (['DateTime'], {'nullable': '(False)'}), '(DateTime, nullable=False)\n', (356, 382), False, 'from sqlalchemy import Column, Integer, Float, VARCHAR, DateTime\n'), ((393, 436), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)', 'index': '(True)'}), '(Integer, nullable=False, index=True)\n', (399, 436), False, 'from sqlalchemy import Column, Integer, Float, VARCHAR, DateTime\n'), ((455, 495), 'sqlalchemy.Column', 'Column', (['Float'], {'nullable': '(False)', 'default': '(0)'}), '(Float, nullable=False, default=0)\n', (461, 495), False, 'from sqlalchemy import Column, Integer, Float, VARCHAR, DateTime\n'), ((516, 556), 'sqlalchemy.Column', 'Column', (['Float'], {'nullable': '(False)', 'default': '(0)'}), '(Float, nullable=False, default=0)\n', (522, 556), False, 'from sqlalchemy import Column, Integer, Float, VARCHAR, DateTime\n'), ((576, 616), 'sqlalchemy.Column', 'Column', (['Float'], {'nullable': '(False)', 'default': '(0)'}), '(Float, nullable=False, default=0)\n', (582, 616), False, 'from sqlalchemy import Column, Integer, Float, VARCHAR, DateTime\n'), ((630, 670), 'sqlalchemy.Column', 'Column', (['Float'], {'nullable': '(False)', 'default': '(0)'}), '(Float, nullable=False, default=0)\n', (636, 
670), False, 'from sqlalchemy import Column, Integer, Float, VARCHAR, DateTime\n'), ((749, 789), 'sqlalchemy.Column', 'Column', (['Float'], {'nullable': '(False)', 'default': '(0)'}), '(Float, nullable=False, default=0)\n', (755, 789), False, 'from sqlalchemy import Column, Integer, Float, VARCHAR, DateTime\n'), ((692, 704), 'sqlalchemy.VARCHAR', 'VARCHAR', (['(100)'], {}), '(100)\n', (699, 704), False, 'from sqlalchemy import Column, Integer, Float, VARCHAR, DateTime\n')] |
import sys, os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
import unittest
from fcmpy.simulator.simulator import FcmSimulator
import pandas as pd
class TestSimulator(unittest.TestCase):
    """Regression tests for FcmSimulator against the reference results of
    the R ``fcm`` inference package by Dikopoulou & Papageorgiou."""

    def setUp(self):
        """Build the 8-concept weight matrix, the initial state vector and
        a fresh simulator for each test."""
        C1 = [0.0, 0.0, 0.6, 0.9, 0.0, 0.0, 0.0, 0.8]
        C2 = [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2, 0.5]
        C3 = [0.0, 0.7, 0.0, 0.0, 0.9, 0.0, 0.4, 0.1]
        C4 = [0.4, 0.0, 0.0, 0.0, 0.0, 0.9, 0.0, 0.0]
        C5 = [0.0, 0.0, 0.0, 0.0, 0.0, -0.9, 0.0, 0.3]
        C6 = [-0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        C7 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.8, 0.4, 0.9]
        C8 = [0.1, 0.0, 0.0, 0.0, 0.0, 0.1, 0.6, 0.0]
        # FIX: removed an identical, unused DataFrame (``df``) that was
        # built right before self.weight_matrix.
        self.weight_matrix = pd.DataFrame([C1, C2, C3, C4, C5, C6, C7, C8],
                                          columns=['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8'])
        self.init_state = {'C1': 1, 'C2': 1, 'C3': 0, 'C4': 0, 'C5': 0,
                           'C6': 0, 'C7': 0, 'C8': 0}
        self.sim = FcmSimulator()

    def test_simulation(self):
        """Equilibria for kosko / mKosko / rescaled inference must match
        the R reference values to 4 decimal places."""
        res_k = self.sim.simulate(initial_state=self.init_state, weight_matrix=self.weight_matrix, transfer='sigmoid', inference='kosko', thresh=0.001, iterations=50, l=1)
        res_mK = self.sim.simulate(initial_state=self.init_state, weight_matrix=self.weight_matrix, transfer='sigmoid', inference='mKosko', thresh=0.001, iterations=50, l=1)
        res_r = self.sim.simulate(initial_state=self.init_state, weight_matrix=self.weight_matrix, transfer='sigmoid', inference='rescaled', thresh=0.001, iterations=50, l=1)

        eql_k = res_k.loc[len(res_k) - 1]     # equilibrium for Kosko's method
        eql_mK = res_mK.loc[len(res_mK) - 1]  # equilibrium for modified Kosko's method
        eql_r = res_r.loc[len(res_r) - 1]     # equilibrium for the rescaled method

        # Reference values produced by the fcm inference package in R
        # by Dikopoulou & Papageorgiou.
        equilibrium_mK = [0.7258851, 0.7907061, 0.7694508, 0.8124733, 0.8192938, 0.8399006, 0.9099403, 0.9557739]
        equilibrium_k = [0.5481291, 0.6004012, 0.5814756, 0.620872, 0.6279569, 0.6653852, 0.7617789, 0.8416584]
        equilibrium_r = [0.4998427, 0.4985174, 0.4989474, 0.4984212, 0.4980938, 0.4885541, 0.4855651, 0.4853266]

        self.assertEqual([round(i, 4) for i in eql_k], [round(i, 4) for i in equilibrium_k])
        self.assertEqual([round(i, 4) for i in eql_mK], [round(i, 4) for i in equilibrium_mK])
        self.assertEqual([round(i, 4) for i in eql_r], [round(i, 4) for i in equilibrium_r])

    def test_stableConcepts(self):
        """A concept with no incoming influence (zeroed column) must stay
        constant throughout the simulation."""
        self.weight_matrix['C1'] = 0
        res_k = self.sim.simulate(initial_state=self.init_state, weight_matrix=self.weight_matrix, transfer='sigmoid', inference='kosko', thresh=0.001, iterations=50, l=1)
        self.assertEqual(len(set(res_k['C1'])), 1)
if __name__ == '__main__':
unittest.main() | [
"sys.path.insert",
"pandas.DataFrame",
"unittest.main",
"fcmpy.simulator.simulator.FcmSimulator",
"os.path.abspath"
] | [((68, 103), 'sys.path.insert', 'sys.path.insert', (['(0)', "(myPath + '/../')"], {}), "(0, myPath + '/../')\n", (83, 103), False, 'import sys, os\n'), ((41, 66), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (56, 66), False, 'import sys, os\n'), ((3088, 3103), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3101, 3103), False, 'import unittest\n'), ((707, 815), 'pandas.DataFrame', 'pd.DataFrame', (['[C1, C2, C3, C4, C5, C6, C7, C8]'], {'columns': "['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8']"}), "([C1, C2, C3, C4, C5, C6, C7, C8], columns=['C1', 'C2', 'C3',\n 'C4', 'C5', 'C6', 'C7', 'C8'])\n", (719, 815), True, 'import pandas as pd\n'), ((862, 970), 'pandas.DataFrame', 'pd.DataFrame', (['[C1, C2, C3, C4, C5, C6, C7, C8]'], {'columns': "['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8']"}), "([C1, C2, C3, C4, C5, C6, C7, C8], columns=['C1', 'C2', 'C3',\n 'C4', 'C5', 'C6', 'C7', 'C8'])\n", (874, 970), True, 'import pandas as pd\n'), ((1127, 1141), 'fcmpy.simulator.simulator.FcmSimulator', 'FcmSimulator', ([], {}), '()\n', (1139, 1141), False, 'from fcmpy.simulator.simulator import FcmSimulator\n')] |
# Generated by Django 3.1.5 on 2021-01-19 08:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django 3.1 initial migration (second phase) for the
    ``finances`` app: wires up the foreign keys of Payment, Fee and Expense.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('finances', '0001_initial'),
        ('home', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='payment',
            name='client',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.client'),
        ),
        migrations.AddField(
            model_name='payment',
            name='fee',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='finances.fee', verbose_name='Payment For'),
        ),
        migrations.AddField(
            model_name='payment',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Added By'),
        ),
        migrations.AddField(
            model_name='fee',
            name='job',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.job'),
        ),
        migrations.AddField(
            model_name='expense',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Entered By'),
        ),
    ]
| [
"django.db.migrations.swappable_dependency",
"django.db.models.ForeignKey"
] | [((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((512, 597), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""home.client"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='home.client'\n )\n", (529, 597), False, 'from django.db import migrations, models\n'), ((710, 848), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""finances.fee"""', 'verbose_name': '"""Payment For"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='finances.fee', verbose_name='Payment For')\n", (727, 848), False, 'from django.db import migrations, models\n'), ((962, 1083), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Added By"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL, verbose_name='Added By')\n", (979, 1083), False, 'from django.db import migrations, models\n'), ((1192, 1298), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""home.job"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='home.job')\n", (1209, 1298), False, 'from django.db import migrations, models\n'), ((1412, 1535), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Entered By"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL, verbose_name='Entered By')\n", (1429, 1535), False, 'from django.db 
import migrations, models\n')] |
"""
Problem:
A bridge in a connected (undirected) graph is an edge that, if removed, causes the
graph to become disconnected. Find all the bridges in a graph.
"""
from sys import maxsize
from typing import Dict, List, Optional, Set, Tuple
from DataStructures.Graph import GraphUndirectedUnweighted
def get_bridges_helper(
    graph: GraphUndirectedUnweighted,
    node: int,
    visited: Set[int],
    parent: Dict[int, Optional[int]],
    low: Dict[int, int],
    disc: Dict[int, int],
    bridges: List[Tuple[int, int]],
) -> None:
    # find all bridges using dfs
    # Tarjan-style DFS: disc[v] is v's discovery time, low[v] the smallest
    # discovery time reachable from v's subtree using at most one back edge.
    # graph.time is the shared DFS clock (initialized by get_bridges).
    visited.add(node)
    disc[node] = graph.time
    low[node] = graph.time
    graph.time += 1
    for neighbour in graph.connections[node]:
        if neighbour not in visited:
            # Tree edge: recurse, then propagate the child's low-link up.
            parent[neighbour] = node
            get_bridges_helper(graph, neighbour, visited, parent, low, disc, bridges)
            # check if the subtree rooted with neighbour has a connection to one of the
            # ancestors of node
            low[node] = min(low[node], low[neighbour])
            # if the lowest vertex reachable from subtree under neighbour is below node
            # in DFS tree, then node-neighbour is a bridge
            if low[neighbour] > disc[node]:
                bridges.append((node, neighbour))
        elif neighbour != parent[node]:
            # Back edge to an ancestor (not the tree parent): update low-link.
            low[node] = min(low[node], disc[neighbour])
def get_bridges(graph: GraphUndirectedUnweighted) -> List[Tuple[int, int]]:
    """Return every bridge of ``graph`` as (node, neighbour) pairs."""
    nodes = graph.connections
    visited: Set[int] = set()
    disc = dict.fromkeys(nodes, maxsize)
    low = dict.fromkeys(nodes, maxsize)
    parent = dict.fromkeys(nodes)  # all parents start as None
    bridges: List[Tuple[int, int]] = []
    graph.time = 0  # shared DFS clock consumed by the helper
    for start in nodes:
        if start in visited:
            continue
        get_bridges_helper(graph, start, visited, parent, low, disc, bridges)
    return bridges
if __name__ == "__main__":
    # Graph 1: a triangle (0,1,2) with a path 0-3-4 hanging off it;
    # the path edges 0-3 and 3-4 are the bridges.
    g1 = GraphUndirectedUnweighted()
    g1.add_edge(1, 0)
    g1.add_edge(0, 2)
    g1.add_edge(2, 1)
    g1.add_edge(0, 3)
    g1.add_edge(3, 4)
    print("Bridges in first graph:")
    print(*get_bridges(g1))

    # Graph 2: a simple path 0-1-2-3; every edge is a bridge.
    g2 = GraphUndirectedUnweighted()
    g2.add_edge(0, 1)
    g2.add_edge(1, 2)
    g2.add_edge(2, 3)
    print("\nBridges in second graph:")
    print(*get_bridges(g2))

    # Graph 3: two cycles (0-1-2 and 1-3-5-4) sharing vertex 1, plus the
    # pendant edge 1-6 — only 1-6 is a bridge.
    g3 = GraphUndirectedUnweighted()
    g3.add_edge(0, 1)
    g3.add_edge(1, 2)
    g3.add_edge(2, 0)
    g3.add_edge(1, 3)
    g3.add_edge(1, 4)
    g3.add_edge(1, 6)
    g3.add_edge(3, 5)
    g3.add_edge(4, 5)
    print("\nBridges in third graph:")
    print(*get_bridges(g3))
"""
SPECS:
TIME COMPLEXITY: O(v + e)
SPACE COMPLEXITY: O(v)
"""
| [
"DataStructures.Graph.GraphUndirectedUnweighted"
] | [((1895, 1922), 'DataStructures.Graph.GraphUndirectedUnweighted', 'GraphUndirectedUnweighted', ([], {}), '()\n', (1920, 1922), False, 'from DataStructures.Graph import GraphUndirectedUnweighted\n'), ((2108, 2135), 'DataStructures.Graph.GraphUndirectedUnweighted', 'GraphUndirectedUnweighted', ([], {}), '()\n', (2133, 2135), False, 'from DataStructures.Graph import GraphUndirectedUnweighted\n'), ((2280, 2307), 'DataStructures.Graph.GraphUndirectedUnweighted', 'GraphUndirectedUnweighted', ([], {}), '()\n', (2305, 2307), False, 'from DataStructures.Graph import GraphUndirectedUnweighted\n')] |
#!/usr/bin/env python3
import sys
import csv
import os
import multiprocessing
from UDPNode import UDPNode
def spawn_node(node, edges):
    """Create and start a UDPNode for ``node``, wired to its direct neighbours.

    ``node`` is an (ip, mask, port) tuple; ``edges`` maps (node_a, node_b)
    pairs to link costs.
    """
    neighbors = {}
    for (endpoint_a, endpoint_b), cost in edges.items():
        if endpoint_a == node:
            neighbors[endpoint_b] = cost
        elif endpoint_b == node:
            neighbors[endpoint_a] = cost
    ip, mask, port = node
    fresh_node = UDPNode(ip, mask, port, neighbors)
    fresh_node.start_node()
def commands_from_csv(csv_file):
    """Read a topology CSV and spawn a console window per local node.

    Each CSV row describes one edge:
        ip_a, mask_a, port_a, ip_b, mask_b, port_b, cost

    A node is an (ip, mask, port) tuple. A ``UDPNode.py`` console window is
    spawned only for nodes whose IP matches the hardcoded local address.

    Args:
        csv_file: path of the topology CSV file.
    """
    # A graph is a set of nodes and a dict of edges -> cost.
    nodes = set()
    edges = {}
    # FIX: don't shadow the ``csv_file`` path argument with the file object.
    with open(csv_file, newline="") as handle:
        for row in csv.reader(handle):
            node_a = (row[0], int(row[1]), int(row[2]))
            node_b = (row[3], int(row[4]), int(row[5]))
            cost = int(row[6])
            nodes.add(node_a)
            nodes.add(node_b)
            edges[(node_a, node_b)] = cost
    for node in nodes:
        this_node = f"{node[0]} {node[1]} {node[2]}"
        # Build the "ip mask port cost ..." argument string for every
        # neighbour of this node.
        neighbors = ""
        for (node_a, node_b), cost in edges.items():
            if node_a == node:
                neighbors += f"{node_b[0]} {node_b[1]} {node_b[2]} {cost} "
            elif node_b == node:
                neighbors += f"{node_a[0]} {node_a[1]} {node_a[2]} {cost} "
        # TODO receive this IP as an arg
        if node[0] == '10.1.137.91':
            os.system(f"start cmd /c UDPNode.py {this_node} {neighbors}")
if __name__ == "__main__":
if len(sys.argv) > 1:
commands_from_csv(sys.argv[1])
else:
print("ARG!")
| [
"os.system",
"csv.reader",
"UDPNode.UDPNode"
] | [((409, 443), 'UDPNode.UDPNode', 'UDPNode', (['ip', 'mask', 'port', 'neighbors'], {}), '(ip, mask, port, neighbors)\n', (416, 443), False, 'from UDPNode import UDPNode\n'), ((652, 672), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (662, 672), False, 'import csv\n'), ((1411, 1472), 'os.system', 'os.system', (['f"""start cmd /c UDPNode.py {this_node} {neighbors}"""'], {}), "(f'start cmd /c UDPNode.py {this_node} {neighbors}')\n", (1420, 1472), False, 'import os\n')] |
import torch
import time
from utils import AverageMeter, ProgressMeter
from utils import accuracy
def train_step(train_loader, model, optimizer, epoch, device, writer, args):
    """Run one unsupervised training epoch, logging the running loss."""
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(len(train_loader),
                             [batch_time, data_time, losses],
                             prefix="Epoch: [{}]".format(epoch))

    model.train()  # enable training-mode behaviour (dropout, BN updates)

    tic = time.time()
    for step, (images, _) in enumerate(train_loader):
        # Time spent waiting on the data loader.
        data_time.update(time.time() - tic)

        images = images.to(device)
        loss = model(images)
        losses.update(loss.item(), images.size(0))

        # Standard optimizer step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - tic)
        tic = time.time()

        if (step + 1) % args.print_freq == 0:
            progress.display(step + 1)
            writer.add_scalar('training_loss', losses.avg,
                              epoch * len(train_loader) + step)
def validate_step(val_loader, model, device, epoch, writer, args):
    """Evaluate for one epoch and return the average validation loss."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(len(val_loader), [batch_time, losses],
                             prefix='Test: ')

    model.eval()  # evaluation mode: no dropout / frozen BN statistics

    with torch.no_grad():
        tic = time.time()
        for step, (images, _) in enumerate(val_loader):
            images = images.to(device)
            loss = model(images)
            losses.update(loss.item(), images.size(0))

            batch_time.update(time.time() - tic)
            tic = time.time()

            if step % args.print_freq == 0:
                progress.display(step)
                writer.add_scalar('validation_loss', losses.avg,
                                  epoch * len(val_loader) + step)

    # TODO: this should also be done with the ProgressMeter
    print(' * Loss {losses.avg:.3f}'.format(losses=losses))
    return losses.avg
def fine_tune_train_step(train_loader, model, criterion, optimizer, epoch,
                         device, writer, args):
    """Run one supervised fine-tuning epoch; logs loss and top-1/top-5."""
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader),
                             [batch_time, data_time, losses, top1, top5],
                             prefix="Epoch: [{}]".format(epoch))

    model.train()  # training-mode behaviour (dropout, BN updates)

    tic = time.time()
    for step, (images, target) in enumerate(train_loader):
        # Time spent waiting on the data loader.
        data_time.update(time.time() - tic)

        images = images.to(device)
        target = target.to(device)

        output = model(images)
        loss = criterion(output, target)

        # Track loss and accuracy, weighted by batch size.
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch_size = images.size(0)
        losses.update(loss.item(), batch_size)
        top1.update(acc1[0], batch_size)
        top5.update(acc5[0], batch_size)

        # Standard optimizer step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - tic)
        tic = time.time()

        if (step + 1) % args.print_freq == 0:
            progress.display(step + 1)
            writer.add_scalar('train_acc_1', top1.avg,
                              epoch * len(train_loader) + step)
            writer.add_scalar('train_acc_5', top5.avg,
                              epoch * len(train_loader) + step)
def fine_tune_validate_step(val_loader, model, criterion, device, epoch,
                            writer, args):
    """Evaluate the fine-tuned model for one full pass over ``val_loader``.

    Records loss and top-1/top-5 accuracy, logging the running averages
    to ``writer`` every ``args.print_freq`` batches.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5],
                             prefix='Test: ')
    # switch to evaluate mode (disables dropout, freezes batch-norm stats)
    model.eval()
    # no gradients needed during evaluation
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images = images.to(device)
            target = target.to(device)
            # compute output
            output = model(images)
            loss = criterion(output, target)
            # measure accuracy and record loss (weighted by batch size)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.display(i)
                writer.add_scalar('val_acc_1', top1.avg,
                                  epoch * len(val_loader) + i)
                writer.add_scalar('val_acc_5', top5.avg,
                                  epoch * len(val_loader) + i)
    # TODO: this should also be done with the ProgressMeter
    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1,
                                                                top5=top5))
return top1.avg | [
"utils.AverageMeter",
"torch.no_grad",
"time.time",
"utils.accuracy"
] | [((194, 223), 'utils.AverageMeter', 'AverageMeter', (['"""Time"""', '""":6.3f"""'], {}), "('Time', ':6.3f')\n", (206, 223), False, 'from utils import AverageMeter, ProgressMeter\n'), ((240, 269), 'utils.AverageMeter', 'AverageMeter', (['"""Data"""', '""":6.3f"""'], {}), "('Data', ':6.3f')\n", (252, 269), False, 'from utils import AverageMeter, ProgressMeter\n'), ((283, 311), 'utils.AverageMeter', 'AverageMeter', (['"""Loss"""', '""":.4e"""'], {}), "('Loss', ':.4e')\n", (295, 311), False, 'from utils import AverageMeter, ProgressMeter\n'), ((545, 556), 'time.time', 'time.time', ([], {}), '()\n', (554, 556), False, 'import time\n'), ((1394, 1423), 'utils.AverageMeter', 'AverageMeter', (['"""Time"""', '""":6.3f"""'], {}), "('Time', ':6.3f')\n", (1406, 1423), False, 'from utils import AverageMeter, ProgressMeter\n'), ((1437, 1465), 'utils.AverageMeter', 'AverageMeter', (['"""Loss"""', '""":.4e"""'], {}), "('Loss', ':.4e')\n", (1449, 1465), False, 'from utils import AverageMeter, ProgressMeter\n'), ((2560, 2589), 'utils.AverageMeter', 'AverageMeter', (['"""Time"""', '""":6.3f"""'], {}), "('Time', ':6.3f')\n", (2572, 2589), False, 'from utils import AverageMeter, ProgressMeter\n'), ((2606, 2635), 'utils.AverageMeter', 'AverageMeter', (['"""Data"""', '""":6.3f"""'], {}), "('Data', ':6.3f')\n", (2618, 2635), False, 'from utils import AverageMeter, ProgressMeter\n'), ((2649, 2677), 'utils.AverageMeter', 'AverageMeter', (['"""Loss"""', '""":.4e"""'], {}), "('Loss', ':.4e')\n", (2661, 2677), False, 'from utils import AverageMeter, ProgressMeter\n'), ((2689, 2719), 'utils.AverageMeter', 'AverageMeter', (['"""Acc@1"""', '""":6.2f"""'], {}), "('Acc@1', ':6.2f')\n", (2701, 2719), False, 'from utils import AverageMeter, ProgressMeter\n'), ((2731, 2761), 'utils.AverageMeter', 'AverageMeter', (['"""Acc@5"""', '""":6.2f"""'], {}), "('Acc@5', ':6.2f')\n", (2743, 2761), False, 'from utils import AverageMeter, ProgressMeter\n'), ((3007, 3018), 'time.time', 'time.time', ([], {}), 
'()\n', (3016, 3018), False, 'import time\n'), ((4247, 4276), 'utils.AverageMeter', 'AverageMeter', (['"""Time"""', '""":6.3f"""'], {}), "('Time', ':6.3f')\n", (4259, 4276), False, 'from utils import AverageMeter, ProgressMeter\n'), ((4290, 4318), 'utils.AverageMeter', 'AverageMeter', (['"""Loss"""', '""":.4e"""'], {}), "('Loss', ':.4e')\n", (4302, 4318), False, 'from utils import AverageMeter, ProgressMeter\n'), ((4330, 4360), 'utils.AverageMeter', 'AverageMeter', (['"""Acc@1"""', '""":6.2f"""'], {}), "('Acc@1', ':6.2f')\n", (4342, 4360), False, 'from utils import AverageMeter, ProgressMeter\n'), ((4372, 4402), 'utils.AverageMeter', 'AverageMeter', (['"""Acc@5"""', '""":6.2f"""'], {}), "('Acc@5', ':6.2f')\n", (4384, 4402), False, 'from utils import AverageMeter, ProgressMeter\n'), ((1097, 1108), 'time.time', 'time.time', ([], {}), '()\n', (1106, 1108), False, 'import time\n'), ((1638, 1653), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1651, 1653), False, 'import torch\n'), ((1669, 1680), 'time.time', 'time.time', ([], {}), '()\n', (1678, 1680), False, 'import time\n'), ((3389, 3426), 'utils.accuracy', 'accuracy', (['output', 'target'], {'topk': '(1, 5)'}), '(output, target, topk=(1, 5))\n', (3397, 3426), False, 'from utils import accuracy\n'), ((3789, 3800), 'time.time', 'time.time', ([], {}), '()\n', (3798, 3800), False, 'import time\n'), ((4587, 4602), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4600, 4602), False, 'import torch\n'), ((4618, 4629), 'time.time', 'time.time', ([], {}), '()\n', (4627, 4629), False, 'import time\n'), ((2044, 2055), 'time.time', 'time.time', ([], {}), '()\n', (2053, 2055), False, 'import time\n'), ((4949, 4986), 'utils.accuracy', 'accuracy', (['output', 'target'], {'topk': '(1, 5)'}), '(output, target, topk=(1, 5))\n', (4957, 4986), False, 'from utils import accuracy\n'), ((5243, 5254), 'time.time', 'time.time', ([], {}), '()\n', (5252, 5254), False, 'import time\n'), ((669, 680), 'time.time', 'time.time', ([], {}), 
'()\n', (678, 680), False, 'import time\n'), ((1064, 1075), 'time.time', 'time.time', ([], {}), '()\n', (1073, 1075), False, 'import time\n'), ((3136, 3147), 'time.time', 'time.time', ([], {}), '()\n', (3145, 3147), False, 'import time\n'), ((3756, 3767), 'time.time', 'time.time', ([], {}), '()\n', (3765, 3767), False, 'import time\n'), ((2007, 2018), 'time.time', 'time.time', ([], {}), '()\n', (2016, 2018), False, 'import time\n'), ((5206, 5217), 'time.time', 'time.time', ([], {}), '()\n', (5215, 5217), False, 'import time\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016-2017 <NAME>
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
#
import bz2
import gzip
import hashlib
import json
import os
import platform
import re
import shutil
import sys
import tarfile
from distutils.errors import DistutilsOptionError
from itertools import chain, takewhile
from pkg_resources import Requirement
from setuptools import Command
from setuptools.package_index import PackageIndex
try:
import lzma
except ImportError: # pragma: no cover
try:
import backports.lzma as lzma
except ImportError:
lzma = None
try:
import pip.wheel
except ImportError:
wheel_available = False
else:
from pip._vendor.pkg_resources import Requirement as WhlRequirement
wheel_available = True
__all__ = (
'bdist_pkg',
)
class bdist_pkg(Command):
description = 'create FreeBSD pkg distribution'
user_options = [
('bdist-base=', 'b',
'Base directory for creating built distributions.'),
('dist-dir=', 'd',
'Directory to put distribute files in.'),
('format=', 'f',
'Set format as the package output format. It can be one'
' of txz, tbz, tgz or tar. If an invalid or no format is specified'
' tgz is assumed.'),
('keep-temp', None,
'Keep intermediate build directories and files.'),
('origin=', None,
'Custom origin name for build package.'),
('use-pypi-deps', None,
'Automatically convert unknown Python dependencies to package ones.'
' Note that those dependencies will be named with py{}{}- prefix and'
' assumes that you have such packages in repository.'
''.format(*sys.version_info[:2])),
('use-wheel', None,
'Use bdist_wheel to generated install layout instead of install'
' command.'),
('with-py-prefix', None,
'Prepends py{}{}- prefix to package name.'
''.format(*sys.version_info[:2])),
]
boolean_options = ('keep-temp', 'use-wheel', 'python-deps-to-pkg',
'with-py-prefix')
compressor_for_format = {
'txz': lzma,
'tgz': gzip,
'tbz': bz2,
}
def initialize_options(self):
self.bdist_base = None
self.dist_dir = None
self.format = None
self.keep_temp = False
self.name_prefix = None
self.package_index = PackageIndex()
self.requirements_mapping = None
self.selected_options = None
self.use_pypi_deps = False
self.use_wheel = False
self.with_py_prefix = False
self.initialize_manifest_options()
def initialize_manifest_options(self):
# TODO: What is it and how to use it?
# self.annotations = None
self.abi = None
self.arch = None
self.categories = None
# TODO: Could conflicts be useful for us?
# self.conflicts = None
self.comment = None
# TODO: What is it and how to use it?
# self.dep_formula = None
self.deps = None
self.desc = None
# These fields are autogenerated:
# self.directories = None
# self.dirs = None
# self.files = None
# self.flatsize = None
self.groups = None
self.license = None
self.maintainer = None
# TODO: should that be single message or multiple ones?
# self.messages = None
self.name = None
self.options = None
self.selected_options = None
# Since we use extras, which don't have either defaults or descriptions
# these fields are not supported so far:
# self.options_defaults = None
# self.options_descriptions = None
self.origin = None
# TODO: What is the path?
# self.path = None
self.prefix = None
self.provides = None
self.requires = None
self.scripts = None
# TODO: Do we need shared libs support?
# self.shlibs = None
# self.shlibs_provides = None
# self.shlibs_requires = None
# TODO: Support checksum.
# self.sum = None
self.users = None
self.version = None
# TODO: Can Python packages be vital?
# self.vital = None
self.www = None
def finalize_options(self):
self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
self.ensure_format('tgz')
self.bdist_dir = os.path.join(self.bdist_base, 'pkg')
self.install_dir = os.path.join(self.bdist_dir, 'root')
self.finalize_manifest_options()
def finalize_manifest_options(self):
project = self.distribution
self.ensure_string('abi', self.get_abi())
self.ensure_string('arch', self.get_arch())
self.ensure_categories(project)
self.ensure_string('comment', project.get_description())
self.ensure_desc(project)
self.ensure_string_list('groups')
self.ensure_string('license', self.resolve_license(project))
self.ensure_string('maintainer', self.get_maintainer(project))
self.ensure_name(project)
self.ensure_string('origin', self.get_default_origin(project))
self.ensure_prefix('/usr/local')
self.ensure_string_list('provides')
self.ensure_string_list('requires')
self.ensure_scripts()
self.ensure_string('version', project.get_version())
self.ensure_string_list('users')
self.ensure_string('www', project.get_url())
self.ensure_options()
self.ensure_deps()
self.maybe_rename_console_scripts(project)
    def run(self):
        """Entry point: stage the project into the install root, write the
        pkg archive, then remove the staging tree unless --keep-temp."""
        self.build_and_install()
        self.make_pkg(self.generate_manifest_content())
        self.maybe_remove_temp(self.bdist_base)
    def build_and_install(self):
        """Stage the project into ``self.install_dir`` — via bdist_wheel
        when --use-wheel was given, otherwise via the install command."""
        if self.use_wheel:
            self.build_and_install_via_wheel()
        else:
            self.build_and_install_via_setuptools()
def build_and_install_via_setuptools(self):
# Basically, we need the intermediate results of bdist_dumb,
# but since it's too monolithic and does the stuff that we would like
# to avoid, here short copy-paste happens /:
build = self.reinitialize_command('build', reinit_subcommands=1)
build.build_base = self.bdist_base
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.prefix = self.prefix
install.root = self.install_dir
install.warn_dir = 0
self.run_command('install')
def build_and_install_via_wheel(self):
if not wheel_available:
raise RuntimeError('The `wheel` package is not available.')
build = self.reinitialize_command('build', reinit_subcommands=1)
build.build_base = self.bdist_base
bdist_wheel = self.reinitialize_command(
'bdist_wheel',
reinit_subcommands=1
)
bdist_wheel.bdist_base = self.bdist_base
bdist_wheel.keep_temp = True
self.run_command('bdist_wheel')
name = self.distribution.get_name()
pip.wheel.move_wheel_files(
name=self.name,
req=WhlRequirement.parse('{}=={}'.format(name, self.version)),
wheeldir=bdist_wheel.bdist_dir,
root=self.install_dir,
prefix=self.prefix,
)
def generate_manifest_content(self):
manifest = {
'abi': self.abi,
'arch': self.arch,
'categories': self.categories,
'comment': self.comment,
'deps': self.deps,
'desc': self.desc,
'directories': {},
'files': {},
'flatsize': 0,
'groups': self.groups,
'licenselogic': 'single',
'licenses': [self.license] if self.license else [],
'maintainer': self.maintainer,
'name': self.name,
'options': self.options,
'origin': self.origin,
'prefix': self.prefix,
'provides': self.provides,
'requires': self.requires,
'scripts': self.scripts,
'users': self.users,
'version': self.version,
'www': self.www,
}
mdirs = manifest['directories']
mfiles = manifest['files']
for real_file_path, install_path in self.iter_install_files():
with open(real_file_path, 'rb') as fh:
data = fh.read()
manifest['flatsize'] += len(data)
mdirs[os.path.dirname(install_path)] = {
'gname': 'wheel',
'perm': '0755',
'uname': 'root',
}
mfiles[install_path] = {
'gname': 'wheel',
'perm': '0644',
'sum': hashlib.sha256(data).hexdigest(),
'uname': 'root',
}
# TODO: Should we keep UNKNOWN values?
manifest = {key: value for key, value in manifest.items()
if value and value != 'UNKNOWN'}
if 'name' not in manifest:
raise DistutilsOptionError('Project must have name defined')
if 'version' not in manifest:
raise DistutilsOptionError('Project must have version defined')
if 'comment' not in manifest:
raise DistutilsOptionError('Project must have description defined')
if 'desc' not in manifest:
raise DistutilsOptionError('Project must have long_description'
' defined')
if 'maintainer' not in manifest:
raise DistutilsOptionError('Project must have author or maintainer'
' defined')
return manifest
def make_pkg(self, manifest):
manifest_path = self.make_manifest(manifest)
compact_manifest_path = self.make_compact_manifest(manifest)
files_paths = chain([
(manifest_path, os.path.basename(manifest_path)),
(compact_manifest_path, os.path.basename(compact_manifest_path))
], self.iter_install_files())
self.mkpath(self.dist_dir)
tar_path = self.make_tar(files_paths)
ext = self.format
if ext != 'tar':
compressor = self.get_compressor(ext)
if compressor is None:
raise RuntimeError('Format {} is not supported'.format(ext))
self.compress_tar(tar_path, ext, compressor)
os.remove(tar_path)
def make_manifest(self, content):
path = os.path.join(self.bdist_dir, '+MANIFEST')
with open(path, 'w') as fobj:
json.dump(content, fobj, sort_keys=True, indent=4)
return path
def make_compact_manifest(self, content):
path = os.path.join(self.bdist_dir, '+COMPACT_MANIFEST')
compact_content = content.copy()
compact_content.pop('directories')
compact_content.pop('files')
with open(path, 'w') as fobj:
json.dump(compact_content, fobj, sort_keys=True, indent=4)
return path
def make_tar(self, files_paths):
basename = '{}-{}.tar'.format(self.name, self.version)
path = os.path.join(self.dist_dir, basename)
seen = set()
with tarfile.open(path, 'w') as tar:
for file_path, tar_path in files_paths:
tar_dir_path = os.path.dirname(tar_path)
if tar_dir_path and tar_dir_path not in seen:
tarinfo = tar.gettarinfo(os.path.dirname(file_path),
tar_dir_path)
tarinfo.name = tar_dir_path
tar.addfile(tarinfo)
seen.add(tar_dir_path)
tarinfo = tar.gettarinfo(file_path, tar_path)
tarinfo.name = tar_path
with open(file_path, 'rb') as f:
tar.addfile(tarinfo, f)
return path
def compress_tar(self, tar_path, ext, compressor):
txx_path = tar_path.rsplit('.tar', 1)[0] + '.' + ext
with compressor.open(txx_path, 'w') as txx:
with open(tar_path, 'rb') as tar:
txx.write(tar.read())
return txx_path
    def get_compressor(self, format):
        # Map txz/tgz/tbz to the lzma/gzip/bz2 module; returns None for
        # plain 'tar' or when the module is unavailable (e.g. no lzma).
        return self.compressor_for_format.get(format)
def get_abi(self):
if platform.system().lower() != 'freebsd':
if not self.distribution.is_pure():
raise DistutilsOptionError(
'Unable to determine default ABI value'
' since bdist_pkg call happens not on FreeBSD system.'
' Please specify this value according the target system'
' for which you build this package.'
)
return '*'
return ':'.join((
platform.system(),
# 10.1-STABLE-r273058 -> 10
platform.release().split('-', 1)[0].split('.')[0],
# TODO: ensure that platform.machine() gives correct values
platform.machine()
))
def get_arch(self):
if platform.system().lower() != 'freebsd':
if not self.distribution.is_pure():
raise DistutilsOptionError(
'Unable to determine default ARCH value'
' since bdist_pkg call happens not on FreeBSD system.'
' Please specify this value according the target system'
' for which you build this package.'
)
return '*'
return ':'.join((
platform.system(),
# 10.1-STABLE-r273058 -> 10
platform.release().split('-', 1)[0].split('.')[0],
# TODO: shouldn't there be a better way?
'x86:64' if platform.machine() == 'amd64' else 'x86:32'
))
def get_default_origin(self, project):
return 'devel/py{}{}-{}'.format(sys.version_info[0],
sys.version_info[1],
project.get_name())
def get_maintainer(self, project):
maintainer = '{} <{}>'.format(project.get_maintainer(),
project.get_maintainer_email())
if maintainer == 'UNKNOWN <UNKNOWN>':
# No explicit maintainer specified, use author contact instead
maintainer = '{} <{}>'.format(project.get_author(),
project.get_author_email())
return maintainer
def resolve_license(self, project):
# Thanks for this mapping goes to pytoport project
py2freebsd_mapping = {
'agpl-3.0': 'AGPLv3',
'apache-2.0': 'APACHE20',
'artistic-2.0': 'ART20',
'bsd-2-clause': 'BSD2CLAUSE',
'bsd-3-clause-clear': 'BSD3CLAUSE',
'bsd-3-clause': 'BSD3CLAUSE',
'cc0-1.0': 'CC0-1.0',
'epl-1.0': 'EPL',
'gpl-2.0': 'GPLv2',
'gpl-3.0': 'GPLv3',
'isc': 'ISCL',
'lgpl-2.1': 'LGPL21',
'lgpl-3.0': 'LGPL3',
'mit': 'MIT',
'mpl-2.0': 'MPL',
'ofl-1.1': 'OFL11',
}
license = project.get_license()
pkg_license = py2freebsd_mapping.get(license.lower())
if license != 'UNKNOWN' and pkg_license is None:
self.warn('Unable to convert license %s to PKG naming' % license)
return license
return pkg_license
def ensure_format(self, default):
self.ensure_string('format', default)
if self.format not in {'txz', 'tbz', 'tgz', 'tar'}:
self.warn('Unknown format {!r}, falling back to {}'
''.format(self.format, default))
self.format = default
def ensure_prefix(self, default=None):
self.ensure_string('prefix', default)
self.prefix = self.prefix.rstrip('/')
def ensure_categories(self, project):
self.categories = self.categories or project.get_keywords()
self.ensure_string_list('categories')
def ensure_deps(self):
install_requires = set(self.distribution.install_requires or [])
for option in self.selected_options:
install_requires |= set(self.distribution.extras_require[option])
mapping = self.requirements_mapping or {}
self.deps = self.deps or {}
seen_deps = set([])
for python_dep, spec in mapping.items():
if not isinstance(python_dep, str):
raise DistutilsOptionError('Invalid Python dependency: {}'
''.format(python_dep))
if python_dep not in install_requires:
raise DistutilsOptionError('{} is not in install requires list'
''.format(python_dep))
if not isinstance(spec, dict):
raise DistutilsOptionError('requirements_mapping items must be'
' dict, got {}'.format(repr(spec)))
if set(spec) != {'origin', 'version', 'name'}:
raise DistutilsOptionError('requirements_mapping items must'
' have "origin" and "version" keys,'
' got {}'.format(set(spec)))
for key in {'origin', 'version', 'name'}:
if not isinstance(spec[key], str):
raise DistutilsOptionError('"{}" value must be string, got'
' {}'.format(key, spec[key]))
self.deps[spec['name']] = {'origin': spec['origin'],
'version': spec['version']}
seen_deps.add(python_dep)
missing = seen_deps ^ install_requires
if missing and self.use_pypi_deps:
for item in missing:
requirement = Requirement.parse(item)
distribution = self.package_index.obtain(requirement)
key = 'py{1}{2}-{0}'.format(distribution.key,
*sys.version_info[:2])
self.deps[key] = {
'origin': 'pypi/py-{}'.format(distribution.key),
'version': distribution.version
}
elif missing:
raise DistutilsOptionError('These packages are listed in install'
' requirements, but not in bdist_pkg'
' requirements mapping: {}'
''.format(', '.join(missing)))
def ensure_desc(self, project):
desc = project.get_long_description()
desc = desc if desc != 'UKNOWN' else project.get_description()
desc = self.cut_changelog(desc)
self.ensure_string('desc', desc)
def ensure_name(self, project):
name = project.get_name()
if self.with_py_prefix:
name = 'py{}{}-{}'.format(
sys.version_info[0], sys.version_info[1], name
)
self.ensure_string('name', name)
def ensure_options(self):
provided_options = set(self.distribution.extras_require or {})
self.selected_options = set(self.selected_options or [])
unknown_options = self.selected_options - provided_options
if not unknown_options:
self.options = {option: option in self.selected_options
for option in provided_options}
else:
raise DistutilsOptionError('Unknown extras selected: {}'
''.format(', '.join(unknown_options)))
def ensure_scripts(self):
if self.scripts is None:
return
if not isinstance(self.scripts, dict):
raise DistutilsOptionError('scripts must be a dict, got {}'
''.format(self.scripts))
valid_keys = {
'pre-install',
'post-install',
'install',
'pre-deinstall',
'post-deinstall',
'deinstall',
'pre-upgrade',
'post-upgrade',
'upgrade',
}
bad_keys = [key for key in self.scripts if key not in valid_keys]
if bad_keys:
raise DistutilsOptionError('invalid scripts: {}'
''.format(', '.join(bad_keys)))
bad_keys = [key for key, value in self.scripts.items()
if not isinstance(value, str)]
if bad_keys:
raise DistutilsOptionError('invalid scripts: {}'
''.format(', '.join(bad_keys)))
def iter_install_files(self):
for root, dirs, files in os.walk(self.install_dir):
for file in files:
reldir = os.path.relpath(root, self.install_dir)
install_path = '/' + os.path.join(reldir, file)
install_path = install_path.replace(self.prefix + '/lib64/',
self.prefix + '/lib/')
yield os.path.join(root, file), install_path
def maybe_remove_temp(self, path):
if self.keep_temp:
return
if path is None:
return
if os.path.exists(path):
shutil.rmtree(path)
def maybe_rename_console_scripts(self, project):
if not self.with_py_prefix:
return
if not project.entry_points:
return
console_scripts = project.entry_points.get('console_scripts')
if console_scripts is None:
return
prefixed_console_scripts = []
for script in console_scripts:
name, callback = script.split('=')
name = '{}{}.{}'.format(name.strip(), *sys.version_info[:2])
prefixed_console_scripts.append(
'{} = {}'.format(name, callback.strip())
)
project.entry_points['console_scripts'] = prefixed_console_scripts
def cut_changelog(self, desc):
def match_changelog_header(line):
words = re.findall(r'\b\w+\b', line.lower())
if len(words) != 1:
return True
if 'changelog' in words or 'changes' in words:
return False
return True
return '\n'.join(takewhile(
match_changelog_header,
desc.splitlines()
))
| [
"os.path.exists",
"hashlib.sha256",
"tarfile.open",
"json.dump",
"os.path.join",
"shutil.rmtree",
"os.path.relpath",
"os.path.dirname",
"platform.system",
"platform.release",
"distutils.errors.DistutilsOptionError",
"pkg_resources.Requirement.parse",
"setuptools.package_index.PackageIndex",
... | [((2531, 2545), 'setuptools.package_index.PackageIndex', 'PackageIndex', ([], {}), '()\n', (2543, 2545), False, 'from setuptools.package_index import PackageIndex\n'), ((4659, 4695), 'os.path.join', 'os.path.join', (['self.bdist_base', '"""pkg"""'], {}), "(self.bdist_base, 'pkg')\n", (4671, 4695), False, 'import os\n'), ((4723, 4759), 'os.path.join', 'os.path.join', (['self.bdist_dir', '"""root"""'], {}), "(self.bdist_dir, 'root')\n", (4735, 4759), False, 'import os\n'), ((10853, 10894), 'os.path.join', 'os.path.join', (['self.bdist_dir', '"""+MANIFEST"""'], {}), "(self.bdist_dir, '+MANIFEST')\n", (10865, 10894), False, 'import os\n'), ((11078, 11127), 'os.path.join', 'os.path.join', (['self.bdist_dir', '"""+COMPACT_MANIFEST"""'], {}), "(self.bdist_dir, '+COMPACT_MANIFEST')\n", (11090, 11127), False, 'import os\n'), ((11494, 11531), 'os.path.join', 'os.path.join', (['self.dist_dir', 'basename'], {}), '(self.dist_dir, basename)\n', (11506, 11531), False, 'import os\n'), ((21110, 21135), 'os.walk', 'os.walk', (['self.install_dir'], {}), '(self.install_dir)\n', (21117, 21135), False, 'import os\n'), ((21651, 21671), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (21665, 21671), False, 'import os\n'), ((9400, 9454), 'distutils.errors.DistutilsOptionError', 'DistutilsOptionError', (['"""Project must have name defined"""'], {}), "('Project must have name defined')\n", (9420, 9454), False, 'from distutils.errors import DistutilsOptionError\n'), ((9512, 9569), 'distutils.errors.DistutilsOptionError', 'DistutilsOptionError', (['"""Project must have version defined"""'], {}), "('Project must have version defined')\n", (9532, 9569), False, 'from distutils.errors import DistutilsOptionError\n'), ((9627, 9688), 'distutils.errors.DistutilsOptionError', 'DistutilsOptionError', (['"""Project must have description defined"""'], {}), "('Project must have description defined')\n", (9647, 9688), False, 'from distutils.errors import DistutilsOptionError\n'), 
((9743, 9809), 'distutils.errors.DistutilsOptionError', 'DistutilsOptionError', (['"""Project must have long_description defined"""'], {}), "('Project must have long_description defined')\n", (9763, 9809), False, 'from distutils.errors import DistutilsOptionError\n'), ((9912, 9982), 'distutils.errors.DistutilsOptionError', 'DistutilsOptionError', (['"""Project must have author or maintainer defined"""'], {}), "('Project must have author or maintainer defined')\n", (9932, 9982), False, 'from distutils.errors import DistutilsOptionError\n'), ((10779, 10798), 'os.remove', 'os.remove', (['tar_path'], {}), '(tar_path)\n', (10788, 10798), False, 'import os\n'), ((10945, 10995), 'json.dump', 'json.dump', (['content', 'fobj'], {'sort_keys': '(True)', 'indent': '(4)'}), '(content, fobj, sort_keys=True, indent=4)\n', (10954, 10995), False, 'import json\n'), ((11299, 11357), 'json.dump', 'json.dump', (['compact_content', 'fobj'], {'sort_keys': '(True)', 'indent': '(4)'}), '(compact_content, fobj, sort_keys=True, indent=4)\n', (11308, 11357), False, 'import json\n'), ((11566, 11589), 'tarfile.open', 'tarfile.open', (['path', '"""w"""'], {}), "(path, 'w')\n", (11578, 11589), False, 'import tarfile\n'), ((21685, 21704), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (21698, 21704), False, 'import shutil\n'), ((11681, 11706), 'os.path.dirname', 'os.path.dirname', (['tar_path'], {}), '(tar_path)\n', (11696, 11706), False, 'import os\n'), ((12763, 12974), 'distutils.errors.DistutilsOptionError', 'DistutilsOptionError', (['"""Unable to determine default ABI value since bdist_pkg call happens not on FreeBSD system. Please specify this value according the target system for which you build this package."""'], {}), "(\n 'Unable to determine default ABI value since bdist_pkg call happens not on FreeBSD system. 
Please specify this value according the target system for which you build this package.'\n )\n", (12783, 12974), False, 'from distutils.errors import DistutilsOptionError\n'), ((13133, 13150), 'platform.system', 'platform.system', ([], {}), '()\n', (13148, 13150), False, 'import platform\n'), ((13339, 13357), 'platform.machine', 'platform.machine', ([], {}), '()\n', (13355, 13357), False, 'import platform\n'), ((13515, 13727), 'distutils.errors.DistutilsOptionError', 'DistutilsOptionError', (['"""Unable to determine default ARCH value since bdist_pkg call happens not on FreeBSD system. Please specify this value according the target system for which you build this package."""'], {}), "(\n 'Unable to determine default ARCH value since bdist_pkg call happens not on FreeBSD system. Please specify this value according the target system for which you build this package.'\n )\n", (13535, 13727), False, 'from distutils.errors import DistutilsOptionError\n'), ((13886, 13903), 'platform.system', 'platform.system', ([], {}), '()\n', (13901, 13903), False, 'import platform\n'), ((18248, 18271), 'pkg_resources.Requirement.parse', 'Requirement.parse', (['item'], {}), '(item)\n', (18265, 18271), False, 'from pkg_resources import Requirement\n'), ((21193, 21232), 'os.path.relpath', 'os.path.relpath', (['root', 'self.install_dir'], {}), '(root, self.install_dir)\n', (21208, 21232), False, 'import os\n'), ((8784, 8813), 'os.path.dirname', 'os.path.dirname', (['install_path'], {}), '(install_path)\n', (8799, 8813), False, 'import os\n'), ((10265, 10296), 'os.path.basename', 'os.path.basename', (['manifest_path'], {}), '(manifest_path)\n', (10281, 10296), False, 'import os\n'), ((10335, 10374), 'os.path.basename', 'os.path.basename', (['compact_manifest_path'], {}), '(compact_manifest_path)\n', (10351, 10374), False, 'import os\n'), ((12653, 12670), 'platform.system', 'platform.system', ([], {}), '()\n', (12668, 12670), False, 'import platform\n'), ((13405, 13422), 'platform.system', 
'platform.system', ([], {}), '()\n', (13420, 13422), False, 'import platform\n'), ((21270, 21296), 'os.path.join', 'os.path.join', (['reldir', 'file'], {}), '(reldir, file)\n', (21282, 21296), False, 'import os\n'), ((11814, 11840), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (11829, 11840), False, 'import os\n'), ((14085, 14103), 'platform.machine', 'platform.machine', ([], {}), '()\n', (14101, 14103), False, 'import platform\n'), ((21471, 21495), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (21483, 21495), False, 'import os\n'), ((9090, 9110), 'hashlib.sha256', 'hashlib.sha256', (['data'], {}), '(data)\n', (9104, 9110), False, 'import hashlib\n'), ((13204, 13222), 'platform.release', 'platform.release', ([], {}), '()\n', (13220, 13222), False, 'import platform\n'), ((13957, 13975), 'platform.release', 'platform.release', ([], {}), '()\n', (13973, 13975), False, 'import platform\n')] |
import logging
import numpy as np
__author__ = "<NAME>"
def class_to_dict(obj, *args):
    """
    Create dict which contains values of the given fields of *obj*.

    :type obj: object
    :param args: attribute names to extract
    :type args: list[str]
    :return: mapping of attribute name to its current value
    :rtype: dict[str, object]
    :raises AttributeError: if *obj* lacks one of the requested attributes
    """
    # Dict comprehension replaces the manual accumulate loop (same behavior).
    return {name: getattr(obj, name) for name in args}
def dict_set_class(obj, dic, *args):
    """
    Set fields of given object based on values from dict.
    If *args* contains no names, all keys of *dic* are used.

    Attributes missing on *obj* are skipped with a warning.  Note that a
    name passed in *args* but absent from *dic* raises KeyError (only
    AttributeError is caught below).
    :type obj: object
    :type dic: dict[str,object]
    :param args: list[str]
    :return:
    """
    if len(args) == 0:
        li = dic.keys()
    else:
        li = args
    for name in li:
        try:
            # getattr probes that the attribute already exists; only then
            # is it overwritten, so new attributes are never created.
            getattr(obj, name)
            setattr(obj, name, dic[name])
        except AttributeError as ae:
            logging.warning(ae)
def bisect(arr, val, comp):
    """Binary search with a custom comparator.

    Returns the smallest index ``i`` such that ``comp(arr[i], val)`` is
    false, or ``len(arr)`` if no such index exists.  ``arr`` must be
    partitioned with respect to ``comp``: every element satisfying
    ``comp(elem, val)`` precedes every element that does not.
    """
    lo, hi = -1, len(arr)
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if comp(arr[mid], val):
            lo = mid
        else:
            hi = mid
    return hi
def numpy_repr(val: np.ndarray):
    """Compact, human-readable repr for (possibly large) numpy arrays.

    ``None`` and arrays with fewer than 20 elements fall back to the
    plain ``repr``; larger arrays are summarised by size, shape, dtype
    and value range instead of being printed element by element.
    """
    # Short-circuit keeps the None check first, so .size is never hit on None.
    if val is None or val.size < 20:
        return repr(val)
    return f"array(size={val.size}, shape={val.shape}, dtype={val.dtype}, min={val.min()}, max={val.max()})"
| [
"logging.warning"
] | [((813, 832), 'logging.warning', 'logging.warning', (['ae'], {}), '(ae)\n', (828, 832), False, 'import logging\n')] |
"""
Count and Sort the times a word appear in a text.
Using regular expressions
"""
import re
from pyspark import SparkConf, SparkContext
def normalize_words(text):
    """Lower-case *text* and split it on runs of non-word characters."""
    return re.split(r'\W+', text.lower(), flags=re.UNICODE)
# Local (single-machine) Spark context for the word-count job.
conf = SparkConf().setMaster("local").setAppName("WordCount")
sc = SparkContext(conf=conf)
lines = sc.textFile("file:///SparkCourse/files/book.txt")
# One record per normalized (lower-cased, punctuation-stripped) word.
words = lines.flatMap(normalize_words)
# Classic map/reduce word count: emit (word, 1) pairs and sum per word.
wordCounts = words.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
# Swap to (count, word) so sortByKey orders by frequency, ascending.
wordCountsSorted = wordCounts.map(lambda x: (x[1], x[0])).sortByKey()
results = wordCountsSorted.collect()
for result in results:
    count = str(result[0])
    # Drop non-ASCII characters; skip entries that become empty (purely
    # non-ASCII words, or the empty string produced by regex splitting).
    word = result[1].encode('ascii', 'ignore')
    if word:
        print(word.decode() + ":\t\t" + count)
| [
"pyspark.SparkContext",
"pyspark.SparkConf",
"re.compile"
] | [((298, 321), 'pyspark.SparkContext', 'SparkContext', ([], {'conf': 'conf'}), '(conf=conf)\n', (310, 321), False, 'from pyspark import SparkConf, SparkContext\n'), ((178, 208), 're.compile', 're.compile', (['"""\\\\W+"""', 're.UNICODE'], {}), "('\\\\W+', re.UNICODE)\n", (188, 208), False, 'import re\n'), ((238, 249), 'pyspark.SparkConf', 'SparkConf', ([], {}), '()\n', (247, 249), False, 'from pyspark import SparkConf, SparkContext\n')] |
from terragpu.ai.deep_learning.datasets.segmentation_dataset \
import SegmentationDataset
prepare_data = True
images_regex = '/Users/jacaraba/Desktop/development/ilab/vhr-cloudmask/data/images/*.tif'
labels_regex = '/Users/jacaraba/Desktop/development/ilab/vhr-cloudmask/data/labels/*.tif'
dataset_dir = '/Users/jacaraba/Desktop/development/ilab/vhr-cloudmask/data/dataset'
dataset = SegmentationDataset(
prepare_data=prepare_data,
images_regex=images_regex,
labels_regex=labels_regex,
dataset_dir=dataset_dir,
tile_size=128,
seed=24,
max_patches=0.000001,
augment=True,
chunks={'band': 1, 'x': 2048, 'y': 2048},
input_bands=['CB', 'B', 'G', 'Y', 'Red', 'RE', 'N1', 'N2'],
output_bands=['B', 'G', 'Red'],
pytorch=True)
| [
"terragpu.ai.deep_learning.datasets.segmentation_dataset.SegmentationDataset"
] | [((390, 737), 'terragpu.ai.deep_learning.datasets.segmentation_dataset.SegmentationDataset', 'SegmentationDataset', ([], {'prepare_data': 'prepare_data', 'images_regex': 'images_regex', 'labels_regex': 'labels_regex', 'dataset_dir': 'dataset_dir', 'tile_size': '(128)', 'seed': '(24)', 'max_patches': '(1e-06)', 'augment': '(True)', 'chunks': "{'band': 1, 'x': 2048, 'y': 2048}", 'input_bands': "['CB', 'B', 'G', 'Y', 'Red', 'RE', 'N1', 'N2']", 'output_bands': "['B', 'G', 'Red']", 'pytorch': '(True)'}), "(prepare_data=prepare_data, images_regex=images_regex,\n labels_regex=labels_regex, dataset_dir=dataset_dir, tile_size=128, seed\n =24, max_patches=1e-06, augment=True, chunks={'band': 1, 'x': 2048, 'y':\n 2048}, input_bands=['CB', 'B', 'G', 'Y', 'Red', 'RE', 'N1', 'N2'],\n output_bands=['B', 'G', 'Red'], pytorch=True)\n", (409, 737), False, 'from terragpu.ai.deep_learning.datasets.segmentation_dataset import SegmentationDataset\n')] |
from collections import deque
from itertools import combinations
from grafo import Grafo
def edmonds_karp(grafo, v_inicial, v_sorvedouro):
rede_residual = [[None for x in range(grafo.qtd_vertices())] for y in range(grafo.qtd_vertices())]
for u in range(len(grafo.arestas)):
for v in range(len(grafo.arestas[u])):
if grafo.arestas[u][v] != None:
rede_residual[u][v] = 0
caminho_aumentante = busca_em_largura_fluxo(grafo,v_inicial, v_sorvedouro, rede_residual)
total_caminhos = 0
fluxo_maximo = 0
while caminho_aumentante != None:
fluxos = []
# print("Fluxo do caminho:", end=" ")
for i in range(len(caminho_aumentante)-1):
IN = caminho_aumentante[i+1]
OUT = caminho_aumentante[i]
fluxo = grafo.arestas[IN][OUT] - rede_residual[IN][OUT]
# print(f"{fluxo}", end="-")
fluxos.append(fluxo)
fluxo_caminho = minimo(fluxos)
# print(f"= {fluxo_caminho}")
fluxo_maximo += fluxo_caminho
for i in range(len(caminho_aumentante)-1):
IN = caminho_aumentante[i+1]
OUT = caminho_aumentante[i]
rede_residual[IN][OUT] += fluxo_caminho
total_caminhos += 1
caminho_aumentante = busca_em_largura_fluxo(grafo,v_inicial, v_sorvedouro, rede_residual)
# print(f"Total de caminhos: {total_caminhos}")
# print(f"Fluxo máximo: {fluxo_maximo}")
return fluxo_maximo
def hopcroft_karp(grafo):
distancias = [float("inf") for x in range(len(grafo.vertices))]
mates = [None for x in range(len(grafo.vertices))]
distNone = [0]
X = (int)(grafo.qtd_vertices()/2 - 1)
m = 0
while(busca_em_largura_emparelhamento(grafo, mates, distancias, distNone)):
for x in range(X+1):
if mates[x] == None:
if busca_em_profundidade_emparelhamento(grafo, mates, x, distancias, distNone):
m += 1
print(f"Emparelhamento máximo: {m}")
return mates
def busca_em_largura_fluxo(grafo, v_inicial, v_sorvedouro, rede_residual):
visitados = [False for x in range(len(grafo.vertices))]
antecessores = [None for x in range(len(grafo.vertices))]
visitados[v_inicial] = True
fila = []
fila.append(v_inicial)
while(len(fila) > 0):
u = fila.pop()
for v in range(len(grafo.arestas[u])):
if grafo.arestas[u][v] is None or v == u or (grafo.arestas[u][v] - rede_residual[u][v] <= 0):
continue
elif visitados[v] == False:
visitados[v] = True
antecessores[v] = u
if v == v_sorvedouro:
caminho_aumentante = []
while v != None:
caminho_aumentante.append(v)
v = antecessores[v]
return caminho_aumentante
fila.append(v)
return None
def minimo(valores):
if valores == None or len(valores)<1:
print("Erro ao retornar valor mínimo")
return None
resultado = valores[0]
for valor in valores:
if valor < resultado:
resultado = valor
return resultado
def busca_em_largura_emparelhamento(grafo, mates, distancias, distNone):
Q = []
X = (int)(grafo.qtd_vertices()/2 - 1)
INF = float("inf")
for x in range(X+1):
if mates[x]== None:
distancias[x] = 0
Q.append(x)
else:
distancias[x] = INF
distNone[0] = INF
while len(Q) > 0:
x = Q.pop()
if distancias[x] < distNone[0]:
for y in grafo.vizinhos(x):
if mates[y] == None:
if distNone[0] == INF:
distNone[0] = distancias[x] + 1
else:
if distancias[mates[y]] == INF:
distancias[mates[y]] = distancias[x] + 1
Q.append(mates[y])
return distNone[0] != INF
def busca_em_profundidade_emparelhamento(grafo, mates, x, distancias, distNone):
INF = float("inf")
if x != None:
for y in grafo.vizinhos(x):
if mates[y] == None:
if distNone[0] == distancias[x] + 1:
if busca_em_profundidade_emparelhamento(grafo, mates, mates[y], distancias, distNone):
# mates[y] = x
mates[x] = y
return True
else:
if distancias[mates[y]] == distancias[x] + 1:
if busca_em_profundidade_emparelhamento(grafo, mates, mates[y], distancias, distNone):
mates[y] = x
mates[x] = y
return True
distancias[x] = INF
return False
return True
def lawler(grafo):
X = [n for n in range(2**grafo.qtd_vertices())]
S = conjunto_potencia_ordenado(list(grafo.vertices.keys()))
#for x in S:
# print(x)
for indice, s in enumerate(S):
if indice == 0:
X[0] = 0
continue
X[indice] = float('inf')
G = sub_grafo(grafo, s)
for I in conjuntos_independentes_maximais(G):
s_copy = s.copy()
for v in I:
if v in s_copy:
s_copy.remove(v)
i = S.index(s_copy)
if X[i] +1 < X[indice]:
X[indice] = X[i] + 1
# X[indice] = float("inf")
# print(indice)
# print(X)
# pass
return X[-1]
def conjunto_potencia_ordenado(valores):
entradas = (list(y) for n in range(len(valores) + 1) for y in combinations(valores, n))
def indice_binario(s):
return sum(2 ** list(reversed(valores)).index(y) for y in s)
return sorted(entradas, key=indice_binario)
def sub_grafo(grafo, entradas):
vertices = {chave: grafo.vertices[chave] for chave in entradas}
indice_vertices = {grafo.vertices[chave]: chave for chave in entradas}
arestas = [[grafo.arestas[x][y] if x in entradas and y in entradas else None for x in range(max(vertices)+1)] for y in range(max(vertices)+1)]
return Grafo(vertices, indice_vertices, arestas, grafo.dirigido)
def conjuntos_independentes_maximais(grafo):
conjuntos = list()
for v in grafo.vertices:
conj_max = set()
for u in grafo.vertices:
if grafo.arestas[v][u] is None:
conj_max.add(u)
to_remove = list()
for x in conj_max:
for y in conj_max:
if x == y:
continue
if x in to_remove:
continue
if grafo.arestas[x][y] is not None:
to_remove.append(y)
for x in to_remove:
conj_max.discard(x)
is_subset = False
for c in conjuntos:
if conj_max.issubset(c):
is_subset = True
break
if is_subset:
continue
conjuntos.append(conj_max)
return conjuntos | [
"itertools.combinations",
"grafo.Grafo"
] | [((6280, 6337), 'grafo.Grafo', 'Grafo', (['vertices', 'indice_vertices', 'arestas', 'grafo.dirigido'], {}), '(vertices, indice_vertices, arestas, grafo.dirigido)\n', (6285, 6337), False, 'from grafo import Grafo\n'), ((5774, 5798), 'itertools.combinations', 'combinations', (['valores', 'n'], {}), '(valores, n)\n', (5786, 5798), False, 'from itertools import combinations\n')] |
from part import *
from material import *
from section import *
from assembly import *
from step import *
from interaction import *
from load import *
from mesh import *
from job import *
from sketch import *
from visualization import *
from connectorBehavior import *
import boundaryUtils
import regionToolset
import geomHoney
# Create a model
tmpModel = mdb.Model(name='tmp')
if mdb.models.has_key('Model-1'):
del mdb.models['Model-1']
if mdb.models.has_key('Honeycomb'):
del mdb.models['Honeycomb']
myModel = mdb.Model(name='Honeycomb')
myAssembly = myModel.rootAssembly
del mdb.models['tmp']
# Input Parameters
rc = 0.003175
rp = 0.01
ri = 0.007125
hCore = 0.0127
hFace = 0.000457
hComb = 0.00015
hIns = hCore
nx = 11
ny = 18
nx = 4
ny = 11
bQuadratic = 0
bExplicit = 1
bDisplace = 0
# Step Parameters
uLoad = 0.001
uForce = 1334/4
uTime = 40.0
uMaxInc = 0.05
uMinInc = 1.e-7
uMaxNumInc = 2000
uInitialInc = 0.01
mScaleInc = 1.e-4
# Derived Parameters
lx = 4 * rc / (2 + sqrt(3))
s = lx*sin(pi/3)
c = lx*cos(pi/3)
xmax = 2*nx*(lx+c)-lx
ymax = 2*ny*s
A = ri*ri * pi / 4
P = -uForce / A
xc = xmax - c - lx/2
yc = ymax - s
cx = xc - 0.0254
cy = yc - 0.0255
# Mesh Parameters
hMesh = 0.001
scMesh = 2
# Create Insert
skInsert = myModel.ConstrainedSketch( name='insertProfile', sheetSize = 2*ri )
skInsert.CircleByCenterPerimeter( center=(xc,yc), point1=(xc+ri,yc) )
prtInsert = myModel.Part( dimensionality=THREE_D, name='Insert', type=DEFORMABLE_BODY )
prtInsert.BaseSolidExtrude( depth=hIns, sketch=skInsert )
# Create Honeycomb
skHoney = myModel.ConstrainedSketch( name='honeyProfile', sheetSize=2*xmax )
cnrs = geomHoney.getCorners( lx, nx, ny )
for i in range( len(cnrs) ):
for j in range( 6 ):
lines = geomHoney.getLines( cnrs[i], lx )
skHoney.Line( point1=lines[j][0], point2=lines[j][1] )
prtCells = myModel.Part( dimensionality=THREE_D, name='Cells', type=DEFORMABLE_BODY )
prtCells.BaseShellExtrude( depth=hCore, sketch=skHoney )
# Create Potting Base
skBlock = myModel.ConstrainedSketch( name='blockProfile', sheetSize=3*lx )
lines = geomHoney.getLines( (0,0), lx )
for i in range( 6 ):
skBlock.Line( point1=lines[i][0], point2=lines[i][1] )
prtBlock = myModel.Part( dimensionality=THREE_D, name='Block', type=DEFORMABLE_BODY )
prtBlock.BaseSolidExtrude( depth=hCore, sketch=skBlock )
instBlks = []
nBlks = 0
for i in range( len(cnrs) ):
vtxs = geomHoney.getVertices( cnrs[i], lx )
if ( geomHoney.getMinDist( (xc,yc), vtxs ) < rp ):
nBlk = 'Blk ' + str(nBlks)
instBlks.append( myAssembly.Instance( name=nBlk, part=prtBlock, dependent=ON ) )
instBlks[nBlks].translate( vector=cnrs[i] )
nBlks = nBlks + 1
prtEpox0 = myAssembly.PartFromBooleanMerge( name='Epox0', instances=instBlks, keepIntersections=False )
for i in range( nBlks ):
nBlk = 'Blk ' + str(i)
del myAssembly.instances[nBlk]
del instBlks
# Create Potting
instEpox0 = myAssembly.Instance( name='instEpox0', part=prtEpox0, dependent=ON )
instInsert = myAssembly.Instance( name='instInsert', part=prtInsert, dependent=ON )
prtPotting = myAssembly.PartFromBooleanCut( name='Potting', instanceToBeCut=instEpox0, cuttingInstances=(instInsert,) )
instPotting = myAssembly.Instance( name='instPotting', part=prtPotting, dependent=ON )
del myModel.parts['Block']
# Create Face Sheets
skSheet = myModel.ConstrainedSketch( name='sheetProfile', sheetSize=3*nx*lx )
skSheet.rectangle( point1=(0,0), point2=(xmax,ymax) )
prtFace = myModel.Part( dimensionality=THREE_D, name='Face', type=DEFORMABLE_BODY )
prtFace.BaseShell( sketch=skSheet )
prtFace.PartitionFaceBySketch( faces=prtFace.faces, sketch=skHoney )
instFace = myAssembly.Instance( name='instFace', part=prtFace, dependent=ON )
prtFace = myAssembly.PartFromBooleanCut( name='Face', instanceToBeCut=instFace, cuttingInstances=(instPotting,instInsert,) )
del myAssembly.instances['instFace']
# Subtract Potting From Core
instCells = myAssembly.Instance( name='instCells', part=prtCells, dependent=ON )
prtCore = myAssembly.PartFromBooleanCut( name='Core', instanceToBeCut=instCells, cuttingInstances=(instEpox0,) )
del myAssembly.instances['instCells']
del myAssembly.instances['instEpox0']
del myModel.parts['Cells']
del myModel.parts['Epox0']
# Create Panel
instBotFace = myAssembly.Instance( name='instBotFace', part=prtFace, dependent=ON )
instCore = myAssembly.Instance( name='instCore', part=prtCore, dependent=ON )
instPotting = myAssembly.Instance( name='instPotting', part=prtPotting, dependent=ON )
instTopFace = myAssembly.Instance( name='instTopFace', part=prtFace, dependent=ON )
instTopFace.translate( vector=(0,0,hCore) )
prtPanel0 = myAssembly.PartFromBooleanMerge( name='Panel0', instances=(instBotFace,instCore,instPotting,instInsert,instTopFace),
keepIntersections=True )
instPanel0 = myAssembly.Instance( name='instPanel0', part=prtPanel0, dependent=ON )
# Create Symmetry Block
skSymBlock = myModel.ConstrainedSketch( name='symBlockProfile', sheetSize = xmax )
skSymBlock.Line( point1=(0,2*ymax), point2=(2*xmax,2*ymax) )
skSymBlock.Line( point1=(2*xmax,2*ymax), point2=(2*xmax,0) )
skSymBlock.Line( point1=(2*xmax,0), point2=(xc,0) )
skSymBlock.Line( point1=(xc,0), point2=(xc,yc) )
skSymBlock.Line( point1=(xc,yc), point2=(0,yc) )
skSymBlock.Line( point1=(0,yc), point2=(0,2*ymax) )
prtSymBlock = myModel.Part( dimensionality=THREE_D, name='SymBlock', type=DEFORMABLE_BODY )
prtSymBlock.BaseSolidExtrude( depth=hCore, sketch=skSymBlock )
# Cut Parts
instSymBlock = myAssembly.Instance( name='instSymBlock', part=prtSymBlock, dependent=ON )
prtPanel = myAssembly.PartFromBooleanCut( name='Panel', instanceToBeCut=instPanel0, cuttingInstances=(instSymBlock,) )
prtSymBotFace = myAssembly.PartFromBooleanCut( name='SymBotFace', instanceToBeCut=instBotFace, cuttingInstances=(instSymBlock,) )
prtSymCore = myAssembly.PartFromBooleanCut( name='SymCore', instanceToBeCut=instCore, cuttingInstances=(instSymBlock,) )
prtSymPotting = myAssembly.PartFromBooleanCut( name='SymPotting', instanceToBeCut=instPotting, cuttingInstances=(instSymBlock,) )
prtSymInsert = myAssembly.PartFromBooleanCut( name='SymInsert', instanceToBeCut=instInsert, cuttingInstances=(instSymBlock,) )
prtSymTopFace = myAssembly.PartFromBooleanCut( name='SymTopFace', instanceToBeCut=instTopFace, cuttingInstances=(instSymBlock,) )
# Delete Original Parts and Instances
del myAssembly.instances['instBotFace']
del myAssembly.instances['instCore']
del myAssembly.instances['instPotting']
del myAssembly.instances['instTopFace']
del myAssembly.instances['instSymBlock']
del myAssembly.instances['instInsert']
del myAssembly.instances['instPanel0']
del myModel.parts['Insert']
del myModel.parts['Panel0']
del myModel.parts['Face']
del myModel.parts['Core']
del myModel.parts['Potting']
del myModel.parts['SymBlock']
# Recreate Half Instances
instBotFace = myAssembly.Instance( name='instBotFace', part=prtSymBotFace, dependent=ON )
instCore = myAssembly.Instance( name='instCore', part=prtSymCore, dependent=ON )
instPotting = myAssembly.Instance( name='instPotting', part=prtSymPotting, dependent=ON )
instInsert = myAssembly.Instance( name='instInsert', part=prtSymInsert, dependent=ON )
instTopFace = myAssembly.Instance( name='instTopFace', part=prtSymTopFace, dependent=ON )
# Partition Parts
for i in range( len( myModel.parts ) ):
tmpPart = myModel.parts[ myModel.parts.keys()[i] ]
tmpPart.DatumPlaneByPrincipalPlane( offset=cx, principalPlane=YZPLANE )
tmpPart.DatumPlaneByPrincipalPlane( offset=cy, principalPlane=XZPLANE )
for j in range( len( tmpPart.datums ) ):
datum = tmpPart.datums[ tmpPart.datums.keys()[j] ]
try:
tmpPart.PartitionCellByDatumPlane( cells=tmpPart.cells, datumPlane=datum )
except:
x = 0
try:
tmpPart.PartitionFaceByDatumPlane( faces=tmpPart.faces, datumPlane=datum )
except:
x = 0
del datum
del tmpPart
# Create Material Properties
tabLaminate = ( 17.9e9, 17.9e9, 6.e9, 0.3, 0.3, 0.3, 6.0e9, 1.0e9, 1.0e9 )
tabHoneycomb = ( 1.e9, 0.3 )
tabPotting = ( 890.e6, 0.3 )
tabSteel = ( 2.1e11, 0.3 )
matLaminate = myModel.Material( name='Laminate' )
matLaminate.Elastic( type=ENGINEERING_CONSTANTS, table=( tabLaminate, ) )
matHoneycomb = myModel.Material( name='Honeycomb' )
matHoneycomb.Elastic( type=ISOTROPIC, table=( tabHoneycomb, ) )
matPotting = myModel.Material( name='Potting' )
matPotting.Elastic( type=ISOTROPIC, table=( tabPotting, ) )
matSteel = myModel.Material( name='Steel' )
matSteel.Elastic( type=ISOTROPIC, table=( tabSteel, ) )
matHoneycomb.Density(table=((1000.0, ), ))
matLaminate.Density(table=((1652.0, ), ))
matPotting.Density(table=((1100.0, ), ))
matSteel.Density(table=((7700.0, ), ))
# Create Section Definitions
secSteel = myModel.HomogeneousSolidSection( name='secSteel', material='Steel' )
secPotting = myModel.HomogeneousSolidSection( name='secPotting', material='Potting' )
secLaminate = myModel.HomogeneousShellSection(idealization=NO_IDEALIZATION, integrationRule=SIMPSON, material='Laminate',
name='secLaminate', nodalThicknessField='', numIntPts=5, poissonDefinition=DEFAULT,
preIntegrate=OFF, temperature=GRADIENT, thickness=hFace, thicknessField='',
thicknessModulus=None, thicknessType=UNIFORM, useDensity=OFF)
secHoneycomb = myModel.HomogeneousShellSection(idealization=NO_IDEALIZATION, integrationRule=SIMPSON, material='Honeycomb',
name='secHoneycomb', nodalThicknessField='', numIntPts=5, poissonDefinition=DEFAULT,
preIntegrate=OFF, temperature=GRADIENT, thickness=hComb, thicknessField='',
thicknessModulus=None, thicknessType=UNIFORM, useDensity=OFF)
# Create Skins on Top and Bottom of Potting
seqPotSkinBase = boundaryUtils.getFacesFromCntnr( prtPanel, instPotting )
seqTopPotSkin = boundaryUtils.getFacesList( prtPanel, seqPotSkinBase, 2, hCore )
seqBotPotSkin = boundaryUtils.getFacesList( prtPanel, seqPotSkinBase, 2, 0 )
seqPotSkin = seqTopPotSkin + seqBotPotSkin
prtPanel.Skin( faces=seqTopPotSkin, name='PottingTop' )
prtPanel.Skin( faces=seqBotPotSkin, name='PottingBot' )
# Get Sequences of Faces/Cells for Each Material
seqSteel = boundaryUtils.getCellsFromCntnr( prtPanel, instInsert )
seqPotting = boundaryUtils.getCellsFromCntnr( prtPanel, instPotting )
seqTopLaminate = boundaryUtils.getFacesFromCntnr( prtPanel, instTopFace )
seqBotLaminate = boundaryUtils.getFacesFromCntnr( prtPanel, instBotFace )
seqLaminate = seqTopLaminate + seqBotLaminate
seqCore = boundaryUtils.getFacesFromCntnr( prtPanel, instCore )
# Create Regions for Section Assignments
regLaminate = regionToolset.Region( faces=seqLaminate )
regSkin = regionToolset.Region( skinFaces=(('PottingTop', seqTopPotSkin),('PottingBot', seqBotPotSkin),) )
regSteel = regionToolset.Region( cells=seqSteel )
regPotting = regionToolset.Region( cells=seqPotting )
regCore = regionToolset.Region( faces=seqCore )
# Create Section Assignments
prtPanel.SectionAssignment( region=regLaminate, sectionName='secLaminate' )
prtPanel.SectionAssignment( region=regSkin, sectionName='secLaminate' )
prtPanel.SectionAssignment( region=regCore, sectionName='secHoneycomb' )
prtPanel.SectionAssignment( region=regPotting, sectionName='secPotting' )
prtPanel.SectionAssignment( region=regSteel, sectionName='secSteel' )
# Assign Material Directions
regTopLaminate = regionToolset.Region( faces=seqTopLaminate )
regBotLaminate = regionToolset.Region( faces=seqBotLaminate )
regTopSkin = regionToolset.Region( skinFaces=(('PottingTop', seqTopPotSkin),) )
regBotSkin = regionToolset.Region( skinFaces=(('PottingBot', seqBotPotSkin),) )
prtPanel.DatumCsysByThreePoints( name='Global', coordSysType=CARTESIAN, origin=(0,0,0), point1=(1,0,0), line2=(0,1,0))
prtPanel.DatumCsysByThreePoints( name='Global', coordSysType=CARTESIAN, origin=(0,0,0), point1=(1,0,0), line2=(0,-1,0))
k = len(prtPanel.datums.keys()) - 2
panelDatum1 = prtPanel.datums[ prtPanel.datums.keys()[k] ]
panelDatum2 = prtPanel.datums[ prtPanel.datums.keys()[k+1] ]
prtPanel.MaterialOrientation( axis=AXIS_3, localCsys=panelDatum1, region=regTopLaminate, stackDirection=STACK_3 )
prtPanel.MaterialOrientation( axis=AXIS_3, localCsys=panelDatum1, region=regTopSkin, stackDirection=STACK_3 )
prtPanel.MaterialOrientation( axis=AXIS_3, localCsys=panelDatum1, region=regBotLaminate, stackDirection=STACK_3 )
prtPanel.MaterialOrientation( axis=AXIS_3, localCsys=panelDatum2, region=regBotSkin, stackDirection=STACK_3 )
del panelDatum1, panelDatum2
# Set Element Types
if( bQuadratic == 1 ):
prtPanel.setElementType( elemTypes=( ElemType(elemCode=S8R, elemLibrary=STANDARD), ElemType(elemCode=STRI65, elemLibrary=STANDARD)),
regions=regLaminate )
prtPanel.setElementType( elemTypes=( ElemType(elemCode=S8R, elemLibrary=STANDARD), ElemType(elemCode=STRI65, elemLibrary=STANDARD)),
regions=regCore )
prtPanel.setElementType( elemTypes=( ElemType(elemCode=S8R, elemLibrary=STANDARD), ElemType(elemCode=STRI65, elemLibrary=STANDARD)),
regions=regSkin )
prtPanel.setElementType(elemTypes=( ElemType(elemCode=C3D20R, elemLibrary=STANDARD), ElemType(elemCode=C3D15, elemLibrary=STANDARD),
ElemType(elemCode=C3D10M, elemLibrary=STANDARD)), regions=regSteel )
prtPanel.setElementType(elemTypes=( ElemType(elemCode=C3D20R, elemLibrary=STANDARD), ElemType(elemCode=C3D15, elemLibrary=STANDARD),
ElemType(elemCode=C3D10M, elemLibrary=STANDARD)), regions=regPotting )
else:
prtPanel.setElementType( elemTypes=( ElemType(elemCode=S4, elemLibrary=STANDARD), ElemType(elemCode=S3, elemLibrary=STANDARD)),
regions=regLaminate )
prtPanel.setElementType( elemTypes=( ElemType(elemCode=S4, elemLibrary=STANDARD), ElemType(elemCode=S3, elemLibrary=STANDARD)),
regions=regCore )
prtPanel.setElementType( elemTypes=( ElemType(elemCode=S4, elemLibrary=STANDARD), ElemType(elemCode=S3, elemLibrary=STANDARD)),
regions=regSkin )
prtPanel.setElementType(elemTypes=( ElemType(elemCode=C3D8, elemLibrary=STANDARD), ElemType(elemCode=C3D6, elemLibrary=STANDARD),
ElemType(elemCode=C3D4, elemLibrary=STANDARD)), regions=regSteel )
prtPanel.setElementType(elemTypes=( ElemType(elemCode=C3D8, elemLibrary=STANDARD), ElemType(elemCode=C3D6, elemLibrary=STANDARD),
ElemType(elemCode=C3D4, elemLibrary=STANDARD)), regions=regPotting )
# Mesh Panel and Create Part Instance
prtPanel.setMeshControls( allowMapped=True, elemShape=QUAD, regions=seqCore )
prtPanel.setMeshControls( allowMapped=True, elemShape=QUAD, regions=seqLaminate )
prtPanel.setMeshControls( allowMapped=True, elemShape=HEX, regions=seqSteel )
prtPanel.setMeshControls( allowMapped=True, elemShape=HEX, regions=seqPotting )
prtPanel.seedPart( deviationFactor=0.1, size=hMesh )
seqCoarseEdges = boundaryUtils.getColinearEdges( prtPanel, (0,0,1) )
prtPanel.seedEdgeBySize( edges=seqCoarseEdges, size=scMesh*hMesh )
prtPanel.generateMesh()
instPanel = myAssembly.Instance( name='instPanel', part=prtPanel, dependent=ON )
# Create Step
if( bExplicit == 1 ):
stepPullout = myModel.ExplicitDynamicsStep( description='Explicit Pullout', name='Pullout', previous='Initial' )
stepPullout.setValues( timePeriod = uTime )
stepPullout.setValues( massScaling = ((SEMI_AUTOMATIC, MODEL, THROUGHOUT_STEP, 0.0, mScaleInc, BELOW_MIN, 100, 0, 0.0, 0.0, 0, None), ))
else:
stepPullout = myModel.StaticStep( description='Pullout Load', name='Pullout', nlgeom=ON, previous='Initial' )
stepPullout.setValues( timePeriod=uTime )
stepPullout.setValues( maxNumInc=uMaxNumInc )
stepPullout.setValues( initialInc=uInitialInc )
stepPullout.setValues( minInc=uMinInc )
stepPullout.setValues( maxInc=uMaxInc )
# Apply Load Condition
if( bDisplace == 0 ):
seqPress = instPanel.faces[0:0]
for i in range( len( instPanel.faces ) ):
x = prtPanel.faces[i].getCentroid()
if( abs( x[0][2] - hCore ) < 1.e-5 ):
if( boundaryUtils.dist( x[0], (xc,yc,hCore) ) < ri ):
seqPress = seqPress + instPanel.faces.findAt( x, printWarning=False )
myModel.Pressure( createStepName='Pullout', distributionType=UNIFORM, field='', magnitude=P, name='Pressure',
region=Region( side1Faces=seqPress ) )
myModel.EquallySpacedAmplitude(begin=0.0, data=(0.0, 0.5, 1.0, 1.0),
fixedInterval=uTime/4, name='EqAmp', smooth=SOLVER_DEFAULT, timeSpan=STEP)
myModel.SmoothStepAmplitude(data=((0.0, 0.0), (uTime/2,1.0), (uTime, 1.0)), name='SmoothAmp', timeSpan=STEP)
myModel.loads['Pressure'].setValues(amplitude='EqAmp')
else:
seqInsertBase = boundaryUtils.getFacesFromCntnr( instPanel, instInsert )
seqTopInsert = boundaryUtils.getFacesList( instPanel, seqInsertBase, 2, hCore )
myModel.DisplacementBC( amplitude=UNSET, createStepName='Pullout', distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None,
name='Load', region=Region( faces=seqTopInsert ), u1=UNSET, u2=UNSET, u3=uLoad, ur1=UNSET, ur2=UNSET, ur3=UNSET )
# Apply Clamp Boundary Condition
seqClamp = instPanel.faces[0:0]
for i in range( len( instPanel.faces ) ):
x = instPanel.faces[i].getCentroid()
if( abs( x[0][2] - hCore ) < 1.e-5 ):
if( x[0][0] < cx ):
seqClamp = seqClamp + instPanel.faces.findAt( x, printWarning=False )
if( x[0][1] < cy ):
seqClamp = seqClamp + instPanel.faces.findAt( x, printWarning=False )
myModel.DisplacementBC( amplitude=UNSET, createStepName='Initial', distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None,
name='Clamp', region=Region( faces=seqClamp ), u1=UNSET, u2=UNSET, u3=0, ur1=UNSET, ur2=UNSET, ur3=UNSET )
# Apply Symmetry Boundary Condition
seqSymFaces = boundaryUtils.getFaces( instPanel, 1, yc )
seqSymEdges = boundaryUtils.getEdges( instPanel, 1, yc )
myModel.YsymmBC(createStepName='Initial', name='YSym', region=Region( faces=seqSymFaces, edges=seqSymEdges ))
seqSymFaces = boundaryUtils.getFaces( instPanel, 0, xc )
seqSymEdges = boundaryUtils.getEdges( instPanel, 0, xc )
myModel.XsymmBC(createStepName='Initial', name='XSym', region=Region( faces=seqSymFaces, edges=seqSymEdges ))
# Create Set on Insert Top
seqInsertTop = prtPanel.faces[0:0]
for i in range( len( prtPanel.faces ) ):
x = prtPanel.faces[i].getCentroid()
if( abs( x[0][2] - hCore ) < 1.e-5 ):
if( boundaryUtils.dist( x[0], (xc,yc,hCore) ) < ri ):
seqInsertTop = seqInsertTop + prtPanel.faces.findAt( x, printWarning=False )
prtPanel.Set( faces=seqInsertTop, name='Insert-Top' )
# Create Reaction Force History Output Request
myModel.HistoryOutputRequest( createStepName='Pullout', name='Insert-ReactionForce', rebar=EXCLUDE,
region=instPanel.sets['Insert-Top'], sectionPoints=DEFAULT, variables=('U3', 'RF3') )
# Delete Extra Instances and Parts
del myAssembly.instances['instBotFace']
del myAssembly.instances['instCore']
del myAssembly.instances['instPotting']
del myAssembly.instances['instInsert']
del myAssembly.instances['instTopFace']
del myModel.parts['SymBotFace']
del myModel.parts['SymCore']
del myModel.parts['SymInsert']
del myModel.parts['SymPotting']
del myModel.parts['SymTopFace']
# Create Job
mdb.Job( contactPrint=OFF, description='Pullout Load Applied', echoPrint=OFF, explicitPrecision=SINGLE,
historyPrint=OFF, memory=90, memoryUnits=PERCENTAGE, model='Honeycomb', modelPrint=OFF, multiprocessingMode=DEFAULT,
name='Pullout-NB221-1e9-1in', nodalOutputPrecision=SINGLE, numCpus=1, numDomains=1, parallelizationMethodExplicit=DOMAIN, scratch='',
type=ANALYSIS, userSubroutine='' )
"""
"""
| [
"boundaryUtils.getFacesFromCntnr",
"geomHoney.getLines",
"geomHoney.getVertices",
"boundaryUtils.getEdges",
"boundaryUtils.dist",
"regionToolset.Region",
"geomHoney.getMinDist",
"geomHoney.getCorners",
"boundaryUtils.getColinearEdges",
"boundaryUtils.getFaces",
"boundaryUtils.getFacesList",
"b... | [((1637, 1669), 'geomHoney.getCorners', 'geomHoney.getCorners', (['lx', 'nx', 'ny'], {}), '(lx, nx, ny)\n', (1657, 1669), False, 'import geomHoney\n'), ((2080, 2110), 'geomHoney.getLines', 'geomHoney.getLines', (['(0, 0)', 'lx'], {}), '((0, 0), lx)\n', (2098, 2110), False, 'import geomHoney\n'), ((9977, 10031), 'boundaryUtils.getFacesFromCntnr', 'boundaryUtils.getFacesFromCntnr', (['prtPanel', 'instPotting'], {}), '(prtPanel, instPotting)\n', (10008, 10031), False, 'import boundaryUtils\n'), ((10050, 10112), 'boundaryUtils.getFacesList', 'boundaryUtils.getFacesList', (['prtPanel', 'seqPotSkinBase', '(2)', 'hCore'], {}), '(prtPanel, seqPotSkinBase, 2, hCore)\n', (10076, 10112), False, 'import boundaryUtils\n'), ((10131, 10189), 'boundaryUtils.getFacesList', 'boundaryUtils.getFacesList', (['prtPanel', 'seqPotSkinBase', '(2)', '(0)'], {}), '(prtPanel, seqPotSkinBase, 2, 0)\n', (10157, 10189), False, 'import boundaryUtils\n'), ((10410, 10463), 'boundaryUtils.getCellsFromCntnr', 'boundaryUtils.getCellsFromCntnr', (['prtPanel', 'instInsert'], {}), '(prtPanel, instInsert)\n', (10441, 10463), False, 'import boundaryUtils\n'), ((10479, 10533), 'boundaryUtils.getCellsFromCntnr', 'boundaryUtils.getCellsFromCntnr', (['prtPanel', 'instPotting'], {}), '(prtPanel, instPotting)\n', (10510, 10533), False, 'import boundaryUtils\n'), ((10554, 10608), 'boundaryUtils.getFacesFromCntnr', 'boundaryUtils.getFacesFromCntnr', (['prtPanel', 'instTopFace'], {}), '(prtPanel, instTopFace)\n', (10585, 10608), False, 'import boundaryUtils\n'), ((10628, 10682), 'boundaryUtils.getFacesFromCntnr', 'boundaryUtils.getFacesFromCntnr', (['prtPanel', 'instBotFace'], {}), '(prtPanel, instBotFace)\n', (10659, 10682), False, 'import boundaryUtils\n'), ((10741, 10792), 'boundaryUtils.getFacesFromCntnr', 'boundaryUtils.getFacesFromCntnr', (['prtPanel', 'instCore'], {}), '(prtPanel, instCore)\n', (10772, 10792), False, 'import boundaryUtils\n'), ((10853, 10892), 'regionToolset.Region', 
'regionToolset.Region', ([], {'faces': 'seqLaminate'}), '(faces=seqLaminate)\n', (10873, 10892), False, 'import regionToolset\n'), ((10905, 11004), 'regionToolset.Region', 'regionToolset.Region', ([], {'skinFaces': "(('PottingTop', seqTopPotSkin), ('PottingBot', seqBotPotSkin))"}), "(skinFaces=(('PottingTop', seqTopPotSkin), (\n 'PottingBot', seqBotPotSkin)))\n", (10925, 11004), False, 'import regionToolset\n'), ((11013, 11049), 'regionToolset.Region', 'regionToolset.Region', ([], {'cells': 'seqSteel'}), '(cells=seqSteel)\n', (11033, 11049), False, 'import regionToolset\n'), ((11065, 11103), 'regionToolset.Region', 'regionToolset.Region', ([], {'cells': 'seqPotting'}), '(cells=seqPotting)\n', (11085, 11103), False, 'import regionToolset\n'), ((11116, 11151), 'regionToolset.Region', 'regionToolset.Region', ([], {'faces': 'seqCore'}), '(faces=seqCore)\n', (11136, 11151), False, 'import regionToolset\n'), ((11608, 11650), 'regionToolset.Region', 'regionToolset.Region', ([], {'faces': 'seqTopLaminate'}), '(faces=seqTopLaminate)\n', (11628, 11650), False, 'import regionToolset\n'), ((11670, 11712), 'regionToolset.Region', 'regionToolset.Region', ([], {'faces': 'seqBotLaminate'}), '(faces=seqBotLaminate)\n', (11690, 11712), False, 'import regionToolset\n'), ((11728, 11792), 'regionToolset.Region', 'regionToolset.Region', ([], {'skinFaces': "(('PottingTop', seqTopPotSkin),)"}), "(skinFaces=(('PottingTop', seqTopPotSkin),))\n", (11748, 11792), False, 'import regionToolset\n'), ((11808, 11872), 'regionToolset.Region', 'regionToolset.Region', ([], {'skinFaces': "(('PottingBot', seqBotPotSkin),)"}), "(skinFaces=(('PottingBot', seqBotPotSkin),))\n", (11828, 11872), False, 'import regionToolset\n'), ((15284, 15335), 'boundaryUtils.getColinearEdges', 'boundaryUtils.getColinearEdges', (['prtPanel', '(0, 0, 1)'], {}), '(prtPanel, (0, 0, 1))\n', (15314, 15335), False, 'import boundaryUtils\n'), ((18176, 18216), 'boundaryUtils.getFaces', 'boundaryUtils.getFaces', (['instPanel', 
'(1)', 'yc'], {}), '(instPanel, 1, yc)\n', (18198, 18216), False, 'import boundaryUtils\n'), ((18233, 18273), 'boundaryUtils.getEdges', 'boundaryUtils.getEdges', (['instPanel', '(1)', 'yc'], {}), '(instPanel, 1, yc)\n', (18255, 18273), False, 'import boundaryUtils\n'), ((18401, 18441), 'boundaryUtils.getFaces', 'boundaryUtils.getFaces', (['instPanel', '(0)', 'xc'], {}), '(instPanel, 0, xc)\n', (18423, 18441), False, 'import boundaryUtils\n'), ((18458, 18498), 'boundaryUtils.getEdges', 'boundaryUtils.getEdges', (['instPanel', '(0)', 'xc'], {}), '(instPanel, 0, xc)\n', (18480, 18498), False, 'import boundaryUtils\n'), ((2396, 2430), 'geomHoney.getVertices', 'geomHoney.getVertices', (['cnrs[i]', 'lx'], {}), '(cnrs[i], lx)\n', (2417, 2430), False, 'import geomHoney\n'), ((17054, 17108), 'boundaryUtils.getFacesFromCntnr', 'boundaryUtils.getFacesFromCntnr', (['instPanel', 'instInsert'], {}), '(instPanel, instInsert)\n', (17085, 17108), False, 'import boundaryUtils\n'), ((17128, 17190), 'boundaryUtils.getFacesList', 'boundaryUtils.getFacesList', (['instPanel', 'seqInsertBase', '(2)', 'hCore'], {}), '(instPanel, seqInsertBase, 2, hCore)\n', (17154, 17190), False, 'import boundaryUtils\n'), ((1736, 1767), 'geomHoney.getLines', 'geomHoney.getLines', (['cnrs[i]', 'lx'], {}), '(cnrs[i], lx)\n', (1754, 1767), False, 'import geomHoney\n'), ((2440, 2476), 'geomHoney.getMinDist', 'geomHoney.getMinDist', (['(xc, yc)', 'vtxs'], {}), '((xc, yc), vtxs)\n', (2460, 2476), False, 'import geomHoney\n'), ((18803, 18844), 'boundaryUtils.dist', 'boundaryUtils.dist', (['x[0]', '(xc, yc, hCore)'], {}), '(x[0], (xc, yc, hCore))\n', (18821, 18844), False, 'import boundaryUtils\n'), ((16409, 16450), 'boundaryUtils.dist', 'boundaryUtils.dist', (['x[0]', '(xc, yc, hCore)'], {}), '(x[0], (xc, yc, hCore))\n', (16427, 16450), False, 'import boundaryUtils\n')] |
from django.conf import settings
from django.urls import include, path
from . import views
# All routes are mounted under the configurable BASE_PATH prefix.
_prefix = settings.BASE_PATH

urlpatterns = [
    path(f"{_prefix}callback", views.callback, name="callback"),
    path(f"{_prefix}index", views.index, name="index"),
    path(f"{_prefix}v0/auth/login", views.login, name="openmaps_login"),
    path(f"{_prefix}v0/auth/logout", views.logout, name="openmaps_logout"),
    path(f"{_prefix}v0/auth/status", views.status, name="status"),
    path(f"{_prefix}v0/auth/valid", views.valid, name="valid"),
    path(f"{_prefix}v0/social/", include("social_django.urls", namespace="social")),
]

# The admin site is only mounted during development; the import is kept
# local so production deployments never pull in django.contrib.admin here.
if settings.DEBUG:
    from django.contrib import admin

    urlpatterns += [
        path(f"{_prefix}v0/admin/", admin.site.urls),
    ]
| [
"django.urls.path",
"django.urls.include"
] | [((113, 183), 'django.urls.path', 'path', (['f"""{settings.BASE_PATH}callback"""', 'views.callback'], {'name': '"""callback"""'}), "(f'{settings.BASE_PATH}callback', views.callback, name='callback')\n", (117, 183), False, 'from django.urls import include, path\n'), ((189, 250), 'django.urls.path', 'path', (['f"""{settings.BASE_PATH}index"""', 'views.index'], {'name': '"""index"""'}), "(f'{settings.BASE_PATH}index', views.index, name='index')\n", (193, 250), False, 'from django.urls import include, path\n'), ((256, 334), 'django.urls.path', 'path', (['f"""{settings.BASE_PATH}v0/auth/login"""', 'views.login'], {'name': '"""openmaps_login"""'}), "(f'{settings.BASE_PATH}v0/auth/login', views.login, name='openmaps_login')\n", (260, 334), False, 'from django.urls import include, path\n'), ((340, 426), 'django.urls.path', 'path', (['f"""{settings.BASE_PATH}v0/auth/logout"""', 'views.logout'], {'name': '"""openmaps_logout"""'}), "(f'{settings.BASE_PATH}v0/auth/logout', views.logout, name=\n 'openmaps_logout')\n", (344, 426), False, 'from django.urls import include, path\n'), ((427, 499), 'django.urls.path', 'path', (['f"""{settings.BASE_PATH}v0/auth/status"""', 'views.status'], {'name': '"""status"""'}), "(f'{settings.BASE_PATH}v0/auth/status', views.status, name='status')\n", (431, 499), False, 'from django.urls import include, path\n'), ((505, 574), 'django.urls.path', 'path', (['f"""{settings.BASE_PATH}v0/auth/valid"""', 'views.valid'], {'name': '"""valid"""'}), "(f'{settings.BASE_PATH}v0/auth/valid', views.valid, name='valid')\n", (509, 574), False, 'from django.urls import include, path\n'), ((637, 686), 'django.urls.include', 'include', (['"""social_django.urls"""'], {'namespace': '"""social"""'}), "('social_django.urls', namespace='social')\n", (644, 686), False, 'from django.urls import include, path\n'), ((834, 889), 'django.urls.path', 'path', (['f"""{settings.BASE_PATH}v0/admin/"""', 'admin.site.urls'], {}), "(f'{settings.BASE_PATH}v0/admin/', 
admin.site.urls)\n", (838, 889), False, 'from django.urls import include, path\n')] |
from PIL import Image
def create_thumbnail(infile, outfile, size=(129, 129), file_format="JPEG"):
    """Create a thumbnail of *infile* and save it to *outfile*.

    Parameters
    ----------
    infile : str
        Path of the source image.
    outfile : str
        Destination path for the thumbnail; must differ from ``infile``.
    size : tuple of int, optional
        Maximum (width, height) of the thumbnail; Pillow keeps the
        aspect ratio.
    file_format : str, optional
        Output format understood by Pillow (e.g. ``"JPEG"``, ``"PNG"``).

    Returns
    -------
    PIL.Image.Image or None
        The thumbnail image, or ``None`` when ``infile == outfile``
        (the call is a silent no-op in that case, as before).

    Raises
    ------
    IOError
        If the image cannot be read or written.
    """
    if infile == outfile:
        # Refuse to overwrite the source file in place; keep the original
        # behavior of silently doing nothing.
        return None
    try:
        im = Image.open(infile)
        im.thumbnail(size)  # resizes in place, preserving aspect ratio
        im.save(outfile, file_format)
        return im
    except IOError:
        # Bug fix: the original did `raise IOError`, which raised a brand
        # new, message-less exception class and discarded the real error.
        # A bare `raise` re-raises the caught exception with its traceback.
        raise
| [
"PIL.Image.open"
] | [((156, 174), 'PIL.Image.open', 'Image.open', (['infile'], {}), '(infile)\n', (166, 174), False, 'from PIL import Image\n')] |
import gym
from stable_baselines3 import SAC
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize
import active_reward_learning
from active_reward_learning.drlhp.reward_model import (
RewardModelEnvWrapper,
RewardModelNN,
TrueRewardCallback,
)
def main():
    """Train SAC on the pendulum task against a learned reward model.

    The true Gym environment is wrapped so the agent optimizes the reward
    predicted by a pre-trained reward network, while ``Monitor`` records
    the environment's true reward (``true_reward``) for evaluation. After
    training, the policy is saved and shown in an endless demo loop.
    """
    # Build the shaped, monitored, vectorized and normalized environment.
    base_env = gym.make("InvertedPendulum-Penalty-v2")
    reward_net = RewardModelNN.load("pendulum_reward_model.pt")
    shaped_env = RewardModelEnvWrapper(base_env, reward_net)
    monitored_env = Monitor(shaped_env, info_keywords=("true_reward",))
    vec_env = VecNormalize(DummyVecEnv([lambda: monitored_env]))
    # The callback logs the true (unshaped) reward during training.
    true_reward_cb = TrueRewardCallback()
    agent = SAC("MlpPolicy", vec_env, verbose=1)
    agent.learn(total_timesteps=30000, log_interval=1, callback=true_reward_cb)
    agent.save("sac_model_pendulum_30k.zip")
    # Render the trained policy forever, restarting finished episodes.
    observation = vec_env.reset()
    while True:
        action, _ = agent.predict(observation, deterministic=True)
        observation, reward, done, info = vec_env.step(action)
        vec_env.render()
        if done:
            observation = vec_env.reset()


if __name__ == "__main__":
    main()
| [
"stable_baselines3.SAC",
"stable_baselines3.common.vec_env.VecNormalize",
"stable_baselines3.common.monitor.Monitor",
"active_reward_learning.drlhp.reward_model.RewardModelNN.load",
"active_reward_learning.drlhp.reward_model.RewardModelEnvWrapper",
"active_reward_learning.drlhp.reward_model.TrueRewardCall... | [((412, 451), 'gym.make', 'gym.make', (['"""InvertedPendulum-Penalty-v2"""'], {}), "('InvertedPendulum-Penalty-v2')\n", (420, 451), False, 'import gym\n'), ((472, 518), 'active_reward_learning.drlhp.reward_model.RewardModelNN.load', 'RewardModelNN.load', (['"""pendulum_reward_model.pt"""'], {}), "('pendulum_reward_model.pt')\n", (490, 518), False, 'from active_reward_learning.drlhp.reward_model import RewardModelEnvWrapper, RewardModelNN, TrueRewardCallback\n'), ((529, 569), 'active_reward_learning.drlhp.reward_model.RewardModelEnvWrapper', 'RewardModelEnvWrapper', (['env', 'reward_model'], {}), '(env, reward_model)\n', (550, 569), False, 'from active_reward_learning.drlhp.reward_model import RewardModelEnvWrapper, RewardModelNN, TrueRewardCallback\n'), ((580, 624), 'stable_baselines3.common.monitor.Monitor', 'Monitor', (['env'], {'info_keywords': "('true_reward',)"}), "(env, info_keywords=('true_reward',))\n", (587, 624), False, 'from stable_baselines3.common.monitor import Monitor\n'), ((637, 664), 'stable_baselines3.common.vec_env.DummyVecEnv', 'DummyVecEnv', (['[lambda : env]'], {}), '([lambda : env])\n', (648, 664), False, 'from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize\n'), ((675, 693), 'stable_baselines3.common.vec_env.VecNormalize', 'VecNormalize', (['venv'], {}), '(venv)\n', (687, 693), False, 'from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize\n'), ((710, 730), 'active_reward_learning.drlhp.reward_model.TrueRewardCallback', 'TrueRewardCallback', ([], {}), '()\n', (728, 730), False, 'from active_reward_learning.drlhp.reward_model import RewardModelEnvWrapper, RewardModelNN, TrueRewardCallback\n'), ((743, 776), 'stable_baselines3.SAC', 'SAC', (['"""MlpPolicy"""', 'venv'], {'verbose': '(1)'}), "('MlpPolicy', venv, verbose=1)\n", (746, 776), False, 'from stable_baselines3 import SAC\n')] |
# Attention-based Feature-level Distillation
# Original Source : https://github.com/HobbitLong/RepDistiller
import os
from torchvision import transforms, datasets
import torch.utils.data as data
import torch
def create_loader(batch_size, data_dir, data):
    """Build train/test DataLoaders for a named dataset.

    Parameters
    ----------
    batch_size : int
        Mini-batch size for both loaders.
    data_dir : str
        Root directory containing one sub-directory per dataset.
    data : str
        Dataset name. ``'CIFAR100'`` (now matched case-insensitively)
        selects torchvision's CIFAR-100; any other name is loaded from
        an ImageFolder layout with ``train`` and ``valid`` sub-dirs.
        NOTE(review): this parameter shadows the module alias ``data``
        (``import torch.utils.data as data``) from the file header.

    Returns
    -------
    tuple
        ``(train_loader, test_loader, num_classes, image_size)``
    """
    data_dir = os.path.join(data_dir, data)
    # Hoisted: every branch below compared data.lower() repeatedly.
    dataset_key = data.lower()

    if dataset_key == 'cifar100':
        # Consistency fix: CIFAR-100 used to require the exact string
        # 'CIFAR100' while all other datasets were matched via lower();
        # it is now matched case-insensitively too (backward compatible).
        normalize = transforms.Normalize((0.5071, 0.4867, 0.4408),
                                         (0.2675, 0.2565, 0.2761))
        transform_train = transforms.Compose(
            [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(),
             transforms.ToTensor(), normalize])
        transform_test = transforms.Compose([transforms.ToTensor(), normalize])
        trainset = datasets.CIFAR100(root=data_dir, train=True, download=False, transform=transform_train)
        testset = datasets.CIFAR100(root=data_dir, train=False, download=False, transform=transform_test)
        train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
        test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)
        return train_loader, test_loader, 100, 32

    # Fine-grained / scene datasets stored as ImageFolder trees; anything
    # unrecognized defaults to ImageNet's 1000 classes.
    class_counts = {'cub_200_2011': 200, 'dogs': 120, 'mit67': 67, 'stanford40': 40}
    n_class = class_counts.get(dataset_key, 1000)
    image_size = 224
    normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    transform_train = transforms.Compose(
        [transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(),
         transforms.ToTensor(), normalize])
    transform_test = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224),
                                        transforms.ToTensor(), normalize])
    trainset = datasets.ImageFolder(root=os.path.join(data_dir, 'train'), transform=transform_train)
    testset = datasets.ImageFolder(root=os.path.join(data_dir, 'valid'), transform=transform_test)
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True,
                                               num_workers=4, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, pin_memory=True,
                                              num_workers=2)
    return train_loader, test_loader, n_class, image_size
| [
"torchvision.transforms.CenterCrop",
"torchvision.datasets.CIFAR100",
"os.path.join",
"torchvision.transforms.RandomHorizontalFlip",
"torch.utils.data.lower",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torchvision.transforms.Resize",
"t... | [((283, 311), 'os.path.join', 'os.path.join', (['data_dir', 'data'], {}), '(data_dir, data)\n', (295, 311), False, 'import os\n'), ((2235, 2345), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(4)', 'pin_memory': '(True)'}), '(trainset, batch_size=batch_size, shuffle=True,\n num_workers=4, pin_memory=True)\n', (2262, 2345), False, 'import torch\n'), ((2409, 2519), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': '(2)'}), '(testset, batch_size=batch_size, shuffle=False,\n pin_memory=True, num_workers=2)\n', (2436, 2519), False, 'import torch\n'), ((768, 860), 'torchvision.datasets.CIFAR100', 'datasets.CIFAR100', ([], {'root': 'data_dir', 'train': '(True)', 'download': '(False)', 'transform': 'transform_train'}), '(root=data_dir, train=True, download=False, transform=\n transform_train)\n', (785, 860), False, 'from torchvision import transforms, datasets\n'), ((875, 967), 'torchvision.datasets.CIFAR100', 'datasets.CIFAR100', ([], {'root': 'data_dir', 'train': '(False)', 'download': '(False)', 'transform': 'transform_test'}), '(root=data_dir, train=False, download=False, transform=\n transform_test)\n', (892, 967), False, 'from torchvision import transforms, datasets\n'), ((989, 1063), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(trainset, batch_size=batch_size, shuffle=True)\n', (1016, 1063), False, 'import torch\n'), ((1087, 1161), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(testset, batch_size=batch_size, shuffle=False)\n', (1114, 1161), False, 'import torch\n'), ((1293, 1305), 'torch.utils.data.lower', 'data.lower', ([], {}), '()\n', (1303, 1305), True, 'import torch.utils.data 
as data\n'), ((1358, 1370), 'torch.utils.data.lower', 'data.lower', ([], {}), '()\n', (1368, 1370), True, 'import torch.utils.data as data\n'), ((1636, 1669), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (1664, 1669), False, 'from torchvision import transforms, datasets\n'), ((1671, 1704), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1702, 1704), False, 'from torchvision import transforms, datasets\n'), ((1706, 1727), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1725, 1727), False, 'from torchvision import transforms, datasets\n'), ((1739, 1805), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (1759, 1805), False, 'from torchvision import transforms, datasets\n'), ((1854, 1876), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1871, 1876), False, 'from torchvision import transforms, datasets\n'), ((1878, 1904), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1899, 1904), False, 'from torchvision import transforms, datasets\n'), ((1915, 1936), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1934, 1936), False, 'from torchvision import transforms, datasets\n'), ((1938, 2004), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (1958, 2004), False, 'from torchvision import transforms, datasets\n'), ((2053, 2084), 'os.path.join', 'os.path.join', (['data_dir', '"""train"""'], {}), "(data_dir, 'train')\n", (2065, 2084), False, 'import os\n'), ((2154, 2185), 'os.path.join', 'os.path.join', (['data_dir', '"""valid"""'], {}), "(data_dir, 'valid')\n", (2166, 2185), False, 'import os\n'), 
((401, 437), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (422, 437), False, 'from torchvision import transforms, datasets\n'), ((439, 472), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (470, 472), False, 'from torchvision import transforms, datasets\n'), ((474, 495), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (493, 495), False, 'from torchvision import transforms, datasets\n'), ((511, 583), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5071, 0.4867, 0.4408)', '(0.2675, 0.2565, 0.2761)'], {}), '((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))\n', (531, 583), False, 'from torchvision import transforms, datasets\n'), ((647, 668), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (666, 668), False, 'from torchvision import transforms, datasets\n'), ((670, 742), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5071, 0.4867, 0.4408)', '(0.2675, 0.2565, 0.2761)'], {}), '((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))\n', (690, 742), False, 'from torchvision import transforms, datasets\n'), ((1415, 1427), 'torch.utils.data.lower', 'data.lower', ([], {}), '()\n', (1425, 1427), True, 'import torch.utils.data as data\n'), ((1472, 1484), 'torch.utils.data.lower', 'data.lower', ([], {}), '()\n', (1482, 1484), True, 'import torch.utils.data as data\n')] |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import sys
import traceback
import json
from functools import partial
from datetime import datetime
from subprocess import Popen, PIPE
from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT
from moi.group import create_info
from moi.context import Context
def system_call(cmd, **kwargs):
    """Run *cmd* through the shell and return (stdout, stderr, return_value).

    Parameters
    ----------
    cmd : str or sequence of str
        The command to run; handed to the shell as-is.
    kwargs : dict, optional
        Ignored. Present only so this function shares the call signature
        expected by ``_redis_wrap``.

    Returns
    -------
    tuple of (str, str, int)
        Captured stdout, captured stderr, and the exit code (always 0
        when this function returns, since non-zero codes raise).

    Raises
    ------
    ValueError
        If the command exits with a non-zero status; the message embeds
        the captured stdout and stderr.

    Notes
    -----
    Ported from QIIME (http://www.qiime.org), previously named
    qiime_system_call. QIIME is a GPL project, but the authors granted
    permission to port this function and keep it under the BSD license.
    """
    process = Popen(cmd,
                    universal_newlines=True,
                    shell=True,
                    stdout=PIPE,
                    stderr=PIPE)
    # communicate() drains both pipes completely, which prevents the child
    # from deadlocking on a full pipe buffer -- do not replace with wait().
    out, err = process.communicate()
    exit_code = process.returncode
    if exit_code != 0:
        raise ValueError("Failed to execute: %s\nstdout: %s\nstderr: %s" %
                         (cmd, out, err))
    return out, err, exit_code
def _status_change(id, new_status):
    """Replace a job's status and return the status it had before.

    The stored job info is rewritten with ``new_status``, and an update
    is published on the job's pubsub channel via ``_deposit_payload``.

    Parameters
    ----------
    id : str
        The job ID.
    new_status : str
        The status to record.

    Returns
    -------
    str
        The status the job had prior to this call.
    """
    payload = json.loads(r_client.get(id))
    previous_status = payload['status']
    payload['status'] = new_status
    _deposit_payload(payload)
    return previous_status
def _deposit_payload(to_deposit):
    """Persist job info in Redis and announce it on the job's pubsub channel.

    Parameters
    ----------
    to_deposit : dict
        The job info; must carry ``'id'`` and ``'pubsub'`` keys.
    """
    job_id = to_deposit['id']
    with r_client.pipeline() as pipe:
        # Store the payload with an expiry and notify listeners in a
        # single round trip.
        pipe.set(job_id, json.dumps(to_deposit), ex=REDIS_KEY_TIMEOUT)
        pipe.publish(to_deposit['pubsub'], json.dumps({"update": [job_id]}))
        pipe.execute()
def _redis_wrap(job_info, func, *args, **kwargs):
    """Wrap something to compute
    The function that will have available, via kwargs['moi_update_status'], a
    method to modify the job status. This method can be used within the
    executing function by:
    old_status = kwargs['moi_update_status']('my new status')
    Parameters
    ----------
    job_info : dict
        Redis job details
    func : function
        A function to execute. This function must accept ``**kwargs``, and will
        have ``moi_update_status``, ``moi_context`` and ``moi_parent_id``
        available.
    Raises
    ------
    Exception
        If the function called raises, that exception is propagated.
    Returns
    -------
    Anything the function executed returns.
    """
    # Give the wrapped function a callable bound to this job's ID so it can
    # update its own status while running.
    status_changer = partial(_status_change, job_info['id'])
    kwargs['moi_update_status'] = status_changer
    kwargs['moi_context'] = job_info['context']
    kwargs['moi_parent_id'] = job_info['parent']
    # Mark the job as started and publish the state before executing.
    job_info['status'] = 'Running'
    job_info['date_start'] = str(datetime.now())
    _deposit_payload(job_info)
    caught = None
    try:
        result = func(*args, **kwargs)
        job_info['status'] = 'Success'
    except Exception as e:
        # On failure, the formatted traceback becomes the stored result.
        result = traceback.format_exception(*sys.exc_info())
        job_info['status'] = 'Failed'
        # Defer the re-raise so the final state is deposited first.
        caught = e
    finally:
        # Always record the outcome (success or failure) and the end time.
        job_info['result'] = result
        job_info['date_end'] = str(datetime.now())
        _deposit_payload(job_info)
    if caught is None:
        return result
    else:
        raise caught
def submit(ctx_name, parent_id, name, url, func, *args, **kwargs):
    """Submit a job through a named or already-resolved context.

    Parameters
    ----------
    ctx_name : str or Context
        Either the name of a registered context or a ``Context`` instance.
    parent_id : str
        The ID of the group that the job is a part of.
    name : str
        The name of the job.
    url : str
        The handler that can take the results (e.g., /beta_diversity/).
    func : function
        The function to execute. Any returns from this function will be
        serialized and deposited into Redis using the uuid for a key. This
        function should raise if the method fails.
    args : tuple or None
        Any args for ``func``.
    kwargs : dict or None
        Any kwargs for ``func``.

    Returns
    -------
    tuple, (str, str, AsyncResult)
        The job ID, parent ID and the IPython AsyncResult object of the job.
    """
    # Accept a resolved Context directly, or look the name up (falling back
    # to the default context when the name is unknown).
    resolved = (ctx_name if isinstance(ctx_name, Context)
                else ctxs.get(ctx_name, ctxs[ctx_default]))
    return _submit(resolved, parent_id, name, url, func, *args, **kwargs)
def _submit(ctx, parent_id, name, url, func, *args, **kwargs):
    """Register a job under a group and dispatch it to the cluster.

    Parameters
    ----------
    ctx : Context
        The context to execute the job through.
    parent_id : str
        The ID of the group that the job is a part of.
    name : str
        The name of the job.
    url : str
        The handler that can take the results (e.g., /beta_diversity/).
    func : function
        The function to execute. Any returns from this function will be
        serialized and deposited into Redis using the uuid for a key. This
        function should raise if the method fails.
    args : tuple or None
        Any args for ``func``.
    kwargs : dict or None
        Any kwargs for ``func``.

    Returns
    -------
    tuple, (str, str, AsyncResult)
        The job ID, parent ID and the IPython AsyncResult object of the job.
    """
    # Lazily create the parent group the first time a job is filed under it.
    if r_client.get(parent_id) is None:
        group_info = create_info('unnamed', 'group', id=parent_id)
        parent_id = group_info['id']
        r_client.set(parent_id, json.dumps(group_info))
    job_info = create_info(name, 'job', url=url, parent=parent_id,
                           context=ctx.name, store=True)
    job_info['status'] = 'Queued'
    job_id = job_info['id']
    # Persist the queued job and announce it to the parent group's channel
    # in a single pipelined round trip.
    with r_client.pipeline() as pipe:
        pipe.set(job_id, json.dumps(job_info))
        pipe.publish(parent_id + ':pubsub', json.dumps({'add': [job_id]}))
        pipe.execute()
    async_result = ctx.bv.apply_async(_redis_wrap, job_info, func,
                                      *args, **kwargs)
    return job_id, parent_id, async_result
def submit_nouser(func, *args, **kwargs):
    """Submit ``func`` for execution without an associated user.

    Thin wrapper around :func:`submit` that uses the default context, the
    shared ``"no-user"`` group, an ``"unnamed"`` job and no result handler.

    Parameters
    ----------
    func : function
        The function to execute. Any returns from this function will be
        serialized and deposited into Redis using the uuid for a key.
    args : tuple or None
        Any args for ``func``.
    kwargs : dict or None
        Any kwargs for ``func``.

    Returns
    -------
    tuple, (str, str, AsyncResult)
        The job ID, parent ID and the IPython AsyncResult object of the job.
    """
    return submit(ctx_default, "no-user", "unnamed", None,
                  func, *args, **kwargs)
| [
"subprocess.Popen",
"json.dumps",
"moi.ctxs.get",
"datetime.datetime.now",
"moi.r_client.pipeline",
"sys.exc_info",
"moi.r_client.get",
"functools.partial",
"moi.group.create_info"
] | [((1299, 1372), 'subprocess.Popen', 'Popen', (['cmd'], {'universal_newlines': '(True)', 'shell': '(True)', 'stdout': 'PIPE', 'stderr': 'PIPE'}), '(cmd, universal_newlines=True, shell=True, stdout=PIPE, stderr=PIPE)\n', (1304, 1372), False, 'from subprocess import Popen, PIPE\n'), ((3586, 3625), 'functools.partial', 'partial', (['_status_change', "job_info['id']"], {}), "(_status_change, job_info['id'])\n", (3593, 3625), False, 'from functools import partial\n'), ((6257, 6280), 'moi.r_client.get', 'r_client.get', (['parent_id'], {}), '(parent_id)\n', (6269, 6280), False, 'from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT\n'), ((6535, 6621), 'moi.group.create_info', 'create_info', (['name', '"""job"""'], {'url': 'url', 'parent': 'parent_id', 'context': 'ctx.name', 'store': '(True)'}), "(name, 'job', url=url, parent=parent_id, context=ctx.name, store\n =True)\n", (6546, 6621), False, 'from moi.group import create_info\n'), ((2227, 2243), 'moi.r_client.get', 'r_client.get', (['id'], {}), '(id)\n', (2239, 2243), False, 'from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT\n'), ((2605, 2624), 'moi.r_client.pipeline', 'r_client.pipeline', ([], {}), '()\n', (2622, 2624), False, 'from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT\n'), ((3841, 3855), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3853, 3855), False, 'from datetime import datetime\n'), ((5318, 5355), 'moi.ctxs.get', 'ctxs.get', (['ctx_name', 'ctxs[ctx_default]'], {}), '(ctx_name, ctxs[ctx_default])\n', (5326, 5355), False, 'from moi import r_client, ctxs, ctx_default, REDIS_KEY_TIMEOUT\n'), ((6331, 6376), 'moi.group.create_info', 'create_info', (['"""unnamed"""', '"""group"""'], {'id': 'parent_id'}), "('unnamed', 'group', id=parent_id)\n", (6342, 6376), False, 'from moi.group import create_info\n'), ((6716, 6735), 'moi.r_client.pipeline', 'r_client.pipeline', ([], {}), '()\n', (6733, 6735), False, 'from moi import r_client, ctxs, ctx_default, 
REDIS_KEY_TIMEOUT\n'), ((2655, 2677), 'json.dumps', 'json.dumps', (['to_deposit'], {}), '(to_deposit)\n', (2665, 2677), False, 'import json\n'), ((2730, 2758), 'json.dumps', 'json.dumps', (["{'update': [id]}"], {}), "({'update': [id]})\n", (2740, 2758), False, 'import json\n'), ((4224, 4238), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4236, 4238), False, 'from datetime import datetime\n'), ((6447, 6470), 'json.dumps', 'json.dumps', (['parent_info'], {}), '(parent_info)\n', (6457, 6470), False, 'import json\n'), ((6770, 6790), 'json.dumps', 'json.dumps', (['job_info'], {}), '(job_info)\n', (6780, 6790), False, 'import json\n'), ((6832, 6861), 'json.dumps', 'json.dumps', (["{'add': [job_id]}"], {}), "({'add': [job_id]})\n", (6842, 6861), False, 'import json\n'), ((4067, 4081), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4079, 4081), False, 'import sys\n')] |
import os
from unittest import TestCase
from keras_gpt_2 import get_bpe_from_files
class TestBPE(TestCase):
    """Round-trip tests for the byte-pair encoder shipped with keras-gpt-2."""

    def test_encode_and_decode(self):
        # The toy checkpoint lives next to this test file.
        checkpoint_dir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'toy_checkpoint')
        bpe = get_bpe_from_files(
            os.path.join(checkpoint_dir, 'encoder.json'),
            os.path.join(checkpoint_dir, 'vocab.bpe'),
        )
        text = 'Power, give me more power!'
        indices = bpe.encode(text)
        self.assertEqual([13434, 11, 1577, 502, 517, 1176, 0], indices)
        # decode() must invert encode() exactly.
        self.assertEqual(text, bpe.decode(indices))
        self.assertEqual(text, bpe.decode(bpe.encode(text)))
| [
"os.path.abspath",
"keras_gpt_2.get_bpe_from_files",
"os.path.join"
] | [((245, 289), 'os.path.join', 'os.path.join', (['current_path', '"""toy_checkpoint"""'], {}), "(current_path, 'toy_checkpoint')\n", (257, 289), False, 'import os\n'), ((313, 362), 'os.path.join', 'os.path.join', (['toy_checkpoint_path', '"""encoder.json"""'], {}), "(toy_checkpoint_path, 'encoder.json')\n", (325, 362), False, 'import os\n'), ((384, 430), 'os.path.join', 'os.path.join', (['toy_checkpoint_path', '"""vocab.bpe"""'], {}), "(toy_checkpoint_path, 'vocab.bpe')\n", (396, 430), False, 'import os\n'), ((445, 489), 'keras_gpt_2.get_bpe_from_files', 'get_bpe_from_files', (['encoder_path', 'vocab_path'], {}), '(encoder_path, vocab_path)\n', (463, 489), False, 'from keras_gpt_2 import get_bpe_from_files\n'), ((188, 213), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (203, 213), False, 'import os\n')] |
#!/usr/bin/env python
import PySimpleGUI as sg
# Simple example of TabGroup element and the options available to it
sg.theme('DarkAmber')  # Please always add color to your window
# Rows for the clicker tab: cookie bank / CpS readout, one button per
# building, each building's cost label, and each building's owned count.
cookiesStat = [sg.Text("Cookies: "), sg.Text('0', size = (50,1), key="cookieBank"), sg.Text("CpS: "), sg.Text("0", size=(50,1), key="cps")]
buttonBuilding = [sg.Button("Nonna"), sg.Button("Farm"), sg.Button("Mine")]
costBuilding = [sg.Text("Costo"), sg.Text("10", size = (10,1), key="costNonna"), sg.Text("Costo"), sg.Text("100", size = (10,1), key="costFarm"), sg.Text("Costo"), sg.Text("1000", size = (10,1), key="costMine")]
textBuilding = [sg.Text("Num: "), sg.Text("0", size = (10,1), key="incNonna"), sg.Text("Num: "), sg.Text("0", size = (10,1), key="incFarm"), sg.Text("Num: "), sg.Text("0", size = (10,1), key="incMine")]
primosTab = [cookiesStat, buttonBuilding, costBuilding, textBuilding, [sg.Button("Bake!"), sg.Button("Exit")]]
# The tab 1, 2, 3, 4 layouts - what goes inside each tab
tab1_layout = primosTab
tab2_layout = [[sg.Text('Tab 2')]]
tab3_layout = [[sg.Text('Tab 3')]]
tab4_layout = [[sg.Text('Tab 3')]]  # NOTE(review): label reads 'Tab 3' here too - likely meant 'Tab 4'; confirm
# The TabGroup layout - it must contain only Tabs
tab_group_layout = [[sg.Tab('Primero giuego', tab1_layout, font='Courier 15', key='-TAB1-'),
                     sg.Tab('Tab 2', tab2_layout, visible=False, key='-TAB2-'),
                     sg.Tab('Tab 3', tab3_layout, key='-TAB3-'),
                     sg.Tab('Tab 4', tab4_layout, visible=False, key='-TAB4-'),
                     ]]
# The window layout - the tab group on top, then the control row that
# picks a tab number and applies an action to it
layout = [[sg.TabGroup(tab_group_layout,
                       enable_events=True,
                       key='-TABGROUP-')],
          [sg.Text('Make tab number'), sg.Input(key='-IN-', size=(3,1)), sg.Button('Invisible'), sg.Button('Visible'), sg.Button('Select')]]
window = sg.Window('My window with tabs', layout, no_titlebar=False)
tab_keys = ('-TAB1-','-TAB2-','-TAB3-', '-TAB4-')  # map from a 1-based input value to a tab key
# Event loop: read events until the window is closed or Exit is pressed
while True:
    event, values = window.read()  # type: str, dict
    print(event, values)
    if event == sg.WIN_CLOSED or event == "Exit":
        break
    # handle button clicks: the number typed in '-IN-' selects which tab
    # (1-based) the action below applies to
    if event == 'Invisible':
        window[tab_keys[int(values['-IN-'])-1]].update(visible=False)
    if event == 'Visible':
        window[tab_keys[int(values['-IN-'])-1]].update(visible=True)
    if event == 'Select':
        window[tab_keys[int(values['-IN-'])-1]].select()
window.close()
"PySimpleGUI.Text",
"PySimpleGUI.TabGroup",
"PySimpleGUI.Button",
"PySimpleGUI.theme",
"PySimpleGUI.Tab",
"PySimpleGUI.Input",
"PySimpleGUI.Window"
] | [((117, 138), 'PySimpleGUI.theme', 'sg.theme', (['"""DarkAmber"""'], {}), "('DarkAmber')\n", (125, 138), True, 'import PySimpleGUI as sg\n'), ((1834, 1893), 'PySimpleGUI.Window', 'sg.Window', (['"""My window with tabs"""', 'layout'], {'no_titlebar': '(False)'}), "('My window with tabs', layout, no_titlebar=False)\n", (1843, 1893), True, 'import PySimpleGUI as sg\n'), ((200, 220), 'PySimpleGUI.Text', 'sg.Text', (['"""Cookies: """'], {}), "('Cookies: ')\n", (207, 220), True, 'import PySimpleGUI as sg\n'), ((222, 266), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(50, 1)', 'key': '"""cookieBank"""'}), "('0', size=(50, 1), key='cookieBank')\n", (229, 266), True, 'import PySimpleGUI as sg\n'), ((269, 285), 'PySimpleGUI.Text', 'sg.Text', (['"""CpS: """'], {}), "('CpS: ')\n", (276, 285), True, 'import PySimpleGUI as sg\n'), ((287, 324), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(50, 1)', 'key': '"""cps"""'}), "('0', size=(50, 1), key='cps')\n", (294, 324), True, 'import PySimpleGUI as sg\n'), ((343, 361), 'PySimpleGUI.Button', 'sg.Button', (['"""Nonna"""'], {}), "('Nonna')\n", (352, 361), True, 'import PySimpleGUI as sg\n'), ((363, 380), 'PySimpleGUI.Button', 'sg.Button', (['"""Farm"""'], {}), "('Farm')\n", (372, 380), True, 'import PySimpleGUI as sg\n'), ((382, 399), 'PySimpleGUI.Button', 'sg.Button', (['"""Mine"""'], {}), "('Mine')\n", (391, 399), True, 'import PySimpleGUI as sg\n'), ((417, 433), 'PySimpleGUI.Text', 'sg.Text', (['"""Costo"""'], {}), "('Costo')\n", (424, 433), True, 'import PySimpleGUI as sg\n'), ((435, 479), 'PySimpleGUI.Text', 'sg.Text', (['"""10"""'], {'size': '(10, 1)', 'key': '"""costNonna"""'}), "('10', size=(10, 1), key='costNonna')\n", (442, 479), True, 'import PySimpleGUI as sg\n'), ((482, 498), 'PySimpleGUI.Text', 'sg.Text', (['"""Costo"""'], {}), "('Costo')\n", (489, 498), True, 'import PySimpleGUI as sg\n'), ((500, 544), 'PySimpleGUI.Text', 'sg.Text', (['"""100"""'], {'size': '(10, 1)', 'key': '"""costFarm"""'}), 
"('100', size=(10, 1), key='costFarm')\n", (507, 544), True, 'import PySimpleGUI as sg\n'), ((547, 563), 'PySimpleGUI.Text', 'sg.Text', (['"""Costo"""'], {}), "('Costo')\n", (554, 563), True, 'import PySimpleGUI as sg\n'), ((565, 610), 'PySimpleGUI.Text', 'sg.Text', (['"""1000"""'], {'size': '(10, 1)', 'key': '"""costMine"""'}), "('1000', size=(10, 1), key='costMine')\n", (572, 610), True, 'import PySimpleGUI as sg\n'), ((629, 645), 'PySimpleGUI.Text', 'sg.Text', (['"""Num: """'], {}), "('Num: ')\n", (636, 645), True, 'import PySimpleGUI as sg\n'), ((647, 689), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(10, 1)', 'key': '"""incNonna"""'}), "('0', size=(10, 1), key='incNonna')\n", (654, 689), True, 'import PySimpleGUI as sg\n'), ((692, 708), 'PySimpleGUI.Text', 'sg.Text', (['"""Num: """'], {}), "('Num: ')\n", (699, 708), True, 'import PySimpleGUI as sg\n'), ((710, 751), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(10, 1)', 'key': '"""incFarm"""'}), "('0', size=(10, 1), key='incFarm')\n", (717, 751), True, 'import PySimpleGUI as sg\n'), ((754, 770), 'PySimpleGUI.Text', 'sg.Text', (['"""Num: """'], {}), "('Num: ')\n", (761, 770), True, 'import PySimpleGUI as sg\n'), ((772, 813), 'PySimpleGUI.Text', 'sg.Text', (['"""0"""'], {'size': '(10, 1)', 'key': '"""incMine"""'}), "('0', size=(10, 1), key='incMine')\n", (779, 813), True, 'import PySimpleGUI as sg\n'), ((888, 906), 'PySimpleGUI.Button', 'sg.Button', (['"""Bake!"""'], {}), "('Bake!')\n", (897, 906), True, 'import PySimpleGUI as sg\n'), ((908, 925), 'PySimpleGUI.Button', 'sg.Button', (['"""Exit"""'], {}), "('Exit')\n", (917, 925), True, 'import PySimpleGUI as sg\n'), ((1024, 1040), 'PySimpleGUI.Text', 'sg.Text', (['"""Tab 2"""'], {}), "('Tab 2')\n", (1031, 1040), True, 'import PySimpleGUI as sg\n'), ((1059, 1075), 'PySimpleGUI.Text', 'sg.Text', (['"""Tab 3"""'], {}), "('Tab 3')\n", (1066, 1075), True, 'import PySimpleGUI as sg\n'), ((1094, 1110), 'PySimpleGUI.Text', 'sg.Text', (['"""Tab 3"""'], 
{}), "('Tab 3')\n", (1101, 1110), True, 'import PySimpleGUI as sg\n'), ((1186, 1256), 'PySimpleGUI.Tab', 'sg.Tab', (['"""Primero giuego"""', 'tab1_layout'], {'font': '"""Courier 15"""', 'key': '"""-TAB1-"""'}), "('Primero giuego', tab1_layout, font='Courier 15', key='-TAB1-')\n", (1192, 1256), True, 'import PySimpleGUI as sg\n'), ((1279, 1336), 'PySimpleGUI.Tab', 'sg.Tab', (['"""Tab 2"""', 'tab2_layout'], {'visible': '(False)', 'key': '"""-TAB2-"""'}), "('Tab 2', tab2_layout, visible=False, key='-TAB2-')\n", (1285, 1336), True, 'import PySimpleGUI as sg\n'), ((1359, 1401), 'PySimpleGUI.Tab', 'sg.Tab', (['"""Tab 3"""', 'tab3_layout'], {'key': '"""-TAB3-"""'}), "('Tab 3', tab3_layout, key='-TAB3-')\n", (1365, 1401), True, 'import PySimpleGUI as sg\n'), ((1424, 1481), 'PySimpleGUI.Tab', 'sg.Tab', (['"""Tab 4"""', 'tab4_layout'], {'visible': '(False)', 'key': '"""-TAB4-"""'}), "('Tab 4', tab4_layout, visible=False, key='-TAB4-')\n", (1430, 1481), True, 'import PySimpleGUI as sg\n'), ((1567, 1634), 'PySimpleGUI.TabGroup', 'sg.TabGroup', (['tab_group_layout'], {'enable_events': '(True)', 'key': '"""-TABGROUP-"""'}), "(tab_group_layout, enable_events=True, key='-TABGROUP-')\n", (1578, 1634), True, 'import PySimpleGUI as sg\n'), ((1694, 1720), 'PySimpleGUI.Text', 'sg.Text', (['"""Make tab number"""'], {}), "('Make tab number')\n", (1701, 1720), True, 'import PySimpleGUI as sg\n'), ((1722, 1755), 'PySimpleGUI.Input', 'sg.Input', ([], {'key': '"""-IN-"""', 'size': '(3, 1)'}), "(key='-IN-', size=(3, 1))\n", (1730, 1755), True, 'import PySimpleGUI as sg\n'), ((1756, 1778), 'PySimpleGUI.Button', 'sg.Button', (['"""Invisible"""'], {}), "('Invisible')\n", (1765, 1778), True, 'import PySimpleGUI as sg\n'), ((1780, 1800), 'PySimpleGUI.Button', 'sg.Button', (['"""Visible"""'], {}), "('Visible')\n", (1789, 1800), True, 'import PySimpleGUI as sg\n'), ((1802, 1821), 'PySimpleGUI.Button', 'sg.Button', (['"""Select"""'], {}), "('Select')\n", (1811, 1821), True, 'import PySimpleGUI as 
sg\n')] |
#!/usr/bin/env python
import PySimpleGUI as sg
import os
'''
Simple Image Browser based on PySimpleGUI
'''
def main():
# Get the folder containing the images from the user
folder = sg.popup_get_folder('Image folder to open')
if folder is None:
sg.popup_cancel('Cancelling')
return
# get list of PNG files in folder
png_files = [folder + '\\' + f for f in os.listdir(folder) if '.png' in f]
filenames_only = [f for f in os.listdir(folder) if '.png' in f]
if len(png_files) == 0:
sg.popup('No PNG images in folder')
return
# define menu layout
menu = [['File', ['Open Folder', 'Exit']], ['Help', ['About', ]]]
# define layout, show and read the window
col = [[sg.Text(png_files[0], size=(80, 3), key='filename')],
[sg.Image(filename=png_files[0], key='image')],
[sg.Button('Next', size=(8, 2)), sg.Button('Prev', size=(8, 2)),
sg.Text('File 1 of {}'.format(len(png_files)), size=(15, 1), key='filenum')]]
col_files = [[sg.Listbox(values=filenames_only, size=(60, 30), key='listbox')],
[sg.Button('Read')]]
layout = [[sg.Menu(menu)], [sg.Col(col_files), sg.Col(col)]]
window = sg.Window('Image Browser', layout,
return_keyboard_events=True,
location=(0, 0),
use_default_focus=False)
# loop reading the user input and displaying image, filename
i = 0
while True:
event, values = window.read()
# --------------------- Button & Keyboard ---------------------
if event is None:
break
elif event in ('Next', 'MouseWheel:Down', 'Down:40', 'Next:34') and i < len(png_files)-1:
i += 1
elif event in ('Prev', 'MouseWheel:Up', 'Up:38', 'Prior:33') and i > 0:
i -= 1
elif event == 'Exit':
break
if event == 'Read':
filename = folder + '/' + values['listbox'][0]
else:
filename = png_files[i]
# ----------------- Menu choices -----------------
if event == 'Open Folder':
newfolder = sg.popup_get_folder('New folder', no_window=True)
if newfolder is None:
continue
folder = newfolder
png_files = [folder + '/' +
f for f in os.listdir(folder) if '.png' in f]
filenames_only = [f for f in os.listdir(folder) if '.png' in f]
window['listbox'].update(values=filenames_only)
window.refresh()
i = 0
elif event == 'About':
sg.popup('Demo PNG Viewer Program',
'Please give PySimpleGUI a try!')
# update window with new image
window['image'].update(filename=filename)
# update window with filename
window['filename'].update(filename)
# update page display
window['filenum'].update('File {} of {}'.format(i+1, len(png_files)))
window.close()
if __name__ == '__main__':
main() | [
"PySimpleGUI.popup_get_folder",
"os.listdir",
"PySimpleGUI.Listbox",
"PySimpleGUI.popup",
"PySimpleGUI.Text",
"PySimpleGUI.popup_cancel",
"PySimpleGUI.Button",
"PySimpleGUI.Col",
"PySimpleGUI.Image",
"PySimpleGUI.Menu",
"PySimpleGUI.Window"
] | [((197, 240), 'PySimpleGUI.popup_get_folder', 'sg.popup_get_folder', (['"""Image folder to open"""'], {}), "('Image folder to open')\n", (216, 240), True, 'import PySimpleGUI as sg\n'), ((1229, 1338), 'PySimpleGUI.Window', 'sg.Window', (['"""Image Browser"""', 'layout'], {'return_keyboard_events': '(True)', 'location': '(0, 0)', 'use_default_focus': '(False)'}), "('Image Browser', layout, return_keyboard_events=True, location=(0,\n 0), use_default_focus=False)\n", (1238, 1338), True, 'import PySimpleGUI as sg\n'), ((272, 301), 'PySimpleGUI.popup_cancel', 'sg.popup_cancel', (['"""Cancelling"""'], {}), "('Cancelling')\n", (287, 301), True, 'import PySimpleGUI as sg\n'), ((540, 575), 'PySimpleGUI.popup', 'sg.popup', (['"""No PNG images in folder"""'], {}), "('No PNG images in folder')\n", (548, 575), True, 'import PySimpleGUI as sg\n'), ((400, 418), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (410, 418), False, 'import os\n'), ((468, 486), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (478, 486), False, 'import os\n'), ((746, 797), 'PySimpleGUI.Text', 'sg.Text', (['png_files[0]'], {'size': '(80, 3)', 'key': '"""filename"""'}), "(png_files[0], size=(80, 3), key='filename')\n", (753, 797), True, 'import PySimpleGUI as sg\n'), ((812, 856), 'PySimpleGUI.Image', 'sg.Image', ([], {'filename': 'png_files[0]', 'key': '"""image"""'}), "(filename=png_files[0], key='image')\n", (820, 856), True, 'import PySimpleGUI as sg\n'), ((871, 901), 'PySimpleGUI.Button', 'sg.Button', (['"""Next"""'], {'size': '(8, 2)'}), "('Next', size=(8, 2))\n", (880, 901), True, 'import PySimpleGUI as sg\n'), ((903, 933), 'PySimpleGUI.Button', 'sg.Button', (['"""Prev"""'], {'size': '(8, 2)'}), "('Prev', size=(8, 2))\n", (912, 933), True, 'import PySimpleGUI as sg\n'), ((1047, 1110), 'PySimpleGUI.Listbox', 'sg.Listbox', ([], {'values': 'filenames_only', 'size': '(60, 30)', 'key': '"""listbox"""'}), "(values=filenames_only, size=(60, 30), key='listbox')\n", (1057, 1110), 
True, 'import PySimpleGUI as sg\n'), ((1131, 1148), 'PySimpleGUI.Button', 'sg.Button', (['"""Read"""'], {}), "('Read')\n", (1140, 1148), True, 'import PySimpleGUI as sg\n'), ((1166, 1179), 'PySimpleGUI.Menu', 'sg.Menu', (['menu'], {}), '(menu)\n', (1173, 1179), True, 'import PySimpleGUI as sg\n'), ((1183, 1200), 'PySimpleGUI.Col', 'sg.Col', (['col_files'], {}), '(col_files)\n', (1189, 1200), True, 'import PySimpleGUI as sg\n'), ((1202, 1213), 'PySimpleGUI.Col', 'sg.Col', (['col'], {}), '(col)\n', (1208, 1213), True, 'import PySimpleGUI as sg\n'), ((2139, 2188), 'PySimpleGUI.popup_get_folder', 'sg.popup_get_folder', (['"""New folder"""'], {'no_window': '(True)'}), "('New folder', no_window=True)\n", (2158, 2188), True, 'import PySimpleGUI as sg\n'), ((2619, 2688), 'PySimpleGUI.popup', 'sg.popup', (['"""Demo PNG Viewer Program"""', '"""Please give PySimpleGUI a try!"""'], {}), "('Demo PNG Viewer Program', 'Please give PySimpleGUI a try!')\n", (2627, 2688), True, 'import PySimpleGUI as sg\n'), ((2356, 2374), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (2366, 2374), False, 'import os\n'), ((2432, 2450), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (2442, 2450), False, 'import os\n')] |
import os
import sys
import torch
import models
import logging
import argparse
import datetime
from amp import AMP
from data_utils import load_data
class Instructor:
def __init__(self, args):
self.args = args
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
self.logger.addHandler(logging.StreamHandler(sys.stdout))
self.logger.addHandler(logging.FileHandler(args.log_name))
self.logger.info(f"> creating model {args.model}")
self.model = models.__dict__[args.model](num_classes=args.num_classes, dropout=args.dropout)
self.model.to(args.device)
if args.device.type == 'cuda':
self.logger.info(f"> cuda memory allocated: {torch.cuda.memory_allocated(args.device.index)}")
self._print_args()
def _print_args(self):
n_trainable_params, n_nontrainable_params = 0, 0
for p in self.model.parameters():
n_params = torch.prod(torch.tensor(p.size()))
if p.requires_grad:
n_trainable_params += n_params
else:
n_nontrainable_params += n_params
self.logger.info(f"> n_trainable_params: {n_trainable_params}, n_nontrainable_params: {n_nontrainable_params}")
self.logger.info('> training arguments:')
for arg in vars(self.args):
self.logger.info(f">>> {arg}: {getattr(self.args, arg)}")
def _train(self, train_dataloader, criterion, optimizer):
train_loss, n_correct, n_train = 0, 0, 0
n_batch = len(train_dataloader)
self.model.train()
for i_batch, (inputs, targets) in enumerate(train_dataloader):
inputs, targets = inputs.to(self.args.device), targets.to(self.args.device)
def closure():
optimizer.zero_grad()
outputs = self.model(inputs)
loss = criterion(outputs, targets)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_norm)
return outputs, loss
outputs, loss = optimizer.step(closure)
train_loss += loss.item() * targets.size(0)
n_correct += (torch.argmax(outputs, -1) == targets).sum().item()
n_train += targets.size(0)
ratio = int((i_batch+1)*50/n_batch)
sys.stdout.write(f"\r[{'>'*ratio}{' '*(50-ratio)}] {i_batch+1}/{n_batch} {(i_batch+1)*100/n_batch:.2f}%")
sys.stdout.flush()
print()
return train_loss / n_train, n_correct / n_train
def _test(self, test_dataloader, criterion):
test_loss, n_correct, n_test = 0, 0, 0
n_batch = len(test_dataloader)
self.model.eval()
with torch.no_grad():
for i_batch, (inputs, targets) in enumerate(test_dataloader):
inputs, targets = inputs.to(self.args.device), targets.to(self.args.device)
outputs = self.model(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item() * targets.size(0)
n_correct += (torch.argmax(outputs, -1) == targets).sum().item()
n_test += targets.size(0)
ratio = int((i_batch+1)*50/n_batch)
sys.stdout.write(f"\r[{'>'*ratio}{' '*(50-ratio)}] {i_batch+1}/{n_batch} {(i_batch+1)*100/n_batch:.2f}%")
sys.stdout.flush()
print()
return test_loss / n_test, n_correct / n_test
def run(self):
train_dataloader, test_dataloader = load_data(batch_size=self.args.batch_size,
workers=0,
dataset=self.args.dataset,
data_target_dir=os.path.join(self.args.data_dir, self.args.dataset),
data_aug=(self.args.no_data_aug==False),
cutout=self.args.cutout,
autoaug=self.args.autoaug)
criterion = torch.nn.CrossEntropyLoss()
optimizer = AMP(params=filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.args.lr,
epsilon=self.args.epsilon,
inner_lr=self.args.inner_lr,
inner_iter=self.args.inner_iter,
base_optimizer=torch.optim.SGD,
momentum=self.args.momentum,
weight_decay=self.args.decay,
nesterov=True)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, self.args.milestones, self.args.gamma)
best_loss, best_acc = 0, 0
for epoch in range(self.args.num_epoch):
train_loss, train_acc = self._train(train_dataloader, criterion, optimizer)
test_loss, test_acc = self._test(test_dataloader, criterion)
scheduler.step()
if test_acc > best_acc or (test_acc == best_acc and test_loss < best_loss):
best_acc, best_loss = test_acc, test_loss
self.logger.info(f"{epoch+1}/{self.args.num_epoch} - {100*(epoch+1)/self.args.num_epoch:.2f}%")
self.logger.info(f"[train] loss: {train_loss:.4f}, acc: {train_acc*100:.2f}, err: {100-train_acc*100:.2f}")
self.logger.info(f"[test] loss: {test_loss:.4f}, acc: {test_acc*100:.2f}, err: {100-test_acc*100:.2f}")
self.logger.info(f"best loss: {best_loss:.4f}, best acc: {best_acc*100:.2f}, best err: {100-best_acc*100:.2f}")
self.logger.info(f"log saved: {self.args.log_name}")
if __name__ == '__main__':
model_names = sorted(name for name in models.__dict__ if name.islower() and not name.startswith('__') and callable(models.__dict__[name]))
num_classes = {'svhn': 10, 'cifar10': 10, 'cifar100': 100}
parser = argparse.ArgumentParser(description='Trainer', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset', type=str, default='cifar10', choices=list(num_classes.keys()), help='Dataset name.')
parser.add_argument('--data_dir', type=str, default='data', help='Dictionary for dataset.')
parser.add_argument('--no_data_aug', default=False, action='store_true', help='Disable data augmentation.')
parser.add_argument('--cutout', default=False, action='store_true', help='Enable Cutout augmentation.')
parser.add_argument('--autoaug', default=False, action='store_true', help='Enable AutoAugment.')
parser.add_argument('--model', default='preactresnet18', choices=model_names, help='Model architecture.')
parser.add_argument('--num_epoch', type=int, default=200, help='Number of epochs to train.')
parser.add_argument('--batch_size', type=int, default=50, help='Number of samples in a batch.')
parser.add_argument('--lr', type=float, default=0.1, help='Outer learning rate.')
parser.add_argument('--epsilon', type=float, default=0.5, help='Perturbation norm ball radius.')
parser.add_argument('--inner_lr', type=float, default=1, help='Inner learning rate.')
parser.add_argument('--inner_iter', type=int, default=1, help='Inner iteration number.')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', type=float, default=1e-4, help='Weight decay (L2 penalty).')
parser.add_argument('--dropout', type=float, default=0, help='Dropout applied to the model.')
parser.add_argument('--clip_norm', type=int, default=50, help='Maximum norm of parameter gradient.')
parser.add_argument('--milestones', type=int, nargs='+', default=[100, 150], help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on each milstone.')
parser.add_argument('--device', type=str, default=None, choices=['cpu', 'cuda'], help='Device.')
args = parser.parse_args()
args.num_classes = num_classes[args.dataset]
args.log_name = f"{args.dataset}_{args.model}_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')[2:]}.log"
args.device = torch.device(args.device) if args.device else torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ins = Instructor(args)
ins.run()
| [
"logging.getLogger",
"logging.StreamHandler",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"torch.cuda.memory_allocated",
"os.path.join",
"torch.argmax",
"sys.stdout.write",
"datetime.datetime.now",
"torch.cuda.is_available",
"logging.FileHand... | [((5963, 6070), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Trainer"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Trainer', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (5986, 6070), False, 'import argparse\n'), ((246, 265), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (263, 265), False, 'import logging\n'), ((4139, 4166), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (4164, 4166), False, 'import torch\n'), ((4682, 4773), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optimizer', 'self.args.milestones', 'self.args.gamma'], {}), '(optimizer, self.args.milestones, self.\n args.gamma)\n', (4718, 4773), False, 'import torch\n'), ((8217, 8242), 'torch.device', 'torch.device', (['args.device'], {}), '(args.device)\n', (8229, 8242), False, 'import torch\n'), ((340, 373), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (361, 373), False, 'import logging\n'), ((406, 440), 'logging.FileHandler', 'logging.FileHandler', (['args.log_name'], {}), '(args.log_name)\n', (425, 440), False, 'import logging\n'), ((2363, 2492), 'sys.stdout.write', 'sys.stdout.write', (['f"\\r[{\'>\' * ratio}{\' \' * (50 - ratio)}] {i_batch + 1}/{n_batch} {(i_batch + 1) * 100 / n_batch:.2f}%"'], {}), '(\n f"\\r[{\'>\' * ratio}{\' \' * (50 - ratio)}] {i_batch + 1}/{n_batch} {(i_batch + 1) * 100 / n_batch:.2f}%"\n )\n', (2379, 2492), False, 'import sys\n'), ((2481, 2499), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2497, 2499), False, 'import sys\n'), ((2748, 2763), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2761, 2763), False, 'import torch\n'), ((3277, 3406), 'sys.stdout.write', 'sys.stdout.write', (['f"\\r[{\'>\' * ratio}{\' \' * (50 - ratio)}] {i_batch + 1}/{n_batch} {(i_batch + 1) * 100 / n_batch:.2f}%"'], {}), '(\n f"\\r[{\'>\' * ratio}{\' \' 
* (50 - ratio)}] {i_batch + 1}/{n_batch} {(i_batch + 1) * 100 / n_batch:.2f}%"\n )\n', (3293, 3406), False, 'import sys\n'), ((3399, 3417), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3415, 3417), False, 'import sys\n'), ((3811, 3862), 'os.path.join', 'os.path.join', (['self.args.data_dir', 'self.args.dataset'], {}), '(self.args.data_dir, self.args.dataset)\n', (3823, 3862), False, 'import os\n'), ((8286, 8311), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8309, 8311), False, 'import torch\n'), ((733, 779), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', (['args.device.index'], {}), '(args.device.index)\n', (760, 779), False, 'import torch\n'), ((8135, 8158), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8156, 8158), False, 'import datetime\n'), ((2213, 2238), 'torch.argmax', 'torch.argmax', (['outputs', '(-1)'], {}), '(outputs, -1)\n', (2225, 2238), False, 'import torch\n'), ((3116, 3141), 'torch.argmax', 'torch.argmax', (['outputs', '(-1)'], {}), '(outputs, -1)\n', (3128, 3141), False, 'import torch\n')] |
import re
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
class FormulaProcessor:
def __init__(self, formula):
self.formula = formula
yX = self.stringFilter(re.split(r'\~+', self.formula))
if len(yX) != 2:
raise Exception(f"{formula} does not have $a~b$ format")
self.fieldY = yX[0]
self.formulaX = yX[1]
self.fieldsX = self.stringFilter(re.split(r'\++', self.formulaX))
pass
def stringFilter(self, strings):
filtered = []
for s in strings:
s = s.strip()
if len(s) > 0:
filtered.append(s)
return filtered
def getDataFromSQLDB(self, dataDb, trainingProfile, randomSeed = 42):
"""returns (XTrain, XValidation, yTrain, yValidation) training and validation X,y, does not support categorical data yet"""
df = pd.read_sql(trainingProfile.source, dataDb.connection())
X = df[self.fieldsX].values
y = df[self.fieldY].values
if trainingProfile.validationSplit <= 0:
return X, None, y, None
return train_test_split(X, y, test_size = trainingProfile.validationSplit , random_state = randomSeed)
def getXFromSQL(self, dataDb, sql):
"""returns (XTest) training and validation X,y, does not support categorical data yet"""
df = self.getDfFromSQL(dataDb, sql)
return df[self.fieldsX].values
def getDfFromSQL(self, dataDb, sql):
"""returns (XTest) training and validation X,y, does not support categorical data yet"""
df = pd.read_sql(sql, dataDb.connection())
return df
def getDfAndXFromSQL(self, dataDb, sql, onlyPredictors=True):
"""returns (XTest) training and validation X,y, does not support categorical data yet"""
df = pd.read_sql(sql, dataDb.connection())
if onlyPredictors:
return df[self.fieldsX], df[self.fieldsX].values
else:
return df, df[self.fieldsX].values
| [
"sklearn.model_selection.train_test_split",
"re.split"
] | [((1184, 1278), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'trainingProfile.validationSplit', 'random_state': 'randomSeed'}), '(X, y, test_size=trainingProfile.validationSplit,\n random_state=randomSeed)\n', (1200, 1278), False, 'from sklearn.model_selection import train_test_split\n'), ((223, 253), 're.split', 're.split', (['"""\\\\~+"""', 'self.formula'], {}), "('\\\\~+', self.formula)\n", (231, 253), False, 'import re\n'), ((458, 489), 're.split', 're.split', (['"""\\\\++"""', 'self.formulaX'], {}), "('\\\\++', self.formulaX)\n", (466, 489), False, 'import re\n')] |
from django.shortcuts import get_object_or_404
from menu.models import Menu
""" Credits to CI's Boutique Ado Tutorial """
def cart_contents(request):
cart_items = []
total = 0
item_count = 0
cart = request.session.get('order', {})
for item_id, quantity in cart.items():
menu = get_object_or_404(Menu, pk=item_id)
total += quantity * menu.price
item_count += quantity
cart_items.append({
'item_id': item_id,
'quantity': quantity,
'menu': menu,
})
context = {
'cart_items': cart_items,
'total': total,
'item_count': item_count,
}
return context
| [
"django.shortcuts.get_object_or_404"
] | [((310, 345), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Menu'], {'pk': 'item_id'}), '(Menu, pk=item_id)\n', (327, 345), False, 'from django.shortcuts import get_object_or_404\n')] |
from arlo import Arlo
import datetime
import glob
import re
import os
import imageio
import timeout_decorator
import yaml
import logging
import logging.handlers
CONFIG_PATH = './cfg/'
LAPSE_PATH = './lapse/'
SNAPSHOT_PATH = './raw/'
PURGE_DURATION_HOURS = 24
LAPSE_DURATION = 20
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
HANDLER = logging.handlers.SysLogHandler('/dev/log')
FORMATTER = logging.Formatter('%(levelname)s: %(module)s.%(funcName)s: %(message)s')
HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(HANDLER)
class ArloLapse:
"""
This is a class to generate videos of Arlo stills.
Everything is pulled in through config.yaml which should
live in the sub-directory cfg/
Attributes:
username (str): Arlo username
password (str): <PASSWORD>
camera_names (list of strings): List of camera names to use
lapse_path (str): Path to resulting videos
snapshot_path (str): Path to raw snapshots
purge_duration_hours (float): Time in hours to retain images
laps_duration (float): Target duration of resulting video
"""
def __init__(self):
""" Constructor for ArloGIF class """
LOGGER.info('Initializing...')
with open(CONFIG_PATH + 'config.yaml', 'r') as f:
try:
cfg_yaml = yaml.load(f)
except yaml.YAMLError as exc:
print(exc)
if 'username' in cfg_yaml:
self.username = cfg_yaml['username']
else:
raise ValueError('Cannot find username in config.yaml')
if 'password' in cfg_yaml:
self.password = cfg_yaml['password']
else:
raise ValueError('Cannot find password in config.yaml')
self.camera_names = cfg_yaml.get('camera_names', [])
self.lapse_path = cfg_yaml.get('lapse_path', LAPSE_PATH)
self.snapshot_path = cfg_yaml.get('snapshot_path', SNAPSHOT_PATH)
self.purge_duration_hours = cfg_yaml.get('purge_duration_hours', PURGE_DURATION_HOURS)
self.lapse_duration = cfg_yaml.get('lapse_duration', LAPSE_DURATION)
@timeout_decorator.timeout(60)
def get_snapshot_url(self, arlo, basestation, camera):
"""
Method to return snapshot URL with timeout
Parameters:
arlo (Arlo): logged in Arlo instance
basestation (str): basestation name
camera (str): camera name
Returns:
snapshot url (str)
"""
return arlo.TriggerFullFrameSnapshot(basestation,camera)
def get_snapshots(self):
"""
Method to get snapshots for a list of cameras.
If the camera list is give in config.yaml, they are checked to exist.
If the camera list wasn't given, get all cameras from Arlo
"""
LOGGER.info('Getting snapshots...')
try:
arlo = Arlo(self.username, self.password)
basestations = arlo.GetDevices('basestation')
cameras = arlo.GetDevices('camera')
now = datetime.datetime.now()
now_str = now.strftime('%Y%m%d%H%M%S')
camera_names = []
for camera in cameras:
camera_name = camera['deviceName'].replace(' ', '_')
camera_names.append(camera_name)
if not self.camera_names:
LOGGER.debug('No camera names given, getting from Arlo')
self.camera_names = camera_names
else:
LOGGER.debug('Checking if given camera names are in Arlo')
self.camera_names = list(set(self.camera_names) & set(camera_names))
LOGGER.debug('Final list of cameras: ' + ', '.join(self.camera_names))
for camera in cameras:
camera_name = camera['deviceName'].replace(' ', '_')
if camera_name in self.camera_names:
LOGGER.debug('Getting snapshot for ' + camera_name)
snapshot_file = self.snapshot_path + camera_name + '_' + now_str + '.jpg'
try:
snapshot_url = self.get_snapshot_url(arlo, basestations[0], camera)
if snapshot_url is None:
LOGGER.warning('Returned None URL for ' + camera_name)
else:
arlo.DownloadSnapshot(snapshot_url,snapshot_file)
except timeout_decorator.TimeoutError:
LOGGER.warning('Timeout ' + camera_name)
except Exception as e:
print(e)
def purge_snapshots(self):
"""
Method to purge old snapshots that exceed the time to keep.
Pulls age of snapshot from filename.
"""
LOGGER.info('Purging Snapshots...')
now = datetime.datetime.now()
for camera_name in self.camera_names:
files = glob.glob(self.snapshot_path + camera_name + '*.jpg')
regex = r'([a-zA-Z_]+)([0-9]+)'
for file in files:
match = re.search(regex, file)
date = datetime.datetime.strptime(match.group(2), '%Y%m%d%H%M%S')
if date < now - datetime.timedelta(hours=self.purge_duration_hours):
LOGGER.debug('Purging ' + file)
os.remove(file)
def make_lapse(self):
""" Method to generate GIF from available snapshots. """
LOGGER.info('Making Time Lapses...')
for camera_name in self.camera_names:
files = sorted(glob.glob(self.snapshot_path + camera_name + '*.jpg'))
num_files = len(files)
LOGGER.debug('Found ' + str(num_files) + ' images for ' + camera_name)
if num_files>0:
fps = num_files/self.lapse_duration
images = []
for file in files:
images.append(imageio.imread(file))
output_file = self.snapshot_path + camera_name + '.gif'
final_file = self.lapse_path + camera_name + '.gif'
imageio.mimwrite(output_file, images, fps=fps)
command = 'gifsicle -O3 --colors 128 --resize-width 512 {} > {}'.format(output_file, final_file)
os.system(command)
if __name__ =='__main__':
arlo_gif = ArloLapse()
arlo_gif.get_snapshots()
arlo_gif.purge_snapshots()
arlo_gif.make_lapse()
LOGGER.info('Script complete.')
| [
"logging.getLogger",
"re.search",
"timeout_decorator.timeout",
"logging.Formatter",
"imageio.mimwrite",
"yaml.load",
"os.remove",
"datetime.datetime.now",
"glob.glob",
"imageio.imread",
"os.system",
"datetime.timedelta",
"logging.handlers.SysLogHandler",
"arlo.Arlo"
] | [((309, 336), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (326, 336), False, 'import logging\n'), ((380, 422), 'logging.handlers.SysLogHandler', 'logging.handlers.SysLogHandler', (['"""/dev/log"""'], {}), "('/dev/log')\n", (410, 422), False, 'import logging\n'), ((436, 508), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s: %(module)s.%(funcName)s: %(message)s"""'], {}), "('%(levelname)s: %(module)s.%(funcName)s: %(message)s')\n", (453, 508), False, 'import logging\n'), ((2208, 2237), 'timeout_decorator.timeout', 'timeout_decorator.timeout', (['(60)'], {}), '(60)\n', (2233, 2237), False, 'import timeout_decorator\n'), ((4980, 5003), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5001, 5003), False, 'import datetime\n'), ((3003, 3037), 'arlo.Arlo', 'Arlo', (['self.username', 'self.password'], {}), '(self.username, self.password)\n', (3007, 3037), False, 'from arlo import Arlo\n'), ((3165, 3188), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3186, 3188), False, 'import datetime\n'), ((5074, 5127), 'glob.glob', 'glob.glob', (["(self.snapshot_path + camera_name + '*.jpg')"], {}), "(self.snapshot_path + camera_name + '*.jpg')\n", (5083, 5127), False, 'import glob\n'), ((1391, 1403), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1400, 1403), False, 'import yaml\n'), ((5234, 5256), 're.search', 're.search', (['regex', 'file'], {}), '(regex, file)\n', (5243, 5256), False, 'import re\n'), ((5736, 5789), 'glob.glob', 'glob.glob', (["(self.snapshot_path + camera_name + '*.jpg')"], {}), "(self.snapshot_path + camera_name + '*.jpg')\n", (5745, 5789), False, 'import glob\n'), ((6276, 6322), 'imageio.mimwrite', 'imageio.mimwrite', (['output_file', 'images'], {'fps': 'fps'}), '(output_file, images, fps=fps)\n', (6292, 6322), False, 'import imageio\n'), ((6456, 6474), 'os.system', 'os.system', (['command'], {}), '(command)\n', (6465, 6474), False, 'import os\n'), ((5500, 5515), 
'os.remove', 'os.remove', (['file'], {}), '(file)\n', (5509, 5515), False, 'import os\n'), ((5373, 5424), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'self.purge_duration_hours'}), '(hours=self.purge_duration_hours)\n', (5391, 5424), False, 'import datetime\n'), ((6095, 6115), 'imageio.imread', 'imageio.imread', (['file'], {}), '(file)\n', (6109, 6115), False, 'import imageio\n')] |
"""
.. module:: mixtures
:platform: Unix, Windows
:synopsis: a module for defining the class :class:`Mixture`.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import pandas as pd
import mics
from mics.funcs import deltaMethod
from mics.funcs import diff
from mics.funcs import func
from mics.utils import InputError
from mics.utils import bennett
from mics.utils import cases
from mics.utils import crypto
from mics.utils import errorTitle
from mics.utils import info
from mics.utils import multimap
from mics.utils import propertyDict
from mics.utils import stdError
class mixture:
"""
A mixture of independently collected samples (MICS).
Parameters
----------
samples : :class:`pooledsample` or list(:class:`sample`)
A list of samples.
engine : :class:`MICS` or :class:`MBAR`
A method for mixture-model analysis.
"""
def __init__(self, samples, engine):
self.samples = samples
self.engine = engine
m = self.m = len(samples)
if mics.verbose:
# np.set_printoptions(precision=4, threshold=15, edgeitems=4, suppress=True)
info("\n=== Setting up mixture ===")
info("Analysis method: ", self.engine.__class__.__name__)
info("Number of samples:", m)
if m == 0:
raise InputError("list of samples is empty")
self.n = np.array([len(sample.dataset) for sample in samples])
self.neff = np.array([sample.neff for sample in samples])
names = self.names = list(samples[0].dataset.columns)
if mics.verbose:
info("Sample sizes:", self.n)
info("Effective sample sizes:", self.neff)
info("Properties:", ", ".join(names))
potentials = [sample.potential.lambdify() for sample in samples]
self.u = [multimap(potentials, sample.dataset) for sample in samples]
self.f = bennett(self.u)
mics.verbose and info("Initial free-energy guess:", self.f)
self.engine.__initialize__(self)
# ======================================================================================
def __compute__(self, functions, constants):
try:
if isinstance(functions, str):
funcs = [func(functions, self.names, constants).lambdify()]
else:
funcs = [func(f, self.names, constants).lambdify() for f in functions]
return [multimap(funcs, sample.dataset) for sample in self.samples]
except (InputError, KeyError):
return None
# ======================================================================================
def free_energies(self, reference=0):
"""
Computes the free energies of all sampled states relative to a given
reference state, as well as their standard errors.
Parameters
----------
reference : int, optional, default=0
Specifies which sampled state will be considered as a reference
for computing free-energy differences.
Returns
-------
pandas.DataFrame
A data frame containing the free-energy differences and their
computed standard errors for all sampled states.
"""
frame = self.samples.__qualifiers__()
frame["f"] = self.f - self.f[reference]
T = self.Theta
frame["df"] = np.sqrt(np.diag(T) - 2*T[:, reference] + T[reference, reference])
return frame
# ======================================================================================
def reweighting(self, potential, properties={}, derivatives={}, combinations={},
conditions={}, reference=0, **constants):
"""
Computes averages of specified properties at target states defined by
a given reduced `potential` function with distinct passed parameter
values, as well as the free energies of such states with respect to a
sampled `reference` state. Also, computes derivatives of these averages
and free energies with respect to the mentioned parameters. In addition,
evaluates combinations of free energies, averages, and derivatives. In
all cases, uncertainty propagation is handled automatically by means of
the delta method.
Parameters
----------
potential : str
A mathematical expression defining the reduced potential of the
target states. It might depend on the collective variables of
the mixture samples, as well as on external parameters whose
values will be passed via `conditions` or `constants`, such as
explained below.
properties : dict(str: str), optional, default={}
A dictionary associating names to mathematical expressions, thus
defining a set of properties whose averages must be evaluated at
the target states. If it is omitted, then only the relative free
energies of the target states will be evaluated. The expressions
might depend on the same collective variables and parameters
mentioned above for `potential`.
derivatives : dict(str: (str, str)), optional, default={}
A dictionary associating names to (property, parameter) pairs,
thus specifying derivatives of average properties at the target
states or relative free energies of these states with respect
to external parameters. For each pair, property must be either
"f" (for free energy) or a name defined in `properties`, while
parameter must be an external parameter such as described above
for `potential`.
combinations : dict(str: str), optional, default={}
A dictionary associating names to mathematical expressions, thus
defining combinations among average properties at the target
states, the relative free energies of these states, and their
derivatives with respect to external parameters. The expressions
might depend on "f" (for free energy) or on the names defined in
`properties`, as well as on external parameters such as described
above for `potential`.
conditions : pandas.DataFrame or dict, optional, default={}
A data frame whose column names are external parameters present
in mathematical expressions specified in arguments `potential`,
`properties`, and `combinations`. The rows of the data frame
contain sets of values of these parameters, in such as way that
the reweighting is carried out for every single set. This is a
way of defining multiple target states from a single `potential`
expression. The same information can be passed as a dictionary
associating names to lists of numerical values, provided that
all lists are equally sized. If it is empty, then a unique
target state will be considered and all external parameters in
`potential`, if any, must be passed as keyword arguments.
reference : int, optional, default=0
The index of a sampled state to be considered as a reference for
computing relative free energies.
**constants : keyword arguments
A set of keyword arguments passed as name=value, aimed to define
external parameter values for the evaluation of mathematical
expressions. These values will be repeated at all target states
specified via `potential` and `conditions`.
Returns
-------
pandas.DataFrame
A data frame containing the computed quantities, along with
their estimated uncertainties, at all target states specified
via `potential` and `conditions`.
"""
if mics.verbose:
info("\n=== Performing reweighting with %s ===" % self.engine.__class__.__name__)
info("Reduced potential:", potential)
constants and info("Provided constants: ", constants)
freeEnergy = "f"
if freeEnergy in properties.keys():
raise InputError("Word % is reserved for free energies" % freeEnergy)
condframe = pd.DataFrame(data=conditions) if isinstance(conditions, dict) else conditions
propfuncs = list(properties.values())
if not derivatives:
propnames = [freeEnergy] + list(properties.keys())
combs = combinations.values()
gProps = self.__compute__(propfuncs, constants)
if combinations:
gDelta = deltaMethod(combs, propnames, constants)
results = list()
for (index, condition) in cases(condframe):
mics.verbose and condition and info("Condition[%s]" % index, condition)
consts = dict(condition, **constants)
u = self.__compute__(potential, consts)
y = gProps if gProps else self.__compute__(propfuncs, consts)
(yu, Theta) = self.engine.__reweight__(self, u, y, reference)
result = propertyDict(propnames, yu, stdError(Theta))
if combinations:
delta = gDelta if gDelta.valid else deltaMethod(combs, propnames, consts)
(h, dh) = delta.evaluate(yu, Theta)
result.update(propertyDict(combinations.keys(), h, dh))
results.append(result.to_frame(index))
return condframe.join(pd.concat(results))
else:
symbols = list(condframe.columns) + list(constants.keys())
parameters = set(x for (y, x) in derivatives.values())
props = dict()
for x in parameters:
props[crypto(x)] = diff(potential, x, symbols)
combs = dict()
for (z, (y, x)) in derivatives.items():
if y == freeEnergy:
combs[z] = crypto(x)
else:
dydx = diff(properties[y], x, symbols)
props[crypto(z)] = "%s - (%s)*(%s)" % (dydx, props[crypto(x)], properties[y])
combs[z] = "%s + (%s)*(%s)" % (crypto(z), crypto(x), y)
unwanted = sum([[x, errorTitle(x)] for x in props.keys()], [])
return self.reweighting(potential, dict(properties, **props), {},
dict(combs, **combinations), condframe, reference,
**constants).drop(unwanted, axis=1)
# ======================================================================================
def pmf(self, potential, property, bins=10, interval=None, **constants):
if mics.verbose:
info("\n=== Computing PMF with %s ===" % self.engine.__class__.__name__)
info("Reduced potential:", potential)
u = self.__compute__(potential, constants)
z = self.__compute__(property, constants)
if interval:
(zmin, zmax) = interval
else:
zmin = min(np.amin(x[0, :]) for x in z)
zmax = max(np.amax(x[0, :]) for x in z)
delta = (zmax - zmin)/bins
ibin = [np.floor((x[0:1, :] - zmin)/delta).astype(int) for x in z]
results = list()
for i in range(bins):
zc = zmin + delta*(i + 0.5)
mics.verbose and info("Bin[%d]:" % (i + 1), "%s = %s" % (property, str(zc)))
y = [np.equal(x, i).astype(np.float) for x in ibin]
(yu, Theta) = self.engine.__reweight__(self, u, y)
if yu[1] > 0.0:
dyu = np.sqrt(max(0.0, Theta[1, 1]))
results.append([zc, -np.log(yu[1]), dyu/yu[1]])
return pd.DataFrame(results, columns=[property, "pmf", errorTitle("pmf")])
# ======================================================================================
def histograms(self, property="u0", bins=100, **constants):
if property == "u0":
y = self.u0
elif property == "state":
w = np.arange(self.m) + 1
wsum = sum(w)
y = [wsum*np.average(p, axis=0, weights=w) for p in self.P]
elif property == "potential":
y = [self.u[i][i, :] for i in range(self.m)]
else:
y = self.__compute__(property, constants)
ymin = min([np.amin(x) for x in y])
ymax = max([np.amax(x) for x in y])
delta = (ymax - ymin)/bins
center = [ymin + delta*(i + 0.5) for i in range(bins)]
frame = pd.DataFrame({property: center})
for i in range(self.m):
frame["state %s" % (i+1)] = np.histogram(y[i], bins, (ymin, ymax))[0]
return frame
| [
"mics.utils.InputError",
"numpy.log",
"numpy.equal",
"mics.utils.stdError",
"numpy.array",
"mics.utils.multimap",
"numpy.arange",
"numpy.histogram",
"mics.utils.cases",
"mics.funcs.diff",
"pandas.concat",
"pandas.DataFrame",
"mics.utils.info",
"mics.funcs.deltaMethod",
"numpy.amin",
"n... | [((1484, 1529), 'numpy.array', 'np.array', (['[sample.neff for sample in samples]'], {}), '([sample.neff for sample in samples])\n', (1492, 1529), True, 'import numpy as np\n'), ((1933, 1948), 'mics.utils.bennett', 'bennett', (['self.u'], {}), '(self.u)\n', (1940, 1948), False, 'from mics.utils import bennett\n'), ((12889, 12921), 'pandas.DataFrame', 'pd.DataFrame', (['{property: center}'], {}), '({property: center})\n', (12901, 12921), True, 'import pandas as pd\n'), ((1166, 1205), 'mics.utils.info', 'info', (['"""\n=== Setting up mixture ==="""'], {}), '("""\n=== Setting up mixture ===""")\n', (1170, 1205), False, 'from mics.utils import info\n'), ((1215, 1272), 'mics.utils.info', 'info', (['"""Analysis method: """', 'self.engine.__class__.__name__'], {}), "('Analysis method: ', self.engine.__class__.__name__)\n", (1219, 1272), False, 'from mics.utils import info\n'), ((1285, 1314), 'mics.utils.info', 'info', (['"""Number of samples:"""', 'm'], {}), "('Number of samples:', m)\n", (1289, 1314), False, 'from mics.utils import info\n'), ((1353, 1391), 'mics.utils.InputError', 'InputError', (['"""list of samples is empty"""'], {}), "('list of samples is empty')\n", (1363, 1391), False, 'from mics.utils import InputError\n'), ((1629, 1658), 'mics.utils.info', 'info', (['"""Sample sizes:"""', 'self.n'], {}), "('Sample sizes:', self.n)\n", (1633, 1658), False, 'from mics.utils import info\n'), ((1671, 1713), 'mics.utils.info', 'info', (['"""Effective sample sizes:"""', 'self.neff'], {}), "('Effective sample sizes:', self.neff)\n", (1675, 1713), False, 'from mics.utils import info\n'), ((1856, 1892), 'mics.utils.multimap', 'multimap', (['potentials', 'sample.dataset'], {}), '(potentials, sample.dataset)\n', (1864, 1892), False, 'from mics.utils import multimap\n'), ((1974, 2016), 'mics.utils.info', 'info', (['"""Initial free-energy guess:"""', 'self.f'], {}), "('Initial free-energy guess:', self.f)\n", (1978, 2016), False, 'from mics.utils import info\n'), 
((8222, 8311), 'mics.utils.info', 'info', (['("""\n=== Performing reweighting with %s ===""" % self.engine.__class__.__name__\n )'], {}), '("""\n=== Performing reweighting with %s ===""" % self.engine.__class__.\n __name__)\n', (8226, 8311), False, 'from mics.utils import info\n'), ((8316, 8353), 'mics.utils.info', 'info', (['"""Reduced potential:"""', 'potential'], {}), "('Reduced potential:', potential)\n", (8320, 8353), False, 'from mics.utils import info\n'), ((8508, 8571), 'mics.utils.InputError', 'InputError', (["('Word % is reserved for free energies' % freeEnergy)"], {}), "('Word % is reserved for free energies' % freeEnergy)\n", (8518, 8571), False, 'from mics.utils import InputError\n'), ((8592, 8621), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'conditions'}), '(data=conditions)\n', (8604, 8621), True, 'import pandas as pd\n'), ((9074, 9090), 'mics.utils.cases', 'cases', (['condframe'], {}), '(condframe)\n', (9079, 9090), False, 'from mics.utils import cases\n'), ((11093, 11168), 'mics.utils.info', 'info', (['("""\n=== Computing PMF with %s ===""" % self.engine.__class__.__name__)'], {}), '("""\n=== Computing PMF with %s ===""" % self.engine.__class__.__name__)\n', (11097, 11168), False, 'from mics.utils import info\n'), ((11178, 11215), 'mics.utils.info', 'info', (['"""Reduced potential:"""', 'potential'], {}), "('Reduced potential:', potential)\n", (11182, 11215), False, 'from mics.utils import info\n'), ((2458, 2489), 'mics.utils.multimap', 'multimap', (['funcs', 'sample.dataset'], {}), '(funcs, sample.dataset)\n', (2466, 2489), False, 'from mics.utils import multimap\n'), ((8380, 8419), 'mics.utils.info', 'info', (['"""Provided constants: """', 'constants'], {}), "('Provided constants: ', constants)\n", (8384, 8419), False, 'from mics.utils import info\n'), ((8965, 9005), 'mics.funcs.deltaMethod', 'deltaMethod', (['combs', 'propnames', 'constants'], {}), '(combs, propnames, constants)\n', (8976, 9005), False, 'from mics.funcs import 
deltaMethod\n'), ((9865, 9883), 'pandas.concat', 'pd.concat', (['results'], {}), '(results)\n', (9874, 9883), True, 'import pandas as pd\n'), ((10133, 10160), 'mics.funcs.diff', 'diff', (['potential', 'x', 'symbols'], {}), '(potential, x, symbols)\n', (10137, 10160), False, 'from mics.funcs import diff\n'), ((12707, 12717), 'numpy.amin', 'np.amin', (['x'], {}), '(x)\n', (12714, 12717), True, 'import numpy as np\n'), ((12751, 12761), 'numpy.amax', 'np.amax', (['x'], {}), '(x)\n', (12758, 12761), True, 'import numpy as np\n'), ((12994, 13032), 'numpy.histogram', 'np.histogram', (['y[i]', 'bins', '(ymin, ymax)'], {}), '(y[i], bins, (ymin, ymax))\n', (13006, 13032), True, 'import numpy as np\n'), ((3453, 3463), 'numpy.diag', 'np.diag', (['T'], {}), '(T)\n', (3460, 3463), True, 'import numpy as np\n'), ((9139, 9179), 'mics.utils.info', 'info', (["('Condition[%s]' % index)", 'condition'], {}), "('Condition[%s]' % index, condition)\n", (9143, 9179), False, 'from mics.utils import info\n'), ((9499, 9514), 'mics.utils.stdError', 'stdError', (['Theta'], {}), '(Theta)\n', (9507, 9514), False, 'from mics.utils import stdError\n'), ((10120, 10129), 'mics.utils.crypto', 'crypto', (['x'], {}), '(x)\n', (10126, 10129), False, 'from mics.utils import crypto\n'), ((10307, 10316), 'mics.utils.crypto', 'crypto', (['x'], {}), '(x)\n', (10313, 10316), False, 'from mics.utils import crypto\n'), ((10366, 10397), 'mics.funcs.diff', 'diff', (['properties[y]', 'x', 'symbols'], {}), '(properties[y], x, symbols)\n', (10370, 10397), False, 'from mics.funcs import diff\n'), ((11411, 11427), 'numpy.amin', 'np.amin', (['x[0, :]'], {}), '(x[0, :])\n', (11418, 11427), True, 'import numpy as np\n'), ((11463, 11479), 'numpy.amax', 'np.amax', (['x[0, :]'], {}), '(x[0, :])\n', (11470, 11479), True, 'import numpy as np\n'), ((11543, 11579), 'numpy.floor', 'np.floor', (['((x[0:1, :] - zmin) / delta)'], {}), '((x[0:1, :] - zmin) / delta)\n', (11551, 11579), True, 'import numpy as np\n'), ((12123, 12140), 
'mics.utils.errorTitle', 'errorTitle', (['"""pmf"""'], {}), "('pmf')\n", (12133, 12140), False, 'from mics.utils import errorTitle\n'), ((12404, 12421), 'numpy.arange', 'np.arange', (['self.m'], {}), '(self.m)\n', (12413, 12421), True, 'import numpy as np\n'), ((9605, 9642), 'mics.funcs.deltaMethod', 'deltaMethod', (['combs', 'propnames', 'consts'], {}), '(combs, propnames, consts)\n', (9616, 9642), False, 'from mics.funcs import deltaMethod\n'), ((10424, 10433), 'mics.utils.crypto', 'crypto', (['z'], {}), '(z)\n', (10430, 10433), False, 'from mics.utils import crypto\n'), ((10604, 10617), 'mics.utils.errorTitle', 'errorTitle', (['x'], {}), '(x)\n', (10614, 10617), False, 'from mics.utils import errorTitle\n'), ((11804, 11818), 'numpy.equal', 'np.equal', (['x', 'i'], {}), '(x, i)\n', (11812, 11818), True, 'import numpy as np\n'), ((12474, 12506), 'numpy.average', 'np.average', (['p'], {'axis': '(0)', 'weights': 'w'}), '(p, axis=0, weights=w)\n', (12484, 12506), True, 'import numpy as np\n'), ((2282, 2320), 'mics.funcs.func', 'func', (['functions', 'self.names', 'constants'], {}), '(functions, self.names, constants)\n', (2286, 2320), False, 'from mics.funcs import func\n'), ((2376, 2406), 'mics.funcs.func', 'func', (['f', 'self.names', 'constants'], {}), '(f, self.names, constants)\n', (2380, 2406), False, 'from mics.funcs import func\n'), ((10547, 10556), 'mics.utils.crypto', 'crypto', (['z'], {}), '(z)\n', (10553, 10556), False, 'from mics.utils import crypto\n'), ((10558, 10567), 'mics.utils.crypto', 'crypto', (['x'], {}), '(x)\n', (10564, 10567), False, 'from mics.utils import crypto\n'), ((12032, 12045), 'numpy.log', 'np.log', (['yu[1]'], {}), '(yu[1])\n', (12038, 12045), True, 'import numpy as np\n'), ((10469, 10478), 'mics.utils.crypto', 'crypto', (['x'], {}), '(x)\n', (10475, 10478), False, 'from mics.utils import crypto\n')] |
from django.test import TestCase
from dojo.tools.scantist.parser import ScantistJSONParser
from dojo.models import Test
class TestScantistJSONParser(TestCase):
    """Unit tests for ScantistJSONParser with zero, one, and many findings."""

    def test_parse_without_file_has_no_findings(self):
        parser = ScantistJSONParser(None, Test())
        self.assertEqual(0, len(parser.items))

    def test_parse_file_with_no_vuln_has_no_findings(self):
        # ``with`` guarantees the fixture file is closed even if parsing
        # fails (the original leaked the file handle).
        with open("dojo/unittests/scans/scantist/scantist-no-vuln.json") as testfile:
            parser = ScantistJSONParser(testfile, Test())
        self.assertEqual(0, len(parser.items))

    def test_parse_file_with_one_vuln_has_one_finding(self):
        with open("dojo/unittests/scans/scantist/scantist-one-vuln.json") as testfile:
            parser = ScantistJSONParser(testfile, Test())
        self.assertEqual(1, len(parser.items))
        findings = parser.items[0]
        self.assertEqual(findings.title, findings.cve + '|' + findings.component_name)
        self.assertEqual(
            findings.description,
            "Integer overflow in the crypt_raw method in the key-stretching implementation in jBCrypt before 0.4 "
            "makes it easier for remote attackers to determine cleartext values of password hashes via a brute-force "
            "attack against hashes associated with the maximum exponent.",
        )
        self.assertEqual(
            findings.severity, "Medium"
        )  # Negligible is translated to Informational

    def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
        with open("dojo/unittests/scans/scantist/scantist-many-vuln.json") as testfile:
            parser = ScantistJSONParser(testfile, Test())
        self.assertTrue(len(parser.items) > 2)
| [
"dojo.models.Test"
] | [((260, 266), 'dojo.models.Test', 'Test', ([], {}), '()\n', (264, 266), False, 'from dojo.models import Test\n'), ((501, 507), 'dojo.models.Test', 'Test', ([], {}), '()\n', (505, 507), False, 'from dojo.models import Test\n'), ((744, 750), 'dojo.models.Test', 'Test', ([], {}), '()\n', (748, 750), False, 'from dojo.models import Test\n'), ((1622, 1628), 'dojo.models.Test', 'Test', ([], {}), '()\n', (1626, 1628), False, 'from dojo.models import Test\n')] |